aboutsummaryrefslogtreecommitdiff
path: root/PhysX_3.4/Source/PhysXCooking/src/convex
diff options
context:
space:
mode:
authorgit perforce import user <a@b>2016-10-25 12:29:14 -0600
committerSheikh Dawood Abdul Ajees <Sheikh Dawood Abdul Ajees>2016-10-25 18:56:37 -0500
commit3dfe2108cfab31ba3ee5527e217d0d8e99a51162 (patch)
treefa6485c169e50d7415a651bf838f5bcd0fd3bfbd /PhysX_3.4/Source/PhysXCooking/src/convex
downloadphysx-3.4-3dfe2108cfab31ba3ee5527e217d0d8e99a51162.tar.xz
physx-3.4-3dfe2108cfab31ba3ee5527e217d0d8e99a51162.zip
Initial commit:
PhysX 3.4.0 Update @ 21294896 APEX 1.4.0 Update @ 21275617 [CL 21300167]
Diffstat (limited to 'PhysX_3.4/Source/PhysXCooking/src/convex')
-rw-r--r--PhysX_3.4/Source/PhysXCooking/src/convex/BigConvexDataBuilder.cpp353
-rw-r--r--PhysX_3.4/Source/PhysXCooking/src/convex/BigConvexDataBuilder.h100
-rw-r--r--PhysX_3.4/Source/PhysXCooking/src/convex/ConvexHullBuilder.cpp797
-rw-r--r--PhysX_3.4/Source/PhysXCooking/src/convex/ConvexHullBuilder.h95
-rw-r--r--PhysX_3.4/Source/PhysXCooking/src/convex/ConvexHullLib.cpp299
-rw-r--r--PhysX_3.4/Source/PhysXCooking/src/convex/ConvexHullLib.h82
-rw-r--r--PhysX_3.4/Source/PhysXCooking/src/convex/ConvexHullUtils.cpp925
-rw-r--r--PhysX_3.4/Source/PhysXCooking/src/convex/ConvexHullUtils.h177
-rw-r--r--PhysX_3.4/Source/PhysXCooking/src/convex/ConvexMeshBuilder.cpp504
-rw-r--r--PhysX_3.4/Source/PhysXCooking/src/convex/ConvexMeshBuilder.h100
-rw-r--r--PhysX_3.4/Source/PhysXCooking/src/convex/ConvexPolygonsBuilder.cpp1328
-rw-r--r--PhysX_3.4/Source/PhysXCooking/src/convex/ConvexPolygonsBuilder.h64
-rw-r--r--PhysX_3.4/Source/PhysXCooking/src/convex/InflationConvexHullLib.cpp1481
-rw-r--r--PhysX_3.4/Source/PhysXCooking/src/convex/InflationConvexHullLib.h133
-rw-r--r--PhysX_3.4/Source/PhysXCooking/src/convex/QuickHullConvexHullLib.cpp2383
-rw-r--r--PhysX_3.4/Source/PhysXCooking/src/convex/QuickHullConvexHullLib.h97
-rw-r--r--PhysX_3.4/Source/PhysXCooking/src/convex/VolumeIntegration.cpp797
-rw-r--r--PhysX_3.4/Source/PhysXCooking/src/convex/VolumeIntegration.h102
18 files changed, 9817 insertions, 0 deletions
diff --git a/PhysX_3.4/Source/PhysXCooking/src/convex/BigConvexDataBuilder.cpp b/PhysX_3.4/Source/PhysXCooking/src/convex/BigConvexDataBuilder.cpp
new file mode 100644
index 00000000..5ab5965d
--- /dev/null
+++ b/PhysX_3.4/Source/PhysXCooking/src/convex/BigConvexDataBuilder.cpp
@@ -0,0 +1,353 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2016 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+
+#include "PsUserAllocated.h"
+#include "PsUtilities.h"
+#include "PsMathUtils.h"
+#include "PsVecMath.h"
+
+#include "PxCooking.h"
+
+#include "GuConvexMeshData.h"
+#include "GuBigConvexData2.h"
+#include "GuIntersectionRayPlane.h"
+#include "GuSerialize.h"
+
+#include "BigConvexDataBuilder.h"
+#include "EdgeList.h"
+
+#include "ConvexHullBuilder.h"
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+using namespace physx;
+using namespace Gu;
+using namespace Ps::aos;
+
+// Serialization format versions for the 'SUPM' (support map) and 'GAUS' (gauss map) chunks.
+static const PxU32 gSupportVersion = 0;
+static const PxU32 gVersion = 0;
+
+// Builds the big-convex support data (sample map + valencies) for a cooked hull.
+// The builder does not take ownership of 'hull', 'gm' or 'hullVerts'; they must
+// outlive the builder. Results are written into 'gm' (mSVM).
+BigConvexDataBuilder::BigConvexDataBuilder(const Gu::ConvexHullData* hull, BigConvexData* gm, const PxVec3* hullVerts) : mHullVerts(hullVerts)
+{
+ mSVM = gm;
+ mHull = hull;
+}
+
+// Nothing to release: mHull/mSVM/mHullVerts are externally owned.
+BigConvexDataBuilder::~BigConvexDataBuilder()
+{
+}
+
+// Allocates the sample buffer: two PxU8 entries per sample (one for the positive
+// and one for the negative direction — see precompute()). mNbSamples must have
+// been set by the caller before this runs.
+bool BigConvexDataBuilder::initialize()
+{
+ mSVM->mData.mSamples = PX_NEW(PxU8)[mSVM->mData.mNbSamples*2u];
+
+#if PX_DEBUG
+// printf("SVM: %d bytes\n", mNbSamples*sizeof(PxU8)*2);
+#endif
+
+ return true;
+}
+
+// Serializes the support map ('SUPM' header), the gauss map ('GAUS' header with
+// subdivision + sample count + raw samples), then the valencies chunk.
+// \param stream           [out] destination stream
+// \param platformMismatch [in] true to byte-swap multi-byte values for the target platform
+// \return false if any header fails to write
+bool BigConvexDataBuilder::save(PxOutputStream& stream, bool platformMismatch) const
+{
+ // Export header
+ if(!WriteHeader('S', 'U', 'P', 'M', gSupportVersion, platformMismatch, stream))
+ return false;
+
+ // Save base gaussmap
+// if(!GaussMapBuilder::Save(stream, platformMismatch)) return false;
+ // Export header
+ if(!WriteHeader('G', 'A', 'U', 'S', gVersion, platformMismatch, stream))
+ return false;
+
+ // Export basic info
+ // stream.StoreDword(mSubdiv);
+ writeDword(mSVM->mData.mSubdiv, platformMismatch, stream); // PT: could now write Word here
+ // stream.StoreDword(mNbSamples);
+ writeDword(mSVM->mData.mNbSamples, platformMismatch, stream); // PT: could now write Word here
+
+ // Save map data
+ // It's an array of bytes so we don't care about 'PlatformMismatch'
+ stream.write(mSVM->mData.mSamples, sizeof(PxU8)*mSVM->mData.mNbSamples*2);
+
+ if(!saveValencies(stream, platformMismatch))
+ return false;
+
+ return true;
+}
+
+//////////////////////////////////////////////////////////////////////////
+// compute valencies for each vertex
+// we dont compute the edges again here, we have them temporary stored in mHullDataFacesByAllEdges8 structure
+// Computes, for every hull vertex, its valency (number of adjacent vertices) and
+// the list of adjacent vertex indices, ordered by walking the fan of polygons
+// around the vertex via the edge->faces table (mHullDataFacesByAllEdges8).
+// \param meshBuilder [in] hull builder holding polygons, vertex data and the
+//                         per-edge face pairs (must be valid — asserted below)
+// \return true (allocation failures are not reported here)
+bool BigConvexDataBuilder::computeValencies(const ConvexHullBuilder& meshBuilder)
+{
+ PX_ASSERT(meshBuilder.mHullDataFacesByAllEdges8);
+
+ // Create valencies
+ const PxU32 numVertices = meshBuilder.mHull->mNbHullVertices;
+ mSVM->mData.mNbVerts = numVertices;
+
+ // Get ram for valencies
+ mSVM->mData.mValencies = PX_NEW(Gu::Valency)[mSVM->mData.mNbVerts];
+ PxMemZero(mSVM->mData.mValencies, numVertices*sizeof(Gu::Valency));
+ // Hull vertex indices are PxU8, so 256 markers always suffice; only the
+ // first numVertices entries are used (and zeroed).
+ PxU8 vertexMarker[256];
+ PxMemZero(vertexMarker,numVertices);
+ // Get ram for adjacent vertices references
+ // Each edge contributes one adjacency entry to each of its two endpoints.
+ mSVM->mData.mAdjacentVerts = PX_NEW(PxU8)[meshBuilder.mHull->mNbEdges*2u];
+
+ // Compute valencies
+ // First pass: count how often each vertex is referenced by the polygons.
+ for (PxU32 i = 0; i < meshBuilder.mHull->mNbPolygons; i++)
+ {
+ PxU32 numVerts = meshBuilder.mHullDataPolygons[i].mNbVerts;
+ const PxU8* Data = meshBuilder.mHullDataVertexData8 + meshBuilder.mHullDataPolygons[i].mVRef8;
+ for (PxU32 j = 0; j < numVerts; j++)
+ {
+ mSVM->mData.mValencies[Data[j]].mCount++;
+ PX_ASSERT(mSVM->mData.mValencies[Data[j]].mCount != 0xffff);
+ }
+ }
+
+ // Create offsets
+ mSVM->CreateOffsets();
+
+ // mNbAdjVerts = mOffsets[mNbVerts-1] + mValencies[mNbVerts-1];
+ mSVM->mData.mNbAdjVerts = PxU32(mSVM->mData.mValencies[mSVM->mData.mNbVerts - 1].mOffset + mSVM->mData.mValencies[mSVM->mData.mNbVerts - 1].mCount);
+ PX_ASSERT(mSVM->mData.mNbAdjVerts == PxU32(meshBuilder.mHull->mNbEdges * 2));
+
+ // Create adjacent vertices
+ // parse the polygons and its vertices
+ // Second pass: for each vertex (handled once, guarded by vertexMarker) walk
+ // the polygon fan around it and record the neighbor vertex met on each face.
+ // NOTE: the loop below advances mOffset while filling; CreateOffsets() is
+ // re-run at the end to restore the offsets.
+ for (PxU32 i = 0; i < meshBuilder.mHull->mNbPolygons; i++)
+ {
+ PxU32 numVerts = meshBuilder.mHullDataPolygons[i].mNbVerts;
+ const PxU8* Data = meshBuilder.mHullDataVertexData8 + meshBuilder.mHullDataPolygons[i].mVRef8;
+ for (PxU32 j = 0; j < numVerts; j++)
+ {
+ const PxU8 vertexIndex = Data[j];
+ PxU8 numAdj = 0;
+ // if we did not parsed this vertex, traverse to the adjacent face and then
+ // again to next till we hit back the original polygon
+ if(vertexMarker[vertexIndex] == 0)
+ {
+ PxU8 prevIndex = Data[(j+1)%numVerts];
+ mSVM->mData.mAdjacentVerts[mSVM->mData.mValencies[vertexIndex].mOffset++] = prevIndex;
+ numAdj++;
+ // now traverse the neighbors
+ // Each edge stores its two incident faces; pick the one that is not us.
+ PxU8 n0 = meshBuilder.mHullDataFacesByAllEdges8[(meshBuilder.mHullDataPolygons[i].mVRef8 + j)*2];
+ PxU8 n1 = meshBuilder.mHullDataFacesByAllEdges8[(meshBuilder.mHullDataPolygons[i].mVRef8 + j)*2 + 1];
+ PxU32 neighborPolygon = n0 == i ? n1 : n0;
+ while (neighborPolygon != i)
+ {
+ PxU32 numNeighborVerts = meshBuilder.mHullDataPolygons[neighborPolygon].mNbVerts;
+ const PxU8* neighborData = meshBuilder.mHullDataVertexData8 + meshBuilder.mHullDataPolygons[neighborPolygon].mVRef8;
+ PxU32 nextEdgeIndex = 0;
+ // search in the neighbor face for the tested vertex
+ for (PxU32 k = 0; k < numNeighborVerts; k++)
+ {
+ // search the vertexIndex
+ if(neighborData[k] == vertexIndex)
+ {
+ const PxU8 nextIndex = neighborData[(k+1)%numNeighborVerts];
+ // next index already there, pick the previous
+ // (the neighbor face may wind the shared edge in either direction)
+ if(nextIndex == prevIndex)
+ {
+ prevIndex = k == 0 ? neighborData[numNeighborVerts - 1] : neighborData[k-1];
+ nextEdgeIndex = k == 0 ? numNeighborVerts - 1 : k-1;
+ }
+ else
+ {
+ prevIndex = nextIndex;
+ nextEdgeIndex = k;
+ }
+ mSVM->mData.mAdjacentVerts[mSVM->mData.mValencies[vertexIndex].mOffset++] = prevIndex;
+ numAdj++;
+ break;
+ }
+ }
+
+ // now move to next neighbor
+ n0 = meshBuilder.mHullDataFacesByAllEdges8[(meshBuilder.mHullDataPolygons[neighborPolygon].mVRef8 + nextEdgeIndex)*2];
+ n1 = meshBuilder.mHullDataFacesByAllEdges8[(meshBuilder.mHullDataPolygons[neighborPolygon].mVRef8 + nextEdgeIndex)*2 + 1];
+ neighborPolygon = n0 == neighborPolygon ? n1 : n0;
+ }
+ // Mark the vertex as done (numAdj > 0 here, so the marker is non-zero).
+ vertexMarker[vertexIndex] = numAdj;
+ }
+ }
+ }
+
+ // Recreate offsets
+ mSVM->CreateOffsets();
+ return true;
+}
+
+//////////////////////////////////////////////////////////////////////////
+// compute the min dot product from the verts for given dir
+// Finds (by hill climbing over the adjacency graph) the hull vertex minimizing
+// negativeDir * dot(vertex, dir) — i.e. the extreme vertex along +dir or -dir.
+// \param dir         [in] sampled direction (normalized by the caller)
+// \param startIndex_ [in/out] seed vertex; updated with the found extreme vertex,
+//                    warm-starting the next sample in a nearby direction
+// \param negativeDir [in] +1.0f to minimize along dir, -1.0f to maximize
+void BigConvexDataBuilder::precomputeSample(const PxVec3& dir, PxU8& startIndex_, float negativeDir)
+{
+ PxU8 startIndex = startIndex_;
+
+ const PxVec3* verts = mHullVerts;
+ const Valency* valency = mSVM->mData.mValencies;
+ const PxU8* adjacentVerts = mSVM->mData.mAdjacentVerts;
+
+ // we have only 256 verts
+ // Visited-set as a 256-bit map, prevents cycling on coplanar neighbors.
+ PxU32 smallBitMap[8] = {0,0,0,0,0,0,0,0};
+
+ float minimum = negativeDir * verts[startIndex].dot(dir);
+ PxU32 initialIndex = startIndex;
+ do
+ {
+ initialIndex = startIndex;
+ const PxU32 numNeighbours = valency[startIndex].mCount;
+ const PxU32 offset = valency[startIndex].mOffset;
+
+ // Greedily step to the best unvisited neighbor; stop when no neighbor improves.
+ for (PxU32 a = 0; a < numNeighbours; ++a)
+ {
+ const PxU8 neighbourIndex = adjacentVerts[offset + a];
+ const float dist = negativeDir * verts[neighbourIndex].dot(dir);
+ if (dist < minimum)
+ {
+ const PxU32 ind = PxU32(neighbourIndex >> 5);
+ const PxU32 mask = PxU32(1 << (neighbourIndex & 31));
+ if ((smallBitMap[ind] & mask) == 0)
+ {
+ smallBitMap[ind] |= mask;
+ minimum = dist;
+ startIndex = neighbourIndex;
+ }
+ }
+ }
+
+ } while (startIndex != initialIndex);
+
+ startIndex_ = startIndex;
+}
+
+//////////////////////////////////////////////////////////////////////////
+// Precompute the min/max vertices for cube directions.
+// Precomputes the min/max support vertices for 6*subdiv*subdiv cube-mapped
+// directions and stores them in the sample buffer (first half: positive
+// direction extremes, second half: negative).
+// \param subdiv [in] per-face subdivision of the direction cube
+// NOTE(review): 'halfSubdiv' is 0 when subdiv == 1, making iSubDiv/jSubDiv a
+// division by zero — callers presumably always pass subdiv > 1; confirm.
+bool BigConvexDataBuilder::precompute(PxU32 subdiv)
+{
+ mSVM->mData.mSubdiv = Ps::to16(subdiv);
+ mSVM->mData.mNbSamples = Ps::to16(6 * subdiv*subdiv);
+
+ if (!initialize())
+ return false;
+
+ // Hill-climbing seeds for the 12 direction permutations, kept across
+ // iterations so consecutive (similar) directions converge quickly.
+ PxU8 startIndex[12] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
+ PxU8 startIndex2[12] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
+
+ const float halfSubdiv = float(subdiv - 1) * 0.5f;
+ // Only the triangle i >= j is sampled; the j,i mirror is covered by the
+ // permuted directions (offset2 below).
+ for (PxU32 j = 0; j < subdiv; j++)
+ {
+ for (PxU32 i = j; i < subdiv; i++)
+ {
+ const float iSubDiv = 1.0f - i / halfSubdiv;
+ const float jSubDiv = 1.0f - j / halfSubdiv;
+
+ PxVec3 tempDir(1.0f, iSubDiv, jSubDiv);
+ // we need to normalize only once, then we permute the components
+ // as before for each i,j and j,i face direction
+ tempDir.normalize();
+
+ const PxVec3 dirs[12] = {
+ PxVec3(-tempDir.x, tempDir.y, tempDir.z),
+ PxVec3(tempDir.x, tempDir.y, tempDir.z),
+
+ PxVec3(tempDir.z, -tempDir.x, tempDir.y),
+ PxVec3(tempDir.z, tempDir.x, tempDir.y),
+
+ PxVec3(tempDir.y, tempDir.z, -tempDir.x),
+ PxVec3(tempDir.y, tempDir.z, tempDir.x),
+
+ PxVec3(-tempDir.x, tempDir.z, tempDir.y),
+ PxVec3(tempDir.x, tempDir.z, tempDir.y),
+
+ PxVec3(tempDir.y, -tempDir.x, tempDir.z),
+ PxVec3(tempDir.y, tempDir.x, tempDir.z),
+
+ PxVec3(tempDir.z, tempDir.y, -tempDir.x),
+ PxVec3(tempDir.z, tempDir.y, tempDir.x)
+ };
+
+ // compute in each direction + negative/positive dot, we have
+ // then two start indexes, which are used then for hill climbing
+ for (PxU32 dStep = 0; dStep < 12; dStep++)
+ {
+ precomputeSample(dirs[dStep], startIndex[dStep], 1.0f);
+ precomputeSample(dirs[dStep], startIndex2[dStep], -1.0f);
+ }
+
+ // decompose the vector results into face directions
+ for (PxU32 k = 0; k < 6; k++)
+ {
+ const PxU32 ksub = k*subdiv*subdiv;
+ const PxU32 offset = j + i*subdiv + ksub;
+ const PxU32 offset2 = i + j*subdiv + ksub;
+ PX_ASSERT(offset < mSVM->mData.mNbSamples);
+ PX_ASSERT(offset2 < mSVM->mData.mNbSamples);
+
+ mSVM->mData.mSamples[offset] = startIndex[k];
+ mSVM->mData.mSamples[offset + mSVM->mData.mNbSamples] = startIndex2[k];
+
+ mSVM->mData.mSamples[offset2] = startIndex[k + 6];
+ mSVM->mData.mSamples[offset2 + mSVM->mData.mNbSamples] = startIndex2[k + 6];
+ }
+ }
+ }
+ return true;
+}
+
+// Serialization format version of the 'VALE' chunk.
+static const PxU32 gValencyVersion = 2;
+
+//////////////////////////////////////////////////////////////////////////
+
+// Serializes the valencies chunk: counts (as a compressed index buffer) followed
+// by the raw adjacent-vertex byte array.
+// \param stream           [out] destination stream
+// \param platformMismatch [in] true to byte-swap for the target platform
+bool BigConvexDataBuilder::saveValencies(PxOutputStream& stream, bool platformMismatch) const
+{
+ // Export header
+ if(!WriteHeader('V', 'A', 'L', 'E', gValencyVersion, platformMismatch, stream))
+ return false;
+
+ writeDword(mSVM->mData.mNbVerts, platformMismatch, stream);
+ writeDword(mSVM->mData.mNbAdjVerts, platformMismatch, stream);
+
+ {
+ // Copy the 16-bit counts into a temp buffer so they can be stored with
+ // the smallest possible index size (maxIndex-driven).
+ PxU16* temp = PX_NEW_TEMP(PxU16)[mSVM->mData.mNbVerts];
+ for(PxU32 i=0;i<mSVM->mData.mNbVerts;i++)
+ temp[i] = mSVM->mData.mValencies[i].mCount;
+
+ const PxU32 maxIndex = computeMaxIndex(temp, mSVM->mData.mNbVerts);
+ writeDword(maxIndex, platformMismatch, stream);
+ StoreIndices(Ps::to16(maxIndex), mSVM->mData.mNbVerts, temp, stream, platformMismatch);
+
+ PX_DELETE_POD(temp);
+ }
+ // Adjacency indices are single bytes: no endian swap needed.
+ stream.write(mSVM->mData.mAdjacentVerts, mSVM->mData.mNbAdjVerts);
+
+ return true;
+}
diff --git a/PhysX_3.4/Source/PhysXCooking/src/convex/BigConvexDataBuilder.h b/PhysX_3.4/Source/PhysXCooking/src/convex/BigConvexDataBuilder.h
new file mode 100644
index 00000000..2abf5993
--- /dev/null
+++ b/PhysX_3.4/Source/PhysXCooking/src/convex/BigConvexDataBuilder.h
@@ -0,0 +1,100 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2016 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#ifndef BIG_CONVEX_DATA_BUILDER_H
+#define BIG_CONVEX_DATA_BUILDER_H
+
+#include "foundation/PxMemory.h"
+#include "PsVecMath.h"
+
+namespace physx
+{
+ struct HullTriangleData;
+ class BigConvexData;
+ class ConvexHullBuilder;
+
+ //////////////////////////////////////////////////////////////////////////
+ //! Valencies creation structure
+ struct ValenciesCreate
+ {
+ //! Constructor
+ ValenciesCreate() { PxMemZero(this, sizeof(*this)); }
+
+ PxU32 nbVerts; //!< Number of vertices
+ PxU32 nbFaces; //!< Number of faces
+ const PxU32* dFaces; //!< List of faces (triangle list)
+ const PxU16* wFaces; //!< List of faces (triangle list)
+ bool adjacentList; //!< Compute list of adjacent vertices or not
+ };
+
+ //////////////////////////////////////////////////////////////////////////
+
+ //! Builds the support-vertex map and valency data (BigConvexData) used to
+ //! accelerate support queries on hulls with many vertices. Does not own the
+ //! hull/data/vertex pointers passed to the constructor.
+ class BigConvexDataBuilder : public Ps::UserAllocated
+ {
+ public:
+ BigConvexDataBuilder(const Gu::ConvexHullData* hull, BigConvexData* gm, const PxVec3* hullVerts);
+ ~BigConvexDataBuilder();
+ // Support vertex map
+ //! Precomputes extreme vertices for 6*subdiv^2 cube-mapped directions.
+ bool precompute(PxU32 subdiv);
+
+ //! Allocates the sample buffer; mNbSamples must be set first.
+ bool initialize();
+
+ //! Serializes support map, gauss map and valencies to 'stream'.
+ bool save(PxOutputStream& stream, bool platformMismatch) const;
+
+ //! Computes per-vertex valencies and adjacency from the hull builder's edge data.
+ bool computeValencies(const ConvexHullBuilder& meshBuilder);
+ //~Support vertex map
+
+ // Valencies
+
+ ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+ /**
+ * Computes valencies and adjacent vertices.
+ * After the call, get results with the appropriate accessors.
+ *
+ * \param vc [in] creation structure
+ * \return true if success.
+ */
+ ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+ bool compute(const ValenciesCreate& vc) const;
+
+ bool saveValencies(PxOutputStream& stream, bool platformMismatch) const;
+ //~Valencies
+ protected:
+ //! Hill-climbs from 'startIndex' to the extreme vertex along +/-dir (negativeDir = 1/-1).
+ PX_FORCE_INLINE void precomputeSample(const PxVec3& dir, PxU8& startIndex, float negativeDir);
+
+ private:
+ const Gu::ConvexHullData* mHull; // source hull (not owned)
+ BigConvexData* mSVM; // output support-vertex-map data (not owned)
+ const PxVec3* mHullVerts; // hull vertex positions (not owned)
+
+ };
+
+}
+
+#endif // BIG_CONVEX_DATA_BUILDER_H
diff --git a/PhysX_3.4/Source/PhysXCooking/src/convex/ConvexHullBuilder.cpp b/PhysX_3.4/Source/PhysXCooking/src/convex/ConvexHullBuilder.cpp
new file mode 100644
index 00000000..3b9c3ac6
--- /dev/null
+++ b/PhysX_3.4/Source/PhysXCooking/src/convex/ConvexHullBuilder.cpp
@@ -0,0 +1,797 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2016 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+#include "foundation/PxMemory.h"
+#include "EdgeList.h"
+#include "GuTriangle32.h"
+#include "GuConvexMesh.h"
+#include "PxCooking.h"
+#include "CookingUtils.h"
+#include "ConvexHullBuilder.h"
+#include "CmRadixSortBuffered.h"
+#include "MeshCleaner.h"
+#include "PsArray.h"
+#include "PsFoundation.h"
+#include "PsVecMath.h"
+
+
+// 7: added mHullDataFacesByVertices8
+// 8: added mEdges
+static const physx::PxU32 gVersion = 8;
+
+using namespace physx;
+using namespace Gu;
+using namespace Ps::aos;
+
+#define USE_PRECOMPUTED_HULL_PROJECTION
+
+//////////////////////////////////////////////////////////////////////////
+// default constructor
+// Initializes all builder-owned buffers to NULL; the target hull descriptor is
+// externally owned. 'buildGRBData' controls whether GPU (GRB) edge data is
+// built and serialized.
+ConvexHullBuilder::ConvexHullBuilder(Gu::ConvexHullData* hull, const bool buildGRBData) :
+ mHullDataHullVertices (NULL),
+ mHullDataPolygons (NULL),
+ mHullDataVertexData8 (NULL),
+ mHullDataFacesByEdges8 (NULL),
+ mHullDataFacesByVertices8 (NULL),
+ mHullDataFacesByAllEdges8 (NULL),
+ mEdgeData16 (NULL),
+ mEdges (NULL),
+ mHull (hull),
+ mBuildGRBData (buildGRBData)
+{
+}
+
+//////////////////////////////////////////////////////////////////////////
+// default destructor
+// Releases every buffer the builder allocated. mHull itself is not owned and
+// is left untouched (its pointers may have been handed off via copy()).
+ConvexHullBuilder::~ConvexHullBuilder()
+{
+ PX_DELETE_POD(mEdgeData16);
+ PX_DELETE_POD(mEdges);
+
+ PX_DELETE_POD(mHullDataHullVertices);
+ PX_DELETE_POD(mHullDataPolygons);
+ PX_DELETE_POD(mHullDataVertexData8);
+ PX_DELETE_POD(mHullDataFacesByEdges8);
+ PX_DELETE_POD(mHullDataFacesByVertices8);
+ PX_DELETE_POD(mHullDataFacesByAllEdges8);
+}
+
+//////////////////////////////////////////////////////////////////////////
+// initialize the convex hull
+// \param nbVerts [in] number of vertices used
+// \param verts [in] vertices array
+// \param indices [in] indices array
+// \param nbPolygons [in] number of polygons
+// \param hullPolygons [in] polygons array
+// Initializes the convex hull from already-computed polygon data: copies the
+// vertices, bakes the polygon/vertex-index tables, builds vertex-map and edge
+// lists, and precomputes per-polygon hull projections.
+// \param nbVerts             [in] number of vertices used (max 255, stored as PxU8)
+// \param verts               [in] vertices array
+// \param indices             [in] indices array
+// \param nbIndices           [in] total number of polygon vertex indices
+// \param nbPolygons          [in] number of polygons (max 255)
+// \param hullPolygons        [in] polygons array
+// \param gaussMapVertexLimit [in] vertex count above which big-convex edge data is built
+// \param doValidation        [in] run edge-list validation and hull polygon checks
+// \param userPolygons        [in] polygons were supplied by the user (affects vertex map table)
+// \return false on validation failure or if the hull has more than 255 polygons
+bool ConvexHullBuilder::init(PxU32 nbVerts, const PxVec3* verts, const PxU32* indices, const PxU32 nbIndices,
+ const PxU32 nbPolygons, const PxHullPolygon* hullPolygons, PxU32 gaussMapVertexLimit, bool doValidation, bool userPolygons)
+{
+ PX_ASSERT(indices);
+ PX_ASSERT(verts);
+ PX_ASSERT(hullPolygons);
+ PX_ASSERT(nbVerts);
+ PX_ASSERT(nbPolygons);
+
+ mHullDataHullVertices = NULL;
+ mHullDataPolygons = NULL;
+ mHullDataVertexData8 = NULL;
+ mHullDataFacesByEdges8 = NULL;
+ mHullDataFacesByVertices8 = NULL;
+ mHullDataFacesByAllEdges8 = NULL;
+
+ mEdges = NULL;
+ mEdgeData16 = NULL;
+
+ mHull->mNbHullVertices = Ps::to8(nbVerts);
+ // allocate additional vec3 for V4 safe load in VolumeInteration
+ // FIX: the extra slot must be a whole PxVec3, i.e. (count + 1) * sizeof(PxVec3).
+ // The previous 'sizeof(PxVec3) * count + 1' only added one byte, so a V4
+ // (16-byte) load of the last vertex could read past the allocation.
+ mHullDataHullVertices = reinterpret_cast<PxVec3*>(PX_ALLOC(sizeof(PxVec3) * (mHull->mNbHullVertices + 1), "PxVec3"));
+ PxMemCopy(mHullDataHullVertices, verts, mHull->mNbHullVertices*sizeof(PxVec3));
+
+ // Cleanup
+ mHull->mNbPolygons = 0;
+ PX_DELETE_POD(mHullDataVertexData8);
+ PX_FREE_AND_RESET(mHullDataPolygons);
+
+ // Polygon indices are stored as PxU8, so more than 255 polygons cannot be represented.
+ if(nbPolygons>255)
+ {
+ Ps::getFoundation().error(PxErrorCode::eINTERNAL_ERROR, __FILE__, __LINE__, "ConvexHullBuilder::init: convex hull has more than 255 polygons!");
+ return false;
+ }
+
+ // Precompute hull polygon structures
+ mHull->mNbPolygons = Ps::to8(nbPolygons);
+ mHullDataPolygons = reinterpret_cast<Gu::HullPolygonData*>(PX_ALLOC(sizeof(Gu::HullPolygonData)*mHull->mNbPolygons, "Gu::HullPolygonData"));
+
+ mHullDataVertexData8 = PX_NEW(PxU8)[nbIndices];
+ PxU8* dest = mHullDataVertexData8;
+ for(PxU32 i=0;i<nbPolygons;i++)
+ {
+ const PxHullPolygon& inPolygon = hullPolygons[i];
+ mHullDataPolygons[i].mVRef8 = PxU16(dest - mHullDataVertexData8); // Setup link for current polygon
+
+ PxU32 numVerts = inPolygon.mNbVerts;
+ PX_ASSERT(numVerts>=3); // Else something very wrong happened...
+ mHullDataPolygons[i].mNbVerts = Ps::to8(numVerts);
+
+ for (PxU32 j = 0; j < numVerts; j++)
+ {
+ dest[j] = Ps::to8(indices[inPolygon.mIndexBase + j]);
+ }
+
+ mHullDataPolygons[i].mPlane = PxPlane(inPolygon.mPlane[0],inPolygon.mPlane[1],inPolygon.mPlane[2],inPolygon.mPlane[3]);
+
+ // Next one
+ dest += numVerts;
+ }
+
+ if(!calculateVertexMapTable(nbPolygons, userPolygons))
+ return false;
+
+ // moved create edge list here from save, copy. This is a part of the validation process and
+ // we need to create the edge list anyway
+ if (!createEdgeList(doValidation, nbIndices, mHull->mNbHullVertices > gaussMapVertexLimit))
+ return false;
+
+#ifdef USE_PRECOMPUTED_HULL_PROJECTION
+ // Loop through polygons
+ for (PxU32 j = 0; j < nbPolygons; j++)
+ {
+ // Precompute hull projection along local polygon normal:
+ // find the hull vertex with the minimum dot against the polygon plane normal.
+ PxU32 NbVerts = mHull->mNbHullVertices;
+ const PxVec3* Verts = mHullDataHullVertices;
+ Gu::HullPolygonData& polygon = mHullDataPolygons[j];
+ PxReal min = PX_MAX_F32;
+ PxU8 minIndex = 0xff;
+ for (PxU8 i = 0; i < NbVerts; i++)
+ {
+ float dp = (*Verts++).dot(polygon.mPlane.n);
+ if (dp < min)
+ {
+ min = dp;
+ minIndex = i;
+ }
+ }
+ polygon.mMinIndex = minIndex;
+ }
+#endif
+
+ if(doValidation)
+ return checkHullPolygons();
+ else
+ return true;
+}
+
+//////////////////////////////////////////////////////////////////////////
+// hull polygons check
+// Validates the cooked hull: every vertex must lie behind (or within tolerance
+// of) every polygon plane, and the 8 scaled AABB corner directions must all be
+// "outside" at least one plane (i.e. the hull volume is closed with consistent
+// winding). Returns false (with an error report) on the first violation.
+bool ConvexHullBuilder::checkHullPolygons() const
+{
+ const PxVec3* hullVerts = mHullDataHullVertices;
+ const PxU8* vertexData = mHullDataVertexData8;
+ Gu::HullPolygonData* hullPolygons = mHullDataPolygons;
+
+ // Check hull validity
+ if(!hullVerts || !hullPolygons)
+ return false;
+
+ // A closed 3D hull needs at least 4 faces (tetrahedron).
+ if(mHull->mNbPolygons<4)
+ return false;
+
+ // 'max' accumulates the per-axis absolute extents; hullMin/hullMax the true AABB.
+ PxVec3 max(-FLT_MAX,-FLT_MAX,-FLT_MAX);
+
+ PxVec3 hullMax = hullVerts[0];
+ PxVec3 hullMin = hullVerts[0];
+
+ for(PxU32 j=0;j<mHull->mNbHullVertices;j++)
+ {
+ const PxVec3& hullVert = hullVerts[j];
+ if(fabsf(hullVert.x) > max.x)
+ max.x = fabsf(hullVert.x);
+
+ if(fabsf(hullVert.y) > max.y)
+ max.y = fabsf(hullVert.y);
+
+ if(fabsf(hullVert.z) > max.z)
+ max.z = fabsf(hullVert.z);
+
+ if (hullVert.x > hullMax.x)
+ {
+ hullMax.x = hullVert.x;
+ }
+ else if (hullVert.x < hullMin.x)
+ {
+ hullMin.x = hullVert.x;
+ }
+
+ if (hullVert.y > hullMax.y)
+ {
+ hullMax.y = hullVert.y;
+ }
+ else if (hullVert.y < hullMin.y)
+ {
+ hullMin.y = hullVert.y;
+ }
+
+ if (hullVert.z > hullMax.z)
+ {
+ hullMax.z = hullVert.z;
+ }
+ else if (hullVert.z < hullMin.z)
+ {
+ hullMin.z = hullVert.z;
+ }
+ }
+
+ // Inflate slightly so the test points are strictly outside the hull.
+ max += PxVec3(0.02f,0.02f,0.02f);
+
+ // The 8 corners of the inflated extent box; each must be in front of at
+ // least one hull plane if the hull is closed with correct winding.
+ PxVec3 testVectors[8];
+ bool foundPlane[8];
+ for (PxU32 i = 0; i < 8; i++)
+ {
+ foundPlane[i] = false;
+ }
+
+ testVectors[0] = PxVec3(max.x,max.y,max.z);
+ testVectors[1] = PxVec3(max.x,-max.y,-max.z);
+ testVectors[2] = PxVec3(max.x,max.y,-max.z);
+ testVectors[3] = PxVec3(max.x,-max.y,max.z);
+ testVectors[4] = PxVec3(-max.x,max.y,max.z);
+ testVectors[5] = PxVec3(-max.x,-max.y,max.z);
+ testVectors[6] = PxVec3(-max.x,max.y,-max.z);
+ testVectors[7] = PxVec3(-max.x,-max.y,-max.z);
+
+
+ // Extra convex hull validity check. This is less aggressive than previous convex decomposer!
+ // Loop through polygons
+ for(PxU32 i=0;i<mHull->mNbPolygons;i++)
+ {
+ const PxPlane& P = hullPolygons[i].mPlane;
+
+ for (PxU32 k = 0; k < 8; k++)
+ {
+ if(!foundPlane[k])
+ {
+ const float d = P.distance(testVectors[k]);
+ if(d >= 0)
+ {
+ foundPlane[k] = true;
+ }
+ }
+ }
+
+ // Test hull vertices against polygon plane
+ // compute the test epsilon the same way we construct the hull, verts are considered coplanar within this epsilon
+ const float planeTolerance = 0.002f;
+ const float testEpsilon = PxMax(planeTolerance * (PxMax(PxAbs(hullMax.x), PxAbs(hullMin.x)) +
+ PxMax(PxAbs(hullMax.y), PxAbs(hullMin.y)) +
+ PxMax(PxAbs(hullMax.z), PxAbs(hullMin.z))), planeTolerance);
+
+ for(PxU32 j=0;j<mHull->mNbHullVertices;j++)
+ {
+ // Don't test vertex if it belongs to plane (to prevent numerical issues)
+ PxU32 nb = hullPolygons[i].mNbVerts;
+ bool discard=false;
+ for(PxU32 k=0;k<nb;k++)
+ {
+ if(vertexData[hullPolygons[i].mVRef8+k]==PxU8(j))
+ {
+ discard = true;
+ break;
+ }
+ }
+
+ if(!discard)
+ {
+ const float d = P.distance(hullVerts[j]);
+// if(d>0.0001f)
+ //if(d>0.02f)
+ if(d > testEpsilon)
+ {
+ Ps::getFoundation().error(PxErrorCode::eINTERNAL_ERROR, __FILE__, __LINE__, "Gu::ConvexMesh::checkHullPolygons: Some hull vertices seems to be too far from hull planes.");
+ return false;
+ }
+ }
+ }
+ }
+
+ for (PxU32 i = 0; i < 8; i++)
+ {
+ if(!foundPlane[i])
+ {
+ Ps::getFoundation().error(PxErrorCode::eINTERNAL_ERROR, __FILE__, __LINE__, "Gu::ConvexMesh::checkHullPolygons: Hull seems to have opened volume or do (some) faces have reversed winding?");
+ return false;
+ }
+ }
+
+ return true;
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+/**
+* Computes the center of the hull as the area-weighted average of the surface
+* triangle centers. It should be inside it !
+* \param center [out] hull center
+* \param numFaces [in] number of triangles in 'faces'
+* \param faces [in] triangulated hull surface
+* \return true if success
+*/
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+bool ConvexHullBuilder::computeGeomCenter(PxVec3& center, PxU32 numFaces, HullTriangleData* faces) const
+{
+ // Checkings
+ const PxVec3* PX_RESTRICT hullVerts = mHullDataHullVertices;
+ if (!mHull->mNbHullVertices || !hullVerts) return false;
+
+ // Use the topological method
+ float totalArea = 0.0f;
+ center = PxVec3(0);
+ for (PxU32 i = 0; i < numFaces; i++)
+ {
+ Gu::TriangleT<PxU32> curTri(faces[i].mRef[0], faces[i].mRef[1], faces[i].mRef[2]);
+ const float area = curTri.area(hullVerts);
+ PxVec3 curCenter; curTri.center(hullVerts, curCenter);
+ center += area * curCenter;
+ totalArea += area;
+ }
+
+ // Robustness: with no faces or a fully degenerate (zero-area) hull the
+ // division below would produce NaNs. Fall back to the plain average of the
+ // hull vertices instead of reporting a NaN center as success.
+ if (!(totalArea > 0.0f))
+ {
+ center = PxVec3(0);
+ for (PxU32 i = 0; i < mHull->mNbHullVertices; i++)
+ center += hullVerts[i];
+ center /= float(mHull->mNbHullVertices);
+ return true;
+ }
+
+ center /= totalArea;
+
+ return true;
+}
+
+//////////////////////////////////////////////////////////////////////////
+// hull data store
+// Serialized edge structures are read back as raw 8-byte records: lock the sizes.
+PX_COMPILE_TIME_ASSERT(sizeof(Gu::EdgeDescData)==8);
+PX_COMPILE_TIME_ASSERT(sizeof(Gu::EdgeData)==8);
+// Serializes the hull: 'CLHL'/'CVHL' headers, counts, vertices, polygon records,
+// vertex-index bytes, faces-by-edges/vertices tables and (optionally) GRB edges.
+// The GRB flag is embedded in the top bit of the stored edge count.
+// \param stream           [out] destination stream
+// \param platformMismatch [in] true to byte-swap multi-byte values
+bool ConvexHullBuilder::save(PxOutputStream& stream, bool platformMismatch) const
+{
+ // Export header
+ if(!WriteHeader('C', 'L', 'H', 'L', gVersion, platformMismatch, stream))
+ return false;
+
+ // Export header
+ if(!WriteHeader('C', 'V', 'H', 'L', gVersion, platformMismatch, stream))
+ return false;
+
+ // Export figures
+
+ //embed grb flag into mNbEdges
+ PxU16 hasGRBData = PxU16(mBuildGRBData);
+ hasGRBData = PxU16(hasGRBData << 15);
+ PX_ASSERT(mHull->mNbEdges <( (1 << 15) - 1));
+ const PxU16 nbEdges = PxU16(mHull->mNbEdges | hasGRBData);
+ writeDword(mHull->mNbHullVertices, platformMismatch, stream);
+ writeDword(nbEdges, platformMismatch, stream);
+ writeDword(computeNbPolygons(), platformMismatch, stream); // Use accessor to lazy-build
+ // Total number of polygon vertex references (size of the index byte buffer).
+ PxU32 nb=0;
+ for(PxU32 i=0;i<mHull->mNbPolygons;i++)
+ nb += mHullDataPolygons[i].mNbVerts;
+ writeDword(nb, platformMismatch, stream);
+
+ // Export triangles
+
+ writeFloatBuffer(&mHullDataHullVertices->x, PxU32(mHull->mNbHullVertices*3), platformMismatch, stream);
+
+ // Export polygons
+ // TODO: allow lazy-evaluation
+ // We can't really store the buffer in one run anymore!
+ // Each record is byte-swapped into a local copy before writing if needed.
+ for(PxU32 i=0;i<mHull->mNbPolygons;i++)
+ {
+ Gu::HullPolygonData tmpCopy = mHullDataPolygons[i];
+ if(platformMismatch)
+ flipData(tmpCopy);
+
+ stream.write(&tmpCopy, sizeof(Gu::HullPolygonData));
+ }
+
+ // PT: why not storeBuffer here?
+ for(PxU32 i=0;i<nb;i++)
+ stream.write(&mHullDataVertexData8[i], sizeof(PxU8));
+
+ stream.write(mHullDataFacesByEdges8, PxU32(mHull->mNbEdges*2));
+ stream.write(mHullDataFacesByVertices8, PxU32(mHull->mNbHullVertices*3));
+
+ if (mBuildGRBData)
+ writeWordBuffer(mEdges, PxU32(mHull->mNbEdges * 2), platformMismatch, stream);
+
+ return true;
+}
+
+//////////////////////////////////////////////////////////////////////////
+//////////////////////////////////////////////////////////////////////////
+// Copies the builder's hull into a single contiguous ConvexHullData
+// allocation. Lays out all sub-arrays in one buffer (polygons first, vertex
+// data last so "nb" does not need to be serialized), then memcpy's each part.
+// Returns false if the allocation fails; ownership of the buffer passes to
+// hullData.mPolygons on success.
+bool ConvexHullBuilder::copy(ConvexHullData& hullData)
+{
+	// set the numbers
+	hullData.mNbHullVertices = mHull->mNbHullVertices;
+	hullData.mNbEdges = mHull->mNbEdges;
+	hullData.mNbPolygons = Ps::to8(computeNbPolygons());
+	// nb = total number of polygon-vertex references across all polygons
+	PxU32 nb = 0;
+	for (PxU32 i = 0; i < mHull->mNbPolygons; i++)
+		nb += mHullDataPolygons[i].mNbVerts;
+
+	PxU32 bytesNeeded = Gu::computeBufferSize(hullData, nb);
+
+	// allocate the memory first.
+	void* dataMemory = PX_ALLOC(bytesNeeded, "ConvexHullData data");
+	// Fail gracefully on out-of-memory instead of dereferencing NULL below.
+	if (!dataMemory)
+		return false;
+
+	PxU8* address = reinterpret_cast<PxU8*>(dataMemory);
+
+	// set data pointers
+	hullData.mPolygons = reinterpret_cast<Gu::HullPolygonData*>(address);	address += sizeof(Gu::HullPolygonData) * hullData.mNbPolygons;
+	PxVec3* dataHullVertices = reinterpret_cast<PxVec3*>(address);			address += sizeof(PxVec3) * hullData.mNbHullVertices;
+	PxU8* dataFacesByEdges8 = reinterpret_cast<PxU8*>(address);				address += sizeof(PxU8) * hullData.mNbEdges * 2;
+	PxU8* dataFacesByVertices8 = reinterpret_cast<PxU8*>(address);			address += sizeof(PxU8) * hullData.mNbHullVertices * 3;
+	// GRB edge table is only present when the bit flag embedded in mNbEdges is set.
+	PxU16* dataEdges = reinterpret_cast<PxU16*>(address);					address += hullData.mNbEdges.isBitSet() ? sizeof(PxU16) *hullData.mNbEdges * 2 : 0;
+	PxU8* dataVertexData8 = reinterpret_cast<PxU8*>(address);				address += sizeof(PxU8) * nb;	// PT: leave that one last, so that we don't need to serialize "Nb"
+
+	PX_ASSERT(!(size_t(dataHullVertices) % sizeof(PxReal)));
+	PX_ASSERT(!(size_t(hullData.mPolygons) % sizeof(PxReal)));
+	PX_ASSERT(size_t(address) <= size_t(dataMemory) + bytesNeeded);
+
+	PX_ASSERT(mHullDataHullVertices);
+	PX_ASSERT(mHullDataPolygons);
+	PX_ASSERT(mHullDataVertexData8);
+	PX_ASSERT(mHullDataFacesByEdges8);
+	PX_ASSERT(mHullDataFacesByVertices8);
+
+	// copy the data
+	PxMemCopy(dataHullVertices, &mHullDataHullVertices->x, PxU32(mHull->mNbHullVertices * 3)*sizeof(float));
+	PxMemCopy(hullData.mPolygons, mHullDataPolygons , hullData.mNbPolygons*sizeof(Gu::HullPolygonData));
+	PxMemCopy(dataVertexData8, mHullDataVertexData8, nb);
+	PxMemCopy(dataFacesByEdges8,mHullDataFacesByEdges8, PxU32(mHull->mNbEdges * 2));
+	if (hullData.mNbEdges.isBitSet())
+		PxMemCopy(dataEdges, mEdges, PxU32(mHull->mNbEdges * 2) * sizeof(PxU16));
+	PxMemCopy(dataFacesByVertices8, mHullDataFacesByVertices8, PxU32(mHull->mNbHullVertices * 3));
+	return true;
+}
+
+//////////////////////////////////////////////////////////////////////////
+// calculate vertex map table
+//////////////////////////////////////////////////////////////////////////
+// calculate vertex map table
+// For each hull vertex, records (up to) 3 polygons that reference it in
+// mHullDataFacesByVertices8. Returns false (and fills the table with 0xFF)
+// if any vertex has fewer than 3 adjacent polygons, which disables the
+// plane-shift PCM path at runtime.
+bool ConvexHullBuilder::calculateVertexMapTable(PxU32 nbPolygons, bool userPolygons)
+{
+	mHullDataFacesByVertices8 = PX_NEW(PxU8)[mHull->mNbHullVertices*3u];
+	// Per-vertex count of polygons recorded so far. 256 entries is enough
+	// because vertex indices are PxU8 (mNbHullVertices <= 255).
+	PxU8 vertexMarker[256];
+	PxMemSet(vertexMarker, 0, mHull->mNbHullVertices);
+
+	for (PxU32 i = 0; i < nbPolygons; i++)
+	{
+		const Gu::HullPolygonData& polygon = mHullDataPolygons[i];
+		for (PxU32 k = 0; k < polygon.mNbVerts; ++k)
+		{
+			const PxU8 index = mHullDataVertexData8[polygon.mVRef8 + k];
+			if (vertexMarker[index] < 3)
+			{
+				//Found a polygon — record it and bump this vertex's count.
+				mHullDataFacesByVertices8[index*3 + vertexMarker[index]++] = Ps::to8(i);
+			}
+		}
+	}
+
+	// A vertex with fewer than 3 adjacent polygons means the table is
+	// incomplete and plane-shift cannot be used.
+	bool noPlaneShift = false;
+	for (PxU32 i = 0; i < mHull->mNbHullVertices; ++i)
+	{
+		if(vertexMarker[i] != 3)
+			noPlaneShift = true;
+	}
+
+	if (noPlaneShift)
+	{
+		//PCM will use the original shape, which means it will have a huge performance drop
+		if (!userPolygons)
+			Ps::getFoundation().error(PxErrorCode::eINTERNAL_ERROR, __FILE__, __LINE__, "ConvexHullBuilder: convex hull does not have vertex-to-face info! Try to use different convex mesh cooking settings.");
+		else
+			Ps::getFoundation().error(PxErrorCode::eINTERNAL_ERROR, __FILE__, __LINE__, "ConvexHullBuilder: convex hull does not have vertex-to-face info! Some of the vertices have less than 3 neighbor polygons. The vertex is most likely inside a polygon or on an edge between 2 polygons, please remove those vertices.");
+		// Mark the whole table invalid so the runtime can detect it.
+		for (PxU32 i = 0; i < mHull->mNbHullVertices; ++i)
+		{
+			mHullDataFacesByVertices8[i * 3 + 0] = 0xFF;
+			mHullDataFacesByVertices8[i * 3 + 1] = 0xFF;
+			mHullDataFacesByVertices8[i * 3 + 2] = 0xFF;
+		}
+		return false;
+	}
+
+	return true;
+}
+
+
+//////////////////////////////////////////////////////////////////////////
+// create edge list
+//////////////////////////////////////////////////////////////////////////
+// create edge list
+// Builds the edge tables from the polygon data: welds the redundant
+// per-polygon edge list (each edge appears exactly twice on a manifold hull)
+// into a unique edge list (mEdges), the edge->face adjacency
+// (mHullDataFacesByEdges8), and the polygon-vertex -> edge mapping
+// (mEdgeData16). Fails on non-manifold input.
+bool ConvexHullBuilder::createEdgeList(bool doValidation, PxU32 nbEdges, bool prepareBigHullData)
+{
+	// Code below could be greatly simplified if we assume manifold meshes!
+
+	//feodorb: ok, let's assume manifold meshes, since the code before this change
+	//would fail on non-manifold meshes anyways
+
+	// We need the adjacency graph for hull polygons, similar to what we have for triangles.
+	// - sort the polygon edges and walk them in order
+	// - each edge should appear exactly twice since a convex is a manifold mesh without boundary edges
+	// - the polygon index is implicit when we walk the sorted list => get the 2 polygons back and update adjacency graph
+	//
+	// Two possible structures:
+	// - polygon to edges: needed for local search (actually: polygon to polygons)
+	// - edge to polygons: needed to compute edge normals on-the-fly
+
+	// Below is largely copied from the edge-list code
+
+	// Polygon to edges:
+	//
+	// We're dealing with convex polygons made of N vertices, defining N edges. For each edge we want the edge in
+	// an edge array.
+	//
+	// Edges to polygon:
+	//
+	// For each edge in the array, we want two polygon indices - ie an edge.
+
+	// 0) Compute the total size needed for "polygon to edges"
+	const PxU32 nbPolygons = mHull->mNbPolygons;
+	PxU32 nbEdgesUnshared = nbEdges;
+
+	// in a manifold mesh, each edge is repeated exactly twice as it shares exactly 2 faces
+	if (nbEdgesUnshared % 2 != 0)
+	{
+		Ps::getFoundation().error(PxErrorCode::eINTERNAL_ERROR, __FILE__, __LINE__, "Cooking::cookConvexMesh: non-manifold mesh cannot be used, invalid mesh!");
+		return false;
+	}
+
+	// Optional table used later by big-convex valency computation.
+	if (prepareBigHullData)
+	{
+		mHullDataFacesByAllEdges8 = PX_NEW(PxU8)[nbEdges * 2];
+	}
+
+	// 1) Get some bytes: I need one EdgesRefs for each face, and some temp buffers
+
+	// Face indices by edge indices. First face is the one where the edge is ordered from tail to head.
+	PX_DELETE_POD(mHullDataFacesByEdges8);
+	mHullDataFacesByEdges8 = PX_NEW(PxU8)[nbEdgesUnshared];
+
+	// One big temp allocation carved into 8 parallel arrays of nbEdgesUnshared
+	// entries each; bufferAdd keeps the base pointer for the final delete.
+	PxU32* tempBuffer = PX_NEW_TEMP(PxU32)[nbEdgesUnshared*8];		// Temp storage
+	PxU32* bufferAdd = tempBuffer;
+	PxU32* PX_RESTRICT vRefs0 = tempBuffer; tempBuffer += nbEdgesUnshared;
+	PxU32* PX_RESTRICT vRefs1 = tempBuffer; tempBuffer += nbEdgesUnshared;
+	PxU32* polyIndex = tempBuffer; tempBuffer += nbEdgesUnshared;
+	PxU32* vertexIndex = tempBuffer; tempBuffer += nbEdgesUnshared;
+	PxU32* polyIndex2 = tempBuffer; tempBuffer += nbEdgesUnshared;
+	PxU32* vertexIndex2 = tempBuffer; tempBuffer += nbEdgesUnshared;
+	PxU32* edgeIndex = tempBuffer; tempBuffer += nbEdgesUnshared;
+	PxU32* edgeData = tempBuffer; tempBuffer += nbEdgesUnshared;
+
+	// TODO avoroshilov: use the same "tempBuffer"
+	bool* flippedVRefs = PX_NEW_TEMP(bool)[nbEdgesUnshared];		// Temp storage
+
+	PxU32* run0 = vRefs0;
+	PxU32* run1 = vRefs1;
+	PxU32* run2 = polyIndex;
+	PxU32* run3 = vertexIndex;
+	bool* run4 = flippedVRefs;
+
+	// 2) Create a full redundant list of edges
+	// Each edge is canonicalized so vRef0 < vRef1; "flipped" remembers the
+	// original winding so it can be restored when the unique edge is emitted.
+	PxU32 edgeCounter = 0;
+	for(PxU32 i=0;i<nbPolygons;i++)
+	{
+		PxU32 nbVerts = mHullDataPolygons[i].mNbVerts;
+		const PxU8* PX_RESTRICT Data = mHullDataVertexData8 + mHullDataPolygons[i].mVRef8;
+
+		// Loop through polygon vertices
+		for(PxU32 j=0;j<nbVerts;j++)
+		{
+			PxU32 vRef0 = Data[j];
+			PxU32 vRef1 = Data[(j+1)%nbVerts];
+			bool flipped = vRef0>vRef1;
+
+			if (flipped)
+				physx::shdfnd::swap(vRef0, vRef1);
+
+			*run0++ = vRef0;
+			*run1++ = vRef1;
+			*run2++ = i;
+			*run3++ = j;
+			*run4++ = flipped;
+			edgeData[edgeCounter] = edgeCounter;
+			edgeCounter++;
+		}
+	}
+	PX_ASSERT(PxU32(run0-vRefs0)==nbEdgesUnshared);
+	PX_ASSERT(PxU32(run1-vRefs1)==nbEdgesUnshared);
+
+	// 3) Sort the list according to both keys (VRefs0 and VRefs1)
+	// Two-pass radix sort: secondary key first, then primary (stable sort).
+	Cm::RadixSortBuffered sorter;
+	const PxU32* PX_RESTRICT sorted = sorter.Sort(vRefs1, nbEdgesUnshared,Cm::RADIX_UNSIGNED).Sort(vRefs0, nbEdgesUnshared,Cm::RADIX_UNSIGNED).GetRanks();
+
+	PX_DELETE_POD(mEdges);
+	// Edges by their tail and head VRefs. NbEdgesUnshared == nbEdges * 2
+	// mEdges[edgeIdx*2 + 0] = tailVref, mEdges[edgeIdx*2 + 1] = headVref
+	// Tails and heads should be consistent with face refs, so that the edge is given in the order of
+	// his first face and opposite to the order of his second face
+	mEdges = PX_NEW(PxU16)[nbEdgesUnshared];
+
+	PX_DELETE_POD(mEdgeData16);
+	// Face to edge mapping
+	mEdgeData16 = PX_NEW(PxU16)[nbEdgesUnshared];
+
+	// TODO avoroshilov: remove this comment
+	//mHull->mNbEdges = Ps::to16(nbEdgesUnshared / 2); // #non-redundant edges
+
+	mHull->mNbEdges = 0; // #non-redundant edges
+
+	// A.B. Comment out the early exit temporary since we need to precompute the additonal edge data for GPU
+	//if (!doValidation)
+	//{
+	//	// TODO avoroshilov: this codepath is not supported
+
+	//	for (PxU32 i = 0; i < nbEdgesUnshared; i = i + 2)
+	//	{
+	//		const PxU32 sortedIndex = sorted[i];						// Between 0 and Nb
+	//		const PxU32 nextSortedIndex = sorted[i + 1];						// Between 0 and Nb
+	//		const PxU32 polyID = polyIndex[sortedIndex];			// Poly index
+	//		const PxU32 nextPolyID = polyIndex[nextSortedIndex];			// Poly index
+	//
+	//		mHullDataFacesByEdges8[(mHull->mNbEdges) * 2] = Ps::to8(polyID);
+	//		mHullDataFacesByEdges8[(mHull->mNbEdges) * 2 + 1] = Ps::to8(nextPolyID);
+
+	//		// store the full edge data for later use in big convex hull valencies computation
+	//		if(mHullDataFacesByAllEdges8)
+	//		{
+	//			mHullDataFacesByAllEdges8[edgeData[sortedIndex] * 2] = Ps::to8(polyID);
+	//			mHullDataFacesByAllEdges8[edgeData[sortedIndex] * 2 + 1] = Ps::to8(nextPolyID);
+
+	//			mHullDataFacesByAllEdges8[edgeData[nextSortedIndex] * 2] = Ps::to8(polyID);
+	//			mHullDataFacesByAllEdges8[edgeData[nextSortedIndex] * 2 + 1] = Ps::to8(nextPolyID);
+	//		}
+	//		mHull->mNbEdges++;
+	//	}
+
+	//	PX_DELETE_POD(bufferAdd);
+	//	return true;
+	//}
+
+	// 4) Loop through all possible edges
+	// - clean edges list by removing redundant edges
+	// - create EdgesRef list
+	// mNbFaces = nbFaces;
+
+	// TODO avoroshilov:
+	PxU32 numFacesPerEdgeVerificationCounter = 0;
+
+	PxU16* edgeVertOutput = mEdges;
+
+	PxU32 previousRef0 = PX_INVALID_U32;
+	PxU32 previousRef1 = PX_INVALID_U32;
+	PxU32 previousIndex = PX_INVALID_U32;
+	PxU32 previousPolyId = PX_INVALID_U32;
+
+	// Walk the sorted list: a new (ref0,ref1) pair starts a new unique edge,
+	// a repeated pair is the second face sharing that edge.
+	PxU16 nbHullEdges = 0;
+	for (PxU32 i = 0; i < nbEdgesUnshared; i++)
+	{
+		const PxU32 sortedIndex = sorted[i];		// Between 0 and Nb
+		const PxU32 polyID = polyIndex[sortedIndex];		// Poly index
+		const PxU32 vertexID = vertexIndex[sortedIndex];		// Poly index
+		PxU32 sortedRef0 = vRefs0[sortedIndex];		// (SortedRef0, SortedRef1) is the sorted edge
+		PxU32 sortedRef1 = vRefs1[sortedIndex];
+		bool flipped = flippedVRefs[sortedIndex];
+
+		if (sortedRef0 != previousRef0 || sortedRef1 != previousRef1)
+		{
+			// TODO avoroshilov: remove this?
+			// The previous edge must have been seen exactly twice (manifold).
+			if (i != 0 && numFacesPerEdgeVerificationCounter != 1)
+			{
+				Ps::getFoundation().error(PxErrorCode::eINTERNAL_ERROR, __FILE__, __LINE__, "Cooking::cookConvexMesh: non-manifold mesh cannot be used, invalid mesh!");
+				return false;
+			}
+			numFacesPerEdgeVerificationCounter = 0;
+
+			// ### TODO: change this in edge list as well
+			previousRef0 = sortedRef0;
+			previousRef1 = sortedRef1;
+			previousPolyId = polyID;
+
+			//feodorb:restore the original order of VRefs (tail and head)
+			if (flipped)
+				physx::shdfnd::swap(sortedRef0, sortedRef1);
+
+			*edgeVertOutput++ = Ps::to16(sortedRef0);
+			*edgeVertOutput++ = Ps::to16(sortedRef1);
+
+			nbHullEdges++;
+		}
+		else
+		{
+			// Second occurrence: record both faces adjacent to this edge.
+			mHullDataFacesByEdges8[(nbHullEdges - 1) * 2] = Ps::to8(previousPolyId);
+			mHullDataFacesByEdges8[(nbHullEdges - 1) * 2 + 1] = Ps::to8(polyID);
+
+			++numFacesPerEdgeVerificationCounter;
+		}
+
+		mEdgeData16[mHullDataPolygons[polyID].mVRef8 + vertexID] = Ps::to16(i / 2);
+
+		if (mHullDataFacesByAllEdges8)
+		{
+			// Pair up the two halves of each shared edge to fill the full
+			// (redundant) edge->faces table used for valency computation.
+			if (previousIndex != PX_INVALID_U32)
+			{
+				// store the full edge data for later use in big convex hull valencies computation
+				mHullDataFacesByAllEdges8[edgeData[sortedIndex] * 2] = Ps::to8(polyID);
+				mHullDataFacesByAllEdges8[edgeData[sortedIndex] * 2 + 1] = Ps::to8(polyIndex[previousIndex]);
+
+				mHullDataFacesByAllEdges8[edgeData[previousIndex] * 2] = Ps::to8(polyID);
+				mHullDataFacesByAllEdges8[edgeData[previousIndex] * 2 + 1] = Ps::to8(polyIndex[previousIndex]);
+				previousIndex = PX_INVALID_U32;
+			}
+			else
+			{
+				previousIndex = sortedIndex;
+			}
+		}
+		// Create mEdgesRef on the fly
+
+		polyIndex2[i] = polyID;
+		vertexIndex2[i] = vertexID;
+		edgeIndex[i] = PxU32(nbHullEdges - 1);
+	}
+
+	mHull->mNbEdges = nbHullEdges;
+
+	//////////////////////
+
+	// 2) Get some bytes: one Pair structure / edge
+	// create this structure only for validation purpose
+	// 3) Create Counters, ie compute the #faces sharing each edge
+	if(doValidation)
+	{
+		//
+		sorted = sorter.Sort(vertexIndex2, nbEdgesUnshared, Cm::RADIX_UNSIGNED).Sort(polyIndex2, nbEdgesUnshared, Cm::RADIX_UNSIGNED).GetRanks();
+
+		for (PxU32 i = 0; i < nbEdgesUnshared; i++)	edgeData[i] = edgeIndex[sorted[i]];
+
+		Gu::EdgeDescData* edgeToTriangles = PX_NEW(Gu::EdgeDescData)[PxU16(mHull->mNbEdges)];
+		PxMemZero(edgeToTriangles, sizeof(Gu::EdgeDescData)*mHull->mNbEdges);
+
+		PxU32* data = edgeData;
+		for(PxU32 i=0;i<nbEdgesUnshared;i++)	// <= maybe not the same Nb
+		{
+			edgeToTriangles[*data++].Count++;
+		}
+
+		// if we don't have a manifold mesh, this can fail... but the runtime would assert in any case
+		for (PxU32 i = 0; i < mHull->mNbEdges; i++)
+		{
+			if (edgeToTriangles[i].Count != 2)
+			{
+				Ps::getFoundation().error(PxErrorCode::eINTERNAL_ERROR, __FILE__, __LINE__, "Cooking::cookConvexMesh: non-manifold mesh cannot be used, invalid mesh!");
+				return false;
+			}
+		}
+		PX_DELETE_POD(edgeToTriangles);
+	}
+
+	// ### free temp ram
+	// NOTE(review): the error returns above leak bufferAdd/flippedVRefs — cooking aborts there, but worth confirming.
+	PX_DELETE_POD(bufferAdd);
+
+	// TODO avoroshilov: use the same "tempBuffer"
+	PX_DELETE_POD(flippedVRefs);
+
+	return true;
+}
+
diff --git a/PhysX_3.4/Source/PhysXCooking/src/convex/ConvexHullBuilder.h b/PhysX_3.4/Source/PhysXCooking/src/convex/ConvexHullBuilder.h
new file mode 100644
index 00000000..a3d57202
--- /dev/null
+++ b/PhysX_3.4/Source/PhysXCooking/src/convex/ConvexHullBuilder.h
@@ -0,0 +1,95 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2016 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+
+#ifndef PX_CONVEXHULLBUILDER_H
+#define PX_CONVEXHULLBUILDER_H
+
+#include "GuConvexMeshData.h"
+#include "PsUserAllocated.h"
+#include "PxCooking.h"
+
+namespace physx
+{
+	struct PxHullPolygon;
+
+	namespace Gu
+	{
+		struct EdgeDescData;
+		struct ConvexHullData;
+	} // namespace Gu
+
+	// A hull triangle stored as 3 vertex indices.
+	struct HullTriangleData
+	{
+		PxU32	mRef[3];
+	};
+
+	// Builds the runtime convex hull representation for cooking:
+	// vertices, polygons, edge lists and adjacency tables.
+	class ConvexHullBuilder : public Ps::UserAllocated
+	{
+		public:
+			ConvexHullBuilder(Gu::ConvexHullData* hull, const bool buildGRBData);
+			~ConvexHullBuilder();
+
+			// Builds the hull data from the provided vertices/indices/polygons.
+			bool init(PxU32 nbVerts, const PxVec3* verts, const PxU32* indices, const PxU32 nbIndices, const PxU32 nbPolygons,
+				const PxHullPolygon* hullPolygons, PxU32 gaussMapVertexLimit, bool doValidation = true, bool userPolygons = false);
+
+			// Serializes the hull data to a stream (see .cpp for the exact layout).
+			bool save(PxOutputStream& stream, bool platformMismatch) const;
+			// Copies the hull data into a single contiguous ConvexHullData allocation.
+			bool copy(Gu::ConvexHullData& hullData);
+
+			// Builds the unique edge list and edge/face adjacency tables.
+			bool createEdgeList(bool doValidation, PxU32 nbEdges, bool prepareBigHullData);
+			bool checkHullPolygons() const;
+
+			// Builds the vertex-to-face table (3 polygons per vertex).
+			bool calculateVertexMapTable(PxU32 nbPolygons, bool userPolygons = false);
+
+			PX_INLINE PxU32	computeNbPolygons() const
+			{
+				PX_ASSERT(mHull->mNbPolygons);
+				return mHull->mNbPolygons;
+			}
+
+			PxVec3*					mHullDataHullVertices;
+			Gu::HullPolygonData*	mHullDataPolygons;
+			PxU8*					mHullDataVertexData8;
+			PxU8*					mHullDataFacesByEdges8;
+			PxU8*					mHullDataFacesByVertices8;
+			PxU8*					mHullDataFacesByAllEdges8; // data used from big hull valencies computation
+
+			PxU16*					mEdgeData16;	//!< Edge indices indexed by hull polygons
+			PxU16*					mEdges;			//!< Edge to vertex mapping
+
+			Gu::ConvexHullData*		mHull;
+			bool					mBuildGRBData;
+
+		protected:
+			// Area-weighted centroid of the given triangle set; false on failure.
+			bool computeGeomCenter(PxVec3& , PxU32 numFaces, HullTriangleData* faces) const;
+	};
+}
+
+#endif // PX_CONVEXHULLBUILDER_H
+
diff --git a/PhysX_3.4/Source/PhysXCooking/src/convex/ConvexHullLib.cpp b/PhysX_3.4/Source/PhysXCooking/src/convex/ConvexHullLib.cpp
new file mode 100644
index 00000000..92ffc888
--- /dev/null
+++ b/PhysX_3.4/Source/PhysXCooking/src/convex/ConvexHullLib.cpp
@@ -0,0 +1,299 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2016 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+
+#include "ConvexHullLib.h"
+#include "Quantizer.h"
+#include "PsAllocator.h"
+#include "foundation/PxBounds3.h"
+#include "foundation/PxMemory.h"
+
+using namespace physx;
+
+namespace local
+{
+	//////////////////////////////////////////////////////////////////////////
+	// constants
+	static const float DISTANCE_EPSILON = 0.000001f;	// close enough to consider two floating point numbers to be 'the same'.
+	static const float NORMAL_DISTANCE_EPSILON = 0.001f; // close enough to consider two floating point numbers to be 'the same' in normalized points cloud.
+	static const float RESIZE_VALUE = 0.01f;			// if the provided points AABB is very thin resize it to this size
+
+	//////////////////////////////////////////////////////////////////////////
+	// checks if points form a valid AABB cube, if not construct a default CUBE
+	// Returns true when a fallback cube was written into "vertices" (degenerate
+	// or near-flat input), false when the input AABB is usable.
+	// NOTE(review): "scale" is only written on the false path; callers are
+	// expected to pre-initialize it (cleanupVertices sets it to 1 beforehand).
+	static bool checkPointsAABBValidity(PxU32 numPoints, const PxVec3* points, PxU32 stride , float distanceEpsilon,
+		float resizeValue, PxVec3& center, PxVec3& scale, PxU32& vcount, PxVec3* vertices, bool fCheck = false)
+	{
+		const char* vtx = reinterpret_cast<const char *> (points);
+		PxBounds3 bounds;
+		bounds.setEmpty();
+
+		// get the bounding box
+		for (PxU32 i = 0; i < numPoints; i++)
+		{
+			const PxVec3& p = *reinterpret_cast<const PxVec3 *> (vtx);
+			vtx += stride;
+
+			bounds.include(p);
+		}
+
+		PxVec3 dim = bounds.getDimensions();
+		center = bounds.getCenter();
+
+		// special case, the AABB is very thin or user provided us with only input 2 points
+		// we construct an AABB cube and return it
+		if ( dim.x < distanceEpsilon || dim.y < distanceEpsilon || dim.z < distanceEpsilon || numPoints < 3 )
+		{
+			float len = FLT_MAX;
+
+			// pick the shortest size bigger than the distance epsilon
+			if ( dim.x > distanceEpsilon && dim.x < len )
+				len = dim.x;
+			if ( dim.y > distanceEpsilon && dim.y < len )
+				len = dim.y;
+			if ( dim.z > distanceEpsilon && dim.z < len )
+				len = dim.z;
+
+			// if the AABB is small in all dimensions, resize it
+			if ( len == FLT_MAX )
+			{
+				dim = PxVec3(resizeValue);
+			}
+			// if one edge is small, set to 1/5th the shortest non-zero edge.
+			else
+			{
+				if ( dim.x < distanceEpsilon )
+					dim.x = len * 0.05f;
+				else
+					dim.x *= 0.5f;
+				if ( dim.y < distanceEpsilon )
+					dim.y = len * 0.05f;
+				else
+					dim.y *= 0.5f;
+				if ( dim.z < distanceEpsilon )
+					dim.z = len * 0.05f;
+				else
+					dim.z *= 0.5f;
+			}
+
+			// construct the AABB: 8 corners of the box [center-dim, center+dim]
+			const PxVec3 extPos = center + dim;
+			const PxVec3 extNeg = center - dim;
+
+			// in the final re-check pass, replace the pruned output entirely
+			if(fCheck)
+				vcount = 0;
+
+			vertices[vcount++] = extNeg;
+			vertices[vcount++] = PxVec3(extPos.x,extNeg.y,extNeg.z);
+			vertices[vcount++] = PxVec3(extPos.x,extPos.y,extNeg.z);
+			vertices[vcount++] = PxVec3(extNeg.x,extPos.y,extNeg.z);
+			vertices[vcount++] = PxVec3(extNeg.x,extNeg.y,extPos.z);
+			vertices[vcount++] = PxVec3(extPos.x,extNeg.y,extPos.z);
+			vertices[vcount++] = extPos;
+			vertices[vcount++] = PxVec3(extNeg.x,extPos.y,extPos.z);
+			return true; // return cube
+		}
+		else
+		{
+			scale = dim;
+		}
+		return false;
+	}
+
+}
+
+//////////////////////////////////////////////////////////////////////////
+// normalize point cloud, remove duplicates!
+//////////////////////////////////////////////////////////////////////////
+// normalize point cloud, remove duplicates!
+// Optionally quantizes the input first (eQUANTIZE_INPUT), normalizes the
+// points into the unit AABB, welds near-duplicates, then scales back.
+// Outputs the cleaned vertex count/positions plus the cloud's scale and center.
+bool ConvexHullLib::cleanupVertices(PxU32 svcount, const PxVec3* svertices, PxU32 stride,
+	PxU32& vcount, PxVec3* vertices, PxVec3& scale, PxVec3& center)
+{
+	if ( svcount == 0 )
+		return false;
+
+	const PxVec3* verticesToClean = svertices;
+	PxU32 numVerticesToClean = svcount;
+	Quantizer* quantizer = NULL;
+
+	// if quantization is enabled, parse the input vertices and produce new quantized vertices,
+	// that will be then cleaned the same way
+	if (mConvexMeshDesc.flags & PxConvexFlag::eQUANTIZE_INPUT)
+	{
+		quantizer = createQuantizer();
+		PxU32 vertsOutCount;
+		const PxVec3* vertsOut = quantizer->kmeansQuantize3D(svcount, svertices, stride,true, mConvexMeshDesc.quantizedCount, vertsOutCount);
+
+		if (vertsOut)
+		{
+			numVerticesToClean = vertsOutCount;
+			verticesToClean = vertsOut;
+		}
+	}
+
+	// Epsilons are scaled by the tolerance length so cooking behaves
+	// consistently across differently-scaled scenes.
+	const float distanceEpsilon = local::DISTANCE_EPSILON * mCookingParams.scale.length;
+	const float resizeValue = local::RESIZE_VALUE * mCookingParams.scale.length;
+	const float normalEpsilon = local::NORMAL_DISTANCE_EPSILON;	// used to determine if 2 points are the same
+
+	vcount = 0;
+	PxVec3 recip;
+
+	scale = PxVec3(1.0f);
+
+	// check for the AABB from points, if its very tiny return a resized CUBE
+	if (local::checkPointsAABBValidity(numVerticesToClean, verticesToClean, stride, distanceEpsilon, resizeValue, center, scale, vcount, vertices, false))
+	{
+		if (quantizer)
+			quantizer->release();
+		return true;
+	}
+
+	recip[0] = 1 / scale[0];
+	recip[1] = 1 / scale[1];
+	recip[2] = 1 / scale[2];
+
+	center = center.multiply(recip);
+
+	// normalize the point cloud and weld near-duplicates.
+	// NOTE: this is an O(n^2) scan over the accepted vertices — intended,
+	// since the cleaned set is expected to stay small.
+	const char * vtx = reinterpret_cast<const char *> (verticesToClean);
+	for (PxU32 i = 0; i<numVerticesToClean; i++)
+	{
+		const PxVec3& p = *reinterpret_cast<const PxVec3 *>(vtx);
+		vtx+=stride;
+
+		PxVec3 normalizedP = p.multiply(recip); // normalize
+
+		PxU32 j;
+
+		// parse the already stored vertices and check the distance
+		for (j=0; j<vcount; j++)
+		{
+			PxVec3& v = vertices[j];
+
+			const float dx = fabsf(normalizedP[0] - v[0] );
+			const float dy = fabsf(normalizedP[1] - v[1] );
+			const float dz = fabsf(normalizedP[2] - v[2] );
+
+			if ( dx < normalEpsilon && dy < normalEpsilon && dz < normalEpsilon )
+			{
+				// ok, it is close enough to the old one
+				// now let us see if it is further from the center of the point cloud than the one we already recorded.
+				// in which case we keep this one instead.
+				const float dist1 = (normalizedP - center).magnitudeSquared();
+				const float dist2 = (v - center).magnitudeSquared();
+
+				if ( dist1 > dist2 )
+				{
+					v = normalizedP;
+				}
+				break;
+			}
+		}
+
+		// we dont have that vertex in the output, add it
+		if ( j == vcount )
+		{
+			vertices[vcount] = normalizedP;
+			vcount++;
+		}
+	}
+
+	// scale the verts back
+	for (PxU32 i = 0; i < vcount; i++)
+	{
+		vertices[i] = vertices[i].multiply(scale);
+	}
+
+	// ok..now make sure we didn't prune so many vertices it is now invalid.
+	// note, that the output vertices are again scaled, we need to scale them back then
+	local::checkPointsAABBValidity(vcount, vertices, sizeof(PxVec3), distanceEpsilon, resizeValue, center, scale, vcount, vertices, true);
+
+	if (quantizer)
+		quantizer->release();
+	return true;
+}
+
+// Moves the polygon with the most vertices to slot 0 of the descriptor
+// (the runtime expects the largest face first) and rebuilds the index
+// buffer so each polygon's mIndexBase matches the new order. The rebuilt
+// indices live in mSwappedIndices, which this class owns and frees.
+void ConvexHullLib::swapLargestFace(PxConvexMeshDesc& desc)
+{
+	const PxHullPolygon* polygons = reinterpret_cast<const PxHullPolygon*>(desc.polygons.data);
+	PxHullPolygon* polygonsOut = const_cast<PxHullPolygon*>(polygons);
+
+	// find the face with the highest vertex count
+	PxU32 largestFace = 0;
+	for (PxU32 i = 1; i < desc.polygons.count; i++)
+	{
+		if(polygons[largestFace].mNbVerts < polygons[i].mNbVerts)
+			largestFace = i;
+	}
+
+	// early exit if no swap needs to be done
+	if(largestFace == 0)
+		return;
+
+	const PxU32* indices = reinterpret_cast<const PxU32*>(desc.indices.data);
+	mSwappedIndices = reinterpret_cast<PxU32*> (PX_ALLOC_TEMP(sizeof(PxU32)*desc.indices.count, "PxU32"));
+
+	// swap polygon 0 with the largest face
+	PxHullPolygon replacedPolygon = polygons[0];
+	PxHullPolygon largestPolygon = polygons[largestFace];
+	polygonsOut[0] = polygons[largestFace];
+	polygonsOut[largestFace] = replacedPolygon;
+
+	// relocate indices: write each polygon's indices contiguously in the new
+	// polygon order, updating mIndexBase as we go
+	PxU16 indexBase = 0;
+	for (PxU32 i = 0; i < desc.polygons.count; i++)
+	{
+		if(i == 0)
+		{
+			// slot 0 now holds the former largest face
+			PxMemCopy(mSwappedIndices, &indices[largestPolygon.mIndexBase],sizeof(PxU32)*largestPolygon.mNbVerts);
+			polygonsOut[0].mIndexBase = indexBase;
+			indexBase += largestPolygon.mNbVerts;
+		}
+		else
+		{
+			if(i == largestFace)
+			{
+				// the largest face's old slot now holds the former polygon 0
+				PxMemCopy(&mSwappedIndices[indexBase], &indices[replacedPolygon.mIndexBase], sizeof(PxU32)*replacedPolygon.mNbVerts);
+				polygonsOut[i].mIndexBase = indexBase;
+				indexBase += replacedPolygon.mNbVerts;
+			}
+			else
+			{
+				PxMemCopy(&mSwappedIndices[indexBase], &indices[polygons[i].mIndexBase], sizeof(PxU32)*polygons[i].mNbVerts);
+				polygonsOut[i].mIndexBase = indexBase;
+				indexBase += polygons[i].mNbVerts;
+			}
+		}
+	}
+
+	PX_ASSERT(indexBase == desc.indices.count);
+
+	// point the descriptor at the relocated index buffer
+	desc.indices.data = mSwappedIndices;
+}
+
+ConvexHullLib::~ConvexHullLib()
+{
+	// release the relocated index buffer created by swapLargestFace, if any
+	if (mSwappedIndices)
+		PX_FREE(mSwappedIndices);
+}
diff --git a/PhysX_3.4/Source/PhysXCooking/src/convex/ConvexHullLib.h b/PhysX_3.4/Source/PhysXCooking/src/convex/ConvexHullLib.h
new file mode 100644
index 00000000..19ab68fe
--- /dev/null
+++ b/PhysX_3.4/Source/PhysXCooking/src/convex/ConvexHullLib.h
@@ -0,0 +1,82 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2016 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+
+#ifndef PX_CONVEXHULLLIB_H
+#define PX_CONVEXHULLLIB_H
+
+#include "PxConvexMeshDesc.h"
+#include "PxCooking.h"
+#include "CmPhysXCommon.h"
+
+namespace physx
+{
+	//////////////////////////////////////////////////////////////////////////
+	// base class for the convex hull libraries - inflation based and quickhull
+	class ConvexHullLib
+	{
+		PX_NOCOPY(ConvexHullLib)
+	public:
+		// functions
+		// Stores references to the mesh descriptor and cooking params; both
+		// must outlive this object.
+		ConvexHullLib(const PxConvexMeshDesc& desc, const PxCookingParams& params)
+			: mConvexMeshDesc(desc), mCookingParams(params), mSwappedIndices(NULL)
+		{
+		}
+
+		virtual ~ConvexHullLib();
+
+		// computes the convex hull from provided points
+		virtual PxConvexMeshCookingResult::Enum createConvexHull() = 0;
+
+		// fills the PxConvexMeshDesc with computed hull data
+		virtual void fillConvexMeshDesc(PxConvexMeshDesc& desc) = 0;
+
+		// maximum vertices per face supported by the GPU contact gen path
+		static const PxU32 gpuMaxVertsPerFace = 32;
+
+	protected:
+
+		// clean input vertices from duplicates, normalize etc.
+		bool cleanupVertices(PxU32 svcount, // input vertex count
+			const PxVec3* svertices, // vertices
+			PxU32 stride, // stride
+			PxU32& vcount, // output number of vertices
+			PxVec3* vertices, // location to store the results.
+			PxVec3& scale, // scale
+			PxVec3& center); // center
+
+		// moves the largest polygon to slot 0 and relocates the index buffer
+		void swapLargestFace(PxConvexMeshDesc& desc);
+
+	protected:
+		const PxConvexMeshDesc& mConvexMeshDesc;	// input mesh descriptor (referenced, not copied)
+		const PxCookingParams& mCookingParams;		// cooking params (referenced, not copied)
+		PxU32* mSwappedIndices;						// owned index buffer built by swapLargestFace
+	};
+}
+
+#endif
diff --git a/PhysX_3.4/Source/PhysXCooking/src/convex/ConvexHullUtils.cpp b/PhysX_3.4/Source/PhysXCooking/src/convex/ConvexHullUtils.cpp
new file mode 100644
index 00000000..cf921a16
--- /dev/null
+++ b/PhysX_3.4/Source/PhysXCooking/src/convex/ConvexHullUtils.cpp
@@ -0,0 +1,925 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2016 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+
+#include "foundation/PxBounds3.h"
+#include "foundation/PxMathUtils.h"
+
+#include "ConvexHullUtils.h"
+#include "VolumeIntegration.h"
+#include "PsUtilities.h"
+#include "PsVecMath.h"
+#include "GuBox.h"
+#include "GuConvexMeshData.h"
+
+using namespace physx;
+using namespace Ps::aos;
+
+namespace local
+{
+	static const float MIN_ADJACENT_ANGLE = 3.0f;	// in degrees - result wont have two adjacent facets within this angle of each other.
+	static const float MAXDOT_MINANG = cosf(Ps::degToRad(MIN_ADJACENT_ANGLE)); // adjacent angle for dot product tests
+
+	//////////////////////////////////////////////////////////////////////////
+	// helper class for ConvexHullCrop - per-vertex clipping state
+	class VertFlag
+	{
+	public:
+		PxU8 planetest;		// PlaneTestResult classification of the vertex
+		PxU8 undermap;		// output index of the vertex in the clipped (under) hull
+		PxU8 overmap;		// unused - the over side is discarded
+	};
+
+	//////////////////////////////////////////////////////////////////////////
+	// helper class for ConvexHullCrop - per-edge clipping state
+	class EdgeFlag
+	{
+	public:
+		PxI16 undermap;		// output index of the edge in the clipped (under) hull
+	};
+
+	//////////////////////////////////////////////////////////////////////////
+	// helper class for ConvexHullCrop - one half edge of the polygon the slice creates
+	class Coplanar
+	{
+	public:
+		PxU16 ea;			// index of the sibling half edge already emitted to the under hull
+		PxU8 v0;			// tail vertex
+		PxU8 v1;			// head vertex
+	};
+
+	//////////////////////////////////////////////////////////////////////////
+	// plane test
+	enum PlaneTestResult
+	{
+		eCOPLANAR = 0,
+		eUNDER = 1 << 0,
+		eOVER = 1 << 1
+	};
+
+	//////////////////////////////////////////////////////////////////////////
+	// test where vertex lies in respect to the plane
+	static PlaneTestResult planeTest(const PxPlane& p, const PxVec3& v, float epsilon)
+	{
+		const float a = v.dot(p.n) + p.d;
+		PlaneTestResult flag = (a > epsilon) ? eOVER : ((a < -epsilon) ? eUNDER : eCOPLANAR);
+		return flag;
+	}
+
+	// computes the OBB for this set of points relative to this transform matrix. SIMD version
+	// On output 'sides' holds the box extents and 'trans' is moved to the box center.
+	void computeOBBSIMD(PxU32 vcount, const Vec4V* points, Vec4V& sides, const QuatV& rot, Vec4V& trans)
+	{
+		PX_ASSERT(vcount);
+
+		Vec4V minV = V4Load(FLT_MAX);
+		// BUGFIX: a running maximum must be seeded with the most negative value.
+		// FLT_MIN is the smallest *positive* normalized float, so a component that
+		// is negative for every input point would never have updated the maximum.
+		Vec4V maxV = V4Load(-FLT_MAX);
+		for (PxU32 i = 0; i < vcount; i++)
+		{
+			// move the point into the box frame: inv(rot) * (p - trans)
+			const Vec4V& vertexV = points[i];
+			const Vec4V t = V4Sub(vertexV, trans);
+			const Vec4V v = Vec4V_From_Vec3V(QuatRotateInv(rot, Vec3V_From_Vec4V(t)));
+
+			minV = V4Min(minV, v);
+			maxV = V4Max(maxV, v);
+		}
+
+		sides = V4Sub(maxV, minV);
+
+		Mat33V tmpMat;
+		QuatGetMat33V(rot, tmpMat.col0, tmpMat.col1, tmpMat.col2);
+		const FloatV coe = FLoad(0.5f);
+
+		// local-space offset from 'trans' to the box center: max - 0.5*(max - min)
+		const Vec4V deltaVec = V4Sub(maxV, V4Scale(sides, coe));
+
+		// rotate the offset back to world space and accumulate into trans
+		const Vec4V t0 = V4Scale(Vec4V_From_Vec3V(tmpMat.col0), V4GetX(deltaVec));
+		trans = V4Add(trans, t0);
+
+		const Vec4V t1 = V4Scale(Vec4V_From_Vec3V(tmpMat.col1), V4GetY(deltaVec));
+		trans = V4Add(trans, t1);
+
+		const Vec4V t2 = V4Scale(Vec4V_From_Vec3V(tmpMat.col2), V4GetZ(deltaVec));
+		trans = V4Add(trans, t2);
+	}
+}
+
+//////////////////////////////////////////////////////////////////////////
+// construct the base cube from given min/max
+// Builds the 8 corner vertices, 6 planes and 24 half edges of the AABB;
+// the hull is subsequently cropped against the stored input planes.
+ConvexHull::ConvexHull(const PxVec3& bmin, const PxVec3& bmax, const Ps::Array<PxPlane>& inPlanes)
+: mInputPlanes(inPlanes)
+{
+	// min max verts of the cube - 8 verts
+	mVertices.pushBack(PxVec3(bmin.x, bmin.y, bmin.z)); // ---
+	mVertices.pushBack(PxVec3(bmin.x, bmin.y, bmax.z)); // --+
+	mVertices.pushBack(PxVec3(bmin.x, bmax.y, bmin.z)); // -+-
+	mVertices.pushBack(PxVec3(bmin.x, bmax.y, bmax.z)); // -++
+	mVertices.pushBack(PxVec3(bmax.x, bmin.y, bmin.z)); // +--
+	mVertices.pushBack(PxVec3(bmax.x, bmin.y, bmax.z)); // +-+
+	mVertices.pushBack(PxVec3(bmax.x, bmax.y, bmin.z)); // ++-
+	mVertices.pushBack(PxVec3(bmax.x, bmax.y, bmax.z)); // +++
+
+	// cube planes - 6 planes
+	// d is chosen so that points on the face satisfy n.dot(v) + d == 0
+	mFacets.pushBack(PxPlane(PxVec3(-1.f, 0, 0), bmin.x)); // 0,1,3,2
+	mFacets.pushBack(PxPlane(PxVec3(1.f, 0, 0), -bmax.x)); // 6,7,5,4
+	mFacets.pushBack(PxPlane(PxVec3(0, -1.f, 0), bmin.y)); // 0,4,5,1
+	mFacets.pushBack(PxPlane(PxVec3(0, 1.f, 0), -bmax.y)); // 3,7,6,2
+	mFacets.pushBack(PxPlane(PxVec3(0, 0, -1.f), bmin.z)); // 0,2,6,4
+	mFacets.pushBack(PxPlane(PxVec3(0, 0, 1.f), -bmax.z)); // 1,5,7,3
+
+	// cube edges - 24 edges
+	// HalfEdge(ea, v, p): ea = index of the adjacent (sibling) half edge,
+	// v = vertex index, p = face index - matches how convexHullCrop reads them.
+	mEdges.pushBack(HalfEdge(11, 0, 0));
+	mEdges.pushBack(HalfEdge(23, 1, 0));
+	mEdges.pushBack(HalfEdge(15, 3, 0));
+	mEdges.pushBack(HalfEdge(16, 2, 0));
+
+	mEdges.pushBack(HalfEdge(13, 6, 1));
+	mEdges.pushBack(HalfEdge(21, 7, 1));
+	mEdges.pushBack(HalfEdge(9, 5, 1));
+	mEdges.pushBack(HalfEdge(18, 4, 1));
+
+	mEdges.pushBack(HalfEdge(19, 0, 2));
+	mEdges.pushBack(HalfEdge(6, 4, 2));
+	mEdges.pushBack(HalfEdge(20, 5, 2));
+	mEdges.pushBack(HalfEdge(0, 1, 2));
+
+	mEdges.pushBack(HalfEdge(22, 3, 3));
+	mEdges.pushBack(HalfEdge(4, 7, 3));
+	mEdges.pushBack(HalfEdge(17, 6, 3));
+	mEdges.pushBack(HalfEdge(2, 2, 3));
+
+	mEdges.pushBack(HalfEdge(3, 0, 4));
+	mEdges.pushBack(HalfEdge(14, 2, 4));
+	mEdges.pushBack(HalfEdge(7, 6, 4));
+	mEdges.pushBack(HalfEdge(8, 4, 4));
+
+	mEdges.pushBack(HalfEdge(10, 1, 5));
+	mEdges.pushBack(HalfEdge(5, 5, 5));
+	mEdges.pushBack(HalfEdge(12, 7, 5));
+	mEdges.pushBack(HalfEdge(1, 3, 5));
+}
+
+//////////////////////////////////////////////////////////////////////////
+// create the initial convex hull from given OBB
+// Same topology as the AABB constructor above, but the 8 corners come from the
+// oriented box and the planes are fitted through three corners of each face.
+ConvexHull::ConvexHull(const PxVec3& extent, const PxTransform& transform, const Ps::Array<PxPlane>& inPlanes)
+	: mInputPlanes(inPlanes)
+{
+	// get the OBB corner points
+	PxVec3 extentPoints[8];
+	PxMat33 rot(transform.q);
+	Gu::computeOBBPoints(extentPoints, transform.p, extent, rot.column0, rot.column1, rot.column2);
+
+	// reorder the corners into the same -/+ layout the AABB constructor uses
+	// (sign comments refer to the local box axes)
+	mVertices.pushBack(PxVec3(extentPoints[0].x, extentPoints[0].y, extentPoints[0].z)); // ---
+	mVertices.pushBack(PxVec3(extentPoints[4].x, extentPoints[4].y, extentPoints[4].z)); // --+
+	mVertices.pushBack(PxVec3(extentPoints[3].x, extentPoints[3].y, extentPoints[3].z)); // -+-
+	mVertices.pushBack(PxVec3(extentPoints[7].x, extentPoints[7].y, extentPoints[7].z)); // -++
+	mVertices.pushBack(PxVec3(extentPoints[1].x, extentPoints[1].y, extentPoints[1].z)); // +--
+	mVertices.pushBack(PxVec3(extentPoints[5].x, extentPoints[5].y, extentPoints[5].z)); // +-+
+	mVertices.pushBack(PxVec3(extentPoints[2].x, extentPoints[2].y, extentPoints[2].z)); // ++-
+	mVertices.pushBack(PxVec3(extentPoints[6].x, extentPoints[6].y, extentPoints[6].z)); // +++
+
+	// cube planes - 6 planes, each built from three corners of the face
+	PxPlane plane0(extentPoints[0], extentPoints[4], extentPoints[7]); // 0,1,3,2
+	mFacets.pushBack(PxPlane(plane0.n, plane0.d));
+
+	PxPlane plane1(extentPoints[2], extentPoints[6], extentPoints[5]); // 6,7,5,4
+	mFacets.pushBack(PxPlane(plane1.n, plane1.d));
+
+	PxPlane plane2(extentPoints[0], extentPoints[1], extentPoints[5]); // 0,4,5,1
+	mFacets.pushBack(PxPlane(plane2.n, plane2.d));
+
+	PxPlane plane3(extentPoints[7], extentPoints[6], extentPoints[2]); // 3,7,6,2
+	mFacets.pushBack(PxPlane(plane3.n, plane3.d));
+
+	PxPlane plane4(extentPoints[0], extentPoints[3], extentPoints[2]); // 0,2,6,4
+	mFacets.pushBack(PxPlane(plane4.n, plane4.d));
+
+	PxPlane plane5(extentPoints[4], extentPoints[5], extentPoints[6]); // 1,5,7,3
+	mFacets.pushBack(PxPlane(plane5.n, plane5.d));
+
+	// cube edges - 24 edges
+	// HalfEdge(ea, v, p): ea = adjacent (sibling) half edge, v = vertex, p = face.
+	// Identical connectivity to the AABB constructor above.
+	mEdges.pushBack(HalfEdge(11, 0, 0));
+	mEdges.pushBack(HalfEdge(23, 1, 0));
+	mEdges.pushBack(HalfEdge(15, 3, 0));
+	mEdges.pushBack(HalfEdge(16, 2, 0));
+
+	mEdges.pushBack(HalfEdge(13, 6, 1));
+	mEdges.pushBack(HalfEdge(21, 7, 1));
+	mEdges.pushBack(HalfEdge(9, 5, 1));
+	mEdges.pushBack(HalfEdge(18, 4, 1));
+
+	mEdges.pushBack(HalfEdge(19, 0, 2));
+	mEdges.pushBack(HalfEdge(6, 4, 2));
+	mEdges.pushBack(HalfEdge(20, 5, 2));
+	mEdges.pushBack(HalfEdge(0, 1, 2));
+
+	mEdges.pushBack(HalfEdge(22, 3, 3));
+	mEdges.pushBack(HalfEdge(4, 7, 3));
+	mEdges.pushBack(HalfEdge(17, 6, 3));
+	mEdges.pushBack(HalfEdge(2, 2, 3));
+
+	mEdges.pushBack(HalfEdge(3, 0, 4));
+	mEdges.pushBack(HalfEdge(14, 2, 4));
+	mEdges.pushBack(HalfEdge(7, 6, 4));
+	mEdges.pushBack(HalfEdge(8, 4, 4));
+
+	mEdges.pushBack(HalfEdge(10, 1, 5));
+	mEdges.pushBack(HalfEdge(5, 5, 5));
+	mEdges.pushBack(HalfEdge(12, 7, 5));
+	mEdges.pushBack(HalfEdge(1, 3, 5));
+}
+
+//////////////////////////////////////////////////////////////////////////
+// finds the candidate plane, returns -1 otherwise
+// Scores each input plane by how far the hull protrudes past it (normalized by
+// the hull's extent along the plane normal) and returns the best-scoring plane
+// that is not already a facet and does not cut into a nearly parallel facet.
+PxI32 ConvexHull::findCandidatePlane(float planeTestEpsilon, float epsilon) const
+{
+	PxI32 p = -1;
+	float md = 0.0f;	// best score so far
+	PxU32 i, j;
+	for (i = 0; i < mInputPlanes.size(); i++)
+	{
+		float d = 0.0f;
+		float dmax = 0.0f;
+		float dmin = 0.0f;
+		// signed-distance range of all hull vertices to this plane
+		for (j = 0; j < mVertices.size(); j++)
+		{
+			dmax = PxMax(dmax, mVertices[j].dot(mInputPlanes[i].n) + mInputPlanes[i].d);
+			dmin = PxMin(dmin, mVertices[j].dot(mInputPlanes[i].n) + mInputPlanes[i].d);
+		}
+
+		float dr = dmax - dmin;
+		if (dr < planeTestEpsilon)
+			dr = 1.0f; // shouldn't happen.
+		d = dmax / dr;	// fraction of the hull extent that sticks out over the plane
+		// we have a better candidate try another one
+		if (d <= md)
+			continue;
+		// check if we dont have already that plane or if the normals are nearly the same
+		for (j = 0; j<mFacets.size(); j++)
+		{
+			if (mInputPlanes[i] == mFacets[j])
+			{
+				d = 0.0f;
+				// NOTE(review): 'continue' keeps scanning remaining facets even though
+				// d can no longer win - a 'break' would behave identically here.
+				continue;
+			}
+			if (mInputPlanes[i].n.dot(mFacets[j].n)> local::MAXDOT_MINANG)
+			{
+				// nearly parallel facet: reject the plane if it would cut into that
+				// facet, i.e. if any vertex of facet j lies under the candidate plane
+				for (PxU32 k = 0; k < mEdges.size(); k++)
+				{
+					if (mEdges[k].p != j)
+						continue;
+					if (mVertices[mEdges[k].v].dot(mInputPlanes[i].n) + mInputPlanes[i].d < 0)
+					{
+						d = 0; // so this plane wont get selected.
+						break;
+					}
+				}
+			}
+		}
+		if (d>md)
+		{
+			p = PxI32(i);
+			md = d;
+		}
+	}
+	// only report a plane that protrudes by more than epsilon
+	return (md > epsilon) ? p : -1;
+}
+
+//////////////////////////////////////////////////////////////////////////
+// internal hull check
+// Validates the half-edge structure: every edge has a valid sibling whose
+// vertex matches the next edge's vertex (no holes), every edge vertex lies
+// on its face's plane, and the face winding agrees with the stored normal.
+bool ConvexHull::assertIntact(float epsilon) const
+{
+	PxU32 i;
+	PxU32 estart = 0;	// first edge of the current face
+	for (i = 0; i < mEdges.size(); i++)
+	{
+		if (mEdges[estart].p != mEdges[i].p)
+		{
+			estart = i;
+		}
+		PxU32 inext = i + 1;
+		if (inext >= mEdges.size() || mEdges[inext].p != mEdges[i].p)
+		{
+			inext = estart;	// wrap around to the first edge of this face
+		}
+		PX_ASSERT(mEdges[inext].p == mEdges[i].p);
+		PxI16 nb = mEdges[i].ea;
+		if (nb == 255 || nb == -1)
+			return false;	// sibling never set
+		PX_ASSERT(nb != -1);
+		PX_ASSERT(i == PxU32(mEdges[PxU32(nb)].ea));
+		// Check that the vertex of the next edge is the vertex of the adjacent half edge.
+		// Otherwise the two half edges are not really adjacent and we have a hole.
+		PX_ASSERT(mEdges[PxU32(nb)].v == mEdges[inext].v);
+		if (!(mEdges[PxU32(nb)].v == mEdges[inext].v))
+			return false;
+	}
+
+	for (i = 0; i < mEdges.size(); i++)
+	{
+		PX_ASSERT(local::eCOPLANAR == local::planeTest(mFacets[mEdges[i].p], mVertices[mEdges[i].v], epsilon));
+		if (local::eCOPLANAR != local::planeTest(mFacets[mEdges[i].p], mVertices[mEdges[i].v], epsilon))
+			return false;
+		if (mEdges[estart].p != mEdges[i].p)
+		{
+			estart = i;
+		}
+		PxU32 i1 = i + 1;
+		if (i1 >= mEdges.size() || mEdges[i1].p != mEdges[i].p) {
+			i1 = estart;
+		}
+		PxU32 i2 = i1 + 1;
+		if (i2 >= mEdges.size() || mEdges[i2].p != mEdges[i].p) {
+			i2 = estart;
+		}
+		if (i == i2)
+			continue; // i sliced tangent to an edge and created 2 meaningless edges
+
+		// check the face normal against the triangle from edges
+		PxVec3 localNormal = (mVertices[mEdges[i1].v] - mVertices[mEdges[i].v]).cross(mVertices[mEdges[i2].v] - mVertices[mEdges[i1].v]);
+		const float m = localNormal.magnitude();
+		// BUGFIX: guard the normalization. The original code scaled by 1.0f/m even
+		// when m == 0, filling localNormal with inf/NaN so the winding test below
+		// silently passed on degenerate triangles (NaN comparisons are false).
+		if (m == 0.0f)
+			localNormal = PxVec3(1.f, 0.0f, 0.0f);
+		else
+			localNormal *= (1.0f / m);
+		if (localNormal.dot(mFacets[mEdges[i].p].n) <= 0.0f)
+			return false;
+	}
+	return true;
+}
+
+// returns the maximum number of vertices on a face
+//
+// BUGFIX: the original version never flushed the count of the final face
+// (faces were only compared when the plane index changed, so the last face
+// was ignored) and over-counted the first face by one (its starting edge
+// went through the 'else' branch while later faces' starting edges did not).
+// Each half edge of a face contributes one vertex, so the vertex count of a
+// face equals its edge count.
+PxU32 ConvexHull::maxNumVertsPerFace() const
+{
+	PxU32 maxVerts = 0;
+	PxU32 currentVerts = 0;
+	PxU32 estart = 0;
+	for (PxU32 i = 0; i < mEdges.size(); i++)
+	{
+		if (mEdges[estart].p != mEdges[i].p)
+		{
+			// edge i starts a new face - flush the finished one
+			maxVerts = PxMax(maxVerts, currentVerts);
+			currentVerts = 1;	// count edge i for the new face
+			estart = i;
+		}
+		else
+		{
+			currentVerts++;
+		}
+	}
+	// flush the last face - the loop only flushes on a face change
+	maxVerts = PxMax(maxVerts, currentVerts);
+	return maxVerts;
+}
+
+//////////////////////////////////////////////////////////////////////////
+// slice the input convexHull with the slice plane
+// Returns a newly allocated hull containing the part of 'convex' under the
+// plane (caller owns it), the unmodified hull if nothing is over the plane,
+// or NULL if the crop produced a degenerate result.
+ConvexHull* physx::convexHullCrop(const ConvexHull& convex, const PxPlane& slice, float planeTestEpsilon)
+{
+	static const PxU8 invalidIndex = PxU8(-1);
+	PxU32 i;
+	PxU32 vertCountUnder = 0; // Running count of the vertices UNDER the slicing plane.
+
+	PX_ASSERT(convex.getEdges().size() < 480);
+
+	// Arrays of mapping information associated with features in the input convex.
+	// edgeflag[i].undermap - output index of input edge convex->edges[i]
+	// vertflag[i].undermap - output index of input vertex convex->vertices[i]
+	// vertflag[i].planetest - the side-of-plane classification of convex->vertices[i]
+	// (There are other members but they are unused.)
+	local::EdgeFlag edgeFlag[512];
+	local::VertFlag vertFlag[256];
+
+	// Lists of output features. Populated during clipping.
+	// Coplanar edges have one sibling in tmpunderedges and one in coplanaredges.
+	// coplanaredges holds the sibling that belong to the new polygon created from slicing.
+	ConvexHull::HalfEdge tmpUnderEdges[512]; // The output edge list.
+	PxPlane tmpUnderPlanes[128]; // The output plane list.
+	local::Coplanar coplanarEdges[512]; // The coplanar edge list.
+
+	PxU32 coplanarEdgesNum = 0; // Running count of coplanar edges.
+
+	// Created vertices on the slicing plane (stored for output after clipping).
+	Ps::Array<PxVec3> createdVerts;
+
+	// Logical OR of individual vertex flags.
+	PxU32 convexClipFlags = 0;
+
+	// Classify each vertex against the slicing plane as OVER | COPLANAR | UNDER.
+	// OVER - Vertex is over (outside) the slicing plane. Will not be output.
+	// COPLANAR - Vertex is on the slicing plane. A copy will be output.
+	// UNDER - Vertex is under (inside) the slicing plane. Will be output.
+	// We keep an array of information structures for each vertex in the input convex.
+	// vertflag[i].undermap - The (computed) index of convex->vertices[i] in the output.
+	//                        invalidIndex for OVER vertices - they are not output.
+	//                        initially invalidIndex for COPLANAR vertices - set later.
+	// vertflag[i].overmap - Unused - we don't care about the over part.
+	// vertflag[i].planetest - The classification (clip flag) of convex->vertices[i].
+	for (i = 0; i < convex.getVertices().size(); i++)
+	{
+		local::PlaneTestResult vertexClipFlag = local::planeTest(slice, convex.getVertices()[i], planeTestEpsilon);
+		switch (vertexClipFlag)
+		{
+		case local::eOVER:
+		case local::eCOPLANAR:
+			vertFlag[i].undermap = invalidIndex;	// Initially invalid for COPLANAR
+			vertFlag[i].overmap = invalidIndex;
+			break;
+		case local::eUNDER:
+			vertFlag[i].undermap = Ps::to8(vertCountUnder++);
+			vertFlag[i].overmap = invalidIndex;
+			break;
+		}
+		vertFlag[i].planetest = PxU8(vertexClipFlag);
+		convexClipFlags |= vertexClipFlag;
+	}
+
+	// Check special case: everything UNDER or COPLANAR.
+	// This way we know we wont end up with silly faces / edges later on.
+	if ((convexClipFlags & local::eOVER) == 0)
+	{
+		// Just return a copy of the same convex.
+		ConvexHull* dst = PX_NEW_TEMP(ConvexHull)(convex);
+		return dst;
+	}
+
+	PxU16 underEdgeCount = 0; // Running count of output edges.
+	PxU16 underPlanesCount = 0; // Running count of output planes.
+
+	// Clipping Loop
+	// =============
+	//
+	// for each plane
+	//
+	//   for each edge
+	//
+	//     if first UNDER & second !UNDER
+	//       output current edge -> tmpunderedges
+	//       if we have done the sibling
+	//         connect current edge to its sibling
+	//         set vout = first vertex of sibling
+	//       else if second is COPLANAR
+	//         if we havent already copied it
+	//           copy second -> createdverts
+	//         set vout = index of created vertex
+	//       else
+	//         generate a new vertex -> createdverts
+	//         set vout = index of created vertex
+	//       if vin is already set and vin != vout (non-trivial edge)
+	//         output coplanar edge -> tmpunderedges (one sibling)
+	//         set coplanaredge to new edge index (for connecting the other sibling)
+	//
+	//     else if first !UNDER & second UNDER
+	//       if we have done the sibling
+	//         connect current edge to its sibling
+	//         set vin = second vertex of sibling (this is a bit of a pain)
+	//       else if first is COPLANAR
+	//         if we havent already copied it
+	//           copy first -> createdverts
+	//         set vin = index of created vertex
+	//       else
+	//         generate a new vertex -> createdverts
+	//         set vin = index of created vertex
+	//       if vout is already set and vin != vout (non-trivial edge)
+	//         output coplanar edge -> tmpunderedges (one sibling)
+	//         set coplanaredge to new edge index (for connecting the other sibling)
+	//       output current edge -> tmpunderedges
+	//
+	//     else if first UNDER & second UNDER
+	//       output current edge -> tmpunderedges
+	//
+	//   next edge
+	//
+	//   if part of current plane was UNDER
+	//     output current plane -> tmpunderplanes
+	//
+	//   if coplanaredge is set
+	//     output coplanar edge -> coplanaredges
+	//
+	// next plane
+	//
+
+	// Indexing is a bit tricky here:
+	//
+	// e0 - index of the current edge
+	// e1 - index of the next edge
+	// estart - index of the first edge in the current plane
+	// currentplane - index of the current plane
+	// enextface - first edge of next plane
+
+	PxU32 e0 = 0;
+
+	for (PxU32 currentplane = 0; currentplane < convex.getFacets().size(); currentplane++)
+	{
+
+		PxU32 eStart = e0;
+		PxU32 eNextFace = 0xffffffff;
+		PxU32 e1 = e0 + 1;
+
+		PxU8 vout = invalidIndex;
+		PxU8 vin = invalidIndex;
+
+		// NOTE(review): output edge indices can legitimately reach 255 == invalidIndex,
+		// which would make the 'coplanarEdge != invalidIndex' check below misfire for
+		// that one index - confirm against callers' size limits.
+		PxU32 coplanarEdge = invalidIndex;
+
+		// Logical OR of individual vertex flags in the current plane.
+		PxU32 planeSide = 0;
+
+		do{
+
+			// Next edge modulo logic
+			if (e1 >= convex.getEdges().size() || convex.getEdges()[e1].p != currentplane)
+			{
+				eNextFace = e1;
+				e1 = eStart;
+			}
+
+			const ConvexHull::HalfEdge& edge0 = convex.getEdges()[e0];
+			const ConvexHull::HalfEdge& edge1 = convex.getEdges()[e1];
+			const ConvexHull::HalfEdge& edgea = convex.getEdges()[PxU32(edge0.ea)];
+
+			planeSide |= vertFlag[edge0.v].planetest;
+
+			if (vertFlag[edge0.v].planetest == local::eUNDER && vertFlag[edge1.v].planetest != local::eUNDER)
+			{
+				// first is UNDER, second is COPLANAR or OVER
+
+				// Output current edge.
+				edgeFlag[e0].undermap = short(underEdgeCount);
+				tmpUnderEdges[underEdgeCount].v = vertFlag[edge0.v].undermap;
+				tmpUnderEdges[underEdgeCount].p = PxU8(underPlanesCount);
+				PX_ASSERT(tmpUnderEdges[underEdgeCount].v != invalidIndex);
+
+				if (PxU32(edge0.ea) < e0)
+				{
+					// We have already done the sibling.
+					// Connect current edge to its sibling.
+					PX_ASSERT(edgeFlag[edge0.ea].undermap != invalidIndex);
+					tmpUnderEdges[underEdgeCount].ea = edgeFlag[edge0.ea].undermap;
+					tmpUnderEdges[edgeFlag[edge0.ea].undermap].ea = short(underEdgeCount);
+					// Set vout = first vertex of (output, clipped) sibling.
+					vout = tmpUnderEdges[edgeFlag[edge0.ea].undermap].v;
+				}
+				else if (vertFlag[edge1.v].planetest == local::eCOPLANAR)
+				{
+					// Boundary case.
+					// We output coplanar vertices once.
+					if (vertFlag[edge1.v].undermap == invalidIndex)
+					{
+						createdVerts.pushBack(convex.getVertices()[edge1.v]);
+						// Remember the index so we don't output it again.
+						vertFlag[edge1.v].undermap = Ps::to8(vertCountUnder++);
+					}
+					vout = vertFlag[edge1.v].undermap;
+				}
+				else
+				{
+					// Add new vertex.
+					const PxPlane& p0 = convex.getFacets()[edge0.p];
+					const PxPlane& pa = convex.getFacets()[edgea.p];
+					createdVerts.pushBack(threePlaneIntersection(p0, pa, slice));
+					vout = Ps::to8(vertCountUnder++);
+				}
+
+				// We added an edge, increment the counter
+				underEdgeCount++;
+
+				if (vin != invalidIndex && vin != vout)
+				{
+					// We already have vin and a non-trivial edge
+					// Output coplanar edge
+					PX_ASSERT(vout != invalidIndex);
+					coplanarEdge = underEdgeCount;
+					tmpUnderEdges[underEdgeCount].v = vout;
+					tmpUnderEdges[underEdgeCount].p = PxU8(underPlanesCount);
+					tmpUnderEdges[underEdgeCount].ea = invalidIndex;
+					underEdgeCount++;
+				}
+			}
+			else if (vertFlag[edge0.v].planetest != local::eUNDER && vertFlag[edge1.v].planetest == local::eUNDER)
+			{
+				// First is OVER or COPLANAR, second is UNDER.
+
+				if (PxU32(edge0.ea) < e0)
+				{
+					// We have already done the sibling.
+					// We need the second vertex of the sibling.
+					// Which is the vertex of the next edge in the adjacent poly.
+					int nea = edgeFlag[edge0.ea].undermap + 1;
+					int p = tmpUnderEdges[edgeFlag[edge0.ea].undermap].p;
+					if (nea >= underEdgeCount || tmpUnderEdges[nea].p != p)
+					{
+						// End of polygon, next edge is first edge
+						nea -= 2;
+						while (nea > 0 && tmpUnderEdges[nea - 1].p == p)
+							nea--;
+					}
+					vin = tmpUnderEdges[nea].v;
+					PX_ASSERT(vin < vertCountUnder);
+				}
+				else if (vertFlag[edge0.v].planetest == local::eCOPLANAR)
+				{
+					// Boundary case.
+					// We output coplanar vertices once.
+					if (vertFlag[edge0.v].undermap == invalidIndex)
+					{
+						createdVerts.pushBack(convex.getVertices()[edge0.v]);
+						// Remember the index so we don't output it again.
+						vertFlag[edge0.v].undermap = Ps::to8(vertCountUnder++);
+					}
+					vin = vertFlag[edge0.v].undermap;
+				}
+				else
+				{
+					// Add new vertex.
+					const PxPlane& p0 = convex.getFacets()[edge0.p];
+					const PxPlane& pa = convex.getFacets()[edgea.p];
+					createdVerts.pushBack(threePlaneIntersection(p0, pa, slice));
+					vin = Ps::to8(vertCountUnder++);
+				}
+
+				if (vout != invalidIndex && vin != vout)
+				{
+					// We have been in and out, Add the coplanar edge
+					coplanarEdge = underEdgeCount;
+					tmpUnderEdges[underEdgeCount].v = vout;
+					tmpUnderEdges[underEdgeCount].p = Ps::to8(underPlanesCount);
+					tmpUnderEdges[underEdgeCount].ea = invalidIndex;
+					underEdgeCount++;
+				}
+
+				// Output current edge.
+				tmpUnderEdges[underEdgeCount].v = vin;
+				tmpUnderEdges[underEdgeCount].p = Ps::to8(underPlanesCount);
+				edgeFlag[e0].undermap = short(underEdgeCount);
+
+				if (PxU32(edge0.ea) < e0)
+				{
+					// We have already done the sibling.
+					// Connect current edge to its sibling.
+					PX_ASSERT(edgeFlag[edge0.ea].undermap != invalidIndex);
+					tmpUnderEdges[underEdgeCount].ea = edgeFlag[edge0.ea].undermap;
+					tmpUnderEdges[edgeFlag[edge0.ea].undermap].ea = short(underEdgeCount);
+				}
+
+				PX_ASSERT(edgeFlag[e0].undermap == underEdgeCount);
+				underEdgeCount++;
+			}
+			else if (vertFlag[edge0.v].planetest == local::eUNDER && vertFlag[edge1.v].planetest == local::eUNDER)
+			{
+				// Both UNDER
+
+				// Output current edge.
+				edgeFlag[e0].undermap = short(underEdgeCount);
+				tmpUnderEdges[underEdgeCount].v = vertFlag[edge0.v].undermap;
+				tmpUnderEdges[underEdgeCount].p = Ps::to8(underPlanesCount);
+				if (PxU32(edge0.ea) < e0)
+				{
+					// We have already done the sibling.
+					// Connect current edge to its sibling.
+					PX_ASSERT(edgeFlag[edge0.ea].undermap != invalidIndex);
+					tmpUnderEdges[underEdgeCount].ea = edgeFlag[edge0.ea].undermap;
+					tmpUnderEdges[edgeFlag[edge0.ea].undermap].ea = short(underEdgeCount);
+				}
+				underEdgeCount++;
+			}
+
+			e0 = e1;
+			e1++; // do the modulo at the beginning of the loop
+
+		} while (e0 != eStart);
+
+		e0 = eNextFace;
+
+		if (planeSide & local::eUNDER)
+		{
+			// At least part of current plane is UNDER.
+			// Output current plane.
+			tmpUnderPlanes[underPlanesCount] = convex.getFacets()[currentplane];
+			underPlanesCount++;
+		}
+
+		if (coplanarEdge != invalidIndex)
+		{
+			// We have a coplanar edge.
+			// Add to coplanaredges for later processing.
+			// (One sibling is in place but one is missing)
+			PX_ASSERT(vin != invalidIndex);
+			PX_ASSERT(vout != invalidIndex);
+			PX_ASSERT(coplanarEdge != 511);
+			// BUGFIX: Coplanar::ea is 16 bits and coplanarEdge may exceed 255
+			// (the assert above allows up to 510); the original PxU8 cast
+			// silently truncated edge indices >= 256.
+			coplanarEdges[coplanarEdgesNum].ea = PxU16(coplanarEdge);
+			coplanarEdges[coplanarEdgesNum].v0 = vin;
+			coplanarEdges[coplanarEdgesNum].v1 = vout;
+			coplanarEdgesNum++;
+		}
+
+		// Reset coplanar edge infos for next poly
+		// (redundant: vin/vout/coplanarEdge are re-declared each iteration)
+		vin = invalidIndex;
+		vout = invalidIndex;
+		coplanarEdge = invalidIndex;
+	}
+
+	// Add the new plane to the mix:
+	if (coplanarEdgesNum > 0)
+	{
+		tmpUnderPlanes[underPlanesCount++] = slice;
+	}
+
+	// Sort the coplanar edges in winding order.
+	// BUGFIX: the original condition 'i < coplanarEdgesNum - 1' underflows the
+	// unsigned subtraction when coplanarEdgesNum == 0 and would walk far outside
+	// the coplanarEdges array.
+	for (i = 0; i + 1 < coplanarEdgesNum; i++)
+	{
+		if (coplanarEdges[i].v1 != coplanarEdges[i + 1].v0)
+		{
+			PxU32 j = 0;
+			for (j = i + 2; j < coplanarEdgesNum; j++)
+			{
+				if (coplanarEdges[i].v1 == coplanarEdges[j].v0)
+				{
+					local::Coplanar tmp = coplanarEdges[i + 1];
+					coplanarEdges[i + 1] = coplanarEdges[j];
+					coplanarEdges[j] = tmp;
+					break;
+				}
+			}
+			if (j >= coplanarEdgesNum)
+			{
+				// PX_ASSERT(j<coplanaredges_num);
+				return NULL;
+			}
+		}
+	}
+
+	// PT: added this line to fix DE2904
+	if (!vertCountUnder)
+		return NULL;
+
+	// Create the output convex.
+	ConvexHull* punder = PX_NEW_TEMP(ConvexHull)(convex.getInputPlanes());
+	ConvexHull& under = *punder;
+
+	// Copy UNDER vertices
+	PxU32 k = 0;
+	for (i = 0; i < convex.getVertices().size(); i++)
+	{
+		if (vertFlag[i].planetest == local::eUNDER)
+		{
+			under.getVertices().pushBack(convex.getVertices()[i]);
+			k++;
+		}
+	}
+
+	// Copy created vertices
+	i = 0;
+	while (k < vertCountUnder)
+	{
+		under.getVertices().pushBack(createdVerts[i++]);
+		k++;
+	}
+
+	PX_ASSERT(i == createdVerts.size());
+
+	// Copy the output edges and output planes.
+	under.getEdges().resize(underEdgeCount + coplanarEdgesNum);
+	under.getFacets().resize(underPlanesCount);
+
+	// Add the coplanar edge siblings that belong to the new polygon (coplanaredges).
+	for (i = 0; i < coplanarEdgesNum; i++)
+	{
+		under.getEdges()[underEdgeCount + i].p = PxU8(underPlanesCount - 1);
+		under.getEdges()[underEdgeCount + i].ea = short(coplanarEdges[i].ea);
+		tmpUnderEdges[coplanarEdges[i].ea].ea = PxI16(underEdgeCount + i);
+		under.getEdges()[underEdgeCount + i].v = coplanarEdges[i].v0;
+	}
+
+	PxMemCopy(under.getEdges().begin(), tmpUnderEdges, sizeof(ConvexHull::HalfEdge)*underEdgeCount);
+	PxMemCopy(under.getFacets().begin(), tmpUnderPlanes, sizeof(PxPlane)*underPlanesCount);
+	return punder;
+}
+
+//////////////////////////////////////////////////////////////////////////
+// computes a tight OBB around the convex mesh described by 'desc'.
+// Diagonalizes the inertia tensor to get candidate axes, then sweeps 20
+// rotation steps around each principal axis keeping the orientation with the
+// smallest box volume. Outputs: sides = box extents, matrix = box pose.
+// Returns false if the volume integration fails.
+bool physx::computeOBBFromConvex(const PxConvexMeshDesc& desc, PxVec3& sides, PxTransform& matrix)
+{
+	PxIntegrals integrals;
+	// using the centroid of the convex for the volume integration solved accuracy issues in cases where the inertia tensor
+	// ended up close to not being positive definite and after a few further transforms the diagonalized inertia tensor ended
+	// up with negative values.
+
+	const PxVec3* verts = (reinterpret_cast<const PxVec3*>(desc.points.data));
+	const PxU32* ind = (reinterpret_cast<const PxU32*>(desc.indices.data));
+	const PxHullPolygon* polygons = (reinterpret_cast<const PxHullPolygon*>(desc.polygons.data));
+	PxVec3 mean(0.0f);
+	for (PxU32 i = 0; i < desc.points.count; i++)
+		mean += verts[i];
+	mean *= (1.0f / desc.points.count);
+
+	// the volume integrator consumes 8-bit indices - convert the input
+	PxU8* indices = reinterpret_cast<PxU8*> (PX_ALLOC_TEMP(sizeof(PxU8)*desc.indices.count, "PxU8"));
+	for (PxU32 i = 0; i < desc.indices.count; i++)
+	{
+		indices[i] = Ps::to8(ind[i]);
+	}
+	// we need to move the polygon data to internal format
+	Gu::HullPolygonData* polygonData = reinterpret_cast<Gu::HullPolygonData*> (PX_ALLOC_TEMP(sizeof(Gu::HullPolygonData)*desc.polygons.count, "Gu::HullPolygonData"));
+	for (PxU32 i = 0; i < desc.polygons.count; i++)
+	{
+		polygonData[i].mPlane = PxPlane(polygons[i].mPlane[0], polygons[i].mPlane[1], polygons[i].mPlane[2], polygons[i].mPlane[3]);
+		polygonData[i].mNbVerts = Ps::to8(polygons[i].mNbVerts);
+		polygonData[i].mVRef8 = polygons[i].mIndexBase;
+	}
+
+	PxConvexMeshDesc inDesc;
+	inDesc.points.data = desc.points.data;
+	inDesc.points.count = desc.points.count;
+
+	inDesc.polygons.data = polygonData;
+	inDesc.polygons.count = desc.polygons.count;
+
+	inDesc.indices.data = indices;
+	inDesc.indices.count = desc.indices.count;
+
+	// compute volume integrals to get basis axis
+	bool status = (desc.flags & PxConvexFlag::eFAST_INERTIA_COMPUTATION) ?
+		computeVolumeIntegralsEberlySIMD(inDesc, 1.0f, integrals, mean) : computeVolumeIntegralsEberly(inDesc, 1.0f, integrals, mean);
+	if (status)
+	{
+		Vec4V* pointsV = reinterpret_cast<Vec4V*> (PX_ALLOC_TEMP(sizeof(Vec4V)*desc.points.count, "Vec4V"));
+		for (PxU32 i = 0; i < desc.points.count; i++)
+		{
+			// safe to V4 load, same as volume integration - we allocate one more vector
+			// NOTE(review): V4LoadU reads 4 floats from a PxVec3 array, which relies on
+			// the caller's points buffer having padding past the last vertex - confirm.
+			pointsV[i] = V4LoadU(&verts[i].x);
+		}
+
+		PxMat33 inertia;
+		integrals.getOriginInertia(inertia);
+		PxQuat inertiaQuat;
+		PxDiagonalize(inertia, inertiaQuat);
+		PxMat33 baseAxis(inertiaQuat);
+		Vec4V center = V4LoadU(&integrals.COM.x);
+
+		const PxU32 numSteps = 20;
+		const float subStep = Ps::degToRad(float(360/numSteps));
+
+		// BUGFIX: seed with FLT_MAX instead of 1e9 so the first candidate always wins.
+		// With the 1e9 seed, a hull whose every candidate box exceeded that volume
+		// returned true while leaving 'sides' and 'matrix' untouched.
+		float bestVolume = FLT_MAX;
+
+		for (PxU32 axis = 0; axis < 3; axis++)
+		{
+			for (PxU32 iStep = 0; iStep < numSteps; iStep++)
+			{
+				PxQuat quat(iStep*subStep, baseAxis[axis]);
+
+				Vec4V transV = center;
+				Vec4V psidesV;
+
+				const QuatV rotV = QuatVLoadU(&quat.x);
+				local::computeOBBSIMD(desc.points.count, pointsV, psidesV, rotV, transV);
+
+				PxVec3 psides;
+				V3StoreU(Vec3V_From_Vec4V(psidesV), psides);
+
+				const float volume = psides[0] * psides[1] * psides[2]; // the volume of the cube
+
+				if (volume <= bestVolume)
+				{
+					bestVolume = volume;
+					sides = psides;
+
+					V4StoreU(rotV, &matrix.q.x);
+					V3StoreU(Vec3V_From_Vec4V(transV), matrix.p);
+				}
+			}
+		}
+
+		PX_FREE_AND_RESET(pointsV);
+	}
+	else
+	{
+		PX_FREE_AND_RESET(indices);
+		PX_FREE_AND_RESET(polygonData);
+		return false;
+	}
+
+	PX_FREE_AND_RESET(indices);
+	PX_FREE_AND_RESET(polygonData);
+	return true;
+}
diff --git a/PhysX_3.4/Source/PhysXCooking/src/convex/ConvexHullUtils.h b/PhysX_3.4/Source/PhysXCooking/src/convex/ConvexHullUtils.h
new file mode 100644
index 00000000..5178b043
--- /dev/null
+++ b/PhysX_3.4/Source/PhysXCooking/src/convex/ConvexHullUtils.h
@@ -0,0 +1,177 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2016 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+
+#ifndef PX_CONVEXHULLUTILS_H
+#define PX_CONVEXHULLUTILS_H
+
+#include "foundation/PxMemory.h"
+#include "foundation/PxPlane.h"
+
+#include "CmPhysXCommon.h"
+
+#include "PsUserAllocated.h"
+#include "PsArray.h"
+#include "PsMathUtils.h"
+
+#include "PxConvexMeshDesc.h"
+
+namespace physx
+{
+
+ //////////////////////////////////////////////////////////////////////////
+ // helper class for hull construction, holds the vertices and planes together
+ // while cropping the hull with planes
+ class ConvexHull : public Ps::UserAllocated
+ {
+ public:
+
+ // Helper class for halfedge representation
+ // Compact encoding: 8-bit vertex/facet indices and a 16-bit twin-edge link,
+ // which bounds a hull at 256 vertices/facets and 32767 half-edges.
+ class HalfEdge
+ {
+ public:
+ PxI16 ea; // the other half of the edge (index into edges list)
+ PxU8 v; // the vertex at the start of this edge (index into vertices list)
+ PxU8 p; // the facet on which this edge lies (index into facets list)
+ HalfEdge(){}
+ HalfEdge(PxI16 _ea, PxU8 _v, PxU8 _p) :ea(_ea), v(_v), p(_p){}
+ };
+
+ // NOTE(review): copy assignment is declared but presumably never defined -
+ // it cannot be implemented anyway since mInputPlanes is a reference member;
+ // confirm there is no definition so accidental use fails at link time.
+ ConvexHull& operator = (const ConvexHull&);
+
+ // construct the base cube hull from given max/min AABB
+ ConvexHull(const PxVec3& bmin, const PxVec3& bmax, const Ps::Array<PxPlane>& inPlanes);
+
+ // construct the base cube hull from given OBB
+ ConvexHull(const PxVec3& extent, const PxTransform& transform, const Ps::Array<PxPlane>& inPlanes);
+
+ // copy constructor
+ // note: shares the source hull's external input-planes array (reference member)
+ ConvexHull(const ConvexHull& srcHull)
+ : mInputPlanes(srcHull.getInputPlanes())
+ {
+ copyHull(srcHull);
+ }
+
+ // construct plain hull
+ // starts empty; caller must keep inPlanes alive for the hull's lifetime
+ ConvexHull(const Ps::Array<PxPlane>& inPlanes)
+ : mInputPlanes(inPlanes)
+ {
+ }
+
+ // finds the candidate plane, returns -1 otherwise
+ PxI32 findCandidatePlane(float planetestepsilon, float epsilon) const;
+
+ // internal check of the hull integrity
+ bool assertIntact(float epsilon) const;
+
+ // return vertices
+ const Ps::Array<PxVec3>& getVertices() const
+ {
+ return mVertices;
+ }
+
+ // return edges
+ const Ps::Array<HalfEdge>& getEdges() const
+ {
+ return mEdges;
+ }
+
+ // return faces
+ const Ps::Array<PxPlane>& getFacets() const
+ {
+ return mFacets;
+ }
+
+ // return input planes
+ const Ps::Array<PxPlane>& getInputPlanes() const
+ {
+ return mInputPlanes;
+ }
+
+ // return vertices
+ Ps::Array<PxVec3>& getVertices()
+ {
+ return mVertices;
+ }
+
+ // return edges
+ Ps::Array<HalfEdge>& getEdges()
+ {
+ return mEdges;
+ }
+
+ // return faces
+ Ps::Array<PxPlane>& getFacets()
+ {
+ return mFacets;
+ }
+
+ // returns the maximum number of vertices on a face
+ PxU32 maxNumVertsPerFace() const;
+
+ // copy the hull from source
+ // raw PxMemCopy is valid here: PxVec3, HalfEdge and PxPlane are trivially copyable
+ void copyHull(const ConvexHull& src)
+ {
+ mVertices.resize(src.getVertices().size());
+ mEdges.resize(src.getEdges().size());
+ mFacets.resize(src.getFacets().size());
+
+ PxMemCopy(mVertices.begin(), src.getVertices().begin(), src.getVertices().size()*sizeof(PxVec3));
+ PxMemCopy(mEdges.begin(), src.getEdges().begin(), src.getEdges().size()*sizeof(HalfEdge));
+ PxMemCopy(mFacets.begin(), src.getFacets().begin(), src.getFacets().size()*sizeof(PxPlane));
+ }
+
+ private:
+ Ps::Array<PxVec3> mVertices;
+ Ps::Array<HalfEdge> mEdges;
+ Ps::Array<PxPlane> mFacets;
+ const Ps::Array<PxPlane>& mInputPlanes; // not owned; planes the hull is built/cropped from
+ };
+
+ //////////////////////////////////////////////////////////////////////////
+ // Crops the hull with a provided plane and with given epsilon
+ // returns new hull if succeeded
+ ConvexHull* convexHullCrop(const ConvexHull& convex, const PxPlane& slice, float planetestepsilon);
+
+ //////////////////////////////////////////////////////////////////////////
+ // three planes intersection
+ // Solves the 3x3 linear system n_i . x = -d_i (PxPlane convention n.x + d = 0)
+ // for the point common to the three planes. The ctor takes the normals as
+ // columns, so the transpose puts them into rows.
+ // NOTE(review): no singularity handling - if the planes are (near) parallel the
+ // matrix is not invertible; callers presumably guarantee well-conditioned input. TODO confirm.
+ PX_FORCE_INLINE PxVec3 threePlaneIntersection(const PxPlane& p0, const PxPlane& p1, const PxPlane& p2)
+ {
+ PxMat33 mp = (PxMat33(p0.n, p1.n, p2.n)).getTranspose();
+ PxMat33 mi = (mp).getInverse();
+ PxVec3 b(p0.d, p1.d, p2.d);
+ return -mi.transform(b);
+ }
+
+ //////////////////////////////////////////////////////////////////////////
+ // Compute OBB around given convex hull
+ bool computeOBBFromConvex(const PxConvexMeshDesc& desc, PxVec3& sides, PxTransform& matrix);
+}
+
+#endif
diff --git a/PhysX_3.4/Source/PhysXCooking/src/convex/ConvexMeshBuilder.cpp b/PhysX_3.4/Source/PhysXCooking/src/convex/ConvexMeshBuilder.cpp
new file mode 100644
index 00000000..5fb356c3
--- /dev/null
+++ b/PhysX_3.4/Source/PhysXCooking/src/convex/ConvexMeshBuilder.cpp
@@ -0,0 +1,504 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2016 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+
+#include "GuConvexMesh.h"
+#include "PsFoundation.h"
+#include "PsMathUtils.h"
+#include "Cooking.h"
+
+#include "GuHillClimbing.h"
+#include "GuBigConvexData2.h"
+#include "GuInternal.h"
+#include "GuSerialize.h"
+#include "VolumeIntegration.h"
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+#include "VolumeIntegration.h"
+#include "ConvexHullBuilder.h"
+#include "ConvexMeshBuilder.h"
+#include "BigConvexDataBuilder.h"
+
+#include "CmUtils.h"
+#include "PsVecMath.h"
+
+using namespace physx;
+using namespace Gu;
+using namespace Ps::aos;
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+// mMass == 0 marks "mass not yet computed" (checked in computeMassInfo);
+// hullBuilder writes directly into this builder's mHullData.
+ConvexMeshBuilder::ConvexMeshBuilder(const bool buildGRBData) : hullBuilder(&mHullData, buildGRBData), mBigConvexData(NULL), mMass(0.0f), mInertia(PxIdentity)
+{
+}
+
+ConvexMeshBuilder::~ConvexMeshBuilder()
+{
+ // the builder owns mBigConvexData (allocated in computeGaussMaps or injected via setBigConvexData)
+ PX_DELETE_AND_RESET(mBigConvexData);
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+// load the mesh data from given polygons
+// Builds the convex mesh from the given descriptor: validates it, cooks the hull
+// (which also computes mass properties), computes the local AABB, optionally the
+// BigConvexData for large hulls and finally the internal objects.
+// \param desc                 convex mesh descriptor; must pass desc.isValid()
+// \param gaussMapVertexLimit  vertex count above which BigConvexData is precomputed
+// \param validateOnly         when true, stop before computing internal objects
+// \param userPolygons         true if the descriptor carries user-provided polygons
+// \return false on invalid descriptor or any cooking step failure
+bool ConvexMeshBuilder::build(const PxConvexMeshDesc& desc, PxU32 gaussMapVertexLimit, bool validateOnly, bool userPolygons)
+{
+ if(!desc.isValid())
+ {
+ Ps::getFoundation().error(PxErrorCode::eINVALID_PARAMETER, __FILE__, __LINE__, "Gu::ConvexMesh::loadFromDesc: desc.isValid() failed!");
+ return false;
+ }
+
+ // cooks the hull; computes mass/inertia/COM as a side effect (see loadConvexHull)
+ if(!loadConvexHull(desc, gaussMapVertexLimit, userPolygons))
+ return false;
+
+ // Compute local bounds (*after* hull has been created)
+ PxBounds3 minMaxBounds;
+ computeBoundsAroundVertices(minMaxBounds, mHullData.mNbHullVertices, hullBuilder.mHullDataHullVertices);
+ mHullData.mAABB = CenterExtents(minMaxBounds);
+
+ // hulls exceeding the vertex limit get BigConvexData (valencies + gauss map) precomputed
+ if(mHullData.mNbHullVertices > gaussMapVertexLimit)
+ {
+ if(!computeGaussMaps())
+ {
+ return false;
+ }
+ }
+
+ if(validateOnly)
+ return true;
+
+// TEST_INTERNAL_OBJECTS
+ computeInternalObjects();
+//~TEST_INTERNAL_OBJECTS
+
+ return true;
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+PX_COMPILE_TIME_ASSERT(sizeof(PxMaterialTableIndex)==sizeof(PxU16));
+// Serializes the cooked convex mesh to the stream in the 'CVXM' format:
+// header, serial flags, hull data, local bounds, mass/inertia/center of mass,
+// optional gauss-map data and the internal-object radius/extents.
+// platformMismatch triggers endian conversion in the write* helpers.
+bool ConvexMeshBuilder::save(PxOutputStream& stream, bool platformMismatch) const
+{
+ // Export header
+ if(!writeHeader('C', 'V', 'X', 'M', PX_CONVEX_VERSION, platformMismatch, stream))
+ return false;
+
+ // Export serialization flags
+ PxU32 serialFlags = 0; // currently no flags are ever set; field kept for format stability
+
+ writeDword(serialFlags, platformMismatch, stream);
+
+ if(!hullBuilder.save(stream, platformMismatch))
+ return false;
+
+ // Export local bounds
+// writeFloat(geomEpsilon, platformMismatch, stream);
+ writeFloat(0.0f, platformMismatch, stream); // geomEpsilon placeholder, kept for format compatibility
+ writeFloat(mHullData.mAABB.getMin(0), platformMismatch, stream);
+ writeFloat(mHullData.mAABB.getMin(1), platformMismatch, stream);
+ writeFloat(mHullData.mAABB.getMin(2), platformMismatch, stream);
+ writeFloat(mHullData.mAABB.getMax(0), platformMismatch, stream);
+ writeFloat(mHullData.mAABB.getMax(1), platformMismatch, stream);
+ writeFloat(mHullData.mAABB.getMax(2), platformMismatch, stream);
+
+ // Export mass info
+ writeFloat(mMass, platformMismatch, stream);
+ writeFloatBuffer(reinterpret_cast<const PxF32*>(&mInertia), 9, platformMismatch, stream);
+ writeFloatBuffer(&mHullData.mCenterOfMass.x, 3, platformMismatch, stream);
+
+ // Export gaussmaps
+ // a float (+1/-1) is used as the presence flag so the stream stays all-float here
+ if(mBigConvexData)
+ {
+ writeFloat(1.0f, platformMismatch, stream); //gauss map flag true
+ BigConvexDataBuilder SVMB(&mHullData, mBigConvexData, hullBuilder.mHullDataHullVertices);
+ SVMB.save(stream, platformMismatch);
+ }
+ else
+ writeFloat(-1.0f, platformMismatch, stream); //gauss map flag false
+
+// TEST_INTERNAL_OBJECTS
+ writeFloat(mHullData.mInternal.mRadius, platformMismatch, stream);
+ writeFloat(mHullData.mInternal.mExtents[0], platformMismatch, stream);
+ writeFloat(mHullData.mInternal.mExtents[1], platformMismatch, stream);
+ writeFloat(mHullData.mInternal.mExtents[2], platformMismatch, stream);
+//~TEST_INTERNAL_OBJECTS
+ return true;
+}
+
+//////////////////////////////////////////////////////////////////////////
+// instead of saving the data into stream, we copy the mesh data
+// into internal Gu::ConvexMesh.
+// Copies the cooked data into the runtime Gu::ConvexHullData instead of
+// serializing it. hullData ends up holding a pointer into builder-owned memory
+// (mBigConvexRawData points at mBigConvexData->mData), so the owner of that
+// data must outlive hullData.
+bool ConvexMeshBuilder::copy(Gu::ConvexHullData& hullData)
+{
+ // hull builder data copy
+ hullBuilder.copy(hullData);
+
+ // mass props
+ hullData.mAABB = mHullData.mAABB;
+ hullData.mCenterOfMass = mHullData.mCenterOfMass;
+
+ // big convex data
+ if(mBigConvexData)
+ {
+ hullData.mBigConvexRawData = &mBigConvexData->mData;
+ }
+ else
+ hullData.mBigConvexRawData = NULL;
+
+ // internal data
+ hullData.mInternal.mRadius = mHullData.mInternal.mRadius;
+ hullData.mInternal.mExtents[0] = mHullData.mInternal.mExtents[0];
+ hullData.mInternal.mExtents[1] = mHullData.mInternal.mExtents[1];
+ hullData.mInternal.mExtents[2] = mHullData.mInternal.mExtents[2];
+
+ return true;
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+// compute mass and inertia of the convex mesh
+// Computes unit-density mass, inertia (about the origin) and center of mass of
+// the cooked hull via Eberly volume integration.
+// Lazy: only runs while mMass <= 0 (the "not computed" marker set in the ctor).
+// \param lowerPrecision selects the faster SIMD integrator
+// On integration failure or non-finite results, reports eINTERNAL_ERROR and
+// leaves mMass unset so a later call may retry.
+void ConvexMeshBuilder::computeMassInfo(bool lowerPrecision)
+{
+ if(mMass <= 0.0f) //not yet computed.
+ {
+ PxIntegrals integrals;
+ // wrap the cooked hull buffers in a descriptor for the integrator (no copies)
+ PxConvexMeshDesc meshDesc;
+ meshDesc.points.count = mHullData.mNbHullVertices;
+ meshDesc.points.data = hullBuilder.mHullDataHullVertices;
+ meshDesc.points.stride = sizeof(PxVec3);
+
+ meshDesc.polygons.data = hullBuilder.mHullDataPolygons;
+ meshDesc.polygons.stride = sizeof(Gu::HullPolygonData);
+ meshDesc.polygons.count = hullBuilder.mHull->mNbPolygons;
+
+ meshDesc.indices.data = hullBuilder.mHullDataVertexData8;
+
+ // using the centroid of the convex for the volume integration solved accuracy issues in cases where the inertia tensor
+ // ended up close to not being positive definite and after a few further transforms the diagonalized inertia tensor ended
+ // up with negative values.
+ PxVec3 mean(0.0f);
+ for(PxU32 i=0; i < mHullData.mNbHullVertices; i++)
+ mean += hullBuilder.mHullDataHullVertices[i];
+ mean *= (1.0f / mHullData.mNbHullVertices);
+
+ bool status = lowerPrecision ?
+ computeVolumeIntegralsEberlySIMD(meshDesc, 1.0f, integrals, mean) : computeVolumeIntegralsEberly(meshDesc, 1.0f, integrals, mean);
+ if(status)
+ {
+
+ integrals.getOriginInertia(reinterpret_cast<PxMat33&>(mInertia));
+ mHullData.mCenterOfMass = integrals.COM;
+
+ //note: the mass will be negative for an inside-out mesh!
+ if(mInertia.column0.isFinite() && mInertia.column1.isFinite() && mInertia.column2.isFinite()
+ && mHullData.mCenterOfMass.isFinite() && PxIsFinite(PxReal(integrals.mass)))
+ {
+ if (integrals.mass < 0)
+ {
+ // negative volume => reversed winding; keep going with absolute values
+ Ps::getFoundation().error(PX_WARN, "Gu::ConvexMesh: Mesh has a negative volume! Is it open or do (some) faces have reversed winding? (Taking absolute value.)");
+ integrals.mass = -integrals.mass;
+ mInertia = -mInertia;
+ }
+
+ mMass = PxReal(integrals.mass); //set mass to valid value.
+ return;
+ }
+ }
+ Ps::getFoundation().error(PxErrorCode::eINTERNAL_ERROR, __FILE__, __LINE__, "Gu::ConvexMesh: Error computing mesh mass properties!\n");
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+#if PX_VC
+#pragma warning(push)
+#pragma warning(disable:4996) // permitting use of gatherStrided until we have a replacement.
+#endif
+
+// Gathers the (possibly strided) points / indices / polygons from the descriptor
+// into contiguous stack buffers, initializes the hull builder and computes the
+// mass properties.
+// NOTE(review): all scratch buffers use PxAlloca sized by user-provided counts;
+// extremely large descriptors could exhaust the stack - confirm upstream limits.
+bool ConvexMeshBuilder::loadConvexHull(const PxConvexMeshDesc& desc, PxU32 gaussMapVertexLimit, bool userPolygons)
+{
+ // gather points
+ PxVec3* geometry = reinterpret_cast<PxVec3*>(PxAlloca(sizeof(PxVec3)*desc.points.count));
+ Cooking::gatherStrided(desc.points.data, geometry, desc.points.count, sizeof(PxVec3), desc.points.stride);
+
+ PxU32* topology = NULL;
+
+ // gather indices
+ // store the indices into topology if we have the polygon data
+ if(desc.indices.data)
+ {
+ topology = reinterpret_cast<PxU32*>(PxAlloca(sizeof(PxU32)*desc.indices.count));
+ if (desc.flags & PxConvexFlag::e16_BIT_INDICES)
+ {
+ // conversion; 16 bit index -> 32 bit index & stride
+ // NOTE(review): widens three indices per stride step, i.e. desc.indices.stride
+ // is treated as a per-triple (triangle-style) stride here - confirm intent.
+ PxU32* dest = topology;
+ const PxU32* pastLastDest = topology + desc.indices.count;
+ const PxU8* source = reinterpret_cast<const PxU8*>(desc.indices.data);
+ while (dest < pastLastDest)
+ {
+ const PxU16 * trig16 = reinterpret_cast<const PxU16*>(source);
+ *dest++ = trig16[0];
+ *dest++ = trig16[1];
+ *dest++ = trig16[2];
+ source += desc.indices.stride;
+ }
+ }
+ else
+ {
+ Cooking::gatherStrided(desc.indices.data, topology, desc.indices.count, sizeof(PxU32), desc.indices.stride);
+ }
+ }
+
+ // gather polygons
+ PxHullPolygon* hullPolygons = NULL;
+ if(desc.polygons.data)
+ {
+ hullPolygons = reinterpret_cast<PxHullPolygon*>(PxAlloca(sizeof(PxHullPolygon)*desc.polygons.count));
+ Cooking::gatherStrided(desc.polygons.data,hullPolygons,desc.polygons.count,sizeof(PxHullPolygon),desc.polygons.stride);
+
+ // if user polygons, make sure the largest one is the first one
+ if (userPolygons)
+ {
+ PxU32 largestPolygon = 0;
+ for (PxU32 i = 1; i < desc.polygons.count; i++)
+ {
+ if(hullPolygons[i].mNbVerts > hullPolygons[largestPolygon].mNbVerts)
+ largestPolygon = i;
+ }
+ if(largestPolygon != 0)
+ {
+ // swap the largest polygon into slot 0
+ PxHullPolygon movedPolygon = hullPolygons[0];
+ hullPolygons[0] = hullPolygons[largestPolygon];
+ hullPolygons[largestPolygon] = movedPolygon;
+ }
+ }
+ }
+
+ const bool doValidation = desc.flags & PxConvexFlag::eDISABLE_MESH_VALIDATION ? false : true;
+ if(!hullBuilder.init(desc.points.count, geometry, topology, desc.indices.count, desc.polygons.count, hullPolygons, gaussMapVertexLimit, doValidation))
+ {
+ Ps::getFoundation().error(PxErrorCode::eINTERNAL_ERROR, __FILE__, __LINE__, "Gu::ConvexMesh::loadConvexHull: convex hull init failed!");
+ return false;
+ }
+ // eFAST_INERTIA_COMPUTATION selects the lower-precision SIMD integrator
+ computeMassInfo(desc.flags & PxConvexFlag::eFAST_INERTIA_COMPUTATION);
+
+ return true;
+}
+
+#if PX_VC
+#pragma warning(pop)
+#endif
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+// compute polygons from given triangles. This is support function used in extensions. We do not accept triangles as an input for convex mesh desc.
+// Computes hull polygons from the given triangle soup (support path used by
+// extensions; triangles are not accepted in the regular convex mesh desc).
+// All output buffers (outVertices, indices, polygons) are allocated from
+// inAllocator - the caller owns and must release them.
+// \return false if the triangles do not form a convex hull
+bool ConvexMeshBuilder::computeHullPolygons(const PxU32& nbVerts,const PxVec3* verts, const PxU32& nbTriangles, const PxU32* triangles, PxAllocatorCallback& inAllocator,
+ PxU32& outNbVerts, PxVec3*& outVertices , PxU32& nbIndices, PxU32*& indices, PxU32& nbPolygons, PxHullPolygon*& polygons)
+{
+ if(!hullBuilder.computeHullPolygons(nbVerts,verts,nbTriangles,triangles))
+ {
+ Ps::getFoundation().error(PxErrorCode::eINTERNAL_ERROR, __FILE__, __LINE__, "ConvexMeshBuilder::computeHullPolygons: compute convex hull polygons failed. Provided triangles dont form a convex hull.");
+ return false;
+ }
+
+ outNbVerts = hullBuilder.mHull->mNbHullVertices;
+ nbPolygons = hullBuilder.mHull->mNbPolygons;
+
+ outVertices = reinterpret_cast<PxVec3*>(inAllocator.allocate(outNbVerts*sizeof(PxVec3),"PxVec3",__FILE__,__LINE__));
+ PxMemCopy(outVertices,hullBuilder.mHullDataHullVertices,outNbVerts*sizeof(PxVec3));
+
+ // total index count = sum of the per-polygon vertex counts
+ nbIndices = 0;
+ for (PxU32 i = 0; i < nbPolygons; i++)
+ {
+ nbIndices += hullBuilder.mHullDataPolygons[i].mNbVerts;
+ }
+
+ // widen the builder's 8-bit vertex references to 32-bit output indices
+ indices = reinterpret_cast<PxU32*>(inAllocator.allocate(nbIndices*sizeof(PxU32),"PxU32",__FILE__,__LINE__));
+ for (PxU32 i = 0; i < nbIndices; i++)
+ {
+ indices[i] = hullBuilder.mHullDataVertexData8[i];
+ }
+
+ polygons = reinterpret_cast<PxHullPolygon*>(inAllocator.allocate(nbPolygons*sizeof(PxHullPolygon),"PxHullPolygon",__FILE__,__LINE__));
+
+ for (PxU32 i = 0; i < nbPolygons; i++)
+ {
+ const Gu::HullPolygonData& polygonData = hullBuilder.mHullDataPolygons[i];
+ PxHullPolygon& outPolygon = polygons[i];
+ outPolygon.mPlane[0] = polygonData.mPlane.n.x;
+ outPolygon.mPlane[1] = polygonData.mPlane.n.y;
+ outPolygon.mPlane[2] = polygonData.mPlane.n.z;
+ outPolygon.mPlane[3] = polygonData.mPlane.d;
+
+ outPolygon.mNbVerts = polygonData.mNbVerts;
+ outPolygon.mIndexBase = polygonData.mVRef8;
+
+ // debug-only consistency check: output indices must mirror the builder's vertex data
+ for (PxU32 j = 0; j < polygonData.mNbVerts; j++)
+ {
+ PX_ASSERT(indices[outPolygon.mIndexBase + j] == hullBuilder.mHullDataVertexData8[polygonData.mVRef8+j]);
+ }
+ }
+
+ return true;
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+// compute big convex data
+// Builds the BigConvexData (vertex valencies + sampled gauss map) for large
+// hulls. Replaces any previously held mBigConvexData.
+bool ConvexMeshBuilder::computeGaussMaps()
+{
+ // The number of polygons is limited to 256 because the gaussmap encode 256 polys maximum
+
+ // sampling density of the gauss map; alternatives kept below for experimentation
+ PxU32 density = 16;
+ // density = 64;
+ // density = 8;
+ // density = 2;
+
+ PX_DELETE(mBigConvexData);
+ PX_NEW_SERIALIZED(mBigConvexData,BigConvexData);
+ BigConvexDataBuilder SVMB(&mHullData, mBigConvexData, hullBuilder.mHullDataHullVertices);
+ // valencies we need to compute first, they are needed for min/max precompute
+ SVMB.computeValencies(hullBuilder);
+ SVMB.precompute(density);
+
+ return true;
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+// TEST_INTERNAL_OBJECTS
+
+// Computes the internal box extents (data.mInternal.mExtents): a box centered
+// at the center of mass that fits inside the hull. Requires mInternal.mRadius
+// to be set already (see computeInternalObjects). The axes are processed as:
+// the largest AABB extent first, then the remaining two ordered so e0 >= e1.
+static void ComputeInternalExtent(Gu::ConvexHullData& data, const Gu::HullPolygonData* hullPolys)
+{
+ const PxVec3 e = data.mAABB.getMax() - data.mAABB.getMin();
+
+ // PT: For that formula, see \\sw\physx\PhysXSDK\3.4\trunk\InternalDocumentation\Cooking\InternalExtents.png
+ const float r = data.mInternal.mRadius / sqrtf(3.0f);
+
+ const float epsilon = 1E-7f;
+
+ const PxU32 largestExtent = Ps::largestAxis(e);
+ PxU32 e0 = Ps::getNextIndex3(largestExtent);
+ PxU32 e1 = Ps::getNextIndex3(e0);
+ if(e[e0] < e[e1])
+ Ps::swap<PxU32>(e0,e1);
+
+ data.mInternal.mExtents[0] = FLT_MAX;
+ data.mInternal.mExtents[1] = FLT_MAX;
+ data.mInternal.mExtents[2] = FLT_MAX;
+
+ // PT: the following code does ray-vs-plane raycasts.
+
+ // find the largest box along the largest extent, with given internal radius
+ for(PxU32 i = 0; i < data.mNbPolygons; i++)
+ {
+ // concurrent with search direction
+ // skip planes (near-)parallel to the search axis: no intersection along it
+ const float d = hullPolys[i].mPlane.n[largestExtent];
+ if((-epsilon < d && d < epsilon))
+ continue;
+
+ const float numBase = -hullPolys[i].mPlane.d - hullPolys[i].mPlane.n.dot(data.mCenterOfMass);
+ const float denBase = 1.0f/hullPolys[i].mPlane.n[largestExtent];
+ const float numn0 = r * hullPolys[i].mPlane.n[e0];
+ const float numn1 = r * hullPolys[i].mPlane.n[e1];
+
+ // the four sign combinations offset the ray origin by +/-r along e0 and e1,
+ // covering the box corners; keep the smallest hit distance (clamped to r)
+ float num = numBase - numn0 - numn1;
+ float ext = PxMax(fabsf(num*denBase), r);
+ if(ext < data.mInternal.mExtents[largestExtent])
+ data.mInternal.mExtents[largestExtent] = ext;
+
+ num = numBase - numn0 + numn1;
+ ext = PxMax(fabsf(num *denBase), r);
+ if(ext < data.mInternal.mExtents[largestExtent])
+ data.mInternal.mExtents[largestExtent] = ext;
+
+ num = numBase + numn0 + numn1;
+ ext = PxMax(fabsf(num *denBase), r);
+ if(ext < data.mInternal.mExtents[largestExtent])
+ data.mInternal.mExtents[largestExtent] = ext;
+
+ num = numBase + numn0 - numn1;
+ ext = PxMax(fabsf(num *denBase), r);
+ if(ext < data.mInternal.mExtents[largestExtent])
+ data.mInternal.mExtents[largestExtent] = ext;
+ }
+
+ // Refine the box along e0,e1
+ for(PxU32 i = 0; i < data.mNbPolygons; i++)
+ {
+ // combined denominators for the diagonal directions (e0+e1) and (e0-e1)
+ const float denumAdd = hullPolys[i].mPlane.n[e0] + hullPolys[i].mPlane.n[e1];
+ const float denumSub = hullPolys[i].mPlane.n[e0] - hullPolys[i].mPlane.n[e1];
+
+ const float numBase = -hullPolys[i].mPlane.d - hullPolys[i].mPlane.n.dot(data.mCenterOfMass);
+ const float numn0 = data.mInternal.mExtents[largestExtent] * hullPolys[i].mPlane.n[largestExtent];
+
+ if(!(-epsilon < denumAdd && denumAdd < epsilon))
+ {
+ float num = numBase - numn0;
+ float ext = PxMax(fabsf(num/ denumAdd), r);
+ if(ext < data.mInternal.mExtents[e0])
+ data.mInternal.mExtents[e0] = ext;
+
+ num = numBase + numn0;
+ ext = PxMax(fabsf(num / denumAdd), r);
+ if(ext < data.mInternal.mExtents[e0])
+ data.mInternal.mExtents[e0] = ext;
+ }
+
+ if(!(-epsilon < denumSub && denumSub < epsilon))
+ {
+ float num = numBase - numn0;
+ float ext = PxMax(fabsf(num / denumSub), r);
+ if(ext < data.mInternal.mExtents[e0])
+ data.mInternal.mExtents[e0] = ext;
+
+ num = numBase + numn0;
+ ext = PxMax(fabsf(num / denumSub), r);
+ if(ext < data.mInternal.mExtents[e0])
+ data.mInternal.mExtents[e0] = ext;
+ }
+ }
+ // square cross-section: e1 takes the extent computed for e0
+ data.mInternal.mExtents[e1] = data.mInternal.mExtents[e0];
+}
+
+//////////////////////////////////////////////////////////////////////////
+// compute internal objects, get the internal extent and radius
+void ConvexMeshBuilder::computeInternalObjects()
+{
+ const Gu::HullPolygonData* hullPolys = hullBuilder.mHullDataPolygons;
+ Gu::ConvexHullData& data = mHullData;
+
+ // compute the internal radius
+ // inner-sphere radius = smallest distance from the center of mass to any hull plane
+ data.mInternal.mRadius = FLT_MAX;
+ for(PxU32 i=0;i<data.mNbPolygons;i++)
+ {
+ const float dist = fabsf(hullPolys[i].mPlane.distance(data.mCenterOfMass));
+ if(dist<data.mInternal.mRadius)
+ data.mInternal.mRadius = dist;
+ }
+
+ // then derive the internal box extents (uses mRadius computed above)
+ ComputeInternalExtent(data, hullPolys);
+}
+
+//~TEST_INTERNAL_OBJECTS
diff --git a/PhysX_3.4/Source/PhysXCooking/src/convex/ConvexMeshBuilder.h b/PhysX_3.4/Source/PhysXCooking/src/convex/ConvexMeshBuilder.h
new file mode 100644
index 00000000..57e0ca97
--- /dev/null
+++ b/PhysX_3.4/Source/PhysXCooking/src/convex/ConvexMeshBuilder.h
@@ -0,0 +1,100 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2016 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+
+#ifndef PX_COLLISION_CONVEXMESHBUILDER
+#define PX_COLLISION_CONVEXMESHBUILDER
+
+#include "GuConvexMeshData.h"
+#include "PxCooking.h"
+#include "ConvexPolygonsBuilder.h"
+
+namespace physx
+{
+ //////////////////////////////////////////////////////////////////////////
+ // Convex mesh builder, creates the convex mesh from given polygons and creates internal data
+ class ConvexMeshBuilder
+ {
+ public:
+ ConvexMeshBuilder(const bool buildGRBData);
+ ~ConvexMeshBuilder();
+
+ // loads the computed or given convex hull from descriptor.
+ // the descriptor does contain polygons directly, triangles are not allowed
+ bool build(const PxConvexMeshDesc&, PxU32 gaussMapVertexLimit, bool validateOnly = false, bool userPolygons = false);
+
+ // save the convex mesh into stream
+ bool save(PxOutputStream& stream, bool platformMismatch) const;
+
+ // copy the convex mesh into internal convex mesh, which can be directly used then
+ bool copy(Gu::ConvexHullData& convexData);
+
+ // loads the convex mesh from given polygons
+ bool loadConvexHull(const PxConvexMeshDesc&, PxU32 gaussMapVertexLimit, bool userPolygons);
+
+ // computed hull polygons from given triangles
+ // output buffers are allocated from inAllocator; caller owns and releases them
+ bool computeHullPolygons(const PxU32& nbVerts,const PxVec3* verts, const PxU32& nbTriangles, const PxU32* triangles, PxAllocatorCallback& inAllocator,
+ PxU32& outNbVerts, PxVec3*& outVertices, PxU32& nbIndices, PxU32*& indices, PxU32& nbPolygons, PxHullPolygon*& polygons);
+
+ // compute big convex data
+ bool computeGaussMaps();
+
+ // compute mass, inertia tensor
+ void computeMassInfo(bool lowerPrecision);
+// TEST_INTERNAL_OBJECTS
+ // internal objects
+ void computeInternalObjects();
+//~TEST_INTERNAL_OBJECTS
+
+ // return computed mass
+ // meaningful only after a successful loadConvexHull/computeMassInfo (0 before)
+ PxReal getMass() const { return mMass; }
+
+ // return computed inertia tensor
+ const PxMat33& getInertia() const { return mInertia; }
+
+ // return big convex data
+ BigConvexData* getBigConvexData() const { return mBigConvexData; }
+
+ // set big convex data
+ // NOTE(review): ownership transfers to the builder (deleted in the dtor); a
+ // previously held pointer is overwritten without delete - confirm single use.
+ void setBigConvexData(BigConvexData* data) { mBigConvexData = data; }
+
+ // NOTE(review): mutable presumably so const members (e.g. save()) can call
+ // non-const builder methods - confirm.
+ mutable ConvexPolygonsBuilder hullBuilder;
+
+ protected:
+ Gu::ConvexHullData mHullData;
+
+ BigConvexData* mBigConvexData; //!< optional, only for large meshes! PT: redundant with ptr in chull data? Could also be end of other buffer
+ PxReal mMass; //this is mass assuming a unit density that can be scaled by instances!
+ PxMat33 mInertia; //in local space of mesh!
+
+ };
+
+}
+
+#endif
diff --git a/PhysX_3.4/Source/PhysXCooking/src/convex/ConvexPolygonsBuilder.cpp b/PhysX_3.4/Source/PhysXCooking/src/convex/ConvexPolygonsBuilder.cpp
new file mode 100644
index 00000000..44725819
--- /dev/null
+++ b/PhysX_3.4/Source/PhysXCooking/src/convex/ConvexPolygonsBuilder.cpp
@@ -0,0 +1,1328 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2016 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+#include "foundation/PxMemory.h"
+#include "EdgeList.h"
+#include "Adjacencies.h"
+#include "MeshCleaner.h"
+#include "CmRadixSortBuffered.h"
+#include "CookingUtils.h"
+#include "PsArray.h"
+#include "PsFoundation.h"
+
+#include "ConvexPolygonsBuilder.h"
+
+
+using namespace physx;
+
+#define USE_PRECOMPUTED_HULL_PROJECTION
+
+static PX_INLINE void Flip(HullTriangleData& data)
+{
+ PxU32 tmp = data.mRef[2];
+ data.mRef[2] = data.mRef[1];
+ data.mRef[1] = tmp;
+}
+
+//////////////////////////////////////////////////////////////////////////
+//! A generic couple structure
+class Pair : public Ps::UserAllocated
+{
+public:
+ PX_FORCE_INLINE Pair() {}
+ PX_FORCE_INLINE Pair(PxU32 i0, PxU32 i1) : id0(i0), id1(i1) {}
+ PX_FORCE_INLINE ~Pair() {}
+
+ //! Operator for "if(Pair==Pair)"
+ PX_FORCE_INLINE bool operator==(const Pair& p) const { return (id0==p.id0) && (id1==p.id1); }
+ //! Operator for "if(Pair!=Pair)"
+ PX_FORCE_INLINE bool operator!=(const Pair& p) const { return (id0!=p.id0) || (id1!=p.id1); }
+
+ PxU32 id0; //!< First index of the pair
+ PxU32 id1; //!< Second index of the pair
+};
+PX_COMPILE_TIME_ASSERT(sizeof(Pair)==8);
+
+//////////////////////////////////////////////////////////////////////////
+// construct a plane
+template <class T>
+PX_INLINE PxPlane PlaneEquation(const T& t, const PxVec3* verts)
+{
+ const PxVec3& p0 = verts[t.v[0]];
+ const PxVec3& p1 = verts[t.v[1]];
+ const PxVec3& p2 = verts[t.v[2]];
+ return PxPlane(p0, p1, p2);
+}
+
+//////////////////////////////////////////////////////////////////////////
+// negate plane
+static PX_FORCE_INLINE void negatePlane(Gu::HullPolygonData& data)
+{
+ data.mPlane.n = -data.mPlane.n;
+ data.mPlane.d = -data.mPlane.d;
+}
+
+//////////////////////////////////////////////////////////////////////////
+// Inverse a buffer in-place
+static bool inverseBuffer(PxU32 nbEntries, PxU8* entries)
+{
+ if(!nbEntries || !entries) return false;
+
+ for(PxU32 i=0; i < (nbEntries>>1); i++)
+ Ps::swap(entries[i], entries[nbEntries-1-i]);
+
+ return true;
+}
+
+//////////////////////////////////////////////////////////////////////////
+// Extracts a line-strip from a list of non-sorted line-segments (slow)
static bool findLineStrip(Ps::Array<PxU32>& lineStrip, const Ps::Array<Pair>& lineSegments)
{
	// Ex:
	//
	// 4-2
	// 0-1
	// 2-3
	// 4-0
	// 7-3
	// 7-1
	//
	// => 0-1-7-3-2-4-0

	// 0-0-1-1-2-2-3-3-4-4-7-7

	// 0-1
	// 0-4
	// 1-7
	// 2-3
	// 2-4
	// 3-7

	// Naive implementation below

	// Work on a local copy so the caller's segment list is left untouched.
	Ps::Array<Pair> Copy(lineSegments);

	// Pass 1: repeatedly remove pairs of identical segments (matched in either
	// orientation). The scan restarts from scratch after each removal because
	// remove() shifts the remaining entries.
RunAgain:
	{
		PxU32 nbSegments = Copy.size();
		for(PxU32 j=0;j<nbSegments;j++)
		{
			PxU32 ID0 = Copy[j].id0;
			PxU32 ID1 = Copy[j].id1;

			for(PxU32 i=j+1;i<nbSegments;i++)
			{
				if(
					(Copy[i].id0==ID0 && Copy[i].id1==ID1)
					|| (Copy[i].id1==ID0 && Copy[i].id0==ID1)
					)
				{
					// Duplicate segment found => remove both
					PX_ASSERT(Copy.size()>=2);
					Copy.remove(i);
					Copy.remove(j);
					goto RunAgain;
				}
			}
		}
		// Goes through when everything's fine
	}

	// Seed the strip with the first remaining segment.
	PxU32 ref0 = 0xffffffff;
	PxU32 ref1 = 0xffffffff;
	if(Copy.size()>=1)
	{
		Pair* Segments = Copy.begin();
		if(Segments)
		{
			ref0 = Segments->id0;
			ref1 = Segments->id1;
			lineStrip.pushBack(ref0);
			lineStrip.pushBack(ref1);
			PX_ASSERT(Copy.size()>=1);
			Copy.remove(0);
		}
	}

	// Pass 2: greedily chain the remaining segments onto the strip, always
	// extending from the last output vertex (ref1). Each consumed segment is
	// removed from the working copy.
Wrap:
	// Look for same vertex ref in remaining segments
	PxU32 nb = Copy.size();
	if(!nb)
	{
		// ### check the line is actually closed?
		return true;
	}

	for(PxU32 i=0;i<nb;i++)
	{
		PxU32 newRef0 = Copy[i].id0;
		PxU32 newRef1 = Copy[i].id1;

		// We look for Ref1 only
		if(newRef0==ref1)
		{
			// r0 - r1
			// r1 - x
			lineStrip.pushBack(newRef1); // Output the other reference
			ref0 = newRef0;
			ref1 = newRef1;
			Copy.remove(i);
			goto Wrap;
		}
		else if(newRef1==ref1)
		{
			// r0 - r1
			// x - r1 => r1 - x
			lineStrip.pushBack(newRef0); // Output the other reference
			ref0 = newRef1;
			ref1 = newRef0;
			Copy.remove(i);
			goto Wrap;
		}
	}
	// Leftover segments could not be chained => the input was not a single
	// closed loop; report failure to the caller.
	return false;
}
+
+//////////////////////////////////////////////////////////////////////////
+// Test for duplicate triangles
PX_COMPILE_TIME_ASSERT(sizeof(Gu::TriangleT<PxU32>)==sizeof(PxVec3)); // triangles are reinterpreted as PxVec3s by the reducer below, so the sizes must match
static bool TestDuplicateTriangles(PxU32& nbFaces, Gu::TriangleT<PxU32>* faces, bool repair)
{
	// Nothing to test for an empty face list.
	if(!nbFaces || !faces)
		return true;

	// Copy the indices to a stack buffer so the reduction doesn't touch the
	// caller's data unless 'repair' is set.
	Gu::TriangleT<PxU32>* indices32 = reinterpret_cast<Gu::TriangleT<PxU32>*>(PxAlloca(nbFaces*sizeof(Gu::TriangleT<PxU32>)));
	for(PxU32 i=0;i<nbFaces;i++)
	{
		indices32[i].v[0] = faces[i].v[0];
		indices32[i].v[1] = faces[i].v[1];
		indices32[i].v[2] = faces[i].v[2];
	}

	// Radix-sort power... Each triangle (3 x 32-bit indices) is treated as a
	// "vertex" so the cloud reducer collapses identical triangles.
	// NOTE(review): triangles with the same indices in a different order/winding
	// are presumably NOT detected as duplicates by this comparison - confirm.
	ReducedVertexCloud reducer(reinterpret_cast<PxVec3*>(indices32), nbFaces);
	REDUCEDCLOUD rc;
	reducer.Reduce(&rc);
	if(rc.NbRVerts<nbFaces)
	{
		// Duplicates found: optionally overwrite the input with the reduced set.
		if(repair)
		{
			nbFaces = rc.NbRVerts;
			for(PxU32 i=0;i<nbFaces;i++)
			{
				const Gu::TriangleT<PxU32>* curTri = reinterpret_cast<const Gu::TriangleT<PxU32>*>(&rc.RVerts[i]);
				faces[i].v[0] = curTri->v[0];
				faces[i].v[1] = curTri->v[1];
				faces[i].v[2] = curTri->v[2];
			}
		}
		return false; // Test failed
	}
	return true; // Test succeeded
}
+
+//////////////////////////////////////////////////////////////////////////
+// plane culling test
+static PX_FORCE_INLINE bool testCulling(const Gu::TriangleT<PxU32>& triangle, const PxVec3* verts, const PxVec3& center)
+{
+ const PxPlane plane(verts[triangle.v[0]], verts[triangle.v[1]], verts[triangle.v[2]]);
+ return plane.distance(center)>0.0f;
+}
+
+//////////////////////////////////////////////////////////////////////////
+// face normals test
+static bool TestUnifiedNormals(PxU32 nbVerts, const PxVec3* verts, PxU32 nbFaces, Gu::TriangleT<PxU32>* faces, bool repair)
+{
+ if(!nbVerts || !verts || !nbFaces || !faces)
+ return false;
+
+ // Unify normals so that all hull faces are well oriented
+
+ // Compute geometric center - we need a vertex inside the hull
+ const float coeff = 1.0f / float(nbVerts);
+ PxVec3 geomCenter(0.0f, 0.0f, 0.0f);
+ for(PxU32 i=0;i<nbVerts;i++)
+ {
+ geomCenter.x += verts[i].x * coeff;
+ geomCenter.y += verts[i].y * coeff;
+ geomCenter.z += verts[i].z * coeff;
+ }
+
+ // We know the hull is (hopefully) convex so we can easily test whether a point is inside the hull or not.
+ // The previous geometric center must be invisible from any hull face: that's our test to decide whether a normal
+ // must be flipped or not.
+ bool status = true;
+ for(PxU32 i=0;i<nbFaces;i++)
+ {
+ // Test face visibility from the geometric center (supposed to be inside the hull).
+ // All faces must be invisible from this point to ensure a strict CCW order.
+ if(testCulling(faces[i], verts, geomCenter))
+ {
+ if(repair) faces[i].flip();
+ status = false;
+ }
+ }
+
+ return status;
+}
+
+//////////////////////////////////////////////////////////////////////////
+// clean the mesh
+static bool CleanFaces(PxU32& nbFaces, Gu::TriangleT<PxU32>* faces, PxU32& nbVerts, PxVec3* verts)
+{
+ // Brute force mesh cleaning.
+ // PT: I added this back on Feb-18-05 because it fixes bugs with hulls from QHull.
+ MeshCleaner cleaner(nbVerts, verts, nbFaces, faces->v, 0.0f);
+ if (!cleaner.mNbTris)
+ return false;
+
+ nbVerts = cleaner.mNbVerts;
+ nbFaces = cleaner.mNbTris;
+
+ PxMemCopy(verts, cleaner.mVerts, cleaner.mNbVerts*sizeof(PxVec3));
+
+ for (PxU32 i = 0; i < cleaner.mNbTris; i++)
+ {
+ faces[i].v[0] = cleaner.mIndices[i * 3 + 0];
+ faces[i].v[1] = cleaner.mIndices[i * 3 + 1];
+ faces[i].v[2] = cleaner.mIndices[i * 3 + 2];
+ }
+
+ // Get rid of duplicates
+ TestDuplicateTriangles(nbFaces, faces, true);
+
+ // Unify normals
+ TestUnifiedNormals(nbVerts, verts, nbFaces, faces, true);
+
+ // Remove zero-area triangles
+ // TestZeroAreaTriangles(nbFaces, faces, verts, true);
+
+ // Unify normals again
+ TestUnifiedNormals(nbVerts, verts, nbFaces, faces, true);
+
+ // Get rid of duplicates again
+ TestDuplicateTriangles(nbFaces, faces, true);
+
+ return true;
+}
+
+//////////////////////////////////////////////////////////////////////////
+// check the newly constructed faces
+static bool CheckFaces(PxU32 nbFaces, const Gu::TriangleT<PxU32>* faces, PxU32 nbVerts, const PxVec3* verts)
+{
+ // Remove const since we use functions that can do both testing & repairing. But we won't change the data.
+ Gu::TriangleT<PxU32>* f = const_cast<Gu::TriangleT<PxU32>*>(faces);
+
+ // Test duplicate faces
+ if(!TestDuplicateTriangles(nbFaces, f, false))
+ return false;
+
+ // Test unified normals
+ if(!TestUnifiedNormals(nbVerts, verts, nbFaces, f, false))
+ return false;
+
+ return true;
+}
+
+//////////////////////////////////////////////////////////////////////////
+// compute the newell plane from the face verts
+static bool computeNewellPlane(PxPlane& plane, PxU32 nbVerts, const PxU8* indices, const PxVec3* verts)
+{
+ if(!nbVerts || !indices || !verts)
+ return false;
+
+ PxVec3 centroid(0,0,0), normal(0,0,0);
+ for(PxU32 i=nbVerts-1, j=0; j<nbVerts; i=j, j++)
+ {
+ normal.x += (verts[indices[i]].y - verts[indices[j]].y) * (verts[indices[i]].z + verts[indices[j]].z);
+ normal.y += (verts[indices[i]].z - verts[indices[j]].z) * (verts[indices[i]].x + verts[indices[j]].x);
+ normal.z += (verts[indices[i]].x - verts[indices[j]].x) * (verts[indices[i]].y + verts[indices[j]].y);
+ centroid += verts[indices[j]];
+ }
+ plane.n = normal;
+ plane.n.normalize();
+ plane.d = -(centroid.dot(plane.n))/float(nbVerts);
+
+ return true;
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+/**
+* Analyses a redundant vertices and splits the polygons if necessary.
+* \relates ConvexHull
+* \fn extractHullPolygons(Container& polygon_data, const ConvexHull& hull)
+* \param nb_polygons [out] number of extracted polygons
+* \param polygon_data [out] polygon data: (Nb indices, index 0, index 1... index N)(Nb indices, index 0, index 1... index N)(...)
+* \param hull [in] convex hull
+* \param redundantVertices [out] redundant vertices found inside the polygons - we want to remove them because of PCM
+*/
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
static void checkRedundantVertices(PxU32& nb_polygons, Ps::Array<PxU32>& polygon_data, const ConvexPolygonsBuilder& hull, Ps::Array<PxU32>& triangle_data, Ps::Array<PxU32>& redundantVertices)
{
	const PxU32* dFaces = reinterpret_cast<const PxU32*>(hull.getFaces());
	bool needToSplitPolygons = false;

	// polygonMarkers[t] == true => polygon t will be split back into its triangles.
	bool* polygonMarkers = reinterpret_cast<bool*>(PxAlloca(nb_polygons*sizeof(bool)));
	PxMemZero(polygonMarkers, nb_polygons*sizeof(bool));

	// redundancyMarkers[i] == true => redundantVertices[i] is handled by a split
	// and must not be reported to the caller anymore.
	bool* redundancyMarkers = reinterpret_cast<bool*>(PxAlloca(redundantVertices.size()*sizeof(bool)));
	PxMemZero(redundancyMarkers, redundantVertices.size()*sizeof(bool));

	// parse through the redundant vertices and if we cannot remove them split just the actual polygon if possible
	Ps::Array<PxU32> polygonsContainer;
	PxU32 numEntries = 0;
	for (PxU32 i = redundantVertices.size(); i--;)
	{
		numEntries = 0;
		polygonsContainer.clear();
		// Collect every polygon that references this redundant vertex, storing
		// (polygon index, vertex count) pairs in polygonsContainer.
		// go through polygons, if polygons does have only 3 verts we cannot remove any vertex from it, try to decompose the second one
		PxU32* Data = polygon_data.begin();
		for(PxU32 t=0;t<nb_polygons;t++)
		{
			PxU32 nbVerts = *Data++;
			PX_ASSERT(nbVerts>=3); // Else something very wrong happened...

			for(PxU32 j=0;j<nbVerts;j++)
			{
				if(redundantVertices[i] == Data[j])
				{
					polygonsContainer.pushBack(t);
					polygonsContainer.pushBack(nbVerts);
					numEntries++;
					break;
				}
			}
			Data += nbVerts;
		}

		// A triangle (3 verts) cannot lose a vertex, so if any containing polygon
		// is a triangle we must keep the vertex and split the other polygons.
		bool needToSplit = false;
		for (PxU32 j = 0; j < numEntries; j++)
		{
			PxU32 numInternalVertices = polygonsContainer[j*2 + 1];
			if(numInternalVertices == 3)
			{
				needToSplit = true;
			}
		}

		// now lets mark the polygons for split
		if(needToSplit)
		{
			// mark the redundant vertex, it is solved by spliting, dont report it
			needToSplitPolygons = true;
			redundancyMarkers[i] = true;
			for (PxU32 j = 0; j < numEntries; j++)
			{
				PxU32 polygonNumber = polygonsContainer[j*2];
				PxU32 numInternalPolygons = polygonsContainer[j*2 + 1];
				if(numInternalPolygons != 3)
				{
					polygonMarkers[polygonNumber] = true;
				}
			}
		}
	}

	if(needToSplitPolygons)
	{
		// parse from the end so we can remove it and not change the order
		for (PxU32 i = redundantVertices.size(); i--;)
		{
			// remove it
			if(redundancyMarkers[i])
			{
				redundantVertices.remove(i);
			}
		}

		// Rebuild the polygon and triangle streams: marked polygons are emitted
		// as one 3-vertex polygon per source triangle, others are copied verbatim.
		Ps::Array<PxU32> newPolygon_data;
		Ps::Array<PxU32> newTriangle_data;
		PxU32 newNb_polygons = 0;

		PxU32* data = polygon_data.begin();
		PxU32* triData = triangle_data.begin();
		for(PxU32 i=0;i<nb_polygons;i++)
		{
			PxU32 nbVerts = *data++;
			PxU32 nbTris = *triData++;
			if(polygonMarkers[i])
			{
				// split the polygon into triangles
				for(PxU32 k=0;k< nbTris; k++)
				{
					newNb_polygons++;
					const PxU32 faceIndex = triData[k];
					newPolygon_data.pushBack(PxU32(3));
					newPolygon_data.pushBack(dFaces[3*faceIndex]);
					newPolygon_data.pushBack(dFaces[3*faceIndex + 1]);
					newPolygon_data.pushBack(dFaces[3*faceIndex + 2]);
					newTriangle_data.pushBack(PxU32(1));
					newTriangle_data.pushBack(faceIndex);
				}
			}
			else
			{
				newNb_polygons++;
				// copy the original polygon
				newPolygon_data.pushBack(nbVerts);
				for(PxU32 j=0;j<nbVerts;j++)
					newPolygon_data.pushBack(data[j]);

				// copy the original polygon triangles
				newTriangle_data.pushBack(nbTris);
				for(PxU32 k=0;k< nbTris; k++)
				{
					newTriangle_data.pushBack(triData[k]);
				}
			}
			data += nbVerts;
			triData += nbTris;
		}

		// now put the data to output
		polygon_data.clear();
		triangle_data.clear();

		// the copy does copy even the data
		polygon_data = newPolygon_data;
		triangle_data = newTriangle_data;
		nb_polygons = newNb_polygons;
	}
}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+/**
+* Analyses a convex hull made of triangles and extracts polygon data out of it.
+* \relates ConvexHull
+* \fn extractHullPolygons(Ps::Array<PxU32>& polygon_data, const ConvexHull& hull)
+* \param nb_polygons [out] number of extracted polygons
+* \param polygon_data [out] polygon data: (Nb indices, index 0, index 1... index N)(Nb indices, index 0, index 1... index N)(...)
+* \param hull [in] convex hull
+* \param triangle_data [out] triangle data
+* \param rendundantVertices [out] redundant vertices found inside the polygons - we want to remove them because of PCM
+* \return true if success
+*/
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
static bool extractHullPolygons(PxU32& nb_polygons, Ps::Array<PxU32>& polygon_data, const ConvexPolygonsBuilder& hull, Ps::Array<PxU32>* triangle_data, Ps::Array<PxU32>& rendundantVertices)
{
	PxU32 nbFaces = hull.getNbFaces();
	const PxVec3* hullVerts = hull.mHullDataHullVertices;
	const PxU32 nbVertices = hull.mHull->mNbHullVertices;

	// Only 32-bit faces are used here; wFaces is kept as a (always NULL) 16-bit
	// alternative in the dFaces?:wFaces selections below.
	const PxU16* wFaces = NULL;
	const PxU32* dFaces = reinterpret_cast<const PxU32*>(hull.getFaces());
	PX_ASSERT(wFaces || dFaces);

	// Build triangle adjacency; edges between coplanar triangles (within Epsilon)
	// are marked "inactive" and become the interior of the merged polygons.
	ADJACENCIESCREATE create;
	create.NbFaces = nbFaces;
	create.DFaces = dFaces;
	create.WFaces = wFaces;
	create.Verts = hullVerts;
	//Create.Epsilon = 0.01f; // PT: trying to fix Rob Elam bug. Also fixes TTP 2467
	// Create.Epsilon = 0.001f; // PT: for "Bruno's bug"
	create.Epsilon = 0.005f; // PT: middle-ground seems to fix both. Expose this param?


	AdjacenciesBuilder adj;
	if(!adj.Init(create)) return false;

	PxU32 nbBoundaryEdges = adj.ComputeNbBoundaryEdges();
	if(nbBoundaryEdges) return false; // A valid hull shouldn't have open edges!!

	// markers[f] == true once triangle f has been consumed by some polygon.
	bool* markers = reinterpret_cast<bool*>(PxAlloca(nbFaces*sizeof(bool)));
	PxMemZero(markers, nbFaces*sizeof(bool));

	// vertexMarkers[v] counts how many polygon boundaries reference vertex v;
	// vertices referenced by fewer than 3 polygons are flagged redundant below.
	PxU8* vertexMarkers = reinterpret_cast<PxU8*>(PxAlloca(nbVertices*sizeof(PxU8)));
	PxMemZero(vertexMarkers, nbVertices*sizeof(PxU8));

	PxU32 currentFace = 0; // Start with first triangle
	nb_polygons = 0;
	do
	{
		// Pick the first not-yet-consumed triangle as the seed of the next polygon.
		currentFace = 0;
		while(currentFace<nbFaces && markers[currentFace]) currentFace++;

		// Start from "closest" face and floodfill through inactive edges
		struct Local
		{
			// Recursively gathers all triangles reachable through inactive
			// (coplanar) edges into 'indices'.
			// NOTE(review): recursion depth is bounded by the number of coplanar
			// triangles in one polygon - presumably small for cooked hulls.
			static void FloodFill(Ps::Array<PxU32>& indices, const AdjTriangle* faces, PxU32 current, bool* inMarkers)
			{
				if(inMarkers[current]) return;
				inMarkers[current] = true;

				indices.pushBack(current);
				const AdjTriangle& AT = faces[current];

				// We can floodfill through inactive edges since the mesh is convex (inactive==planar)
				if(!AT.HasActiveEdge01()) FloodFill(indices, faces, AT.GetAdjTri(EDGE01), inMarkers);
				if(!AT.HasActiveEdge20()) FloodFill(indices, faces, AT.GetAdjTri(EDGE02), inMarkers);
				if(!AT.HasActiveEdge12()) FloodFill(indices, faces, AT.GetAdjTri(EDGE12), inMarkers);
			}

			// Walks around vertex 'triangleIndex' starting at triangle 'index',
			// following edges incident to that vertex, to find the neighboring
			// face pair across the polygon boundary. Returns false when the walk
			// circles back or only hits active/inactive edge combinations that
			// carry no neighbor information.
			static bool GetNeighborFace(PxU32 index,PxU32 triangleIndex,const AdjTriangle* faces, const PxU32* dfaces, PxU32& neighbor, PxU32& current)
			{
				PxU32 currentIndex = index;
				PxU32 previousIndex = index;
				bool firstFace = true;
				bool next = true;
				while (next)
				{
					const AdjTriangle& currentAT = faces[currentIndex];
					PxU32 refTr0 = dfaces[currentIndex*3 + 0];
					PxU32 refTr1 = dfaces[currentIndex*3 + 1];

					// Select the two triangle edges incident to 'triangleIndex'.
					PxU32 edge[2];
					edge[0] = 1;
					edge[1] = 2;
					if(triangleIndex == refTr0)
					{
						edge[0] = 0;
						edge[1] = 1;
					}
					else
					{
						if(triangleIndex == refTr1)
						{
							edge[0] = 0;
							edge[1] = 2;
						}
					}

					if(currentAT.HasActiveEdge(edge[0]) && currentAT.HasActiveEdge(edge[1]))
					{
						return false;
					}

					if(!currentAT.HasActiveEdge(edge[0]) && !currentAT.HasActiveEdge(edge[1]))
					{
						// not interested in testing transition vertices
						if(currentIndex == index)
						{
							return false;
						}

						// transition one
						for (PxU32 i = 0; i < 2; i++)
						{
							PxU32 testIndex = currentAT.GetAdjTri(SharedEdgeIndex(edge[i]));

							// exit if we circle around the vertex back to beginning
							if(testIndex == index && previousIndex != index)
							{
								return false;
							}

							if(testIndex != previousIndex)
							{
								// move to next
								previousIndex = currentIndex;
								currentIndex = testIndex;
								break;
							}
						}
					}
					else
					{
						// Exactly one of the two edges is active; make edge[0] the
						// active one.
						if(!currentAT.HasActiveEdge(edge[0]))
						{
							PxU32 t = edge[0];
							edge[0] = edge[1];
							edge[1] = t;
						}

						if(currentAT.HasActiveEdge(edge[0]))
						{
							PxU32 testIndex = currentAT.GetAdjTri(SharedEdgeIndex(edge[0]));
							if(firstFace)
							{
								firstFace = false;
							}
							else
							{
								// Second active edge met during the walk => report
								// the face pair across it.
								neighbor = testIndex;
								current = currentIndex;
								return true;
							}
						}

						if(!currentAT.HasActiveEdge(edge[1]))
						{
							PxU32 testIndex = currentAT.GetAdjTri(SharedEdgeIndex(edge[1]));
							if(testIndex != index)
							{
								previousIndex = currentIndex;
								currentIndex = testIndex;
							}
						}
					}

				}

				return false;
			}

			// Rejects a triangle from flood filling when merging across one of its
			// inactive edges would make two polygons share two neighbors (i.e.
			// produce an invalid polygon adjacency). Returns false when invalid.
			static bool CheckFloodFillFace(PxU32 index,const AdjTriangle* faces, const PxU32* dfaces)
			{
				if(!dfaces)
					return true;

				const AdjTriangle& checkedAT = faces[index];

				PxU32 refTr0 = dfaces[index*3 + 0];
				PxU32 refTr1 = dfaces[index*3 + 1];
				PxU32 refTr2 = dfaces[index*3 + 2];

				for (PxU32 i = 0; i < 3; i++)
				{
					if(!checkedAT.HasActiveEdge(i))
					{
						// Pick the two vertices of edge i and the indices of the
						// other two edges.
						PxU32 testTr0 = refTr1;
						PxU32 testTr1 = refTr2;
						PxU32 testIndex0 = 0;
						PxU32 testIndex1 = 1;
						if(i == 0)
						{
							testTr0 = refTr0;
							testTr1 = refTr1;
							testIndex0 = 1;
							testIndex1 = 2;
						}
						else
						{
							if(i == 1)
							{
								testTr0 = refTr0;
								testTr1 = refTr2;
								testIndex0 = 0;
								testIndex1 = 2;
							}
						}

						PxU32 adjFaceTested = checkedAT.GetAdjTri(SharedEdgeIndex(testIndex0));

						// Compare the neighbor pairs found by walking around the
						// shared vertex from both sides; a mirrored match means
						// the merge would be degenerate.
						PxU32 neighborIndex00;
						PxU32 neighborIndex01;
						bool found0 = GetNeighborFace(index,testTr0,faces,dfaces, neighborIndex00, neighborIndex01);
						PxU32 neighborIndex10;
						PxU32 neighborIndex11;
						bool found1 = GetNeighborFace(adjFaceTested,testTr0,faces,dfaces, neighborIndex10, neighborIndex11);

						if(found0 && found1 && neighborIndex00 == neighborIndex11 && neighborIndex01 == neighborIndex10)
						{
							return false;
						}

						adjFaceTested = checkedAT.GetAdjTri(SharedEdgeIndex(testIndex1));
						found0 = GetNeighborFace(index,testTr1,faces,dfaces,neighborIndex00,neighborIndex01);
						found1 = GetNeighborFace(adjFaceTested,testTr1,faces,dfaces,neighborIndex10,neighborIndex11);

						if(found0 && found1 && neighborIndex00 == neighborIndex11 && neighborIndex01 == neighborIndex10)
						{
							return false;
						}

					}
				}

				return true;
			}

			// Validates the flood-filled triangle set. On failure the offending
			// triangles get their edges forced active (0x20000000 bits) and the
			// fill is reset; returns true to request another fill pass.
			static bool CheckFloodFill(Ps::Array<PxU32>& indices,AdjTriangle* faces,bool* inMarkers, const PxU32* dfaces)
			{
				bool valid = true;

				for(PxU32 i=0;i<indices.size();i++)
				{
					//const AdjTriangle& AT = faces[indices.GetEntry(i)];

					// Two triangles inside one polygon must never be linked by an
					// ACTIVE edge - that would mean the fill crossed a real border.
					for(PxU32 j= i + 1;j<indices.size();j++)
					{
						const AdjTriangle& testAT = faces[indices[j]];

						if(testAT.GetAdjTri(EDGE01) == indices[i])
						{
							if(testAT.HasActiveEdge01())
							{
								valid = false;
							}
						}
						if(testAT.GetAdjTri(EDGE02) == indices[i])
						{
							if(testAT.HasActiveEdge20())
							{
								valid = false;
							}
						}
						if(testAT.GetAdjTri(EDGE12) == indices[i])
						{
							if(testAT.HasActiveEdge12())
							{
								valid = false;
							}
						}

						if(!valid)
							break;
					}

					if(!CheckFloodFillFace(indices[i], faces, dfaces))
					{
						valid = false;
					}

					if(!valid)
						break;
				}

				if(!valid)
				{
					// Force every edge of the gathered triangles active so the next
					// fill pass treats them as separate polygons.
					for(PxU32 i=0;i<indices.size();i++)
					{
						AdjTriangle& AT = faces[indices[i]];
						AT.mATri[0] |= 0x20000000;
						AT.mATri[1] |= 0x20000000;
						AT.mATri[2] |= 0x20000000;

						inMarkers[indices[i]] = false;
					}

					indices.forceSize_Unsafe(0);

					return true;
				}

				return false;
			}
		};

		if(currentFace!=nbFaces)
		{
			Ps::Array<PxU32> indices; // Indices of triangles forming hull polygon

			// Flood fill, validate, and retry until the gathered set is valid.
			bool doFill = true;
			while (doFill)
			{
				Local::FloodFill(indices, adj.mFaces, currentFace, markers);

				doFill = Local::CheckFloodFill(indices,adj.mFaces,markers, dFaces);
			}

			// Now it would be nice to recreate a closed linestrip, similar to silhouette extraction. The line is composed of active edges, this time.


			Ps::Array<Pair> activeSegments;
			//Container ActiveSegments;
			// Loop through triangles composing the polygon
			for(PxU32 i=0;i<indices.size();i++)
			{
				const PxU32 currentTriIndex = indices[i]; // Catch current triangle
				const PxU32 vRef0 = dFaces ? dFaces[currentTriIndex*3+0] : wFaces[currentTriIndex*3+0];
				const PxU32 vRef1 = dFaces ? dFaces[currentTriIndex*3+1] : wFaces[currentTriIndex*3+1];
				const PxU32 vRef2 = dFaces ? dFaces[currentTriIndex*3+2] : wFaces[currentTriIndex*3+2];

				// Keep active edges
				if(adj.mFaces[currentTriIndex].HasActiveEdge01()) { activeSegments.pushBack(Pair(vRef0,vRef1)); }
				if(adj.mFaces[currentTriIndex].HasActiveEdge20()) { activeSegments.pushBack(Pair(vRef0,vRef2)); }
				if(adj.mFaces[currentTriIndex].HasActiveEdge12()) { activeSegments.pushBack(Pair(vRef1,vRef2)); }
			}

			// We assume the polygon is convex. In that case it should always be possible to retriangulate it so that the triangles are
			// implicit (in particular, it should always be possible to remove interior triangles)

			Ps::Array<PxU32> lineStrip;
			if(findLineStrip(lineStrip, activeSegments))
			{
				PxU32 nb = lineStrip.size();
				if(nb)
				{
					const PxU32* entries = lineStrip.begin();
					PX_ASSERT(entries[0] == entries[nb-1]); // findLineStrip() is designed that way. Might not be what we want!

					// We get rid of the last (duplicated) index
					polygon_data.pushBack(nb-1);
					for (PxU32 i = 0; i < nb-1; i++)
					{
						vertexMarkers[entries[i]]++;
						polygon_data.pushBack(entries[i]);
					}
					nb_polygons++;

					// Loop through vertices composing the line strip polygon end mark the redundant vertices inside the polygon
					for(PxU32 i=0;i<indices.size();i++)
					{
						const PxU32 CurrentTriIndex = indices[i]; // Catch current triangle
						const PxU32 VRef0 = dFaces ? dFaces[CurrentTriIndex*3+0] : wFaces[CurrentTriIndex*3+0];
						const PxU32 VRef1 = dFaces ? dFaces[CurrentTriIndex*3+1] : wFaces[CurrentTriIndex*3+1];
						const PxU32 VRef2 = dFaces ? dFaces[CurrentTriIndex*3+2] : wFaces[CurrentTriIndex*3+2];

						// A vertex used by the polygon's triangles but absent from
						// its boundary strip is interior => redundant.
						bool found0 = false;
						bool found1 = false;
						bool found2 = false;

						for (PxU32 j=0;j < nb - 1; j++)
						{
							if(VRef0 == entries[j])
							{
								found0 = true;
							}

							if(VRef1 == entries[j])
							{
								found1 = true;
							}

							if(VRef2 == entries[j])
							{
								found2 = true;
							}

							if(found0 && found1 && found2)
								break;
						}

						if(!found0)
						{
							if(rendundantVertices.find(VRef0) == rendundantVertices.end())
								rendundantVertices.pushBack(VRef0);
						}

						if(!found1)
						{
							if(rendundantVertices.find(VRef1) == rendundantVertices.end())
								rendundantVertices.pushBack(VRef1);

						}

						if(!found2)
						{
							if(rendundantVertices.find(VRef2) == rendundantVertices.end())
								rendundantVertices.pushBack(VRef2);
						}
					}

					// If needed, output triangle indices used to build this polygon
					if(triangle_data)
					{
						triangle_data->pushBack(indices.size());
						for (PxU32 j = 0; j < indices.size(); j++)
							triangle_data->pushBack(indices[j]);
					}
				}
			}
			else
			{
				Ps::getFoundation().error(PxErrorCode::eINVALID_OPERATION, __FILE__, __LINE__, "Meshmerizer::extractHullPolygons: line strip extraction failed");
				return false;
			}
		}
	}
	while(currentFace!=nbFaces);

	// A vertex referenced by fewer than 3 polygons cannot be a true hull corner;
	// flag it redundant as well.
	for (PxU32 i = 0; i < nbVertices; i++)
	{
		if(vertexMarkers[i] < 3)
		{
			if(rendundantVertices.find(i) == rendundantVertices.end())
				rendundantVertices.pushBack(i);
		}
	}

	if(rendundantVertices.size() > 0 && triangle_data)
		checkRedundantVertices(nb_polygons,polygon_data,hull,*triangle_data,rendundantVertices);

	return true;
}
+
+//////////////////////////////////////////////////////////////////////////
+
// Constructs an empty polygons builder; the triangle face buffer (mFaces) is
// allocated later by computeHullPolygons().
ConvexPolygonsBuilder::ConvexPolygonsBuilder(Gu::ConvexHullData* hull, const bool buildGRBData)
	: ConvexHullBuilder(hull, buildGRBData), mNbHullFaces(0), mFaces(NULL)
{
}
+
+//////////////////////////////////////////////////////////////////////////
+
ConvexPolygonsBuilder::~ConvexPolygonsBuilder()
{
	// mFaces is allocated with PX_NEW in computeHullPolygons(); safe on NULL.
	PX_DELETE_POD(mFaces);
}
+
+//////////////////////////////////////////////////////////////////////////
+// compute hull polygons from given hull triangles
+bool ConvexPolygonsBuilder::computeHullPolygons(const PxU32& nbVerts,const PxVec3* verts, const PxU32& nbTriangles, const PxU32* triangles)
+{
+ PX_ASSERT(triangles);
+ PX_ASSERT(verts);
+
+ mHullDataHullVertices = NULL;
+ mHullDataPolygons = NULL;
+ mHullDataVertexData8 = NULL;
+ mHullDataFacesByEdges8 = NULL;
+ mHullDataFacesByVertices8 = NULL;
+
+ mNbHullFaces = nbTriangles;
+ mHull->mNbHullVertices = Ps::to8(nbVerts);
+ // allocate additional vec3 for V4 safe load in VolumeInteration
+ mHullDataHullVertices = reinterpret_cast<PxVec3*>(PX_ALLOC(sizeof(PxVec3) * mHull->mNbHullVertices + 1, "PxVec3"));
+ PxMemCopy(mHullDataHullVertices, verts, mHull->mNbHullVertices*sizeof(PxVec3));
+
+ mFaces = PX_NEW(HullTriangleData)[mNbHullFaces];
+ for(PxU32 i=0;i<mNbHullFaces;i++)
+ {
+ PX_ASSERT(triangles[i*3+0]<=0xffff);
+ PX_ASSERT(triangles[i*3+1]<=0xffff);
+ PX_ASSERT(triangles[i*3+2]<=0xffff);
+ mFaces[i].mRef[0] = triangles[i*3+0];
+ mFaces[i].mRef[1] = triangles[i*3+1];
+ mFaces[i].mRef[2] = triangles[i*3+2];
+ }
+
+ Gu::TriangleT<PxU32>* hullAsIndexedTriangle = reinterpret_cast<Gu::TriangleT<PxU32>*>(mFaces);
+
+ // We don't trust the user at all... So, clean the hull.
+ PxU32 nbHullVerts = mHull->mNbHullVertices;
+ CleanFaces(mNbHullFaces, hullAsIndexedTriangle, nbHullVerts, mHullDataHullVertices);
+ PX_ASSERT(nbHullVerts<256);
+ mHull->mNbHullVertices = Ps::to8(nbHullVerts);
+
+ // ...and then run the full tests again.
+ if(!CheckFaces(mNbHullFaces, hullAsIndexedTriangle, mHull->mNbHullVertices, mHullDataHullVertices))
+ return false;
+
+ // Transform triangles-to-polygons
+ if(!createPolygonData())
+ return false;
+
+ return checkHullPolygons();
+}
+
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/**
* Computes polygon data from the triangle soup produced by the hull builder.
* Groups coplanar triangles into polygons, compacts away redundant vertices,
* computes each polygon's plane and unifies the winding, and finally
* re-triangulates the polygons to recreate a clean vertex cloud.
* \return true if success
*/
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
bool ConvexPolygonsBuilder::createPolygonData()
{
	// Cleanup any data from a previous run
	mHull->mNbPolygons = 0;
	PX_DELETE_POD(mHullDataVertexData8);
	PX_DELETE_POD(mHullDataFacesByVertices8);
	PX_FREE_AND_RESET(mHullDataPolygons);

	// Extract polygon data from triangle data.
	// temp  : per polygon, a vertex count followed by that many vertex indices
	// temp2 : per polygon, a triangle count followed by that many source-triangle indices
	Ps::Array<PxU32> temp;
	Ps::Array<PxU32> temp2;
	Ps::Array<PxU32> rendundantVertices;	// (sic "redundant") vertices not needed by any polygon
	PxU32 nbPolygons;
	if(!extractHullPolygons(nbPolygons, temp, *this, &temp2,rendundantVertices))
		return false;

	PxVec3* reducedHullDataHullVertices = mHullDataHullVertices;
	PxU8 numReducedHullDataVertices = mHull->mNbHullVertices;

	// If redundant vertices exist, build a compacted vertex buffer and remap all
	// polygon vertex indices into it; dropped vertices map to the 0xFF sentinel.
	if(rendundantVertices.size() > 0)
	{
		numReducedHullDataVertices = Ps::to8(mHull->mNbHullVertices - rendundantVertices.size());
		reducedHullDataHullVertices = static_cast<PxVec3*> (PX_ALLOC_TEMP(sizeof(PxVec3)*numReducedHullDataVertices,"Reduced vertices hull data"));
		PxU8* remapTable = PX_NEW(PxU8)[mHull->mNbHullVertices];

		// remapTable: old vertex index -> new compacted index (or 0xFF if dropped)
		PxU8 currentIndex = 0;
		for (PxU8 i = 0; i < mHull->mNbHullVertices; i++)
		{
			if(rendundantVertices.find(i) == rendundantVertices.end())
			{
				PX_ASSERT(currentIndex < numReducedHullDataVertices);
				reducedHullDataHullVertices[currentIndex] = mHullDataHullVertices[i];
				remapTable[i] = currentIndex;
				currentIndex++;
			}
			else
			{
				remapTable[i] = 0xFF;
			}
		}

		// Remap the packed polygon vertex indices in-place
		PxU32* data = temp.begin();
		for(PxU32 i=0;i<nbPolygons;i++)
		{
			PxU32 nbVerts = *data++;
			PX_ASSERT(nbVerts>=3);	// Else something very wrong happened...

			for(PxU32 j=0;j<nbVerts;j++)
			{
				PX_ASSERT(data[j] < mHull->mNbHullVertices);
				data[j] = remapTable[data[j]];
			}

			data += nbVerts;
		}

		PX_DELETE_POD(remapTable);
	}

	// mNbPolygons is stored in a PxU8, so more than 255 polygons cannot be represented
	if(nbPolygons>255)
	{
		Ps::getFoundation().error(PxErrorCode::eINTERNAL_ERROR, __FILE__, __LINE__, "ConvexHullBuilder: convex hull has more than 255 polygons!");
		return false;
	}

	// Precompute hull polygon structures
	mHull->mNbPolygons = Ps::to8(nbPolygons);
	mHullDataPolygons = reinterpret_cast<Gu::HullPolygonData*>(PX_ALLOC(sizeof(Gu::HullPolygonData)*mHull->mNbPolygons, "Gu::HullPolygonData"));
	PxMemZero(mHullDataPolygons, sizeof(Gu::HullPolygonData)*mHull->mNbPolygons);

	// The winding hasn't been preserved so we need to handle this. Basically we need to "unify normals"
	// exactly as we did at hull creation time - except this time we work on polygons
	PxVec3 geomCenter;
	computeGeomCenter(geomCenter, mNbHullFaces, mFaces);

	// Loop through polygons
	// We have N polygons => remove N entries for number of vertices
	PxU32 tmp = temp.size() - nbPolygons;
	mHullDataVertexData8 = PX_NEW(PxU8)[tmp];
	PxU8* dest = mHullDataVertexData8;
	const PxU32* data = temp.begin();
	const PxU32* triData = temp2.begin();
	for(PxU32 i=0;i<nbPolygons;i++)
	{
		mHullDataPolygons[i].mVRef8 = PxU16(dest - mHullDataVertexData8);	// Setup link for current polygon
		PxU32 nbVerts = *data++;
		PX_ASSERT(nbVerts>=3);	// Else something very wrong happened...
		mHullDataPolygons[i].mNbVerts = Ps::to8(nbVerts);

		// Copy remapped indices, skipping the 0xFF sentinels left by the compaction pass
		PxU32 index = 0;
		for(PxU32 j=0;j<nbVerts;j++)
		{
			if(data[j] != 0xFF)
			{
				dest[index] = Ps::to8(data[j]);
				index++;
			}
			else
			{
				mHullDataPolygons[i].mNbVerts--;	// sentinel: this polygon has one vertex fewer
			}
		}

		// Compute plane equation
		{
			computeNewellPlane(mHullDataPolygons[i].mPlane, mHullDataPolygons[i].mNbVerts, dest, reducedHullDataHullVertices);

			// Flip the Newell plane if it disagrees with the first source triangle's plane.
			// Note: only k==0 decides the flip, but the loop must still run to consume triData.
			PxU32 nbTris = *triData++; // #tris in current poly
			bool flip = false;
			for(PxU32 k=0;k< nbTris; k++)
			{
				PxU32 triIndex = *triData++;	// Index of one triangle composing polygon
				PX_ASSERT(triIndex<mNbHullFaces);
				const Gu::TriangleT<PxU32>& T = reinterpret_cast<const Gu::TriangleT<PxU32>&>(mFaces[triIndex]);
				const PxPlane PL = PlaneEquation(T, mHullDataHullVertices);
				if(k==0 && PL.n.dot(mHullDataPolygons[i].mPlane.n) < 0.0f)
				{
					flip = true;
				}
			}
			if(flip)
			{
				negatePlane(mHullDataPolygons[i]);
				inverseBuffer(mHullDataPolygons[i].mNbVerts, dest);
			}

			// Push the plane offset so that every hull vertex lies on or behind the plane
			for(PxU32 j=0;j<mHull->mNbHullVertices;j++)
			{
				float d = - (mHullDataPolygons[i].mPlane.n).dot(mHullDataHullVertices[j]);
				if(d<mHullDataPolygons[i].mPlane.d) mHullDataPolygons[i].mPlane.d=d;
			}
		}

		// "Unify normal": the geometric center must lie behind the polygon plane
		if(mHullDataPolygons[i].mPlane.distance(geomCenter)>0.0f)
		{
			inverseBuffer(mHullDataPolygons[i].mNbVerts, dest);

			negatePlane(mHullDataPolygons[i]);
			PX_ASSERT(mHullDataPolygons[i].mPlane.distance(geomCenter)<=0.0f);
		}

		// Next one
		data += nbVerts;	// Skip vertex indices
		dest += mHullDataPolygons[i].mNbVerts;
	}

	// Commit the compacted vertex buffer (copied over the original storage)
	if(reducedHullDataHullVertices != mHullDataHullVertices)
	{
		PxMemCopy(mHullDataHullVertices,reducedHullDataHullVertices,sizeof(PxVec3)*numReducedHullDataVertices);
		PX_FREE(reducedHullDataHullVertices);

		mHull->mNbHullVertices = numReducedHullDataVertices;
	}

	//calculate the vertex map table
	if(!calculateVertexMapTable(nbPolygons))
		return false;

#ifdef USE_PRECOMPUTED_HULL_PROJECTION
	// Loop through polygons
	for(PxU32 j=0;j<nbPolygons;j++)
	{
		// Precompute hull projection along local polygon normal:
		// find the hull vertex with the minimum projection onto the polygon normal
		PxU32 nbVerts = mHull->mNbHullVertices;
		const PxVec3* verts = mHullDataHullVertices;
		Gu::HullPolygonData& polygon = mHullDataPolygons[j];
		PxReal min = PX_MAX_F32;
		PxU8 minIndex = 0xff;
		for (PxU8 i = 0; i < nbVerts; i++)
		{
			float dp = (*verts++).dot(polygon.mPlane.n);
			if(dp < min)
			{
				min = dp;
				minIndex = i;
			}
		}
		polygon.mMinIndex = minIndex;
	}
#endif

	// Triangulate newly created polygons to recreate a clean vertex cloud.
	return createTrianglesFromPolygons();
}
+
//////////////////////////////////////////////////////////////////////////
// create back triangles from polygons
// Fans each polygon around its first vertex, discards zero-area triangles,
// rebuilds mFaces/mNbHullFaces and re-unifies the triangle winding.
bool ConvexPolygonsBuilder::createTrianglesFromPolygons()
{
	if (!mHull->mNbPolygons || !mHullDataPolygons) return false;

	// Upper bound on the triangle count: a polygon with V vertices fans into V-2 triangles
	PxU32 maxNbTriangles = 0;
	for (PxU32 i = 0; i < mHull->mNbPolygons; i++)
	{
		if (mHullDataPolygons[i].mNbVerts < 3)
		{
			Ps::getFoundation().error(PxErrorCode::eINTERNAL_ERROR, __FILE__, __LINE__, "ConvexHullBuilder::CreateTrianglesFromPolygons: convex hull has a polygon with less than 3 vertices!");
			return false;
		}
		maxNbTriangles += mHullDataPolygons[i].mNbVerts - 2;
	}

	HullTriangleData* tmpFaces = PX_NEW(HullTriangleData)[maxNbTriangles];

	HullTriangleData* currFace = tmpFaces;
	PxU32 nbTriangles = 0;
	const PxU8* vertexData = mHullDataVertexData8;
	const PxVec3* hullVerts = mHullDataHullVertices;
	for (PxU32 i = 0; i < mHull->mNbPolygons; i++)
	{
		const PxU8* data = vertexData + mHullDataPolygons[i].mVRef8;
		PxU32 nbVerts = mHullDataPolygons[i].mNbVerts;

		// Triangulate the polygon such that all generated triangles have one and the same vertex
		// in common.
		//
		// Make sure to avoid creating zero area triangles. Imagine the following polygon:
		//
		// 4                  3
		// *------------------*
		// |                  |
		// *---*----*----*----*
		// 5   6    0    1    2
		//
		// Choosing vertex 0 as the shared vertex, the following zero area triangles will be created:
		// [0 1 2], [0 5 6]
		//
		// Check for these triangles and discard them
		// Note: Such polygons should only occur if the user defines the convex hull, i.e., the triangles
		// of the convex shape, himself. If the convex hull is built from the vertices only, the
		// hull algorithm removes the useless vertices.
		//
		for (PxU32 j = 0; j < nbVerts - 2; j++)
		{
			currFace->mRef[0] = data[0];
			currFace->mRef[1] = data[(j + 1) % nbVerts];
			currFace->mRef[2] = data[(j + 2) % nbVerts];

			const PxVec3& p0 = hullVerts[currFace->mRef[0]];
			const PxVec3& p1 = hullVerts[currFace->mRef[1]];
			const PxVec3& p2 = hullVerts[currFace->mRef[2]];

			// squared magnitude of the edge cross product; zero => degenerate triangle
			const float area = ((p1 - p0).cross(p2 - p0)).magnitudeSquared();

			if (area != 0.0f)	// Else discard the triangle
			{
				nbTriangles++;
				currFace++;
			}
		}
	}

	PX_DELETE_POD(mFaces);
	HullTriangleData* faces;
	PX_ASSERT(nbTriangles <= maxNbTriangles);
	if (maxNbTriangles == nbTriangles)
	{
		// No zero area triangles, hence the face buffer has correct size and can be used directly.
		faces = tmpFaces;
	}
	else
	{
		// Resize face buffer because some triangles were discarded.
		faces = PX_NEW(HullTriangleData)[nbTriangles];
		if (!faces)
		{
			PX_DELETE_POD(tmpFaces);
			return false;
		}
		PxMemCopy(faces, tmpFaces, sizeof(HullTriangleData)*nbTriangles);
		PX_DELETE_POD(tmpFaces);
	}
	mFaces = faces;
	mNbHullFaces = nbTriangles;
	// TODO: at this point useless vertices should be removed from the hull. The current fix is to initialize
	// support vertices to known valid vertices, but it's not really convincing.

	// Re-unify normals: flip any triangle whose plane faces the geometric center
	PxVec3 geomCenter;
	computeGeomCenter(geomCenter, mNbHullFaces, mFaces);

	for (PxU32 i = 0; i < mNbHullFaces; i++)
	{
		const PxPlane P(hullVerts[mFaces[i].mRef[0]],
			hullVerts[mFaces[i].mRef[1]],
			hullVerts[mFaces[i].mRef[2]]);
		if (P.distance(geomCenter) > 0.0f)
		{
			Flip(mFaces[i]);
		}
	}
	return true;
}
+
diff --git a/PhysX_3.4/Source/PhysXCooking/src/convex/ConvexPolygonsBuilder.h b/PhysX_3.4/Source/PhysXCooking/src/convex/ConvexPolygonsBuilder.h
new file mode 100644
index 00000000..52044eb0
--- /dev/null
+++ b/PhysX_3.4/Source/PhysXCooking/src/convex/ConvexPolygonsBuilder.h
@@ -0,0 +1,64 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2016 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+
+#ifndef PX_CONVEXPOLYGONSBUILDER_H
+#define PX_CONVEXPOLYGONSBUILDER_H
+
+#include "ConvexHullBuilder.h"
+
namespace physx
{
	//////////////////////////////////////////////////////////////////////////
	// extended convex hull builder for a case where we build polygons from input triangles
	// Owns the (re)triangulated face list (mFaces) in addition to the polygon
	// data managed by the ConvexHullBuilder base class.
	class ConvexPolygonsBuilder : public ConvexHullBuilder
	{
	public:
		ConvexPolygonsBuilder(Gu::ConvexHullData* hull, const bool buildGRBData);
		~ConvexPolygonsBuilder();

		// Computes hull polygons from user-provided vertices and triangles.
		// \return true on success
		bool computeHullPolygons(const PxU32& nbVerts,const PxVec3* verts, const PxU32& nbTriangles, const PxU32* triangles);

		// Accessors for the triangulated hull surface
		PX_INLINE PxU32 getNbFaces()const { return mNbHullFaces; }
		PX_INLINE const HullTriangleData* getFaces() const { return mFaces; }


	private:
		// groups coplanar input triangles into polygons and fills the hull polygon data
		bool createPolygonData();
		// rebuilds a clean triangle list from the computed polygons
		bool createTrianglesFromPolygons();

	private:
		PxU32 mNbHullFaces; //!< Number of faces in the convex hull
		HullTriangleData* mFaces; //!< Triangles.

	};
}
+
#endif // PX_CONVEXPOLYGONSBUILDER_H
+
diff --git a/PhysX_3.4/Source/PhysXCooking/src/convex/InflationConvexHullLib.cpp b/PhysX_3.4/Source/PhysXCooking/src/convex/InflationConvexHullLib.cpp
new file mode 100644
index 00000000..8f60275c
--- /dev/null
+++ b/PhysX_3.4/Source/PhysXCooking/src/convex/InflationConvexHullLib.cpp
@@ -0,0 +1,1481 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2016 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#include "PsAlloca.h"
+#include "PsUserAllocated.h"
+#include "PsMathUtils.h"
+#include "PsUtilities.h"
+
+#include "foundation/PxMath.h"
+#include "foundation/PxBounds3.h"
+#include "foundation/PxPlane.h"
+#include "foundation/PxMemory.h"
+
+#include "InflationConvexHullLib.h"
+#include "ConvexHullUtils.h"
+
+using namespace physx;
+
+namespace local
+{
+ //////////////////////////////////////////////////////////////////////////
+ // constants
+ static const float DIMENSION_EPSILON_MULTIPLY = 0.001f; // used to scale down bounds dimension and set as epsilon used in the hull generator
+ static const float DIR_ANGLE_MULTIPLY = 0.025f; // used in maxIndexInDirSterid for direction check modifier
+ static const float VOLUME_EPSILON = (1e-20f); // volume epsilon used for simplex valid
+ static const float MIN_ADJACENT_ANGLE = 3.0f; // in degrees - result wont have two adjacent facets within this angle of each other. !AB expose this parameter or use the one PxCookingParams
+ static const float PAPERWIDTH = 0.001f; // used in hull construction from planes, within paperwidth its considered coplanar
+
+ //////////////////////////////////////////////////////////////////////////
+ // gets the most distant index along the given dir filtering allowed indices
+ PxI32 maxIndexInDirFiltered(const PxVec3 *p,PxU32 count,const PxVec3 &dir, bool* tempNotAllowed)
+ {
+ PX_ASSERT(count);
+ PxI32 m=-1;
+ for(PxU32 i=0;i < count; i++)
+ {
+ if(!tempNotAllowed[i])
+ {
+ if(m==-1 || p[i].dot(dir) > p[m].dot(dir))
+ m= PxI32(i);
+ }
+ }
+ PX_ASSERT(m!=-1);
+ return m;
+ }
+
+ //////////////////////////////////////////////////////////////////////////
+ // gets orthogonal more significant vector
+ static PxVec3 orth(const PxVec3& v)
+ {
+ PxVec3 a= v.cross(PxVec3(0,0,1.f));
+ PxVec3 b= v.cross(PxVec3(0,1.f,0));
+ PxVec3 out = (a.magnitudeSquared() > b.magnitudeSquared())? a : b;
+ out.normalize();
+ return out;
+ }
+
	//////////////////////////////////////////////////////////////////////////
	// find the most distant index in given direction dir
	// Unlike maxIndexInDirFiltered, the result is verified by re-shooting the
	// search along slightly perturbed directions; a vertex is only accepted as
	// the extreme if the perturbed searches agree. allow[i]==3 marks an index
	// already verified on a previous call.
	PxI32 maxIndexInDirSterid(const PxVec3* p,PxU32 count,const PxVec3& dir,Ps::Array<PxU8> &allow)
	{
		// if the found vertex does not get hit by a slightly rotated ray, it
		// may not be the extreme we are looking for. Therefore it is marked
		// as disabled for the direction search and different candidate is chosen.
		PX_ALLOCA(tempNotAllowed,bool,count);
		PxMemSet(tempNotAllowed,0,count*sizeof(bool));

		PxI32 m=-1;
		while(m==-1)
		{
			// get the furthest index along dir
			m = maxIndexInDirFiltered(p,count,dir,tempNotAllowed);
			PX_ASSERT(m >= 0);

			// already verified earlier - accept immediately
			if(allow[PxU32(m)] == 3)
				return m;

			// get orthogonal vectors to the dir; u and v span the plane in
			// which the perturbation directions are swept
			PxVec3 u = orth(dir);
			PxVec3 v = u.cross(dir);

			PxI32 ma=-1;
			// we shoot a ray close to the original dir and hope to get the same index
			// if we not hit the same index we try it with bigger precision
			// if we still fail to hit the same index we drop the index and iterate again
			for(float x = 0.0f ; x <= 360.0f ; x+= 45.0f)
			{
				float s0 = PxSin(Ps::degToRad(x));
				float c0 = PxCos(Ps::degToRad(x));
				PxI32 mb = maxIndexInDirFiltered(p,count,dir+(u*s0+v*c0)*DIR_ANGLE_MULTIPLY,tempNotAllowed);
				// two consecutive coarse (45 degree) probes agree with m - verified
				if(ma==m && mb==m)
				{
					allow[PxU32(m)]=3;
					return m;
				}
				// coarse probes disagree - re-sweep the last 45 degree sector in 5 degree steps
				if(ma!=-1 && ma!=mb)
				{
					PxI32 mc = ma;
					for(float xx = x-40.0f ; xx <= x ; xx+= 5.0f)
					{
						float s = PxSin(Ps::degToRad(xx));
						float c = PxCos(Ps::degToRad(xx));
						int md = maxIndexInDirFiltered(p,count,dir+(u*s+v*c)*DIR_ANGLE_MULTIPLY,tempNotAllowed);
						if(mc==m && md==m)
						{
							allow[PxU32(m)]=3;
							return m;
						}
						mc=md;
					}
				}
				ma=mb;
			}
			// candidate could not be confirmed - disable it and search again
			tempNotAllowed[m]=true;
			m=-1;
		}
		PX_ASSERT(0);	// unreachable: the while loop only exits via return
		return m;
	}
+
+ //////////////////////////////////////////////////////////////////////////
+ // Simplex helper class - just holds the 4 indices
+ class HullSimplex
+ {
+ public:
+ PxI32 x,y,z,w;
+ HullSimplex(){}
+ HullSimplex(PxI32 _x,PxI32 _y, PxI32 _z,PxI32 _w){x=_x;y=_y;z=_z;w=_w;}
+ const PxI32& operator[](PxI32 i) const
+ {
+ return reinterpret_cast<const PxI32*>(this)[i];
+ }
+ PxI32& operator[](PxI32 i)
+ {
+ return reinterpret_cast<PxI32*>(this)[i];
+ }
+
+ //////////////////////////////////////////////////////////////////////////
+ // checks the volume of given simplex
+ static bool hasVolume(const PxVec3* verts, PxU32 p0, PxU32 p1, PxU32 p2, PxU32 p3)
+ {
+ PxVec3 result3 = (verts[p1]-verts[p0]).cross(verts[p2]-verts[p0]);
+ if ((result3).magnitude() < VOLUME_EPSILON && (result3).magnitude() > -VOLUME_EPSILON) // Almost collinear or otherwise very close to each other
+ return false;
+ result3.normalize();
+ const float result = result3.dot(verts[p3]-verts[p0]);
+ return (result > VOLUME_EPSILON || result < -VOLUME_EPSILON); // Returns true if volume is significantly non-zero
+ }
+ };
+
	//////////////////////////////////////////////////////////////////////////
	// finds the hull simplex http://en.wikipedia.org/wiki/Simplex
	// in - vertices, vertex count, allow flags, bounds dimensions (minMax)
	// out - indices forming the simplex
	// Returns HullSimplex(-1,-1,-1,-1) if the point cloud is degenerate and no
	// tetrahedron with significant volume can be formed.
	static HullSimplex findSimplex(const PxVec3* verts, PxU32 verts_count, Ps::Array<PxU8>& allow,const PxVec3& minMax)
	{
		// pick the basis vectors - slightly skewed axes, presumably to avoid
		// degenerate picks on exactly axis-aligned data (TODO confirm intent)
		PxVec3 basisVector[3];
		PxVec3 basis[3];
		basisVector[0] = PxVec3( 1.0f,  0.02f, 0.01f);
		basisVector[1] = PxVec3(-0.02f, 1.0f, -0.01f);
		basisVector[2] = PxVec3( 0.01f, 0.02f, 1.0f );

		PxU32 index0 = 0;
		PxU32 index1 = 1;
		PxU32 index2 = 2;

		// make the order of the basis vector depending on the points bounds, first basis test will be done
		// along the longest axis
		if(minMax.z > minMax.x && minMax.z > minMax.y)
		{
			index0 = 2;
			index1 = 0;
			index2 = 1;
		}
		else
		{
			if(minMax.y > minMax.x && minMax.y > minMax.z)
			{
				index0 = 1;
				index1 = 2;
				index2 = 0;
			}
		}

		// pick the fist basis vector
		basis[0] = basisVector[index0];
		// find the indices along the pos/neg direction
		PxI32 p0 = maxIndexInDirSterid(verts,verts_count, basis[0],allow);
		PxI32 p1 = maxIndexInDirSterid(verts,verts_count,-basis[0],allow);

		// set the first simplex axis
		basis[0] = verts[p0]-verts[p1];
		// if the points are the same or the basis vector is zero, terminate we failed to find a simplex
		if(p0==p1 || basis[0]==PxVec3(0.0f))
			return HullSimplex(-1,-1,-1,-1);

		// get the orthogonal vectors against the new basis[0] vector
		basis[1] = basisVector[index1].cross(basis[0]); //cross(float3( 1, 0.02f, 0),basis[0]);
		basis[2] = basisVector[index2].cross(basis[0]); //cross(float3(-0.02f, 1, 0),basis[0]);
		// pick the longer basis vector
		basis[1] = ((basis[1]).magnitudeSquared() > (basis[2]).magnitudeSquared()) ? basis[1] : basis[2];
		basis[1].normalize();

		// get the index along the picked second axis
		PxI32 p2 = maxIndexInDirSterid(verts,verts_count,basis[1],allow);
		// if we got the same point, try the negative direction
		if(p2 == p0 || p2 == p1)
		{
			p2 = maxIndexInDirSterid(verts,verts_count,-basis[1],allow);
		}
		// we failed to create the simplex the points are the same as the base line
		if(p2 == p0 || p2 == p1)
			return HullSimplex(-1,-1,-1,-1);

		// set the second simplex edge
		basis[1] = verts[p2] - verts[p0];
		// get the last orthogonal direction
		basis[2] = basis[1].cross(basis[0]);
		basis[2].normalize();

		// get the index along the last direction
		PxI32 p3 = maxIndexInDirSterid(verts,verts_count,basis[2],allow);
		// retry in the opposite direction when the pick duplicates an earlier
		// index or the resulting tetrahedron would be (almost) flat
		if(p3==p0||p3==p1||p3==p2||!HullSimplex::hasVolume(verts, PxU32(p0), PxU32(p1), PxU32(p2), PxU32(p3)))
		{
			p3 = maxIndexInDirSterid(verts,verts_count,-basis[2],allow);
		}
		// if this index was already chosen terminate we dont have the simplex
		if(p3==p0||p3==p1||p3==p2)
			return HullSimplex(-1,-1,-1,-1);

		PX_ASSERT(!(p0==p1||p0==p2||p0==p3||p1==p2||p1==p3||p2==p3));

		// check the axis order: swap two indices if the tetrahedron is
		// negatively oriented (signed volume test)
		if((verts[p3]-verts[p0]).dot((verts[p1]-verts[p0]).cross(verts[p2]-verts[p0])) < 0)
		{
			Ps::swap(p2,p3);
		}
		return HullSimplex(p0,p1,p2,p3);
	}
+
+ //////////////////////////////////////////////////////////////////////////
+ // helper struct for hull expand
+ struct ExpandPlane
+ {
+ PxPlane mPlane;
+ int mAdjacency[3]; // 1 - 0, 2 - 0, 2 - 1
+ int mExpandPoint;
+ float mExpandDistance;
+ int mIndices[3];
+ int mTrisIndex;
+
+ ExpandPlane()
+ {
+ for (int i = 0; i < 3; i++)
+ {
+ mAdjacency[i] = -1;
+ mIndices[i] = -1;
+ }
+
+ mExpandDistance = -FLT_MAX;
+ mExpandPoint = -1;
+ mTrisIndex = -1;
+ }
+ };
+
+
	//////////////////////////////////////////////////////////////////////////
	// helper class for triangle representation (three vertex indices)
	class int3
	{
	public:
		PxI32 x,y,z;
		int3(){}
		int3(PxI32 _x,PxI32 _y, PxI32 _z){x=_x;y=_y;z=_z;}
		// indexed access; relies on x,y,z being laid out contiguously in this
		// exact order (no bounds check on i)
		const PxI32& operator[](PxI32 i) const
		{
			return reinterpret_cast<const PxI32*>(this)[i];
		}
		PxI32& operator[](PxI32 i)
		{
			return reinterpret_cast<PxI32*>(this)[i];
		}
	};
+
	//////////////////////////////////////////////////////////////////////////
	// helper class for triangle representation
	// A hull triangle: three vertex indices (from int3) plus adjacency and
	// bookkeeping for the incremental hull loop. Construction/destruction is
	// restricted to HullTriangles (friend), which owns all instances.
	class Tri : public int3, public Ps::UserAllocated
	{
	public:
		int3 n;			// ids of the three neighbor triangles; n[i] is the neighbor across the edge opposite vertex i
		PxI32 id;		// slot index inside HullTriangles (set by createTri)
		PxI32 vmax;		// presumably the furthest candidate vertex for this face - set outside this chunk, verify against caller
		float rise;		// presumably vmax's distance above the face - set outside this chunk, verify against caller

		// get a writable reference to the neighbor id stored for edge (a,b)
		PxI32& neib(PxI32 a, PxI32 b)
		{
			// NOTE(review): on failure this returns a reference to a function-local
			// static that callers can write through - shared, not thread-safe.
			// In debug builds the assert below fires before it is ever used.
			static PxI32 er=-1;
			for(PxI32 i=0;i<3;i++)
			{
				PxI32 i1= (i+1)%3;
				PxI32 i2= (i+2)%3;
				// the neighbor across edge (a,b) is stored at the index of the
				// third vertex, regardless of the edge's direction
				if((*this)[i]==a && (*this)[i1]==b) return n[i2];
				if((*this)[i]==b && (*this)[i1]==a) return n[i2];
			}
			PX_ASSERT(0);
			return er;
		}

		// get triangle normal (unit length); returns (1,0,0) for a degenerate triangle
		PxVec3 getNormal(const PxVec3* verts) const
		{
			// return the normal of the triangle
			// inscribed by v0, v1, and v2
			const PxVec3& v0 = verts[(*this)[0]];
			const PxVec3& v1 = verts[(*this)[1]];
			const PxVec3& v2 = verts[(*this)[2]];
			PxVec3 cp= (v1-v0).cross(v2-v1);
			float m= (cp).magnitude();
			if(m==0)
				return PxVec3(1.f,0.0f,0.0f);
			return cp*(1.0f/m);
		}

		// squared magnitude of the edge cross product (proportional to the
		// squared triangle area); used for cheap degeneracy/size comparisons
		float getArea2(const PxVec3* verts) const
		{
			const PxVec3& v0 = verts[(*this)[0]];
			const PxVec3& v1 = verts[(*this)[1]];
			const PxVec3& v2 = verts[(*this)[2]];
			return ((v0-v1).cross(v2-v0)).magnitudeSquared();
		}

		friend class HullTriangles;
	protected:

		// neighbors start unset (-1); id is assigned by HullTriangles::createTri
		Tri(PxI32 a, PxI32 b, PxI32 c) : int3(a, b, c), n(-1,-1,-1)
		{
			vmax=-1;
			rise = 0.0f;
		}

		~Tri()
		{
		}
	};
+
+ //////////////////////////////////////////////////////////////////////////
+ // checks if for given triangle the point is above the triangle in the normal direction
+ // value is checked against an epsilon
+ static PxI32 above(const PxVec3* vertices, const Tri& t, const PxVec3& p, float epsilon)
+ {
+ PxVec3 n = t.getNormal(vertices);
+ return (n.dot(p-vertices[t[0]]) > epsilon);
+ }
+
+ //////////////////////////////////////////////////////////////////////////
+ // checks if given triangle does contain the vertex v
+ static int hasVert(const int3& t, int v)
+ {
+ return (t[0]==v || t[1]==v || t[2]==v) ;
+ }
+
	//////////////////////////////////////////////////////////////////////////
	// helper class for hull triangles management
	// Owns all Tri instances. A deleted triangle leaves a NULL slot behind so
	// that triangle ids (slot indices) of the remaining triangles stay valid.
	class HullTriangles
	{
	public:
		HullTriangles()
		{
			mTriangles.reserve(256);
		}

		~HullTriangles()
		{
			// free the triangles still alive; deleted ones left NULL slots.
			// Tri derives from Ps::UserAllocated, so plain delete presumably
			// routes through the PhysX allocator matching PX_NEW_TEMP - verify.
			for (PxU32 i = 0; i < mTriangles.size(); i++)
			{
				if(mTriangles[i])
					delete mTriangles[i];
			}
			mTriangles.clear();
		}

		// slot access; may return NULL for a deleted triangle
		const Tri* operator[](PxU32 i) const
		{
			return mTriangles[i];
		}

		Tri* operator[](PxU32 i)
		{
			return mTriangles[i];
		}


		//////////////////////////////////////////////////////////////////////////

		const Ps::Array<Tri*>& getTriangles() const
		{
			return mTriangles;
		}
		Ps::Array<Tri*>& getTriangles()
		{
			return mTriangles;
		}

		//////////////////////////////////////////////////////////////////////////

		// number of slots (including NULL slots of deleted triangles)
		PxU32 size() const
		{
			return mTriangles.size();
		}

		//////////////////////////////////////////////////////////////////////////
		// create a new triangle and append it; its id is its slot index
		Tri* createTri(PxI32 a, PxI32 b, PxI32 c)
		{
			Tri* tri = PX_NEW_TEMP(Tri)(a, b, c);
			tri->id = PxI32(mTriangles.size());
			mTriangles.pushBack(tri);
			return tri;
		}

		//////////////////////////////////////////////////////////////////////////
		// delete triangle from the array; the slot is kept as NULL so the ids
		// of the remaining triangles stay valid
		void deleteTri(Tri* tri)
		{
			PX_ASSERT((mTriangles)[PxU32(tri->id)]==tri);
			(mTriangles)[PxU32(tri->id)] = NULL;
			delete tri;
		}

		//////////////////////////////////////////////////////////////////////////
		// consistency check: each neighbor of t must point back at t across
		// the shared edge (debug only - asserts, no return value)
		void checkit(Tri* t) const
		{
			PX_ASSERT((mTriangles)[PxU32(t->id)]==t);
			for(int i=0;i<3;i++)
			{
				const int i1=(i+1)%3;
				const int i2=(i+2)%3;
				const int a = (*t)[i1];
				const int b = (*t)[i2];
				PX_ASSERT(a!=b);
				PX_ASSERT( (mTriangles)[PxU32(t->n[i])]->neib(b,a) == t->id);
				PX_UNUSED(a);
				PX_UNUSED(b);
			}
		}

		//////////////////////////////////////////////////////////////////////////
		// find the triangle, which has the greatest rise (distance in the direction of normal)
		// return such a triangle if it does exist and if the rise is bigger than given epsilon
		Tri* findExtrudable(float epsilon) const
		{
			Tri* t = NULL;
			for(PxU32 i=0; i < mTriangles.size(); i++)
			{
				// while t is NULL any slot (even a NULL slot) is taken; once a
				// live triangle is held, only live ones with a larger rise replace it
				if(!t || ((mTriangles)[i] && (t->rise < (mTriangles)[i]->rise)))
				{
					t = (mTriangles)[i];
				}
			}
			if(!t)
				return NULL;
			return (t->rise > epsilon) ? t : NULL;
		}

		//////////////////////////////////////////////////////////////////////////
		// extrude: replace triangle t0 by the three triangles connecting vertex v
		// to t0's edges, fixing up the adjacency of the surrounding triangles
		void extrude(Tri* t0, PxI32 v)
		{
			int3 t= *t0;
			PxI32 n = PxI32(mTriangles.size());
			// create the 3 extruded triangles; each inherits one outer neighbor
			// from t0 and links to its two siblings (ids n+0, n+1, n+2)
			Tri* ta = createTri(v, t[1], t[2]);
			ta->n = int3(t0->n[0],n+1,n+2);
			(mTriangles)[PxU32(t0->n[0])]->neib(t[1],t[2]) = n+0;
			Tri* tb = createTri(v, t[2], t[0]);
			tb->n = int3(t0->n[1],n+2,n+0);
			(mTriangles)[PxU32(t0->n[1])]->neib(t[2],t[0]) = n+1;
			Tri* tc = createTri(v, t[0], t[1]);
			tc->n = int3(t0->n[2],n+0,n+1);
			(mTriangles)[PxU32(t0->n[2])]->neib(t[0],t[1]) = n+2;
			checkit(ta);
			checkit(tb);
			checkit(tc);

			// check if the added triangle is not already inserted
			// in that case we remove both and fix the neighbors
			// for the remaining triangles
			if(hasVert(*(mTriangles)[PxU32(ta->n[0])],v))
				removeb2b(ta,(mTriangles)[PxU32(ta->n[0])]);
			if(hasVert(*(mTriangles)[PxU32(tb->n[0])],v))
				removeb2b(tb,(mTriangles)[PxU32(tb->n[0])]);
			if(hasVert(*(mTriangles)[PxU32(tc->n[0])],v))
				removeb2b(tc,(mTriangles)[PxU32(tc->n[0])]);
			deleteTri(t0);
		}

	protected:
		//////////////////////////////////////////////////////////////////////////
		// stitch the outer neighbors of two back-to-back triangles together so
		// that s and t can be removed without leaving dangling adjacency
		void b2bfix(Tri* s, Tri* t)
		{
			for(int i=0;i<3;i++)
			{
				const int i1=(i+1)%3;
				const int i2=(i+2)%3;
				const int a = (*s)[i1];
				const int b = (*s)[i2];
				PX_ASSERT((mTriangles)[PxU32(s->neib(a,b))]->neib(b,a) == s->id);
				PX_ASSERT((mTriangles)[PxU32(t->neib(a,b))]->neib(b,a) == t->id);
				(mTriangles)[PxU32(s->neib(a,b))]->neib(b,a) = t->neib(b,a);
				(mTriangles)[PxU32(t->neib(b,a))]->neib(a,b) = s->neib(a,b);
			}
		}

		//////////////////////////////////////////////////////////////////////////
		// remove the 2 triangles which are the same and fix the neighbor triangles
		void removeb2b(Tri* s, Tri* t)
		{
			b2bfix(s,t);
			deleteTri(s);
			deleteTri(t);
		}


	private:
		Ps::Array<Tri*> mTriangles;	// triangle slots; NULL marks a deleted triangle
	};
+
+ }
+
 //////////////////////////////////////////////////////////////////////////
// Constructor: forwards the mesh descriptor and cooking params to the base
// class. mFinished stays false until createConvexHull() completes successfully.
InflationConvexHullLib::InflationConvexHullLib(const PxConvexMeshDesc& desc, const PxCookingParams& params)
	: ConvexHullLib(desc,params), mFinished(false)
{
}
+
+//////////////////////////////////////////////////////////////////////////
+// Main function to create the hull.
+// Construct the hull with set input parameters - ConvexMeshDesc and CookingParams
+PxConvexMeshCookingResult::Enum InflationConvexHullLib::createConvexHull()
+{
+ PxConvexMeshCookingResult::Enum res = PxConvexMeshCookingResult::eFAILURE;
+
+ PxU32 vcount = mConvexMeshDesc.points.count;
+ if ( vcount < 8 )
+ vcount = 8;
+
+ // allocate additional vec3 for V4 safe load in VolumeInteration
+ PxVec3* vsource = reinterpret_cast<PxVec3*> (PX_ALLOC_TEMP( sizeof(PxVec3)*vcount + 1, "PxVec3"));
+ PxVec3 scale;
+ PxVec3 center;
+ PxU32 ovcount;
+
+ // cleanup the vertices first
+ if(!cleanupVertices(mConvexMeshDesc.points.count, reinterpret_cast<const PxVec3*> (mConvexMeshDesc.points.data), mConvexMeshDesc.points.stride,
+ ovcount, vsource, scale, center ))
+ return res;
+
+ // scale vertices back to their original size.
+ for (PxU32 i=0; i<ovcount; i++)
+ {
+ PxVec3& v = vsource[i];
+ v.multiply(scale);
+ }
+
+ // compute the actual hull
+ ConvexHullLibResult::ErrorCode hullResult = computeHull(ovcount,vsource);
+ if(hullResult == ConvexHullLibResult::eSUCCESS)
+ {
+ mFinished = true;
+ res = PxConvexMeshCookingResult::eSUCCESS;
+ }
+ else
+ {
+ if(hullResult == ConvexHullLibResult::eZERO_AREA_TEST_FAILED)
+ res = PxConvexMeshCookingResult::eZERO_AREA_TEST_FAILED;
+ }
+
+ if(vsource)
+ {
+ PX_FREE(vsource);
+ }
+
+ return res;
+}
+
+//////////////////////////////////////////////////////////////////////////
+// computes the hull and stores results into mHullResult
+ConvexHullLibResult::ErrorCode InflationConvexHullLib::computeHull(PxU32 vertsCount, const PxVec3* verts)
+{
+ PX_ASSERT(verts);
+ PX_ASSERT(vertsCount > 0);
+
+ ConvexHull* hullOut = NULL;
+ ConvexHullLibResult::ErrorCode res = calchull(verts, vertsCount, hullOut);
+ if ((res == ConvexHullLibResult::eFAILURE) || (res == ConvexHullLibResult::eZERO_AREA_TEST_FAILED))
+ return res;
+
+ PX_ASSERT(hullOut);
+
+ // parse the hullOut and fill the result with vertices and polygons
+ mHullResult.mIndices = reinterpret_cast<PxU32*> (PX_ALLOC_TEMP(sizeof(PxU32)*(hullOut->getEdges().size()), "PxU32"));
+ mHullResult.mIndexCount=hullOut->getEdges().size();
+
+ mHullResult.mPolygonCount = hullOut->getFacets().size();
+ mHullResult.mPolygons = reinterpret_cast<PxHullPolygon*> (PX_ALLOC_TEMP(sizeof(PxHullPolygon)*mHullResult.mPolygonCount, "PxHullPolygon"));
+
+ // allocate additional vec3 for V4 safe load in VolumeInteration
+ mHullResult.mVertices = reinterpret_cast<PxVec3*> (PX_ALLOC_TEMP(sizeof(PxVec3)*hullOut->getVertices().size() + 1, "PxVec3"));
+ mHullResult.mVcount = hullOut->getVertices().size();
+ PxMemCopy(mHullResult.mVertices,hullOut->getVertices().begin(),sizeof(PxVec3)*mHullResult.mVcount);
+
+ PxU32 i=0;
+ PxU32 k=0;
+ PxU32 j = 1;
+ while(i<hullOut->getEdges().size())
+ {
+ j=1;
+ PxHullPolygon& polygon = mHullResult.mPolygons[k];
+ // get num indices per polygon
+ while(j+i < hullOut->getEdges().size() && hullOut->getEdges()[i].p == hullOut->getEdges()[i+j].p)
+ {
+ j++;
+ }
+ polygon.mNbVerts = Ps::to16(j);
+ polygon.mIndexBase = Ps::to16(i);
+
+ // get the plane
+ polygon.mPlane[0] = hullOut->getFacets()[k].n[0];
+ polygon.mPlane[1] = hullOut->getFacets()[k].n[1];
+ polygon.mPlane[2] = hullOut->getFacets()[k].n[2];
+
+ polygon.mPlane[3] = hullOut->getFacets()[k].d;
+
+ while(j--)
+ {
+ mHullResult.mIndices[i] = hullOut->getEdges()[i].v;
+ i++;
+ }
+ k++;
+ }
+
+ PX_ASSERT(k==hullOut->getFacets().size());
+ PX_DELETE(hullOut);
+
+ return res;
+}
+
+//////////////////////////////////////////////////////////////////////////
+// internal function taking the cleaned vertices and constructing the
+// new hull from them.
+// 1. using the incremental algorithm create base hull from the input vertices
+// 2. if we reached the vertex limit, we expand the hull
+// 3. otherwise we compute the new planes and inflate them
+// 4. we crop the AABB with the computed planes to construct the new hull
+ConvexHullLibResult::ErrorCode InflationConvexHullLib::calchull(const PxVec3* verts, PxU32 verts_count, ConvexHull*& hullOut)
+{
+ // step 1: run the incremental algorithm to produce the base hull triangles
+ local::HullTriangles hullTriangles;
+ const ConvexHullLibResult::ErrorCode genResult = calchullgen(verts, verts_count, hullTriangles);
+ if((genResult == ConvexHullLibResult::eFAILURE) || (genResult == ConvexHullLibResult::eZERO_AREA_TEST_FAILED))
+ return genResult;
+
+ // step 2: vertex limit hit - expand the limited hull instead,
+ // either by plane shifting or by OBB slicing depending on the cooking flags
+ if(genResult == ConvexHullLibResult::eVERTEX_LIMIT_REACHED)
+ {
+ const ConvexHullLibResult::ErrorCode expandResult = (mConvexMeshDesc.flags & PxConvexFlag::ePLANE_SHIFTING) ?
+ expandHull(verts, verts_count, hullTriangles, hullOut) :
+ expandHullOBB(verts, verts_count, hullTriangles, hullOut);
+ if((expandResult == ConvexHullLibResult::eFAILURE) || (expandResult == ConvexHullLibResult::eZERO_AREA_TEST_FAILED))
+ return expandResult;
+ return ConvexHullLibResult::eSUCCESS;
+ }
+
+ // step 3+4: compute inflated planes from the triangles and crop the AABB
+ // with them to build the final hull
+ Ps::Array<PxPlane> inflatedPlanes;
+ if(!calchullplanes(verts, hullTriangles, inflatedPlanes))
+ return ConvexHullLibResult::eFAILURE;
+
+ if(!overhull(verts, verts_count, inflatedPlanes, hullOut))
+ return ConvexHullLibResult::eFAILURE;
+
+ return ConvexHullLibResult::eSUCCESS;
+}
+
+//////////////////////////////////////////////////////////////////////////
+// computes the actual hull using the incremental algorithm
+// in - vertices, numVertices
+// out - triangles
+// 1. construct the initial simplex
+// 2. each step take the furthest vertex from the hull and add it
+// 3. terminate if we reached the hull limit or all verts are used
+ConvexHullLibResult::ErrorCode InflationConvexHullLib::calchullgen(const PxVec3* verts, PxU32 verts_count, local::HullTriangles& triangles)
+{
+ // at least 4 verts so we can construct a simplex
+ // limit is 256 for OBB slicing or fixed limit for plane shifting
+ PxU32 vlimit = (mConvexMeshDesc.flags & PxConvexFlag::ePLANE_SHIFTING) ? mConvexMeshDesc.vertexLimit : 256u;
+ PxU32 numHullVerts = 4;
+ if(verts_count < 4)
+ return ConvexHullLibResult::eFAILURE;
+
+ PxU32 j;
+ PxBounds3 bounds;
+ bounds.setEmpty();
+
+ // per-vertex flags: isextreme marks verts already added to the hull,
+ // allow marks verts still eligible as candidates
+ Ps::Array<PxU8> isextreme;
+ isextreme.reserve(verts_count);
+
+ Ps::Array<PxU8> allow;
+ allow.reserve(verts_count);
+
+ for(j=0; j < verts_count; j++)
+ {
+ allow.pushBack(1);
+ isextreme.pushBack(0);
+ bounds.include(verts[j]);
+ }
+
+ // epsilon scales with the overall point-cloud size
+ const PxVec3 dimensions = bounds.getDimensions();
+ const float epsilon = dimensions.magnitude() * local::DIMENSION_EPSILON_MULTIPLY;
+ // store tolerances on the member so the later expand/crop stages reuse them
+ mTolerance = 0.001f;
+ mPlaneTolerance = epsilon;
+
+ const bool useAreaTest = mConvexMeshDesc.flags & PxConvexFlag::eCHECK_ZERO_AREA_TRIANGLES ? true : false;
+ const float areaEpsilon = useAreaTest ? mCookingParams.areaTestEpsilon * 2.0f : epsilon*epsilon*0.1f;
+
+ // find the simplex
+ local::HullSimplex p = local::findSimplex(verts,verts_count,allow, dimensions);
+ if(p.x==-1) // simplex failed
+ return ConvexHullLibResult::eFAILURE;
+
+ // a valid interior point
+ PxVec3 center = (verts[p[0]]+verts[p[1]]+verts[p[2]]+verts[p[3]]) /4.0f;
+
+ // add the simplex triangles into the triangle array
+ local::Tri *t0 = triangles.createTri(p[2], p[3], p[1]); t0->n=local::int3(2,3,1);
+ local::Tri *t1 = triangles.createTri(p[3], p[2], p[0]); t1->n=local::int3(3,2,0);
+ local::Tri *t2 = triangles.createTri(p[0], p[1], p[3]); t2->n=local::int3(0,1,3);
+ local::Tri *t3 = triangles.createTri(p[1], p[0], p[2]); t3->n=local::int3(1,0,2);
+ // mark the simplex indices as extremes
+ isextreme[PxU32(p[0])]=isextreme[PxU32(p[1])]=isextreme[PxU32(p[2])]=isextreme[PxU32(p[3])]=1;
+
+ // check if the added simplex triangles are valid
+ triangles.checkit(t0);
+ triangles.checkit(t1);
+ triangles.checkit(t2);
+ triangles.checkit(t3);
+
+ // parse the initial triangles and set max vertex along the normal and its distance
+ for(j=0;j< triangles.size(); j++)
+ {
+ local::Tri *t=(triangles.getTriangles())[j];
+ PX_ASSERT(t);
+ PX_ASSERT(t->vmax<0);
+ PxVec3 n= (*t).getNormal(verts);
+ t->vmax = local::maxIndexInDirSterid(verts,verts_count,n,allow);
+ t->rise = n.dot(verts[t->vmax]-verts[(*t)[0]]);
+
+ // use the areaTest to drop small triangles, which can cause trouble to the simulation,
+ // if we drop triangles from the initial simplex, we let the user know that the provided points form
+ // a simplex which is too small for given area threshold
+ if(useAreaTest && ((verts[(*t)[1]]-verts[(*t)[0]]).cross(verts[(*t)[2]]-verts[(*t)[1]])).magnitude() < areaEpsilon)
+ {
+ // the whole simplex is rejected, remove all four triangles before returning
+ triangles.deleteTri(t0);
+ triangles.deleteTri(t1);
+ triangles.deleteTri(t2);
+ triangles.deleteTri(t3);
+ return ConvexHullLibResult::eZERO_AREA_TEST_FAILED;
+ }
+ }
+
+ local::Tri *te;
+ // lower the vertex limit, we did already set 4 verts
+ vlimit-=4;
+ // iterate over triangles till we reach the limit or we dont have triangles with
+ // significant rise or we cannot add any triangles at all
+ while(vlimit >0 && ((te = triangles.findExtrudable(epsilon)) != NULL))
+ {
+ PxI32 v = te->vmax;
+ PX_ASSERT(!isextreme[PxU32(v)]); // wtf we've already done this vertex
+ // set as extreme point
+ isextreme[PxU32(v)]=1;
+
+ j=triangles.size();
+ // go through the triangles and extrude the extreme point if it is above it
+ while(j--)
+ {
+ if(!(triangles)[j])
+ continue;
+ const local::Tri& t= *(triangles)[j];
+ if(above(verts,t,verts[v],0.01f*epsilon))
+ {
+ triangles.extrude((triangles)[j],v);
+ }
+ }
+
+ // now check for those degenerate cases where we have a flipped triangle or a really skinny triangle
+ j=triangles.size();
+ while(j--)
+ {
+ if(!(triangles)[j])
+ continue;
+ // new triangles (those containing v) are at the end; stop once we leave them
+ if(!hasVert(*(triangles)[j],v))
+ break;
+ local::int3 nt=*(triangles)[j];
+ if(above(verts,*(triangles)[j],center,0.01f*epsilon) || ((verts[nt[1]]-verts[nt[0]]).cross(verts[nt[2]]-verts[nt[1]])).magnitude() < areaEpsilon)
+ {
+ // extrude the neighbour instead to swallow the degenerate triangle,
+ // then rescan the whole list from the end
+ local::Tri *nb = (triangles)[PxU32((triangles)[j]->n[0])];
+ PX_ASSERT(nb);
+ PX_ASSERT(!hasVert(*nb,v));
+ PX_ASSERT(PxU32(nb->id)<j);
+ triangles.extrude(nb,v);
+ j=triangles.size();
+ }
+ }
+
+ // get new rise and vmax for the new triangles
+ j=triangles.size();
+ while(j--)
+ {
+ local::Tri *t=(triangles)[j];
+ if(!t)
+ continue;
+ // triangles with vmax already set are old ones; stop scanning
+ if(t->vmax >= 0)
+ break;
+ PxVec3 n= t->getNormal(verts);
+ t->vmax = local::maxIndexInDirSterid(verts,verts_count,n,allow);
+ if(isextreme[PxU32(t->vmax)])
+ {
+ t->vmax=-1; // already done that vertex - algorithm needs to be able to terminate.
+ }
+ else
+ {
+ t->rise = n.dot(verts[t->vmax]-verts[(*t)[0]]);
+ }
+ }
+ // we added a vertex we lower the limit
+ vlimit --;
+ numHullVerts++;
+ }
+
+ // report limit overflow so calchull can pick the expansion fallback
+ if((mConvexMeshDesc.flags & PxConvexFlag::ePLANE_SHIFTING) && vlimit == 0)
+ return ConvexHullLibResult::eVERTEX_LIMIT_REACHED;
+ if (!(mConvexMeshDesc.flags & PxConvexFlag::ePLANE_SHIFTING) && numHullVerts > mConvexMeshDesc.vertexLimit)
+ return ConvexHullLibResult::eVERTEX_LIMIT_REACHED;
+
+ return ConvexHullLibResult::eSUCCESS;
+}
+
+//////////////////////////////////////////////////////////////////////////
+// expand the hull from the limited triangles set
+// expand hull will do following steps:
+// 1. get planes from triangles that form the best hull with given vertices
+// 2. compute the adjacency information for the planes
+// 3. expand the planes to have all vertices inside the planes volume
+// 4. compute new points by 3 adjacency planes intersections
+// 5. take those points and create the hull from them
+ConvexHullLibResult::ErrorCode InflationConvexHullLib::expandHull(const PxVec3* verts, PxU32 vertsCount, const local::HullTriangles& triangles, ConvexHull*& hullOut)
+{
+#if PX_DEBUG
+ // debug-only helper: verifies no input vertex lies (beyond tolerance) outside any expanded plane
+ struct LocalTests
+ {
+ static bool PlaneCheck(const PxVec3* verts_, PxU32 verts_count_, Ps::Array<local::ExpandPlane>& planes)
+ {
+ for(PxU32 i=0;i<planes.size();i++)
+ {
+ const local::ExpandPlane& expandPlane = planes[i];
+ if(expandPlane.mTrisIndex != -1)
+ {
+ for(PxU32 j=0;j<verts_count_;j++)
+ {
+ const PxVec3& vertex = verts_[j];
+
+ PX_ASSERT(expandPlane.mPlane.distance(vertex) < 0.02f);
+
+ if(expandPlane.mPlane.distance(vertex) > 0.02f)
+ {
+ return false;
+ }
+ }
+ }
+ }
+ return true;
+ }
+ };
+#endif
+
+
+ Ps::Array<local::ExpandPlane> planes;
+
+ // need planes and the adjacency for the triangle
+ int numPoints = 0;
+ for(PxU32 i=0; i < triangles.size();i++)
+ {
+ local::ExpandPlane expandPlane;
+ if((triangles)[i])
+ {
+ const local::Tri *t=(triangles)[i];
+ PxPlane p;
+ p.n = t->getNormal(verts);
+ p.d = -p.n.dot(verts[(*t)[0]]);
+ expandPlane.mPlane = p;
+
+ for (int l = 0; l < 3; l++)
+ {
+ if(t->n[l] > numPoints)
+ {
+ numPoints = t->n[l];
+ }
+ }
+
+ // find the three triangles adjacent to t: a triangle sharing two
+ // vertices with one of t's edges is the neighbour across that edge
+ for(PxU32 j=0;j<triangles.size();j++)
+ {
+ if((triangles)[j] && i != j)
+ {
+ const local::Tri *testTris=(triangles)[j];
+
+ // shared-vertex counts against edge (0,1), (0,2) and (2,1) of t
+ int numId0 = 0;
+ int numId1 = 0;
+ int numId2 = 0;
+
+ for (int k = 0; k < 3; k++)
+ {
+ int testI = (*testTris)[k];
+ if(testI == (*t)[0] || testI == (*t)[1])
+ {
+ numId0++;
+ }
+ if(testI == (*t)[0] || testI == (*t)[2])
+ {
+ numId1++;
+ }
+ if(testI == (*t)[2] || testI == (*t)[1])
+ {
+ numId2++;
+ }
+ }
+
+ if(numId0 == 2)
+ {
+ PX_ASSERT(expandPlane.mAdjacency[0] == -1);
+ expandPlane.mAdjacency[0] = int(j);
+ }
+ if(numId1 == 2)
+ {
+ PX_ASSERT(expandPlane.mAdjacency[1] == -1);
+ expandPlane.mAdjacency[1] = int(j);
+ }
+ if(numId2 == 2)
+ {
+ PX_ASSERT(expandPlane.mAdjacency[2] == -1);
+ expandPlane.mAdjacency[2] = int(j);
+ }
+ }
+ }
+
+ expandPlane.mTrisIndex = int(i);
+ }
+ planes.pushBack(expandPlane);
+ }
+ // NOTE(review): this count is never read - numPoints is reset to 0 below
+ // before the point-generation pass; the loop over t->n above looks like dead code
+ numPoints++;
+
+ // go over the planes now and expand them
+ // for each plane remember the farthest vertex in front of it
+ for(PxU32 i=0;i< vertsCount;i++)
+ {
+ const PxVec3& vertex = verts[i];
+
+ for(PxU32 j=0;j< triangles.size();j++)
+ {
+ local::ExpandPlane& expandPlane = planes[j];
+ if(expandPlane.mTrisIndex != -1)
+ {
+ float dist = expandPlane.mPlane.distance(vertex);
+ if(dist > 0 && dist > expandPlane.mExpandDistance)
+ {
+ expandPlane.mExpandDistance = dist;
+ expandPlane.mExpandPoint = int(i);
+ }
+ }
+ }
+ }
+
+ // expand the planes
+ // shift each plane out so its farthest outlier ends up on the plane
+ for(PxU32 i=0;i<planes.size();i++)
+ {
+ local::ExpandPlane& expandPlane = planes[i];
+ if(expandPlane.mTrisIndex != -1)
+ {
+ if(expandPlane.mExpandPoint >= 0)
+ expandPlane.mPlane.d -= expandPlane.mExpandDistance;
+ }
+ }
+
+ PX_ASSERT(LocalTests::PlaneCheck(verts,vertsCount,planes));
+
+ Ps::Array <int> translateTable;
+ Ps::Array <PxVec3> points;
+ numPoints = 0;
+
+ // find new triangle points and store them
+ // each new point is the intersection of a plane with two of its adjacent planes
+ for(PxU32 i=0;i<planes.size();i++)
+ {
+ local::ExpandPlane& expandPlane = planes[i];
+ if(expandPlane.mTrisIndex != -1)
+ {
+ const local::Tri *expandTri=(triangles)[PxU32(expandPlane.mTrisIndex)];
+
+ for (int j = 0; j < 3; j++)
+ {
+ local::ExpandPlane& plane1 = planes[PxU32(expandPlane.mAdjacency[j])];
+ local::ExpandPlane& plane2 = planes[PxU32(expandPlane.mAdjacency[(j + 1)%3])];
+ const local::Tri *tri1=(triangles)[PxU32(expandPlane.mAdjacency[j])];
+ const local::Tri *tri2=(triangles)[PxU32(expandPlane.mAdjacency[(j + 1)%3])];
+
+ // locate the vertex shared by all three triangles and its slot in each
+ int indexE = -1;
+ int index1 = -1;
+ int index2 = -1;
+ for (int l = 0; l < 3; l++)
+ {
+ for (int k = 0; k < 3; k++)
+ {
+ for (int m = 0; m < 3; m++)
+ {
+ if((*expandTri)[l] == (*tri1)[k] && (*expandTri)[l] == (*tri2)[m])
+ {
+ indexE = l;
+ index1 = k;
+ index2 = m;
+ }
+ }
+ }
+ }
+
+ PX_ASSERT(indexE != -1);
+
+ // check whether this original vertex already produced a new point
+ int foundIndex = -1;
+ for (PxU32 u = 0; u < translateTable.size(); u++)
+ {
+ if(translateTable[u] == ((*expandTri)[indexE]))
+ {
+ foundIndex = int(u);
+ break;
+ }
+ }
+
+ PxVec3 point = threePlaneIntersection(expandPlane.mPlane, plane1.mPlane, plane2.mPlane);
+
+ if(foundIndex == -1)
+ {
+ expandPlane.mIndices[indexE] = numPoints;
+ plane1.mIndices[index1] = numPoints;
+ plane2.mIndices[index2] = numPoints;
+
+ points.pushBack(point);
+ translateTable.pushBack((*expandTri)[indexE]);
+ numPoints++;
+ }
+ else
+ {
+ // point already exists; replace it if the stored one lies too far
+ // behind any of the three planes
+ if(expandPlane.mPlane.distance(points[PxU32(foundIndex)]) < -0.02f || plane1.mPlane.distance(points[PxU32(foundIndex)]) < -0.02f || plane2.mPlane.distance(points[PxU32(foundIndex)]) < -0.02f)
+ {
+ points[PxU32(foundIndex)] = point;
+ }
+
+ expandPlane.mIndices[indexE] = foundIndex;
+ plane1.mIndices[index1] = foundIndex;
+ plane2.mIndices[index2] = foundIndex;
+ }
+
+ }
+ }
+ }
+
+ // construct again the hull from the new points
+ local::HullTriangles outTriangles;
+ ConvexHullLibResult::ErrorCode rc = calchullgen(points.begin(),PxU32(numPoints), outTriangles);
+ if ((rc == ConvexHullLibResult::eFAILURE) || (rc == ConvexHullLibResult::eZERO_AREA_TEST_FAILED))
+ return rc;
+
+ // cleanup the unused vertices
+ // keep only points referenced by some output triangle; translateTable maps
+ // old point index -> compacted index
+ Ps::Array<PxVec3> usedVertices;
+ translateTable.clear();
+ translateTable.resize(points.size());
+ for (PxU32 i = 0; i < points.size(); i++)
+ {
+ for (PxU32 j = 0; j < outTriangles.size(); j++)
+ {
+ const local::Tri* tri = outTriangles[j];
+ if(tri)
+ {
+ if((*tri)[0] == int(i) || (*tri)[1] == int(i) || (*tri)[2] == int(i))
+ {
+ translateTable[i] = int(usedVertices.size());
+ usedVertices.pushBack(points[i]);
+ break;
+ }
+ }
+ }
+ }
+
+ // now construct the hullOut
+ Ps::Array<PxPlane> inputPlanes; // < just a blank input planes
+ ConvexHull* c = PX_NEW_TEMP(ConvexHull)(inputPlanes);
+
+ // copy the vertices
+ for (PxU32 i = 0; i < usedVertices.size(); i++)
+ {
+ c->getVertices().pushBack(usedVertices[i]);
+ }
+
+ // copy planes and create edges
+ // every surviving triangle becomes one facet with 3 half-edges
+ PxU32 numFaces = 0;
+ for (PxU32 i = 0; i < outTriangles.size(); i++)
+ {
+ const local::Tri* tri = outTriangles[i];
+ if(tri)
+ {
+ PxPlane triPlane;
+ triPlane.n = tri->getNormal(points.begin());
+ triPlane.d = -triPlane.n.dot(points[PxU32((*tri)[0])]);
+ c->getFacets().pushBack(triPlane);
+
+ for (int j = 0; j < 3; j++)
+ {
+ ConvexHull::HalfEdge edge;
+ edge.p = Ps::to8(numFaces);
+ edge.v = Ps::to8(translateTable[PxU32((*tri)[j])]);
+ c->getEdges().pushBack(edge);
+ }
+ numFaces++;
+ }
+ }
+ hullOut = c;
+ return ConvexHullLibResult::eSUCCESS;
+}
+
+//////////////////////////////////////////////////////////////////////////
+// expand the hull from the limited triangles set
+// 1. collect all planes
+// 2. create OBB from the input verts
+// 3. slice the OBB with the planes
+// 4. iterate till vlimit is reached
+ConvexHullLibResult::ErrorCode InflationConvexHullLib::expandHullOBB(const PxVec3* verts, PxU32 vertsCount, const local::HullTriangles& triangles, ConvexHull*& hullOut)
+{
+ Ps::Array<PxPlane> expandPlanes;
+ expandPlanes.reserve(triangles.size());
+
+ // temporary descriptor buffers used only to compute the OBB below
+ // NOTE(review): allocated with PX_NEW_TEMP but released with PX_FREE_AND_RESET -
+ // presumably both map to the same allocator for POD arrays; verify the macros
+ PxU32* indices = PX_NEW_TEMP(PxU32)[triangles.size()*3];
+ PxHullPolygon* polygons = PX_NEW_TEMP(PxHullPolygon)[triangles.size()];
+
+ PxU16 currentIndex = 0;
+ PxU32 currentFace = 0;
+
+ // collect expand planes
+ // each valid triangle contributes one plane, one polygon and three indices
+ for (PxU32 i = 0; i < triangles.size(); i++)
+ {
+ local::ExpandPlane expandPlane;
+ if ((triangles)[i])
+ {
+ const local::Tri *t = (triangles)[i];
+ PxPlane p;
+ p.n = t->getNormal(verts);
+ p.d = -p.n.dot(verts[(*t)[0]]);
+
+ // store the polygon
+ PxHullPolygon& polygon = polygons[currentFace++];
+ polygon.mIndexBase = currentIndex;
+ polygon.mNbVerts = 3;
+ polygon.mPlane[0] = p.n[0];
+ polygon.mPlane[1] = p.n[1];
+ polygon.mPlane[2] = p.n[2];
+ polygon.mPlane[3] = p.d;
+
+ // store the index list
+ indices[currentIndex++] = PxU32((*t)[0]);
+ indices[currentIndex++] = PxU32((*t)[1]);
+ indices[currentIndex++] = PxU32((*t)[2]);
+
+ expandPlanes.pushBack(p);
+ }
+ }
+
+ PxTransform obbTransform;
+ PxVec3 sides;
+
+ // compute the OBB
+ // build a temporary convex mesh descriptor over the input data for computeOBBFromConvex
+ PxConvexMeshDesc convexDesc;
+ convexDesc.points.count = vertsCount;
+ convexDesc.points.data = verts;
+ convexDesc.points.stride = sizeof(PxVec3);
+
+ convexDesc.indices.count = currentIndex;
+ convexDesc.indices.stride = sizeof(PxU32);
+ convexDesc.indices.data = indices;
+
+ convexDesc.polygons.count = currentFace;
+ convexDesc.polygons.data = polygons;
+ convexDesc.polygons.stride = sizeof(PxHullPolygon);
+
+ convexDesc.flags = mConvexMeshDesc.flags;
+
+ computeOBBFromConvex(convexDesc, sides, obbTransform);
+
+ // free the memory used for the convex mesh desc
+ PX_FREE_AND_RESET(indices);
+ PX_FREE_AND_RESET(polygons);
+
+ // crop the OBB
+ // never use more than 256 planes for the slicing
+ PxU32 maxplanes = PxMin(PxU32(256), expandPlanes.size());
+
+ // start from the OBB box hull and slice it plane by plane
+ ConvexHull* c = PX_NEW_TEMP(ConvexHull)(sides*0.5f, obbTransform, expandPlanes);
+
+ // tolerances previously computed by calchullgen
+ const float planeTolerance = mPlaneTolerance;
+ const float epsilon = mTolerance;
+
+ PxI32 k;
+ while (maxplanes-- && (k = c->findCandidatePlane(planeTolerance, epsilon)) >= 0)
+ {
+ ConvexHull* tmp = c;
+ c = convexHullCrop(*tmp, expandPlanes[PxU32(k)], planeTolerance);
+ if (c == NULL)
+ {
+ // crop failed, keep the previous hull
+ c = tmp;
+ break;
+ } // might want to debug this case better!!!
+ if (!c->assertIntact(planeTolerance))
+ {
+ // cropped hull is broken, roll back to the previous hull
+ PX_DELETE(c);
+ c = tmp;
+ break;
+ } // might want to debug this case better too!!!
+
+ // check for vertex limit
+ if (c->getVertices().size() > mConvexMeshDesc.vertexLimit)
+ {
+ // limit exceeded, roll back and stop slicing entirely
+ PX_DELETE(c);
+ c = tmp;
+ maxplanes = 0;
+ break;
+ }
+ PX_DELETE(tmp);
+ }
+
+ PX_ASSERT(c->assertIntact(planeTolerance));
+
+ hullOut = c;
+
+ return ConvexHullLibResult::eSUCCESS;
+}
+
+//////////////////////////////////////////////////////////////////////////
+// calculate the planes from given triangles
+// 1. merge triangles with similar normal
+// 2. inflate the planes
+// 3. store the new triangles
+bool InflationConvexHullLib::calchullplanes(const PxVec3* verts, local::HullTriangles& triangles, Ps::Array<PxPlane>& planes)
+{
+ PxU32 i,j;
+ // cosine threshold: normals closer than MIN_ADJACENT_ANGLE are considered coplanar
+ float maxdot_minang = cosf(Ps::degToRad(local::MIN_ADJACENT_ANGLE));
+
+ // parse the triangles and check the angle between them, if the angle is below MIN_ADJACENT_ANGLE
+ // merge the triangles into single plane
+ for(i=0;i<triangles.size();i++)
+ {
+ if(triangles[i])
+ {
+ for(j=i+1;j<triangles.size();j++)
+ {
+ // re-check triangles[i] - it may have been deleted by an earlier j iteration
+ if(triangles[i] && triangles[j])
+ {
+ local::Tri *ti = triangles[i];
+ local::Tri *tj = triangles[j];
+ PxVec3 ni = ti->getNormal(verts);
+ PxVec3 nj = tj->getNormal(verts);
+ if(ni.dot(nj) > maxdot_minang)
+ {
+ // somebody has to die, keep the biggest triangle
+ if( ti->getArea2(verts) < tj->getArea2(verts))
+ {
+ triangles.deleteTri(triangles[i]);
+ }
+ else
+ {
+ triangles.deleteTri(triangles[j]);
+ }
+ }
+ }
+ }
+ }
+ }
+
+ // now add for each triangle that left a plane
+ // the plane is inflated: shifted outwards by the cooking skinWidth
+ for(i=0;i<triangles.size();i++)
+ {
+ if(triangles[i])
+ {
+
+ local::Tri *t = triangles[i];
+ PxVec3 n = t->getNormal(verts);
+ float d = -n.dot(verts[(*t)[0]]) - mCookingParams.skinWidth;
+ PxPlane p(n,d);
+ planes.pushBack(p);
+ }
+ }
+
+ // delete the triangles we don't need them anymore
+ for(i=0;i< triangles.size(); i++)
+ {
+ if(triangles[i])
+ {
+ triangles.deleteTri(triangles[i]);
+ }
+ }
+ triangles.getTriangles().clear();
+ return true;
+}
+
+//////////////////////////////////////////////////////////////////////////
+// create new points from the given planes, which form the new hull
+// 1. form an AABB from the input verts
+// 2. slice the AABB with the planes
+// 3. if sliced hull is still valid use it, otherwise step back, try different plane
+// 4. exit if limit reached or all planes added
+bool InflationConvexHullLib::overhull(const PxVec3* verts, PxU32 vertsCount,const Ps::Array<PxPlane>& planes, ConvexHull*& hullOut)
+{
+ PxU32 i,j;
+ if(vertsCount < 4)
+ return false;
+
+ const PxU32 planesLimit = 256;
+ PxU32 maxplanes = PxMin(planesLimit,planes.size());
+
+ // get the bounds
+ PxBounds3 bounds;
+ bounds.setEmpty();
+ for(i=0;i<vertsCount;i++)
+ {
+ bounds.include(verts[i]);
+ }
+ float diameter = bounds.getDimensions().magnitude();
+ PxVec3 emin = bounds.minimum;
+ PxVec3 emax = bounds.maximum;
+ float epsilon = 0.01f; // size of object is taken into account within candidate plane function. Used to multiply here by magnitude(emax-emin)
+ float planetestepsilon = (emax-emin).magnitude() * local::PAPERWIDTH;
+ // todo: add bounding cube planes to force bevel. or try instead not adding the diameter expansion ??? must think.
+ // ConvexH *convex = ConvexHMakeCube(bmin - float3(diameter,diameter,diameter),bmax+float3(diameter,diameter,diameter));
+
+ // now expand the axis aligned planes by half diameter, !AB what is the point here?
+ float maxdot_minang = cosf(Ps::degToRad(local::MIN_ADJACENT_ANGLE));
+ // j/2 selects the axis (x,y,z), j%2 the direction (+1 for max side, -1 for min side);
+ // a bound is pushed out only if some input plane is nearly parallel to that face
+ for(j=0;j<6;j++)
+ {
+ PxVec3 n(0,0,0);
+ n[j/2] = (j%2) ? 1.0f : -1.0f;
+ for(i=0; i < planes.size(); i++)
+ {
+ if(n.dot(planes[i].n) > maxdot_minang)
+ {
+ (*((j%2)?&emax:&emin)) += n * (diameter*0.5f);
+ break;
+ }
+ }
+ }
+
+ // start from the (expanded) AABB hull and slice it with the inflated planes
+ ConvexHull* c = PX_NEW_TEMP(ConvexHull)(emin,emax, planes);
+ PxI32 k;
+ // find the candidate plane and crop the hull
+ while(maxplanes-- && (k= c->findCandidatePlane(planetestepsilon, epsilon))>=0)
+ {
+ ConvexHull* tmp = c;
+ c = convexHullCrop(*tmp,planes[PxU32(k)], planetestepsilon);
+ if(c==NULL)
+ {
+ // crop failed, keep the previous hull
+ c=tmp;
+ break;
+ } // might want to debug this case better!!!
+ if(!c->assertIntact(planetestepsilon))
+ {
+ // cropped hull is broken, roll back to the previous hull
+ PX_DELETE(c);
+ c=tmp;
+ break;
+ } // might want to debug this case better too!!!
+
+ // check for vertex limit
+ if(c->getVertices().size() > mConvexMeshDesc.vertexLimit)
+ {
+ PX_DELETE(c);
+ c=tmp;
+ maxplanes = 0;
+ break;
+ }
+ // check for vertex limit per face if necessary, GRB supports max 32 verts per face
+ if ((mConvexMeshDesc.flags & PxConvexFlag::eGPU_COMPATIBLE) && c->maxNumVertsPerFace() > gpuMaxVertsPerFace)
+ {
+ PX_DELETE(c);
+ c = tmp;
+ maxplanes = 0;
+ break;
+ }
+ PX_DELETE(tmp);
+ }
+
+ PX_ASSERT(c->assertIntact(planetestepsilon));
+ hullOut = c;
+
+ return true;
+}
+
+
+//////////////////////////////////////////////////////////////////////////
+// fill the data
+void InflationConvexHullLib::fillConvexMeshDesc(PxConvexMeshDesc& outDesc)
+{
+ // the hull must have been computed before the descriptor can be filled
+ PX_ASSERT(mFinished);
+
+ // vertex buffer
+ outDesc.points.data = mHullResult.mVertices;
+ outDesc.points.count = mHullResult.mVcount;
+ outDesc.points.stride = sizeof(PxVec3);
+
+ // polygon index buffer
+ outDesc.indices.data = mHullResult.mIndices;
+ outDesc.indices.count = mHullResult.mIndexCount;
+ outDesc.indices.stride = sizeof(PxU32);
+
+ // polygon buffer
+ outDesc.polygons.data = mHullResult.mPolygons;
+ outDesc.polygons.count = mHullResult.mPolygonCount;
+ outDesc.polygons.stride = sizeof(PxHullPolygon);
+
+ // let the base class reorder the faces (swapLargestFace)
+ swapLargestFace(outDesc);
+}
+
+//////////////////////////////////////////////////////////////////////////
+
+InflationConvexHullLib::~InflationConvexHullLib()
+{
+ // release the hull result buffers; each pointer is only
+ // freed when the corresponding allocation actually happened
+ if(mHullResult.mVertices)
+ {
+ PX_FREE(mHullResult.mVertices);
+ }
+
+ if(mHullResult.mIndices)
+ {
+ PX_FREE(mHullResult.mIndices);
+ }
+
+ if(mHullResult.mPolygons)
+ {
+ PX_FREE(mHullResult.mPolygons);
+ }
+}
+
+//////////////////////////////////////////////////////////////////////////
diff --git a/PhysX_3.4/Source/PhysXCooking/src/convex/InflationConvexHullLib.h b/PhysX_3.4/Source/PhysXCooking/src/convex/InflationConvexHullLib.h
new file mode 100644
index 00000000..8369691f
--- /dev/null
+++ b/PhysX_3.4/Source/PhysXCooking/src/convex/InflationConvexHullLib.h
@@ -0,0 +1,133 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2016 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#ifndef PX_INFLATION_CONVEXHULLLIB_H
+#define PX_INFLATION_CONVEXHULLLIB_H
+
+#include "ConvexHullLib.h"
+#include "Ps.h"
+#include "PsArray.h"
+#include "PsUserAllocated.h"
+
+namespace local
+{
+ class HullTriangles;
+}
+
+namespace physx
+{
+ class ConvexHull;
+
+ //////////////////////////////////////////////////////////////////////////
+ // internal hull lib results
+ struct ConvexHullLibResult
+ {
+ // return code
+ enum ErrorCode
+ {
+ eSUCCESS = 0, // success!
+ eFAILURE, // failed.
+ eVERTEX_LIMIT_REACHED, // vertex limit reached fallback.
+ eZERO_AREA_TEST_FAILED// area test failed - failed to create simplex
+ };
+
+ PxU32 mVcount; // number of output hull vertices
+ PxU32 mIndexCount; // total number of polygon indices
+ PxU32 mPolygonCount; // number of hull polygons
+ PxVec3* mVertices; // vertex buffer, owned and freed by the hull lib
+ PxU32* mIndices; // index buffer, owned and freed by the hull lib
+ PxHullPolygon* mPolygons; // polygon buffer, owned and freed by the hull lib
+
+
+ // initializes all counts to zero and all buffers to NULL
+ ConvexHullLibResult()
+ : mVcount(0), mIndexCount(0), mPolygonCount(0),
+ mVertices(NULL), mIndices(NULL), mPolygons(NULL)
+ {
+ }
+ };
+
+ //////////////////////////////////////////////////////////////////////////
+ // inflation based hull library. Using the legacy Stan hull lib with inflation
+ // We construct the hull using incremental method and then inflate the resulting planes
+ // by specified skinWidth. We take the planes and crop AABB with them to construct
+ // the final hull. This method may reduce the number of polygons significantly
+ // in case of lot of vertices are used. On the other hand, we produce new vertices
+ // and enlarge the original hull constructed from the given input points.
+ // Generally speaking, the increase of vertices is usually too big, so it is not worthy
+ // to use this algorithm to reduce the number of polygons. This method is also very imprecise
+ // and may produce invalid hulls. It is recommended to use the new quickhull library.
+ class InflationConvexHullLib: public ConvexHullLib, public Ps::UserAllocated
+ {
+ PX_NOCOPY(InflationConvexHullLib)
+ public:
+
+ // functions
+ // desc - input convex mesh description, params - cooking parameters (skinWidth etc.)
+ InflationConvexHullLib(const PxConvexMeshDesc& desc, const PxCookingParams& params);
+
+ // releases the hull result buffers
+ ~InflationConvexHullLib();
+
+ // computes the convex hull from provided points
+ virtual PxConvexMeshCookingResult::Enum createConvexHull();
+
+ // fills the convexmeshdesc with computed hull data
+ virtual void fillConvexMeshDesc(PxConvexMeshDesc& desc);
+
+ protected:
+ // internal
+
+ // compute the hull
+ ConvexHullLibResult::ErrorCode computeHull(PxU32 vertsCount, const PxVec3* verts);
+
+ // computes the hull
+ ConvexHullLibResult::ErrorCode calchull(const PxVec3* verts, PxU32 verts_count, ConvexHull*& hullOut);
+
+ // computes the actual hull using the incremental algorithm
+ ConvexHullLibResult::ErrorCode calchullgen(const PxVec3* verts, PxU32 verts_count, local::HullTriangles& triangles);
+
+ // calculates the hull planes from the triangles
+ bool calchullplanes(const PxVec3* verts, local::HullTriangles& triangles, Ps::Array<PxPlane>& planes);
+
+ // construct the hull from given planes - create new verts
+ bool overhull(const PxVec3* verts, PxU32 vertsCount,const Ps::Array<PxPlane>& planes, ConvexHull*& hullOut);
+
+ // expand the hull with the limited triangles set
+ // used when the vertex limit was reached and ePLANE_SHIFTING is set
+ ConvexHullLibResult::ErrorCode expandHull(const PxVec3* verts, PxU32 vertsCount, const local::HullTriangles& triangles, ConvexHull*& hullOut);
+
+ // expand the hull with the limited triangles set
+ // used when the vertex limit was reached without ePLANE_SHIFTING - slices an OBB
+ ConvexHullLibResult::ErrorCode expandHullOBB(const PxVec3* verts, PxU32 vertsCount, const local::HullTriangles& triangles, ConvexHull*& hullOut);
+
+ private:
+ bool mFinished; // true once the hull was computed - checked by fillConvexMeshDesc
+ ConvexHullLibResult mHullResult; // output buffers, freed in the destructor
+ float mTolerance; // crop epsilon, set by calchullgen
+ float mPlaneTolerance; // plane test epsilon, set by calchullgen
+ };
+}
+
+#endif
diff --git a/PhysX_3.4/Source/PhysXCooking/src/convex/QuickHullConvexHullLib.cpp b/PhysX_3.4/Source/PhysXCooking/src/convex/QuickHullConvexHullLib.cpp
new file mode 100644
index 00000000..13b88364
--- /dev/null
+++ b/PhysX_3.4/Source/PhysXCooking/src/convex/QuickHullConvexHullLib.cpp
@@ -0,0 +1,2383 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2016 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#include "QuickHullConvexHullLib.h"
+#include "ConvexHullUtils.h"
+
+#include "PsAllocator.h"
+#include "PsUserAllocated.h"
+#include "PsSort.h"
+#include "PsMathUtils.h"
+#include "PsFoundation.h"
+#include "PsUtilities.h"
+#include "PsBitUtils.h"
+
+#include "foundation/PxMath.h"
+#include "foundation/PxPlane.h"
+#include "foundation/PxBounds3.h"
+#include "foundation/PxMemory.h"
+
+using namespace physx;
+
+namespace local
+{
+ //////////////////////////////////////////////////////////////////////////
+ static const float MIN_ADJACENT_ANGLE = 3.0f; // in degrees - result wont have two adjacent facets within this angle of each other.
+ static const float PLANE_THICKNES = 3.0f * PX_EPS_F32; // points within this distance are considered on a plane
+ static const float ACCEPTANCE_EPSILON_MULTIPLY = 2000.0f; // used to scale up plane tolerance to accept new points into convex, plane thickness tolerance is too high for point acceptance
+ static const float PLANE_TOLERANCE = 0.001f; // points within this distance are considered on a plane for post adjacent merging and eye vertex acceptance
+ static const float MAXDOT_MINANG = cosf(Ps::degToRad(MIN_ADJACENT_ANGLE)); // adjacent angle for dot product tests
+
+ //////////////////////////////////////////////////////////////////////////
+
+ struct QuickHullFace;
+ class ConvexHull;
+ class HullPlanes;
+
+ //////////////////////////////////////////////////////////////////////////
	// Simple growing pool allocator. Hands out items of type T from blocks of
	// mPreallocateSize items each; items are never freed individually, all
	// blocks are released at once in the destructor. When useIndexing is true,
	// each item is placement-constructed with its global index
	// (blockIndex * mPreallocateSize + i) so pooled items can identify themselves.
	// NOTE(review): the class is copyable (no PX_NOCOPY) - copying an instance
	// would double-free the blocks; presumably instances are never copied.
	template<typename T, bool useIndexing>
	class MemBlock
	{
	public:
		// preallocating constructor. Note this path does NOT placement-new the
		// items with their index - only init() does - so use the default
		// constructor + init() when useIndexing is true.
		MemBlock(PxU32 preallocateSize)
			: mPreallocateSize(preallocateSize), mCurrentBlock(0), mCurrentIndex(0)
		{
			PX_ASSERT(preallocateSize);
			T* block = reinterpret_cast<T*>(PX_ALLOC_TEMP(sizeof(T)*preallocateSize, "Quickhull MemBlock"));
			mBlocks.pushBack(block);
		}

		// empty constructor - init() must be called before any item is requested
		MemBlock()
			: mPreallocateSize(0), mCurrentBlock(0), mCurrentIndex(0)
		{
		}

		// allocate the first block; may only be called once, on a default-constructed pool
		void init(PxU32 preallocateSize)
		{
			PX_ASSERT(preallocateSize);
			PX_ASSERT(mPreallocateSize == 0);
			mPreallocateSize = preallocateSize;
			T* block = reinterpret_cast<T*>(PX_ALLOC_TEMP(sizeof(T)*preallocateSize, "Quickhull MemBlock"));
			if(useIndexing)
			{
				for (PxU32 i = 0; i < mPreallocateSize; i++)
				{
					// placement new to index data
					PX_PLACEMENT_NEW(&block[i], T)(i);
				}
			}
			mBlocks.pushBack(block);
		}

		// frees all blocks at once (no per-item destructor calls - T is expected
		// to be trivially destructible)
		~MemBlock()
		{
			for (PxU32 i = 0; i < mBlocks.size(); i++)
			{
				PX_FREE(mBlocks[i]);
			}
			mBlocks.clear();
		}

		// random access to an already handed-out item by its global index
		T* getItem(PxU32 index)
		{
			const PxU32 block = index/mPreallocateSize;
			const PxU32 itemIndex = index % mPreallocateSize;
			PX_ASSERT(block <= mCurrentBlock);
			PX_ASSERT(itemIndex < mPreallocateSize);
			return &(mBlocks[block])[itemIndex];
		}

		// hand out the next unused item, growing the pool by one block if needed
		T* getFreeItem()
		{
			PX_ASSERT(mPreallocateSize);
			// check if we have enough space in block, otherwise allocate new block
			if(mCurrentIndex < mPreallocateSize)
			{
				return &(mBlocks[mCurrentBlock])[mCurrentIndex++];
			}
			else
			{
				T* block = reinterpret_cast<T*>(PX_ALLOC_TEMP(sizeof(T)*mPreallocateSize, "Quickhull MemBlock"));
				mCurrentBlock++;
				if (useIndexing)
				{
					for (PxU32 i = 0; i < mPreallocateSize; i++)
					{
						// placement new to index data
						PX_PLACEMENT_NEW(&block[i], T)(mCurrentBlock*mPreallocateSize + i);
					}
				}
				mBlocks.pushBack(block);
				mCurrentIndex = 0;
				return &(mBlocks[mCurrentBlock])[mCurrentIndex++];
			}
		}

	private:
		PxU32 mPreallocateSize;	// items per block
		PxU32 mCurrentBlock;	// index of the block currently being filled
		PxU32 mCurrentIndex;	// next free item slot within the current block
		Ps::Array<T*> mBlocks;	// all allocated blocks (owned)
	};
+
+ //////////////////////////////////////////////////////////////////////////
+ // representation of quick hull vertex
+ struct QuickHullVertex
+ {
+ PxVec3 point; // point vector
+ PxU32 index; // point index for compare
+ float dist; // distance from plane if necessary
+
+ QuickHullVertex* next; // link to next vertex, linked list used for conflict list
+
+ PX_FORCE_INLINE bool operator==(const QuickHullVertex& vertex) const
+ {
+ return index == vertex.index ? true : false;
+ }
+
+ PX_FORCE_INLINE bool operator <(const QuickHullVertex& vertex) const
+ {
+ return dist < vertex.dist ? true : false;
+ }
+ };
+
+ //////////////////////////////////////////////////////////////////////////
+ // representation of quick hull half edge
+ struct QuickHullHalfEdge
+ {
+ QuickHullHalfEdge() : prev(NULL), next(NULL), twin(NULL), face(NULL)
+ {
+ }
+
+ QuickHullHalfEdge(PxU32 )
+ : prev(NULL), next(NULL), twin(NULL), face(NULL)
+ {
+ }
+
+ QuickHullVertex tail; // tail vertex, head vertex is the tail of the twin
+
+ QuickHullHalfEdge* prev; // previous edge
+ QuickHullHalfEdge* next; // next edge
+ QuickHullHalfEdge* twin; // twin/opposite edge
+
+ QuickHullFace* face; // face where the edge belong
+
+ PX_FORCE_INLINE const QuickHullVertex& getTail() const
+ {
+ return tail;
+ }
+
+ PX_FORCE_INLINE const QuickHullVertex& getHead() const
+ {
+ PX_ASSERT(twin);
+ return twin->tail;
+ }
+
+ PX_FORCE_INLINE void setTwin(QuickHullHalfEdge* edge)
+ {
+ twin = edge;
+ edge->twin = this;
+ }
+
+ PX_FORCE_INLINE QuickHullFace* getOppositeFace() const
+ {
+ return twin->face;
+ }
+
+ float getOppositeFaceDistance() const;
+ };
+
+ //////////////////////////////////////////////////////////////////////////
+
+ typedef Ps::Array<QuickHullVertex*> QuickHullVertexArray;
+ typedef Ps::Array<QuickHullHalfEdge*> QuickHullHalfEdgeArray;
+ typedef Ps::Array<QuickHullFace*> QuickHullFaceArray;
+
+ //////////////////////////////////////////////////////////////////////////
+ // representation of quick hull face
+ struct QuickHullFace
+ {
+ enum FaceState
+ {
+ eVISIBLE,
+ eDELETED,
+ eNON_CONVEX
+ };
+
+ QuickHullHalfEdge* edge; // starting edge
+ PxU16 numEdges; // num edges on the face
+ QuickHullVertex* conflictList; // conflict list, used to determine unclaimed vertices
+
+ PxVec3 normal; // Newell plane normal
+ float area; // face area
+ PxVec3 centroid; // face centroid
+
+ float planeOffset; // Newell plane offset
+ float expandOffset; // used for plane expansion if vertex limit reached
+
+ FaceState state; // face validity state
+
+ QuickHullFace* nextFace; // used to indicate next free face in faceList
+ PxU32 index; // face index for compare identification
+
+ public:
+ QuickHullFace()
+ : edge(NULL), numEdges(0), conflictList(NULL), area(0.0f), planeOffset(0.0f), expandOffset(-FLT_MAX),
+ state(eVISIBLE), nextFace(NULL)
+ {
+ }
+
+ QuickHullFace(PxU32 ind)
+ : edge(NULL), numEdges(0), conflictList(NULL), area(0.0f), planeOffset(0.0f), expandOffset(-FLT_MAX),
+ state(eVISIBLE), nextFace(NULL), index(ind)
+ {
+ }
+
+ ~QuickHullFace()
+ {
+ }
+
+ // get edge on index
+ PX_FORCE_INLINE QuickHullHalfEdge* getEdge(PxU32 i) const
+ {
+ QuickHullHalfEdge* he = edge;
+ while (i > 0)
+ {
+ he = he->next;
+ i--;
+ }
+ return he;
+ }
+
+ // distance from a plane to provided point
+ PX_FORCE_INLINE float distanceToPlane(const PxVec3 p) const
+ {
+ return normal.dot(p) - planeOffset;
+ }
+
+ // compute face normal and centroid
+ PX_FORCE_INLINE void computeNormalAndCentroid()
+ {
+ PX_ASSERT(edge);
+ normal = PxVec3(PxZero);
+ numEdges = 1;
+
+ QuickHullHalfEdge* testEdge = edge;
+ QuickHullHalfEdge* startEdge = NULL;
+ float minDist = FLT_MAX;
+ for (PxU32 i = 0; i < 3; i++)
+ {
+ const float d = (testEdge->tail.point - testEdge->next->tail.point).magnitudeSquared();
+ if (d < minDist)
+ {
+ minDist = d;
+ startEdge = testEdge;
+ }
+ testEdge = testEdge->next;
+ }
+ PX_ASSERT(startEdge);
+
+ QuickHullHalfEdge* he = startEdge->next;
+ const PxVec3& p0 = startEdge->tail.point;
+ const PxVec3 d = he->tail.point - p0;
+ centroid = startEdge->tail.point;
+
+ do
+ {
+ numEdges++;
+ centroid += he->tail.point;
+
+ normal += d.cross(he->next->tail.point - p0);
+
+ he = he->next;
+ } while (he != startEdge);
+
+ area = normal.normalize();
+ centroid *= (1.0f / float(numEdges));
+
+ planeOffset = normal.dot(centroid);
+ }
+
+ // merge adjacent face
+ void mergeAdjacentFace(QuickHullHalfEdge* halfEdge, QuickHullFaceArray& discardedFaces);
+
+ // check face consistency
+ bool checkFaceConsistency();
+
+ private:
+ // connect halfedges
+ QuickHullFace* connectHalfEdges(QuickHullHalfEdge* hedgePrev, QuickHullHalfEdge* hedge);
+
+ // check if the face does have only 3 vertices
+ PX_FORCE_INLINE bool isTriangle() const
+ {
+ return numEdges == 3 ? true : false;
+ }
+
+ };
+
+ //////////////////////////////////////////////////////////////////////////
	// result codes returned by QuickHull::buildHull()
	struct QuickHullResult
	{
		enum Enum
		{
			eSUCCESS, // ok
			eZERO_AREA_TEST_FAILED, // area test failed for simplex
			eVERTEX_LIMIT_REACHED, // vertex limit reached need to expand hull
			ePOLYGONS_LIMIT_REACHED, // polygons hard limit reached
			eFAILURE // general failure
		};
	};
+
+ //////////////////////////////////////////////////////////////////////////
	// Quickhull base class holding the hull during construction.
	// Owns the vertex list, the half edge/face pools and the face array;
	// buildHull() drives the incremental quickhull algorithm.
	class QuickHull : public Ps::UserAllocated
	{
		PX_NOCOPY(QuickHull)
	public:

		QuickHull(const PxCookingParams& params, const PxConvexMeshDesc& desc);

		~QuickHull();

		// preallocate the edges, faces, vertices
		void preallocate(PxU32 numVertices);

		// parse the input verts, store them into internal format
		void parseInputVertices(const PxVec3* verts, PxU32 numVerts);

		// release the hull and data
		void releaseHull();

		// sets the precomputed min/max data (lets buildHull skip computeMinMaxVerts)
		void setPrecomputedMinMax(const QuickHullVertex* minVertex,const QuickHullVertex* maxVertex, const float tolerance,const float planeTolerance);

		// main entry function to build the hull from provided points
		QuickHullResult::Enum buildHull();

		// returns the largest edge/vertex count found on any visible hull face
		PxU32 maxNumVertsPerFace() const;

	protected:
		// compute min max verts
		void computeMinMaxVerts();

		// find the initial simplex
		bool findSimplex();

		// add the initial simplex
		void addSimplex(QuickHullVertex* simplex, bool flipTriangle);

		// finds next point to add
		QuickHullVertex* nextPointToAdd(QuickHullFace*& eyeFace);

		// adds point to the hull
		bool addPointToHull(const QuickHullVertex* vertex, QuickHullFace& face);

		// creates new face from given triangles
		QuickHullFace* createTriangle(const QuickHullVertex& v0, const QuickHullVertex& v1, const QuickHullVertex& v2);

		// adds point to the face conflict list
		void addPointToFace(QuickHullFace& face, QuickHullVertex* vertex, float dist);

		// removes eye point from the face conflict list
		void removeEyePointFromFace(QuickHullFace& face, const QuickHullVertex* vertex);

		// calculate the horizon for the eyePoint against a given face
		void calculateHorizon(const PxVec3& eyePoint, QuickHullHalfEdge* edge, QuickHullFace& face, QuickHullHalfEdgeArray& horizon, QuickHullFaceArray& removedFaces);

		// adds new faces from given horizon and eyePoint
		void addNewFacesFromHorizon(const QuickHullVertex* eyePoint, const QuickHullHalfEdgeArray& horizon, QuickHullFaceArray& newFaces);

		// merge adjacent face
		bool doAdjacentMerge(QuickHullFace& face, bool mergeWrtLargeFace);

		// merge adjacent face doing normal test
		bool doPostAdjacentMerge(QuickHullFace& face, const float minAngle);

		// delete face points
		void deleteFacePoints(QuickHullFace& faceToDelete, QuickHullFace* absorbingFace);

		// resolve unclaimed points
		void resolveUnclaimedPoints(const QuickHullFaceArray& newFaces);

		// merges polygons with similar normals
		void postMergeHull();

		// check if 2 faces can be merged
		bool canMergeFaces(const QuickHullHalfEdge& he, float planeTolerance);

		// get next free face from the pool
		PX_FORCE_INLINE QuickHullFace* getFreeHullFace()
		{
			return mFreeFaces.getFreeItem();
		}

		// get next free half edge from the pool
		PX_FORCE_INLINE QuickHullHalfEdge* getFreeHullHalfEdge()
		{
			return mFreeHalfEdges.getFreeItem();
		}

	protected:
		friend class physx::QuickHullConvexHullLib;

		const PxCookingParams& mCookingParams; // cooking params
		const PxConvexMeshDesc& mConvexDesc; // convex desc

		PxVec3 mInteriorPoint; // interior point for int/ext tests

		PxU32 mMaxVertices; // maximum number of vertices (can differ from the input count as we may add vertices during the cleanup)
		PxU32 mNumVertices; // actual number of vertices

		QuickHullVertex* mVerticesList; // vertices list preallocated
		MemBlock<QuickHullHalfEdge, false> mFreeHalfEdges; // free half edges
		MemBlock<QuickHullFace, true> mFreeFaces; // free faces

		QuickHullFaceArray mHullFaces; // actual hull faces, contains also invalid and not used faces
		PxU32 mNumHullFaces; // actual number of hull faces

		bool mPrecomputedMinMax; // if we got the precomputed min/max values
		QuickHullVertex mMinVertex[3]; // extreme vertex with the smallest coordinate, one per axis (x,y,z)
		QuickHullVertex mMaxVertex[3]; // extreme vertex with the largest coordinate, one per axis (x,y,z)
		float mTolerance; // hull tolerance, used for plane thickness and merge strategy
		float mPlaneTolerance; // used for post merge stage

		QuickHullVertexArray mUnclaimedPoints; // holds temp unclaimed points

		QuickHullHalfEdgeArray mHorizon; // array for horizon computation
		QuickHullFaceArray mNewFaces; // new faces created during horizon computation
		QuickHullFaceArray mRemovedFaces; // removed faces during horizon computation
		QuickHullFaceArray mDiscardedFaces; // discarded faces during face merging
	};
+
+ //////////////////////////////////////////////////////////////////////////
+ // return the distance from opposite face
+ float QuickHullHalfEdge::getOppositeFaceDistance() const
+ {
+ PX_ASSERT(face);
+ PX_ASSERT(twin);
+ return face->distanceToPlane(twin->face->centroid);
+ }
+
+ //////////////////////////////////////////////////////////////////////////
	// merge adjacent face from provided half edge - absorbs the face on the
	// other side of hedgeAdj into this face:
	// 1. set new half edges
	// 2. connect the new half edges - check we did not produce redundant triangles, discard them
	// 3. recompute the plane and check consistency
	// discarded faces (the absorbed one plus any skinny triangles dropped while
	// reconnecting) are appended to discardedFaces
	void QuickHullFace::mergeAdjacentFace(QuickHullHalfEdge* hedgeAdj, QuickHullFaceArray& discardedFaces)
	{
		QuickHullFace* oppFace = hedgeAdj->getOppositeFace();

		discardedFaces.pushBack(oppFace);
		oppFace->state = QuickHullFace::eDELETED;

		QuickHullHalfEdge* hedgeOpp = hedgeAdj->twin;

		QuickHullHalfEdge* hedgeAdjPrev = hedgeAdj->prev;
		QuickHullHalfEdge* hedgeAdjNext = hedgeAdj->next;
		QuickHullHalfEdge* hedgeOppPrev = hedgeOpp->prev;
		QuickHullHalfEdge* hedgeOppNext = hedgeOpp->next;

		// check if we are lining up with the face in adjPrev dir
		// (the shared border may span several consecutive edges)
		while (hedgeAdjPrev->getOppositeFace() == oppFace)
		{
			hedgeAdjPrev = hedgeAdjPrev->prev;
			hedgeOppNext = hedgeOppNext->next;
		}

		// check if we are lining up with the face in adjNext dir
		while (hedgeAdjNext->getOppositeFace() == oppFace)
		{
			hedgeOppPrev = hedgeOppPrev->prev;
			hedgeAdjNext = hedgeAdjNext->next;
		}

		QuickHullHalfEdge* hedge;

		// set new face owner for the line up edges - the surviving edges of
		// oppFace now belong to this face
		for (hedge = hedgeOppNext; hedge != hedgeOppPrev->next; hedge = hedge->next)
		{
			hedge->face = this;
		}

		// if we are about to delete the shared edge, check if its not the starting edge of the face
		if (hedgeAdj == edge)
		{
			edge = hedgeAdjNext;
		}

		// handle the half edges at the head
		QuickHullFace* discardedFace;
		discardedFace = connectHalfEdges(hedgeOppPrev, hedgeAdjNext);
		if (discardedFace != NULL)
		{
			discardedFaces.pushBack(discardedFace);
		}

		// handle the half edges at the tail
		discardedFace = connectHalfEdges(hedgeAdjPrev, hedgeOppNext);
		if (discardedFace != NULL)
		{
			discardedFaces.pushBack(discardedFace);
		}

		// the edge loop changed - refresh plane/centroid and sanity check
		computeNormalAndCentroid();
		PX_ASSERT(checkFaceConsistency());
	}
+
+ //////////////////////////////////////////////////////////////////////////
	// connect half edges of 2 adjacent faces
	// if we find redundancy - edges are in a line, we drop the additional face if it is just a skinny triangle
	// returns the dropped face (or NULL) so the caller can record it as discarded
	QuickHullFace* QuickHullFace::connectHalfEdges(QuickHullHalfEdge* hedgePrev, QuickHullHalfEdge* hedge)
	{
		QuickHullFace* discardedFace = NULL;

		// redundant edge - both edges border the same opposite face, i.e. they can be in a line
		if (hedgePrev->getOppositeFace() == hedge->getOppositeFace())
		{
			// then there is a redundant edge that we can get rid off
			QuickHullFace* oppFace = hedge->getOppositeFace();
			QuickHullHalfEdge* hedgeOpp;

			// keep this face's starting edge valid once hedgePrev is unlinked
			if (hedgePrev == edge)
			{
				edge = hedge;
			}

			// check if its not a skinny face with just 3 vertices - 3 edges
			if (oppFace->isTriangle())
			{
				// then we can get rid of the opposite face altogether
				hedgeOpp = hedge->twin->prev->twin;

				oppFace->state = QuickHullFace::eDELETED;
				discardedFace = oppFace;
			}
			else
			{
				// if not triangle, merge the 2 opposite halfedges into one
				hedgeOpp = hedge->twin->next;

				// keep oppFace's starting edge valid after the merge
				if (oppFace->edge == hedgeOpp->prev)
				{
					oppFace->edge = hedgeOpp;
				}
				hedgeOpp->prev = hedgeOpp->prev->prev;
				hedgeOpp->prev->next = hedgeOpp;
			}

			// unlink hedgePrev from this face's loop
			hedge->prev = hedgePrev->prev;
			hedge->prev->next = hedge;

			// re-twin the surviving pair of half edges
			hedge->twin = hedgeOpp;
			hedgeOpp->twin = hedge;

			// oppFace was modified, so need to recompute
			oppFace->computeNormalAndCentroid();
		}
		else
		{
			// just merge the halfedges
			hedgePrev->next = hedge;
			hedge->prev = hedgePrev;
		}
		return discardedFace;
	}
+
+ //////////////////////////////////////////////////////////////////////////
	// check face consistency
	// debug helper: walks the edge loop and asserts the half edge invariants;
	// always returns true so it can be wrapped in PX_ASSERT by the callers
	bool QuickHullFace::checkFaceConsistency()
	{
		// do a sanity check on the face
		QuickHullHalfEdge* hedge = edge;
		PxU32 numv = 0;

		// check degenerate face - count the edges in the loop
		do
		{
			numv++;
			hedge = hedge->next;
		} while (hedge != edge);

		// degenerate face found
		PX_ASSERT(numv > 2);

		numv = 0;
		hedge = edge;
		do
		{
			QuickHullHalfEdge* hedgeOpp = hedge->twin;

			// check if we have twin set
			PX_ASSERT(hedgeOpp != NULL);

			// twin for the twin must be the original edge
			PX_ASSERT(hedgeOpp->twin == hedge);

			QuickHullFace* oppFace = hedgeOpp->face;

			PX_UNUSED(oppFace);

			// opposite edge face must be set and valid
			PX_ASSERT(oppFace != NULL);
			PX_ASSERT(oppFace->state != QuickHullFace::eDELETED);

			// edges face must be this one
			PX_ASSERT(hedge->face == this);

			hedge = hedge->next;
		} while (hedge != edge);

		return true;
	}
+
+ //////////////////////////////////////////////////////////////////////////
+
+ QuickHull::QuickHull(const PxCookingParams& params, const PxConvexMeshDesc& desc)
+ : mCookingParams(params), mConvexDesc(desc), mVerticesList(NULL), mNumHullFaces(0), mPrecomputedMinMax(false),
+ mTolerance(-1.0f), mPlaneTolerance(-1.0f)
+ {
+ }
+
+ //////////////////////////////////////////////////////////////////////////
+
	// destructor: intentionally empty. NOTE(review): mVerticesList is not freed
	// here - the owner is expected to call releaseHull(); verify that every
	// code path does so, otherwise the vertex buffer leaks.
	QuickHull::~QuickHull()
	{
	}
+
+ //////////////////////////////////////////////////////////////////////////
+ // sets the precomputed min/max values
+ void QuickHull::setPrecomputedMinMax(const QuickHullVertex* minVertex,const QuickHullVertex* maxVertex, const float tolerance,const float planeTolerance)
+ {
+ for (PxU32 i = 0; i < 3; i++)
+ {
+ mMinVertex[i] = minVertex[i];
+ mMaxVertex[i] = maxVertex[i];
+ }
+
+ mTolerance = tolerance;
+ mPlaneTolerance = planeTolerance;
+
+ mPrecomputedMinMax = true;
+ }
+
+ //////////////////////////////////////////////////////////////////////////
	// preallocate internal buffers for a hull built from numVertices input points
	void QuickHull::preallocate(PxU32 numVertices)
	{
		PX_ASSERT(numVertices > 0);

		// max num vertices = numVertices
		mMaxVertices = PxMax(PxU32(8), numVertices); // 8 is min, since we can expand to AABB during the clean vertices phase
		mVerticesList = reinterpret_cast<QuickHullVertex*> (PX_ALLOC_TEMP(sizeof(QuickHullVertex)*mMaxVertices, "QuickHullVertex"));

		// estimate the max half edges: 3V-6 edges for a triangulated hull
		// (Euler's formula), times 3 as a safety factor
		PxU32 maxHalfEdges = (3 * mMaxVertices - 6) * 3;
		mFreeHalfEdges.init(maxHalfEdges);

		// estimate the max faces: 2V-4 triangles (Euler's formula); the pool is
		// doubled - presumably to cover extra faces created while merging
		PxU32 maxFaces = (2 * mMaxVertices - 4);
		mFreeFaces.init(maxFaces*2);

		mHullFaces.reserve(maxFaces);
		mUnclaimedPoints.reserve(numVertices);

		// scratch arrays used by the incremental insertion loop
		mNewFaces.reserve(32);
		mRemovedFaces.reserve(32);
		mDiscardedFaces.reserve(32);
		mHorizon.reserve(PxMin(numVertices,PxU32(128)));
	}
+
+ //////////////////////////////////////////////////////////////////////////
+ // release internal buffers
+ void QuickHull::releaseHull()
+ {
+ if (mVerticesList)
+ {
+ PX_FREE_AND_RESET(mVerticesList);
+ }
+ mHullFaces.clear();
+ }
+
+ //////////////////////////////////////////////////////////////////////////
+ // returns the maximum number of vertices on a face
+ PxU32 QuickHull::maxNumVertsPerFace() const
+ {
+ PxU32 numFaces = mHullFaces.size();
+ PxU32 maxVerts = 0;
+ for (PxU32 i = 0; i < numFaces; i++)
+ {
+ const local::QuickHullFace& face = *mHullFaces[i];
+ if (face.state == local::QuickHullFace::eVISIBLE)
+ {
+ if (face.numEdges > maxVerts)
+ maxVerts = face.numEdges;
+ }
+ }
+ return maxVerts;
+ }
+
+ //////////////////////////////////////////////////////////////////////////
+ // parse the input vertices and store them in the hull
+ void QuickHull::parseInputVertices(const PxVec3* verts, PxU32 numVerts)
+ {
+ PX_ASSERT(verts);
+ PX_ASSERT(numVerts <= mMaxVertices);
+
+ mNumVertices = numVerts;
+ for (PxU32 i = 0; i < numVerts; i++)
+ {
+ mVerticesList[i].point = verts[i];
+ mVerticesList[i].index = i;
+ }
+ }
+
+ //////////////////////////////////////////////////////////////////////////
	// compute the per-axis extreme vertices (mMinVertex/mMaxVertex) and derive
	// the plane thickness and post-merge tolerances from the cloud extents
	void QuickHull::computeMinMaxVerts()
	{
		// start with the first vertex as both min and max on every axis
		for (PxU32 i = 0; i < 3; i++)
		{
			mMinVertex[i] = mVerticesList[0];
			mMaxVertex[i] = mVerticesList[0];
		}

		PxVec3 max = mVerticesList[0].point;
		PxVec3 min = mVerticesList[0].point;

		// get the max min vertices along the x,y,z
		// (the per-axis else-if is safe: a point cannot be both above the
		// running max and below the running min)
		for (PxU32 i = 1; i < mNumVertices; i++)
		{
			const QuickHullVertex& testVertex = mVerticesList[i];
			const PxVec3& testPoint = testVertex.point;
			if (testPoint.x > max.x)
			{
				max.x = testPoint.x;
				mMaxVertex[0] = testVertex;
			}
			else if (testPoint.x < min.x)
			{
				min.x = testPoint.x;
				mMinVertex[0] = testVertex;
			}

			if (testPoint.y > max.y)
			{
				max.y = testPoint.y;
				mMaxVertex[1] = testVertex;
			}
			else if (testPoint.y < min.y)
			{
				min.y = testPoint.y;
				mMinVertex[1] = testVertex;
			}

			if (testPoint.z > max.z)
			{
				max.z = testPoint.z;
				mMaxVertex[2] = testVertex;
			}
			else if (testPoint.z < min.z)
			{
				min.z = testPoint.z;
				mMinVertex[2] = testVertex;
			}
		}

		// scale the plane thickness tolerance with the overall magnitude of the
		// cloud, never dropping below the absolute minimum PLANE_THICKNES
		mTolerance = PxMax(local::PLANE_THICKNES * (PxMax(PxAbs(max.x), PxAbs(min.x)) +
													PxMax(PxAbs(max.y), PxAbs(min.y)) +
													PxMax(PxAbs(max.z), PxAbs(min.z))), local::PLANE_THICKNES);
		mPlaneTolerance = local::PLANE_TOLERANCE;
	}
+
+ //////////////////////////////////////////////////////////////////////////
	// find the initial simplex (4 points forming a non-degenerate tetrahedron)
	// 1. search in max axis from computed min,max: the two extreme points along
	//    the axis with the greatest spread form the base line
	// 2. 3rd point is the furthest vertex from the initial line
	// 3. 4th vertex is the furthest from the plane of the first three
	// returns false (with an error report) if the input is degenerate:
	// near-coincident, colinear or coplanar
	bool QuickHull::findSimplex()
	{
		float max = 0;
		PxU32 imax = 0;

		// pick the axis with the largest min/max spread
		for (PxU32 i = 0; i < 3; i++)
		{
			float diff = mMaxVertex[i].point[i] - mMinVertex[i].point[i];
			if (diff > max)
			{
				max = diff;
				imax = i;
			}
		}

		if (max <= mTolerance)
		{
			// should not happen as we clear the vertices before and expand them if they are really close to each other
			Ps::getFoundation().error(PxErrorCode::eINTERNAL_ERROR, __FILE__, __LINE__, "QuickHullConvexHullLib::findSimplex: Simplex input points appers to be almost at the same place");
			return false;
		}

		QuickHullVertex simplex[4];

		// set first two vertices to be those with the greatest
		// one dimensional separation
		simplex[0] = mMaxVertex[imax];
		simplex[1] = mMinVertex[imax];

		// set third vertex to be the vertex farthest from
		// the line between simplex[0] and simplex[1]
		PxVec3 normal;
		float maxDist = 0;
		PxVec3 u01 = (simplex[1].point - simplex[0].point);
		u01.normalize();

		// the squared cross product length measures the distance from the line
		for (PxU32 i = 0; i < mNumVertices; i++)
		{
			const QuickHullVertex& testVert = mVerticesList[i];
			const PxVec3& testPoint = testVert.point;
			const PxVec3 diff = testPoint - simplex[0].point;
			const PxVec3 xprod = u01.cross(diff);
			const float lenSqr = xprod.magnitudeSquared();
			if (lenSqr > maxDist && testVert.index != simplex[0].index && testVert.index != simplex[1].index)
			{
				maxDist = lenSqr;
				simplex[2] = testVert;
				normal = xprod;
			}
		}

		if (PxSqrt(maxDist) <= 100 * mTolerance)
		{
			Ps::getFoundation().error(PxErrorCode::eINTERNAL_ERROR, __FILE__, __LINE__, "QuickHullConvexHullLib::findSimplex: Simplex input points appers to be colinear.");
			return false;
		}
		normal.normalize();

		// set the forth vertex in the normal direction - the vertex with the
		// largest absolute distance from the plane of the first three points
		const float d0 = simplex[2].point.dot(normal);
		maxDist = 0.0f;
		for (PxU32 i = 0; i < mNumVertices; i++)
		{
			const QuickHullVertex& testVert = mVerticesList[i];
			const PxVec3& testPoint = testVert.point;
			const float dist = PxAbs(testPoint.dot(normal) - d0);
			if (dist > maxDist && testVert.index != simplex[0].index &&
				testVert.index != simplex[1].index && testVert.index != simplex[2].index)
			{
				maxDist = dist;
				simplex[3] = testVert;
			}
		}

		if (PxAbs(maxDist) <= 100 * mTolerance)
		{
			Ps::getFoundation().error(PxErrorCode::eINTERNAL_ERROR, __FILE__, __LINE__, "QuickHullConvexHullLib::findSimplex: Simplex input points appers to be coplanar.");
			return false;
		}

		// now create faces from those triangles; the winding is flipped when the
		// 4th point lies on the negative side of the base triangle's plane
		addSimplex(&simplex[0], simplex[3].point.dot(normal) - d0 < 0);

		return true;
	}
+
+ //////////////////////////////////////////////////////////////////////////
+ // create triangle from given vertices, produce new face and connect the half edges
+ QuickHullFace* QuickHull::createTriangle(const QuickHullVertex& v0, const QuickHullVertex& v1, const QuickHullVertex& v2)
+ {
+ QuickHullFace* face = getFreeHullFace();
+
+ QuickHullHalfEdge* he0 = getFreeHullHalfEdge();
+ he0->face = face;
+ he0->tail = v0;
+
+ QuickHullHalfEdge* he1 = getFreeHullHalfEdge();
+ he1->face = face;
+ he1->tail = v1;
+
+ QuickHullHalfEdge* he2 = getFreeHullHalfEdge();
+ he2->face = face;
+ he2->tail = v2;
+
+ he0->prev = he2;
+ he0->next = he1;
+ he1->prev = he0;
+ he1->next = he2;
+ he2->prev = he1;
+ he2->next = he0;
+
+ face->edge = he0;
+ face->nextFace = NULL;
+
+ // compute the normal and offset
+ face->computeNormalAndCentroid();
+ return face;
+ }
+
+
+ //////////////////////////////////////////////////////////////////////////
	// add initial simplex to the quickhull
	// construct triangles from the simplex points and connect them with half edges;
	// flipTriangle selects the winding so that all face normals point away from
	// the interior point. Finally every remaining input vertex is assigned to the
	// conflict list of the face it lies furthest in front of.
	void QuickHull::addSimplex(QuickHullVertex* simplex, bool flipTriangle)
	{
		PX_ASSERT(simplex);

		// get interior point - the centroid of the 4 simplex points
		PxVec3 vectorSum = simplex[0].point;
		for (PxU32 i = 1; i < 4; i++)
		{
			vectorSum += simplex[i].point;
		}
		mInteriorPoint = vectorSum / 4.0f;

		QuickHullFace* tris[4];
		// create the triangles from the initial simplex; tris[0] is the base
		// triangle, tris[1..3] share the apex simplex[3]
		if (flipTriangle)
		{
			tris[0] = createTriangle(simplex[0], simplex[1], simplex[2]);
			tris[1] = createTriangle(simplex[3], simplex[1], simplex[0]);
			tris[2] = createTriangle(simplex[3], simplex[2], simplex[1]);
			tris[3] = createTriangle(simplex[3], simplex[0], simplex[2]);

			// wire the twins: side edges between apex triangles, bottom edges to the base
			for (PxU32 i = 0; i < 3; i++)
			{
				PxU32 k = (i + 1) % 3;
				tris[i + 1]->getEdge(1)->setTwin(tris[k + 1]->getEdge(0));
				tris[i + 1]->getEdge(2)->setTwin(tris[0]->getEdge(k));
			}
		}
		else
		{
			tris[0] = createTriangle(simplex[0], simplex[2], simplex[1]);
			tris[1] = createTriangle(simplex[3], simplex[0], simplex[1]);
			tris[2] = createTriangle(simplex[3], simplex[1], simplex[2]);
			tris[3] = createTriangle(simplex[3], simplex[2], simplex[0]);

			// same wiring with the mirrored winding
			for (PxU32 i = 0; i < 3; i++)
			{
				PxU32 k = (i + 1) % 3;
				tris[i + 1]->getEdge(0)->setTwin(tris[k + 1]->getEdge(1));
				tris[i + 1]->getEdge(2)->setTwin(tris[0]->getEdge((3 - i) % 3));
			}
		}

		// push back the first 4 faces created from the simplex
		for (PxU32 i = 0; i < 4; i++)
		{
			mHullFaces.pushBack(tris[i]);
		}
		mNumHullFaces = 4;

		// go through points and add each one to the conflict list of the face it
		// lies furthest in front of (beyond the thickness tolerance)
		for (PxU32 i = 0; i < mNumVertices; i++)
		{
			const QuickHullVertex& v = mVerticesList[i];

			// simplex corners are already part of the hull
			if (v == simplex[0] || v == simplex[1] || v == simplex[2] || v == simplex[3])
			{
				continue;
			}

			float maxDist = mTolerance;
			QuickHullFace* maxFace = NULL;
			for (PxU32 k = 0; k < 4; k++)
			{
				const float dist = tris[k]->distanceToPlane(v.point);
				if (dist > maxDist)
				{
					maxFace = tris[k];
					maxDist = dist;
				}
			}

			// points inside all 4 planes are interior and simply dropped
			if (maxFace != NULL)
			{
				addPointToFace(*maxFace, &mVerticesList[i], maxDist);
			}
		}
	}
+
+ //////////////////////////////////////////////////////////////////////////
+ // adds a point to the conflict list
+ // the trick here is to store the most furthest point as the last, thats the only one we care about
+ // the rest is not important, we just need to store them and claim to new faces later, if the
+ // faces most furthest point is the current global maximum
+ void QuickHull::addPointToFace(QuickHullFace& face, QuickHullVertex* vertex, float dist)
+ {
+ // if we dont have a conflict list, store the vertex as the first one in the conflict list
+ vertex->dist = dist;
+ if(!face.conflictList)
+ {
+ face.conflictList = vertex;
+ vertex->dist = dist;
+ vertex->next = NULL;
+ return;
+ }
+
+ PX_ASSERT(face.conflictList);
+
+ // this is not the furthest vertex, store it as next in the linked list
+ if (face.conflictList->dist > dist)
+ {
+ vertex->next = face.conflictList->next;
+ face.conflictList->next = vertex;
+ }
+ else
+ {
+ // this is the furthest vertex, store it as first in the linked list
+ vertex->next = face.conflictList;
+ face.conflictList = vertex;
+ }
+ }
+
+ //////////////////////////////////////////////////////////////////////////
	// removes the eye point from a face conflict list
	// the eye point is always the HEAD of the list (addPointToFace keeps the
	// furthest vertex first), so we simply unlink the head
	void QuickHull::removeEyePointFromFace(QuickHullFace& face, const QuickHullVertex* vertex)
	{
		PX_UNUSED(vertex);
		// the picked vertex should always be the first in the linked list
		PX_ASSERT(face.conflictList == vertex);

		face.conflictList = face.conflictList->next;
	}
+
+	//////////////////////////////////////////////////////////////////////////
+	// post-processing merge pass - collapse neighboring polygons whose normals
+	// are nearly parallel (dot product above local::MAXDOT_MINANG)
+	void QuickHull::postMergeHull()
+	{
+		// note: size() is re-read each iteration on purpose
+		for (PxU32 faceIndex = 0; faceIndex < mHullFaces.size(); faceIndex++)
+		{
+			QuickHullFace* candidate = mHullFaces[faceIndex];
+			if (candidate->state != QuickHullFace::eVISIBLE)
+				continue;
+
+			PX_ASSERT(candidate->checkFaceConsistency());
+			// keep merging this face with its neighbors until no merge succeeds
+			while (doPostAdjacentMerge(*candidate, local::MAXDOT_MINANG))
+			{
+			}
+		}
+	}
+
+	//////////////////////////////////////////////////////////////////////////
+	// builds the hull
+	// 1. find the initial simplex
+	// 2. check if simplex has a valid area
+	// 3. add vertices to the hull. We add vertex most furthest from the hull
+	// 4. terminate if hull limit reached or we have added all vertices
+	QuickHullResult::Enum QuickHull::buildHull()
+	{
+		QuickHullVertex* eyeVtx = NULL;
+		QuickHullFace* eyeFace;
+
+		// compute the vertex min max along x,y,z
+		// (skipped when the caller already supplied them via setPrecomputedMinMax)
+		if(!mPrecomputedMinMax)
+			computeMinMaxVerts();
+
+		// find the initial simplex of the hull
+		if (!findSimplex())
+		{
+			return QuickHullResult::eFAILURE;
+		}
+
+		// simplex area test - bail out early if any simplex face is degenerate
+		const bool useAreaTest = mConvexDesc.flags & PxConvexFlag::eCHECK_ZERO_AREA_TRIANGLES ? true : false;
+		const float areaEpsilon = mCookingParams.areaTestEpsilon * 2.0f;
+		if (useAreaTest)
+		{
+			for (PxU32 i = 0; i < mHullFaces.size(); i++)
+			{
+				if (mHullFaces[i]->area < areaEpsilon)
+				{
+					return QuickHullResult::eZERO_AREA_TEST_FAILED;
+				}
+			}
+		}
+
+		// add points to the hull, always picking the point furthest from the current hull
+		PxU32 numVerts = 4; // initial vertex count - simplex vertices
+		while ((eyeVtx = nextPointToAdd(eyeFace)) != NULL)
+		{
+			// if plane shifting vertex limit, we need the reduced hull
+			if((mConvexDesc.flags & PxConvexFlag::ePLANE_SHIFTING) && (numVerts >= mConvexDesc.vertexLimit))
+				break;
+
+			PX_ASSERT(eyeFace);
+			if (!addPointToHull(eyeVtx, *eyeFace))
+			{
+				// we hit the polygons hard limit
+				return QuickHullResult::ePOLYGONS_LIMIT_REACHED;
+			}
+			numVerts++;
+		}
+
+		// vertex limit has been reached. We did not stop the iteration, since we
+		// will use the produced hull to compute OBB from it and use the planes
+		// to slice the initial OBB
+		if (numVerts >= mConvexDesc.vertexLimit)
+		{
+			return QuickHullResult::eVERTEX_LIMIT_REACHED;
+		}
+
+		return QuickHullResult::eSUCCESS;
+	}
+
+	//////////////////////////////////////////////////////////////////////////
+	// finds the best point to add to the hull
+	// scans the conflict list heads of all visible faces (each head is that face's
+	// furthest point) and returns the global maximum, or NULL if none exceeds the
+	// acceptance threshold
+	QuickHullVertex* QuickHull::nextPointToAdd(QuickHullFace*& eyeFace)
+	{
+		float bestDist = PxMax(mTolerance*ACCEPTANCE_EPSILON_MULTIPLY, mPlaneTolerance);
+		QuickHullVertex* bestVertex = NULL;
+		QuickHullFace* bestFace = NULL;
+
+		for (PxU32 faceIndex = 0; faceIndex < mHullFaces.size(); faceIndex++)
+		{
+			QuickHullFace* candidate = mHullFaces[faceIndex];
+			if (candidate->state != QuickHullFace::eVISIBLE || !candidate->conflictList)
+				continue;
+
+			// the head of the conflict list is the face's furthest point
+			const float dist = candidate->conflictList->dist;
+			if (dist > bestDist)
+			{
+				bestDist = dist;
+				bestVertex = candidate->conflictList;
+				bestFace = candidate;
+			}
+		}
+
+		eyeFace = bestFace;
+		return bestVertex;
+	}
+
+	//////////////////////////////////////////////////////////////////////////
+	// adds vertex to the hull
+	// returns false if the new faces count would hit the hull face hard limit (255)
+	bool QuickHull::addPointToHull(const QuickHullVertex* eyeVtx, QuickHullFace& eyeFace)
+	{
+		// removes the eyePoint from the conflict list
+		removeEyePointFromFace(eyeFace, eyeVtx);
+
+		// calculates the horizon from the eyePoint - fills mHorizon with the
+		// boundary edges and mRemovedFaces with the faces visible from the eyePoint
+		calculateHorizon(eyeVtx->point, NULL, eyeFace, mHorizon, mRemovedFaces);
+
+		// check if we dont hit the polygons hard limit
+		if (mNumHullFaces + mHorizon.size() > 255)
+		{
+			// make the faces visible again and quit - the hull is rolled back
+			// to the state it had before this point was attempted
+			for (PxU32 i = 0; i < mRemovedFaces.size(); i++)
+			{
+				mRemovedFaces[i]->state = QuickHullFace::eVISIBLE;
+			}
+			mNumHullFaces += mRemovedFaces.size();
+			return false;
+		}
+
+		// adds new faces from given horizon and eyePoint
+		addNewFacesFromHorizon(eyeVtx, mHorizon, mNewFaces);
+
+		// first merge pass ... merge faces which are non-convex
+		// as determined by the larger face
+		for (PxU32 i = 0; i < mNewFaces.size(); i++)
+		{
+			QuickHullFace& face = *mNewFaces[i];
+
+			if (face.state == QuickHullFace::eVISIBLE)
+			{
+				PX_ASSERT(face.checkFaceConsistency());
+				while (doAdjacentMerge(face, true));
+			}
+		}
+
+		// second merge pass ... merge faces which are non-convex
+		// wrt either face
+		for (PxU32 i = 0; i < mNewFaces.size(); i++)
+		{
+			QuickHullFace& face = *mNewFaces[i];
+			if (face.state == QuickHullFace::eNON_CONVEX)
+			{
+				face.state = QuickHullFace::eVISIBLE;
+				while (doAdjacentMerge(face, false));
+			}
+		}
+
+		// reassign the points orphaned by the removed faces to the new faces
+		resolveUnclaimedPoints(mNewFaces);
+
+		// reset the scratch arrays for the next iteration
+		mHorizon.clear();
+		mNewFaces.clear();
+		mRemovedFaces.clear();
+
+		return true;
+	}
+
+	//////////////////////////////////////////////////////////////////////////
+	// merge adjacent faces
+	// We merge 2 adjacent faces if they lie on the same thick plane defined by the mTolerance
+	// we do this in 2 steps to ensure we dont leave non-convex faces
+	// returns true if a merge was performed (callers loop until false)
+	bool QuickHull::doAdjacentMerge(QuickHullFace& face, bool mergeWrtLargeFace)
+	{
+		QuickHullHalfEdge* hedge = face.edge;
+
+		bool convex = true;
+		// walk the half-edge ring of the face and test each neighbor face
+		do
+		{
+			const QuickHullFace& oppFace = *hedge->getOppositeFace();
+			bool merge = false;
+
+			if (mergeWrtLargeFace)
+			{
+				// merge faces if they are parallel or non-convex
+				// wrt to the larger face; otherwise, just mark
+				// the face non-convex for the second pass.
+				if (face.area > oppFace.area)
+				{
+					if (hedge->getOppositeFaceDistance() > -mTolerance)
+					{
+						merge = true;
+					}
+					else if (hedge->twin->getOppositeFaceDistance() > -mTolerance)
+					{
+						convex = false;
+					}
+				}
+				else
+				{
+					if (hedge->twin->getOppositeFaceDistance() > -mTolerance)
+					{
+						merge = true;
+					}
+					else if (hedge->getOppositeFaceDistance() > -mTolerance)
+					{
+						convex = false;
+					}
+				}
+			}
+			else
+			{
+				// then merge faces if they are definitively non-convex
+				if (hedge->getOppositeFaceDistance() > -mTolerance ||
+					hedge->twin->getOppositeFaceDistance() > -mTolerance)
+				{
+					merge = true;
+				}
+			}
+
+			if (merge)
+			{
+				// absorb the neighbor; faces discarded by the merge hand their
+				// conflict points over to the surviving face
+				mDiscardedFaces.clear();
+				face.mergeAdjacentFace(hedge, mDiscardedFaces);
+				mNumHullFaces -= mDiscardedFaces.size();
+				for (PxU32 i = 0; i < mDiscardedFaces.size(); i++)
+				{
+					deleteFacePoints(*mDiscardedFaces[i], &face);
+				}
+				PX_ASSERT(face.checkFaceConsistency());
+				return true;
+			}
+			hedge = hedge->next;
+		} while (hedge != face.edge);
+
+		// no merge happened - remember the non-convexity for the second pass
+		if (!convex)
+		{
+			face.state = QuickHullFace::eNON_CONVEX;
+		}
+		return false;
+	}
+
+	//////////////////////////////////////////////////////////////////////////
+	// merge adjacent faces doing normal test
+	// we try to merge more aggressively 2 faces with the same normal.
+	// We use bigger tolerance for the plane thickness in the end - mPlaneTolerance.
+	// returns true if a merge was performed (callers loop until false)
+	bool QuickHull::doPostAdjacentMerge(QuickHullFace& face, const float maxdot_minang)
+	{
+		QuickHullHalfEdge* hedge = face.edge;
+
+		// walk the half-edge ring and test each neighbor's normal against ours
+		do
+		{
+			const QuickHullFace& oppFace = *hedge->getOppositeFace();
+			bool merge = false;
+			const PxVec3& ni = face.normal;
+			const PxVec3& nj = oppFace.normal;
+			const float dotP = ni.dot(nj);
+
+			// normals are close enough to parallel
+			if (dotP > maxdot_minang)
+			{
+				// only the larger face absorbs the smaller one
+				if (face.area > oppFace.area)
+				{
+					// check if we can merge the 2 faces
+					merge = canMergeFaces(*hedge, mPlaneTolerance);
+				}
+			}
+
+			if (merge)
+			{
+				// note: local array here (unlike doAdjacentMerge, which reuses mDiscardedFaces)
+				QuickHullFaceArray discardedFaces;
+				face.mergeAdjacentFace(hedge, discardedFaces);
+				mNumHullFaces -= discardedFaces.size();
+				for (PxU32 i = 0; i < discardedFaces.size(); i++)
+				{
+					deleteFacePoints(*discardedFaces[i], &face);
+				}
+				PX_ASSERT(face.checkFaceConsistency());
+				return true;
+			}
+			hedge = hedge->next;
+		} while (hedge != face.edge);
+
+		return false;
+	}
+
+	//////////////////////////////////////////////////////////////////////////
+	// checks if 2 adjacent faces can be merged
+	// 1. creates a face with merged vertices
+	// 2. computes new normal and centroid
+	// 3. checks that all verts are not too far away from the plane
+	// 4. checks that the new polygon is still convex
+	// 5. checks if we are about to merge only 2 neighbor faces, we dont
+	//	  want to merge additional faces, that might corrupt the convexity
+	bool QuickHull::canMergeFaces(const QuickHullHalfEdge& he, float planeTolerance)
+	{
+		const QuickHullFace& face1 = *he.face;
+		const QuickHullFace& face2 = *he.twin->face;
+
+		// construct the merged face on the stack (freed automatically on return)
+		PX_ALLOCA(edges, QuickHullHalfEdge, (face1.numEdges + face2.numEdges));
+		PxMemSet(edges, 0, (face1.numEdges + face2.numEdges)*sizeof(QuickHullHalfEdge));
+		QuickHullFace mergedFace;
+		mergedFace.edge = &edges[0];
+
+		// copy the first face edges - all of face1's ring except 'he' itself
+		PxU32 currentEdge = 0;
+		QuickHullHalfEdge* copyHe = he.next;
+		while (copyHe != &he)
+		{
+			edges[currentEdge].face = &mergedFace;
+			edges[currentEdge].tail = copyHe->tail;
+			edges[currentEdge].next = &edges.mPointer[currentEdge + 1];
+
+			currentEdge++;
+			copyHe = copyHe->next;
+		}
+
+		// copy the second face edges - all of face2's ring except 'he.twin'
+		copyHe = he.twin->next;
+		while (copyHe != he.twin)
+		{
+			edges[currentEdge].face = &mergedFace;
+			edges[currentEdge].tail = copyHe->tail;
+			edges[currentEdge].next = &edges.mPointer[currentEdge + 1];
+
+			currentEdge++;
+			copyHe = copyHe->next;
+		}
+		// close the ring - the last copied edge links back to the first
+		edges[--currentEdge].next = &edges.mPointer[0];
+
+		// compute normal and centroid
+		mergedFace.computeNormalAndCentroid();
+
+		// test the vertex distance - every merged vertex must lie within
+		// planeTolerance of the new plane
+		QuickHullHalfEdge* qhe = mergedFace.edge;
+		do
+		{
+			const QuickHullVertex& vertex = qhe->tail;
+			const float dist = mergedFace.distanceToPlane(vertex.point);
+			if (dist > planeTolerance)
+			{
+				return false;
+			}
+			qhe = qhe->next;
+		} while (qhe != mergedFace.edge);
+
+		// check the convexity - for each edge, no other vertex of the merged
+		// polygon may lie on the outside of that edge (beyond mTolerance)
+		qhe = mergedFace.edge;
+		do
+		{
+			const QuickHullVertex& vertex = qhe->tail;
+			const QuickHullVertex& nextVertex = qhe->next->tail;
+
+			PxVec3 edgeVector = nextVertex.point - vertex.point;
+			edgeVector.normalize();
+			// outward-facing direction of this edge within the merged face plane
+			const PxVec3 outVector = -mergedFace.normal.cross(edgeVector);
+
+			QuickHullHalfEdge* testHe = qhe->next;
+			do
+			{
+				const QuickHullVertex& testVertex = testHe->tail;
+				const float dist = (testVertex.point - vertex.point).dot(outVector);
+
+				if (dist > mTolerance)
+					return false;
+
+				testHe = testHe->next;
+			} while (testHe != qhe->next);
+
+			qhe = qhe->next;
+		} while (qhe != mergedFace.edge);
+
+
+		// topology check: make sure the two faces share only a single run of edges,
+		// otherwise the merge would implicitly absorb additional faces
+		const QuickHullFace* oppFace = he.getOppositeFace();
+
+		QuickHullHalfEdge* hedgeOpp = he.twin;
+
+		QuickHullHalfEdge* hedgeAdjPrev = he.prev;
+		QuickHullHalfEdge* hedgeAdjNext = he.next;
+		QuickHullHalfEdge* hedgeOppPrev = hedgeOpp->prev;
+		QuickHullHalfEdge* hedgeOppNext = hedgeOpp->next;
+
+		// check if we are lining up with the face in adjPrev dir
+		while (hedgeAdjPrev->getOppositeFace() == oppFace)
+		{
+			hedgeAdjPrev = hedgeAdjPrev->prev;
+			hedgeOppNext = hedgeOppNext->next;
+		}
+
+		// check if we are lining up with the face in adjNext dir
+		while (hedgeAdjNext->getOppositeFace() == oppFace)
+		{
+			hedgeOppPrev = hedgeOppPrev->prev;
+			hedgeAdjNext = hedgeAdjNext->next;
+		}
+
+		// no redundant merges, just clean merge of 2 neighbour faces
+		if (hedgeOppPrev->getOppositeFace() == hedgeAdjNext->getOppositeFace())
+		{
+			return false;
+		}
+
+		if (hedgeAdjPrev->getOppositeFace() == hedgeOppNext->getOppositeFace())
+		{
+			return false;
+		}
+
+		return true;
+	}
+
+	//////////////////////////////////////////////////////////////////////////
+	// releases the conflict list of a face
+	// each point is either handed over to the absorbing face (when it is still
+	// outside of it) or stored in mUnclaimedPoints for later resolution
+	void QuickHull::deleteFacePoints(QuickHullFace& face, QuickHullFace* absorbingFace)
+	{
+		// nothing to do without a conflict list
+		if(!face.conflictList)
+			return;
+
+		QuickHullVertex* current = face.conflictList;
+		while (current)
+		{
+			// detach the vertex from the list before handing it over
+			QuickHullVertex* claimed = current;
+			current = current->next;
+			claimed->next = NULL;
+
+			if (absorbingFace)
+			{
+				const float dist = absorbingFace->distanceToPlane(claimed->point);
+				if (dist > mTolerance)
+				{
+					// still outside the absorbing face - claim it there
+					addPointToFace(*absorbingFace, claimed, dist);
+					continue;
+				}
+			}
+			// no absorbing face, or the point lies within its tolerance - keep it unclaimed
+			mUnclaimedPoints.pushBack(claimed);
+		}
+
+		face.conflictList = NULL;
+	}
+
+	//////////////////////////////////////////////////////////////////////////
+	// calculate the horizon from the eyePoint against a given face
+	// recursive flood fill over the faces visible from the eyePoint: visible faces
+	// are marked eDELETED (their conflict points go to mUnclaimedPoints) and
+	// collected in removedFaces; edges crossing to faces that cannot see the
+	// eyePoint form the horizon
+	// NOTE(review): recursion depth is bounded by the number of visible faces -
+	// presumed fine for the 255-face hard limit, confirm stack budget on consoles
+	void QuickHull::calculateHorizon(const PxVec3& eyePoint, QuickHullHalfEdge* edge0, QuickHullFace& face, QuickHullHalfEdgeArray& horizon, QuickHullFaceArray& removedFaces)
+	{
+		// the face sees the eyePoint - remove it from the hull
+		deleteFacePoints(face, NULL);
+		face.state = QuickHullFace::eDELETED;
+		removedFaces.pushBack(&face);
+		mNumHullFaces--;
+		QuickHullHalfEdge* edge;
+		if (edge0 == NULL)
+		{
+			// initial call - start the walk at the face's first edge
+			edge0 = face.getEdge(0);
+			edge = edge0;
+		}
+		else
+		{
+			// recursive call - we entered through edge0, continue past it
+			edge = edge0->next;
+		}
+
+		do
+		{
+			QuickHullFace* oppFace = edge->getOppositeFace();
+			if (oppFace->state == QuickHullFace::eVISIBLE)
+			{
+				const float dist = oppFace->distanceToPlane(eyePoint);
+				if (dist > mTolerance)
+				{
+					// neighbor also sees the eyePoint - recurse into it
+					calculateHorizon(eyePoint, edge->twin, *oppFace, horizon, removedFaces);
+				}
+				else
+				{
+					// neighbor cannot see the eyePoint - this edge is on the horizon
+					horizon.pushBack(edge);
+				}
+			}
+			edge = edge->next;
+		} while (edge != edge0);
+	}
+
+	//////////////////////////////////////////////////////////////////////////
+	// adds new faces from given horizon and eyePoint
+	// creates one triangle per horizon edge (eyePoint + edge endpoints) and stitches
+	// the side edges of consecutive triangles together into a closed fan
+	// NOTE(review): assumes the horizon is never empty - otherwise hedgeSideBegin
+	// stays NULL and the final setTwin call would dereference it; confirm callers
+	void QuickHull::addNewFacesFromHorizon(const QuickHullVertex* eyePoint, const QuickHullHalfEdgeArray& horizon, QuickHullFaceArray& newFaces)
+	{
+		QuickHullHalfEdge* hedgeSidePrev = NULL;
+		QuickHullHalfEdge* hedgeSideBegin = NULL;
+
+		for (PxU32 i = 0; i < horizon.size(); i++)
+		{
+			const QuickHullHalfEdge& horizonHe = *horizon[i];
+
+			QuickHullFace* face = createTriangle(*eyePoint, horizonHe.getHead(), horizonHe.getTail());
+			mHullFaces.pushBack(face);
+			mNumHullFaces++;
+			// edge 2 lies on the horizon - connect it to the surviving face outside
+			face->getEdge(2)->setTwin(horizonHe.twin);
+
+			// connect the side edge to the previous triangle of the fan
+			QuickHullHalfEdge* hedgeSide = face->edge;
+			if (hedgeSidePrev != NULL)
+			{
+				hedgeSide->next->setTwin(hedgeSidePrev);
+			}
+			else
+			{
+				hedgeSideBegin = hedgeSide;
+			}
+			newFaces.pushBack(face);
+			hedgeSidePrev = hedgeSide;
+		}
+		// close the fan - connect the first triangle to the last one
+		hedgeSideBegin->next->setTwin(hedgeSidePrev);
+	}
+
+	//////////////////////////////////////////////////////////////////////////
+	// resolve unclaimed points
+	// reassigns every point orphaned by removed faces to the new face it is
+	// furthest outside of; points inside all new faces are simply dropped
+	void QuickHull::resolveUnclaimedPoints(const QuickHullFaceArray& newFaces)
+	{
+		for (PxU32 v = 0; v < mUnclaimedPoints.size(); v++)
+		{
+			QuickHullVertex* unclaimed = mUnclaimedPoints[v];
+
+			// find the visible new face this point is furthest outside of
+			float bestDist = mTolerance;
+			QuickHullFace* bestFace = NULL;
+			for (PxU32 f = 0; f < newFaces.size(); f++)
+			{
+				QuickHullFace* candidate = newFaces[f];
+				if (candidate->state != QuickHullFace::eVISIBLE)
+					continue;
+
+				const float dist = candidate->distanceToPlane(unclaimed->point);
+				if (dist > bestDist)
+				{
+					bestDist = dist;
+					bestFace = candidate;
+				}
+			}
+
+			// no face found means the point is now inside the hull - discard it
+			if (bestFace != NULL)
+			{
+				addPointToFace(*bestFace, unclaimed, bestDist);
+			}
+		}
+
+		mUnclaimedPoints.clear();
+	}
+
+	//////////////////////////////////////////////////////////////////////////
+	// helper struct for hull expand point
+	// an expand point is defined by the 3 planes whose intersection produces it
+	struct ExpandPoint
+	{
+		PxPlane plane[3];		// the 3 planes that will give us the point
+		PxU32 planeIndex[3];	// index of the planes for identification
+
+		// two expand points are identical when they reference the same plane triple
+		// (getExpandPoint stores the indices sorted, so memberwise compare suffices)
+		bool operator==(const ExpandPoint& other) const
+		{
+			return other.planeIndex[0] == planeIndex[0] &&
+				other.planeIndex[1] == planeIndex[1] &&
+				other.planeIndex[2] == planeIndex[2];
+		}
+	};
+
+	//////////////////////////////////////////////////////////////////////////
+	// gets the half edge neighbors and forms the expand point
+	// the three planes are: the edge's own face, its twin's face and the next
+	// edge's twin face; the indices are stored sorted ascending so that expand
+	// points can be compared reliably
+	void getExpandPoint(const QuickHullHalfEdge& he, ExpandPoint& expandPoint, const Ps::Array<PxU32>* translationTable = NULL)
+	{
+		// gather the three face indices, remapped through the translation table if provided
+		PxU32 i0 = he.face->index;
+		PxU32 i1 = he.twin->face->index;
+		PxU32 i2 = he.next->twin->face->index;
+		if (translationTable)
+		{
+			i0 = (*translationTable)[i0];
+			i1 = (*translationTable)[i1];
+			i2 = (*translationTable)[i2];
+		}
+
+		// sort the three indices ascending (3-element sorting network)
+		PxU32 tmp;
+		if (i1 < i0) { tmp = i0; i0 = i1; i1 = tmp; }
+		if (i2 < i1) { tmp = i1; i1 = i2; i2 = tmp; }
+		if (i1 < i0) { tmp = i0; i0 = i1; i1 = tmp; }
+
+		expandPoint.planeIndex[0] = i0;
+		expandPoint.planeIndex[1] = i1;
+		expandPoint.planeIndex[2] = i2;
+	}
+
+	//////////////////////////////////////////////////////////////////////////
+	// adds the expand point unless an equivalent point is already stored
+	void addExpandPoint(const ExpandPoint& expandPoint, Ps::Array<ExpandPoint>& expandPoints)
+	{
+		const PxU32 numPoints = expandPoints.size();
+		for (PxU32 i = 0; i < numPoints; i++)
+		{
+			// duplicate found - nothing to do
+			if (expandPoints[i] == expandPoint)
+				return;
+		}
+
+		expandPoints.pushBack(expandPoint);
+	}
+
+	//////////////////////////////////////////////////////////////////////////
+	// helper for 3 planes intersection
+	// solves M * x = -d, where the rows of M are the plane normals and d holds
+	// the plane offsets
+	static PxVec3 threePlaneIntersection(const PxPlane &p0, const PxPlane &p1, const PxPlane &p2)
+	{
+		const PxMat33 normalMatrix = PxMat33(p0.n, p1.n, p2.n).getTranspose();
+		const PxVec3 dVector(p0.d, p1.d, p2.d);
+		return -(normalMatrix.getInverse().transform(dVector));
+	}
+}
+
+//////////////////////////////////////////////////////////////////////////
+// constructor - creates the internal QuickHull instance and preallocates its
+// buffers for the input vertex count; output buffer pointers start as NULL
+QuickHullConvexHullLib::QuickHullConvexHullLib(const PxConvexMeshDesc& desc, const PxCookingParams& params)
+	: ConvexHullLib(desc, params),mQuickHull(NULL), mCropedConvexHull(NULL), mVertsOut(NULL), mIndicesOut(NULL), mPolygonsOut(NULL)
+{
+	mQuickHull = PX_NEW_TEMP(local::QuickHull)(params, desc);
+	mQuickHull->preallocate(desc.points.count);
+}
+
+//////////////////////////////////////////////////////////////////////////
+// destructor - releases the quickhull data, the optional cropped hull and
+// the output buffers handed out through the mesh descriptor
+QuickHullConvexHullLib::~QuickHullConvexHullLib()
+{
+	mQuickHull->releaseHull();
+	PX_DELETE(mQuickHull);
+
+	// only present when expandHullOBB was used
+	if(mCropedConvexHull)
+	{
+		PX_DELETE(mCropedConvexHull);
+	}
+
+	// output buffers (may still be NULL if no descriptor was filled)
+	PX_FREE(mVertsOut);
+	PX_FREE(mPolygonsOut);
+	PX_FREE(mIndicesOut);
+}
+
+//////////////////////////////////////////////////////////////////////////
+// create the hull
+// 1. clean the input vertices
+// 2. check we can construct the simplex, if not expand the input verts
+// 3. prepare the quickhull - preallocate, parse input verts
+// 4. construct the hull
+// 5. post merge faces if limit not reached
+// 6. if limit reached, expand the hull
+PxConvexMeshCookingResult::Enum QuickHullConvexHullLib::createConvexHull()
+{
+	PxConvexMeshCookingResult::Enum res = PxConvexMeshCookingResult::eFAILURE;
+
+	// allocate at least 8 entries - presumably cleanupVertices can output up to
+	// 8 vertices (e.g. AABB corners) for tiny inputs; TODO confirm
+	PxU32 vcount = mConvexMeshDesc.points.count;
+	if ( vcount < 8 )
+		vcount = 8;
+
+	PxVec3* outvsource  = reinterpret_cast<PxVec3*> (PX_ALLOC_TEMP( sizeof(PxVec3)*vcount, "PxVec3"));
+	PxVec3 scale;
+	PxVec3 center;
+	PxU32 outvcount;
+
+	// cleanup the vertices first
+	if(!cleanupVertices(mConvexMeshDesc.points.count, reinterpret_cast<const PxVec3*> (mConvexMeshDesc.points.data), mConvexMeshDesc.points.stride,
+		outvcount, outvsource, scale, center ))
+	{
+		PX_FREE(outvsource);
+		return res;
+	}
+
+	// scale vertices back to their original size.
+	// move the vertices to the origin
+	for (PxU32 i=0; i< outvcount; i++)
+	{
+		PxVec3& v = outvsource[i];
+		v.multiply(scale);
+	}
+
+	// fix colinear/coplanar inputs; when nothing had to be moved, the computed
+	// min/max verts and tolerances can be reused by the quickhull directly
+	local::QuickHullVertex minimumVertex[3];
+	local::QuickHullVertex maximumVertex[3];
+	float tolerance;
+	float planeTolerance;
+	bool canReuse = cleanupForSimplex(outvsource, outvcount, &minimumVertex[0], &maximumVertex[0], tolerance, planeTolerance);
+
+	mQuickHull->parseInputVertices(outvsource,outvcount);
+
+	if(canReuse)
+	{
+		mQuickHull->setPrecomputedMinMax(minimumVertex, maximumVertex, tolerance, planeTolerance);
+	}
+
+	local::QuickHullResult::Enum qhRes = mQuickHull->buildHull();
+
+	// translate the quickhull result into the cooking result; on vertex limit
+	// the hull is expanded with one of the two fallback strategies
+	switch(qhRes)
+	{
+	case local::QuickHullResult::eZERO_AREA_TEST_FAILED:
+		res = PxConvexMeshCookingResult::eZERO_AREA_TEST_FAILED;
+		break;
+	case local::QuickHullResult::eSUCCESS:
+		mQuickHull->postMergeHull();
+		res = PxConvexMeshCookingResult::eSUCCESS;
+		break;
+	case local::QuickHullResult::ePOLYGONS_LIMIT_REACHED:
+		res = PxConvexMeshCookingResult::ePOLYGONS_LIMIT_REACHED;
+		break;
+	case local::QuickHullResult::eVERTEX_LIMIT_REACHED:
+		{
+			// expand the hull
+			if(mConvexMeshDesc.flags & PxConvexFlag::ePLANE_SHIFTING)
+				res = expandHull();
+			else
+				res = expandHullOBB();
+		}
+		break;
+	case local::QuickHullResult::eFAILURE:
+		break;
+	};
+
+	// check if we need to build GRB compatible mesh
+	// if hull was cropped we already have a compatible mesh, if not check
+	// the max verts per face
+	if((mConvexMeshDesc.flags & PxConvexFlag::eGPU_COMPATIBLE) && !mCropedConvexHull &&
+		res == PxConvexMeshCookingResult::eSUCCESS)
+	{
+		PX_ASSERT(mQuickHull);
+		// if we hit the vertex per face limit, expand the hull by cropping OBB
+		if(mQuickHull->maxNumVertsPerFace() > gpuMaxVertsPerFace)
+		{
+			res = expandHullOBB();
+		}
+	}
+
+	PX_FREE(outvsource);
+	return res;
+}
+
+//////////////////////////////////////////////////////////////////////////
+// fixup the input vertices to be not colinear or coplanar for the initial simplex find
+// computes per-axis min/max vertices and tolerances as a side effect; returns true
+// when no vertex had to be moved, so those precomputed values can be reused
+bool QuickHullConvexHullLib::cleanupForSimplex(PxVec3* vertices, PxU32 vertexCount, local::QuickHullVertex* minimumVertex,
+	local::QuickHullVertex* maximumVertex, float& tolerance, float& planeTolerance)
+{
+	bool retVal = true;
+
+	// initialize min/max trackers with the first vertex
+	for (PxU32 i = 0; i < 3; i++)
+	{
+		minimumVertex[i].point = vertices[0];
+		minimumVertex[i].index = 0;
+		maximumVertex[i].point = vertices[0];
+		maximumVertex[i].index = 0;
+
+	}
+
+	PxVec3 max = vertices[0];
+	PxVec3 min = vertices[0];
+
+	// get the max min vertices along the x,y,z
+	for (PxU32 i = 1; i < vertexCount; i++)
+	{
+		const PxVec3& testPoint = vertices[i];
+		if (testPoint.x > max.x)
+		{
+			max.x = testPoint.x;
+			maximumVertex[0].point = testPoint;
+			maximumVertex[0].index = i;
+		}
+		else if (testPoint.x < min.x)
+		{
+			min.x = testPoint.x;
+			minimumVertex[0].point = testPoint;
+			minimumVertex[0].index = i;
+		}
+
+		if (testPoint.y > max.y)
+		{
+			max.y = testPoint.y;
+			maximumVertex[1].point = testPoint;
+			maximumVertex[1].index = i;
+		}
+		else if (testPoint.y < min.y)
+		{
+			min.y = testPoint.y;
+			minimumVertex[1].point = testPoint;
+			minimumVertex[1].index = i;
+		}
+
+		if (testPoint.z > max.z)
+		{
+			max.z = testPoint.z;
+			maximumVertex[2].point = testPoint;
+			maximumVertex[2].index = i;
+		}
+		else if (testPoint.z < min.z)
+		{
+			min.z = testPoint.z;
+			minimumVertex[2].point = testPoint;
+			minimumVertex[2].index = i;
+		}
+	}
+
+	// tolerance scales with the overall extents of the input cloud
+	tolerance = PxMax(local::PLANE_THICKNES * (PxMax(PxAbs(max.x), PxAbs(min.x)) +
+		PxMax(PxAbs(max.y), PxAbs(min.y)) +
+		PxMax(PxAbs(max.z), PxAbs(min.z))), local::PLANE_THICKNES);
+
+	planeTolerance = local::PLANE_TOLERANCE;
+
+	// pick the axis with the greatest one dimensional separation
+	float fmax = 0;
+	PxU32 imax = 0;
+
+	for (PxU32 i = 0; i < 3; i++)
+	{
+		float diff = (maximumVertex[i].point)[i] - (minimumVertex[i].point)[i];
+		if (diff > fmax)
+		{
+			fmax = diff;
+			imax = i;
+		}
+	}
+
+	PxVec3 simplex[4];
+
+	// set first two vertices to be those with the greatest
+	// one dimensional separation
+	simplex[0] = maximumVertex[imax].point;
+	simplex[1] = minimumVertex[imax].point;
+
+	// set third vertex to be the vertex farthest from
+	// the line between simplex[0] and simplex[1]
+	PxVec3 normal;
+	float maxDist = 0;
+	imax = 0;
+	PxVec3 u01 = (simplex[1] - simplex[0]);
+	u01.normalize();
+
+	for (PxU32 i = 0; i < vertexCount; i++)
+	{
+		const PxVec3& testPoint = vertices[i];
+		const PxVec3 diff = testPoint - simplex[0];
+		const PxVec3 xprod = u01.cross(diff);
+		// squared distance from the line (u01 is unit length)
+		const float lenSqr = xprod.magnitudeSquared();
+		if (lenSqr > maxDist)
+		{
+			maxDist = lenSqr;
+			simplex[2] = testPoint;
+			normal = xprod;
+			imax = i;
+		}
+	}
+
+	if (PxSqrt(maxDist) <= 100 * tolerance)
+	{
+		// points are collinear, we have to move the point further
+		// push the farthest point away from the line, perpendicular to it
+		PxVec3 u02 = simplex[2] - simplex[0];
+		float fT = u02.dot(u01);
+		const float sqrLen = u01.magnitudeSquared();
+		fT /= sqrLen;
+		PxVec3 n = u02 - fT*u01;
+		n.normalize();
+		const PxVec3 mP = simplex[2] + n * 100.0f * tolerance;
+		simplex[2] = mP;
+		// the input vertex itself is modified - precomputed data cannot be reused
+		vertices[imax] = mP;
+		retVal = false;
+	}
+	normal.normalize();
+
+	// set the forth vertex in the normal direction
+	float d0 = simplex[2].dot(normal);
+	maxDist = 0.0f;
+	imax = 0;
+	for (PxU32 i = 0; i < vertexCount; i++)
+	{
+		const PxVec3& testPoint = vertices[i];
+		float dist = PxAbs(testPoint.dot(normal) - d0);
+		if (dist > maxDist)
+		{
+			maxDist = dist;
+			simplex[3] = testPoint;
+			imax = i;
+		}
+	}
+
+	if (PxAbs(maxDist) <= 100.0f * tolerance)
+	{
+		// points are coplanar - push the farthest point out along the plane normal,
+		// away from the plane on whichever side it already lies
+		float dist = (vertices[imax].dot(normal) - d0);
+		if (dist > 0)
+			vertices[imax] = vertices[imax] + normal * 100.0f * tolerance;
+		else
+			vertices[imax] = vertices[imax] - normal * 100.0f * tolerance;
+		retVal = false;
+	}
+
+	return retVal;
+}
+
+//////////////////////////////////////////////////////////////////////////
+// expand the hull with the from the limited triangles set
+// expand hull will do following steps:
+// 1. get expand points from hull that form the best hull with given vertices
+// 2. expand the planes to have all vertices inside the planes volume
+// 3. compute new points by 3 adjacency planes intersections
+// 4. take those points and create the hull from them
+PxConvexMeshCookingResult::Enum QuickHullConvexHullLib::expandHull()
+{
+	Ps::Array<local::ExpandPoint> expandPoints;
+	expandPoints.reserve(mQuickHull->mNumVertices);
+
+	// go over faces and gather expand points (one per hull corner - duplicates
+	// are filtered by addExpandPoint)
+	for (PxU32 i = 0; i < mQuickHull->mHullFaces.size(); i++)
+	{
+		const local::QuickHullFace& face = *mQuickHull->mHullFaces[i];
+		if(face.state == local::QuickHullFace::eVISIBLE)
+		{
+			local::ExpandPoint expandPoint;
+			local::QuickHullHalfEdge* he = face.edge;
+			local::getExpandPoint(*he, expandPoint);
+			local::addExpandPoint(expandPoint, expandPoints);
+			he = he->next;
+			while (he != face.edge)
+			{
+				local::getExpandPoint(*he, expandPoint);
+				local::addExpandPoint(expandPoint, expandPoints);
+				he = he->next;
+			}
+		}
+	}
+
+
+	// go over the planes now and expand them - each face remembers the largest
+	// positive distance of any input vertex to its plane
+	for(PxU32 iVerts=0;iVerts< mQuickHull->mNumVertices;iVerts++)
+	{
+		const local::QuickHullVertex& vertex = mQuickHull->mVerticesList[iVerts];
+
+		for (PxU32 i = 0; i < mQuickHull->mHullFaces.size(); i++)
+		{
+			local::QuickHullFace& face = *mQuickHull->mHullFaces[i];
+			if(face.state == local::QuickHullFace::eVISIBLE)
+			{
+				const float dist = face.distanceToPlane(vertex.point);
+				if(dist > 0 && dist > face.expandOffset)
+				{
+					face.expandOffset = dist;
+				}
+			}
+		}
+	}
+
+	// fill the expand points planes - each plane is shifted outwards by its
+	// face's expand offset so all input vertices end up inside
+	for(PxU32 i=0;i<expandPoints.size();i++)
+	{
+		local::ExpandPoint& expandPoint = expandPoints[i];
+		for (PxU32 k = 0; k < 3; k++)
+		{
+			const local::QuickHullFace& face = *mQuickHull->mFreeFaces.getItem(expandPoint.planeIndex[k]);
+			PX_ASSERT(face.index == expandPoint.planeIndex[k]);
+			PxPlane plane;
+			plane.n = face.normal;
+			plane.d = -face.planeOffset;
+			if(face.expandOffset > 0.0f)
+				plane.d -= face.expandOffset;
+			expandPoint.plane[k] = plane;
+		}
+	}
+
+	// now find the plane intersection - the new hull vertices
+	PX_ALLOCA(vertices,PxVec3,expandPoints.size());
+	for(PxU32 i=0;i<expandPoints.size();i++)
+	{
+		local::ExpandPoint& expandPoint = expandPoints[i];
+		vertices[i] = local::threePlaneIntersection(expandPoint.plane[0],expandPoint.plane[1],expandPoint.plane[2]);
+	}
+
+	// construct again the hull from the new points
+	local::QuickHull* newHull = PX_NEW_TEMP(local::QuickHull)(mQuickHull->mCookingParams, mQuickHull->mConvexDesc);
+	newHull->preallocate(expandPoints.size());
+	newHull->parseInputVertices(vertices,expandPoints.size());
+
+	local::QuickHullResult::Enum qhRes = newHull->buildHull();
+	switch(qhRes)
+	{
+	case local::QuickHullResult::eZERO_AREA_TEST_FAILED:
+		{
+			newHull->releaseHull();
+			PX_DELETE(newHull);
+			return PxConvexMeshCookingResult::eZERO_AREA_TEST_FAILED;
+		}
+	case local::QuickHullResult::eSUCCESS:
+	case local::QuickHullResult::eVERTEX_LIMIT_REACHED:
+	case local::QuickHullResult::ePOLYGONS_LIMIT_REACHED:
+		{
+			// the new hull replaces the original one
+			mQuickHull->releaseHull();
+			PX_DELETE(mQuickHull);
+			mQuickHull = newHull;
+		}
+		break;
+	case local::QuickHullResult::eFAILURE:
+		{
+			newHull->releaseHull();
+			PX_DELETE(newHull);
+			return PxConvexMeshCookingResult::eFAILURE;
+		}
+	};
+
+	return PxConvexMeshCookingResult::eSUCCESS;
+}
+
+//////////////////////////////////////////////////////////////////////////
+// expand the hull from the limited triangles set
+// 1. collect all planes
+// 2. create OBB from the input verts
+// 3. slice the OBB with the planes
+// 4. iterate till vlimit is reached
+PxConvexMeshCookingResult::Enum QuickHullConvexHullLib::expandHullOBB()
+{
+	Ps::Array<PxPlane> expandPlanes;
+	expandPlanes.reserve(mQuickHull->mHullFaces.size());
+
+	// collect expand planes (shifted outwards by the per-face expand offset
+	// so all input vertices stay inside)
+	for (PxU32 i = 0; i < mQuickHull->mHullFaces.size(); i++)
+	{
+		local::QuickHullFace& face = *mQuickHull->mHullFaces[i];
+		if (face.state == local::QuickHullFace::eVISIBLE)
+		{
+			PxPlane plane;
+			plane.n = face.normal;
+			plane.d = -face.planeOffset;
+			if (face.expandOffset > 0.0f)
+				plane.d -= face.expandOffset;
+
+			expandPlanes.pushBack(plane);
+		}
+	}
+
+
+	PxTransform obbTransform;
+	PxVec3 sides;
+
+	// compute the OBB
+	PxConvexMeshDesc convexDesc;
+	fillConvexMeshDescFromQuickHull(convexDesc);
+	convexDesc.flags = mConvexMeshDesc.flags;
+	computeOBBFromConvex(convexDesc, sides, obbTransform);
+
+	// free the memory used for the convex mesh desc
+	PX_FREE_AND_RESET(mVertsOut);
+	PX_FREE_AND_RESET(mPolygonsOut);
+	PX_FREE_AND_RESET(mIndicesOut);
+
+	// crop the OBB - at most 256 planes are applied
+	PxU32 maxplanes = PxMin(PxU32(256), expandPlanes.size());
+
+	ConvexHull* c = PX_NEW_TEMP(ConvexHull)(sides*0.5f,obbTransform, expandPlanes);
+
+	const float planeTolerance = mQuickHull->mPlaneTolerance;
+	const float epsilon = mQuickHull->mTolerance;
+
+	// repeatedly slice with the best candidate plane; each failure path keeps
+	// the last valid hull (tmp) and stops
+	PxI32 k;
+	while (maxplanes-- && (k = c->findCandidatePlane(planeTolerance, epsilon)) >= 0)
+	{
+		ConvexHull* tmp = c;
+		c = convexHullCrop(*tmp, expandPlanes[PxU32(k)], planeTolerance);
+		if (c == NULL)
+		{
+			c = tmp;
+			break;
+		} // might want to debug this case better!!!
+		if (!c->assertIntact(planeTolerance))
+		{
+			PX_DELETE(c);
+			c = tmp;
+			break;
+		} // might want to debug this case better too!!!
+
+		// check for vertex limit
+		if (c->getVertices().size() > mConvexMeshDesc.vertexLimit)
+		{
+			PX_DELETE(c);
+			c = tmp;
+			maxplanes = 0;
+			break;
+		}
+		// check for vertex limit per face if necessary, GRB supports max 32 verts per face
+		if ((mConvexMeshDesc.flags & PxConvexFlag::eGPU_COMPATIBLE) && c->maxNumVertsPerFace() > gpuMaxVertsPerFace)
+		{
+			PX_DELETE(c);
+			c = tmp;
+			maxplanes = 0;
+			break;
+		}
+		PX_DELETE(tmp);
+	}
+
+	PX_ASSERT(c->assertIntact(planeTolerance));
+
+	// ownership passes to the member - freed in the destructor
+	mCropedConvexHull = c;
+
+	return PxConvexMeshCookingResult::eSUCCESS;
+}
+
+
+
+
+//////////////////////////////////////////////////////////////////////////
+// fill the descriptor with computed verts, indices and polygons
+// dispatches to the right filler - the cropped hull takes precedence when present
+void QuickHullConvexHullLib::fillConvexMeshDesc(PxConvexMeshDesc& desc)
+{
+	if (!mCropedConvexHull)
+		fillConvexMeshDescFromQuickHull(desc);
+	else
+		fillConvexMeshDescFromCroppedHull(desc);
+}
+
+//////////////////////////////////////////////////////////////////////////
+// Fill the descriptor with computed verts, indices and polygons taken
+// directly from the quickhull result (path without vertex-limit cropping).
+void QuickHullConvexHullLib::fillConvexMeshDescFromQuickHull(PxConvexMeshDesc& desc)
+{
+	// Count the indices needed and remember the largest face. The largest face
+	// is stored as the first face, required for the GRB test (max 32 verts per face supported).
+	PxU32 numIndices = 0;
+	const PxU32 numFaces = mQuickHull->mHullFaces.size();
+	PxU32 numFacesOut = 0;
+	PxU32 largestFace = 0;
+	for (PxU32 i = 0; i < numFaces; i++)
+	{
+		const local::QuickHullFace& face = *mQuickHull->mHullFaces[i];
+		if(face.state == local::QuickHullFace::eVISIBLE)
+		{
+			numFacesOut++;
+			numIndices += face.numEdges;
+			if(face.numEdges > mQuickHull->mHullFaces[largestFace]->numEdges)
+				largestFace = i;
+		}
+	}
+
+	// allocate out buffers
+	PxU32* indices = reinterpret_cast<PxU32*> (PX_ALLOC_TEMP(sizeof(PxU32)*numIndices, "PxU32"));
+	PxI32* translateTable = reinterpret_cast<PxI32*> (PX_ALLOC_TEMP(sizeof(PxU32)*mQuickHull->mNumVertices, "PxU32"));
+	PxMemSet(translateTable,-1,mQuickHull->mNumVertices*sizeof(PxU32));
+	// Allocate one additional PxVec3 for V4 safe loads in VolumeIntegration.
+	// The extra element must be inside the parentheses: sizeof(PxVec3)*n + 1
+	// would only add a single byte, not a whole vector.
+	PxVec3* vertices = reinterpret_cast<PxVec3*> (PX_ALLOC_TEMP(sizeof(PxVec3)*(mQuickHull->mNumVertices + 1), "PxVec3"));
+	PxHullPolygon* polygons = reinterpret_cast<PxHullPolygon*> (PX_ALLOC_TEMP(sizeof(PxHullPolygon)*numFacesOut, "PxHullPolygon"));
+
+	// go over the hullPolygons and mark valid vertices, create translateTable
+	// (old vertex index -> compacted output index, -1 == not referenced yet)
+	PxU32 numVertices = 0;
+	for (PxU32 i = 0; i < numFaces; i++)
+	{
+		const local::QuickHullFace& face = *mQuickHull->mHullFaces[i];
+		if(face.state == local::QuickHullFace::eVISIBLE)
+		{
+			// walk the face's half-edge loop once, starting at face.edge
+			local::QuickHullHalfEdge* he = face.edge;
+			do
+			{
+				if(translateTable[he->tail.index] == -1)
+				{
+					vertices[numVertices] = he->tail.point;
+					translateTable[he->tail.index] = PxI32(numVertices);
+					numVertices++;
+				}
+				he = he->next;
+			} while (he != face.edge);
+		}
+	}
+
+	desc.points.count = numVertices;
+	desc.points.data = vertices;
+	desc.points.stride = sizeof(PxVec3);
+
+	desc.indices.count = numIndices;
+	desc.indices.data = indices;
+	desc.indices.stride = sizeof(PxU32);
+
+	desc.polygons.count = numFacesOut;
+	desc.polygons.data = polygons;
+	desc.polygons.stride = sizeof(PxHullPolygon);
+
+	PxU16 indexOffset = 0;
+	numFacesOut = 0;
+	for (PxU32 i = 0; i < numFaces; i++)
+	{
+		// faceIndex - store the largest face first, then the rest in order
+		PxU32 faceIndex;
+		if(i == 0)
+		{
+			faceIndex = largestFace;
+		}
+		else
+		{
+			faceIndex = (i == largestFace) ? 0 : i;
+		}
+
+		const local::QuickHullFace& face = *mQuickHull->mHullFaces[faceIndex];
+		if(face.state == local::QuickHullFace::eVISIBLE)
+		{
+			// create index data - remap through translateTable into the compacted vertex buffer
+			local::QuickHullHalfEdge* he = face.edge;
+			PxU32 index = 0;
+			do
+			{
+				indices[index + indexOffset] = PxU32(translateTable[he->tail.index]);
+				index++;
+				he = he->next;
+			} while (he != face.edge);
+
+			// create polygon, plane d computed from the face centroid
+			PxHullPolygon polygon;
+			polygon.mPlane[0] = face.normal[0];
+			polygon.mPlane[1] = face.normal[1];
+			polygon.mPlane[2] = face.normal[2];
+			polygon.mPlane[3] = -face.normal.dot(face.centroid);
+
+			polygon.mIndexBase = indexOffset;
+			polygon.mNbVerts = face.numEdges;
+			indexOffset += face.numEdges;
+			polygons[numFacesOut] = polygon;
+			numFacesOut++;
+		}
+	}
+
+	PX_ASSERT(mQuickHull->mNumHullFaces == numFacesOut);
+
+	// store the output buffers so the destructor can release them
+	mVertsOut = vertices;
+	mIndicesOut = indices;
+	mPolygonsOut = polygons;
+
+	PX_FREE(translateTable);
+}
+
+//////////////////////////////////////////////////////////////////////////
+// fill the desc from cropped hull data (vertex-limit OBB slicing path)
+void QuickHullConvexHullLib::fillConvexMeshDescFromCroppedHull(PxConvexMeshDesc& outDesc)
+{
+	PX_ASSERT(mCropedConvexHull);
+
+	// parse the hullOut and fill the result with vertices and polygons
+	const PxU32 numIndices = mCropedConvexHull->getEdges().size();
+	mIndicesOut = reinterpret_cast<PxU32*> (PX_ALLOC_TEMP(sizeof(PxU32)*numIndices, "PxU32"));
+
+	const PxU32 numPolygons = mCropedConvexHull->getFacets().size();
+	mPolygonsOut = reinterpret_cast<PxHullPolygon*> (PX_ALLOC_TEMP(sizeof(PxHullPolygon)*numPolygons, "PxHullPolygon"));
+
+	// Allocate one additional PxVec3 for V4 safe loads in VolumeIntegration.
+	// The extra element must be inside the parentheses: sizeof(PxVec3)*n + 1
+	// would only add a single byte, not a whole vector.
+	const PxU32 numVertices = mCropedConvexHull->getVertices().size();
+	mVertsOut = reinterpret_cast<PxVec3*> (PX_ALLOC_TEMP(sizeof(PxVec3)*(numVertices + 1), "PxVec3"));
+	PxMemCopy(mVertsOut, mCropedConvexHull->getVertices().begin(), sizeof(PxVec3)*numVertices);
+
+	// Edges are grouped by their owning facet index (.p), so each polygon's
+	// vertex indices form one contiguous run in the edge array.
+	PxU32 i = 0;	// current position in the edge/index array
+	PxU32 k = 0;	// current polygon
+	while (i < numIndices)
+	{
+		PxU32 j = 1;
+		PxHullPolygon& polygon = mPolygonsOut[k];
+		// get num indices per polygon - count consecutive edges sharing facet k
+		while (j + i < numIndices && mCropedConvexHull->getEdges()[i].p == mCropedConvexHull->getEdges()[i + j].p)
+		{
+			j++;
+		}
+		polygon.mNbVerts = Ps::to16(j);
+		polygon.mIndexBase = Ps::to16(i);
+
+		// get the plane
+		polygon.mPlane[0] = mCropedConvexHull->getFacets()[k].n[0];
+		polygon.mPlane[1] = mCropedConvexHull->getFacets()[k].n[1];
+		polygon.mPlane[2] = mCropedConvexHull->getFacets()[k].n[2];
+		polygon.mPlane[3] = mCropedConvexHull->getFacets()[k].d;
+
+		// copy this polygon's vertex indices
+		while (j--)
+		{
+			mIndicesOut[i] = mCropedConvexHull->getEdges()[i].v;
+			i++;
+		}
+		k++;
+	}
+
+	PX_ASSERT(k == mCropedConvexHull->getFacets().size());
+
+	outDesc.indices.count = numIndices;
+	outDesc.indices.stride = sizeof(PxU32);
+	outDesc.indices.data = mIndicesOut;
+
+	outDesc.points.count = numVertices;
+	outDesc.points.stride = sizeof(PxVec3);
+	outDesc.points.data = mVertsOut;
+
+	outDesc.polygons.count = numPolygons;
+	outDesc.polygons.stride = sizeof(PxHullPolygon);
+	outDesc.polygons.data = mPolygonsOut;
+
+	// move the largest face to the front (GRB requirement)
+	swapLargestFace(outDesc);
+}
+
diff --git a/PhysX_3.4/Source/PhysXCooking/src/convex/QuickHullConvexHullLib.h b/PhysX_3.4/Source/PhysXCooking/src/convex/QuickHullConvexHullLib.h
new file mode 100644
index 00000000..ad077654
--- /dev/null
+++ b/PhysX_3.4/Source/PhysXCooking/src/convex/QuickHullConvexHullLib.h
@@ -0,0 +1,97 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2016 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#ifndef PX_QUICKHULL_CONVEXHULLLIB_H
+#define PX_QUICKHULL_CONVEXHULLLIB_H
+
+#include "ConvexHullLib.h"
+#include "Ps.h"
+#include "PsArray.h"
+#include "PsUserAllocated.h"
+
+namespace local
+{
+ class QuickHull;
+ struct QuickHullVertex;
+}
+
+namespace physx
+{
+ class ConvexHull;
+
+	//////////////////////////////////////////////////////////////////////////
+	// Quickhull lib constructs the hull from the given input points. The resulting hull
+	// will only contain a subset of the input points. The algorithm incrementally
+	// adds the most distant vertex to the starting simplex. The produced hulls are built with high
+	// precision and give more stable and correct results than the legacy algorithm.
+	class QuickHullConvexHullLib: public ConvexHullLib, public Ps::UserAllocated
+	{
+		PX_NOCOPY(QuickHullConvexHullLib)
+	public:
+
+		// functions
+		QuickHullConvexHullLib(const PxConvexMeshDesc& desc, const PxCookingParams& params);
+
+		~QuickHullConvexHullLib();
+
+		// computes the convex hull from provided points
+		virtual PxConvexMeshCookingResult::Enum createConvexHull();
+
+		// fills the convex mesh descriptor with the computed hull data
+		virtual void fillConvexMeshDesc(PxConvexMeshDesc& desc);
+
+	protected:
+		// if the vertex limit was reached we need to expand the hull using the OBB slicing
+		PxConvexMeshCookingResult::Enum expandHullOBB();
+
+		// if the vertex limit was reached we need to expand the hull using the plane shifting
+		PxConvexMeshCookingResult::Enum expandHull();
+
+		// checks for collinearity and coplanarity
+		// returns true if the simplex was ok, we can reuse the computed tolerances and min/max values
+		bool cleanupForSimplex(PxVec3* vertices, PxU32 vertexCount, local::QuickHullVertex* minimumVertex,
+			local::QuickHullVertex* maximumVertex, float& tolerance, float& planeTolerance);
+
+		// fill the result desc from quick hull convex
+		void fillConvexMeshDescFromQuickHull(PxConvexMeshDesc& desc);
+
+		// fill the result desc from cropped hull convex (vertex limit path)
+		void fillConvexMeshDescFromCroppedHull(PxConvexMeshDesc& desc);
+
+	private:
+		local::QuickHull* mQuickHull; // the internal quick hull representation
+		ConvexHull* mCropedConvexHull; //the hull cropped from the OBB, used for the vertex limit path
+
+		PxVec3* mVertsOut; // vertices for output
+		PxU32* mIndicesOut; // indices for output
+		PxHullPolygon* mPolygonsOut; // polygons for output
+	};
+}
+
+#endif
diff --git a/PhysX_3.4/Source/PhysXCooking/src/convex/VolumeIntegration.cpp b/PhysX_3.4/Source/PhysXCooking/src/convex/VolumeIntegration.cpp
new file mode 100644
index 00000000..f388a32c
--- /dev/null
+++ b/PhysX_3.4/Source/PhysXCooking/src/convex/VolumeIntegration.cpp
@@ -0,0 +1,797 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2016 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+
+//#ifdef PX_COOKING
+
+/*
+* This code computes volume integrals needed to compute mass properties of polyhedral bodies.
+* Based on public domain code by Brian Mirtich.
+*/
+#include "foundation/PxMemory.h"
+#include "VolumeIntegration.h"
+#include "PxSimpleTriangleMesh.h"
+#include "PxConvexMeshDesc.h"
+#include "GuConvexMeshData.h"
+#include "PsUtilities.h"
+#include "PsVecMath.h"
+
+
+namespace physx
+{
+
+ using namespace Ps::aos;
+
+namespace
+{
+
+	// Computes the 10 volume integrals of a closed triangle mesh by summing
+	// surface integrals over its faces (Brian Mirtich's method), from which
+	// mass, center of mass and inertia tensors are derived.
+	class VolumeIntegrator
+	{
+	public:
+		VolumeIntegrator(PxSimpleTriangleMesh mesh, PxF64 mDensity);
+		~VolumeIntegrator();
+		// runs the integration over all triangles and fills ir; returns true on success
+		bool computeVolumeIntegrals(PxIntegrals& ir);
+	private:
+		struct Normal
+		{
+			PxVec3 normal;
+			PxF32 w;
+		};
+
+		// one triangle: double-precision plane (Norm, w) plus its three vertex indices
+		struct Face
+		{
+			PxF64 Norm[3];
+			PxF64 w;
+			PxU32 Verts[3];
+		};
+
+		// Data structures
+		PxF64 mMass; //!< Mass (set during computeCOMInertiaTensor as density * mT0)
+		PxF64 mDensity; //!< Density
+		PxSimpleTriangleMesh mesh; //!< input mesh descriptor (copied by value; the vertex/index buffers it points to are not owned)
+
+		unsigned int mA; //!< Alpha - first projection axis index
+		unsigned int mB; //!< Beta - second projection axis index
+		unsigned int mC; //!< Gamma - axis with the largest |normal| component of the current face
+
+		// Projection integrals (per-face, over the projection onto the alpha/beta plane)
+		PxF64 mP1;
+		PxF64 mPa; //!< Pi Alpha
+		PxF64 mPb; //!< Pi Beta
+		PxF64 mPaa; //!< Pi Alpha^2
+		PxF64 mPab; //!< Pi AlphaBeta
+		PxF64 mPbb; //!< Pi Beta^2
+		PxF64 mPaaa; //!< Pi Alpha^3
+		PxF64 mPaab; //!< Pi Alpha^2Beta
+		PxF64 mPabb; //!< Pi AlphaBeta^2
+		PxF64 mPbbb; //!< Pi Beta^3
+
+		// Face integrals (per-face surface integrals derived from the projection integrals)
+		PxF64 mFa; //!< FAlpha
+		PxF64 mFb; //!< FBeta
+		PxF64 mFc; //!< FGamma
+		PxF64 mFaa; //!< FAlpha^2
+		PxF64 mFbb; //!< FBeta^2
+		PxF64 mFcc; //!< FGamma^2
+		PxF64 mFaaa; //!< FAlpha^3
+		PxF64 mFbbb; //!< FBeta^3
+		PxF64 mFccc; //!< FGamma^3
+		PxF64 mFaab; //!< FAlpha^2Beta
+		PxF64 mFbbc; //!< FBeta^2Gamma
+		PxF64 mFcca; //!< FGamma^2Alpha
+
+		// The 10 volume integrals (accumulated over all faces)
+		PxF64 mT0; //!< ~Total mass (volume integral)
+		PxF64 mT1[3]; //!< Location of the center of mass
+		PxF64 mT2[3]; //!< Moments of inertia
+		PxF64 mTP[3]; //!< Products of inertia
+
+		// Internal methods
+		PxVec3 computeCenterOfMass();
+		void computeInertiaTensor(PxF64* J);
+		void computeCOMInertiaTensor(PxF64* J);
+		void computeFaceNormal(Face & f, PxU32 * indices);
+
+		void computeProjectionIntegrals(const Face& f);
+		void computeFaceIntegrals(const Face& f);
+	};
+
+ #define X 0u
+ #define Y 1u
+ #define Z 2u
+
+	///////////////////////////////////////////////////////////////////////////
+	/**
+	*	Constructor. Copies the mesh descriptor and stores the density;
+	*	the accumulated mass starts at zero.
+	*/
+	///////////////////////////////////////////////////////////////////////////
+	VolumeIntegrator::VolumeIntegrator(PxSimpleTriangleMesh mesh_, PxF64 density)
+		: mMass(0.0)
+		, mDensity(density)
+		, mesh(mesh_)
+	{
+	}
+
+	///////////////////////////////////////////////////////////////////////////
+	/**
+	*	Destructor. Nothing to release - the integrator performs no allocations
+	*	and does not own the mesh buffers.
+	*/
+	///////////////////////////////////////////////////////////////////////////
+	VolumeIntegrator::~VolumeIntegrator()
+	{
+	}
+
+	// Computes the unit face normal and plane offset of triangle f, reading the
+	// three vertices through the strided points buffer using the given indices.
+	// f.w is set so the face plane satisfies normal.dot(p) + w == 0 for points p on the face.
+	// NOTE(review): normalize() of a degenerate (zero-area) triangle is not
+	// guarded here - assumes the input mesh contains no such faces; TODO confirm.
+	void VolumeIntegrator::computeFaceNormal(Face & f, PxU32 * indices)
+	{
+		const PxU8 * vertPointer = reinterpret_cast<const PxU8*>(mesh.points.data);
+
+		//two edges
+		PxVec3 d1 = (*reinterpret_cast<const PxVec3 *>(vertPointer + mesh.points.stride * indices[1] )) - (*reinterpret_cast<const PxVec3 *>(vertPointer + mesh.points.stride * indices[0] ));
+		PxVec3 d2 = (*reinterpret_cast<const PxVec3 *>(vertPointer + mesh.points.stride * indices[2] )) - (*reinterpret_cast<const PxVec3 *>(vertPointer + mesh.points.stride * indices[1] ));
+
+		// cross product of the two edges gives the (unnormalized) face normal
+		PxVec3 normal = d1.cross(d2);
+
+		normal.normalize();
+
+		// plane offset, evaluated at the first vertex of the face
+		f.w = - PxF64(normal.dot( (*reinterpret_cast<const PxVec3 *>(vertPointer + mesh.points.stride * indices[0] )) ));
+
+		f.Norm[0] = PxF64(normal.x);
+		f.Norm[1] = PxF64(normal.y);
+		f.Norm[2] = PxF64(normal.z);
+	}
+
+	///////////////////////////////////////////////////////////////////////////
+	/**
+	*	Computes volume integrals for a polyhedron by summing surface integrals
+	*	over its faces (divergence theorem, Mirtich's method).
+	*	\param		ir	[out] a result structure (COM, mass, inertia tensors).
+	*	\return		true if success
+	*/
+	///////////////////////////////////////////////////////////////////////////
+	bool VolumeIntegrator::computeVolumeIntegrals(PxIntegrals& ir)
+	{
+		// Clear all integrals
+		mT0 = mT1[X] = mT1[Y] = mT1[Z] = mT2[X] = mT2[Y] = mT2[Z] = mTP[X] = mTP[Y] = mTP[Z] = 0;
+
+		Face f;
+		const PxU8 * trigPointer = reinterpret_cast<const PxU8*>(mesh.triangles.data);
+		for(PxU32 i=0;i<mesh.triangles.count;i++, trigPointer += mesh.triangles.stride)
+		{
+			// fetch the triangle's vertex indices (16- or 32-bit layout)
+			if (mesh.flags & PxMeshFlag::e16_BIT_INDICES)
+			{
+				f.Verts[0] = (reinterpret_cast<const PxU16 *>(trigPointer))[0];
+				f.Verts[1] = (reinterpret_cast<const PxU16 *>(trigPointer))[1];
+				f.Verts[2] = (reinterpret_cast<const PxU16 *>(trigPointer))[2];
+			}
+			else
+			{
+				f.Verts[0] = (reinterpret_cast<const PxU32 *>(trigPointer)[0]);
+				f.Verts[1] = (reinterpret_cast<const PxU32 *>(trigPointer)[1]);
+				f.Verts[2] = (reinterpret_cast<const PxU32 *>(trigPointer)[2]);
+			}
+
+			// flip the winding (and thus the normal) if requested
+			if (mesh.flags & PxMeshFlag::eFLIPNORMALS)
+			{
+				PxU32 t = f.Verts[1];
+				f.Verts[1] = f.Verts[2];
+				f.Verts[2] = t;
+			}
+
+			//compute face normal:
+			computeFaceNormal(f,f.Verts);
+
+			// Compute alpha/beta/gamma as the right-handed permutation of (x,y,z) that maximizes |n|
+			PxF64 nx = fabs(f.Norm[X]);
+			PxF64 ny = fabs(f.Norm[Y]);
+			PxF64 nz = fabs(f.Norm[Z]);
+			if (nx > ny && nx > nz) mC = X;
+			else mC = (ny > nz) ? Y : Z;
+			mA = (mC + 1) % 3;
+			mB = (mA + 1) % 3;
+
+			// Compute face contribution
+			computeFaceIntegrals(f);
+
+			// Update integrals
+			mT0 += f.Norm[X] * ((mA == X) ? mFa : ((mB == X) ? mFb : mFc));
+
+			mT1[mA] += f.Norm[mA] * mFaa;
+			mT1[mB] += f.Norm[mB] * mFbb;
+			mT1[mC] += f.Norm[mC] * mFcc;
+
+			mT2[mA] += f.Norm[mA] * mFaaa;
+			mT2[mB] += f.Norm[mB] * mFbbb;
+			mT2[mC] += f.Norm[mC] * mFccc;
+
+			mTP[mA] += f.Norm[mA] * mFaab;
+			mTP[mB] += f.Norm[mB] * mFbbc;
+			mTP[mC] += f.Norm[mC] * mFcca;
+		}
+
+		// final normalization constants of the summed surface integrals
+		mT1[X] /= 2; mT1[Y] /= 2; mT1[Z] /= 2;
+		mT2[X] /= 3; mT2[Y] /= 3; mT2[Z] /= 3;
+		mTP[X] /= 2; mTP[Y] /= 2; mTP[Z] /= 2;
+
+		// Fill result structure
+		ir.COM = computeCenterOfMass();
+		computeInertiaTensor(reinterpret_cast<PxF64*>(ir.inertiaTensor));
+		computeCOMInertiaTensor(reinterpret_cast<PxF64*>(ir.COMInertiaTensor));
+		ir.mass = mMass;	// mMass is set inside computeCOMInertiaTensor
+		return true;
+	}
+
+	///////////////////////////////////////////////////////////////////////////
+	/**
+	*	Computes the center of mass from the accumulated integrals.
+	*	\return		the center of mass, or (0,0,0) when the volume integral is zero.
+	*/
+	///////////////////////////////////////////////////////////////////////////
+	PxVec3 VolumeIntegrator::computeCenterOfMass()
+	{
+		// guard against a zero (empty/degenerate) volume integral
+		if(mT0 == 0.0)
+			return PxVec3(0.0f, 0.0f, 0.0f);
+
+		return PxVec3(float(mT1[X] / mT0), float(mT1[Y] / mT0), float(mT1[Z] / mT0));
+	}
+
+	///////////////////////////////////////////////////////////////////////////
+	/**
+	*	Setups the inertia tensor relative to the origin.
+	*	\param		it	[out] the returned 3x3 inertia tensor (row-major PxF64[9]).
+	*/
+	///////////////////////////////////////////////////////////////////////////
+	void VolumeIntegrator::computeInertiaTensor(PxF64* it)
+	{
+		PxF64 J[3][3];
+
+		// Compute inertia tensor: diagonal from the moments of inertia integrals
+		J[X][X] = mDensity * (mT2[Y] + mT2[Z]);
+		J[Y][Y] = mDensity * (mT2[Z] + mT2[X]);
+		J[Z][Z] = mDensity * (mT2[X] + mT2[Y]);
+
+		// off-diagonal (symmetric) terms from the products of inertia integrals
+		J[X][Y] = J[Y][X] = - mDensity * mTP[X];
+		J[Y][Z] = J[Z][Y] = - mDensity * mTP[Y];
+		J[Z][X] = J[X][Z] = - mDensity * mTP[Z];
+
+		PxMemCopy(it, J, 9*sizeof(PxF64));
+	}
+
+	///////////////////////////////////////////////////////////////////////////
+	/**
+	*	Setups the inertia tensor relative to the COM. Also computes and stores
+	*	the total mass (mMass = density * volume integral) as a side effect.
+	*	\param		it	[out] the returned 3x3 inertia tensor (row-major PxF64[9]).
+	*/
+	///////////////////////////////////////////////////////////////////////////
+	void VolumeIntegrator::computeCOMInertiaTensor(PxF64* it)
+	{
+		PxF64 J[3][3];
+
+		mMass = mDensity * mT0;
+
+		const PxVec3 COM = computeCenterOfMass();
+		const PxVec3 MassCOM(PxF32(mMass) * COM);
+		const PxVec3 MassCOM2(MassCOM.x * COM.x, MassCOM.y * COM.y, MassCOM.z * COM.z);
+
+		// Compute initial inertia tensor (relative to the origin)
+		computeInertiaTensor(reinterpret_cast<PxF64*>(J));
+
+		// Translate inertia tensor to center of mass
+		// Huyghens' theorem:
+		// Jx'x' = Jxx - m*(YG^2+ZG^2)
+		// Jy'y' = Jyy - m*(ZG^2+XG^2)
+		// Jz'z' = Jzz - m*(XG^2+YG^2)
+		// XG, YG, ZG = new origin
+		// YG^2+ZG^2 = dx^2
+		J[X][X] -= PxF64(MassCOM2.y + MassCOM2.z);
+		J[Y][Y] -= PxF64(MassCOM2.z + MassCOM2.x);
+		J[Z][Z] -= PxF64(MassCOM2.x + MassCOM2.y);
+
+		// Huyghens' theorem:
+		// Jx'y' = Jxy - m*XG*YG
+		// Jy'z' = Jyz - m*YG*ZG
+		// Jz'x' = Jzx - m*ZG*XG
+		// NOTE(review): original code asks "IS THE SIGN CORRECT?" - the += works
+		// because the off-diagonal terms were stored negated; left as-is, verify
+		// against the parallel-axis theorem if this is ever touched.
+		J[X][Y] = J[Y][X] += PxF64(MassCOM.x * COM.y);
+		J[Y][Z] = J[Z][Y] += PxF64(MassCOM.y * COM.z);
+		J[Z][X] = J[X][Z] += PxF64(MassCOM.z * COM.x);
+
+		PxMemCopy(it, J, 9*sizeof(PxF64));
+	}
+
+	///////////////////////////////////////////////////////////////////////////
+	/**
+	*	Computes integrals over a face projection from the coordinates of the
+	*	projection's vertices (Green's theorem over the projected polygon edges).
+	*	The projection plane is the alpha/beta plane selected per face (mA/mB).
+	*	\param		f	[in] a face structure.
+	*/
+	///////////////////////////////////////////////////////////////////////////
+	void VolumeIntegrator::computeProjectionIntegrals(const Face& f)
+	{
+		mP1 = mPa = mPb = mPaa = mPab = mPbb = mPaaa = mPaab = mPabb = mPbbb = 0.0;
+
+		const PxU8* vertPointer = reinterpret_cast<const PxU8*>(mesh.points.data);
+		// iterate the three edges (p0 -> p1) of the projected triangle
+		for(PxU32 i=0;i<3;i++)
+		{
+			const PxVec3& p0 = *reinterpret_cast<const PxVec3 *>(vertPointer + mesh.points.stride * (f.Verts[i]) );
+			const PxVec3& p1 = *reinterpret_cast<const PxVec3 *>(vertPointer + mesh.points.stride * (f.Verts[(i+1) % 3]) );
+
+			// project onto the alpha/beta axes (double precision)
+			PxF64 a0 = PxF64(p0[mA]);
+			PxF64 b0 = PxF64(p0[mB]);
+			PxF64 a1 = PxF64(p1[mA]);
+			PxF64 b1 = PxF64(p1[mB]);
+
+			PxF64 da = a1 - a0; // DeltaA
+			PxF64 db = b1 - b0; // DeltaB
+
+			PxF64 a0_2 = a0 * a0; // Alpha0^2
+			PxF64 a0_3 = a0_2 * a0; // ...
+			PxF64 a0_4 = a0_3 * a0;
+
+			PxF64 b0_2 = b0 * b0;
+			PxF64 b0_3 = b0_2 * b0;
+			PxF64 b0_4 = b0_3 * b0;
+
+			PxF64 a1_2 = a1 * a1;
+			PxF64 a1_3 = a1_2 * a1;
+
+			PxF64 b1_2 = b1 * b1;
+			PxF64 b1_3 = b1_2 * b1;
+
+			// polynomial subexpressions of the edge line integrals
+			PxF64 C1 = a1 + a0;
+
+			PxF64 Ca = a1*C1 + a0_2;
+			PxF64 Caa = a1*Ca + a0_3;
+			PxF64 Caaa = a1*Caa + a0_4;
+
+			PxF64 Cb = b1*(b1 + b0) + b0_2;
+			PxF64 Cbb = b1*Cb + b0_3;
+			PxF64 Cbbb = b1*Cbb + b0_4;
+
+			PxF64 Cab = 3*a1_2 + 2*a1*a0 + a0_2;
+			PxF64 Kab = a1_2 + 2*a1*a0 + 3*a0_2;
+
+			PxF64 Caab = a0*Cab + 4*a1_3;
+			PxF64 Kaab = a1*Kab + 4*a0_3;
+
+			PxF64 Cabb = 4*b1_3 + 3*b1_2*b0 + 2*b1*b0_2 + b0_3;
+			PxF64 Kabb = b1_3 + 2*b1_2*b0 + 3*b1*b0_2 + 4*b0_3;
+
+			// accumulate edge contributions
+			mP1 += db*C1;
+			mPa += db*Ca;
+			mPaa += db*Caa;
+			mPaaa += db*Caaa;
+			mPb += da*Cb;
+			mPbb += da*Cbb;
+			mPbbb += da*Cbbb;
+			mPab += db*(b1*Cab + b0*Kab);
+			mPaab += db*(b1*Caab + b0*Kaab);
+			mPabb += da*(a1*Cabb + a0*Kabb);
+		}
+
+		// common denominators of the line integrals
+		mP1 /= 2.0;
+		mPa /= 6.0;
+		mPaa /= 12.0;
+		mPaaa /= 20.0;
+		mPb /= -6.0;
+		mPbb /= -12.0;
+		mPbbb /= -20.0;
+		mPab /= 24.0;
+		mPaab /= 60.0;
+		mPabb /= -60.0;
+	}
+
+ #define SQR(x) ((x)*(x)) //!< Returns x square
+ #define CUBE(x) ((x)*(x)*(x)) //!< Returns x cube
+
+	///////////////////////////////////////////////////////////////////////////
+	/**
+	*	Computes surface integrals over a polyhedral face from the integrals
+	*	over its projection onto the alpha/beta plane, by back-substituting the
+	*	face plane equation (gamma expressed via alpha, beta, w).
+	*	\param		f	[in] a face structure.
+	*/
+	///////////////////////////////////////////////////////////////////////////
+	void VolumeIntegrator::computeFaceIntegrals(const Face& f)
+	{
+		computeProjectionIntegrals(f);
+
+		PxF64 w = f.w;
+		const PxF64* n = f.Norm;
+		// powers of 1/n[gamma]; gamma was chosen so |n[gamma]| is maximal, keeping k1 bounded
+		PxF64 k1 = 1 / n[mC];
+		PxF64 k2 = k1 * k1;
+		PxF64 k3 = k2 * k1;
+		PxF64 k4 = k3 * k1;
+
+		mFa = k1 * mPa;
+		mFb = k1 * mPb;
+		mFc = -k2 * (n[mA]*mPa + n[mB]*mPb + w*mP1);
+
+		mFaa = k1 * mPaa;
+		mFbb = k1 * mPbb;
+		mFcc = k3 * (SQR(n[mA])*mPaa + 2*n[mA]*n[mB]*mPab + SQR(n[mB])*mPbb + w*(2*(n[mA]*mPa + n[mB]*mPb) + w*mP1));
+
+		mFaaa = k1 * mPaaa;
+		mFbbb = k1 * mPbbb;
+		mFccc = -k4 * (CUBE(n[mA])*mPaaa + 3*SQR(n[mA])*n[mB]*mPaab
+			+ 3*n[mA]*SQR(n[mB])*mPabb + CUBE(n[mB])*mPbbb
+			+ 3*w*(SQR(n[mA])*mPaa + 2*n[mA]*n[mB]*mPab + SQR(n[mB])*mPbb)
+			+ w*w*(3*(n[mA]*mPa + n[mB]*mPb) + w*mP1));
+
+		mFaab = k1 * mPaab;
+		mFbbc = -k2 * (n[mA]*mPabb + n[mB]*mPbbb + w*mPbb);
+		mFcca = k3 * (SQR(n[mA])*mPaaa + 2*n[mA]*n[mB]*mPaab + SQR(n[mB])*mPabb + w*(2*(n[mA]*mPaa + n[mB]*mPab) + w*mPa));
+	}
+
+	/*
+	* This code computes volume integrals needed to compute mass properties of polyhedral bodies.
+	* Based on public domain code by David Eberly.
+	*/
+
+	// Computes mass properties of a convex mesh whose polygon data is laid out
+	// as Gu::HullPolygonData (see computeVolumeIntegralsSIMD usage of mDesc).
+	class VolumeIntegratorEberly
+	{
+	public:
+		VolumeIntegratorEberly(const PxConvexMeshDesc& mesh, PxF64 mDensity);
+		~VolumeIntegratorEberly();
+		bool computeVolumeIntegralsSIMD(PxIntegrals& ir, const PxVec3& origin);
+		bool computeVolumeIntegrals(PxIntegrals& ir, const PxVec3& origin);
+
+	private:
+		VolumeIntegratorEberly& operator=(const VolumeIntegratorEberly&); // non-assignable (reference member)
+		const PxConvexMeshDesc& mDesc; // input convex mesh descriptor (not owned)
+		PxF64 mMass; // mass, double precision
+		PxReal mMassR; // mass as stored by the SIMD path
+		PxF64 mDensity; // density
+	};
+
+	///////////////////////////////////////////////////////////////////////////
+	/**
+	*	Constructor. Binds the mesh descriptor reference and stores the density;
+	*	both mass accumulators start at zero.
+	*/
+	///////////////////////////////////////////////////////////////////////////
+	VolumeIntegratorEberly::VolumeIntegratorEberly(const PxConvexMeshDesc& desc, PxF64 density)
+		: mDesc(desc)
+	{
+		mMass = 0.0;
+		mMassR = 0.0f;
+		mDensity = density;
+	}
+
+	///////////////////////////////////////////////////////////////////////////
+	/**
+	*	Destructor. Nothing to release - the class only references caller data.
+	*/
+	///////////////////////////////////////////////////////////////////////////
+	VolumeIntegratorEberly::~VolumeIntegratorEberly()
+	{
+	}
+
+	// Helper for Eberly's polyhedral mass-property integration: given one
+	// coordinate (w0,w1,w2) of a triangle's three vertices, computes the
+	// polynomial subexpressions f1..f3 and g0..g2 used by the integral update.
+	PX_FORCE_INLINE void subexpressions(PxF64 w0, PxF64 w1, PxF64 w2, PxF64& f1, PxF64& f2, PxF64& f3, PxF64& g0, PxF64& g1, PxF64& g2)
+	{
+		const PxF64 sum01 = w0 + w1;
+		f1 = sum01 + w2;
+		const PxF64 sq0 = w0*w0;
+		const PxF64 mix01 = sq0 + w1*sum01;
+		f2 = mix01 + w2*f1;
+		f3 = w0*sq0 + w1*mix01 + w2*f2;
+		g0 = f2 + w0*(f1 + w0);
+		g1 = f2 + w1*(f1 + w1);
+		g2 = f2 + w2*(f1 + w2);
+	}
+
+	// SIMD version of subexpressions(): computes the same f/g polynomial terms
+	// lane-wise, handling all three coordinate components (x,y,z) at once.
+	PX_FORCE_INLINE void subexpressionsSIMD(const Vec4V& w0, const Vec4V& w1, const Vec4V& w2,
+		Vec4V& f1, Vec4V& f2, Vec4V& f3, Vec4V& g0, Vec4V& g1, Vec4V& g2)
+	{
+		const Vec4V temp0 = V4Add(w0, w1);
+		f1 = V4Add(temp0, w2);
+		const Vec4V temp1 = V4Mul(w0,w0);
+		const Vec4V temp2 = V4MulAdd(w1, temp0, temp1);
+		f2 = V4MulAdd(w2, f1, temp2);
+
+		// f3 = w0.multiply(temp1) + w1.multiply(temp2) + w2.multiply(f2);
+		const Vec4V ad0 = V4Mul(w0, temp1);
+		const Vec4V ad1 = V4MulAdd(w1, temp2, ad0);
+		f3 = V4MulAdd(w2, f2, ad1);
+
+		g0 = V4MulAdd(w0, V4Add(f1, w0), f2); // f2 + w0.multiply(f1 + w0);
+		g1 = V4MulAdd(w1, V4Add(f1, w1), f2); // f2 + w1.multiply(f1 + w1);
+		g2 = V4MulAdd(w2, V4Add(f1, w2), f2); // f2 + w2.multiply(f1 + w2);
+	}
+
+ ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+ /**
+ * Computes volume integrals for a polyhedron by summing surface integrals over its faces. SIMD version
+ * \param ir [out] a result structure.
+ * \param origin [in] the origin of the mesh vertices. All vertices will be shifted accordingly prior to computing the volume integrals.
+ Can improve accuracy, for example, if the centroid is used in the case of a convex mesh. Note: the returned inertia will not be relative to this origin but relative to (0,0,0).
+ * \return true if success
+ */
+ ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+ bool VolumeIntegratorEberly::computeVolumeIntegralsSIMD(PxIntegrals& ir, const PxVec3& origin)
+ {
+ FloatV mult = FLoad(1.0f/6.0f);
+ const Vec4V multV = V4Load(1.0f/24.0f);
+ const Vec4V multV2 = V4Load(1.0f/60.0f);
+ const Vec4V multVV = V4Load(1.0f/120.0f);
+
+ // order: 1, x, y, z, x^2, y^2, z^2, xy, yz, zx
+ FloatV intg = FLoad(0.0f);
+ Vec4V intgV = V4Load(0.0f);
+ Vec4V intgV2 = V4Load(0.0f);
+ Vec4V intgVV = V4Load(0.0f);
+
+ const Vec4V originV = Vec4V_From_PxVec3_WUndefined(origin);
+ const FloatV zeroV = FLoad(0.0f);
+
+ const PxVec3* hullVerts = static_cast<const PxVec3*> (mDesc.points.data);
+ const Gu::HullPolygonData* hullPolygons = static_cast<const Gu::HullPolygonData*> (mDesc.polygons.data);
+
+ for (PxU32 i = 0; i < mDesc.polygons.count; i++)
+ {
+ const Gu::HullPolygonData& polygon = hullPolygons[i];
+ const PxU8* data = static_cast<const PxU8*>(mDesc.indices.data) + polygon.mVRef8;
+ const PxU32 nbVerts = polygon.mNbVerts;
+
+ PX_ASSERT(nbVerts > 2);
+
+ const Vec4V normalV = V4LoadU(&polygon.mPlane.n.x);
+
+ for (PxU32 j = 0; j < nbVerts - 2; j++)
+ {
+ // Should be safe to V4Load, we allocate one more vertex each time
+ const Vec4V vertex0 = V4LoadU(&hullVerts[data[0]].x);
+ const Vec4V vertex1 = V4LoadU(&hullVerts[data[j + 1]].x);
+ const Vec4V vertex2 = V4LoadU(&hullVerts[data[j + 2]].x);
+
+ const Vec4V p0 = V4Sub(vertex0, originV);
+ Vec4V p1 = V4Sub(vertex1, originV);
+ Vec4V p2 = V4Sub(vertex2, originV);
+
+ const Vec4V p0YZX = V4PermYZXW(p0);
+ const Vec4V p1YZX = V4PermYZXW(p1);
+ const Vec4V p2YZX = V4PermYZXW(p2);
+
+ // get edges and cross product of edges
+ Vec4V d = V4Cross(V4Sub(p1, p0), V4Sub(p2, p0)); // (p1 - p0).cross(p2 - p0);
+
+ const FloatV dist = V4Dot3(d, normalV);
+ //if(cp.dot(normalV) < 0)
+ if(FAllGrtr(zeroV, dist))
+ {
+ d = V4Neg(d);
+ Vec4V temp = p1;
+ p1 = p2;
+ p2 = temp;
+ }
+
+ // compute integral terms
+ Vec4V f1; Vec4V f2; Vec4V f3; Vec4V g0; Vec4V g1; Vec4V g2;
+
+ subexpressionsSIMD(p0, p1, p2, f1, f2, f3, g0, g1, g2);
+
+ // update integrals
+ intg = FScaleAdd(V4GetX(d), V4GetX(f1), intg); //intg += d.x*f1.x;
+
+ intgV = V4MulAdd(d, f2, intgV); // intgV +=d.multiply(f2);
+ intgV2 = V4MulAdd(d, f3, intgV2); // intgV2 += d.multiply(f3);
+
+ const Vec4V ad0 = V4Mul(p0YZX, g0);
+ const Vec4V ad1 = V4MulAdd(p1YZX, g1, ad0);
+ const Vec4V ad2 = V4MulAdd(p2YZX, g2, ad1);
+ intgVV = V4MulAdd(d, ad2, intgVV); //intgVV += d.multiply(p0YZX.multiply(g0) + p1YZX.multiply(g1) + p2YZX.multiply(g2));
+ }
+ }
+
+ intg = FMul(intg, mult); // intg *= mult;
+ intgV = V4Mul(intgV, multV);
+ intgV2 = V4Mul(intgV2, multV2);
+ intgVV = V4Mul(intgVV, multVV);
+
+ // center of mass ir.COM = intgV/mMassR;
+ const Vec4V comV = V4ScaleInv(intgV, intg);
+ // we rewrite the mass, but then we set it back
+ V4StoreU(comV, &ir.COM.x);
+
+ FStore(intg, &mMassR);
+ ir.mass = PxF64(mMassR); // = intg;
+
+ PxVec3 intg2;
+ V3StoreU(Vec3V_From_Vec4V(intgV2), intg2);
+
+ PxVec3 intVV;
+ V3StoreU(Vec3V_From_Vec4V(intgVV), intVV);
+
+ // inertia tensor relative to the provided origin parameter
+ ir.inertiaTensor[0][0] = PxF64(intg2.y + intg2.z);
+ ir.inertiaTensor[1][1] = PxF64(intg2.x + intg2.z);
+ ir.inertiaTensor[2][2] = PxF64(intg2.x + intg2.y);
+ ir.inertiaTensor[0][1] = ir.inertiaTensor[1][0] = PxF64(-intVV.x);
+ ir.inertiaTensor[1][2] = ir.inertiaTensor[2][1] = PxF64(-intVV.y);
+ ir.inertiaTensor[0][2] = ir.inertiaTensor[2][0] = PxF64(-intVV.z);
+
+ // inertia tensor relative to center of mass
+ ir.COMInertiaTensor[0][0] = ir.inertiaTensor[0][0] -PxF64(mMassR*(ir.COM.y*ir.COM.y+ir.COM.z*ir.COM.z));
+ ir.COMInertiaTensor[1][1] = ir.inertiaTensor[1][1] -PxF64(mMassR*(ir.COM.z*ir.COM.z+ir.COM.x*ir.COM.x));
+ ir.COMInertiaTensor[2][2] = ir.inertiaTensor[2][2] -PxF64(mMassR*(ir.COM.x*ir.COM.x+ir.COM.y*ir.COM.y));
+ ir.COMInertiaTensor[0][1] = ir.COMInertiaTensor[1][0] = (ir.inertiaTensor[0][1] +PxF64(mMassR*ir.COM.x*ir.COM.y));
+ ir.COMInertiaTensor[1][2] = ir.COMInertiaTensor[2][1] = (ir.inertiaTensor[1][2] +PxF64(mMassR*ir.COM.y*ir.COM.z));
+ ir.COMInertiaTensor[0][2] = ir.COMInertiaTensor[2][0] = (ir.inertiaTensor[0][2] +PxF64(mMassR*ir.COM.z*ir.COM.x));
+
+ // inertia tensor relative to (0,0,0)
+ if (!origin.isZero())
+ {
+ PxVec3 sum = ir.COM + origin;
+ ir.inertiaTensor[0][0] -= PxF64(mMassR*((ir.COM.y*ir.COM.y+ir.COM.z*ir.COM.z) - (sum.y*sum.y+sum.z*sum.z)));
+ ir.inertiaTensor[1][1] -= PxF64(mMassR*((ir.COM.z*ir.COM.z+ir.COM.x*ir.COM.x) - (sum.z*sum.z+sum.x*sum.x)));
+ ir.inertiaTensor[2][2] -= PxF64(mMassR*((ir.COM.x*ir.COM.x+ir.COM.y*ir.COM.y) - (sum.x*sum.x+sum.y*sum.y)));
+ ir.inertiaTensor[0][1] = ir.inertiaTensor[1][0] = ir.inertiaTensor[0][1] + PxF64(mMassR*((ir.COM.x*ir.COM.y) - (sum.x*sum.y)));
+ ir.inertiaTensor[1][2] = ir.inertiaTensor[2][1] = ir.inertiaTensor[1][2] + PxF64(mMassR*((ir.COM.y*ir.COM.z) - (sum.y*sum.z)));
+ ir.inertiaTensor[0][2] = ir.inertiaTensor[2][0] = ir.inertiaTensor[0][2] + PxF64(mMassR*((ir.COM.z*ir.COM.x) - (sum.z*sum.x)));
+ ir.COM = sum;
+ }
+
+ return true;
+ }
+
+	///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+	/**
+	* Computes volume integrals for a polyhedron by summing surface integrals over its faces.
+	* Double-precision scalar implementation of Eberly's "Polyhedral Mass Properties" method:
+	* every hull polygon is fan-triangulated around its first vertex and ten moment integrals
+	* (1, x, y, z, x^2, y^2, z^2, xy, yz, zx) are accumulated over the triangles.
+	* \param ir [out] a result structure.
+	* \param origin [in] the origin of the mesh vertices. All vertices will be shifted accordingly prior to computing the volume integrals.
+	  Can improve accuracy, for example, if the centroid is used in the case of a convex mesh. Note: the returned inertia will not be relative to this origin but relative to (0,0,0).
+	* \return true if success
+	*/
+	///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+	bool VolumeIntegratorEberly::computeVolumeIntegrals(PxIntegrals& ir, const PxVec3& origin)
+	{
+		// Normalization constants for the ten surface integrals (from Eberly's derivation).
+		const PxF64 mult[10] = {1.0/6.0,1.0/24.0,1.0/24.0,1.0/24.0,1.0/60.0,1.0/60.0,1.0/60.0,1.0/120.0,1.0/120.0,1.0/120.0};
+		PxF64 intg[10] = {0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0}; // order: 1, x, y, z, x^2, y^2, z^2, xy, yz, zx
+		const PxVec3* hullVerts = static_cast<const PxVec3*> (mDesc.points.data);
+
+		for (PxU32 i = 0; i < mDesc.polygons.count; i++)
+		{
+			const Gu::HullPolygonData& polygon = (static_cast<const Gu::HullPolygonData*> (mDesc.polygons.data))[i];
+			// Per-polygon vertex indices start at mVRef8 inside the shared index buffer.
+			const PxU8* Data = static_cast<const PxU8*>(mDesc.indices.data) + polygon.mVRef8;
+			const PxU32 NbVerts = polygon.mNbVerts;
+			// Fan-triangulate the polygon around its first vertex: triangles (0, j+1, j+2).
+			for (PxU32 j = 0; j < NbVerts - 2; j++)
+			{
+				const PxVec3 p0 = hullVerts[Data[0]] - origin;
+				PxVec3 p1 = hullVerts[Data[(j + 1) % NbVerts]] - origin;
+				PxVec3 p2 = hullVerts[Data[(j + 2) % NbVerts]] - origin;
+
+				PxVec3 cp = (p1 - p0).cross(p2 - p0);
+
+				// If the triangle winding disagrees with the polygon's plane normal, flip it so
+				// the (unnormalized) cross product consistently points outward.
+				if(cp.dot(polygon.mPlane.n) < 0)
+				{
+					cp = -cp;
+					Ps::swap(p1,p2);
+				}
+
+				// Promote to double precision for the accumulation.
+				PxF64 x0 = PxF64(p0.x); PxF64 y0 = PxF64(p0.y); PxF64 z0 = PxF64(p0.z);
+				PxF64 x1 = PxF64(p1.x); PxF64 y1 = PxF64(p1.y); PxF64 z1 = PxF64(p1.z);
+				PxF64 x2 = PxF64(p2.x); PxF64 y2 = PxF64(p2.y); PxF64 z2 = PxF64(p2.z);
+
+				// get edges and cross product of edges
+				PxF64 d0 = PxF64(cp.x); PxF64 d1 = PxF64(cp.y); PxF64 d2 = PxF64(cp.z);
+
+				// compute integral terms
+				PxF64 f1x; PxF64 f2x; PxF64 f3x; PxF64 g0x; PxF64 g1x; PxF64 g2x;
+				PxF64 f1y; PxF64 f2y; PxF64 f3y; PxF64 g0y; PxF64 g1y; PxF64 g2y;
+				PxF64 f1z; PxF64 f2z; PxF64 f3z; PxF64 g0z; PxF64 g1z; PxF64 g2z;
+
+				// Per-axis polynomial subexpressions shared by the integral updates below.
+				subexpressions(x0, x1, x2, f1x, f2x, f3x, g0x, g1x, g2x);
+				subexpressions(y0, y1, y2, f1y, f2y, f3y, g0y, g1y, g2y);
+				subexpressions(z0, z1, z2, f1z, f2z, f3z, g0z, g1z, g2z);
+
+				// update integrals
+				intg[0] += d0*f1x;
+				intg[1] += d0*f2x; intg[2] += d1*f2y; intg[3] += d2*f2z;
+				intg[4] += d0*f3x; intg[5] += d1*f3y; intg[6] += d2*f3z;
+				intg[7] += d0*(y0*g0x + y1*g1x + y2*g2x);
+				intg[8] += d1*(z0*g0y + z1*g1y + z2*g2y);
+				intg[9] += d2*(x0*g0z + x1*g1z + x2*g2z);
+
+			}
+		}
+
+		// Apply the per-integral normalization constants.
+		for (PxU32 i = 0; i < 10; i++)
+		{
+			intg[i] *= mult[i];
+		}
+
+		ir.mass = mMass = intg[0];
+		// center of mass
+		// NOTE(review): divides by mMass with no zero check — a degenerate (zero-volume) hull
+		// produces NaN coordinates here; confirm callers reject such inputs upstream.
+		ir.COM.x = PxReal(intg[1]/mMass);
+		ir.COM.y = PxReal(intg[2]/mMass);
+		ir.COM.z = PxReal(intg[3]/mMass);
+
+		// inertia tensor relative to the provided origin parameter
+		ir.inertiaTensor[0][0] = intg[5]+intg[6];
+		ir.inertiaTensor[1][1] = intg[4]+intg[6];
+		ir.inertiaTensor[2][2] = intg[4]+intg[5];
+		ir.inertiaTensor[0][1] = ir.inertiaTensor[1][0] = -intg[7];
+		ir.inertiaTensor[1][2] = ir.inertiaTensor[2][1] = -intg[8];
+		ir.inertiaTensor[0][2] = ir.inertiaTensor[2][0] = -intg[9];
+
+		// inertia tensor relative to center of mass (parallel axis shift by the COM offset)
+		ir.COMInertiaTensor[0][0] = ir.inertiaTensor[0][0] -mMass*PxF64((ir.COM.y*ir.COM.y+ir.COM.z*ir.COM.z));
+		ir.COMInertiaTensor[1][1] = ir.inertiaTensor[1][1] -mMass*PxF64((ir.COM.z*ir.COM.z+ir.COM.x*ir.COM.x));
+		ir.COMInertiaTensor[2][2] = ir.inertiaTensor[2][2] -mMass*PxF64((ir.COM.x*ir.COM.x+ir.COM.y*ir.COM.y));
+		ir.COMInertiaTensor[0][1] = ir.COMInertiaTensor[1][0] = (ir.inertiaTensor[0][1] +mMass*PxF64(ir.COM.x*ir.COM.y));
+		ir.COMInertiaTensor[1][2] = ir.COMInertiaTensor[2][1] = (ir.inertiaTensor[1][2] +mMass*PxF64(ir.COM.y*ir.COM.z));
+		ir.COMInertiaTensor[0][2] = ir.COMInertiaTensor[2][0] = (ir.inertiaTensor[0][2] +mMass*PxF64(ir.COM.z*ir.COM.x));
+
+		// inertia tensor relative to (0,0,0): shift back by the original origin offset
+		if (!origin.isZero())
+		{
+			PxVec3 sum = ir.COM + origin;
+			ir.inertiaTensor[0][0] -= mMass*PxF64((ir.COM.y*ir.COM.y+ir.COM.z*ir.COM.z) - (sum.y*sum.y+sum.z*sum.z));
+			ir.inertiaTensor[1][1] -= mMass*PxF64((ir.COM.z*ir.COM.z+ir.COM.x*ir.COM.x) - (sum.z*sum.z+sum.x*sum.x));
+			ir.inertiaTensor[2][2] -= mMass*PxF64((ir.COM.x*ir.COM.x+ir.COM.y*ir.COM.y) - (sum.x*sum.x+sum.y*sum.y));
+			ir.inertiaTensor[0][1] = ir.inertiaTensor[1][0] = ir.inertiaTensor[0][1] + mMass*PxF64((ir.COM.x*ir.COM.y) - (sum.x*sum.y));
+			ir.inertiaTensor[1][2] = ir.inertiaTensor[2][1] = ir.inertiaTensor[1][2] + mMass*PxF64((ir.COM.y*ir.COM.z) - (sum.y*sum.z));
+			ir.inertiaTensor[0][2] = ir.inertiaTensor[2][0] = ir.inertiaTensor[0][2] + mMass*PxF64((ir.COM.z*ir.COM.x) - (sum.z*sum.x));
+			ir.COM = sum;
+		}
+
+		return true;
+	}
+} // namespace
+
+// Thin wrapper: construct the triangle-mesh integrator and forward the call.
+bool computeVolumeIntegrals(const PxSimpleTriangleMesh& mesh, PxReal density, PxIntegrals& integrals)
+{
+	VolumeIntegrator integrator(mesh, PxF64(density));
+	return integrator.computeVolumeIntegrals(integrals);
+}
+
+// Wrapper: runs the scalar Eberly integrator on a convex mesh description.
+bool computeVolumeIntegralsEberly(const PxConvexMeshDesc& mesh, PxReal density, PxIntegrals& integrals, const PxVec3& origin)
+{
+	VolumeIntegratorEberly v(mesh, PxF64(density));
+	// Propagate the integrator's status instead of unconditionally returning true,
+	// matching the computeVolumeIntegrals() wrapper above.
+	return v.computeVolumeIntegrals(integrals, origin);
+}
+
+// Wrapper: runs the SIMD Eberly integrator on a convex mesh description.
+bool computeVolumeIntegralsEberlySIMD(const PxConvexMeshDesc& mesh, PxReal density, PxIntegrals& integrals, const PxVec3& origin)
+{
+	VolumeIntegratorEberly v(mesh, PxF64(density));
+	// Propagate the integrator's status instead of unconditionally returning true,
+	// matching the computeVolumeIntegrals() wrapper above.
+	return v.computeVolumeIntegralsSIMD(integrals, origin);
+}
+
+}
+
+//#endif
diff --git a/PhysX_3.4/Source/PhysXCooking/src/convex/VolumeIntegration.h b/PhysX_3.4/Source/PhysXCooking/src/convex/VolumeIntegration.h
new file mode 100644
index 00000000..559dc2f9
--- /dev/null
+++ b/PhysX_3.4/Source/PhysXCooking/src/convex/VolumeIntegration.h
@@ -0,0 +1,102 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2016 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+
+#ifndef PX_FOUNDATION_NXVOLUMEINTEGRATION
+#define PX_FOUNDATION_NXVOLUMEINTEGRATION
+/** \addtogroup foundation
+ @{
+*/
+
+
+#include "foundation/Px.h"
+#include "foundation/PxVec3.h"
+#include "foundation/PxMat33.h"
+#include "CmPhysXCommon.h"
+
+namespace physx
+{
+
+class PxSimpleTriangleMesh;
+class PxConvexMeshDesc;
+
+/**
+\brief Data structure used to store mass properties.
+
+Mass and inertia are accumulated in double precision (PxF64); the getters below
+narrow to single precision when copying into a PxMat33.
+*/
+struct PxIntegrals
+	{
+		PxVec3 COM;						//!< Center of mass
+		PxF64 mass;						//!< Total mass
+		PxF64 inertiaTensor[3][3];		//!< Inertia tensor (mass matrix) relative to the origin
+		PxF64 COMInertiaTensor[3][3];	//!< Inertia tensor (mass matrix) relative to the COM
+
+		/**
+		\brief Retrieve the inertia tensor relative to the center of mass.
+
+		\param inertia Inertia tensor.
+		*/
+		void getInertia(PxMat33& inertia)
+		{
+			for(PxU32 j=0;j<3;j++)
+			{
+				for(PxU32 i=0;i<3;i++)
+				{
+					// Narrowing PxF64 -> PxF32 conversion; precision loss is accepted here.
+					inertia(i,j) = PxF32(COMInertiaTensor[i][j]);
+				}
+			}
+		}
+
+		/**
+		\brief Retrieve the inertia tensor relative to the origin.
+
+		\param inertia Inertia tensor.
+		*/
+		void getOriginInertia(PxMat33& inertia)
+		{
+			for(PxU32 j=0;j<3;j++)
+			{
+				for(PxU32 i=0;i<3;i++)
+				{
+					// Narrowing PxF64 -> PxF32 conversion; precision loss is accepted here.
+					inertia(i,j) = PxF32(inertiaTensor[i][j]);
+				}
+			}
+		}
+	};
+
+	// general method: integrates over the triangles of an arbitrary simple mesh
+	bool computeVolumeIntegrals(const PxSimpleTriangleMesh& mesh, PxReal density, PxIntegrals& integrals);
+
+	// specialized method taking polygons directly, so we don't need to compute and store triangles for each polygon
+	bool computeVolumeIntegralsEberly(const PxConvexMeshDesc& mesh, PxReal density, PxIntegrals& integrals, const PxVec3& origin); // Eberly simplified method
+
+	// specialized method taking polygons directly, so we don't need to compute and store triangles for each polygon, SIMD version
+	bool computeVolumeIntegralsEberlySIMD(const PxConvexMeshDesc& mesh, PxReal density, PxIntegrals& integrals, const PxVec3& origin); // Eberly simplified method
+}
+
+ /** @} */
+#endif