aboutsummaryrefslogtreecommitdiff
path: root/PhysX_3.4/Source/PhysXCooking/src
diff options
context:
space:
mode:
authorgit perforce import user <a@b>2016-10-25 12:29:14 -0600
committerSheikh Dawood Abdul Ajees <Sheikh Dawood Abdul Ajees>2016-10-25 18:56:37 -0500
commit3dfe2108cfab31ba3ee5527e217d0d8e99a51162 (patch)
treefa6485c169e50d7415a651bf838f5bcd0fd3bfbd /PhysX_3.4/Source/PhysXCooking/src
downloadphysx-3.4-3dfe2108cfab31ba3ee5527e217d0d8e99a51162.tar.xz
physx-3.4-3dfe2108cfab31ba3ee5527e217d0d8e99a51162.zip
Initial commit:
PhysX 3.4.0 Update @ 21294896 APEX 1.4.0 Update @ 21275617 [CL 21300167]
Diffstat (limited to 'PhysX_3.4/Source/PhysXCooking/src')
-rw-r--r--PhysX_3.4/Source/PhysXCooking/src/Adjacencies.cpp712
-rw-r--r--PhysX_3.4/Source/PhysXCooking/src/Adjacencies.h234
-rw-r--r--PhysX_3.4/Source/PhysXCooking/src/Cooking.cpp493
-rw-r--r--PhysX_3.4/Source/PhysXCooking/src/Cooking.h87
-rw-r--r--PhysX_3.4/Source/PhysXCooking/src/CookingUtils.cpp120
-rw-r--r--PhysX_3.4/Source/PhysXCooking/src/CookingUtils.h79
-rw-r--r--PhysX_3.4/Source/PhysXCooking/src/EdgeList.cpp753
-rw-r--r--PhysX_3.4/Source/PhysXCooking/src/EdgeList.h110
-rw-r--r--PhysX_3.4/Source/PhysXCooking/src/MeshCleaner.cpp233
-rw-r--r--PhysX_3.4/Source/PhysXCooking/src/MeshCleaner.h55
-rw-r--r--PhysX_3.4/Source/PhysXCooking/src/Quantizer.cpp338
-rw-r--r--PhysX_3.4/Source/PhysXCooking/src/Quantizer.h76
-rw-r--r--PhysX_3.4/Source/PhysXCooking/src/convex/BigConvexDataBuilder.cpp353
-rw-r--r--PhysX_3.4/Source/PhysXCooking/src/convex/BigConvexDataBuilder.h100
-rw-r--r--PhysX_3.4/Source/PhysXCooking/src/convex/ConvexHullBuilder.cpp797
-rw-r--r--PhysX_3.4/Source/PhysXCooking/src/convex/ConvexHullBuilder.h95
-rw-r--r--PhysX_3.4/Source/PhysXCooking/src/convex/ConvexHullLib.cpp299
-rw-r--r--PhysX_3.4/Source/PhysXCooking/src/convex/ConvexHullLib.h82
-rw-r--r--PhysX_3.4/Source/PhysXCooking/src/convex/ConvexHullUtils.cpp925
-rw-r--r--PhysX_3.4/Source/PhysXCooking/src/convex/ConvexHullUtils.h177
-rw-r--r--PhysX_3.4/Source/PhysXCooking/src/convex/ConvexMeshBuilder.cpp504
-rw-r--r--PhysX_3.4/Source/PhysXCooking/src/convex/ConvexMeshBuilder.h100
-rw-r--r--PhysX_3.4/Source/PhysXCooking/src/convex/ConvexPolygonsBuilder.cpp1328
-rw-r--r--PhysX_3.4/Source/PhysXCooking/src/convex/ConvexPolygonsBuilder.h64
-rw-r--r--PhysX_3.4/Source/PhysXCooking/src/convex/InflationConvexHullLib.cpp1481
-rw-r--r--PhysX_3.4/Source/PhysXCooking/src/convex/InflationConvexHullLib.h133
-rw-r--r--PhysX_3.4/Source/PhysXCooking/src/convex/QuickHullConvexHullLib.cpp2383
-rw-r--r--PhysX_3.4/Source/PhysXCooking/src/convex/QuickHullConvexHullLib.h97
-rw-r--r--PhysX_3.4/Source/PhysXCooking/src/convex/VolumeIntegration.cpp797
-rw-r--r--PhysX_3.4/Source/PhysXCooking/src/convex/VolumeIntegration.h102
-rw-r--r--PhysX_3.4/Source/PhysXCooking/src/mesh/GrbTriangleMeshCooking.cpp29
-rw-r--r--PhysX_3.4/Source/PhysXCooking/src/mesh/GrbTriangleMeshCooking.h337
-rw-r--r--PhysX_3.4/Source/PhysXCooking/src/mesh/HeightFieldCooking.cpp84
-rw-r--r--PhysX_3.4/Source/PhysXCooking/src/mesh/HeightFieldCooking.h35
-rw-r--r--PhysX_3.4/Source/PhysXCooking/src/mesh/QuickSelect.h114
-rw-r--r--PhysX_3.4/Source/PhysXCooking/src/mesh/RTreeCooking.cpp893
-rw-r--r--PhysX_3.4/Source/PhysXCooking/src/mesh/RTreeCooking.h51
-rw-r--r--PhysX_3.4/Source/PhysXCooking/src/mesh/TriangleMeshBuilder.cpp1443
-rw-r--r--PhysX_3.4/Source/PhysXCooking/src/mesh/TriangleMeshBuilder.h120
-rw-r--r--PhysX_3.4/Source/PhysXCooking/src/windows/WindowsCookingDelayLoadHook.cpp82
40 files changed, 16295 insertions, 0 deletions
diff --git a/PhysX_3.4/Source/PhysXCooking/src/Adjacencies.cpp b/PhysX_3.4/Source/PhysXCooking/src/Adjacencies.cpp
new file mode 100644
index 00000000..63797c1a
--- /dev/null
+++ b/PhysX_3.4/Source/PhysXCooking/src/Adjacencies.cpp
@@ -0,0 +1,712 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2016 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#include "foundation/PxMemory.h"
+#include "EdgeList.h"
+#include "Adjacencies.h"
+#include "CmRadixSortBuffered.h"
+#include "GuSerialize.h"
+#include "PsFoundation.h"
+
+using namespace physx;
+using namespace Gu;
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+/**
+ * Flips the winding.
+ */
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+void AdjTriangle::Flip()
+{
+#ifdef MSH_ADJACENCIES_INCLUDE_TOPOLOGY
+ // Call the Triangle method
+ // (swaps vertex references 1 & 2 in the stored topology)
+ IndexedTriangle::Flip();
+#endif
+
+ // Flip links. We flipped vertex references 1 & 2, i.e. links 0 & 1.
+ // mATri[0] encodes edge 0-1 and mATri[1] encodes edge 0-2, so swapping
+ // verts 1 & 2 makes those two edges trade places (edge 1-2 is unchanged).
+ physx::shdfnd::swap(mATri[0], mATri[1]);
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+/**
+ * Computes the number of boundary edges in a triangle.
+ * \return the number of boundary edges. (0 => 3)
+ */
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+PxU32 AdjTriangle::ComputeNbBoundaryEdges() const
+{
+ // Look for boundary edges
+ // A boundary edge is one whose link carries the IS_BOUNDARY mark,
+ // i.e. there is no adjacent triangle across that edge.
+ PxU32 Nb = 0;
+ if(IS_BOUNDARY(mATri[0])) Nb++;
+ if(IS_BOUNDARY(mATri[1])) Nb++;
+ if(IS_BOUNDARY(mATri[2])) Nb++;
+ return Nb;
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+/**
+ * Computes the number of valid neighbors.
+ * \return the number of neighbors. (0 => 3)
+ */
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+PxU32 AdjTriangle::ComputeNbNeighbors() const
+{
+ // Complement of ComputeNbBoundaryEdges(): each non-boundary link is a
+ // valid adjacent triangle, so the two counts always sum to 3.
+ PxU32 Nb = 0;
+ if(!IS_BOUNDARY(mATri[0])) Nb++;
+ if(!IS_BOUNDARY(mATri[1])) Nb++;
+ if(!IS_BOUNDARY(mATri[2])) Nb++;
+ return Nb;
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+/**
+ * Checks whether the triangle has a particular neighbor or not.
+ * \param tref [in] the triangle reference to look for
+ * \param index [out] the corresponding index in the triangle (NULL if not needed)
+ * \return true if the triangle has the given neighbor
+ */
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+bool AdjTriangle::HasNeighbor(PxU32 tref, PxU32* index) const
+{
+ // ### could be optimized
+ // MAKE_ADJ_TRI strips the edge-code bits from the link before comparing
+ // against the raw triangle index. 'index' is left untouched when the
+ // neighbor is not found.
+ if(!IS_BOUNDARY(mATri[0]) && MAKE_ADJ_TRI(mATri[0])==tref) { if(index) *index = 0; return true; }
+ if(!IS_BOUNDARY(mATri[1]) && MAKE_ADJ_TRI(mATri[1])==tref) { if(index) *index = 1; return true; }
+ if(!IS_BOUNDARY(mATri[2]) && MAKE_ADJ_TRI(mATri[2])==tref) { if(index) *index = 2; return true; }
+ return false;
+}
+
+
+
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+/**
+ * Constructor.
+ */
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+Adjacencies::Adjacencies() : mNbFaces(0), mFaces(NULL)
+{
+ // Nothing else to do: mFaces is allocated later by Load() or by the builder.
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+/**
+ * Destructor.
+ */
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+Adjacencies::~Adjacencies()
+{
+ // Release the adjacency triangles allocated in Load()/Init().
+ PX_DELETE_ARRAY(mFaces);
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+/**
+ * Computes the number of boundary edges.
+ * \return the number of boundary edges.
+ */
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+PxU32 Adjacencies::ComputeNbBoundaryEdges() const
+{
+ // Checking
+ // No adjacency data yet => no boundary edges to report.
+ if(!mFaces) return 0;
+
+ // Look for boundary edges
+ // Simply accumulate the per-triangle counts over the whole mesh.
+ PxU32 Nb = 0;
+ for(PxU32 i=0;i<mNbFaces;i++)
+ {
+ AdjTriangle* CurTri = &mFaces[i];
+ Nb+=CurTri->ComputeNbBoundaryEdges();
+ }
+ return Nb;
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+/**
+ * Computes the boundary vertices. A boundary vertex is defined as a vertex shared by at least one boundary edge.
+ * \param nb_verts [in] the number of vertices
+ * \param bound_status [out] a user-provided array of bool
+ * \return true if success. The user-array is filled with true or false (boundary vertex / not boundary vertex)
+ */
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+#ifdef MSH_ADJACENCIES_INCLUDE_TOPOLOGY
+bool Adjacencies::GetBoundaryVertices(PxU32 nb_verts, bool* bound_status) const
+#else
+// Without embedded topology the caller must supply the triangle list ('faces')
+// that matches mFaces one-to-one, since AdjTriangle then stores links only.
+bool Adjacencies::GetBoundaryVertices(PxU32 nb_verts, bool* bound_status, const Gu::TriangleT<PxU32>* faces) const
+#endif
+{
+ // We need the adjacencies
+ if(!mFaces || !bound_status || !nb_verts)
+ {
+ Ps::getFoundation().error(PxErrorCode::eINVALID_OPERATION, __FILE__, __LINE__, "Adjacencies::GetBoundaryVertices: NULL parameter!");
+ return false;
+ }
+
+#ifndef MSH_ADJACENCIES_INCLUDE_TOPOLOGY
+ if(!faces)
+ {
+ Ps::getFoundation().error(PxErrorCode::eINVALID_OPERATION, __FILE__, __LINE__, "Adjacencies::GetBoundaryVertices: NULL parameter!");
+ return false;
+ }
+#endif
+
+ // Init
+ // Mark everything non-boundary, then flag the endpoints of each boundary edge.
+ PxMemZero(bound_status, nb_verts*sizeof(bool));
+
+ // Loop through faces
+ // Link slot => edge mapping: mATri[0] = edge 0-1, mATri[1] = edge 0-2, mATri[2] = edge 1-2.
+ // Any vertex reference out of range aborts with 'false' (bound_status is then partially filled).
+ for(PxU32 i=0;i<mNbFaces;i++)
+ {
+ AdjTriangle* CurTri = &mFaces[i];
+ if(IS_BOUNDARY(CurTri->mATri[0]))
+ {
+ // Two boundary vertices: 0 - 1
+#ifdef MSH_ADJACENCIES_INCLUDE_TOPOLOGY
+ PxU32 VRef0 = CurTri->v[0]; if(VRef0>=nb_verts) return false; bound_status[VRef0] = true;
+ PxU32 VRef1 = CurTri->v[1]; if(VRef1>=nb_verts) return false; bound_status[VRef1] = true;
+#else
+ PxU32 VRef0 = faces[i].v[0]; if(VRef0>=nb_verts) return false; bound_status[VRef0] = true;
+ PxU32 VRef1 = faces[i].v[1]; if(VRef1>=nb_verts) return false; bound_status[VRef1] = true;
+#endif
+ }
+ if(IS_BOUNDARY(CurTri->mATri[1]))
+ {
+ // Two boundary vertices: 0 - 2
+#ifdef MSH_ADJACENCIES_INCLUDE_TOPOLOGY
+ PxU32 VRef0 = CurTri->v[0]; if(VRef0>=nb_verts) return false; bound_status[VRef0] = true;
+ PxU32 VRef1 = CurTri->v[2]; if(VRef1>=nb_verts) return false; bound_status[VRef1] = true;
+#else
+ PxU32 VRef0 = faces[i].v[0]; if(VRef0>=nb_verts) return false; bound_status[VRef0] = true;
+ PxU32 VRef1 = faces[i].v[2]; if(VRef1>=nb_verts) return false; bound_status[VRef1] = true;
+#endif
+ }
+ if(IS_BOUNDARY(CurTri->mATri[2]))
+ {
+ // Two boundary vertices: 1 - 2
+#ifdef MSH_ADJACENCIES_INCLUDE_TOPOLOGY
+ PxU32 VRef0 = CurTri->v[1]; if(VRef0>=nb_verts) return false; bound_status[VRef0] = true;
+ PxU32 VRef1 = CurTri->v[2]; if(VRef1>=nb_verts) return false; bound_status[VRef1] = true;
+#else
+ PxU32 VRef0 = faces[i].v[1]; if(VRef0>=nb_verts) return false; bound_status[VRef0] = true;
+ PxU32 VRef1 = faces[i].v[2]; if(VRef1>=nb_verts) return false; bound_status[VRef1] = true;
+#endif
+ }
+ }
+
+ return true;
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+/**
+ * Assigns a new edge code to the counterpart link of a given link.
+ * \param link [in] the link to modify - shouldn't be a boundary link
+ * \param edge_nb [in] the new edge number
+ */
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+void Adjacencies::AssignNewEdgeCode(PxU32 link, PxU8 edge_nb)
+{
+ // Boundary links have no counterpart triangle, so there is nothing to patch.
+ if(!IS_BOUNDARY(link))
+ {
+ PxU32 Id = MAKE_ADJ_TRI(link); // Triangle ID
+ PxU32 Edge = GET_EDGE_NB(link); // Counterpart edge ID
+ AdjTriangle* Tri = &mFaces[Id]; // Adjacent triangle
+
+ // Get link whose edge code is invalid
+ // The adjacent triangle's link points back at us; only its edge-code bits
+ // (which name OUR edge slot) need updating — the triangle index part stays.
+ PxU32 AdjLink = Tri->mATri[Edge]; // Link to ourself (i.e. to 'link')
+ SET_EDGE_NB(AdjLink, edge_nb); // Assign new edge code
+ Tri->mATri[Edge] = AdjLink; // Put link back
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+/**
+ * Modifies the existing database so that reference 'vref' of triangle 'curtri' becomes the last one.
+ * Provided reference must already exist in provided triangle.
+ * \param cur_tri [in] the triangle
+ * \param vref [in] the reference
+ * \return true if success.
+ */
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+#ifdef MSH_ADJACENCIES_INCLUDE_TOPOLOGY
+bool Adjacencies::MakeLastRef(AdjTriangle& cur_tri, PxU32 vref)
+#else
+// Without embedded topology the vertex references live in 'cur_topo',
+// which must be the triangle matching 'cur_tri'; it is rotated in lockstep.
+bool Adjacencies::MakeLastRef(AdjTriangle& cur_tri, PxU32 vref, Gu::TriangleT<PxU32>* cur_topo)
+#endif
+{
+#ifndef MSH_ADJACENCIES_INCLUDE_TOPOLOGY
+ // Checkings
+ if(!cur_topo)
+ {
+ Ps::getFoundation().error(PxErrorCode::eINVALID_OPERATION, __FILE__, __LINE__, "Adjacencies::MakeLastRef: NULL parameter!");
+ return false;
+ }
+#endif
+ // We want pattern (x y vref)
+ // Edge 0-1 is (x y)
+ // Edge 0-2 is (x vref)
+ // Edge 1-2 is (y vref)
+
+ // First thing is to scroll the existing references in order for vref to become the last one. Scrolling assures winding order is conserved.
+
+ // Edge code need fixing as well:
+ // The two MSB for each link encode the counterpart edge in adjacent triangle. We swap the link positions, but adjacent triangles remain the
+ // same. In other words, edge codes are still valid for current triangle since counterpart edges have not been swapped. *BUT* edge codes of
+ // the three possible adjacent triangles *are* now invalid. We need to fix edge codes, but for adjacent triangles...
+
+#ifdef MSH_ADJACENCIES_INCLUDE_TOPOLOGY
+ if(cur_tri.v[0]==vref)
+#else
+ if(cur_topo->v[0]==vref)
+#endif
+ {
+ // Pattern is (vref x y)
+ // Edge 0-1 is (vref x)
+ // Edge 0-2 is (vref y)
+ // Edge 1-2 is (x y)
+
+ // Catch original data
+#ifdef MSH_ADJACENCIES_INCLUDE_TOPOLOGY
+ PxU32 Ref0 = cur_tri.v[0]; PxU32 Link01 = cur_tri.mATri[0];
+ PxU32 Ref1 = cur_tri.v[1]; PxU32 Link02 = cur_tri.mATri[1];
+ PxU32 Ref2 = cur_tri.v[2]; PxU32 Link12 = cur_tri.mATri[2];
+
+ // Swap
+ // Rotate left once: (vref x y) -> (x y vref)
+ cur_tri.v[0] = Ref1;
+ cur_tri.v[1] = Ref2;
+ cur_tri.v[2] = Ref0;
+#else
+ PxU32 Ref0 = cur_topo->v[0]; PxU32 Link01 = cur_tri.mATri[0];
+ PxU32 Ref1 = cur_topo->v[1]; PxU32 Link02 = cur_tri.mATri[1];
+ PxU32 Ref2 = cur_topo->v[2]; PxU32 Link12 = cur_tri.mATri[2];
+
+ // Swap
+ // Rotate left once: (vref x y) -> (x y vref)
+ cur_topo->v[0] = Ref1;
+ cur_topo->v[1] = Ref2;
+ cur_topo->v[2] = Ref0;
+#endif
+ cur_tri.mATri[0] = Link12; // Edge 0-1 now encodes Ref1-Ref2, i.e. previous Link12
+ cur_tri.mATri[1] = Link01; // Edge 0-2 now encodes Ref1-Ref0, i.e. previous Link01
+ cur_tri.mATri[2] = Link02; // Edge 1-2 now encodes Ref2-Ref0, i.e. previous Link02
+
+ // Fix edge codes
+ // Tell each neighbor the NEW slot its shared edge now occupies in this triangle.
+ AssignNewEdgeCode(Link01, 1);
+ AssignNewEdgeCode(Link02, 2);
+ AssignNewEdgeCode(Link12, 0);
+
+ return true;
+ }
+#ifdef MSH_ADJACENCIES_INCLUDE_TOPOLOGY
+ else if(cur_tri.v[1]==vref)
+#else
+ else if(cur_topo->v[1]==vref)
+#endif
+ {
+ // Pattern is (x vref y)
+ // Edge 0-1 is (x vref)
+ // Edge 0-2 is (x y)
+ // Edge 1-2 is (vref y)
+
+ // Catch original data
+#ifdef MSH_ADJACENCIES_INCLUDE_TOPOLOGY
+ PxU32 Ref0 = cur_tri.v[0]; PxU32 Link01 = cur_tri.mATri[0];
+ PxU32 Ref1 = cur_tri.v[1]; PxU32 Link02 = cur_tri.mATri[1];
+ PxU32 Ref2 = cur_tri.v[2]; PxU32 Link12 = cur_tri.mATri[2];
+
+ // Swap
+ // Rotate right once: (x vref y) -> (y x vref)
+ cur_tri.v[0] = Ref2;
+ cur_tri.v[1] = Ref0;
+ cur_tri.v[2] = Ref1;
+#else
+ PxU32 Ref0 = cur_topo->v[0]; PxU32 Link01 = cur_tri.mATri[0];
+ PxU32 Ref1 = cur_topo->v[1]; PxU32 Link02 = cur_tri.mATri[1];
+ PxU32 Ref2 = cur_topo->v[2]; PxU32 Link12 = cur_tri.mATri[2];
+
+ // Swap
+ // Rotate right once: (x vref y) -> (y x vref)
+ cur_topo->v[0] = Ref2;
+ cur_topo->v[1] = Ref0;
+ cur_topo->v[2] = Ref1;
+#endif
+ cur_tri.mATri[0] = Link02; // Edge 0-1 now encodes Ref2-Ref0, i.e. previous Link02
+ cur_tri.mATri[1] = Link12; // Edge 0-2 now encodes Ref2-Ref1, i.e. previous Link12
+ cur_tri.mATri[2] = Link01; // Edge 1-2 now encodes Ref0-Ref1, i.e. previous Link01
+
+ // Fix edge codes
+ // Tell each neighbor the NEW slot its shared edge now occupies in this triangle.
+ AssignNewEdgeCode(Link01, 2);
+ AssignNewEdgeCode(Link02, 0);
+ AssignNewEdgeCode(Link12, 1);
+
+ return true;
+ }
+#ifdef MSH_ADJACENCIES_INCLUDE_TOPOLOGY
+ else if(cur_tri.v[2]==vref)
+#else
+ else if(cur_topo->v[2]==vref)
+#endif
+ {
+ // Nothing to do, provided reference already is the last one
+ return true;
+ }
+
+ // Here the provided reference doesn't belong to the provided triangle.
+ return false;
+}
+
+// Deserializes adjacency data previously written with the 'ADJA' chunk header.
+// \param stream [in] input stream positioned at the start of the chunk
+// \return true if the header was recognized and the data was read.
+bool Adjacencies::Load(PxInputStream& stream)
+{
+ // Import header
+ PxU32 Version;
+ bool Mismatch;
+ if(!ReadHeader('A', 'D', 'J', 'A', Version, Mismatch, stream))
+ return false;
+
+ // Import adjacencies
+ // NOTE(review): mNbFaces comes straight from the stream with no sanity bound,
+ // and the stream.read() return value is unchecked — a truncated or corrupt
+ // stream leaves mFaces partially initialized. Confirm callers trust the source.
+ mNbFaces = readDword(Mismatch, stream);
+ mFaces = PX_NEW(AdjTriangle)[mNbFaces];
+ stream.read(mFaces, sizeof(AdjTriangle)*mNbFaces);
+
+ return true;
+}
+
+//#ifdef PX_COOKING
+
+ //! An edge class used to compute the adjacency structures.
+ class AdjEdge : public Gu::EdgeData, public Ps::UserAllocated
+ {
+ public:
+ //! Constructor
+ PX_INLINE AdjEdge() {}
+ //! Destructor
+ PX_INLINE ~AdjEdge() {}
+
+ PxU32 mFaceNb; //!< Owner face
+ };
+
+ ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+ /**
+ * Adds a new edge to the database.
+ * \param ref0 [in] vertex reference for the new edge
+ * \param ref1 [in] vertex reference for the new edge
+ * \param face [in] owner face
+ */
+ ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+ // Appends one edge record to 'edges' and bumps 'nb_edges'.
+ // Callers are expected to pass ref0 < ref1 (see AddTriangle) so that
+ // identical edges from two triangles compare equal after sorting.
+ // No capacity check: the caller sizes 'edges' to 3 per triangle.
+ static void AddEdge(PxU32 ref0, PxU32 ref1, PxU32 face, PxU32& nb_edges, AdjEdge* edges)
+ {
+ // Store edge data
+ edges[nb_edges].Ref0 = ref0;
+ edges[nb_edges].Ref1 = ref1;
+ edges[nb_edges].mFaceNb = face;
+ nb_edges++;
+ }
+
+ ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+ /**
+ * Adds a new triangle to the database.
+ * \param ref0 [in] vertex reference for the new triangle
+ * \param ref1 [in] vertex reference for the new triangle
+ * \param ref2 [in] vertex reference for the new triangle
+ * \param id [in] triangle index
+ */
+ ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+ // Records triangle 'id' in 'faces' with all three links reset to
+ // PX_INVALID_U32 (boundary until proven otherwise), and emits its three
+ // edges into the edge database with vertex refs sorted (smaller first)
+ // so shared edges can be matched by the later sort in CreateDatabase.
+ static void AddTriangle(PxU32 ref0, PxU32 ref1, PxU32 ref2, PxU32 id, AdjTriangle* faces, PxU32& nb_edges, AdjEdge* edges)
+ {
+#ifdef MSH_ADJACENCIES_INCLUDE_TOPOLOGY
+ // Store vertex-references
+ faces[id].v[0] = ref0;
+ faces[id].v[1] = ref1;
+ faces[id].v[2] = ref2;
+#endif
+ // Reset links
+ faces[id].mATri[0] = PX_INVALID_U32;
+ faces[id].mATri[1] = PX_INVALID_U32;
+ faces[id].mATri[2] = PX_INVALID_U32;
+
+ // Add edge 01 to database
+ if(ref0<ref1) AddEdge(ref0, ref1, id, nb_edges, edges);
+ else AddEdge(ref1, ref0, id, nb_edges, edges);
+ // Add edge 02 to database
+ if(ref0<ref2) AddEdge(ref0, ref2, id, nb_edges, edges);
+ else AddEdge(ref2, ref0, id, nb_edges, edges);
+ // Add edge 12 to database
+ if(ref1<ref2) AddEdge(ref1, ref2, id, nb_edges, edges);
+ else AddEdge(ref2, ref1, id, nb_edges, edges);
+ }
+
+ ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+ /**
+ * Updates the links in two adjacent triangles.
+ * \param first_tri [in] index of the first triangle
+ * \param second_tri [in] index of the second triangle
+ * \param ref0 [in] the common edge's first vertex reference
+ * \param ref1 [in] the common edge's second vertex reference
+ * \return true if success.
+ */
+ ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+#ifdef MSH_ADJACENCIES_INCLUDE_TOPOLOGY
+ static bool UpdateLink(PxU32 first_tri, PxU32 second_tri, PxU32 ref0, PxU32 ref1, AdjTriangle* faces)
+#else
+ // Without embedded topology the vertex refs are re-fetched from the
+ // original creation data ('create'), since AdjTriangle stores links only.
+ static bool UpdateLink(PxU32 first_tri, PxU32 second_tri, PxU32 ref0, PxU32 ref1, AdjTriangle* faces, const ADJACENCIESCREATE& create)
+#endif
+ {
+#ifdef MSH_ADJACENCIES_INCLUDE_TOPOLOGY
+ AdjTriangle& Tri0 = faces[first_tri]; // Catch the first triangle
+ AdjTriangle& Tri1 = faces[second_tri]; // Catch the second triangle
+
+ // Get the edge IDs. 0xff means input references are wrong.
+ PxU8 EdgeNb0 = Tri0.FindEdge(ref0, ref1); if(EdgeNb0==0xff) return SetIceError("Adjacencies::UpdateLink: invalid edge reference in first triangle");
+ PxU8 EdgeNb1 = Tri1.FindEdge(ref0, ref1); if(EdgeNb1==0xff) return SetIceError("Adjacencies::UpdateLink: invalid edge reference in second triangle");
+
+ // Update links. The two most significant bits contain the counterpart edge's ID.
+ Tri0.mATri[EdgeNb0] = second_tri |(PxU32(EdgeNb1)<<30);
+ Tri1.mATri[EdgeNb1] = first_tri |(PxU32(EdgeNb0)<<30);
+#else
+ // Rebuild the two triangles' vertex refs from whichever index buffer was
+ // supplied (32-bit DFaces takes priority over 16-bit WFaces when both are set).
+ Gu::TriangleT<PxU32> FirstTri, SecondTri;
+ if(create.DFaces)
+ {
+ FirstTri.v[0] = create.DFaces[first_tri*3+0];
+ FirstTri.v[1] = create.DFaces[first_tri*3+1];
+ FirstTri.v[2] = create.DFaces[first_tri*3+2];
+ SecondTri.v[0] = create.DFaces[second_tri*3+0];
+ SecondTri.v[1] = create.DFaces[second_tri*3+1];
+ SecondTri.v[2] = create.DFaces[second_tri*3+2];
+ }
+ if(create.WFaces)
+ {
+ FirstTri.v[0] = create.WFaces[first_tri*3+0];
+ FirstTri.v[1] = create.WFaces[first_tri*3+1];
+ FirstTri.v[2] = create.WFaces[first_tri*3+2];
+ SecondTri.v[0] = create.WFaces[second_tri*3+0];
+ SecondTri.v[1] = create.WFaces[second_tri*3+1];
+ SecondTri.v[2] = create.WFaces[second_tri*3+2];
+ }
+
+ // Get the edge IDs. 0xff means input references are wrong.
+ const PxU8 EdgeNb0 = FirstTri.findEdge(ref0, ref1);
+ const PxU8 EdgeNb1 = SecondTri.findEdge(ref0, ref1);
+ if(EdgeNb0==0xff || EdgeNb1==0xff)
+ {
+ Ps::getFoundation().error(PxErrorCode::eINVALID_OPERATION, __FILE__, __LINE__, "Adjacencies::UpdateLink: invalid edge reference");
+ return false;
+ }
+
+ // Update links. The two most significant bits contain the counterpart edge's ID.
+ faces[first_tri].mATri[EdgeNb0] = second_tri |(PxU32(EdgeNb1)<<30);
+ faces[second_tri].mATri[EdgeNb1] = first_tri |(PxU32(EdgeNb0)<<30);
+#endif
+ return true;
+ }
+
+ ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+ /**
+ * Creates the adjacency structures.
+ * \return true if success.
+ */
+ ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+#ifdef MSH_ADJACENCIES_INCLUDE_TOPOLOGY
+ static bool CreateDatabase(AdjTriangle* faces, PxU32 nb_edges, const AdjEdge* edges)
+#else
+ static bool CreateDatabase(AdjTriangle* faces, PxU32 nb_edges, const AdjEdge* edges, const ADJACENCIESCREATE& create)
+#endif
+ {
+ // Two-pass radix sort keyed on (Ref0, Ref1): sorting on Ref1 first, then
+ // Ref0 with a stable sort, yields edges ordered by Ref0 then Ref1, so the
+ // two copies of each shared edge end up adjacent in the ranked order.
+ Cm::RadixSortBuffered Core;
+ {
+ // Multiple sorts - this rewritten version uses less ram
+ // PT: TTP 2994: the mesh has 343000+ edges, so yeah, sure, allocating more than 1mb on the stack causes overflow...
+ PxU32* VRefs = PX_NEW_TEMP(PxU32)[nb_edges];
+
+ // Sort according to mRef0, then mRef1
+ PxU32 i;
+ for(i=0;i<nb_edges;i++)
+ VRefs[i] = edges[i].Ref0;
+ Core.Sort(VRefs, nb_edges);
+ for(i=0;i<nb_edges;i++)
+ VRefs[i] = edges[i].Ref1;
+ Core.Sort(VRefs, nb_edges);
+
+ PX_DELETE_POD(VRefs);
+ }
+ const PxU32* Sorted = Core.GetRanks();
+
+ // Read the list in sorted order, look for similar edges
+ // Seed the run tracker with the first sorted edge; Count counts the faces
+ // sharing the current (LastRef0, LastRef1) run, capped at 2 (manifold).
+ PxU32 LastRef0 = edges[Sorted[0]].Ref0;
+ PxU32 LastRef1 = edges[Sorted[0]].Ref1;
+ PxU32 Count = 0;
+ PxU32 TmpBuffer[3];
+
+ while(nb_edges--)
+ {
+ PxU32 SortedIndex = *Sorted++;
+ PxU32 Face = edges[SortedIndex].mFaceNb; // Owner face
+ PxU32 Ref0 = edges[SortedIndex].Ref0; // Vertex ref #1
+ PxU32 Ref1 = edges[SortedIndex].Ref1; // Vertex ref #2
+ if(Ref0==LastRef0 && Ref1==LastRef1)
+ {
+ // Current edge is the same as last one
+ TmpBuffer[Count++] = Face; // Store face number
+ // Only works with manifold meshes (i.e. an edge is not shared by more than 2 triangles)
+ if(Count==3)
+ {
+ Ps::getFoundation().error(PxErrorCode::eINVALID_OPERATION, __FILE__, __LINE__, "Adjacencies::CreateDatabase: can't work on non-manifold meshes.");
+ return false;
+ }
+ }
+ else
+ {
+ // Here we have a new edge (LastRef0, LastRef1) shared by Count triangles stored in TmpBuffer
+ if(Count==2)
+ {
+ // if Count==1 => edge is a boundary edge: it belongs to a single triangle.
+ // Hence there's no need to update a link to an adjacent triangle.
+#ifdef MSH_ADJACENCIES_INCLUDE_TOPOLOGY
+ if(!UpdateLink(TmpBuffer[0], TmpBuffer[1], LastRef0, LastRef1, faces)) return false;
+#else
+ if(!UpdateLink(TmpBuffer[0], TmpBuffer[1], LastRef0, LastRef1, faces, create)) return false;
+#endif
+ }
+ // Reset for next edge
+ Count = 0;
+ TmpBuffer[Count++] = Face;
+ LastRef0 = Ref0;
+ LastRef1 = Ref1;
+ }
+ }
+ // Flush the final run: the loop above only links a run when the NEXT
+ // distinct edge is encountered, so the last shared edge is handled here.
+ bool Status = true;
+#ifdef MSH_ADJACENCIES_INCLUDE_TOPOLOGY
+ if(Count==2) Status = UpdateLink(TmpBuffer[0], TmpBuffer[1], LastRef0, LastRef1, faces);
+#else
+ if(Count==2) Status = UpdateLink(TmpBuffer[0], TmpBuffer[1], LastRef0, LastRef1, faces, create);
+#endif
+ return Status;
+ }
+
+AdjacenciesBuilder::AdjacenciesBuilder()
+{
+ // Nothing to do: all state is set up by Init().
+}
+
+AdjacenciesBuilder::~AdjacenciesBuilder()
+{
+ // Nothing to do: mFaces is released by the Adjacencies base destructor.
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+/**
+ * Initializes the component.
+ * \param create [in] the creation structure
+ * \return true if success.
+ */
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+bool AdjacenciesBuilder::Init(const ADJACENCIESCREATE& create)
+{
+ // Checkings
+ if(!create.NbFaces) return false;
+
+ // Get some bytes
+ mNbFaces = create.NbFaces;
+ mFaces = PX_NEW(AdjTriangle)[mNbFaces];
+
+ // Temporary edge database: exactly 3 edges per input triangle.
+ AdjEdge* Edges = PX_NEW_TEMP(AdjEdge)[mNbFaces*3];
+ PxU32 NbEdges=0;
+
+ // Feed me with triangles.....
+ for(PxU32 i=0;i<mNbFaces;i++)
+ {
+ // Get correct vertex references
+ // 32-bit DFaces takes priority over 16-bit WFaces; with neither supplied,
+ // every triangle degenerates to the constant refs (0, 1, 2).
+ const PxU32 Ref0 = create.DFaces ? create.DFaces[i*3+0] : create.WFaces ? create.WFaces[i*3+0] : 0;
+ const PxU32 Ref1 = create.DFaces ? create.DFaces[i*3+1] : create.WFaces ? create.WFaces[i*3+1] : 1;
+ const PxU32 Ref2 = create.DFaces ? create.DFaces[i*3+2] : create.WFaces ? create.WFaces[i*3+2] : 2;
+
+ // Add a triangle to the database
+ AddTriangle(Ref0, Ref1, Ref2, i, mFaces, NbEdges, Edges);
+ }
+
+ // At this point of the process we have mFaces & Edges filled with input data. That is:
+ // - a list of triangles with 3 NULL links (i.e. PX_INVALID_U32)
+ // - a list of mNbFaces*3 edges, each edge having 2 vertex references and an owner face.
+
+ // Here NbEdges should be equal to mNbFaces*3.
+ PX_ASSERT(NbEdges==mNbFaces*3);
+
+ // Match shared edges and fill in the adjacency links.
+#ifdef MSH_ADJACENCIES_INCLUDE_TOPOLOGY
+ bool Status = CreateDatabase(mFaces, NbEdges, Edges);
+#else
+ bool Status = CreateDatabase(mFaces, NbEdges, Edges, create);
+#endif
+
+ // We don't need the edges anymore
+ // (On failure mFaces is kept; the Adjacencies destructor releases it.)
+ PX_DELETE_ARRAY(Edges);
+
+#ifdef MSH_ADJACENCIES_INCLUDE_CONVEX_BITS
+ // Now create convex information. This creates coupling between adjacencies & edge-list but in this case it's actually the goal:
+ // mixing the two structures to save memory.
+ // Bit 0x20000000 of each link stores the edge's "active" (convex) flag
+ // computed by the edge-list builder; only attempted when vertices are given.
+ if(Status && create.Verts)
+ {
+ Gu::EDGELISTCREATE ELC;
+ ELC.NbFaces = create.NbFaces;
+ ELC.DFaces = create.DFaces; // That's where I like having a unified way to do things... We
+ ELC.WFaces = create.WFaces; // can just directly copy the same pointers.
+ ELC.FacesToEdges = true;
+ ELC.Verts = create.Verts;
+ ELC.Epsilon = create.Epsilon;
+
+ Gu::EdgeListBuilder EL;
+ if(EL.init(ELC))
+ {
+ for(PxU32 i=0;i<mNbFaces;i++)
+ {
+ const Gu::EdgeTriangleData& ET = EL.getEdgeTriangle(i);
+ if(Gu::EdgeTriangleAC::HasActiveEdge01(ET)) mFaces[i].mATri[EDGE01] |= 0x20000000;
+ else mFaces[i].mATri[EDGE01] &= ~0x20000000;
+ if(Gu::EdgeTriangleAC::HasActiveEdge20(ET)) mFaces[i].mATri[EDGE02] |= 0x20000000;
+ else mFaces[i].mATri[EDGE02] &= ~0x20000000;
+ if(Gu::EdgeTriangleAC::HasActiveEdge12(ET)) mFaces[i].mATri[EDGE12] |= 0x20000000;
+ else mFaces[i].mATri[EDGE12] &= ~0x20000000;
+
+ PX_ASSERT((Gu::EdgeTriangleAC::HasActiveEdge01(ET) && mFaces[i].HasActiveEdge01()) || (!Gu::EdgeTriangleAC::HasActiveEdge01(ET) && !mFaces[i].HasActiveEdge01()));
+ PX_ASSERT((Gu::EdgeTriangleAC::HasActiveEdge20(ET) && mFaces[i].HasActiveEdge20()) || (!Gu::EdgeTriangleAC::HasActiveEdge20(ET) && !mFaces[i].HasActiveEdge20()));
+ PX_ASSERT((Gu::EdgeTriangleAC::HasActiveEdge12(ET) && mFaces[i].HasActiveEdge12()) || (!Gu::EdgeTriangleAC::HasActiveEdge12(ET) && !mFaces[i].HasActiveEdge12()));
+ }
+ }
+ }
+#endif
+
+ return Status;
+}
+/*
+bool AdjacenciesBuilder::Save(Stream& stream) const
+{
+ bool PlatformMismatch = PxPlatformMismatch();
+
+ // Export header
+ if(!WriteHeader('A', 'D', 'J', 'A', gVersion, PlatformMismatch, stream))
+ return false;
+
+ // Export adjacencies
+// stream.StoreDword(mNbFaces);
+ WriteDword(mNbFaces, PlatformMismatch, stream);
+
+// stream.StoreBuffer(mFaces, sizeof(AdjTriangle)*mNbFaces);
+ WriteDwordBuffer((const PxU32*)mFaces, mNbFaces*3, PlatformMismatch, stream);
+
+ return true;
+}*/
+//#endif
diff --git a/PhysX_3.4/Source/PhysXCooking/src/Adjacencies.h b/PhysX_3.4/Source/PhysXCooking/src/Adjacencies.h
new file mode 100644
index 00000000..5378c569
--- /dev/null
+++ b/PhysX_3.4/Source/PhysXCooking/src/Adjacencies.h
@@ -0,0 +1,234 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2016 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+
+#ifndef PX_PHYSICS_GEOMUTILS_PX_ADJACENCIES
+#define PX_PHYSICS_GEOMUTILS_PX_ADJACENCIES
+
+#define MSH_ADJACENCIES_INCLUDE_CONVEX_BITS
+#include "foundation/Px.h"
+#include "GuTriangle32.h"
+
+namespace physx
+{
+
+#ifdef MSH_ADJACENCIES_INCLUDE_CONVEX_BITS
+ #define ADJ_TRIREF_MASK 0x1fffffff //!< Masks 3 bits
+ #define IS_CONVEX_EDGE(x) (x & 0x20000000) //!< Returns true for convex edges
+#else
+ #define ADJ_TRIREF_MASK 0x3fffffff //!< Masks 2 bits
+#endif
+
+ #define MAKE_ADJ_TRI(x) (x & ADJ_TRIREF_MASK) //!< Transforms a link into a triangle reference.
+ #define GET_EDGE_NB(x) (x>>30) //!< Transforms a link into a counterpart edge ID.
+// #define IS_BOUNDARY(x) (x==PX_INVALID_U32) //!< Returns true for boundary edges.
+ #define IS_BOUNDARY(x) ((x & ADJ_TRIREF_MASK)==ADJ_TRIREF_MASK) //!< Returns true for boundary edges.
+
+ // Forward declarations
+ class Adjacencies;
+
+ enum SharedEdgeIndex
+ {
+ EDGE01 = 0,
+ EDGE02 = 1,
+ EDGE12 = 2
+ };
+
+/* PX_INLINE void GetEdgeIndices(SharedEdgeIndex edge_index, PxU32& id0, PxU32& id1)
+ {
+ if(edge_index==0)
+ {
+ id0 = 0;
+ id1 = 1;
+ }
+ else if(edge_index==1)
+ {
+ id0 = 0;
+ id1 = 2;
+ }
+ else if(edge_index==2)
+ {
+ id0 = 1;
+ id1 = 2;
+ }
+ }*/
+
+ //! Sets a new edge code
+ #define SET_EDGE_NB(link, code) \
+ link&=ADJ_TRIREF_MASK; \
+ link|=code<<30; \
+
+	//! A triangle class used to compute the adjacency structures.
+	//! Each instance stores one adjacency link per edge; the payload triangle
+	//! indices themselves live elsewhere unless MSH_ADJACENCIES_INCLUDE_TOPOLOGY
+	//! is defined (in which case the class also carries the topology).
+	class AdjTriangle
+#ifdef MSH_ADJACENCIES_INCLUDE_TOPOLOGY
+	: public IndexedTriangle
+#else
+	: public Ps::UserAllocated
+#endif
+	{
+		public:
+		//! Constructor
+		PX_INLINE				AdjTriangle()	{}
+		//! Destructor
+		PX_INLINE				~AdjTriangle()	{}
+
+		///////////////////////////////////////////////////////////////////////////////
+		/**
+		 *	Computes the number of boundary edges in a triangle.
+		 *	\return		the number of boundary edges. (0 => 3)
+		 */
+		///////////////////////////////////////////////////////////////////////////////
+				PxU32			ComputeNbBoundaryEdges()	const;
+		///////////////////////////////////////////////////////////////////////////////
+		/**
+		 *	Computes the number of valid neighbors.
+		 *	\return		the number of neighbors. (0 => 3)
+		 */
+		///////////////////////////////////////////////////////////////////////////////
+				PxU32			ComputeNbNeighbors()	const;
+		///////////////////////////////////////////////////////////////////////////////
+		/**
+		 *	Checks whether the triangle has a particular neighbor or not.
+		 *	\param		tref	[in] the triangle reference to look for
+		 *	\param		index	[out] the corresponding index in the triangle (NULL if not needed)
+		 *	\return		true if the triangle has the given neighbor
+		 */
+		///////////////////////////////////////////////////////////////////////////////
+				bool			HasNeighbor(PxU32 tref, PxU32* index=NULL)	const;
+		///////////////////////////////////////////////////////////////////////////////
+		/**
+		 *	Flips the winding.
+		 */
+		///////////////////////////////////////////////////////////////////////////////
+				void			Flip();
+
+		// Data access. SharedEdgeIndex selects which of the three links to read.
+		PX_INLINE	PxU32			GetLink(SharedEdgeIndex edge_index)			const	{ return mATri[edge_index];					}
+		PX_INLINE	PxU32			GetAdjTri(SharedEdgeIndex edge_index)		const	{ return MAKE_ADJ_TRI(mATri[edge_index]);	}
+		PX_INLINE	PxU32			GetAdjEdge(SharedEdgeIndex edge_index)		const	{ return GET_EDGE_NB(mATri[edge_index]);	}
+		PX_INLINE	Ps::IntBool		IsBoundaryEdge(SharedEdgeIndex edge_index)	const	{ return IS_BOUNDARY(mATri[edge_index]);	}
+#ifdef MSH_ADJACENCIES_INCLUDE_CONVEX_BITS
+		// "Active" edges are flagged with bit 0x20000000 (see IS_CONVEX_EDGE above).
+		PX_INLINE	Ps::IntBool		HasActiveEdge01()	const	{ return Ps::IntBool(IS_CONVEX_EDGE(mATri[EDGE01]));	}
+		PX_INLINE	Ps::IntBool		HasActiveEdge20()	const	{ return Ps::IntBool(IS_CONVEX_EDGE(mATri[EDGE02]));	}
+		PX_INLINE	Ps::IntBool		HasActiveEdge12()	const	{ return Ps::IntBool(IS_CONVEX_EDGE(mATri[EDGE12]));	}
+		PX_INLINE	Ps::IntBool		HasActiveEdge(PxU32 i)	const	{ return Ps::IntBool(IS_CONVEX_EDGE(mATri[i]));		}
+#endif
+//		private:
+		//! Links/References of adjacent triangles. The 2 most significant bits contains the counterpart edge in the adjacent triangle.
+		//! mATri[0] refers to edge 0-1
+		//! mATri[1] refers to edge 0-2
+		//! mATri[2] refers to edge 1-2
+				PxU32			mATri[3];
+	};
+
+	//! The adjacencies creation structure.
+	//! Exactly one of DFaces/WFaces is expected to be non-NULL (dword or word indices).
+	struct ADJACENCIESCREATE
+	{
+		//! Constructor
+		ADJACENCIESCREATE() : NbFaces(0), DFaces(NULL), WFaces(NULL)
+		{
+#ifdef MSH_ADJACENCIES_INCLUDE_CONVEX_BITS
+			Verts = NULL;
+			Epsilon = 0.1f;		// default convexity-test threshold
+//			Epsilon = 0.001f;
+#endif
+		}
+
+		PxU32			NbFaces;	//!< Number of faces in source topo
+		const PxU32*	DFaces;		//!< List of faces (dwords) or NULL
+		const PxU16*	WFaces;		//!< List of faces (words) or NULL
+#ifdef MSH_ADJACENCIES_INCLUDE_CONVEX_BITS
+		const PxVec3*	Verts;		//!< Vertex positions, used for the active/convex edge detection
+		float			Epsilon;	//!< Convexity-test epsilon
+#endif
+	};
+
+	//! Runtime adjacency container: one AdjTriangle per source face, plus
+	//! helpers to walk the mesh across shared edges.
+	class Adjacencies : public Ps::UserAllocated
+	{
+		public:
+								Adjacencies();
+								~Adjacencies();
+
+				PxU32			mNbFaces;	//!< Number of faces involved in the computation.
+				AdjTriangle*	mFaces;		//!< A list of AdjTriangles (one/face)
+
+				bool			Load(PxInputStream& stream);
+		// Basic mesh walking
+		PX_INLINE	const AdjTriangle*	GetAdjacentFace(const AdjTriangle& current_tri, SharedEdgeIndex edge_nb)	const
+							{
+								// No checkings here, make sure mFaces has been created
+
+								// Catch the link
+								PxU32 Link = current_tri.GetLink(edge_nb);
+
+								// Returns NULL for boundary edges
+								if(IS_BOUNDARY(Link))	return NULL;
+
+								// Else transform into face index
+								PxU32 Id = MAKE_ADJ_TRI(Link);
+
+								// Possible counterpart edge is:
+								// PxU32 Edge = GET_EDGE_NB(Link);
+
+								// And returns adjacent triangle
+								return &mFaces[Id];
+							}
+		// Helpers
+				PxU32			ComputeNbBoundaryEdges()	const;
+#ifdef MSH_ADJACENCIES_INCLUDE_TOPOLOGY
+				bool			GetBoundaryVertices(PxU32 nb_verts, bool* bound_status)	const;
+#else
+				bool			GetBoundaryVertices(PxU32 nb_verts, bool* bound_status, const Gu::TriangleT<PxU32>* faces)	const;
+#endif
+		//
+#ifdef MSH_ADJACENCIES_INCLUDE_TOPOLOGY
+				bool			MakeLastRef(AdjTriangle& cur_tri, PxU32 vref);
+#else
+				bool			MakeLastRef(AdjTriangle& cur_tri, PxU32 vref, Gu::TriangleT<PxU32>* cur_topo);
+#endif
+		private:
+		// New edge codes assignment
+				void			AssignNewEdgeCode(PxU32 link, PxU8 edge_nb);
+	};
+
+//#ifdef PX_COOKING
+	//! Cooking-time builder: computes the adjacency data from an ADJACENCIESCREATE
+	//! description (see Init in Adjacencies.cpp).
+	class AdjacenciesBuilder : public Adjacencies
+	{
+		public:
+								AdjacenciesBuilder();
+								~AdjacenciesBuilder();
+
+				bool			Init(const ADJACENCIESCREATE& create);
+//				bool			Save(Stream& stream)	const;
+	};
+//#endif
+
+}
+
+#endif
diff --git a/PhysX_3.4/Source/PhysXCooking/src/Cooking.cpp b/PhysX_3.4/Source/PhysXCooking/src/Cooking.cpp
new file mode 100644
index 00000000..e793b48f
--- /dev/null
+++ b/PhysX_3.4/Source/PhysXCooking/src/Cooking.cpp
@@ -0,0 +1,493 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2016 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+
+#include "foundation/PxErrorCallback.h"
+#include "PsFoundation.h"
+#include "PsUtilities.h"
+#include "PsFPU.h"
+#include "CmPhysXCommon.h"
+#include "PxPhysXConfig.h"
+#include "PxSimpleTriangleMesh.h"
+#include "PxTriangleMeshDesc.h"
+#include "PxConvexMeshDesc.h"
+#include "PxCooking.h"
+#include "Cooking.h"
+#include "mesh/TriangleMeshBuilder.h"
+#include "GuConvexMesh.h"
+#include "ConvexMeshBuilder.h"
+#include "InflationConvexHullLib.h"
+#include "QuickHullConvexHullLib.h"
+#include "CmIO.h"
+#include "PxHeightFieldDesc.h"
+#include "GuHeightField.h"
+#include "HeightFieldCooking.h"
+#include "common/PxPhysicsInsertionCallback.h"
+#include "CmUtils.h"
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+using namespace physx;
+using namespace Gu;
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+// Replaces the full set of cooking parameters used by all subsequent cook/create calls.
+void Cooking::setParams(const PxCookingParams& params)
+{
+	mParams = params;
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+// Returns the currently active cooking parameters.
+const PxCookingParams& Cooking::getParams()
+{
+	return mParams;
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+// Returns true when the endianness of the platform running the cooking does
+// not match the endianness required by mParams.targetPlatform, i.e. when the
+// cooked data will need byte-swapping on save.
+bool Cooking::platformMismatch()
+{
+	// Get current endianness (the one for the platform where cooking is performed)
+	const PxI8 currentEndian = Ps::littleEndian();
+
+	bool mismatch = true;
+	switch(mParams.targetPlatform)
+	{
+	case PxPlatform::ePC:
+		mismatch = currentEndian!=1;	// The PC files must be little endian
+		break;
+	case PxPlatform::eARM:
+		mismatch = currentEndian!=1;	// ARM targets are little endian as well
+		break;
+	default:
+		// Unknown/unsupported target: conservatively report a mismatch.
+		// (Explicit default also silences -Wswitch if PxPlatform grows.)
+		break;
+	}
+	return mismatch;
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+// Self-destructs the Cooking object (it was allocated with PX_NEW in
+// PxCreateCooking) and releases the foundation reference taken there.
+void Cooking::release()
+{
+	delete this;
+
+	// Balances Ps::Foundation::incRefCount() performed in PxCreateCooking().
+	Ps::Foundation::decRefCount();
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+// Validates a user-provided triangle mesh descriptor without producing cooked
+// output. Returns false (with an error report) on an invalid descriptor or a
+// mesh that fails the builder's validation pass.
+bool Cooking::validateTriangleMesh(const PxTriangleMeshDesc& desc)
+{
+	// cooking code does lots of float bitwise reinterpretation that generates exceptions
+	PX_FPU_GUARD;
+
+	if(!desc.isValid())
+	{
+		Ps::getFoundation().error(PxErrorCode::eINVALID_PARAMETER, __FILE__, __LINE__, "Cooking::validateTriangleMesh: user-provided triangle mesh descriptor is invalid!");
+		return false;
+	}
+
+	// PT: validation code doesn't look at midphase data, so ideally we wouldn't build the midphase structure at all here.
+	BV4TriangleMeshBuilder builder(mParams);
+	return builder.loadFromDesc(desc, NULL, true /*doValidate*/);
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+// Shared cooking worker: loads the mesh into the supplied midphase-specific
+// builder and serializes the result into 'stream'. 'condition' is optional and
+// receives eSUCCESS/eFAILURE (or a more specific code from loadFromDesc).
+bool Cooking::cookTriangleMesh(TriangleMeshBuilder& builder, const PxTriangleMeshDesc& desc, PxOutputStream& stream, PxTriangleMeshCookingResult::Enum* condition)
+{
+	// cooking code does lots of float bitwise reinterpretation that generates exceptions
+	PX_FPU_GUARD;
+
+	if (condition)
+		*condition = PxTriangleMeshCookingResult::eSUCCESS;
+	if(!builder.loadFromDesc(desc, condition, false))
+	{
+		if (condition)
+			*condition = PxTriangleMeshCookingResult::eFAILURE;
+		return false;
+	}
+
+	// platformMismatch() tells the builder whether to byte-swap on save.
+	builder.save(stream, platformMismatch(), mParams);
+	return true;
+}
+
+// Public entry point: selects the midphase-specific builder and delegates to
+// the shared cooking worker above.
+bool Cooking::cookTriangleMesh(const PxTriangleMeshDesc& desc, PxOutputStream& stream, PxTriangleMeshCookingResult::Enum* condition)
+{
+	// eINVALID and eBVH33 both map to the RTree (BVH33) builder; any other
+	// midphase type uses the BV4 (BVH34) builder.
+	const PxMeshMidPhase::Enum midphaseType = mParams.midphaseDesc.getType();
+	const bool useRTree = (midphaseType == PxMeshMidPhase::eINVALID) || (midphaseType == PxMeshMidPhase::eBVH33);
+	if(useRTree)
+	{
+		RTreeTriangleMeshBuilder builder(mParams);
+		return cookTriangleMesh(builder, desc, stream, condition);
+	}
+	BV4TriangleMeshBuilder builder(mParams);
+	return cookTriangleMesh(builder, desc, stream, condition);
+}
+
+// Shared worker: cooks the mesh with the supplied builder and inserts the
+// resulting data directly into the SDK via 'insertionCallback', bypassing
+// stream serialization. Returns NULL on failure.
+PxTriangleMesh* Cooking::createTriangleMesh(TriangleMeshBuilder& builder, const PxTriangleMeshDesc& desc, PxPhysicsInsertionCallback& insertionCallback)
+{
+	// cooking code does lots of float bitwise reinterpretation that generates exceptions
+	PX_FPU_GUARD;
+
+	if(!builder.loadFromDesc(desc, NULL, false))
+		return NULL;
+
+	// check if the indices can be moved from 32bits to 16bits
+	if(!(mParams.meshPreprocessParams & PxMeshPreprocessingFlag::eFORCE_32BIT_INDICES))
+		builder.checkMeshIndicesSize();
+
+	// The concrete type tells the insertion callback which midphase layout the data uses.
+	PxConcreteType::Enum type;
+	if(builder.getMidphaseID()==PxMeshMidPhase::eBVH33)
+		type = PxConcreteType::eTRIANGLE_MESH_BVH33;
+	else
+		type = PxConcreteType::eTRIANGLE_MESH_BVH34;
+
+	return static_cast<PxTriangleMesh*>(insertionCallback.buildObjectFromData(type, &builder.getMeshData()));
+}
+
+// Public entry point: selects the midphase-specific builder (same dispatch as
+// cookTriangleMesh) and delegates to the shared createTriangleMesh worker.
+PxTriangleMesh* Cooking::createTriangleMesh(const PxTriangleMeshDesc& desc, PxPhysicsInsertionCallback& insertionCallback)
+{
+	if((mParams.midphaseDesc.getType() == PxMeshMidPhase::eINVALID) || (mParams.midphaseDesc.getType() == PxMeshMidPhase::eBVH33))
+	{
+		RTreeTriangleMeshBuilder builder(mParams);
+		return createTriangleMesh(builder, desc, insertionCallback);
+	}
+	else
+	{
+		BV4TriangleMeshBuilder builder(mParams);
+		return createTriangleMesh(builder, desc, insertionCallback);
+	}
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+// cook convex mesh from given desc, internal function to be shared between create/cook convex mesh
+// Cooks a convex mesh from 'desc_' into 'meshBuilder'. Shared between
+// cookConvexMesh (stream path) and createConvexMesh (direct-insertion path).
+// 'hullLib' is non-NULL iff eCOMPUTE_CONVEX was requested by the caller.
+// 'condition' is OPTIONAL — createConvexMesh passes NULL — so every write to
+// it must be guarded.
+bool Cooking::cookConvexMeshInternal(const PxConvexMeshDesc& desc_, ConvexMeshBuilder& meshBuilder, ConvexHullLib* hullLib,
+	PxConvexMeshCookingResult::Enum* condition)
+{
+	if (condition)
+		*condition = PxConvexMeshCookingResult::eFAILURE;
+
+	if (!desc_.isValid())
+	{
+		Ps::getFoundation().error(PxErrorCode::eINVALID_PARAMETER, __FILE__, __LINE__, "Cooking::cookConvexMesh: user-provided convex mesh descriptor is invalid!");
+		return false;
+	}
+
+	if (mParams.areaTestEpsilon <= 0.0f)
+	{
+		Ps::getFoundation().error(PxErrorCode::eINVALID_PARAMETER, __FILE__, __LINE__, "Cooking::cookConvexMesh: user-provided convex mesh areaTestEpsilon is invalid!");
+		return false;
+	}
+
+	// Local copy: the hull library overwrites it with the computed hull below.
+	PxConvexMeshDesc desc = desc_;
+	bool polygonsLimitReached = false;
+
+	// the convex will be cooked from provided points
+	if (desc_.flags & PxConvexFlag::eCOMPUTE_CONVEX)
+	{
+		PX_ASSERT(hullLib);
+
+		PxConvexMeshCookingResult::Enum res = hullLib->createConvexHull();
+		if (res == PxConvexMeshCookingResult::eSUCCESS || res == PxConvexMeshCookingResult::ePOLYGONS_LIMIT_REACHED)
+		{
+			if (res == PxConvexMeshCookingResult::ePOLYGONS_LIMIT_REACHED)
+				polygonsLimitReached = true;
+
+			hullLib->fillConvexMeshDesc(desc);
+		}
+		else
+		{
+			if (res == PxConvexMeshCookingResult::eZERO_AREA_TEST_FAILED)
+			{
+				// BUGFIX: this store was previously unguarded. 'condition' is
+				// NULL when called from createConvexMesh(), which made a
+				// zero-area-test failure dereference a NULL pointer here.
+				if (condition)
+					*condition = PxConvexMeshCookingResult::eZERO_AREA_TEST_FAILED;
+			}
+
+			return false;
+		}
+	}
+
+	// Hard limit from the error message below: a cookable hull must have fewer
+	// than 256 vertices.
+	if (desc.points.count >= 256)
+	{
+		Ps::getFoundation().error(PxErrorCode::eINTERNAL_ERROR, __FILE__, __LINE__, "Cooking::cookConvexMesh: user-provided hull must have less than 256 vertices!");
+		return false;
+	}
+
+	// Last arg: only validate the user-provided hull when it was NOT computed by us.
+	if (!meshBuilder.build(desc, mParams.gaussMapLimit, false, hullLib ? false : true))
+	{
+		return false;
+	}
+
+	if (condition)
+	{
+		*condition = polygonsLimitReached ? PxConvexMeshCookingResult::ePOLYGONS_LIMIT_REACHED : PxConvexMeshCookingResult::eSUCCESS;
+	}
+
+	return true;
+}
+
+//////////////////////////////////////////////////////////////////////////
+// cook convex mesh from given desc, save the results into stream
+// Cooks a convex mesh from 'desc_' and serializes the result into 'stream'.
+// Chooses the hull computation library (inflation-incremental vs quickhull)
+// from mParams when eCOMPUTE_CONVEX is set.
+// NOTE(review): hullLib cleanup is duplicated on every exit path; a scoped
+// owner would simplify this, left as-is to preserve exact control flow.
+bool Cooking::cookConvexMesh(const PxConvexMeshDesc& desc_, PxOutputStream& stream, PxConvexMeshCookingResult::Enum* condition)
+{
+	PX_FPU_GUARD;
+	// choose cooking library if needed
+	ConvexHullLib* hullLib = NULL;
+	PxConvexMeshDesc desc = desc_;
+
+	if(desc_.flags & PxConvexFlag::eCOMPUTE_CONVEX)
+	{
+		const PxU32 gpuMaxVertsLimit = 64;
+
+		// GRB supports 64 verts max
+		if(desc_.flags & PxConvexFlag::eGPU_COMPATIBLE)
+		{
+			desc.vertexLimit = gpuMaxVertsLimit;
+		}
+
+		if(mParams.convexMeshCookingType == PxConvexMeshCookingType::eINFLATION_INCREMENTAL_HULL)
+		{
+			hullLib = PX_NEW(InflationConvexHullLib) (desc, mParams);
+		}
+		else
+		{
+			hullLib = PX_NEW(QuickHullConvexHullLib) (desc, mParams);
+		}
+	}
+
+	ConvexMeshBuilder meshBuilder(mParams.buildGPUData);
+	if(!cookConvexMeshInternal(desc,meshBuilder,hullLib , condition))
+	{
+		if(hullLib)
+			PX_DELETE(hullLib);
+		return false;
+	}
+
+	// save the cooked results into stream
+	if(!meshBuilder.save(stream, platformMismatch()))
+	{
+		if (condition)
+		{
+			*condition = PxConvexMeshCookingResult::eFAILURE;
+		}
+		if(hullLib)
+			PX_DELETE(hullLib);
+		return false;
+	}
+
+	if(hullLib)
+		PX_DELETE(hullLib);
+	return true;
+}
+
+//////////////////////////////////////////////////////////////////////////
+// cook convex mesh from given desc, copy the results into internal convex mesh
+// and insert the mesh into PxPhysics
+// Cooks a convex mesh from 'desc' and inserts it directly into the SDK via
+// 'insertionCallback' (no stream serialization). Returns NULL on failure.
+// Passes NULL as the 'condition' argument to cookConvexMeshInternal.
+PxConvexMesh* Cooking::createConvexMesh(const PxConvexMeshDesc& desc, PxPhysicsInsertionCallback& insertionCallback)
+{
+	PX_FPU_GUARD;
+	// choose cooking library if needed
+	ConvexHullLib* hullLib = NULL;
+	if(desc.flags & PxConvexFlag::eCOMPUTE_CONVEX)
+	{
+		if (mParams.convexMeshCookingType == PxConvexMeshCookingType::eINFLATION_INCREMENTAL_HULL)
+		{
+			hullLib = PX_NEW(InflationConvexHullLib) (desc, mParams);
+		}
+		else
+		{
+			hullLib = PX_NEW(QuickHullConvexHullLib) (desc, mParams);
+		}
+	}
+
+	// cook the mesh
+	ConvexMeshBuilder meshBuilder(mParams.buildGPUData);
+	if (!cookConvexMeshInternal(desc, meshBuilder, hullLib, NULL))
+	{
+		if(hullLib)
+			PX_DELETE(hullLib);
+		return NULL;
+	}
+
+	// copy the constructed data into the new mesh
+	Gu::ConvexHullData meshData;
+	meshBuilder.copy(meshData);
+
+	// insert into physics
+	Gu::ConvexMesh* convexMesh = static_cast<Gu::ConvexMesh*>(insertionCallback.buildObjectFromData(PxConcreteType::eCONVEX_MESH, &meshData));
+	if (!convexMesh)
+	{
+		if (hullLib)
+			PX_DELETE(hullLib);
+		return NULL;
+	}
+
+	// Mass properties and (optional) big-convex support data are transferred
+	// from the builder; ownership of the big convex data moves to the mesh.
+	convexMesh->setMass(meshBuilder.getMass());
+	convexMesh->setInertia(meshBuilder.getInertia());
+	if(meshBuilder.getBigConvexData())
+	{
+		convexMesh->setBigConvexData(meshBuilder.getBigConvexData());
+		meshBuilder.setBigConvexData(NULL);
+	}
+
+	if(hullLib)
+		PX_DELETE(hullLib);
+	return convexMesh;
+}
+
+//////////////////////////////////////////////////////////////////////////
+
+// Validates a user-provided convex mesh descriptor by running a full build in
+// validation mode (last argument 'true'); no cooked output is produced.
+bool Cooking::validateConvexMesh(const PxConvexMeshDesc& desc)
+{
+	ConvexMeshBuilder mesh(mParams.buildGPUData);
+	return mesh.build(desc, mParams.gaussMapLimit, true);
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+// Computes hull polygon data from a triangle soup. De-strides the input
+// points/triangles into temporary stack buffers, then delegates to
+// ConvexMeshBuilder::computeHullPolygons, which allocates the output arrays
+// through 'inCallback' (the caller owns and must free them).
+// NOTE(review): both temporaries use PxAlloca sized from user-provided counts —
+// very large meshes could overflow the stack; confirm callers bound the counts.
+bool Cooking::computeHullPolygons(const PxSimpleTriangleMesh& mesh, PxAllocatorCallback& inCallback,PxU32& nbVerts, PxVec3*& vertices,
+	PxU32& nbIndices, PxU32*& indices, PxU32& nbPolygons, PxHullPolygon*& hullPolygons)
+{
+	PxVec3* geometry = reinterpret_cast<PxVec3*>(PxAlloca(sizeof(PxVec3)*mesh.points.count));
+	Cooking::gatherStrided(mesh.points.data, geometry, mesh.points.count, sizeof(PxVec3), mesh.points.stride);
+
+	PxU32* topology = reinterpret_cast<PxU32*>(PxAlloca(sizeof(PxU32)*3*mesh.triangles.count));
+	if (mesh.flags & PxMeshFlag::e16_BIT_INDICES)
+	{
+		// conversion; 16 bit index -> 32 bit index & stride
+		PxU32* dest = topology;
+		const PxU32* pastLastDest = topology + 3*mesh.triangles.count;
+		const PxU8* source = reinterpret_cast<const PxU8*>(mesh.triangles.data);
+		while (dest < pastLastDest)
+		{
+			const PxU16 * trig16 = reinterpret_cast<const PxU16*>(source);
+			*dest++ = trig16[0];
+			*dest++ = trig16[1];
+			*dest++ = trig16[2];
+			source += mesh.triangles.stride;
+		}
+	}
+	else
+	{
+		Cooking::gatherStrided(mesh.triangles.data, topology, mesh.triangles.count, sizeof(PxU32) * 3, mesh.triangles.stride);
+	}
+
+	ConvexMeshBuilder meshBuilder(mParams.buildGPUData);
+	if(!meshBuilder.computeHullPolygons(mesh.points.count,geometry,mesh.triangles.count,topology,inCallback, nbVerts, vertices,nbIndices,indices,nbPolygons,hullPolygons))
+		return false;
+
+	return true;
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+// Cooks a heightfield from 'desc' and serializes it into 'stream', byte-swapped
+// if the target platform's endianness differs (platformMismatch()).
+bool Cooking::cookHeightField(const PxHeightFieldDesc& desc, PxOutputStream& stream)
+{
+	PX_FPU_GUARD;
+
+	if(!desc.isValid())
+	{
+	#if PX_CHECKED
+		// BUGFIX: the message previously named "createHeightField" — a
+		// copy-paste from the sibling function — misattributing the error.
+		Ps::getFoundation().error(PxErrorCode::eINVALID_PARAMETER, __FILE__, __LINE__, "Cooking::cookHeightField: user-provided heightfield descriptor is invalid!");
+	#endif
+		return false;
+	}
+
+	// Stack-local heightfield; releaseMemory() must run on every exit path.
+	Gu::HeightField hf(NULL);
+
+	if(!hf.loadFromDesc(desc))
+	{
+		hf.releaseMemory();
+		return false;
+	}
+
+	if (!saveHeightField(hf, stream, platformMismatch()))
+	{
+		hf.releaseMemory();
+		return false;
+	}
+
+	hf.releaseMemory();
+
+	return true;
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+// Builds a heightfield from 'desc' and inserts it directly into the SDK via
+// 'insertionCallback'. The temporary Gu::HeightField 'hf' is only used to load
+// the descriptor; its data is handed to the insertion callback and the scalar
+// members are copied over before 'hf' is deleted. Returns NULL on failure.
+PxHeightField* Cooking::createHeightField(const PxHeightFieldDesc& desc, PxPhysicsInsertionCallback& insertionCallback)
+{
+	PX_FPU_GUARD;
+
+	if(!desc.isValid())
+	{
+	#if PX_CHECKED
+		Ps::getFoundation().error(PxErrorCode::eINVALID_PARAMETER, __FILE__, __LINE__, "Cooking::createHeightField: user-provided heightfield descriptor is invalid!");
+	#endif
+		return NULL;
+	}
+
+	Gu::HeightField* hf;
+	PX_NEW_SERIALIZED(hf, Gu::HeightField)(NULL);
+
+	if(!hf->loadFromDesc(desc))
+	{
+		PX_DELETE(hf);
+		return NULL;
+	}
+
+	// create heightfield and set the HF data
+	Gu::HeightField* heightField = static_cast<Gu::HeightField*>(insertionCallback.buildObjectFromData(PxConcreteType::eHEIGHTFIELD, &hf->mData));
+	if(!heightField)
+	{
+		PX_DELETE(hf);
+		return NULL;
+	}
+
+	// copy the Gu::HeightField variables
+	heightField->mSampleStride = hf->mSampleStride;
+	heightField->mNbSamples = hf->mNbSamples;
+	heightField->mMinHeight = hf->mMinHeight;
+	heightField->mMaxHeight = hf->mMaxHeight;
+	heightField->mModifyCount = hf->mModifyCount;
+
+	PX_DELETE(hf);
+	return heightField;
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+// Public factory for the cooking interface. Takes a foundation reference
+// (released in Cooking::release) and returns a heap-allocated Cooking object.
+// The 'version' argument is currently unused.
+PxCooking* PxCreateCooking(PxU32 /*version*/, PxFoundation& foundation, const PxCookingParams& params)
+{
+	// The caller must pass the one true foundation instance.
+	PX_ASSERT(static_cast<Ps::Foundation*>(&foundation) == &Ps::Foundation::getInstance());
+	PX_UNUSED(foundation);
+
+	Ps::Foundation::incRefCount();
+
+	return PX_NEW(Cooking)(params);
+}
+
diff --git a/PhysX_3.4/Source/PhysXCooking/src/Cooking.h b/PhysX_3.4/Source/PhysXCooking/src/Cooking.h
new file mode 100644
index 00000000..d54aea0f
--- /dev/null
+++ b/PhysX_3.4/Source/PhysXCooking/src/Cooking.h
@@ -0,0 +1,87 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2016 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#ifndef PX_PSCOOKING_H
+#define PX_PSCOOKING_H
+
+#include "foundation/PxMemory.h"
+#include "PxCooking.h"
+#include "PsUserAllocated.h"
+
+namespace physx
+{
+class TriangleMeshBuilder;
+class ConvexMeshBuilder;
+class ConvexHullLib;
+
+// Concrete implementation of the public PxCooking interface. Holds the cooking
+// parameters and dispatches to the mesh/convex/heightfield builders.
+class Cooking: public PxCooking, public Ps::UserAllocated
+{
+public:
+	Cooking(const PxCookingParams& params): mParams(params) {}
+
+	// PxCooking interface (see PxCooking.h for the public documentation).
+	virtual void				release();
+	virtual void				setParams(const PxCookingParams& params);
+	virtual const PxCookingParams& getParams();
+	virtual bool				platformMismatch();
+	virtual bool				cookTriangleMesh(const PxTriangleMeshDesc& desc, PxOutputStream& stream, PxTriangleMeshCookingResult::Enum* condition = NULL);
+	virtual PxTriangleMesh*		createTriangleMesh(const PxTriangleMeshDesc& desc, PxPhysicsInsertionCallback& insertionCallback);
+	virtual bool				validateTriangleMesh(const PxTriangleMeshDesc& desc);
+
+	virtual bool				cookConvexMesh(const PxConvexMeshDesc& desc, PxOutputStream& stream, PxConvexMeshCookingResult::Enum* condition);
+	virtual PxConvexMesh*		createConvexMesh(const PxConvexMeshDesc& desc, PxPhysicsInsertionCallback& insertionCallback);
+	virtual bool				validateConvexMesh(const PxConvexMeshDesc& desc);
+	virtual bool				computeHullPolygons(const PxSimpleTriangleMesh& mesh, PxAllocatorCallback& inCallback,PxU32& nbVerts, PxVec3*& vertices,
+									PxU32& nbIndices, PxU32*& indices, PxU32& nbPolygons, PxHullPolygon*& hullPolygons);
+	virtual bool				cookHeightField(const PxHeightFieldDesc& desc, PxOutputStream& stream);
+	virtual PxHeightField*		createHeightField(const PxHeightFieldDesc& desc, PxPhysicsInsertionCallback& insertionCallback);
+
+	// Copies 'nbElem' elements of 'elemSize' bytes from a strided source buffer
+	// into a tightly-packed destination buffer.
+	PX_FORCE_INLINE static void	gatherStrided(const void* src, void* dst, PxU32 nbElem, PxU32 elemSize, PxU32 stride)
+	{
+		const PxU8* s = reinterpret_cast<const PxU8*>(src);
+		PxU8* d = reinterpret_cast<PxU8*>(dst);
+		while(nbElem--)
+		{
+			PxMemCopy(d, s, elemSize);
+			d += elemSize;
+			s += stride;
+		}
+	}
+
+private:
+	// Shared convex-cooking worker used by cookConvexMesh/createConvexMesh.
+	bool						cookConvexMeshInternal(const PxConvexMeshDesc& desc, ConvexMeshBuilder& meshBuilder, ConvexHullLib* hullLib, PxConvexMeshCookingResult::Enum* condition);
+
+private:
+	PxCookingParams				mParams;	//!< Active cooking parameters
+
+	// Builder-specific workers shared by the public triangle-mesh entry points.
+	bool						cookTriangleMesh(TriangleMeshBuilder& builder, const PxTriangleMeshDesc& desc, PxOutputStream& stream, PxTriangleMeshCookingResult::Enum* condition);
+	PxTriangleMesh*				createTriangleMesh(TriangleMeshBuilder& builder, const PxTriangleMeshDesc& desc, PxPhysicsInsertionCallback& insertionCallback);
+};
+
+}
+#endif //#define PX_PSCOOKING_H
diff --git a/PhysX_3.4/Source/PhysXCooking/src/CookingUtils.cpp b/PhysX_3.4/Source/PhysXCooking/src/CookingUtils.cpp
new file mode 100644
index 00000000..37aa7b12
--- /dev/null
+++ b/PhysX_3.4/Source/PhysXCooking/src/CookingUtils.cpp
@@ -0,0 +1,120 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2016 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#include "foundation/PxMath.h"
+#include "CookingUtils.h"
+#include "CmRadixSortBuffered.h"
+#include "PsAllocator.h"
+#include "PsFPU.h"
+
+using namespace physx;
+using namespace Cm;
+
+// Constructor. Keeps a non-owning pointer to the source vertices; the reduced
+// data members stay null until Reduce() is called.
+ReducedVertexCloud::ReducedVertexCloud(const PxVec3* verts, PxU32 nb_verts) :
+	mNbVerts	(nb_verts),
+	mVerts		(verts),
+	mNbRVerts	(0),
+	mRVerts		(NULL),
+	mXRef		(NULL)
+{
+}
+
+// Destructor: releases the reduced vertex list and cross-reference table via
+// Clean(). The source vertex pointer passed to the constructor is not owned
+// and is left untouched.
+ReducedVertexCloud::~ReducedVertexCloud()
+{
+	Clean();
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+/**
+* Frees used ram.
+* \return Self-reference
+*/
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+// Releases everything the cloud owns (reduced vertex list + cross-ref table)
+// and nulls the pointers, so the call is safe to repeat.
+// \return Self-reference
+ReducedVertexCloud& ReducedVertexCloud::Clean()
+{
+	// The two buffers are independent, so the release order does not matter.
+	PX_FREE_AND_RESET(mRVerts);
+	PX_DELETE_POD(mXRef);
+	return *this;
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+/**
+* Reduction method. Use this to create a minimal vertex cloud.
+* \param rc [out] result structure
+* \return true if success
+* \warning This is not about welding nearby vertices, here we look for real redundant ones.
+*/
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+bool ReducedVertexCloud::Reduce(REDUCEDCLOUD* rc)
+{
+	// Drop any previous result before recomputing.
+	Clean();
+
+	// One remap entry per input vertex: original index -> reduced index.
+	mXRef = PX_NEW(PxU32)[mNbVerts];
+
+	// Scratch buffer holding one coordinate per vertex for each sort pass.
+	float* f = PX_NEW_TEMP(float)[mNbVerts];
+
+	for(PxU32 i=0;i<mNbVerts;i++)
+		f[i] = mVerts[i].x;
+
+	// Three chained stable radix passes (x, then y, then z). The floats are
+	// sorted by their raw 32-bit patterns (RADIX_UNSIGNED): the resulting order
+	// is not a numeric float order, but that is irrelevant here - we only need
+	// bitwise-identical (x,y,z) triplets to end up adjacent in the ranks.
+	RadixSortBuffered Radix;
+	Radix.Sort(reinterpret_cast<const PxU32*>(f), mNbVerts, RADIX_UNSIGNED);
+
+	for(PxU32 i=0;i<mNbVerts;i++)
+		f[i] = mVerts[i].y;
+	Radix.Sort(reinterpret_cast<const PxU32*>(f), mNbVerts, RADIX_UNSIGNED);
+
+	for(PxU32 i=0;i<mNbVerts;i++)
+		f[i] = mVerts[i].z;
+	const PxU32* Sorted = Radix.Sort(reinterpret_cast<const PxU32*>(f), mNbVerts, RADIX_UNSIGNED).GetRanks();
+
+	PX_DELETE_POD(f);
+
+	// Walk the sorted order once; emit a vertex only when its bit pattern
+	// differs from the previous one (exact duplicates only - no welding).
+	mNbRVerts = 0;
+	const PxU32 Junk[] = {PX_INVALID_U32, PX_INVALID_U32, PX_INVALID_U32};	// sentinel "previous" that can't match a real vertex
+	const PxU32* Previous = Junk;
+	mRVerts = reinterpret_cast<PxVec3*>(PX_ALLOC(sizeof(PxVec3) * mNbVerts, "PxVec3"));	// worst case: no duplicates
+	PxU32 Nb = mNbVerts;
+	while(Nb--)
+	{
+		const PxU32 Vertex = *Sorted++;	// Vertex number
+
+		// Compare coordinates bitwise (consistent with the bitwise sort above).
+		const PxU32* current = reinterpret_cast<const PxU32*>(&mVerts[Vertex]);
+		if(current[0]!=Previous[0] || current[1]!=Previous[1] || current[2]!=Previous[2])
+			mRVerts[mNbRVerts++] = mVerts[Vertex];
+
+		Previous = current;
+
+		// Duplicates map to the reduced slot emitted for their first occurrence.
+		mXRef[Vertex] = mNbRVerts-1;
+	}
+
+	// Expose results; pointers remain owned by this object (freed by Clean()).
+	if(rc)
+	{
+		rc->CrossRef = mXRef;
+		rc->NbRVerts = mNbRVerts;
+		rc->RVerts = mRVerts;
+	}
+	return true;
+}
diff --git a/PhysX_3.4/Source/PhysXCooking/src/CookingUtils.h b/PhysX_3.4/Source/PhysXCooking/src/CookingUtils.h
new file mode 100644
index 00000000..41de2360
--- /dev/null
+++ b/PhysX_3.4/Source/PhysXCooking/src/CookingUtils.h
@@ -0,0 +1,79 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2016 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+
+#ifndef PX_COOKINGUTILS
+#define PX_COOKINGUTILS
+
+#include "foundation/PxVec3.h"
+#include "PxPhysXConfig.h"
+
+namespace physx
+{
+	//! Vertex cloud reduction result structure
+	//! NOTE: the pointers reference memory owned by the ReducedVertexCloud that
+	//! filled this structure; they stay valid until its Clean() or destructor runs.
+	struct REDUCEDCLOUD
+	{
+		// Out
+		PxVec3*			RVerts;		//!< Reduced list
+		PxU32			NbRVerts;	//!< Reduced number of vertices
+		PxU32*			CrossRef;	//!< nb_verts remapped indices
+	};
+
+	//! Removes exactly-duplicated vertices from a point cloud and builds a
+	//! cross-reference table so topologies indexing the original cloud can be
+	//! remapped to the reduced one. This is not welding: only bitwise-identical
+	//! vertices are merged (see Reduce()).
+	class ReducedVertexCloud
+	{
+		public:
+		// Constructors/destructor
+												ReducedVertexCloud(const PxVec3* verts, PxU32 nb_verts);
+												~ReducedVertexCloud();
+		// Free used bytes
+						ReducedVertexCloud&		Clean();
+		// Cloud reduction
+						bool					Reduce(REDUCEDCLOUD* rc=NULL);
+		// Data access
+		PX_INLINE		PxU32					GetNbVerts()				const	{ return mNbVerts;		}
+		PX_INLINE		PxU32					GetNbReducedVerts()			const	{ return mNbRVerts;		}
+		PX_INLINE		const PxVec3*			GetReducedVerts()			const	{ return mRVerts;		}
+		PX_INLINE		const PxVec3&			GetReducedVertex(PxU32 i)	const	{ return mRVerts[i];	}
+		PX_INLINE		const PxU32*			GetCrossRefTable()			const	{ return mXRef;			}
+
+		private:
+		// Original vertex cloud
+						PxU32					mNbVerts;	//!< Number of vertices
+						const PxVec3*			mVerts;		//!< List of vertices (pointer copy)
+
+		// Reduced vertex cloud
+						PxU32					mNbRVerts;	//!< Reduced number of vertices
+						PxVec3*					mRVerts;	//!< Reduced list of vertices
+						PxU32*					mXRef;		//!< Cross-reference table (used to remap topologies)
+	};
+
+}
+
+#endif
+
diff --git a/PhysX_3.4/Source/PhysXCooking/src/EdgeList.cpp b/PhysX_3.4/Source/PhysXCooking/src/EdgeList.cpp
new file mode 100644
index 00000000..bd0b58f9
--- /dev/null
+++ b/PhysX_3.4/Source/PhysXCooking/src/EdgeList.cpp
@@ -0,0 +1,753 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2016 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+
+#include "foundation/PxMemory.h"
+#include "EdgeList.h"
+#include "PxTriangle.h"
+#include "PsMathUtils.h"
+#include "CmRadixSortBuffered.h"
+#include "GuSerialize.h"
+#include "PsFoundation.h"
+
+using namespace physx;
+using namespace Gu;
+
+// Constructor: zero-initializes the whole edge-list payload so the destructor
+// and the accessors are safe to use before load()/init() succeeds.
+Gu::EdgeList::EdgeList()
+{
+	mData.mNbEdges = 0;
+	mData.mNbFaces = 0;	// fix: was left uninitialized, yet read by loops over mNbFaces (e.g. computeActiveEdges) and by clients
+	mData.mEdgeFaces = NULL;
+	mData.mEdges = NULL;
+	mData.mEdgeToTriangles = NULL;
+	mData.mFacesByEdges = NULL;
+}
+
+// Destructor: releases all owned arrays. Pointers are nulled by the macros,
+// so a partially-populated list (e.g. after a failed load()) is handled.
+// NOTE(review): mEdgeFaces is released with PX_DELETE_POD here but allocated
+// with PX_ALLOC in load() and with PX_NEW[] in EdgeListBuilder - presumably
+// these macros share the same underlying allocator for POD types; confirm
+// against PsAllocator.h.
+Gu::EdgeList::~EdgeList()
+{
+	PX_FREE_AND_RESET(mData.mFacesByEdges);
+	PX_FREE_AND_RESET(mData.mEdgeToTriangles);
+	PX_FREE_AND_RESET(mData.mEdges);
+	PX_DELETE_POD(mData.mEdgeFaces);
+}
+
+// Loads a precooked edge-list from a stream (an 'EDGE' chunk).
+// NOTE(review): only the dword counts go through readDword's byte-order
+// handling; the EdgeData/EdgeTriangleData/EdgeDescData payloads are read as
+// raw bytes, so a Mismatch stream presumably cannot be consumed here -
+// confirm against GuSerialize.
+// \return true if the header and all arrays were read successfully.
+bool Gu::EdgeList::load(PxInputStream& stream)
+{
+	// Import header
+	PxU32 Version;
+	bool Mismatch;
+	if(!ReadHeader('E', 'D', 'G', 'E', Version, Mismatch, stream))
+		return false;
+
+	// Import edges
+	mData.mNbEdges = readDword(Mismatch, stream);
+	// Fix: reject empty/corrupt files. The LastOffset computation below reads
+	// mEdgeToTriangles[mNbEdges-1], which underflows when the edge count is zero.
+	if(!mData.mNbEdges)
+		return false;
+	//mEdges = ICE_NEW_MEM(Edge[mNbEdges],Edge);
+	mData.mEdges = reinterpret_cast<EdgeData*>(PX_ALLOC(sizeof(EdgeData)*mData.mNbEdges, "EdgeData"));
+	stream.read(mData.mEdges, sizeof(EdgeData)*mData.mNbEdges);
+
+	mData.mNbFaces = readDword(Mismatch, stream);
+	//mEdgeFaces = ICE_NEW_MEM(EdgeTriangle[mNbFaces],EdgeTriangle);
+	mData.mEdgeFaces = reinterpret_cast<EdgeTriangleData*>(PX_ALLOC(sizeof(EdgeTriangleData)*mData.mNbFaces, "EdgeTriangleData"));
+	stream.read(mData.mEdgeFaces, sizeof(EdgeTriangleData)*mData.mNbFaces);
+
+	//mEdgeToTriangles = ICE_NEW_MEM(EdgeDesc[mNbEdges],EdgeDesc);
+	mData.mEdgeToTriangles = reinterpret_cast<EdgeDescData*>(PX_ALLOC(sizeof(EdgeDescData)*mData.mNbEdges, "EdgeDescData"));
+	stream.read(mData.mEdgeToTriangles, sizeof(EdgeDescData)*mData.mNbEdges);
+
+	// The faces-by-edges array size is not serialized explicitly: it is the end
+	// offset of the last edge's triangle range.
+	PxU32 LastOffset = mData.mEdgeToTriangles[mData.mNbEdges-1].Offset + mData.mEdgeToTriangles[mData.mNbEdges-1].Count;
+	mData.mFacesByEdges = reinterpret_cast<PxU32*>(PX_ALLOC(sizeof(PxU32)*LastOffset, "EdgeList FacesByEdges"));
+	stream.read(mData.mFacesByEdges, sizeof(PxU32)*LastOffset);
+
+	return true;
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+/**
+ * Initializes the edge-list.
+ * \param create [in] edge-list creation structure
+ * \return true if success.
+ */
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+bool Gu::EdgeListBuilder::init(const EDGELISTCREATE& create)
+{
+	// Active-edge computation (requested by providing vertices) needs both
+	// intermediate tables, so force them on in that case; the ones the caller
+	// did not ask for are released again at the end.
+	bool FacesToEdges = create.Verts ? true : create.FacesToEdges;
+	bool EdgesToFaces = create.Verts ? true : create.EdgesToFaces;
+
+	// "FacesToEdges" maps each face to three edges.
+	if(FacesToEdges && !createFacesToEdges(create.NbFaces, create.DFaces, create.WFaces))
+		return false;
+
+	// "EdgesToFaces" maps each edge to the set of faces sharing this edge
+	if(EdgesToFaces && !createEdgesToFaces(create.NbFaces, create.DFaces, create.WFaces))
+		return false;
+
+	// Create active edges
+	if(create.Verts && !computeActiveEdges(create.NbFaces, create.DFaces, create.WFaces, create.Verts, create.Epsilon))
+		return false;
+
+	// Get rid of useless data
+	// NOTE(review): mEdgeFaces was allocated with PX_NEW[] in createFacesToEdges
+	// but is freed with PX_FREE_AND_RESET here - presumably fine for POD via the
+	// shared allocator; confirm against PsAllocator.h.
+	if(!create.FacesToEdges)
+	{
+		PX_FREE_AND_RESET(mData.mEdgeFaces);
+	}
+	if(!create.EdgesToFaces)
+	{
+		PX_FREE_AND_RESET(mData.mEdgeToTriangles);
+		PX_FREE_AND_RESET(mData.mFacesByEdges);
+	}
+
+	return true;
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+/**
+ * Computes FacesToEdges.
+ * After the call:
+ * - mNbEdges is updated with the number of non-redundant edges
+ * - mEdges is a list of mNbEdges edges (one edge is 2 vertex-references)
+ * - mEdgesRef is a list of nbfaces structures with 3 indexes in mEdges for each face
+ *
+ * \param nb_faces [in] a number of triangles
+ * \param dfaces [in] list of triangles with PxU32 vertex references (or NULL)
+ * \param wfaces [in] list of triangles with PxU16 vertex references (or NULL)
+ * \return true if success.
+ */
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+bool Gu::EdgeListBuilder::createFacesToEdges(PxU32 nb_faces, const PxU32* dfaces, const PxU16* wfaces)
+{
+	// Checkings
+	if(!nb_faces || (!dfaces && !wfaces))
+	{
+		Ps::getFoundation().error(PxErrorCode::eINVALID_OPERATION, __FILE__, __LINE__, "EdgeList::CreateFacesToEdges: NULL parameter!");
+		return false;
+	}
+
+	if(mData.mEdgeFaces)
+		return true;	// Already computed!
+
+	// 1) Get some bytes: I need one EdgesRefs for each face, and some temp buffers
+	// Temp buffers are sized for the worst case: 3 unique edges per face.
+	mData.mEdgeFaces = PX_NEW(EdgeTriangleData)[nb_faces];	// Link faces to edges
+	PxU32* VRefs0 = PX_NEW_TEMP(PxU32)[nb_faces*3];			// Temp storage
+	PxU32* VRefs1 = PX_NEW_TEMP(PxU32)[nb_faces*3];			// Temp storage
+	EdgeData* Buffer = PX_NEW_TEMP(EdgeData)[nb_faces*3];	// Temp storage
+
+	// 2) Create a full redundant list of 3 edges / face.
+	for(PxU32 i=0;i<nb_faces;i++)
+	{
+		// Get right vertex-references (dfaces takes precedence over wfaces;
+		// the 0/1/2 fallbacks are unreachable given the check above)
+		const PxU32 Ref0 = dfaces ? dfaces[i*3+0] : wfaces ? wfaces[i*3+0] : 0;
+		const PxU32 Ref1 = dfaces ? dfaces[i*3+1] : wfaces ? wfaces[i*3+1] : 1;
+		const PxU32 Ref2 = dfaces ? dfaces[i*3+2] : wfaces ? wfaces[i*3+2] : 2;
+
+		// Pre-Sort vertex-references and put them in the lists, so that the
+		// same edge produces the same (smaller, larger) pair no matter which
+		// winding the sharing faces use.
+		if(Ref0<Ref1)	{ VRefs0[i*3+0] = Ref0;	VRefs1[i*3+0] = Ref1;	}	// Edge 0-1 maps (i%3)
+		else			{ VRefs0[i*3+0] = Ref1;	VRefs1[i*3+0] = Ref0;	}	// Edge 0-1 maps (i%3)
+
+		if(Ref1<Ref2)	{ VRefs0[i*3+1] = Ref1;	VRefs1[i*3+1] = Ref2;	}	// Edge 1-2 maps (i%3)+1
+		else			{ VRefs0[i*3+1] = Ref2;	VRefs1[i*3+1] = Ref1;	}	// Edge 1-2 maps (i%3)+1
+
+		if(Ref2<Ref0)	{ VRefs0[i*3+2] = Ref2;	VRefs1[i*3+2] = Ref0;	}	// Edge 2-0 maps (i%3)+2
+		else			{ VRefs0[i*3+2] = Ref0;	VRefs1[i*3+2] = Ref2;	}	// Edge 2-0 maps (i%3)+2
+	}
+
+	// 3) Sort the list according to both keys (VRefs0 and VRefs1)
+	// Two chained stable sorts: secondary key (VRefs1) first, primary key
+	// (VRefs0) last, leaving identical (Ref0,Ref1) pairs adjacent in the ranks.
+	Cm::RadixSortBuffered Sorter;
+	const PxU32* Sorted = Sorter.Sort(VRefs1, nb_faces*3).Sort(VRefs0, nb_faces*3).GetRanks();
+
+	// 4) Loop through all possible edges
+	// - clean edges list by removing redundant edges
+	// - create EdgesRef list
+	mData.mNbEdges = 0;	// #non-redundant edges
+	mData.mNbFaces = nb_faces;
+	PxU32 PreviousRef0 = PX_INVALID_U32;
+	PxU32 PreviousRef1 = PX_INVALID_U32;
+	for(PxU32 i=0;i<nb_faces*3;i++)
+	{
+		PxU32 Face = Sorted[i];				// Between 0 and nbfaces*3
+		PxU32 ID = Face % 3;				// Get edge ID back.
+		PxU32 SortedRef0 = VRefs0[Face];	// (SortedRef0, SortedRef1) is the sorted edge
+		PxU32 SortedRef1 = VRefs1[Face];
+
+		if(SortedRef0!=PreviousRef0 || SortedRef1!=PreviousRef1)
+		{
+			// New edge found! => stored in temp buffer
+			Buffer[mData.mNbEdges].Ref0 = SortedRef0;
+			Buffer[mData.mNbEdges].Ref1 = SortedRef1;
+			mData.mNbEdges++;
+		}
+		PreviousRef0 = SortedRef0;
+		PreviousRef1 = SortedRef1;
+
+		// Create mEdgesRef on the fly
+		// Face/3 recovers the triangle index, ID its local edge slot (0..2);
+		// mNbEdges-1 is the index of the most recently emitted unique edge.
+		mData.mEdgeFaces[Face/3].mLink[ID] = mData.mNbEdges-1;
+	}
+
+	// 5) Here, mNbEdges==#non redundant edges
+	mData.mEdges = reinterpret_cast<EdgeData*>(PX_ALLOC(sizeof(EdgeData)*mData.mNbEdges, "EdgeData"));
+
+	// Create real edges-list.
+	PxMemCopy(mData.mEdges, Buffer, mData.mNbEdges*sizeof(EdgeData));
+
+	// 6) Free ram and exit
+	PX_DELETE_POD(Buffer);
+	PX_DELETE_POD(VRefs1);
+	PX_DELETE_POD(VRefs0);
+
+	return true;
+}
+
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+/**
+ * Computes EdgesToFaces.
+ * After the call:
+ * - mEdgeToTriangles is created
+ * - mFacesByEdges is created
+ *
+ * \param nb_faces [in] a number of triangles
+ * \param dfaces [in] list of triangles with PxU32 vertex references (or NULL)
+ * \param wfaces [in] list of triangles with PxU16 vertex references (or NULL)
+ * \return true if success.
+ */
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+bool Gu::EdgeListBuilder::createEdgesToFaces(PxU32 nb_faces, const PxU32* dfaces, const PxU16* wfaces)
+{
+	// 1) I need FacesToEdges !
+	// (no-op if already computed; also validates the input parameters)
+	if(!createFacesToEdges(nb_faces, dfaces, wfaces))
+		return false;
+
+	// 2) Get some bytes: one Pair structure / edge
+	mData.mEdgeToTriangles = reinterpret_cast<EdgeDescData*>(PX_ALLOC(sizeof(EdgeDescData)*mData.mNbEdges, "EdgeDescData"));
+	PxMemZero(mData.mEdgeToTriangles, sizeof(EdgeDescData)*mData.mNbEdges);
+
+	// 3) Create Counters, ie compute the #faces sharing each edge
+	// (classic counting-sort pattern: count, prefix-sum, scatter, restore)
+	for(PxU32 i=0;i<nb_faces;i++)
+	{
+		mData.mEdgeToTriangles[mData.mEdgeFaces[i].mLink[0]].Count++;
+		mData.mEdgeToTriangles[mData.mEdgeFaces[i].mLink[1]].Count++;
+		mData.mEdgeToTriangles[mData.mEdgeFaces[i].mLink[2]].Count++;
+	}
+
+	// 3) Create Radix-like Offsets (exclusive prefix sum of the counts)
+	mData.mEdgeToTriangles[0].Offset=0;
+	for(PxU32 i=1;i<mData.mNbEdges;i++)
+		mData.mEdgeToTriangles[i].Offset = mData.mEdgeToTriangles[i-1].Offset + mData.mEdgeToTriangles[i-1].Count;
+
+	// Total number of (edge, face) incidences = size of the scatter target.
+	PxU32 LastOffset = mData.mEdgeToTriangles[mData.mNbEdges-1].Offset + mData.mEdgeToTriangles[mData.mNbEdges-1].Count;
+
+	// 4) Get some bytes for mFacesByEdges. LastOffset is the number of indices needed.
+	mData.mFacesByEdges = reinterpret_cast<PxU32*>(PX_ALLOC(sizeof(PxU32)*LastOffset, "EdgeListBuilder FacesByEdges"));
+
+	// 5) Create mFacesByEdges
+	// Scatter face indices into each edge's range; this walks the Offset
+	// fields forward, which is why they are rebuilt in step 6.
+	for(PxU32 i=0;i<nb_faces;i++)
+	{
+		mData.mFacesByEdges[mData.mEdgeToTriangles[mData.mEdgeFaces[i].mLink[0]].Offset++] = i;
+		mData.mFacesByEdges[mData.mEdgeToTriangles[mData.mEdgeFaces[i].mLink[1]].Offset++] = i;
+		mData.mFacesByEdges[mData.mEdgeToTriangles[mData.mEdgeFaces[i].mLink[2]].Offset++] = i;
+	}
+
+	// 6) Recompute offsets wasted by 5)
+	mData.mEdgeToTriangles[0].Offset=0;
+	for(PxU32 i=1;i<mData.mNbEdges;i++)
+	{
+		mData.mEdgeToTriangles[i].Offset = mData.mEdgeToTriangles[i-1].Offset + mData.mEdgeToTriangles[i-1].Count;
+	}
+
+	return true;
+}
+
+// Given a triangle (r0, r1, r2) and one of its edges (vref0, vref1), returns
+// the triangle's third vertex, i.e. the one not on that edge. Returns
+// PX_INVALID_U32 when (vref0, vref1) is not an edge of the triangle.
+static PX_INLINE PxU32 OppositeVertex(PxU32 r0, PxU32 r1, PxU32 r2, PxU32 vref0, PxU32 vref1)
+{
+	// Flat enumeration of the six valid (vref0, vref1) pairings.
+	if(vref0==r0 && vref1==r1)	return r2;
+	if(vref0==r0 && vref1==r2)	return r1;
+	if(vref0==r1 && vref1==r0)	return r2;
+	if(vref0==r1 && vref1==r2)	return r0;
+	if(vref0==r2 && vref1==r1)	return r0;
+	if(vref0==r2 && vref1==r0)	return r1;
+	return PX_INVALID_U32;
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+/**
+ * Computes active edges and active vertices.
+ *
+ * An edge is flagged active when it can contribute to collision features:
+ * - boundary edges (shared by a single triangle) are always active
+ * - edges shared by exactly two triangles are active when the faces form a convex angle whose
+ *   normal deviation (Ps::angle) exceeds epsilon, or when the faces are back-to-back (double-sided)
+ * - edges shared by more than two triangles are reduced to unique faces (back-face duplicates
+ *   merged); 1 or >2 unique faces => active, exactly 2 => same convexity rules as above.
+ *
+ * Results are written into mEdgeFaces links (MSH_ACTIVE_EDGE_MASK / MSH_ACTIVE_VERTEX_MASK)
+ * and mEdgeToTriangles flags (PX_EDGE_ACTIVE). Requires FacesToEdges and EdgesToFaces data.
+ *
+ * \param nb_faces	[in] a number of triangles
+ * \param dfaces	[in] list of triangles with PxU32 vertex references (or NULL)
+ * \param wfaces	[in] list of triangles with PxU16 vertex references (or NULL)
+ * \param verts		[in] vertex positions
+ * \param epsilon	[in] angular threshold compared against Ps::angle(N0, N1)
+ * \return true if success.
+ */
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+bool Gu::EdgeListBuilder::computeActiveEdges(PxU32 nb_faces, const PxU32* dfaces, const PxU16* wfaces, const PxVec3* verts, float epsilon)
+{
+	// Checkings
+	if(!verts || (!dfaces && !wfaces))
+	{
+		Ps::getFoundation().error(PxErrorCode::eINVALID_OPERATION, __FILE__, __LINE__, "EdgeList::ComputeActiveEdges: NULL parameter!");
+		return false;
+	}
+
+	PxU32 NbEdges = getNbEdges();
+	if(!NbEdges)
+	{
+		Ps::getFoundation().error(PxErrorCode::eINVALID_OPERATION, __FILE__, __LINE__, "ActiveEdges::ComputeConvexEdges: no edges in edge list!");
+		return false;
+	}
+
+	const EdgeData* Edges = getEdges();
+	if(!Edges)
+	{
+		Ps::getFoundation().error(PxErrorCode::eINVALID_OPERATION, __FILE__, __LINE__, "ActiveEdges::ComputeConvexEdges: no edge data in edge list!");
+		return false;
+	}
+
+	const EdgeDescData* ED = getEdgeToTriangles();
+	if(!ED)
+	{
+		Ps::getFoundation().error(PxErrorCode::eINVALID_OPERATION, __FILE__, __LINE__, "ActiveEdges::ComputeConvexEdges: no edge-to-triangle in edge list!");
+		return false;
+	}
+
+	const PxU32* FBE = getFacesByEdges();
+	if(!FBE)
+	{
+		Ps::getFoundation().error(PxErrorCode::eINVALID_OPERATION, __FILE__, __LINE__, "ActiveEdges::ComputeConvexEdges: no faces-by-edges in edge list!");
+		return false;
+	}
+
+	// We first create active edges in a temporary buffer. We have one bool / edge.
+	// Every entry is written exactly once in the loop below, so no memset is needed.
+	bool* ActiveEdges = reinterpret_cast<bool*>(PX_ALLOC_TEMP(sizeof(bool)*NbEdges, "bool"));
+
+	// Loop through edges and look for convex ones
+	bool* CurrentMark = ActiveEdges;
+
+	while(NbEdges--)
+	{
+		// Get number of triangles sharing current edge
+		PxU32 Count = ED->Count;
+		// Boundary edges are active => keep them (actually they're silhouette edges directly)
+		// Internal edges can be active => test them
+		// Singular edges ? => discard them
+		bool Active = false;
+		if(Count==1)
+		{
+			Active = true;
+		}
+		else if(Count==2)
+		{
+			// Regular manifold edge: exactly two sharing triangles.
+			PxU32 FaceIndex0 = FBE[ED->Offset+0]*3;
+			PxU32 FaceIndex1 = FBE[ED->Offset+1]*3;
+
+			PxU32 VRef00, VRef01, VRef02;
+			PxU32 VRef10, VRef11, VRef12;
+
+			if(dfaces)
+			{
+				VRef00 = dfaces[FaceIndex0+0];
+				VRef01 = dfaces[FaceIndex0+1];
+				VRef02 = dfaces[FaceIndex0+2];
+				VRef10 = dfaces[FaceIndex1+0];
+				VRef11 = dfaces[FaceIndex1+1];
+				VRef12 = dfaces[FaceIndex1+2];
+			}
+			else //if(wfaces)
+			{
+				PX_ASSERT(wfaces);
+				VRef00 = wfaces[FaceIndex0+0];
+				VRef01 = wfaces[FaceIndex0+1];
+				VRef02 = wfaces[FaceIndex0+2];
+				VRef10 = wfaces[FaceIndex1+0];
+				VRef11 = wfaces[FaceIndex1+1];
+				VRef12 = wfaces[FaceIndex1+2];
+			}
+
+			{
+				// We first check the opposite vertex against the plane
+
+				PxU32 Op = OppositeVertex(VRef00, VRef01, VRef02, Edges->Ref0, Edges->Ref1);
+
+				PxPlane PL1(verts[VRef10], verts[VRef11], verts[VRef12]);
+
+				if(PL1.distance(verts[Op])<0.0f)	// If opposite vertex is below the plane, i.e. we discard concave edges
+				{
+					// Convex edge: active only if the faces deviate by more than epsilon.
+					PxTriangle T0(verts[VRef00], verts[VRef01], verts[VRef02]);
+					PxTriangle T1(verts[VRef10], verts[VRef11], verts[VRef12]);
+
+					PxVec3 N0, N1;
+					T0.normal(N0);
+					T1.normal(N1);
+					const float a = Ps::angle(N0, N1);
+
+					if(fabsf(a)>epsilon) Active = true;
+				}
+				else
+				{
+					// Concave or flat - but opposing normals indicate a
+					// double-sided surface, which keeps the edge active.
+					PxTriangle T0(verts[VRef00], verts[VRef01], verts[VRef02]);
+					PxTriangle T1(verts[VRef10], verts[VRef11], verts[VRef12]);
+					PxVec3 N0, N1;
+					T0.normal(N0);
+					T1.normal(N1);
+
+					if(N0.dot(N1) < -0.999f)
+					{
+						Active = true;
+					}
+
+				}
+			}
+
+		}
+		else
+		{
+			//Connected to more than 2
+			//We need to loop through the triangles and count the number of unique triangles (considering back-face triangles as non-unique). If we end up with more than 2 unique triangles,
+			//then by definition this is an inactive edge. However, if we end up with 2 unique triangles (say like a double-sided tesselated surface), then it depends on the same rules as above
+
+			PxU32 FaceInd0 = FBE[ED->Offset]*3;
+			PxU32 VRef00, VRef01, VRef02;
+			PxU32 VRef10=0, VRef11=0, VRef12=0;
+			if(dfaces)
+			{
+				VRef00 = dfaces[FaceInd0+0];
+				VRef01 = dfaces[FaceInd0+1];
+				VRef02 = dfaces[FaceInd0+2];
+			}
+			else //if(wfaces)
+			{
+				PX_ASSERT(wfaces);
+				VRef00 = wfaces[FaceInd0+0];
+				VRef01 = wfaces[FaceInd0+1];
+				VRef02 = wfaces[FaceInd0+2];
+			}
+
+
+			PxU32 numUniqueTriangles = 1;
+			bool doubleSided0 = false;
+			bool doubleSided1 = false;	// fix: was '= 0' - int literal initializing a bool, inconsistent with doubleSided0
+
+			for(PxU32 a = 1; a < Count; ++a)
+			{
+				PxU32 FaceInd = FBE[ED->Offset+a]*3;
+
+				PxU32 VRef0, VRef1, VRef2;
+				if(dfaces)
+				{
+					VRef0 = dfaces[FaceInd+0];
+					VRef1 = dfaces[FaceInd+1];
+					VRef2 = dfaces[FaceInd+2];
+				}
+				else //if(wfaces)
+				{
+					PX_ASSERT(wfaces);
+					VRef0 = wfaces[FaceInd+0];
+					VRef1 = wfaces[FaceInd+1];
+					VRef2 = wfaces[FaceInd+2];
+				}
+
+				// "Same triangle" here means same vertex set (any winding), so
+				// back-face duplicates collapse onto their front face.
+				if(((VRef0 != VRef00) && (VRef0 != VRef01) && (VRef0 != VRef02)) ||
+					((VRef1 != VRef00) && (VRef1 != VRef01) && (VRef1 != VRef02)) ||
+					((VRef2 != VRef00) && (VRef2 != VRef01) && (VRef2 != VRef02)))
+				{
+					//Not the same as trig 0
+					if(numUniqueTriangles == 2)
+					{
+						if(((VRef0 != VRef10) && (VRef0 != VRef11) && (VRef0 != VRef12)) ||
+							((VRef1 != VRef10) && (VRef1 != VRef11) && (VRef1 != VRef12)) ||
+							((VRef2 != VRef10) && (VRef2 != VRef11) && (VRef2 != VRef12)))
+						{
+							//Too many unique triangles - terminate and mark as inactive
+							numUniqueTriangles++;
+							break;
+						}
+						else
+						{
+							// Duplicate of the second unique triangle - check
+							// whether it is its back face.
+							PxTriangle T0(verts[VRef10], verts[VRef11], verts[VRef12]);
+							PxTriangle T1(verts[VRef0], verts[VRef1], verts[VRef2]);
+							PxVec3 N0, N1;
+							T0.normal(N0);
+							T1.normal(N1);
+
+							if(N0.dot(N1) < -0.999f)
+								doubleSided1 = true;
+						}
+					}
+					else
+					{
+						// Second unique triangle found - remember it for the
+						// convexity test after the loop.
+						VRef10 = VRef0;
+						VRef11 = VRef1;
+						VRef12 = VRef2;
+						numUniqueTriangles++;
+					}
+				}
+				else
+				{
+					//Check for double sided...
+					PxTriangle T0(verts[VRef00], verts[VRef01], verts[VRef02]);
+					PxTriangle T1(verts[VRef0], verts[VRef1], verts[VRef2]);
+					PxVec3 N0, N1;
+					T0.normal(N0);
+					T1.normal(N1);
+
+					if(N0.dot(N1) < -0.999f)
+						doubleSided0 = true;
+				}
+			}
+
+			if(numUniqueTriangles == 1)
+				Active = true;
+			if(numUniqueTriangles == 2)
+			{
+				//Potentially active. Let's check the angles between the surfaces...
+
+				if(doubleSided0 || doubleSided1)
+				{
+
+					// Plane PL1 = faces[FBE[ED->Offset+1]].PlaneEquation(verts);
+					PxPlane PL1(verts[VRef10], verts[VRef11], verts[VRef12]);
+
+					// if(PL1.Distance(verts[Op])<-epsilon)	Active = true;
+					//if(PL1.distance(verts[Op])<0.0f)	// If opposite vertex is below the plane, i.e. we discard concave edges
+					//KS - can't test signed distance for concave edges. This is a double-sided poly
+					{
+						PxTriangle T0(verts[VRef00], verts[VRef01], verts[VRef02]);
+						PxTriangle T1(verts[VRef10], verts[VRef11], verts[VRef12]);
+
+						PxVec3 N0, N1;
+						T0.normal(N0);
+						T1.normal(N1);
+						const float a = Ps::angle(N0, N1);
+
+						if(fabsf(a)>epsilon)
+							Active = true;
+					}
+				}
+				else
+				{
+
+					//Not double sided...must have had a bunch of duplicate triangles!!!!
+					//Treat as normal
+					PxU32 Op = OppositeVertex(VRef00, VRef01, VRef02, Edges->Ref0, Edges->Ref1);
+
+					// Plane PL1 = faces[FBE[ED->Offset+1]].PlaneEquation(verts);
+					PxPlane PL1(verts[VRef10], verts[VRef11], verts[VRef12]);
+
+					// if(PL1.Distance(verts[Op])<-epsilon)	Active = true;
+					if(PL1.distance(verts[Op])<0.0f)	// If opposite vertex is below the plane, i.e. we discard concave edges
+					{
+						PxTriangle T0(verts[VRef00], verts[VRef01], verts[VRef02]);
+						PxTriangle T1(verts[VRef10], verts[VRef11], verts[VRef12]);
+
+						PxVec3 N0, N1;
+						T0.normal(N0);
+						T1.normal(N1);
+						const float a = Ps::angle(N0, N1);
+
+						if(fabsf(a)>epsilon)
+							Active = true;
+					}
+				}
+			}
+			else
+			{
+				//Lots of triangles all smooshed together. Just activate the edge in this case
+				Active = true;
+			}
+
+		}
+
+		*CurrentMark++ = Active;
+		ED++;
+		Edges++;
+	}
+
+
+	// Now copy bits back into already existing edge structures
+	// - first in edge triangles
+	for(PxU32 i=0;i<mData.mNbFaces;i++)
+	{
+		EdgeTriangleData& ET = mData.mEdgeFaces[i];
+		for(PxU32 j=0;j<3;j++)
+		{
+			PxU32 Link = ET.mLink[j];
+			if(!(Link & MSH_ACTIVE_EDGE_MASK))	// else already active
+			{
+				if(ActiveEdges[Link & MSH_EDGE_LINK_MASK])	ET.mLink[j] |= MSH_ACTIVE_EDGE_MASK;	// Mark as active
+			}
+		}
+	}
+
+	// - then in edge-to-faces
+	for(PxU32 i=0;i<mData.mNbEdges;i++)
+	{
+		if(ActiveEdges[i])	mData.mEdgeToTriangles[i].Flags |= PX_EDGE_ACTIVE;
+	}
+
+	// Free & exit
+	PX_FREE_AND_RESET(ActiveEdges);
+
+	{
+		//initially all vertices are flagged to ignore them. (we assume them to be flat)
+		//for all NONFLAT edges, incl boundary
+		//unflag 2 vertices in up to 2 trigs as perhaps interesting
+		//for all CONCAVE edges
+		//flag 2 vertices in up to 2 trigs to ignore them.
+
+		// Handle active vertices
+		// First find the highest referenced vertex index to size the flag array.
+		PxU32 MaxIndex = 0;
+		for(PxU32 i=0;i<nb_faces;i++)
+		{
+			PxU32 VRef0, VRef1, VRef2;
+			if(dfaces)
+			{
+				VRef0 = dfaces[i*3+0];
+				VRef1 = dfaces[i*3+1];
+				VRef2 = dfaces[i*3+2];
+			}
+			else //if(wfaces)
+			{
+				PX_ASSERT(wfaces);
+				VRef0 = wfaces[i*3+0];
+				VRef1 = wfaces[i*3+1];
+				VRef2 = wfaces[i*3+2];
+			}
+			if(VRef0>MaxIndex)	MaxIndex = VRef0;
+			if(VRef1>MaxIndex)	MaxIndex = VRef1;
+			if(VRef2>MaxIndex)	MaxIndex = VRef2;
+		}
+
+		MaxIndex++;
+		bool* ActiveVerts = reinterpret_cast<bool*>(PX_ALLOC_TEMP(sizeof(bool)*MaxIndex, "bool"));
+		PxMemZero(ActiveVerts, MaxIndex*sizeof(bool));
+
+		// A vertex is active when at least one active edge touches it.
+		PX_ASSERT(dfaces || wfaces);
+		for(PxU32 i=0;i<mData.mNbFaces;i++)
+		{
+			PxU32 VRef[3];
+			if(dfaces)
+			{
+				VRef[0] = dfaces[i*3+0];
+				VRef[1] = dfaces[i*3+1];
+				VRef[2] = dfaces[i*3+2];
+			}
+			else if(wfaces)
+			{
+				VRef[0] = wfaces[i*3+0];
+				VRef[1] = wfaces[i*3+1];
+				VRef[2] = wfaces[i*3+2];
+			}
+
+			const EdgeTriangleData& ET = mData.mEdgeFaces[i];
+			for(PxU32 j=0;j<3;j++)
+			{
+				PxU32 Link = ET.mLink[j];
+				if(Link & MSH_ACTIVE_EDGE_MASK)
+				{
+					// Active edge => mark edge vertices as active
+					PxU32 r0, r1;
+					if(j==0)		{ r0=0; r1=1; }
+					else if(j==1)	{ r0=1; r1=2; }
+					else /*if(j==2)*/	{ PX_ASSERT(j==2); r0=0; r1=2; }
+					ActiveVerts[VRef[r0]] = ActiveVerts[VRef[r1]] = true;
+				}
+			}
+		}
+
+/*		for(PxU32 i=0;i<mNbFaces;i++)
+		{
+			PxU32 VRef[3];
+			if(dfaces)
+			{
+				VRef[0] = dfaces[i*3+0];
+				VRef[1] = dfaces[i*3+1];
+				VRef[2] = dfaces[i*3+2];
+			}
+			else if(wfaces)
+			{
+				VRef[0] = wfaces[i*3+0];
+				VRef[1] = wfaces[i*3+1];
+				VRef[2] = wfaces[i*3+2];
+			}
+
+			const EdgeTriangle& ET = mEdgeFaces[i];
+			for(PxU32 j=0;j<3;j++)
+			{
+				PxU32 Link = ET.mLink[j];
+				if(!(Link & MSH_ACTIVE_EDGE_MASK))
+				{
+					// Inactive edge => mark edge vertices as inactive
+					PxU32 r0, r1;
+					if(j==0)	{ r0=0; r1=1; }
+					if(j==1)	{ r0=1; r1=2; }
+					if(j==2)	{ r0=0; r1=2; }
+					ActiveVerts[VRef[r0]] = ActiveVerts[VRef[r1]] = false;
+				}
+			}
+		}*/
+
+		// Now stuff this into the structure
+		// Per-link vertex flag: link j's flag reflects vertex j of the face.
+		for(PxU32 i=0;i<mData.mNbFaces;i++)
+		{
+			PxU32 VRef[3];
+			if(dfaces)
+			{
+				VRef[0] = dfaces[i*3+0];
+				VRef[1] = dfaces[i*3+1];
+				VRef[2] = dfaces[i*3+2];
+			}
+			else if(wfaces)
+			{
+				VRef[0] = wfaces[i*3+0];
+				VRef[1] = wfaces[i*3+1];
+				VRef[2] = wfaces[i*3+2];
+			}
+
+			EdgeTriangleData& ET = mData.mEdgeFaces[i];
+			for(PxU32 j=0;j<3;j++)
+			{
+				PxU32 Link = ET.mLink[j];
+				if(!(Link & MSH_ACTIVE_VERTEX_MASK))	// else already active
+				{
+					if(ActiveVerts[VRef[j]])	ET.mLink[j] |= MSH_ACTIVE_VERTEX_MASK;	// Mark as active
+				}
+			}
+		}
+
+		PX_FREE_AND_RESET(ActiveVerts);
+	}
+
+	return true;
+}
+
+// Default constructor: all initialization is done by the EdgeList base constructor.
+Gu::EdgeListBuilder::EdgeListBuilder()
+{
+}
+
+// Destructor: the EdgeList base destructor releases all owned arrays.
+Gu::EdgeListBuilder::~EdgeListBuilder()
+{
+}
+
+
diff --git a/PhysX_3.4/Source/PhysXCooking/src/EdgeList.h b/PhysX_3.4/Source/PhysXCooking/src/EdgeList.h
new file mode 100644
index 00000000..d4a47873
--- /dev/null
+++ b/PhysX_3.4/Source/PhysXCooking/src/EdgeList.h
@@ -0,0 +1,110 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2016 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+
+#ifndef PX_EDGELIST
+#define PX_EDGELIST
+
+// PT: this file should be moved to cooking lib
+
+#include "foundation/Px.h"
+#include "PsUserAllocated.h"
+
+// Data/code shared with LL
+#include "GuEdgeListData.h"
+
+namespace physx
+{
+namespace Gu
+{
+ //! The edge-list creation structure.
+ struct EDGELISTCREATE
+ {
+ EDGELISTCREATE() :
+ NbFaces (0),
+ DFaces (NULL),
+ WFaces (NULL),
+ FacesToEdges (false),
+ EdgesToFaces (false),
+ Verts (NULL),
+ Epsilon (0.1f)
+ {}
+
+ PxU32 NbFaces; //!< Number of faces in source topo
+ const PxU32* DFaces; //!< List of faces (dwords) or NULL
+ const PxU16* WFaces; //!< List of faces (words) or NULL
+
+ bool FacesToEdges;
+ bool EdgesToFaces;
+ const PxVec3* Verts;
+ float Epsilon;
+ };
+
+	//! Run-time edge-list container. Owns an EdgeListData and exposes read-only
+	//! access to its edges, per-face edge triangles and edge-to-triangle adjacency.
+	class EdgeList : public Ps::UserAllocated
+	{
+		public:
+											EdgeList();
+											~EdgeList();
+
+		//! Deserializes the edge-list data from a stream. Returns false on failure.
+					bool					load(PxInputStream& stream);
+		// Data access
+		PX_INLINE	PxU32					getNbEdges()							const	{ return mData.mNbEdges;					}
+		PX_INLINE	const Gu::EdgeData*		getEdges()								const	{ return mData.mEdges;						}
+		PX_INLINE	const Gu::EdgeData&		getEdge(PxU32 edge_index)				const	{ return mData.mEdges[edge_index];			}
+		//
+		PX_INLINE	PxU32					getNbFaces()							const	{ return mData.mNbFaces;					}
+		PX_INLINE	const Gu::EdgeTriangleData*	getEdgeTriangles()					const	{ return mData.mEdgeFaces;					}
+		PX_INLINE	const Gu::EdgeTriangleData&	getEdgeTriangle(PxU32 face_index)	const	{ return mData.mEdgeFaces[face_index];		}
+		//
+		PX_INLINE	const Gu::EdgeDescData*	getEdgeToTriangles()					const	{ return mData.mEdgeToTriangles;			}
+		PX_INLINE	const Gu::EdgeDescData&	getEdgeToTriangles(PxU32 edge_index)	const	{ return mData.mEdgeToTriangles[edge_index];}
+		PX_INLINE	const PxU32*			getFacesByEdges()						const	{ return mData.mFacesByEdges;				}
+		PX_INLINE	PxU32					getFacesByEdges(PxU32 face_index)		const	{ return mData.mFacesByEdges[face_index];	}
+
+		protected:
+					Gu::EdgeListData		mData;					//!< Edge-list data (stored inline, by value — not a pointer)
+	};
+
+	//! Edge-list construction: fills the inherited EdgeListData from the triangle
+	//! topology described by an EDGELISTCREATE structure.
+	class EdgeListBuilder : public EdgeList
+	{
+		public:
+								EdgeListBuilder();
+								~EdgeListBuilder();
+
+		//! Builds the edge list. Which tables get built is controlled by the create structure.
+				bool			init(const EDGELISTCREATE& create);
+		private:
+				// Builds the per-face edge data (faces-to-edges mapping).
+				bool			createFacesToEdges(PxU32 nb_faces, const PxU32* dfaces, const PxU16* wfaces);
+				// Builds the reverse (edges-to-faces) adjacency tables.
+				bool			createEdgesToFaces(PxU32 nb_faces, const PxU32* dfaces, const PxU16* wfaces);
+				// Flags active edges/vertices using the mesh geometry (MSH_ACTIVE_* masks).
+				bool			computeActiveEdges(PxU32 nb_faces, const PxU32* dfaces, const PxU16* wfaces, const PxVec3* verts, float epsilon);
+	};
+}
+
+}
+
+#endif
diff --git a/PhysX_3.4/Source/PhysXCooking/src/MeshCleaner.cpp b/PhysX_3.4/Source/PhysXCooking/src/MeshCleaner.cpp
new file mode 100644
index 00000000..0f4b6f67
--- /dev/null
+++ b/PhysX_3.4/Source/PhysXCooking/src/MeshCleaner.cpp
@@ -0,0 +1,233 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2016 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#include "foundation/PxVec3.h"
+#include "MeshCleaner.h"
+#include "PsAllocator.h"
+#include "PsBitUtils.h"
+
+#ifndef PX_COOKING
+#error Do not include anymore!
+#endif
+
+using namespace physx;
+
+// A triangle's three vertex references, used as a hash key when removing
+// duplicate triangles.
+struct Indices
+{
+	PxU32	mRef[3];
+
+	// Two triangles differ as soon as any one of the three references differs
+	// (order-sensitive: this is a plain component-wise comparison).
+	PX_FORCE_INLINE	bool	operator!=(const Indices& other)	const
+	{
+		return !(mRef[0] == other.mRef[0] && mRef[1] == other.mRef[1] && mRef[2] == other.mRef[2]);
+	}
+};
+
+// Hashes a 3D position by mixing the raw 32-bit patterns of its three floats.
+// Since the bits are compared/hashed directly, positions must be bitwise equal
+// to land in the same bucket. The 0x7fffffff mask (per the original comment)
+// is there to sidestep +0.0f/-0.0f sign-bit differences — NOTE(review): it is
+// applied to the combined value, not per component; verify this is sufficient.
+static PX_FORCE_INLINE PxU32 getHashValue(const PxVec3& v)
+{
+	const PxU32* h = reinterpret_cast<const PxU32*>(&v.x);
+	const PxU32 f = (h[0]+h[1]*11-(h[2]*17)) & 0x7fffffff;	// avoid problems with +-0
+	return (f>>22)^(f>>12)^(f);
+}
+
+// Hashes a triangle's three vertex references. The mixing rounds below appear
+// to be Bob Jenkins' classic 96-bit integer mix (three words combined through
+// subtract/xor/shift rounds). It is order-sensitive, so permuted triangles
+// hash differently — consistent with the order-sensitive operator!= above.
+static PX_FORCE_INLINE PxU32 getHashValue(const Indices& v)
+{
+// const PxU32* h = v.mRef;
+// const PxU32 f = (h[0]+h[1]*11-(h[2]*17)) & 0x7fffffff;	// avoid problems with +-0
+// return (f>>22)^(f>>12)^(f);
+
+	PxU32 a = v.mRef[0];
+	PxU32 b = v.mRef[1];
+	PxU32 c = v.mRef[2];
+	a=a-b;  a=a-c;  a=a^(c >> 13);
+	b=b-c;  b=b-a;  b=b^(a << 8);
+	c=c-a;  c=c-b;  c=c^(b >> 13);
+	a=a-b;  a=a-c;  a=a^(c >> 12);
+	b=b-c;  b=b-a;  b=b^(a << 16);
+	c=c-a;  c=c-b;  c=c^(b >> 5);
+	a=a-b;  a=a-c;  a=a^(c >> 3);
+	b=b-c;  b=b-a;  b=b^(a << 10);
+	c=c-a;  c=c-b;  c=c^(b >> 15);
+	return c;
+}
+
+// Builds cleaned mesh data from the raw input mesh:
+//  1) if meshWeldTolerance != 0, snap every vertex onto a grid of that resolution,
+//  2) weld vertices sharing the same (possibly snapped) position, stripping
+//     vertices not referenced by any triangle,
+//  3) drop triangles with out-of-range indices, zero area, or vertices that
+//     collapsed together after welding,
+//  4) drop duplicate triangles,
+//  5) record a remap table (cleaned triangle index -> source triangle index)
+//     unless the mapping turned out to be the identity (mRemap stays NULL).
+// All output buffers are allocated here and owned by this object.
+MeshCleaner::MeshCleaner(PxU32 nbVerts, const PxVec3* srcVerts, PxU32 nbTris, const PxU32* srcIndices, PxF32 meshWeldTolerance)
+{
+	PxVec3* cleanVerts = reinterpret_cast<PxVec3*>(PX_ALLOC(sizeof(PxVec3)*nbVerts, "MeshCleaner"));
+	PX_ASSERT(cleanVerts);
+
+	PxU32* indices = reinterpret_cast<PxU32*>(PX_ALLOC(sizeof(PxU32)*nbTris*3, "MeshCleaner"));
+
+	PxU32* remapTriangles = reinterpret_cast<PxU32*>(PX_ALLOC(sizeof(PxU32)*nbTris, "MeshCleaner"));
+
+	PxU32* vertexIndices = NULL;
+	if(meshWeldTolerance!=0.0f)
+	{
+		// vertexIndices remembers, for each kept (welded) vertex, the source
+		// vertex it came from, so the original (un-snapped) coordinates can be
+		// restored at the end.
+		vertexIndices = reinterpret_cast<PxU32*>(PX_ALLOC(sizeof(PxU32)*nbVerts, "MeshCleaner"));
+		const PxF32 weldTolerance = 1.0f / meshWeldTolerance;
+		// snap to grid
+		for(PxU32 i=0; i<nbVerts; i++)
+		{
+			vertexIndices[i] = i;
+			cleanVerts[i] = PxVec3(	PxFloor(srcVerts[i].x*weldTolerance + 0.5f),
+									PxFloor(srcVerts[i].y*weldTolerance + 0.5f),
+									PxFloor(srcVerts[i].z*weldTolerance + 0.5f));
+		}
+	}
+	else
+	{
+		memcpy(cleanVerts, srcVerts, nbVerts*sizeof(PxVec3));
+	}
+
+	// One allocation holds the hash buckets (hashSize entries) followed by the
+	// per-element collision chain ('next'). It is sized for the larger of
+	// tris/verts so the same storage serves both dedup passes below.
+	const PxU32 maxNbElems = PxMax(nbTris, nbVerts);
+	const PxU32 hashSize = shdfnd::nextPowerOfTwo(maxNbElems);
+	const PxU32 hashMask = hashSize-1;
+	PxU32* hashTable = reinterpret_cast<PxU32*>(PX_ALLOC(sizeof(PxU32)*(hashSize + maxNbElems), "MeshCleaner"));
+	PX_ASSERT(hashTable);
+	memset(hashTable, 0xff, hashSize * sizeof(PxU32));
+	PxU32* const next = hashTable + hashSize;
+
+	PxU32* remapVerts = reinterpret_cast<PxU32*>(PX_ALLOC(sizeof(PxU32)*nbVerts, "MeshCleaner"));
+	memset(remapVerts, 0xff, nbVerts * sizeof(PxU32));
+
+	// Mark vertices referenced by at least one in-range triangle; vertices left
+	// at 0xffffffff are unreferenced and get stripped by the loop below.
+	for(PxU32 i=0;i<nbTris*3;i++)
+	{
+		const PxU32 vref = srcIndices[i];
+		if(vref<nbVerts)
+			remapVerts[vref] = 0;
+	}
+
+	// Weld pass: chained hashing on the (possibly snapped) positions — vertices
+	// with bitwise-identical positions collapse onto one cleaned index.
+	PxU32 nbCleanedVerts = 0;
+	for(PxU32 i=0;i<nbVerts;i++)
+	{
+		if(remapVerts[i]==0xffffffff)
+			continue;
+
+		const PxVec3& v = cleanVerts[i];
+		const PxU32 hashValue = getHashValue(v) & hashMask;
+		PxU32 offset = hashTable[hashValue];
+
+		while(offset!=0xffffffff && cleanVerts[offset]!=v)
+			offset = next[offset];
+
+		if(offset==0xffffffff)
+		{
+			// First time we see this position: keep it and link it into the bucket.
+			remapVerts[i] = nbCleanedVerts;
+			cleanVerts[nbCleanedVerts] = v;
+			if(vertexIndices)
+				vertexIndices[nbCleanedVerts] = i;
+			next[nbCleanedVerts] = hashTable[hashValue];
+			hashTable[hashValue] = nbCleanedVerts++;
+		}
+		else remapVerts[i] = offset;
+	}
+
+	// Triangle validation pass: reject out-of-range references, zero-area
+	// triangles, and triangles whose vertices merged during welding.
+	PxU32 nbCleanedTris = 0;
+	for(PxU32 i=0;i<nbTris;i++)
+	{
+		PxU32 vref0 = *srcIndices++;
+		PxU32 vref1 = *srcIndices++;
+		PxU32 vref2 = *srcIndices++;
+		if(vref0>=nbVerts || vref1>=nbVerts || vref2>=nbVerts)
+			continue;
+
+		// PT: you can still get zero-area faces when the 3 vertices are perfectly aligned
+		// Note: the degeneracy test uses the ORIGINAL positions, not the snapped ones.
+		const PxVec3& p0 = srcVerts[vref0];
+		const PxVec3& p1 = srcVerts[vref1];
+		const PxVec3& p2 = srcVerts[vref2];
+		const float area2 = ((p0 - p1).cross(p0 - p2)).magnitudeSquared();
+		if(area2==0.0f)
+			continue;
+
+		vref0 = remapVerts[vref0];
+		vref1 = remapVerts[vref1];
+		vref2 = remapVerts[vref2];
+		if(vref0==vref1 || vref1==vref2 || vref2==vref0)
+			continue;
+
+		indices[nbCleanedTris*3+0] = vref0;
+		indices[nbCleanedTris*3+1] = vref1;
+		indices[nbCleanedTris*3+2] = vref2;
+		remapTriangles[nbCleanedTris] = i;
+		nbCleanedTris++;
+	}
+	PX_FREE(remapVerts);
+
+	// Second dedup pass: remove exact duplicate triangles, reusing the same
+	// hash storage. Also detect whether the kept triangles end up in their
+	// original order (identity remap), in which case mRemap can stay NULL.
+	PxU32 nbToGo = nbCleanedTris;
+	nbCleanedTris = 0;
+	memset(hashTable, 0xff, hashSize * sizeof(PxU32));
+
+	Indices* const I = reinterpret_cast<Indices*>(indices);
+	bool idtRemap = true;
+	for(PxU32 i=0;i<nbToGo;i++)
+	{
+		const Indices& v = I[i];
+		const PxU32 hashValue = getHashValue(v) & hashMask;
+		PxU32 offset = hashTable[hashValue];
+
+		while(offset!=0xffffffff && I[offset]!=v)
+			offset = next[offset];
+
+		if(offset==0xffffffff)
+		{
+			const PxU32 originalIndex = remapTriangles[i];
+			PX_ASSERT(nbCleanedTris<=i);	// compaction only ever moves triangles backwards
+			remapTriangles[nbCleanedTris] = originalIndex;
+			if(originalIndex!=nbCleanedTris)
+				idtRemap = false;
+			I[nbCleanedTris] = v;
+			next[nbCleanedTris] = hashTable[hashValue];
+			hashTable[hashValue] = nbCleanedTris++;
+		}
+	}
+	PX_FREE(hashTable);
+
+	if(vertexIndices)
+	{
+		// cleanVerts currently holds grid-snapped positions; restore the
+		// original coordinates of each kept vertex.
+		for(PxU32 i=0;i<nbCleanedVerts;i++)
+			cleanVerts[i] = srcVerts[vertexIndices[i]];
+		PX_FREE(vertexIndices);
+	}
+	mNbVerts	= nbCleanedVerts;
+	mNbTris		= nbCleanedTris;
+	mVerts		= cleanVerts;
+	mIndices	= indices;
+	if(idtRemap)
+	{
+		// Triangle order survived cleaning unchanged — no remap table needed.
+		PX_FREE(remapTriangles);
+		mRemap	= NULL;
+	}
+	else
+	{
+		mRemap	= remapTriangles;
+	}
+}
+
+// Releases the owned output buffers. mRemap may legitimately be NULL (identity
+// remap, see constructor) — assumes PX_FREE_AND_RESET tolerates NULL.
+MeshCleaner::~MeshCleaner()
+{
+	PX_FREE_AND_RESET(mRemap);
+	PX_FREE_AND_RESET(mIndices);
+	PX_FREE_AND_RESET(mVerts);
+}
diff --git a/PhysX_3.4/Source/PhysXCooking/src/MeshCleaner.h b/PhysX_3.4/Source/PhysXCooking/src/MeshCleaner.h
new file mode 100644
index 00000000..be3219ce
--- /dev/null
+++ b/PhysX_3.4/Source/PhysXCooking/src/MeshCleaner.h
@@ -0,0 +1,55 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2016 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#ifndef PX_MESH_CLEANER_H
+#define PX_MESH_CLEANER_H
+
+#include "foundation/Px.h"
+
+#ifndef PX_COOKING
+#error Do not include anymore!
+#endif
+
+namespace physx
+{
+	// Welds duplicate vertices, removes unreferenced vertices and degenerate or
+	// duplicate triangles. When the cleaned triangle order differs from the
+	// input order, a remap table (cleaned index -> source index) is recorded.
+	// All output buffers are owned by the object and freed in the destructor.
+	class MeshCleaner
+	{
+		public:
+								MeshCleaner(PxU32 nbVerts, const PxVec3* verts, PxU32 nbTris, const PxU32* indices, PxF32 meshWeldTolerance);
+								~MeshCleaner();
+
+				PxU32			mNbVerts;	// Number of vertices after cleaning
+				PxU32			mNbTris;	// Number of triangles after cleaning
+				PxVec3*			mVerts;		// Cleaned vertices (owned)
+				PxU32*			mIndices;	// Cleaned triangle indices, mNbTris*3 entries (owned)
+				PxU32*			mRemap;		// Cleaned-to-source triangle remap, or NULL if identity (owned)
+	};
+}
+
+#endif // PX_MESH_CLEANER_H
diff --git a/PhysX_3.4/Source/PhysXCooking/src/Quantizer.cpp b/PhysX_3.4/Source/PhysXCooking/src/Quantizer.cpp
new file mode 100644
index 00000000..6c68892a
--- /dev/null
+++ b/PhysX_3.4/Source/PhysXCooking/src/Quantizer.cpp
@@ -0,0 +1,338 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2016 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#include "Quantizer.h"
+
+#include "foundation/PxVec3.h"
+#include "foundation/PxBounds3.h"
+
+#include "PsUserAllocated.h"
+#include "PsAllocator.h"
+#include "PsArray.h"
+
+#include "CmPhysXCommon.h"
+
+using namespace physx;
+
+PxU32 kmeans_cluster3d(const PxVec3 *input, // an array of input 3d data points.
+ PxU32 inputSize, // the number of input data points.
+					   PxU32 clumpCount,			// the number of clumps you wish to produce.
+ PxVec3 *outputClusters, // The output array of clumps 3d vectors, should be at least 'clumpCount' in size.
+ PxU32 *outputIndices, // A set of indices which remaps the input vertices to clumps; should be at least 'inputSize'
+ float errorThreshold=0.01f, // The error threshold to converge towards before giving up.
+ float collapseDistance=0.01f); // distance so small it is not worth bothering to create a new clump.
+
+// Generic k-means clustering of 'inputCount' points of type Vec (scalar Type).
+//
+// input            : input points.
+// clumpCount       : requested number of clusters.
+// clusters         : output array, at least clumpCount entries.
+// outputIndices    : optional point->cluster remap. NOTE: must be non-NULL when
+//                    inputCount > clumpCount, because the convergence loop uses
+//                    it as scratch storage for each point's nearest cluster.
+// threshold        : convergence threshold on the error delta per iteration.
+// collapseDistance : clusters closer than this are merged during pruning.
+//
+// Returns the number of clusters actually produced after pruning empty and
+// near-duplicate clusters (may be less than clumpCount).
+template <class Vec,class Type >
+PxU32	kmeans_cluster(const Vec *input,
+					   PxU32 inputCount,
+					   PxU32 clumpCount,
+					   Vec *clusters,
+					   PxU32 *outputIndices,
+					   Type threshold,
+					   Type collapseDistance)
+{
+	PxU32 convergeCount = 64; // maximum number of iterations attempting to converge to a solution..
+	PxU32* counts = reinterpret_cast<PxU32*> (PX_ALLOC_TEMP(sizeof(PxU32)*clumpCount, "PxU32"));
+	Type error=0;
+	if ( inputCount <= clumpCount ) // if the number of input points is less than our clumping size, just return the input points.
+	{
+		clumpCount = inputCount;
+		for (PxU32 i=0; i<inputCount; i++)
+		{
+			if ( outputIndices )
+			{
+				outputIndices[i] = i;
+			}
+			clusters[i] = input[i];
+			counts[i] = 1;
+		}
+	}
+	else
+	{
+		// Fix: use the template's vector type for the centroid scratch buffer.
+		// The original hard-coded PxVec3 here, which silently restricted this
+		// "generic" template to Vec == PxVec3.
+		Vec* centroids = reinterpret_cast<Vec*> (PX_ALLOC_TEMP(sizeof(Vec)*clumpCount, "Vec"));
+
+		// Take a sampling of the input points as initial centroid estimates.
+		for (PxU32 i=0; i<clumpCount; i++)
+		{
+			PxU32 index = (i*inputCount)/clumpCount;
+			PX_ASSERT( index < inputCount );
+			clusters[i] = input[index];
+		}
+
+		// Here is the main convergence loop
+		Type old_error = FLT_MAX;	// old and initial error estimates are max Type
+		error = FLT_MAX;
+		do
+		{
+			old_error = error;	// preserve the old error
+			// reset the counts and centroids to current cluster location
+			for (PxU32 i=0; i<clumpCount; i++)
+			{
+				counts[i] = 0;
+				centroids[i] = Vec(Type(0));	// generic zero-init (was PxVec3(PxZero))
+			}
+			error = 0;
+			// For each input data point, figure out which cluster it is closest too and add it to that cluster.
+			for (PxU32 i=0; i<inputCount; i++)
+			{
+				Type min_distance = FLT_MAX;
+				// find the nearest clump to this point.
+				for (PxU32 j=0; j<clumpCount; j++)
+				{
+					const Type distance = (input[i] - clusters[j]).magnitudeSquared();
+					if ( distance < min_distance )
+					{
+						min_distance = distance;
+						outputIndices[i] = j; // save which clump this point indexes
+					}
+				}
+				const PxU32 index = outputIndices[i]; // which clump was nearest to this point.
+				centroids[index]+=input[i];
+				counts[index]++;	// increment the counter indicating how many points are in this clump.
+				error+=min_distance; // save the error accumulation
+			}
+			// Now, for each clump, compute the mean and store the result.
+			for (PxU32 i=0; i<clumpCount; i++)
+			{
+				if ( counts[i] ) // if this clump got any points added to it...
+				{
+					const Type recip = 1.0f / Type(counts[i]);	// compute the average (center of those points)
+					centroids[i]*=recip;	// compute the average center of the points in this clump.
+					clusters[i] = centroids[i]; // store it as the new cluster.
+				}
+			}
+			// decrement the convergence counter and bail if it is taking too long to converge to a solution.
+			convergeCount--;
+			if (convergeCount == 0 )
+			{
+				break;
+			}
+			if ( error < threshold ) // early exit if our first guess is already good enough (if all input points are the same)
+				break;
+		} while ( PxAbs(error - old_error) > threshold ); // keep going until the error is reduced by this threshold amount.
+
+		PX_FREE(centroids);
+	}
+
+	// ok..now we prune the clumps if necessary.
+	// The rules are; first, if a clump has no 'counts' then we prune it as it's unused.
+	// The second, is if the centroid of this clump is essentially the same (based on the distance tolerance)
+	// as an existing clump, then it is pruned and all indices which used to point to it, now point to the one
+	// it is closest too.
+	PxU32 outCount = 0; // number of clumps output after pruning performed.
+	Type d2 = collapseDistance*collapseDistance; // squared collapse distance.
+	for (PxU32 i=0; i<clumpCount; i++)
+	{
+		if ( counts[i] == 0 ) // if no points ended up in this clump, eliminate it.
+			continue;
+		// see if this clump is too close to any already accepted clump.
+		bool add = true;
+		PxU32 remapIndex = outCount; // by default this clump will be remapped to its current index.
+		for (PxU32 j=0; j<outCount; j++)
+		{
+			Type distance = (clusters[i] - clusters[j]).magnitudeSquared();
+			if ( distance < d2 )
+			{
+				remapIndex = j;
+				add = false; // we do not add this clump
+				break;
+			}
+		}
+		// If we have fewer output clumps than input clumps so far, then we need to remap the old indices to the new ones.
+		if ( outputIndices )
+		{
+			if ( outCount != i || !add ) // we need to remap indices!  everything that was index 'i' now needs to be remapped to 'outCount'
+			{
+				for (PxU32 j=0; j<inputCount; j++)
+				{
+					if ( outputIndices[j] == i )
+					{
+						outputIndices[j] = remapIndex; //
+					}
+				}
+			}
+		}
+		if ( add )
+		{
+			clusters[outCount] = clusters[i];
+			outCount++;
+		}
+	}
+	PX_FREE(counts);
+	clumpCount = outCount;
+	return clumpCount;
+}
+
+// Thin wrapper: instantiates the generic kmeans_cluster template for
+// PxVec3 points with float scalars.
+PxU32	kmeans_cluster3d(const PxVec3 *input,		// an array of input 3d data points.
+						 PxU32 inputSize,			// the number of input data points.
+						 PxU32 clumpCount,			// the number of clumps you wish to produce
+						 PxVec3	*outputClusters,	// The output array of clumps 3d vectors, should be at least 'clumpCount' in size.
+						 PxU32	*outputIndices,		// A set of indices which remaps the input vertices to clumps; should be at least 'inputSize'
+						 float errorThreshold,		// The error threshold to converge towards before giving up.
+						 float collapseDistance)	// distance so small it is not worth bothering to create a new clump.
+{
+	return kmeans_cluster< PxVec3, float >(input, inputSize, clumpCount, outputClusters, outputIndices, errorThreshold, collapseDistance);
+}
+
+// Concrete Quantizer: normalizes the input point cloud into a unit-ish cube,
+// runs k-means clustering to reduce it to at most 'maxVertices' representative
+// points, and optionally maps the result back into the original space.
+class QuantizerImpl : public Quantizer, public Ps::UserAllocated
+{
+public:
+	QuantizerImpl(void)
+	{
+		// Identity transform until normalizeInput() computes the real values.
+		mScale = PxVec3(1.0f, 1.0f, 1.0f);
+		mCenter = PxVec3(0.0f, 0.0f, 0.0f);
+	}
+
+	// Use the k-means quantizer, similar results, but much slower.
+	// Returns a pointer into mQuantizedOutput (owned by this object, valid
+	// until the next call or release()), or NULL when nothing was produced
+	// (vcount == 0 or clustering yielded no clusters).
+	virtual const PxVec3 * kmeansQuantize3D(PxU32 vcount,
+											const PxVec3 *vertices,
+											PxU32 stride,
+											bool denormalizeResults,
+											PxU32 maxVertices,
+											PxU32 &outVertsCount)
+	{
+		const PxVec3 *ret = NULL;
+		outVertsCount = 0;
+		mNormalizedInput.clear();
+		mQuantizedOutput.clear();
+
+		if ( vcount > 0 )
+		{
+			normalizeInput(vcount,vertices, stride);
+
+			// kmeans_cluster3d never produces more clusters than input points,
+			// so vcount entries are enough even when maxVertices > vcount.
+			PxVec3* quantizedOutput = reinterpret_cast<PxVec3*> (PX_ALLOC_TEMP(sizeof(PxVec3)*vcount, "PxVec3"));
+			PxU32* quantizedIndices = reinterpret_cast<PxU32*> (PX_ALLOC_TEMP(sizeof(PxU32)*vcount, "PxU32"));
+			outVertsCount = kmeans_cluster3d(&mNormalizedInput[0], vcount, maxVertices, quantizedOutput, quantizedIndices, 0.01f, 0.0001f );
+			if ( outVertsCount > 0 )
+			{
+				if ( denormalizeResults )
+				{
+					// Map the clusters from normalized space back to the
+					// original space: v * scale + center.
+					for (PxU32 i=0; i<outVertsCount; i++)
+					{
+						PxVec3 v( quantizedOutput[i] );
+						v = v.multiply(mScale) + mCenter;
+						mQuantizedOutput.pushBack(v);
+					}
+				}
+				else
+				{
+					for (PxU32 i=0; i<outVertsCount; i++)
+					{
+						const PxVec3& v( quantizedOutput[i] );
+						mQuantizedOutput.pushBack(v);
+					}
+				}
+				ret = &mQuantizedOutput[0];
+			}
+			PX_FREE(quantizedOutput);
+			PX_FREE(quantizedIndices);
+		}
+		return ret;
+	}
+
+	virtual void release(void)
+	{
+		PX_DELETE(this);
+	}
+
+	virtual const PxVec3& getDenormalizeScale(void) const
+	{
+		return mScale;
+	}
+
+	virtual const PxVec3& getDenormalizeCenter(void) const
+	{
+		return mCenter;
+	}
+
+
+
+private:
+
+	// Computes the input AABB, derives mCenter/mScale from it, and fills
+	// mNormalizedInput with points mapped into roughly [-1,1] on each axis.
+	// 'stride' is the byte distance between consecutive input vertices.
+	void normalizeInput(PxU32 vcount,const PxVec3 *vertices, PxU32 stride)
+	{
+		const char* vtx = reinterpret_cast<const char *> (vertices);
+		mNormalizedInput.clear();
+		mQuantizedOutput.clear();
+		PxBounds3 bounds;
+		bounds.setEmpty();
+		for (PxU32 i=0; i<vcount; i++)
+		{
+			const PxVec3& v = *reinterpret_cast<const PxVec3 *> (vtx);
+			vtx += stride;
+
+			bounds.include(v);
+		}
+
+		mCenter = bounds.getCenter();
+
+		// Scale is half the (slightly inflated) extents; the 1.001 inflation
+		// keeps normalized values strictly inside [-1,1].
+		PxVec3 dim = bounds.getDimensions();
+		dim *= 1.001f;
+		mScale = dim*0.5f;
+
+		// Zero-extent axes get scale 1 to avoid division by zero below.
+		for (PxU32 i = 0; i < 3; i++)
+		{
+			if(dim[i] == 0)
+				mScale[i] = 1.0f;
+		}
+
+		PxVec3 recip;
+		recip.x = 1.0f / mScale.x;
+		recip.y = 1.0f / mScale.y;
+		recip.z = 1.0f / mScale.z;
+
+		vtx = reinterpret_cast<const char *> (vertices);
+		for (PxU32 i=0; i<vcount; i++)
+		{
+			PxVec3 v = *reinterpret_cast<const PxVec3 *> (vtx);
+			vtx += stride;
+
+			v = (v - mCenter).multiply(recip);
+
+			mNormalizedInput.pushBack(v);
+		}
+	}
+
+	// Destruction goes through release() / PX_DELETE.
+	virtual ~QuantizerImpl(void)
+	{
+
+	}
+
+	private:
+		PxVec3				mScale;				// Denormalization scale (half extents of the input AABB)
+		PxVec3				mCenter;			// Denormalization center (center of the input AABB)
+		Ps::Array<PxVec3>	mNormalizedInput;	// Input points mapped into normalized space
+		Ps::Array<PxVec3>	mQuantizedOutput;	// Result storage returned by kmeansQuantize3D
+};
+
+// Factory for the k-means quantizer. The caller disposes of the returned
+// instance via Quantizer::release().
+Quantizer * physx::createQuantizer(void)
+{
+	// Derived-to-base pointer conversion is implicit; no cast required.
+	QuantizerImpl* quantizer = PX_NEW(QuantizerImpl);
+	return quantizer;
+}
diff --git a/PhysX_3.4/Source/PhysXCooking/src/Quantizer.h b/PhysX_3.4/Source/PhysXCooking/src/Quantizer.h
new file mode 100644
index 00000000..0b6e408e
--- /dev/null
+++ b/PhysX_3.4/Source/PhysXCooking/src/Quantizer.h
@@ -0,0 +1,76 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2016 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+
+#ifndef QUANTIZER_H
+#define QUANTIZER_H
+
+#include "foundation/Px.h"
+
+namespace physx
+{
+
+	//////////////////////////////////////////////////////////////////////////
+	// K-means quantization class
+	// see http://en.wikipedia.org/wiki/K-means_clustering
+	// implementation from John Ratcliff http://codesuppository.blogspot.ch/2010/12/k-means-clustering-algorithm.html
+	class Quantizer
+	{
+	public:
+		// quantize the input vertices
+		//   vcount             - number of input vertices
+		//   vertices           - input vertex positions
+		//   stride             - byte stride between consecutive input vertices
+		//   denormalizeResults - when true, the output is mapped back into the
+		//                        original (un-normalized) space
+		//   maxVertices        - maximum number of output vertices to produce
+		//   outVertsCount      - receives the number of vertices actually produced
+		// Returns a pointer to the quantized vertices (owned by the quantizer),
+		// or NULL when no output was produced.
+		virtual const PxVec3* kmeansQuantize3D(PxU32 vcount,
+											const PxVec3 *vertices,
+											PxU32 stride,
+											bool denormalizeResults,
+											PxU32 maxVertices,
+											PxU32 &outVertsCount) = 0;
+
+		// returns the denormalized scale
+		virtual const PxVec3& getDenormalizeScale(void) const = 0;
+
+		// returns the denormalized center
+		virtual const PxVec3& getDenormalizeCenter(void) const = 0;
+
+		// release internal data
+		virtual void release(void) = 0;
+
+
+	protected:
+		// Non-public: instances are destroyed through release().
+		virtual ~Quantizer(void)
+		{
+
+		}
+	};
+
+	// creates the quantizer class
+	Quantizer * createQuantizer(void);
+
+}
+
+#endif
diff --git a/PhysX_3.4/Source/PhysXCooking/src/convex/BigConvexDataBuilder.cpp b/PhysX_3.4/Source/PhysXCooking/src/convex/BigConvexDataBuilder.cpp
new file mode 100644
index 00000000..5ab5965d
--- /dev/null
+++ b/PhysX_3.4/Source/PhysXCooking/src/convex/BigConvexDataBuilder.cpp
@@ -0,0 +1,353 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2016 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+
+#include "PsUserAllocated.h"
+#include "PsUtilities.h"
+#include "PsMathUtils.h"
+#include "PsVecMath.h"
+
+#include "PxCooking.h"
+
+#include "GuConvexMeshData.h"
+#include "GuBigConvexData2.h"
+#include "GuIntersectionRayPlane.h"
+#include "GuSerialize.h"
+
+#include "BigConvexDataBuilder.h"
+#include "EdgeList.h"
+
+#include "ConvexHullBuilder.h"
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+using namespace physx;
+using namespace Gu;
+using namespace Ps::aos;
+
+// Binary format versions of the serialized 'SUPM' (support map) and 'GAUS' (gaussmap) blocks.
+static const PxU32 gSupportVersion = 0;
+static const PxU32 gVersion = 0;
+
+// Builder over an existing hull: stores the hull description, the BigConvexData
+// to fill in, and the hull vertex array. None of the pointers are owned here.
+BigConvexDataBuilder::BigConvexDataBuilder(const Gu::ConvexHullData* hull, BigConvexData* gm, const PxVec3* hullVerts)
+	: mHull(hull)
+	, mSVM(gm)
+	, mHullVerts(hullVerts)
+{
+}
+
+// Nothing to clean up: the pointers passed at construction are not freed here.
+BigConvexDataBuilder::~BigConvexDataBuilder()
+{
+}
+
+// Allocates the sample table: two PxU8 start indices per sample direction,
+// laid out as two consecutive arrays of mNbSamples bytes (filled by precompute()).
+// mNbSamples must already be set by the caller.
+bool BigConvexDataBuilder::initialize()
+{
+	mSVM->mData.mSamples = PX_NEW(PxU8)[mSVM->mData.mNbSamples*2u];
+
+#if PX_DEBUG
+// printf("SVM: %d bytes\n", mNbSamples*sizeof(PxU8)*2);
+#endif
+
+	return true;
+}
+
+// Serializes the support vertex map: 'SUPM' header, then the 'GAUS' gaussmap
+// block (subdiv + sample count + raw sample bytes), then the valency data.
+// \param stream           [out] destination stream
+// \param platformMismatch [in]  endian-swap scalar values for the target platform
+bool BigConvexDataBuilder::save(PxOutputStream& stream, bool platformMismatch) const
+{
+	// Export header
+	if(!WriteHeader('S', 'U', 'P', 'M', gSupportVersion, platformMismatch, stream))
+		return false;
+
+	// Save base gaussmap
+//	if(!GaussMapBuilder::Save(stream, platformMismatch)) return false;
+	// Export header
+	if(!WriteHeader('G', 'A', 'U', 'S', gVersion, platformMismatch, stream))
+		return false;
+
+	// Export basic info
+	// stream.StoreDword(mSubdiv);
+	writeDword(mSVM->mData.mSubdiv, platformMismatch, stream); // PT: could now write Word here
+	// stream.StoreDword(mNbSamples);
+	writeDword(mSVM->mData.mNbSamples, platformMismatch, stream); // PT: could now write Word here
+
+	// Save map data
+	// It's an array of bytes so we don't care about 'PlatformMismatch'
+	stream.write(mSVM->mData.mSamples, sizeof(PxU8)*mSVM->mData.mNbSamples*2);
+
+	if(!saveValencies(stream, platformMismatch))
+		return false;
+
+	return true;
+}
+
+//////////////////////////////////////////////////////////////////////////
+// compute valencies for each vertex
+// we dont compute the edges again here, we have them temporary stored in mHullDataFacesByAllEdges8 structure
+// Builds, for every hull vertex, its valency (number of incident edges) and the
+// ordered list of adjacent vertices, by walking the polygon fan around each
+// vertex via the per-edge face adjacency (mHullDataFacesByAllEdges8).
+// \param meshBuilder [in] hull builder whose temporary edge/face data is reused
+bool BigConvexDataBuilder::computeValencies(const ConvexHullBuilder& meshBuilder)
+{
+	PX_ASSERT(meshBuilder.mHullDataFacesByAllEdges8);
+
+	// Create valencies
+	const PxU32 numVertices = meshBuilder.mHull->mNbHullVertices;
+	mSVM->mData.mNbVerts = numVertices;
+
+	// Get ram for valencies
+	mSVM->mData.mValencies = PX_NEW(Gu::Valency)[mSVM->mData.mNbVerts];
+	PxMemZero(mSVM->mData.mValencies, numVertices*sizeof(Gu::Valency));
+	// 256 is enough: hull vertex indices are PxU8 (see PxU8 vertex data below)
+	PxU8 vertexMarker[256];
+	PxMemZero(vertexMarker,numVertices);
+	// Get ram for adjacent vertices references
+	mSVM->mData.mAdjacentVerts = PX_NEW(PxU8)[meshBuilder.mHull->mNbEdges*2u];
+
+	// Compute valencies
+	// First pass: count how often each vertex appears across all polygons.
+	for (PxU32 i = 0; i < meshBuilder.mHull->mNbPolygons; i++)
+	{
+		PxU32 numVerts = meshBuilder.mHullDataPolygons[i].mNbVerts;
+		const PxU8* Data = meshBuilder.mHullDataVertexData8 + meshBuilder.mHullDataPolygons[i].mVRef8;
+		for (PxU32 j = 0; j < numVerts; j++)
+		{
+			mSVM->mData.mValencies[Data[j]].mCount++;
+			PX_ASSERT(mSVM->mData.mValencies[Data[j]].mCount != 0xffff);
+		}
+	}
+
+	// Create offsets
+	mSVM->CreateOffsets();
+
+	// mNbAdjVerts = mOffsets[mNbVerts-1] + mValencies[mNbVerts-1];
+	mSVM->mData.mNbAdjVerts = PxU32(mSVM->mData.mValencies[mSVM->mData.mNbVerts - 1].mOffset + mSVM->mData.mValencies[mSVM->mData.mNbVerts - 1].mCount);
+	PX_ASSERT(mSVM->mData.mNbAdjVerts == PxU32(meshBuilder.mHull->mNbEdges * 2));
+
+	// Create adjacent vertices
+	// parse the polygons and its vertices
+	// Second pass: for each vertex (visited once), walk the ring of neighboring
+	// polygons around it and record the adjacent vertex on each incident edge.
+	// NOTE: mOffset is advanced while writing and restored by the final
+	// CreateOffsets() call below.
+	for (PxU32 i = 0; i < meshBuilder.mHull->mNbPolygons; i++)
+	{
+		PxU32 numVerts = meshBuilder.mHullDataPolygons[i].mNbVerts;
+		const PxU8* Data = meshBuilder.mHullDataVertexData8 + meshBuilder.mHullDataPolygons[i].mVRef8;
+		for (PxU32 j = 0; j < numVerts; j++)
+		{
+			const PxU8 vertexIndex = Data[j];
+			PxU8 numAdj = 0;
+			// if we did not parsed this vertex, traverse to the adjacent face and then
+			// again to next till we hit back the original polygon
+			if(vertexMarker[vertexIndex] == 0)
+			{
+				PxU8 prevIndex = Data[(j+1)%numVerts];
+				mSVM->mData.mAdjacentVerts[mSVM->mData.mValencies[vertexIndex].mOffset++] = prevIndex;
+				numAdj++;
+				// now traverse the neighbors
+				// each edge slot stores the two faces sharing it; pick the one that is not us
+				PxU8 n0 = meshBuilder.mHullDataFacesByAllEdges8[(meshBuilder.mHullDataPolygons[i].mVRef8 + j)*2];
+				PxU8 n1 = meshBuilder.mHullDataFacesByAllEdges8[(meshBuilder.mHullDataPolygons[i].mVRef8 + j)*2 + 1];
+				PxU32 neighborPolygon = n0 == i ? n1 : n0;
+				while (neighborPolygon != i)
+				{
+					PxU32 numNeighborVerts = meshBuilder.mHullDataPolygons[neighborPolygon].mNbVerts;
+					const PxU8* neighborData = meshBuilder.mHullDataVertexData8 + meshBuilder.mHullDataPolygons[neighborPolygon].mVRef8;
+					// NOTE(review): stays 0 if vertexIndex is not found in the neighbor
+					// polygon -- relies on the adjacency data being consistent; confirm.
+					PxU32 nextEdgeIndex = 0;
+					// search in the neighbor face for the tested vertex
+					for (PxU32 k = 0; k < numNeighborVerts; k++)
+					{
+						// search the vertexIndex
+						if(neighborData[k] == vertexIndex)
+						{
+							const PxU8 nextIndex = neighborData[(k+1)%numNeighborVerts];
+							// next index already there, pick the previous
+							if(nextIndex == prevIndex)
+							{
+								prevIndex = k == 0 ? neighborData[numNeighborVerts - 1] : neighborData[k-1];
+								nextEdgeIndex = k == 0 ? numNeighborVerts - 1 : k-1;
+							}
+							else
+							{
+								prevIndex = nextIndex;
+								nextEdgeIndex = k;
+							}
+							mSVM->mData.mAdjacentVerts[mSVM->mData.mValencies[vertexIndex].mOffset++] = prevIndex;
+							numAdj++;
+							break;
+						}
+					}
+
+					// now move to next neighbor
+					n0 = meshBuilder.mHullDataFacesByAllEdges8[(meshBuilder.mHullDataPolygons[neighborPolygon].mVRef8 + nextEdgeIndex)*2];
+					n1 = meshBuilder.mHullDataFacesByAllEdges8[(meshBuilder.mHullDataPolygons[neighborPolygon].mVRef8 + nextEdgeIndex)*2 + 1];
+					neighborPolygon = n0 == neighborPolygon ? n1 : n0;
+				}
+				// mark as done and remember how many neighbors were written
+				vertexMarker[vertexIndex] = numAdj;
+			}
+		}
+	}
+
+	// Recreate offsets
+	// (restores the mOffset values consumed while writing mAdjacentVerts)
+	mSVM->CreateOffsets();
+	return true;
+}
+
+//////////////////////////////////////////////////////////////////////////
+// compute the min dot product from the verts for given dir
+// Hill-climbs over the vertex adjacency graph to find the vertex minimizing
+// negativeDir * dot(vertex, dir), starting from startIndex_.
+// \param dir         [in]     sample direction
+// \param startIndex_ [in/out] start vertex on input, best vertex found on output
+// \param negativeDir [in]     +1 to minimize the dot product, -1 to maximize it
+void BigConvexDataBuilder::precomputeSample(const PxVec3& dir, PxU8& startIndex_, float negativeDir)
+{
+	PxU8 startIndex = startIndex_;
+
+	const PxVec3* verts = mHullVerts;
+	const Valency* valency = mSVM->mData.mValencies;
+	const PxU8* adjacentVerts = mSVM->mData.mAdjacentVerts;
+
+	// we have only 256 verts
+	// bitmap of already-accepted vertices, so each one is taken at most once
+	PxU32 smallBitMap[8] = {0,0,0,0,0,0,0,0};
+
+	float minimum = negativeDir * verts[startIndex].dot(dir);
+	PxU32 initialIndex = startIndex;
+	// walk to the best unvisited neighbor until no neighbor improves the minimum
+	do
+	{
+		initialIndex = startIndex;
+		const PxU32 numNeighbours = valency[startIndex].mCount;
+		const PxU32 offset = valency[startIndex].mOffset;
+
+		for (PxU32 a = 0; a < numNeighbours; ++a)
+		{
+			const PxU8 neighbourIndex = adjacentVerts[offset + a];
+			const float dist = negativeDir * verts[neighbourIndex].dot(dir);
+			if (dist < minimum)
+			{
+				const PxU32 ind = PxU32(neighbourIndex >> 5);
+				const PxU32 mask = PxU32(1 << (neighbourIndex & 31));
+				if ((smallBitMap[ind] & mask) == 0)
+				{
+					smallBitMap[ind] |= mask;
+					minimum = dist;
+					startIndex = neighbourIndex;
+				}
+			}
+		}
+
+	} while (startIndex != initialIndex);
+
+	startIndex_ = startIndex;
+}
+
+//////////////////////////////////////////////////////////////////////////
+// Precompute the min/max vertices for cube directions.
+// Precomputes, for each of 6*subdiv*subdiv sample directions on a cube, the
+// min and max support vertex start indices (stored as two consecutive byte
+// arrays in mSamples) used for runtime hill climbing.
+// \param subdiv [in] per-face cube subdivision
+//                    NOTE(review): subdiv == 1 would make halfSubdiv zero and
+//                    divide by zero below -- callers presumably pass >= 2; confirm.
+bool BigConvexDataBuilder::precompute(PxU32 subdiv)
+{
+	mSVM->mData.mSubdiv = Ps::to16(subdiv);
+	mSVM->mData.mNbSamples = Ps::to16(6 * subdiv*subdiv);
+
+	if (!initialize())
+		return false;
+
+	// per-direction hill-climbing seeds, carried over between samples
+	PxU8 startIndex[12] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
+	PxU8 startIndex2[12] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
+
+	const float halfSubdiv = float(subdiv - 1) * 0.5f;
+	// only the triangle i >= j is sampled; the (j,i) mirror is covered by the
+	// second set of 6 permuted directions below
+	for (PxU32 j = 0; j < subdiv; j++)
+	{
+		for (PxU32 i = j; i < subdiv; i++)
+		{
+			const float iSubDiv = 1.0f - i / halfSubdiv;
+			const float jSubDiv = 1.0f - j / halfSubdiv;
+
+			PxVec3 tempDir(1.0f, iSubDiv, jSubDiv);
+			// we need to normalize only once, then we permute the components
+			// as before for each i,j and j,i face direction
+			tempDir.normalize();
+
+			// 12 directions: 6 cube faces for the (i,j) sample + 6 for the (j,i) sample
+			const PxVec3 dirs[12] = {
+				PxVec3(-tempDir.x, tempDir.y, tempDir.z),
+				PxVec3(tempDir.x, tempDir.y, tempDir.z),
+
+				PxVec3(tempDir.z, -tempDir.x, tempDir.y),
+				PxVec3(tempDir.z, tempDir.x, tempDir.y),
+
+				PxVec3(tempDir.y, tempDir.z, -tempDir.x),
+				PxVec3(tempDir.y, tempDir.z, tempDir.x),
+
+				PxVec3(-tempDir.x, tempDir.z, tempDir.y),
+				PxVec3(tempDir.x, tempDir.z, tempDir.y),
+
+				PxVec3(tempDir.y, -tempDir.x, tempDir.z),
+				PxVec3(tempDir.y, tempDir.x, tempDir.z),
+
+				PxVec3(tempDir.z, tempDir.y, -tempDir.x),
+				PxVec3(tempDir.z, tempDir.y, tempDir.x)
+			};
+
+			// compute in each direction + negative/positive dot, we have
+			// then two start indexes, which are used then for hill climbing
+			for (PxU32 dStep = 0; dStep < 12; dStep++)
+			{
+				precomputeSample(dirs[dStep], startIndex[dStep], 1.0f);
+				precomputeSample(dirs[dStep], startIndex2[dStep], -1.0f);
+			}
+
+			// decompose the vector results into face directions
+			// min indices go into [0, mNbSamples), max indices into the second half
+			for (PxU32 k = 0; k < 6; k++)
+			{
+				const PxU32 ksub = k*subdiv*subdiv;
+				const PxU32 offset = j + i*subdiv + ksub;
+				const PxU32 offset2 = i + j*subdiv + ksub;
+				PX_ASSERT(offset < mSVM->mData.mNbSamples);
+				PX_ASSERT(offset2 < mSVM->mData.mNbSamples);
+
+				mSVM->mData.mSamples[offset] = startIndex[k];
+				mSVM->mData.mSamples[offset + mSVM->mData.mNbSamples] = startIndex2[k];
+
+				mSVM->mData.mSamples[offset2] = startIndex[k + 6];
+				mSVM->mData.mSamples[offset2 + mSVM->mData.mNbSamples] = startIndex2[k + 6];
+			}
+		}
+	}
+	return true;
+}
+
+// Binary format version of the serialized 'VALE' valency block.
+static const PxU32 gValencyVersion = 2;
+
+//////////////////////////////////////////////////////////////////////////
+
+// Serializes the valency data: 'VALE' header, vertex/adjacency counts, the
+// per-vertex valency counts (stored via the compressed index writer), then
+// the raw adjacent-vertex bytes.
+// \param stream           [out] destination stream
+// \param platformMismatch [in]  endian-swap scalar values for the target platform
+bool BigConvexDataBuilder::saveValencies(PxOutputStream& stream, bool platformMismatch) const
+{
+	// Export header
+	if(!WriteHeader('V', 'A', 'L', 'E', gValencyVersion, platformMismatch, stream))
+		return false;
+
+	writeDword(mSVM->mData.mNbVerts, platformMismatch, stream);
+	writeDword(mSVM->mData.mNbAdjVerts, platformMismatch, stream);
+
+	{
+		// copy the 16-bit counts into a temp buffer so they can be stored
+		// with the size-adaptive index writer
+		PxU16* temp = PX_NEW_TEMP(PxU16)[mSVM->mData.mNbVerts];
+		for(PxU32 i=0;i<mSVM->mData.mNbVerts;i++)
+			temp[i] = mSVM->mData.mValencies[i].mCount;
+
+		const PxU32 maxIndex = computeMaxIndex(temp, mSVM->mData.mNbVerts);
+		writeDword(maxIndex, platformMismatch, stream);
+		StoreIndices(Ps::to16(maxIndex), mSVM->mData.mNbVerts, temp, stream, platformMismatch);
+
+		PX_DELETE_POD(temp);
+	}
+	// raw bytes, no endian handling needed
+	stream.write(mSVM->mData.mAdjacentVerts, mSVM->mData.mNbAdjVerts);
+
+	return true;
+}
diff --git a/PhysX_3.4/Source/PhysXCooking/src/convex/BigConvexDataBuilder.h b/PhysX_3.4/Source/PhysXCooking/src/convex/BigConvexDataBuilder.h
new file mode 100644
index 00000000..2abf5993
--- /dev/null
+++ b/PhysX_3.4/Source/PhysXCooking/src/convex/BigConvexDataBuilder.h
@@ -0,0 +1,100 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2016 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#ifndef BIG_CONVEX_DATA_BUILDER_H
+#define BIG_CONVEX_DATA_BUILDER_H
+
+#include "foundation/PxMemory.h"
+#include "PsVecMath.h"
+
+namespace physx
+{
+ struct HullTriangleData;
+ class BigConvexData;
+ class ConvexHullBuilder;
+
+ //////////////////////////////////////////////////////////////////////////
+ //! Valencies creation structure
+	struct ValenciesCreate
+	{
+		//! Constructor
+		ValenciesCreate() { PxMemZero(this, sizeof(*this)); }
+
+		PxU32 nbVerts; //!< Number of vertices
+		PxU32 nbFaces; //!< Number of faces
+		const PxU32* dFaces; //!< List of faces (triangle list, 32-bit indices)
+		const PxU16* wFaces; //!< List of faces (triangle list, 16-bit indices)
+		bool adjacentList; //!< Compute list of adjacent vertices or not
+	};
+
+ //////////////////////////////////////////////////////////////////////////
+
+	// Cooking-time builder for BigConvexData: precomputes the support vertex
+	// map (gaussmap samples) and per-vertex valency/adjacency data for a convex
+	// hull, and serializes them. Holds non-owning pointers to the hull data.
+	class BigConvexDataBuilder : public Ps::UserAllocated
+	{
+	public:
+		BigConvexDataBuilder(const Gu::ConvexHullData* hull, BigConvexData* gm, const PxVec3* hullVerts);
+		~BigConvexDataBuilder();
+	// Support vertex map
+		// builds the 6*subdiv^2 cube-sampled support map (calls initialize())
+		bool precompute(PxU32 subdiv);
+
+		// allocates the sample table (mNbSamples must be set beforehand)
+		bool initialize();
+
+		// serializes support map + valencies to the stream
+		bool save(PxOutputStream& stream, bool platformMismatch) const;
+
+		// builds valencies/adjacency from the hull builder's temporary edge data
+		bool computeValencies(const ConvexHullBuilder& meshBuilder);
+	//~Support vertex map
+
+	// Valencies
+
+	///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+	/**
+	*	Computes valencies and adjacent vertices.
+	*	After the call, get results with the appropriate accessors.
+	*
+	*	\param		vc	[in] creation structure
+	*	\return		true if success.
+	*/
+	///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+		bool compute(const ValenciesCreate& vc) const;
+
+		// serializes the 'VALE' valency block
+		bool saveValencies(PxOutputStream& stream, bool platformMismatch) const;
+	//~Valencies
+	protected:
+		// hill-climbs to the support vertex for 'dir' (negativeDir = +/-1 selects min/max)
+		PX_FORCE_INLINE void precomputeSample(const PxVec3& dir, PxU8& startIndex, float negativeDir);
+
+	private:
+		const Gu::ConvexHullData* mHull; // hull description (not owned)
+		BigConvexData* mSVM; // output data being built (not owned)
+		const PxVec3* mHullVerts; // hull vertex array (not owned)
+
+	};
+
+}
+
+#endif // BIG_CONVEX_DATA_BUILDER_H
diff --git a/PhysX_3.4/Source/PhysXCooking/src/convex/ConvexHullBuilder.cpp b/PhysX_3.4/Source/PhysXCooking/src/convex/ConvexHullBuilder.cpp
new file mode 100644
index 00000000..3b9c3ac6
--- /dev/null
+++ b/PhysX_3.4/Source/PhysXCooking/src/convex/ConvexHullBuilder.cpp
@@ -0,0 +1,797 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2016 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+#include "foundation/PxMemory.h"
+#include "EdgeList.h"
+#include "GuTriangle32.h"
+#include "GuConvexMesh.h"
+#include "PxCooking.h"
+#include "CookingUtils.h"
+#include "ConvexHullBuilder.h"
+#include "CmRadixSortBuffered.h"
+#include "MeshCleaner.h"
+#include "PsArray.h"
+#include "PsFoundation.h"
+#include "PsVecMath.h"
+
+
+// Convex hull cooked-data binary version history:
+// 7: added mHullDataFacesByVertices8
+// 8: added mEdges
+static const physx::PxU32 gVersion = 8;
+
+using namespace physx;
+using namespace Gu;
+using namespace Ps::aos;
+
+#define USE_PRECOMPUTED_HULL_PROJECTION
+
+//////////////////////////////////////////////////////////////////////////
+// default constructor
+// \param hull         [in] target hull data to fill in (not owned, not freed here)
+// \param buildGRBData [in] also build/serialize the 16-bit edge array for GRB
+// The mHullData* arrays are allocated during init() and freed by the destructor.
+ConvexHullBuilder::ConvexHullBuilder(Gu::ConvexHullData* hull, const bool buildGRBData) :
+	mHullDataHullVertices		(NULL),
+	mHullDataPolygons			(NULL),
+	mHullDataVertexData8		(NULL),
+	mHullDataFacesByEdges8		(NULL),
+	mHullDataFacesByVertices8	(NULL),
+	mHullDataFacesByAllEdges8	(NULL),
+	mEdgeData16					(NULL),
+	mEdges						(NULL),
+	mHull						(hull),
+	mBuildGRBData				(buildGRBData)
+{
+}
+
+//////////////////////////////////////////////////////////////////////////
+// default destructor
+// Frees every array the builder allocated; mHull itself is not owned and not freed.
+ConvexHullBuilder::~ConvexHullBuilder()
+{
+	PX_DELETE_POD(mEdgeData16);
+	PX_DELETE_POD(mEdges);
+
+	PX_DELETE_POD(mHullDataHullVertices);
+	PX_DELETE_POD(mHullDataPolygons);
+	PX_DELETE_POD(mHullDataVertexData8);
+	PX_DELETE_POD(mHullDataFacesByEdges8);
+	PX_DELETE_POD(mHullDataFacesByVertices8);
+	PX_DELETE_POD(mHullDataFacesByAllEdges8);
+}
+
+//////////////////////////////////////////////////////////////////////////
+// initialize the convex hull
+// \param nbVerts [in] number of vertices used
+// \param verts [in] vertices array
+// \param indices [in] indices array
+// \param nbPolygons [in] number of polygons
+// \param hullPolygons [in] polygons array
+// (continued) nbIndices: total number of polygon vertex indices;
+// gaussMapVertexLimit: vertex-count threshold forwarded to createEdgeList();
+// doValidation: run checkHullPolygons() after building;
+// userPolygons: polygons supplied by the user (suppresses the vertex-map error in calculateVertexMapTable)
+bool ConvexHullBuilder::init(PxU32 nbVerts, const PxVec3* verts, const PxU32* indices, const PxU32 nbIndices,
+	const PxU32 nbPolygons, const PxHullPolygon* hullPolygons, PxU32 gaussMapVertexLimit, bool doValidation, bool userPolygons)
+{
+	PX_ASSERT(indices);
+	PX_ASSERT(verts);
+	PX_ASSERT(hullPolygons);
+	PX_ASSERT(nbVerts);
+	PX_ASSERT(nbPolygons);
+
+	mHullDataHullVertices = NULL;
+	mHullDataPolygons = NULL;
+	mHullDataVertexData8 = NULL;
+	mHullDataFacesByEdges8 = NULL;
+	mHullDataFacesByVertices8 = NULL;
+	mHullDataFacesByAllEdges8 = NULL;
+
+	mEdges = NULL;
+	mEdgeData16 = NULL;
+
+	mHull->mNbHullVertices = Ps::to8(nbVerts);
+	// allocate additional vec3 for V4 safe load in VolumeInteration
+	// (fixed precedence bug: "sizeof(PxVec3) * n + 1" allocated one extra BYTE
+	// instead of one extra PxVec3, so a padded 4-float load could read past
+	// the end of the allocation)
+	mHullDataHullVertices = reinterpret_cast<PxVec3*>(PX_ALLOC(sizeof(PxVec3) * (mHull->mNbHullVertices + 1u), "PxVec3"));
+	PxMemCopy(mHullDataHullVertices, verts, mHull->mNbHullVertices*sizeof(PxVec3));
+
+	// Cleanup
+	mHull->mNbPolygons = 0;
+	PX_DELETE_POD(mHullDataVertexData8);
+	PX_FREE_AND_RESET(mHullDataPolygons);
+
+	// polygon indices are stored as PxU8, so more than 255 polygons cannot be represented
+	if(nbPolygons>255)
+	{
+		Ps::getFoundation().error(PxErrorCode::eINTERNAL_ERROR, __FILE__, __LINE__, "ConvexHullBuilder::init: convex hull has more than 255 polygons!");
+		return false;
+	}
+
+	// Precompute hull polygon structures
+	mHull->mNbPolygons = Ps::to8(nbPolygons);
+	mHullDataPolygons = reinterpret_cast<Gu::HullPolygonData*>(PX_ALLOC(sizeof(Gu::HullPolygonData)*mHull->mNbPolygons, "Gu::HullPolygonData"));
+
+	// flatten all polygon vertex indices into one PxU8 array, each polygon
+	// referencing its slice via mVRef8/mNbVerts
+	mHullDataVertexData8 = PX_NEW(PxU8)[nbIndices];
+	PxU8* dest = mHullDataVertexData8;
+	for(PxU32 i=0;i<nbPolygons;i++)
+	{
+		const PxHullPolygon& inPolygon = hullPolygons[i];
+		mHullDataPolygons[i].mVRef8 = PxU16(dest - mHullDataVertexData8); // Setup link for current polygon
+
+		PxU32 numVerts = inPolygon.mNbVerts;
+		PX_ASSERT(numVerts>=3); // Else something very wrong happened...
+		mHullDataPolygons[i].mNbVerts = Ps::to8(numVerts);
+
+		for (PxU32 j = 0; j < numVerts; j++)
+		{
+			dest[j] = Ps::to8(indices[inPolygon.mIndexBase + j]);
+		}
+
+		mHullDataPolygons[i].mPlane = PxPlane(inPolygon.mPlane[0],inPolygon.mPlane[1],inPolygon.mPlane[2],inPolygon.mPlane[3]);
+
+		// Next one
+		dest += numVerts;
+	}
+
+	if(!calculateVertexMapTable(nbPolygons, userPolygons))
+		return false;
+
+	// moved create edge list here from save, copy. This is a part of the validation process and
+	// we need to create the edge list anyway
+	if (!createEdgeList(doValidation, nbIndices, mHull->mNbHullVertices > gaussMapVertexLimit ? true : false))
+		return false;
+
+#ifdef USE_PRECOMPUTED_HULL_PROJECTION
+	// Loop through polygons
+	for (PxU32 j = 0; j < nbPolygons; j++)
+	{
+		// Precompute hull projection along local polygon normal
+		// (mMinIndex = hull vertex with the smallest projection on the polygon's plane normal)
+		PxU32 NbVerts = mHull->mNbHullVertices;
+		const PxVec3* Verts = mHullDataHullVertices;
+		Gu::HullPolygonData& polygon = mHullDataPolygons[j];
+		PxReal min = PX_MAX_F32;
+		PxU8 minIndex = 0xff;
+		for (PxU8 i = 0; i < NbVerts; i++)
+		{
+			float dp = (*Verts++).dot(polygon.mPlane.n);
+			if (dp < min)
+			{
+				min = dp;
+				minIndex = i;
+			}
+		}
+		polygon.mMinIndex = minIndex;
+	}
+#endif
+
+	if(doValidation)
+		return checkHullPolygons();
+	else
+		return true;
+}
+
+//////////////////////////////////////////////////////////////////////////
+// hull polygons check
+// Validates the built hull: the volume must be closed (each of 8 outside test
+// points must be in front of at least one plane) and no hull vertex may lie
+// significantly above any polygon plane it does not belong to.
+// \return true if the hull passes validation
+bool ConvexHullBuilder::checkHullPolygons() const
+{
+	const PxVec3* hullVerts = mHullDataHullVertices;
+	const PxU8* vertexData = mHullDataVertexData8;
+	Gu::HullPolygonData* hullPolygons = mHullDataPolygons;
+
+	// Check hull validity
+	if(!hullVerts || !hullPolygons)
+		return false;
+
+	// a closed 3D hull needs at least 4 faces (tetrahedron)
+	if(mHull->mNbPolygons<4)
+		return false;
+
+	PxVec3 max(-FLT_MAX,-FLT_MAX,-FLT_MAX);
+
+	PxVec3 hullMax = hullVerts[0];
+	PxVec3 hullMin = hullVerts[0];
+
+	// componentwise absolute maximum and AABB of the hull vertices
+	for(PxU32 j=0;j<mHull->mNbHullVertices;j++)
+	{
+		const PxVec3& hullVert = hullVerts[j];
+		if(fabsf(hullVert.x) > max.x)
+			max.x = fabsf(hullVert.x);
+
+		if(fabsf(hullVert.y) > max.y)
+			max.y = fabsf(hullVert.y);
+
+		if(fabsf(hullVert.z) > max.z)
+			max.z = fabsf(hullVert.z);
+
+		if (hullVert.x > hullMax.x)
+		{
+			hullMax.x = hullVert.x;
+		}
+		else if (hullVert.x < hullMin.x)
+		{
+			hullMin.x = hullVert.x;
+		}
+
+		if (hullVert.y > hullMax.y)
+		{
+			hullMax.y = hullVert.y;
+		}
+		else if (hullVert.y < hullMin.y)
+		{
+			hullMin.y = hullVert.y;
+		}
+
+		if (hullVert.z > hullMax.z)
+		{
+			hullMax.z = hullVert.z;
+		}
+		else if (hullVert.z < hullMin.z)
+		{
+			hullMin.z = hullVert.z;
+		}
+	}
+
+	// inflate so the test points are strictly outside the hull
+	max += PxVec3(0.02f,0.02f,0.02f);
+
+	PxVec3 testVectors[8];
+	bool foundPlane[8];
+	for (PxU32 i = 0; i < 8; i++)
+	{
+		foundPlane[i] = false;
+	}
+
+	// the 8 corners of the inflated absolute-extent box
+	testVectors[0] = PxVec3(max.x,max.y,max.z);
+	testVectors[1] = PxVec3(max.x,-max.y,-max.z);
+	testVectors[2] = PxVec3(max.x,max.y,-max.z);
+	testVectors[3] = PxVec3(max.x,-max.y,max.z);
+	testVectors[4] = PxVec3(-max.x,max.y,max.z);
+	testVectors[5] = PxVec3(-max.x,-max.y,max.z);
+	testVectors[6] = PxVec3(-max.x,max.y,-max.z);
+	testVectors[7] = PxVec3(-max.x,-max.y,-max.z);
+
+	// compute the test epsilon the same way we construct the hull, verts are considered coplanar within this epsilon.
+	// It depends only on the hull's AABB, so it is computed once here instead of once per polygon.
+	const float planeTolerance = 0.002f;
+	const float testEpsilon = PxMax(planeTolerance * (PxMax(PxAbs(hullMax.x), PxAbs(hullMin.x)) +
+		PxMax(PxAbs(hullMax.y), PxAbs(hullMin.y)) +
+		PxMax(PxAbs(hullMax.z), PxAbs(hullMin.z))), planeTolerance);
+
+	// Extra convex hull validity check. This is less aggressive than previous convex decomposer!
+	// Loop through polygons
+	for(PxU32 i=0;i<mHull->mNbPolygons;i++)
+	{
+		const PxPlane& P = hullPolygons[i].mPlane;
+
+		for (PxU32 k = 0; k < 8; k++)
+		{
+			if(!foundPlane[k])
+			{
+				const float d = P.distance(testVectors[k]);
+				if(d >= 0)
+				{
+					foundPlane[k] = true;
+				}
+			}
+		}
+
+		// Test hull vertices against polygon plane
+		for(PxU32 j=0;j<mHull->mNbHullVertices;j++)
+		{
+			// Don't test vertex if it belongs to plane (to prevent numerical issues)
+			PxU32 nb = hullPolygons[i].mNbVerts;
+			bool discard=false;
+			for(PxU32 k=0;k<nb;k++)
+			{
+				if(vertexData[hullPolygons[i].mVRef8+k]==PxU8(j))
+				{
+					discard = true;
+					break;
+				}
+			}
+
+			if(!discard)
+			{
+				const float d = P.distance(hullVerts[j]);
+//				if(d>0.0001f)
+				//if(d>0.02f)
+				if(d > testEpsilon)
+				{
+					Ps::getFoundation().error(PxErrorCode::eINTERNAL_ERROR, __FILE__, __LINE__, "Gu::ConvexMesh::checkHullPolygons: Some hull vertices seems to be too far from hull planes.");
+					return false;
+				}
+			}
+		}
+	}
+
+	// if some test point was behind every plane, the hull is open or has reversed winding
+	for (PxU32 i = 0; i < 8; i++)
+	{
+		if(!foundPlane[i])
+		{
+			Ps::getFoundation().error(PxErrorCode::eINTERNAL_ERROR, __FILE__, __LINE__, "Gu::ConvexMesh::checkHullPolygons: Hull seems to have opened volume or do (some) faces have reversed winding?");
+			return false;
+		}
+	}
+
+	return true;
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+/**
+* Computes the center of the hull. It should be inside it !
+* \param center [out] hull center
+* \return true if success
+*/
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+// Computes the hull center as the area-weighted average of the triangle centers.
+// \param center   [out] computed hull center
+// \param numFaces [in]  number of triangles in 'faces'
+// \param faces    [in]  triangulated hull faces (vertex index triples)
+// \return true if success
+bool ConvexHullBuilder::computeGeomCenter(PxVec3& center, PxU32 numFaces, HullTriangleData* faces) const
+{
+	// Checkings
+	const PxVec3* PX_RESTRICT hullVerts = mHullDataHullVertices;
+	if (!mHull->mNbHullVertices || !hullVerts) return false;
+
+	// Use the topological method
+	float totalArea = 0.0f;
+	center = PxVec3(0);
+	for (PxU32 i = 0; i < numFaces; i++)
+	{
+		Gu::TriangleT<PxU32> curTri(faces[i].mRef[0], faces[i].mRef[1], faces[i].mRef[2]);
+		const float area = curTri.area(hullVerts);
+		PxVec3 curCenter; curTri.center(hullVerts, curCenter);
+		center += area * curCenter;
+		totalArea += area;
+	}
+
+	if (totalArea > 0.0f)
+	{
+		center /= totalArea;
+	}
+	else
+	{
+		// Degenerate input (no faces or only zero-area triangles): dividing by
+		// zero would produce a NaN center, so fall back to the plain average of
+		// the hull vertices (mNbHullVertices is non-zero, checked above).
+		center = PxVec3(0);
+		for (PxU32 i = 0; i < mHull->mNbHullVertices; i++)
+			center += hullVerts[i];
+		center /= float(mHull->mNbHullVertices);
+	}
+
+	return true;
+}
+
+//////////////////////////////////////////////////////////////////////////
+// hull data store
+// serialized structures must keep their expected 8-byte layout
+PX_COMPILE_TIME_ASSERT(sizeof(Gu::EdgeDescData)==8);
+PX_COMPILE_TIME_ASSERT(sizeof(Gu::EdgeData)==8);
+// Serializes the built hull: 'CLHL'+'CVHL' headers, counts, vertices, polygon
+// records, per-polygon vertex indices, face-by-edge and face-by-vertex tables,
+// and (if GRB data was requested) the 16-bit edge array. The GRB flag travels
+// in bit 15 of the stored edge count.
+// \param stream           [out] destination stream
+// \param platformMismatch [in]  endian-swap scalar values for the target platform
+bool ConvexHullBuilder::save(PxOutputStream& stream, bool platformMismatch) const
+{
+	// Export header
+	if(!WriteHeader('C', 'L', 'H', 'L', gVersion, platformMismatch, stream))
+		return false;
+
+	// Export header
+	if(!WriteHeader('C', 'V', 'H', 'L', gVersion, platformMismatch, stream))
+		return false;
+
+	// Export figures
+
+	//embed grb flag into mNbEdges
+	PxU16 hasGRBData = PxU16(mBuildGRBData);
+	hasGRBData = PxU16(hasGRBData << 15);
+	PX_ASSERT(mHull->mNbEdges <( (1 << 15) - 1));
+	const PxU16 nbEdges = PxU16(mHull->mNbEdges | hasGRBData);
+	writeDword(mHull->mNbHullVertices, platformMismatch, stream);
+	writeDword(nbEdges, platformMismatch, stream); // 16-bit value stored as a dword
+	writeDword(computeNbPolygons(), platformMismatch, stream); // Use accessor to lazy-build
+	// total number of polygon vertex indices
+	PxU32 nb=0;
+	for(PxU32 i=0;i<mHull->mNbPolygons;i++)
+		nb += mHullDataPolygons[i].mNbVerts;
+	writeDword(nb, platformMismatch, stream);
+
+	// Export triangles
+
+	writeFloatBuffer(&mHullDataHullVertices->x, PxU32(mHull->mNbHullVertices*3), platformMismatch, stream);
+
+	// Export polygons
+	// TODO: allow lazy-evaluation
+	// We can't really store the buffer in one run anymore!
+	for(PxU32 i=0;i<mHull->mNbPolygons;i++)
+	{
+		Gu::HullPolygonData tmpCopy = mHullDataPolygons[i];
+		if(platformMismatch)
+			flipData(tmpCopy);
+
+		stream.write(&tmpCopy, sizeof(Gu::HullPolygonData));
+	}
+
+	// PT: why not storeBuffer here?
+	for(PxU32 i=0;i<nb;i++)
+		stream.write(&mHullDataVertexData8[i], sizeof(PxU8));
+
+	stream.write(mHullDataFacesByEdges8, PxU32(mHull->mNbEdges*2));
+	stream.write(mHullDataFacesByVertices8, PxU32(mHull->mNbHullVertices*3));
+
+	// GRB edge pairs only exist when requested (flag read back from bit 15 of nbEdges)
+	if (mBuildGRBData)
+		writeWordBuffer(mEdges, PxU32(mHull->mNbEdges * 2), platformMismatch, stream);
+
+	return true;
+}
+
+//////////////////////////////////////////////////////////////////////////
+// Copies the built hull into a runtime ConvexHullData: allocates one contiguous
+// buffer (ownership passes to hullData/its holder) laid out as
+// [polygons | vertices | facesByEdges | facesByVertices | (edges) | vertexData],
+// then copies each builder array into its slice.
+// \param hullData [out] destination hull data; its pointers are set into the new buffer
+bool ConvexHullBuilder::copy(ConvexHullData& hullData)
+{
+	// set the numbers
+	hullData.mNbHullVertices = mHull->mNbHullVertices;
+	hullData.mNbEdges = mHull->mNbEdges;
+	hullData.mNbPolygons = Ps::to8(computeNbPolygons());
+	// total number of polygon vertex indices
+	PxU32 nb = 0;
+	for (PxU32 i = 0; i < mHull->mNbPolygons; i++)
+		nb += mHullDataPolygons[i].mNbVerts;
+
+	PxU32 bytesNeeded = Gu::computeBufferSize(hullData, nb);
+
+	// allocate the memory first.
+	void* dataMemory = PX_ALLOC(bytesNeeded, "ConvexHullData data");
+
+	PxU8* address = reinterpret_cast<PxU8*>(dataMemory);
+
+	// set data pointers
+	// NOTE(review): mNbEdges.isBitSet() presumably flags the presence of the
+	// GRB edge array (see the bit-15 flag in save()) -- confirm.
+	hullData.mPolygons = reinterpret_cast<Gu::HullPolygonData*>(address); address += sizeof(Gu::HullPolygonData) * hullData.mNbPolygons;
+	PxVec3* dataHullVertices = reinterpret_cast<PxVec3*>(address); address += sizeof(PxVec3) * hullData.mNbHullVertices;
+	PxU8* dataFacesByEdges8 = reinterpret_cast<PxU8*>(address); address += sizeof(PxU8) * hullData.mNbEdges * 2;
+	PxU8* dataFacesByVertices8 = reinterpret_cast<PxU8*>(address); address += sizeof(PxU8) * hullData.mNbHullVertices * 3;
+	PxU16* dataEdges = reinterpret_cast<PxU16*>(address); address += hullData.mNbEdges.isBitSet() ? sizeof(PxU16) *hullData.mNbEdges * 2 : 0;
+	PxU8* dataVertexData8 = reinterpret_cast<PxU8*>(address); address += sizeof(PxU8) * nb; // PT: leave that one last, so that we don't need to serialize "Nb"
+
+	// alignment and bounds sanity checks on the computed layout
+	PX_ASSERT(!(size_t(dataHullVertices) % sizeof(PxReal)));
+	PX_ASSERT(!(size_t(hullData.mPolygons) % sizeof(PxReal)));
+	PX_ASSERT(size_t(address) <= size_t(dataMemory) + bytesNeeded);
+
+	PX_ASSERT(mHullDataHullVertices);
+	PX_ASSERT(mHullDataPolygons);
+	PX_ASSERT(mHullDataVertexData8);
+	PX_ASSERT(mHullDataFacesByEdges8);
+	PX_ASSERT(mHullDataFacesByVertices8);
+
+	// copy the data
+	PxMemCopy(dataHullVertices, &mHullDataHullVertices->x, PxU32(mHull->mNbHullVertices * 3)*sizeof(float));
+	PxMemCopy(hullData.mPolygons, mHullDataPolygons , hullData.mNbPolygons*sizeof(Gu::HullPolygonData));
+	PxMemCopy(dataVertexData8, mHullDataVertexData8, nb);
+	PxMemCopy(dataFacesByEdges8,mHullDataFacesByEdges8, PxU32(mHull->mNbEdges * 2));
+	if (hullData.mNbEdges.isBitSet())
+		PxMemCopy(dataEdges, mEdges, PxU32(mHull->mNbEdges * 2) * sizeof(PxU16));
+	PxMemCopy(dataFacesByVertices8, mHullDataFacesByVertices8, PxU32(mHull->mNbHullVertices * 3));
+	return true;
+}
+
+//////////////////////////////////////////////////////////////////////////
+// calculate vertex map table
+// Builds mHullDataFacesByVertices8: for each hull vertex, the indices of (up
+// to) 3 polygons touching it. Returns false (and fills the table with 0xFF)
+// when some vertex touches fewer than 3 polygons, i.e. the vertex-to-face info
+// cannot be used for plane shifting; PCM then falls back to the original shape.
+// nbPolygons   - number of polygons to scan
+// userPolygons - true when polygons were supplied by the user (affects only the
+//                wording of the error message)
+bool ConvexHullBuilder::calculateVertexMapTable(PxU32 nbPolygons, bool userPolygons)
+{
+	mHullDataFacesByVertices8 = PX_NEW(PxU8)[mHull->mNbHullVertices*3u];
+	// per-vertex count of polygons recorded so far; only the first
+	// mNbHullVertices entries are used (vertex indices are 8-bit, < 256)
+	PxU8 vertexMarker[256];
+	PxMemSet(vertexMarker, 0, mHull->mNbHullVertices);
+
+	for (PxU32 i = 0; i < nbPolygons; i++)
+	{
+		const Gu::HullPolygonData& polygon = mHullDataPolygons[i];
+		for (PxU32 k = 0; k < polygon.mNbVerts; ++k)
+		{
+			const PxU8 index = mHullDataVertexData8[polygon.mVRef8 + k];
+			if (vertexMarker[index] < 3)
+			{
+				//Found a polygon
+				mHullDataFacesByVertices8[index*3 + vertexMarker[index]++] = Ps::to8(i);
+			}
+		}
+	}
+
+	// check that every vertex collected exactly 3 adjacent polygons
+	bool noPlaneShift = false;
+	for (PxU32 i = 0; i < mHull->mNbHullVertices; ++i)
+	{
+		if(vertexMarker[i] != 3)
+			noPlaneShift = true;
+	}
+
+	if (noPlaneShift)
+	{
+		//PCM will use the original shape, which means it will have a huge performance drop
+		if (!userPolygons)
+			Ps::getFoundation().error(PxErrorCode::eINTERNAL_ERROR, __FILE__, __LINE__, "ConvexHullBuilder: convex hull does not have vertex-to-face info! Try to use different convex mesh cooking settings.");
+		else
+			Ps::getFoundation().error(PxErrorCode::eINTERNAL_ERROR, __FILE__, __LINE__, "ConvexHullBuilder: convex hull does not have vertex-to-face info! Some of the vertices have less than 3 neighbor polygons. The vertex is most likely inside a polygon or on an edge between 2 polygons, please remove those vertices.");
+		// invalidate the whole table so the runtime can detect the failure
+		for (PxU32 i = 0; i < mHull->mNbHullVertices; ++i)
+		{
+			mHullDataFacesByVertices8[i * 3 + 0] = 0xFF;
+			mHullDataFacesByVertices8[i * 3 + 1] = 0xFF;
+			mHullDataFacesByVertices8[i * 3 + 2] = 0xFF;
+		}
+		return false;
+	}
+
+	return true;
+}
+
+
+//////////////////////////////////////////////////////////////////////////
+// create edge list
+// Builds the hull edge data:
+//  - mEdges:                 unique edges as (tail vref, head vref) PxU16 pairs
+//  - mEdgeData16:            per polygon-vertex slot, the index of its edge
+//  - mHullDataFacesByEdges8: the 2 polygon indices sharing each unique edge
+//  - mHullDataFacesByAllEdges8 (optional): face pair per half-edge, used for
+//    big convex hull valencies computation
+// doValidation       - verify that every edge is shared by exactly 2 faces
+// nbEdges            - number of half-edges (2x the unique edge count)
+// prepareBigHullData - allocate and fill mHullDataFacesByAllEdges8
+// Returns false and reports eINTERNAL_ERROR for non-manifold input.
+bool ConvexHullBuilder::createEdgeList(bool doValidation, PxU32 nbEdges, bool prepareBigHullData)
+{
+	// Code below could be greatly simplified if we assume manifold meshes!
+
+	//feodorb: ok, let's assume manifold meshes, since the code before this change
+	//would fail on non-manifold meshes anyways
+
+	// We need the adjacency graph for hull polygons, similar to what we have for triangles.
+	// - sort the polygon edges and walk them in order
+	// - each edge should appear exactly twice since a convex is a manifold mesh without boundary edges
+	// - the polygon index is implicit when we walk the sorted list => get the 2 polygons back and update adjacency graph
+	//
+	// Two possible structures:
+	// - polygon to edges: needed for local search (actually: polygon to polygons)
+	// - edge to polygons: needed to compute edge normals on-the-fly
+
+	// Below is largely copied from the edge-list code
+
+	// Polygon to edges:
+	//
+	// We're dealing with convex polygons made of N vertices, defining N edges. For each edge we want the edge in
+	// an edge array.
+	//
+	// Edges to polygon:
+	//
+	// For each edge in the array, we want two polygon indices - ie an edge.
+
+	// 0) Compute the total size needed for "polygon to edges"
+	const PxU32 nbPolygons = mHull->mNbPolygons;
+	PxU32 nbEdgesUnshared = nbEdges;
+
+	// in a manifold mesh, each edge is repeated exactly twice as it shares exactly 2 faces
+	if (nbEdgesUnshared % 2 != 0)
+	{
+		Ps::getFoundation().error(PxErrorCode::eINTERNAL_ERROR, __FILE__, __LINE__, "Cooking::cookConvexMesh: non-manifold mesh cannot be used, invalid mesh!");
+		return false;
+	}
+
+	if (prepareBigHullData)
+	{
+		mHullDataFacesByAllEdges8 = PX_NEW(PxU8)[nbEdges * 2];
+	}
+
+	// 1) Get some bytes: I need one EdgesRefs for each face, and some temp buffers
+
+	// Face indices by edge indices. First face is the one where the edge is ordered from tail to head.
+	PX_DELETE_POD(mHullDataFacesByEdges8);
+	mHullDataFacesByEdges8 = PX_NEW(PxU8)[nbEdgesUnshared];
+
+	PxU32* tempBuffer = PX_NEW_TEMP(PxU32)[nbEdgesUnshared*8];			// Temp storage
+	PxU32* bufferAdd = tempBuffer;
+	PxU32* PX_RESTRICT vRefs0 = tempBuffer; tempBuffer += nbEdgesUnshared;
+	PxU32* PX_RESTRICT vRefs1 = tempBuffer; tempBuffer += nbEdgesUnshared;
+	PxU32* polyIndex = tempBuffer; tempBuffer += nbEdgesUnshared;
+	PxU32* vertexIndex = tempBuffer; tempBuffer += nbEdgesUnshared;
+	PxU32* polyIndex2 = tempBuffer; tempBuffer += nbEdgesUnshared;
+	PxU32* vertexIndex2 = tempBuffer; tempBuffer += nbEdgesUnshared;
+	PxU32* edgeIndex = tempBuffer; tempBuffer += nbEdgesUnshared;
+	PxU32* edgeData = tempBuffer; tempBuffer += nbEdgesUnshared;
+
+	// TODO avoroshilov: use the same "tempBuffer"
+	bool* flippedVRefs = PX_NEW_TEMP(bool)[nbEdgesUnshared];			// Temp storage
+
+	PxU32* run0 = vRefs0;
+	PxU32* run1 = vRefs1;
+	PxU32* run2 = polyIndex;
+	PxU32* run3 = vertexIndex;
+	bool* run4 = flippedVRefs;
+
+	// 2) Create a full redundant list of edges
+	PxU32 edgeCounter = 0;
+	for(PxU32 i=0;i<nbPolygons;i++)
+	{
+		PxU32 nbVerts = mHullDataPolygons[i].mNbVerts;
+		const PxU8* PX_RESTRICT Data = mHullDataVertexData8 + mHullDataPolygons[i].mVRef8;
+
+		// Loop through polygon vertices
+		for(PxU32 j=0;j<nbVerts;j++)
+		{
+			PxU32 vRef0 = Data[j];
+			PxU32 vRef1 = Data[(j+1)%nbVerts];
+			bool flipped = vRef0>vRef1;
+
+			// store each half-edge with canonically ordered (min, max) vrefs so
+			// that the two copies of a shared edge sort next to each other
+			if (flipped)
+				physx::shdfnd::swap(vRef0, vRef1);
+
+			*run0++ = vRef0;
+			*run1++ = vRef1;
+			*run2++ = i;
+			*run3++ = j;
+			*run4++ = flipped;
+			edgeData[edgeCounter] = edgeCounter;
+			edgeCounter++;
+		}
+	}
+	PX_ASSERT(PxU32(run0-vRefs0)==nbEdgesUnshared);
+	PX_ASSERT(PxU32(run1-vRefs1)==nbEdgesUnshared);
+
+	// 3) Sort the list according to both keys (VRefs0 and VRefs1)
+	Cm::RadixSortBuffered sorter;
+	const PxU32* PX_RESTRICT sorted = sorter.Sort(vRefs1, nbEdgesUnshared,Cm::RADIX_UNSIGNED).Sort(vRefs0, nbEdgesUnshared,Cm::RADIX_UNSIGNED).GetRanks();
+
+	PX_DELETE_POD(mEdges);
+	// Edges by their tail and head VRefs. NbEdgesUnshared == nbEdges * 2
+	// mEdges[edgeIdx*2 + 0] = tailVref, mEdges[edgeIdx*2 + 1] = headVref
+	// Tails and heads should be consistent with face refs, so that the edge is given in the order of
+	// his first face and opposite to the order of his second face
+	mEdges = PX_NEW(PxU16)[nbEdgesUnshared];
+
+	PX_DELETE_POD(mEdgeData16);
+	// Face to edge mapping
+	mEdgeData16 = PX_NEW(PxU16)[nbEdgesUnshared];
+
+	mHull->mNbEdges = 0;	// #non-redundant edges
+
+	// A.B. Comment out the early exit temporary since we need to precompute the additional edge data for GPU
+	//if (!doValidation)
+	//{
+	//	// TODO avoroshilov: this codepath is not supported
+
+	//	for (PxU32 i = 0; i < nbEdgesUnshared; i = i + 2)
+	//	{
+	//		const PxU32 sortedIndex = sorted[i];						// Between 0 and Nb
+	//		const PxU32 nextSortedIndex = sorted[i + 1];				// Between 0 and Nb
+	//		const PxU32 polyID = polyIndex[sortedIndex];				// Poly index
+	//		const PxU32 nextPolyID = polyIndex[nextSortedIndex];		// Poly index
+	//
+	//		mHullDataFacesByEdges8[(mHull->mNbEdges) * 2] = Ps::to8(polyID);
+	//		mHullDataFacesByEdges8[(mHull->mNbEdges) * 2 + 1] = Ps::to8(nextPolyID);
+
+	//		// store the full edge data for later use in big convex hull valencies computation
+	//		if(mHullDataFacesByAllEdges8)
+	//		{
+	//			mHullDataFacesByAllEdges8[edgeData[sortedIndex] * 2] = Ps::to8(polyID);
+	//			mHullDataFacesByAllEdges8[edgeData[sortedIndex] * 2 + 1] = Ps::to8(nextPolyID);
+
+	//			mHullDataFacesByAllEdges8[edgeData[nextSortedIndex] * 2] = Ps::to8(polyID);
+	//			mHullDataFacesByAllEdges8[edgeData[nextSortedIndex] * 2 + 1] = Ps::to8(nextPolyID);
+	//		}
+	//		mHull->mNbEdges++;
+	//	}
+
+	//	PX_DELETE_POD(bufferAdd);
+	//	return true;
+	//}
+
+	// 4) Loop through all possible edges
+	// - clean edges list by removing redundant edges
+	// - create EdgesRef list
+
+	// TODO avoroshilov:
+	PxU32 numFacesPerEdgeVerificationCounter = 0;
+
+	PxU16* edgeVertOutput = mEdges;
+
+	PxU32 previousRef0 = PX_INVALID_U32;
+	PxU32 previousRef1 = PX_INVALID_U32;
+	PxU32 previousIndex = PX_INVALID_U32;
+	PxU32 previousPolyId = PX_INVALID_U32;
+
+	PxU16 nbHullEdges = 0;
+	for (PxU32 i = 0; i < nbEdgesUnshared; i++)
+	{
+		const PxU32 sortedIndex = sorted[i];			// Between 0 and Nb
+		const PxU32 polyID = polyIndex[sortedIndex];	// Poly index
+		const PxU32 vertexID = vertexIndex[sortedIndex];	// Poly index
+		PxU32 sortedRef0 = vRefs0[sortedIndex];		// (SortedRef0, SortedRef1) is the sorted edge
+		PxU32 sortedRef1 = vRefs1[sortedIndex];
+		bool flipped = flippedVRefs[sortedIndex];
+
+		if (sortedRef0 != previousRef0 || sortedRef1 != previousRef1)
+		{
+			// a new unique edge starts: the previous one must have been seen exactly twice
+			if (i != 0 && numFacesPerEdgeVerificationCounter != 1)
+			{
+				Ps::getFoundation().error(PxErrorCode::eINTERNAL_ERROR, __FILE__, __LINE__, "Cooking::cookConvexMesh: non-manifold mesh cannot be used, invalid mesh!");
+				// fix: release the temp buffers on this early-out to avoid leaking them
+				PX_DELETE_POD(bufferAdd);
+				PX_DELETE_POD(flippedVRefs);
+				return false;
+			}
+			numFacesPerEdgeVerificationCounter = 0;
+
+			// ### TODO: change this in edge list as well
+			previousRef0 = sortedRef0;
+			previousRef1 = sortedRef1;
+			previousPolyId = polyID;
+
+			//feodorb:restore the original order of VRefs (tail and head)
+			if (flipped)
+				physx::shdfnd::swap(sortedRef0, sortedRef1);
+
+			*edgeVertOutput++ = Ps::to16(sortedRef0);
+			*edgeVertOutput++ = Ps::to16(sortedRef1);
+
+			nbHullEdges++;
+		}
+		else
+		{
+			// second occurrence of the same edge: record the 2 faces sharing it
+			mHullDataFacesByEdges8[(nbHullEdges - 1) * 2] = Ps::to8(previousPolyId);
+			mHullDataFacesByEdges8[(nbHullEdges - 1) * 2 + 1] = Ps::to8(polyID);
+
+			++numFacesPerEdgeVerificationCounter;
+		}
+
+		mEdgeData16[mHullDataPolygons[polyID].mVRef8 + vertexID] = Ps::to16(i / 2);
+
+		if (mHullDataFacesByAllEdges8)
+		{
+			if (previousIndex != PX_INVALID_U32)
+			{
+				// store the full edge data for later use in big convex hull valencies computation
+				mHullDataFacesByAllEdges8[edgeData[sortedIndex] * 2] = Ps::to8(polyID);
+				mHullDataFacesByAllEdges8[edgeData[sortedIndex] * 2 + 1] = Ps::to8(polyIndex[previousIndex]);
+
+				mHullDataFacesByAllEdges8[edgeData[previousIndex] * 2] = Ps::to8(polyID);
+				mHullDataFacesByAllEdges8[edgeData[previousIndex] * 2 + 1] = Ps::to8(polyIndex[previousIndex]);
+				previousIndex = PX_INVALID_U32;
+			}
+			else
+			{
+				previousIndex = sortedIndex;
+			}
+		}
+		// Create mEdgesRef on the fly
+
+		polyIndex2[i] = polyID;
+		vertexIndex2[i] = vertexID;
+		edgeIndex[i] = PxU32(nbHullEdges - 1);
+	}
+
+	mHull->mNbEdges = nbHullEdges;
+
+	//////////////////////
+
+	// 2) Get some bytes: one Pair structure / edge
+	// create this structure only for validation purpose
+	// 3) Create Counters, ie compute the #faces sharing each edge
+	if(doValidation)
+	{
+		//
+		sorted = sorter.Sort(vertexIndex2, nbEdgesUnshared, Cm::RADIX_UNSIGNED).Sort(polyIndex2, nbEdgesUnshared, Cm::RADIX_UNSIGNED).GetRanks();
+
+		for (PxU32 i = 0; i < nbEdgesUnshared; i++)	edgeData[i] = edgeIndex[sorted[i]];
+
+		Gu::EdgeDescData* edgeToTriangles = PX_NEW(Gu::EdgeDescData)[PxU16(mHull->mNbEdges)];
+		PxMemZero(edgeToTriangles, sizeof(Gu::EdgeDescData)*mHull->mNbEdges);
+
+		PxU32* data = edgeData;
+		for(PxU32 i=0;i<nbEdgesUnshared;i++)	// <= maybe not the same Nb
+		{
+			edgeToTriangles[*data++].Count++;
+		}
+
+		// if we don't have a manifold mesh, this can fail... but the runtime would assert in any case
+		for (PxU32 i = 0; i < mHull->mNbEdges; i++)
+		{
+			if (edgeToTriangles[i].Count != 2)
+			{
+				Ps::getFoundation().error(PxErrorCode::eINTERNAL_ERROR, __FILE__, __LINE__, "Cooking::cookConvexMesh: non-manifold mesh cannot be used, invalid mesh!");
+				// fix: release the temp buffers on this early-out to avoid leaking them
+				PX_DELETE_POD(edgeToTriangles);
+				PX_DELETE_POD(bufferAdd);
+				PX_DELETE_POD(flippedVRefs);
+				return false;
+			}
+		}
+		PX_DELETE_POD(edgeToTriangles);
+	}
+
+	// ### free temp ram
+	PX_DELETE_POD(bufferAdd);
+
+	// TODO avoroshilov: use the same "tempBuffer"
+	PX_DELETE_POD(flippedVRefs);
+
+	return true;
+}
+
diff --git a/PhysX_3.4/Source/PhysXCooking/src/convex/ConvexHullBuilder.h b/PhysX_3.4/Source/PhysXCooking/src/convex/ConvexHullBuilder.h
new file mode 100644
index 00000000..a3d57202
--- /dev/null
+++ b/PhysX_3.4/Source/PhysXCooking/src/convex/ConvexHullBuilder.h
@@ -0,0 +1,95 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2016 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+
+#ifndef PX_CONVEXHULLBUILDER_H
+#define PX_CONVEXHULLBUILDER_H
+
+#include "GuConvexMeshData.h"
+#include "PsUserAllocated.h"
+#include "PxCooking.h"
+
+namespace physx
+{
+	struct PxHullPolygon;
+
+	namespace Gu
+	{
+		struct EdgeDescData;
+		struct ConvexHullData;
+	} // namespace Gu
+
+	// A triangle described by 3 vertex references.
+	struct HullTriangleData
+	{
+		PxU32	mRef[3];
+	};
+
+	// Builds the runtime convex hull data (Gu::ConvexHullData): vertices,
+	// polygons, edge lists and adjacency tables. The result can be serialized
+	// (save) or copied into a single contiguous allocation (copy).
+	class ConvexHullBuilder : public Ps::UserAllocated
+	{
+		public:
+												ConvexHullBuilder(Gu::ConvexHullData* hull, const bool buildGRBData);
+												~ConvexHullBuilder();
+
+					// Builds the hull from input vertices/indices/polygons; doValidation
+					// enables manifold checks, userPolygons marks user-provided polygon data.
+					bool						init(PxU32 nbVerts, const PxVec3* verts, const PxU32* indices, const PxU32 nbIndices, const PxU32 nbPolygons,
+													const PxHullPolygon* hullPolygons, PxU32 gaussMapVertexLimit, bool doValidation = true, bool userPolygons = false);
+
+					bool						save(PxOutputStream& stream, bool platformMismatch)	const;
+					bool						copy(Gu::ConvexHullData& hullData);
+
+					bool						createEdgeList(bool doValidation, PxU32 nbEdges, bool prepareBigHullData);
+					bool						checkHullPolygons()	const;
+
+					bool						calculateVertexMapTable(PxU32 nbPolygons, bool userPolygons = false);
+
+		PX_INLINE	PxU32						computeNbPolygons()		const
+												{
+													PX_ASSERT(mHull->mNbPolygons);
+													return mHull->mNbPolygons;
+												}
+
+					PxVec3*						mHullDataHullVertices;
+					Gu::HullPolygonData*		mHullDataPolygons;
+					PxU8*						mHullDataVertexData8;
+					PxU8*						mHullDataFacesByEdges8;
+					PxU8*						mHullDataFacesByVertices8;
+					PxU8*						mHullDataFacesByAllEdges8;	// data used for big hull valencies computation
+
+					PxU16*						mEdgeData16;	//!< Edge indices indexed by hull polygons
+					PxU16*						mEdges;			//!< Edge to vertex mapping
+
+					Gu::ConvexHullData*			mHull;			// hull being built; not owned by the builder
+					bool						mBuildGRBData;	// whether GPU (GRB) edge data should be produced
+
+		protected:
+					bool						computeGeomCenter(PxVec3& , PxU32 numFaces, HullTriangleData* faces)	const;
+	};
+}
+
+#endif // PX_CONVEXHULLBUILDER_H
+
diff --git a/PhysX_3.4/Source/PhysXCooking/src/convex/ConvexHullLib.cpp b/PhysX_3.4/Source/PhysXCooking/src/convex/ConvexHullLib.cpp
new file mode 100644
index 00000000..92ffc888
--- /dev/null
+++ b/PhysX_3.4/Source/PhysXCooking/src/convex/ConvexHullLib.cpp
@@ -0,0 +1,299 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2016 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+
+#include "ConvexHullLib.h"
+#include "Quantizer.h"
+#include "PsAllocator.h"
+#include "foundation/PxBounds3.h"
+#include "foundation/PxMemory.h"
+
+using namespace physx;
+
+namespace local
+{
+	//////////////////////////////////////////////////////////////////////////
+	// constants
+	static const float DISTANCE_EPSILON = 0.000001f;	// close enough to consider two floating point numbers to be 'the same'.
+	static const float NORMAL_DISTANCE_EPSILON = 0.001f; // close enough to consider two floating point numbers to be 'the same' in normalized points cloud.
+	static const float RESIZE_VALUE = 0.01f;			// if the provided points AABB is very thin resize it to this size
+
+	//////////////////////////////////////////////////////////////////////////
+	// checks if points form a valid AABB cube, if not construct a default CUBE
+	// Computes the AABB of the input points; writes its center, and its
+	// dimensions into scale. If the AABB is degenerate (any extent below
+	// distanceEpsilon, or fewer than 3 points), emits the 8 corners of a
+	// replacement cube into vertices and returns true; otherwise returns false.
+	// fCheck: when true, vcount is reset to 0 before the cube corners are
+	// written (used by the second validity pass after cleanup).
+	static bool checkPointsAABBValidity(PxU32 numPoints, const PxVec3* points, PxU32 stride , float distanceEpsilon,
+		float resizeValue, PxVec3& center, PxVec3& scale, PxU32& vcount, PxVec3* vertices, bool fCheck = false)
+	{
+		const char* vtx = reinterpret_cast<const char *> (points);
+		PxBounds3 bounds;
+		bounds.setEmpty();
+
+		// get the bounding box
+		for (PxU32 i = 0; i < numPoints; i++)
+		{
+			const PxVec3& p = *reinterpret_cast<const PxVec3 *> (vtx);
+			vtx += stride;
+
+			bounds.include(p);
+		}
+
+		PxVec3 dim = bounds.getDimensions();
+		center = bounds.getCenter();
+
+		// special case, the AABB is very thin or user provided us with only input 2 points
+		// we construct an AABB cube and return it
+		if ( dim.x < distanceEpsilon || dim.y < distanceEpsilon || dim.z < distanceEpsilon || numPoints < 3 )
+		{
+			float len = FLT_MAX;
+
+			// pick the shortest size bigger than the distance epsilon
+			if ( dim.x > distanceEpsilon && dim.x < len )
+				len = dim.x;
+			if ( dim.y > distanceEpsilon && dim.y < len )
+				len = dim.y;
+			if ( dim.z > distanceEpsilon && dim.z < len )
+				len = dim.z;
+
+			// if the AABB is small in all dimensions, resize it
+			if ( len == FLT_MAX )
+			{
+				dim = PxVec3(resizeValue);
+			}
+			// if one edge is small, set to 1/5th the shortest non-zero edge.
+			else
+			{
+				if ( dim.x < distanceEpsilon )
+					dim.x = len * 0.05f;
+				else
+					dim.x *= 0.5f;
+				if ( dim.y < distanceEpsilon )
+					dim.y = len * 0.05f;
+				else
+					dim.y *= 0.5f;
+				if ( dim.z < distanceEpsilon )
+					dim.z = len * 0.05f;
+				else
+					dim.z *= 0.5f;
+			}
+
+			// construct the AABB, dim now acts as a half-extent around center
+			const PxVec3 extPos = center + dim;
+			const PxVec3 extNeg = center - dim;
+
+			if(fCheck)
+				vcount = 0;
+
+			// emit the 8 cube corners
+			vertices[vcount++] = extNeg;
+			vertices[vcount++] = PxVec3(extPos.x,extNeg.y,extNeg.z);
+			vertices[vcount++] = PxVec3(extPos.x,extPos.y,extNeg.z);
+			vertices[vcount++] = PxVec3(extNeg.x,extPos.y,extNeg.z);
+			vertices[vcount++] = PxVec3(extNeg.x,extNeg.y,extPos.z);
+			vertices[vcount++] = PxVec3(extPos.x,extNeg.y,extPos.z);
+			vertices[vcount++] = extPos;
+			vertices[vcount++] = PxVec3(extNeg.x,extPos.y,extPos.z);
+			return true; // return cube
+		}
+		else
+		{
+			scale = dim;
+		}
+		return false;
+	}
+
+}
+
+//////////////////////////////////////////////////////////////////////////
+// normalize point cloud, remove duplicates!
+// Normalizes the input point cloud and removes (near-)duplicate vertices.
+// svcount/svertices/stride - input vertex buffer description
+// vcount/vertices          - output vertex count and output vertex storage
+// scale/center             - output AABB dimensions and center used to
+//                            normalize the cloud during deduplication
+// Returns false only when svcount == 0. A degenerate input cloud is replaced
+// by a small AABB cube and the function still returns true.
+// NOTE(review): duplicate removal below is O(n^2) in the vertex count.
+bool ConvexHullLib::cleanupVertices(PxU32 svcount, const PxVec3* svertices, PxU32 stride,
+	PxU32& vcount, PxVec3* vertices, PxVec3& scale, PxVec3& center)
+{
+	if ( svcount == 0 )
+		return false;
+
+	const PxVec3* verticesToClean = svertices;
+	PxU32 numVerticesToClean = svcount;
+	Quantizer* quantizer = NULL;
+
+	// if quantization is enabled, parse the input vertices and produce new quantized vertices,
+	// that will be then cleaned the same way
+	if (mConvexMeshDesc.flags & PxConvexFlag::eQUANTIZE_INPUT)
+	{
+		quantizer = createQuantizer();
+		PxU32 vertsOutCount;
+		const PxVec3* vertsOut = quantizer->kmeansQuantize3D(svcount, svertices, stride,true, mConvexMeshDesc.quantizedCount, vertsOutCount);
+
+		if (vertsOut)
+		{
+			numVerticesToClean = vertsOutCount;
+			verticesToClean = vertsOut;
+		}
+	}
+
+	// epsilons are scaled by the tolerance length so cleanup is unit-independent
+	const float distanceEpsilon = local::DISTANCE_EPSILON * mCookingParams.scale.length;
+	const float resizeValue = local::RESIZE_VALUE * mCookingParams.scale.length;
+	const float normalEpsilon = local::NORMAL_DISTANCE_EPSILON;	// used to determine if 2 points are the same
+
+	vcount = 0;
+	PxVec3 recip;
+
+	scale = PxVec3(1.0f);
+
+	// check for the AABB from points, if its very tiny return a resized CUBE
+	if (local::checkPointsAABBValidity(numVerticesToClean, verticesToClean, stride, distanceEpsilon, resizeValue, center, scale, vcount, vertices, false))
+	{
+		if (quantizer)
+			quantizer->release();
+		return true;
+	}
+
+	// reciprocal of the AABB dimensions, used to normalize into a unit-ish cube
+	recip[0] = 1 / scale[0];
+	recip[1] = 1 / scale[1];
+	recip[2] = 1 / scale[2];
+
+	center = center.multiply(recip);
+
+	// normalize the point cloud
+	const char * vtx = reinterpret_cast<const char *> (verticesToClean);
+	for (PxU32 i = 0; i<numVerticesToClean; i++)
+	{
+		const PxVec3& p = *reinterpret_cast<const PxVec3 *>(vtx);
+		vtx+=stride;
+
+		PxVec3 normalizedP = p.multiply(recip); // normalize
+
+		PxU32 j;
+
+		// parse the already stored vertices and check the distance
+		for (j=0; j<vcount; j++)
+		{
+			PxVec3& v = vertices[j];
+
+			const float dx = fabsf(normalizedP[0] - v[0] );
+			const float dy = fabsf(normalizedP[1] - v[1] );
+			const float dz = fabsf(normalizedP[2] - v[2] );
+
+			if ( dx < normalEpsilon && dy < normalEpsilon && dz < normalEpsilon )
+			{
+				// ok, it is close enough to the old one
+				// now let us see if it is further from the center of the point cloud than the one we already recorded.
+				// in which case we keep this one instead.
+				const float dist1 = (normalizedP - center).magnitudeSquared();
+				const float dist2 = (v - center).magnitudeSquared();
+
+				if ( dist1 > dist2 )
+				{
+					v = normalizedP;
+				}
+				break;
+			}
+		}
+
+		// we don't have that vertex in the output, add it
+		if ( j == vcount )
+		{
+			vertices[vcount] = normalizedP;
+			vcount++;
+		}
+	}
+
+	// scale the verts back
+	for (PxU32 i = 0; i < vcount; i++)
+	{
+		vertices[i] = vertices[i].multiply(scale);
+	}
+
+	// ok..now make sure we didn't prune so many vertices it is now invalid.
+	// note, that the output vertices are again scaled, we need to scale them back then
+	local::checkPointsAABBValidity(vcount, vertices, sizeof(PxVec3), distanceEpsilon, resizeValue, center, scale, vcount, vertices, true);
+
+	if (quantizer)
+		quantizer->release();
+	return true;
+}
+
+// Swaps the polygon with the most vertices into slot 0 of the descriptor and
+// rebuilds the index buffer accordingly (presumably so downstream hull
+// processing can rely on the largest face being first — TODO confirm).
+// The rebuilt indices live in mSwappedIndices, which this object owns and
+// frees in its destructor; desc.indices.data is repointed at that buffer, so
+// the descriptor must not outlive this ConvexHullLib instance.
+void ConvexHullLib::swapLargestFace(PxConvexMeshDesc& desc)
+{
+	const PxHullPolygon* polygons = reinterpret_cast<const PxHullPolygon*>(desc.polygons.data);
+	PxHullPolygon* polygonsOut = const_cast<PxHullPolygon*>(polygons);
+
+	// find the polygon with the largest vertex count
+	PxU32 largestFace = 0;
+	for (PxU32 i = 1; i < desc.polygons.count; i++)
+	{
+		if(polygons[largestFace].mNbVerts < polygons[i].mNbVerts)
+			largestFace = i;
+	}
+
+	// early exit if no swap needs to be done
+	if(largestFace == 0)
+		return;
+
+	const PxU32* indices = reinterpret_cast<const PxU32*>(desc.indices.data);
+	mSwappedIndices = reinterpret_cast<PxU32*> (PX_ALLOC_TEMP(sizeof(PxU32)*desc.indices.count, "PxU32"));
+
+	PxHullPolygon replacedPolygon = polygons[0];
+	PxHullPolygon largestPolygon = polygons[largestFace];
+	polygonsOut[0] = polygons[largestFace];
+	polygonsOut[largestFace] = replacedPolygon;
+
+	// relocate indices: rebuild the index buffer polygon by polygon so each
+	// polygon's mIndexBase stays contiguous after the swap
+	PxU16 indexBase = 0;
+	for (PxU32 i = 0; i < desc.polygons.count; i++)
+	{
+		if(i == 0)
+		{
+			// slot 0 now holds the former largest polygon's indices
+			PxMemCopy(mSwappedIndices, &indices[largestPolygon.mIndexBase],sizeof(PxU32)*largestPolygon.mNbVerts);
+			polygonsOut[0].mIndexBase = indexBase;
+			indexBase += largestPolygon.mNbVerts;
+		}
+		else
+		{
+			if(i == largestFace)
+			{
+				// the largest polygon's old slot receives the former first polygon
+				PxMemCopy(&mSwappedIndices[indexBase], &indices[replacedPolygon.mIndexBase], sizeof(PxU32)*replacedPolygon.mNbVerts);
+				polygonsOut[i].mIndexBase = indexBase;
+				indexBase += replacedPolygon.mNbVerts;
+			}
+			else
+			{
+				// all other polygons keep their data, only mIndexBase is recomputed
+				PxMemCopy(&mSwappedIndices[indexBase], &indices[polygons[i].mIndexBase], sizeof(PxU32)*polygons[i].mNbVerts);
+				polygonsOut[i].mIndexBase = indexBase;
+				indexBase += polygons[i].mNbVerts;
+			}
+		}
+	}
+
+	PX_ASSERT(indexBase == desc.indices.count);
+
+	desc.indices.data = mSwappedIndices;
+}
+
+// Releases the scratch index buffer allocated by swapLargestFace(), if any.
+ConvexHullLib::~ConvexHullLib()
+{
+	if (mSwappedIndices != NULL)
+	{
+		PX_FREE(mSwappedIndices);
+	}
+}
diff --git a/PhysX_3.4/Source/PhysXCooking/src/convex/ConvexHullLib.h b/PhysX_3.4/Source/PhysXCooking/src/convex/ConvexHullLib.h
new file mode 100644
index 00000000..19ab68fe
--- /dev/null
+++ b/PhysX_3.4/Source/PhysXCooking/src/convex/ConvexHullLib.h
@@ -0,0 +1,82 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2016 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+
+#ifndef PX_CONVEXHULLLIB_H
+#define PX_CONVEXHULLLIB_H
+
+#include "PxConvexMeshDesc.h"
+#include "PxCooking.h"
+#include "CmPhysXCommon.h"
+
+namespace physx
+{
+	//////////////////////////////////////////////////////////////////////////
+	// base class for the convex hull libraries - inflation based and quickhull
+	// Provides shared input preprocessing (vertex cleanup, quantization hook,
+	// largest-face swap) for the concrete hull generators.
+	class ConvexHullLib
+	{
+		PX_NOCOPY(ConvexHullLib)
+	public:
+		// functions
+		ConvexHullLib(const PxConvexMeshDesc& desc, const PxCookingParams& params)
+			: mConvexMeshDesc(desc), mCookingParams(params), mSwappedIndices(NULL)
+		{
+		}
+
+		virtual ~ConvexHullLib();
+
+		// computes the convex hull from provided points
+		virtual PxConvexMeshCookingResult::Enum createConvexHull() = 0;
+
+		// fills the PxConvexMeshDesc with computed hull data
+		virtual void fillConvexMeshDesc(PxConvexMeshDesc& desc) = 0;
+
+		// maximum vertices per face supported by the GPU (GRB) path
+		static const PxU32 gpuMaxVertsPerFace = 32;
+
+	protected:
+
+		// clean input vertices from duplicates, normalize etc.
+		bool cleanupVertices(PxU32 svcount, // input vertex count
+			const PxVec3* svertices, // vertices
+			PxU32 stride, // stride
+			PxU32& vcount, // output number of vertices
+			PxVec3* vertices, // location to store the results.
+			PxVec3& scale, // scale
+			PxVec3& center); // center
+
+		// moves the largest polygon to slot 0, rebuilding the index buffer
+		void swapLargestFace(PxConvexMeshDesc& desc);
+
+	protected:
+		const PxConvexMeshDesc& mConvexMeshDesc;	// input mesh description (referenced, not copied)
+		const PxCookingParams& mCookingParams;		// cooking parameters (referenced, not copied)
+		PxU32* mSwappedIndices;						// owned scratch buffer created by swapLargestFace; freed in dtor
+	};
+}
+
+#endif
diff --git a/PhysX_3.4/Source/PhysXCooking/src/convex/ConvexHullUtils.cpp b/PhysX_3.4/Source/PhysXCooking/src/convex/ConvexHullUtils.cpp
new file mode 100644
index 00000000..cf921a16
--- /dev/null
+++ b/PhysX_3.4/Source/PhysXCooking/src/convex/ConvexHullUtils.cpp
@@ -0,0 +1,925 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2016 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+
+#include "foundation/PxBounds3.h"
+#include "foundation/PxMathUtils.h"
+
+#include "ConvexHullUtils.h"
+#include "VolumeIntegration.h"
+#include "PsUtilities.h"
+#include "PsVecMath.h"
+#include "GuBox.h"
+#include "GuConvexMeshData.h"
+
+using namespace physx;
+using namespace Ps::aos;
+
+namespace local
+{
+	static const float MIN_ADJACENT_ANGLE = 3.0f; // in degrees - result won't have two adjacent facets within this angle of each other.
+	static const float MAXDOT_MINANG = cosf(Ps::degToRad(MIN_ADJACENT_ANGLE)); // adjacent angle for dot product tests
+
+	//////////////////////////////////////////////////////////////////////////
+	// helper class for ConvexHullCrop - per-vertex clip bookkeeping
+	class VertFlag
+	{
+	public:
+		PxU8 planetest;	// side-of-plane classification (PlaneTestResult)
+		PxU8 undermap;	// output index of the vertex in the clipped hull
+		PxU8 overmap;	// unused - we do not keep the over part
+	};
+
+	//////////////////////////////////////////////////////////////////////////
+	// helper class for ConvexHullCrop - per-edge output mapping
+	class EdgeFlag
+	{
+	public:
+		PxI16 undermap;	// output index of the edge in the clipped hull
+	};
+
+	//////////////////////////////////////////////////////////////////////////
+	// helper class for ConvexHullCrop - one half edge of the new cap polygon
+	class Coplanar
+	{
+	public:
+		PxU16 ea;	// sibling edge index in the output edge list
+		PxU8 v0;	// start vertex
+		PxU8 v1;	// end vertex
+	};
+
+	//////////////////////////////////////////////////////////////////////////
+	// plane test
+	enum PlaneTestResult
+	{
+		eCOPLANAR = 0,
+		eUNDER = 1 << 0,
+		eOVER = 1 << 1
+	};
+
+	//////////////////////////////////////////////////////////////////////////
+	// test where vertex lies in respect to the plane
+	static PlaneTestResult planeTest(const PxPlane& p, const PxVec3& v, float epsilon)
+	{
+		const float a = v.dot(p.n) + p.d;
+		PlaneTestResult flag = (a > epsilon) ? eOVER : ((a < -epsilon) ? eUNDER : eCOPLANAR);
+		return flag;
+	}
+
+	// computes the OBB for this set of points relative to this transform matrix. SIMD version
+	// vcount/points	- input point cloud
+	// sides			- out: box dimensions
+	// rot				- in: box orientation
+	// trans			- in: initial box center, out: recentered box position
+	void computeOBBSIMD(PxU32 vcount, const Vec4V* points, Vec4V& sides, const QuatV& rot, Vec4V& trans)
+	{
+		PX_ASSERT(vcount);
+
+		Vec4V minV = V4Load(FLT_MAX);
+		// fix: the max accumulator must start at -FLT_MAX. FLT_MIN is the smallest
+		// *positive* normalized float and would break V4Max for point sets whose
+		// coordinates along an axis are all negative.
+		Vec4V maxV = V4Load(-FLT_MAX);
+		for (PxU32 i = 0; i < vcount; i++)
+		{
+			const Vec4V& vertexV = points[i];
+			// move the point into the box's local frame
+			const Vec4V t = V4Sub(vertexV, trans);
+			const Vec4V v = Vec4V_From_Vec3V(QuatRotateInv(rot, Vec3V_From_Vec4V(t)));
+
+			minV = V4Min(minV, v);
+			maxV = V4Max(maxV, v);
+		}
+
+		sides = V4Sub(maxV, minV);
+
+		// shift trans so the box is centered on the points
+		Mat33V tmpMat;
+		QuatGetMat33V(rot, tmpMat.col0, tmpMat.col1, tmpMat.col2);
+		const FloatV coe = FLoad(0.5f);
+
+		const Vec4V deltaVec = V4Sub(maxV, V4Scale(sides, coe));
+
+		const Vec4V t0 = V4Scale(Vec4V_From_Vec3V(tmpMat.col0), V4GetX(deltaVec));
+		trans = V4Add(trans, t0);
+
+		const Vec4V t1 = V4Scale(Vec4V_From_Vec3V(tmpMat.col1), V4GetY(deltaVec));
+		trans = V4Add(trans, t1);
+
+		const Vec4V t2 = V4Scale(Vec4V_From_Vec3V(tmpMat.col2), V4GetZ(deltaVec));
+		trans = V4Add(trans, t2);
+	}
+}
+
+//////////////////////////////////////////////////////////////////////////
+// construct the base cube hull (8 verts / 6 planes / 24 half edges) from the given min/max bounds
+ConvexHull::ConvexHull(const PxVec3& bmin, const PxVec3& bmax, const Ps::Array<PxPlane>& inPlanes)
+: mInputPlanes(inPlanes)
+{
+	// min max verts of the cube - 8 verts in ---,--+,-+-,-++,+--,+-+,++-,+++ order
+	// (bit 2 selects x, bit 1 selects y, bit 0 selects z)
+	for (PxU32 i = 0; i < 8; i++)
+	{
+		mVertices.pushBack(PxVec3((i & 4) ? bmax.x : bmin.x,
+			(i & 2) ? bmax.y : bmin.y,
+			(i & 1) ? bmax.z : bmin.z));
+	}
+
+	// cube planes - 6 planes
+	mFacets.pushBack(PxPlane(PxVec3(-1.f, 0, 0), bmin.x)); // 0,1,3,2
+	mFacets.pushBack(PxPlane(PxVec3(1.f, 0, 0), -bmax.x)); // 6,7,5,4
+	mFacets.pushBack(PxPlane(PxVec3(0, -1.f, 0), bmin.y)); // 0,4,5,1
+	mFacets.pushBack(PxPlane(PxVec3(0, 1.f, 0), -bmax.y)); // 3,7,6,2
+	mFacets.pushBack(PxPlane(PxVec3(0, 0, -1.f), bmin.z)); // 0,2,6,4
+	mFacets.pushBack(PxPlane(PxVec3(0, 0, 1.f), -bmax.z)); // 1,5,7,3
+
+	// cube edges - 24 edges: { sibling half-edge index, vertex index, plane index }
+	static const PxI16 edgeData[24][3] = {
+		{ 11, 0, 0 }, { 23, 1, 0 }, { 15, 3, 0 }, { 16, 2, 0 },
+		{ 13, 6, 1 }, { 21, 7, 1 }, {  9, 5, 1 }, { 18, 4, 1 },
+		{ 19, 0, 2 }, {  6, 4, 2 }, { 20, 5, 2 }, {  0, 1, 2 },
+		{ 22, 3, 3 }, {  4, 7, 3 }, { 17, 6, 3 }, {  2, 2, 3 },
+		{  3, 0, 4 }, { 14, 2, 4 }, {  7, 6, 4 }, {  8, 4, 4 },
+		{ 10, 1, 5 }, {  5, 5, 5 }, { 12, 7, 5 }, {  1, 3, 5 }
+	};
+	for (PxU32 i = 0; i < 24; i++)
+		mEdges.pushBack(HalfEdge(edgeData[i][0], PxU8(edgeData[i][1]), PxU8(edgeData[i][2])));
+}
+
+//////////////////////////////////////////////////////////////////////////
+// create the initial convex hull (8 verts / 6 planes / 24 half edges) from the given OBB
+ConvexHull::ConvexHull(const PxVec3& extent, const PxTransform& transform, const Ps::Array<PxPlane>& inPlanes)
+	: mInputPlanes(inPlanes)
+{
+	// get the OBB corner points
+	PxVec3 extentPoints[8];
+	PxMat33 rot(transform.q);
+	Gu::computeOBBPoints(extentPoints, transform.p, extent, rot.column0, rot.column1, rot.column2);
+
+	// OBB corners remapped into the ---,--+,-+-,-++,+--,+-+,++-,+++ vertex
+	// order expected by the edge table below
+	static const PxU32 cornerRemap[8] = { 0, 4, 3, 7, 1, 5, 2, 6 };
+	for (PxU32 i = 0; i < 8; i++)
+		mVertices.pushBack(extentPoints[cornerRemap[i]]);
+
+	// cube planes - 6 planes, each built from three corners of the corresponding face
+	static const PxU32 planeCorners[6][3] = {
+		{ 0, 4, 7 },	// 0,1,3,2
+		{ 2, 6, 5 },	// 6,7,5,4
+		{ 0, 1, 5 },	// 0,4,5,1
+		{ 7, 6, 2 },	// 3,7,6,2
+		{ 0, 3, 2 },	// 0,2,6,4
+		{ 4, 5, 6 }		// 1,5,7,3
+	};
+	for (PxU32 i = 0; i < 6; i++)
+	{
+		const PxPlane plane(extentPoints[planeCorners[i][0]], extentPoints[planeCorners[i][1]], extentPoints[planeCorners[i][2]]);
+		mFacets.pushBack(PxPlane(plane.n, plane.d));
+	}
+
+	// cube edges - 24 edges: { sibling half-edge index, vertex index, plane index }
+	static const PxI16 edgeData[24][3] = {
+		{ 11, 0, 0 }, { 23, 1, 0 }, { 15, 3, 0 }, { 16, 2, 0 },
+		{ 13, 6, 1 }, { 21, 7, 1 }, {  9, 5, 1 }, { 18, 4, 1 },
+		{ 19, 0, 2 }, {  6, 4, 2 }, { 20, 5, 2 }, {  0, 1, 2 },
+		{ 22, 3, 3 }, {  4, 7, 3 }, { 17, 6, 3 }, {  2, 2, 3 },
+		{  3, 0, 4 }, { 14, 2, 4 }, {  7, 6, 4 }, {  8, 4, 4 },
+		{ 10, 1, 5 }, {  5, 5, 5 }, { 12, 7, 5 }, {  1, 3, 5 }
+	};
+	for (PxU32 i = 0; i < 24; i++)
+		mEdges.pushBack(HalfEdge(edgeData[i][0], PxU8(edgeData[i][1]), PxU8(edgeData[i][2])));
+}
+
+//////////////////////////////////////////////////////////////////////////
+// finds the candidate plane, returns -1 otherwise
+// Scores each input plane by how far outside the current hull it reaches
+// (normalized by the hull's extent along the plane normal) and returns the
+// index of the best-scoring plane, or -1 if no plane scores above epsilon.
+PxI32 ConvexHull::findCandidatePlane(float planeTestEpsilon, float epsilon) const
+{
+	PxI32 p = -1;
+	float md = 0.0f;	// best score so far
+	PxU32 i, j;
+	for (i = 0; i < mInputPlanes.size(); i++)
+	{
+		float d = 0.0f;
+		float dmax = 0.0f;
+		float dmin = 0.0f;
+		// signed-distance range of all hull vertices to this plane
+		for (j = 0; j < mVertices.size(); j++)
+		{
+			dmax = PxMax(dmax, mVertices[j].dot(mInputPlanes[i].n) + mInputPlanes[i].d);
+			dmin = PxMin(dmin, mVertices[j].dot(mInputPlanes[i].n) + mInputPlanes[i].d);
+		}
+
+		float dr = dmax - dmin;
+		if (dr < planeTestEpsilon)
+			dr = 1.0f; // shouldn't happen.
+		d = dmax / dr;	// fraction of the hull extent sticking out over the plane
+		// we have a better candidate try another one
+		if (d <= md)
+			continue;
+		// check if we dont have already that plane or if the normals are nearly the same
+		for (j = 0; j<mFacets.size(); j++)
+		{
+			if (mInputPlanes[i] == mFacets[j])
+			{
+				d = 0.0f;
+				// NOTE(review): this 'continue' only advances the inner facet loop; since d
+				// was just zeroed and never reset, the net effect is to reject the plane, so
+				// a 'break' would state the intent more directly - verify before changing.
+				continue;
+			}
+			// nearly parallel to an existing facet: only accept if it would still cut the facet
+			if (mInputPlanes[i].n.dot(mFacets[j].n)> local::MAXDOT_MINANG)
+			{
+				for (PxU32 k = 0; k < mEdges.size(); k++)
+				{
+					if (mEdges[k].p != j)
+						continue;
+					if (mVertices[mEdges[k].v].dot(mInputPlanes[i].n) + mInputPlanes[i].d < 0)
+					{
+						d = 0; // so this plane wont get selected.
+						break;
+					}
+				}
+			}
+		}
+		if (d>md)
+		{
+			p = PxI32(i);
+			md = d;
+		}
+	}
+	return (md > epsilon) ? p : -1;
+}
+
+//////////////////////////////////////////////////////////////////////////
+// internal hull check - verifies half-edge connectivity, that every edge
+// vertex lies on its facet plane, and that face windings agree with the
+// facet normals. Returns false on the first inconsistency found.
+bool ConvexHull::assertIntact(float epsilon) const
+{
+	PxU32 i;
+	PxU32 estart = 0;	// first edge of the current facet
+	for (i = 0; i < mEdges.size(); i++)
+	{
+		if (mEdges[estart].p != mEdges[i].p)
+		{
+			estart = i;
+		}
+		// next edge within the same facet (wraps to the facet start)
+		PxU32 inext = i + 1;
+		if (inext >= mEdges.size() || mEdges[inext].p != mEdges[i].p)
+		{
+			inext = estart;
+		}
+		PX_ASSERT(mEdges[inext].p == mEdges[i].p);
+		PxI16 nb = mEdges[i].ea;
+		if (nb == 255 || nb == -1)
+			return false;
+		PX_ASSERT(nb != -1);
+		PX_ASSERT(i == PxU32(mEdges[PxU32(nb)].ea));
+		// Check that the vertex of the next edge is the vertex of the adjacent half edge.
+		// Otherwise the two half edges are not really adjacent and we have a hole.
+		PX_ASSERT(mEdges[PxU32(nb)].v == mEdges[inext].v);
+		if (!(mEdges[PxU32(nb)].v == mEdges[inext].v))
+			return false;
+	}
+
+	for (i = 0; i < mEdges.size(); i++)
+	{
+		PX_ASSERT(local::eCOPLANAR == local::planeTest(mFacets[mEdges[i].p], mVertices[mEdges[i].v], epsilon));
+		if (local::eCOPLANAR != local::planeTest(mFacets[mEdges[i].p], mVertices[mEdges[i].v], epsilon))
+			return false;
+		if (mEdges[estart].p != mEdges[i].p)
+		{
+			estart = i;
+		}
+		PxU32 i1 = i + 1;
+		if (i1 >= mEdges.size() || mEdges[i1].p != mEdges[i].p) {
+			i1 = estart;
+		}
+		PxU32 i2 = i1 + 1;
+		if (i2 >= mEdges.size() || mEdges[i2].p != mEdges[i].p) {
+			i2 = estart;
+		}
+		if (i == i2)
+			continue; // i sliced tangent to an edge and created 2 meaningless edges
+
+		// check the face normal against the triangle from edges
+		PxVec3 localNormal = (mVertices[mEdges[i1].v] - mVertices[mEdges[i].v]).cross(mVertices[mEdges[i2].v] - mVertices[mEdges[i1].v]);
+		const float m = localNormal.magnitude();
+		if (m == 0.0f)
+			localNormal = PxVec3(1.f, 0.0f, 0.0f);	// degenerate triangle - fall back to an arbitrary unit normal
+		else
+			localNormal *= (1.0f / m);	// fix: was dividing unconditionally, producing INF/NaN for m == 0
+		if (localNormal.dot(mFacets[mEdges[i].p].n) <= 0.0f)
+			return false;
+	}
+	return true;
+}
+
+// returns the maximum number of vertices on a face
+// (one half edge per vertex, so this counts edges per facet)
+PxU32 ConvexHull::maxNumVertsPerFace() const
+{
+	PxU32 maxVerts = 0;
+	PxU32 currentVerts = 0;
+	PxU32 estart = 0;	// first edge of the current facet
+	for (PxU32 i = 0; i < mEdges.size(); i++)
+	{
+		if (mEdges[estart].p != mEdges[i].p)
+		{
+			// facet boundary - flush the finished facet's count
+			maxVerts = PxMax(maxVerts, currentVerts);
+			currentVerts = 1;	// edge i is the first vertex of the new facet
+			estart = i;
+		}
+		else
+		{
+			currentVerts++;
+		}
+	}
+	// fix: flush the last facet too - it was previously never compared against
+	// maxVerts (and the first facet was over-counted by one).
+	return PxMax(maxVerts, currentVerts);
+}
+
+//////////////////////////////////////////////////////////////////////////
+// slice the input convexHull with the slice plane, returning the part of
+// the hull UNDER the plane as a new hull (or NULL on failure / degeneracy)
+ConvexHull* physx::convexHullCrop(const ConvexHull& convex, const PxPlane& slice, float planeTestEpsilon)
+{
+	static const PxU8 invalidIndex = PxU8(-1);
+	PxU32 i;
+	PxU32 vertCountUnder = 0; // Running count of the vertices UNDER the slicing plane.
+
+	PX_ASSERT(convex.getEdges().size() < 480);
+
+	// Arrays of mapping information associated with features in the input convex.
+	// edgeflag[i].undermap - output index of input edge convex->edges[i]
+	// vertflag[i].undermap - output index of input vertex convex->vertices[i]
+	// vertflag[i].planetest - the side-of-plane classification of convex->vertices[i]
+	// (There are other members but they are unused.)
+	local::EdgeFlag edgeFlag[512];
+	local::VertFlag vertFlag[256];
+
+	// Lists of output features. Populated during clipping.
+	// Coplanar edges have one sibling in tmpunderedges and one in coplanaredges.
+	// coplanaredges holds the sibling that belong to the new polygon created from slicing.
+	ConvexHull::HalfEdge tmpUnderEdges[512]; // The output edge list.
+	PxPlane tmpUnderPlanes[128]; // The output plane list.
+	local::Coplanar coplanarEdges[512]; // The coplanar edge list.
+
+	PxU32 coplanarEdgesNum = 0; // Running count of coplanar edges.
+
+	// Created vertices on the slicing plane (stored for output after clipping).
+	Ps::Array<PxVec3> createdVerts;
+
+	// Logical OR of individual vertex flags.
+	PxU32 convexClipFlags = 0;
+
+	// Classify each vertex against the slicing plane as OVER | COPLANAR | UNDER.
+	// OVER - Vertex is over (outside) the slicing plane. Will not be output.
+	// COPLANAR - Vertex is on the slicing plane. A copy will be output.
+	// UNDER - Vertex is under (inside) the slicing plane. Will be output.
+	// We keep an array of information structures for each vertex in the input convex.
+	// vertflag[i].undermap - The (computed) index of convex->vertices[i] in the output.
+	//   invalidIndex for OVER vertices - they are not output.
+	//   initially invalidIndex for COPLANAR vertices - set later.
+	// vertflag[i].overmap - Unused - we don't care about the over part.
+	// vertflag[i].planetest - The classification (clip flag) of convex->vertices[i].
+	for (i = 0; i < convex.getVertices().size(); i++)
+	{
+		local::PlaneTestResult vertexClipFlag = local::planeTest(slice, convex.getVertices()[i], planeTestEpsilon);
+		switch (vertexClipFlag)
+		{
+		case local::eOVER:
+		case local::eCOPLANAR:
+			vertFlag[i].undermap = invalidIndex; // Initially invalid for COPLANAR
+			vertFlag[i].overmap = invalidIndex;
+			break;
+		case local::eUNDER:
+			vertFlag[i].undermap = Ps::to8(vertCountUnder++);
+			vertFlag[i].overmap = invalidIndex;
+			break;
+		}
+		vertFlag[i].planetest = PxU8(vertexClipFlag);
+		convexClipFlags |= vertexClipFlag;
+	}
+
+	// Check special case: everything UNDER or COPLANAR.
+	// This way we know we wont end up with silly faces / edges later on.
+	if ((convexClipFlags & local::eOVER) == 0)
+	{
+		// Just return a copy of the same convex.
+		ConvexHull* dst = PX_NEW_TEMP(ConvexHull)(convex);
+		return dst;
+	}
+
+	PxU16 underEdgeCount = 0; // Running count of output edges.
+	PxU16 underPlanesCount = 0; // Running count of output planes.
+
+	// Clipping Loop
+	// =============
+	//
+	// for each plane
+	//
+	//   for each edge
+	//
+	//     if first UNDER & second !UNDER
+	//       output current edge -> tmpunderedges
+	//       if we have done the sibling
+	//         connect current edge to its sibling
+	//         set vout = first vertex of sibling
+	//       else if second is COPLANAR
+	//         if we havent already copied it
+	//           copy second -> createdverts
+	//         set vout = index of created vertex
+	//       else
+	//         generate a new vertex -> createdverts
+	//         set vout = index of created vertex
+	//       if vin is already set and vin != vout (non-trivial edge)
+	//         output coplanar edge -> tmpunderedges (one sibling)
+	//         set coplanaredge to new edge index (for connecting the other sibling)
+	//
+	//     else if first !UNDER & second UNDER
+	//       if we have done the sibling
+	//         connect current edge to its sibling
+	//         set vin = second vertex of sibling (this is a bit of a pain)
+	//       else if first is COPLANAR
+	//         if we havent already copied it
+	//           copy first -> createdverts
+	//         set vin = index of created vertex
+	//       else
+	//         generate a new vertex -> createdverts
+	//         set vin = index of created vertex
+	//       if vout is already set and vin != vout (non-trivial edge)
+	//         output coplanar edge -> tmpunderedges (one sibling)
+	//         set coplanaredge to new edge index (for connecting the other sibling)
+	//       output current edge -> tmpunderedges
+	//
+	//     else if first UNDER & second UNDER
+	//       output current edge -> tmpunderedges
+	//
+	//   next edge
+	//
+	//   if part of current plane was UNDER
+	//     output current plane -> tmpunderplanes
+	//
+	//   if coplanaredge is set
+	//     output coplanar edge -> coplanaredges
+	//
+	// next plane
+	//
+
+	// Indexing is a bit tricky here:
+	//
+	// e0 - index of the current edge
+	// e1 - index of the next edge
+	// estart - index of the first edge in the current plane
+	// currentplane - index of the current plane
+	// enextface - first edge of next plane
+
+	PxU32 e0 = 0;
+
+	for (PxU32 currentplane = 0; currentplane < convex.getFacets().size(); currentplane++)
+	{
+
+		PxU32 eStart = e0;
+		PxU32 eNextFace = 0xffffffff;
+		PxU32 e1 = e0 + 1;
+
+		PxU8 vout = invalidIndex;
+		PxU8 vin = invalidIndex;
+
+		PxU32 coplanarEdge = invalidIndex;
+
+		// Logical OR of individual vertex flags in the current plane.
+		PxU32 planeSide = 0;
+
+		do{
+
+			// Next edge modulo logic
+			if (e1 >= convex.getEdges().size() || convex.getEdges()[e1].p != currentplane)
+			{
+				eNextFace = e1;
+				e1 = eStart;
+			}
+
+			const ConvexHull::HalfEdge& edge0 = convex.getEdges()[e0];
+			const ConvexHull::HalfEdge& edge1 = convex.getEdges()[e1];
+			const ConvexHull::HalfEdge& edgea = convex.getEdges()[PxU32(edge0.ea)];
+
+			planeSide |= vertFlag[edge0.v].planetest;
+
+			if (vertFlag[edge0.v].planetest == local::eUNDER && vertFlag[edge1.v].planetest != local::eUNDER)
+			{
+				// first is UNDER, second is COPLANAR or OVER
+
+				// Output current edge.
+				edgeFlag[e0].undermap = short(underEdgeCount);
+				tmpUnderEdges[underEdgeCount].v = vertFlag[edge0.v].undermap;
+				tmpUnderEdges[underEdgeCount].p = PxU8(underPlanesCount);
+				PX_ASSERT(tmpUnderEdges[underEdgeCount].v != invalidIndex);
+
+				if (PxU32(edge0.ea) < e0)
+				{
+					// We have already done the sibling.
+					// Connect current edge to its sibling.
+					PX_ASSERT(edgeFlag[edge0.ea].undermap != invalidIndex);
+					tmpUnderEdges[underEdgeCount].ea = edgeFlag[edge0.ea].undermap;
+					tmpUnderEdges[edgeFlag[edge0.ea].undermap].ea = short(underEdgeCount);
+					// Set vout = first vertex of (output, clipped) sibling.
+					vout = tmpUnderEdges[edgeFlag[edge0.ea].undermap].v;
+				}
+				else if (vertFlag[edge1.v].planetest == local::eCOPLANAR)
+				{
+					// Boundary case.
+					// We output coplanar vertices once.
+					if (vertFlag[edge1.v].undermap == invalidIndex)
+					{
+						createdVerts.pushBack(convex.getVertices()[edge1.v]);
+						// Remember the index so we don't output it again.
+						vertFlag[edge1.v].undermap = Ps::to8(vertCountUnder++);
+					}
+					vout = vertFlag[edge1.v].undermap;
+				}
+				else
+				{
+					// Add new vertex.
+					const PxPlane& p0 = convex.getFacets()[edge0.p];
+					const PxPlane& pa = convex.getFacets()[edgea.p];
+					createdVerts.pushBack(threePlaneIntersection(p0, pa, slice));
+					vout = Ps::to8(vertCountUnder++);
+				}
+
+				// We added an edge, increment the counter
+				underEdgeCount++;
+
+				if (vin != invalidIndex && vin != vout)
+				{
+					// We already have vin and a non-trivial edge
+					// Output coplanar edge
+					PX_ASSERT(vout != invalidIndex);
+					coplanarEdge = underEdgeCount;
+					tmpUnderEdges[underEdgeCount].v = vout;
+					tmpUnderEdges[underEdgeCount].p = PxU8(underPlanesCount);
+					tmpUnderEdges[underEdgeCount].ea = invalidIndex;
+					underEdgeCount++;
+				}
+			}
+			else if (vertFlag[edge0.v].planetest != local::eUNDER && vertFlag[edge1.v].planetest == local::eUNDER)
+			{
+				// First is OVER or COPLANAR, second is UNDER.
+
+				if (PxU32(edge0.ea) < e0)
+				{
+					// We have already done the sibling.
+					// We need the second vertex of the sibling.
+					// Which is the vertex of the next edge in the adjacent poly.
+					int nea = edgeFlag[edge0.ea].undermap + 1;
+					int p = tmpUnderEdges[edgeFlag[edge0.ea].undermap].p;
+					if (nea >= underEdgeCount || tmpUnderEdges[nea].p != p)
+					{
+						// End of polygon, next edge is first edge
+						nea -= 2;
+						while (nea > 0 && tmpUnderEdges[nea - 1].p == p)
+							nea--;
+					}
+					vin = tmpUnderEdges[nea].v;
+					PX_ASSERT(vin < vertCountUnder);
+				}
+				else if (vertFlag[edge0.v].planetest == local::eCOPLANAR)
+				{
+					// Boundary case.
+					// We output coplanar vertices once.
+					if (vertFlag[edge0.v].undermap == invalidIndex)
+					{
+						createdVerts.pushBack(convex.getVertices()[edge0.v]);
+						// Remember the index so we don't output it again.
+						vertFlag[edge0.v].undermap = Ps::to8(vertCountUnder++);
+					}
+					vin = vertFlag[edge0.v].undermap;
+				}
+				else
+				{
+					// Add new vertex.
+					const PxPlane& p0 = convex.getFacets()[edge0.p];
+					const PxPlane& pa = convex.getFacets()[edgea.p];
+					createdVerts.pushBack(threePlaneIntersection(p0, pa, slice));
+					vin = Ps::to8(vertCountUnder++);
+				}
+
+				if (vout != invalidIndex && vin != vout)
+				{
+					// We have been in and out, Add the coplanar edge
+					coplanarEdge = underEdgeCount;
+					tmpUnderEdges[underEdgeCount].v = vout;
+					tmpUnderEdges[underEdgeCount].p = Ps::to8(underPlanesCount);
+					tmpUnderEdges[underEdgeCount].ea = invalidIndex;
+					underEdgeCount++;
+				}
+
+				// Output current edge.
+				tmpUnderEdges[underEdgeCount].v = vin;
+				tmpUnderEdges[underEdgeCount].p = Ps::to8(underPlanesCount);
+				edgeFlag[e0].undermap = short(underEdgeCount);
+
+				if (PxU32(edge0.ea) < e0)
+				{
+					// We have already done the sibling.
+					// Connect current edge to its sibling.
+					PX_ASSERT(edgeFlag[edge0.ea].undermap != invalidIndex);
+					tmpUnderEdges[underEdgeCount].ea = edgeFlag[edge0.ea].undermap;
+					tmpUnderEdges[edgeFlag[edge0.ea].undermap].ea = short(underEdgeCount);
+				}
+
+				PX_ASSERT(edgeFlag[e0].undermap == underEdgeCount);
+				underEdgeCount++;
+			}
+			else if (vertFlag[edge0.v].planetest == local::eUNDER && vertFlag[edge1.v].planetest == local::eUNDER)
+			{
+				// Both UNDER
+
+				// Output current edge.
+				edgeFlag[e0].undermap = short(underEdgeCount);
+				tmpUnderEdges[underEdgeCount].v = vertFlag[edge0.v].undermap;
+				tmpUnderEdges[underEdgeCount].p = Ps::to8(underPlanesCount);
+				if (PxU32(edge0.ea) < e0)
+				{
+					// We have already done the sibling.
+					// Connect current edge to its sibling.
+					PX_ASSERT(edgeFlag[edge0.ea].undermap != invalidIndex);
+					tmpUnderEdges[underEdgeCount].ea = edgeFlag[edge0.ea].undermap;
+					tmpUnderEdges[edgeFlag[edge0.ea].undermap].ea = short(underEdgeCount);
+				}
+				underEdgeCount++;
+			}
+
+			e0 = e1;
+			e1++; // do the modulo at the beginning of the loop
+
+		} while (e0 != eStart);
+
+		e0 = eNextFace;
+
+		if (planeSide & local::eUNDER)
+		{
+			// At least part of current plane is UNDER.
+			// Output current plane.
+			tmpUnderPlanes[underPlanesCount] = convex.getFacets()[currentplane];
+			underPlanesCount++;
+		}
+
+		if (coplanarEdge != invalidIndex)
+		{
+			// We have a coplanar edge.
+			// Add to coplanaredges for later processing.
+			// (One sibling is in place but one is missing)
+			PX_ASSERT(vin != invalidIndex);
+			PX_ASSERT(vout != invalidIndex);
+			PX_ASSERT(coplanarEdge != 511);
+			coplanarEdges[coplanarEdgesNum].ea = PxU8(coplanarEdge);
+			coplanarEdges[coplanarEdgesNum].v0 = vin;
+			coplanarEdges[coplanarEdgesNum].v1 = vout;
+			coplanarEdgesNum++;
+		}
+
+		// Reset coplanar edge infos for next poly
+		vin = invalidIndex;
+		vout = invalidIndex;
+		coplanarEdge = invalidIndex;
+	}
+
+	// Add the new plane to the mix:
+	if (coplanarEdgesNum > 0)
+	{
+		tmpUnderPlanes[underPlanesCount++] = slice;
+	}
+
+	// Sort the coplanar edges in winding order.
+	// fix: loop condition was 'i < coplanarEdgesNum - 1' - with coplanarEdgesNum == 0
+	// the unsigned subtraction wraps to 0xffffffff and the loop scans uninitialized data.
+	for (i = 0; i + 1 < coplanarEdgesNum; i++)
+	{
+		if (coplanarEdges[i].v1 != coplanarEdges[i + 1].v0)
+		{
+			PxU32 j = 0;
+			for (j = i + 2; j < coplanarEdgesNum; j++)
+			{
+				if (coplanarEdges[i].v1 == coplanarEdges[j].v0)
+				{
+					local::Coplanar tmp = coplanarEdges[i + 1];
+					coplanarEdges[i + 1] = coplanarEdges[j];
+					coplanarEdges[j] = tmp;
+					break;
+				}
+			}
+			if (j >= coplanarEdgesNum)
+			{
+				// PX_ASSERT(j<coplanaredges_num);
+				return NULL;
+			}
+		}
+	}
+
+	// PT: added this line to fix DE2904
+	if (!vertCountUnder)
+		return NULL;
+
+	// Create the output convex.
+	ConvexHull* punder = PX_NEW_TEMP(ConvexHull)(convex.getInputPlanes());
+	ConvexHull& under = *punder;
+
+	// Copy UNDER vertices
+	PxU32 k = 0;
+	for (i = 0; i < convex.getVertices().size(); i++)
+	{
+		if (vertFlag[i].planetest == local::eUNDER)
+		{
+			under.getVertices().pushBack(convex.getVertices()[i]);
+			k++;
+		}
+	}
+
+	// Copy created vertices
+	i = 0;
+	while (k < vertCountUnder)
+	{
+		under.getVertices().pushBack(createdVerts[i++]);
+		k++;
+	}
+
+	PX_ASSERT(i == createdVerts.size());
+
+	// Copy the output edges and output planes.
+	under.getEdges().resize(underEdgeCount + coplanarEdgesNum);
+	under.getFacets().resize(underPlanesCount);
+
+	// Add the coplanar edge siblings that belong to the new polygon (coplanaredges).
+	for (i = 0; i < coplanarEdgesNum; i++)
+	{
+		under.getEdges()[underEdgeCount + i].p = PxU8(underPlanesCount - 1);
+		under.getEdges()[underEdgeCount + i].ea = short(coplanarEdges[i].ea);
+		tmpUnderEdges[coplanarEdges[i].ea].ea = PxI16(underEdgeCount + i);
+		under.getEdges()[underEdgeCount + i].v = coplanarEdges[i].v0;
+	}
+
+	PxMemCopy(under.getEdges().begin(), tmpUnderEdges, sizeof(ConvexHull::HalfEdge)*underEdgeCount);
+	PxMemCopy(under.getFacets().begin(), tmpUnderPlanes, sizeof(PxPlane)*underPlanesCount);
+	return punder;
+}
+
+// Computes a tight(ish) OBB for the convex described by desc: diagonalizes the
+// inertia tensor to get candidate axes, then brute-force rotates around each
+// axis in 18-degree steps, keeping the orientation with the smallest box volume.
+// Outputs the box dimensions in 'sides' and its pose in 'matrix'.
+// Returns false if the volume integration fails.
+bool physx::computeOBBFromConvex(const PxConvexMeshDesc& desc, PxVec3& sides, PxTransform& matrix)
+{
+	PxIntegrals integrals;
+	// using the centroid of the convex for the volume integration solved accuracy issues in cases where the inertia tensor
+	// ended up close to not being positive definite and after a few further transforms the diagonalized inertia tensor ended
+	// up with negative values.
+
+	const PxVec3* verts = (reinterpret_cast<const PxVec3*>(desc.points.data));
+	const PxU32* ind = (reinterpret_cast<const PxU32*>(desc.indices.data));
+	const PxHullPolygon* polygons = (reinterpret_cast<const PxHullPolygon*>(desc.polygons.data));
+	// centroid of the input vertices (integration reference point)
+	PxVec3 mean(0.0f);
+	for (PxU32 i = 0; i < desc.points.count; i++)
+		mean += verts[i];
+	mean *= (1.0f / desc.points.count);
+
+	// narrow the 32-bit indices to 8 bits for the internal descriptor
+	PxU8* indices = reinterpret_cast<PxU8*> (PX_ALLOC_TEMP(sizeof(PxU8)*desc.indices.count, "PxU8"));
+	for (PxU32 i = 0; i < desc.indices.count; i++)
+	{
+		indices[i] = Ps::to8(ind[i]);
+	}
+	// we need to move the polygon data to internal format
+	Gu::HullPolygonData* polygonData = reinterpret_cast<Gu::HullPolygonData*> (PX_ALLOC_TEMP(sizeof(Gu::HullPolygonData)*desc.polygons.count, "Gu::HullPolygonData"));
+	for (PxU32 i = 0; i < desc.polygons.count; i++)
+	{
+		polygonData[i].mPlane = PxPlane(polygons[i].mPlane[0], polygons[i].mPlane[1], polygons[i].mPlane[2], polygons[i].mPlane[3]);
+		polygonData[i].mNbVerts = Ps::to8(polygons[i].mNbVerts);
+		polygonData[i].mVRef8 = polygons[i].mIndexBase;
+	}
+
+	PxConvexMeshDesc inDesc;
+	inDesc.points.data = desc.points.data;
+	inDesc.points.count = desc.points.count;
+
+	inDesc.polygons.data = polygonData;
+	inDesc.polygons.count = desc.polygons.count;
+
+	inDesc.indices.data = indices;
+	inDesc.indices.count = desc.indices.count;
+
+	// compute volume integrals to get basis axis
+	bool status = (desc.flags & PxConvexFlag::eFAST_INERTIA_COMPUTATION) ?
+		computeVolumeIntegralsEberlySIMD(inDesc, 1.0f, integrals, mean) : computeVolumeIntegralsEberly(inDesc, 1.0f, integrals, mean);
+	if (status)
+	{
+		Vec4V* pointsV = reinterpret_cast<Vec4V*> (PX_ALLOC_TEMP(sizeof(Vec4V)*desc.points.count, "Vec4V"));
+		for (PxU32 i = 0; i < desc.points.count; i++)
+		{
+			// safe to V4 load, same as volume integration - we allocate one more vector
+			// NOTE(review): the 16-byte V4LoadU of the last 12-byte PxVec3 reads past
+			// verts[count-1]; the comment claims the *source* buffer has slack - verify
+			// against the allocation in the volume integration code.
+			pointsV[i] = V4LoadU(&verts[i].x);
+		}
+
+		// candidate axes from the diagonalized inertia tensor
+		PxMat33 inertia;
+		integrals.getOriginInertia(inertia);
+		PxQuat inertiaQuat;
+		PxDiagonalize(inertia, inertiaQuat);
+		PxMat33 baseAxis(inertiaQuat);
+		Vec4V center = V4LoadU(&integrals.COM.x);
+
+		const PxU32 numSteps = 20;
+		const float subStep = Ps::degToRad(float(360/numSteps));	// 18 degrees per step
+
+		float bestVolume = 1e9;
+
+		// brute-force search: rotate around each principal axis and keep the smallest box
+		for (PxU32 axis = 0; axis < 3; axis++)
+		{
+			for (PxU32 iStep = 0; iStep < numSteps; iStep++)
+			{
+				PxQuat quat(iStep*subStep, baseAxis[axis]);
+
+				Vec4V transV = center;
+				Vec4V psidesV;
+
+				const QuatV rotV = QuatVLoadU(&quat.x);
+				local::computeOBBSIMD(desc.points.count, pointsV, psidesV, rotV, transV);
+
+				PxVec3 psides;
+				V3StoreU(Vec3V_From_Vec4V(psidesV), psides);
+
+				const float volume = psides[0] * psides[1] * psides[2]; // the volume of the cube
+
+				if (volume <= bestVolume)
+				{
+					bestVolume = volume;
+					sides = psides;
+
+					V4StoreU(rotV, &matrix.q.x);
+					V3StoreU(Vec3V_From_Vec4V(transV), matrix.p);
+				}
+			}
+		}
+
+		PX_FREE_AND_RESET(pointsV);
+	}
+	else
+	{
+		// integration failed - release scratch buffers and bail out
+		PX_FREE_AND_RESET(indices);
+		PX_FREE_AND_RESET(polygonData);
+		return false;
+	}
+
+	PX_FREE_AND_RESET(indices);
+	PX_FREE_AND_RESET(polygonData);
+	return true;
+}
diff --git a/PhysX_3.4/Source/PhysXCooking/src/convex/ConvexHullUtils.h b/PhysX_3.4/Source/PhysXCooking/src/convex/ConvexHullUtils.h
new file mode 100644
index 00000000..5178b043
--- /dev/null
+++ b/PhysX_3.4/Source/PhysXCooking/src/convex/ConvexHullUtils.h
@@ -0,0 +1,177 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2016 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+
+#ifndef PX_CONVEXHULLUTILS_H
+#define PX_CONVEXHULLUTILS_H
+
+#include "foundation/PxMemory.h"
+#include "foundation/PxPlane.h"
+
+#include "CmPhysXCommon.h"
+
+#include "PsUserAllocated.h"
+#include "PsArray.h"
+#include "PsMathUtils.h"
+
+#include "PxConvexMeshDesc.h"
+
+namespace physx
+{
+
+ //////////////////////////////////////////////////////////////////////////
+ // helper class for hull construction, holds the vertices and planes together
+ // while cropping the hull with planes
+	class ConvexHull : public Ps::UserAllocated
+	{
+	public:
+
+		// Helper class for halfedge representation.
+		// Index widths bound the hull size: at most 32767 edge pairs, 256 vertices, 256 facets.
+		class HalfEdge
+		{
+		public:
+			PxI16 ea;         // the other half of the edge (index into edges list)
+			PxU8 v;          // the vertex at the start of this edge (index into vertices list)
+			PxU8 p;          // the facet on which this edge lies (index into facets list)
+			HalfEdge(){}
+			HalfEdge(PxI16 _ea, PxU8 _v, PxU8 _p) :ea(_ea), v(_v), p(_p){}
+		};
+
+		// Declared but intentionally not defined: assignment is disabled (pre-C++11
+		// idiom), since mInputPlanes is a reference member and cannot be reseated.
+		ConvexHull& operator = (const ConvexHull&);
+
+		// construct the base cube hull from given max/min AABB
+		ConvexHull(const PxVec3& bmin, const PxVec3& bmax, const Ps::Array<PxPlane>& inPlanes);
+
+		// construct the base cube hull from given OBB
+		ConvexHull(const PxVec3& extent, const PxTransform& transform, const Ps::Array<PxPlane>& inPlanes);
+
+		// copy constructor; shares the source's input-plane array by reference
+		ConvexHull(const ConvexHull& srcHull)
+			: mInputPlanes(srcHull.getInputPlanes())
+		{
+			copyHull(srcHull);
+		}
+
+		// construct plain hull (no vertices/edges/facets yet)
+		ConvexHull(const Ps::Array<PxPlane>& inPlanes)
+			: mInputPlanes(inPlanes)
+		{
+		}
+
+		// finds the candidate plane, returns -1 otherwise
+		PxI32 findCandidatePlane(float planetestepsilon, float epsilon) const;
+
+		// internal check of the hull integrity
+		bool assertIntact(float epsilon) const;
+
+		// return vertices
+		const Ps::Array<PxVec3>& getVertices() const
+		{
+			return mVertices;
+		}
+
+		// return edges
+		const Ps::Array<HalfEdge>& getEdges() const
+		{
+			return mEdges;
+		}
+
+		// return faces
+		const Ps::Array<PxPlane>& getFacets() const
+		{
+			return mFacets;
+		}
+
+		// return input planes
+		const Ps::Array<PxPlane>& getInputPlanes() const
+		{
+			return mInputPlanes;
+		}
+
+		// return vertices
+		Ps::Array<PxVec3>& getVertices()
+		{
+			return mVertices;
+		}
+
+		// return edges
+		Ps::Array<HalfEdge>& getEdges()
+		{
+			return mEdges;
+		}
+
+		// return faces
+		Ps::Array<PxPlane>& getFacets()
+		{
+			return mFacets;
+		}
+
+		// returns the maximum number of vertices on a face
+		PxU32 maxNumVertsPerFace() const;
+
+		// copy the hull from source
+		// Raw PxMemCopy is safe here: PxVec3, HalfEdge and PxPlane hold no owning
+		// pointers, so a bytewise copy fully duplicates them.
+		void copyHull(const ConvexHull& src)
+		{
+			mVertices.resize(src.getVertices().size());
+			mEdges.resize(src.getEdges().size());
+			mFacets.resize(src.getFacets().size());
+
+			PxMemCopy(mVertices.begin(), src.getVertices().begin(), src.getVertices().size()*sizeof(PxVec3));
+			PxMemCopy(mEdges.begin(), src.getEdges().begin(), src.getEdges().size()*sizeof(HalfEdge));
+			PxMemCopy(mFacets.begin(), src.getFacets().begin(), src.getFacets().size()*sizeof(PxPlane));
+		}
+
+	private:
+		Ps::Array<PxVec3>		mVertices;
+		Ps::Array<HalfEdge>		mEdges;
+		Ps::Array<PxPlane>		mFacets;
+		const Ps::Array<PxPlane>& mInputPlanes;	// not owned; must outlive this hull
+	};
+
+	//////////////////////////////////////////////////////////////////////////
+ // Crops the hull with a provided plane and with given epsilon
+ // returns new hull if succeeded
+ ConvexHull* convexHullCrop(const ConvexHull& convex, const PxPlane& slice, float planetestepsilon);
+
+	//////////////////////////////////////////////////////////////////////////
+ // three planes intersection
+	// Solves the 3x3 linear system [n0^T; n1^T; n2^T] * x = -(d0, d1, d2),
+	// i.e. the unique point x with ni.dot(x) + di == 0 for all three planes.
+	// NOTE(review): no degeneracy check - if the three normals are linearly
+	// dependent the matrix is singular and getInverse() yields an unusable
+	// result; callers are expected to pass planes known to meet in one point.
+	PX_FORCE_INLINE PxVec3 threePlaneIntersection(const PxPlane& p0, const PxPlane& p1, const PxPlane& p2)
+	{
+		PxMat33 mp = (PxMat33(p0.n, p1.n, p2.n)).getTranspose();
+		PxMat33 mi = (mp).getInverse();
+		PxVec3 b(p0.d, p1.d, p2.d);
+		return -mi.transform(b);
+	}
+
+ //////////////////////////////////////////////////////////////////////////
+ // Compute OBB around given convex hull
+ bool computeOBBFromConvex(const PxConvexMeshDesc& desc, PxVec3& sides, PxTransform& matrix);
+}
+
+#endif
diff --git a/PhysX_3.4/Source/PhysXCooking/src/convex/ConvexMeshBuilder.cpp b/PhysX_3.4/Source/PhysXCooking/src/convex/ConvexMeshBuilder.cpp
new file mode 100644
index 00000000..5fb356c3
--- /dev/null
+++ b/PhysX_3.4/Source/PhysXCooking/src/convex/ConvexMeshBuilder.cpp
@@ -0,0 +1,504 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2016 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+
+#include "GuConvexMesh.h"
+#include "PsFoundation.h"
+#include "PsMathUtils.h"
+#include "Cooking.h"
+
+#include "GuHillClimbing.h"
+#include "GuBigConvexData2.h"
+#include "GuInternal.h"
+#include "GuSerialize.h"
+#include "VolumeIntegration.h"
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+#include "VolumeIntegration.h"
+#include "ConvexHullBuilder.h"
+#include "ConvexMeshBuilder.h"
+#include "BigConvexDataBuilder.h"
+
+#include "CmUtils.h"
+#include "PsVecMath.h"
+
+using namespace physx;
+using namespace Gu;
+using namespace Ps::aos;
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+// mMass starts at 0.0f on purpose: computeMassInfo() treats a non-positive
+// mass as "not yet computed" and uses it as a compute-once guard.
+ConvexMeshBuilder::ConvexMeshBuilder(const bool buildGRBData) : hullBuilder(&mHullData, buildGRBData), mBigConvexData(NULL), mMass(0.0f), mInertia(PxIdentity)
+{
+}
+
+ConvexMeshBuilder::~ConvexMeshBuilder()
+{
+	// the builder owns the optional big convex (gauss map) data
+	PX_DELETE_AND_RESET(mBigConvexData);
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+// load the mesh data from given polygons
+// Builds the cooked convex mesh from the descriptor: hull + mass properties,
+// local AABB, optional gauss map, and internal-object metrics.
+// Returns false on an invalid descriptor or any cooking failure.
+bool ConvexMeshBuilder::build(const PxConvexMeshDesc& desc, PxU32 gaussMapVertexLimit, bool validateOnly, bool userPolygons)
+{
+	if(!desc.isValid())
+	{
+		Ps::getFoundation().error(PxErrorCode::eINVALID_PARAMETER, __FILE__, __LINE__, "Gu::ConvexMesh::loadFromDesc: desc.isValid() failed!");
+		return false;
+	}
+
+	if(!loadConvexHull(desc, gaussMapVertexLimit, userPolygons))
+		return false;
+
+	// Compute local bounds (*after* hull has been created)
+	PxBounds3 minMaxBounds;
+	computeBoundsAroundVertices(minMaxBounds, mHullData.mNbHullVertices, hullBuilder.mHullDataHullVertices);
+	mHullData.mAABB = CenterExtents(minMaxBounds);
+
+	// the gauss map acceleration structure is only worth building past this vertex count
+	if(mHullData.mNbHullVertices > gaussMapVertexLimit)
+	{
+		if(!computeGaussMaps())
+		{
+			return false;
+		}
+	}
+
+	// validation-only mode skips the internal-object precomputation below
+	if(validateOnly)
+		return true;
+
+// TEST_INTERNAL_OBJECTS
+	computeInternalObjects();
+//~TEST_INTERNAL_OBJECTS
+
+	return true;
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+PX_COMPILE_TIME_ASSERT(sizeof(PxMaterialTableIndex)==sizeof(PxU16));
+// Serializes the cooked convex mesh to the stream: 'CVXM' header, hull data,
+// local bounds, mass properties, optional gauss map, internal-object metrics.
+// platformMismatch makes the write* helpers byte-swap for the target platform.
+bool ConvexMeshBuilder::save(PxOutputStream& stream, bool platformMismatch) const
+{
+	// Export header
+	if(!writeHeader('C', 'V', 'X', 'M', PX_CONVEX_VERSION, platformMismatch, stream))
+		return false;
+
+	// Export serialization flags (currently always 0; kept for format compatibility)
+	PxU32 serialFlags = 0;
+
+	writeDword(serialFlags, platformMismatch, stream);
+
+	if(!hullBuilder.save(stream, platformMismatch))
+		return false;
+
+	// Export local bounds
+//	writeFloat(geomEpsilon, platformMismatch, stream);
+	writeFloat(0.0f, platformMismatch, stream);
+	writeFloat(mHullData.mAABB.getMin(0), platformMismatch, stream);
+	writeFloat(mHullData.mAABB.getMin(1), platformMismatch, stream);
+	writeFloat(mHullData.mAABB.getMin(2), platformMismatch, stream);
+	writeFloat(mHullData.mAABB.getMax(0), platformMismatch, stream);
+	writeFloat(mHullData.mAABB.getMax(1), platformMismatch, stream);
+	writeFloat(mHullData.mAABB.getMax(2), platformMismatch, stream);
+
+	// Export mass info
+	writeFloat(mMass, platformMismatch, stream);
+	writeFloatBuffer(reinterpret_cast<const PxF32*>(&mInertia), 9, platformMismatch, stream);
+	writeFloatBuffer(&mHullData.mCenterOfMass.x, 3, platformMismatch, stream);
+
+	// Export gaussmaps; presence is encoded as a float flag (+1 present, -1 absent)
+	if(mBigConvexData)
+	{
+		writeFloat(1.0f, platformMismatch, stream);		//gauss map flag true
+		BigConvexDataBuilder SVMB(&mHullData, mBigConvexData, hullBuilder.mHullDataHullVertices);
+		SVMB.save(stream, platformMismatch);
+	}
+	else
+		writeFloat(-1.0f, platformMismatch, stream);	//gauss map flag false
+
+// TEST_INTERNAL_OBJECTS
+	writeFloat(mHullData.mInternal.mRadius, platformMismatch, stream);
+	writeFloat(mHullData.mInternal.mExtents[0], platformMismatch, stream);
+	writeFloat(mHullData.mInternal.mExtents[1], platformMismatch, stream);
+	writeFloat(mHullData.mInternal.mExtents[2], platformMismatch, stream);
+//~TEST_INTERNAL_OBJECTS
+	return true;
+}
+
+//////////////////////////////////////////////////////////////////////////
+// instead of saving the data into stream, we copy the mesh data
+// into internal Gu::ConvexMesh.
+bool ConvexMeshBuilder::copy(Gu::ConvexHullData& hullData)
+{
+	// hull builder data copy
+	hullBuilder.copy(hullData);
+
+	// mass props
+	hullData.mAABB = mHullData.mAABB;
+	hullData.mCenterOfMass = mHullData.mCenterOfMass;
+
+	// big convex data
+	// NOTE(review): only the pointer is shared - the BigConvexData object stays
+	// owned by this builder, so hullData must not outlive it (or ownership must
+	// be transferred via setBigConvexData/getBigConvexData). Verify at call sites.
+	if(mBigConvexData)
+	{
+		hullData.mBigConvexRawData = &mBigConvexData->mData;
+	}
+	else
+		hullData.mBigConvexRawData = NULL;
+
+	// internal data
+	hullData.mInternal.mRadius = mHullData.mInternal.mRadius;
+	hullData.mInternal.mExtents[0] = mHullData.mInternal.mExtents[0];
+	hullData.mInternal.mExtents[1] = mHullData.mInternal.mExtents[1];
+	hullData.mInternal.mExtents[2] = mHullData.mInternal.mExtents[2];
+
+	return true;
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+// compute mass and inertia of the convex mesh
+// Computes unit-density mass, origin inertia tensor and center of mass of the
+// cooked hull via Eberly volume integration. lowerPrecision selects the SIMD
+// (faster, lower precision) integration path. Results are cached in
+// mMass/mInertia/mHullData.mCenterOfMass; a failure leaves mMass at 0 and
+// reports an internal error.
+void ConvexMeshBuilder::computeMassInfo(bool lowerPrecision)
+{
+	if(mMass <= 0.0f) //not yet computed.
+	{
+		PxIntegrals integrals;
+		// the descriptor aliases the builder's internal hull buffers - no copies made
+		PxConvexMeshDesc meshDesc;
+		meshDesc.points.count = mHullData.mNbHullVertices;
+		meshDesc.points.data = hullBuilder.mHullDataHullVertices;
+		meshDesc.points.stride = sizeof(PxVec3);
+
+		meshDesc.polygons.data = hullBuilder.mHullDataPolygons;
+		meshDesc.polygons.stride = sizeof(Gu::HullPolygonData);
+		meshDesc.polygons.count = hullBuilder.mHull->mNbPolygons;
+
+		meshDesc.indices.data = hullBuilder.mHullDataVertexData8;
+
+		// using the centroid of the convex for the volume integration solved accuracy issues in cases where the inertia tensor
+		// ended up close to not being positive definite and after a few further transforms the diagonalized inertia tensor ended
+		// up with negative values.
+		PxVec3 mean(0.0f);
+		for(PxU32 i=0; i < mHullData.mNbHullVertices; i++)
+			mean += hullBuilder.mHullDataHullVertices[i];
+		mean *= (1.0f / mHullData.mNbHullVertices);
+
+		bool status = lowerPrecision ?
+			computeVolumeIntegralsEberlySIMD(meshDesc, 1.0f, integrals, mean) : computeVolumeIntegralsEberly(meshDesc, 1.0f, integrals, mean);
+		if(status)
+		{
+
+			integrals.getOriginInertia(reinterpret_cast<PxMat33&>(mInertia));
+			mHullData.mCenterOfMass = integrals.COM;
+
+			//note: the mass will be negative for an inside-out mesh!
+			if(mInertia.column0.isFinite() && mInertia.column1.isFinite() && mInertia.column2.isFinite()
+				&& mHullData.mCenterOfMass.isFinite() && PxIsFinite(PxReal(integrals.mass)))
+			{
+				if (integrals.mass < 0)
+				{
+					// reversed winding / open mesh: flip sign rather than fail
+					Ps::getFoundation().error(PX_WARN, "Gu::ConvexMesh: Mesh has a negative volume! Is it open or do (some) faces have reversed winding? (Taking absolute value.)");
+					integrals.mass = -integrals.mass;
+					mInertia = -mInertia;
+				}
+
+				mMass = PxReal(integrals.mass); //set mass to valid value.
+				return;
+			}
+		}
+		Ps::getFoundation().error(PxErrorCode::eINTERNAL_ERROR, __FILE__, __LINE__, "Gu::ConvexMesh: Error computing mesh mass properties!\n");
+	}
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+#if PX_VC
+#pragma warning(push)
+#pragma warning(disable:4996) // permitting use of gatherStrided until we have a replacement.
+#endif
+
+// Gathers possibly-strided descriptor buffers into contiguous local copies,
+// initializes the hull builder from them, and computes mass properties.
+// NOTE(review): points/indices/polygons are gathered with PxAlloca (stack) -
+// very large descriptors could exhaust stack space; confirm upstream limits.
+bool ConvexMeshBuilder::loadConvexHull(const PxConvexMeshDesc& desc, PxU32 gaussMapVertexLimit, bool userPolygons)
+{
+	// gather points
+	PxVec3* geometry = reinterpret_cast<PxVec3*>(PxAlloca(sizeof(PxVec3)*desc.points.count));
+	Cooking::gatherStrided(desc.points.data, geometry, desc.points.count, sizeof(PxVec3), desc.points.stride);
+
+	PxU32* topology = NULL;
+
+	// gather indices
+	// store the indices into topology if we have the polygon data
+	if(desc.indices.data)
+	{
+		topology = reinterpret_cast<PxU32*>(PxAlloca(sizeof(PxU32)*desc.indices.count));
+		if (desc.flags & PxConvexFlag::e16_BIT_INDICES)
+		{
+			// conversion; 16 bit index -> 32 bit index & stride
+			// NOTE(review): this path reads 3 PxU16 per stride step, i.e. it assumes
+			// triangle-organized indices and indices.count divisible by 3 - presumably
+			// guaranteed by desc validation; confirm against desc.isValid().
+			PxU32* dest = topology;
+			const PxU32* pastLastDest = topology + desc.indices.count;
+			const PxU8* source = reinterpret_cast<const PxU8*>(desc.indices.data);
+			while (dest < pastLastDest)
+			{
+				const PxU16 * trig16 = reinterpret_cast<const PxU16*>(source);
+				*dest++ = trig16[0];
+				*dest++ = trig16[1];
+				*dest++ = trig16[2];
+				source += desc.indices.stride;
+			}
+		}
+		else
+		{
+			Cooking::gatherStrided(desc.indices.data, topology, desc.indices.count, sizeof(PxU32), desc.indices.stride);
+		}
+	}
+
+	// gather polygons
+	PxHullPolygon* hullPolygons = NULL;
+	if(desc.polygons.data)
+	{
+		hullPolygons = reinterpret_cast<PxHullPolygon*>(PxAlloca(sizeof(PxHullPolygon)*desc.polygons.count));
+		Cooking::gatherStrided(desc.polygons.data,hullPolygons,desc.polygons.count,sizeof(PxHullPolygon),desc.polygons.stride);
+
+		// if user polygons, make sure the largest one is the first one
+		if (userPolygons)
+		{
+			PxU32 largestPolygon = 0;
+			for (PxU32 i = 1; i < desc.polygons.count; i++)
+			{
+				if(hullPolygons[i].mNbVerts > hullPolygons[largestPolygon].mNbVerts)
+					largestPolygon = i;
+			}
+			if(largestPolygon != 0)
+			{
+				// swap polygon 0 with the largest one
+				PxHullPolygon movedPolygon = hullPolygons[0];
+				hullPolygons[0] = hullPolygons[largestPolygon];
+				hullPolygons[largestPolygon] = movedPolygon;
+			}
+		}
+	}
+
+	const bool doValidation = desc.flags & PxConvexFlag::eDISABLE_MESH_VALIDATION ? false : true;
+	if(!hullBuilder.init(desc.points.count, geometry, topology, desc.indices.count, desc.polygons.count, hullPolygons, gaussMapVertexLimit, doValidation))
+	{
+		Ps::getFoundation().error(PxErrorCode::eINTERNAL_ERROR, __FILE__, __LINE__, "Gu::ConvexMesh::loadConvexHull: convex hull init failed!");
+		return false;
+	}
+	computeMassInfo(desc.flags & PxConvexFlag::eFAST_INERTIA_COMPUTATION);
+
+	return true;
+}
+
+#if PX_VC
+#pragma warning(pop)
+#endif
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+// compute polygons from given triangles. This is support function used in extensions. We do not accept triangles as an input for convex mesh desc.
+// Computes convex hull polygons from a triangle soup (extensions helper; the
+// cooking descriptor itself does not accept triangles). On success the output
+// vertex/index/polygon buffers are allocated from inAllocator - the CALLER
+// owns them and must deallocate with the same allocator.
+// NOTE(review): "dont" in the error string below is a typo for "don't"
+// (runtime string, deliberately left untouched here).
+bool ConvexMeshBuilder::computeHullPolygons(const PxU32& nbVerts,const PxVec3* verts, const PxU32& nbTriangles, const PxU32* triangles, PxAllocatorCallback& inAllocator,
+	PxU32& outNbVerts, PxVec3*& outVertices , PxU32& nbIndices, PxU32*& indices, PxU32& nbPolygons, PxHullPolygon*& polygons)
+{
+	if(!hullBuilder.computeHullPolygons(nbVerts,verts,nbTriangles,triangles))
+	{
+		Ps::getFoundation().error(PxErrorCode::eINTERNAL_ERROR, __FILE__, __LINE__, "ConvexMeshBuilder::computeHullPolygons: compute convex hull polygons failed. Provided triangles dont form a convex hull.");
+		return false;
+	}
+
+	outNbVerts = hullBuilder.mHull->mNbHullVertices;
+	nbPolygons = hullBuilder.mHull->mNbPolygons;
+
+	outVertices = reinterpret_cast<PxVec3*>(inAllocator.allocate(outNbVerts*sizeof(PxVec3),"PxVec3",__FILE__,__LINE__));
+	PxMemCopy(outVertices,hullBuilder.mHullDataHullVertices,outNbVerts*sizeof(PxVec3));
+
+	// total index count = sum of per-polygon vertex counts
+	nbIndices = 0;
+	for (PxU32 i = 0; i < nbPolygons; i++)
+	{
+		nbIndices += hullBuilder.mHullDataPolygons[i].mNbVerts;
+	}
+
+	// widen the internal 8-bit vertex references to PxU32 for the public API
+	indices = reinterpret_cast<PxU32*>(inAllocator.allocate(nbIndices*sizeof(PxU32),"PxU32",__FILE__,__LINE__));
+	for (PxU32 i = 0; i < nbIndices; i++)
+	{
+		indices[i] = hullBuilder.mHullDataVertexData8[i];
+	}
+
+	polygons = reinterpret_cast<PxHullPolygon*>(inAllocator.allocate(nbPolygons*sizeof(PxHullPolygon),"PxHullPolygon",__FILE__,__LINE__));
+
+	for (PxU32 i = 0; i < nbPolygons; i++)
+	{
+		const Gu::HullPolygonData& polygonData = hullBuilder.mHullDataPolygons[i];
+		PxHullPolygon& outPolygon = polygons[i];
+		outPolygon.mPlane[0] = polygonData.mPlane.n.x;
+		outPolygon.mPlane[1] = polygonData.mPlane.n.y;
+		outPolygon.mPlane[2] = polygonData.mPlane.n.z;
+		outPolygon.mPlane[3] = polygonData.mPlane.d;
+
+		outPolygon.mNbVerts = polygonData.mNbVerts;
+		outPolygon.mIndexBase = polygonData.mVRef8;
+
+		// debug-only consistency check: widened indices match the internal 8-bit data
+		// (loop compiles to nothing in release builds)
+		for (PxU32 j = 0; j < polygonData.mNbVerts; j++)
+		{
+			PX_ASSERT(indices[outPolygon.mIndexBase + j] == hullBuilder.mHullDataVertexData8[polygonData.mVRef8+j]);
+		}
+	}
+
+	return true;
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+// compute big convex data
+// Builds the BigConvexData (gauss map + valencies) acceleration structure for
+// large hulls. Replaces any previously built data. Always returns true.
+bool ConvexMeshBuilder::computeGaussMaps()
+{
+	// The number of polygons is limited to 256 because the gaussmap encode 256 polys maximum
+
+	// gauss map sampling density; alternatives kept below for experimentation
+	PxU32 density = 16;
+	//	density = 64;
+	//	density = 8;
+	//	density = 2;
+
+	PX_DELETE(mBigConvexData);
+	PX_NEW_SERIALIZED(mBigConvexData,BigConvexData);
+	BigConvexDataBuilder SVMB(&mHullData, mBigConvexData, hullBuilder.mHullDataHullVertices);
+	// valencies we need to compute first, they are needed for min/max precompute
+	SVMB.computeValencies(hullBuilder);
+	SVMB.precompute(density);
+
+	return true;
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+// TEST_INTERNAL_OBJECTS
+
+// Computes the internal box extents (data.mInternal.mExtents) of the hull:
+// a box centered at the center of mass, guaranteed to stay inside the hull,
+// grown greedily along the largest AABB axis first and then refined on the
+// two remaining axes via ray-vs-plane distances. Requires data.mInternal.mRadius
+// (internal sphere radius) to be computed beforehand.
+static void ComputeInternalExtent(Gu::ConvexHullData& data, const Gu::HullPolygonData* hullPolys)
+{
+	const PxVec3 e = data.mAABB.getMax() - data.mAABB.getMin();
+
+	// PT: For that formula, see \\sw\physx\PhysXSDK\3.4\trunk\InternalDocumentation\Cooking\InternalExtents.png
+	// r = half-edge of the largest cube inscribed in the internal sphere
+	const float r = data.mInternal.mRadius / sqrtf(3.0f);
+
+	const float epsilon = 1E-7f;
+
+	// order axes: largestExtent, then e0 >= e1 among the remaining two
+	const PxU32 largestExtent = Ps::largestAxis(e);
+	PxU32 e0 = Ps::getNextIndex3(largestExtent);
+	PxU32 e1 = Ps::getNextIndex3(e0);
+	if(e[e0] < e[e1])
+		Ps::swap<PxU32>(e0,e1);
+
+	data.mInternal.mExtents[0] = FLT_MAX;
+	data.mInternal.mExtents[1] = FLT_MAX;
+	data.mInternal.mExtents[2] = FLT_MAX;
+
+	// PT: the following code does ray-vs-plane raycasts.
+
+	// find the largest box along the largest extent, with given internal radius
+	for(PxU32 i = 0; i < data.mNbPolygons; i++)
+	{
+		// concurrent with search direction
+		// skip planes (nearly) parallel to the search axis
+		const float d = hullPolys[i].mPlane.n[largestExtent];
+		if((-epsilon < d && d < epsilon))
+			continue;
+
+		const float numBase = -hullPolys[i].mPlane.d - hullPolys[i].mPlane.n.dot(data.mCenterOfMass);
+		const float denBase = 1.0f/hullPolys[i].mPlane.n[largestExtent];
+		const float numn0 = r * hullPolys[i].mPlane.n[e0];
+		const float numn1 = r * hullPolys[i].mPlane.n[e1];
+
+		// test the four box corners offset by +-r on e0/e1; keep the tightest extent,
+		// clamped below by r so the box never shrinks inside the internal sphere
+		float num = numBase - numn0 - numn1;
+		float ext = PxMax(fabsf(num*denBase), r);
+		if(ext < data.mInternal.mExtents[largestExtent])
+			data.mInternal.mExtents[largestExtent] = ext;
+
+		num = numBase - numn0 + numn1;
+		ext = PxMax(fabsf(num *denBase), r);
+		if(ext < data.mInternal.mExtents[largestExtent])
+			data.mInternal.mExtents[largestExtent] = ext;
+
+		num = numBase + numn0 + numn1;
+		ext = PxMax(fabsf(num *denBase), r);
+		if(ext < data.mInternal.mExtents[largestExtent])
+			data.mInternal.mExtents[largestExtent] = ext;
+
+		num = numBase + numn0 - numn1;
+		ext = PxMax(fabsf(num *denBase), r);
+		if(ext < data.mInternal.mExtents[largestExtent])
+			data.mInternal.mExtents[largestExtent] = ext;
+	}
+
+	// Refine the box along e0,e1
+	// NOTE(review): only mExtents[e0] is refined here (both diagonal directions
+	// denumAdd/denumSub couple e0 and e1), then e1 is set equal to e0 at the end,
+	// giving a square cross-section - presumably intentional; confirm.
+	for(PxU32 i = 0; i < data.mNbPolygons; i++)
+	{
+		const float denumAdd = hullPolys[i].mPlane.n[e0] + hullPolys[i].mPlane.n[e1];
+		const float denumSub = hullPolys[i].mPlane.n[e0] - hullPolys[i].mPlane.n[e1];
+
+		const float numBase = -hullPolys[i].mPlane.d - hullPolys[i].mPlane.n.dot(data.mCenterOfMass);
+		const float numn0 = data.mInternal.mExtents[largestExtent] * hullPolys[i].mPlane.n[largestExtent];
+
+		if(!(-epsilon < denumAdd && denumAdd < epsilon))
+		{
+			float num = numBase - numn0;
+			float ext = PxMax(fabsf(num/ denumAdd), r);
+			if(ext < data.mInternal.mExtents[e0])
+				data.mInternal.mExtents[e0] = ext;
+
+			num = numBase + numn0;
+			ext = PxMax(fabsf(num / denumAdd), r);
+			if(ext < data.mInternal.mExtents[e0])
+				data.mInternal.mExtents[e0] = ext;
+		}
+
+		if(!(-epsilon < denumSub && denumSub < epsilon))
+		{
+			float num = numBase - numn0;
+			float ext = PxMax(fabsf(num / denumSub), r);
+			if(ext < data.mInternal.mExtents[e0])
+				data.mInternal.mExtents[e0] = ext;
+
+			num = numBase + numn0;
+			ext = PxMax(fabsf(num / denumSub), r);
+			if(ext < data.mInternal.mExtents[e0])
+				data.mInternal.mExtents[e0] = ext;
+		}
+	}
+	data.mInternal.mExtents[e1] = data.mInternal.mExtents[e0];
+}
+
+//////////////////////////////////////////////////////////////////////////
+// compute internal objects, get the internal extent and radius
+// Computes the internal objects used for fast contact generation:
+// the internal radius (largest sphere around the center of mass that stays
+// inside every hull plane) and, from it, the internal box extents.
+void ConvexMeshBuilder::computeInternalObjects()
+{
+	const Gu::HullPolygonData* hullPolys = hullBuilder.mHullDataPolygons;
+	Gu::ConvexHullData& data = mHullData;
+
+	// compute the internal radius: min distance from the center of mass to any facet plane
+	data.mInternal.mRadius = FLT_MAX;
+	for(PxU32 i=0;i<data.mNbPolygons;i++)
+	{
+		const float dist = fabsf(hullPolys[i].mPlane.distance(data.mCenterOfMass));
+		if(dist<data.mInternal.mRadius)
+			data.mInternal.mRadius = dist;
+	}
+
+	// extents depend on mRadius, so this must run after the loop above
+	ComputeInternalExtent(data, hullPolys);
+}
+
+//~TEST_INTERNAL_OBJECTS
diff --git a/PhysX_3.4/Source/PhysXCooking/src/convex/ConvexMeshBuilder.h b/PhysX_3.4/Source/PhysXCooking/src/convex/ConvexMeshBuilder.h
new file mode 100644
index 00000000..57e0ca97
--- /dev/null
+++ b/PhysX_3.4/Source/PhysXCooking/src/convex/ConvexMeshBuilder.h
@@ -0,0 +1,100 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2016 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+
+#ifndef PX_COLLISION_CONVEXMESHBUILDER
+#define PX_COLLISION_CONVEXMESHBUILDER
+
+#include "GuConvexMeshData.h"
+#include "PxCooking.h"
+#include "ConvexPolygonsBuilder.h"
+
+namespace physx
+{
+ //////////////////////////////////////////////////////////////////////////
+ // Convex mesh builder, creates the convex mesh from given polygons and creates internal data
+	class ConvexMeshBuilder
+	{
+	public:
+								ConvexMeshBuilder(const bool buildGRBData);
+								~ConvexMeshBuilder();
+
+				// loads the computed or given convex hull from descriptor.
+				// the descriptor does contain polygons directly, triangles are not allowed
+				bool			build(const PxConvexMeshDesc&, PxU32 gaussMapVertexLimit, bool validateOnly = false, bool userPolygons = false);
+
+				// save the convex mesh into stream
+				bool			save(PxOutputStream& stream, bool platformMismatch)	const;
+
+				// copy the convex mesh into internal convex mesh, which can be directly used then
+				bool			copy(Gu::ConvexHullData& convexData);
+
+				// loads the convex mesh from given polygons
+				bool			loadConvexHull(const PxConvexMeshDesc&, PxU32 gaussMapVertexLimit, bool userPolygons);
+
+				// computed hull polygons from given triangles
+				bool			computeHullPolygons(const PxU32& nbVerts,const PxVec3* verts, const PxU32& nbTriangles, const PxU32* triangles, PxAllocatorCallback& inAllocator,
+									PxU32& outNbVerts, PxVec3*& outVertices, PxU32& nbIndices, PxU32*& indices, PxU32& nbPolygons, PxHullPolygon*& polygons);
+
+				// compute big convex data
+				bool			computeGaussMaps();
+
+				// compute mass, inertia tensor
+				void			computeMassInfo(bool lowerPrecision);
+// TEST_INTERNAL_OBJECTS
+				// internal objects
+				void			computeInternalObjects();
+//~TEST_INTERNAL_OBJECTS
+
+				// return computed mass
+				PxReal			getMass() const { return mMass; }
+
+				// return computed inertia tensor
+				const PxMat33&	getInertia() const { return mInertia; }
+
+				// return big convex data
+				BigConvexData*	getBigConvexData() const { return mBigConvexData; }
+
+				// set big convex data
+				// NOTE(review): takes over the pointer without releasing any existing
+				// mBigConvexData - callers presumably only use this on a fresh builder; confirm.
+				void			setBigConvexData(BigConvexData* data) { mBigConvexData = data; }
+
+		// mutable so const save() can use the builder; public for direct cooking access
+		mutable	ConvexPolygonsBuilder hullBuilder;
+
+	protected:
+		Gu::ConvexHullData		mHullData;
+
+		BigConvexData*			mBigConvexData;		//!< optional, only for large meshes! PT: redundant with ptr in chull data? Could also be end of other buffer
+		PxReal					mMass;				//this is mass assuming a unit density that can be scaled by instances!
+		PxMat33					mInertia;			//in local space of mesh!
+
+	};
+
+}
+
+#endif
diff --git a/PhysX_3.4/Source/PhysXCooking/src/convex/ConvexPolygonsBuilder.cpp b/PhysX_3.4/Source/PhysXCooking/src/convex/ConvexPolygonsBuilder.cpp
new file mode 100644
index 00000000..44725819
--- /dev/null
+++ b/PhysX_3.4/Source/PhysXCooking/src/convex/ConvexPolygonsBuilder.cpp
@@ -0,0 +1,1328 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2016 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+#include "foundation/PxMemory.h"
+#include "EdgeList.h"
+#include "Adjacencies.h"
+#include "MeshCleaner.h"
+#include "CmRadixSortBuffered.h"
+#include "CookingUtils.h"
+#include "PsArray.h"
+#include "PsFoundation.h"
+
+#include "ConvexPolygonsBuilder.h"
+
+
+using namespace physx;
+
+#define USE_PRECOMPUTED_HULL_PROJECTION
+
+// Reverses the winding of a hull triangle by exchanging its second and third
+// vertex references.
+static PX_INLINE void Flip(HullTriangleData& data)
+{
+	Ps::swap(data.mRef[1], data.mRef[2]);
+}
+
+//////////////////////////////////////////////////////////////////////////
+//! A generic couple structure
+// Unordered pair of 32-bit vertex references; used below to represent one
+// line segment (edge) while extracting polygon boundaries.
+class Pair : public Ps::UserAllocated
+{
+public:
+	PX_FORCE_INLINE Pair() {}
+	PX_FORCE_INLINE Pair(PxU32 i0, PxU32 i1) : id0(i0), id1(i1) {}
+	PX_FORCE_INLINE ~Pair() {}
+
+	//! Operator for "if(Pair==Pair)" — compares member-wise, so (a,b) != (b,a)
+	PX_FORCE_INLINE bool operator==(const Pair& p) const { return (id0==p.id0) && (id1==p.id1); }
+	//! Operator for "if(Pair!=Pair)"
+	PX_FORCE_INLINE bool operator!=(const Pair& p) const { return (id0!=p.id0) || (id1!=p.id1); }
+
+	PxU32 id0; //!< First index of the pair
+	PxU32 id1; //!< Second index of the pair
+};
+// Layout is relied upon (two packed PxU32) — keep this assert next to the class.
+PX_COMPILE_TIME_ASSERT(sizeof(Pair)==8);
+
+//////////////////////////////////////////////////////////////////////////
+// construct a plane
+// Builds the plane passing through the three vertices of triangle t.
+// T must expose an indexable member v[] holding vertex indices into verts.
+template <class T>
+PX_INLINE PxPlane PlaneEquation(const T& t, const PxVec3* verts)
+{
+	return PxPlane(verts[t.v[0]], verts[t.v[1]], verts[t.v[2]]);
+}
+
+//////////////////////////////////////////////////////////////////////////
+// negate plane
+// Flips a hull polygon's plane so its normal points the opposite way
+// (both n and d are negated, so the plane itself is unchanged geometrically).
+static PX_FORCE_INLINE void negatePlane(Gu::HullPolygonData& data)
+{
+	PxPlane& plane = data.mPlane;
+	plane.n = -plane.n;
+	plane.d = -plane.d;
+}
+
+//////////////////////////////////////////////////////////////////////////
+// Inverse a buffer in-place
+// Reverses an index buffer in place. Returns false for an empty/null buffer.
+static bool inverseBuffer(PxU32 nbEntries, PxU8* entries)
+{
+	if(!nbEntries || !entries)
+		return false;
+
+	// Two-pointer reversal: swap symmetric elements until the pointers meet.
+	PxU8* head = entries;
+	PxU8* tail = entries + nbEntries - 1;
+	while(head < tail)
+		Ps::swap(*head++, *tail--);
+
+	return true;
+}
+
+//////////////////////////////////////////////////////////////////////////
+// Extracts a line-strip from a list of non-sorted line-segments (slow)
+// Chains unordered boundary segments into one closed line strip.
+// \param lineStrip [out] ordered vertex refs; the first vertex is repeated at the end
+// \param lineSegments [in] unordered segments; segments appearing twice are interior edges
+// \return true if all segments could be chained into a single strip
+static bool findLineStrip(Ps::Array<PxU32>& lineStrip, const Ps::Array<Pair>& lineSegments)
+{
+	// Ex:
+	//
+	// 4-2
+	// 0-1
+	// 2-3
+	// 4-0
+	// 7-3
+	// 7-1
+	//
+	// => 0-1-7-3-2-4-0
+
+	// 0-0-1-1-2-2-3-3-4-4-7-7
+
+	// 0-1
+	// 0-4
+	// 1-7
+	// 2-3
+	// 2-4
+	// 3-7
+
+	// Naive implementation below
+
+	// Working copy: segments are consumed as they get chained into the strip.
+	Ps::Array<Pair> Copy(lineSegments);
+
+RunAgain:
+	{
+		// Remove segments present twice (in either orientation): such edges are
+		// shared by two triangles of the same polygon and are not on its boundary.
+		PxU32 nbSegments = Copy.size();
+		for(PxU32 j=0;j<nbSegments;j++)
+		{
+			PxU32 ID0 = Copy[j].id0;
+			PxU32 ID1 = Copy[j].id1;
+
+			for(PxU32 i=j+1;i<nbSegments;i++)
+			{
+				if(
+					(Copy[i].id0==ID0 && Copy[i].id1==ID1)
+					|| (Copy[i].id1==ID0 && Copy[i].id0==ID1)
+					)
+				{
+					// Duplicate segment found => remove both
+					PX_ASSERT(Copy.size()>=2);
+					Copy.remove(i);
+					Copy.remove(j);
+					// Indices are invalidated by remove() => restart the scan.
+					goto RunAgain;
+				}
+			}
+		}
+		// Goes through when everything's fine
+	}
+
+	// Seed the strip with the first remaining segment.
+	PxU32 ref0 = 0xffffffff;
+	PxU32 ref1 = 0xffffffff;
+	if(Copy.size()>=1)
+	{
+		Pair* Segments = Copy.begin();
+		if(Segments)
+		{
+			ref0 = Segments->id0;
+			ref1 = Segments->id1;
+			lineStrip.pushBack(ref0);
+			lineStrip.pushBack(ref1);
+			PX_ASSERT(Copy.size()>=1);
+			Copy.remove(0);
+		}
+	}
+
+Wrap:
+	// Look for same vertex ref in remaining segments
+	PxU32 nb = Copy.size();
+	if(!nb)
+	{
+		// ### check the line is actually closed?
+		return true;
+	}
+
+	for(PxU32 i=0;i<nb;i++)
+	{
+		PxU32 newRef0 = Copy[i].id0;
+		PxU32 newRef1 = Copy[i].id1;
+
+		// We look for Ref1 only
+		if(newRef0==ref1)
+		{
+			// r0 - r1
+			// r1 - x
+			lineStrip.pushBack(newRef1); // Output the other reference
+			ref0 = newRef0;
+			ref1 = newRef1;
+			Copy.remove(i);
+			goto Wrap;
+		}
+		else if(newRef1==ref1)
+		{
+			// r0 - r1
+			// x - r1 => r1 - x
+			lineStrip.pushBack(newRef0); // Output the other reference
+			ref0 = newRef1;
+			ref1 = newRef0;
+			Copy.remove(i);
+			goto Wrap;
+		}
+	}
+	// No segment continues the strip => the input was not one closed loop.
+	return false;
+}
+
+//////////////////////////////////////////////////////////////////////////
+// Test for duplicate triangles
+// The duplicate test below aliases each index triple as a PxVec3 so the radix
+// vertex-cloud reducer can find exact duplicate triangles; layouts must match.
+PX_COMPILE_TIME_ASSERT(sizeof(Gu::TriangleT<PxU32>)==sizeof(PxVec3)); // ...
+// Detects duplicate triangles; when 'repair' is set, the face list is compacted
+// to the unique set and nbFaces updated. Returns true when no duplicates exist.
+static bool TestDuplicateTriangles(PxU32& nbFaces, Gu::TriangleT<PxU32>* faces, bool repair)
+{
+	if(!nbFaces || !faces)
+		return true;
+
+	// Stack copy of the indices, reinterpreted as points for the reducer.
+	Gu::TriangleT<PxU32>* indices32 = reinterpret_cast<Gu::TriangleT<PxU32>*>(PxAlloca(nbFaces*sizeof(Gu::TriangleT<PxU32>)));
+	for(PxU32 i=0;i<nbFaces;i++)
+	{
+		indices32[i].v[0] = faces[i].v[0];
+		indices32[i].v[1] = faces[i].v[1];
+		indices32[i].v[2] = faces[i].v[2];
+	}
+
+	// Radix-sort power...
+	ReducedVertexCloud reducer(reinterpret_cast<PxVec3*>(indices32), nbFaces);
+	REDUCEDCLOUD rc;
+	reducer.Reduce(&rc);
+	// Fewer "reduced vertices" than input faces => duplicates were present.
+	if(rc.NbRVerts<nbFaces)
+	{
+		if(repair)
+		{
+			nbFaces = rc.NbRVerts;
+			for(PxU32 i=0;i<nbFaces;i++)
+			{
+				const Gu::TriangleT<PxU32>* curTri = reinterpret_cast<const Gu::TriangleT<PxU32>*>(&rc.RVerts[i]);
+				faces[i].v[0] = curTri->v[0];
+				faces[i].v[1] = curTri->v[1];
+				faces[i].v[2] = curTri->v[2];
+			}
+		}
+		return false; // Test failed
+	}
+	return true; // Test succeeded
+}
+
+//////////////////////////////////////////////////////////////////////////
+// plane culling test
+// Returns true when 'center' lies on the positive side of the triangle's plane,
+// i.e. the face is visible from the hull's interior point and thus badly wound.
+static PX_FORCE_INLINE bool testCulling(const Gu::TriangleT<PxU32>& triangle, const PxVec3* verts, const PxVec3& center)
+{
+	return PxPlane(verts[triangle.v[0]], verts[triangle.v[1]], verts[triangle.v[2]]).distance(center) > 0.0f;
+}
+
+//////////////////////////////////////////////////////////////////////////
+// face normals test
+// Checks (and optionally repairs) face winding so that every face normal points
+// away from the hull's geometric center. Returns true when all faces were
+// already correctly oriented; false inputs also return false.
+static bool TestUnifiedNormals(PxU32 nbVerts, const PxVec3* verts, PxU32 nbFaces, Gu::TriangleT<PxU32>* faces, bool repair)
+{
+	if(!nbVerts || !verts || !nbFaces || !faces)
+		return false;
+
+	// Unify normals so that all hull faces are well oriented
+
+	// Compute geometric center - we need a vertex inside the hull
+	const float coeff = 1.0f / float(nbVerts);
+	PxVec3 geomCenter(0.0f, 0.0f, 0.0f);
+	for(PxU32 i=0;i<nbVerts;i++)
+	{
+		geomCenter.x += verts[i].x * coeff;
+		geomCenter.y += verts[i].y * coeff;
+		geomCenter.z += verts[i].z * coeff;
+	}
+
+	// We know the hull is (hopefully) convex so we can easily test whether a point is inside the hull or not.
+	// The previous geometric center must be invisible from any hull face: that's our test to decide whether a normal
+	// must be flipped or not.
+	bool status = true;
+	for(PxU32 i=0;i<nbFaces;i++)
+	{
+		// Test face visibility from the geometric center (supposed to be inside the hull).
+		// All faces must be invisible from this point to ensure a strict CCW order.
+		if(testCulling(faces[i], verts, geomCenter))
+		{
+			// Visible face => wrong winding: flip when repairing, record failure.
+			if(repair) faces[i].flip();
+			status = false;
+		}
+	}
+
+	return status;
+}
+
+//////////////////////////////////////////////////////////////////////////
+// clean the mesh
+// Welds/cleans the raw triangle mesh in place: 'verts' and 'faces' are
+// overwritten with the cleaned data and the counts updated, then duplicates
+// are removed and the winding unified (twice, since each repair can expose
+// new issues for the other test). Returns false when the cleaner rejects the
+// mesh entirely (no triangles survive).
+static bool CleanFaces(PxU32& nbFaces, Gu::TriangleT<PxU32>* faces, PxU32& nbVerts, PxVec3* verts)
+{
+	// Brute force mesh cleaning.
+	// PT: I added this back on Feb-18-05 because it fixes bugs with hulls from QHull.
+	MeshCleaner cleaner(nbVerts, verts, nbFaces, faces->v, 0.0f);
+	if (!cleaner.mNbTris)
+		return false;
+
+	// Cleaned counts never exceed the originals, so in-place copy-back is safe.
+	nbVerts = cleaner.mNbVerts;
+	nbFaces = cleaner.mNbTris;
+
+	PxMemCopy(verts, cleaner.mVerts, cleaner.mNbVerts*sizeof(PxVec3));
+
+	for (PxU32 i = 0; i < cleaner.mNbTris; i++)
+	{
+		faces[i].v[0] = cleaner.mIndices[i * 3 + 0];
+		faces[i].v[1] = cleaner.mIndices[i * 3 + 1];
+		faces[i].v[2] = cleaner.mIndices[i * 3 + 2];
+	}
+
+	// Get rid of duplicates
+	TestDuplicateTriangles(nbFaces, faces, true);
+
+	// Unify normals
+	TestUnifiedNormals(nbVerts, verts, nbFaces, faces, true);
+
+	// Remove zero-area triangles
+	// TestZeroAreaTriangles(nbFaces, faces, verts, true);
+
+	// Unify normals again
+	TestUnifiedNormals(nbVerts, verts, nbFaces, faces, true);
+
+	// Get rid of duplicates again
+	TestDuplicateTriangles(nbFaces, faces, true);
+
+	return true;
+}
+
+//////////////////////////////////////////////////////////////////////////
+// check the newly constructed faces
+// Validates freshly built hull faces: no duplicate triangles and a consistent
+// outward winding. Runs the repair-capable helpers in test-only mode (repair
+// = false), so despite the const_cast the data is never modified.
+static bool CheckFaces(PxU32 nbFaces, const Gu::TriangleT<PxU32>* faces, PxU32 nbVerts, const PxVec3* verts)
+{
+	// Remove const since we use functions that can do both testing & repairing. But we won't change the data.
+	Gu::TriangleT<PxU32>* mutableFaces = const_cast<Gu::TriangleT<PxU32>*>(faces);
+
+	// Same order as before: duplicates first, then unified normals.
+	return TestDuplicateTriangles(nbFaces, mutableFaces, false)
+		&& TestUnifiedNormals(nbVerts, verts, nbFaces, mutableFaces, false);
+}
+
+//////////////////////////////////////////////////////////////////////////
+// compute the newell plane from the face verts
+// Computes the polygon's plane using Newell's method: the normal is
+// accumulated over all edges, which is more robust for slightly non-planar
+// or near-collinear polygons than taking three arbitrary vertices. The plane
+// offset d is fitted so the plane passes through the polygon centroid.
+// Returns false for empty/null input.
+static bool computeNewellPlane(PxPlane& plane, PxU32 nbVerts, const PxU8* indices, const PxVec3* verts)
+{
+	if(!nbVerts || !indices || !verts)
+		return false;
+
+	// i trails j by one vertex, wrapping around (i starts at the last vertex).
+	PxVec3 centroid(0,0,0), normal(0,0,0);
+	for(PxU32 i=nbVerts-1, j=0; j<nbVerts; i=j, j++)
+	{
+		normal.x += (verts[indices[i]].y - verts[indices[j]].y) * (verts[indices[i]].z + verts[indices[j]].z);
+		normal.y += (verts[indices[i]].z - verts[indices[j]].z) * (verts[indices[i]].x + verts[indices[j]].x);
+		normal.z += (verts[indices[i]].x - verts[indices[j]].x) * (verts[indices[i]].y + verts[indices[j]].y);
+		centroid += verts[indices[j]];
+	}
+	plane.n = normal;
+	plane.n.normalize();
+	// centroid is still the vertex sum here; dividing by nbVerts folds the average in.
+	plane.d = -(centroid.dot(plane.n))/float(nbVerts);
+
+	return true;
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+/**
+* Analyses the redundant vertices found in the extracted polygons and, where a
+* vertex cannot simply be removed (one of its polygons is already a triangle),
+* splits the affected polygons back into triangles instead.
+* \relates ConvexHull
+* \fn checkRedundantVertices(PxU32& nb_polygons, Ps::Array<PxU32>& polygon_data, const ConvexPolygonsBuilder& hull, Ps::Array<PxU32>& triangle_data, Ps::Array<PxU32>& redundantVertices)
+* \param nb_polygons [in/out] number of polygons (updated when polygons get split)
+* \param polygon_data [in/out] polygon data: (Nb indices, index 0, index 1... index N)(Nb indices, index 0, index 1... index N)(...)
+* \param hull [in] convex hull
+* \param triangle_data [in/out] per-polygon triangle indices, rebuilt when polygons get split
+* \param redundantVertices [in/out] redundant vertices found inside the polygons - we want to remove them because of PCM; vertices handled by splitting are removed from this list
+*/
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+static void checkRedundantVertices(PxU32& nb_polygons, Ps::Array<PxU32>& polygon_data, const ConvexPolygonsBuilder& hull, Ps::Array<PxU32>& triangle_data, Ps::Array<PxU32>& redundantVertices)
+{
+	const PxU32* dFaces = reinterpret_cast<const PxU32*>(hull.getFaces());
+	bool needToSplitPolygons = false;
+
+	// Per-polygon flag: polygon must be split back into its triangles.
+	bool* polygonMarkers = reinterpret_cast<bool*>(PxAlloca(nb_polygons*sizeof(bool)));
+	PxMemZero(polygonMarkers, nb_polygons*sizeof(bool));
+
+	// Per-redundant-vertex flag: vertex is handled by splitting, don't report it.
+	bool* redundancyMarkers = reinterpret_cast<bool*>(PxAlloca(redundantVertices.size()*sizeof(bool)));
+	PxMemZero(redundancyMarkers, redundantVertices.size()*sizeof(bool));
+
+	// parse through the redundant vertices and if we cannot remove them split just the actual polygon if possible
+	Ps::Array<PxU32> polygonsContainer;
+	PxU32 numEntries = 0;
+	for (PxU32 i = redundantVertices.size(); i--;)
+	{
+		numEntries = 0;
+		polygonsContainer.clear();
+		// go through polygons, if polygons does have only 3 verts we cannot remove any vertex from it, try to decompose the second one
+		PxU32* Data = polygon_data.begin();
+		for(PxU32 t=0;t<nb_polygons;t++)
+		{
+			PxU32 nbVerts = *Data++;
+			PX_ASSERT(nbVerts>=3); // Else something very wrong happened...
+
+			for(PxU32 j=0;j<nbVerts;j++)
+			{
+				if(redundantVertices[i] == Data[j])
+				{
+					// Record (polygon index, vertex count) for every polygon using this vertex.
+					polygonsContainer.pushBack(t);
+					polygonsContainer.pushBack(nbVerts);
+					numEntries++;
+					break;
+				}
+			}
+			Data += nbVerts;
+		}
+
+		// A triangle cannot lose a vertex, so if any user polygon is a triangle
+		// this vertex cannot be removed and splitting is required instead.
+		bool needToSplit = false;
+		for (PxU32 j = 0; j < numEntries; j++)
+		{
+			PxU32 numInternalVertices = polygonsContainer[j*2 + 1];
+			if(numInternalVertices == 3)
+			{
+				needToSplit = true;
+			}
+		}
+
+		// now lets mark the polygons for split
+		if(needToSplit)
+		{
+			// mark the redundant vertex, it is solved by spliting, dont report it
+			needToSplitPolygons = true;
+			redundancyMarkers[i] = true;
+			for (PxU32 j = 0; j < numEntries; j++)
+			{
+				PxU32 polygonNumber = polygonsContainer[j*2];
+				PxU32 numInternalPolygons = polygonsContainer[j*2 + 1];
+				if(numInternalPolygons != 3)
+				{
+					polygonMarkers[polygonNumber] = true;
+				}
+			}
+		}
+	}
+
+	if(needToSplitPolygons)
+	{
+		// parse from the end so we can remove it and not change the order
+		for (PxU32 i = redundantVertices.size(); i--;)
+		{
+			// remove it
+			if(redundancyMarkers[i])
+			{
+				redundantVertices.remove(i);
+			}
+		}
+
+		// Rebuild the polygon/triangle streams, expanding marked polygons into
+		// one 3-vertex polygon per source triangle.
+		Ps::Array<PxU32> newPolygon_data;
+		Ps::Array<PxU32> newTriangle_data;
+		PxU32 newNb_polygons = 0;
+
+		PxU32* data = polygon_data.begin();
+		PxU32* triData = triangle_data.begin();
+		for(PxU32 i=0;i<nb_polygons;i++)
+		{
+			PxU32 nbVerts = *data++;
+			PxU32 nbTris = *triData++;
+			if(polygonMarkers[i])
+			{
+				// split the polygon into triangles
+				for(PxU32 k=0;k< nbTris; k++)
+				{
+					newNb_polygons++;
+					const PxU32 faceIndex = triData[k];
+					newPolygon_data.pushBack(PxU32(3));
+					newPolygon_data.pushBack(dFaces[3*faceIndex]);
+					newPolygon_data.pushBack(dFaces[3*faceIndex + 1]);
+					newPolygon_data.pushBack(dFaces[3*faceIndex + 2]);
+					newTriangle_data.pushBack(PxU32(1));
+					newTriangle_data.pushBack(faceIndex);
+				}
+			}
+			else
+			{
+				newNb_polygons++;
+				// copy the original polygon
+				newPolygon_data.pushBack(nbVerts);
+				for(PxU32 j=0;j<nbVerts;j++)
+					newPolygon_data.pushBack(data[j]);
+
+				// copy the original polygon triangles
+				newTriangle_data.pushBack(nbTris);
+				for(PxU32 k=0;k< nbTris; k++)
+				{
+					newTriangle_data.pushBack(triData[k]);
+				}
+			}
+			data += nbVerts;
+			triData += nbTris;
+		}
+
+		// now put the data to output
+		polygon_data.clear();
+		triangle_data.clear();
+
+		// the copy does copy even the data
+		polygon_data = newPolygon_data;
+		triangle_data = newTriangle_data;
+		nb_polygons = newNb_polygons;
+	}
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+/**
+* Analyses a convex hull made of triangles and extracts polygon data out of it.
+* \relates ConvexHull
+* \fn extractHullPolygons(Ps::Array<PxU32>& polygon_data, const ConvexHull& hull)
+* \param nb_polygons [out] number of extracted polygons
+* \param polygon_data [out] polygon data: (Nb indices, index 0, index 1... index N)(Nb indices, index 0, index 1... index N)(...)
+* \param hull [in] convex hull
+* \param triangle_data [out] triangle data
+* \param rendundantVertices [out] redundant vertices found inside the polygons - we want to remove them because of PCM
+* \return true if success
+*/
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+static bool extractHullPolygons(PxU32& nb_polygons, Ps::Array<PxU32>& polygon_data, const ConvexPolygonsBuilder& hull, Ps::Array<PxU32>* triangle_data, Ps::Array<PxU32>& rendundantVertices)
+{
+	PxU32 nbFaces = hull.getNbFaces();
+	const PxVec3* hullVerts = hull.mHullDataHullVertices;
+	const PxU32 nbVertices = hull.mHull->mNbHullVertices;
+
+	// Only 32-bit faces are used here; wFaces stays NULL (dead 16-bit path).
+	const PxU16* wFaces = NULL;
+	const PxU32* dFaces = reinterpret_cast<const PxU32*>(hull.getFaces());
+	PX_ASSERT(wFaces || dFaces);
+
+	ADJACENCIESCREATE create;
+	create.NbFaces = nbFaces;
+	create.DFaces = dFaces;
+	create.WFaces = wFaces;
+	create.Verts = hullVerts;
+	//Create.Epsilon = 0.01f; // PT: trying to fix Rob Elam bug. Also fixes TTP 2467
+	// Create.Epsilon = 0.001f; // PT: for "Bruno's bug"
+	create.Epsilon = 0.005f; // PT: middle-ground seems to fix both. Expose this param?
+
+	// Build triangle adjacency. "Active" edges separate non-coplanar triangles,
+	// i.e. they are the real polygon boundaries of the hull.
+	AdjacenciesBuilder adj;
+	if(!adj.Init(create)) return false;
+
+	PxU32 nbBoundaryEdges = adj.ComputeNbBoundaryEdges();
+	if(nbBoundaryEdges) return false; // A valid hull shouldn't have open edges!!
+
+	// markers: triangle already assigned to a polygon.
+	bool* markers = reinterpret_cast<bool*>(PxAlloca(nbFaces*sizeof(bool)));
+	PxMemZero(markers, nbFaces*sizeof(bool));
+
+	// vertexMarkers: per-vertex polygon-boundary use count (see end of function).
+	PxU8* vertexMarkers = reinterpret_cast<PxU8*>(PxAlloca(nbVertices*sizeof(PxU8)));
+	PxMemZero(vertexMarkers, nbVertices*sizeof(PxU8));
+
+	PxU32 currentFace = 0; // Start with first triangle
+	nb_polygons = 0;
+	do
+	{
+		// Pick the next triangle not yet assigned to a polygon.
+		currentFace = 0;
+		while(currentFace<nbFaces && markers[currentFace]) currentFace++;
+
+		// Start from "closest" face and floodfill through inactive edges
+		struct Local
+		{
+			static void FloodFill(Ps::Array<PxU32>& indices, const AdjTriangle* faces, PxU32 current, bool* inMarkers)
+			{
+				if(inMarkers[current]) return;
+				inMarkers[current] = true;
+
+				indices.pushBack(current);
+				const AdjTriangle& AT = faces[current];
+
+				// We can floodfill through inactive edges since the mesh is convex (inactive==planar)
+				if(!AT.HasActiveEdge01()) FloodFill(indices, faces, AT.GetAdjTri(EDGE01), inMarkers);
+				if(!AT.HasActiveEdge20()) FloodFill(indices, faces, AT.GetAdjTri(EDGE02), inMarkers);
+				if(!AT.HasActiveEdge12()) FloodFill(indices, faces, AT.GetAdjTri(EDGE12), inMarkers);
+			}
+
+			// Walks around vertex 'triangleIndex' starting at triangle 'index',
+			// crossing inactive (coplanar) edges, to find the neighboring face
+			// pair across the polygon boundary. Returns false when the walk
+			// terminates without finding one.
+			static bool GetNeighborFace(PxU32 index,PxU32 triangleIndex,const AdjTriangle* faces, const PxU32* dfaces, PxU32& neighbor, PxU32& current)
+			{
+				PxU32 currentIndex = index;
+				PxU32 previousIndex = index;
+				bool firstFace = true;
+				bool next = true;
+				while (next)
+				{
+					const AdjTriangle& currentAT = faces[currentIndex];
+					PxU32 refTr0 = dfaces[currentIndex*3 + 0];
+					PxU32 refTr1 = dfaces[currentIndex*3 + 1];
+
+					// Pick the two edges of the current triangle incident to 'triangleIndex'.
+					PxU32 edge[2];
+					edge[0] = 1;
+					edge[1] = 2;
+					if(triangleIndex == refTr0)
+					{
+						edge[0] = 0;
+						edge[1] = 1;
+					}
+					else
+					{
+						if(triangleIndex == refTr1)
+						{
+							edge[0] = 0;
+							edge[1] = 2;
+						}
+					}
+
+					if(currentAT.HasActiveEdge(edge[0]) && currentAT.HasActiveEdge(edge[1]))
+					{
+						return false;
+					}
+
+					if(!currentAT.HasActiveEdge(edge[0]) && !currentAT.HasActiveEdge(edge[1]))
+					{
+						// not interested in testing transition vertices
+						if(currentIndex == index)
+						{
+							return false;
+						}
+
+						// transition one
+						for (PxU32 i = 0; i < 2; i++)
+						{
+							PxU32 testIndex = currentAT.GetAdjTri(SharedEdgeIndex(edge[i]));
+
+							// exit if we circle around the vertex back to beginning
+							if(testIndex == index && previousIndex != index)
+							{
+								return false;
+							}
+
+							if(testIndex != previousIndex)
+							{
+								// move to next
+								previousIndex = currentIndex;
+								currentIndex = testIndex;
+								break;
+							}
+						}
+					}
+					else
+					{
+						// Exactly one incident edge is active: ensure edge[0] is the active one.
+						if(!currentAT.HasActiveEdge(edge[0]))
+						{
+							PxU32 t = edge[0];
+							edge[0] = edge[1];
+							edge[1] = t;
+						}
+
+						if(currentAT.HasActiveEdge(edge[0]))
+						{
+							PxU32 testIndex = currentAT.GetAdjTri(SharedEdgeIndex(edge[0]));
+							if(firstFace)
+							{
+								firstFace = false;
+							}
+							else
+							{
+								neighbor = testIndex;
+								current = currentIndex;
+								return true;
+							}
+						}
+
+						if(!currentAT.HasActiveEdge(edge[1]))
+						{
+							PxU32 testIndex = currentAT.GetAdjTri(SharedEdgeIndex(edge[1]));
+							if(testIndex != index)
+							{
+								previousIndex = currentIndex;
+								currentIndex = testIndex;
+							}
+						}
+					}
+
+				}
+
+				return false;
+			}
+
+			// Rejects a triangle from flood-filling when merging it would fuse
+			// two polygons that only touch through matching neighbor pairs.
+			static bool CheckFloodFillFace(PxU32 index,const AdjTriangle* faces, const PxU32* dfaces)
+			{
+				if(!dfaces)
+					return true;
+
+				const AdjTriangle& checkedAT = faces[index];
+
+				PxU32 refTr0 = dfaces[index*3 + 0];
+				PxU32 refTr1 = dfaces[index*3 + 1];
+				PxU32 refTr2 = dfaces[index*3 + 2];
+
+				for (PxU32 i = 0; i < 3; i++)
+				{
+					if(!checkedAT.HasActiveEdge(i))
+					{
+						// Map edge i to the two vertices it spans and the two other edges.
+						PxU32 testTr0 = refTr1;
+						PxU32 testTr1 = refTr2;
+						PxU32 testIndex0 = 0;
+						PxU32 testIndex1 = 1;
+						if(i == 0)
+						{
+							testTr0 = refTr0;
+							testTr1 = refTr1;
+							testIndex0 = 1;
+							testIndex1 = 2;
+						}
+						else
+						{
+							if(i == 1)
+							{
+								testTr0 = refTr0;
+								testTr1 = refTr2;
+								testIndex0 = 0;
+								testIndex1 = 2;
+							}
+						}
+
+						PxU32 adjFaceTested = checkedAT.GetAdjTri(SharedEdgeIndex(testIndex0));
+
+						PxU32 neighborIndex00;
+						PxU32 neighborIndex01;
+						bool found0 = GetNeighborFace(index,testTr0,faces,dfaces, neighborIndex00, neighborIndex01);
+						PxU32 neighborIndex10;
+						PxU32 neighborIndex11;
+						bool found1 = GetNeighborFace(adjFaceTested,testTr0,faces,dfaces, neighborIndex10, neighborIndex11);
+
+						if(found0 && found1 && neighborIndex00 == neighborIndex11 && neighborIndex01 == neighborIndex10)
+						{
+							return false;
+						}
+
+						adjFaceTested = checkedAT.GetAdjTri(SharedEdgeIndex(testIndex1));
+						found0 = GetNeighborFace(index,testTr1,faces,dfaces,neighborIndex00,neighborIndex01);
+						found1 = GetNeighborFace(adjFaceTested,testTr1,faces,dfaces,neighborIndex10,neighborIndex11);
+
+						if(found0 && found1 && neighborIndex00 == neighborIndex11 && neighborIndex01 == neighborIndex10)
+						{
+							return false;
+						}
+
+					}
+				}
+
+				return true;
+			}
+
+			// Validates a flood-filled polygon. On failure the offending
+			// triangles get all edges forced active (0x20000000 bit) and are
+			// un-marked, and true is returned so the caller re-runs the fill.
+			static bool CheckFloodFill(Ps::Array<PxU32>& indices,AdjTriangle* faces,bool* inMarkers, const PxU32* dfaces)
+			{
+				bool valid = true;
+
+				for(PxU32 i=0;i<indices.size();i++)
+				{
+					//const AdjTriangle& AT = faces[indices.GetEntry(i)];
+
+					for(PxU32 j= i + 1;j<indices.size();j++)
+					{
+						const AdjTriangle& testAT = faces[indices[j]];
+
+						// Two triangles of the same polygon must not be separated by an active edge.
+						if(testAT.GetAdjTri(EDGE01) == indices[i])
+						{
+							if(testAT.HasActiveEdge01())
+							{
+								valid = false;
+							}
+						}
+						if(testAT.GetAdjTri(EDGE02) == indices[i])
+						{
+							if(testAT.HasActiveEdge20())
+							{
+								valid = false;
+							}
+						}
+						if(testAT.GetAdjTri(EDGE12) == indices[i])
+						{
+							if(testAT.HasActiveEdge12())
+							{
+								valid = false;
+							}
+						}
+
+						if(!valid)
+							break;
+					}
+
+					if(!CheckFloodFillFace(indices[i], faces, dfaces))
+					{
+						valid = false;
+					}
+
+					if(!valid)
+						break;
+				}
+
+				if(!valid)
+				{
+					for(PxU32 i=0;i<indices.size();i++)
+					{
+						AdjTriangle& AT = faces[indices[i]];
+						AT.mATri[0] |= 0x20000000;
+						AT.mATri[1] |= 0x20000000;
+						AT.mATri[2] |= 0x20000000;
+
+						inMarkers[indices[i]] = false;
+					}
+
+					indices.forceSize_Unsafe(0);
+
+					return true;
+				}
+
+				return false;
+			}
+		};
+
+		if(currentFace!=nbFaces)
+		{
+			Ps::Array<PxU32> indices; // Indices of triangles forming hull polygon
+
+			// Flood-fill and re-check until a valid polygon is produced.
+			bool doFill = true;
+			while (doFill)
+			{
+				Local::FloodFill(indices, adj.mFaces, currentFace, markers);
+
+				doFill = Local::CheckFloodFill(indices,adj.mFaces,markers, dFaces);
+			}
+
+			// Now it would be nice to recreate a closed linestrip, similar to silhouette extraction. The line is composed of active edges, this time.
+
+
+			Ps::Array<Pair> activeSegments;
+			//Container ActiveSegments;
+			// Loop through triangles composing the polygon
+			for(PxU32 i=0;i<indices.size();i++)
+			{
+				const PxU32 currentTriIndex = indices[i]; // Catch current triangle
+				const PxU32 vRef0 = dFaces ? dFaces[currentTriIndex*3+0] : wFaces[currentTriIndex*3+0];
+				const PxU32 vRef1 = dFaces ? dFaces[currentTriIndex*3+1] : wFaces[currentTriIndex*3+1];
+				const PxU32 vRef2 = dFaces ? dFaces[currentTriIndex*3+2] : wFaces[currentTriIndex*3+2];
+
+				// Keep active edges
+				if(adj.mFaces[currentTriIndex].HasActiveEdge01()) { activeSegments.pushBack(Pair(vRef0,vRef1)); }
+				if(adj.mFaces[currentTriIndex].HasActiveEdge20()) { activeSegments.pushBack(Pair(vRef0,vRef2)); }
+				if(adj.mFaces[currentTriIndex].HasActiveEdge12()) { activeSegments.pushBack(Pair(vRef1,vRef2)); }
+			}
+
+			// We assume the polygon is convex. In that case it should always be possible to retriangulate it so that the triangles are
+			// implicit (in particular, it should always be possible to remove interior triangles)
+
+			Ps::Array<PxU32> lineStrip;
+			if(findLineStrip(lineStrip, activeSegments))
+			{
+				PxU32 nb = lineStrip.size();
+				if(nb)
+				{
+					const PxU32* entries = lineStrip.begin();
+					PX_ASSERT(entries[0] == entries[nb-1]); // findLineStrip() is designed that way. Might not be what we want!
+
+					// We get rid of the last (duplicated) index
+					polygon_data.pushBack(nb-1);
+					for (PxU32 i = 0; i < nb-1; i++)
+					{
+						// Count boundary usage per vertex for the final redundancy pass.
+						vertexMarkers[entries[i]]++;
+						polygon_data.pushBack(entries[i]);
+					}
+					nb_polygons++;
+
+					// Loop through vertices composing the line strip polygon end mark the redundant vertices inside the polygon
+					for(PxU32 i=0;i<indices.size();i++)
+					{
+						const PxU32 CurrentTriIndex = indices[i]; // Catch current triangle
+						const PxU32 VRef0 = dFaces ? dFaces[CurrentTriIndex*3+0] : wFaces[CurrentTriIndex*3+0];
+						const PxU32 VRef1 = dFaces ? dFaces[CurrentTriIndex*3+1] : wFaces[CurrentTriIndex*3+1];
+						const PxU32 VRef2 = dFaces ? dFaces[CurrentTriIndex*3+2] : wFaces[CurrentTriIndex*3+2];
+
+						// A vertex used by the polygon's triangles but absent from its
+						// boundary strip is interior to the polygon => redundant.
+						bool found0 = false;
+						bool found1 = false;
+						bool found2 = false;
+
+						for (PxU32 j=0;j < nb - 1; j++)
+						{
+							if(VRef0 == entries[j])
+							{
+								found0 = true;
+							}
+
+							if(VRef1 == entries[j])
+							{
+								found1 = true;
+							}
+
+							if(VRef2 == entries[j])
+							{
+								found2 = true;
+							}
+
+							if(found0 && found1 && found2)
+								break;
+						}
+
+						if(!found0)
+						{
+							if(rendundantVertices.find(VRef0) == rendundantVertices.end())
+								rendundantVertices.pushBack(VRef0);
+						}
+
+						if(!found1)
+						{
+							if(rendundantVertices.find(VRef1) == rendundantVertices.end())
+								rendundantVertices.pushBack(VRef1);
+
+						}
+
+						if(!found2)
+						{
+							if(rendundantVertices.find(VRef2) == rendundantVertices.end())
+								rendundantVertices.pushBack(VRef2);
+						}
+					}
+
+					// If needed, output triangle indices used to build this polygon
+					if(triangle_data)
+					{
+						triangle_data->pushBack(indices.size());
+						for (PxU32 j = 0; j < indices.size(); j++)
+							triangle_data->pushBack(indices[j]);
+					}
+				}
+			}
+			else
+			{
+				Ps::getFoundation().error(PxErrorCode::eINVALID_OPERATION, __FILE__, __LINE__, "Meshmerizer::extractHullPolygons: line strip extraction failed");
+				return false;
+			}
+		}
+	}
+	while(currentFace!=nbFaces);
+
+	// A valid hull vertex belongs to at least 3 polygon boundaries; anything
+	// less is redundant (e.g. only interior to coplanar faces).
+	for (PxU32 i = 0; i < nbVertices; i++)
+	{
+		if(vertexMarkers[i] < 3)
+		{
+			if(rendundantVertices.find(i) == rendundantVertices.end())
+				rendundantVertices.pushBack(i);
+		}
+	}
+
+	if(rendundantVertices.size() > 0 && triangle_data)
+		checkRedundantVertices(nb_polygons,polygon_data,hull,*triangle_data,rendundantVertices);
+
+	return true;
+}
+
+//////////////////////////////////////////////////////////////////////////
+
+// Creates a polygons builder operating on the given hull data; buildGRBData is
+// forwarded to the base ConvexHullBuilder. No faces are owned until
+// computeHullPolygons() is called.
+ConvexPolygonsBuilder::ConvexPolygonsBuilder(Gu::ConvexHullData* hull, const bool buildGRBData)
+	: ConvexHullBuilder(hull, buildGRBData), mNbHullFaces(0), mFaces(NULL)
+{
+}
+
+//////////////////////////////////////////////////////////////////////////
+
+// Releases the triangle array owned by the builder (allocated in
+// computeHullPolygons()).
+ConvexPolygonsBuilder::~ConvexPolygonsBuilder()
+{
+	PX_DELETE_POD(mFaces);
+}
+
+//////////////////////////////////////////////////////////////////////////
+// compute hull polygons from given hull triangles
+// Builds hull polygon data from user-provided triangles.
+// \param nbVerts [in] number of input vertices
+// \param verts [in] input vertex positions (copied internally)
+// \param nbTriangles [in] number of input triangles
+// \param triangles [in] triangle vertex indices, 3 PxU32 per triangle
+// \return true if cleaning, validation and polygon extraction all succeeded
+bool ConvexPolygonsBuilder::computeHullPolygons(const PxU32& nbVerts,const PxVec3* verts, const PxU32& nbTriangles, const PxU32* triangles)
+{
+	PX_ASSERT(triangles);
+	PX_ASSERT(verts);
+
+	mHullDataHullVertices = NULL;
+	mHullDataPolygons = NULL;
+	mHullDataVertexData8 = NULL;
+	mHullDataFacesByEdges8 = NULL;
+	mHullDataFacesByVertices8 = NULL;
+
+	mNbHullFaces = nbTriangles;
+	mHull->mNbHullVertices = Ps::to8(nbVerts);
+	// allocate one additional PxVec3 for V4 safe load in VolumeIntegration.
+	// Fix: the "+ 1" must be inside the parentheses - the previous
+	// "sizeof(PxVec3) * N + 1" only over-allocated a single byte, leaving the
+	// padded V4 load to read past the end of the buffer.
+	mHullDataHullVertices = reinterpret_cast<PxVec3*>(PX_ALLOC(sizeof(PxVec3) * (mHull->mNbHullVertices + 1), "PxVec3"));
+	PxMemCopy(mHullDataHullVertices, verts, mHull->mNbHullVertices*sizeof(PxVec3));
+
+	mFaces = PX_NEW(HullTriangleData)[mNbHullFaces];
+	for(PxU32 i=0;i<mNbHullFaces;i++)
+	{
+		// Indices must fit into the 16-bit range historically supported here.
+		PX_ASSERT(triangles[i*3+0]<=0xffff);
+		PX_ASSERT(triangles[i*3+1]<=0xffff);
+		PX_ASSERT(triangles[i*3+2]<=0xffff);
+		mFaces[i].mRef[0] = triangles[i*3+0];
+		mFaces[i].mRef[1] = triangles[i*3+1];
+		mFaces[i].mRef[2] = triangles[i*3+2];
+	}
+
+	Gu::TriangleT<PxU32>* hullAsIndexedTriangle = reinterpret_cast<Gu::TriangleT<PxU32>*>(mFaces);
+
+	// We don't trust the user at all... So, clean the hull.
+	// Fix: abort when the cleaner rejects the mesh (no triangles survive)
+	// instead of silently continuing with stale face/vertex data.
+	PxU32 nbHullVerts = mHull->mNbHullVertices;
+	if(!CleanFaces(mNbHullFaces, hullAsIndexedTriangle, nbHullVerts, mHullDataHullVertices))
+		return false;
+	PX_ASSERT(nbHullVerts<256);
+	mHull->mNbHullVertices = Ps::to8(nbHullVerts);
+
+	// ...and then run the full tests again.
+	if(!CheckFaces(mNbHullFaces, hullAsIndexedTriangle, mHull->mNbHullVertices, mHullDataHullVertices))
+		return false;
+
+	// Transform triangles-to-polygons
+	if(!createPolygonData())
+		return false;
+
+	return checkHullPolygons();
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+/**
+* Computes polygon data.
+* \return true if success
+*/
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+bool ConvexPolygonsBuilder::createPolygonData()
+{
+	// Cleanup results from any previous run before rebuilding.
+	mHull->mNbPolygons = 0;
+	PX_DELETE_POD(mHullDataVertexData8);
+	PX_DELETE_POD(mHullDataFacesByVertices8);
+	PX_FREE_AND_RESET(mHullDataPolygons);
+
+	// Extract polygon data from triangle data.
+	// temp  : per polygon, a vertex count followed by that many vertex indices
+	// temp2 : per polygon, a triangle count followed by that many triangle indices
+	// rendundantVertices : hull vertex indices made useless by polygon merging
+	Ps::Array<PxU32> temp;
+	Ps::Array<PxU32> temp2;
+	Ps::Array<PxU32> rendundantVertices;
+	PxU32 nbPolygons;
+	if(!extractHullPolygons(nbPolygons, temp, *this, &temp2,rendundantVertices))
+		return false;
+
+	PxVec3* reducedHullDataHullVertices = mHullDataHullVertices;
+	PxU8 numReducedHullDataVertices = mHull->mNbHullVertices;
+
+	// If redundant vertices were found, build a compacted vertex buffer and a
+	// remap table from old vertex indices to compacted ones (0xFF = removed).
+	if(rendundantVertices.size() > 0)
+	{
+		numReducedHullDataVertices = Ps::to8(mHull->mNbHullVertices - rendundantVertices.size());
+		reducedHullDataHullVertices = static_cast<PxVec3*> (PX_ALLOC_TEMP(sizeof(PxVec3)*numReducedHullDataVertices,"Reduced vertices hull data"));
+		PxU8* remapTable = PX_NEW(PxU8)[mHull->mNbHullVertices];
+
+		PxU8 currentIndex = 0;
+		for (PxU8 i = 0; i < mHull->mNbHullVertices; i++)
+		{
+			if(rendundantVertices.find(i) == rendundantVertices.end())
+			{
+				PX_ASSERT(currentIndex < numReducedHullDataVertices);
+				reducedHullDataHullVertices[currentIndex] = mHullDataHullVertices[i];
+				remapTable[i] = currentIndex;
+				currentIndex++;
+			}
+			else
+			{
+				remapTable[i] = 0xFF;	// sentinel: vertex dropped from the hull
+			}
+		}
+
+		// Rewrite the polygon vertex indices in 'temp' through the remap table.
+		// Removed vertices become 0xFF and are filtered out below.
+		PxU32* data = temp.begin();
+		for(PxU32 i=0;i<nbPolygons;i++)
+		{
+			PxU32 nbVerts = *data++;
+			PX_ASSERT(nbVerts>=3);			// Else something very wrong happened...
+
+			for(PxU32 j=0;j<nbVerts;j++)
+			{
+				PX_ASSERT(data[j] < mHull->mNbHullVertices);
+				data[j] = remapTable[data[j]];
+			}
+
+			data += nbVerts;
+		}
+
+		PX_DELETE_POD(remapTable);
+	}
+
+	// mNbPolygons is stored in a PxU8, so more than 255 polygons cannot be represented.
+	if(nbPolygons>255)
+	{
+		Ps::getFoundation().error(PxErrorCode::eINTERNAL_ERROR, __FILE__, __LINE__, "ConvexHullBuilder: convex hull has more than 255 polygons!");
+		return false;
+	}
+
+	// Precompute hull polygon structures
+	mHull->mNbPolygons = Ps::to8(nbPolygons);
+	mHullDataPolygons = reinterpret_cast<Gu::HullPolygonData*>(PX_ALLOC(sizeof(Gu::HullPolygonData)*mHull->mNbPolygons, "Gu::HullPolygonData"));
+	PxMemZero(mHullDataPolygons, sizeof(Gu::HullPolygonData)*mHull->mNbPolygons);
+
+	// The winding hasn't been preserved so we need to handle this. Basically we need to "unify normals"
+	// exactly as we did at hull creation time - except this time we work on polygons
+	PxVec3 geomCenter;
+	computeGeomCenter(geomCenter, mNbHullFaces, mFaces);
+
+	// Loop through polygons
+	// We have N polygons => remove N entries for number of vertices
+	PxU32 tmp = temp.size() - nbPolygons;
+	mHullDataVertexData8 = PX_NEW(PxU8)[tmp];
+	PxU8* dest = mHullDataVertexData8;
+	const PxU32* data = temp.begin();
+	const PxU32* triData = temp2.begin();
+	for(PxU32 i=0;i<nbPolygons;i++)
+	{
+		mHullDataPolygons[i].mVRef8 = PxU16(dest - mHullDataVertexData8);	// Setup link for current polygon
+		PxU32 nbVerts = *data++;
+		PX_ASSERT(nbVerts>=3);			// Else something very wrong happened...
+		mHullDataPolygons[i].mNbVerts = Ps::to8(nbVerts);
+
+		// Copy the (possibly remapped) vertex indices, skipping removed (0xFF) ones
+		// and shrinking the polygon's vertex count accordingly.
+		PxU32 index = 0;
+		for(PxU32 j=0;j<nbVerts;j++)
+		{
+			if(data[j] != 0xFF)
+			{
+				dest[index] = Ps::to8(data[j]);
+				index++;
+			}
+			else
+			{
+				mHullDataPolygons[i].mNbVerts--;
+			}
+		}
+
+		// Compute plane equation
+		{
+			computeNewellPlane(mHullDataPolygons[i].mPlane, mHullDataPolygons[i].mNbVerts, dest, reducedHullDataHullVertices);
+
+			// Compare the Newell plane against the plane of the polygon's first source
+			// triangle; if they face opposite ways, flip the polygon.
+			PxU32 nbTris = *triData++;		// #tris in current poly
+			bool flip = false;
+			for(PxU32 k=0;k< nbTris; k++)
+			{
+				PxU32 triIndex = *triData++;	// Index of one triangle composing polygon
+				PX_ASSERT(triIndex<mNbHullFaces);
+				const Gu::TriangleT<PxU32>& T = reinterpret_cast<const Gu::TriangleT<PxU32>&>(mFaces[triIndex]);
+				const PxPlane PL = PlaneEquation(T, mHullDataHullVertices);
+				if(k==0 && PL.n.dot(mHullDataPolygons[i].mPlane.n) < 0.0f)
+				{
+					flip = true;
+				}
+			}
+			if(flip)
+			{
+				negatePlane(mHullDataPolygons[i]);
+				inverseBuffer(mHullDataPolygons[i].mNbVerts, dest);
+			}
+
+			// Push the plane out so that all hull vertices lie on its negative side.
+			for(PxU32 j=0;j<mHull->mNbHullVertices;j++)
+			{
+				float d = - (mHullDataPolygons[i].mPlane.n).dot(mHullDataHullVertices[j]);
+				if(d<mHullDataPolygons[i].mPlane.d) mHullDataPolygons[i].mPlane.d=d;
+			}
+		}
+
+		// "Unify normal": make the plane face away from the geometric center.
+		if(mHullDataPolygons[i].mPlane.distance(geomCenter)>0.0f)
+		{
+			inverseBuffer(mHullDataPolygons[i].mNbVerts, dest);
+
+			negatePlane(mHullDataPolygons[i]);
+			PX_ASSERT(mHullDataPolygons[i].mPlane.distance(geomCenter)<=0.0f);
+		}
+
+		// Next one
+		data += nbVerts;	// Skip vertex indices
+		dest += mHullDataPolygons[i].mNbVerts;
+	}
+
+	// Commit the compacted vertex buffer (if any) back into the hull storage.
+	if(reducedHullDataHullVertices != mHullDataHullVertices)
+	{
+		PxMemCopy(mHullDataHullVertices,reducedHullDataHullVertices,sizeof(PxVec3)*numReducedHullDataVertices);
+		PX_FREE(reducedHullDataHullVertices);
+
+		mHull->mNbHullVertices = numReducedHullDataVertices;
+	}
+
+	//calculate the vertex map table
+	if(!calculateVertexMapTable(nbPolygons))
+		return false;
+
+#ifdef USE_PRECOMPUTED_HULL_PROJECTION
+	// Loop through polygons
+	for(PxU32 j=0;j<nbPolygons;j++)
+	{
+		// Precompute hull projection along local polygon normal
+		PxU32 nbVerts = mHull->mNbHullVertices;
+		const PxVec3* verts = mHullDataHullVertices;
+		Gu::HullPolygonData& polygon = mHullDataPolygons[j];
+		PxReal min = PX_MAX_F32;
+		PxU8 minIndex = 0xff;
+		for (PxU8 i = 0; i < nbVerts; i++)
+		{
+			float dp = (*verts++).dot(polygon.mPlane.n);
+			if(dp < min)
+			{
+				min = dp;
+				minIndex = i;
+			}
+		}
+		polygon.mMinIndex = minIndex;
+	}
+#endif
+
+	// Triangulate newly created polygons to recreate a clean vertex cloud.
+	return createTrianglesFromPolygons();
+}
+
+//////////////////////////////////////////////////////////////////////////
+// create back triangles from polygons
+// Rebuilds mFaces/mNbHullFaces by fan-triangulating each hull polygon,
+// discarding degenerate (zero-area) triangles, then re-unifying winding
+// against the geometric center. Returns false on malformed input.
+bool ConvexPolygonsBuilder::createTrianglesFromPolygons()
+{
+	if (!mHull->mNbPolygons || !mHullDataPolygons) return false;
+
+	// Upper bound on triangle count: a convex polygon with V verts fans into V-2 tris.
+	PxU32 maxNbTriangles = 0;
+	for (PxU32 i = 0; i < mHull->mNbPolygons; i++)
+	{
+		if (mHullDataPolygons[i].mNbVerts < 3)
+		{
+			Ps::getFoundation().error(PxErrorCode::eINTERNAL_ERROR, __FILE__, __LINE__, "ConvexHullBuilder::CreateTrianglesFromPolygons: convex hull has a polygon with less than 3 vertices!");
+			return false;
+		}
+		maxNbTriangles += mHullDataPolygons[i].mNbVerts - 2;
+	}
+
+	HullTriangleData* tmpFaces = PX_NEW(HullTriangleData)[maxNbTriangles];
+
+	HullTriangleData* currFace = tmpFaces;
+	PxU32 nbTriangles = 0;
+	const PxU8* vertexData = mHullDataVertexData8;
+	const PxVec3* hullVerts = mHullDataHullVertices;
+	for (PxU32 i = 0; i < mHull->mNbPolygons; i++)
+	{
+		const PxU8* data = vertexData + mHullDataPolygons[i].mVRef8;
+		PxU32 nbVerts = mHullDataPolygons[i].mNbVerts;
+
+		// Triangulate the polygon such that all all generated triangles have one and the same vertex
+		// in common.
+		//
+		// Make sure to avoid creating zero area triangles. Imagine the following polygon:
+		//
+		// 4                  3
+		// *------------------*
+		// |                  |
+		// *---*----*----*----*
+		// 5   6    0    1    2
+		//
+		// Choosing vertex 0 as the shared vertex, the following zero area triangles will be created:
+		//   [0 1 2], [0 5 6]
+		//
+		// Check for these triangles and discard them
+		// Note: Such polygons should only occur if the user defines the convex hull, i.e., the triangles
+		//       of the convex shape, himself. If the convex hull is built from the vertices only, the
+		//       hull algorithm removes the useless vertices.
+		//
+		for (PxU32 j = 0; j < nbVerts - 2; j++)
+		{
+			currFace->mRef[0] = data[0];
+			currFace->mRef[1] = data[(j + 1) % nbVerts];
+			currFace->mRef[2] = data[(j + 2) % nbVerts];
+
+			const PxVec3& p0 = hullVerts[currFace->mRef[0]];
+			const PxVec3& p1 = hullVerts[currFace->mRef[1]];
+			const PxVec3& p2 = hullVerts[currFace->mRef[2]];
+
+			// squared doubled area; exact zero means the three points are collinear
+			const float area = ((p1 - p0).cross(p2 - p0)).magnitudeSquared();
+
+			if (area != 0.0f)	// Else discard the triangle
+			{
+				nbTriangles++;
+				currFace++;
+			}
+		}
+	}
+
+	PX_DELETE_POD(mFaces);
+	HullTriangleData* faces;
+	PX_ASSERT(nbTriangles <= maxNbTriangles);
+	if (maxNbTriangles == nbTriangles)
+	{
+		// No zero area triangles, hence the face buffer has correct size and can be used directly.
+		faces = tmpFaces;
+	}
+	else
+	{
+		// Resize face buffer because some triangles were discarded.
+		faces = PX_NEW(HullTriangleData)[nbTriangles];
+		if (!faces)	// NOTE(review): PX_NEW likely never returns NULL — defensive check kept as-is
+		{
+			PX_DELETE_POD(tmpFaces);
+			return false;
+		}
+		PxMemCopy(faces, tmpFaces, sizeof(HullTriangleData)*nbTriangles);
+		PX_DELETE_POD(tmpFaces);
+	}
+	mFaces = faces;
+	mNbHullFaces = nbTriangles;
+	// TODO: at this point useless vertices should be removed from the hull. The current fix is to initialize
+	// support vertices to known valid vertices, but it's not really convincing.
+
+	// Re-unify normals: every face plane must point away from the geometric center.
+	PxVec3 geomCenter;
+	computeGeomCenter(geomCenter, mNbHullFaces, mFaces);
+
+	for (PxU32 i = 0; i < mNbHullFaces; i++)
+	{
+		const PxPlane P(hullVerts[mFaces[i].mRef[0]],
+			hullVerts[mFaces[i].mRef[1]],
+			hullVerts[mFaces[i].mRef[2]]);
+		if (P.distance(geomCenter) > 0.0f)
+		{
+			Flip(mFaces[i]);
+		}
+	}
+	return true;
+}
+
diff --git a/PhysX_3.4/Source/PhysXCooking/src/convex/ConvexPolygonsBuilder.h b/PhysX_3.4/Source/PhysXCooking/src/convex/ConvexPolygonsBuilder.h
new file mode 100644
index 00000000..52044eb0
--- /dev/null
+++ b/PhysX_3.4/Source/PhysXCooking/src/convex/ConvexPolygonsBuilder.h
@@ -0,0 +1,64 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2016 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+
+#ifndef PX_CONVEXPOLYGONSBUILDER_H
+#define PX_CONVEXPOLYGONSBUILDER_H
+
+#include "ConvexHullBuilder.h"
+
+namespace physx
+{
+	//////////////////////////////////////////////////////////////////////////
+	// extended convex hull builder for a case where we build polygons from input triangles
+	class ConvexPolygonsBuilder : public ConvexHullBuilder
+	{
+	public:
+		ConvexPolygonsBuilder(Gu::ConvexHullData* hull, const bool buildGRBData);
+		~ConvexPolygonsBuilder();
+
+		// Builds hull polygons from the given vertex cloud and input triangle list.
+		// \return true on success
+		bool computeHullPolygons(const PxU32& nbVerts,const PxVec3* verts, const PxU32& nbTriangles, const PxU32* triangles);
+
+		PX_INLINE	PxU32	getNbFaces()	const	{ return mNbHullFaces; }	// number of hull triangles
+		PX_INLINE	const HullTriangleData*	getFaces()	const	{ return mFaces; }	// hull triangle data (owned by this builder)
+
+
+	private:
+		// Builds polygon data (planes, vertex references) from the triangle data.
+		bool createPolygonData();
+		// Re-triangulates the computed polygons back into mFaces.
+		bool createTrianglesFromPolygons();
+
+	private:
+		PxU32				mNbHullFaces;	//!< Number of faces in the convex hull
+		HullTriangleData*	mFaces;			//!< Triangles.
+
+	};
+}
+
+#endif	// PX_CONVEXPOLYGONSBUILDER_H
diff --git a/PhysX_3.4/Source/PhysXCooking/src/convex/InflationConvexHullLib.cpp b/PhysX_3.4/Source/PhysXCooking/src/convex/InflationConvexHullLib.cpp
new file mode 100644
index 00000000..8f60275c
--- /dev/null
+++ b/PhysX_3.4/Source/PhysXCooking/src/convex/InflationConvexHullLib.cpp
@@ -0,0 +1,1481 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2016 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#include "PsAlloca.h"
+#include "PsUserAllocated.h"
+#include "PsMathUtils.h"
+#include "PsUtilities.h"
+
+#include "foundation/PxMath.h"
+#include "foundation/PxBounds3.h"
+#include "foundation/PxPlane.h"
+#include "foundation/PxMemory.h"
+
+#include "InflationConvexHullLib.h"
+#include "ConvexHullUtils.h"
+
+using namespace physx;
+
+namespace local
+{
+ //////////////////////////////////////////////////////////////////////////
+ // constants
+ static const float DIMENSION_EPSILON_MULTIPLY = 0.001f; // used to scale down bounds dimension and set as epsilon used in the hull generator
+ static const float DIR_ANGLE_MULTIPLY = 0.025f; // used in maxIndexInDirSterid for direction check modifier
+ static const float VOLUME_EPSILON = (1e-20f); // volume epsilon used for simplex valid
+ static const float MIN_ADJACENT_ANGLE = 3.0f; // in degrees - result wont have two adjacent facets within this angle of each other. !AB expose this parameter or use the one PxCookingParams
+ static const float PAPERWIDTH = 0.001f; // used in hull construction from planes, within paperwidth its considered coplanar
+
+	//////////////////////////////////////////////////////////////////////////
+	// Returns the index of the point with the largest projection onto dir,
+	// skipping entries flagged in tempNotAllowed. At least one candidate
+	// must remain, otherwise the assert fires.
+	PxI32 maxIndexInDirFiltered(const PxVec3 *p,PxU32 count,const PxVec3 &dir, bool* tempNotAllowed)
+	{
+		PX_ASSERT(count);
+		PxI32 best = -1;
+		float bestProj = 0.0f;
+		for(PxU32 i = 0; i < count; i++)
+		{
+			if(tempNotAllowed[i])
+				continue;
+			const float proj = p[i].dot(dir);
+			if(best == -1 || proj > bestProj)
+			{
+				best = PxI32(i);
+				bestProj = proj;
+			}
+		}
+		PX_ASSERT(best != -1);
+		return best;
+	}
+
+	//////////////////////////////////////////////////////////////////////////
+	// Returns a normalized vector orthogonal to v, choosing whichever of the
+	// two candidate cross products is longer (more numerically significant).
+	static PxVec3 orth(const PxVec3& v)
+	{
+		const PxVec3 crossZ = v.cross(PxVec3(0.0f, 0.0f, 1.f));
+		const PxVec3 crossY = v.cross(PxVec3(0.0f, 1.f, 0.0f));
+		PxVec3 result;
+		if(crossZ.magnitudeSquared() > crossY.magnitudeSquared())
+			result = crossZ;
+		else
+			result = crossY;
+		result.normalize();
+		return result;
+	}
+
+	//////////////////////////////////////////////////////////////////////////
+	// find the most distant index in given direction dir
+	// An index is only accepted once it is also hit by slightly perturbed
+	// direction rays; confirmed indices are cached in 'allow' (value 3).
+	PxI32 maxIndexInDirSterid(const PxVec3* p,PxU32 count,const PxVec3& dir,Ps::Array<PxU8> &allow)
+	{
+		// if the found vertex does not get hit by a slightly rotated ray, it
+		// may not be the extreme we are looking for. Therefore it is marked
+		// as disabled for the direction search and different candidate is chosen.
+		PX_ALLOCA(tempNotAllowed,bool,count);
+		PxMemSet(tempNotAllowed,0,count*sizeof(bool));
+
+		PxI32 m=-1;
+		while(m==-1)
+		{
+			// get the furthest index along dir
+			m = maxIndexInDirFiltered(p,count,dir,tempNotAllowed);
+			PX_ASSERT(m >= 0);
+
+			// 3 == previously confirmed as a stable extreme for some direction
+			if(allow[PxU32(m)] == 3)
+				return m;
+
+			// get orthogonal vectors to the dir
+			PxVec3 u = orth(dir);
+			PxVec3 v = u.cross(dir);
+
+			PxI32 ma=-1;
+			// we shoot a ray close to the original dir and hope to get the same index
+			// if we not hit the same index we try it with bigger precision
+			// if we still fail to hit the same index we drop the index and iterate again
+			for(float x = 0.0f ; x <= 360.0f ; x+= 45.0f)
+			{
+				float s0 = PxSin(Ps::degToRad(x));
+				float c0 = PxCos(Ps::degToRad(x));
+				PxI32 mb = maxIndexInDirFiltered(p,count,dir+(u*s0+v*c0)*DIR_ANGLE_MULTIPLY,tempNotAllowed);
+				// two consecutive perturbed rays agreeing with m confirms it
+				if(ma==m && mb==m)
+				{
+					allow[PxU32(m)]=3;
+					return m;
+				}
+				// consecutive perturbed rays disagree: re-scan this 45-degree
+				// sector in finer 5-degree steps before giving up on m
+				if(ma!=-1 && ma!=mb)
+				{
+					PxI32 mc = ma;
+					for(float xx = x-40.0f ; xx <= x ; xx+= 5.0f)
+					{
+						float s = PxSin(Ps::degToRad(xx));
+						float c = PxCos(Ps::degToRad(xx));
+						int md = maxIndexInDirFiltered(p,count,dir+(u*s+v*c)*DIR_ANGLE_MULTIPLY,tempNotAllowed);
+						if(mc==m && md==m)
+						{
+							allow[PxU32(m)]=3;
+							return m;
+						}
+						mc=md;
+					}
+				}
+				ma=mb;
+			}
+			// m could not be confirmed: exclude it and search again
+			tempNotAllowed[m]=true;
+			m=-1;
+		}
+		PX_ASSERT(0);	// unreachable: loop only exits via return
+		return m;
+	}
+
+	//////////////////////////////////////////////////////////////////////////
+	// Simplex helper class - just holds the 4 indices
+	class HullSimplex
+	{
+	public:
+		PxI32 x,y,z,w;
+		HullSimplex(){}
+		HullSimplex(PxI32 _x,PxI32 _y, PxI32 _z,PxI32 _w) : x(_x), y(_y), z(_z), w(_w) {}
+		// component access: 0=x, 1=y, 2=z, 3=w (members alias a contiguous PxI32 array)
+		const PxI32& operator[](PxI32 i) const
+		{
+			return reinterpret_cast<const PxI32*>(this)[i];
+		}
+		PxI32& operator[](PxI32 i)
+		{
+			return reinterpret_cast<PxI32*>(this)[i];
+		}
+
+		//////////////////////////////////////////////////////////////////////////
+		// checks whether the tetrahedron (p0,p1,p2,p3) has significant volume
+		static bool hasVolume(const PxVec3* verts, PxU32 p0, PxU32 p1, PxU32 p2, PxU32 p3)
+		{
+			const PxVec3 crossEdges = (verts[p1]-verts[p0]).cross(verts[p2]-verts[p0]);
+			const float crossMag = crossEdges.magnitude();
+			// magnitude is never negative, so a single epsilon test is enough
+			if (crossMag < VOLUME_EPSILON)	// Almost collinear or otherwise very close to each other
+				return false;
+			PxVec3 n = crossEdges;
+			n.normalize();
+			const float height = n.dot(verts[p3]-verts[p0]);
+			// Returns true if volume is significantly non-zero
+			return (height > VOLUME_EPSILON || height < -VOLUME_EPSILON);
+		}
+	};
+
+	//////////////////////////////////////////////////////////////////////////
+	// finds the hull simplex http://en.wikipedia.org/wiki/Simplex
+	// in  - vertices, vertex count, dimensions
+	// out - indices forming the simplex, or (-1,-1,-1,-1) on failure
+	static HullSimplex findSimplex(const PxVec3* verts, PxU32 verts_count, Ps::Array<PxU8>& allow,const PxVec3& minMax)
+	{
+		// pick the basis vectors (slightly skewed axes to avoid degenerate ties)
+		PxVec3 basisVector[3];
+		PxVec3 basis[3];
+		basisVector[0] = PxVec3( 1.0f, 0.02f, 0.01f);
+		basisVector[1] = PxVec3(-0.02f, 1.0f, -0.01f);
+		basisVector[2] = PxVec3( 0.01f, 0.02f, 1.0f );
+
+		PxU32 index0 = 0;
+		PxU32 index1 = 1;
+		PxU32 index2 = 2;
+
+		// make the order of the basis vector depending on the points bounds, first basis test will be done
+		// along the longest axis
+		if(minMax.z > minMax.x && minMax.z > minMax.y)
+		{
+			index0 = 2;
+			index1 = 0;
+			index2 = 1;
+		}
+		else
+		{
+			if(minMax.y > minMax.x && minMax.y > minMax.z)
+			{
+				index0 = 1;
+				index1 = 2;
+				index2 = 0;
+			}
+		}
+
+		// pick the fist basis vector
+		basis[0] = basisVector[index0];
+		// find the indices along the pos/neg direction
+		PxI32 p0 = maxIndexInDirSterid(verts,verts_count, basis[0],allow);
+		PxI32 p1 = maxIndexInDirSterid(verts,verts_count,-basis[0],allow);
+
+		// set the first simplex axis
+		basis[0] = verts[p0]-verts[p1];
+		// if the points are the same or the basis vector is zero, terminate we failed to find a simplex
+		if(p0==p1 || basis[0]==PxVec3(0.0f))
+			return HullSimplex(-1,-1,-1,-1);
+
+		// get the orthogonal vectors against the new basis[0] vector
+		basis[1] = basisVector[index1].cross(basis[0]);	//cross(float3( 1, 0.02f, 0),basis[0]);
+		basis[2] = basisVector[index2].cross(basis[0]);	//cross(float3(-0.02f, 1, 0),basis[0]);
+		// pick the longer basis vector
+		basis[1] = ((basis[1]).magnitudeSquared() > (basis[2]).magnitudeSquared()) ? basis[1] : basis[2];
+		basis[1].normalize();
+
+		// get the index along the picked second axis
+		PxI32 p2 = maxIndexInDirSterid(verts,verts_count,basis[1],allow);
+		// if we got the same point, try the negative direction
+		if(p2 == p0 || p2 == p1)
+		{
+			p2 = maxIndexInDirSterid(verts,verts_count,-basis[1],allow);
+		}
+		// we failed to create the simplex the points are the same as the base line
+		if(p2 == p0 || p2 == p1)
+			return HullSimplex(-1,-1,-1,-1);
+
+		// set the second simplex edge
+		basis[1] = verts[p2] - verts[p0];
+		// get the last orthogonal direction
+		basis[2] = basis[1].cross(basis[0]);
+		basis[2].normalize();
+
+		// get the index along the last direction; retry the negative direction
+		// if the pick is degenerate (duplicate index or zero-volume tetrahedron)
+		PxI32 p3 = maxIndexInDirSterid(verts,verts_count,basis[2],allow);
+		if(p3==p0||p3==p1||p3==p2||!HullSimplex::hasVolume(verts, PxU32(p0), PxU32(p1), PxU32(p2), PxU32(p3)))
+		{
+			p3 = maxIndexInDirSterid(verts,verts_count,-basis[2],allow);
+		}
+		// if this index was already chosen terminate we dont have the simplex
+		if(p3==p0||p3==p1||p3==p2)
+			return HullSimplex(-1,-1,-1,-1);
+
+		PX_ASSERT(!(p0==p1||p0==p2||p0==p3||p1==p2||p1==p3||p2==p3));
+
+		// check the axis order: swap to keep a consistent (positive) orientation
+		if((verts[p3]-verts[p0]).dot((verts[p1]-verts[p0]).cross(verts[p2]-verts[p0])) < 0)
+		{
+			Ps::swap(p2,p3);
+		}
+		return HullSimplex(p0,p1,p2,p3);
+	}
+
+	//////////////////////////////////////////////////////////////////////////
+	// helper struct for hull expand
+	struct ExpandPlane
+	{
+		PxPlane	mPlane;				// plane equation of the facet
+		int		mAdjacency[3];		// neighbor per edge: 1 - 0, 2 - 0, 2 - 1
+		int		mExpandPoint;		// candidate expand vertex index, -1 if none
+		float	mExpandDistance;	// distance of the candidate from the plane
+		int		mIndices[3];		// triangle vertex indices
+		int		mTrisIndex;			// source triangle index
+
+		// initialize every index to "unset" (-1) and the distance to -FLT_MAX
+		ExpandPlane()
+			: mExpandPoint(-1), mExpandDistance(-FLT_MAX), mTrisIndex(-1)
+		{
+			mAdjacency[0] = mAdjacency[1] = mAdjacency[2] = -1;
+			mIndices[0] = mIndices[1] = mIndices[2] = -1;
+		}
+	};
+
+
+	//////////////////////////////////////////////////////////////////////////
+	// helper class for triangle representation
+	// Note: operator[] aliases x,y,z as a contiguous PxI32 array; this relies
+	// on the three members being laid out without padding.
+	class int3
+	{
+	public:
+		PxI32 x,y,z;
+		int3(){}
+		int3(PxI32 _x,PxI32 _y, PxI32 _z){x=_x;y=_y;z=_z;}
+		// read-only component access: 0=x, 1=y, 2=z
+		const PxI32& operator[](PxI32 i) const
+		{
+			return reinterpret_cast<const PxI32*>(this)[i];
+		}
+		// mutable component access: 0=x, 1=y, 2=z
+		PxI32& operator[](PxI32 i)
+		{
+			return reinterpret_cast<PxI32*>(this)[i];
+		}
+	};
+
+	//////////////////////////////////////////////////////////////////////////
+	// helper class for triangle representation
+	// Inherits the 3 vertex indices from int3; adds per-edge neighbor links
+	// and bookkeeping used by the incremental hull construction.
+	// Construction/destruction is restricted to HullTriangles (friend).
+	class Tri : public int3, public Ps::UserAllocated
+	{
+	public:
+		int3 n;			// neighbor triangle ids, one per edge
+		PxI32 id;		// index of this triangle inside HullTriangles (set by createTri)
+		PxI32 vmax;		// index of the furthest vertex above this triangle, -1 if none
+		float rise;		// distance of vmax along the triangle normal
+
+		// get the neighbor index for edge (a,b); asserts if (a,b) is not an edge
+		PxI32& neib(PxI32 a, PxI32 b)
+		{
+			// fallback lvalue returned only on the (asserted) failure path
+			static PxI32 er=-1;
+			for(PxI32 i=0;i<3;i++)
+			{
+				PxI32 i1= (i+1)%3;
+				PxI32 i2= (i+2)%3;
+				if((*this)[i]==a && (*this)[i1]==b) return n[i2];
+				if((*this)[i]==b && (*this)[i1]==a) return n[i2];
+			}
+			PX_ASSERT(0);
+			return er;
+		}
+
+		// get triangle normal
+		PxVec3 getNormal(const PxVec3* verts) const
+		{
+			// return the normal of the triangle
+			// inscribed by v0, v1, and v2
+			const PxVec3& v0 = verts[(*this)[0]];
+			const PxVec3& v1 = verts[(*this)[1]];
+			const PxVec3& v2 = verts[(*this)[2]];
+			PxVec3 cp= (v1-v0).cross(v2-v1);
+			float m= (cp).magnitude();
+			// degenerate triangle: return an arbitrary unit axis
+			if(m==0)
+				return PxVec3(1.f,0.0f,0.0f);
+			return cp*(1.0f/m);
+		}
+
+		// squared doubled area of the triangle
+		float getArea2(const PxVec3* verts) const
+		{
+			const PxVec3& v0 = verts[(*this)[0]];
+			const PxVec3& v1 = verts[(*this)[1]];
+			const PxVec3& v2 = verts[(*this)[2]];
+			return ((v0-v1).cross(v2-v0)).magnitudeSquared();
+		}
+
+		friend class HullTriangles;
+	protected:
+
+		// neighbors start unset (-1); id is assigned by HullTriangles::createTri
+		Tri(PxI32 a, PxI32 b, PxI32 c) : int3(a, b, c), n(-1,-1,-1)
+		{
+			vmax=-1;
+			rise = 0.0f;
+		}
+
+		~Tri()
+		{
+		}
+	};
+
+	//////////////////////////////////////////////////////////////////////////
+	// returns nonzero if point p lies further than epsilon above triangle t
+	// (measured along the triangle normal)
+	static PxI32 above(const PxVec3* vertices, const Tri& t, const PxVec3& p, float epsilon)
+	{
+		const PxVec3 normal = t.getNormal(vertices);
+		const PxVec3 toPoint = p - vertices[t[0]];
+		return (normal.dot(toPoint) > epsilon) ? 1 : 0;
+	}
+
+	//////////////////////////////////////////////////////////////////////////
+	// checks if given triangle does contain the vertex v
+	static int hasVert(const int3& t, int v)
+	{
+		for(int i = 0; i < 3; i++)
+		{
+			if(t[i] == v)
+				return 1;
+		}
+		return 0;
+	}
+
+	//////////////////////////////////////////////////////////////////////////
+	// helper class for hull triangles management
+	// Owns the Tri objects; deleted slots are left as NULL in the array so
+	// triangle ids (array indices) stay stable.
+	class HullTriangles
+	{
+	public:
+		HullTriangles()
+		{
+			mTriangles.reserve(256);
+		}
+
+		~HullTriangles()
+		{
+			for (PxU32 i = 0; i < mTriangles.size(); i++)
+			{
+				if(mTriangles[i])
+					delete mTriangles[i];
+			}
+			mTriangles.clear();
+		}
+
+		// access by triangle id; may be NULL for deleted slots
+		const Tri* operator[](PxU32 i) const
+		{
+			return mTriangles[i];
+		}
+
+		Tri* operator[](PxU32 i)
+		{
+			return mTriangles[i];
+		}
+
+
+		//////////////////////////////////////////////////////////////////////////
+
+		const Ps::Array<Tri*>& getTriangles() const
+		{
+			return mTriangles;
+		}
+		Ps::Array<Tri*>& getTriangles()
+		{
+			return mTriangles;
+		}
+
+		//////////////////////////////////////////////////////////////////////////
+
+		// number of slots (including NULL slots of deleted triangles)
+		PxU32 size() const
+		{
+			return mTriangles.size();
+		}
+
+		//////////////////////////////////////////////////////////////////////////
+		// create a triangle (a,b,c); its id is its index in the array
+		Tri* createTri(PxI32 a, PxI32 b, PxI32 c)
+		{
+			Tri* tri = PX_NEW_TEMP(Tri)(a, b, c);
+			tri->id = PxI32(mTriangles.size());
+			mTriangles.pushBack(tri);
+			return tri;
+		}
+
+		//////////////////////////////////////////////////////////////////////////
+		// delete triangle from the array (slot stays NULL to keep ids stable)
+		void deleteTri(Tri* tri)
+		{
+			PX_ASSERT((mTriangles)[PxU32(tri->id)]==tri);
+			(mTriangles)[PxU32(tri->id)] = NULL;
+			delete tri;
+		}
+
+		//////////////////////////////////////////////////////////////////////////
+		// check triangle: verify id and that each neighbor links back to t
+		void checkit(Tri* t) const
+		{
+			PX_ASSERT((mTriangles)[PxU32(t->id)]==t);
+			for(int i=0;i<3;i++)
+			{
+				const int i1=(i+1)%3;
+				const int i2=(i+2)%3;
+				const int a = (*t)[i1];
+				const int b = (*t)[i2];
+				PX_ASSERT(a!=b);
+				PX_ASSERT( (mTriangles)[PxU32(t->n[i])]->neib(b,a) == t->id);
+				PX_UNUSED(a);
+				PX_UNUSED(b);
+			}
+		}
+
+		//////////////////////////////////////////////////////////////////////////
+		// find the triangle, which has the greatest rise (distance in the direction of normal)
+		// return such a triangle if it does exist and if the rise is bigger than given epsilon
+		Tri* findExtrudable(float epsilon) const
+		{
+			Tri* t = NULL;
+			for(PxU32 i=0; i < mTriangles.size(); i++)
+			{
+				// skip NULL slots; keep the triangle with the largest rise
+				if(!t || ((mTriangles)[i] && (t->rise < (mTriangles)[i]->rise)))
+				{
+					t = (mTriangles)[i];
+				}
+			}
+			if(!t)
+				return NULL;
+			return (t->rise > epsilon) ? t : NULL;
+		}
+
+		//////////////////////////////////////////////////////////////////////////
+		// extrude the given triangle t0 towards vertex v: replace t0 by the 3
+		// triangles fanning out from v, fixing up all neighbor links
+		void extrude(Tri* t0, PxI32 v)
+		{
+			int3 t= *t0;
+			PxI32 n = PxI32(mTriangles.size());
+			// create the 3 extruded triangles
+			Tri* ta = createTri(v, t[1], t[2]);
+			ta->n = int3(t0->n[0],n+1,n+2);
+			(mTriangles)[PxU32(t0->n[0])]->neib(t[1],t[2]) = n+0;
+			Tri* tb = createTri(v, t[2], t[0]);
+			tb->n = int3(t0->n[1],n+2,n+0);
+			(mTriangles)[PxU32(t0->n[1])]->neib(t[2],t[0]) = n+1;
+			Tri* tc = createTri(v, t[0], t[1]);
+			tc->n = int3(t0->n[2],n+0,n+1);
+			(mTriangles)[PxU32(t0->n[2])]->neib(t[0],t[1]) = n+2;
+			checkit(ta);
+			checkit(tb);
+			checkit(tc);
+
+			// check if the added triangle is not already inserted
+			// in that case we remove both and fix the neighbors
+			// for the remaining triangles
+			if(hasVert(*(mTriangles)[PxU32(ta->n[0])],v))
+				removeb2b(ta,(mTriangles)[PxU32(ta->n[0])]);
+			if(hasVert(*(mTriangles)[PxU32(tb->n[0])],v))
+				removeb2b(tb,(mTriangles)[PxU32(tb->n[0])]);
+			if(hasVert(*(mTriangles)[PxU32(tc->n[0])],v))
+				removeb2b(tc,(mTriangles)[PxU32(tc->n[0])]);
+			deleteTri(t0);
+		}
+
+	protected:
+		//////////////////////////////////////////////////////////////////////////
+		// relink the neighbors of two back-to-back triangles s and t so they
+		// reference each other's outer neighbors instead of s/t
+		void b2bfix(Tri* s, Tri* t)
+		{
+			for(int i=0;i<3;i++)
+			{
+				const int i1=(i+1)%3;
+				const int i2=(i+2)%3;
+				const int a = (*s)[i1];
+				const int b = (*s)[i2];
+				PX_ASSERT((mTriangles)[PxU32(s->neib(a,b))]->neib(b,a) == s->id);
+				PX_ASSERT((mTriangles)[PxU32(t->neib(a,b))]->neib(b,a) == t->id);
+				(mTriangles)[PxU32(s->neib(a,b))]->neib(b,a) = t->neib(b,a);
+				(mTriangles)[PxU32(t->neib(b,a))]->neib(a,b) = s->neib(a,b);
+			}
+		}
+
+		//////////////////////////////////////////////////////////////////////////
+		// remove the 2 triangles which are the same and fix the neighbor triangles
+		void removeb2b(Tri* s, Tri* t)
+		{
+			b2bfix(s,t);
+			deleteTri(s);
+			deleteTri(t);
+		}
+
+
+	private:
+		Ps::Array<Tri*> mTriangles;
+	};
+
+ }
+
+ //////////////////////////////////////////////////////////////////////////
+
+// Constructor: forwards the mesh descriptor and cooking parameters to the
+// base class; mFinished flags whether a hull was successfully produced.
+InflationConvexHullLib::InflationConvexHullLib(const PxConvexMeshDesc& desc, const PxCookingParams& params)
+	: ConvexHullLib(desc,params), mFinished(false)
+{
+}
+
+//////////////////////////////////////////////////////////////////////////
+// Main function to create the hull.
+// Construct the hull with set input parameters - ConvexMeshDesc and CookingParams.
+// \return eSUCCESS on success, eZERO_AREA_TEST_FAILED or eFAILURE otherwise
+PxConvexMeshCookingResult::Enum InflationConvexHullLib::createConvexHull()
+{
+	PxConvexMeshCookingResult::Enum res = PxConvexMeshCookingResult::eFAILURE;
+
+	// cleanupVertices can produce up to 8 vertices even from smaller input
+	PxU32 vcount = mConvexMeshDesc.points.count;
+	if ( vcount < 8 )
+		vcount = 8;
+
+	// Allocate one additional PxVec3 so that a V4 (16 byte) load of the last
+	// vertex in VolumeInteration stays inside the buffer.
+	// Fix: the previous code allocated sizeof(PxVec3)*vcount + 1, i.e. a single
+	// extra byte, which still allowed a 3 byte overrun on a 16 byte load.
+	PxVec3* vsource = reinterpret_cast<PxVec3*> (PX_ALLOC_TEMP( sizeof(PxVec3)*(vcount + 1), "PxVec3"));
+	PxVec3 scale;
+	PxVec3 center;
+	PxU32 ovcount;
+
+	// cleanup the vertices first
+	if(!cleanupVertices(mConvexMeshDesc.points.count, reinterpret_cast<const PxVec3*> (mConvexMeshDesc.points.data), mConvexMeshDesc.points.stride,
+		ovcount, vsource, scale, center ))
+	{
+		// Fix: this early-out previously leaked vsource
+		PX_FREE(vsource);
+		return res;
+	}
+
+	// scale vertices back to their original size.
+	for (PxU32 i=0; i<ovcount; i++)
+	{
+		PxVec3& v = vsource[i];
+		v.multiply(scale);
+	}
+
+	// compute the actual hull
+	ConvexHullLibResult::ErrorCode hullResult = computeHull(ovcount,vsource);
+	if(hullResult == ConvexHullLibResult::eSUCCESS)
+	{
+		mFinished = true;
+		res = PxConvexMeshCookingResult::eSUCCESS;
+	}
+	else if(hullResult == ConvexHullLibResult::eZERO_AREA_TEST_FAILED)
+	{
+		res = PxConvexMeshCookingResult::eZERO_AREA_TEST_FAILED;
+	}
+
+	PX_FREE(vsource);
+	return res;
+}
+
+//////////////////////////////////////////////////////////////////////////
+// computes the hull and stores results into mHullResult
+// Runs calchull on the cleaned vertices and converts the resulting
+// ConvexHull (edges grouped by facet) into the flat index/polygon/vertex
+// arrays of mHullResult. Takes ownership of (and deletes) the hull.
+ConvexHullLibResult::ErrorCode InflationConvexHullLib::computeHull(PxU32 vertsCount, const PxVec3* verts)
+{
+	PX_ASSERT(verts);
+	PX_ASSERT(vertsCount > 0);
+
+	ConvexHull* hullOut = NULL;
+	ConvexHullLibResult::ErrorCode res = calchull(verts, vertsCount, hullOut);
+	if ((res == ConvexHullLibResult::eFAILURE) || (res == ConvexHullLibResult::eZERO_AREA_TEST_FAILED))
+		return res;
+
+	PX_ASSERT(hullOut);
+
+	// parse the hullOut and fill the result with vertices and polygons
+	mHullResult.mIndices = reinterpret_cast<PxU32*> (PX_ALLOC_TEMP(sizeof(PxU32)*(hullOut->getEdges().size()), "PxU32"));
+	mHullResult.mIndexCount=hullOut->getEdges().size();
+
+	mHullResult.mPolygonCount = hullOut->getFacets().size();
+	mHullResult.mPolygons = reinterpret_cast<PxHullPolygon*> (PX_ALLOC_TEMP(sizeof(PxHullPolygon)*mHullResult.mPolygonCount, "PxHullPolygon"));
+
+	// Allocate one additional PxVec3 for a V4 (16 byte) safe load of the last
+	// vertex in VolumeInteration.
+	// Fix: the previous code allocated sizeof(PxVec3)*size + 1, i.e. a single
+	// extra byte, which still allowed a 3 byte overrun on a 16 byte load.
+	mHullResult.mVertices = reinterpret_cast<PxVec3*> (PX_ALLOC_TEMP(sizeof(PxVec3)*(hullOut->getVertices().size() + 1), "PxVec3"));
+	mHullResult.mVcount = hullOut->getVertices().size();
+	PxMemCopy(mHullResult.mVertices,hullOut->getVertices().begin(),sizeof(PxVec3)*mHullResult.mVcount);
+
+	// Edges are stored facet by facet (edge.p == facet id); walk them and emit
+	// one PxHullPolygon per facet with its contiguous run of edge vertices.
+	PxU32 i=0;		// running edge/index cursor
+	PxU32 k=0;		// current facet/polygon index
+	PxU32 j = 1;	// number of edges in the current facet
+	while(i<hullOut->getEdges().size())
+	{
+		j=1;
+		PxHullPolygon& polygon = mHullResult.mPolygons[k];
+		// get num indices per polygon (count edges sharing facet id)
+		while(j+i < hullOut->getEdges().size() && hullOut->getEdges()[i].p == hullOut->getEdges()[i+j].p)
+		{
+			j++;
+		}
+		polygon.mNbVerts = Ps::to16(j);
+		polygon.mIndexBase = Ps::to16(i);
+
+		// get the plane
+		polygon.mPlane[0] = hullOut->getFacets()[k].n[0];
+		polygon.mPlane[1] = hullOut->getFacets()[k].n[1];
+		polygon.mPlane[2] = hullOut->getFacets()[k].n[2];
+
+		polygon.mPlane[3] = hullOut->getFacets()[k].d;
+
+		// copy the facet's vertex indices into the flat index array
+		while(j--)
+		{
+			mHullResult.mIndices[i] = hullOut->getEdges()[i].v;
+			i++;
+		}
+		k++;
+	}
+
+	PX_ASSERT(k==hullOut->getFacets().size());
+	PX_DELETE(hullOut);
+
+	return res;
+}
+
+//////////////////////////////////////////////////////////////////////////
+// internal function taking the cleaned vertices and constructing the
+// new hull from them.
+// 1. using the incremental algorithm create base hull from the input vertices
+// 2. if we reached the vertex limit, we expand the hull
+// 3. otherwise we compute the new planes and inflate them
+// 4. we crop the AABB with the computed planes to construct the new hull
+// On success, hullOut is a heap-allocated hull owned by the caller.
+ConvexHullLibResult::ErrorCode InflationConvexHullLib::calchull(const PxVec3* verts, PxU32 verts_count, ConvexHull*& hullOut)
+{
+	// calculate the actual hull using the incremental algorithm
+	local::HullTriangles triangles;
+	ConvexHullLibResult::ErrorCode rc = calchullgen(verts,verts_count, triangles);
+	if ((rc == ConvexHullLibResult::eFAILURE) || (rc == ConvexHullLibResult::eZERO_AREA_TEST_FAILED))
+		return rc;
+
+	// if vertex limit reached construct the hullOut from the expanded planes
+	if(rc == ConvexHullLibResult::eVERTEX_LIMIT_REACHED)
+	{
+		// plane shifting expands facet planes; otherwise the OBB-slicing variant is used
+		if(mConvexMeshDesc.flags & PxConvexFlag::ePLANE_SHIFTING)
+			rc = expandHull(verts,verts_count,triangles,hullOut);
+		else
+			rc = expandHullOBB(verts,verts_count,triangles,hullOut);
+		if ((rc == ConvexHullLibResult::eFAILURE) || (rc == ConvexHullLibResult::eZERO_AREA_TEST_FAILED))
+			return rc;
+
+		return ConvexHullLibResult::eSUCCESS;
+	}
+
+	// below the vertex limit: derive inflated planes from the triangles...
+	Ps::Array<PxPlane> planes;
+	if(!calchullplanes(verts,triangles,planes))
+		return ConvexHullLibResult::eFAILURE;
+
+	// ...and rebuild the hull by cropping the AABB with those planes
+	if(!overhull(verts, verts_count, planes,hullOut))
+		return ConvexHullLibResult::eFAILURE;
+
+	return ConvexHullLibResult::eSUCCESS;
+}
+
+//////////////////////////////////////////////////////////////////////////
+// computes the actual hull using the incremental algorithm
+// in - vertices, numVertices
+// out - triangles
+// 1. construct the initial simplex
+// 2. each step takes the furthest vertex from the hull and adds it
+// 3. terminate if we reached the hull limit or all verts are used
+ConvexHullLibResult::ErrorCode InflationConvexHullLib::calchullgen(const PxVec3* verts, PxU32 verts_count, local::HullTriangles& triangles)
+{
+	// at least 4 verts so we can construct a simplex
+	// limit is 256 for OBB slicing or fixed limit for plane shifting
+	PxU32 vlimit = (mConvexMeshDesc.flags & PxConvexFlag::ePLANE_SHIFTING) ? mConvexMeshDesc.vertexLimit : 256u;
+	PxU32 numHullVerts = 4;	// the initial simplex contributes 4 hull vertices
+	if(verts_count < 4)
+		return ConvexHullLibResult::eFAILURE;
+
+	PxU32 j;
+	PxBounds3 bounds;
+	bounds.setEmpty();
+
+	// isextreme[v] == 1 once v has been added to the hull; allow[] gates candidate selection
+	Ps::Array<PxU8> isextreme;
+	isextreme.reserve(verts_count);
+
+	Ps::Array<PxU8> allow;
+	allow.reserve(verts_count);
+
+	for(j=0; j < verts_count; j++)
+	{
+		allow.pushBack(1);
+		isextreme.pushBack(0);
+		bounds.include(verts[j]);
+	}
+
+	// tolerances scale with the input extents; mTolerance stays a fixed small value
+	// (both are consumed later by the plane-cropping passes)
+	const PxVec3 dimensions = bounds.getDimensions();
+	const float epsilon = dimensions.magnitude() * local::DIMENSION_EPSILON_MULTIPLY;
+	mTolerance = 0.001f;
+	mPlaneTolerance = epsilon;
+
+	const bool useAreaTest = mConvexMeshDesc.flags & PxConvexFlag::eCHECK_ZERO_AREA_TRIANGLES ? true : false;
+	const float areaEpsilon = useAreaTest ? mCookingParams.areaTestEpsilon * 2.0f : epsilon*epsilon*0.1f;
+
+	// find the simplex
+	local::HullSimplex p = local::findSimplex(verts,verts_count,allow, dimensions);
+	if(p.x==-1) // simplex failed
+		return ConvexHullLibResult::eFAILURE;
+
+	// a valid interior point
+	PxVec3 center = (verts[p[0]]+verts[p[1]]+verts[p[2]]+verts[p[3]]) /4.0f;
+
+	// add the simplex triangles into the triangle array
+	local::Tri *t0 = triangles.createTri(p[2], p[3], p[1]); t0->n=local::int3(2,3,1);
+	local::Tri *t1 = triangles.createTri(p[3], p[2], p[0]); t1->n=local::int3(3,2,0);
+	local::Tri *t2 = triangles.createTri(p[0], p[1], p[3]); t2->n=local::int3(0,1,3);
+	local::Tri *t3 = triangles.createTri(p[1], p[0], p[2]); t3->n=local::int3(1,0,2);
+	// mark the simplex indices as extremes
+	isextreme[PxU32(p[0])]=isextreme[PxU32(p[1])]=isextreme[PxU32(p[2])]=isextreme[PxU32(p[3])]=1;
+
+	// check if the added simplex triangles are valid
+	triangles.checkit(t0);
+	triangles.checkit(t1);
+	triangles.checkit(t2);
+	triangles.checkit(t3);
+
+	// parse the initial triangles and set max vertex along the normal and its distance
+	for(j=0;j< triangles.size(); j++)
+	{
+		local::Tri *t=(triangles.getTriangles())[j];
+		PX_ASSERT(t);
+		PX_ASSERT(t->vmax<0);
+		PxVec3 n= (*t).getNormal(verts);
+		t->vmax = local::maxIndexInDirSterid(verts,verts_count,n,allow);
+		t->rise = n.dot(verts[t->vmax]-verts[(*t)[0]]);
+
+		// use the areaTest to drop small triangles, which can cause trouble to the simulation,
+		// if we drop triangles from the initial simplex, we let the user know that the provided points form
+		// a simplex which is too small for given area threshold
+		// (only t0..t3 exist at this point, so deleting these four clears the whole set)
+		if(useAreaTest && ((verts[(*t)[1]]-verts[(*t)[0]]).cross(verts[(*t)[2]]-verts[(*t)[1]])).magnitude() < areaEpsilon)
+		{
+			triangles.deleteTri(t0);
+			triangles.deleteTri(t1);
+			triangles.deleteTri(t2);
+			triangles.deleteTri(t3);
+			return ConvexHullLibResult::eZERO_AREA_TEST_FAILED;
+		}
+	}
+
+	local::Tri *te;
+	// lower the vertex limit, we did already set 4 verts
+	vlimit-=4;
+	// iterate over triangles till we reach the limit or we dont have triangles with
+	// significant rise or we cannot add any triangles at all
+	while(vlimit >0 && ((te = triangles.findExtrudable(epsilon)) != NULL))
+	{
+		PxI32 v = te->vmax;
+		PX_ASSERT(!isextreme[PxU32(v)]); // wtf we've already done this vertex
+		// set as extreme point
+		isextreme[PxU32(v)]=1;
+
+		j=triangles.size();
+		// go through the triangles and extrude the extreme point if it is above it
+		while(j--)
+		{
+			if(!(triangles)[j])
+				continue;
+			const local::Tri& t= *(triangles)[j];
+			if(above(verts,t,verts[v],0.01f*epsilon))
+			{
+				triangles.extrude((triangles)[j],v);
+			}
+		}
+
+		// now check for those degenerate cases where we have a flipped triangle or a really skinny triangle
+		// note: new triangles are appended, so once we hit one without v we can stop scanning backwards
+		j=triangles.size();
+		while(j--)
+		{
+			if(!(triangles)[j])
+				continue;
+			if(!hasVert(*(triangles)[j],v))
+				break;
+			local::int3 nt=*(triangles)[j];
+			if(above(verts,*(triangles)[j],center,0.01f*epsilon) || ((verts[nt[1]]-verts[nt[0]]).cross(verts[nt[2]]-verts[nt[1]])).magnitude() < areaEpsilon)
+			{
+				// re-extrude through the neighbor and restart the backwards scan
+				local::Tri *nb = (triangles)[PxU32((triangles)[j]->n[0])];
+				PX_ASSERT(nb);
+				PX_ASSERT(!hasVert(*nb,v));
+				PX_ASSERT(PxU32(nb->id)<j);
+				triangles.extrude(nb,v);
+				j=triangles.size();
+			}
+		}
+
+		// get new rise and vmax for the new triangles
+		j=triangles.size();
+		while(j--)
+		{
+			local::Tri *t=(triangles)[j];
+			if(!t)
+				continue;
+			if(t->vmax >= 0)
+				break;
+			PxVec3 n= t->getNormal(verts);
+			t->vmax = local::maxIndexInDirSterid(verts,verts_count,n,allow);
+			if(isextreme[PxU32(t->vmax)])
+			{
+				t->vmax=-1; // already done that vertex - algorithm needs to be able to terminate.
+			}
+			else
+			{
+				t->rise = n.dot(verts[t->vmax]-verts[(*t)[0]]);
+			}
+		}
+		// we added a vertex we lower the limit
+		vlimit --;
+		numHullVerts++;
+	}
+
+	// report the vertex-limit fallback: plane shifting exhausts vlimit directly,
+	// the OBB path checks the counted hull vertices against the descriptor limit
+	if((mConvexMeshDesc.flags & PxConvexFlag::ePLANE_SHIFTING) && vlimit == 0)
+		return ConvexHullLibResult::eVERTEX_LIMIT_REACHED;
+	if (!(mConvexMeshDesc.flags & PxConvexFlag::ePLANE_SHIFTING) && numHullVerts > mConvexMeshDesc.vertexLimit)
+		return ConvexHullLibResult::eVERTEX_LIMIT_REACHED;
+
+	return ConvexHullLibResult::eSUCCESS;
+}
+
+//////////////////////////////////////////////////////////////////////////
+// expand the hull from the limited triangles set
+// expand hull will do following steps:
+// 1. get planes from triangles that form the best hull with given vertices
+// 2. compute the adjacency information for the planes
+// 3. expand the planes to have all vertices inside the planes volume
+// 4. compute new points by 3 adjacency planes intersections
+// 5. take those points and create the hull from them
+ConvexHullLibResult::ErrorCode InflationConvexHullLib::expandHull(const PxVec3* verts, PxU32 vertsCount, const local::HullTriangles& triangles, ConvexHull*& hullOut)
+{
+#if PX_DEBUG
+	// debug-only sanity check: after expansion no input vertex may lie outside any plane
+	struct LocalTests
+	{
+		static bool PlaneCheck(const PxVec3* verts_, PxU32 verts_count_, Ps::Array<local::ExpandPlane>& planes)
+		{
+			for(PxU32 i=0;i<planes.size();i++)
+			{
+				const local::ExpandPlane& expandPlane = planes[i];
+				if(expandPlane.mTrisIndex != -1)
+				{
+					for(PxU32 j=0;j<verts_count_;j++)
+					{
+						const PxVec3& vertex = verts_[j];
+
+						PX_ASSERT(expandPlane.mPlane.distance(vertex) < 0.02f);
+
+						if(expandPlane.mPlane.distance(vertex) > 0.02f)
+						{
+							return false;
+						}
+					}
+				}
+			}
+			return true;
+		}
+	};
+#endif
+
+
+	Ps::Array<local::ExpandPlane> planes;
+
+	// need planes and the adjacency for the triangle
+	// NOTE(review): this numPoints scan reads the neighbor indices t->n, not the vertex
+	// indices (*t)[l], and the value is reset to 0 below before it is ever used — this
+	// first computation looks dead; confirm before removing.
+	int numPoints = 0;
+	for(PxU32 i=0; i < triangles.size();i++)
+	{
+		local::ExpandPlane expandPlane;
+		if((triangles)[i])
+		{
+			// build the triangle plane (normal + signed offset through vertex 0)
+			const local::Tri *t=(triangles)[i];
+			PxPlane p;
+			p.n = t->getNormal(verts);
+			p.d = -p.n.dot(verts[(*t)[0]]);
+			expandPlane.mPlane = p;
+
+			for (int l = 0; l < 3; l++)
+			{
+				if(t->n[l] > numPoints)
+				{
+					numPoints = t->n[l];
+				}
+			}
+
+			// find the three edge-adjacent triangles by counting shared vertices per edge
+			for(PxU32 j=0;j<triangles.size();j++)
+			{
+				if((triangles)[j] && i != j)
+				{
+					const local::Tri *testTris=(triangles)[j];
+
+					int numId0 = 0;
+					int numId1 = 0;
+					int numId2 = 0;
+
+					for (int k = 0; k < 3; k++)
+					{
+						int testI = (*testTris)[k];
+						if(testI == (*t)[0] || testI == (*t)[1])
+						{
+							numId0++;
+						}
+						if(testI == (*t)[0] || testI == (*t)[2])
+						{
+							numId1++;
+						}
+						if(testI == (*t)[2] || testI == (*t)[1])
+						{
+							numId2++;
+						}
+					}
+
+					// two shared vertices == shared edge -> triangle j is the neighbor across that edge
+					if(numId0 == 2)
+					{
+						PX_ASSERT(expandPlane.mAdjacency[0] == -1);
+						expandPlane.mAdjacency[0] = int(j);
+					}
+					if(numId1 == 2)
+					{
+						PX_ASSERT(expandPlane.mAdjacency[1] == -1);
+						expandPlane.mAdjacency[1] = int(j);
+					}
+					if(numId2 == 2)
+					{
+						PX_ASSERT(expandPlane.mAdjacency[2] == -1);
+						expandPlane.mAdjacency[2] = int(j);
+					}
+				}
+			}
+
+			expandPlane.mTrisIndex = int(i);
+		}
+		planes.pushBack(expandPlane);
+	}
+	numPoints++;
+
+	// go over the planes now and expand them
+	// record, per plane, the farthest vertex in front of it
+	for(PxU32 i=0;i< vertsCount;i++)
+	{
+		const PxVec3& vertex = verts[i];
+
+		for(PxU32 j=0;j< triangles.size();j++)
+		{
+			local::ExpandPlane& expandPlane = planes[j];
+			if(expandPlane.mTrisIndex != -1)
+			{
+				float dist = expandPlane.mPlane.distance(vertex);
+				if(dist > 0 && dist > expandPlane.mExpandDistance)
+				{
+					expandPlane.mExpandDistance = dist;
+					expandPlane.mExpandPoint = int(i);
+				}
+			}
+		}
+	}
+
+	// expand the planes
+	// shift each plane outward so every input vertex ends up behind it
+	for(PxU32 i=0;i<planes.size();i++)
+	{
+		local::ExpandPlane& expandPlane = planes[i];
+		if(expandPlane.mTrisIndex != -1)
+		{
+			if(expandPlane.mExpandPoint >= 0)
+				expandPlane.mPlane.d -= expandPlane.mExpandDistance;
+		}
+	}
+
+	PX_ASSERT(LocalTests::PlaneCheck(verts,vertsCount,planes));
+
+	Ps::Array <int> translateTable;
+	Ps::Array <PxVec3> points;
+	numPoints = 0;
+
+	// find new triangle points and store them
+	// each hull corner becomes the intersection of this plane with two adjacent planes
+	for(PxU32 i=0;i<planes.size();i++)
+	{
+		local::ExpandPlane& expandPlane = planes[i];
+		if(expandPlane.mTrisIndex != -1)
+		{
+			const local::Tri *expandTri=(triangles)[PxU32(expandPlane.mTrisIndex)];
+
+			for (int j = 0; j < 3; j++)
+			{
+				local::ExpandPlane& plane1 = planes[PxU32(expandPlane.mAdjacency[j])];
+				local::ExpandPlane& plane2 = planes[PxU32(expandPlane.mAdjacency[(j + 1)%3])];
+				const local::Tri *tri1=(triangles)[PxU32(expandPlane.mAdjacency[j])];
+				const local::Tri *tri2=(triangles)[PxU32(expandPlane.mAdjacency[(j + 1)%3])];
+
+				// locate the vertex shared by all three triangles and its local index in each
+				int indexE = -1;
+				int index1 = -1;
+				int index2 = -1;
+				for (int l = 0; l < 3; l++)
+				{
+					for (int k = 0; k < 3; k++)
+					{
+						for (int m = 0; m < 3; m++)
+						{
+							if((*expandTri)[l] == (*tri1)[k] && (*expandTri)[l] == (*tri2)[m])
+							{
+								indexE = l;
+								index1 = k;
+								index2 = m;
+							}
+						}
+					}
+				}
+
+				PX_ASSERT(indexE != -1);
+
+				// dedupe: has this original vertex already produced an output point?
+				int foundIndex = -1;
+				for (PxU32 u = 0; u < translateTable.size(); u++)
+				{
+					if(translateTable[u] == ((*expandTri)[indexE]))
+					{
+						foundIndex = int(u);
+						break;
+					}
+				}
+
+				PxVec3 point = threePlaneIntersection(expandPlane.mPlane, plane1.mPlane, plane2.mPlane);
+
+				if(foundIndex == -1)
+				{
+					expandPlane.mIndices[indexE] = numPoints;
+					plane1.mIndices[index1] = numPoints;
+					plane2.mIndices[index2] = numPoints;
+
+					points.pushBack(point);
+					translateTable.pushBack((*expandTri)[indexE]);
+					numPoints++;
+				}
+				else
+				{
+					// keep the better intersection if the stored point lies outside any of the three planes
+					if(expandPlane.mPlane.distance(points[PxU32(foundIndex)]) < -0.02f || plane1.mPlane.distance(points[PxU32(foundIndex)]) < -0.02f || plane2.mPlane.distance(points[PxU32(foundIndex)]) < -0.02f)
+					{
+						points[PxU32(foundIndex)] = point;
+					}
+
+					expandPlane.mIndices[indexE] = foundIndex;
+					plane1.mIndices[index1] = foundIndex;
+					plane2.mIndices[index2] = foundIndex;
+				}
+
+			}
+		}
+	}
+
+	// construct again the hull from the new points
+	local::HullTriangles outTriangles;
+	ConvexHullLibResult::ErrorCode rc = calchullgen(points.begin(),PxU32(numPoints), outTriangles);
+	if ((rc == ConvexHullLibResult::eFAILURE) || (rc == ConvexHullLibResult::eZERO_AREA_TEST_FAILED))
+		return rc;
+
+	// cleanup the unused vertices
+	// translateTable[i] maps old point index -> compacted index in usedVertices
+	Ps::Array<PxVec3> usedVertices;
+	translateTable.clear();
+	translateTable.resize(points.size());
+	for (PxU32 i = 0; i < points.size(); i++)
+	{
+		for (PxU32 j = 0; j < outTriangles.size(); j++)
+		{
+			const local::Tri* tri = outTriangles[j];
+			if(tri)
+			{
+				if((*tri)[0] == int(i) || (*tri)[1] == int(i) || (*tri)[2] == int(i))
+				{
+					translateTable[i] = int(usedVertices.size());
+					usedVertices.pushBack(points[i]);
+					break;
+				}
+			}
+		}
+	}
+
+	// now construct the hullOut
+	Ps::Array<PxPlane> inputPlanes; // < just a blank input planes
+	ConvexHull* c = PX_NEW_TEMP(ConvexHull)(inputPlanes);
+
+	// copy the vertices
+	for (PxU32 i = 0; i < usedVertices.size(); i++)
+	{
+		c->getVertices().pushBack(usedVertices[i]);
+	}
+
+	// copy planes and create edges
+	// each surviving triangle becomes one facet with three half-edges
+	PxU32 numFaces = 0;
+	for (PxU32 i = 0; i < outTriangles.size(); i++)
+	{
+		const local::Tri* tri = outTriangles[i];
+		if(tri)
+		{
+			PxPlane triPlane;
+			triPlane.n = tri->getNormal(points.begin());
+			triPlane.d = -triPlane.n.dot(points[PxU32((*tri)[0])]);
+			c->getFacets().pushBack(triPlane);
+
+			for (int j = 0; j < 3; j++)
+			{
+				ConvexHull::HalfEdge edge;
+				edge.p = Ps::to8(numFaces);
+				edge.v = Ps::to8(translateTable[PxU32((*tri)[j])]);
+				c->getEdges().pushBack(edge);
+			}
+			numFaces++;
+		}
+	}
+	hullOut = c;
+	return ConvexHullLibResult::eSUCCESS;
+}
+
+//////////////////////////////////////////////////////////////////////////
+// expand the hull from the limited triangles set
+// 1. collect all planes
+// 2. create OBB from the input verts
+// 3. slice the OBB with the planes
+// 4. iterate till vlimit is reached
+ConvexHullLibResult::ErrorCode InflationConvexHullLib::expandHullOBB(const PxVec3* verts, PxU32 vertsCount, const local::HullTriangles& triangles, ConvexHull*& hullOut)
+{
+	Ps::Array<PxPlane> expandPlanes;
+	expandPlanes.reserve(triangles.size());
+
+	// temporary index/polygon buffers used only to feed computeOBBFromConvex below
+	// NOTE(review): allocated with PX_NEW_TEMP[] but released with PX_FREE_AND_RESET —
+	// assumed consistent for POD arrays under the PhysX allocator macros; confirm.
+	PxU32* indices = PX_NEW_TEMP(PxU32)[triangles.size()*3];
+	PxHullPolygon* polygons = PX_NEW_TEMP(PxHullPolygon)[triangles.size()];
+
+	PxU16 currentIndex = 0;
+	PxU32 currentFace = 0;
+
+	// collect expand planes
+	for (PxU32 i = 0; i < triangles.size(); i++)
+	{
+		local::ExpandPlane expandPlane;
+		if ((triangles)[i])
+		{
+			const local::Tri *t = (triangles)[i];
+			PxPlane p;
+			p.n = t->getNormal(verts);
+			p.d = -p.n.dot(verts[(*t)[0]]);
+
+			// store the polygon
+			PxHullPolygon& polygon = polygons[currentFace++];
+			polygon.mIndexBase = currentIndex;
+			polygon.mNbVerts = 3;
+			polygon.mPlane[0] = p.n[0];
+			polygon.mPlane[1] = p.n[1];
+			polygon.mPlane[2] = p.n[2];
+			polygon.mPlane[3] = p.d;
+
+			// store the index list
+			indices[currentIndex++] = PxU32((*t)[0]);
+			indices[currentIndex++] = PxU32((*t)[1]);
+			indices[currentIndex++] = PxU32((*t)[2]);
+
+			expandPlanes.pushBack(p);
+		}
+	}
+
+	PxTransform obbTransform;
+	PxVec3 sides;
+
+	// compute the OBB
+	// describe the triangle hull as a convex mesh so the OBB fitter can consume it
+	PxConvexMeshDesc convexDesc;
+	convexDesc.points.count = vertsCount;
+	convexDesc.points.data = verts;
+	convexDesc.points.stride = sizeof(PxVec3);
+
+	convexDesc.indices.count = currentIndex;
+	convexDesc.indices.stride = sizeof(PxU32);
+	convexDesc.indices.data = indices;
+
+	convexDesc.polygons.count = currentFace;
+	convexDesc.polygons.data = polygons;
+	convexDesc.polygons.stride = sizeof(PxHullPolygon);
+
+	convexDesc.flags = mConvexMeshDesc.flags;
+
+	computeOBBFromConvex(convexDesc, sides, obbTransform);
+
+	// free the memory used for the convex mesh desc
+	PX_FREE_AND_RESET(indices);
+	PX_FREE_AND_RESET(polygons);
+
+	// crop the OBB
+	PxU32 maxplanes = PxMin(PxU32(256), expandPlanes.size());
+
+	// start from the OBB box hull and iteratively slice it with candidate planes
+	ConvexHull* c = PX_NEW_TEMP(ConvexHull)(sides*0.5f, obbTransform, expandPlanes);
+
+	const float planeTolerance = mPlaneTolerance;
+	const float epsilon = mTolerance;
+
+	PxI32 k;
+	while (maxplanes-- && (k = c->findCandidatePlane(planeTolerance, epsilon)) >= 0)
+	{
+		ConvexHull* tmp = c;
+		c = convexHullCrop(*tmp, expandPlanes[PxU32(k)], planeTolerance);
+		if (c == NULL)
+		{
+			// crop failed: keep the previous hull and stop
+			c = tmp;
+			break;
+		} // might want to debug this case better!!!
+		if (!c->assertIntact(planeTolerance))
+		{
+			// cropped hull is inconsistent: roll back and stop
+			PX_DELETE(c);
+			c = tmp;
+			break;
+		} // might want to debug this case better too!!!
+
+		// check for vertex limit
+		if (c->getVertices().size() > mConvexMeshDesc.vertexLimit)
+		{
+			PX_DELETE(c);
+			c = tmp;
+			maxplanes = 0;
+			break;
+		}
+		PX_DELETE(tmp);
+	}
+
+	PX_ASSERT(c->assertIntact(planeTolerance));
+
+	// ownership of c transfers to the caller via hullOut
+	hullOut = c;
+
+	return ConvexHullLibResult::eSUCCESS;
+}
+
+//////////////////////////////////////////////////////////////////////////
+// calculate the planes from given triangles
+// 1. merge triangles with similar normal
+// 2. inflate the planes
+// 3. store the new triangles
+bool InflationConvexHullLib::calchullplanes(const PxVec3* verts, local::HullTriangles& triangles, Ps::Array<PxPlane>& planes)
+{
+	PxU32 i,j;
+	float maxdot_minang = cosf(Ps::degToRad(local::MIN_ADJACENT_ANGLE));
+
+	// parse the triangles and check the angle between them, if the angle is below MIN_ADJACENT_ANGLE
+	// merge the triangles into single plane
+	// (deleteTri is assumed to null the slot, so the re-checks of triangles[i]/[j] below stay valid)
+	for(i=0;i<triangles.size();i++)
+	{
+		if(triangles[i])
+		{
+			for(j=i+1;j<triangles.size();j++)
+			{
+				if(triangles[i] && triangles[j])
+				{
+					local::Tri *ti = triangles[i];
+					local::Tri *tj = triangles[j];
+					PxVec3 ni = ti->getNormal(verts);
+					PxVec3 nj = tj->getNormal(verts);
+					if(ni.dot(nj) > maxdot_minang)
+					{
+						// somebody has to die, keep the biggest triangle
+						if( ti->getArea2(verts) < tj->getArea2(verts))
+						{
+							triangles.deleteTri(triangles[i]);
+						}
+						else
+						{
+							triangles.deleteTri(triangles[j]);
+						}
+					}
+				}
+			}
+		}
+	}
+
+	// now add for each triangle that left a plane
+	// each plane is inflated outward by the cooking skinWidth
+	for(i=0;i<triangles.size();i++)
+	{
+		if(triangles[i])
+		{
+
+			local::Tri *t = triangles[i];
+			PxVec3 n = t->getNormal(verts);
+			float d = -n.dot(verts[(*t)[0]]) - mCookingParams.skinWidth;
+			PxPlane p(n,d);
+			planes.pushBack(p);
+		}
+	}
+
+	// delete the triangles we don't need them anymore
+	for(i=0;i< triangles.size(); i++)
+	{
+		if(triangles[i])
+		{
+			triangles.deleteTri(triangles[i]);
+		}
+	}
+	triangles.getTriangles().clear();
+	return true;
+}
+
+//////////////////////////////////////////////////////////////////////////
+// create new points from the given planes, which form the new hull
+// 1. form an AABB from the input verts
+// 2. slice the AABB with the planes
+// 3. if sliced hull is still valid use it, otherwise step back, try different plane
+// 4. exit if limit reached or all planes added
+bool InflationConvexHullLib::overhull(const PxVec3* verts, PxU32 vertsCount,const Ps::Array<PxPlane>& planes, ConvexHull*& hullOut)
+{
+	PxU32 i,j;
+	if(vertsCount < 4)
+		return false;
+
+	const PxU32 planesLimit = 256;
+	PxU32 maxplanes = PxMin(planesLimit,planes.size());
+
+	// get the bounds
+	PxBounds3 bounds;
+	bounds.setEmpty();
+	for(i=0;i<vertsCount;i++)
+	{
+		bounds.include(verts[i]);
+	}
+	float diameter = bounds.getDimensions().magnitude();
+	PxVec3 emin = bounds.minimum;
+	PxVec3 emax = bounds.maximum;
+	float epsilon = 0.01f; // size of object is taken into account within candidate plane function. Used to multiply here by magnitude(emax-emin)
+	float planetestepsilon = (emax-emin).magnitude() * local::PAPERWIDTH;
+	// todo: add bounding cube planes to force bevel. or try instead not adding the diameter expansion ??? must think.
+	// ConvexH *convex = ConvexHMakeCube(bmin - float3(diameter,diameter,diameter),bmax+float3(diameter,diameter,diameter));
+
+	// now expand the axis aligned planes by half diameter, !AB what is the point here?
+	// an AABB face is pushed out only if some input plane is nearly parallel to it
+	float maxdot_minang = cosf(Ps::degToRad(local::MIN_ADJACENT_ANGLE));
+	for(j=0;j<6;j++)
+	{
+		PxVec3 n(0,0,0);
+		n[j/2] = (j%2) ? 1.0f : -1.0f;
+		for(i=0; i < planes.size(); i++)
+		{
+			if(n.dot(planes[i].n) > maxdot_minang)
+			{
+				(*((j%2)?&emax:&emin)) += n * (diameter*0.5f);
+				break;
+			}
+		}
+	}
+
+	// start from the (expanded) AABB hull and slice it with candidate planes
+	ConvexHull* c = PX_NEW_TEMP(ConvexHull)(emin,emax, planes);
+	PxI32 k;
+	// find the candidate plane and crop the hull
+	while(maxplanes-- && (k= c->findCandidatePlane(planetestepsilon, epsilon))>=0)
+	{
+		ConvexHull* tmp = c;
+		c = convexHullCrop(*tmp,planes[PxU32(k)], planetestepsilon);
+		if(c==NULL)
+		{
+			// crop failed: keep the previous hull and stop
+			c=tmp;
+			break;
+		} // might want to debug this case better!!!
+		if(!c->assertIntact(planetestepsilon))
+		{
+			// cropped hull is inconsistent: roll back and stop
+			PX_DELETE(c);
+			c=tmp;
+			break;
+		} // might want to debug this case better too!!!
+
+		// check for vertex limit
+		if(c->getVertices().size() > mConvexMeshDesc.vertexLimit)
+		{
+			PX_DELETE(c);
+			c=tmp;
+			maxplanes = 0;
+			break;
+		}
+		// check for vertex limit per face if necessary, GRB supports max 32 verts per face
+		if ((mConvexMeshDesc.flags & PxConvexFlag::eGPU_COMPATIBLE) && c->maxNumVertsPerFace() > gpuMaxVertsPerFace)
+		{
+			PX_DELETE(c);
+			c = tmp;
+			maxplanes = 0;
+			break;
+		}
+		PX_DELETE(tmp);
+	}
+
+	PX_ASSERT(c->assertIntact(planetestepsilon));
+	// ownership of c transfers to the caller via hullOut
+	hullOut = c;
+
+	return true;
+}
+
+
+//////////////////////////////////////////////////////////////////////////
+// fill the data
+void InflationConvexHullLib::fillConvexMeshDesc(PxConvexMeshDesc& outDesc)
+{
+	// valid only after a successful hull computation; outDesc aliases mHullResult's
+	// buffers (no copies), so it must not outlive this object
+	PX_ASSERT(mFinished);
+
+	outDesc.indices.count = mHullResult.mIndexCount;
+	outDesc.indices.stride = sizeof(PxU32);
+	outDesc.indices.data = mHullResult.mIndices;
+
+	outDesc.points.count = mHullResult.mVcount;
+	outDesc.points.stride = sizeof(PxVec3);
+	outDesc.points.data = mHullResult.mVertices;
+
+	outDesc.polygons.count = mHullResult.mPolygonCount;
+	outDesc.polygons.stride = sizeof(PxHullPolygon);
+	outDesc.polygons.data = mHullResult.mPolygons;
+
+	swapLargestFace(outDesc);
+}
+
+//////////////////////////////////////////////////////////////////////////
+
+InflationConvexHullLib::~InflationConvexHullLib()
+{
+	// release the hull result buffers allocated with PX_ALLOC_TEMP during computeHull;
+	// pointers are NULL-initialized by ConvexHullLibResult's constructor, so the
+	// guards make destruction safe even if no hull was ever computed
+	if(mHullResult.mIndices)
+	{
+		PX_FREE(mHullResult.mIndices);
+	}
+
+	if(mHullResult.mPolygons)
+	{
+		PX_FREE(mHullResult.mPolygons);
+	}
+
+	if(mHullResult.mVertices)
+	{
+		PX_FREE(mHullResult.mVertices);
+	}
+}
+
+//////////////////////////////////////////////////////////////////////////
diff --git a/PhysX_3.4/Source/PhysXCooking/src/convex/InflationConvexHullLib.h b/PhysX_3.4/Source/PhysXCooking/src/convex/InflationConvexHullLib.h
new file mode 100644
index 00000000..8369691f
--- /dev/null
+++ b/PhysX_3.4/Source/PhysXCooking/src/convex/InflationConvexHullLib.h
@@ -0,0 +1,133 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2016 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#ifndef PX_INFLATION_CONVEXHULLLIB_H
+#define PX_INFLATION_CONVEXHULLLIB_H
+
+#include "ConvexHullLib.h"
+#include "Ps.h"
+#include "PsArray.h"
+#include "PsUserAllocated.h"
+
+namespace local
+{
+ class HullTriangles;
+}
+
+namespace physx
+{
+ class ConvexHull;
+
+ //////////////////////////////////////////////////////////////////////////
+ // internal hull lib results
+	struct ConvexHullLibResult
+	{
+		// return code
+		enum ErrorCode
+		{
+			eSUCCESS = 0,			// success!
+			eFAILURE,				// failed.
+			eVERTEX_LIMIT_REACHED,	// vertex limit reached fallback.
+			eZERO_AREA_TEST_FAILED	// area test failed - failed to create simplex
+		};
+
+		PxU32 mVcount;				// number of hull vertices in mVertices
+		PxU32 mIndexCount;			// total number of indices (one per hull half-edge)
+		PxU32 mPolygonCount;		// number of hull faces in mPolygons
+		PxVec3* mVertices;			// hull vertex buffer (owned, PX_ALLOC_TEMP)
+		PxU32* mIndices;			// per-polygon vertex index list (owned)
+		PxHullPolygon* mPolygons;	// polygon descriptors - plane + index range (owned)
+
+
+		// zero counts, NULL buffers - safe to destroy without a computed hull
+		ConvexHullLibResult()
+			: mVcount(0), mIndexCount(0), mPolygonCount(0),
+			mVertices(NULL), mIndices(NULL), mPolygons(NULL)
+		{
+		}
+	};
+
+ //////////////////////////////////////////////////////////////////////////
+ // inflation based hull library. Using the legacy Stan hull lib with inflation
+ // We construct the hull using incremental method and then inflate the resulting planes
+ // by specified skinWidth. We take the planes and crop AABB with them to construct
+ // the final hull. This method may reduce the number of polygons significantly
+ // in case of lot of vertices are used. On the other hand, we produce new vertices
+ // and enlarge the original hull constructed from the given input points.
+ // Generally speaking, the increase of vertices is usually too big, so it is not worthy
+	// to use this algorithm to reduce the number of polygons. This method is also very imprecise
+ // and may produce invalid hulls. It is recommended to use the new quickhull library.
+	class InflationConvexHullLib: public ConvexHullLib, public Ps::UserAllocated
+	{
+		PX_NOCOPY(InflationConvexHullLib)
+	public:
+
+		// functions
+		InflationConvexHullLib(const PxConvexMeshDesc& desc, const PxCookingParams& params);
+
+		~InflationConvexHullLib();
+
+		// computes the convex hull from provided points
+		virtual PxConvexMeshCookingResult::Enum createConvexHull();
+
+		// fills the convexmeshdesc with computed hull data
+		// (valid only after a successful createConvexHull; the desc aliases internal buffers)
+		virtual void fillConvexMeshDesc(PxConvexMeshDesc& desc);
+
+	protected:
+		// internal
+
+		// compute the hull
+		ConvexHullLibResult::ErrorCode computeHull(PxU32 vertsCount, const PxVec3* verts);
+
+		// computes the hull
+		ConvexHullLibResult::ErrorCode calchull(const PxVec3* verts, PxU32 verts_count, ConvexHull*& hullOut);
+
+		// computes the actual hull using the incremental algorithm
+		ConvexHullLibResult::ErrorCode calchullgen(const PxVec3* verts, PxU32 verts_count, local::HullTriangles& triangles);
+
+		// calculates the hull planes from the triangles (merges near-coplanar ones, inflates by skinWidth)
+		bool calchullplanes(const PxVec3* verts, local::HullTriangles& triangles, Ps::Array<PxPlane>& planes);
+
+		// construct the hull from given planes - create new verts
+		bool overhull(const PxVec3* verts, PxU32 vertsCount,const Ps::Array<PxPlane>& planes, ConvexHull*& hullOut);
+
+		// expand the hull with the limited triangles set (plane-shifting path)
+		ConvexHullLibResult::ErrorCode expandHull(const PxVec3* verts, PxU32 vertsCount, const local::HullTriangles& triangles, ConvexHull*& hullOut);
+
+		// expand the hull with the limited triangles set (OBB-slicing path)
+		ConvexHullLibResult::ErrorCode expandHullOBB(const PxVec3* verts, PxU32 vertsCount, const local::HullTriangles& triangles, ConvexHull*& hullOut);
+
+	private:
+		bool mFinished;					// set once the hull has been computed successfully
+		ConvexHullLibResult mHullResult;	// owned output buffers exposed via fillConvexMeshDesc
+		float mTolerance;				// crop epsilon, fixed in calchullgen
+		float mPlaneTolerance;			// plane test epsilon, scaled by input extents in calchullgen
+	};
+}
+
+#endif
diff --git a/PhysX_3.4/Source/PhysXCooking/src/convex/QuickHullConvexHullLib.cpp b/PhysX_3.4/Source/PhysXCooking/src/convex/QuickHullConvexHullLib.cpp
new file mode 100644
index 00000000..13b88364
--- /dev/null
+++ b/PhysX_3.4/Source/PhysXCooking/src/convex/QuickHullConvexHullLib.cpp
@@ -0,0 +1,2383 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2016 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#include "QuickHullConvexHullLib.h"
+#include "ConvexHullUtils.h"
+
+#include "PsAllocator.h"
+#include "PsUserAllocated.h"
+#include "PsSort.h"
+#include "PsMathUtils.h"
+#include "PsFoundation.h"
+#include "PsUtilities.h"
+#include "PsBitUtils.h"
+
+#include "foundation/PxMath.h"
+#include "foundation/PxPlane.h"
+#include "foundation/PxBounds3.h"
+#include "foundation/PxMemory.h"
+
+using namespace physx;
+
+namespace local
+{
+	//////////////////////////////////////////////////////////////////////////
+	// tuning constants for the quickhull construction
+	static const float MIN_ADJACENT_ANGLE = 3.0f;	// in degrees - result won't have two adjacent facets within this angle of each other.
+	static const float PLANE_THICKNES = 3.0f * PX_EPS_F32;	// points within this distance are considered on a plane (identifier spelling kept as-is; used elsewhere in this file)
+	static const float ACCEPTANCE_EPSILON_MULTIPLY = 2000.0f;	// used to scale up plane tolerance to accept new points into convex, plane thickness tolerance is too high for point acceptance
+	static const float PLANE_TOLERANCE = 0.001f;	// points within this distance are considered on a plane for post adjacent merging and eye vertex acceptance
+	static const float MAXDOT_MINANG = cosf(Ps::degToRad(MIN_ADJACENT_ANGLE)); // adjacent angle for dot product tests
+
+ //////////////////////////////////////////////////////////////////////////
+
+ struct QuickHullFace;
+ class ConvexHull;
+ class HullPlanes;
+
+	//////////////////////////////////////////////////////////////////////////
+	// Simple growing pool allocator. Items are handed out from preallocated
+	// blocks of mPreallocateSize elements; a new block is allocated whenever the
+	// current one is exhausted. When useIndexing is true, every element is
+	// placement-new constructed with its global index (T must accept a PxU32
+	// constructor argument). Element destructors are never invoked - see ~MemBlock.
+	template<typename T, bool useIndexing>
+	class MemBlock
+	{
+	public:
+		// construct with an initial block of preallocateSize items
+		// NOTE(review): unlike init(), this constructor does not placement-new the
+		// items with their index even when useIndexing is true - confirm this path
+		// is never taken for indexed types, or mirror the init() behavior here.
+		MemBlock(PxU32 preallocateSize)
+			: mPreallocateSize(preallocateSize), mCurrentBlock(0), mCurrentIndex(0)
+		{
+			PX_ASSERT(preallocateSize);
+			T* block = reinterpret_cast<T*>(PX_ALLOC_TEMP(sizeof(T)*preallocateSize, "Quickhull MemBlock"));
+			mBlocks.pushBack(block);
+		}
+
+		// default construct with no storage; init() must be called before use
+		MemBlock()
+			: mPreallocateSize(0), mCurrentBlock(0), mCurrentIndex(0)
+		{
+		}
+
+		// allocate the first block; when useIndexing is true each item is
+		// placement-new constructed with its global index
+		void init(PxU32 preallocateSize)
+		{
+			PX_ASSERT(preallocateSize);
+			PX_ASSERT(mPreallocateSize == 0);
+			mPreallocateSize = preallocateSize;
+			T* block = reinterpret_cast<T*>(PX_ALLOC_TEMP(sizeof(T)*preallocateSize, "Quickhull MemBlock"));
+			if(useIndexing)
+			{
+				for (PxU32 i = 0; i < mPreallocateSize; i++)
+				{
+					// placement new to index data
+					PX_PLACEMENT_NEW(&block[i], T)(i);
+				}
+			}
+			mBlocks.pushBack(block);
+		}
+
+		// frees all blocks; note: item destructors are NOT called, so T must not
+		// own resources
+		~MemBlock()
+		{
+			for (PxU32 i = 0; i < mBlocks.size(); i++)
+			{
+				PX_FREE(mBlocks[i]);
+			}
+			mBlocks.clear();
+		}
+
+		// random access to an already handed-out item by its global index
+		T* getItem(PxU32 index)
+		{
+			const PxU32 block = index/mPreallocateSize;
+			const PxU32 itemIndex = index % mPreallocateSize;
+			PX_ASSERT(block <= mCurrentBlock);
+			PX_ASSERT(itemIndex < mPreallocateSize);
+			return &(mBlocks[block])[itemIndex];
+		}
+
+		// returns the next unused item, growing the pool by one block if needed
+		T* getFreeItem()
+		{
+			PX_ASSERT(mPreallocateSize);
+			// check if we have enough space in block, otherwise allocate new block
+			if(mCurrentIndex < mPreallocateSize)
+			{
+				return &(mBlocks[mCurrentBlock])[mCurrentIndex++];
+			}
+			else
+			{
+				T* block = reinterpret_cast<T*>(PX_ALLOC_TEMP(sizeof(T)*mPreallocateSize, "Quickhull MemBlock"));
+				mCurrentBlock++;
+				if (useIndexing)
+				{
+					for (PxU32 i = 0; i < mPreallocateSize; i++)
+					{
+						// placement new to index data
+						PX_PLACEMENT_NEW(&block[i], T)(mCurrentBlock*mPreallocateSize + i);
+					}
+				}
+				mBlocks.pushBack(block);
+				mCurrentIndex = 0;
+				return &(mBlocks[mCurrentBlock])[mCurrentIndex++];
+			}
+		}
+
+	private:
+		PxU32			mPreallocateSize;	// items per block
+		PxU32			mCurrentBlock;		// index of the block we currently hand items from
+		PxU32			mCurrentIndex;		// next free item within the current block
+		Ps::Array<T*>	mBlocks;			// all allocated blocks
+	};
+
+	//////////////////////////////////////////////////////////////////////////
+	// representation of quick hull vertex
+	struct QuickHullVertex
+	{
+		PxVec3				point;		// point vector
+		PxU32				index;		// point index for compare
+		float				dist;		// distance from plane if necessary
+
+		QuickHullVertex*	next;		// link to next vertex, linked list used for conflict list
+
+		// vertices are identified solely by their input index
+		PX_FORCE_INLINE bool operator==(const QuickHullVertex& vertex) const
+		{
+			return index == vertex.index ? true : false;
+		}
+
+		// ordering by stored plane distance (used when picking the furthest point)
+		PX_FORCE_INLINE bool operator <(const QuickHullVertex& vertex) const
+		{
+			return dist < vertex.dist ? true : false;
+		}
+	};
+
+	//////////////////////////////////////////////////////////////////////////
+	// representation of quick hull half edge
+	// standard half-edge structure: each face boundary is a doubly linked loop of
+	// edges, and every edge is paired with its oppositely-oriented twin
+	struct QuickHullHalfEdge
+	{
+		QuickHullHalfEdge() : prev(NULL), next(NULL), twin(NULL), face(NULL)
+		{
+		}
+
+		// pool-index constructor (index unused)
+		// NOTE(review): the half-edge pool is created with useIndexing=false, so
+		// this overload appears unused here - verify before removing
+		QuickHullHalfEdge(PxU32 )
+			: prev(NULL), next(NULL), twin(NULL), face(NULL)
+		{
+		}
+
+		QuickHullVertex		tail;	// tail vertex, head vertex is the tail of the twin
+
+		QuickHullHalfEdge*	prev;	// previous edge
+		QuickHullHalfEdge*	next;	// next edge
+		QuickHullHalfEdge*	twin;	// twin/opposite edge
+
+		QuickHullFace*		face;	// face where the edge belong
+
+		PX_FORCE_INLINE const QuickHullVertex& getTail() const
+		{
+			return tail;
+		}
+
+		// head vertex is the tail of the twin edge
+		PX_FORCE_INLINE const QuickHullVertex& getHead() const
+		{
+			PX_ASSERT(twin);
+			return twin->tail;
+		}
+
+		// pair this edge with its opposite - sets both twin pointers
+		PX_FORCE_INLINE void setTwin(QuickHullHalfEdge* edge)
+		{
+			twin = edge;
+			edge->twin = this;
+		}
+
+		PX_FORCE_INLINE QuickHullFace* getOppositeFace() const
+		{
+			return twin->face;
+		}
+
+		// signed distance of the twin face's centroid from this edge's face plane
+		float getOppositeFaceDistance() const;
+	};
+
+ //////////////////////////////////////////////////////////////////////////
+
+ typedef Ps::Array<QuickHullVertex*> QuickHullVertexArray;
+ typedef Ps::Array<QuickHullHalfEdge*> QuickHullHalfEdgeArray;
+ typedef Ps::Array<QuickHullFace*> QuickHullFaceArray;
+
+	//////////////////////////////////////////////////////////////////////////
+	// representation of quick hull face
+	// a face is a half-edge loop; plane data (normal/offset/centroid) is cached
+	// and must be recomputed via computeNormalAndCentroid() whenever the loop changes
+	struct QuickHullFace
+	{
+		enum FaceState
+		{
+			eVISIBLE,		// face is part of the current hull
+			eDELETED,		// face was merged/absorbed and is no longer valid
+			eNON_CONVEX		// face marked non-convex
+		};
+
+		QuickHullHalfEdge*	edge;			// starting edge
+		PxU16				numEdges;		// num edges on the face
+		QuickHullVertex*	conflictList;	// conflict list, used to determine unclaimed vertices
+
+		PxVec3				normal;			// Newell plane normal
+		float				area;			// face area
+		PxVec3				centroid;		// face centroid
+
+		float				planeOffset;	// Newell plane offset
+		float				expandOffset;	// used for plane expansion if vertex limit reached
+
+		FaceState			state;			// face validity state
+
+		QuickHullFace*		nextFace;		// used to indicate next free face in faceList
+		PxU32				index;			// face index for compare identification
+
+	public:
+		// NOTE(review): 'index' is left uninitialized by this constructor - faces
+		// are pooled through the indexed MemBlock which uses the PxU32 overload
+		// below, but confirm no face is ever created through this path
+		QuickHullFace()
+			: edge(NULL), numEdges(0), conflictList(NULL), area(0.0f), planeOffset(0.0f), expandOffset(-FLT_MAX),
+			state(eVISIBLE), nextFace(NULL)
+		{
+		}
+
+		// pool constructor - stores the face index assigned by the MemBlock
+		QuickHullFace(PxU32 ind)
+			: edge(NULL), numEdges(0), conflictList(NULL), area(0.0f), planeOffset(0.0f), expandOffset(-FLT_MAX),
+			state(eVISIBLE), nextFace(NULL), index(ind)
+		{
+		}
+
+		~QuickHullFace()
+		{
+		}
+
+		// get edge on index - walks the linked loop, O(i)
+		PX_FORCE_INLINE QuickHullHalfEdge* getEdge(PxU32 i) const
+		{
+			QuickHullHalfEdge* he = edge;
+			while (i > 0)
+			{
+				he = he->next;
+				i--;
+			}
+			return he;
+		}
+
+		// signed distance from the face plane to provided point
+		// (positive when the point lies on the normal side of the plane)
+		PX_FORCE_INLINE float distanceToPlane(const PxVec3 p) const
+		{
+			return normal.dot(p) - planeOffset;
+		}
+
+		// compute face normal and centroid
+		// the loop start is chosen as the shortest of the first three edges,
+		// presumably for numerical robustness; cross products are accumulated
+		// relative to that start point
+		PX_FORCE_INLINE void computeNormalAndCentroid()
+		{
+			PX_ASSERT(edge);
+			normal = PxVec3(PxZero);
+			numEdges = 1;
+
+			// pick the shortest of the first three edges as the loop start
+			QuickHullHalfEdge* testEdge = edge;
+			QuickHullHalfEdge* startEdge = NULL;
+			float minDist = FLT_MAX;
+			for (PxU32 i = 0; i < 3; i++)
+			{
+				const float d = (testEdge->tail.point - testEdge->next->tail.point).magnitudeSquared();
+				if (d < minDist)
+				{
+					minDist = d;
+					startEdge = testEdge;
+				}
+				testEdge = testEdge->next;
+			}
+			PX_ASSERT(startEdge);
+
+			QuickHullHalfEdge* he = startEdge->next;
+			const PxVec3& p0 = startEdge->tail.point;
+			const PxVec3 d = he->tail.point - p0;
+			centroid = startEdge->tail.point;
+
+			// accumulate the centroid and the unnormalized normal over the loop
+			do
+			{
+				numEdges++;
+				centroid += he->tail.point;
+
+				normal += d.cross(he->next->tail.point - p0);
+
+				he = he->next;
+			} while (he != startEdge);
+
+			// normalize() returns the previous magnitude - stored as 'area'
+			// (note: this is the unhalved cross-product magnitude, i.e. twice the
+			// actual polygon area for a triangle)
+			area = normal.normalize();
+			centroid *= (1.0f / float(numEdges));
+
+			planeOffset = normal.dot(centroid);
+		}
+
+		// merge adjacent face
+		void mergeAdjacentFace(QuickHullHalfEdge* halfEdge, QuickHullFaceArray& discardedFaces);
+
+		// check face consistency
+		bool checkFaceConsistency();
+
+	private:
+		// connect halfedges
+		QuickHullFace* connectHalfEdges(QuickHullHalfEdge* hedgePrev, QuickHullHalfEdge* hedge);
+
+		// check if the face does have only 3 vertices
+		PX_FORCE_INLINE bool isTriangle() const
+		{
+			return numEdges == 3 ? true : false;
+		}
+
+	};
+
+	//////////////////////////////////////////////////////////////////////////
+	// outcome codes returned by QuickHull::buildHull()
+	struct QuickHullResult
+	{
+		enum Enum
+		{
+			eSUCCESS,					// ok
+			eZERO_AREA_TEST_FAILED,		// area test failed for simplex
+			eVERTEX_LIMIT_REACHED,		// vertex limit reached need to expand hull
+			ePOLYGONS_LIMIT_REACHED,	// polygons hard limit reached
+			eFAILURE					// general failure
+		};
+	};
+
+	//////////////////////////////////////////////////////////////////////////
+	// Quickhull base class holding the hull during construction
+	class QuickHull : public Ps::UserAllocated
+	{
+		PX_NOCOPY(QuickHull)
+	public:
+
+		QuickHull(const PxCookingParams& params, const PxConvexMeshDesc& desc);
+
+		~QuickHull();
+
+		// preallocate the edges, faces, vertices
+		void preallocate(PxU32 numVertices);
+
+		// parse the input verts, store them into internal format
+		void parseInputVertices(const PxVec3* verts, PxU32 numVerts);
+
+		// release the hull and data
+		void releaseHull();
+
+		// sets the precomputed min/max data
+		void setPrecomputedMinMax(const QuickHullVertex* minVertex,const QuickHullVertex* maxVertex, const float tolerance,const float planeTolerance);
+
+		// main entry function to build the hull from provided points
+		QuickHullResult::Enum buildHull();
+
+		// returns the largest edge (= vertex) count over all visible hull faces
+		PxU32 maxNumVertsPerFace() const;
+
+	protected:
+		// compute min max verts
+		void computeMinMaxVerts();
+
+		// find the initial simplex
+		bool findSimplex();
+
+		// add the initial simplex
+		void addSimplex(QuickHullVertex* simplex, bool flipTriangle);
+
+		// finds next point to add
+		QuickHullVertex* nextPointToAdd(QuickHullFace*& eyeFace);
+
+		// adds point to the hull
+		bool addPointToHull(const QuickHullVertex* vertex, QuickHullFace& face);
+
+		// creates new face from given triangles
+		QuickHullFace* createTriangle(const QuickHullVertex& v0, const QuickHullVertex& v1, const QuickHullVertex& v2);
+
+		// adds point to the face conflict list
+		void addPointToFace(QuickHullFace& face, QuickHullVertex* vertex, float dist);
+
+		// removes eye point from the face conflict list
+		void removeEyePointFromFace(QuickHullFace& face, const QuickHullVertex* vertex);
+
+		// calculate the horizon for the eyePoint against a given face
+		void calculateHorizon(const PxVec3& eyePoint, QuickHullHalfEdge* edge, QuickHullFace& face, QuickHullHalfEdgeArray& horizon, QuickHullFaceArray& removedFaces);
+
+		// adds new faces from given horizon and eyePoint
+		void addNewFacesFromHorizon(const QuickHullVertex* eyePoint, const QuickHullHalfEdgeArray& horizon, QuickHullFaceArray& newFaces);
+
+		// merge adjacent face
+		bool doAdjacentMerge(QuickHullFace& face, bool mergeWrtLargeFace);
+
+		// merge adjacent face doing normal test
+		bool doPostAdjacentMerge(QuickHullFace& face, const float minAngle);
+
+		// delete face points
+		void deleteFacePoints(QuickHullFace& faceToDelete, QuickHullFace* absorbingFace);
+
+		// resolve unclaimed points
+		void resolveUnclaimedPoints(const QuickHullFaceArray& newFaces);
+
+		// merges polygons with similar normals
+		void postMergeHull();
+
+		// check if 2 faces can be merged
+		bool canMergeFaces(const QuickHullHalfEdge& he, float planeTolerance);
+
+		// get next free face from the preallocated pool
+		PX_FORCE_INLINE QuickHullFace* getFreeHullFace()
+		{
+			return mFreeFaces.getFreeItem();
+		}
+
+		// get next free half edge from the preallocated pool
+		PX_FORCE_INLINE QuickHullHalfEdge* getFreeHullHalfEdge()
+		{
+			return mFreeHalfEdges.getFreeItem();
+		}
+
+	protected:
+		friend class physx::QuickHullConvexHullLib;
+
+		const PxCookingParams&	mCookingParams;		// cooking params
+		const PxConvexMeshDesc& mConvexDesc;		// convex desc
+
+		PxVec3					mInteriorPoint;		// interior point for int/ext tests
+
+		PxU32					mMaxVertices;		// maximum number of vertices (can be different as we may add vertices during the cleanup)
+		PxU32					mNumVertices;		// actual number of vertices
+
+		QuickHullVertex*		mVerticesList;		// vertices list preallocated
+		MemBlock<QuickHullHalfEdge, false>	mFreeHalfEdges;	// free half edges
+		MemBlock<QuickHullFace, true>	mFreeFaces;			// free faces
+
+		QuickHullFaceArray		mHullFaces;			// actual hull faces, contains also invalid and not used faces
+		PxU32					mNumHullFaces;		// actual number of hull faces
+
+		bool					mPrecomputedMinMax;	// if we got the precomputed min/max values
+		QuickHullVertex			mMinVertex[3];		// min vertex along each of x,y,z
+		QuickHullVertex			mMaxVertex[3];		// max vertex along each of x,y,z
+		float					mTolerance;			// hull tolerance, used for plane thickness and merge strategy
+		float					mPlaneTolerance;	// used for post merge stage
+
+		QuickHullVertexArray	mUnclaimedPoints;	// holds temp unclaimed points
+
+		QuickHullHalfEdgeArray	mHorizon;			// array for horizon computation
+		QuickHullFaceArray		mNewFaces;			// new faces created during horizon computation
+		QuickHullFaceArray		mRemovedFaces;		// removed faces during horizon computation
+		QuickHullFaceArray		mDiscardedFaces;	// discarded faces during face merging
+	};
+
+	//////////////////////////////////////////////////////////////////////////
+	// return the distance from opposite face
+	// signed distance of the neighbouring (twin) face's centroid from this
+	// edge's face plane
+	float QuickHullHalfEdge::getOppositeFaceDistance() const
+	{
+		PX_ASSERT(face);
+		PX_ASSERT(twin);
+		return face->distanceToPlane(twin->face->centroid);
+	}
+
+	//////////////////////////////////////////////////////////////////////////
+	// merge adjacent face from provided half edge.
+	// 1. set new half edges
+	// 2. connect the new half edges - check we have not produced redundant triangles, discard them
+	// 3. recompute the plane and check consistency
+	void QuickHullFace::mergeAdjacentFace(QuickHullHalfEdge* hedgeAdj, QuickHullFaceArray& discardedFaces)
+	{
+		QuickHullFace* oppFace = hedgeAdj->getOppositeFace();
+
+		// the neighbouring face is absorbed into this one
+		discardedFaces.pushBack(oppFace);
+		oppFace->state = QuickHullFace::eDELETED;
+
+		QuickHullHalfEdge* hedgeOpp = hedgeAdj->twin;
+
+		QuickHullHalfEdge* hedgeAdjPrev = hedgeAdj->prev;
+		QuickHullHalfEdge* hedgeAdjNext = hedgeAdj->next;
+		QuickHullHalfEdge* hedgeOppPrev = hedgeOpp->prev;
+		QuickHullHalfEdge* hedgeOppNext = hedgeOpp->next;
+
+		// check if we are lining up with the face in adjPrev dir
+		// (skip over any additional edges shared with the same opposite face)
+		while (hedgeAdjPrev->getOppositeFace() == oppFace)
+		{
+			hedgeAdjPrev = hedgeAdjPrev->prev;
+			hedgeOppNext = hedgeOppNext->next;
+		}
+
+		// check if we are lining up with the face in adjNext dir
+		while (hedgeAdjNext->getOppositeFace() == oppFace)
+		{
+			hedgeOppPrev = hedgeOppPrev->prev;
+			hedgeAdjNext = hedgeAdjNext->next;
+		}
+
+		QuickHullHalfEdge* hedge;
+
+		// set new face owner for the line up edges
+		for (hedge = hedgeOppNext; hedge != hedgeOppPrev->next; hedge = hedge->next)
+		{
+			hedge->face = this;
+		}
+
+		// if we are about to delete the shared edge, check if its not the starting edge of the face
+		if (hedgeAdj == edge)
+		{
+			edge = hedgeAdjNext;
+		}
+
+		// handle the half edges at the head
+		QuickHullFace* discardedFace;
+		discardedFace = connectHalfEdges(hedgeOppPrev, hedgeAdjNext);
+		if (discardedFace != NULL)
+		{
+			discardedFaces.pushBack(discardedFace);
+		}
+
+		// handle the half edges at the tail
+		discardedFace = connectHalfEdges(hedgeAdjPrev, hedgeOppNext);
+		if (discardedFace != NULL)
+		{
+			discardedFaces.pushBack(discardedFace);
+		}
+
+		// the merged loop changed - refresh the cached plane data
+		computeNormalAndCentroid();
+		PX_ASSERT(checkFaceConsistency());
+	}
+
+	//////////////////////////////////////////////////////////////////////////
+	// connect half edges of 2 adjacent faces
+	// if we find redundancy - edges are in a line, we drop the additional face if it is just a skinny triangle
+	QuickHullFace* QuickHullFace::connectHalfEdges(QuickHullHalfEdge* hedgePrev, QuickHullHalfEdge* hedge)
+	{
+		QuickHullFace* discardedFace = NULL;
+
+		// redundant edge - can be in a line
+		if (hedgePrev->getOppositeFace() == hedge->getOppositeFace())
+		{
+			// then there is a redundant edge that we can get rid of
+			QuickHullFace* oppFace = hedge->getOppositeFace();
+			QuickHullHalfEdge* hedgeOpp;
+
+			// never leave the face's starting edge pointing at the edge we drop
+			if (hedgePrev == edge)
+			{
+				edge = hedge;
+			}
+
+			// check if its not a skinny face with just 3 vertices - 3 edges
+			if (oppFace->isTriangle())
+			{
+				// then we can get rid of the opposite face altogether
+				hedgeOpp = hedge->twin->prev->twin;
+
+				oppFace->state = QuickHullFace::eDELETED;
+				discardedFace = oppFace;
+			}
+			else
+			{
+				// if not triangle, merge the 2 opposite halfedges into one
+				hedgeOpp = hedge->twin->next;
+
+				if (oppFace->edge == hedgeOpp->prev)
+				{
+					oppFace->edge = hedgeOpp;
+				}
+				hedgeOpp->prev = hedgeOpp->prev->prev;
+				hedgeOpp->prev->next = hedgeOpp;
+			}
+
+			// unlink the redundant edge and re-pair the twins
+			hedge->prev = hedgePrev->prev;
+			hedge->prev->next = hedge;
+
+			hedge->twin = hedgeOpp;
+			hedgeOpp->twin = hedge;
+
+			// oppFace was modified, so need to recompute
+			oppFace->computeNormalAndCentroid();
+		}
+		else
+		{
+			// just merge the halfedges
+			hedgePrev->next = hedge;
+			hedge->prev = hedgePrev;
+		}
+		return discardedFace;
+	}
+
+	//////////////////////////////////////////////////////////////////////////
+	// check face consistency
+	// always returns true - the individual checks fire PX_ASSERTs, so this is
+	// intended to be wrapped in PX_ASSERT and compiled out in release builds
+	bool QuickHullFace::checkFaceConsistency()
+	{
+		// do a sanity check on the face
+		QuickHullHalfEdge* hedge = edge;
+		PxU32 numv = 0;
+
+		// check degenerate face
+		do
+		{
+			numv++;
+			hedge = hedge->next;
+		} while (hedge != edge);
+
+		// degenerate face found
+		PX_ASSERT(numv > 2);
+
+		numv = 0;
+		hedge = edge;
+		do
+		{
+			QuickHullHalfEdge* hedgeOpp = hedge->twin;
+
+			// check if we have twin set
+			PX_ASSERT(hedgeOpp != NULL);
+
+			// twin for the twin must be the original edge
+			PX_ASSERT(hedgeOpp->twin == hedge);
+
+			QuickHullFace* oppFace = hedgeOpp->face;
+
+			PX_UNUSED(oppFace);
+
+			// opposite edge face must be set and valid
+			PX_ASSERT(oppFace != NULL);
+			PX_ASSERT(oppFace->state != QuickHullFace::eDELETED);
+
+			// edges face must be this one
+			PX_ASSERT(hedge->face == this);
+
+			hedge = hedge->next;
+		} while (hedge != edge);
+
+		return true;
+	}
+
+ //////////////////////////////////////////////////////////////////////////
+
+	QuickHull::QuickHull(const PxCookingParams& params, const PxConvexMeshDesc& desc)
+		: mCookingParams(params), mConvexDesc(desc), mVerticesList(NULL), mNumHullFaces(0), mPrecomputedMinMax(false),
+		mTolerance(-1.0f), mPlaneTolerance(-1.0f)
+	{
+		// tolerances start negative (invalid); they are set by either
+		// computeMinMaxVerts() or setPrecomputedMinMax()
+		// NOTE(review): mMaxVertices/mNumVertices are only set later by
+		// preallocate()/parseInputVertices() - confirm nothing reads them earlier
+	}
+
+ //////////////////////////////////////////////////////////////////////////
+
+	QuickHull::~QuickHull()
+	{
+		// NOTE(review): mVerticesList is freed in releaseHull(), not here -
+		// confirm all owners call releaseHull() to avoid leaking the vertex list
+	}
+
+	//////////////////////////////////////////////////////////////////////////
+	// stores externally precomputed extreme vertices and tolerances, so that
+	// buildHull() can skip its own computeMinMaxVerts() pass
+	void QuickHull::setPrecomputedMinMax(const QuickHullVertex* minVertex,const QuickHullVertex* maxVertex, const float tolerance,const float planeTolerance)
+	{
+		mPrecomputedMinMax = true;
+
+		mTolerance = tolerance;
+		mPlaneTolerance = planeTolerance;
+
+		// copy the extreme vertices for each coordinate axis
+		for (PxU32 axis = 0; axis < 3; axis++)
+		{
+			mMinVertex[axis] = minVertex[axis];
+			mMaxVertex[axis] = maxVertex[axis];
+		}
+	}
+
+	//////////////////////////////////////////////////////////////////////////
+	// preallocate internal buffers
+	void QuickHull::preallocate(PxU32 numVertices)
+	{
+		PX_ASSERT(numVertices > 0);
+
+		// max num vertices = numVertices
+		mMaxVertices = PxMax(PxU32(8), numVertices); // 8 is min, since we can expand to AABB during the clean vertices phase
+		mVerticesList = reinterpret_cast<QuickHullVertex*> (PX_ALLOC_TEMP(sizeof(QuickHullVertex)*mMaxVertices, "QuickHullVertex"));
+
+		// estimate the max half edges
+		// 3V - 6 bounds the edge count of a planar graph (Euler's formula);
+		// the extra factor of 3 adds headroom - TODO confirm the intended margin
+		PxU32 maxHalfEdges = (3 * mMaxVertices - 6) * 3;
+		mFreeHalfEdges.init(maxHalfEdges);
+
+		// estimate the max faces
+		// 2V - 4 is the planar-graph face bound; the pool is doubled for headroom
+		PxU32 maxFaces = (2 * mMaxVertices - 4);
+		mFreeFaces.init(maxFaces*2);
+
+		mHullFaces.reserve(maxFaces);
+		mUnclaimedPoints.reserve(numVertices);
+
+		// scratch arrays reused while inserting points
+		mNewFaces.reserve(32);
+		mRemovedFaces.reserve(32);
+		mDiscardedFaces.reserve(32);
+		mHorizon.reserve(PxMin(numVertices,PxU32(128)));
+	}
+
+	//////////////////////////////////////////////////////////////////////////
+	// release internal buffers
+	void QuickHull::releaseHull()
+	{
+		if (mVerticesList)
+		{
+			PX_FREE_AND_RESET(mVerticesList);
+		}
+		// the faces/half edges themselves are owned by the MemBlock pools and are
+		// freed by the pool destructors; here we only drop the references
+		mHullFaces.clear();
+	}
+
+	//////////////////////////////////////////////////////////////////////////
+	// returns the largest edge (= vertex) count over all visible hull faces
+	PxU32 QuickHull::maxNumVertsPerFace() const
+	{
+		PxU32 largest = 0;
+		for (PxU32 faceIndex = 0; faceIndex < mHullFaces.size(); faceIndex++)
+		{
+			const local::QuickHullFace* candidate = mHullFaces[faceIndex];
+
+			// only faces still part of the hull count
+			if (candidate->state != local::QuickHullFace::eVISIBLE)
+				continue;
+
+			largest = PxMax(largest, PxU32(candidate->numEdges));
+		}
+		return largest;
+	}
+
+	//////////////////////////////////////////////////////////////////////////
+	// copies the input points into the preallocated internal vertex list,
+	// tagging each vertex with its input index
+	void QuickHull::parseInputVertices(const PxVec3* verts, PxU32 numVerts)
+	{
+		PX_ASSERT(verts);
+		PX_ASSERT(numVerts <= mMaxVertices);
+
+		mNumVertices = numVerts;
+		QuickHullVertex* dst = mVerticesList;
+		for (PxU32 vertIndex = 0; vertIndex < numVerts; vertIndex++, dst++)
+		{
+			dst->point = verts[vertIndex];
+			dst->index = vertIndex;
+		}
+	}
+
+	//////////////////////////////////////////////////////////////////////////
+	// finds the extreme vertex along each coordinate axis and derives the hull
+	// tolerances from the overall extents
+	void QuickHull::computeMinMaxVerts()
+	{
+		const QuickHullVertex& firstVertex = mVerticesList[0];
+		for (PxU32 axis = 0; axis < 3; axis++)
+		{
+			mMinVertex[axis] = firstVertex;
+			mMaxVertex[axis] = firstVertex;
+		}
+
+		PxVec3 maxBound = firstVertex.point;
+		PxVec3 minBound = firstVertex.point;
+
+		// track the min/max vertex per axis
+		for (PxU32 i = 1; i < mNumVertices; i++)
+		{
+			const QuickHullVertex& testVertex = mVerticesList[i];
+			const PxVec3& testPoint = testVertex.point;
+			for (PxU32 axis = 0; axis < 3; axis++)
+			{
+				if (testPoint[axis] > maxBound[axis])
+				{
+					maxBound[axis] = testPoint[axis];
+					mMaxVertex[axis] = testVertex;
+				}
+				else if (testPoint[axis] < minBound[axis])
+				{
+					minBound[axis] = testPoint[axis];
+					mMinVertex[axis] = testVertex;
+				}
+			}
+		}
+
+		// tolerance scales with the absolute extents of the point cloud,
+		// never dropping below the raw plane thickness epsilon
+		float extent = 0.0f;
+		for (PxU32 axis = 0; axis < 3; axis++)
+			extent += PxMax(PxAbs(maxBound[axis]), PxAbs(minBound[axis]));
+
+		mTolerance = PxMax(local::PLANE_THICKNES * extent, local::PLANE_THICKNES);
+		mPlaneTolerance = local::PLANE_TOLERANCE;
+	}
+
+	//////////////////////////////////////////////////////////////////////////
+	// find the initial simplex
+	// 1. search in max axis from computed min,max
+	// 2. 3rd point is the furthest vertex from the initial line
+	// 3. 4th vertex is along the line, 3rd vertex normal
+	bool QuickHull::findSimplex()
+	{
+		// pick the axis with the largest min-max spread
+		float max = 0;
+		PxU32 imax = 0;
+
+		for (PxU32 i = 0; i < 3; i++)
+		{
+			float diff = mMaxVertex[i].point[i] - mMinVertex[i].point[i];
+			if (diff > max)
+			{
+				max = diff;
+				imax = i;
+			}
+		}
+
+		if (max <= mTolerance)
+		{
+			// should not happen as we clear the vertices before and expand them if they are really close to each other
+			Ps::getFoundation().error(PxErrorCode::eINTERNAL_ERROR, __FILE__, __LINE__, "QuickHullConvexHullLib::findSimplex: Simplex input points appers to be almost at the same place");
+			return false;
+		}
+
+		QuickHullVertex simplex[4];
+
+		// set first two vertices to be those with the greatest
+		// one dimensional separation
+		simplex[0] = mMaxVertex[imax];
+		simplex[1] = mMinVertex[imax];
+
+		// set third vertex to be the vertex farthest from
+		// the line between simplex[0] and simplex[1]
+		PxVec3 normal;
+		float maxDist = 0;
+		PxVec3 u01 = (simplex[1].point - simplex[0].point);
+		u01.normalize();
+
+		for (PxU32 i = 0; i < mNumVertices; i++)
+		{
+			const QuickHullVertex& testVert = mVerticesList[i];
+			const PxVec3& testPoint = testVert.point;
+			const PxVec3 diff = testPoint - simplex[0].point;
+			const PxVec3 xprod = u01.cross(diff);
+			// squared distance from the line (u01 is unit length)
+			const float lenSqr = xprod.magnitudeSquared();
+			if (lenSqr > maxDist && testVert.index != simplex[0].index && testVert.index != simplex[1].index)
+			{
+				maxDist = lenSqr;
+				simplex[2] = testVert;
+				normal = xprod;
+			}
+		}
+
+		if (PxSqrt(maxDist) <= 100 * mTolerance)
+		{
+			Ps::getFoundation().error(PxErrorCode::eINTERNAL_ERROR, __FILE__, __LINE__, "QuickHullConvexHullLib::findSimplex: Simplex input points appers to be colinear.");
+			return false;
+		}
+		normal.normalize();
+
+		// set the forth vertex in the normal direction
+		// (the vertex farthest from the plane of the first three)
+		const float d0 = simplex[2].point.dot(normal);
+		maxDist = 0.0f;
+		for (PxU32 i = 0; i < mNumVertices; i++)
+		{
+			const QuickHullVertex& testVert = mVerticesList[i];
+			const PxVec3& testPoint = testVert.point;
+			const float dist = PxAbs(testPoint.dot(normal) - d0);
+			if (dist > maxDist && testVert.index != simplex[0].index &&
+				testVert.index != simplex[1].index && testVert.index != simplex[2].index)
+			{
+				maxDist = dist;
+				simplex[3] = testVert;
+			}
+		}
+
+		if (PxAbs(maxDist) <= 100 * mTolerance)
+		{
+			Ps::getFoundation().error(PxErrorCode::eINTERNAL_ERROR, __FILE__, __LINE__, "QuickHullConvexHullLib::findSimplex: Simplex input points appers to be coplanar.");
+			return false;
+		}
+
+		// now create faces from those triangles
+		// flip the winding when the fourth vertex lies below the plane of the first three
+		addSimplex(&simplex[0], simplex[3].point.dot(normal) - d0 < 0);
+
+		return true;
+	}
+
+	//////////////////////////////////////////////////////////////////////////
+	// allocates a face and three half edges from the pools, links the edges into
+	// a closed triangle loop and caches the face plane data
+	QuickHullFace* QuickHull::createTriangle(const QuickHullVertex& v0, const QuickHullVertex& v1, const QuickHullVertex& v2)
+	{
+		QuickHullFace* face = getFreeHullFace();
+
+		// grab three edges and attach them to the new face
+		QuickHullHalfEdge* he[3];
+		const QuickHullVertex* tails[3] = { &v0, &v1, &v2 };
+		for (PxU32 i = 0; i < 3; i++)
+		{
+			he[i] = getFreeHullHalfEdge();
+			he[i]->face = face;
+			he[i]->tail = *tails[i];
+		}
+
+		// close the cycle: next/prev wrap around the triangle
+		for (PxU32 i = 0; i < 3; i++)
+		{
+			he[i]->next = he[(i + 1) % 3];
+			he[i]->prev = he[(i + 2) % 3];
+		}
+
+		face->edge = he[0];
+		face->nextFace = NULL;
+
+		// compute the normal and offset
+		face->computeNormalAndCentroid();
+		return face;
+	}
+
+
+	//////////////////////////////////////////////////////////////////////////
+	// add initial simplex to the quickhull
+	// construct triangles from the simplex points and connect them with half edges
+	void QuickHull::addSimplex(QuickHullVertex* simplex, bool flipTriangle)
+	{
+		PX_ASSERT(simplex);
+
+		// get interior point - the centroid of the 4 simplex vertices
+		PxVec3 vectorSum = simplex[0].point;
+		for (PxU32 i = 1; i < 4; i++)
+		{
+			vectorSum += simplex[i].point;
+		}
+		mInteriorPoint = vectorSum / 4.0f;
+
+		QuickHullFace* tris[4];
+		// create the triangles from the initial simplex; the winding depends on
+		// which side of the base triangle the apex (simplex[3]) lies
+		if (flipTriangle)
+		{
+			tris[0] = createTriangle(simplex[0], simplex[1], simplex[2]);
+			tris[1] = createTriangle(simplex[3], simplex[1], simplex[0]);
+			tris[2] = createTriangle(simplex[3], simplex[2], simplex[1]);
+			tris[3] = createTriangle(simplex[3], simplex[0], simplex[2]);
+
+			// stitch the side triangles to each other and to the base triangle tris[0]
+			for (PxU32 i = 0; i < 3; i++)
+			{
+				PxU32 k = (i + 1) % 3;
+				tris[i + 1]->getEdge(1)->setTwin(tris[k + 1]->getEdge(0));
+				tris[i + 1]->getEdge(2)->setTwin(tris[0]->getEdge(k));
+			}
+		}
+		else
+		{
+			tris[0] = createTriangle(simplex[0], simplex[2], simplex[1]);
+			tris[1] = createTriangle(simplex[3], simplex[0], simplex[1]);
+			tris[2] = createTriangle(simplex[3], simplex[1], simplex[2]);
+			tris[3] = createTriangle(simplex[3], simplex[2], simplex[0]);
+
+			// same stitching with mirrored edge indices for the opposite winding
+			for (PxU32 i = 0; i < 3; i++)
+			{
+				PxU32 k = (i + 1) % 3;
+				tris[i + 1]->getEdge(0)->setTwin(tris[k + 1]->getEdge(1));
+				tris[i + 1]->getEdge(2)->setTwin(tris[0]->getEdge((3 - i) % 3));
+			}
+		}
+
+		// push back the first 4 faces created from the simplex
+		for (PxU32 i = 0; i < 4; i++)
+		{
+			mHullFaces.pushBack(tris[i]);
+		}
+		mNumHullFaces = 4;
+
+		// go through the remaining points and assign each to the conflict list of
+		// the face it lies furthest above (if it is outside any face at all)
+		for (PxU32 i = 0; i < mNumVertices; i++)
+		{
+			const QuickHullVertex& v = mVerticesList[i];
+
+			if (v == simplex[0] || v == simplex[1] || v == simplex[2] || v == simplex[3])
+			{
+				continue;
+			}
+
+			// points closer than mTolerance to every face are considered inside
+			float maxDist = mTolerance;
+			QuickHullFace* maxFace = NULL;
+			for (PxU32 k = 0; k < 4; k++)
+			{
+				const float dist = tris[k]->distanceToPlane(v.point);
+				if (dist > maxDist)
+				{
+					maxFace = tris[k];
+					maxDist = dist;
+				}
+			}
+
+			if (maxFace != NULL)
+			{
+				addPointToFace(*maxFace, &mVerticesList[i], maxDist);
+			}
+		}
+	}
+
+	//////////////////////////////////////////////////////////////////////////
+	// adds a point to the face conflict list
+	// the furthest point is kept at the HEAD of the linked list - that is the
+	// only one we care about when picking the next eye vertex (see
+	// removeEyePointFromFace, which unlinks the head). The remaining points are
+	// simply stored so they can be reclaimed by new faces later; their order is
+	// irrelevant.
+	void QuickHull::addPointToFace(QuickHullFace& face, QuickHullVertex* vertex, float dist)
+	{
+		vertex->dist = dist;
+
+		// empty conflict list - the vertex becomes the head
+		if(!face.conflictList)
+		{
+			face.conflictList = vertex;
+			vertex->next = NULL;
+			return;
+		}
+
+		if (face.conflictList->dist > dist)
+		{
+			// not the furthest vertex - insert right behind the head
+			vertex->next = face.conflictList->next;
+			face.conflictList->next = vertex;
+		}
+		else
+		{
+			// new furthest vertex - becomes the new head
+			vertex->next = face.conflictList;
+			face.conflictList = vertex;
+		}
+	}
+
+	//////////////////////////////////////////////////////////////////////////
+	// removes the eye point from a face conflict list
+	// the eye point is always at the head of the list (addPointToFace stores the
+	// furthest vertex first), so we simply unlink the first element
+	void QuickHull::removeEyePointFromFace(QuickHullFace& face, const QuickHullVertex* vertex)
+	{
+		PX_UNUSED(vertex);
+		// the picked vertex should always be the first in the linked list
+		PX_ASSERT(face.conflictList == vertex);
+
+		face.conflictList = face.conflictList->next;
+	}
+
+	//////////////////////////////////////////////////////////////////////////
+	// merge polygons with similar normals
+	void QuickHull::postMergeHull()
+	{
+		// walk all faces; size() is intentionally re-evaluated every iteration
+		for (PxU32 faceIndex = 0; faceIndex < mHullFaces.size(); faceIndex++)
+		{
+			QuickHullFace& candidate = *mHullFaces[faceIndex];
+
+			// only faces still part of the hull are merge candidates
+			if (candidate.state != QuickHullFace::eVISIBLE)
+				continue;
+
+			PX_ASSERT(candidate.checkFaceConsistency());
+
+			// keep merging this face with its neighbours until no merge applies
+			while (doPostAdjacentMerge(candidate, local::MAXDOT_MINANG))
+			{
+			}
+		}
+	}
+
+ //////////////////////////////////////////////////////////////////////////
+ // builds the hull
+ // 1. find the initial simplex
+ // 2. check if simplex has a valid area
+ // 3. add vertices to the hull. We add vertex most furthest from the hull
+ // 4. terminate if hull limit reached or we have added all vertices
+ QuickHullResult::Enum QuickHull::buildHull()
+ {
+ QuickHullVertex* eyeVtx = NULL;
+ QuickHullFace* eyeFace;
+
+ // compute the vertex min max along x,y,z
+ if(!mPrecomputedMinMax)
+ computeMinMaxVerts();
+
+ // find the initial simplex of the hull
+ if (!findSimplex())
+ {
+ return QuickHullResult::eFAILURE;
+ }
+
+ // simplex area test
+ const bool useAreaTest = mConvexDesc.flags & PxConvexFlag::eCHECK_ZERO_AREA_TRIANGLES ? true : false;
+ const float areaEpsilon = mCookingParams.areaTestEpsilon * 2.0f;
+ if (useAreaTest)
+ {
+ for (PxU32 i = 0; i < mHullFaces.size(); i++)
+ {
+ if (mHullFaces[i]->area < areaEpsilon)
+ {
+ return QuickHullResult::eZERO_AREA_TEST_FAILED;
+ }
+ }
+ }
+
+ // add points to the hull
+ PxU32 numVerts = 4; // initial vertex count - simplex vertices
+ while ((eyeVtx = nextPointToAdd(eyeFace)) != NULL)
+ {
+ // if plane shifting vertex limit, we need the reduced hull
+ if((mConvexDesc.flags & PxConvexFlag::ePLANE_SHIFTING) && (numVerts >= mConvexDesc.vertexLimit))
+ break;
+
+ PX_ASSERT(eyeFace);
+ if (!addPointToHull(eyeVtx, *eyeFace))
+ {
+ // we hit the polygons hard limit
+ return QuickHullResult::ePOLYGONS_LIMIT_REACHED;
+ }
+ numVerts++;
+ }
+
+ // vertex limit has been reached. We did not stopped the iteration, since we
+ // will use the produced hull to compute OBB from it and use the planes
+ // to slice the initial OBB
+ if (numVerts >= mConvexDesc.vertexLimit)
+ {
+ return QuickHullResult::eVERTEX_LIMIT_REACHED;
+ }
+
+ return QuickHullResult::eSUCCESS;
+ }
+
+ //////////////////////////////////////////////////////////////////////////
+ // finds the best point to add to the hull
+ // go through the faces conflict list and pick the global maximum
+ QuickHullVertex* QuickHull::nextPointToAdd(QuickHullFace*& eyeFace)
+ {
+ QuickHullVertex* eyeVtx = NULL;
+ QuickHullFace* eyeF = NULL;
+ float maxDist = PxMax(mTolerance*ACCEPTANCE_EPSILON_MULTIPLY, mPlaneTolerance);
+ for (PxU32 i = 0; i < mHullFaces.size(); i++)
+ {
+ if (mHullFaces[i]->state == QuickHullFace::eVISIBLE && mHullFaces[i]->conflictList)
+ {
+ const float dist = mHullFaces[i]->conflictList->dist;
+ if (maxDist < dist)
+ {
+ maxDist = dist;
+ eyeVtx = mHullFaces[i]->conflictList;
+ eyeF = mHullFaces[i];
+ }
+ }
+ }
+
+ eyeFace = eyeF;
+ return eyeVtx;
+ }
+
	//////////////////////////////////////////////////////////////////////////
	// adds vertex to the hull
	// returns false if the new face count would hit the hull face hard limit (255)
	bool QuickHull::addPointToHull(const QuickHullVertex* eyeVtx, QuickHullFace& eyeFace)
	{
		// removes the eyePoint from the conflict list
		removeEyePointFromFace(eyeFace, eyeVtx);

		// calculates the horizon from the eyePoint - marks all faces visible from
		// the eye point as deleted (collected in mRemovedFaces) and gathers the
		// boundary edges in mHorizon
		calculateHorizon(eyeVtx->point, NULL, eyeFace, mHorizon, mRemovedFaces);

		// check that we dont hit the polygons hard limit
		// (each horizon edge spawns exactly one new triangle)
		if (mNumHullFaces + mHorizon.size() > 255)
		{
			// roll back: make the removed faces visible again and quit
			for (PxU32 i = 0; i < mRemovedFaces.size(); i++)
			{
				mRemovedFaces[i]->state = QuickHullFace::eVISIBLE;
			}
			mNumHullFaces += mRemovedFaces.size();
			return false;
		}

		// adds new faces from given horizon and eyePoint
		addNewFacesFromHorizon(eyeVtx, mHorizon, mNewFaces);

		// first merge pass ... merge faces which are non-convex
		// as determined by the larger face
		for (PxU32 i = 0; i < mNewFaces.size(); i++)
		{
			QuickHullFace& face = *mNewFaces[i];

			if (face.state == QuickHullFace::eVISIBLE)
			{
				PX_ASSERT(face.checkFaceConsistency());
				while (doAdjacentMerge(face, true));
			}
		}

		// second merge pass ... merge faces which are non-convex
		// wrt either face
		for (PxU32 i = 0; i < mNewFaces.size(); i++)
		{
			QuickHullFace& face = *mNewFaces[i];
			if (face.state == QuickHullFace::eNON_CONVEX)
			{
				face.state = QuickHullFace::eVISIBLE;
				while (doAdjacentMerge(face, false));
			}
		}

		// re-claim the points orphaned by the removed faces to the new faces
		resolveUnclaimedPoints(mNewFaces);

		// reset the per-iteration scratch arrays
		mHorizon.clear();
		mNewFaces.clear();
		mRemovedFaces.clear();

		return true;
	}
+
	//////////////////////////////////////////////////////////////////////////
	// merge adjacent faces
	// We merge 2 adjacent faces if they lie on the same thick plane defined by
	// mTolerance. This is done in 2 passes (mergeWrtLargeFace true, then false)
	// to ensure we dont leave non-convex faces behind.
	// Returns true if a merge happened (the caller loops until it returns false).
	// NOTE(review): getOppositeFaceDistance() presumably measures how far the
	// opposite face lies above this face's plane - confirm in the QuickHull header.
	bool QuickHull::doAdjacentMerge(QuickHullFace& face, bool mergeWrtLargeFace)
	{
		QuickHullHalfEdge* hedge = face.edge;

		bool convex = true;
		// walk all edges of the face and test each neighbor
		do
		{
			const QuickHullFace& oppFace = *hedge->getOppositeFace();
			bool merge = false;

			if (mergeWrtLargeFace)
			{
				// merge faces if they are parallel or non-convex
				// wrt to the larger face; otherwise, just mark
				// the face non-convex for the second pass.
				if (face.area > oppFace.area)
				{
					if (hedge->getOppositeFaceDistance() > -mTolerance)
					{
						merge = true;
					}
					else if (hedge->twin->getOppositeFaceDistance() > -mTolerance)
					{
						convex = false;
					}
				}
				else
				{
					// opposite face is the larger one - test from its side first
					if (hedge->twin->getOppositeFaceDistance() > -mTolerance)
					{
						merge = true;
					}
					else if (hedge->getOppositeFaceDistance() > -mTolerance)
					{
						convex = false;
					}
				}
			}
			else
			{
				// second pass: merge faces if they are definitively non-convex
				// as seen from either side
				if (hedge->getOppositeFaceDistance() > -mTolerance ||
					hedge->twin->getOppositeFaceDistance() > -mTolerance)
				{
					merge = true;
				}
			}

			if (merge)
			{
				// absorb the neighbor; the discarded faces hand their
				// conflict points over to the surviving face
				mDiscardedFaces.clear();
				face.mergeAdjacentFace(hedge, mDiscardedFaces);
				mNumHullFaces -= mDiscardedFaces.size();
				for (PxU32 i = 0; i < mDiscardedFaces.size(); i++)
				{
					deleteFacePoints(*mDiscardedFaces[i], &face);
				}
				PX_ASSERT(face.checkFaceConsistency());
				return true;
			}
			hedge = hedge->next;
		} while (hedge != face.edge);

		// no merge happened - remember the face for the second pass if needed
		if (!convex)
		{
			face.state = QuickHullFace::eNON_CONVEX;
		}
		return false;
	}
+
	//////////////////////////////////////////////////////////////////////////
	// merge adjacent faces doing a normal test
	// we try to merge 2 faces with nearly the same normal more aggressively,
	// using the bigger plane-thickness tolerance (mPlaneTolerance).
	// maxdot_minang is the minimum normal dot product for a merge candidate.
	// Returns true if a merge happened (the caller loops until it returns false).
	bool QuickHull::doPostAdjacentMerge(QuickHullFace& face, const float maxdot_minang)
	{
		QuickHullHalfEdge* hedge = face.edge;

		// walk all edges of the face and test each neighbor
		do
		{
			const QuickHullFace& oppFace = *hedge->getOppositeFace();
			bool merge = false;
			const PxVec3& ni = face.normal;
			const PxVec3& nj = oppFace.normal;
			const float dotP = ni.dot(nj);

			// only consider nearly-parallel neighbors
			if (dotP > maxdot_minang)
			{
				// only merge into the larger face to keep the result stable
				if (face.area > oppFace.area)
				{
					// full validity check of the hypothetical merged face
					merge = canMergeFaces(*hedge, mPlaneTolerance);
				}
			}

			if (merge)
			{
				QuickHullFaceArray discardedFaces;
				face.mergeAdjacentFace(hedge, discardedFaces);
				mNumHullFaces -= discardedFaces.size();
				// hand the discarded faces' conflict points over to the survivor
				for (PxU32 i = 0; i < discardedFaces.size(); i++)
				{
					deleteFacePoints(*discardedFaces[i], &face);
				}
				PX_ASSERT(face.checkFaceConsistency());
				return true;
			}
			hedge = hedge->next;
		} while (hedge != face.edge);

		return false;
	}
+
	//////////////////////////////////////////////////////////////////////////
	// checks if 2 adjacent faces (sharing half edge he) can be merged
	// 1. creates a temporary face with the merged vertices
	// 2. computes its normal and centroid
	// 3. checks that all verts are not too far away from the merged plane
	// 4. checks that the merged polygon is still convex
	// 5. checks that we are about to merge only the 2 neighbor faces - a merge
	//    that would also swallow additional faces might corrupt the convexity
	bool QuickHull::canMergeFaces(const QuickHullHalfEdge& he, float planeTolerance)
	{
		const QuickHullFace& face1 = *he.face;
		const QuickHullFace& face2 = *he.twin->face;

		// construct the merged face on the stack; the merged face has
		// (numEdges1 - 1) + (numEdges2 - 1) edges, so this is large enough
		PX_ALLOCA(edges, QuickHullHalfEdge, (face1.numEdges + face2.numEdges));
		PxMemSet(edges, 0, (face1.numEdges + face2.numEdges)*sizeof(QuickHullHalfEdge));
		QuickHullFace mergedFace;
		mergedFace.edge = &edges[0];

		// copy the first face edges, skipping the shared edge itself
		PxU32 currentEdge = 0;
		QuickHullHalfEdge* copyHe = he.next;
		while (copyHe != &he)
		{
			edges[currentEdge].face = &mergedFace;
			edges[currentEdge].tail = copyHe->tail;
			edges[currentEdge].next = &edges.mPointer[currentEdge + 1];

			currentEdge++;
			copyHe = copyHe->next;
		}

		// copy the second face edges, skipping the shared edge's twin
		copyHe = he.twin->next;
		while (copyHe != he.twin)
		{
			edges[currentEdge].face = &mergedFace;
			edges[currentEdge].tail = copyHe->tail;
			edges[currentEdge].next = &edges.mPointer[currentEdge + 1];

			currentEdge++;
			copyHe = copyHe->next;
		}
		// close the edge loop - the last edge points back to the first
		edges[--currentEdge].next = &edges.mPointer[0];

		// compute normal and centroid of the hypothetical merged face
		mergedFace.computeNormalAndCentroid();

		// test that every vertex lies within planeTolerance of the merged plane
		QuickHullHalfEdge* qhe = mergedFace.edge;
		do
		{
			const QuickHullVertex& vertex = qhe->tail;
			const float dist = mergedFace.distanceToPlane(vertex.point);
			if (dist > planeTolerance)
			{
				return false;
			}
			qhe = qhe->next;
		} while (qhe != mergedFace.edge);

		// check the convexity: for each edge, no other vertex may lie outside
		// the plane spanned by the edge and the merged face normal
		qhe = mergedFace.edge;
		do
		{
			const QuickHullVertex& vertex = qhe->tail;
			const QuickHullVertex& nextVertex = qhe->next->tail;

			PxVec3 edgeVector = nextVertex.point - vertex.point;
			edgeVector.normalize();
			// outward-facing side plane normal of this edge
			const PxVec3 outVector = -mergedFace.normal.cross(edgeVector);

			QuickHullHalfEdge* testHe = qhe->next;
			do
			{
				const QuickHullVertex& testVertex = testHe->tail;
				const float dist = (testVertex.point - vertex.point).dot(outVector);

				if (dist > mTolerance)
					return false;

				testHe = testHe->next;
			} while (testHe != qhe->next);

			qhe = qhe->next;
		} while (qhe != mergedFace.edge);


		const QuickHullFace* oppFace = he.getOppositeFace();

		QuickHullHalfEdge* hedgeOpp = he.twin;

		QuickHullHalfEdge* hedgeAdjPrev = he.prev;
		QuickHullHalfEdge* hedgeAdjNext = he.next;
		QuickHullHalfEdge* hedgeOppPrev = hedgeOpp->prev;
		QuickHullHalfEdge* hedgeOppNext = hedgeOpp->next;

		// skip over runs of consecutive edges shared with the opposite face
		// in the adjPrev direction
		while (hedgeAdjPrev->getOppositeFace() == oppFace)
		{
			hedgeAdjPrev = hedgeAdjPrev->prev;
			hedgeOppNext = hedgeOppNext->next;
		}

		// same in the adjNext direction
		while (hedgeAdjNext->getOppositeFace() == oppFace)
		{
			hedgeOppPrev = hedgeOppPrev->prev;
			hedgeAdjNext = hedgeAdjNext->next;
		}

		// reject merges that would also pull in a third common neighbor -
		// we only allow a clean merge of exactly 2 neighbor faces
		if (hedgeOppPrev->getOppositeFace() == hedgeAdjNext->getOppositeFace())
		{
			return false;
		}

		if (hedgeAdjPrev->getOppositeFace() == hedgeOppNext->getOppositeFace())
		{
			return false;
		}

		return true;
	}
+
+ //////////////////////////////////////////////////////////////////////////
+ // delete face points and store them as unclaimed, so we can add them back to new faces later
+ void QuickHull::deleteFacePoints(QuickHullFace& face, QuickHullFace* absorbingFace)
+ {
+ // no conflict list for this face
+ if(!face.conflictList)
+ return;
+
+ QuickHullVertex* unclaimedVertex = face.conflictList;
+ QuickHullVertex* vertexToClaim = NULL;
+ while (unclaimedVertex)
+ {
+ vertexToClaim = unclaimedVertex;
+ unclaimedVertex = unclaimedVertex->next;
+ vertexToClaim->next = NULL;
+ if (!absorbingFace)
+ {
+ mUnclaimedPoints.pushBack(vertexToClaim);
+ }
+ else
+ {
+ const float dist = absorbingFace->distanceToPlane(vertexToClaim->point);
+ if (dist > mTolerance)
+ {
+ addPointToFace(*absorbingFace, vertexToClaim, dist);
+ }
+ else
+ {
+ mUnclaimedPoints.pushBack(vertexToClaim);
+ }
+ }
+ }
+
+ face.conflictList = NULL;
+ }
+
	//////////////////////////////////////////////////////////////////////////
	// calculates the horizon from the eyePoint against a given face
	// recursive flood fill over all faces visible from the eye point: visible
	// faces are marked deleted and collected in removedFaces, edges on the
	// boundary between visible and hidden faces are collected in horizon.
	// edge0 is the edge we entered this face through (NULL for the start face).
	void QuickHull::calculateHorizon(const PxVec3& eyePoint, QuickHullHalfEdge* edge0, QuickHullFace& face, QuickHullHalfEdgeArray& horizon, QuickHullFaceArray& removedFaces)
	{
		// this face is visible from the eye point - remove it
		deleteFacePoints(face, NULL);
		face.state = QuickHullFace::eDELETED;
		removedFaces.pushBack(&face);
		mNumHullFaces--;
		QuickHullHalfEdge* edge;
		if (edge0 == NULL)
		{
			edge0 = face.getEdge(0);
			edge = edge0;
		}
		else
		{
			// skip the entry edge - its opposite face was already processed
			edge = edge0->next;
		}

		// the traversal order keeps the horizon edges in a consistent winding
		do
		{
			QuickHullFace* oppFace = edge->getOppositeFace();
			if (oppFace->state == QuickHullFace::eVISIBLE)
			{
				const float dist = oppFace->distanceToPlane(eyePoint);
				if (dist > mTolerance)
				{
					// neighbor is visible too - recurse into it
					calculateHorizon(eyePoint, edge->twin, *oppFace, horizon, removedFaces);
				}
				else
				{
					// neighbor is hidden - this edge is part of the horizon
					horizon.pushBack(edge);
				}
			}
			edge = edge->next;
		} while (edge != edge0);
	}
+
+ //////////////////////////////////////////////////////////////////////////
+ // adds new faces from given horizon and eyePoint
+ void QuickHull::addNewFacesFromHorizon(const QuickHullVertex* eyePoint, const QuickHullHalfEdgeArray& horizon, QuickHullFaceArray& newFaces)
+ {
+ QuickHullHalfEdge* hedgeSidePrev = NULL;
+ QuickHullHalfEdge* hedgeSideBegin = NULL;
+
+ for (PxU32 i = 0; i < horizon.size(); i++)
+ {
+ const QuickHullHalfEdge& horizonHe = *horizon[i];
+
+ QuickHullFace* face = createTriangle(*eyePoint, horizonHe.getHead(), horizonHe.getTail());
+ mHullFaces.pushBack(face);
+ mNumHullFaces++;
+ face->getEdge(2)->setTwin(horizonHe.twin);
+
+ QuickHullHalfEdge* hedgeSide = face->edge;
+ if (hedgeSidePrev != NULL)
+ {
+ hedgeSide->next->setTwin(hedgeSidePrev);
+ }
+ else
+ {
+ hedgeSideBegin = hedgeSide;
+ }
+ newFaces.pushBack(face);
+ hedgeSidePrev = hedgeSide;
+ }
+ hedgeSideBegin->next->setTwin(hedgeSidePrev);
+ }
+
+ //////////////////////////////////////////////////////////////////////////
+ // resolve unclaimed points
+ void QuickHull::resolveUnclaimedPoints(const QuickHullFaceArray& newFaces)
+ {
+ for (PxU32 i = 0; i < mUnclaimedPoints.size(); i++)
+ {
+ QuickHullVertex* vtx = mUnclaimedPoints[i];
+
+ float maxDist = mTolerance;
+ QuickHullFace* maxFace = NULL;
+ for (PxU32 j = 0; j < newFaces.size(); j++)
+ {
+ const QuickHullFace& newFace = *newFaces[j];
+ if (newFace.state == QuickHullFace::eVISIBLE)
+ {
+ const float dist = newFace.distanceToPlane(vtx->point);
+ if (dist > maxDist)
+ {
+ maxDist = dist;
+ maxFace = newFaces[j];
+ }
+ }
+ }
+ if (maxFace != NULL)
+ {
+ addPointToFace(*maxFace, vtx, maxDist);
+ }
+ }
+
+ mUnclaimedPoints.clear();
+ }
+
+ //////////////////////////////////////////////////////////////////////////
+ // helper struct for hull expand point
+ struct ExpandPoint
+ {
+ PxPlane plane[3]; // the 3 planes that will give us the point
+ PxU32 planeIndex[3]; // index of the planes for identification
+
+ bool operator==(const ExpandPoint& expPoint) const
+ {
+ if (expPoint.planeIndex[0] == planeIndex[0] && expPoint.planeIndex[1] == planeIndex[1] &&
+ expPoint.planeIndex[2] == planeIndex[2])
+ return true;
+ else
+ return false;
+}
+ };
+
+//////////////////////////////////////////////////////////////////////////
+ // gets the half edge neighbors and form the expand point
+ void getExpandPoint(const QuickHullHalfEdge& he, ExpandPoint& expandPoint, const Ps::Array<PxU32>* translationTable = NULL)
+ {
+ // set the first 2 - the edge face and the twin face
+ expandPoint.planeIndex[0] = (translationTable) ? ((*translationTable)[he.face->index]) : (he.face->index);
+
+ PxU32 index = translationTable ? ((*translationTable)[he.twin->face->index]) : he.twin->face->index;
+ if (index < expandPoint.planeIndex[0])
+ {
+ expandPoint.planeIndex[1] = expandPoint.planeIndex[0];
+ expandPoint.planeIndex[0] = index;
+ }
+ else
+ {
+ expandPoint.planeIndex[1] = index;
+ }
+
+ // now the 3rd one is the next he twin index
+ index = translationTable ? (*translationTable)[he.next->twin->face->index] : he.next->twin->face->index;
+ if (index < expandPoint.planeIndex[0])
+ {
+ expandPoint.planeIndex[2] = expandPoint.planeIndex[1];
+ expandPoint.planeIndex[1] = expandPoint.planeIndex[0];
+ expandPoint.planeIndex[0] = index;
+ }
+ else
+ {
+ if (index < expandPoint.planeIndex[1])
+ {
+ expandPoint.planeIndex[2] = expandPoint.planeIndex[1];
+ expandPoint.planeIndex[1] = index;
+ }
+ else
+ {
+ expandPoint.planeIndex[2] = index;
+ }
+ }
+ }
+
+ //////////////////////////////////////////////////////////////////////////
+ // adds the expand point, don't add similar point
+ void addExpandPoint(const ExpandPoint& expandPoint, Ps::Array<ExpandPoint>& expandPoints)
+ {
+ for (PxU32 i = expandPoints.size(); i--;)
+ {
+ if (expandPoint == expandPoints[i])
+ {
+ return;
+ }
+ }
+
+ expandPoints.pushBack(expandPoint);
+ }
+
+ //////////////////////////////////////////////////////////////////////////
+ // helper for 3 planes intersection
+ static PxVec3 threePlaneIntersection(const PxPlane &p0, const PxPlane &p1, const PxPlane &p2)
+ {
+ PxMat33 mp = (PxMat33(p0.n, p1.n, p2.n)).getTranspose();
+ PxMat33 mi = (mp).getInverse();
+ PxVec3 b(p0.d, p1.d, p2.d);
+ return -mi.transform(b);
+ }
+}
+
//////////////////////////////////////////////////////////////////////////
// constructor - creates the quickhull helper preallocated for the input
// point count; the output buffers are lazily allocated later
QuickHullConvexHullLib::QuickHullConvexHullLib(const PxConvexMeshDesc& desc, const PxCookingParams& params)
	: ConvexHullLib(desc, params),mQuickHull(NULL), mCropedConvexHull(NULL), mVertsOut(NULL), mIndicesOut(NULL), mPolygonsOut(NULL)
{
	mQuickHull = PX_NEW_TEMP(local::QuickHull)(params, desc);
	mQuickHull->preallocate(desc.points.count);
}
+
//////////////////////////////////////////////////////////////////////////
// destructor - releases the quickhull, the optional cropped hull and the
// output buffers (PX_FREE of NULL pointers is a no-op)
QuickHullConvexHullLib::~QuickHullConvexHullLib()
{
	mQuickHull->releaseHull();
	PX_DELETE(mQuickHull);

	// only present if expandHullOBB was used
	if(mCropedConvexHull)
	{
		PX_DELETE(mCropedConvexHull);
	}

	PX_FREE(mVertsOut);
	PX_FREE(mPolygonsOut);
	PX_FREE(mIndicesOut);
}
+
//////////////////////////////////////////////////////////////////////////
// create the hull
// 1. clean the input vertices
// 2. check we can construct the simplex, if not expand the input verts
// 3. prepare the quickhull - preallocate, parse input verts
// 4. construct the hull
// 5. post merge faces if limit not reached
// 6. if limit reached, expand the hull
PxConvexMeshCookingResult::Enum QuickHullConvexHullLib::createConvexHull()
{
	PxConvexMeshCookingResult::Enum res = PxConvexMeshCookingResult::eFAILURE;

	// allocate for at least 8 vertices - presumably cleanupVertices can emit
	// more vertices than it was given for tiny inputs (TODO confirm)
	PxU32 vcount = mConvexMeshDesc.points.count;
	if ( vcount < 8 )
		vcount = 8;

	PxVec3* outvsource = reinterpret_cast<PxVec3*> (PX_ALLOC_TEMP( sizeof(PxVec3)*vcount, "PxVec3"));
	PxVec3 scale;
	PxVec3 center;
	PxU32 outvcount;

	// cleanup the vertices first
	if(!cleanupVertices(mConvexMeshDesc.points.count, reinterpret_cast<const PxVec3*> (mConvexMeshDesc.points.data), mConvexMeshDesc.points.stride,
		outvcount, outvsource, scale, center ))
	{
		PX_FREE(outvsource);
		return res;
	}

	// scale the cleaned vertices back to their original size
	// NOTE(review): only the scale is applied here, center is unused in this
	// loop - verify whether cleanupVertices already handles the translation
	for (PxU32 i=0; i< outvcount; i++)
	{
		PxVec3& v = outvsource[i];
		v.multiply(scale);
	}

	// fix near-degenerate inputs so the initial simplex can be found;
	// if nothing was moved, the computed min/max data can be reused
	local::QuickHullVertex minimumVertex[3];
	local::QuickHullVertex maximumVertex[3];
	float tolerance;
	float planeTolerance;
	bool canReuse = cleanupForSimplex(outvsource, outvcount, &minimumVertex[0], &maximumVertex[0], tolerance, planeTolerance);

	mQuickHull->parseInputVertices(outvsource,outvcount);

	if(canReuse)
	{
		mQuickHull->setPrecomputedMinMax(minimumVertex, maximumVertex, tolerance, planeTolerance);
	}

	// run the actual quickhull algorithm and map its result
	local::QuickHullResult::Enum qhRes = mQuickHull->buildHull();

	switch(qhRes)
	{
	case local::QuickHullResult::eZERO_AREA_TEST_FAILED:
		res = PxConvexMeshCookingResult::eZERO_AREA_TEST_FAILED;
		break;
	case local::QuickHullResult::eSUCCESS:
		// success - merge near-coplanar faces as a post process
		mQuickHull->postMergeHull();
		res = PxConvexMeshCookingResult::eSUCCESS;
		break;
	case local::QuickHullResult::ePOLYGONS_LIMIT_REACHED:
		res = PxConvexMeshCookingResult::ePOLYGONS_LIMIT_REACHED;
		break;
	case local::QuickHullResult::eVERTEX_LIMIT_REACHED:
		{
			// expand the hull - plane shifting or OBB slicing depending on flags
			if(mConvexMeshDesc.flags & PxConvexFlag::ePLANE_SHIFTING)
				res = expandHull();
			else
				res = expandHullOBB();
		}
		break;
	case local::QuickHullResult::eFAILURE:
		break;
	};

	// check if we need to build GRB compatible mesh
	// if hull was cropped we already have a compatible mesh, if not check
	// the max verts per face
	if((mConvexMeshDesc.flags & PxConvexFlag::eGPU_COMPATIBLE) && !mCropedConvexHull &&
		res == PxConvexMeshCookingResult::eSUCCESS)
	{
		PX_ASSERT(mQuickHull);
		// if we hit the vertex per face limit, expand the hull by cropping OBB
		if(mQuickHull->maxNumVertsPerFace() > gpuMaxVertsPerFace)
		{
			res = expandHullOBB();
		}
	}

	PX_FREE(outvsource);
	return res;
}
+
//////////////////////////////////////////////////////////////////////////
// fixup the input vertices so they are not collinear or coplanar for the
// initial simplex find; may move individual input vertices slightly.
// outputs the per-axis min/max vertices and the tolerances, and returns true
// if no vertex was moved (so the precomputed min/max data can be reused)
bool QuickHullConvexHullLib::cleanupForSimplex(PxVec3* vertices, PxU32 vertexCount, local::QuickHullVertex* minimumVertex,
	local::QuickHullVertex* maximumVertex, float& tolerance, float& planeTolerance)
{
	bool retVal = true;

	// start the per-axis extremes at the first vertex
	for (PxU32 i = 0; i < 3; i++)
	{
		minimumVertex[i].point = vertices[0];
		minimumVertex[i].index = 0;
		maximumVertex[i].point = vertices[0];
		maximumVertex[i].index = 0;

	}

	PxVec3 max = vertices[0];
	PxVec3 min = vertices[0];

	// get the max min vertices along the x,y,z
	for (PxU32 i = 1; i < vertexCount; i++)
	{
		const PxVec3& testPoint = vertices[i];
		if (testPoint.x > max.x)
		{
			max.x = testPoint.x;
			maximumVertex[0].point = testPoint;
			maximumVertex[0].index = i;
		}
		else if (testPoint.x < min.x)
		{
			min.x = testPoint.x;
			minimumVertex[0].point = testPoint;
			minimumVertex[0].index = i;
		}

		if (testPoint.y > max.y)
		{
			max.y = testPoint.y;
			maximumVertex[1].point = testPoint;
			maximumVertex[1].index = i;
		}
		else if (testPoint.y < min.y)
		{
			min.y = testPoint.y;
			minimumVertex[1].point = testPoint;
			minimumVertex[1].index = i;
		}

		if (testPoint.z > max.z)
		{
			max.z = testPoint.z;
			maximumVertex[2].point = testPoint;
			maximumVertex[2].index = i;
		}
		else if (testPoint.z < min.z)
		{
			min.z = testPoint.z;
			minimumVertex[2].point = testPoint;
			minimumVertex[2].index = i;
		}
	}

	// tolerance scales with the extent of the input, clamped from below
	tolerance = PxMax(local::PLANE_THICKNES * (PxMax(PxAbs(max.x), PxAbs(min.x)) +
		PxMax(PxAbs(max.y), PxAbs(min.y)) +
		PxMax(PxAbs(max.z), PxAbs(min.z))), local::PLANE_THICKNES);

	planeTolerance = local::PLANE_TOLERANCE;

	// find the axis with the greatest one dimensional separation
	float fmax = 0;
	PxU32 imax = 0;

	for (PxU32 i = 0; i < 3; i++)
	{
		float diff = (maximumVertex[i].point)[i] - (minimumVertex[i].point)[i];
		if (diff > fmax)
		{
			fmax = diff;
			imax = i;
		}
	}

	PxVec3 simplex[4];

	// set first two vertices to be those with the greatest
	// one dimensional separation
	simplex[0] = maximumVertex[imax].point;
	simplex[1] = minimumVertex[imax].point;

	// set third vertex to be the vertex farthest from
	// the line between simplex[0] and simplex[1]
	PxVec3 normal;
	float maxDist = 0;
	imax = 0;
	PxVec3 u01 = (simplex[1] - simplex[0]);
	u01.normalize();

	for (PxU32 i = 0; i < vertexCount; i++)
	{
		const PxVec3& testPoint = vertices[i];
		const PxVec3 diff = testPoint - simplex[0];
		// cross product magnitude measures the distance to the line
		const PxVec3 xprod = u01.cross(diff);
		const float lenSqr = xprod.magnitudeSquared();
		if (lenSqr > maxDist)
		{
			maxDist = lenSqr;
			simplex[2] = testPoint;
			normal = xprod;
			imax = i;
		}
	}

	if (PxSqrt(maxDist) <= 100 * tolerance)
	{
		// points are collinear, we have to move the picked point further away
		// from the line, perpendicular to it
		PxVec3 u02 = simplex[2] - simplex[0];
		float fT = u02.dot(u01);
		const float sqrLen = u01.magnitudeSquared();
		fT /= sqrLen;
		PxVec3 n = u02 - fT*u01;
		n.normalize();
		const PxVec3 mP = simplex[2] + n * 100.0f * tolerance;
		simplex[2] = mP;
		vertices[imax] = mP;
		retVal = false;	// input was modified, min/max cannot be reused
	}
	normal.normalize();

	// set the forth vertex to be the vertex farthest from the plane of the
	// first three (in the normal direction)
	float d0 = simplex[2].dot(normal);
	maxDist = 0.0f;
	imax = 0;
	for (PxU32 i = 0; i < vertexCount; i++)
	{
		const PxVec3& testPoint = vertices[i];
		float dist = PxAbs(testPoint.dot(normal) - d0);
		if (dist > maxDist)
		{
			maxDist = dist;
			simplex[3] = testPoint;
			imax = i;
		}
	}

	if (PxAbs(maxDist) <= 100.0f * tolerance)
	{
		// points are coplanar, push the picked point out along the plane
		// normal on whichever side it already lies
		float dist = (vertices[imax].dot(normal) - d0);
		if (dist > 0)
			vertices[imax] = vertices[imax] + normal * 100.0f * tolerance;
		else
			vertices[imax] = vertices[imax] - normal * 100.0f * tolerance;
		retVal = false;	// input was modified, min/max cannot be reused
	}

	return retVal;
}
+
//////////////////////////////////////////////////////////////////////////
// expand the hull from the limited triangles set (plane shifting path)
// expand hull will do following steps:
// 1. get expand points from hull that form the best hull with given vertices
// 2. expand the planes to have all vertices inside the planes volume
// 3. compute new points by 3 adjacency planes intersections
// 4. take those points and create the hull from them
PxConvexMeshCookingResult::Enum QuickHullConvexHullLib::expandHull()
{
	Ps::Array<local::ExpandPoint> expandPoints;
	expandPoints.reserve(mQuickHull->mNumVertices);

	// go over faces and gather expand points - one candidate point per hull
	// corner, identified by the sorted triple of adjacent face indices
	for (PxU32 i = 0; i < mQuickHull->mHullFaces.size(); i++)
	{
		const local::QuickHullFace& face = *mQuickHull->mHullFaces[i];
		if(face.state == local::QuickHullFace::eVISIBLE)
		{
			local::ExpandPoint expandPoint;
			local::QuickHullHalfEdge* he = face.edge;
			local::getExpandPoint(*he, expandPoint);
			local::addExpandPoint(expandPoint, expandPoints);
			he = he->next;
			while (he != face.edge)
			{
				local::getExpandPoint(*he, expandPoint);
				local::addExpandPoint(expandPoint, expandPoints);
				he = he->next;
			}
		}
	}


	// go over the planes now and compute per-face expand offsets so that every
	// input vertex ends up inside the shifted planes volume
	for(PxU32 iVerts=0;iVerts< mQuickHull->mNumVertices;iVerts++)
	{
		const local::QuickHullVertex& vertex = mQuickHull->mVerticesList[iVerts];

		for (PxU32 i = 0; i < mQuickHull->mHullFaces.size(); i++)
		{
			local::QuickHullFace& face = *mQuickHull->mHullFaces[i];
			if(face.state == local::QuickHullFace::eVISIBLE)
			{
				const float dist = face.distanceToPlane(vertex.point);
				// keep the largest outside distance per face
				if(dist > 0 && dist > face.expandOffset)
				{
					face.expandOffset = dist;
				}
			}
		}
	}

	// fill the expand points planes (shifted by the expand offsets)
	for(PxU32 i=0;i<expandPoints.size();i++)
	{
		local::ExpandPoint& expandPoint = expandPoints[i];
		for (PxU32 k = 0; k < 3; k++)
		{
			const local::QuickHullFace& face = *mQuickHull->mFreeFaces.getItem(expandPoint.planeIndex[k]);
			PX_ASSERT(face.index == expandPoint.planeIndex[k]);
			PxPlane plane;
			plane.n = face.normal;
			plane.d = -face.planeOffset;
			if(face.expandOffset > 0.0f)
				plane.d -= face.expandOffset;
			expandPoint.plane[k] = plane;
		}
	}

	// now intersect each point's 3 planes to get the expanded hull vertices
	PX_ALLOCA(vertices,PxVec3,expandPoints.size());
	for(PxU32 i=0;i<expandPoints.size();i++)
	{
		local::ExpandPoint& expandPoint = expandPoints[i];
		vertices[i] = local::threePlaneIntersection(expandPoint.plane[0],expandPoint.plane[1],expandPoint.plane[2]);
	}

	// construct again the hull from the new points
	local::QuickHull* newHull = PX_NEW_TEMP(local::QuickHull)(mQuickHull->mCookingParams, mQuickHull->mConvexDesc);
	newHull->preallocate(expandPoints.size());
	newHull->parseInputVertices(vertices,expandPoints.size());

	local::QuickHullResult::Enum qhRes = newHull->buildHull();
	switch(qhRes)
	{
	case local::QuickHullResult::eZERO_AREA_TEST_FAILED:
		{
			newHull->releaseHull();
			PX_DELETE(newHull);
			return PxConvexMeshCookingResult::eZERO_AREA_TEST_FAILED;
		}
	case local::QuickHullResult::eSUCCESS:
	case local::QuickHullResult::eVERTEX_LIMIT_REACHED:
	case local::QuickHullResult::ePOLYGONS_LIMIT_REACHED:
		{
			// the expanded hull replaces the original one; hitting a limit
			// again is accepted here - the hull is already reduced
			mQuickHull->releaseHull();
			PX_DELETE(mQuickHull);
			mQuickHull = newHull;
		}
		break;
	case local::QuickHullResult::eFAILURE:
		{
			newHull->releaseHull();
			PX_DELETE(newHull);
			return PxConvexMeshCookingResult::eFAILURE;
		}
	};

	return PxConvexMeshCookingResult::eSUCCESS;
}
+
//////////////////////////////////////////////////////////////////////////
// expand the hull from the limited triangles set (OBB slicing path)
// 1. collect all planes
// 2. create OBB from the input verts
// 3. slice the OBB with the planes
// 4. iterate till vlimit is reached
PxConvexMeshCookingResult::Enum QuickHullConvexHullLib::expandHullOBB()
{
	Ps::Array<PxPlane> expandPlanes;
	expandPlanes.reserve(mQuickHull->mHullFaces.size());

	// collect expand planes (shifted outward by expandOffset if set, so all
	// input vertices stay inside)
	for (PxU32 i = 0; i < mQuickHull->mHullFaces.size(); i++)
	{
		local::QuickHullFace& face = *mQuickHull->mHullFaces[i];
		if (face.state == local::QuickHullFace::eVISIBLE)
		{
			PxPlane plane;
			plane.n = face.normal;
			plane.d = -face.planeOffset;
			if (face.expandOffset > 0.0f)
				plane.d -= face.expandOffset;

			expandPlanes.pushBack(plane);
		}
	}


	PxTransform obbTransform;
	PxVec3 sides;

	// compute the OBB from the current quickhull result
	PxConvexMeshDesc convexDesc;
	fillConvexMeshDescFromQuickHull(convexDesc);
	convexDesc.flags = mConvexMeshDesc.flags;
	computeOBBFromConvex(convexDesc, sides, obbTransform);

	// free the memory used for the convex mesh desc
	PX_FREE_AND_RESET(mVertsOut);
	PX_FREE_AND_RESET(mPolygonsOut);
	PX_FREE_AND_RESET(mIndicesOut);

	// crop the OBB, one candidate plane at a time
	PxU32 maxplanes = PxMin(PxU32(256), expandPlanes.size());

	ConvexHull* c = PX_NEW_TEMP(ConvexHull)(sides*0.5f,obbTransform, expandPlanes);

	const float planeTolerance = mQuickHull->mPlaneTolerance;
	const float epsilon = mQuickHull->mTolerance;

	PxI32 k;
	while (maxplanes-- && (k = c->findCandidatePlane(planeTolerance, epsilon)) >= 0)
	{
		// try to crop by the candidate plane; on any failure keep the
		// previous hull and stop
		ConvexHull* tmp = c;
		c = convexHullCrop(*tmp, expandPlanes[PxU32(k)], planeTolerance);
		if (c == NULL)
		{
			c = tmp;
			break;
		} // might want to debug this case better!!!
		if (!c->assertIntact(planeTolerance))
		{
			PX_DELETE(c);
			c = tmp;
			break;
		} // might want to debug this case better too!!!

		// check for vertex limit
		if (c->getVertices().size() > mConvexMeshDesc.vertexLimit)
		{
			PX_DELETE(c);
			c = tmp;
			maxplanes = 0;
			break;
		}
		// check for vertex limit per face if necessary, GRB supports max 32 verts per face
		if ((mConvexMeshDesc.flags & PxConvexFlag::eGPU_COMPATIBLE) && c->maxNumVertsPerFace() > gpuMaxVertsPerFace)
		{
			PX_DELETE(c);
			c = tmp;
			maxplanes = 0;
			break;
		}
		// crop accepted - drop the previous hull
		PX_DELETE(tmp);
	}

	PX_ASSERT(c->assertIntact(planeTolerance));

	// the cropped hull takes precedence over mQuickHull from now on
	mCropedConvexHull = c;

	return PxConvexMeshCookingResult::eSUCCESS;
}
+
+
+
+
+//////////////////////////////////////////////////////////////////////////
+// fill the descriptor with computed verts, indices and polygons
+void QuickHullConvexHullLib::fillConvexMeshDesc(PxConvexMeshDesc& desc)
+{
+ if (mCropedConvexHull)
+ fillConvexMeshDescFromCroppedHull(desc);
+ else
+ fillConvexMeshDescFromQuickHull(desc);
+}
+
+//////////////////////////////////////////////////////////////////////////
+// fill the descriptor with computed verts, indices and polygons from quickhull convex
+void QuickHullConvexHullLib::fillConvexMeshDescFromQuickHull(PxConvexMeshDesc& desc)
+{
+ // get the number of indices needed
+ PxU32 numIndices = 0;
+ PxU32 numFaces = mQuickHull->mHullFaces.size();
+ PxU32 numFacesOut = 0;
+ PxU32 largestFace = 0; // remember the largest face, we store it as the first face, required for GRB test (max 32 vers per face supported)
+ for (PxU32 i = 0; i < numFaces; i++)
+ {
+ const local::QuickHullFace& face = *mQuickHull->mHullFaces[i];
+ if(face.state == local::QuickHullFace::eVISIBLE)
+ {
+ numFacesOut++;
+ numIndices += face.numEdges;
+ if(face.numEdges > mQuickHull->mHullFaces[largestFace]->numEdges)
+ largestFace = i;
+ }
+ }
+
+ // allocate out buffers
+ PxU32* indices = reinterpret_cast<PxU32*> (PX_ALLOC_TEMP(sizeof(PxU32)*numIndices, "PxU32"));
+ PxI32* translateTable = reinterpret_cast<PxI32*> (PX_ALLOC_TEMP(sizeof(PxU32)*mQuickHull->mNumVertices, "PxU32"));
+ PxMemSet(translateTable,-1,mQuickHull->mNumVertices*sizeof(PxU32));
+ // allocate additional vec3 for V4 safe load in VolumeInteration
+ PxVec3* vertices = reinterpret_cast<PxVec3*> (PX_ALLOC_TEMP(sizeof(PxVec3)*mQuickHull->mNumVertices + 1, "PxVec3"));
+ PxHullPolygon* polygons = reinterpret_cast<PxHullPolygon*> (PX_ALLOC_TEMP(sizeof(PxHullPolygon)*numFacesOut, "PxHullPolygon"));
+
+ // go over the hullPolygons and mark valid vertices, create translateTable
+ PxU32 numVertices = 0;
+ for (PxU32 i = 0; i < numFaces; i++)
+ {
+ const local::QuickHullFace& face = *mQuickHull->mHullFaces[i];
+ if(face.state == local::QuickHullFace::eVISIBLE)
+ {
+ local::QuickHullHalfEdge* he = face.edge;
+ if(translateTable[he->tail.index] == -1)
+ {
+ vertices[numVertices] = he->tail.point;
+ translateTable[he->tail.index] = PxI32(numVertices);
+ numVertices++;
+ }
+ he = he->next;
+ while (he != face.edge)
+ {
+ if(translateTable[he->tail.index] == -1)
+ {
+ vertices[numVertices] = he->tail.point;
+ translateTable[he->tail.index] = PxI32(numVertices);
+ numVertices++;
+ }
+ he = he->next;
+ }
+ }
+ }
+
+
+ desc.points.count = numVertices;
+ desc.points.data = vertices;
+ desc.points.stride = sizeof(PxVec3);
+
+ desc.indices.count = numIndices;
+ desc.indices.data = indices;
+ desc.indices.stride = sizeof(PxU32);
+
+ desc.polygons.count = numFacesOut;
+ desc.polygons.data = polygons;
+ desc.polygons.stride = sizeof(PxHullPolygon);
+
+ PxU16 indexOffset = 0;
+ numFacesOut = 0;
+ for (PxU32 i = 0; i < numFaces; i++)
+ {
+ // faceIndex - store the largest face first then the rest
+ PxU32 faceIndex;
+ if(i == 0)
+ {
+ faceIndex = largestFace;
+ }
+ else
+ {
+ faceIndex = (i == largestFace) ? 0 : i;
+ }
+
+ const local::QuickHullFace& face = *mQuickHull->mHullFaces[faceIndex];
+ if(face.state == local::QuickHullFace::eVISIBLE)
+ {
+ //create index data
+ local::QuickHullHalfEdge* he = face.edge;
+ PxU32 index = 0;
+ indices[index + indexOffset] = PxU32(translateTable[he->tail.index]);
+ index++;
+ he = he->next;
+ while (he != face.edge)
+ {
+ indices[index + indexOffset] = PxU32(translateTable[he->tail.index]);
+ index++;
+ he = he->next;
+ }
+
+ // create polygon
+ PxHullPolygon polygon;
+ polygon.mPlane[0] = face.normal[0];
+ polygon.mPlane[1] = face.normal[1];
+ polygon.mPlane[2] = face.normal[2];
+ polygon.mPlane[3] = -face.normal.dot(face.centroid);
+
+ polygon.mIndexBase = indexOffset;
+ polygon.mNbVerts = face.numEdges;
+ indexOffset += face.numEdges;
+ polygons[numFacesOut] = polygon;
+ numFacesOut++;
+ }
+ }
+
+ PX_ASSERT(mQuickHull->mNumHullFaces == numFacesOut);
+
+ mVertsOut = vertices;
+ mIndicesOut = indices;
+ mPolygonsOut = polygons;
+
+ PX_FREE(translateTable);
+}
+
+//////////////////////////////////////////////////////////////////////////
+// fill the desc from cropped hull data
+void QuickHullConvexHullLib::fillConvexMeshDescFromCroppedHull(PxConvexMeshDesc& outDesc)
+{
+ PX_ASSERT(mCropedConvexHull);
+
+ // parse the hullOut and fill the result with vertices and polygons
+ mIndicesOut = reinterpret_cast<PxU32*> (PX_ALLOC_TEMP(sizeof(PxU32)*(mCropedConvexHull->getEdges().size()), "PxU32"));
+ PxU32 numIndices = mCropedConvexHull->getEdges().size();
+
+ PxU32 numPolygons = mCropedConvexHull->getFacets().size();
+ mPolygonsOut = reinterpret_cast<PxHullPolygon*> (PX_ALLOC_TEMP(sizeof(PxHullPolygon)*numPolygons, "PxHullPolygon"));
+
+ // allocate additional vec3 for V4 safe load in VolumeInteration
+ mVertsOut = reinterpret_cast<PxVec3*> (PX_ALLOC_TEMP(sizeof(PxVec3)*mCropedConvexHull->getVertices().size() + 1, "PxVec3"));
+ PxU32 numVertices = mCropedConvexHull->getVertices().size();
+ PxMemCopy(mVertsOut, mCropedConvexHull->getVertices().begin(), sizeof(PxVec3)*numVertices);
+
+ PxU32 i = 0;
+ PxU32 k = 0;
+ PxU32 j = 1;
+ while (i < mCropedConvexHull->getEdges().size())
+ {
+ j = 1;
+ PxHullPolygon& polygon = mPolygonsOut[k];
+ // get num indices per polygon
+ while (j + i < mCropedConvexHull->getEdges().size() && mCropedConvexHull->getEdges()[i].p == mCropedConvexHull->getEdges()[i + j].p)
+ {
+ j++;
+ }
+ polygon.mNbVerts = Ps::to16(j);
+ polygon.mIndexBase = Ps::to16(i);
+
+ // get the plane
+ polygon.mPlane[0] = mCropedConvexHull->getFacets()[k].n[0];
+ polygon.mPlane[1] = mCropedConvexHull->getFacets()[k].n[1];
+ polygon.mPlane[2] = mCropedConvexHull->getFacets()[k].n[2];
+
+ polygon.mPlane[3] = mCropedConvexHull->getFacets()[k].d;
+
+ while (j--)
+ {
+ mIndicesOut[i] = mCropedConvexHull->getEdges()[i].v;
+ i++;
+ }
+ k++;
+ }
+
+ PX_ASSERT(k == mCropedConvexHull->getFacets().size());
+
+ outDesc.indices.count = numIndices;
+ outDesc.indices.stride = sizeof(PxU32);
+ outDesc.indices.data = mIndicesOut;
+
+ outDesc.points.count = numVertices;
+ outDesc.points.stride = sizeof(PxVec3);
+ outDesc.points.data = mVertsOut;
+
+ outDesc.polygons.count = numPolygons;
+ outDesc.polygons.stride = sizeof(PxHullPolygon);
+ outDesc.polygons.data = mPolygonsOut;
+
+ swapLargestFace(outDesc);
+}
+
diff --git a/PhysX_3.4/Source/PhysXCooking/src/convex/QuickHullConvexHullLib.h b/PhysX_3.4/Source/PhysXCooking/src/convex/QuickHullConvexHullLib.h
new file mode 100644
index 00000000..ad077654
--- /dev/null
+++ b/PhysX_3.4/Source/PhysXCooking/src/convex/QuickHullConvexHullLib.h
@@ -0,0 +1,97 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2016 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#ifndef PX_QUICKHULL_CONVEXHULLLIB_H
+#define PX_QUICKHULL_CONVEXHULLLIB_H
+
+#include "ConvexHullLib.h"
+#include "Ps.h"
+#include "PsArray.h"
+#include "PsUserAllocated.h"
+
+namespace local
+{
+ class QuickHull;
+ struct QuickHullVertex;
+}
+
+namespace physx
+{
+ class ConvexHull;
+
	//////////////////////////////////////////////////////////////////////////
	// Quickhull lib constructs the hull from the given input points. The resulting
	// hull will only contain a subset of the input points. The algorithm incrementally
	// adds the furthest vertex to the starting simplex. The produced hulls are built
	// with high precision and give more stable and correct results than the legacy
	// algorithm.
	class QuickHullConvexHullLib: public ConvexHullLib, public Ps::UserAllocated
	{
		PX_NOCOPY(QuickHullConvexHullLib)
	public:

		// functions
		// desc - the input points for the hull computation; params - cooking parameters
		QuickHullConvexHullLib(const PxConvexMeshDesc& desc, const PxCookingParams& params);

		~QuickHullConvexHullLib();

		// computes the convex hull from provided points
		virtual PxConvexMeshCookingResult::Enum createConvexHull();

		// fills the convexmeshdesc with computed hull data
		virtual void fillConvexMeshDesc(PxConvexMeshDesc& desc);

	protected:
		// if vertex limit reached we need to expand the hull using the OBB slicing
		PxConvexMeshCookingResult::Enum expandHullOBB();

		// if vertex limit reached we need to expand the hull using the plane shifting
		PxConvexMeshCookingResult::Enum expandHull();

		// checks for collinearity and co planarity
		// returns true if the simplex was ok, we can reuse the computed tolerances and min/max values
		bool cleanupForSimplex(PxVec3* vertices, PxU32 vertexCount, local::QuickHullVertex* minimumVertex,
			local::QuickHullVertex* maximumVertex, float& tolerance, float& planeTolerance);

		// fill the result desc from quick hull convex
		void fillConvexMeshDescFromQuickHull(PxConvexMeshDesc& desc);

		// fill the result desc from cropped hull convex (vertex-limit path)
		void fillConvexMeshDescFromCroppedHull(PxConvexMeshDesc& desc);

	private:
		local::QuickHull* mQuickHull; // the internal quick hull representation
		ConvexHull* mCropedConvexHull; // the hull cropped from OBB, used for vertex limit path

		PxVec3* mVertsOut; // vertices for output
		PxU32* mIndicesOut; // indices for output
		PxHullPolygon* mPolygonsOut; // polygons for output
	};
+}
+
+#endif
diff --git a/PhysX_3.4/Source/PhysXCooking/src/convex/VolumeIntegration.cpp b/PhysX_3.4/Source/PhysXCooking/src/convex/VolumeIntegration.cpp
new file mode 100644
index 00000000..f388a32c
--- /dev/null
+++ b/PhysX_3.4/Source/PhysXCooking/src/convex/VolumeIntegration.cpp
@@ -0,0 +1,797 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2016 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+
+//#ifdef PX_COOKING
+
+/*
+* This code computes volume integrals needed to compute mass properties of polyhedral bodies.
+* Based on public domain code by Brian Mirtich.
+*/
+#include "foundation/PxMemory.h"
+#include "VolumeIntegration.h"
+#include "PxSimpleTriangleMesh.h"
+#include "PxConvexMeshDesc.h"
+#include "GuConvexMeshData.h"
+#include "PsUtilities.h"
+#include "PsVecMath.h"
+
+
+namespace physx
+{
+
+ using namespace Ps::aos;
+
+namespace
+{
+
	// Computes the ten volume integrals of a triangle mesh that are needed
	// for mass properties (total volume, first/second moments, products of
	// inertia), following Mirtich's surface-integral method.
	class VolumeIntegrator
	{
		public:
								VolumeIntegrator(PxSimpleTriangleMesh mesh, PxF64 mDensity);
								~VolumeIntegrator();
				bool			computeVolumeIntegrals(PxIntegrals& ir);
		private:
				// currently unused - kept from the original face-normal cache design
				struct Normal
				{
					PxVec3	normal;
					PxF32	w;
				};

				// per-triangle working data: plane (Norm, w) and vertex indices
				struct Face
				{
					PxF64	Norm[3];
					PxF64	w;
					PxU32	Verts[3];
				};

		// Data structures
				PxF64		mMass;					//!< Mass
				PxF64		mDensity;				//!< Density
				PxSimpleTriangleMesh mesh;
				//Normal * faceNormals;	//!< temp face normal data structure




				// axis permutation for the current face: gamma (mC) is the axis
				// along which the face normal is largest; alpha/beta span the
				// projection plane
				unsigned int mA;					//!< Alpha
				unsigned int mB;					//!< Beta
				unsigned int mC;					//!< Gamma

		// Projection integrals (over the face projected onto the alpha-beta plane)
				PxF64		mP1;
				PxF64		mPa;					//!< Pi Alpha
				PxF64		mPb;					//!< Pi Beta
				PxF64		mPaa;					//!< Pi Alpha^2
				PxF64		mPab;					//!< Pi AlphaBeta
				PxF64		mPbb;					//!< Pi Beta^2
				PxF64		mPaaa;					//!< Pi Alpha^3
				PxF64		mPaab;					//!< Pi Alpha^2Beta
				PxF64		mPabb;					//!< Pi AlphaBeta^2
				PxF64		mPbbb;					//!< Pi Beta^3

		// Face integrals (over the 3D face, derived from the projection integrals)
				PxF64		mFa;					//!< FAlpha
				PxF64		mFb;					//!< FBeta
				PxF64		mFc;					//!< FGamma
				PxF64		mFaa;					//!< FAlpha^2
				PxF64		mFbb;					//!< FBeta^2
				PxF64		mFcc;					//!< FGamma^2
				PxF64		mFaaa;					//!< FAlpha^3
				PxF64		mFbbb;					//!< FBeta^3
				PxF64		mFccc;					//!< FGamma^3
				PxF64		mFaab;					//!< FAlpha^2Beta
				PxF64		mFbbc;					//!< FBeta^2Gamma
				PxF64		mFcca;					//!< FGamma^2Alpha

		// The 10 volume integrals
				PxF64		mT0;					//!< ~Total mass
				PxF64		mT1[3];					//!< Location of the center of mass
				PxF64		mT2[3];					//!< Moments of inertia
				PxF64		mTP[3];					//!< Products of inertia

		// Internal methods
		//		bool		Init();
				PxVec3		computeCenterOfMass();
				void		computeInertiaTensor(PxF64* J);
				void		computeCOMInertiaTensor(PxF64* J);
				void		computeFaceNormal(Face & f, PxU32 * indices);

				void		computeProjectionIntegrals(const Face& f);
				void		computeFaceIntegrals(const Face& f);
	};
+
+ #define X 0u
+ #define Y 1u
+ #define Z 2u
+
+ ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+ /**
+ * Constructor.
+ */
+ ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+ VolumeIntegrator::VolumeIntegrator(PxSimpleTriangleMesh mesh_, PxF64 density)
+ {
+ mDensity = density;
+ mMass = 0.0;
+ this->mesh = mesh_;
+ }
+
	///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
	/**
	*	Destructor. Nothing to release - the integrator does not own the mesh data.
	*/
	///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
	VolumeIntegrator::~VolumeIntegrator()
	{
	}
+
+ void VolumeIntegrator::computeFaceNormal(Face & f, PxU32 * indices)
+ {
+ const PxU8 * vertPointer = reinterpret_cast<const PxU8*>(mesh.points.data);
+
+ //two edges
+ PxVec3 d1 = (*reinterpret_cast<const PxVec3 *>(vertPointer + mesh.points.stride * indices[1] )) - (*reinterpret_cast<const PxVec3 *>(vertPointer + mesh.points.stride * indices[0] ));
+ PxVec3 d2 = (*reinterpret_cast<const PxVec3 *>(vertPointer + mesh.points.stride * indices[2] )) - (*reinterpret_cast<const PxVec3 *>(vertPointer + mesh.points.stride * indices[1] ));
+
+
+ PxVec3 normal = d1.cross(d2);
+
+ normal.normalize();
+
+ f.w = - PxF64(normal.dot( (*reinterpret_cast<const PxVec3 *>(vertPointer + mesh.points.stride * indices[0] )) ));
+
+ f.Norm[0] = PxF64(normal.x);
+ f.Norm[1] = PxF64(normal.y);
+ f.Norm[2] = PxF64(normal.z);
+ }
+
	///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
	/**
	*	Computes volume integrals for a polyhedron by summing surface integrals over its faces
	*	(Mirtich's method - each face contributes via its projection integrals).
	*	\param		ir	[out] a result structure (COM, inertia tensors, mass).
	*	\return		true if success
	*/
	///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
	bool VolumeIntegrator::computeVolumeIntegrals(PxIntegrals& ir)
	{
		// Clear all integrals
		mT0 = mT1[X] = mT1[Y] = mT1[Z] = mT2[X] = mT2[Y] = mT2[Z] = mTP[X] = mTP[Y] = mTP[Z] = 0;

		Face f;
		const PxU8 * trigPointer = reinterpret_cast<const PxU8*>(mesh.triangles.data);
		for(PxU32 i=0;i<mesh.triangles.count;i++, trigPointer += mesh.triangles.stride)
		{
			// fetch the triangle's vertex indices; the source index buffer may be 16- or 32-bit
			if (mesh.flags & PxMeshFlag::e16_BIT_INDICES)
			{
				f.Verts[0] = (reinterpret_cast<const PxU16 *>(trigPointer))[0];
				f.Verts[1] = (reinterpret_cast<const PxU16 *>(trigPointer))[1];
				f.Verts[2] = (reinterpret_cast<const PxU16 *>(trigPointer))[2];
			}
			else
			{
				f.Verts[0] = (reinterpret_cast<const PxU32 *>(trigPointer)[0]);
				f.Verts[1] = (reinterpret_cast<const PxU32 *>(trigPointer)[1]);
				f.Verts[2] = (reinterpret_cast<const PxU32 *>(trigPointer)[2]);
			}

			// optionally flip the winding so the normals point the expected way
			if (mesh.flags & PxMeshFlag::eFLIPNORMALS)
			{
				PxU32 t = f.Verts[1];
				f.Verts[1] = f.Verts[2];
				f.Verts[2] = t;
			}

			//compute face normal:
			computeFaceNormal(f,f.Verts);

			// Compute alpha/beta/gamma as the right-handed permutation of (x,y,z) that maximizes |n|;
			// projecting onto the alpha-beta plane with the largest normal component is the most
			// numerically stable choice
			PxF64 nx = fabs(f.Norm[X]);
			PxF64 ny = fabs(f.Norm[Y]);
			PxF64 nz = fabs(f.Norm[Z]);
			if (nx > ny && nx > nz)	mC = X;
			else					mC = (ny > nz) ? Y : Z;
			mA = (mC + 1) % 3;
			mB = (mA + 1) % 3;

			// Compute face contribution (fills mFa..mFcca from the projection integrals)
			computeFaceIntegrals(f);

			// Update integrals
			mT0 += f.Norm[X] * ((mA == X) ? mFa : ((mB == X) ? mFb : mFc));

			mT1[mA] += f.Norm[mA] * mFaa;
			mT1[mB] += f.Norm[mB] * mFbb;
			mT1[mC] += f.Norm[mC] * mFcc;

			mT2[mA] += f.Norm[mA] * mFaaa;
			mT2[mB] += f.Norm[mB] * mFbbb;
			mT2[mC] += f.Norm[mC] * mFccc;

			mTP[mA] += f.Norm[mA] * mFaab;
			mTP[mB] += f.Norm[mB] * mFbbc;
			mTP[mC] += f.Norm[mC] * mFcca;
		}

		// apply the constant factors of the divergence-theorem conversion
		mT1[X] /= 2;	mT1[Y] /= 2;	mT1[Z] /= 2;
		mT2[X] /= 3;	mT2[Y] /= 3;	mT2[Z] /= 3;
		mTP[X] /= 2;	mTP[Y] /= 2;	mTP[Z] /= 2;

		// Fill result structure
		ir.COM = computeCenterOfMass();
		computeInertiaTensor(reinterpret_cast<PxF64*>(ir.inertiaTensor));
		computeCOMInertiaTensor(reinterpret_cast<PxF64*>(ir.COMInertiaTensor));
		ir.mass = mMass;	// mMass is set by computeCOMInertiaTensor (density * mT0)
		return true;
	}
+
+ ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+ /**
+ * Computes the center of mass.
+ * \return The center of mass.
+ */
+ ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+ PxVec3 VolumeIntegrator::computeCenterOfMass()
+ {
+ // Compute center of mass
+ PxVec3 COM(0.0f, 0.0f, 0.0f);
+ if(mT0!=0.0)
+ {
+ COM.x = float(mT1[X] / mT0);
+ COM.y = float(mT1[Y] / mT0);
+ COM.z = float(mT1[Z] / mT0);
+ }
+ return COM;
+ }
+
+ ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+ /**
+ * Setups the inertia tensor relative to the origin.
+ * \param it [out] the returned inertia tensor.
+ */
+ ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+ void VolumeIntegrator::computeInertiaTensor(PxF64* it)
+ {
+ PxF64 J[3][3];
+
+ // Compute inertia tensor
+ J[X][X] = mDensity * (mT2[Y] + mT2[Z]);
+ J[Y][Y] = mDensity * (mT2[Z] + mT2[X]);
+ J[Z][Z] = mDensity * (mT2[X] + mT2[Y]);
+
+ J[X][Y] = J[Y][X] = - mDensity * mTP[X];
+ J[Y][Z] = J[Z][Y] = - mDensity * mTP[Y];
+ J[Z][X] = J[X][Z] = - mDensity * mTP[Z];
+
+ PxMemCopy(it, J, 9*sizeof(PxF64));
+ }
+
	///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
	/**
	*	Setups the inertia tensor relative to the COM via the parallel-axis (Huyghens) theorem.
	*	Side effect: also computes and stores mMass = density * mT0.
	*	\param		it	[out] the returned inertia tensor (row-major 3x3 of PxF64).
	*/
	///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
	void VolumeIntegrator::computeCOMInertiaTensor(PxF64* it)
	{
		PxF64 J[3][3];

		mMass = mDensity * mT0;

		const PxVec3 COM = computeCenterOfMass();
		const PxVec3 MassCOM(PxF32(mMass) * COM);	// m * (XG, YG, ZG)
		const PxVec3 MassCOM2(MassCOM.x * COM.x, MassCOM.y * COM.y, MassCOM.z * COM.z);	// m * (XG^2, YG^2, ZG^2)

		// Compute initial inertia tensor (relative to the origin)
		computeInertiaTensor(reinterpret_cast<PxF64*>(J));

		// Translate inertia tensor to center of mass
		// Huyghens' theorem:
		// Jx'x' = Jxx - m*(YG^2+ZG^2)
		// Jy'y' = Jyy - m*(ZG^2+XG^2)
		// Jz'z' = Jzz - m*(XG^2+YG^2)
		// XG, YG, ZG = new origin
		// YG^2+ZG^2 = dx^2
		J[X][X] -= PxF64(MassCOM2.y + MassCOM2.z);
		J[Y][Y] -= PxF64(MassCOM2.z + MassCOM2.x);
		J[Z][Z] -= PxF64(MassCOM2.x + MassCOM2.y);

		// Huyghens' theorem:
		// Jx'y' = Jxy - m*XG*YG
		// Jy'z' = Jyz - m*YG*ZG
		// Jz'x' = Jzx - m*ZG*XG
		// ### IS THE SIGN CORRECT ?
		// NOTE(review): the += looks right: the parallel-axis theorem gives
		// I_origin[xy] = I_com[xy] - m*XG*YG, so recovering the COM-relative
		// product term means ADDING m*XG*YG to the origin tensor - confirm.
		// (the chained "J[X][Y] = J[Y][X] += ..." updates one entry then
		// mirrors it to keep the tensor symmetric)
		J[X][Y] = J[Y][X]	+= PxF64(MassCOM.x * COM.y);
		J[Y][Z] = J[Z][Y]	+= PxF64(MassCOM.y * COM.z);
		J[Z][X] = J[X][Z]	+= PxF64(MassCOM.z * COM.x);

		PxMemCopy(it, J, 9*sizeof(PxF64));
	}
+
	///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
	/**
	*	Computes integrals over a face projection from the coordinates of the projections vertices.
	*	The face is projected onto the alpha-beta plane (axes mA/mB chosen per face) and the
	*	polynomial integrals 1, a, b, a^2, ab, b^2, a^3, a^2b, ab^2, b^3 are accumulated
	*	edge-by-edge via Green's theorem (Mirtich's closed forms).
	*	\param		f	[in] a face structure.
	*/
	///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
	void VolumeIntegrator::computeProjectionIntegrals(const Face& f)
	{
		mP1 = mPa = mPb = mPaa = mPab = mPbb = mPaaa = mPaab = mPabb = mPbbb = 0.0;

		const PxU8* vertPointer = reinterpret_cast<const PxU8*>(mesh.points.data);
		// walk the triangle's three edges (i -> i+1 mod 3)
		for(PxU32 i=0;i<3;i++)
		{
			const PxVec3& p0 = *reinterpret_cast<const PxVec3 *>(vertPointer + mesh.points.stride * (f.Verts[i]) );
			const PxVec3& p1 = *reinterpret_cast<const PxVec3 *>(vertPointer + mesh.points.stride * (f.Verts[(i+1) % 3]) );

			// alpha/beta coordinates of the edge endpoints in the projection plane
			PxF64 a0 = PxF64(p0[mA]);
			PxF64 b0 = PxF64(p0[mB]);
			PxF64 a1 = PxF64(p1[mA]);
			PxF64 b1 = PxF64(p1[mB]);

			PxF64 da = a1 - a0;			// DeltaA
			PxF64 db = b1 - b0;			// DeltaB

			// powers of the endpoint coordinates reused in the closed forms below
			PxF64 a0_2 = a0 * a0;		// Alpha0^2
			PxF64 a0_3 = a0_2 * a0;		// ...
			PxF64 a0_4 = a0_3 * a0;

			PxF64 b0_2 = b0 * b0;
			PxF64 b0_3 = b0_2 * b0;
			PxF64 b0_4 = b0_3 * b0;

			PxF64 a1_2 = a1 * a1;
			PxF64 a1_3 = a1_2 * a1;

			PxF64 b1_2 = b1 * b1;
			PxF64 b1_3 = b1_2 * b1;

			// Mirtich's C/K subexpressions for the per-edge contributions
			PxF64 C1 = a1 + a0;

			PxF64 Ca = a1*C1 + a0_2;
			PxF64 Caa = a1*Ca + a0_3;
			PxF64 Caaa = a1*Caa + a0_4;

			PxF64 Cb = b1*(b1 + b0) + b0_2;
			PxF64 Cbb = b1*Cb + b0_3;
			PxF64 Cbbb = b1*Cbb + b0_4;

			PxF64 Cab = 3*a1_2 + 2*a1*a0 + a0_2;
			PxF64 Kab = a1_2 + 2*a1*a0 + 3*a0_2;

			PxF64 Caab = a0*Cab + 4*a1_3;
			PxF64 Kaab = a1*Kab + 4*a0_3;

			PxF64 Cabb = 4*b1_3 + 3*b1_2*b0 + 2*b1*b0_2 + b0_3;
			PxF64 Kabb = b1_3 + 2*b1_2*b0 + 3*b1*b0_2 + 4*b0_3;

			// accumulate the edge contributions
			mP1 += db*C1;
			mPa += db*Ca;
			mPaa += db*Caa;
			mPaaa += db*Caaa;
			mPb += da*Cb;
			mPbb += da*Cbb;
			mPbbb += da*Cbbb;
			mPab += db*(b1*Cab + b0*Kab);
			mPaab += db*(b1*Caab + b0*Kaab);
			mPabb += da*(a1*Cabb + a0*Kabb);
		}

		// apply the constant normalization factors of the closed forms
		mP1 /= 2.0;
		mPa /= 6.0;
		mPaa /= 12.0;
		mPaaa /= 20.0;
		mPb /= -6.0;
		mPbb /= -12.0;
		mPbbb /= -20.0;
		mPab /= 24.0;
		mPaab /= 60.0;
		mPabb /= -60.0;
	}
+
	#define	SQR(x)			((x)*(x))						//!< Returns x square
	#define	CUBE(x)			((x)*(x)*(x))					//!< Returns x cube

	///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
	/**
	*	Computes surface integrals over a polyhedral face from the integrals over its projection.
	*	Lifts the alpha-beta projection integrals back onto the 3D face using the plane
	*	equation n.p + w = 0 (gamma = -(n_a*a + n_b*b + w)/n_c).
	*	\param		f	[in] a face structure.
	*/
	///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
	void VolumeIntegrator::computeFaceIntegrals(const Face& f)
	{
		computeProjectionIntegrals(f);

		PxF64 w = f.w;
		const PxF64* n = f.Norm;
		// powers of 1/n_gamma; gamma was chosen as the axis with the largest
		// |normal| component, so n[mC] is the largest-magnitude component
		// (nonzero for any non-degenerate face normal)
		PxF64 k1 = 1 / n[mC];
		PxF64 k2 = k1 * k1;
		PxF64 k3 = k2 * k1;
		PxF64 k4 = k3 * k1;

		mFa = k1 * mPa;
		mFb = k1 * mPb;
		mFc = -k2 * (n[mA]*mPa + n[mB]*mPb + w*mP1);

		mFaa = k1 * mPaa;
		mFbb = k1 * mPbb;
		mFcc = k3 * (SQR(n[mA])*mPaa + 2*n[mA]*n[mB]*mPab + SQR(n[mB])*mPbb + w*(2*(n[mA]*mPa + n[mB]*mPb) + w*mP1));

		mFaaa = k1 * mPaaa;
		mFbbb = k1 * mPbbb;
		mFccc = -k4 * (CUBE(n[mA])*mPaaa + 3*SQR(n[mA])*n[mB]*mPaab
			+ 3*n[mA]*SQR(n[mB])*mPabb + CUBE(n[mB])*mPbbb
			+ 3*w*(SQR(n[mA])*mPaa + 2*n[mA]*n[mB]*mPab + SQR(n[mB])*mPbb)
			+ w*w*(3*(n[mA]*mPa + n[mB]*mPb) + w*mP1));

		mFaab = k1 * mPaab;
		mFbbc = -k2 * (n[mA]*mPabb + n[mB]*mPbbb + w*mPbb);
		mFcca = k3 * (SQR(n[mA])*mPaaa + 2*n[mA]*n[mB]*mPaab + SQR(n[mB])*mPabb + w*(2*(n[mA]*mPaa + n[mB]*mPab) + w*mPa));
	}
+
	/*
	* This code computes volume integrals needed to compute mass properties of polyhedral bodies.
	* Based on public domain code by David Eberly.
	*/

	// Computes mass properties of a convex mesh (polygon soup with plane data)
	// using Eberly's per-triangle closed forms; offers a scalar and a SIMD path.
	class VolumeIntegratorEberly
	{
	public:
		VolumeIntegratorEberly(const PxConvexMeshDesc& mesh, PxF64 mDensity);
		~VolumeIntegratorEberly();
		// origin shifts the vertices prior to integration (improves accuracy);
		// the returned inertia is still relative to (0,0,0)
		bool computeVolumeIntegralsSIMD(PxIntegrals& ir, const PxVec3& origin);
		bool computeVolumeIntegrals(PxIntegrals& ir, const PxVec3& origin);

	private:
		VolumeIntegratorEberly& operator=(const VolumeIntegratorEberly&);	// non-assignable (const-reference member)
		const PxConvexMeshDesc& mDesc;	// the mesh being integrated (not owned)
		PxF64 mMass;					// accumulated mass (double-precision path)
		PxReal mMassR;					// accumulated mass (float/SIMD path)
		PxF64 mDensity;					// density used to scale the integrals
	};
+
	///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
	/**
	*	Constructor. Keeps a reference to the convex mesh descriptor and stores the density.
	*/
	///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
	VolumeIntegratorEberly::VolumeIntegratorEberly(const PxConvexMeshDesc& desc, PxF64 density)
		: mDesc(desc), mMass(0), mMassR(0), mDensity(density)
	{
	}
+
	///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
	/**
	*	Destructor. Nothing to release - the descriptor is not owned.
	*/
	///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
	VolumeIntegratorEberly::~VolumeIntegratorEberly()
	{
	}
+
+ PX_FORCE_INLINE void subexpressions(PxF64 w0, PxF64 w1, PxF64 w2, PxF64& f1, PxF64& f2, PxF64& f3, PxF64& g0, PxF64& g1, PxF64& g2)
+ {
+ PxF64 temp0 = w0 + w1;
+ f1 = temp0 + w2;
+ PxF64 temp1 = w0*w0;
+ PxF64 temp2 = temp1 + w1*temp0;
+ f2 = temp2 + w2*f1;
+ f3 = w0*temp1 + w1*temp2 + w2*f2;
+ g0 = f2 + w0*(f1 + w0);
+ g1 = f2 + w1*(f1 + w1);
+ g2 = f2 + w2*(f1 + w2);
+ }
+
	// SIMD counterpart of subexpressions(): computes the same f/g polynomial
	// terms for all vector lanes at once (callers feed x/y/z in the first
	// three lanes).
	PX_FORCE_INLINE void subexpressionsSIMD(const Vec4V& w0, const Vec4V& w1, const Vec4V& w2,
		Vec4V& f1, Vec4V& f2, Vec4V& f3, Vec4V& g0, Vec4V& g1, Vec4V& g2)
	{
		const Vec4V temp0 = V4Add(w0, w1);
		f1 = V4Add(temp0, w2);
		const Vec4V temp1 = V4Mul(w0,w0);
		const Vec4V temp2 = V4MulAdd(w1, temp0, temp1);
		f2 = V4MulAdd(w2, f1, temp2);

		// f3 = w0.multiply(temp1) + w1.multiply(temp2) + w2.multiply(f2);
		const Vec4V ad0 = V4Mul(w0, temp1);
		const Vec4V ad1 = V4MulAdd(w1, temp2, ad0);
		f3 = V4MulAdd(w2, f2, ad1);

		g0 = V4MulAdd(w0, V4Add(f1, w0), f2); // f2 + w0.multiply(f1 + w0);
		g1 = V4MulAdd(w1, V4Add(f1, w1), f2); // f2 + w1.multiply(f1 + w1);
		g2 = V4MulAdd(w2, V4Add(f1, w2), f2); // f2 + w2.multiply(f1 + w2);
	}
+
+ ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+ /**
+ * Computes volume integrals for a polyhedron by summing surface integrals over its faces. SIMD version
+ * \param ir [out] a result structure.
+ * \param origin [in] the origin of the mesh vertices. All vertices will be shifted accordingly prior to computing the volume integrals.
+ Can improve accuracy, for example, if the centroid is used in the case of a convex mesh. Note: the returned inertia will not be relative to this origin but relative to (0,0,0).
+ * \return true if success
+ */
+ ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+ bool VolumeIntegratorEberly::computeVolumeIntegralsSIMD(PxIntegrals& ir, const PxVec3& origin)
+ {
+ FloatV mult = FLoad(1.0f/6.0f);
+ const Vec4V multV = V4Load(1.0f/24.0f);
+ const Vec4V multV2 = V4Load(1.0f/60.0f);
+ const Vec4V multVV = V4Load(1.0f/120.0f);
+
+ // order: 1, x, y, z, x^2, y^2, z^2, xy, yz, zx
+ FloatV intg = FLoad(0.0f);
+ Vec4V intgV = V4Load(0.0f);
+ Vec4V intgV2 = V4Load(0.0f);
+ Vec4V intgVV = V4Load(0.0f);
+
+ const Vec4V originV = Vec4V_From_PxVec3_WUndefined(origin);
+ const FloatV zeroV = FLoad(0.0f);
+
+ const PxVec3* hullVerts = static_cast<const PxVec3*> (mDesc.points.data);
+ const Gu::HullPolygonData* hullPolygons = static_cast<const Gu::HullPolygonData*> (mDesc.polygons.data);
+
+ for (PxU32 i = 0; i < mDesc.polygons.count; i++)
+ {
+ const Gu::HullPolygonData& polygon = hullPolygons[i];
+ const PxU8* data = static_cast<const PxU8*>(mDesc.indices.data) + polygon.mVRef8;
+ const PxU32 nbVerts = polygon.mNbVerts;
+
+ PX_ASSERT(nbVerts > 2);
+
+ const Vec4V normalV = V4LoadU(&polygon.mPlane.n.x);
+
+ for (PxU32 j = 0; j < nbVerts - 2; j++)
+ {
+ // Should be safe to V4Load, we allocate one more vertex each time
+ const Vec4V vertex0 = V4LoadU(&hullVerts[data[0]].x);
+ const Vec4V vertex1 = V4LoadU(&hullVerts[data[j + 1]].x);
+ const Vec4V vertex2 = V4LoadU(&hullVerts[data[j + 2]].x);
+
+ const Vec4V p0 = V4Sub(vertex0, originV);
+ Vec4V p1 = V4Sub(vertex1, originV);
+ Vec4V p2 = V4Sub(vertex2, originV);
+
+ const Vec4V p0YZX = V4PermYZXW(p0);
+ const Vec4V p1YZX = V4PermYZXW(p1);
+ const Vec4V p2YZX = V4PermYZXW(p2);
+
+ // get edges and cross product of edges
+ Vec4V d = V4Cross(V4Sub(p1, p0), V4Sub(p2, p0)); // (p1 - p0).cross(p2 - p0);
+
+ const FloatV dist = V4Dot3(d, normalV);
+ //if(cp.dot(normalV) < 0)
+ if(FAllGrtr(zeroV, dist))
+ {
+ d = V4Neg(d);
+ Vec4V temp = p1;
+ p1 = p2;
+ p2 = temp;
+ }
+
+ // compute integral terms
+ Vec4V f1; Vec4V f2; Vec4V f3; Vec4V g0; Vec4V g1; Vec4V g2;
+
+ subexpressionsSIMD(p0, p1, p2, f1, f2, f3, g0, g1, g2);
+
+ // update integrals
+ intg = FScaleAdd(V4GetX(d), V4GetX(f1), intg); //intg += d.x*f1.x;
+
+ intgV = V4MulAdd(d, f2, intgV); // intgV +=d.multiply(f2);
+ intgV2 = V4MulAdd(d, f3, intgV2); // intgV2 += d.multiply(f3);
+
+ const Vec4V ad0 = V4Mul(p0YZX, g0);
+ const Vec4V ad1 = V4MulAdd(p1YZX, g1, ad0);
+ const Vec4V ad2 = V4MulAdd(p2YZX, g2, ad1);
+ intgVV = V4MulAdd(d, ad2, intgVV); //intgVV += d.multiply(p0YZX.multiply(g0) + p1YZX.multiply(g1) + p2YZX.multiply(g2));
+ }
+ }
+
+ intg = FMul(intg, mult); // intg *= mult;
+ intgV = V4Mul(intgV, multV);
+ intgV2 = V4Mul(intgV2, multV2);
+ intgVV = V4Mul(intgVV, multVV);
+
+ // center of mass ir.COM = intgV/mMassR;
+ const Vec4V comV = V4ScaleInv(intgV, intg);
+ // we rewrite the mass, but then we set it back
+ V4StoreU(comV, &ir.COM.x);
+
+ FStore(intg, &mMassR);
+ ir.mass = PxF64(mMassR); // = intg;
+
+ PxVec3 intg2;
+ V3StoreU(Vec3V_From_Vec4V(intgV2), intg2);
+
+ PxVec3 intVV;
+ V3StoreU(Vec3V_From_Vec4V(intgVV), intVV);
+
+ // inertia tensor relative to the provided origin parameter
+ ir.inertiaTensor[0][0] = PxF64(intg2.y + intg2.z);
+ ir.inertiaTensor[1][1] = PxF64(intg2.x + intg2.z);
+ ir.inertiaTensor[2][2] = PxF64(intg2.x + intg2.y);
+ ir.inertiaTensor[0][1] = ir.inertiaTensor[1][0] = PxF64(-intVV.x);
+ ir.inertiaTensor[1][2] = ir.inertiaTensor[2][1] = PxF64(-intVV.y);
+ ir.inertiaTensor[0][2] = ir.inertiaTensor[2][0] = PxF64(-intVV.z);
+
+ // inertia tensor relative to center of mass
+ ir.COMInertiaTensor[0][0] = ir.inertiaTensor[0][0] -PxF64(mMassR*(ir.COM.y*ir.COM.y+ir.COM.z*ir.COM.z));
+ ir.COMInertiaTensor[1][1] = ir.inertiaTensor[1][1] -PxF64(mMassR*(ir.COM.z*ir.COM.z+ir.COM.x*ir.COM.x));
+ ir.COMInertiaTensor[2][2] = ir.inertiaTensor[2][2] -PxF64(mMassR*(ir.COM.x*ir.COM.x+ir.COM.y*ir.COM.y));
+ ir.COMInertiaTensor[0][1] = ir.COMInertiaTensor[1][0] = (ir.inertiaTensor[0][1] +PxF64(mMassR*ir.COM.x*ir.COM.y));
+ ir.COMInertiaTensor[1][2] = ir.COMInertiaTensor[2][1] = (ir.inertiaTensor[1][2] +PxF64(mMassR*ir.COM.y*ir.COM.z));
+ ir.COMInertiaTensor[0][2] = ir.COMInertiaTensor[2][0] = (ir.inertiaTensor[0][2] +PxF64(mMassR*ir.COM.z*ir.COM.x));
+
+ // inertia tensor relative to (0,0,0)
+ if (!origin.isZero())
+ {
+ PxVec3 sum = ir.COM + origin;
+ ir.inertiaTensor[0][0] -= PxF64(mMassR*((ir.COM.y*ir.COM.y+ir.COM.z*ir.COM.z) - (sum.y*sum.y+sum.z*sum.z)));
+ ir.inertiaTensor[1][1] -= PxF64(mMassR*((ir.COM.z*ir.COM.z+ir.COM.x*ir.COM.x) - (sum.z*sum.z+sum.x*sum.x)));
+ ir.inertiaTensor[2][2] -= PxF64(mMassR*((ir.COM.x*ir.COM.x+ir.COM.y*ir.COM.y) - (sum.x*sum.x+sum.y*sum.y)));
+ ir.inertiaTensor[0][1] = ir.inertiaTensor[1][0] = ir.inertiaTensor[0][1] + PxF64(mMassR*((ir.COM.x*ir.COM.y) - (sum.x*sum.y)));
+ ir.inertiaTensor[1][2] = ir.inertiaTensor[2][1] = ir.inertiaTensor[1][2] + PxF64(mMassR*((ir.COM.y*ir.COM.z) - (sum.y*sum.z)));
+ ir.inertiaTensor[0][2] = ir.inertiaTensor[2][0] = ir.inertiaTensor[0][2] + PxF64(mMassR*((ir.COM.z*ir.COM.x) - (sum.z*sum.x)));
+ ir.COM = sum;
+ }
+
+ return true;
+ }
+
+ ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+ /**
+ * Computes volume integrals for a polyhedron by summing surface integrals over its faces.
+ * \param ir [out] a result structure.
+ * \param origin [in] the origin of the mesh vertices. All vertices will be shifted accordingly prior to computing the volume integrals.
+ Can improve accuracy, for example, if the centroid is used in the case of a convex mesh. Note: the returned inertia will not be relative to this origin but relative to (0,0,0).
+ * \return true if success
+ */
+ ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+	bool VolumeIntegratorEberly::computeVolumeIntegrals(PxIntegrals& ir, const PxVec3& origin)
+	{
+		// Closed-form normalization weights for the 10 surface integrals (Eberly's method).
+		const PxF64 mult[10] = {1.0/6.0,1.0/24.0,1.0/24.0,1.0/24.0,1.0/60.0,1.0/60.0,1.0/60.0,1.0/120.0,1.0/120.0,1.0/120.0};
+		PxF64 intg[10] = {0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0}; // order: 1, x, y, z, x^2, y^2, z^2, xy, yz, zx
+		const PxVec3* hullVerts = static_cast<const PxVec3*> (mDesc.points.data);
+
+		// Accumulate the surface integrals over every hull polygon, triangulated
+		// as a fan around the polygon's first vertex.
+		for (PxU32 i = 0; i < mDesc.polygons.count; i++)
+		{
+			const Gu::HullPolygonData& polygon = (static_cast<const Gu::HullPolygonData*> (mDesc.polygons.data))[i];
+			const PxU8* Data = static_cast<const PxU8*>(mDesc.indices.data) + polygon.mVRef8;
+			const PxU32 NbVerts = polygon.mNbVerts;
+			for (PxU32 j = 0; j < NbVerts - 2; j++)
+			{
+				// Fan triangle (v0, v[j+1], v[j+2]), shifted by 'origin' for numerical accuracy.
+				const PxVec3 p0 = hullVerts[Data[0]] - origin;
+				PxVec3 p1 = hullVerts[Data[(j + 1) % NbVerts]] - origin;
+				PxVec3 p2 = hullVerts[Data[(j + 2) % NbVerts]] - origin;
+
+				PxVec3 cp = (p1 - p0).cross(p2 - p0);
+
+				// Flip the triangle if its winding disagrees with the polygon's plane normal.
+				if(cp.dot(polygon.mPlane.n) < 0)
+				{
+					cp = -cp;
+					Ps::swap(p1,p2);
+				}
+
+				PxF64 x0 = PxF64(p0.x); PxF64 y0 = PxF64(p0.y); PxF64 z0 = PxF64(p0.z);
+				PxF64 x1 = PxF64(p1.x); PxF64 y1 = PxF64(p1.y); PxF64 z1 = PxF64(p1.z);
+				PxF64 x2 = PxF64(p2.x); PxF64 y2 = PxF64(p2.y); PxF64 z2 = PxF64(p2.z);
+
+				// get edges and cross product of edges
+				PxF64 d0 = PxF64(cp.x); PxF64 d1 = PxF64(cp.y); PxF64 d2 = PxF64(cp.z);
+
+				// compute integral terms
+				PxF64 f1x; PxF64 f2x; PxF64 f3x; PxF64 g0x; PxF64 g1x; PxF64 g2x;
+				PxF64 f1y; PxF64 f2y; PxF64 f3y; PxF64 g0y; PxF64 g1y; PxF64 g2y;
+				PxF64 f1z; PxF64 f2z; PxF64 f3z; PxF64 g0z; PxF64 g1z; PxF64 g2z;
+
+				subexpressions(x0, x1, x2, f1x, f2x, f3x, g0x, g1x, g2x);
+				subexpressions(y0, y1, y2, f1y, f2y, f3y, g0y, g1y, g2y);
+				subexpressions(z0, z1, z2, f1z, f2z, f3z, g0z, g1z, g2z);
+
+				// update integrals
+				intg[0] += d0*f1x;
+				intg[1] += d0*f2x; intg[2] += d1*f2y; intg[3] += d2*f2z;
+				intg[4] += d0*f3x; intg[5] += d1*f3y; intg[6] += d2*f3z;
+				intg[7] += d0*(y0*g0x + y1*g1x + y2*g2x);
+				intg[8] += d1*(z0*g0y + z1*g1y + z2*g2y);
+				intg[9] += d2*(x0*g0z + x1*g1z + x2*g2z);
+
+			}
+		}
+
+		// Apply the closed-form normalization constants.
+		for (PxU32 i = 0; i < 10; i++)
+		{
+			intg[i] *= mult[i];
+		}
+
+		// NOTE(review): the density passed to the constructor does not appear to be
+		// applied here — 'mass' is the raw volume integral; confirm callers scale it.
+		ir.mass = mMass = intg[0];
+		// center of mass
+		ir.COM.x = PxReal(intg[1]/mMass);
+		ir.COM.y = PxReal(intg[2]/mMass);
+		ir.COM.z = PxReal(intg[3]/mMass);
+
+		// inertia tensor relative to the provided origin parameter
+		ir.inertiaTensor[0][0] = intg[5]+intg[6];
+		ir.inertiaTensor[1][1] = intg[4]+intg[6];
+		ir.inertiaTensor[2][2] = intg[4]+intg[5];
+		ir.inertiaTensor[0][1] = ir.inertiaTensor[1][0] = -intg[7];
+		ir.inertiaTensor[1][2] = ir.inertiaTensor[2][1] = -intg[8];
+		ir.inertiaTensor[0][2] = ir.inertiaTensor[2][0] = -intg[9];
+
+		// inertia tensor relative to center of mass (parallel-axis theorem)
+		ir.COMInertiaTensor[0][0] = ir.inertiaTensor[0][0] -mMass*PxF64((ir.COM.y*ir.COM.y+ir.COM.z*ir.COM.z));
+		ir.COMInertiaTensor[1][1] = ir.inertiaTensor[1][1] -mMass*PxF64((ir.COM.z*ir.COM.z+ir.COM.x*ir.COM.x));
+		ir.COMInertiaTensor[2][2] = ir.inertiaTensor[2][2] -mMass*PxF64((ir.COM.x*ir.COM.x+ir.COM.y*ir.COM.y));
+		ir.COMInertiaTensor[0][1] = ir.COMInertiaTensor[1][0] = (ir.inertiaTensor[0][1] +mMass*PxF64(ir.COM.x*ir.COM.y));
+		ir.COMInertiaTensor[1][2] = ir.COMInertiaTensor[2][1] = (ir.inertiaTensor[1][2] +mMass*PxF64(ir.COM.y*ir.COM.z));
+		ir.COMInertiaTensor[0][2] = ir.COMInertiaTensor[2][0] = (ir.inertiaTensor[0][2] +mMass*PxF64(ir.COM.z*ir.COM.x));
+
+		// inertia tensor relative to (0,0,0)
+		// Undo the 'origin' shift so the reported tensor and COM are expressed in the
+		// original, unshifted vertex space (see function documentation above).
+		if (!origin.isZero())
+		{
+			PxVec3 sum = ir.COM + origin;
+			ir.inertiaTensor[0][0] -= mMass*PxF64((ir.COM.y*ir.COM.y+ir.COM.z*ir.COM.z) - (sum.y*sum.y+sum.z*sum.z));
+			ir.inertiaTensor[1][1] -= mMass*PxF64((ir.COM.z*ir.COM.z+ir.COM.x*ir.COM.x) - (sum.z*sum.z+sum.x*sum.x));
+			ir.inertiaTensor[2][2] -= mMass*PxF64((ir.COM.x*ir.COM.x+ir.COM.y*ir.COM.y) - (sum.x*sum.x+sum.y*sum.y));
+			ir.inertiaTensor[0][1] = ir.inertiaTensor[1][0] = ir.inertiaTensor[0][1] + mMass*PxF64((ir.COM.x*ir.COM.y) - (sum.x*sum.y));
+			ir.inertiaTensor[1][2] = ir.inertiaTensor[2][1] = ir.inertiaTensor[1][2] + mMass*PxF64((ir.COM.y*ir.COM.z) - (sum.y*sum.z));
+			ir.inertiaTensor[0][2] = ir.inertiaTensor[2][0] = ir.inertiaTensor[0][2] + mMass*PxF64((ir.COM.z*ir.COM.x) - (sum.z*sum.x));
+			ir.COM = sum;
+		}
+
+		return true;
+	}
+} // namespace
+
+// Wrapper: computes mass properties of a triangle mesh (see PxIntegrals),
+// forwarding the integrator's success status to the caller.
+bool computeVolumeIntegrals(const PxSimpleTriangleMesh& mesh, PxReal density, PxIntegrals& integrals)
+{
+	VolumeIntegrator integrator(mesh, PxF64(density));
+	return integrator.computeVolumeIntegrals(integrals);
+}
+
+// Wrapper: computes mass properties directly from hull polygons (Eberly's method).
+bool computeVolumeIntegralsEberly(const PxConvexMeshDesc& mesh, PxReal density, PxIntegrals& integrals, const PxVec3& origin)
+{
+	VolumeIntegratorEberly v(mesh, PxF64(density));
+	// Propagate the integrator's status instead of unconditionally reporting
+	// success, matching the triangle-mesh wrapper above.
+	return v.computeVolumeIntegrals(integrals, origin);
+}
+
+// Wrapper: SIMD variant of the polygon-based (Eberly) mass-property computation.
+bool computeVolumeIntegralsEberlySIMD(const PxConvexMeshDesc& mesh, PxReal density, PxIntegrals& integrals, const PxVec3& origin)
+{
+	VolumeIntegratorEberly v(mesh, PxF64(density));
+	// Propagate the integrator's status instead of unconditionally reporting
+	// success, matching the triangle-mesh wrapper above.
+	return v.computeVolumeIntegralsSIMD(integrals, origin);
+}
+
+}
+
+//#endif
diff --git a/PhysX_3.4/Source/PhysXCooking/src/convex/VolumeIntegration.h b/PhysX_3.4/Source/PhysXCooking/src/convex/VolumeIntegration.h
new file mode 100644
index 00000000..559dc2f9
--- /dev/null
+++ b/PhysX_3.4/Source/PhysXCooking/src/convex/VolumeIntegration.h
@@ -0,0 +1,102 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2016 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+
+#ifndef PX_FOUNDATION_NXVOLUMEINTEGRATION
+#define PX_FOUNDATION_NXVOLUMEINTEGRATION
+/** \addtogroup foundation
+ @{
+*/
+
+
+#include "foundation/Px.h"
+#include "foundation/PxVec3.h"
+#include "foundation/PxMat33.h"
+#include "CmPhysXCommon.h"
+
+namespace physx
+{
+
+class PxSimpleTriangleMesh;
+class PxConvexMeshDesc;
+
+/**
+\brief Data structure used to store mass properties.
+*/
+struct PxIntegrals
+	{
+		PxVec3 COM;						//!< Center of mass
+		PxF64 mass;						//!< Total mass
+		PxF64 inertiaTensor[3][3];		//!< Inertia tensor (mass matrix) relative to the origin
+		PxF64 COMInertiaTensor[3][3];	//!< Inertia tensor (mass matrix) relative to the COM
+
+	/**
+	\brief Retrieve the inertia tensor relative to the center of mass.
+
+	\param inertia Inertia tensor.
+	*/
+	// Marked const: the accessor only reads COMInertiaTensor (backward compatible).
+	void getInertia(PxMat33& inertia) const
+	{
+		for(PxU32 j=0;j<3;j++)
+		{
+			for(PxU32 i=0;i<3;i++)
+			{
+				// Narrow from double-precision storage to the single-precision PxMat33.
+				inertia(i,j) = PxF32(COMInertiaTensor[i][j]);
+			}
+		}
+	}
+
+	/**
+	\brief Retrieve the inertia tensor relative to the origin.
+
+	\param inertia Inertia tensor.
+	*/
+	// Marked const: the accessor only reads inertiaTensor (backward compatible).
+	void getOriginInertia(PxMat33& inertia) const
+	{
+		for(PxU32 j=0;j<3;j++)
+		{
+			for(PxU32 i=0;i<3;i++)
+			{
+				inertia(i,j) = PxF32(inertiaTensor[i][j]);
+			}
+		}
+	}
+	};
+
+	// general triangle-mesh version: fills 'integrals' (mass, COM, inertia tensors)
+	bool computeVolumeIntegrals(const PxSimpleTriangleMesh& mesh, PxReal density, PxIntegrals& integrals);
+
+	// specialized method taking polygons directly, so we don't need to compute and store triangles for each polygon
+	// 'origin' shifts the vertices before integration for accuracy; results are reported relative to (0,0,0)
+	bool computeVolumeIntegralsEberly(const PxConvexMeshDesc& mesh, PxReal density, PxIntegrals& integrals, const PxVec3& origin); // Eberly simplified method
+
+	// specialized method taking polygons directly, so we don't need to compute and store triangles for each polygon, SIMD version
+	bool computeVolumeIntegralsEberlySIMD(const PxConvexMeshDesc& mesh, PxReal density, PxIntegrals& integrals, const PxVec3& origin); // Eberly simplified method
+}
+
+ /** @} */
+#endif
diff --git a/PhysX_3.4/Source/PhysXCooking/src/mesh/GrbTriangleMeshCooking.cpp b/PhysX_3.4/Source/PhysXCooking/src/mesh/GrbTriangleMeshCooking.cpp
new file mode 100644
index 00000000..974e1faa
--- /dev/null
+++ b/PhysX_3.4/Source/PhysXCooking/src/mesh/GrbTriangleMeshCooking.cpp
@@ -0,0 +1,29 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2016 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
diff --git a/PhysX_3.4/Source/PhysXCooking/src/mesh/GrbTriangleMeshCooking.h b/PhysX_3.4/Source/PhysXCooking/src/mesh/GrbTriangleMeshCooking.h
new file mode 100644
index 00000000..a41889ed
--- /dev/null
+++ b/PhysX_3.4/Source/PhysXCooking/src/mesh/GrbTriangleMeshCooking.h
@@ -0,0 +1,337 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2016 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+
+#ifndef PX_COLLISION_GRBTRIANGLEMESHCOOKING
+#define PX_COLLISION_GRBTRIANGLEMESHCOOKING
+
+#include "GuMeshData.h"
+#include "cooking/PxCooking.h"
+
+namespace physx
+{
+ namespace Gu
+ {
+
+ }
+
+// TODO avoroshilov: remove duplicate definitions
+static const PxU32 BOUNDARY = 0xffffffff;
+static const PxU32 NONCONVEX_FLAG = 0x80000000;
+
+// One entry of the edge->triangle lookup table: an undirected edge
+// (edgeId0 <= edgeId1 by construction) and the triangle that owns it.
+struct EdgeTriLookup
+{
+	PxU32 edgeId0, edgeId1;
+	PxU32 triId;
+
+	// Lexicographic order on (edgeId0, edgeId1); triId does not participate.
+	bool operator < (const EdgeTriLookup& edge1) const
+	{
+		if (edgeId0 != edge1.edgeId0)
+			return edgeId0 < edge1.edgeId0;
+		return edgeId1 < edge1.edgeId1;
+	}
+
+	bool operator <=(const EdgeTriLookup& edge1) const
+	{
+		if (edgeId0 != edge1.edgeId0)
+			return edgeId0 < edge1.edgeId0;
+		return edgeId1 <= edge1.edgeId1;
+	}
+};
+
+
+// Binary search over the sorted edge table: returns the index of the last
+// element <= value (0 if none compare below it). Callers rewind from here
+// to the first entry matching the edge key.
+static PxU32 binarySearch(const EdgeTriLookup* __restrict data, const PxU32 numElements, const EdgeTriLookup& value)
+{
+	PxU32 lo = 0;
+	PxU32 hi = numElements;
+
+	// Invariant: the answer lies in [lo, hi); stop when one candidate remains.
+	while (hi - lo > 1)
+	{
+		const PxU32 mid = lo + (hi - lo) / 2;
+		if (data[mid] <= value)
+			lo = mid;
+		else
+			hi = mid;
+	}
+
+	return lo;
+}
+
+// slightly different behavior from collide2: boundary edges are filtered out
+
+// Finds the triangle sharing edge (i0, i1) with triangle 'triangleIndex'.
+// 'triLookups' must be the sorted edge->triangle table built by buildAdjacencies.
+// Returns:
+//  - BOUNDARY if no other triangle references this edge,
+//  - (NONCONVEX_FLAG | triId) as soon as any neighbor makes the edge non-convex
+//    (its opposite vertex lies on or above 'plane'),
+//  - otherwise the neighbor whose face normal is most parallel to 'plane'
+//    (best-cos heuristic, disambiguating edges shared by more than two triangles).
+static PxU32 findAdjacent(const PxVec3* triVertices, const PxVec3* triNormals, const uint3* triIndices,
+	PxU32 nbTris, PxU32 i0, PxU32 i1, const PxPlane& plane,
+	EdgeTriLookup* triLookups, PxU32 triangleIndex)
+{
+	PxU32 result = BOUNDARY;
+	PxReal bestCos = -FLT_MAX;
+
+	// Canonical edge key: smaller vertex index first, matching table construction.
+	EdgeTriLookup lookup;
+	lookup.edgeId0 = PxMin(i0, i1);
+	lookup.edgeId1 = PxMax(i0, i1);
+
+	PxU32 startIndex = binarySearch(triLookups, nbTris * 3, lookup);
+
+	// binarySearch may land on any matching entry; rewind to the first one.
+	for (PxU32 a = startIndex; a > 0; --a)
+	{
+		if (triLookups[a - 1].edgeId0 == lookup.edgeId0 && triLookups[a - 1].edgeId1 == lookup.edgeId1)
+			startIndex = a - 1;
+		else
+			break;
+	}
+
+	// Scan all entries for this edge (contiguous after sorting).
+	for (PxU32 a = startIndex; a < nbTris * 3; ++a)
+	{
+		EdgeTriLookup& edgeTri = triLookups[a];
+
+		if (edgeTri.edgeId0 != lookup.edgeId0 || edgeTri.edgeId1 != lookup.edgeId1)
+			break;
+
+		if (edgeTri.triId == triangleIndex)
+			continue; // skip the query triangle itself
+
+		const uint3& triIdx = triIndices[edgeTri.triId];
+		PxU32 vIdx0 = triIdx.x;
+		PxU32 vIdx1 = triIdx.y;
+		PxU32 vIdx2 = triIdx.z;
+
+		// The neighbor's vertex that is not part of the shared edge.
+		PxU32 other = vIdx0 + vIdx1 + vIdx2 - (i0 + i1);
+
+		if (plane.distance(triVertices[other]) >= 0)
+			return NONCONVEX_FLAG | edgeTri.triId;
+
+		PxReal c = plane.n.dot(triNormals[edgeTri.triId]);
+		if (c>bestCos)
+		{
+			bestCos = c;
+			result = edgeTri.triId;
+		}
+
+	}
+
+	return result;
+}
+
+
+// Computes per-triangle face normals (into tempNormalsPerTri_prealloc) and, for each
+// triangle, the adjacent triangle across each of its three edges (into triAdjacencies;
+// see findAdjacent for the BOUNDARY/NONCONVEX_FLAG encoding of each slot).
+// Builds a sorted edge->triangle table so each adjacency query is a binary search.
+static void buildAdjacencies(uint4* triAdjacencies, PxVec3* tempNormalsPerTri_prealloc, const PxVec3* triVertices, const uint3* triIndices, PxU32 nbTris)
+{
+	//PxVec3 * triNormals = new PxVec3[nbTris];
+
+	// Three edge entries per triangle.
+	EdgeTriLookup* edgeLookups = reinterpret_cast<EdgeTriLookup*>(PX_ALLOC(sizeof(EdgeTriLookup) * nbTris * 3, PX_DEBUG_EXP("edgeLookups")));
+
+
+	// First pass: face normals and the (unsorted) edge table, each edge
+	// stored with the smaller vertex index first.
+	for (PxU32 i = 0; i < nbTris; i++)
+	{
+		const uint3& triIdx = triIndices[i];
+		PxU32 vIdx0 = triIdx.x;
+		PxU32 vIdx1 = triIdx.y;
+		PxU32 vIdx2 = triIdx.z;
+
+		tempNormalsPerTri_prealloc[i] = (triVertices[vIdx1] - triVertices[vIdx0]).cross(triVertices[vIdx2] - triVertices[vIdx0]).getNormalized();
+
+		edgeLookups[i * 3].edgeId0 = PxMin(vIdx0, vIdx1);
+		edgeLookups[i * 3].edgeId1 = PxMax(vIdx0, vIdx1);
+		edgeLookups[i * 3].triId = i;
+
+		edgeLookups[i * 3 + 1].edgeId0 = PxMin(vIdx1, vIdx2);
+		edgeLookups[i * 3 + 1].edgeId1 = PxMax(vIdx1, vIdx2);
+		edgeLookups[i * 3 + 1].triId = i;
+
+		edgeLookups[i * 3 + 2].edgeId0 = PxMin(vIdx0, vIdx2);
+		edgeLookups[i * 3 + 2].edgeId1 = PxMax(vIdx0, vIdx2);
+		edgeLookups[i * 3 + 2].triId = i;
+	}
+
+	// Sort so all entries for the same edge are contiguous (EdgeTriLookup::operator<).
+	Ps::sort<EdgeTriLookup>(edgeLookups, PxU32(nbTris * 3));
+
+	// Second pass: resolve the neighbor across each edge of every triangle.
+	for (PxU32 i = 0; i < nbTris; i++)
+	{
+		const uint3& triIdx = triIndices[i];
+		PxU32 vIdx0 = triIdx.x;
+		PxU32 vIdx1 = triIdx.y;
+		PxU32 vIdx2 = triIdx.z;
+
+		PxPlane triPlane(triVertices[vIdx0], tempNormalsPerTri_prealloc[i]);
+		uint4 triAdjIdx;
+
+		// Slots x/y/z hold the neighbors across edges (v0,v1), (v1,v2), (v2,v0); w is unused.
+		triAdjIdx.x = findAdjacent(triVertices, tempNormalsPerTri_prealloc, triIndices, nbTris, vIdx0, vIdx1, triPlane, edgeLookups, i);
+		triAdjIdx.y = findAdjacent(triVertices, tempNormalsPerTri_prealloc, triIndices, nbTris, vIdx1, vIdx2, triPlane, edgeLookups, i);
+		triAdjIdx.z = findAdjacent(triVertices, tempNormalsPerTri_prealloc, triIndices, nbTris, vIdx2, vIdx0, triPlane, edgeLookups, i);
+		triAdjIdx.w = 0;
+
+		triAdjacencies[i] = triAdjIdx;
+	}
+
+
+	PX_FREE(edgeLookups);
+}
+
+// A triangle-adjacency slot marks a non-convex edge when it holds a real
+// neighbor (not BOUNDARY) tagged with NONCONVEX_FLAG. The BOUNDARY check is
+// required because BOUNDARY (0xffffffff) also has the flag bit set.
+static bool isEdgeNonconvex(PxU32 adjEdgeIndex)
+{
+	if (adjEdgeIndex == BOUNDARY)
+		return false;
+	return (adjEdgeIndex & NONCONVEX_FLAG) != 0;
+}
+
+// Pass 1/3 of vertex-adjacency construction: counts the neighbor slots needed per
+// vertex (valency) over all edges not marked non-convex, writes the exclusive
+// prefix sum into vertNeighborStart, and returns the total slot count via
+// tempNumAdjVertices. Counts are an upper bound — edges shared by several
+// triangles are counted once per triangle; pass 2 deduplicates. vertValency is
+// re-zeroed on exit so pass 2 can reuse it as a per-vertex write cursor.
+PX_INLINE void buildVertexConnection_p1(PxU32 * vertValency, PxU32 * vertNeighborStart, PxU32 & tempNumAdjVertices, const float4 * /*triVertices*/, const uint4 * triIndices, const uint4 * triAdjacencies, PxU32 nbVerts, PxU32 nbTris)
+{
+	tempNumAdjVertices = 0;
+	memset(vertValency, 0, nbVerts*sizeof(PxU32));
+
+	// Calculate max num of adjVerts
+	for (PxU32 i = 0; i < nbTris; i++)
+	{
+		uint4 triIdx = triIndices[i];
+		PxU32 vi0 = triIdx.x;
+		PxU32 vi1 = triIdx.y;
+		PxU32 vi2 = triIdx.z;
+
+		uint4 triAdjIdx = triAdjacencies[i];
+
+		// Each accepted edge contributes one slot to each of its two endpoints.
+		PxU32 totalVertsAdded = 0;
+		if (!isEdgeNonconvex(triAdjIdx.x))
+		{
+			++vertValency[vi0];
+			++vertValency[vi1];
+			totalVertsAdded += 2;
+		}
+		if (!isEdgeNonconvex(triAdjIdx.y))
+		{
+			++vertValency[vi1];
+			++vertValency[vi2];
+			totalVertsAdded += 2;
+		}
+		if (!isEdgeNonconvex(triAdjIdx.z))
+		{
+			++vertValency[vi2];
+			++vertValency[vi0];
+			totalVertsAdded += 2;
+		}
+		tempNumAdjVertices += totalVertsAdded;
+	}
+	// Exclusive prefix sum: start offset of each vertex's neighbor range.
+	PxU32 offset = 0;
+	for (PxU32 i = 0; i < nbVerts; i++)
+	{
+		vertNeighborStart[i] = offset;
+		offset += vertValency[i];
+	}
+
+	// Reset valency so pass 2 can use it as a running fill count per vertex.
+	memset(vertValency, 0, nbVerts*sizeof(PxU32));
+}
+
+// Pass 2/3: fills vertNeighboringPairs_prealloc — two PxU32 per slot, pair
+// (vertex, neighbor), 0xffffffff marking an unused slot — with one entry in each
+// direction for every accepted (convex/boundary) edge, skipping pairs already
+// recorded for a vertex. Returns the number of pair entries actually written
+// (<= tempNumAdjVertices, since pass 1 over-counts shared edges).
+PX_INLINE PxU32 buildVertexConnection_p2(PxU32 * vertValency, PxU32 * vertNeighborStart, PxU32 * vertNeighboringPairs_prealloc, PxU32 tempNumAdjVertices, const float4 * /*triVertices*/, const uint4 * triIndices, const uint4 * triAdjacencies, PxU32 /*nbVerts*/, PxU32 nbTris)
+{
+	// 0xff-fill marks every slot unused; pass 3 skips slots left untouched.
+	memset(vertNeighboringPairs_prealloc, 0xff, tempNumAdjVertices*2*sizeof(PxU32));
+
+	PxU32 newAdjVertsNum = 0;
+	for (PxU32 i = 0; i < nbTris; i++)
+	{
+		uint4 triIdx = triIndices[i];
+		PxU32 vi[3] =
+		{
+			triIdx.x,
+			triIdx.y,
+			triIdx.z
+		};
+		uint4 triAdjIdx = triAdjacencies[i];
+		PxU32 ta[3] =
+		{
+			triAdjIdx.x,
+			triAdjIdx.y,
+			triAdjIdx.z
+		};
+
+		// Walk the triangle's three edges (v0,v1), (v1,v2), (v2,v0).
+		for (int tvi = 0; tvi < 3; ++tvi)
+		{
+			PxU32 curIdx = vi[tvi];
+			PxU32 nextIdx = vi[(tvi+1)%3];
+			if (!isEdgeNonconvex(ta[tvi]))
+			{
+				// Linear dedup scan over curIdx's already-filled neighbor slots.
+				bool matchFound = false;
+				for (PxU32 valIdx = vertNeighborStart[curIdx], valIdxEnd = vertNeighborStart[curIdx] + vertValency[curIdx]; valIdx < valIdxEnd; ++valIdx)
+				{
+					if (vertNeighboringPairs_prealloc[valIdx*2+1] == nextIdx)
+					{
+						matchFound = true;
+						break;
+					}
+				}
+
+				if (!matchFound)
+				{
+					// Record the edge in both directions; vertValency doubles as
+					// the per-vertex write cursor (zeroed by pass 1).
+					PxU32 curPairIdx;
+
+					curPairIdx = vertNeighborStart[curIdx] + vertValency[curIdx];
+					vertNeighboringPairs_prealloc[curPairIdx*2+0] = curIdx;
+					vertNeighboringPairs_prealloc[curPairIdx*2+1] = nextIdx;
+					++vertValency[curIdx];
+
+					curPairIdx = vertNeighborStart[nextIdx] + vertValency[nextIdx];
+					vertNeighboringPairs_prealloc[curPairIdx*2+0] = nextIdx;
+					vertNeighboringPairs_prealloc[curPairIdx*2+1] = curIdx;
+					++vertValency[nextIdx];
+
+					newAdjVertsNum += 2;
+				}
+			}
+		}
+	}
+
+	return newAdjVertsNum;
+}
+
+// Pass 3/3: compacts the pair array (grouped by vertex via the pass-1 prefix
+// sums; 0xffffffff slots are unused) into the final flat vertNeighbors list and
+// rewrites vertNeighborStart to each vertex's first entry in the compacted list.
+PX_INLINE void buildVertexConnection_p3(PxU32 * vertNeighbors, PxU32 * /*vertValency*/, PxU32 * vertNeighborStart, PxU32 * vertNeighboringPairs_prealloc, PxU32 tempNumAdjVertices, PxU32 newNumAdjVertices, const float4 * /*triVertices*/, const uint4 * /*triIndices*/, const uint4 * /*triAdjacencies*/, PxU32 /*nbVerts*/, PxU32 /*nbTris*/)
+{
+	PX_UNUSED(newNumAdjVertices);
+	PxU32 prevVertex = 0xFFffFFff;
+	PxU32 writingIndex = 0;
+	for (PxU32 i = 0; i < tempNumAdjVertices; ++i)
+	{
+		PxU32 curPairIdx0 = vertNeighboringPairs_prealloc[i*2+0];
+		if (curPairIdx0 == 0xFFffFFff)
+		{
+			// Unused slot left over from pass 2's over-allocation.
+			continue;
+		}
+
+		PxU32 curPairIdx1 = vertNeighboringPairs_prealloc[i*2+1];
+		vertNeighbors[writingIndex] = curPairIdx1;
+		// First compacted entry for a new vertex: record its start offset.
+		if (curPairIdx0 != prevVertex)
+		{
+			vertNeighborStart[curPairIdx0] = writingIndex;
+		}
+		prevVertex = curPairIdx0;
+
+		++writingIndex;
+	}
+
+	// Sanity: compacted count must match what pass 2 reported.
+	PX_ASSERT(writingIndex == newNumAdjVertices);
+}
+
+} // namespace physx
+
+#endif
diff --git a/PhysX_3.4/Source/PhysXCooking/src/mesh/HeightFieldCooking.cpp b/PhysX_3.4/Source/PhysXCooking/src/mesh/HeightFieldCooking.cpp
new file mode 100644
index 00000000..7215b1c7
--- /dev/null
+++ b/PhysX_3.4/Source/PhysXCooking/src/mesh/HeightFieldCooking.cpp
@@ -0,0 +1,84 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2016 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#include "foundation/PxIO.h"
+#include "GuHeightField.h"
+#include "GuSerialize.h"
+
+using namespace physx;
+using namespace Gu;
+
+namespace physx
+{
+
+// Serializes a cooked heightfield to 'stream'. Layout: 'HFHF' header tagged with
+// PX_HEIGHTFIELD_VERSION, the HeightFieldData members, the AABB min/max, the
+// HeightField object's derived members, then the raw sample array.
+// 'endian' requests byte-swapped output for cross-endian cooking.
+// Returns false only if the header fails to write.
+bool saveHeightField(const HeightField& hf, PxOutputStream& stream, bool endian)
+{
+	// write header
+	if(!writeHeader('H', 'F', 'H', 'F', PX_HEIGHTFIELD_VERSION, endian, stream))
+		return false;
+
+	const Gu::HeightFieldData& hfData = hf.getData();
+
+	// write mData members
+	writeDword(hfData.rows, endian, stream);
+	writeDword(hfData.columns, endian, stream);
+	writeFloat(hfData.rowLimit, endian, stream);
+	writeFloat(hfData.colLimit, endian, stream);
+	// NOTE(review): nbColumns is serialized as a float — presumably it is stored
+	// as a floating-point member in HeightFieldData; confirm against GuHeightField.h.
+	writeFloat(hfData.nbColumns, endian, stream);
+	writeFloat(hfData.thickness, endian, stream);
+	writeFloat(hfData.convexEdgeThreshold, endian, stream);
+	writeWord(hfData.flags, endian, stream);
+	writeDword(hfData.format, endian, stream);
+
+	writeFloat(hfData.mAABB.getMin(0), endian, stream);
+	writeFloat(hfData.mAABB.getMin(1), endian, stream);
+	writeFloat(hfData.mAABB.getMin(2), endian, stream);
+	writeFloat(hfData.mAABB.getMax(0), endian, stream);
+	writeFloat(hfData.mAABB.getMax(1), endian, stream);
+	writeFloat(hfData.mAABB.getMax(2), endian, stream);
+
+	// write this-> members
+	writeDword(hf.mSampleStride, endian, stream);
+	writeDword(hf.mNbSamples, endian, stream);
+	writeFloat(hf.mMinHeight, endian, stream);
+	writeFloat(hf.mMaxHeight, endian, stream);
+
+	// write samples
+	for(PxU32 i = 0; i < hf.mNbSamples; i++)
+	{
+		const PxHeightFieldSample& s = hfData.samples[i];
+		// Height goes through writeWord so it is byte-swapped when requested;
+		// the material indices are written raw byte-for-byte.
+		writeWord(PxU16(s.height), endian, stream);
+		stream.write(&s.materialIndex0, sizeof(s.materialIndex0));
+		stream.write(&s.materialIndex1, sizeof(s.materialIndex1));
+	}
+
+	return true;
+}
+
+}
diff --git a/PhysX_3.4/Source/PhysXCooking/src/mesh/HeightFieldCooking.h b/PhysX_3.4/Source/PhysXCooking/src/mesh/HeightFieldCooking.h
new file mode 100644
index 00000000..9e1e2ce4
--- /dev/null
+++ b/PhysX_3.4/Source/PhysXCooking/src/mesh/HeightFieldCooking.h
@@ -0,0 +1,35 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2016 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#include "GuHeightField.h"
+
+namespace physx
+{
+ bool saveHeightField(const Gu::HeightField& hf, PxOutputStream& stream, bool endianSwap);
+}
diff --git a/PhysX_3.4/Source/PhysXCooking/src/mesh/QuickSelect.h b/PhysX_3.4/Source/PhysXCooking/src/mesh/QuickSelect.h
new file mode 100644
index 00000000..7dddd554
--- /dev/null
+++ b/PhysX_3.4/Source/PhysXCooking/src/mesh/QuickSelect.h
@@ -0,0 +1,114 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2016 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#ifndef QUICKSELECT_H
+#define QUICKSELECT_H
+
+#include "foundation/PxSimpleTypes.h"
+
+// Google "wikipedia QuickSelect" for algorithm explanation
+namespace physx { namespace quickSelect {
+
+
+	// 32-bit swap helper used by the selection routines; the braces make the trailing ';' optional at call sites
+	#define SWAP32(x, y) { PxU32 tmp = y; y = x; x = tmp; }
+
+	// Lomuto-style partition step shared by the selection routines below.
+	// left is the index of the leftmost element of the subarray
+	// right is the index of the rightmost element of the subarray (inclusive)
+	// number of elements in subarray = right-left+1
+	// cmpLtEq is a less-or-equal comparator on the PxU32 values stored in a.
+	// Returns the final position of the pivot: afterwards every element left of it satisfies
+	// cmpLtEq(element, pivot) and every element right of it satisfies cmpLtEq(pivot, element).
+	template<typename LtEq>
+	PxU32 partition(PxU32* PX_RESTRICT a, PxU32 left, PxU32 right, PxU32 pivotIndex, const LtEq& cmpLtEq)
+	{
+		PX_ASSERT(pivotIndex >= left && pivotIndex <= right);
+		PxU32 pivotValue = a[pivotIndex];
+		SWAP32(a[pivotIndex], a[right]) // Move pivot to end
+		PxU32 storeIndex = left;
+		for (PxU32 i = left; i < right; i++)  // left <= i < right
+			if (cmpLtEq(a[i], pivotValue))
+			{
+				SWAP32(a[i], a[storeIndex]);
+				storeIndex++;
+			}
+		SWAP32(a[storeIndex], a[right]); // Move pivot to its final place
+		// debug-only: verify the partition invariant on both sides of the pivot
+		for (PxU32 i = left; i < storeIndex; i++)
+			PX_ASSERT(cmpLtEq(a[i], a[storeIndex]));
+		for (PxU32 i = storeIndex+1; i <= right; i++)
+			PX_ASSERT(cmpLtEq(a[storeIndex], a[i]));
+		return storeIndex;
+	}
+
+	// left is the index of the leftmost element of the subarray
+	// right is the index of the rightmost element of the subarray (inclusive)
+	// number of elements in subarray = right-left+1
+	// recursive version
+	// postcondition: the k smallest elements (per cmpLtEq) occupy a[left..left+k-1], in unspecified order
+	template<typename LtEq>
+	void quickFindFirstK(PxU32* PX_RESTRICT a, PxU32 left, PxU32 right, PxU32 k, const LtEq& cmpLtEq)
+	{
+		PX_ASSERT(k <= right-left+1);
+		if (right > left)
+		{
+			// select pivotIndex between left and right
+			PxU32 pivotIndex = (left + right) >> 1;
+			PxU32 pivotNewIndex = partition(a, left, right, pivotIndex, cmpLtEq);
+			// now all elements to the left of pivotNewIndex are < old value of a[pivotIndex] (bottom half values)
+			if (pivotNewIndex > left + k) // new condition
+				quickFindFirstK(a, left, pivotNewIndex-1, k, cmpLtEq);
+			if (pivotNewIndex < left + k) // fewer than k elements settled yet; recurse on the right remainder
+				quickFindFirstK(a, pivotNewIndex+1, right, k+left-pivotNewIndex-1, cmpLtEq);
+		}
+	}
+
+	// non-recursive version
+	// left/right delimit the subarray (right inclusive), same contract as quickFindFirstK.
+	// postcondition: the k smallest elements (per cmpLtEq) occupy a[left..left+k-1], in unspecified order
+	template<typename LtEq>
+	void quickSelectFirstK(PxU32* PX_RESTRICT a, PxU32 left, PxU32 right, PxU32 k, const LtEq& cmpLtEq)
+	{
+		PX_ASSERT(k <= right-left+1);
+		for (;;)
+		{
+			PxU32 pivotIndex = (left+right) >> 1;
+			PxU32 pivotNewIndex = partition(a, left, right, pivotIndex, cmpLtEq);
+			PxU32 pivotDist = pivotNewIndex - left + 1; // number of elements now known to be the smallest of this range
+			if (pivotDist == k)
+				return;
+			else if (k < pivotDist)
+			{
+				PX_ASSERT(pivotNewIndex > 0);
+				right = pivotNewIndex - 1; // too many settled; continue selection in the left part only
+			}
+			else
+			{
+				k = k - pivotDist; // pivotDist elements are final; select the remaining k-pivotDist on the right
+				left = pivotNewIndex+1;
+			}
+		}
+	}
+
+} } // namespace quickSelect, physx
+
+#endif
+
diff --git a/PhysX_3.4/Source/PhysXCooking/src/mesh/RTreeCooking.cpp b/PhysX_3.4/Source/PhysXCooking/src/mesh/RTreeCooking.cpp
new file mode 100644
index 00000000..08ab1a1b
--- /dev/null
+++ b/PhysX_3.4/Source/PhysXCooking/src/mesh/RTreeCooking.cpp
@@ -0,0 +1,893 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2016 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#include "foundation/PxBounds3.h"
+#include "CmPhysXCommon.h"
+#include "RTreeCooking.h"
+#include "PsSort.h"
+#include "PsMathUtils.h"
+#include "PsAllocator.h"
+#include "PsVecMath.h"
+#include "PxTolerancesScale.h"
+#include "QuickSelect.h"
+#include "PsInlineArray.h"
+#include "GuRTree.h"
+
+#define PRINT_RTREE_COOKING_STATS 0 // AP: keeping this frequently used macro for diagnostics/benchmarking
+
+#if PRINT_RTREE_COOKING_STATS
+#include <stdio.h>
+#endif
+
+using namespace physx::Gu;
+using namespace physx::shdfnd;
+using namespace physx::shdfnd::aos;
+
+namespace physx
+{
+
+// Intermediate non-quantized representation for RTree node in a page (final format is SIMD transposed page)
+struct RTreeNodeNQ
+{
+	PxBounds3 bounds;
+	PxI32 childPageFirstNodeIndex; // relative to the beginning of all build tree nodes array; for leaves it indexes the permute array; -1 for empty nodes
+	PxI32 leafCount; // -1 for empty nodes, 0 for non-terminal nodes, number of enclosed tris if non-zero (LeafTriangles), also means a terminal node
+
+	struct U {}; // selector struct for uninitialized constructor
+	RTreeNodeNQ(U) {} // uninitialized constructor, used when the node is overwritten immediately after allocation
+	RTreeNodeNQ() : bounds(PxBounds3::empty()), childPageFirstNodeIndex(-1), leafCount(0) {}
+};
+
+// SIMD version of bounds class
+struct PxBounds3V
+{
+	struct U {}; // selector struct for uninitialized constructor
+	Vec3V mn, mx;
+	PxBounds3V(Vec3VArg mn_, Vec3VArg mx_) : mn(mn_), mx(mx_) {}
+	PxBounds3V(U) {} // uninitialized constructor
+
+	PX_FORCE_INLINE Vec3V getExtents() const { return V3Sub(mx, mn); }
+	PX_FORCE_INLINE void include(const PxBounds3V& other) { mn = V3Min(mn, other.mn); mx = V3Max(mx, other.mx); }
+
+	// store the min/max SIMD vectors out to scalar PxVec3 (unaligned store of x,y,z)
+	PX_FORCE_INLINE const PxVec3 getMinVec3() const { PxVec3 ret; V3StoreU(mn, ret); return ret; }
+	PX_FORCE_INLINE const PxVec3 getMaxVec3() const { PxVec3 ret; V3StoreU(mx, ret); return ret; }
+};
+
+// forward declaration: core build over per-primitive bounds ('static', so defined later
+// in this translation unit); buildFromTriangles below funnels into this
+static void buildFromBounds(
+	Gu::RTree& resultTree, const PxBounds3V* allBounds, PxU32 numBounds,
+	Array<PxU32>& resultPermute, RTreeCooker::RemapCallback* rc, Vec3VArg allMn, Vec3VArg allMx,
+	PxReal sizePerfTradeOff, PxMeshCookingHint::Enum hint);
+
+/////////////////////////////////////////////////////////////////////////
+// Builds the RTree for a triangle mesh: computes one conservatively inflated AABB
+// per triangle and forwards everything to buildFromBounds().
+// verts/numVerts - vertex buffer (numVerts is only used by the index-validation assert below)
+// tris16/tris32  - index buffer, numTris triples; whichever pointer is non-NULL is used (tris16 takes precedence)
+// resultPermute  - receives the triangle permutation produced by the build
+// rc             - remap callback, passed through to buildFromBounds
+void RTreeCooker::buildFromTriangles(
+	Gu::RTree& result, const PxVec3* verts, PxU32 numVerts, const PxU16* tris16, const PxU32* tris32, PxU32 numTris,
+	Array<PxU32>& resultPermute, RTreeCooker::RemapCallback* rc, PxReal sizePerfTradeOff01, PxMeshCookingHint::Enum hint)
+{
+	PX_UNUSED(numVerts); // referenced only by the assert below in checked builds
+	Array<PxBounds3V> allBounds;
+	allBounds.reserve(numTris);
+	Vec3V allMn = Vec3V_From_FloatV(FMax()), allMx = Vec3V_From_FloatV(FNegMax());
+	Vec3V eps = V3Splat(FLoad(5e-4f)); // AP scaffold: use PxTolerancesScale here?
+
+	// build RTree AABB bounds from triangles, conservative bound inflation is also performed here
+	for(PxU32 i = 0; i < numTris; i ++)
+	{
+		PxU32 i0, i1, i2;
+		PxU32 i3 = i*3;
+		if(tris16)
+		{
+			i0 = tris16[i3]; i1 = tris16[i3+1]; i2 = tris16[i3+2];
+		} else
+		{
+			i0 = tris32[i3]; i1 = tris32[i3+1]; i2 = tris32[i3+2];
+		}
+		PX_ASSERT_WITH_MESSAGE(i0 < numVerts && i1 < numVerts && i2 < numVerts ,"Input mesh triangle's vertex index exceeds specified numVerts.");
+		Vec3V v0 = V3LoadU(verts[i0]), v1 = V3LoadU(verts[i1]), v2 = V3LoadU(verts[i2]);
+		Vec3V mn = V3Sub(V3Min(V3Min(v0, v1), v2), eps); // min over 3 verts, subtract eps to inflate
+		Vec3V mx = V3Add(V3Max(V3Max(v0, v1), v2), eps); // max over 3 verts, add eps to inflate
+		allMn = V3Min(allMn, mn); allMx = V3Max(allMx, mx);
+		allBounds.pushBack(PxBounds3V(mn, mx));
+	}
+
+	buildFromBounds(result, allBounds.begin(), numTris, resultPermute, rc, allMn, allMx, sizePerfTradeOff01, hint);
+}
+
+/////////////////////////////////////////////////////////////////////////
+// Fast but lower quality 4-way split sorting using repeated application of quickselect
+
+// comparator template struct for sorting bounds centers given a coordinate index (x,y,z=0,1,2)
+// note: non-strict (<=) comparison, matching the LtEq contract expected by the quickSelect routines
+struct BoundsLTE
+{
+	PxU32 coordIndex;
+	const PxVec3* PX_RESTRICT boundCenters; // AP: precomputed centers are faster than recomputing the centers
+	BoundsLTE(PxU32 coordIndex_, const PxVec3* boundCenters_)
+		: coordIndex(coordIndex_), boundCenters(boundCenters_)
+	{}
+
+	// compares center coordinates along coordIndex (caller stores centers pre-doubled as min+max;
+	// the constant factor doesn't affect the ordering)
+	PX_FORCE_INLINE bool operator()(const PxU32 & idx1, const PxU32 & idx2) const
+	{
+		PxF32 center1 = boundCenters[idx1][coordIndex];
+		PxF32 center2 = boundCenters[idx2][coordIndex];
+		return (center1 <= center2);
+	}
+};
+
+// ======================================================================
+// Quick sorting method
+// recursive sorting procedure:
+// 1. find min and max extent along each axis for the current cluster
+// 2. split input cluster into two 3 times using quickselect, splitting off a quarter of the initial cluster size each time
+// 3. the axis is potentially different for each split using the following
+// approximate splitting heuristic - reduce max length by some estimated factor to encourage split along other axis
+// since we cut off between a quarter to a half of elements in this direction per split
+// the reduction for first split should be *0.75f but we use 0.8
+// to account for some node overlap. This is somewhat of an arbitrary choice and there's room for improvement.
+// 4. recurse on new clusters (goto step 1)
+//
+struct SubSortQuick
+{
+	static const PxReal reductionFactors[RTREE_N-1]; // per-split axis-length reduction factors (see step 3 above)
+
+	enum { NTRADEOFF = 9 };
+	static const PxU32 stopAtTrisPerLeaf1[NTRADEOFF]; // presets for PxCookingParams::meshSizePerformanceTradeoff implementation
+
+	const PxU32* permuteEnd;   // one past the end of the full permutation array (for range asserts)
+	const PxU32* permuteStart; // beginning of the full permutation array
+	const PxBounds3V* allBounds;
+	Array<PxVec3> boundCenters; // pre-doubled centers (min+max) of allBounds, same indexing
+	PxU32 maxBoundsPerLeafPage; // leaf threshold selected from stopAtTrisPerLeaf1 via sizePerfTradeOff01
+
+	// initialize the context for the sorting routine
+	SubSortQuick(PxU32* permute, const PxBounds3V* allBounds_, PxU32 allBoundsSize, PxReal sizePerfTradeOff01)
+		: allBounds(allBounds_)
+	{
+		permuteEnd = permute + allBoundsSize;
+		permuteStart = permute;
+		PxU32 boundsCount = allBoundsSize;
+		boundCenters.reserve(boundsCount); // AP - measured that precomputing centers helps with perf significantly (~20% on 1k verts)
+		for(PxU32 i = 0; i < boundsCount; i++)
+			boundCenters.pushBack( allBounds[i].getMinVec3() + allBounds[i].getMaxVec3() ); // min+max = 2*center; factor of 2 doesn't change ordering
+		PxU32 iTradeOff = PxMin<PxU32>( PxU32(PxMax<PxReal>(0.0f, sizePerfTradeOff01)*NTRADEOFF), NTRADEOFF-1 ); // clamp [0,1] input to a valid preset index
+		maxBoundsPerLeafPage = stopAtTrisPerLeaf1[iTradeOff];
+	}
+
+	// implements the sorting/splitting procedure
+	// permute/clusterSize - beginning and size of the cluster handled by this recursion level
+	// resultTree          - flat output array; RTREE_N nodes are appended per recursion level
+	// maxLevels           - out: maximum recursion depth encountered
+	// subTreeBound        - out: bound enclosing the whole cluster, computed bottom-up
+	void sort4(
+		PxU32* PX_RESTRICT permute, const PxU32 clusterSize, // beginning and size of current recursively processed cluster
+		Array<RTreeNodeNQ>& resultTree, PxU32& maxLevels,
+		PxBounds3V& subTreeBound, PxU32 level = 0)
+	{
+		if(level == 0)
+			maxLevels = 1;
+		else
+			maxLevels = PxMax(maxLevels, level+1);
+
+		PX_ASSERT(permute + clusterSize <= permuteEnd);
+		PX_ASSERT(maxBoundsPerLeafPage >= RTREE_N-1);
+
+		const PxU32 cluster4 = PxMax<PxU32>(clusterSize/RTREE_N, 1);
+
+		PX_ASSERT(clusterSize > 0);
+		// find min and max world bound for current cluster
+		Vec3V mx = allBounds[permute[0]].mx, mn = allBounds[permute[0]].mn; PX_ASSERT(permute[0] < boundCenters.size());
+		for(PxU32 i = 1; i < clusterSize; i ++)
+		{
+			PX_ASSERT(permute[i] < boundCenters.size());
+			mx = V3Max(mx, allBounds[permute[i]].mx);
+			mn = V3Min(mn, allBounds[permute[i]].mn);
+		}
+		PX_ALIGN_PREFIX(16) PxReal maxElem[4] PX_ALIGN_SUFFIX(16);
+		V3StoreA(V3Sub(mx, mn), *reinterpret_cast<PxVec3*>(maxElem)); // compute the dimensions and store into a scalar maxElem array
+
+		// split along the longest axis
+		const PxU32 maxDiagElement = PxU32(maxElem[0] > maxElem[1] && maxElem[0] > maxElem[2] ? 0 : (maxElem[1] > maxElem[2] ? 1 : 2));
+		BoundsLTE cmpLte(maxDiagElement, boundCenters.begin());
+
+		const PxU32 startNodeIndex = resultTree.size();
+		resultTree.resizeUninitialized(startNodeIndex+RTREE_N); // at each recursion level we add 4 nodes to the tree
+
+		PxBounds3V childBound( (PxBounds3V::U()) ); // start off uninitialized for performance
+		const PxI32 leftover = PxMax<PxI32>(PxI32(clusterSize - cluster4*(RTREE_N-1)), 0);
+		PxU32 totalCount = 0;
+		for(PxU32 i = 0; i < RTREE_N; i++)
+		{
+			// split off cluster4 count nodes out of the entire cluster for each i
+			const PxU32 clusterOffset = cluster4*i;
+			PxU32 count1; // cluster4 or leftover depending on whether it's the last cluster
+			if(i < RTREE_N-1)
+			{
+				// only need to do quickSelect for the first pagesize-1 clusters
+				if(clusterOffset <= clusterSize-1) // guard: quickSelect needs a non-empty range
+				{
+					quickSelect::quickSelectFirstK(permute, clusterOffset, clusterSize-1, cluster4, cmpLte);
+					// approximate heuristic - reduce max length by some estimated factor to encourage split along other axis
+					// since we cut off a quarter of elements in this direction the reduction should be *0.75f but we use 0.8
+					// to account for some node overlap. This is somewhat of an arbitrary choice though
+					maxElem[cmpLte.coordIndex] *= reductionFactors[i];
+					// recompute cmpLte.coordIndex from updated maxElements
+					cmpLte.coordIndex = PxU32(maxElem[0] > maxElem[1] && maxElem[0] > maxElem[2] ? 0 : (maxElem[1] > maxElem[2] ? 1 : 2));
+				}
+				count1 = cluster4;
+			} else
+			{
+				count1 = PxU32(leftover);
+				// verify that leftover + sum of previous clusters adds up to clusterSize or leftover is 0
+				// leftover can be 0 if clusterSize<RTREE_N, this is generally rare, can happen for meshes with < RTREE_N tris
+				PX_ASSERT(leftover == 0 || cluster4*i + count1 == clusterSize);
+			}
+
+			RTreeNodeNQ& curNode = resultTree[startNodeIndex+i];
+
+			totalCount += count1; // accumulate total node count
+			if(count1 <= maxBoundsPerLeafPage) // terminal page according to specified maxBoundsPerLeafPage
+			{
+				if(count1 && totalCount <= clusterSize)
+				{
+					// this will be true most of the time except when the total number of triangles in the mesh is < PAGESIZE
+					curNode.leafCount = PxI32(count1);
+					curNode.childPageFirstNodeIndex = PxI32(clusterOffset + PxU32(permute-permuteStart)); // leaf: index into the global permute array
+					childBound = allBounds[permute[clusterOffset+0]];
+					for(PxU32 i1 = 1; i1 < count1; i1++)
+					{
+						const PxBounds3V& bnd = allBounds[permute[clusterOffset+i1]];
+						childBound.include(bnd);
+					}
+				} else
+				{
+					// since we are required to have PAGESIZE nodes per page for simd, we fill any leftover with empty nodes
+					// we should only hit this if the total number of triangles in the mesh is < PAGESIZE
+					childBound.mn = childBound.mx = V3Zero(); // shouldn't be necessary but setting just in case
+					curNode.bounds.setEmpty();
+					curNode.leafCount = -1;
+					curNode.childPageFirstNodeIndex = -1; // using -1 for empty node
+				}
+			} else // not a terminal page, recurse on count1 nodes cluster
+			{
+				curNode.childPageFirstNodeIndex = PxI32(resultTree.size());
+				curNode.leafCount = 0;
+				sort4(permute+cluster4*i, count1, resultTree, maxLevels, childBound, level+1);
+			}
+			if(i == 0)
+				subTreeBound = childBound; // initialize subTreeBound with first childBound
+			else
+				subTreeBound.include(childBound); // expand subTreeBound with current childBound
+
+			// can't reuse curNode here: the recursive call may have resized resultTree and invalidated the reference, so recompute it
+			RTreeNodeNQ& curNode1 = resultTree[startNodeIndex+i];
+			curNode1.bounds.minimum = childBound.getMinVec3(); // update node bounds using recursively computed childBound
+			curNode1.bounds.maximum = childBound.getMaxVec3();
+		}
+	}
+};
+
+// heuristic size reduction factors for splitting heuristic (see how it's used above)
+// note: 3 initializers, array size RTREE_N-1 => this build assumes RTREE_N == 4
+const PxReal SubSortQuick::reductionFactors[RTREE_N-1] = {0.8f, 0.7f, 0.6f};
+
+// sizePerf trade-off presets for sorting routines
+// indexed by the preset derived from sizePerfTradeOff01 in the SubSortQuick constructor
+const PxU32 SubSortQuick::stopAtTrisPerLeaf1[SubSortQuick::NTRADEOFF] = {16, 14, 12, 10, 8, 7, 6, 5, 4};
+
+/////////////////////////////////////////////////////////////////////////
+// SAH sorting method
+//
+// Preset table: lower index=better size -> higher index = better perf
+// (the two measurement rows below document each preset's benchmark perf vs. cooked data size)
+static const PxU32 NTRADEOFF = 15;
+	//	%	-24	-23	-17	-15	-10	-8	-5	-3	0	+3	+3	+5	+7	+8	+9 - % raycast MeshSurface*Random benchmark perf
+	//	K	717	734	752	777	793	811	824	866	903	939	971	1030	1087	1139	1266 - testzone size in K
+	//	#	0	1	2	3	4	5	6	7	8	9	10	11	12	13	14 - preset number
+static const PxU32 stopAtTrisPerPage[NTRADEOFF] = {	64,	60,	56,	48,	46,	44,	40,	36,	32,	28,	24,	20,	16,	12,	12};
+static const PxU32 stopAtTrisPerLeaf[NTRADEOFF] = {	16,	14,	12,	10,	9,	8,	8,	6,	5,	5,	5,	4,	4,	4,	2}; // capped at 2 anyway
+
+/////////////////////////////////////////////////////////////////////////
+// comparator struct for sorting the bounds along a specified coordIndex (coordIndex=0,1,2 for X,Y,Z)
+// note: strict (<) comparison, unlike the <= used by BoundsLTE for the quickselect path
+struct SortBoundsPredicate
+{
+	PxU32 coordIndex;
+	const PxBounds3V* allBounds;
+	SortBoundsPredicate(PxU32 coordIndex_, const PxBounds3V* allBounds_) : coordIndex(coordIndex_), allBounds(allBounds_)
+	{}
+
+	bool operator()(const PxU32 & idx1, const PxU32 & idx2) const
+	{
+		// using the bounds center for comparison (min+max = 2*center; the constant factor doesn't affect ordering)
+		PxF32 center1 = V3ReadXYZ(allBounds[idx1].mn)[coordIndex] + V3ReadXYZ(allBounds[idx1].mx)[coordIndex];
+		PxF32 center2 = V3ReadXYZ(allBounds[idx2].mn)[coordIndex] + V3ReadXYZ(allBounds[idx2].mx)[coordIndex];
+		return (center1 < center2);
+	}
+};
+
+
+/////////////////////////////////////////////////////////////////////////
+// auxiliary class for SAH build (SAH = surface area heuristic)
+// a half-open run [start, start+count) of indices into the current permutation
+struct Interval
+{
+	PxU32 start, count;
+	Interval(PxU32 s, PxU32 c) : start(s), count(c) {}
+};
+
+// SAH function - returns surface area for given AABB extents
+// computes v.x*v.y + v.y*v.z + v.x*v.z, i.e. half the true surface area; the constant
+// factor is irrelevant since results are only compared against each other
+static PX_FORCE_INLINE void PxSAH(const Vec3VArg v, PxF32& sah)
+{
+	FStore(V3Dot(v, V3PermZXY(v)), &sah); // v.x*v.y + v.y*v.z + v.x*v.z;
+}
+
+struct SubSortSAH
+{
+	PxU32* PX_RESTRICT permuteStart, *PX_RESTRICT tempPermute; // full permutation array + scratch permutation buffer
+	const PxBounds3V* PX_RESTRICT allBounds; // input bounds, indexed via permutation entries
+	PxF32* PX_RESTRICT metricL; // per-candidate-split SAH metric, left-to-right sweep
+	PxF32* PX_RESTRICT metricR; // per-candidate-split SAH metric, right-to-left sweep
+	const PxU32* PX_RESTRICT xOrder, *PX_RESTRICT yOrder, *PX_RESTRICT zOrder; // bound indices presorted along each axis
+	const PxU32* PX_RESTRICT xRanks, *PX_RESTRICT yRanks, *PX_RESTRICT zRanks; // inverse of the order arrays (order[rank[i]] == i, per usage in split())
+	PxU32* PX_RESTRICT tempRanks; // scratch rank buffer
+	PxU32 nbTotalBounds; // total number of input bounds
+	PxU32 iTradeOff; // preset index derived from sizePerfTradeOff01
+
+	// precompute various values used during sort
+	// metricL/metricR/tempPermute/tempRanks are working buffers; allocated here, released in the destructor
+	SubSortSAH(
+		PxU32* permute, const PxBounds3V* allBounds_, PxU32 numBounds,
+		const PxU32* xOrder_, const PxU32* yOrder_, const PxU32* zOrder_,
+		const PxU32* xRanks_, const PxU32* yRanks_, const PxU32* zRanks_, PxReal sizePerfTradeOff01)
+		: permuteStart(permute), allBounds(allBounds_),
+		xOrder(xOrder_), yOrder(yOrder_), zOrder(zOrder_),
+		xRanks(xRanks_), yRanks(yRanks_), zRanks(zRanks_), nbTotalBounds(numBounds)
+	{
+		metricL = new PxF32[numBounds];
+		metricR = new PxF32[numBounds];
+		tempPermute = new PxU32[numBounds*2+1]; // 2n+1 scratch entries -- TODO confirm why the extra capacity is needed
+		tempRanks = new PxU32[numBounds];
+		iTradeOff = PxMin<PxU32>( PxU32(PxMax<PxReal>(0.0f, sizePerfTradeOff01)*NTRADEOFF), NTRADEOFF-1 ); // clamp [0,1] input to a valid preset index
+	}
+
+	~SubSortSAH() // release temporarily used memory
+	{
+		delete [] metricL; metricL = NULL;
+		delete [] metricR; metricR = NULL;
+		delete [] tempPermute; tempPermute = NULL;
+		delete [] tempRanks; tempRanks = NULL;
+	}
+
+	////////////////////////////////////////////////////////////////////
+	// returns split position for second array start relative to permute ptr,
+	// i.e. [0, result) and [result, clusterSize) become the two child intervals.
+	// Evaluates all three axes, picks the (axis, position) pair minimizing the SAH
+	// cost countL*SA(left) + countR*SA(right), and reorders permute along that axis.
+	PxU32 split(PxU32* permute, PxU32 clusterSize)
+	{
+		if(clusterSize <= 1)
+			return 0;
+		if(clusterSize == 2)
+			return 1;
+
+		PxI32 minCount = clusterSize >= 4 ? 2 : 1; // always keep at least minCount elements on each side of the split
+		PxI32 splitStartL = minCount; // range=[startL->endL)
+		PxI32 splitEndL = PxI32(clusterSize-minCount);
+		PxI32 splitStartR = PxI32(clusterSize-splitStartL); // range=(endR<-startR], startR > endR
+		PxI32 splitEndR = PxI32(clusterSize-splitEndL);
+		PX_ASSERT(splitEndL-splitStartL == splitStartR-splitEndR);
+		PX_ASSERT(splitStartL <= splitEndL);
+		PX_ASSERT(splitStartR >= splitEndR);
+		PX_ASSERT(splitEndR >= 1);
+		PX_ASSERT(splitEndL < PxI32(clusterSize));
+
+		// pick the best axis with some splitting metric
+		// axis index is X=0, Y=1, Z=2
+		PxF32 minMetric[3];
+		PxU32 minMetricSplit[3];
+		const PxU32* ranks3[3] = { xRanks, yRanks, zRanks };
+		const PxU32* orders3[3] = { xOrder, yOrder, zOrder };
+		for(PxU32 coordIndex = 0; coordIndex <= 2; coordIndex++)
+		{
+			SortBoundsPredicate sortPredicateLR(coordIndex, allBounds); // NOTE(review): appears unused in this function -- dead local?
+
+			const PxU32* rank = ranks3[coordIndex];
+			const PxU32* order = orders3[coordIndex];
+
+			// build ranks in tempPermute
+			if(clusterSize == nbTotalBounds) // AP: about 4% perf gain from this optimization
+			{
+				// if this is a full cluster sort, we already have it done
+				for(PxU32 i = 0; i < clusterSize; i ++)
+					tempPermute[i] = order[i];
+			} else
+			{
+				// sort the tempRanks
+				for(PxU32 i = 0; i < clusterSize; i ++)
+					tempRanks[i] = rank[permute[i]];
+				Ps::sort(tempRanks, clusterSize);
+				for(PxU32 i = 0; i < clusterSize; i ++) // convert back from ranks to indices
+					tempPermute[i] = order[tempRanks[i]];
+			}
+
+			// we consider overlapping intervals for minimum sum of metrics
+			// left interval is from splitStartL up to splitEndL
+			// right interval is from splitStartR down to splitEndR
+
+
+			// first compute the array metricL
+			Vec3V boundsLmn = allBounds[tempPermute[0]].mn; // init with 0th bound
+			Vec3V boundsLmx = allBounds[tempPermute[0]].mx; // init with 0th bound
+			PxI32 ii;
+			for(ii = 1; ii < splitStartL; ii++) // sweep right to include all bounds up to splitStartL-1
+			{
+				boundsLmn = V3Min(boundsLmn, allBounds[tempPermute[ii]].mn);
+				boundsLmx = V3Max(boundsLmx, allBounds[tempPermute[ii]].mx);
+			}
+
+			PxU32 countL0 = 0;
+			for(ii = splitStartL; ii <= splitEndL; ii++) // compute metric for inclusive bounds from splitStartL to splitEndL
+			{
+				boundsLmn = V3Min(boundsLmn, allBounds[tempPermute[ii]].mn);
+				boundsLmx = V3Max(boundsLmx, allBounds[tempPermute[ii]].mx);
+				PxSAH(V3Sub(boundsLmx, boundsLmn), metricL[countL0++]);
+			}
+			// now we have metricL
+
+			// now compute the array metricR
+			Vec3V boundsRmn = allBounds[tempPermute[clusterSize-1]].mn; // init with last bound
+			Vec3V boundsRmx = allBounds[tempPermute[clusterSize-1]].mx; // init with last bound
+			for(ii = PxI32(clusterSize-2); ii > splitStartR; ii--) // sweep left from the end, seeding right-side bounds down to splitStartR+1
+			{
+				boundsRmn = V3Min(boundsRmn, allBounds[tempPermute[ii]].mn);
+				boundsRmx = V3Max(boundsRmx, allBounds[tempPermute[ii]].mx);
+			}
+
+			PxU32 countR0 = 0;
+			for(ii = splitStartR; ii >= splitEndR; ii--) // continue sweeping left, including bounds and recomputing the metric
+			{
+				boundsRmn = V3Min(boundsRmn, allBounds[tempPermute[ii]].mn);
+				boundsRmx = V3Max(boundsRmx, allBounds[tempPermute[ii]].mx);
+				PxSAH(V3Sub(boundsRmx, boundsRmn), metricR[countR0++]);
+			}
+
+			PX_ASSERT((countL0 == countR0) && (countL0 == PxU32(splitEndL-splitStartL+1)));
+
+			// now iterate over splitRange and compute the minimum sum of SAHLeft*countLeft + SAHRight*countRight
+			PxU32 minMetricSplitPosition = 0;
+			PxF32 minMetricLocal = PX_MAX_REAL;
+			const PxI32 hsI32 = PxI32(clusterSize/2); // half size, used for the "more even split" tie-break below
+			const PxI32 splitRange = (splitEndL-splitStartL+1);
+			for(ii = 0; ii < splitRange; ii++)
+			{
+				PxF32 countL = PxF32(ii+minCount); // need to add minCount since ii iterates over splitRange
+				PxF32 countR = PxF32(splitRange-ii-1+minCount);
+				PX_ASSERT(PxU32(countL + countR) == clusterSize);
+
+				const PxF32 metric = (countL*metricL[ii] + countR*metricR[splitRange-ii-1]);
+				const PxU32 splitPos = PxU32(ii+splitStartL);
+				if(metric < minMetricLocal ||
+					(metric <= minMetricLocal && // same metric but more even split
+					PxAbs(PxI32(splitPos)-hsI32) < PxAbs(PxI32(minMetricSplitPosition)-hsI32)))
+				{
+					minMetricLocal = metric;
+					minMetricSplitPosition = splitPos;
+				}
+			}
+
+			minMetric[coordIndex] = minMetricLocal;
+			minMetricSplit[coordIndex] = minMetricSplitPosition;
+
+			// continue with the next candidate axis
+		}
+
+		// choose the axis with the smallest metric (ties resolve toward X, then Y)
+		PxU32 winIndex = 2;
+		if(minMetric[0] <= minMetric[1] && minMetric[0] <= minMetric[2])
+			winIndex = 0;
+		else if(minMetric[1] <= minMetric[2])
+			winIndex = 1;
+
+		// reorder permute along the winning axis
+		const PxU32* rank = ranks3[winIndex];
+		const PxU32* order = orders3[winIndex];
+		if(clusterSize == nbTotalBounds) // AP: about 4% gain from this special case optimization
+		{
+			// if this is a full cluster sort, we already have it done
+			for(PxU32 i = 0; i < clusterSize; i ++)
+				permute[i] = order[i];
+		} else
+		{
+			// sort the tempRanks
+			for(PxU32 i = 0; i < clusterSize; i ++)
+				tempRanks[i] = rank[permute[i]];
+			Ps::sort(tempRanks, clusterSize);
+			for(PxU32 i = 0; i < clusterSize; i ++)
+				permute[i] = order[tempRanks[i]];
+		}
+
+		PxU32 splitPoint = minMetricSplit[winIndex];
+		if(clusterSize == 3 && splitPoint == 0)
+			splitPoint = 1; // special case due to rounding
+		return splitPoint;
+	}
+
+	// compute surface area for a given split
+	// returns the (half) surface area of the AABB enclosing all bounds in the interval;
+	// permute and split.start are both relative to the current cluster
+	PxF32 computeSA(const PxU32* permute, const Interval& split) // both permute and i are relative
+	{
+		PX_ASSERT(split.count >= 1);
+		Vec3V bmn = allBounds[permute[split.start]].mn;
+		Vec3V bmx = allBounds[permute[split.start]].mx;
+		for(PxU32 i = 1; i < split.count; i++)
+		{
+			const PxBounds3V& b1 = allBounds[permute[split.start+i]];
+			bmn = V3Min(bmn, b1.mn); bmx = V3Max(bmx, b1.mx);
+		}
+
+		PxF32 ret; PxSAH(V3Sub(bmx, bmn), ret);
+		return ret;
+	}
+
+ ////////////////////////////////////////////////////////////////////
+ // main SAH sort routine
+ void sort4(PxU32* permute, PxU32 clusterSize,
+ Array<RTreeNodeNQ>& resultTree, PxU32& maxLevels, PxU32 level = 0, RTreeNodeNQ* parentNode = NULL)
+ {
+ PX_UNUSED(parentNode);
+
+ if(level == 0)
+ maxLevels = 1;
+ else
+ maxLevels = PxMax(maxLevels, level+1);
+
+ PxU32 splitPos[RTREE_N];
+ for(PxU32 j = 0; j < RTREE_N; j++)
+ splitPos[j] = j+1;
+
+ if(clusterSize >= RTREE_N)
+ {
+ // split into RTREE_N number of regions via RTREE_N-1 subsequent splits
+ // each split is represented as a current interval
+ // we iterate over currently active intervals and compute it's surface area
+ // then we split the interval with maximum surface area
+ // AP scaffold: possible optimization - seems like computeSA can be cached for unchanged intervals
+ InlineArray<Interval, 4> splits;
+ splits.pushBack(Interval(0, clusterSize));
+ for(PxU32 iSplit = 0; iSplit < RTREE_N-1; iSplit++)
+ {
+ PxF32 maxSAH = -FLT_MAX;
+ PxU32 maxSplit = 0xFFFFffff;
+ for(PxU32 i = 0; i < splits.size(); i++)
+ {
+ if(splits[i].count == 1)
+ continue;
+ PxF32 SAH = computeSA(permute, splits[i])*splits[i].count;
+ if(SAH > maxSAH)
+ {
+ maxSAH = SAH;
+ maxSplit = i;
+ }
+ }
+ PX_ASSERT(maxSplit != 0xFFFFffff);
+
+ // maxSplit is now the index of the interval in splits array with maximum surface area
+ // we now split it into 2 using the split() function
+ Interval old = splits[maxSplit];
+ PX_ASSERT(old.count > 1);
+ PxU32 splitLocal = split(permute+old.start, old.count); // relative split pos
+
+ PX_ASSERT(splitLocal >= 1);
+ PX_ASSERT(old.count-splitLocal >= 1);
+ splits.pushBack(Interval(old.start, splitLocal));
+ splits.pushBack(Interval(old.start+splitLocal, old.count-splitLocal));
+ splits.replaceWithLast(maxSplit);
+ splitPos[iSplit] = old.start+splitLocal;
+ }
+
+ // verification code, make sure split counts add up to clusterSize
+ PX_ASSERT(splits.size() == RTREE_N);
+ PxU32 sum = 0;
+ for(PxU32 j = 0; j < RTREE_N; j++)
+ sum += splits[j].count;
+ PX_ASSERT(sum == clusterSize);
+ }
+ else // clusterSize < RTREE_N
+ {
+ // make it so splitCounts based on splitPos add up correctly for small cluster sizes
+ for(PxU32 i = clusterSize; i < RTREE_N-1; i++)
+ splitPos[i] = clusterSize;
+ }
+
+ // sort splitPos index array using quicksort (just a few values)
+ Ps::sort(splitPos, RTREE_N-1);
+ splitPos[RTREE_N-1] = clusterSize; // splitCount[n] is computed as splitPos[n+1]-splitPos[n], so we need to add this last value
+
+ // now compute splitStarts and splitCounts from splitPos[] array. Also perform a bunch of correctness verification
+ PxU32 splitStarts[RTREE_N];
+ PxU32 splitCounts[RTREE_N];
+ splitStarts[0] = 0;
+ splitCounts[0] = splitPos[0];
+ PxU32 sumCounts = splitCounts[0];
+ for(PxU32 j = 1; j < RTREE_N; j++)
+ {
+ splitStarts[j] = splitPos[j-1];
+ PX_ASSERT(splitStarts[j-1]<=splitStarts[j]);
+ splitCounts[j] = splitPos[j]-splitPos[j-1];
+ PX_ASSERT(splitCounts[j] > 0 || clusterSize < RTREE_N);
+ sumCounts += splitCounts[j];
+ PX_ASSERT(splitStarts[j-1]+splitCounts[j-1]<=splitStarts[j]);
+ }
+ PX_ASSERT(sumCounts == clusterSize);
+ PX_ASSERT(splitStarts[RTREE_N-1]+splitCounts[RTREE_N-1]<=clusterSize);
+
+ // mark this cluster as terminal based on clusterSize <= stopAtTrisPerPage parameter for current iTradeOff user specified preset
+ bool terminalClusterByTotalCount = (clusterSize <= stopAtTrisPerPage[iTradeOff]);
+ // iterate over splitCounts for the current cluster, if any of counts exceed 16 (which is the maximum supported by LeafTriangles
+ // we cannot mark this cluster as terminal (has to be split more)
+ for(PxU32 s = 0; s < RTREE_N; s++)
+ if(splitCounts[s] > 16) // LeafTriangles doesn't support > 16 tris
+ terminalClusterByTotalCount = false;
+
+ // iterate over all the splits
+ for(PxU32 s = 0; s < RTREE_N; s++)
+ {
+ RTreeNodeNQ rtn;
+ PxU32 splitCount = splitCounts[s];
+ if(splitCount > 0) // splits shouldn't be empty generally
+ {
+ // sweep left to right and compute min and max SAH for each individual bound in current split
+ PxBounds3V b = allBounds[permute[splitStarts[s]]];
+ PxF32 sahMin; PxSAH(b.getExtents(), sahMin);
+ PxF32 sahMax = sahMin;
+ // AP scaffold - looks like this could be optimized (we are recomputing bounds top down)
+ for(PxU32 i = 1; i < splitCount; i++)
+ {
+ PxU32 localIndex = i + splitStarts[s];
+ const PxBounds3V& b1 = allBounds[permute[localIndex]];
+ PxF32 sah1; PxSAH(b1.getExtents(), sah1);
+ sahMin = PxMin(sahMin, sah1);
+ sahMax = PxMax(sahMax, sah1);
+ b.include(b1);
+ }
+
+ rtn.bounds.minimum = V3ReadXYZ(b.mn);
+ rtn.bounds.maximum = V3ReadXYZ(b.mx);
+
+ // if bounds differ widely (according to some heuristic preset), we continue splitting
+ // this is important for a mixed cluster with large and small triangles
+ bool okSAH = (sahMax/sahMin < 40.0f);
+ if(!okSAH)
+ terminalClusterByTotalCount = false; // force splitting this cluster
+
+ bool stopSplitting = // compute the final splitting criterion
+ splitCount <= 2 || (okSAH && splitCount <= 3) // stop splitting at 2 nodes or if SAH ratio is OK and splitCount <= 3
+ || terminalClusterByTotalCount || splitCount <= stopAtTrisPerLeaf[iTradeOff];
+ if(stopSplitting)
+ {
+ // this is a terminal page then, mark as such
+ // first node index is relative to the top level input array beginning
+ rtn.childPageFirstNodeIndex = PxI32(splitStarts[s]+(permute-permuteStart));
+ rtn.leafCount = PxI32(splitCount);
+ PX_ASSERT(splitCount <= 16); // LeafTriangles doesn't support more
+ }
+ else
+ {
+ // this is not a terminal page, we will recompute this later, after we recurse on subpages (label ZZZ)
+ rtn.childPageFirstNodeIndex = -1;
+ rtn.leafCount = 0;
+ }
+ }
+ else // splitCount == 0 at this point, this is an empty paddding node (with current presets it's very rare)
+ {
+ PX_ASSERT(splitCount == 0);
+ rtn.bounds.setEmpty();
+ rtn.childPageFirstNodeIndex = -1;
+ rtn.leafCount = -1;
+ }
+ resultTree.pushBack(rtn); // push the new node into the resultTree array
+ }
+
+ if(terminalClusterByTotalCount) // abort recursion if terminal cluster
+ return;
+
+ // recurse on subpages
+ PxU32 parentIndex = resultTree.size() - RTREE_N; // save the parentIndex as specified (array can be resized during recursion)
+ for(PxU32 s = 0; s<RTREE_N; s++)
+ {
+ RTreeNodeNQ* sParent = &resultTree[parentIndex+s]; // array can be resized and relocated during recursion
+ if(sParent->leafCount == 0) // only split pages that were marked as non-terminal during splitting (see "label ZZZ" above)
+ {
+ // all child nodes will be pushed inside of this recursive call,
+ // so we set the child pointer for parent node to resultTree.size()
+ sParent->childPageFirstNodeIndex = PxI32(resultTree.size());
+ sort4(permute+splitStarts[s], splitCounts[s], resultTree, maxLevels, level+1, sParent);
+ }
+ }
+ }
+};
+
+
+
+
+/////////////////////////////////////////////////////////////////////////
+// initializes the input permute array with identity permutation
+// and shuffles it so that new sorted index, newIndex = resultPermute[oldIndex]
+// Builds the final quantized RTree ('result') from per-triangle AABBs.
+// - allBounds/numBounds: one AABB per triangle; allMn/allMx: overall mesh bounds
+// - permute [out]: triangle reorder map, newIndex = resultPermute[oldIndex]
+// - rc: callback converting a (start,count) triangle run into the encoded LeafTriangles ptr
+// - hint: eSIM_PERFORMANCE selects the high-quality SAH build, eCOOKING_PERFORMANCE the fast build
+static void buildFromBounds(
+ Gu::RTree& result, const PxBounds3V* allBounds, PxU32 numBounds,
+ Array<PxU32>& permute, RTreeCooker::RemapCallback* rc, Vec3VArg allMn, Vec3VArg allMx,
+ PxReal sizePerfTradeOff01, PxMeshCookingHint::Enum hint)
+{
+ PX_UNUSED(sizePerfTradeOff01);
+ PxBounds3V treeBounds(allMn, allMx);
+
+ // start off with an identity permutation
+ permute.resize(0);
+ permute.reserve(numBounds+1);
+ for(PxU32 j = 0; j < numBounds; j ++)
+ permute.pushBack(j);
+ // extra sentinel slot past the end; verified after the build to catch out-of-bounds writes
+ const PxU32 sentinel = 0xABCDEF01;
+ permute.pushBack(sentinel);
+
+ // load sorted nodes into an RTreeNodeNQ tree representation
+ // build the tree structure from sorted nodes
+ const PxU32 pageSize = RTREE_N;
+ Array<RTreeNodeNQ> resultTree;
+ resultTree.reserve(numBounds*2);
+
+ PxU32 maxLevels = 0;
+ if(hint == PxMeshCookingHint::eSIM_PERFORMANCE) // use high quality SAH build
+ {
+ // precompute, for each axis, the sort order and the rank of every triangle in that order;
+ // the recursive SAH sorter consumes these to avoid re-sorting at every split
+ Array<PxU32> xRanks(numBounds), yRanks(numBounds), zRanks(numBounds), xOrder(numBounds), yOrder(numBounds), zOrder(numBounds);
+ memcpy(xOrder.begin(), permute.begin(), sizeof(xOrder[0])*numBounds);
+ memcpy(yOrder.begin(), permute.begin(), sizeof(yOrder[0])*numBounds);
+ memcpy(zOrder.begin(), permute.begin(), sizeof(zOrder[0])*numBounds);
+ // sort by shuffling the permutation, precompute sorted ranks for x,y,z-orders
+ Ps::sort(xOrder.begin(), xOrder.size(), SortBoundsPredicate(0, allBounds));
+ for(PxU32 i = 0; i < numBounds; i++) xRanks[xOrder[i]] = i;
+ Ps::sort(yOrder.begin(), yOrder.size(), SortBoundsPredicate(1, allBounds));
+ for(PxU32 i = 0; i < numBounds; i++) yRanks[yOrder[i]] = i;
+ Ps::sort(zOrder.begin(), zOrder.size(), SortBoundsPredicate(2, allBounds));
+ for(PxU32 i = 0; i < numBounds; i++) zRanks[zOrder[i]] = i;
+
+ SubSortSAH ss(permute.begin(), allBounds, numBounds,
+ xOrder.begin(), yOrder.begin(), zOrder.begin(), xRanks.begin(), yRanks.begin(), zRanks.begin(), sizePerfTradeOff01);
+ ss.sort4(permute.begin(), numBounds, resultTree, maxLevels);
+ } else
+ { // use fast cooking path
+ PX_ASSERT(hint == PxMeshCookingHint::eCOOKING_PERFORMANCE);
+ SubSortQuick ss(permute.begin(), allBounds, numBounds, sizePerfTradeOff01);
+ PxBounds3V discard((PxBounds3V::U()));
+ ss.sort4(permute.begin(), permute.size()-1, resultTree, maxLevels, discard); // AP scaffold: need to implement build speed/runtime perf slider
+ }
+
+ PX_ASSERT(permute[numBounds] == sentinel); // verify we didn't write past the array
+ permute.popBack(); // discard the sentinel value
+
+ #if PRINT_RTREE_COOKING_STATS // stats code
+ PxU32 totalLeafTris = 0;
+ PxU32 numLeaves = 0;
+ PxI32 maxLeafTris = 0;
+ PxU32 numEmpty = 0;
+ for(PxU32 i = 0; i < resultTree.size(); i++)
+ {
+ PxI32 leafCount = resultTree[i].leafCount;
+ numEmpty += (resultTree[i].bounds.isEmpty());
+ if(leafCount > 0)
+ {
+ numLeaves++;
+ totalLeafTris += leafCount;
+ if(leafCount > maxLeafTris)
+ maxLeafTris = leafCount;
+ }
+ }
+
+ printf("AABBs total/empty=%d/%d\n", resultTree.size(), numEmpty);
+ printf("numTris=%d, numLeafAABBs=%d, avgTrisPerLeaf=%.2f, maxTrisPerLeaf = %d\n",
+ numBounds, numLeaves, PxF32(totalLeafTris)/numLeaves, maxLeafTris);
+ #endif
+
+ PX_ASSERT(RTREE_N*sizeof(RTreeNodeQ) == sizeof(RTreePage)); // needed for nodePtrMultiplier computation to be correct
+ const int nodePtrMultiplier = sizeof(RTreeNodeQ); // convert offset as count in qnodes to page ptr
+
+ // Quantize the tree. AP scaffold - might be possible to merge this phase with the page pass below this loop
+ Array<RTreeNodeQ> qtreeNodes;
+ PxU32 firstEmptyIndex = PxU32(-1); // index of the first empty (padding) node, reused as target for all empty nodes
+ PxU32 resultCount = resultTree.size();
+ qtreeNodes.reserve(resultCount);
+
+ for(PxU32 i = 0; i < resultCount; i++) // AP scaffold - eliminate this pass
+ {
+ RTreeNodeNQ & u = resultTree[i];
+ RTreeNodeQ q;
+ q.setLeaf(u.leafCount > 0); // set the leaf flag
+ if(u.childPageFirstNodeIndex == -1) // empty node?
+ {
+ if(firstEmptyIndex == PxU32(-1))
+ firstEmptyIndex = qtreeNodes.size();
+ q.minx = q.miny = q.minz = FLT_MAX; // AP scaffold improvement - use empty 1e30 bounds instead and reference a valid leaf
+ q.maxx = q.maxy = q.maxz = -FLT_MAX; // that will allow to remove the empty node test from the runtime
+
+ q.ptr = firstEmptyIndex*nodePtrMultiplier; PX_ASSERT((q.ptr & 1) == 0);
+ q.setLeaf(true); // label empty node as leaf node
+ } else
+ {
+ // non-empty node: either a terminal leaf run or an internal page reference
+ q.minx = u.bounds.minimum.x;
+ q.miny = u.bounds.minimum.y;
+ q.minz = u.bounds.minimum.z;
+ q.maxx = u.bounds.maximum.x;
+ q.maxy = u.bounds.maximum.y;
+ q.maxz = u.bounds.maximum.z;
+ if(u.leafCount > 0)
+ {
+ q.ptr = PxU32(u.childPageFirstNodeIndex);
+ rc->remap(&q.ptr, q.ptr, PxU32(u.leafCount));
+ PX_ASSERT(q.isLeaf()); // remap is expected to set the isLeaf bit
+ }
+ else
+ {
+ // verify that all children bounds are included in the parent bounds
+ for(PxU32 s = 0; s < RTREE_N; s++)
+ {
+ const RTreeNodeNQ& child = resultTree[u.childPageFirstNodeIndex+s];
+ PX_UNUSED(child);
+ // is a sentinel node or is inside parent's bounds
+ PX_ASSERT(child.leafCount == -1 || child.bounds.isInside(u.bounds));
+ }
+
+ q.ptr = PxU32(u.childPageFirstNodeIndex * nodePtrMultiplier);
+ PX_ASSERT(q.ptr % RTREE_N == 0);
+ q.setLeaf(false);
+ }
+ }
+ qtreeNodes.pushBack(q);
+ }
+
+ // build the final rtree image
+ result.mInvDiagonal = PxVec4(1.0f);
+ PX_ASSERT(qtreeNodes.size() % RTREE_N == 0);
+ result.mTotalNodes = qtreeNodes.size();
+ result.mTotalPages = result.mTotalNodes / pageSize;
+ result.mPages = static_cast<RTreePage*>(
+ Ps::AlignedAllocator<128>().allocate(sizeof(RTreePage)*result.mTotalPages, __FILE__, __LINE__));
+ result.mBoundsMin = PxVec4(V3ReadXYZ(treeBounds.mn), 0.0f);
+ result.mBoundsMax = PxVec4(V3ReadXYZ(treeBounds.mx), 0.0f);
+ result.mDiagonalScaler = (result.mBoundsMax - result.mBoundsMin) / 65535.0f;
+ result.mPageSize = pageSize;
+ result.mNumLevels = maxLevels;
+ PX_ASSERT(result.mTotalNodes % pageSize == 0);
+ result.mNumRootPages = 1;
+
+ // transpose the AoS qtreeNodes array into the SoA page layout used at runtime
+ for(PxU32 j = 0; j < result.mTotalPages; j++)
+ {
+ RTreePage& page = result.mPages[j];
+ for(PxU32 k = 0; k < RTREE_N; k ++)
+ {
+ const RTreeNodeQ& n = qtreeNodes[j*RTREE_N+k];
+ page.maxx[k] = n.maxx;
+ page.maxy[k] = n.maxy;
+ page.maxz[k] = n.maxz;
+ page.minx[k] = n.minx;
+ page.miny[k] = n.miny;
+ page.minz[k] = n.minz;
+ page.ptrs[k] = n.ptr;
+ }
+ }
+
+ //printf("Tree size=%d\n", result.mTotalPages*sizeof(RTreePage));
+#if PX_DEBUG
+ result.validate(); // make sure the child bounds are included in the parent and other validation
+#endif
+}
+
+} // namespace physx
diff --git a/PhysX_3.4/Source/PhysXCooking/src/mesh/RTreeCooking.h b/PhysX_3.4/Source/PhysXCooking/src/mesh/RTreeCooking.h
new file mode 100644
index 00000000..04a0b15c
--- /dev/null
+++ b/PhysX_3.4/Source/PhysXCooking/src/mesh/RTreeCooking.h
@@ -0,0 +1,51 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2016 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+// NOTE(review): added a multiple-inclusion guard — this header previously had none,
+// so a second inclusion in one translation unit would redefine RTreeCooker.
+#ifndef PX_RTREE_COOKING_H
+#define PX_RTREE_COOKING_H
+
+#include "CmPhysXCommon.h"
+#include "GuMeshData.h"
+#include "PxCooking.h"
+#include "PsArray.h"
+#include "GuRTree.h"
+
+namespace physx
+{
+ // Builds the RTree mid-phase acceleration structure for triangle meshes during cooking.
+ struct RTreeCooker
+ {
+ struct RemapCallback // a callback to convert indices from triangle to LeafTriangles or other uses
+ {
+ virtual ~RemapCallback() {}
+ // rtreePtr [out] receives the encoded leaf reference; start/leafCount identify the triangle run
+ virtual void remap(PxU32* rtreePtr, PxU32 start, PxU32 leafCount) = 0;
+ };
+
+ // triangles will be remapped so that newIndex = resultPermute[oldIndex]
+ static void buildFromTriangles(
+ Gu::RTree& resultTree, const PxVec3* verts, PxU32 numVerts, const PxU16* tris16, const PxU32* tris32, PxU32 numTris,
+ Ps::Array<PxU32>& resultPermute, RemapCallback* rc, PxReal sizePerfTradeOff01, PxMeshCookingHint::Enum hint);
+ };
+}
+
+#endif // PX_RTREE_COOKING_H
diff --git a/PhysX_3.4/Source/PhysXCooking/src/mesh/TriangleMeshBuilder.cpp b/PhysX_3.4/Source/PhysXCooking/src/mesh/TriangleMeshBuilder.cpp
new file mode 100644
index 00000000..877d752e
--- /dev/null
+++ b/PhysX_3.4/Source/PhysXCooking/src/mesh/TriangleMeshBuilder.cpp
@@ -0,0 +1,1443 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2016 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#include "RTreeCooking.h"
+#include "TriangleMeshBuilder.h"
+#include "EdgeList.h"
+#include "MeshCleaner.h"
+#include "GuConvexEdgeFlags.h"
+#include "PxTriangleMeshDesc.h"
+#include "GuSerialize.h"
+#include "Cooking.h"
+#include "GuMeshData.h"
+#include "GuTriangle32.h"
+#include "GuRTree.h"
+#include "GuInternal.h"
+#include "GuBV4Build.h"
+#include "GuBV32Build.h"
+#include "PsFoundation.h"
+#include "PsHashMap.h"
+#include "PsSort.h"
+
+namespace physx {
+
+// Plain scalar vector types used for the GRB (GPU rigid body) mesh buffers
+// (see reinterpret_casts in TriangleMeshBuilder::createGRBData). uint4/float4
+// are explicitly 16-byte aligned — presumably to match the device-side vector
+// types of the same names; confirm against the GPU code before changing layout.
+struct int3
+{
+ int x, y, z;
+};
+
+struct uint3
+{
+ unsigned int x, y, z;
+};
+
+PX_ALIGN_PREFIX(16)
+struct uint4
+{
+ unsigned int x, y, z, w;
+}
+PX_ALIGN_SUFFIX(16);
+
+PX_ALIGN_PREFIX(16)
+struct float4
+{
+ float x, y, z, w;
+}
+PX_ALIGN_SUFFIX(16);
+
+}
+
+#include "GrbTriangleMeshCooking.h"
+
+using namespace physx;
+using namespace Gu;
+using namespace Ps;
+
+namespace physx {
+
+// Binds the builder to the mesh data being cooked and the cooking parameters.
+// The edge list is created on demand (createEdgeList) and released in the destructor.
+TriangleMeshBuilder::TriangleMeshBuilder(TriangleMeshData& m, const PxCookingParams& params) :
+ edgeList (NULL),
+ mParams (params),
+ mMeshData (m)
+{
+}
+
+TriangleMeshBuilder::~TriangleMeshBuilder()
+{
+ releaseEdgeList(); // frees the edge list built by createEdgeList(), if any
+}
+
+// Reorders per-triangle data (topology, material indices, face remap table) so that
+// triangle i of the new layout is triangle order[i] of the old layout.
+// 'order' is assumed to be a permutation of [0, mNbTriangles) — TODO confirm against callers.
+void TriangleMeshBuilder::remapTopology(const PxU32* order)
+{
+ if(!mMeshData.mNbTriangles)
+ return;
+
+ // Remap one array at a time to limit memory usage
+
+ Gu::TriangleT<PxU32>* newTopo = reinterpret_cast<Gu::TriangleT<PxU32>*>(PX_ALLOC(mMeshData.mNbTriangles * sizeof(Gu::TriangleT<PxU32>), "Gu::TriangleT<PxU32>"));
+ for(PxU32 i=0;i<mMeshData.mNbTriangles;i++)
+ newTopo[i] = reinterpret_cast<Gu::TriangleT<PxU32>*>(mMeshData.mTriangles)[order[i]];
+ PX_FREE_AND_RESET(mMeshData.mTriangles);
+ mMeshData.mTriangles = newTopo;
+
+ // per-triangle material table, if present
+ if(mMeshData.mMaterialIndices)
+ {
+ PxMaterialTableIndex* newMat = PX_NEW(PxMaterialTableIndex)[mMeshData.mNbTriangles];
+ for(PxU32 i=0;i<mMeshData.mNbTriangles;i++)
+ newMat[i] = mMeshData.mMaterialIndices[order[i]];
+ PX_DELETE_POD(mMeshData.mMaterialIndices);
+ mMeshData.mMaterialIndices = newMat;
+ }
+
+ // the remap table is kept unless suppressed; GPU data builds always need it
+ if(!mParams.suppressTriangleMeshRemapTable || mParams.buildGPUData)
+ {
+ PxU32* newMap = PX_NEW(PxU32)[mMeshData.mNbTriangles];
+ for(PxU32 i=0;i<mMeshData.mNbTriangles;i++)
+ newMap[i] = mMeshData.mFaceRemap ? mMeshData.mFaceRemap[order[i]] : order[i]; // compose with any existing remap
+ PX_DELETE_POD(mMeshData.mFaceRemap);
+ mMeshData.mFaceRemap = newMap;
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+// Welds/cleans the mesh via MeshCleaner, rebuilding vertices, topology, the material
+// table and the face remap table from the cleaner's output.
+// validate: when true, no data is expected to change — returns false if cleaning
+// would have removed any vertex or triangle.
+// condition [out, optional]: set to eLARGE_TRIANGLE if oversized triangles are found.
+// Returns false if the mesh is degenerate (no triangles survive) or validation fails.
+bool TriangleMeshBuilder::cleanMesh(bool validate, PxTriangleMeshCookingResult::Enum* condition)
+{
+ PX_ASSERT(mMeshData.mFaceRemap == NULL);
+
+ PxF32 meshWeldTolerance = 0.0f;
+ if(mParams.meshPreprocessParams & PxMeshPreprocessingFlag::eWELD_VERTICES)
+ {
+ if(mParams.meshWeldTolerance == 0.f)
+ {
+ // NOTE(review): warning text is awkward — it fires when welding is REQUESTED with a zero tolerance
+ Ps::getFoundation().error(PxErrorCode::eDEBUG_WARNING, __FILE__, __LINE__, "TriangleMesh: Enable mesh welding with 0 weld tolerance!");
+ }
+ else
+ {
+ meshWeldTolerance = mParams.meshWeldTolerance;
+ }
+ }
+ MeshCleaner cleaner(mMeshData.mNbVertices, mMeshData.mVertices, mMeshData.mNbTriangles, reinterpret_cast<const PxU32*>(mMeshData.mTriangles), meshWeldTolerance);
+ if(!cleaner.mNbTris)
+ return false;
+
+ if(validate)
+ {
+ // if we do only validate, we check if cleaning did not remove any verts or triangles.
+ // such a mesh can be then directly used for cooking without clean flag
+ if((cleaner.mNbVerts != mMeshData.mNbVertices) || (cleaner.mNbTris != mMeshData.mNbTriangles))
+ {
+ return false;
+ }
+ }
+
+ // PT: deal with the remap table
+ {
+ // PT: TODO: optimize this
+ if(cleaner.mRemap)
+ {
+ const PxU32 newNbTris = cleaner.mNbTris;
+
+ // Remap material array
+ if(mMeshData.mMaterialIndices)
+ {
+ PxMaterialTableIndex* tmp = PX_NEW(PxMaterialTableIndex)[newNbTris];
+ for(PxU32 i=0;i<newNbTris;i++)
+ tmp[i] = mMeshData.mMaterialIndices[cleaner.mRemap[i]];
+
+ PX_DELETE_POD(mMeshData.mMaterialIndices);
+ mMeshData.mMaterialIndices = tmp;
+ }
+
+ // remap table is kept unless suppressed; GPU data builds always need it
+ if (!mParams.suppressTriangleMeshRemapTable || mParams.buildGPUData)
+ {
+ mMeshData.mFaceRemap = PX_NEW(PxU32)[newNbTris];
+ PxMemCopy(mMeshData.mFaceRemap, cleaner.mRemap, newNbTris*sizeof(PxU32));
+ }
+ }
+ }
+
+ // PT: deal with geometry
+ {
+ if(mMeshData.mNbVertices!=cleaner.mNbVerts)
+ {
+ PX_FREE_AND_RESET(mMeshData.mVertices);
+ mMeshData.allocateVertices(cleaner.mNbVerts);
+ }
+ PxMemCopy(mMeshData.mVertices, cleaner.mVerts, mMeshData.mNbVertices*sizeof(PxVec3));
+ }
+
+ // PT: deal with topology
+ {
+ PX_ASSERT(!(mMeshData.mFlags & PxTriangleMeshFlag::e16_BIT_INDICES));
+ if(mMeshData.mNbTriangles!=cleaner.mNbTris)
+ {
+ PX_FREE_AND_RESET(mMeshData.mTriangles);
+ mMeshData.allocateTriangles(cleaner.mNbTris, true);
+ }
+
+ // flag triangles whose edge length exceeds 500 simulation-scale units (squared compare)
+ const float testLength = 500.0f*500.0f*mParams.scale.length*mParams.scale.length;
+ bool bigTriangle = false;
+ const PxVec3* v = mMeshData.mVertices;
+ for(PxU32 i=0;i<mMeshData.mNbTriangles;i++)
+ {
+ const PxU32 vref0 = cleaner.mIndices[i*3+0];
+ const PxU32 vref1 = cleaner.mIndices[i*3+1];
+ const PxU32 vref2 = cleaner.mIndices[i*3+2];
+ PX_ASSERT(vref0!=vref1 && vref0!=vref2 && vref1!=vref2);
+
+ reinterpret_cast<Gu::TriangleT<PxU32>*>(mMeshData.mTriangles)[i].v[0] = vref0;
+ reinterpret_cast<Gu::TriangleT<PxU32>*>(mMeshData.mTriangles)[i].v[1] = vref1;
+ reinterpret_cast<Gu::TriangleT<PxU32>*>(mMeshData.mTriangles)[i].v[2] = vref2;
+
+ if( (v[vref0] - v[vref1]).magnitudeSquared() >= testLength
+ || (v[vref1] - v[vref2]).magnitudeSquared() >= testLength
+ || (v[vref2] - v[vref0]).magnitudeSquared() >= testLength
+ )
+ bigTriangle = true;
+ }
+ if(bigTriangle)
+ {
+ if(condition)
+ *condition = PxTriangleMeshCookingResult::eLARGE_TRIANGLE;
+ Ps::getFoundation().error(PxErrorCode::eDEBUG_WARNING, __FILE__, __LINE__, "TriangleMesh: triangles are too big, reduce their size to increase simulation stability!");
+ }
+ }
+
+ return true;
+}
+
+// Computes the per-triangle active-edge flags (mExtraTrigData, used for convex edge
+// filtering at runtime) and, optionally, the triangle adjacency table (mAdjacencies).
+// buildAdjacencies implies buildActiveEdges since adjacency extraction needs the edge list.
+void TriangleMeshBuilder::createSharedEdgeData(bool buildAdjacencies, bool buildActiveEdges)
+{
+ if(buildAdjacencies) // building edges is required if buildAdjacencies is requested
+ buildActiveEdges = true;
+
+ PX_ASSERT(mMeshData.mExtraTrigData == NULL);
+ PX_ASSERT(mMeshData.mAdjacencies == NULL);
+
+ if(!buildActiveEdges)
+ return;
+
+ const PxU32 nTrigs = mMeshData.mNbTriangles;
+
+ mMeshData.mExtraTrigData = PX_NEW(PxU8)[nTrigs];
+ memset(mMeshData.mExtraTrigData, 0, sizeof(PxU8)*nTrigs);
+
+ const Gu::TriangleT<PxU32>* trigs = reinterpret_cast<const Gu::TriangleT<PxU32>*>(mMeshData.mTriangles);
+ if(0x40000000 <= nTrigs)
+ {
+ //mesh is too big for this algo, need to be able to express trig indices in 30 bits, and still have an index reserved for "unused":
+ Ps::getFoundation().error(PxErrorCode::eINVALID_PARAMETER, __FILE__, __LINE__, "TriangleMesh: mesh is too big for this algo!");
+ return;
+ }
+
+ createEdgeList();
+
+ // BUG FIX: edge list creation can fail and leave 'edgeList' NULL. The original code
+ // only null-checked it for the flag-replication loop below, then dereferenced it
+ // unconditionally in the adjacency pass and in the PX_DEBUG validation, crashing on
+ // NULL. Bail out early instead; behavior is unchanged whenever the list was built.
+ if(!edgeList)
+ return;
+
+ PX_ASSERT(edgeList->getNbFaces()==mMeshData.mNbTriangles);
+ if(edgeList->getNbFaces()==mMeshData.mNbTriangles)
+ {
+ for(PxU32 i=0;i<edgeList->getNbFaces();i++)
+ {
+ const Gu::EdgeTriangleData& ET = edgeList->getEdgeTriangle(i);
+ // Replicate flags
+ if(Gu::EdgeTriangleAC::HasActiveEdge01(ET)) mMeshData.mExtraTrigData[i] |= Gu::ETD_CONVEX_EDGE_01;
+ if(Gu::EdgeTriangleAC::HasActiveEdge12(ET)) mMeshData.mExtraTrigData[i] |= Gu::ETD_CONVEX_EDGE_12;
+ if(Gu::EdgeTriangleAC::HasActiveEdge20(ET)) mMeshData.mExtraTrigData[i] |= Gu::ETD_CONVEX_EDGE_20;
+ }
+ }
+
+ // fill the adjacencies
+ if(buildAdjacencies)
+ {
+ mMeshData.mAdjacencies = PX_NEW(PxU32)[nTrigs*3];
+ memset(mMeshData.mAdjacencies, 0xFFFFffff, sizeof(PxU32)*nTrigs*3); // 0xFFFFFFFF == no neighbor
+
+ PxU32 NbEdges = edgeList->getNbEdges();
+ const Gu::EdgeDescData* ED = edgeList->getEdgeToTriangles();
+ const Gu::EdgeData* Edges = edgeList->getEdges();
+ const PxU32* FBE = edgeList->getFacesByEdges();
+
+ while(NbEdges--)
+ {
+ // Get number of triangles sharing current edge
+ PxU32 Count = ED->Count;
+
+ if(Count > 1)
+ {
+ // only the first two faces sharing the edge are linked (manifold assumption)
+ PxU32 FaceIndex0 = FBE[ED->Offset+0];
+ PxU32 FaceIndex1 = FBE[ED->Offset+1];
+
+ const Gu::EdgeData& edgeData = *Edges;
+ const Gu::TriangleT<PxU32>& T0 = trigs[FaceIndex0];
+ const Gu::TriangleT<PxU32>& T1 = trigs[FaceIndex1];
+
+ PxU32 offset0 = T0.findEdgeCCW(edgeData.Ref0,edgeData.Ref1);
+ PxU32 offset1 = T1.findEdgeCCW(edgeData.Ref0,edgeData.Ref1);
+
+ mMeshData.setTriangleAdjacency(FaceIndex0, FaceIndex1, offset0);
+ mMeshData.setTriangleAdjacency(FaceIndex1, FaceIndex0, offset1);
+ }
+ ED++;
+ Edges++;
+ }
+ }
+
+#if PX_DEBUG
+ // verify the flags stored in mExtraTrigData agree with the edge list
+ for(PxU32 i=0;i<mMeshData.mNbTriangles;i++)
+ {
+ const Gu::TriangleT<PxU32>& T = trigs[i];
+ PX_UNUSED(T);
+ const Gu::EdgeTriangleData& ET = edgeList->getEdgeTriangle(i);
+ PX_ASSERT((Gu::EdgeTriangleAC::HasActiveEdge01(ET) && (mMeshData.mExtraTrigData[i] & Gu::ETD_CONVEX_EDGE_01)) || (!Gu::EdgeTriangleAC::HasActiveEdge01(ET) && !(mMeshData.mExtraTrigData[i] & Gu::ETD_CONVEX_EDGE_01)));
+ PX_ASSERT((Gu::EdgeTriangleAC::HasActiveEdge12(ET) && (mMeshData.mExtraTrigData[i] & Gu::ETD_CONVEX_EDGE_12)) || (!Gu::EdgeTriangleAC::HasActiveEdge12(ET) && !(mMeshData.mExtraTrigData[i] & Gu::ETD_CONVEX_EDGE_12)));
+ PX_ASSERT((Gu::EdgeTriangleAC::HasActiveEdge20(ET) && (mMeshData.mExtraTrigData[i] & Gu::ETD_CONVEX_EDGE_20)) || (!Gu::EdgeTriangleAC::HasActiveEdge20(ET) && !(mMeshData.mExtraTrigData[i] & Gu::ETD_CONVEX_EDGE_20)));
+ }
+#endif
+ return;
+}
+
+namespace GrbTrimeshCookerHelper
+{
+
+// One directed half of a (vertex, adjacent-vertex) edge pair, collected while
+// scanning convex/boundary triangle edges in findSharpVertices.
+struct SortedNeighbor
+{
+ PxU32 v, a; // vertex and adjacent vertex
+ bool boundary; // true if the originating edge was a mesh boundary edge
+
+ SortedNeighbor(PxU32 v_, PxU32 a_, bool b_): v(v_), a(a_), boundary(b_) {}
+
+ // sort boundary edges to the front so that they are kept when duplicates are removed
+ // (lexicographic on (v, a); ties broken with boundary==true first)
+ bool operator<(const SortedNeighbor& b) const
+ {
+ return (v<b.v || (v == b.v && a<b.a) || (v == b.v && a == b.a && boundary && !b.boundary));
+ }
+};
+
+// Range [start, start+length) into the deduplicated pair list describing one vertex's
+// kept neighbor edges; the default (length == 0) means the vertex was not marked sharp.
+struct SharpEdgeRange
+{
+ PxU32 start, length;
+ SharpEdgeRange(): start(0), length(0) {}
+ SharpEdgeRange(PxU32 s, PxU32 l): start(s), length(l) {}
+};
+
+// Bidirectional map between sparse mesh-vertex indices and a dense local index space.
+// NOTE(review): currently only referenced by the disabled (#if 0) build path below.
+class LocalIndexer
+{
+public:
+ bool insert(PxU32 meshIndex) // returns true if this is a new index
+ {
+ bool isNew = mMeshToLocal.insert(meshIndex, mLocalToMesh.size());
+ if(isNew)
+ mLocalToMesh.pushBack(meshIndex);
+ return isNew;
+ }
+
+ // dense local index -> original mesh index; localIndex must have been assigned by insert()
+ PxU32 meshIndex(PxU32 localIndex)
+ {
+ PX_ASSERT(localIndex<mLocalToMesh.size());
+ return mLocalToMesh[localIndex];
+ }
+
+ // original mesh index -> dense local index
+ // NOTE(review): relies on Ps::HashMap::operator[] semantics for a present key —
+ // confirm it does not default-insert when the key is missing (the assert above is debug-only)
+ PxU32 localIndex(PxU32 meshIndex)
+ {
+ PX_ASSERT(mMeshToLocal.find(meshIndex));
+ return mMeshToLocal[meshIndex];
+ }
+
+ bool contains(PxU32 meshIndex)
+ {
+ return mMeshToLocal.find(meshIndex) != 0;
+ }
+
+ PxU32 size()
+ {
+ return mLocalToMesh.size();
+ }
+
+private:
+ Ps::Array<PxU32> mLocalToMesh; // dense local index -> mesh index
+ Ps::HashMap<PxU32, PxU32> mMeshToLocal; // mesh index -> dense local index
+};
+
+#include <stdio.h>
+
+
+// Collects, per vertex, the neighbors reachable over convex or boundary edges.
+// Output: pairList holds the sorted, deduplicated neighbor pairs; edgeRanges[v]
+// points at vertex v's slice of pairList (empty range if v is not "sharp").
+// A vertex is marked sharp if it touches a boundary edge or at least 3 convex edges.
+void findSharpVertices(
+ Ps::Array<SortedNeighbor>& pairList,
+ Ps::Array<SharpEdgeRange>& edgeRanges,
+ /*const Ps::Array<Triangle>& triangles,*/
+ const uint3* triIndices,
+ const uint4* triAdjacencies,
+ PxU32 nbTris,
+ PxU32 nbVerts
+ )
+{
+ // sort the edges which are sharp or boundary
+ // (each qualifying edge contributes both directed half-pairs)
+ for(PxU32 i=0;i<nbTris;i++)
+ {
+ const uint4& triAdj = triAdjacencies[i];
+ const uint3& triIdx = triIndices[i];
+
+ if (!isEdgeNonconvex(triAdj.x))
+ {
+ pairList.pushBack(SortedNeighbor(triIdx.x, triIdx.y, triAdj.x == BOUNDARY));
+ pairList.pushBack(SortedNeighbor(triIdx.y, triIdx.x, triAdj.x == BOUNDARY));
+ }
+
+ if (!isEdgeNonconvex(triAdj.y))
+ {
+ pairList.pushBack(SortedNeighbor(triIdx.y, triIdx.z, triAdj.y == BOUNDARY));
+ pairList.pushBack(SortedNeighbor(triIdx.z, triIdx.y, triAdj.y == BOUNDARY));
+ }
+
+ if (!isEdgeNonconvex(triAdj.z))
+ {
+ pairList.pushBack(SortedNeighbor(triIdx.z, triIdx.x, triAdj.z == BOUNDARY));
+ pairList.pushBack(SortedNeighbor(triIdx.x, triIdx.z, triAdj.z == BOUNDARY));
+ }
+ }
+
+ Ps::sort(pairList.begin(), pairList.size());
+
+ // remove duplicates - note that boundary edges are sorted earlier, so we keep them
+ PxU32 unique = 1;
+ for(PxU32 i=1;i<pairList.size();i++)
+ {
+ if(pairList[i].v != pairList[i-1].v || pairList[i].a != pairList[i-1].a)
+ pairList[unique++] = pairList[i];
+ }
+ pairList.resizeUninitialized(unique);
+
+ // a vertex is marked for sharp vertex processing if it has a boundary edge or at least three convex edges
+ // (p..u spans one vertex's run of pairs in the sorted list)
+ edgeRanges.resize(nbVerts);
+ for(PxU32 p = 0, u ; p<pairList.size(); p = u)
+ {
+ bool boundary = false;
+ for(u=p+1; u<pairList.size() && pairList[u].v == pairList[p].v; u++)
+ boundary |= pairList[u].boundary;
+ if(boundary || u-p>=3)
+ edgeRanges[pairList[p].v] = SharpEdgeRange(p, u-p);
+ }
+}
+
+#if 0
+// NOTE(review): DEAD CODE — this overload lives in the '#if 0' branch above and is not
+// compiled. It is the legacy variant that also populated a LocalIndexer vertex map.
+PxU32 buildVertexConnectionNew_p1(
+ Ps::Array<SortedNeighbor> & pairList,
+ Ps::Array<SharpEdgeRange> & edgeRanges,
+ LocalIndexer & vertexMap,
+
+ const uint4 * triIndices,
+ const uint4 * triAdjacencies,
+
+ PxU32 nbTris,
+ PxU32 nbVerts
+ )
+{
+ findSharpVertices(
+ pairList,
+ edgeRanges,
+ triIndices,
+ triAdjacencies,
+ nbTris,
+ nbVerts
+ );
+
+ // add all the original triangles and vertices and record how big the core is
+ for(PxU32 i=0; i<nbTris; i++)
+ {
+ const uint4 & triIdx = triIndices[i];
+ vertexMap.insert(triIdx.x);
+ vertexMap.insert(triIdx.y);
+ vertexMap.insert(triIdx.z);
+ }
+ PxU32 nbCoreVerts = vertexMap.size();
+
+ PX_ASSERT(nbCoreVerts == nbVerts);
+
+ // add adjacent triangles
+ for(PxU32 i=0;i<nbTris;i++)
+ {
+ const uint4 & triAdj = triAdjacencies[i];
+
+// true iff the adjacency slot references a real triangle (neither boundary nor nonconvex-flagged)
+#define IS_TRI(triAdjIdx) (( (triAdjIdx) != BOUNDARY ) && ( !((triAdjIdx) & NONCONVEX_FLAG) ))
+
+ if(IS_TRI(triAdj.x))
+ {
+ const uint4 & triIdx = triIndices[triAdj.x];
+ vertexMap.insert(triIdx.x);
+ vertexMap.insert(triIdx.y);
+ vertexMap.insert(triIdx.z);
+ }
+
+ if(IS_TRI(triAdj.y))
+ {
+ const uint4 & triIdx = triIndices[triAdj.y];
+ vertexMap.insert(triIdx.x);
+ vertexMap.insert(triIdx.y);
+ vertexMap.insert(triIdx.z);
+ }
+
+ if(IS_TRI(triAdj.z))
+ {
+ const uint4 & triIdx = triIndices[triAdj.z];
+ vertexMap.insert(triIdx.x);
+ vertexMap.insert(triIdx.y);
+ vertexMap.insert(triIdx.z);
+
+ }
+
+#undef IS_TRI
+ }
+
+ // add the neighbors of the sharp vertices
+ PxU32 nbNeighbors = 0;
+ for(PxU32 i=0;i<nbCoreVerts;i++)
+ {
+ PxU32 meshIndex = vertexMap.meshIndex(i);
+ const SharpEdgeRange& er = edgeRanges[meshIndex];
+ for(PxU32 j = 0;j<er.length;j++)
+ {
+ PX_ASSERT(pairList[er.start+j].v == meshIndex);
+ vertexMap.insert(pairList[er.start + j].a);
+ }
+ nbNeighbors += er.length;
+ }
+
+ return nbNeighbors;
+}
+
+// NOTE(review): DEAD CODE — this overload lives in the '#if 0' branch above and is not
+// compiled. Legacy variant that wrote adjacency data through a LocalIndexer mapping.
+void buildVertexConnectionNew_p2(
+ PxU32 * adjVertStart,
+ PxU32 * vertValency,
+ PxU32 * adjVertices,
+
+ Ps::Array<SortedNeighbor>& pairList,
+ Ps::Array<SharpEdgeRange>& edgeRanges,
+ LocalIndexer & vertexMap,
+
+ const uint4 * /*triIndices*/,
+ const uint4 * /*triAdjacencies*/,
+
+ PxU32 /*nbTris*/,
+ PxU32 nbVerts,
+ PxU32 /*nbNeighbors*/
+ )
+{
+ PxU32 n = 0;
+ for(PxU32 i=0;i<nbVerts;i++)
+ {
+ PxU32 meshIdx = vertexMap.meshIndex(i);
+ const SharpEdgeRange& er = edgeRanges[vertexMap.meshIndex(i)];
+ adjVertStart[meshIdx] = n;
+ vertValency[meshIdx] = er.length;
+ for(PxU32 j = 0;j<er.length;j++)
+ adjVertices[n++] = pairList[er.start+j].a;
+ }
+}
+#else
+
+
+// Pass 1 of sharp-vertex adjacency extraction: classifies sharp/boundary vertices
+// (via findSharpVertices) and returns the total neighbor count, i.e. the size of
+// the packed adjacency array the caller must allocate before calling pass 2.
+PxU32 buildVertexConnectionNew_p1(
+ Ps::Array<SortedNeighbor> & pairList,
+ Ps::Array<SharpEdgeRange> & edgeRanges,
+
+ const uint3* triIndices,
+ const uint4 * triAdjacencies,
+
+ PxU32 nbTris,
+ PxU32 nbVerts
+ )
+{
+ findSharpVertices(pairList, edgeRanges, triIndices, triAdjacencies, nbTris, nbVerts);
+
+ // sum the per-vertex range lengths (total valency over all vertices)
+ PxU32 total = 0;
+ for (PxU32 v = 0; v < nbVerts; v++)
+ total += edgeRanges[v].length;
+
+ return total;
+}
+
+// Pass 2 of sharp-vertex adjacency extraction: flattens the per-vertex neighbor
+// ranges into the packed adjVertices array, recording each vertex's start offset
+// (adjVertStart) and valency (vertValency). Arrays must be sized per pass 1.
+void buildVertexConnectionNew_p2(
+ PxU32 * adjVertStart,
+ PxU32 * vertValency,
+ PxU32 * adjVertices,
+
+ Ps::Array<SortedNeighbor>& pairList,
+ Ps::Array<SharpEdgeRange>& edgeRanges,
+ PxU32 nbVerts
+ )
+{
+ PxU32 cursor = 0;
+ for (PxU32 v = 0; v < nbVerts; v++)
+ {
+ const SharpEdgeRange& range = edgeRanges[v];
+ adjVertStart[v] = cursor;
+ vertValency[v] = range.length;
+ const PxU32 rangeEnd = range.start + range.length;
+ for (PxU32 k = range.start; k < rangeEnd; k++)
+ adjVertices[cursor++] = pairList[k].a;
+ }
+}
+#endif
+
+} // namespace GrbTrimeshCookerHelper
+
+// Copies the current triangle indices (and CPU face remap table, if present) into the
+// GRB (GPU) buffers — presumably so the GPU keeps the pre-midphase ordering; confirm
+// against createGRBMidPhaseAndData, which later remaps mGRB_faceRemap.
+void TriangleMeshBuilder::recordTriangleIndices()
+{
+ if (mParams.buildGPUData)
+ {
+ PX_ASSERT(!(mMeshData.mFlags & PxTriangleMeshFlag::e16_BIT_INDICES));
+ PX_ASSERT(mMeshData.mGRB_triIndices);
+
+ // copy the original triangle indices into the GRB triangle index buffer
+ PxMemCopy(mMeshData.mGRB_triIndices, mMeshData.mTriangles, sizeof(IndTri32) *mMeshData.mNbTriangles);
+
+
+ if (mMeshData.mFaceRemap)
+ {
+ // mesh cleaning may have discarded triangles, so keep a GPU-side copy of the remap table
+ mMeshData.mGRB_faceRemap = PX_NEW(PxU32)[mMeshData.mNbTriangles];
+ PxMemCopy(mMeshData.mGRB_faceRemap, mMeshData.mFaceRemap, sizeof(PxU32)*mMeshData.mNbTriangles);
+ }
+
+ }
+}
+
+// Builds the GPU-side (GRB) mesh data: triangle adjacency table plus the packed
+// sharp-vertex connectivity arrays (valency / start offsets / adjacent vertices).
+void TriangleMeshBuilder::createGRBData()
+{
+
+ const PxU32 & numTris = mMeshData.mNbTriangles;
+ const PxU32 & numVerts = mMeshData.mNbVertices;
+
+ PX_ASSERT(!(mMeshData.mFlags & PxTriangleMeshFlag::e16_BIT_INDICES));
+
+
+ // Core: Mesh data
+ ///////////////////////////////////////////////////////////////////////////////////
+
+ // (by using adjacency info generated by physx cooker)
+ // scratch buffer for per-triangle normals used during adjacency construction
+ PxVec3 * tempNormalsPerTri_prealloc = reinterpret_cast<PxVec3 *>(PX_ALLOC(numTris * sizeof(PxVec3), PX_DEBUG_EXP("tempNormalsPerTri_prealloc")));
+
+ mMeshData.mGRB_triAdjacencies = PX_ALLOC(sizeof(uint4)*numTris, PX_DEBUG_EXP("GRB_triAdjacencies"));
+
+
+ buildAdjacencies(
+ reinterpret_cast<uint4 *>(mMeshData.mGRB_triAdjacencies),
+ tempNormalsPerTri_prealloc,
+ mMeshData.mVertices,
+ reinterpret_cast<uint3*>(mMeshData.mGRB_triIndices),
+ numTris
+ );
+
+
+ PX_FREE(tempNormalsPerTri_prealloc);
+
+ mMeshData.mGRB_vertValency = PX_NEW(PxU32)[numVerts];
+ mMeshData.mGRB_adjVertStart = PX_NEW(PxU32)[numVerts];
+
+
+ Ps::Array<GrbTrimeshCookerHelper::SortedNeighbor> pairsList;
+ Ps::Array<GrbTrimeshCookerHelper::SharpEdgeRange> edgeRanges;
+
+
+ // pass 1: returns the total adjacency count so the packed array can be sized
+ // (note: 'mGRB_meshAdjVerticiesTotal' spelling comes from the mesh-data struct)
+ mMeshData.mGRB_meshAdjVerticiesTotal = GrbTrimeshCookerHelper::buildVertexConnectionNew_p1(
+ pairsList,
+ edgeRanges,
+
+ reinterpret_cast<uint3*>(mMeshData.mGRB_triIndices),
+ reinterpret_cast<uint4 *>(mMeshData.mGRB_triAdjacencies),
+ numTris,
+ numVerts
+ );
+
+
+
+ // pass 2: fill the packed adjacency arrays
+ mMeshData.mGRB_adjVertices = PX_NEW(PxU32)[mMeshData.mGRB_meshAdjVerticiesTotal];
+ GrbTrimeshCookerHelper::buildVertexConnectionNew_p2(
+ mMeshData.mGRB_adjVertStart,
+ mMeshData.mGRB_vertValency,
+ mMeshData.mGRB_adjVertices,
+ pairsList,
+ edgeRanges,
+ numVerts
+ );
+
+
+}
+
+// Builds the GPU midphase (BV32 tree) and GRB connectivity data when buildGPUData
+// is set, then rebases mGRB_faceRemap so it maps GPU triangle order to CPU runtime
+// triangle order (instead of to the original, pre-cooking triangle order).
+// 'originalTriangleCount' is the triangle count of the user's input mesh, before
+// any triangles were discarded by mesh cleaning.
+void TriangleMeshBuilder::createGRBMidPhaseAndData(const PxU32 originalTriangleCount)
+{
+	if (mParams.buildGPUData)
+	{
+
+		PX_ASSERT(!(mMeshData.mFlags & PxTriangleMeshFlag::e16_BIT_INDICES));
+
+		BV32Tree* bv32Tree = PX_NEW(BV32Tree);
+		mMeshData.mGRB_BV32Tree = bv32Tree;
+
+		BV32TriangleMeshBuilder::createMidPhaseStructure(mParams, mMeshData, *bv32Tree);
+
+		createGRBData();
+
+		// Invert the CPU face remap: originalToRemap[original index] = CPU runtime index.
+		PxU32* originalToRemap = PX_NEW(PxU32)[originalTriangleCount];
+
+		// buildGPUData forces the face remap table to exist (see createMidPhaseStructure).
+		PX_ASSERT(mMeshData.mFaceRemap);
+
+
+		for (PxU32 i = 0; i < mMeshData.mNbTriangles; ++i)
+		{
+			const PxU32 index = mMeshData.mFaceRemap[i];
+			PX_ASSERT(index < originalTriangleCount);
+			originalToRemap[index] = i;
+		}
+
+
+		// Compose the two tables: GPU index -> original index -> CPU runtime index.
+		for (PxU32 i = 0; i < mMeshData.mNbTriangles; ++i)
+		{
+			const PxU32 index = mMeshData.mGRB_faceRemap[i];
+			mMeshData.mGRB_faceRemap[i] = originalToRemap[index];
+		}
+
+#if BV32_VALIDATE
+		IndTri32* grbTriIndices = reinterpret_cast<IndTri32*>(mMeshData.mGRB_triIndices);
+		IndTri32* cpuTriIndices = reinterpret_cast<IndTri32*>(mMeshData.mTriangles);
+		// Validate that the rebased remap maps each GPU triangle to the identical CPU triangle.
+		for (PxU32 i = 0; i < mMeshData.mNbTriangles; ++i)
+		{
+			PX_ASSERT(grbTriIndices[i].mRef[0] == cpuTriIndices[mMeshData.mGRB_faceRemap[i]].mRef[0]);
+			PX_ASSERT(grbTriIndices[i].mRef[1] == cpuTriIndices[mMeshData.mGRB_faceRemap[i]].mRef[1]);
+			PX_ASSERT(grbTriIndices[i].mRef[2] == cpuTriIndices[mMeshData.mGRB_faceRemap[i]].mRef[2]);
+		}
+#endif
+
+		// PX_NEW never returns NULL here, and PX_DELETE_POD is null-safe anyway,
+		// so no guard is needed around the release.
+		PX_DELETE_POD(originalToRemap);
+
+	}
+}
+
+// Builds the mesh edge list (face->edge and edge->face tables) from the current
+// triangle data, selecting the 16- or 32-bit face pointer to match the mesh's
+// index format. On failure the partially built object is destroyed and 'edgeList'
+// is left NULL.
+void TriangleMeshBuilder::createEdgeList()
+{
+	Gu::EDGELISTCREATE edgeListDesc;
+	edgeListDesc.NbFaces = mMeshData.mNbTriangles;
+	const bool use16bitIndices = mMeshData.has16BitIndices();
+	// Exactly one of the two face pointers is set, matching the index width.
+	edgeListDesc.WFaces = use16bitIndices ? reinterpret_cast<PxU16*>(mMeshData.mTriangles) : NULL;
+	edgeListDesc.DFaces = use16bitIndices ? NULL : reinterpret_cast<PxU32*>(mMeshData.mTriangles);
+	edgeListDesc.FacesToEdges = true;
+	edgeListDesc.EdgesToFaces = true;
+	edgeListDesc.Verts = mMeshData.mVertices;
+	edgeList = PX_NEW(Gu::EdgeListBuilder);
+	if(!edgeList->init(edgeListDesc))
+	{
+		PX_DELETE(edgeList);
+		edgeList = NULL;
+	}
+}
+
+// Destroys the edge list built by createEdgeList() and resets the pointer to NULL.
+void TriangleMeshBuilder::releaseEdgeList()
+{
+	PX_DELETE_AND_RESET(edgeList);
+}
+
+//
+// When suppressTriangleMeshRemapTable is true, the face remap table is not created. This saves a significant amount of memory,
+// but the SDK will not be able to provide information about which mesh triangle is hit in collision, sweep or raycast hits.
+//
+// The cooking sequence itself is implemented in loadFromDesc() below.
+//
+// Main cooking entry point: validates the descriptor, imports (and optionally
+// cleans/validates) the input mesh, then builds the midphase, local bounds,
+// shared-edge data and - when requested - the GPU (GRB) data.
+// Returns false on invalid input or failed import; 'condition' receives a
+// detailed cooking result code where applicable.
+bool TriangleMeshBuilder::loadFromDesc(const PxTriangleMeshDesc& _desc, PxTriangleMeshCookingResult::Enum* condition, bool validateMesh)
+{
+	// Captured before cleaning so the GRB remap can be rebased against the
+	// original (user-supplied) triangle count later on.
+	const PxU32 originalTriangleCount = _desc.triangles.count;
+	if(!_desc.isValid())
+	{
+		Ps::getFoundation().error(PxErrorCode::eINVALID_PARAMETER, __FILE__, __LINE__, "TriangleMesh::loadFromDesc: desc.isValid() failed!");
+		return false;
+	}
+
+	// verify the mesh params
+	if(!mParams.midphaseDesc.isValid())
+	{
+		Ps::getFoundation().error(PxErrorCode::eINVALID_PARAMETER, __FILE__, __LINE__, "TriangleMesh::loadFromDesc: mParams.midphaseDesc.isValid() failed!");
+		return false;
+	}
+
+	// Create a local copy that we can modify
+	PxTriangleMeshDesc desc = _desc;
+
+	// Save simple params
+	{
+		// Handle implicit topology
+		PxU32* topology = NULL;
+		if(!desc.triangles.data)
+		{
+			// We'll create 32-bit indices
+			desc.flags &= ~PxMeshFlag::e16_BIT_INDICES;
+			desc.triangles.stride = sizeof(PxU32)*3;
+
+			{
+				// Non-indexed mesh => create implicit topology
+				desc.triangles.count = desc.points.count/3;
+				// Create default implicit topology: consecutive vertex triplets form triangles.
+				topology = PX_NEW_TEMP(PxU32)[desc.points.count];
+				for(PxU32 i=0;i<desc.points.count;i++)
+					topology[i] = i;
+				desc.triangles.data = topology;
+			}
+		}
+		// Continue as usual using our new descriptor
+
+		// Convert and clean the input mesh
+		if (!importMesh(desc, mParams, condition, validateMesh))
+			return false;
+
+		// Cleanup if needed (importMesh copied the data, so the temp topology can go)
+		PX_DELETE_POD(topology);
+	}
+
+
+	//copy the original triangle indices to grb triangle indices if buildGRBData is true
+	recordTriangleIndices();
+
+	// CPU midphase must exist before bounds/edge/GRB steps below.
+	createMidPhaseStructure();
+
+	// Compute local bounds
+	computeLocalBounds(); // AP scaffold: local bounds are already computed in builder.createRTree efficiently with SIMD
+
+	createSharedEdgeData(mParams.buildTriangleAdjacencies, !(mParams.meshPreprocessParams & PxMeshPreprocessingFlag::eDISABLE_ACTIVE_EDGES_PRECOMPUTE));
+
+	createGRBMidPhaseAndData(originalTriangleCount);
+
+
+	return true;
+}
+
+/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+// Serializes the cooked triangle mesh to 'stream'. The write order here defines the
+// on-disk format and must match the runtime loader exactly - do not reorder writes.
+// Index buffers are narrowed to 8/16 bits when the largest vertex index allows it,
+// unless eFORCE_32BIT_INDICES is set. 'platformMismatch' requests byte-swapping for
+// cross-endian targets. Returns false only if the header fails to write.
+bool TriangleMeshBuilder::save(PxOutputStream& stream, bool platformMismatch, const PxCookingParams& params) const
+{
+	// Export header
+	if(!writeHeader('M', 'E', 'S', 'H', PX_MESH_VERSION, platformMismatch, stream))
+		return false;
+
+	// Export midphase ID
+	writeDword(getMidphaseID(), platformMismatch, stream);
+
+	// Export serialization flags: record which optional sections follow.
+	PxU32 serialFlags = 0;
+	if(mMeshData.mMaterialIndices)	serialFlags |= Gu::IMSF_MATERIALS;
+	if(mMeshData.mFaceRemap)		serialFlags |= Gu::IMSF_FACE_REMAP;
+	if(mMeshData.mAdjacencies)		serialFlags |= Gu::IMSF_ADJACENCIES;
+	if (params.buildGPUData)		serialFlags |= Gu::IMSF_GRB_DATA;
+	// Compute serialization flags for indices: find the largest vertex index used.
+	PxU32 maxIndex=0;
+	const Gu::TriangleT<PxU32>* tris = reinterpret_cast<const Gu::TriangleT<PxU32>*>(mMeshData.mTriangles);
+	for(PxU32 i=0;i<mMeshData.mNbTriangles;i++)
+	{
+		if(tris[i].v[0]>maxIndex)	maxIndex = tris[i].v[0];
+		if(tris[i].v[1]>maxIndex)	maxIndex = tris[i].v[1];
+		if(tris[i].v[2]>maxIndex)	maxIndex = tris[i].v[2];
+	}
+
+	// Narrow indices to 8 or 16 bits when possible to shrink the stream.
+	bool force32 = (params.meshPreprocessParams & PxMeshPreprocessingFlag::eFORCE_32BIT_INDICES);
+	if (maxIndex <= 0xFFFF && !force32)
+		serialFlags |= (maxIndex <= 0xFF ? Gu::IMSF_8BIT_INDICES : Gu::IMSF_16BIT_INDICES);
+	writeDword(serialFlags, platformMismatch, stream);
+
+	// Export mesh
+	writeDword(mMeshData.mNbVertices, platformMismatch, stream);
+	writeDword(mMeshData.mNbTriangles, platformMismatch, stream);
+	writeFloatBuffer(&mMeshData.mVertices->x, mMeshData.mNbVertices*3, platformMismatch, stream);
+	if(serialFlags & Gu::IMSF_8BIT_INDICES)
+	{
+		// Triangles are stored contiguously, so tris->v walks all indices.
+		const PxU32* indices = tris->v;
+		for(PxU32 i=0;i<mMeshData.mNbTriangles*3;i++)
+		{
+			PxI8 data = PxI8(indices[i]);
+			stream.write(&data, sizeof(PxU8));
+		}
+	}
+	else if(serialFlags & Gu::IMSF_16BIT_INDICES)
+	{
+		const PxU32* indices = tris->v;
+		for(PxU32 i=0;i<mMeshData.mNbTriangles*3;i++)
+			writeWord(Ps::to16(indices[i]), platformMismatch, stream);
+	}
+	else
+		writeIntBuffer(tris->v, mMeshData.mNbTriangles*3, platformMismatch, stream);
+
+	if(mMeshData.mMaterialIndices)
+		writeWordBuffer(mMeshData.mMaterialIndices, mMeshData.mNbTriangles, platformMismatch, stream);
+
+	if(mMeshData.mFaceRemap)
+	{
+		// The remap is stored with the narrowest index width its max value allows.
+		PxU32 maxId = computeMaxIndex(mMeshData.mFaceRemap, mMeshData.mNbTriangles);
+		writeDword(maxId, platformMismatch, stream);
+		storeIndices(maxId, mMeshData.mNbTriangles, mMeshData.mFaceRemap, stream, platformMismatch);
+//		writeIntBuffer(mMeshData.mFaceRemap, mMeshData.mNbTriangles, platformMismatch, stream);
+	}
+
+	if(mMeshData.mAdjacencies)
+		writeIntBuffer(mMeshData.mAdjacencies, mMeshData.mNbTriangles*3, platformMismatch, stream);
+
+	// Export midphase structure
+	saveMidPhaseStructure(stream);
+
+
+	// Export local bounds
+	writeFloat(mMeshData.mGeomEpsilon, platformMismatch, stream);
+
+	writeFloat(mMeshData.mAABB.minimum.x, platformMismatch, stream);
+	writeFloat(mMeshData.mAABB.minimum.y, platformMismatch, stream);
+	writeFloat(mMeshData.mAABB.minimum.z, platformMismatch, stream);
+	writeFloat(mMeshData.mAABB.maximum.x, platformMismatch, stream);
+	writeFloat(mMeshData.mAABB.maximum.y, platformMismatch, stream);
+	writeFloat(mMeshData.mAABB.maximum.z, platformMismatch, stream);
+
+	if(mMeshData.mExtraTrigData)
+	{
+		writeDword(mMeshData.mNbTriangles, platformMismatch, stream);
+		// No need to convert those bytes
+		stream.write(mMeshData.mExtraTrigData, mMeshData.mNbTriangles*sizeof(PxU8));
+	}
+	else
+		writeDword(0, platformMismatch, stream);
+
+	// GRB write -----------------------------------------------------------------
+	if (params.buildGPUData)
+	{
+		writeDword(mMeshData.mGRB_meshAdjVerticiesTotal, platformMismatch, stream);
+
+		// GRB indices use the same 8/16/32-bit narrowing decided above for CPU indices.
+		const PxU32* indices = reinterpret_cast<PxU32*>(mMeshData.mGRB_triIndices);
+		if (serialFlags & Gu::IMSF_8BIT_INDICES)
+		{
+			for (PxU32 i = 0; i<mMeshData.mNbTriangles * 3; i++)
+			{
+				PxI8 data = PxI8(indices[i]);
+				stream.write(&data, sizeof(PxU8));
+			}
+		}
+		else if (serialFlags & Gu::IMSF_16BIT_INDICES)
+		{
+			for (PxU32 i = 0; i<mMeshData.mNbTriangles * 3; i++)
+				writeWord(Ps::to16(indices[i]), platformMismatch, stream);
+		}
+		else
+			writeIntBuffer(indices, mMeshData.mNbTriangles * 3, platformMismatch, stream);
+
+
+		//writeIntBuffer(reinterpret_cast<PxU32*>(mMeshData.mGRB_triIndices), , mMeshData.mNbTriangles*3, platformMismatch, stream);
+
+		//writeIntBuffer(reinterpret_cast<PxU32 *>(mMeshData.mGRB_triIndices), mMeshData.mNbTriangles*4, platformMismatch, stream);
+
+		// Adjacency is one uint4 per triangle, hence the *4 element count.
+		writeIntBuffer(reinterpret_cast<PxU32 *>(mMeshData.mGRB_triAdjacencies), mMeshData.mNbTriangles*4, platformMismatch, stream);
+		writeIntBuffer(mMeshData.mGRB_vertValency, mMeshData.mNbVertices, platformMismatch, stream);
+		writeIntBuffer(mMeshData.mGRB_adjVertStart, mMeshData.mNbVertices, platformMismatch, stream);
+		writeIntBuffer(mMeshData.mGRB_adjVertices, mMeshData.mGRB_meshAdjVerticiesTotal, platformMismatch, stream);
+		writeIntBuffer(mMeshData.mGRB_faceRemap, mMeshData.mNbTriangles, platformMismatch, stream);
+
+		//Export GPU midphase structure
+		BV32Tree* bv32Tree = reinterpret_cast<BV32Tree*>(mMeshData.mGRB_BV32Tree);
+		BV32TriangleMeshBuilder::saveMidPhaseStructure(bv32Tree, stream);
+	}
+
+	// End of GRB write ----------------------------------------------------------
+
+	return true;
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+#if PX_VC
+#pragma warning(push)
+#pragma warning(disable:4996) // permitting use of gatherStrided until we have a replacement.
+#endif
+
+// Copies the user's mesh descriptor into builder-owned storage (removing strides,
+// widening 16-bit indices to 32-bit, applying eFLIPNORMALS winding flips), then
+// cleans or validates the mesh. When cleaning is disabled and a remap table is
+// requested, an identity face remap is created instead.
+// Returns false on corrupted vertex data (checked builds) or failed cleaning.
+bool TriangleMeshBuilder::importMesh(const PxTriangleMeshDesc& desc,const PxCookingParams& params,PxTriangleMeshCookingResult::Enum* condition, bool validate)
+{
+	//convert and clean the input mesh
+	//this is where the mesh data gets copied from user mem to our mem
+
+	PxVec3* verts = mMeshData.allocateVertices(desc.points.count);
+	Gu::TriangleT<PxU32>* tris = reinterpret_cast<Gu::TriangleT<PxU32>*>(mMeshData.allocateTriangles(desc.triangles.count, true, PxU32(params.buildGPUData)));
+
+	//copy, and compact to get rid of strides:
+	Cooking::gatherStrided(desc.points.data, verts, mMeshData.mNbVertices, sizeof(PxVec3), desc.points.stride);
+
+#if PX_CHECKED
+	// PT: check all input vertices are valid
+	for(PxU32 i=0;i<desc.points.count;i++)
+	{
+		const PxVec3& p = verts[i];
+		if(!PxIsFinite(p.x) || !PxIsFinite(p.y) || !PxIsFinite(p.z))
+		{
+			Ps::getFoundation().error(PxErrorCode::eINTERNAL_ERROR, __FILE__, __LINE__, "input mesh contains corrupted vertex data");
+			return false;
+		}
+	}
+#endif
+
+	//for trigs index stride conversion and eventual reordering is also needed, I don't think flexicopy can do that for us.
+
+	Gu::TriangleT<PxU32>* dest = tris;
+	const Gu::TriangleT<PxU32>* pastLastDest = tris + mMeshData.mNbTriangles;
+	const PxU8* source = reinterpret_cast<const PxU8*>(desc.triangles.data);
+
+	//4 combos of 16 vs 32 and flip vs no flip
+	// c==1 swaps v[1]/v[2] below (indices 1+c / 2-c), reversing the winding order.
+	PxU32 c = (desc.flags & PxMeshFlag::eFLIPNORMALS)?PxU32(1):0;
+	if (desc.flags & PxMeshFlag::e16_BIT_INDICES)
+	{
+		//index stride conversion is also needed, I don't think flexicopy can do that for us.
+		while (dest < pastLastDest)
+		{
+			const PxU16 * trig16 = reinterpret_cast<const PxU16*>(source);
+			dest->v[0] = trig16[0];
+			dest->v[1] = trig16[1+c];
+			dest->v[2] = trig16[2-c];
+			dest ++;
+			source += desc.triangles.stride;
+		}
+	}
+	else
+	{
+		while (dest < pastLastDest)
+		{
+			const PxU32 * trig32 = reinterpret_cast<const PxU32*>(source);
+			dest->v[0] = trig32[0];
+			dest->v[1] = trig32[1+c];
+			dest->v[2] = trig32[2-c];
+			dest ++;
+			source += desc.triangles.stride;
+		}
+	}
+
+	//copy the material index list if any:
+	if(desc.materialIndices.data)
+	{
+		PxMaterialTableIndex* materials = mMeshData.allocateMaterials();
+		Cooking::gatherStrided(desc.materialIndices.data, materials, mMeshData.mNbTriangles, sizeof(PxMaterialTableIndex), desc.materialIndices.stride);
+
+		// Check material indices (0xffff is reserved; debug builds only)
+		for(PxU32 i=0;i<mMeshData.mNbTriangles;i++)	PX_ASSERT(materials[i]!=0xffff);
+	}
+
+	// Clean the mesh using ICE's MeshBuilder
+	// This fixes the bug in ConvexTest06 where the inertia tensor computation fails for a mesh => it works with a clean mesh
+
+	if (!(params.meshPreprocessParams & PxMeshPreprocessingFlag::eDISABLE_CLEAN_MESH) || validate)
+	{
+		if(!cleanMesh(validate, condition))
+		{
+			if(!validate)
+				Ps::getFoundation().error(PxErrorCode::eINTERNAL_ERROR, __FILE__, __LINE__, "cleaning the mesh failed");
+			return false;
+		}
+	}
+	else
+	{
+		// we need to fill the remap table if no cleaning was done
+		// (identity mapping: no triangles were reordered or discarded)
+		if(params.suppressTriangleMeshRemapTable == false)
+		{
+			PX_ASSERT(mMeshData.mFaceRemap == NULL);
+			mMeshData.mFaceRemap = PX_NEW(PxU32)[mMeshData.mNbTriangles];
+			for (PxU32 i = 0; i < mMeshData.mNbTriangles; i++)
+				mMeshData.mFaceRemap[i] = i;
+		}
+	}
+	return true;
+}
+
+#if PX_VC
+#pragma warning(pop)
+#endif
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+//#define PROFILE_BOUNDS
+#ifdef PROFILE_BOUNDS
+ #include <windows.h>
+ #pragma comment(lib, "winmm.lib")
+#endif
+
+// Computes the mesh's local-space AABB and derives the geometric epsilon used by
+// collision queries. Following Charles Bloom's analysis: float round-off error is
+// roughly 2^-24 * magnitude, and with coordinate transforms in play a safe bound
+// is about 2^-22 * W, where W is the largest absolute bound coordinate.
+// PT: TODO: runtime checkings for this
+void TriangleMeshBuilder::computeLocalBounds()
+{
+#ifdef PROFILE_BOUNDS
+	int startTime = timeGetTime();
+#endif
+
+	PxBounds3& localBounds = mMeshData.mAABB;
+	computeBoundsAroundVertices(localBounds, mMeshData.mNbVertices, mMeshData.mVertices);
+
+	// Largest absolute coordinate of the bounds, scanned per axis.
+	PxReal maxAbsCoord = 0.0f;
+	for (PxU32 axis = 0; axis < 3; axis++)
+	{
+		const PxReal axisMagnitude = PxMax(PxAbs(localBounds.maximum[axis]), PxAbs(localBounds.minimum[axis]));
+		maxAbsCoord = PxMax(maxAbsCoord, axisMagnitude);
+	}
+	// Epsilon scales with world size: 2^-22 * W (see comment above).
+	mMeshData.mGeomEpsilon = maxAbsCoord * powf(2.0f, -22.0f);
+
+#ifdef PROFILE_BOUNDS
+	int deltaTime = timeGetTime() - startTime;
+	printf("Bounds time: %f\n", float(deltaTime)*0.001f);
+#endif
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+// Shrinks the triangle index buffer from 32-bit to 16-bit when all vertex indices
+// fit, reallocating the triangle storage and notifying the midphase via
+// onMeshIndexFormatChange() so its pointers are refreshed.
+void TriangleMeshBuilder::checkMeshIndicesSize()
+{
+	Gu::TriangleMeshData& m = mMeshData;
+
+	// check if we can change indices from 32bits to 16bits
+	if(m.mNbVertices <= 0xffff && !m.has16BitIndices())
+	{
+		const PxU32 numTriangles = m.mNbTriangles;
+		PxU32* PX_RESTRICT indices32 = reinterpret_cast<PxU32*> (m.mTriangles);
+
+		m.mTriangles = 0;                     // force a realloc
+		m.allocateTriangles(numTriangles, false);
+		PX_ASSERT(m.has16BitIndices());       // realloc'ing without the force32bit flag changed it.
+
+		PxU16* PX_RESTRICT indices16 = reinterpret_cast<PxU16*> (m.mTriangles);
+		for (PxU32 i = 0; i < numTriangles*3; i++)
+			indices16[i] = Ps::to16(indices32[i]);
+
+		PX_FREE(indices32);
+
+		// Midphase may cache raw triangle pointers; let it re-fetch them.
+		onMeshIndexFormatChange();
+	}
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+// BV4 (BVH34) builder: binds its own Gu::BV4TriangleData to the base builder.
+BV4TriangleMeshBuilder::BV4TriangleMeshBuilder(const PxCookingParams& params) : TriangleMeshBuilder(mData, params)
+{
+}
+
+BV4TriangleMeshBuilder::~BV4TriangleMeshBuilder()
+{
+}
+
+// Refreshes the BV4 mesh interface pointers after the triangle index buffer was
+// reallocated with a different index width (see checkMeshIndicesSize).
+void BV4TriangleMeshBuilder::onMeshIndexFormatChange()
+{
+	const bool has16BitIndices = (mMeshData.mFlags & PxTriangleMeshFlag::e16_BIT_INDICES) != 0;
+	// Exactly one of the two triangle pointers is valid, matching the index width.
+	IndTri16* tris16 = has16BitIndices ? reinterpret_cast<IndTri16*>(mMeshData.mTriangles) : NULL;
+	IndTri32* tris32 = has16BitIndices ? NULL : reinterpret_cast<IndTri32*>(mMeshData.mTriangles);
+
+	mData.mMeshInterface.setPointers(tris32, tris16, mMeshData.mVertices);
+}
+
+// Builds the BV4 (BVH34) midphase tree. The build reorders triangles; the remap it
+// produces is then applied to the material table and folded into the face remap so
+// both stay aligned with the new triangle order.
+void BV4TriangleMeshBuilder::createMidPhaseStructure()
+{
+	const float gBoxEpsilon = 2e-4f;
+//	const float gBoxEpsilon = 0.1f;
+	mData.mMeshInterface.initRemap();
+	mData.mMeshInterface.setNbVertices(mMeshData.mNbVertices);
+	mData.mMeshInterface.setNbTriangles(mMeshData.mNbTriangles);
+
+	IndTri32* triangles32 = NULL;
+	IndTri16* triangles16 = NULL;
+	if (mMeshData.mFlags & PxTriangleMeshFlag::e16_BIT_INDICES)
+	{
+		triangles16 = reinterpret_cast<IndTri16*>(mMeshData.mTriangles);
+	}
+	else
+	{
+		triangles32 = reinterpret_cast<IndTri32*>(mMeshData.mTriangles);
+	}
+
+	mData.mMeshInterface.setPointers(triangles32, triangles16, mMeshData.mVertices);
+
+	// Leaf size comes from the BVH34 descriptor when present, else defaults to 4.
+	const PxU32 nbTrisPerLeaf = (mParams.midphaseDesc.getType() == PxMeshMidPhase::eBVH34) ? mParams.midphaseDesc.mBVH34Desc.numTrisPerLeaf : 4;
+
+	if(!BuildBV4Ex(mData.mBV4Tree, mData.mMeshInterface, gBoxEpsilon, nbTrisPerLeaf))
+	{
+		Ps::getFoundation().error(PxErrorCode::eINTERNAL_ERROR, __FILE__, __LINE__, "BV4 tree failed to build.");
+		return;
+	}
+
+//	remapTopology(mData.mMeshInterface);
+
+	// order[new index] = old index, as produced by the BV4 build.
+	const PxU32* order = mData.mMeshInterface.getRemap();
+	if(mMeshData.mMaterialIndices)
+	{
+		PxMaterialTableIndex* newMat = PX_NEW(PxMaterialTableIndex)[mMeshData.mNbTriangles];
+		for(PxU32 i=0;i<mMeshData.mNbTriangles;i++)
+			newMat[i] = mMeshData.mMaterialIndices[order[i]];
+		PX_DELETE_POD(mMeshData.mMaterialIndices);
+		mMeshData.mMaterialIndices = newMat;
+	}
+
+	// GPU data also needs the remap table, even if the user suppressed it.
+	if (!mParams.suppressTriangleMeshRemapTable || mParams.buildGPUData)
+	{
+		PxU32* newMap = PX_NEW(PxU32)[mMeshData.mNbTriangles];
+		for(PxU32 i=0;i<mMeshData.mNbTriangles;i++)
+			newMap[i] = mMeshData.mFaceRemap ? mMeshData.mFaceRemap[order[i]] : order[i];
+		PX_DELETE_POD(mMeshData.mFaceRemap);
+		mMeshData.mFaceRemap = newMap;
+	}
+	mData.mMeshInterface.releaseRemap();
+}
+
+// Serializes the BV4 tree ('BV4 ' chunk, version 1): local bounds, init data and
+// packed nodes. Write order defines the format - keep in sync with the loader.
+void BV4TriangleMeshBuilder::saveMidPhaseStructure(PxOutputStream& stream) const
+{
+	const PxU32 version = 1;
+
+	// BV4 data is always stored little-endian; swap when cooking on big-endian.
+	const bool mismatch = (littleEndian() == 1);
+	writeChunk('B', 'V', '4', ' ', stream);
+	writeDword(version, mismatch, stream);
+
+	writeFloat(mData.mBV4Tree.mLocalBounds.mCenter.x, mismatch, stream);
+	writeFloat(mData.mBV4Tree.mLocalBounds.mCenter.y, mismatch, stream);
+	writeFloat(mData.mBV4Tree.mLocalBounds.mCenter.z, mismatch, stream);
+	writeFloat(mData.mBV4Tree.mLocalBounds.mExtentsMagnitude, mismatch, stream);
+
+	writeDword(mData.mBV4Tree.mInitData, mismatch, stream);
+#ifdef GU_BV4_QUANTIZED_TREE
+	// Quantized trees also store the dequantization coefficients.
+	writeFloat(mData.mBV4Tree.mCenterOrMinCoeff.x, mismatch, stream);
+	writeFloat(mData.mBV4Tree.mCenterOrMinCoeff.y, mismatch, stream);
+	writeFloat(mData.mBV4Tree.mCenterOrMinCoeff.z, mismatch, stream);
+	writeFloat(mData.mBV4Tree.mExtentsOrMaxCoeff.x, mismatch, stream);
+	writeFloat(mData.mBV4Tree.mExtentsOrMaxCoeff.y, mismatch, stream);
+	writeFloat(mData.mBV4Tree.mExtentsOrMaxCoeff.z, mismatch, stream);
+#endif
+	writeDword(mData.mBV4Tree.mNbNodes, mismatch, stream);
+	for(PxU32 i=0;i<mData.mBV4Tree.mNbNodes;i++)
+	{
+		const BVDataPacked& node = mData.mBV4Tree.mNodes[i];
+#ifdef GU_BV4_QUANTIZED_TREE
+		// 6 quantized extents/center words per node.
+		writeWordBuffer(&node.mAABB.mData[0].mExtents, 6, mismatch, stream);
+#else
+		// 6 floats: center.xyz followed by extents.xyz.
+		writeFloatBuffer(&node.mAABB.mCenter.x, 6, mismatch, stream);
+#endif
+		writeDword(node.mData, mismatch, stream);
+	}
+}
+
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+// Builds the GPU BV32 midphase tree over the GRB triangle indices. The build
+// reorders triangles; the resulting remap is folded into mGRB_faceRemap so GPU
+// triangle order can be mapped back (see createGRBMidPhaseAndData for rebasing).
+void BV32TriangleMeshBuilder::createMidPhaseStructure(const PxCookingParams& params, Gu::TriangleMeshData& meshData, Gu::BV32Tree& bv32Tree)
+{
+	const float gBoxEpsilon = 2e-4f;
+
+	Gu::SourceMesh meshInterface;
+	//	const float gBoxEpsilon = 0.1f;
+	meshInterface.initRemap();
+	meshInterface.setNbVertices(meshData.mNbVertices);
+	meshInterface.setNbTriangles(meshData.mNbTriangles);
+
+	// GRB indices are always 32-bit.
+	PX_ASSERT(!(meshData.mFlags & PxTriangleMeshFlag::e16_BIT_INDICES));
+
+	IndTri32* triangles32 = reinterpret_cast<IndTri32*>(meshData.mGRB_triIndices);
+
+	meshInterface.setPointers(triangles32, NULL, meshData.mVertices);
+
+	PxU32 nbTrisPerLeaf = 32;
+
+	if (!BuildBV32Ex(bv32Tree, meshInterface, gBoxEpsilon, nbTrisPerLeaf))
+	{
+		Ps::getFoundation().error(PxErrorCode::eINTERNAL_ERROR, __FILE__, __LINE__, "BV32 tree failed to build.");
+		return;
+	}
+
+	// order[new index] = old index, as produced by the BV32 build.
+	const PxU32* order = meshInterface.getRemap();
+
+	if (!params.suppressTriangleMeshRemapTable || params.buildGPUData)
+	{
+		PxU32* newMap = PX_NEW(PxU32)[meshData.mNbTriangles];
+		for (PxU32 i = 0; i<meshData.mNbTriangles; i++)
+			newMap[i] = meshData.mGRB_faceRemap ? meshData.mGRB_faceRemap[order[i]] : order[i];
+		PX_DELETE_POD(meshData.mGRB_faceRemap);
+		meshData.mGRB_faceRemap = newMap;
+	}
+
+	meshInterface.releaseRemap();
+
+}
+
+// Serializes the BV32 tree ('BV32' chunk, version 1): local bounds, init data and
+// packed nodes. Write order defines the format - keep in sync with the loader.
+void BV32TriangleMeshBuilder::saveMidPhaseStructure(Gu::BV32Tree* bv32Tree, PxOutputStream& stream)
+{
+	const PxU32 version = 1;
+
+	// BV32 data is always stored little-endian; swap when cooking on big-endian.
+	const bool mismatch = (littleEndian() == 1);
+	writeChunk('B', 'V', '3', '2', stream);
+	writeDword(version, mismatch, stream);
+
+	writeFloat(bv32Tree->mLocalBounds.mCenter.x, mismatch, stream);
+	writeFloat(bv32Tree->mLocalBounds.mCenter.y, mismatch, stream);
+	writeFloat(bv32Tree->mLocalBounds.mCenter.z, mismatch, stream);
+	writeFloat(bv32Tree->mLocalBounds.mExtentsMagnitude, mismatch, stream);
+
+	writeDword(bv32Tree->mInitData, mismatch, stream);
+
+	writeDword(bv32Tree->mNbPackedNodes, mismatch, stream);
+
+	PX_ASSERT(bv32Tree->mNbPackedNodes > 0);
+	for (PxU32 i = 0; i < bv32Tree->mNbPackedNodes; ++i)
+	{
+		BV32DataPacked& node = bv32Tree->mPackedNodes[i];
+
+		// Each node stores 4 values (one float4) per child node.
+		const PxU32 nbElements = node.mNbNodes * 4;
+		writeDword(node.mNbNodes, mismatch, stream);
+		WriteDwordBuffer(node.mData, node.mNbNodes, mismatch, stream);
+		writeFloatBuffer(&node.mCenter[0].x, nbElements, mismatch, stream);
+		writeFloatBuffer(&node.mExtents[0].x, nbElements, mismatch, stream);
+
+	}
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+// RTree (BVH33) builder: binds its own Gu::RTreeTriangleData to the base builder.
+RTreeTriangleMeshBuilder::RTreeTriangleMeshBuilder(const PxCookingParams& params) : TriangleMeshBuilder(mData, params)
+{
+}
+
+RTreeTriangleMeshBuilder::~RTreeTriangleMeshBuilder()
+{
+}
+
+// Remap callback invoked by the RTree cooker for each leaf: packs the leaf's
+// (triangle count, start index) pair into the LeafTriangles bitfield that is
+// stored in the tree's leaf entries. mNbTris is kept only for sanity asserts.
+struct RTreeCookerRemap : RTreeCooker::RemapCallback
+{
+	PxU32 mNbTris;
+	RTreeCookerRemap(PxU32 numTris) : mNbTris(numTris)
+	{
+	}
+
+	virtual void remap(PxU32* val, PxU32 start, PxU32 leafCount)
+	{
+		PX_ASSERT(leafCount > 0);
+		PX_ASSERT(leafCount <= 16); // sanity check
+		PX_ASSERT(start < mNbTris);
+		PX_ASSERT(start+leafCount <= mNbTris);
+		PX_ASSERT(val);
+		LeafTriangles lt;
+		// here we remap from ordered leaf index in the rtree to index in post-remap in triangles
+		// this post-remap will happen later
+		lt.SetData(leafCount, start);
+		*val = lt.Data;
+	}
+};
+
+// Builds the RTree (BVH33) midphase. Cooking parameters may come from either the
+// legacy top-level fields or the BVH33 midphase descriptor. The build produces a
+// triangle permutation which is then applied to the mesh via remapTopology().
+void RTreeTriangleMeshBuilder::createMidPhaseStructure()
+{
+	// Fall back to the legacy top-level params when no midphase descriptor was set.
+	const PxReal meshSizePerformanceTradeOff = (mParams.midphaseDesc.getType() == PxMeshMidPhase::eINVALID) ?
+		mParams.meshSizePerformanceTradeOff : mParams.midphaseDesc.mBVH33Desc.meshSizePerformanceTradeOff;
+	const PxMeshCookingHint::Enum meshCookingHint = (mParams.midphaseDesc.getType() == PxMeshMidPhase::eINVALID) ?
+		mParams.meshCookingHint : mParams.midphaseDesc.mBVH33Desc.meshCookingHint;
+
+	Array<PxU32> resultPermute;
+	RTreeCookerRemap rc(mMeshData.mNbTriangles);
+	RTreeCooker::buildFromTriangles(
+		mData.mRTree,
+		mMeshData.mVertices, mMeshData.mNbVertices,
+		(mMeshData.mFlags & PxTriangleMeshFlag::e16_BIT_INDICES) ? reinterpret_cast<PxU16*>(mMeshData.mTriangles) : NULL,
+		!(mMeshData.mFlags & PxTriangleMeshFlag::e16_BIT_INDICES) ? reinterpret_cast<PxU32*>(mMeshData.mTriangles) : NULL,
+		mMeshData.mNbTriangles, resultPermute, &rc, meshSizePerformanceTradeOff, meshCookingHint);
+
+	PX_ASSERT(resultPermute.size() == mMeshData.mNbTriangles);
+
+	// Reorder triangles (and dependent tables) to match the tree's leaf order.
+	remapTopology(resultPermute.begin());
+}
+
+// Serializes an RTree ('RTRE' chunk): root structure followed by all pages.
+// Write order defines the on-disk format - keep in sync with the runtime loader.
+static void saveRTreeData(PxOutputStream& stream, const RTree& d)
+{
+	// save the RTree root structure followed immediately by RTreePage pages to an output stream
+	// RTree data is always stored little-endian; swap when cooking on big-endian.
+	const bool mismatch = (littleEndian() == 1);
+	writeChunk('R', 'T', 'R', 'E', stream);
+	writeDword(RTREE_COOK_VERSION, mismatch, stream);
+	writeFloatBuffer(&d.mBoundsMin.x, 4, mismatch, stream);
+	writeFloatBuffer(&d.mBoundsMax.x, 4, mismatch, stream);
+	writeFloatBuffer(&d.mInvDiagonal.x, 4, mismatch, stream);
+	writeFloatBuffer(&d.mDiagonalScaler.x, 4, mismatch, stream);
+	writeDword(d.mPageSize, mismatch, stream);
+	writeDword(d.mNumRootPages, mismatch, stream);
+	writeDword(d.mNumLevels, mismatch, stream);
+	writeDword(d.mTotalNodes, mismatch, stream);
+	writeDword(d.mTotalPages, mismatch, stream);
+	PxU32 unused = 0; writeDword(unused, mismatch, stream); // backwards compatibility
+	for (PxU32 j = 0; j < d.mTotalPages; j++)
+	{
+		// Pages are stored in structure-of-arrays form: all minx, then miny, etc.
+		writeFloatBuffer(d.mPages[j].minx, RTREE_N, mismatch, stream);
+		writeFloatBuffer(d.mPages[j].miny, RTREE_N, mismatch, stream);
+		writeFloatBuffer(d.mPages[j].minz, RTREE_N, mismatch, stream);
+		writeFloatBuffer(d.mPages[j].maxx, RTREE_N, mismatch, stream);
+		writeFloatBuffer(d.mPages[j].maxy, RTREE_N, mismatch, stream);
+		writeFloatBuffer(d.mPages[j].maxz, RTREE_N, mismatch, stream);
+		WriteDwordBuffer(d.mPages[j].ptrs, RTREE_N, mismatch, stream);
+	}
+}
+
+// Serializes this builder's RTree midphase to the stream (see saveRTreeData).
+void RTreeTriangleMeshBuilder::saveMidPhaseStructure(PxOutputStream& stream) const
+{
+	// Export RTree
+	saveRTreeData(stream, mData.mRTree);
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+}
diff --git a/PhysX_3.4/Source/PhysXCooking/src/mesh/TriangleMeshBuilder.h b/PhysX_3.4/Source/PhysXCooking/src/mesh/TriangleMeshBuilder.h
new file mode 100644
index 00000000..639d0a12
--- /dev/null
+++ b/PhysX_3.4/Source/PhysXCooking/src/mesh/TriangleMeshBuilder.h
@@ -0,0 +1,120 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2016 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+
+#ifndef PX_COLLISION_TriangleMeshBUILDER
+#define PX_COLLISION_TriangleMeshBUILDER
+
+#include "GuMeshData.h"
+#include "cooking/PxCooking.h"
+
+namespace physx
+{
+ namespace Gu
+ {
+ class EdgeListBuilder;
+ }
+
+	// Base class for triangle-mesh cooking. Owns the cooking pipeline (import,
+	// clean, midphase build, bounds, edge data, GRB data, serialization) while
+	// derived classes supply the concrete midphase structure (RTree/BV4).
+	class TriangleMeshBuilder
+	{
+	public:
+								TriangleMeshBuilder(Gu::TriangleMeshData& mesh, const PxCookingParams& params);
+		virtual					~TriangleMeshBuilder();
+
+		virtual	PxMeshMidPhase::Enum	getMidphaseID()	const	= 0;
+		// Called by base code when midphase structure should be built
+		virtual void			createMidPhaseStructure() = 0;
+		// Called by base code when midphase structure should be saved
+		virtual void			saveMidPhaseStructure(PxOutputStream& stream)	const = 0;
+		// Called by base code when mesh index format has changed and the change should be reflected in midphase structure
+		virtual void			onMeshIndexFormatChange() {}
+
+		// Mesh cleaning (weld/validate) and triangle reordering after a midphase build.
+		bool					cleanMesh(bool validate, PxTriangleMeshCookingResult::Enum* condition);
+		void					remapTopology(const PxU32* order);
+
+		void					createSharedEdgeData(bool buildAdjacencies, bool buildActiveEdges);
+
+		// GPU (GRB) data construction steps; no-ops unless buildGPUData is set.
+		void					recordTriangleIndices();
+		void					createGRBMidPhaseAndData(const PxU32 originalTriangleCount);
+		void					createGRBData();
+
+		// Main entry points: cook from a descriptor, then serialize to a stream.
+		bool					loadFromDesc(const PxTriangleMeshDesc&, PxTriangleMeshCookingResult::Enum* condition ,bool validate = false);
+		bool					save(PxOutputStream& stream, bool platformMismatch, const PxCookingParams& params)	const;
+		void					checkMeshIndicesSize();
+		PX_FORCE_INLINE	Gu::TriangleMeshData&	getMeshData()	{ return mMeshData;	}
+	protected:
+		void					computeLocalBounds();
+		bool					importMesh(const PxTriangleMeshDesc& desc, const PxCookingParams& params, PxTriangleMeshCookingResult::Enum* condition ,bool validate = false);
+
+		TriangleMeshBuilder&	operator=(const TriangleMeshBuilder&);  // non-assignable (reference members)
+		Gu::EdgeListBuilder*	edgeList;    // built by createEdgeList(), NULL when absent
+		const PxCookingParams&	mParams;     // cooking parameters (not owned)
+		Gu::TriangleMeshData&	mMeshData;   // mesh being cooked (owned by derived class's mData)
+
+		void					releaseEdgeList();
+		void					createEdgeList();
+	};
+
+	// Cooks a triangle mesh with the RTree (BVH33) midphase.
+	class RTreeTriangleMeshBuilder : public TriangleMeshBuilder
+	{
+	public:
+								RTreeTriangleMeshBuilder(const PxCookingParams& params);
+		virtual					~RTreeTriangleMeshBuilder();
+
+		virtual	PxMeshMidPhase::Enum	getMidphaseID()	const	{ return PxMeshMidPhase::eBVH33;	}
+		virtual void			createMidPhaseStructure();
+		virtual void			saveMidPhaseStructure(PxOutputStream& stream)	const;
+
+		// Mesh + RTree storage; mMeshData in the base class aliases this member.
+		Gu::RTreeTriangleData	mData;
+	};
+
+	// Cooks a triangle mesh with the BV4 (BVH34) midphase.
+	class BV4TriangleMeshBuilder : public TriangleMeshBuilder
+	{
+	public:
+								BV4TriangleMeshBuilder(const PxCookingParams& params);
+		virtual					~BV4TriangleMeshBuilder();
+
+		virtual	PxMeshMidPhase::Enum	getMidphaseID()	const	{ return PxMeshMidPhase::eBVH34;	}
+		virtual void			createMidPhaseStructure();
+		virtual void			saveMidPhaseStructure(PxOutputStream& stream)	const;
+		// BV4 caches raw triangle pointers, so it must react to index-width changes.
+		virtual void			onMeshIndexFormatChange();
+
+		// Mesh + BV4 tree storage; mMeshData in the base class aliases this member.
+		Gu::BV4TriangleData		mData;
+	};
+
+	// Stateless helper for the GPU BV32 midphase: build and serialize only.
+	// Not a TriangleMeshBuilder - it operates on data owned by another builder.
+	class BV32TriangleMeshBuilder
+	{
+	public:
+		static void createMidPhaseStructure(const PxCookingParams& params, Gu::TriangleMeshData& meshData, Gu::BV32Tree& bv32Tree);
+		static void saveMidPhaseStructure(Gu::BV32Tree* tree, PxOutputStream& stream);
+	};
+
+}
+
+#endif
diff --git a/PhysX_3.4/Source/PhysXCooking/src/windows/WindowsCookingDelayLoadHook.cpp b/PhysX_3.4/Source/PhysXCooking/src/windows/WindowsCookingDelayLoadHook.cpp
new file mode 100644
index 00000000..9652bccb
--- /dev/null
+++ b/PhysX_3.4/Source/PhysXCooking/src/windows/WindowsCookingDelayLoadHook.cpp
@@ -0,0 +1,82 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2016 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+
+#include "windows/PxWindowsDelayLoadHook.h"
+#include "windows/PsWindowsInclude.h"
+#include "windows/CmWindowsLoadLibrary.h"
+
+// Prior to Visual Studio 2015 Update 3, these hooks were non-const.
+#define DELAYIMP_INSECURE_WRITABLE_HOOKS
+#include <delayimp.h>
+
+static const physx::PxDelayLoadHook* gDelayLoadHook = NULL;
+
+// Installs the user's delay-load hook for the cooking DLL. The hook is consulted
+// by delayHook() whenever a delay-loaded dependency is about to be resolved,
+// letting applications redirect the DLL search path. Passing NULL clears the hook.
+void physx::PxSetPhysXCookingDelayLoadHook(const physx::PxDelayLoadHook* hook)
+{
+	gDelayLoadHook = hook;
+}
+
+using namespace physx;
+
+#pragma comment(lib, "delayimp")
+
+// MSVC delay-load helper callback. Of all the delay-load notifications, only the
+// pre-load-library event is acted upon: it is forwarded to the shared PhysX
+// resolver so the user-installed hook (see PxSetPhysXCookingDelayLoadHook) can
+// substitute the DLL to load. Every other notification returns NULL, which tells
+// the delay-load helper to proceed with its default behavior.
+FARPROC WINAPI delayHook(unsigned dliNotify, PDelayLoadInfo pdli)
+{
+	if(dliNotify == dliNotePreLoadLibrary)
+		return Cm::physXCommonDliNotePreLoadLibrary(pdli->szDll,gDelayLoadHook);
+
+	// dliStartProcessing, dliNotePreGetProcAddress, dliFailLoadLib,
+	// dliFailGetProc, dliNoteEndProcessing and anything unrecognized:
+	// no override, use default handling.
+	return NULL;
+}
+
+PfnDliHook __pfnDliNotifyHook2 = delayHook;