aboutsummaryrefslogtreecommitdiff
path: root/NvCloth/extensions/src
diff options
context:
space:
mode:
authormtamis <[email protected]>2017-02-15 16:06:25 +0100
committermtamis <[email protected]>2017-02-15 16:06:25 +0100
commit85305930aeeb1d513e23522bd91f29ba81aa6d14 (patch)
tree45f1bb20a45a300d1fef107e436cac95602a0e57 /NvCloth/extensions/src
downloadnvcloth-85305930aeeb1d513e23522bd91f29ba81aa6d14.tar.xz
nvcloth-85305930aeeb1d513e23522bd91f29ba81aa6d14.zip
NvCloth library v1.0.0
Diffstat (limited to 'NvCloth/extensions/src')
-rw-r--r--NvCloth/extensions/src/ClothFabricCooker.cpp742
-rw-r--r--NvCloth/extensions/src/ClothGeodesicTetherCooker.cpp994
-rw-r--r--NvCloth/extensions/src/ClothMeshQuadifier.cpp443
-rw-r--r--NvCloth/extensions/src/ClothSimpleTetherCooker.cpp144
4 files changed, 2323 insertions, 0 deletions
diff --git a/NvCloth/extensions/src/ClothFabricCooker.cpp b/NvCloth/extensions/src/ClothFabricCooker.cpp
new file mode 100644
index 0000000..f0e4dea
--- /dev/null
+++ b/NvCloth/extensions/src/ClothFabricCooker.cpp
@@ -0,0 +1,742 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2017 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#include "foundation/PxVec4.h"
+#include "foundation/PxIO.h"
+#include "foundation/PxStrideIterator.h"
+#include "NvClothExt/ClothFabricCooker.h"
+#include "NvClothExt/ClothTetherCooker.h"
+#include "PsSort.h"
+#include "NvCloth/Fabric.h"
+#include "NvCloth/Allocator.h"
+#include "NvCloth/Range.h"
+
+#include <algorithm>
+#include "PsMathUtils.h"
+
+namespace
+{
+// Returns log2(x) after clamping x to [0, 1].
+// A clamped value of exactly 0 would make log2 undefined, so the integer
+// constant -FLT_MAX_EXP (converted to float) is returned as a finite
+// stand-in for -infinity in that case.
+float safeLog2(float x)
+{
+	float saturated = std::max(0.0f, std::min(x, 1.0f));
+	return saturated ? physx::shdfnd::log2(saturated) : -FLT_MAX_EXP;
+}
+}
+
+using namespace physx;
+
+namespace nv
+{
+namespace cloth
+{
+
+// Concrete ClothFabricCooker implementation. cook() fills the member arrays
+// below; the getters expose them either as modern CookedData ranges or as a
+// legacy ClothFabricDesc.
+struct FabricCookerImpl : public ClothFabricCooker
+{
+	FabricCookerImpl(){}
+	bool cook(const ClothMeshDesc& desc, PxVec3 gravity, bool useGeodesicTether);
+
+	ClothFabricDesc getDescriptor() const;
+	CookedData getCookedData() const;
+	void save(PxOutputStream& stream, bool platformMismatch) const;
+
+public:
+	PxU32 mNumParticles;
+
+	// one entry per phase: index into mSets, and the phase's constraint type
+	nv::cloth::Vector<PxU32>::Type mPhaseSetIndices;
+	nv::cloth::Vector<ClothFabricPhaseType::Enum>::Type mPhaseTypes;
+	nv::cloth::Vector<PxU32>::Type mSets; // with 0 prefix
+	nv::cloth::Vector<PxReal>::Type mRestvalues;      // one rest length per constraint
+	nv::cloth::Vector<PxReal>::Type mStiffnessValues; // per-constraint log2 stiffness; empty unless desc.pointsStiffness given
+	nv::cloth::Vector<PxU32>::Type mIndices;          // particle index pairs, 2 per constraint
+
+	// tether data, mNumParticles * tethersPerParticle entries
+	nv::cloth::Vector<PxU32>::Type mTetherAnchors;
+	nv::cloth::Vector<PxReal>::Type mTetherLengths;
+
+	nv::cloth::Vector<PxU32>::Type mTriangles; // triangulated input mesh (quads split)
+
+private:
+	// lazily rebuilt by getDescriptor() (hence mutable) for the legacy API
+	mutable nv::cloth::Vector<ClothFabricPhase>::Type mLegacyPhases;
+};
+
+namespace
+{
+// Wraps [vector.begin()+offset, vector.end()) as a non-owning Range.
+template<typename T>
+nv::cloth::Range<const T> CreateRange(typename nv::cloth::Vector<T>::Type const& vector, int offset = 0)
+{
+	const T* begin = vector.begin()+offset;
+	const T* end = vector.end();
+
+	return nv::cloth::Range<const T>(begin, end);
+}
+// Same as CreateRange but reinterprets elements of type U as type T
+// (sizes must match; used to view the phase-type enum array as PxI32).
+template<typename T, typename U>
+nv::cloth::Range<const T> CreateRangeF(typename nv::cloth::Vector<U>::Type const& vector, int offset = 0)
+{
+	static_assert(sizeof(T) == sizeof(U), "Type T and U need to be of the same size");
+	const T* begin = reinterpret_cast<const T*>(vector.begin()+offset);
+	const T* end = reinterpret_cast<const T*>(vector.end());
+
+	return nv::cloth::Range<const T>(begin, end);
+}
+}
+
+namespace
+{
+	// calculate the inclusive prefix sum, equivalent of std::partial_sum
+	// (dest may alias first, which is how callers convert counts in place)
+	template <typename T>
+	void prefixSum(const T* first, const T* last, T* dest)
+	{
+		if (first != last)
+		{
+			*(dest++) = *(first++);
+			for (; first != last; ++first, ++dest)
+				*dest = *(dest-1) + *first;
+		}
+	}
+
+	// Builds a CSR-style adjacency table: on return, valency holds per-vertex
+	// offsets (valency[i]..valency[i+1] indexes into adjacencies) and
+	// adjacencies lists, per vertex, the two ring neighbors contributed by
+	// each incident triangle/quad. T is the input index type (PxU16/PxU32).
+	// valency must arrive sized numParticles+1 and zero-initialized.
+	template <typename T>
+	void gatherAdjacencies(nv::cloth::Vector<PxU32>::Type& valency, nv::cloth::Vector<PxU32>::Type& adjacencies,
+		const BoundedData& triangles, const BoundedData& quads)
+	{
+		// count number of edges per vertex
+		// (fix: the counting loops previously also incremented the other,
+		// not-yet-initialized / exhausted iterator — null/past-the-end
+		// pointer arithmetic; only the iterator being read is advanced now)
+		PxStrideIterator<const T> tIt, qIt;
+		tIt = PxMakeIterator(reinterpret_cast<const T*>(triangles.data), triangles.stride);
+		for(PxU32 i=0; i<triangles.count; ++i, ++tIt)
+		{
+			for(PxU32 j=0; j<3; ++j)
+				valency[tIt.ptr()[j]] += 2;
+		}
+		qIt = PxMakeIterator(reinterpret_cast<const T*>(quads.data), quads.stride);
+		for(PxU32 i=0; i<quads.count; ++i, ++qIt)
+		{
+			for(PxU32 j=0; j<4; ++j)
+				valency[qIt.ptr()[j]] += 2;
+		}
+
+		prefixSum(valency.begin(), valency.end(), valency.begin());
+		adjacencies.resize(valency.back());
+
+		// gather adjacent vertices (fills buckets back to front, restoring
+		// valency to exclusive offsets as a side effect)
+		tIt = PxMakeIterator(reinterpret_cast<const T*>(triangles.data), triangles.stride);
+		for(PxU32 i=0; i<triangles.count; ++i, ++tIt)
+		{
+			for(PxU32 j=0; j<3; ++j)
+			{
+				adjacencies[--valency[tIt.ptr()[j]]] = tIt.ptr()[(j+1)%3];
+				adjacencies[--valency[tIt.ptr()[j]]] = tIt.ptr()[(j+2)%3];
+			}
+		}
+		qIt = PxMakeIterator(reinterpret_cast<const T*>(quads.data), quads.stride);
+		for(PxU32 i=0; i<quads.count; ++i, ++qIt)
+		{
+			for(PxU32 j=0; j<4; ++j)
+			{
+				adjacencies[--valency[qIt.ptr()[j]]] = qIt.ptr()[(j+1)%4];
+				adjacencies[--valency[qIt.ptr()[j]]] = qIt.ptr()[(j+3)%4];
+			}
+		}
+	}
+
+	// Appends the mesh's triangles to indices as PxU32 triples; each quad is
+	// split into two triangles (0,1,2) and (2,3,0). T is the input index
+	// type (PxU16/PxU32).
+	template <typename T>
+	void gatherTriangles(nv::cloth::Vector<PxU32>::Type& indices, const BoundedData& triangles, const BoundedData& quads)
+	{
+		indices.reserve(triangles.count * 3 + quads.count * 6);
+
+		// fix: each loop now advances only its own iterator — the original
+		// also incremented the other (default-constructed or exhausted)
+		// stride iterator, i.e. null/past-the-end pointer arithmetic
+		PxStrideIterator<const T> tIt, qIt;
+		tIt = PxMakeIterator(reinterpret_cast<const T*>(triangles.data), triangles.stride);
+		for (PxU32 i = 0; i<triangles.count; ++i, ++tIt)
+		{
+			for (PxU32 j = 0; j<3; ++j)
+				indices.pushBack(tIt.ptr()[j]);
+		}
+		qIt = PxMakeIterator(reinterpret_cast<const T*>(quads.data), quads.stride);
+		for (PxU32 i = 0; i<quads.count; ++i, ++qIt)
+		{
+			indices.pushBack(qIt.ptr()[0]);
+			indices.pushBack(qIt.ptr()[1]);
+			indices.pushBack(qIt.ptr()[2]);
+			indices.pushBack(qIt.ptr()[2]);
+			indices.pushBack(qIt.ptr()[3]);
+			indices.pushBack(qIt.ptr()[0]);
+		}
+	}
+
+
+
+	// Accumulates per-edge evidence of the edge's role. Each call to a
+	// classify() overload adds to one or more scores; cook() later picks the
+	// constraint type with the highest score.
+	struct Edge
+	{
+		Edge() : mStretching(0.0f), mBending(0.0f), mShearing(0.0f) {}
+
+		// vote for stretching (called once per 1-ring mesh edge occurrence)
+		void classify()
+		{
+			mStretching += 0.1f;
+		}
+
+		// classify v0-v2 edge based on alternative v0-v1-v2 path
+		void classify(const PxVec4& v0, const PxVec4& v1, const PxVec4& v2)
+		{
+			// w components (inverse masses) are ignored; treat as positions
+			const PxVec3& p0 = reinterpret_cast<const PxVec3&>(v0);
+			const PxVec3& p1 = reinterpret_cast<const PxVec3&>(v1);
+			const PxVec3& p2 = reinterpret_cast<const PxVec3&>(v2);
+
+			PxReal area = (p1-p0).cross(p2-p1).magnitude();
+			// triangle height / base length
+			// 1.0 = quad edge, 0.2 = quad diagonal + quad edge,
+			PxReal ratio = area / (p2-p0).magnitudeSquared();
+
+			// 0.5 = quad diagonal
+			mShearing += PxMax(0.0f, 0.15f - fabsf(0.45f - ratio));
+			// 0.0 = collinear points
+			mBending += PxMax(0.0f, 0.1f - ratio) * 3;
+		}
+
+		PxReal mStretching;
+		PxReal mBending;
+		PxReal mShearing;
+	};
+
+	typedef shdfnd::Pair<PxU32, PxU32> Pair;                        // (min,max) particle index pair of an edge
+	typedef shdfnd::Pair<Pair, ClothFabricPhaseType::Enum> Entry;   // constraint: edge + classified type
+
+	// maintain heap status after elements have been pushed (heapify)
+	// Max-heap w.r.t. T's operator<: appends value, then sifts it up.
+	template<typename T>
+	void pushHeap(typename nv::cloth::Vector<T>::Type &heap, const T &value)
+	{
+		heap.pushBack(value);
+		T* begin = heap.begin();
+		T* end = heap.end();
+
+		if (end <= begin)
+			return;
+
+		PxU32 current = PxU32(end - begin) - 1;
+		while (current > 0)
+		{
+			const PxU32 parent = (current - 1) / 2;
+			if (!(begin[parent] < begin[current]))
+				break;
+
+			shdfnd::swap(begin[parent], begin[current]);
+			current = parent;
+		}
+	}
+
+	// pop one element from the heap
+	// Removes and returns the top (greatest by operator<) element.
+	// Precondition: heap must be non-empty — begin[0]/end[-1] are accessed
+	// unchecked.
+	template<typename T>
+	T popHeap(typename nv::cloth::Vector<T>::Type &heap)
+	{
+		T* begin = heap.begin();
+		T* end = heap.end();
+
+		shdfnd::swap(begin[0], end[-1]); // exchange elements
+
+		// shift down
+		end--;
+
+		PxU32 current = 0;
+		while (begin + (current * 2 + 1) < end)
+		{
+			PxU32 child = current * 2 + 1;
+			if (begin + child + 1 < end && begin[child] < begin[child + 1])
+				++child;
+
+			if (!(begin[current] < begin[child]))
+				break;
+
+			shdfnd::swap(begin[current], begin[child]);
+			current = child;
+		}
+
+		return heap.popBack(); // former top, now at the back
+	}
+
+	// ---------------------------------------------------------------------------------------
+	// Heap element to sort constraint based on graph color count
+	// Ordered by colorCount so the heap yields the constraint with the most
+	// already-colored neighbors first.
+	struct ConstraintGraphColorCount
+	{
+		ConstraintGraphColorCount(PxU32 cid, PxU32 count)
+			: constraint(cid), colorCount(count) {}
+
+		PxU32 constraint;  // index into the constraints array
+		PxU32 colorCount;  // number of colored neighbors at push time
+
+		bool operator < (const ConstraintGraphColorCount& c) const
+		{
+			return colorCount < c.colorCount;
+		}
+	};
+
+	// Orders constraint indices i, j by their (first, second) particle index
+	// pairs in the flat constraints array (2 PxU32 per constraint); used to
+	// sort each phase for linear memory access.
+	struct ConstraintSorter
+	{
+	public:
+
+		ConstraintSorter(PxU32* constraints_) : constraints(constraints_) {}
+
+		bool operator()(PxU32 i, PxU32 j) const
+		{
+			PxU32 ci = i*2;
+			PxU32 cj = j*2;
+
+			if (constraints[ci] == constraints[cj])
+				return constraints[ci+1] < constraints[cj+1];
+			else
+				return constraints[ci] < constraints[cj];
+		}
+
+		PxU32* constraints; // non-owning view into the index pair array
+	};
+
+} // anonymous namespace
+
+// Builds all fabric data from the mesh description:
+//  1. gather triangles plus a per-vertex adjacency table,
+//  2. classify unique 1-ring/2-ring edges as stretch/shear/bend constraints
+//     (stretch further split into horizontal/vertical against gravity),
+//  3. graph-color the constraints into independent sets (phases),
+//  4. sort each set by particle index for linear memory access,
+//  5. optionally derive per-constraint stiffness from per-point values,
+//  6. cook tether data (geodesic cooker with simple-cooker fallback).
+// Returns false only when desc.isValid() fails.
+bool FabricCookerImpl::cook(const ClothMeshDesc& desc, PxVec3 gravity, bool useGeodesicTether)
+{
+	if(!desc.isValid())
+	{
+		NV_CLOTH_LOG_INVALID_PARAMETER("FabricCookerImpl::cook: desc.isValid() failed!");
+		return false;
+	}
+
+	gravity = gravity.getNormalized();
+
+	mNumParticles = desc.points.count;
+
+	// assemble points; w = inverse mass, defaulting to 1 when no invMasses given
+	nv::cloth::Vector<PxVec4>::Type particles;
+	particles.reserve(mNumParticles);
+	PxStrideIterator<const PxVec3> pIt(reinterpret_cast<const PxVec3*>(desc.points.data), desc.points.stride);
+	PxStrideIterator<const PxReal> wIt(reinterpret_cast<const PxReal*>(desc.invMasses.data), desc.invMasses.stride);
+	for(PxU32 i=0; i<mNumParticles; ++i)
+		particles.pushBack(PxVec4(*pIt++, wIt.ptr() ? *wIt++ : 1.0f));
+
+	// build adjacent vertex list
+	nv::cloth::Vector<PxU32>::Type valency(mNumParticles+1, 0);
+	nv::cloth::Vector<PxU32>::Type adjacencies;
+	if (desc.flags & MeshFlag::e16_BIT_INDICES)
+	{
+		gatherTriangles<PxU16>(mTriangles, desc.triangles, desc.quads);
+		gatherAdjacencies<PxU16>(valency, adjacencies, desc.triangles, desc.quads);
+	}
+	else
+	{
+		gatherTriangles<PxU32>(mTriangles, desc.triangles, desc.quads);
+		gatherAdjacencies<PxU32>(valency, adjacencies, desc.triangles, desc.quads);
+	}
+
+	// build unique neighbors from adjacencies
+	// (mark[k] == i means k was already emitted for vertex i-1's bucket;
+	// valency is rewritten in place to index into the compacted neighbors)
+	nv::cloth::Vector<PxU32>::Type mark(valency.size(), 0);
+	nv::cloth::Vector<PxU32>::Type neighbors; neighbors.reserve(adjacencies.size());
+	for(PxU32 i=1, j=0; i<valency.size(); ++i)
+	{
+		for(; j<valency[i]; ++j)
+		{
+			PxU32 k = adjacencies[j];
+			if(mark[k] != i)
+			{
+				mark[k] = i;
+				neighbors.pushBack(k);
+			}
+		}
+		valency[i] = neighbors.size();
+	}
+
+	// build map of unique edges and classify
+	nv::cloth::HashMap<Pair, Edge>::Type edges;
+	for(PxU32 i=0; i<mNumParticles; ++i)
+	{
+		PxReal wi = particles[i].w;
+		// iterate all neighbors
+		PxU32 jlast = valency[i+1];
+		for(PxU32 j=valency[i]; j<jlast; ++j)
+		{
+			// add 1-ring edge (skipped if both endpoints are attached, w==0)
+			PxU32 m = neighbors[j];
+			if(wi + particles[m].w > 0.0f)
+				edges[Pair(PxMin(i, m), PxMax(i, m))].classify();
+
+			// iterate all neighbors of neighbor
+			PxU32 klast = valency[m+1];
+			for(PxU32 k=valency[m]; k<klast; ++k)
+			{
+				PxU32 n = neighbors[k];
+				if(n != i && wi + particles[n].w > 0.0f)
+				{
+					// add 2-ring edge
+					edges[Pair(PxMin(i, n), PxMax(i, n))].classify(
+						particles[i], particles[m], particles[n]);
+				}
+			}
+		}
+	}
+
+	// copy classified edges to constraints array
+	// build histogram of constraints per vertex
+	nv::cloth::Vector<Entry>::Type constraints;
+	constraints.reserve(edges.size());
+	valency.resize(0); valency.resize(mNumParticles+1, 0);
+
+	// NOTE(review): named sqrtHalf but computes sqrt(0.4) ~ 0.632, not
+	// sqrt(0.5) ~ 0.707 — possibly a deliberate bias toward eVERTICAL;
+	// TODO confirm against upstream intent
+	const PxReal sqrtHalf = PxSqrt(0.4f);
+	for(nv::cloth::HashMap<Pair, Edge>::Type::Iterator eIt = edges.getIterator(); !eIt.done(); ++eIt)
+	{
+		const Edge& edge = eIt->second;
+		const Pair& pair = eIt->first;
+		if((edge.mStretching + edge.mBending + edge.mShearing) > 0.0f)
+		{
+			// pick the type with the dominant score; stretch edges are split
+			// into horizontal/vertical by their angle against gravity
+			ClothFabricPhaseType::Enum type = ClothFabricPhaseType::eINVALID;
+			if(edge.mBending > PxMax(edge.mStretching, edge.mShearing))
+				type = ClothFabricPhaseType::eBENDING;
+			else if(edge.mShearing > PxMax(edge.mStretching, edge.mBending))
+				type = ClothFabricPhaseType::eSHEARING;
+			else
+			{
+				PxVec4 diff = particles[pair.first]-particles[pair.second];
+				PxReal dot = gravity.dot(reinterpret_cast<const PxVec3&>(diff).getNormalized());
+				type = fabsf(dot) < sqrtHalf ? ClothFabricPhaseType::eHORIZONTAL : ClothFabricPhaseType::eVERTICAL;
+			}
+			++valency[pair.first];
+			++valency[pair.second];
+			constraints.pushBack(Entry(pair, type));
+		}
+	}
+
+	prefixSum(valency.begin(), valency.end(), valency.begin());
+
+	PxU32 numConstraints = constraints.size();
+
+	// build adjacent constraint list (constraint indices bucketed per vertex;
+	// filling back-to-front restores valency to exclusive offsets)
+	adjacencies.resize(0); adjacencies.resize(valency.back(), 0);
+	for(PxU32 i=0; i<numConstraints; ++i)
+	{
+		adjacencies[--valency[constraints[i].first.first]] = i;
+		adjacencies[--valency[constraints[i].first.second]] = i;
+	}
+
+	nv::cloth::Vector<PxU32>::Type::ConstIterator aFirst = adjacencies.begin();
+	nv::cloth::Vector<PxU32>::Type colors(numConstraints, numConstraints); // constraint -> color, initially not colored
+	mark.resize(0); mark.resize(numConstraints+1, PX_MAX_U32); // color -> constraint index
+	nv::cloth::Vector<PxU32>::Type adjColorCount(numConstraints, 0); // # of neighbors that are already colored
+
+	nv::cloth::Vector<ConstraintGraphColorCount>::Type constraintHeap;
+	constraintHeap.reserve(numConstraints); // set of constraints to color (added in edge distance order)
+
+	// Do graph coloring based on edge distance.
+	// For each constraint, we add its uncolored neighbors to the heap
+	// ,and we pick the constraint with most colored neighbors from the heap.
+	for(;;)
+	{
+		PxU32 constraint = 0;
+		while ( (constraint < numConstraints) && (colors[constraint] != numConstraints))
+			constraint++; // start with the first uncolored constraint
+
+		if (constraint >= numConstraints)
+			break; // everything colored
+
+		constraintHeap.clear();
+		pushHeap(constraintHeap, ConstraintGraphColorCount(constraint, adjColorCount[constraint]));
+		ClothFabricPhaseType::Enum type = constraints[constraint].second;
+
+		while (!constraintHeap.empty())
+		{
+			ConstraintGraphColorCount heapItem = popHeap<ConstraintGraphColorCount>(constraintHeap);
+			constraint = heapItem.constraint;
+			if (colors[constraint] != numConstraints)
+				continue; // skip if already colored
+
+			const Pair& pair = constraints[constraint].first;
+			for(PxU32 j=0; j<2; ++j)
+			{
+				PxU32 index = j ? pair.first : pair.second;
+				if(particles[index].w == 0.0f)
+					continue; // don't mark adjacent particles if attached
+
+				for(nv::cloth::Vector<PxU32>::Type::ConstIterator aIt = aFirst + valency[index], aEnd = aFirst + valency[index+1]; aIt != aEnd; ++aIt)
+				{
+					PxU32 adjacentConstraint = *aIt;
+					if ((constraints[adjacentConstraint].second != type) || (adjacentConstraint == constraint))
+						continue;
+
+					// mark the neighbor's color as used for this constraint
+					mark[colors[adjacentConstraint]] = constraint;
+					++adjColorCount[adjacentConstraint];
+					pushHeap(constraintHeap, ConstraintGraphColorCount(adjacentConstraint, adjColorCount[adjacentConstraint]));
+				}
+			}
+
+			// find smallest color with matching type
+			PxU32 color = 0;
+			while((color < mPhaseSetIndices.size() && mPhaseTypes[color] != type) || mark[color] == constraint)
+				++color;
+
+			// create a new color set
+			if(color == mPhaseSetIndices.size())
+			{
+				mPhaseSetIndices.pushBack(mPhaseSetIndices.size());
+				mPhaseTypes.pushBack(type);
+				mSets.pushBack(0);
+			}
+
+			colors[constraint] = color;
+			++mSets[color]; // mSets holds per-color counts at this point
+		}
+	}
+
+#if 0 // PX_DEBUG
+	printf("set[%u] = ", mSets.size());
+	for(PxU32 i=0; i<mSets.size(); ++i)
+		printf("%u ", mSets[i]);
+#endif
+
+	prefixSum(mSets.begin(), mSets.end(), mSets.begin());
+
+#if 0 // PX_DEBUG
+	printf(" = %u\n", mSets.back());
+#endif
+
+	// write indices and rest lengths
+	// convert mSets to exclusive sum
+	PxU32 back = mSets.back();
+	mSets.pushBack(back);
+	mIndices.resize(numConstraints*2);
+	mRestvalues.resize(numConstraints);
+	for(PxU32 i=0; i<numConstraints; ++i)
+	{
+		PxU32 first = constraints[i].first.first;
+		PxU32 second = constraints[i].first.second;
+
+		// back-to-front fill turns mSets into exclusive offsets
+		PxU32 index = --mSets[colors[i]];
+
+		mIndices[2*index  ] = first;
+		mIndices[2*index+1] = second;
+
+		// rest value = rest length of the edge (w component ignored)
+		PxVec4 diff = particles[second] - particles[first];
+		mRestvalues[index] = reinterpret_cast<
+			const PxVec3&>(diff).magnitude();
+	}
+
+	// reorder constraints and rest values for more efficient cache access (linear)
+	nv::cloth::Vector<PxU32>::Type newIndices(mIndices.size());
+	nv::cloth::Vector<PxF32>::Type newRestValues(mRestvalues.size());
+
+	// sort each constraint set in vertex order
+	for (PxU32 i=0; i < mSets.size()-1; ++i)
+	{
+		// create a re-ordering list
+		nv::cloth::Vector<PxU32>::Type reorder(mSets[i+1]-mSets[i]);
+
+		for (PxU32 r=0; r < reorder.size(); ++r)
+			reorder[r] = r;
+
+		const PxU32 indicesOffset = mSets[i]*2;
+		const PxU32 restOffset = mSets[i];
+
+		ConstraintSorter predicate(&mIndices[indicesOffset]);
+		shdfnd::sort(&reorder[0], reorder.size(), predicate, nv::cloth::NonTrackingAllocator());
+
+		for (PxU32 r=0; r < reorder.size(); ++r)
+		{
+			newIndices[indicesOffset + r*2] = mIndices[indicesOffset + reorder[r]*2];
+			newIndices[indicesOffset + r*2+1] = mIndices[indicesOffset + reorder[r]*2+1];
+			newRestValues[restOffset + r] = mRestvalues[restOffset + reorder[r]];
+		}
+	}
+
+	mIndices = newIndices;
+	mRestvalues = newRestValues;
+
+	NV_CLOTH_ASSERT(mIndices.size() == mRestvalues.size()*2);
+	NV_CLOTH_ASSERT(mRestvalues.size() == mSets.back());
+
+	// calculate per constraint stiffness values if point stiffness values are provided
+	if(desc.pointsStiffness.count)
+	{
+		mStiffnessValues.resize(mIndices.size()>>1);
+		PxStrideIterator<const PxReal> stIt(reinterpret_cast<const PxReal*>(desc.pointsStiffness.data), desc.pointsStiffness.stride);
+		for(int i = 0; i<static_cast<int>(mIndices.size()); i+=2)
+		{
+
+			physx::PxU32 indexA = mIndices[i];
+			physx::PxU32 indexB = mIndices[i+1];
+
+			//Uses min instead of average to get better bending constraints
+			mStiffnessValues[i>>1] = safeLog2(1.0f-std::min(stIt[indexA],stIt[indexB]));
+		}
+	}
+
+	// NOTE(review): this disabled debug dump references mPhases, which is not
+	// a member of this class; harmless only because it is compiled out.
+#if 0 // PX_DEBUG
+	for (PxU32 i = 1; i < mSets.size(); i++)
+	{
+		ClothFabricPhase phase = mPhases[i-1];
+		printf("%d : type %d, size %d\n",
+			i-1, phase.phaseType, mSets[i] - mSets[i-1]);
+	}
+#endif
+
+	// tether cooking: try the geodesic cooker first, fall back to the simple
+	// one if it fails
+	if (useGeodesicTether)
+	{
+		ClothTetherCooker* tetherCooker = NvClothCreateGeodesicTetherCooker();
+		if (tetherCooker->cook(desc))
+		{
+			PxU32 numTethersPerParticle = tetherCooker->getNbTethersPerParticle();
+			PxU32 tetherSize = mNumParticles * numTethersPerParticle;
+			mTetherAnchors.resize(tetherSize);
+			mTetherLengths.resize(tetherSize);
+			tetherCooker->getTetherData(mTetherAnchors.begin(), mTetherLengths.begin());
+		}
+		else
+			useGeodesicTether = false;
+		delete tetherCooker;
+	}
+
+	if (!useGeodesicTether)
+	{
+		ClothTetherCooker* tetherCooker = NvClothCreateSimpleTetherCooker();
+		if (tetherCooker->cook(desc))
+		{
+			PxU32 numTethersPerParticle = tetherCooker->getNbTethersPerParticle();
+			PxU32 tetherSize = mNumParticles * numTethersPerParticle;
+			mTetherAnchors.resize(tetherSize);
+			mTetherLengths.resize(tetherSize);
+			tetherCooker->getTetherData(mTetherAnchors.begin(), mTetherLengths.begin());
+		}
+		delete tetherCooker;
+	}
+
+	return true;
+}
+
+// Exposes the cooked arrays as non-owning ranges; valid only while this
+// cooker instance is alive and unmodified.
+CookedData FabricCookerImpl::getCookedData() const
+{
+	CookedData result;
+	result.mNumParticles = mNumParticles;
+	result.mPhaseIndices = CreateRange<PxU32>(mPhaseSetIndices);
+	// phase type enum viewed as PxI32 (same size, checked in CreateRangeF)
+	result.mPhaseTypes = CreateRangeF<PxI32, ClothFabricPhaseType::Enum>(mPhaseTypes);
+	result.mSets = CreateRange<PxU32>(mSets, 1); // offset 1 skips the leading prefix element
+	result.mRestvalues = CreateRange<PxReal>(mRestvalues);
+	result.mStiffnessValues = CreateRange<PxReal>(mStiffnessValues);
+	result.mIndices = CreateRange<PxU32>(mIndices);
+	result.mAnchors = CreateRange<PxU32>(mTetherAnchors);
+	result.mTetherLengths = CreateRange<PxReal>(mTetherLengths);
+	result.mTriangles = CreateRange<PxU32>(mTriangles);
+
+	return result;
+}
+
+// Builds a legacy ClothFabricDesc view of the cooked data. The phase array
+// is rebuilt into the mutable mLegacyPhases on every call; all pointers are
+// non-owning and valid only while this cooker is alive and unmodified.
+ClothFabricDesc FabricCookerImpl::getDescriptor() const
+{
+	ClothFabricDesc result;
+
+	result.nbParticles = mNumParticles;
+	result.nbPhases = mPhaseSetIndices.size();
+
+	// repack (setIndex, phaseType) pairs into the legacy struct layout
+	mLegacyPhases.resize(mPhaseSetIndices.size());
+	for(unsigned int i = 0; i < mPhaseSetIndices.size(); i++)
+	{
+		mLegacyPhases[i].setIndex = mPhaseSetIndices[i];
+		mLegacyPhases[i].phaseType = mPhaseTypes[i];
+	}
+
+	result.phases = mLegacyPhases.begin();
+	result.nbSets = mSets.size()-1;
+	result.sets = mSets.begin()+1; // skip the leading prefix element
+	result.restvalues = mRestvalues.begin();
+	result.indices = mIndices.begin();
+	result.nbTethers = mTetherAnchors.size();
+	result.tetherAnchors = mTetherAnchors.begin();
+	result.tetherLengths = mTetherLengths.begin();
+	result.nbTriangles = mTriangles.size() / 3;
+	result.triangles = mTriangles.begin();
+
+	return result;
+}
+
+// Serializes the cooked fabric to stream. platformMismatch is currently
+// ignored (no endian conversion is performed here).
+void FabricCookerImpl::save( PxOutputStream& stream, bool /*platformMismatch*/ ) const
+{
+	// version 1 is equivalent to 0x030300 and 0x030301 (PX_PHYSICS_VERSION of 3.3.0 and 3.3.1).
+	// If the stream format changes, the loader code in ScClothFabricCore.cpp
+	// and the version number need to change too.
+	PxU32 version = 1;
+	stream.write(&version, sizeof(PxU32));
+
+	ClothFabricDesc desc = getDescriptor();
+
+	// write explicit sizes, others are implicit
+	stream.write(&mNumParticles, sizeof(PxU32));
+	stream.write(&desc.nbPhases, sizeof(PxU32));
+	stream.write(&desc.nbSets, sizeof(PxU32));
+	stream.write(&desc.nbTethers, sizeof(PxU32));
+
+	// total constraint count = last entry of the inclusive set prefix sum
+	PxU32 nbConstraints = desc.sets[desc.nbSets-1];
+
+	// write actual data
+	PX_COMPILE_TIME_ASSERT(sizeof(ClothFabricPhaseType::Enum) == sizeof(PxU32));
+	stream.write(desc.phases, desc.nbPhases*sizeof(ClothFabricPhase));
+	stream.write(desc.sets, desc.nbSets*sizeof(PxU32));
+
+	stream.write(desc.restvalues, nbConstraints*sizeof(PxReal));
+	stream.write(desc.indices, nbConstraints*2*sizeof(PxU32));
+
+	stream.write(desc.tetherAnchors, desc.nbTethers*sizeof(PxU32));
+	stream.write(desc.tetherLengths, desc.nbTethers*sizeof(PxReal));
+}
+
+} // namespace cloth
+} // namespace nv
+
+
+// Factory entry point; caller takes ownership of the returned cooker.
+NV_CLOTH_API(nv::cloth::ClothFabricCooker*) NvClothCreateFabricCooker()
+{
+	return NV_CLOTH_NEW(nv::cloth::FabricCookerImpl);
+}
+
+// Convenience entry point: cooks desc with a temporary FabricCookerImpl and
+// creates a Fabric from the result via the given factory. Returns 0 when
+// cooking fails. If phaseTypes is non-null it is filled with the cooked
+// per-phase type values.
+NV_CLOTH_API(nv::cloth::Fabric*) NvClothCookFabricFromMesh( nv::cloth::Factory* factory, const nv::cloth::ClothMeshDesc& desc, const PxVec3& gravity, nv::cloth::Vector<int32_t>::Type* phaseTypes, bool useGeodesicTether )
+{
+	nv::cloth::FabricCookerImpl impl;
+
+	if(!impl.cook(desc, gravity, useGeodesicTether))
+		return 0;
+
+	// ranges below point into impl, which lives until after createFabric
+	nv::cloth::CookedData data = impl.getCookedData();
+
+	if(phaseTypes)
+	{
+		phaseTypes->resize(data.mPhaseTypes.size());
+		for(int i = 0; i < static_cast<int>(data.mPhaseTypes.size()); i++)
+		{
+			(*phaseTypes)[i] = data.mPhaseTypes[i];
+		}
+	}
+
+	return factory->createFabric(
+		data.mNumParticles,
+		data.mPhaseIndices,
+		data.mSets,
+		data.mRestvalues,
+		data.mStiffnessValues,
+		data.mIndices,
+		data.mAnchors,
+		data.mTetherLengths,
+		data.mTriangles
+		);
+}
diff --git a/NvCloth/extensions/src/ClothGeodesicTetherCooker.cpp b/NvCloth/extensions/src/ClothGeodesicTetherCooker.cpp
new file mode 100644
index 0000000..00f9cb2
--- /dev/null
+++ b/NvCloth/extensions/src/ClothGeodesicTetherCooker.cpp
@@ -0,0 +1,994 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2017 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#include "foundation/PxVec4.h"
+#include "foundation/PxMemory.h"
+#include "foundation/PxStrideIterator.h"
+#include "NvClothExt/ClothTetherCooker.h"
+
+// from shared foundation
+#include <PsSort.h>
+#include <PsMathUtils.h>
+#include "NvCloth/Allocator.h"
+
+using namespace physx;
+
+namespace nv
+{
+namespace cloth
+{
+
+namespace
+{
+	// calculate the inclusive prefix sum, equivalent of std::partial_sum
+	// (dest may alias first; duplicated from ClothFabricCooker.cpp)
+	template <typename T>
+	void prefixSum(const T* first, const T* last, T* dest)
+	{
+		if (first != last)
+		{
+			*(dest++) = *(first++);
+			for (; first != last; ++first, ++dest)
+				*dest = *(dest-1) + *first;
+		}
+	}
+
+	// Builds a CSR-style adjacency table (duplicated from
+	// ClothFabricCooker.cpp): valency becomes per-vertex offsets into
+	// adjacencies, which lists each vertex's ring neighbors from every
+	// incident triangle/quad. T is the input index type (PxU16/PxU32).
+	// valency must arrive sized numParticles+1 and zero-initialized.
+	template <typename T>
+	void gatherAdjacencies(nv::cloth::Vector<PxU32>::Type& valency, nv::cloth::Vector<PxU32>::Type& adjacencies,
+		const BoundedData& triangles, const BoundedData& quads)
+	{
+		// count number of edges per vertex
+		// (fix: the counting loops previously also incremented the other,
+		// not-yet-initialized / exhausted iterator — null/past-the-end
+		// pointer arithmetic; only the iterator being read is advanced now,
+		// matching gatherIndices below)
+		PxStrideIterator<const T> tIt, qIt;
+		tIt = PxMakeIterator(reinterpret_cast<const T*>(triangles.data), triangles.stride);
+		for(PxU32 i=0; i<triangles.count; ++i, ++tIt)
+		{
+			for(PxU32 j=0; j<3; ++j)
+				valency[tIt.ptr()[j]] += 2;
+		}
+		qIt = PxMakeIterator(reinterpret_cast<const T*>(quads.data), quads.stride);
+		for(PxU32 i=0; i<quads.count; ++i, ++qIt)
+		{
+			for(PxU32 j=0; j<4; ++j)
+				valency[qIt.ptr()[j]] += 2;
+		}
+
+		prefixSum(valency.begin(), valency.end(), valency.begin());
+		adjacencies.resize(valency.back());
+
+		// gather adjacent vertices (back-to-front fill restores valency to
+		// exclusive offsets)
+		tIt = PxMakeIterator(reinterpret_cast<const T*>(triangles.data), triangles.stride);
+		for(PxU32 i=0; i<triangles.count; ++i, ++tIt)
+		{
+			for(PxU32 j=0; j<3; ++j)
+			{
+				adjacencies[--valency[tIt.ptr()[j]]] = tIt.ptr()[(j+1)%3];
+				adjacencies[--valency[tIt.ptr()[j]]] = tIt.ptr()[(j+2)%3];
+			}
+		}
+		qIt = PxMakeIterator(reinterpret_cast<const T*>(quads.data), quads.stride);
+		for(PxU32 i=0; i<quads.count; ++i, ++qIt)
+		{
+			for(PxU32 j=0; j<4; ++j)
+			{
+				adjacencies[--valency[qIt.ptr()[j]]] = qIt.ptr()[(j+1)%4];
+				adjacencies[--valency[qIt.ptr()[j]]] = qIt.ptr()[(j+3)%4];
+			}
+		}
+	}
+
+	// Appends the mesh's triangles to indices as PxU32 triples; each quad is
+	// split into triangles (0,1,2) and (0,2,3). T is the input index type
+	// (PxU16/PxU32). Note: this splits quads in a different vertex order
+	// than gatherTriangles in ClothFabricCooker.cpp ((0,1,2)/(2,3,0)).
+	template <typename T>
+	void gatherIndices(nv::cloth::Vector<PxU32>::Type& indices,
+		const BoundedData& triangles, const BoundedData& quads)
+	{
+		PxStrideIterator<const T> tIt, qIt;
+
+		indices.reserve(triangles.count * 3 + quads.count * 6);
+
+		tIt = PxMakeIterator(reinterpret_cast<const T*>(triangles.data), triangles.stride);
+		for(PxU32 i=0; i<triangles.count; ++i, ++tIt)
+		{
+			indices.pushBack(tIt.ptr()[0]);
+			indices.pushBack(tIt.ptr()[1]);
+			indices.pushBack(tIt.ptr()[2]);
+		}
+		qIt = PxMakeIterator(reinterpret_cast<const T*>(quads.data), quads.stride);
+		for(PxU32 i=0; i<quads.count; ++i, ++qIt)
+		{
+			indices.pushBack(qIt.ptr()[0]);
+			indices.pushBack(qIt.ptr()[1]);
+			indices.pushBack(qIt.ptr()[2]);
+			indices.pushBack(qIt.ptr()[0]);
+			indices.pushBack(qIt.ptr()[2]);
+			indices.pushBack(qIt.ptr()[3]);
+		}
+	}
+
+	// maintain heap status after elements have been pushed (heapify)
+	// Max-heap w.r.t. T's operator<: appends value, then sifts it up.
+	// (duplicated from ClothFabricCooker.cpp)
+	template<typename T>
+	void pushHeap(typename nv::cloth::Vector<T>::Type &heap, const T &value)
+	{
+		heap.pushBack(value);
+		T* begin = heap.begin();
+		T* end = heap.end();
+
+		if (end <= begin)
+			return;
+
+		PxU32 current = PxU32(end - begin) - 1;
+		while (current > 0)
+		{
+			const PxU32 parent = (current - 1) / 2;
+			if (!(begin[parent] < begin[current]))
+				break;
+
+			shdfnd::swap(begin[parent], begin[current]);
+			current = parent;
+		}
+	}
+
+	// pop one element from the heap
+	// Removes and returns the top (greatest by operator<) element.
+	// Precondition: heap must be non-empty — begin[0]/end[-1] are accessed
+	// unchecked. (duplicated from ClothFabricCooker.cpp)
+	template<typename T>
+	T popHeap(typename nv::cloth::Vector<T>::Type &heap)
+	{
+		T* begin = heap.begin();
+		T* end = heap.end();
+
+		shdfnd::swap(begin[0], end[-1]); // exchange elements
+
+		// shift down
+		end--;
+
+		PxU32 current = 0;
+		while (begin + (current * 2 + 1) < end)
+		{
+			PxU32 child = current * 2 + 1;
+			if (begin + child + 1 < end && begin[child] < begin[child + 1])
+				++child;
+
+			if (!(begin[current] < begin[child]))
+				break;
+
+			shdfnd::swap(begin[current], begin[child]);
+			current = child;
+		}
+
+		return heap.popBack(); // former top, now at the back
+	}
+
+	// ---------------------------------------------------------------------------------------
+	// Heap element for distance propagation. operator< is deliberately
+	// reversed (compares v.distance < distance), so the max-heap helpers
+	// above behave as a min-heap on distance.
+	struct VertexDistanceCount
+	{
+		VertexDistanceCount(int vert, float dist, int count)
+			: vertNr(vert), distance(dist), edgeCount(count) {}
+
+		int vertNr;
+		float distance;
+		int edgeCount;
+		bool operator < (const VertexDistanceCount& v) const
+		{
+			return v.distance < distance;
+		}
+	};
+
+	// ---------------------------------------------------------------------------------------
+	// Result of intersecting a path with the mesh: either a vertex hit or a
+	// triangle-edge hit (vertOrTriangle discriminates; exact encoding used
+	// by code beyond this chunk — TODO confirm).
+	struct PathIntersection
+	{
+		PxU32 vertOrTriangle;
+		PxU32 index; // vertex id or triangle edge id
+		float s; // only used for edge intersection
+		float distance; // computed distance
+
+	public:
+		// default ctor leaves members uninitialized (filled in by caller)
+		PathIntersection() {}
+
+		PathIntersection(PxU32 vort, PxU32 in_index, float in_distance, float in_s = 0.0f)
+			: vertOrTriangle(vort), index(in_index), s(in_s), distance(in_distance)
+		{
+		}
+	};
+
+	//---------------------------------------------------------------------------------------
+	// (vertex, triangle) incidence pair; ordered lexicographically by vertex
+	// then triangle so sorting groups all triangles of a vertex together.
+	struct VertTriangle
+	{
+		VertTriangle(int vert, int triangle)
+			: mVertIndex(vert), mTriangleIndex(triangle)
+		{
+		}
+
+		bool operator<(const VertTriangle &vt) const
+		{
+			return mVertIndex == vt.mVertIndex ?
+				mTriangleIndex < vt.mTriangleIndex : mVertIndex < vt.mVertIndex;
+		}
+
+		int mVertIndex;
+		int mTriangleIndex;
+	};
+
+	// ---------------------------------------------------------------------------------------
+	// Undirected mesh edge: the constructor canonicalizes so that
+	// mFromVertIndex <= mToVertIndex, making ordering/equality orientation
+	// independent. mHalfEdgeIndex (index into the triangle index array)
+	// keeps the original orientation and does not take part in comparisons.
+	struct MeshEdge
+	{
+		MeshEdge(int v0, int v1, int halfEdgeIndex)
+			: mFromVertIndex(v0), mToVertIndex(v1), mHalfEdgeIndex(halfEdgeIndex)
+		{
+			if(mFromVertIndex > mToVertIndex)
+				shdfnd::swap(mFromVertIndex, mToVertIndex);
+		}
+
+		bool operator<(const MeshEdge& e) const
+		{
+			return mFromVertIndex == e.mFromVertIndex ?
+				mToVertIndex < e.mToVertIndex : mFromVertIndex < e.mFromVertIndex;
+		}
+
+		bool operator==(const MeshEdge& e) const
+		{
+			return mFromVertIndex == e.mFromVertIndex
+				&& mToVertIndex == e.mToVertIndex;
+		}
+
+		int mFromVertIndex, mToVertIndex;
+		int mHalfEdgeIndex;
+	};
+
+	// check if the edge is following triangle order or not, i.e. whether
+	// (mFromVertIndex, mToVertIndex) matches the direction in which the owning
+	// triangle traverses this half-edge.
+	bool checkEdgeOrientation(const MeshEdge &e, const nv::cloth::Vector<PxU32>::Type &indices)
+	{
+		// offset1 steps to the next vertex slot within the same triangle,
+		// wrapping from slot 2 back to slot 0 (hence -2)
+		int offset0 = e.mHalfEdgeIndex % 3;
+		int offset1 = (offset0 < 2) ? 1 : -2;
+
+		int v0 = int(indices[PxU32(e.mHalfEdgeIndex)]);
+		int v1 = int(indices[PxU32(e.mHalfEdgeIndex + offset1)]);
+
+		if ((e.mFromVertIndex == v0) && (e.mToVertIndex == v1))
+			return true;
+
+		return false;
+	}
+
+	// check if two index pairs represent same edge regardless of order.
+	// Used by computeEdgeIntersection to locate the shared edge inside a
+	// neighboring triangle.
+	inline bool checkEdge(int ei0, int ei1, int ej0, int ej1)
+	{
+		return ( (ei0 == ej0) && (ei1 == ej1) ) ||
+			   ( (ei0 == ej1) && (ei1 == ej0) );
+	}
+
+	// compute ray edge intersection.
+	// Solves the 2x2 normal-equation system for the parametric coordinates of
+	// the closest points between the edge line and the ray:
+	//   point on edge P = A + s * AB
+	//   point on ray  R = O + t * D
+	//   |AB -d| | s t | = o - A
+	// Returns false when the system is singular, i.e. when the ray and the edge
+	// are (near) parallel; otherwise outputs s and t.
+	bool intersectRayEdge(const PxVec3 &O, const PxVec3 &D, const PxVec3 &A, const PxVec3 &B, float &s, float &t)
+	{
+		// float literal: avoids an implicit double->float truncation warning
+		const float eps = 1e-4f;
+
+		PxVec3 OA = O - A;
+		PxVec3 AB = B - A;
+
+		float a = AB.dot(AB), b = -AB.dot(D);
+		float c = b, d = D.dot(D);
+
+		float e = AB.dot(OA);
+		float f = -D.dot(OA);
+
+		// det = |AB|^2 * |D|^2 - (AB.D)^2 >= 0, zero iff AB and D are parallel
+		float det = a * d - b * c;
+		if (fabsf(det) < eps) // (near) parallel / degenerate case
+			return false;
+
+		float inv_det = 1.0f / det;
+
+		s = (d * inv_det) * e + (-b * inv_det) * f;
+		t = (-c * inv_det) * e + (a * inv_det) * f;
+
+		return true;
+	}
+}
+
+
+// Tether cooker that anchors each particle to its nearest islands of attached
+// (zero inverse mass) vertices, measuring tether lengths along geodesic paths
+// over the mesh surface.
+struct ClothGeodesicTetherCooker : public ClothTetherCooker
+{
+
+	virtual bool cook(const ClothMeshDesc& desc) override;
+
+	virtual PxU32 getCookerStatus() const override;
+	virtual PxU32 getNbTethersPerParticle() const override;
+	virtual void getTetherData(PxU32* userTetherAnchors, PxReal* userTetherLengths) const override;
+
+public:
+	// internal variables
+	PxU32 mNumParticles;                             // particle count from the last cook()
+	nv::cloth::Vector<PxVec3>::Type mVertices;       // particle positions
+	nv::cloth::Vector<PxU32>::Type mIndices;         // triangle indices, 3 per triangle
+	nv::cloth::Vector<PxU8>::Type mAttached;         // 1 if the particle has zero inverse mass
+	nv::cloth::Vector<PxU32>::Type mFirstVertTriAdj; // per vertex: first index into mVertTriAdjs
+	nv::cloth::Vector<PxU32>::Type mVertTriAdjs;     // triangle ids adjacent to each vertex
+	nv::cloth::Vector<PxU32>::Type mTriNeighbors; // per half-edge: neighbor triangle id, needs changing for non-manifold support
+
+	// error status: 0 = ok, 1 = non-manifold, 2 = bad winding (see findTriNeighbors)
+	PxU32 mCookerStatus;
+
+	// output: nbTethersPerParticle blocks of mNumParticles entries each
+	nv::cloth::Vector<PxU32>::Type mTetherAnchors;
+	nv::cloth::Vector<PxReal>::Type mTetherLengths;
+
+protected:
+	void createTetherData(const ClothMeshDesc &desc);
+	int computeVertexIntersection(PxU32 parent, PxU32 src, PathIntersection &path);
+	int computeEdgeIntersection(PxU32 parent, PxU32 edge, float in_s, PathIntersection &path);
+	float computeGeodesicDistance(PxU32 i, PxU32 parent, int &errorCode);
+	PxU32 findTriNeighbors();
+	void findVertTriNeighbors();
+
+private:
+	ClothGeodesicTetherCooker& operator=(const ClothGeodesicTetherCooker&);
+};
+
+///////////////////////////////////////////////////////////////////////////////
+// Runs the cooker on the given mesh description; returns true when cooking
+// succeeded (getCookerStatus() == 0 afterwards).
+bool ClothGeodesicTetherCooker::cook(const ClothMeshDesc &desc)
+{
+	mCookerStatus = 0;
+	createTetherData(desc);
+	return getCookerStatus() == 0;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// Builds the tether data in several phases:
+//  1. copy particle positions and attachment flags (inverse mass == 0),
+//  2. build vertex-triangle and triangle-triangle adjacency,
+//  3. flood-fill attached vertices into islands,
+//  4. run a Dijkstra pass per island over the mesh edge graph,
+//  5. per particle, keep the closest islands (up to 4) and store one tether
+//     (anchor vertex + length) per kept island.
+// Leaves the output arrays empty when there are no inverse masses or no
+// attached vertices; sets mCookerStatus and aborts on adjacency errors.
+void ClothGeodesicTetherCooker::createTetherData(const ClothMeshDesc &desc)
+{
+	mNumParticles = desc.points.count;
+
+	// without inverse masses we cannot identify attachment points
+	if (!desc.invMasses.data)
+		return;
+
+	// assemble points
+	mVertices.resize(mNumParticles);
+	mAttached.resize(mNumParticles);
+	PxStrideIterator<const PxVec3> pIt(reinterpret_cast<const PxVec3*>(desc.points.data), desc.points.stride);
+	PxStrideIterator<const PxReal> wIt(reinterpret_cast<const PxReal*>(desc.invMasses.data), desc.invMasses.stride);
+	for(PxU32 i=0; i<mNumParticles; ++i)
+	{
+		mVertices[i] = *pIt++;
+		// a particle is "attached" when its inverse mass is exactly zero
+		mAttached[i] = PxU8(wIt.ptr() ? (*wIt++ == 0.0f) : 0);
+	}
+
+	// build triangle indices
+	if(desc.flags & MeshFlag::e16_BIT_INDICES)
+		gatherIndices<PxU16>(mIndices, desc.triangles, desc.quads);
+	else
+		gatherIndices<PxU32>(mIndices, desc.triangles, desc.quads);
+
+	// build vertex-triangle adjacencies
+	findVertTriNeighbors();
+
+	// build triangle-triangle adjacencies
+	mCookerStatus = findTriNeighbors();
+	if (mCookerStatus != 0)
+		return;
+
+	// build adjacent vertex list
+	nv::cloth::Vector<PxU32>::Type valency(mNumParticles+1, 0);
+	nv::cloth::Vector<PxU32>::Type adjacencies;
+	if(desc.flags & MeshFlag::e16_BIT_INDICES)
+		gatherAdjacencies<PxU16>(valency, adjacencies, desc.triangles, desc.quads);
+	else
+		gatherAdjacencies<PxU32>(valency, adjacencies, desc.triangles, desc.quads);
+
+	// build unique neighbors from adjacencies
+	// (mark[] deduplicates per vertex; valency[] is rewritten into a prefix
+	// table so neighbors of vertex v are neighbors[valency[v]..valency[v+1]))
+	nv::cloth::Vector<PxU32>::Type mark(valency.size(), 0);
+	nv::cloth::Vector<PxU32>::Type neighbors; neighbors.reserve(adjacencies.size());
+	for(PxU32 i=1, j=0; i<valency.size(); ++i)
+	{
+		for(; j<valency[i]; ++j)
+		{
+			PxU32 k = adjacencies[j];
+			if(mark[k] != i)
+			{
+				mark[k] = i;
+				neighbors.pushBack(k);
+			}
+		}
+		valency[i] = neighbors.size();
+	}
+
+	// create islands of attachment points
+	nv::cloth::Vector<PxU32>::Type vertexIsland(mNumParticles);
+	nv::cloth::Vector<VertexDistanceCount>::Type vertexIslandHeap;
+
+	// put all the attachments in heap
+	for (PxU32 i = 0; i < mNumParticles; ++i)
+	{
+		// we put each attached point with large distance so that
+		// we can prioritize things that are added during mesh traversal.
+		vertexIsland[i] = PxU32(-1);
+		if (mAttached[i])
+			vertexIslandHeap.pushBack(VertexDistanceCount(int(i), FLT_MAX, 0));
+	}
+	PxU32 attachedCnt = vertexIslandHeap.size();
+
+	// no attached vertices
+	if (vertexIslandHeap.empty())
+		return;
+
+	// identify islands of attached vertices
+	nv::cloth::Vector<PxU32>::Type islandIndices;
+	nv::cloth::Vector<PxU32>::Type islandFirst;
+	PxU32 islandCnt = 0;
+	PxU32 islandIndexCnt = 0;
+
+	islandIndices.reserve(attachedCnt);
+	islandFirst.reserve(attachedCnt+1);
+
+	// while the island heap is not empty
+	while (!vertexIslandHeap.empty())
+	{
+		// pop vi from heap
+		VertexDistanceCount vi = popHeap<VertexDistanceCount>(vertexIslandHeap);
+
+		// new cluster
+		if (vertexIsland[PxU32(vi.vertNr)] == PxU32(-1))
+		{
+			islandFirst.pushBack(islandIndexCnt++);
+			vertexIsland[PxU32(vi.vertNr)] = islandCnt++;
+			vi.distance = 0;
+			islandIndices.pushBack(PxU32(vi.vertNr));
+		}
+
+		// for each adjacent vj that's not visited
+		const PxU32 begin = PxU32(valency[PxU32(vi.vertNr)]);
+		const PxU32 end = PxU32(valency[PxU32(vi.vertNr + 1)]);
+		for (PxU32 j = begin; j < end; ++j)
+		{
+			const PxU32 vj = neighbors[j];
+
+			// do not expand unattached vertices
+			if (!mAttached[vj])
+				continue;
+
+			// already visited
+			if (vertexIsland[vj] != PxU32(-1))
+				continue;
+
+			islandIndices.pushBack(vj);
+			islandIndexCnt++;
+			vertexIsland[vj] = vertexIsland[PxU32(vi.vertNr)];
+			pushHeap(vertexIslandHeap, VertexDistanceCount(int(vj), vi.distance + 1.0f, 0));
+		}
+	}
+
+	islandFirst.pushBack(islandIndexCnt);
+
+	NV_CLOTH_ASSERT(islandCnt == (islandFirst.size() - 1));
+
+	/////////////////////////////////////////////////////////
+	// per-island distance and parent fields over all particles
+	PxU32 bufferSize = mNumParticles * islandCnt;
+	NV_CLOTH_ASSERT(bufferSize > 0);
+
+	nv::cloth::Vector<float>::Type vertexDistanceBuffer(bufferSize, PX_MAX_F32);
+	nv::cloth::Vector<PxU32>::Type vertexParentBuffer(bufferSize, 0);
+	nv::cloth::Vector<VertexDistanceCount>::Type vertexHeap;
+
+	// now process each island
+	for (PxU32 i = 0; i < islandCnt; i++)
+	{
+		vertexHeap.clear();
+		float* vertexDistance = &vertexDistanceBuffer[0] + (i * mNumParticles);
+		PxU32* vertexParent = &vertexParentBuffer[0] + (i * mNumParticles);
+
+		// initialize parent and distance
+		for (PxU32 j = 0; j < mNumParticles; ++j)
+		{
+			vertexParent[j] = j;
+			vertexDistance[j] = PX_MAX_F32;
+		}
+
+		// put all the attached vertices in this island to heap
+		const PxU32 beginIsland = islandFirst[i];
+		const PxU32 endIsland = islandFirst[i+1];
+		for (PxU32 j = beginIsland; j < endIsland; j++)
+		{
+			PxU32 vj = islandIndices[j];
+			vertexDistance[vj] = 0.0f;
+			vertexHeap.pushBack(VertexDistanceCount(int(vj), 0.0f, 0));
+		}
+
+		// no attached vertices in this island (error?)
+		NV_CLOTH_ASSERT(vertexHeap.empty() == false);
+		if (vertexHeap.empty())
+			continue;
+
+		// while heap is not empty (standard Dijkstra over the mesh edge graph)
+		while (!vertexHeap.empty())
+		{
+			// pop vi from heap
+			VertexDistanceCount vi = popHeap<VertexDistanceCount>(vertexHeap);
+
+			// obsolete entry ( we already found better distance)
+			if (vi.distance > vertexDistance[vi.vertNr])
+				continue;
+
+			// for each adjacent vj that's not visited
+			const PxI32 begin = PxI32(valency[PxU32(vi.vertNr)]);
+			const PxI32 end = PxI32(valency[PxU32(vi.vertNr + 1)]);
+			for (PxI32 j = begin; j < end; ++j)
+			{
+				const PxI32 vj = PxI32(neighbors[PxU32(j)]);
+				PxVec3 edge = mVertices[PxU32(vj)] - mVertices[PxU32(vi.vertNr)];
+				const PxF32 edgeLength = edge.magnitude();
+				float newDistance = vi.distance + edgeLength;
+
+				if (newDistance < vertexDistance[vj])
+				{
+					vertexDistance[vj] = newDistance;
+					// propagate the island's anchor (source) vertex down the path
+					vertexParent[vj] = vertexParent[vi.vertNr];
+
+					pushHeap(vertexHeap, VertexDistanceCount(vj, newDistance, 0));
+				}
+			}
+		}
+	}
+
+	const PxU32 maxTethersPerParticle = 4; // max tethers
+	const PxU32 nbTethersPerParticle = (islandCnt > maxTethersPerParticle) ? maxTethersPerParticle : islandCnt;
+
+	PxU32 nbTethers = nbTethersPerParticle * mNumParticles;
+	mTetherAnchors.resize(nbTethers);
+	mTetherLengths.resize(nbTethers);
+
+	// now process the parent and distance and add to fibers
+	for (PxU32 i = 0; i < mNumParticles; i++)
+	{
+		// we use the heap to sort out N-closest island
+		vertexHeap.clear();
+		for (PxU32 j = 0; j < islandCnt; j++)
+		{
+			int parent = int(vertexParentBuffer[j * mNumParticles + i]);
+			float edgeDistance = vertexDistanceBuffer[j * mNumParticles + i];
+			pushHeap(vertexHeap, VertexDistanceCount(parent, edgeDistance, 0));
+		}
+
+		// take out N-closest island from the heap
+		for (PxU32 j = 0; j < nbTethersPerParticle; j++)
+		{
+			VertexDistanceCount vi = popHeap<VertexDistanceCount>(vertexHeap);
+			PxU32 parent = PxU32(vi.vertNr);
+			float distance = 0.0f;
+
+			if (parent != i)
+			{
+				// prefer the traced geodesic distance; fall back to the Dijkstra
+				// distance when tracing fails, and never go below the straight line
+				float euclideanDistance = (mVertices[i] - mVertices[parent]).magnitude();
+				float dijkstraDistance = vi.distance;
+				int errorCode = 0;
+				float geodesicDistance = computeGeodesicDistance(i,parent, errorCode);
+				if (errorCode < 0)
+					geodesicDistance = dijkstraDistance;
+				distance = PxMax(euclideanDistance, geodesicDistance);
+			}
+
+			PxU32 tetherLoc = j * mNumParticles + i;
+			mTetherAnchors[ tetherLoc ] = parent;
+			mTetherLengths[ tetherLoc ] = distance;
+		}
+	}
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// Returns 0 on success, 1 for a non-manifold mesh, 2 for inconsistent
+// triangle winding (codes produced by findTriNeighbors).
+PxU32 ClothGeodesicTetherCooker::getCookerStatus() const
+{
+	return mCookerStatus;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// Number of tethers stored per particle (0..4).
+// Guard against division by zero when cook() was never run or the mesh had no
+// particles; in that case no tethers exist and 0 is the correct answer.
+PxU32 ClothGeodesicTetherCooker::getNbTethersPerParticle() const
+{
+	return mNumParticles ? mTetherAnchors.size() / mNumParticles : 0;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// Copies the cooked tether data into caller-provided arrays; both arrays must
+// hold getNbTethersPerParticle() * particle-count elements.
+void
+ClothGeodesicTetherCooker::getTetherData(PxU32* userTetherAnchors, PxReal* userTetherLengths) const
+{
+	PxMemCopy(userTetherAnchors, mTetherAnchors.begin(), mTetherAnchors.size() * sizeof(PxU32));
+	PxMemCopy(userTetherLengths, mTetherLengths.begin(), mTetherLengths.size() * sizeof(PxReal));
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// find triangle-triangle adjacency (return non-zero if there is an error:
+// 1 = non-manifold edge, 2 = inconsistent winding)
+PxU32 ClothGeodesicTetherCooker::findTriNeighbors()
+{
+	nv::cloth::Vector<MeshEdge>::Type edges;
+
+	// one neighbor slot per half-edge; PxU32(-1) marks a boundary edge
+	mTriNeighbors.resize(mIndices.size(), PxU32(-1));
+
+	// assemble all edges
+	PxU32 numTriangles = mIndices.size() / 3;
+	for (PxU32 i = 0; i < numTriangles; ++i)
+	{
+		PxU32 i0 = mIndices[3 * i];
+		PxU32 i1 = mIndices[3 * i + 1];
+		PxU32 i2 = mIndices[3 * i + 2];
+		edges.pushBack(MeshEdge(int(i0), int(i1), int(3*i)));
+		edges.pushBack(MeshEdge(int(i1), int(i2), int(3*i+1)));
+		edges.pushBack(MeshEdge(int(i2), int(i0), int(3*i+2)));
+	}
+
+	// sort so that duplicate (shared) edges become adjacent in the array
+	shdfnd::sort(edges.begin(), edges.size(), shdfnd::Less<MeshEdge>(), NonTrackingAllocator());
+
+	int numEdges = int(edges.size());
+	for(int i=0; i < numEdges; )
+	{
+		const MeshEdge& e0 = edges[PxU32(i)];
+		bool orientation0 = checkEdgeOrientation(e0, mIndices);
+
+		// advance i past the run of edges equal to e0
+		int j = i;
+		while(++i < numEdges && edges[PxU32(i)] == e0)
+			;
+
+		// more than two triangles share this edge
+		if(i - j > 2)
+			return 1; // non-manifold
+
+		// link the (at most one) other half-edge that shares this edge
+		while(++j < i)
+		{
+			const MeshEdge& e1 = edges[PxU32(j)];
+			bool orientation1 = checkEdgeOrientation(e1, mIndices);
+			mTriNeighbors[PxU32(e0.mHalfEdgeIndex)] = PxU32(e1.mHalfEdgeIndex/3);
+			mTriNeighbors[PxU32(e1.mHalfEdgeIndex)] = PxU32(e0.mHalfEdgeIndex/3);
+
+			// both triangles traverse the shared edge in the same direction
+			// -> their windings are inconsistent
+			if (orientation0 == orientation1)
+				return 2; // bad winding
+		}
+	}
+
+	return 0;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// find vertex triangle adjacency information: fills mFirstVertTriAdj (start of
+// each vertex's run) and mVertTriAdjs (triangle ids, grouped by vertex).
+void ClothGeodesicTetherCooker::findVertTriNeighbors()
+{
+	nv::cloth::Vector<VertTriangle>::Type vertTriangles;
+	vertTriangles.reserve(mIndices.size());
+
+	// one (vertex, triangle) pair per triangle corner
+	int numTriangles = int(mIndices.size() / 3);
+	for (int i = 0; i < numTriangles; ++i)
+	{
+		vertTriangles.pushBack(VertTriangle(int(mIndices[PxU32(3*i)]), i));
+		vertTriangles.pushBack(VertTriangle(int(mIndices[PxU32(3*i+1)]), i));
+		vertTriangles.pushBack(VertTriangle(int(mIndices[PxU32(3*i+2)]), i));
+	}
+
+	// sort by vertex so each vertex's triangles form a contiguous run
+	shdfnd::sort(vertTriangles.begin(), vertTriangles.size(), shdfnd::Less<VertTriangle>(), NonTrackingAllocator());
+	// NOTE(review): entries of mFirstVertTriAdj for vertices referenced by no
+	// triangle are never written below — confirm such dangling vertices cannot
+	// reach computeVertexIntersection.
+	mFirstVertTriAdj.resize(mNumParticles);
+	mVertTriAdjs.reserve(mIndices.size());
+
+	for (PxU32 i = 0; i < PxU32(vertTriangles.size()); )
+	{
+		int v = vertTriangles[i].mVertIndex;
+
+		mFirstVertTriAdj[PxU32(v)] = i;
+
+		// mIndices.size() == vertTriangles.size(), so this bound is the array end
+		while ((i < mIndices.size()) && (vertTriangles[i].mVertIndex == v))
+		{
+			int t = vertTriangles[i].mTriangleIndex;
+			mVertTriAdjs.pushBack(PxU32(t));
+			i++;
+		}
+	}
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// compute intersection of a ray from a source vertex in direction toward parent.
+// Scans the triangles incident on 'src' and reports the first feature the path
+// reaches in 'path' (a vertex or a crossed triangle edge).
+// Returns 0 if src == parent, 1 on success, -1 on failure (e.g. dangling vertex).
+int ClothGeodesicTetherCooker::computeVertexIntersection(PxU32 parent, PxU32 src, PathIntersection &path)
+{
+	if (src == parent)
+	{
+		path = PathIntersection(true, src, 0.0);
+		return 0;
+	}
+
+	float maxdot = -1.0f;
+	int closestVert = -1; // -1 means "no fallback candidate found yet"
+
+	// gradient is toward the parent vertex
+	PxVec3 g = (mVertices[parent] - mVertices[src]).getNormalized();
+
+	// for every triangle incident on this vertex, we intersect against opposite edge of the triangle
+	PxU32 sfirst = mFirstVertTriAdj[src];
+	PxU32 slast = (src < (PxU32(mNumParticles-1))) ? mFirstVertTriAdj[src+1] : PxU32(mVertTriAdjs.size());
+	for (PxU32 adj = sfirst; adj < slast; adj++)
+	{
+		PxU32 tid = mVertTriAdjs[adj];
+
+		PxU32 i0 = mIndices[tid*3];
+		PxU32 i1 = mIndices[tid*3+1];
+		PxU32 i2 = mIndices[tid*3+2];
+
+		int eid = 0;
+		if (i0 == src) eid = 1;
+		else if (i1 == src) eid = 2;
+		else if (i2 == src) eid = 0;
+		else continue; // error
+
+		// reshuffle so that src is located at i2
+		i0 = mIndices[tid*3 + eid];
+		i1 = mIndices[tid*3 + (eid+1)%3];
+		i2 = src;
+
+		PxVec3 p0 = mVertices[i0];
+		PxVec3 p1 = mVertices[i1];
+		PxVec3 p2 = mVertices[i2];
+
+		// check if we hit source immediately from this triangle
+		if (i0 == parent)
+		{
+			path = PathIntersection(true, parent, (p0 - p2).magnitude());
+			return 1;
+		}
+
+		if (i1 == parent)
+		{
+			path = PathIntersection(true, parent, (p1 - p2).magnitude());
+			return 1;
+		}
+
+		// ray direction is the gradient projected on the plane of this triangle
+		PxVec3 n = ((p0 - p2).cross(p1 - p2)).getNormalized();
+		PxVec3 d = (g - g.dot(n) * n).getNormalized();
+
+		// find intersection of ray (p2, d) against the edge (p0,p1)
+		float s, t;
+		bool result = intersectRayEdge(p2, d, p0, p1, s, t);
+		if (result == false)
+			continue;
+
+		// t should be positive, otherwise we just hit the triangle in opposite direction, so ignore
+		const float eps = 1e-5f; // float literal avoids implicit double->float truncation
+		if (t > -eps)
+		{
+			PxVec3 ip; // intersection point
+			if (( s > -eps ) && (s < (1.0f + eps)))
+			{
+				// if intersection point is too close to either vertex, we record a vertex intersection
+				if ( ( s < eps) || (s > (1.0f-eps)))
+				{
+					path.vertOrTriangle = true;
+					path.index = (s < eps) ? i0 : i1;
+					path.distance = (p2 - mVertices[path.index]).magnitude();
+				}
+				else // found an edge intersection
+				{
+					ip = p0 + s * (p1 - p0);
+					path = PathIntersection(false, tid*3 + eid, (p2 - ip).magnitude(), s);
+				}
+				return 1;
+			}
+		}
+
+		// for fall back (see below)
+		PxVec3 d0 = (p0 - p2).getNormalized();
+		PxVec3 d1 = (p1 - p2).getNormalized();
+		float d0dotg = d0.dot(d);
+		float d1dotg = d1.dot(d);
+
+		if (d0dotg > maxdot)
+		{
+			closestVert = int(i0);
+			maxdot = d0dotg;
+		}
+		if (d1dotg > maxdot)
+		{
+			closestVert = int(i1);
+			maxdot = d1dotg;
+		}
+	} // end for (PxU32 adj = sfirst...
+
+	// Fall back to use greedy (Dijkstra-like) path selection.
+	// This happens as triangles are curved and we may not find intersection on any triangle.
+	// In this case, we choose a vertex closest to the gradient direction.
+	// BUGFIX: test with >= 0 (was > 0) so that vertex index 0 is accepted as a
+	// valid fallback target; the stricter test rejected it and reported an error.
+	if (closestVert >= 0)
+	{
+		path = PathIntersection(true, PxU32(closestVert), (mVertices[src] - mVertices[PxU32(closestVert)]).magnitude());
+		return 1;
+	}
+
+	// Error, (possibly dangling vertex)
+	return -1;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// compute intersection of a ray from a point on half-edge 'edge' (crossed at
+// parametric position in_s) through the neighboring triangle toward parent.
+// Always returns 1 and fills 'path' with the next vertex or crossed edge; at
+// boundary edges or when no edge intersects, falls back to the closer endpoint.
+int ClothGeodesicTetherCooker::computeEdgeIntersection(PxU32 parent, PxU32 edge, float in_s, PathIntersection &path)
+{
+	int tid = int(edge / 3);
+	int eid = int(edge % 3);
+
+	PxU32 e0 = mIndices[PxU32(tid*3 + eid)];
+	PxU32 e1 = mIndices[PxU32(tid*3 + (eid+1)%3)];
+
+	PxVec3 v0 = mVertices[e0];
+	PxVec3 v1 = mVertices[e1];
+
+	// current point on the edge and gradient toward the parent
+	PxVec3 v = v0 + in_s * (v1 - v0);
+	PxVec3 g = mVertices[parent] - v;
+
+	PxU32 triNbr = mTriNeighbors[edge];
+
+	if (triNbr == PxU32(-1)) // boundary edge
+	{
+		// cannot cross: continue along the edge toward the endpoint that lies
+		// in the gradient direction
+		float dir = g.dot(v1-v0);
+		PxU32 vid = (dir > 0) ? e1 : e0;
+		path = PathIntersection(true, vid, (mVertices[vid] - v).magnitude());
+		return 1;
+	}
+
+	PxU32 i0 = mIndices[triNbr*3];
+	PxU32 i1 = mIndices[triNbr*3+1];
+	PxU32 i2 = mIndices[triNbr*3+2];
+
+	// vertex is sorted s.t i0,i1 contains the edge point
+	if ( checkEdge(int(i0), int(i1), int(e0), int(e1))) {
+		eid = 0;
+	}
+	else if ( checkEdge(int(i1), int(i2), int(e0), int(e1))) {
+		eid = 1;
+		PxU32 tmp = i2;
+		i2 = i0;
+		i0 = i1;
+		i1 = tmp;
+	}
+	else if ( checkEdge(int(i2), int(i0), int(e0), int(e1)))
+	{
+		eid = 2;
+		PxU32 tmp = i0;
+		i0 = i2;
+		i2 = i1;
+		i1 = tmp;
+	}
+
+	// we hit the parent
+	if (i2 == parent)
+	{
+		path = PathIntersection(true, i2, (mVertices[i2] - v).magnitude());
+		return 1;
+	}
+
+	PxVec3 p0 = mVertices[i0];
+	PxVec3 p1 = mVertices[i1];
+	PxVec3 p2 = mVertices[i2];
+
+	// project gradient vector on the plane of the triangle
+	PxVec3 n = ((p0 - p2).cross(p1 - p2)).getNormalized();
+	g = (g - g.dot(n) * n).getNormalized();
+
+	float s = 0.0f, t = 0.0f;
+	const float eps = 1e-5;
+	PxVec3 ip;
+
+	// intersect against edge from p2 to p0
+	if (intersectRayEdge(v, g, p2, p0, s, t) && ( s >= -eps) && ( s <= (1.0f+eps) ) && (t > -eps))
+	{
+		// intersections very close to an endpoint are recorded as vertex hits
+		if ( ( s < eps) || (s > (1.0f-eps)))
+		{
+			path.vertOrTriangle = true;
+			path.index = (s < eps) ? i2 : i0;
+			path.distance = (mVertices[path.index] - v).magnitude();
+		}
+		else
+		{
+			ip = p2 + s * (p0 - p2);
+			path = PathIntersection(false, triNbr*3 + (eid + 2) % 3, (ip - v).magnitude(), s);
+
+		}
+
+		return 1;
+	}
+
+	// intersect against edge from p1 to p2
+	if (intersectRayEdge(v, g, p1, p2, s, t) && ( s >= -eps) && ( s <= (1.0f+eps) ) && (t > -eps))
+	{
+		if ( ( s < eps) || (s > (1.0f-eps)))
+		{
+			path.vertOrTriangle = true;
+			path.index = (s < eps) ? i1 : i2;
+			path.distance = (mVertices[path.index] - v).magnitude();
+		}
+		else
+		{
+			ip = p1 + s * (p2 - p1);
+			path = PathIntersection(false, triNbr*3 + (eid + 1) % 3, (ip - v).magnitude(), s);
+		}
+
+		return 1;
+	}
+
+	// fallback to pick closer vertex when no edges intersect
+	float dir = g.dot(v1-v0);
+	path.vertOrTriangle = true;
+	path.index = (dir > 0) ? e1 : e0;
+	path.distance = (mVertices[path.index] - v).magnitude();
+
+	return 1;
+}
+
+
+///////////////////////////////////////////////////////////////////////////////
+// compute geodesic distance and path from vertex i to its parent by summing
+// segment lengths along the traced surface path.
+// errorCode: 0 on success, -1 no initial direction found (dangling vertex),
+// -2 path lost mid-walk, -3 aborted after 1000 segments (possible cycle);
+// on any error 0 is returned and the caller falls back to another metric.
+float ClothGeodesicTetherCooker::computeGeodesicDistance(PxU32 i, PxU32 parent, int &errorCode)
+{
+	if (i == parent)
+		return 0.0f;
+
+	PathIntersection path;
+
+	errorCode = 0;
+
+	// find initial intersection
+	int status = computeVertexIntersection(parent, i, path);
+	if (status < 0)
+	{
+		errorCode = -1;
+		return 0;
+	}
+
+	int pathcnt = 0;
+	float geodesicDistance = 0;
+
+	// keep stepping (status == 1) until the parent vertex is reached (status == 0)
+	while (status > 0)
+	{
+		geodesicDistance += path.distance;
+
+		if (path.vertOrTriangle)
+			status = computeVertexIntersection(parent, path.index, path);
+		else
+			status = computeEdgeIntersection(parent, path.index, path.s, path);
+
+		// cannot find valid path
+		if (status < 0)
+		{
+			errorCode = -2;
+			return 0.0f;
+		}
+
+		// possibly cycles, too many path
+		if (pathcnt > 1000)
+		{
+			errorCode = -3;
+			return 0.0f;
+		}
+
+		pathcnt++;
+	}
+
+	return geodesicDistance;
+}
+
+} // namespace cloth
+} // namespace nv
+
+// Factory entry point: allocates a geodesic tether cooker on the NvCloth heap.
+NV_CLOTH_API(nv::cloth::ClothTetherCooker*) NvClothCreateGeodesicTetherCooker()
+{
+	return NV_CLOTH_NEW(nv::cloth::ClothGeodesicTetherCooker);
+}
diff --git a/NvCloth/extensions/src/ClothMeshQuadifier.cpp b/NvCloth/extensions/src/ClothMeshQuadifier.cpp
new file mode 100644
index 0000000..4ae081e
--- /dev/null
+++ b/NvCloth/extensions/src/ClothMeshQuadifier.cpp
@@ -0,0 +1,443 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2017 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#include "foundation/PxStrideIterator.h"
+#include "NvClothExt/ClothMeshQuadifier.h"
+
+// from shared foundation
+#include <PsSort.h>
+#include <Ps.h>
+#include <PsMathUtils.h>
+#include "NvCloth/Allocator.h"
+
+using namespace physx;
+
+namespace nv
+{
+namespace cloth
+{
+
+// Quadifier implementation: merges pairs of triangles sharing a hidden
+// "diagonal" edge into quads and exposes the resulting mixed mesh.
+struct ClothMeshQuadifierImpl : public ClothMeshQuadifier
+{
+	virtual bool quadify(const ClothMeshDesc& desc) override;
+	ClothMeshDesc getDescriptor() const override;
+
+public:
+	ClothMeshDesc mDesc; // NOTE(review): presumably mirrors the last quadified input — confirm in quadify()
+	nv::cloth::Vector<PxU32>::Type mQuads;     // generated quad indices, 4 per quad
+	nv::cloth::Vector<PxU32>::Type mTriangles; // remaining triangle indices, 3 per triangle
+};
+
+namespace
+{
+	// A mesh edge keyed by its sorted endpoints (vertex0 <= vertex1).
+	// vertex2/vertex3 hold the opposite vertices of the (up to two) triangles
+	// sharing the edge; vertex3 stays 0xffffffff while the edge is unpaired.
+	// isQuadDiagonal marks an edge chosen to be hidden so its two triangles
+	// can be merged into a quad.
+	struct UniqueEdge
+	{
+		// comparator form so a UniqueEdge instance can be passed to shdfnd::sort
+		PX_FORCE_INLINE bool operator()(const UniqueEdge& e1, const UniqueEdge& e2) const
+		{
+			return e1 < e2;
+		}
+
+		// equality/ordering use only the sorted endpoints, not vertex2/vertex3
+		PX_FORCE_INLINE bool operator==(const UniqueEdge& other) const
+		{
+			return vertex0 == other.vertex0 && vertex1 == other.vertex1;
+		}
+		PX_FORCE_INLINE bool operator<(const UniqueEdge& other) const
+		{
+			if (vertex0 != other.vertex0)
+			{
+				return vertex0 < other.vertex0;
+			}
+
+			return vertex1 < other.vertex1;
+		}
+
+		///////////////////////////////////////////////////////////////////////////////
+		UniqueEdge()
+			: vertex0(0), vertex1(0), vertex2(0), vertex3(0xffffffff),
+			maxAngle(0.0f), isQuadDiagonal(false), isUsed(false) {}
+
+		UniqueEdge(PxU32 v0, PxU32 v1, PxU32 v2)
+			: vertex0(PxMin(v0, v1)), vertex1(PxMax(v0, v1)), vertex2(v2), vertex3(0xffffffff),
+			maxAngle(0.0f), isQuadDiagonal(false), isUsed(false) {}
+
+
+		PxU32 vertex0, vertex1; // sorted edge endpoints
+		PxU32 vertex2, vertex3; // opposite vertices of the adjacent triangles (0xffffffff = none)
+		PxF32 maxAngle;         // largest |cos| corner angle of the candidate quad (see refineUniqueEdges)
+		bool isQuadDiagonal;    // edge is the hidden diagonal of a quad
+		bool isUsed;            // NOTE(review): not referenced in the visible code — confirm usage elsewhere
+	};
+
+	// Orders edge indices by ascending maxAngle so the "squarest" quad
+	// candidates are considered first in refineUniqueEdges.
+	struct SortHiddenEdges
+	{
+		SortHiddenEdges(nv::cloth::Vector<UniqueEdge>::Type& uniqueEdges) : mUniqueEdges(uniqueEdges) {}
+
+		bool operator()(PxU32 a, PxU32 b) const
+		{
+			return mUniqueEdges[a].maxAngle < mUniqueEdges[b].maxAngle;
+		}
+
+	private:
+		SortHiddenEdges& operator=(const SortHiddenEdges&);
+		nv::cloth::Vector<UniqueEdge>::Type& mUniqueEdges;
+	};
+
+ ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+	// Widen the descriptor's (possibly 16-bit) triangle and quad indices into
+	// flat PxU32 arrays: 3 entries per triangle, 4 per quad.
+	template <typename T>
+	void copyIndices(const ClothMeshDesc &desc, nv::cloth::Vector<PxU32>::Type &triangles, nv::cloth::Vector<PxU32>::Type &quads)
+	{
+		triangles.resize(desc.triangles.count*3);
+		PxStrideIterator<const T> tIt = PxMakeIterator(reinterpret_cast<const T*>(desc.triangles.data), desc.triangles.stride);
+		for(PxU32 i=0; i<desc.triangles.count; ++i, ++tIt)
+			for(PxU32 j=0; j<3; ++j)
+				triangles[i*3+j] = tIt.ptr()[j];
+
+		quads.resize(desc.quads.count*4);
+		PxStrideIterator<const T> qIt = PxMakeIterator(reinterpret_cast<const T*>(desc.quads.data), desc.quads.stride);
+		for(PxU32 i=0; i<desc.quads.count; ++i, ++qIt)
+			for(PxU32 j=0; j<4; ++j)
+				quads[i*4+j] = qIt.ptr()[j];
+	}
+
+ ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+	// Builds one UniqueEdge record per triangle edge, merges duplicates (shared
+	// edges), and marks the longest edge of each triangle as a quad-diagonal
+	// candidate when the opposite corner is close to a right angle. An edge
+	// remains a diagonal only if both of its triangles voted for it.
+	void computeUniqueEdges(nv::cloth::Vector<UniqueEdge>::Type &uniqueEdges, const PxVec3* positions, const nv::cloth::Vector<PxU32>::Type& triangles)
+	{
+		uniqueEdges.resize(0);
+		uniqueEdges.reserve(triangles.size());
+
+		// rows: (edge v0, edge v1, opposite vertex) for the three triangle edges
+		PxU32 indexMap[3][3] = { { 0, 1, 2 }, { 1, 2, 0 }, { 0, 2, 1 } };
+
+		const PxF32 rightAngle = PxCos(shdfnd::degToRad(85.0f));
+
+		for(PxU32 i=0; i<triangles.size(); i+=3)
+		{
+			UniqueEdge edges[3];
+			PxF32 edgeLengths[3];
+			PxF32 edgeAngles[3];
+
+			for (PxU32 j = 0; j < 3; j++)
+			{
+				edges[j] = UniqueEdge(triangles[i+indexMap[j][0]], triangles[i+indexMap[j][1]], triangles[i+indexMap[j][2]]);
+				edgeLengths[j] = (positions[edges[j].vertex0] - positions[edges[j].vertex1]).magnitude();
+				const PxVec3 v1 = positions[edges[j].vertex2] - positions[edges[j].vertex0];
+				const PxVec3 v2 = positions[edges[j].vertex2] - positions[edges[j].vertex1];
+				// |cos| of the angle at the opposite vertex
+				// NOTE(review): a degenerate (zero-length-edge) triangle would divide by zero here
+				edgeAngles[j] = PxAbs(v1.dot(v2)) / (v1.magnitude() * v2.magnitude());
+			}
+
+			// find the longest edge
+			PxU32 longest = 0;
+			for (PxU32 j = 1; j < 3; j++)
+			{
+				if (edgeLengths[j] > edgeLengths[longest])
+					longest = j;
+			}
+
+			// check its angle: |cos| below cos(85 deg) means the opposite corner
+			// is nearly a right angle, so this edge looks like a quad diagonal
+			if (edgeAngles[longest] < rightAngle)
+				edges[longest].isQuadDiagonal = true;
+
+			for (PxU32 j = 0; j < 3; j++)
+				uniqueEdges.pushBack(edges[j]);
+		}
+
+		// sort and collapse runs of duplicate edges in place
+		shdfnd::sort(uniqueEdges.begin(), uniqueEdges.size(), UniqueEdge(0, 0, 0), NonTrackingAllocator());
+
+		PxU32 writeIndex = 0, readStart = 0, readEnd = 0;
+		PxU32 numQuadEdges = 0;
+		while (readEnd < uniqueEdges.size())
+		{
+			while (readEnd < uniqueEdges.size() && uniqueEdges[readStart] == uniqueEdges[readEnd])
+				readEnd++;
+
+			const PxU32 count = readEnd - readStart;
+
+			UniqueEdge uniqueEdge = uniqueEdges[readStart];
+
+			if (count == 2)
+				// know the other diagonal
+				uniqueEdge.vertex3 = uniqueEdges[readStart + 1].vertex2;
+			else
+				uniqueEdge.isQuadDiagonal = false;
+
+			// keep the diagonal flag only if every duplicate agrees
+			for (PxU32 i = 1; i < count; i++)
+				uniqueEdge.isQuadDiagonal &= uniqueEdges[readStart + i].isQuadDiagonal;
+
+			numQuadEdges += uniqueEdge.isQuadDiagonal ? 1 : 0;
+
+			uniqueEdges[writeIndex] = uniqueEdge;
+
+			writeIndex++;
+			readStart = readEnd;
+		}
+
+		uniqueEdges.resize(writeIndex, UniqueEdge(0, 0, 0));
+	}
+
+ ///////////////////////////////////////////////////////////////////////////////
+	// Binary search (lower_bound) over the sorted uniqueEdges array for the
+	// edge (index1, index2); ordering uses only the sorted endpoints, so the
+	// dummy vertex2 of the search key is irrelevant. Returns the first position
+	// whose edge is not less than the key — callers expect an exact hit.
+	PxU32 findUniqueEdge(const nv::cloth::Vector<UniqueEdge>::Type &uniqueEdges, PxU32 index1, PxU32 index2)
+	{
+		UniqueEdge searchFor(index1, index2, 0);
+
+		PxU32 curMin = 0;
+		PxU32 curMax = uniqueEdges.size();
+		while (curMax > curMin)
+		{
+			PxU32 middle = (curMin + curMax) >> 1;
+
+			const UniqueEdge& probe = uniqueEdges[middle];
+			if (probe < searchFor)
+				curMin = middle + 1;
+			else
+				curMax = middle;
+		}
+
+		return curMin;
+	}
+
+ ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+	// Ranks paired edges (quad candidates) by how square their quad would be
+	// (smallest max |cos| corner angle first) and greedily hides diagonals,
+	// skipping quads whose four border edges are not all still visible.
+	void refineUniqueEdges(nv::cloth::Vector<UniqueEdge>::Type &uniqueEdges, const PxVec3* positions)
+	{
+		nv::cloth::Vector<PxU32>::Type hideEdges;
+		hideEdges.reserve(uniqueEdges.size());
+
+		for (PxU32 i = 0; i < uniqueEdges.size(); i++)
+		{
+			UniqueEdge& uniqueEdge = uniqueEdges[i];
+			uniqueEdge.maxAngle = 0.0f;
+			uniqueEdge.isQuadDiagonal = false; // just to be sure
+
+			// only edges shared by two triangles can become quad diagonals
+			if (uniqueEdge.vertex3 != 0xffffffff)
+			{
+				// the four quad corners in winding order
+				PxU32 indices[4] = { uniqueEdge.vertex0, uniqueEdge.vertex2, uniqueEdge.vertex1, uniqueEdge.vertex3 };
+
+				// compute max angle of the quad
+				for (PxU32 j = 0; j < 4; j++)
+				{
+					PxVec3 e0 = positions[indices[ j + 0 ]] - positions[indices[(j + 1) % 4]];
+					PxVec3 e1 = positions[indices[(j + 1) % 4]] - positions[indices[(j + 2) % 4]];
+
+					//From physx
+					//PxF32 cosAngle = PxAbs(e0.dot(e1)) / (e0.magnitude() * e1.magnitude());
+					//uniqueEdge.maxAngle = PxMax(uniqueEdge.maxAngle, cosAngle);
+
+					//From Apex (guards against zero-length quad edges)
+					float denominator = e0.magnitude() * e1.magnitude();
+					if (denominator != 0.0f)
+					{
+						float cosAngle = PxAbs(e0.dot(e1)) / denominator;
+						uniqueEdge.maxAngle = PxMax(uniqueEdge.maxAngle, cosAngle);
+					}
+				}
+
+				hideEdges.pushBack(i);
+			}
+		}
+
+		// process the squarest candidates first
+		shdfnd::sort(hideEdges.begin(), hideEdges.size(), SortHiddenEdges(uniqueEdges), NonTrackingAllocator());
+
+		const PxF32 maxAngle = PxSin(shdfnd::degToRad(60.0f));
+
+		PxU32 numHiddenEdges = 0;
+
+		for (PxU32 i = 0; i < hideEdges.size(); i++)
+		{
+			UniqueEdge& uniqueEdge = uniqueEdges[hideEdges[i]];
+
+			// find some stop criterion
+			if (uniqueEdge.maxAngle > maxAngle)
+				break;
+
+			// check if all four adjacent edges are still visible?
+			PxU32 indices[5] = { uniqueEdge.vertex0, uniqueEdge.vertex2, uniqueEdge.vertex1, uniqueEdge.vertex3, uniqueEdge.vertex0 };
+
+			PxU32 numVisible = 0;
+			for (PxU32 j = 0; j < 4; j++)
+			{
+				const PxU32 edgeIndex = findUniqueEdge(uniqueEdges, indices[j], indices[j + 1]);
+				NV_CLOTH_ASSERT(edgeIndex < uniqueEdges.size());
+
+				numVisible += uniqueEdges[edgeIndex].isQuadDiagonal ? 0 : 1;
+			}
+
+			if (numVisible == 4)
+			{
+				uniqueEdge.isQuadDiagonal = true;
+				numHiddenEdges++;
+			}
+		}
+	}
+
+
+	// calculate the inclusive prefix sum, equivalent of std::partial_sum.
+	// Safe for in-place use (dest == first): each output element is written
+	// after its input is read, which quadifyTriangles relies on when it calls
+	// prefixSum(valency.begin(), valency.end(), valency.begin()).
+	template <typename T>
+	void prefixSum(const T* first, const T* last, T* dest)
+	{
+		if (first != last)
+		{
+			*(dest++) = *(first++);
+			for (; first != last; ++first, ++dest)
+				*dest = *(dest-1) + *first;
+		}
+	}
+
	////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
	// Merges pairs of adjacent triangles whose shared edge is flagged as a quad
	// diagonal (UniqueEdge::isQuadDiagonal) into quads.  Consumed triangles are
	// removed from 'triangles'; each new quad appends four indices to 'quads'.
	void quadifyTriangles(const nv::cloth::Vector<UniqueEdge>::Type &uniqueEdges, nv::cloth::Vector<PxU32>::Type& triangles, nv::cloth::Vector<PxU32>::Type &quads)
	{
		nv::cloth::Vector<PxU32>::Type valency(uniqueEdges.size()+1, 0); // edge valency (count of triangles touching each unique edge)
		nv::cloth::Vector<PxU32>::Type adjacencies; // adjacency from unique edge to triangles
		PxU32 numTriangles = triangles.size() / 3;

		// compute edge valency w.r.t triangles
		for(PxU32 i=0; i<numTriangles; ++i)
		{
			for (PxU32 j=0; j < 3; j++)
			{
				PxU32 uniqueEdgeIndex = findUniqueEdge(uniqueEdges, triangles[i*3+j], triangles[i*3+(j+1)%3]);
				++valency[uniqueEdgeIndex];
			}
		}

		// compute adjacency from each edge to triangle, the value also encodes which side of the triangle this edge belongs to
		// Counting-sort style: the prefix sum turns valency into bucket end
		// offsets; writing each entry at --valency[edge] fills the bucket and
		// leaves valency[i] as the bucket start offset used below.
		prefixSum(valency.begin(), valency.end(), valency.begin());
		adjacencies.resize(valency.back());
		for(PxU32 i=0; i<numTriangles; ++i)
		{
			for (PxU32 j=0; j < 3; j++)
			{
				PxU32 uniqueEdgeIndex = findUniqueEdge(uniqueEdges, triangles[i*3+j], triangles[i*3+(j+1)%3]);
				adjacencies[--valency[uniqueEdgeIndex]] = i*3+j; // i*3+j encodes (triangle i, local edge j)
			}
		}

		// now go through unique edges that are identified as diagonal, and build a quad out of two adjacent triangles
		nv::cloth::Vector<PxU32>::Type mark(numTriangles, 0); // 1 = triangle consumed by a quad
		for (PxU32 i = 0; i < uniqueEdges.size(); i++)
		{
			const UniqueEdge& edge = uniqueEdges[i];
			if (edge.isQuadDiagonal)
			{
				PxU32 vi = valency[i];
				if ((valency[i+1]-vi) != 2)
					continue; // we do not quadify around non-manifold edges

				// decode the two adjacent triangles and which local edge of each is shared
				PxU32 adj0 = adjacencies[vi], adj1 = adjacencies[vi+1];
				PxU32 tid0 = adj0 / 3, tid1 = adj1 / 3;
				PxU32 eid0 = adj0 % 3, eid1 = adj1 % 3;

				// emit the quad: the three vertices of tid0 plus the vertex of
				// tid1 that is not on the shared edge ((eid1+2)%3)
				quads.pushBack(triangles[tid0 * 3 + eid0]);
				quads.pushBack(triangles[tid1 * 3 + (eid1+2)%3]);
				quads.pushBack(triangles[tid0 * 3 + (eid0+1)%3]);
				quads.pushBack(triangles[tid0 * 3 + (eid0+2)%3]);

				mark[tid0] = 1;
				mark[tid1] = 1;
#if 0 // PX_DEBUG
				printf("Deleting %d, %d, %d - %d, %d, %d, creating %d, %d, %d, %d\n",
					triangles[tid0*3],triangles[tid0*3+1],triangles[tid0*3+2],
					triangles[tid1*3],triangles[tid1*3+1],triangles[tid1*3+2],
					v0,v3,v1,v2);
#endif
			}
		}

		// add remaining triangles that are not marked as already quadified
		nv::cloth::Vector<PxU32>::Type oldTriangles = triangles;
		triangles.resize(0);
		for (PxU32 i = 0; i < numTriangles; i++)
		{
			if (mark[i]) continue;

			triangles.pushBack(oldTriangles[i*3]);
			triangles.pushBack(oldTriangles[i*3+1]);
			triangles.pushBack(oldTriangles[i*3+2]);
		}
	}
+
+} // namespace
+
+
+///////////////////////////////////////////////////////////////////////////////
+bool ClothMeshQuadifierImpl::quadify(const ClothMeshDesc &desc)
+{
+ mDesc = desc;
+ nv::cloth::Vector<PxVec3>::Type particles(desc.points.count);
+ PxStrideIterator<const PxVec3> pIt(reinterpret_cast<const PxVec3*>(desc.points.data), desc.points.stride);
+ for(PxU32 i=0; i<desc.points.count; ++i)
+ particles[i] = *pIt++;
+
+ // copy triangle indices
+ if(desc.flags & MeshFlag::e16_BIT_INDICES)
+ copyIndices<PxU16>(desc, mTriangles, mQuads);
+ else
+ copyIndices<PxU32>(desc, mTriangles, mQuads);
+
+ nv::cloth::Vector<UniqueEdge>::Type uniqueEdges;
+
+ computeUniqueEdges(uniqueEdges, particles.begin(), mTriangles);
+
+ refineUniqueEdges(uniqueEdges, particles.begin());
+
+// printf("before %d triangles, %d quads\n", mTriangles.size()/3, mQuads.size()/4);
+ quadifyTriangles(uniqueEdges, mTriangles, mQuads);
+
+// printf("after %d triangles, %d quads\n", mTriangles.size()/3, mQuads.size()/4);
+
+ return true;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+ClothMeshDesc
+ClothMeshQuadifierImpl::getDescriptor() const
+{
+ // copy points and other data
+ ClothMeshDesc desc = mDesc;
+
+ // for now use only 32 bit for temporary indices out of quadifier
+ desc.flags &= ~MeshFlag::e16_BIT_INDICES;
+
+ desc.triangles.count = mTriangles.size() / 3;
+ desc.triangles.data = mTriangles.begin();
+ desc.triangles.stride = 3 * sizeof(PxU32);
+
+ desc.quads.count = mQuads.size() / 4;
+ desc.quads.data = mQuads.begin();
+ desc.quads.stride = 4 * sizeof(PxU32);
+
+ NV_CLOTH_ASSERT(desc.isValid());
+
+ return desc;
+}
+
+} // namespace cloth
+} // namespace nv
+
// Factory entry point: allocates a new mesh quadifier via the NvCloth
// allocator.  NOTE(review): the caller presumably owns the returned object —
// confirm the release convention against the NvCloth allocator API.
NV_CLOTH_API(nv::cloth::ClothMeshQuadifier*) NvClothCreateMeshQuadifier()
{
	return NV_CLOTH_NEW(nv::cloth::ClothMeshQuadifierImpl);
}
diff --git a/NvCloth/extensions/src/ClothSimpleTetherCooker.cpp b/NvCloth/extensions/src/ClothSimpleTetherCooker.cpp
new file mode 100644
index 0000000..73884d9
--- /dev/null
+++ b/NvCloth/extensions/src/ClothSimpleTetherCooker.cpp
@@ -0,0 +1,144 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2017 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#include "foundation/PxVec4.h"
+#include "foundation/PxMemory.h"
+#include "foundation/PxStrideIterator.h"
+#include "NvClothExt/ClothTetherCooker.h"
+#include "NvCloth/Allocator.h"
+
+using namespace physx;
+
+namespace nv
+{
+namespace cloth
+{
+
+struct ClothSimpleTetherCooker : public ClothTetherCooker
+{
+ virtual bool cook(const ClothMeshDesc& desc) override;
+
+ virtual uint32_t getCookerStatus() const override; //From APEX
+ virtual void getTetherData(PxU32* userTetherAnchors, PxReal* userTetherLengths) const override;
+ virtual PxU32 getNbTethersPerParticle() const override{ return 1; }
+
+public:
+ // output
+ nv::cloth::Vector<PxU32>::Type mTetherAnchors;
+ nv::cloth::Vector<PxReal>::Type mTetherLengths;
+
+protected:
+ void createTetherData(const ClothMeshDesc &desc);
+
+ uint32_t mCookerStatus; //From APEX
+};
+
+
+///////////////////////////////////////////////////////////////////////////////
+bool ClothSimpleTetherCooker::cook(const ClothMeshDesc &desc)
+{
+ mCookerStatus = 1;
+ createTetherData(desc);
+ return getCookerStatus() == 0;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+void ClothSimpleTetherCooker::createTetherData(const ClothMeshDesc &desc)
+{
+ PxU32 numParticles = desc.points.count;
+
+ if (!desc.invMasses.data)
+ return;
+
+ // assemble points
+ nv::cloth::Vector<PxVec4>::Type particles;
+ particles.reserve(numParticles);
+ PxStrideIterator<const PxVec3> pIt(reinterpret_cast<const PxVec3*>(desc.points.data), desc.points.stride);
+ PxStrideIterator<const PxReal> wIt(reinterpret_cast<const PxReal*>(desc.invMasses.data), desc.invMasses.stride);
+ for(PxU32 i=0; i<numParticles; ++i)
+ particles.pushBack(PxVec4(*pIt++, wIt.ptr() ? *wIt++ : 1.0f));
+
+ // compute tether data
+ nv::cloth::Vector<PxU32>::Type attachedIndices;
+ for(PxU32 i=0; i < numParticles; ++i)
+ if(particles[i].w == 0.0f)
+ attachedIndices.pushBack(i);
+
+ PxU32 n = attachedIndices.empty() ? 0 : numParticles;
+ for(PxU32 i=0; i < n; ++i)
+ {
+ mTetherAnchors.reserve(numParticles);
+ mTetherLengths.reserve(numParticles);
+
+ PxVec3 position = reinterpret_cast<const PxVec3&>(particles[i]);
+ float minSqrDist = FLT_MAX;
+ PxU32 minIndex = numParticles;
+ const PxU32 *aIt, *aEnd = attachedIndices.end();
+ for(aIt = attachedIndices.begin(); aIt != aEnd; ++aIt)
+ {
+ float sqrDist = (reinterpret_cast<const PxVec3&>(
+ particles[*aIt]) - position).magnitudeSquared();
+ if(minSqrDist > sqrDist)
+ minSqrDist = sqrDist, minIndex = *aIt;
+ }
+
+ mTetherAnchors.pushBack(minIndex);
+ mTetherLengths.pushBack(PxSqrt(minSqrDist));
+ }
+
+ NV_CLOTH_ASSERT(mTetherAnchors.size() == mTetherLengths.size());
+
+ if (numParticles == mTetherAnchors.size() && numParticles == mTetherLengths.size())
+ {
+ mCookerStatus = 0;
+ }
+}
+
///////////////////////////////////////////////////////////////////////////////
// Returns the cooker status: 0 after a successful cook(), non-zero otherwise.
uint32_t ClothSimpleTetherCooker::getCookerStatus() const
{
	return mCookerStatus;
}
+
+///////////////////////////////////////////////////////////////////////////////
+void
+ClothSimpleTetherCooker::getTetherData(PxU32* userTetherAnchors, PxReal* userTetherLengths) const
+{
+ PxMemCopy(userTetherAnchors, mTetherAnchors.begin(), mTetherAnchors.size() * sizeof(PxU32));
+ PxMemCopy(userTetherLengths, mTetherLengths.begin(), mTetherLengths.size() * sizeof(PxReal));
+}
+
+
+} // namespace cloth
+} // namespace nv
+
// Factory entry point: allocates a new simple tether cooker via the NvCloth
// allocator.  NOTE(review): the caller presumably owns the returned object —
// confirm the release convention against the NvCloth allocator API.
NV_CLOTH_API(nv::cloth::ClothTetherCooker*) NvClothCreateSimpleTetherCooker()
{
	return NV_CLOTH_NEW(nv::cloth::ClothSimpleTetherCooker);
}