// This code contains NVIDIA Confidential Information and is disclosed to you // under a form of NVIDIA software license agreement provided separately to you. // // Notice // NVIDIA Corporation and its licensors retain all intellectual property and // proprietary rights in and to this software and related documentation and // any modifications thereto. Any use, reproduction, disclosure, or // distribution of this software and related documentation without an express // license agreement from NVIDIA Corporation is strictly prohibited. // // ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES // NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO // THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT, // MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE. // // Information and code furnished is believed to be accurate and reliable. // However, NVIDIA Corporation assumes no responsibility for the consequences of use of such // information or for any infringement of patents or other rights of third parties that may // result from its use. No license is granted by implication or otherwise under any patent // or patent rights of NVIDIA Corporation. Details are subject to change without notice. // This code supersedes and replaces all information previously supplied. // NVIDIA Corporation products are not authorized for use as critical // components in life support devices or systems without express written approval of // NVIDIA Corporation. // // Copyright (c) 2008-2020 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. 
#include "foundation/PxVec4.h"
#include "foundation/PxIO.h"
#include "foundation/PxStrideIterator.h"
#include "ps/PsSort.h"
#include "NvCloth/ps/PsMathUtils.h"
#include "NvClothExt/ClothFabricCooker.h"
#include "NvClothExt/ClothTetherCooker.h"
#include "NvCloth/Fabric.h"
#include "NvCloth/Allocator.h"
#include "NvCloth/Range.h"
#include "ClothClone.h"
#include <algorithm> // std::min; NOTE(review): header name was stripped by the sanitizer — <algorithm> inferred from the std::min use below

using namespace physx;

namespace nv
{
namespace cloth
{

// Cooker implementation: classifies mesh edges into stretch/shear/bend phases,
// graph-colors the constraints into independent sets and cooks tether data.
struct FabricCookerImpl : public ClothFabricCooker
{
	FabricCookerImpl() {}

	bool cook(const ClothMeshDesc& desc, PxVec3 gravity, bool useGeodesicTether);
	ClothFabricDesc getDescriptor() const;
	CookedData getCookedData() const;
	void save(PxOutputStream& stream, bool platformMismatch) const;

  public:
	PxU32 mNumParticles;
	nv::cloth::Vector<PxU32>::Type mPhaseSetIndices;                   // one set index per phase
	nv::cloth::Vector<ClothFabricPhaseType::Enum>::Type mPhaseTypes;   // phase type per phase
	nv::cloth::Vector<PxU32>::Type mSets; // with 0 prefix (offset table: set i covers [mSets[i], mSets[i+1]))
	nv::cloth::Vector<PxReal>::Type mRestvalues;                       // one rest length per constraint
	nv::cloth::Vector<PxReal>::Type mStiffnessValues;                  // optional per-constraint stiffness (log2 encoded)
	nv::cloth::Vector<PxU32>::Type mIndices;                           // two particle indices per constraint
	nv::cloth::Vector<PxU32>::Type mTetherAnchors;
	nv::cloth::Vector<PxReal>::Type mTetherLengths;
	nv::cloth::Vector<PxU32>::Type mTriangles;

  private:
	// scratch buffer so getDescriptor() can hand out a pointer to legacy phase structs
	mutable nv::cloth::Vector<ClothFabricPhase>::Type mLegacyPhases;
};

namespace
{
// Wrap a vector (optionally skipping the first 'offset' elements) in a Range view.
// NOTE(review): Vector<T>::Type is a non-deduced context, so callers pass T explicitly.
template <typename T>
nv::cloth::Range<const T> CreateRange(typename nv::cloth::Vector<T>::Type const& vector, int offset = 0)
{
	const T* begin = vector.begin() + offset;
	const T* end = vector.end();
	return nv::cloth::Range<const T>(begin, end);
}

// Same as CreateRange but reinterprets the element type (e.g. enum -> int32_t).
template <typename T, typename U>
nv::cloth::Range<const T> CreateRangeF(typename nv::cloth::Vector<U>::Type const& vector, int offset = 0)
{
#ifndef _LIBCPP_HAS_NO_STATIC_ASSERT
	static_assert(sizeof(T) == sizeof(U), "Type T and U need to be of the same size");
#endif
	const T* begin = reinterpret_cast<const T*>(vector.begin() + offset);
	const T* end = reinterpret_cast<const T*>(vector.end());
	return nv::cloth::Range<const T>(begin, end);
}
}

namespace
{
// calculate the inclusive prefix sum, equivalent of std::partial_sum
// (safe to run in place, i.e. dest == first)
template <typename T>
void prefixSum(const T* first, const T* last, T* dest)
{
	if (first != last)
	{
		*(dest++) = *(first++);
		for (; first != last; ++first, ++dest)
			*dest = *(dest - 1) + *first;
	}
}

// Build a vertex->adjacent-vertex table (CSR layout: valency holds offsets,
// adjacencies the neighbor indices, each edge stored in both directions).
// T is the mesh index type (PxU16 or PxU32).
// NOTE(review): the loop bodies below were destroyed by the sanitizer and are
// reconstructed from the upstream PhysX/NvCloth cooker — verify against upstream.
template <typename T>
void gatherAdjacencies(nv::cloth::Vector<PxU32>::Type& valency,
                       nv::cloth::Vector<PxU32>::Type& adjacencies,
                       const BoundedData& triangles, const BoundedData& quads)
{
	// count number of edges per vertex
	PxStrideIterator<const T> tIt, qIt;
	tIt = PxStrideIterator<const T>(reinterpret_cast<const T*>(triangles.data), triangles.stride);
	for (PxU32 i = 0; i < triangles.count; ++i, ++tIt)
		for (PxU32 j = 0; j < 3; ++j)
			valency[tIt.ptr()[j]] += 2; // each triangle vertex touches 2 of its edges
	qIt = PxStrideIterator<const T>(reinterpret_cast<const T*>(quads.data), quads.stride);
	for (PxU32 i = 0; i < quads.count; ++i, ++qIt)
		for (PxU32 j = 0; j < 4; ++j)
			valency[qIt.ptr()[j]] += 2;

	prefixSum(valency.begin(), valency.end(), valency.begin());
	adjacencies.resize(valency.back());

	// scatter neighbors, decrementing the offsets back to range starts
	tIt = PxStrideIterator<const T>(reinterpret_cast<const T*>(triangles.data), triangles.stride);
	for (PxU32 i = 0; i < triangles.count; ++i, ++tIt)
	{
		for (PxU32 j = 0; j < 3; ++j)
		{
			PxU32 v0 = tIt.ptr()[j], v1 = tIt.ptr()[(j + 1) % 3];
			adjacencies[--valency[v0]] = v1;
			adjacencies[--valency[v1]] = v0;
		}
	}
	qIt = PxStrideIterator<const T>(reinterpret_cast<const T*>(quads.data), quads.stride);
	for (PxU32 i = 0; i < quads.count; ++i, ++qIt)
	{
		for (PxU32 j = 0; j < 4; ++j)
		{
			PxU32 v0 = qIt.ptr()[j], v1 = qIt.ptr()[(j + 1) % 4];
			adjacencies[--valency[v0]] = v1;
			adjacencies[--valency[v1]] = v0;
		}
	}
}

// Flatten triangles (and quads, split along the 0-2 diagonal) into a plain
// triangle index list. T is the mesh index type (PxU16 or PxU32).
// NOTE(review): loop bodies reconstructed from upstream — verify.
template <typename T>
void gatherTriangles(nv::cloth::Vector<PxU32>::Type& indices,
                     const BoundedData& triangles, const BoundedData& quads)
{
	indices.reserve(triangles.count * 3 + quads.count * 6);
	PxStrideIterator<const T> tIt, qIt;
	tIt = PxStrideIterator<const T>(reinterpret_cast<const T*>(triangles.data), triangles.stride);
	for (PxU32 i = 0; i < triangles.count; ++i, ++tIt)
	{
		indices.pushBack(tIt.ptr()[0]);
		indices.pushBack(tIt.ptr()[1]);
		indices.pushBack(tIt.ptr()[2]);
	}
	qIt = PxStrideIterator<const T>(reinterpret_cast<const T*>(quads.data), quads.stride);
	for (PxU32 i = 0; i < quads.count; ++i, ++qIt)
	{
		// split quad 0-1-2-3 into triangles 0-1-2 and 0-2-3
		indices.pushBack(qIt.ptr()[0]);
		indices.pushBack(qIt.ptr()[1]);
		indices.pushBack(qIt.ptr()[2]);
		indices.pushBack(qIt.ptr()[0]);
		indices.pushBack(qIt.ptr()[2]);
		indices.pushBack(qIt.ptr()[3]);
	}
}

// Accumulates evidence for how an edge should be classified
// (stretching = mesh edge, shearing = quad diagonal, bending = 2-ring edge).
// NOTE(review): struct head and the no-arg classify() were destroyed by the
// sanitizer and are reconstructed from upstream — verify.
struct Edge
{
	Edge() : mStretching(0.0f), mBending(0.0f), mShearing(0.0f)
	{
	}

	// a 1-ring (mesh) edge votes for stretching
	void classify()
	{
		mStretching += 1.0f;
	}

	// a 2-ring edge v0-v2 through the shared neighbor v1 votes for
	// shearing or bending depending on how flat the triangle v0,v1,v2 is
	void classify(const PxVec4& v0, const PxVec4& v1, const PxVec4& v2)
	{
		const PxVec3& p0 = reinterpret_cast<const PxVec3&>(v0);
		const PxVec3& p1 = reinterpret_cast<const PxVec3&>(v1);
		const PxVec3& p2 = reinterpret_cast<const PxVec3&>(v2);
		PxReal area = (p1 - p0).cross(p2 - p1).magnitude();
		// triangle height / base length
		// 1.0 = quad edge, 0.2 = quad diagonal + quad edge,
		PxReal ratio = area / (p2 - p0).magnitudeSquared();
		// 0.5 = quad diagonal
		mShearing += PxMax(0.0f, 0.15f - fabsf(0.45f - ratio));
		// 0.0 = collinear points
		mBending += PxMax(0.0f, 0.1f - ratio) * 3;
	}

	physx::PxReal mStretching;
	physx::PxReal mBending;
	physx::PxReal mShearing;
};

typedef ps::Pair<PxU32, PxU32> Pair;                          // (lower, higher) vertex index of an edge
typedef ps::Pair<Pair, ClothFabricPhaseType::Enum> Entry;     // constraint: edge + classified phase type

// maintain heap status after elements have been pushed (heapify)
template <typename T>
void pushHeap(typename nv::cloth::Vector<T>::Type& heap, const T& value)
{
	heap.pushBack(value);
	T* begin = heap.begin();
	T* end = heap.end();
	if (end <= begin)
		return;
	PxU32 current = PxU32(end - begin) - 1;
	// sift the new element up until the max-heap property holds
	while (current > 0)
	{
		const PxU32 parent = (current - 1) / 2;
		if (!(begin[parent] < begin[current]))
			break;
		ps::swap(begin[parent], begin[current]);
		current = parent;
	}
}

// pop one element from the heap
template <typename T>
T popHeap(typename nv::cloth::Vector<T>::Type& heap)
{
	T* begin = heap.begin();
	T* end = heap.end();
	ps::swap(begin[0], end[-1]); // exchange elements
	// shift down
	end--;
	PxU32 current = 0;
	while (begin + (current * 2 + 1) < end)
	{
		PxU32 child = current * 2 + 1;
		if (begin + child + 1 < end && begin[child] < begin[child + 1])
			++child;
		if (!(begin[current] < begin[child]))
			break;
		ps::swap(begin[current], begin[child]);
		current = child;
	}
	return heap.popBack();
}

// ---------------------------------------------------------------------------------------
// Heap element to sort constraint based on graph color count
struct ConstraintGraphColorCount
{
	ConstraintGraphColorCount(PxU32 cid, PxU32 count) : constraint(cid), colorCount(count)
	{
	}
	PxU32 constraint;
	PxU32 colorCount;
	bool operator<(const ConstraintGraphColorCount& c) const
	{
		return colorCount < c.colorCount;
	}
};

// Orders constraint indices by their (first, second) vertex pair for
// cache-friendly, deterministic in-set ordering.
struct ConstraintSorter
{
  public:
	ConstraintSorter(PxU32* constraints_) : constraints(constraints_)
	{
	}
	bool operator()(PxU32 i, PxU32 j) const
	{
		PxU32 ci = i * 2;
		PxU32 cj = j * 2;
		if (constraints[ci] == constraints[cj])
			return constraints[ci + 1] < constraints[cj + 1];
		else
			return constraints[ci] < constraints[cj];
	}
	PxU32* constraints;
};

} // anonymous namespace

// Cook a fabric from a mesh description:
// 1. classify all 1-ring and 2-ring edges into stretch/shear/bend/horizontal/vertical,
// 2. graph-color constraints of the same type into independent sets,
// 3. compute rest lengths, optional stiffness values, and tether data.
// Returns false (after logging) if the descriptor is invalid.
bool FabricCookerImpl::cook(const ClothMeshDesc& desc, PxVec3 gravity, bool useGeodesicTether)
{
	if(!desc.isValid())
	{
		NV_CLOTH_LOG_INVALID_PARAMETER("FabricCookerImpl::cook: desc.isValid() failed!");
		return false;
	}

	gravity = gravity.getNormalized();
	mNumParticles = desc.points.count;

	// assemble points
	nv::cloth::Vector<PxVec4>::Type particles;
	particles.reserve(mNumParticles);
	PxStrideIterator<const PxVec3> pIt(reinterpret_cast<const PxVec3*>(desc.points.data), desc.points.stride);
	PxStrideIterator<const PxReal> wIt(reinterpret_cast<const PxReal*>(desc.invMasses.data), desc.invMasses.stride);
	// NOTE(review): loop body reconstructed from upstream — inv mass defaults to 1 when none provided
	for(PxU32 i = 0; i < mNumParticles; ++i)
		particles.pushBack(PxVec4(*pIt++, desc.invMasses.data ? *wIt++ : 1.0f));

	// build adjacent vertex list
	nv::cloth::Vector<PxU32>::Type valency(mNumParticles+1, 0);
	nv::cloth::Vector<PxU32>::Type adjacencies;
	if (desc.flags & MeshFlag::e16_BIT_INDICES)
	{
		gatherTriangles<PxU16>(mTriangles, desc.triangles, desc.quads);
		gatherAdjacencies<PxU16>(valency, adjacencies, desc.triangles, desc.quads);
	}
	else
	{
		gatherTriangles<PxU32>(mTriangles, desc.triangles, desc.quads);
		gatherAdjacencies<PxU32>(valency, adjacencies, desc.triangles, desc.quads);
	}

	// build unique neighbors from adjacencies
	// NOTE(review): loop body reconstructed from upstream — verify
	nv::cloth::Vector<PxU32>::Type mark(valency.size(), 0);
	nv::cloth::Vector<PxU32>::Type neighbors;
	neighbors.reserve(adjacencies.size());
	for(PxU32 i = 1, j = 0; i < valency.size(); ++i)
	{
		for(; j < valency[i]; ++j)
		{
			PxU32 k = adjacencies[j];
			if(mark[k] != i) // dedupe per source vertex
			{
				mark[k] = i;
				neighbors.pushBack(k);
			}
		}
		valency[i] = neighbors.size();
	}

	// classify edges (1-ring and 2-ring) by accumulating votes per edge
	nv::cloth::HashMap<Pair, Edge>::Type edges;
	for(PxU32 i = 0; i < mNumParticles; ++i)
	{
		// iterate all neighbors
		PxU32 jlast = valency[i+1];
		for(PxU32 j = valency[i]; j < jlast; ++j)
		{
			PxU32 m = neighbors[j];
			// add 1-ring edge (skip if both particles are attached)
			if(particles[i].w + particles[m].w > 0.0f)
				edges[Pair(PxMin(i, m), PxMax(i, m))].classify();
			// iterate all neighbors of neighbor
			PxU32 klast = valency[m+1];
			for(PxU32 k = valency[m]; k < klast; ++k)
			{
				PxU32 n = neighbors[k];
				if(n != i && particles[i].w + particles[n].w > 0.0f)
				{
					// add 2-ring edge
					edges[Pair(PxMin(i, n), PxMax(i, n))].classify(
						particles[i], particles[m], particles[n]);
				}
			}
		}
	}

	// copy classified edges to constraints array
	// build histogram of constraints per vertex
	nv::cloth::Vector<Entry>::Type constraints;
	constraints.reserve(edges.size());
	valency.resize(0);
	valency.resize(mNumParticles+1, 0);
	const PxReal sqrtHalf = PxSqrt(0.4f);
	for(nv::cloth::HashMap<Pair, Edge>::Type::Iterator eIt = edges.getIterator(); !eIt.done(); ++eIt)
	{
		const Edge& edge = eIt->second;
		const Pair& pair = eIt->first;
		if((edge.mStretching + edge.mBending + edge.mShearing) > 0.0f)
		{
			ClothFabricPhaseType::Enum type = ClothFabricPhaseType::eINVALID;
			if(edge.mBending > PxMax(edge.mStretching, edge.mShearing))
				type = ClothFabricPhaseType::eBENDING;
			else if(edge.mShearing > PxMax(edge.mStretching, edge.mBending))
				type = ClothFabricPhaseType::eSHEARING;
			else
			{
				// stretch edge: split horizontal/vertical by alignment with gravity
				PxVec4 diff = particles[pair.first] - particles[pair.second];
				PxReal dot = gravity.dot(reinterpret_cast<const PxVec3&>(diff).getNormalized());
				type = fabsf(dot) < sqrtHalf ? ClothFabricPhaseType::eHORIZONTAL
				                             : ClothFabricPhaseType::eVERTICAL;
			}
			++valency[pair.first];
			++valency[pair.second];
			constraints.pushBack(Entry(pair, type));
		}
	}
	prefixSum(valency.begin(), valency.end(), valency.begin());
	PxU32 numConstraints = constraints.size();

	// build adjacent constraint list (vertex -> constraints touching it, CSR via valency)
	// NOTE(review): loop body reconstructed from upstream — verify
	adjacencies.resize(0);
	adjacencies.resize(valency.back(), 0);
	for(PxU32 i = 0; i < numConstraints; ++i)
	{
		adjacencies[--valency[constraints[i].first.first]] = i;
		adjacencies[--valency[constraints[i].first.second]] = i;
	}
	nv::cloth::Vector<PxU32>::Type::ConstIterator aFirst = adjacencies.begin();

	nv::cloth::Vector<PxU32>::Type colors(numConstraints, numConstraints); // constraint -> color, initially not colored
	mark.resize(0);
	mark.resize(numConstraints+1, PX_MAX_U32); // color -> constraint index
	nv::cloth::Vector<PxU32>::Type adjColorCount(numConstraints, 0); // # of neighbors that are already colored
	nv::cloth::Vector<ConstraintGraphColorCount>::Type constraintHeap;
	constraintHeap.reserve(numConstraints); // set of constraints to color (added in edge distance order)

	// Do graph coloring based on edge distance.
	// For each constraint, we add its uncolored neighbors to the heap
	// ,and we pick the constraint with most colored neighbors from the heap.
	for(;;)
	{
		PxU32 constraint = 0;
		while ((constraint < numConstraints) && (colors[constraint] != numConstraints))
			constraint++; // start with the first uncolored constraint
		if (constraint >= numConstraints)
			break;

		constraintHeap.clear();
		pushHeap(constraintHeap, ConstraintGraphColorCount(constraint, adjColorCount[constraint]));
		ClothFabricPhaseType::Enum type = constraints[constraint].second;

		while (!constraintHeap.empty())
		{
			ConstraintGraphColorCount heapItem = popHeap(constraintHeap);
			constraint = heapItem.constraint;
			if (colors[constraint] != numConstraints)
				continue; // skip if already colored

			const Pair& pair = constraints[constraint].first;
			for(PxU32 j = 0; j < 2; ++j)
			{
				PxU32 index = j ? pair.first : pair.second;
				if(particles[index].w == 0.0f)
					continue; // don't mark adjacent particles if attached
				for(nv::cloth::Vector<PxU32>::Type::ConstIterator aIt = aFirst + valency[index],
					aEnd = aFirst + valency[index+1]; aIt != aEnd; ++aIt)
				{
					PxU32 adjacentConstraint = *aIt;
					if ((constraints[adjacentConstraint].second != type) || (adjacentConstraint == constraint))
						continue;
					mark[colors[adjacentConstraint]] = constraint;
					++adjColorCount[adjacentConstraint];
					pushHeap(constraintHeap,
					         ConstraintGraphColorCount(adjacentConstraint, adjColorCount[adjacentConstraint]));
				}
			}

			// find smallest color with matching type
			PxU32 color = 0;
			while((color < mPhaseSetIndices.size() && mPhaseTypes[color] != type) || mark[color] == constraint)
				++color;

			// create a new color set
			if(color == mPhaseSetIndices.size())
			{
				mPhaseSetIndices.pushBack(mPhaseSetIndices.size());
				mPhaseTypes.pushBack(type);
				mSets.pushBack(0);
			}

			colors[constraint] = color;
			++mSets[color];
		}
	}

#if 0 // PX_DEBUG
	printf("set[%u] = ", mSets.size());
	for(PxU32 i = 0; i < mSets.size(); ++i)
		printf("%u ", mSets[i]);
	printf("\n");
#endif

	// NOTE(review): the following section (converting per-color counts in mSets
	// into an offset table with 0 prefix, and scattering constraints/rest values
	// into set order) was destroyed by the sanitizer; reconstructed to satisfy
	// the invariants asserted below — verify against upstream.
	// turn mSets = [c0, c1, ...] into [0, c0, c0+c1, ...]
	mSets.pushBack(0);
	for (PxU32 i = mSets.size(); --i > 0;)
		mSets[i] = mSets[i - 1];
	mSets[0] = 0;
	prefixSum(mSets.begin(), mSets.end(), mSets.begin());

	// write constraints grouped by color, compute rest lengths
	mIndices.resize(numConstraints * 2);
	mRestvalues.resize(numConstraints);
	nv::cloth::Vector<PxU32>::Type writePos(mSets.size() - 1, 0); // per-set write cursor
	for (PxU32 i = 0; i + 1 < mSets.size(); ++i)
		writePos[i] = mSets[i];
	for (PxU32 i = 0; i < numConstraints; ++i)
	{
		PxU32 dst = writePos[colors[i]]++;
		const Pair& pair = constraints[i].first;
		mIndices[dst * 2] = pair.first;
		mIndices[dst * 2 + 1] = pair.second;
		PxVec4 diff = particles[pair.second] - particles[pair.first];
		mRestvalues[dst] = reinterpret_cast<const PxVec3&>(diff).magnitude();
	}

	// reorder constraints and rest values for more efficient cache access (linear)
	nv::cloth::Vector<PxU32>::Type newIndices(mIndices.size());
	nv::cloth::Vector<PxReal>::Type newRestValues(mRestvalues.size());

	// sort each constraint set in vertex order
	for (PxU32 i = 0; i < mSets.size()-1; ++i)
	{
		// create a re-ordering list
		nv::cloth::Vector<PxU32>::Type reorder(mSets[i+1]-mSets[i]);
		for (PxU32 r = 0; r < reorder.size(); ++r)
			reorder[r] = r;

		const PxU32 indicesOffset = mSets[i]*2;
		const PxU32 restOffset = mSets[i];

		ConstraintSorter predicate(&mIndices[indicesOffset]);
		ps::sort(&reorder[0], reorder.size(), predicate, nv::cloth::ps::NonTrackingAllocator());

		for (PxU32 r = 0; r < reorder.size(); ++r)
		{
			newIndices[indicesOffset + r*2] = mIndices[indicesOffset + reorder[r]*2];
			newIndices[indicesOffset + r*2+1] = mIndices[indicesOffset + reorder[r]*2+1];
			newRestValues[restOffset + r] = mRestvalues[restOffset + reorder[r]];
		}
	}
	mIndices = newIndices;
	mRestvalues = newRestValues;

	NV_CLOTH_ASSERT(mIndices.size() == mRestvalues.size()*2);
	NV_CLOTH_ASSERT(mRestvalues.size() == mSets.back());

	// calculate per constraint stiffness values if point stiffness values are provided
	if(desc.pointsStiffness.count)
	{
		mStiffnessValues.resize(mIndices.size() >> 1);
		PxStrideIterator<const PxReal> stIt(reinterpret_cast<const PxReal*>(desc.pointsStiffness.data),
		                                    desc.pointsStiffness.stride);
		for(int i = 0; i < static_cast<int>(mIndices.size()); i += 2)
		{
			physx::PxU32 indexA = mIndices[i];
			physx::PxU32 indexB = mIndices[i+1];
			//Uses min instead of average to get better bending constraints
			mStiffnessValues[i >> 1] = safeLog2(1.0f - std::min(stIt[indexA], stIt[indexB]));
		}
	}

#if 0 // PX_DEBUG
	for (PxU32 i = 1; i < mSets.size(); i++)
	{
		ClothFabricPhase phase = mPhases[i-1];
		printf("%d : type %d, size %d\n", i-1, phase.phaseType, mSets[i] - mSets[i-1]);
	}
#endif

	// cook tethers; fall back to the simple cooker if geodesic cooking fails
	if (useGeodesicTether)
	{
		ClothTetherCooker* tetherCooker = NvClothCreateGeodesicTetherCooker();
		if (tetherCooker->cook(desc))
		{
			PxU32 numTethersPerParticle = tetherCooker->getNbTethersPerParticle();
			PxU32 tetherSize = mNumParticles * numTethersPerParticle;
			mTetherAnchors.resize(tetherSize);
			mTetherLengths.resize(tetherSize);
			tetherCooker->getTetherData(mTetherAnchors.begin(), mTetherLengths.begin());
		}
		else
			useGeodesicTether = false;
		delete tetherCooker;
	}

	if (!useGeodesicTether)
	{
		ClothTetherCooker* tetherCooker = NvClothCreateSimpleTetherCooker();
		if (tetherCooker->cook(desc))
		{
			PxU32 numTethersPerParticle = tetherCooker->getNbTethersPerParticle();
			PxU32 tetherSize = mNumParticles * numTethersPerParticle;
			mTetherAnchors.resize(tetherSize);
			mTetherLengths.resize(tetherSize);
			tetherCooker->getTetherData(mTetherAnchors.begin(), mTetherLengths.begin());
		}
		delete tetherCooker;
	}

	return true;
}

// Expose the cooked arrays as non-owning ranges (valid while this cooker lives).
CookedData FabricCookerImpl::getCookedData() const
{
	CookedData result;
	result.mNumParticles = mNumParticles;
	result.mPhaseIndices = CreateRange<PxU32>(mPhaseSetIndices);
	result.mPhaseTypes = CreateRangeF<int32_t, ClothFabricPhaseType::Enum>(mPhaseTypes);
	result.mSets = CreateRange<PxU32>(mSets, 1); // skip the 0 prefix
	result.mRestvalues = CreateRange<PxReal>(mRestvalues);
	result.mStiffnessValues = CreateRange<PxReal>(mStiffnessValues);
	result.mIndices = CreateRange<PxU32>(mIndices);
	result.mAnchors = CreateRange<PxU32>(mTetherAnchors);
	result.mTetherLengths = CreateRange<PxReal>(mTetherLengths);
	result.mTriangles = CreateRange<PxU32>(mTriangles);
	return result;
}

// Legacy descriptor view of the cooked data (pointers into member arrays).
ClothFabricDesc FabricCookerImpl::getDescriptor() const
{
	ClothFabricDesc result;
	result.nbParticles = mNumParticles;
	result.nbPhases = mPhaseSetIndices.size();
	mLegacyPhases.resize(mPhaseSetIndices.size());
	for(unsigned int i = 0; i < mPhaseSetIndices.size(); i++)
	{
		mLegacyPhases[i].setIndex = mPhaseSetIndices[i];
		mLegacyPhases[i].phaseType = mPhaseTypes[i];
	}
	result.phases = mLegacyPhases.begin();
	result.nbSets = mSets.size()-1;
	result.sets = mSets.begin()+1; // skip the 0 prefix
	result.restvalues = mRestvalues.begin();
	result.indices = mIndices.begin();
	result.nbTethers = mTetherAnchors.size();
	result.tetherAnchors = mTetherAnchors.begin();
	result.tetherLengths = mTetherLengths.begin();
	result.nbTriangles = mTriangles.size() / 3;
	result.triangles = mTriangles.begin();
	return result;
}

void FabricCookerImpl::save(PxOutputStream& stream, bool /*platformMismatch*/) const
{
	// version 1 is equivalent to 0x030300 and 0x030301 (PX_PHYSICS_VERSION of 3.3.0 and 3.3.1).
	// If the stream format changes, the loader code in ScClothFabricCore.cpp
	// and the version number need to change too.
	PxU32 version = 1;
	stream.write(&version, sizeof(PxU32));

	ClothFabricDesc desc = getDescriptor();

	// write explicit sizes, others are implicit
	stream.write(&mNumParticles, sizeof(PxU32));
	stream.write(&desc.nbPhases, sizeof(PxU32));
	stream.write(&desc.nbSets, sizeof(PxU32));
	stream.write(&desc.nbTethers, sizeof(PxU32));

	PxU32 nbConstraints = desc.sets[desc.nbSets-1];

	// write actual data
	PX_COMPILE_TIME_ASSERT(sizeof(ClothFabricPhaseType::Enum) == sizeof(PxU32));
	stream.write(desc.phases, desc.nbPhases*sizeof(ClothFabricPhase));
	stream.write(desc.sets, desc.nbSets*sizeof(PxU32));
	stream.write(desc.restvalues, nbConstraints*sizeof(PxReal));
	stream.write(desc.indices, nbConstraints*2*sizeof(PxU32));
	stream.write(desc.tetherAnchors, desc.nbTethers*sizeof(PxU32));
	stream.write(desc.tetherLengths, desc.nbTethers*sizeof(PxReal));
}

} // namespace cloth
} // namespace nv

NV_CLOTH_API(nv::cloth::ClothFabricCooker*) NvClothCreateFabricCooker()
{
	return NV_CLOTH_NEW(nv::cloth::FabricCookerImpl);
}

// Convenience entry point: cook a mesh and immediately create a Fabric from it.
// Optionally copies the per-phase types into *phaseTypes for the caller.
NV_CLOTH_API(nv::cloth::Fabric*) NvClothCookFabricFromMesh(
	nv::cloth::Factory* factory, const nv::cloth::ClothMeshDesc& desc, const PxVec3& gravity,
	nv::cloth::Vector<int32_t>::Type* phaseTypes, bool useGeodesicTether)
{
	nv::cloth::FabricCookerImpl impl;
	if(!impl.cook(desc, gravity, useGeodesicTether))
		return 0;
	nv::cloth::CookedData data = impl.getCookedData();

	if(phaseTypes)
	{
		phaseTypes->resize(data.mPhaseTypes.size());
		for(int i = 0; i < static_cast<int>(data.mPhaseTypes.size()); i++)
		{
			(*phaseTypes)[i] = data.mPhaseTypes[i];
		}
	}

	return factory->createFabric(
		data.mNumParticles,
		data.mPhaseIndices,
		data.mSets,
		data.mRestvalues,
		data.mStiffnessValues,
		data.mIndices,
		data.mAnchors,
		data.mTetherLengths,
		data.mTriangles
		);
}