aboutsummaryrefslogtreecommitdiff
path: root/PhysX_3.4/Source/SceneQuery/src
diff options
context:
space:
mode:
Diffstat (limited to 'PhysX_3.4/Source/SceneQuery/src')
-rw-r--r--PhysX_3.4/Source/SceneQuery/src/SqAABBPruner.cpp116
-rw-r--r--PhysX_3.4/Source/SceneQuery/src/SqAABBPruner.h3
-rw-r--r--PhysX_3.4/Source/SceneQuery/src/SqAABBTree.cpp258
-rw-r--r--PhysX_3.4/Source/SceneQuery/src/SqAABBTree.h107
-rw-r--r--PhysX_3.4/Source/SceneQuery/src/SqAABBTreeBuild.cpp256
-rw-r--r--PhysX_3.4/Source/SceneQuery/src/SqAABBTreeBuild.h162
-rw-r--r--PhysX_3.4/Source/SceneQuery/src/SqAABBTreeQuery.h32
-rw-r--r--PhysX_3.4/Source/SceneQuery/src/SqExtendedBucketPruner.cpp75
-rw-r--r--PhysX_3.4/Source/SceneQuery/src/SqExtendedBucketPruner.h47
-rw-r--r--PhysX_3.4/Source/SceneQuery/src/SqIncrementalAABBPrunerCore.cpp436
-rw-r--r--PhysX_3.4/Source/SceneQuery/src/SqIncrementalAABBPrunerCore.h114
-rw-r--r--PhysX_3.4/Source/SceneQuery/src/SqIncrementalAABBTree.cpp764
-rw-r--r--PhysX_3.4/Source/SceneQuery/src/SqIncrementalAABBTree.h195
-rw-r--r--PhysX_3.4/Source/SceneQuery/src/SqSceneQueryManager.cpp160
14 files changed, 2186 insertions, 539 deletions
diff --git a/PhysX_3.4/Source/SceneQuery/src/SqAABBPruner.cpp b/PhysX_3.4/Source/SceneQuery/src/SqAABBPruner.cpp
index 43532883..4b148a53 100644
--- a/PhysX_3.4/Source/SceneQuery/src/SqAABBPruner.cpp
+++ b/PhysX_3.4/Source/SceneQuery/src/SqAABBPruner.cpp
@@ -118,7 +118,15 @@ bool AABBPruner::addObjects(PrunerHandle* results, const PxBounds3* bounds, cons
if(!hasPruningStructure)
{
for(PxU32 i=0;i<valid;i++)
- mBucketPruner.addObject(payload[i], bounds[i], mTimeStamp);
+ {
+#if USE_INCREMENTAL_PRUNER
+ const PrunerHandle& handle = results[i];
+ const PoolIndex poolIndex = mPool.getIndex(handle);
+ mBucketPruner.addObject(payload[i], bounds[i], mTimeStamp, poolIndex);
+#else
+ mBucketPruner.addObject(payload[i], bounds[i], mTimeStamp, INVALID_NODE_ID);
+#endif
+ }
}
}
return valid==count;
@@ -146,7 +154,7 @@ void AABBPruner::updateObjectsAfterManualBoundsUpdates(const PrunerHandle* handl
mAABBTree->markNodeForRefit(treeNodeIndex);
else // otherwise it means it should be in the bucket pruner
{
- bool found = mBucketPruner.updateObject(newBounds[poolIndex], payloads[poolIndex]);
+ bool found = mBucketPruner.updateObject(newBounds[poolIndex], payloads[poolIndex], poolIndex);
PX_UNUSED(found); PX_ASSERT(found);
}
@@ -183,7 +191,7 @@ void AABBPruner::updateObjectsAndInflateBounds(const PrunerHandle* handles, cons
// bool found = mBucketPruner.updateObject(newBounds[indices[i]], mPool.getPayload(handles[i]));
PX_ASSERT(&payloads[poolIndex]==&mPool.getPayload(handles[i]));
// PT: TODO: don't we need to read the pool's array here, to pass the inflated bounds?
- bool found = mBucketPruner.updateObject(newBounds[indices[i]], payloads[poolIndex]);
+ bool found = mBucketPruner.updateObject(newBounds[indices[i]], payloads[poolIndex], poolIndex);
PX_UNUSED(found); PX_ASSERT(found);
}
@@ -268,12 +276,12 @@ PxAgain AABBPruner::overlap(const ShapeData& queryVolume, PrunerCallback& pcb) c
if(queryVolume.isOBB())
{
const Gu::OBBAABBTest test(queryVolume.getPrunerWorldPos(), queryVolume.getPrunerWorldRot33(), queryVolume.getPrunerBoxGeomExtentsInflated());
- again = AABBTreeOverlap<Gu::OBBAABBTest>()(mPool.getObjects(), mPool.getCurrentWorldBoxes(), *mAABBTree, test, pcb);
+ again = AABBTreeOverlap<Gu::OBBAABBTest, AABBTree, AABBTreeRuntimeNode>()(mPool.getObjects(), mPool.getCurrentWorldBoxes(), *mAABBTree, test, pcb);
}
else
{
const Gu::AABBAABBTest test(queryVolume.getPrunerInflatedWorldAABB());
- again = AABBTreeOverlap<Gu::AABBAABBTest>()(mPool.getObjects(), mPool.getCurrentWorldBoxes(), *mAABBTree, test, pcb);
+ again = AABBTreeOverlap<Gu::AABBAABBTest, AABBTree, AABBTreeRuntimeNode>()(mPool.getObjects(), mPool.getCurrentWorldBoxes(), *mAABBTree, test, pcb);
}
}
break;
@@ -282,20 +290,20 @@ PxAgain AABBPruner::overlap(const ShapeData& queryVolume, PrunerCallback& pcb) c
const Gu::Capsule& capsule = queryVolume.getGuCapsule();
const Gu::CapsuleAABBTest test( capsule.p1, queryVolume.getPrunerWorldRot33().column0,
queryVolume.getCapsuleHalfHeight()*2.0f, PxVec3(capsule.radius*SQ_PRUNER_INFLATION));
- again = AABBTreeOverlap<Gu::CapsuleAABBTest>()(mPool.getObjects(), mPool.getCurrentWorldBoxes(), *mAABBTree, test, pcb);
+ again = AABBTreeOverlap<Gu::CapsuleAABBTest, AABBTree, AABBTreeRuntimeNode>()(mPool.getObjects(), mPool.getCurrentWorldBoxes(), *mAABBTree, test, pcb);
}
break;
case PxGeometryType::eSPHERE:
{
const Gu::Sphere& sphere = queryVolume.getGuSphere();
Gu::SphereAABBTest test(sphere.center, sphere.radius);
- again = AABBTreeOverlap<Gu::SphereAABBTest>()(mPool.getObjects(), mPool.getCurrentWorldBoxes(), *mAABBTree, test, pcb);
+ again = AABBTreeOverlap<Gu::SphereAABBTest, AABBTree, AABBTreeRuntimeNode>()(mPool.getObjects(), mPool.getCurrentWorldBoxes(), *mAABBTree, test, pcb);
}
break;
case PxGeometryType::eCONVEXMESH:
{
const Gu::OBBAABBTest test(queryVolume.getPrunerWorldPos(), queryVolume.getPrunerWorldRot33(), queryVolume.getPrunerBoxGeomExtentsInflated());
- again = AABBTreeOverlap<Gu::OBBAABBTest>()(mPool.getObjects(), mPool.getCurrentWorldBoxes(), *mAABBTree, test, pcb);
+ again = AABBTreeOverlap<Gu::OBBAABBTest, AABBTree, AABBTreeRuntimeNode>()(mPool.getObjects(), mPool.getCurrentWorldBoxes(), *mAABBTree, test, pcb);
}
break;
case PxGeometryType::ePLANE:
@@ -323,7 +331,7 @@ PxAgain AABBPruner::sweep(const ShapeData& queryVolume, const PxVec3& unitDir, P
{
const PxBounds3& aabb = queryVolume.getPrunerInflatedWorldAABB();
const PxVec3 extents = aabb.getExtents();
- again = AABBTreeRaycast<true>()(mPool.getObjects(), mPool.getCurrentWorldBoxes(), *mAABBTree, aabb.getCenter(), unitDir, inOutDistance, extents, pcb);
+ again = AABBTreeRaycast<true, AABBTree, AABBTreeRuntimeNode>()(mPool.getObjects(), mPool.getCurrentWorldBoxes(), *mAABBTree, aabb.getCenter(), unitDir, inOutDistance, extents, pcb);
}
if(again && mIncrementalRebuild && mBucketPruner.getNbObjects())
@@ -339,7 +347,7 @@ PxAgain AABBPruner::raycast(const PxVec3& origin, const PxVec3& unitDir, PxReal&
PxAgain again = true;
if(mAABBTree)
- again = AABBTreeRaycast<false>()(mPool.getObjects(), mPool.getCurrentWorldBoxes(), *mAABBTree, origin, unitDir, inOutDistance, PxVec3(0.0f), pcb);
+ again = AABBTreeRaycast<false, AABBTree, AABBTreeRuntimeNode>()(mPool.getObjects(), mPool.getCurrentWorldBoxes(), *mAABBTree, origin, unitDir, inOutDistance, PxVec3(0.0f), pcb);
if(again && mIncrementalRebuild && mBucketPruner.getNbObjects())
again = mBucketPruner.raycast(origin, unitDir, inOutDistance, pcb);
@@ -376,7 +384,7 @@ void AABBPruner::commit()
{
PX_PROFILE_ZONE("SceneQuery.prunerCommit", mContextID);
- if(!mUncommittedChanges)
+ if(!mUncommittedChanges && (mProgress != BUILD_FINISHED))
// Q: seems like this is both for refit and finalization so is this is correct?
// i.e. in a situation when we started rebuilding a tree and didn't add anything since
// who is going to set mUncommittedChanges to true?
@@ -550,7 +558,7 @@ void AABBPruner::visualize(Cm::RenderOutput& out, PxU32 color) const
*/
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
-bool AABBPruner::buildStep()
+bool AABBPruner::buildStep(bool synchronousCall)
{
PX_PROFILE_ZONE("SceneQuery.prunerBuildStep", mContextID);
@@ -559,36 +567,8 @@ bool AABBPruner::buildStep()
{
if(mProgress==BUILD_NOT_STARTED)
{
- const PxU32 nbObjects = mPool.getNbActiveObjects();
- if(!nbObjects)
- return true;
-
- PX_DELETE(mNewTree);
- mNewTree = PX_NEW(AABBTree);
-
- mNbCachedBoxes = nbObjects;
- // PT: we always allocate one extra box, to make sure we can safely use V4 loads on the array
- mCachedBoxes = reinterpret_cast<PxBounds3*>(PX_ALLOC(sizeof(PxBounds3)*(nbObjects+1), "PxBound3"));
-
- PxMemCopy(mCachedBoxes, mPool.getCurrentWorldBoxes(), nbObjects*sizeof(PxBounds3));
-
- // PT: objects currently in the bucket pruner will be in the new tree. They are marked with the
- // current timestamp (mTimeStamp). However more objects can get added while we compute the new tree,
- // and those ones will not be part of it. These new objects will be marked with the new timestamp
- // value (mTimeStamp+1), and we can use these different values to remove the proper objects from
- // the bucket pruner (when switching to the new tree).
- mTimeStamp++;
- mBuilder.reset();
- mBuilder.mNbPrimitives = mNbCachedBoxes;
- mBuilder.mAABBArray = mCachedBoxes;
- mBuilder.mLimit = NB_OBJECTS_PER_NODE;
-
- mBuildStats.reset();
-
- // start recording modifications to the tree made during rebuild to reapply (fix the new tree) eventually
- PX_ASSERT(mNewTreeFixups.size()==0);
-
- mProgress = BUILD_INIT;
+ if(!synchronousCall || !prepareBuild())
+ return false;
}
else if(mProgress==BUILD_INIT)
{
@@ -684,16 +664,66 @@ bool AABBPruner::buildStep()
// This is required to be set because commit handles both refit and a portion of build finalization (why?)
// This is overly conservative also only necessary in case there were no updates at all to the tree since the last tree swap
// It also overly conservative in a sense that it could be set only if mProgress was just set to BUILD_FINISHED
+ // If run asynchronously from a different thread, we touched just the new AABB build phase, we should not mark the main tree as dirty
+ if(synchronousCall)
mUncommittedChanges = true;
return mProgress==BUILD_FINISHED;
}
- return true;
+ return false;
}
+bool AABBPruner::prepareBuild()
+{
+ PX_PROFILE_ZONE("SceneQuery.prepareBuild", mContextID);
+
+ PX_ASSERT(mIncrementalRebuild);
+ if(mNeedsNewTree)
+ {
+ if(mProgress==BUILD_NOT_STARTED)
+ {
+ const PxU32 nbObjects = mPool.getNbActiveObjects();
+ if(!nbObjects)
+ return false;
+
+ PX_DELETE(mNewTree);
+ mNewTree = PX_NEW(AABBTree);
+ mNbCachedBoxes = nbObjects;
+ // PT: we always allocate one extra box, to make sure we can safely use V4 loads on the array
+ mCachedBoxes = reinterpret_cast<PxBounds3*>(PX_ALLOC(sizeof(PxBounds3)*(nbObjects+1), "PxBound3"));
+
+ PxMemCopy(mCachedBoxes, mPool.getCurrentWorldBoxes(), nbObjects*sizeof(PxBounds3));
+ // PT: objects currently in the bucket pruner will be in the new tree. They are marked with the
+ // current timestamp (mTimeStamp). However more objects can get added while we compute the new tree,
+ // and those ones will not be part of it. These new objects will be marked with the new timestamp
+ // value (mTimeStamp+1), and we can use these different values to remove the proper objects from
+ // the bucket pruner (when switching to the new tree).
+ mTimeStamp++;
+#if USE_INCREMENTAL_PRUNER
+ // notify the incremental pruner to swap trees
+ mBucketPruner.timeStampChange();
+#endif
+ mBuilder.reset();
+ mBuilder.mNbPrimitives = mNbCachedBoxes;
+ mBuilder.mAABBArray = mCachedBoxes;
+ mBuilder.mLimit = NB_OBJECTS_PER_NODE;
+
+ mBuildStats.reset();
+
+ // start recording modifications to the tree made during rebuild to reapply (fix the new tree) eventually
+ PX_ASSERT(mNewTreeFixups.size()==0);
+
+ mProgress = BUILD_INIT;
+ }
+ }
+ else
+ return false;
+
+ return true;
+}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
diff --git a/PhysX_3.4/Source/SceneQuery/src/SqAABBPruner.h b/PhysX_3.4/Source/SceneQuery/src/SqAABBPruner.h
index a644781c..2b66cad4 100644
--- a/PhysX_3.4/Source/SceneQuery/src/SqAABBPruner.h
+++ b/PhysX_3.4/Source/SceneQuery/src/SqAABBPruner.h
@@ -147,7 +147,8 @@ namespace Sq
// IncrementalPruner
virtual void purge(); // gets rid of internal accel struct
virtual void setRebuildRateHint(PxU32 nbStepsForRebuild); // Besides the actual rebuild steps, 3 additional steps are needed.
- virtual bool buildStep(); // returns true if finished
+ virtual bool buildStep(bool synchronousCall = true); // returns true if finished
+ virtual bool prepareBuild(); // returns true if new tree is needed
//~IncrementalPruner
// direct access for test code
diff --git a/PhysX_3.4/Source/SceneQuery/src/SqAABBTree.cpp b/PhysX_3.4/Source/SceneQuery/src/SqAABBTree.cpp
index bb628010..bcb0e0ef 100644
--- a/PhysX_3.4/Source/SceneQuery/src/SqAABBTree.cpp
+++ b/PhysX_3.4/Source/SceneQuery/src/SqAABBTree.cpp
@@ -42,105 +42,42 @@ using namespace Sq;
// Progressive building
class Sq::FIFOStack : public Ps::UserAllocated
{
- public:
- FIFOStack() : mStack(PX_DEBUG_EXP("SQFIFOStack")), mCurIndex(0) {}
- ~FIFOStack() {}
+public:
+ FIFOStack() : mStack(PX_DEBUG_EXP("SQFIFOStack")), mCurIndex(0) {}
+ ~FIFOStack() {}
PX_FORCE_INLINE PxU32 getNbEntries() const { return mStack.size(); }
- PX_FORCE_INLINE void push(AABBTreeBuildNode* entry) { mStack.pushBack(entry); }
- bool pop(AABBTreeBuildNode*& entry);
- private:
- Ps::Array<AABBTreeBuildNode*> mStack;
- PxU32 mCurIndex; //!< Current index within the container
+ PX_FORCE_INLINE void push(AABBTreeBuildNode* entry) { mStack.pushBack(entry); }
+ bool pop(AABBTreeBuildNode*& entry);
+private:
+ Ps::Array<AABBTreeBuildNode*> mStack;
+ PxU32 mCurIndex; //!< Current index within the container
};
bool Sq::FIFOStack::pop(AABBTreeBuildNode*& entry)
{
const PxU32 NbEntries = mStack.size(); // Get current number of entries
- if(!NbEntries)
+ if (!NbEntries)
return false; // Can be NULL when no value has been pushed. This is an invalid pop call.
entry = mStack[mCurIndex++]; // Get oldest entry, move to next one
- if(mCurIndex==NbEntries)
+ if (mCurIndex == NbEntries)
{
// All values have been poped
mStack.clear();
- mCurIndex=0;
+ mCurIndex = 0;
}
return true;
}
//~Progressive building
-NodeAllocator::NodeAllocator() : mPool(NULL), mCurrentSlabIndex(0), mTotalNbNodes(0)
-{
-}
-
-NodeAllocator::~NodeAllocator()
-{
- release();
-}
-
-void NodeAllocator::release()
-{
- const PxU32 nbSlabs = mSlabs.size();
- for(PxU32 i=0;i<nbSlabs;i++)
- {
- Slab& s = mSlabs[i];
- PX_DELETE_ARRAY(s.mPool);
- }
-
- mSlabs.reset();
- mCurrentSlabIndex = 0;
- mTotalNbNodes = 0;
-}
-
-void NodeAllocator::init(PxU32 nbPrimitives, PxU32 limit)
-{
- const PxU32 maxSize = nbPrimitives*2 - 1; // PT: max possible #nodes for a complete tree
- const PxU32 estimatedFinalSize = maxSize<=1024 ? maxSize : maxSize/limit;
- mPool = PX_NEW(AABBTreeBuildNode)[estimatedFinalSize];
- PxMemZero(mPool, sizeof(AABBTreeBuildNode)*estimatedFinalSize);
-
- // Setup initial node. Here we have a complete permutation of the app's primitives.
- mPool->mNodeIndex = 0;
- mPool->mNbPrimitives = nbPrimitives;
-
- mSlabs.pushBack(Slab(mPool, 1, estimatedFinalSize));
- mCurrentSlabIndex = 0;
- mTotalNbNodes = 1;
-}
-
-// PT: TODO: inline this?
-AABBTreeBuildNode* NodeAllocator::getBiNode()
-{
- mTotalNbNodes += 2;
- Slab& currentSlab = mSlabs[mCurrentSlabIndex];
- if(currentSlab.mNbUsedNodes+2<=currentSlab.mMaxNbNodes)
- {
- AABBTreeBuildNode* biNode = currentSlab.mPool + currentSlab.mNbUsedNodes;
- currentSlab.mNbUsedNodes += 2;
- return biNode;
- }
- else
- {
- // Allocate new slab
- const PxU32 size = 1024;
- AABBTreeBuildNode* pool = PX_NEW(AABBTreeBuildNode)[size];
- PxMemZero(pool, sizeof(AABBTreeBuildNode)*size);
-
- mSlabs.pushBack(Slab(pool, 2, size));
- mCurrentSlabIndex++;
- return pool;
- }
-}
-
-void NodeAllocator::flatten(AABBTreeRuntimeNode* dest)
+void flatten(const NodeAllocator& nodeAllocator, AABBTreeRuntimeNode* dest)
{
// PT: gathers all build nodes allocated so far and flatten them to a linear destination array of smaller runtime nodes
PxU32 offset = 0;
- const PxU32 nbSlabs = mSlabs.size();
+ const PxU32 nbSlabs = nodeAllocator.mSlabs.size();
for(PxU32 s=0;s<nbSlabs;s++)
{
- const Slab& currentSlab = mSlabs[s];
+ const NodeAllocator::Slab& currentSlab = nodeAllocator.mSlabs[s];
AABBTreeBuildNode* pool = currentSlab.mPool;
for(PxU32 i=0;i<currentSlab.mNbUsedNodes;i++)
@@ -162,177 +99,19 @@ void NodeAllocator::flatten(AABBTreeRuntimeNode* dest)
PxU32 nodeBase = 0;
for(PxU32 j=0;j<nbSlabs;j++)
{
- if(pool[i].mPos>=mSlabs[j].mPool && pool[i].mPos<mSlabs[j].mPool+mSlabs[j].mNbUsedNodes)
+ if(pool[i].mPos>= nodeAllocator.mSlabs[j].mPool && pool[i].mPos < nodeAllocator.mSlabs[j].mPool + nodeAllocator.mSlabs[j].mNbUsedNodes)
{
- localNodeIndex = PxU32(pool[i].mPos - mSlabs[j].mPool);
+ localNodeIndex = PxU32(pool[i].mPos - nodeAllocator.mSlabs[j].mPool);
break;
}
- nodeBase += mSlabs[j].mNbUsedNodes;
+ nodeBase += nodeAllocator.mSlabs[j].mNbUsedNodes;
}
const PxU32 nodeIndex = nodeBase + localNodeIndex;
- PX_ASSERT(nodeIndex<mTotalNbNodes);
dest[offset].mData = nodeIndex<<1;
}
offset++;
}
}
- PX_ASSERT(offset==mTotalNbNodes);
- release();
-}
-
-static PX_FORCE_INLINE float getSplittingValue(const PxBounds3& global_box, PxU32 axis)
-{
- // Default split value = middle of the axis (using only the box)
- return global_box.getCenter(axis);
-}
-
-static PxU32 split(const PxBounds3& box, PxU32 nb, PxU32* const PX_RESTRICT prims, PxU32 axis, const AABBTreeBuildParams& params)
-{
- // Get node split value
- const float splitValue = getSplittingValue(box, axis);
-
- PxU32 nbPos = 0;
- // Loop through all node-related primitives. Their indices range from "mNodePrimitives[0]" to "mNodePrimitives[mNbPrimitives-1]",
- // with mNodePrimitives = mIndices + mNodeIndex (i.e. those indices map the global list in the tree params).
-
- // PT: to avoid calling the unsafe [] operator
- const size_t ptrValue = size_t(params.mCache) + axis*sizeof(float);
- const PxVec3* /*PX_RESTRICT*/ cache = reinterpret_cast<const PxVec3*>(ptrValue);
-
- for(PxU32 i=0;i<nb;i++)
- {
- // Get index in global list
- const PxU32 index = prims[i];
-
- // Test against the splitting value. The primitive value is tested against the enclosing-box center.
- // [We only need an approximate partition of the enclosing box here.]
- const float primitiveValue = cache[index].x;
- PX_ASSERT(primitiveValue==params.mCache[index][axis]);
-
- // Reorganize the list of indices in this order: positive - negative.
- if(primitiveValue > splitValue)
- {
- // Swap entries
- prims[i] = prims[nbPos];
- prims[nbPos] = index;
- // Count primitives assigned to positive space
- nbPos++;
- }
- }
- return nbPos;
-}
-
-void AABBTreeBuildNode::subdivide(const AABBTreeBuildParams& params, BuildStats& stats, NodeAllocator& allocator, PxU32* const indices)
-{
- PxU32* const PX_RESTRICT primitives = indices + mNodeIndex;
- const PxU32 nbPrims = mNbPrimitives;
-
- // Compute global box & means for current node. The box is stored in mBV.
- Vec4V meansV;
- {
- const PxBounds3* PX_RESTRICT boxes = params.mAABBArray;
- PX_ASSERT(boxes);
- PX_ASSERT(primitives);
- PX_ASSERT(nbPrims);
-
- Vec4V minV = V4LoadU(&boxes[primitives[0]].minimum.x);
- Vec4V maxV = V4LoadU(&boxes[primitives[0]].maximum.x);
-
- meansV = V4LoadU(&params.mCache[primitives[0]].x);
-
- for(PxU32 i=1;i<nbPrims;i++)
- {
- const PxU32 index = primitives[i];
- const Vec4V curMinV = V4LoadU(&boxes[index].minimum.x);
- const Vec4V curMaxV = V4LoadU(&boxes[index].maximum.x);
- meansV = V4Add(meansV, V4LoadU(&params.mCache[index].x));
- minV = V4Min(minV, curMinV);
- maxV = V4Max(maxV, curMaxV);
- }
-
- StoreBounds(mBV, minV, maxV);
-
- const float coeff = 1.0f/float(nbPrims);
- meansV = V4Scale(meansV, FLoad(coeff));
- }
-
- // Check the user-defined limit. Also ensures we stop subdividing if we reach a leaf node.
- if(nbPrims<=params.mLimit)
- return;
-
- bool validSplit = true;
- PxU32 nbPos;
- {
- // Compute variances
- Vec4V varsV = V4Zero();
- for(PxU32 i=0;i<nbPrims;i++)
- {
- const PxU32 index = primitives[i];
- Vec4V centerV = V4LoadU(&params.mCache[index].x);
- centerV = V4Sub(centerV, meansV);
- centerV = V4Mul(centerV, centerV);
- varsV = V4Add(varsV, centerV);
- }
- const float coeffNb1 = 1.0f/float(nbPrims-1);
- varsV = V4Scale(varsV, FLoad(coeffNb1));
- PX_ALIGN(16, PxVec4) vars;
- V4StoreA(varsV, &vars.x);
-
- // Choose axis with greatest variance
- const PxU32 axis = Ps::largestAxis(PxVec3(vars.x, vars.y, vars.z));
-
- // Split along the axis
- nbPos = split(mBV, nbPrims, primitives, axis, params);
-
- // Check split validity
- if(!nbPos || nbPos==nbPrims)
- validSplit = false;
- }
-
- // Check the subdivision has been successful
- if(!validSplit)
- {
- // Here, all boxes lie in the same sub-space. Two strategies:
- // - if we are over the split limit, make an arbitrary 50-50 split
- // - else stop subdividing
- if(nbPrims>params.mLimit)
- {
- nbPos = nbPrims>>1;
- }
- else return;
- }
-
- // Now create children and assign their pointers.
- mPos = allocator.getBiNode();
-
- stats.increaseCount(2);
-
- // Assign children
- PX_ASSERT(!isLeaf());
- AABBTreeBuildNode* Pos = const_cast<AABBTreeBuildNode*>(mPos);
- AABBTreeBuildNode* Neg = Pos + 1;
- Pos->mNodeIndex = mNodeIndex;
- Pos->mNbPrimitives = nbPos;
- Neg->mNodeIndex = mNodeIndex + nbPos;
- Neg->mNbPrimitives = mNbPrimitives - nbPos;
-}
-
-void AABBTreeBuildNode::_buildHierarchy(AABBTreeBuildParams& params, BuildStats& stats, NodeAllocator& nodeBase, PxU32* const indices)
-{
- // Subdivide current node
- subdivide(params, stats, nodeBase, indices);
-
- // Recurse
- if(!isLeaf())
- {
- AABBTreeBuildNode* Pos = const_cast<AABBTreeBuildNode*>(getPos());
- PX_ASSERT(Pos);
- AABBTreeBuildNode* Neg = Pos + 1;
- Pos->_buildHierarchy(params, stats, nodeBase, indices);
- Neg->_buildHierarchy(params, stats, nodeBase, indices);
- }
-
- stats.mTotalPrims += mNbPrimitives;
}
AABBTree::AABBTree() :
@@ -449,7 +228,8 @@ void AABBTree::buildEnd(AABBTreeBuildParams& params, BuildStats& stats)
mRuntimePool = PX_NEW(AABBTreeRuntimeNode)[mTotalNbNodes];
PX_ASSERT(mTotalNbNodes==mNodeAllocator.mTotalNbNodes);
- mNodeAllocator.flatten(mRuntimePool);
+ flatten(mNodeAllocator, mRuntimePool);
+ mNodeAllocator.release();
}
bool AABBTree::build(AABBTreeBuildParams& params)
diff --git a/PhysX_3.4/Source/SceneQuery/src/SqAABBTree.h b/PhysX_3.4/Source/SceneQuery/src/SqAABBTree.h
index 376de64a..7444299e 100644
--- a/PhysX_3.4/Source/SceneQuery/src/SqAABBTree.h
+++ b/PhysX_3.4/Source/SceneQuery/src/SqAABBTree.h
@@ -35,6 +35,7 @@
#include "PsUserAllocated.h"
#include "PsVecMath.h"
#include "SqTypedef.h"
+#include "SqAABBTreeBuild.h"
#include "PsArray.h"
namespace physx
@@ -91,80 +92,6 @@ namespace Sq
PxU32 mSize; //!< Size of the array in dwords
};
- //! Contains AABB-tree build statistics
- struct BuildStats
- {
- BuildStats() : mCount(0), mTotalPrims(0) {}
-
- PxU32 mCount; //!< Number of nodes created
- PxU32 mTotalPrims; //!< Total accumulated number of primitives. Should be much higher than the source
- //!< number of prims, since it accumulates all prims covered by each node (i.e. internal
- //!< nodes too, not just leaf ones)
-
- PX_FORCE_INLINE void reset() { mCount = mTotalPrims = 0; }
-
- PX_FORCE_INLINE void setCount(PxU32 nb) { mCount=nb; }
- PX_FORCE_INLINE void increaseCount(PxU32 nb) { mCount+=nb; }
- PX_FORCE_INLINE PxU32 getCount() const { return mCount; }
- };
-
- //! Contains AABB-tree build parameters
- class AABBTreeBuildParams : public Ps::UserAllocated
- {
- public:
- AABBTreeBuildParams(PxU32 limit=1, PxU32 nb_prims=0, const PxBounds3* boxes=NULL) :
- mLimit(limit), mNbPrimitives(nb_prims), mAABBArray(boxes), mCache(NULL) {}
- ~AABBTreeBuildParams()
- {
- reset();
- }
-
- PX_FORCE_INLINE void reset()
- {
- mLimit = mNbPrimitives = 0;
- mAABBArray = NULL;
- PX_FREE_AND_RESET(mCache);
- }
-
- PxU32 mLimit; //!< Limit number of primitives / node. If limit is 1, build a complete tree (2*N-1 nodes)
- PxU32 mNbPrimitives; //!< Number of (source) primitives.
- const PxBounds3* mAABBArray; //!< Shortcut to an app-controlled array of AABBs.
- PxVec3* mCache; //!< Cache for AABB centers - managed by build code.
- };
-
- class NodeAllocator;
-
- //! AABB tree node used for building
- class AABBTreeBuildNode : public Ps::UserAllocated
- {
- public:
- PX_FORCE_INLINE AABBTreeBuildNode() {}
- PX_FORCE_INLINE ~AABBTreeBuildNode() {}
-
- PX_FORCE_INLINE const PxBounds3& getAABB() const { return mBV; }
- PX_FORCE_INLINE const AABBTreeBuildNode* getPos() const { return mPos; }
- PX_FORCE_INLINE const AABBTreeBuildNode* getNeg() const { const AABBTreeBuildNode* P = mPos; return P ? P+1 : NULL; }
-
- PX_FORCE_INLINE bool isLeaf() const { return !getPos(); }
-
- PxBounds3 mBV; //!< Global bounding-volume enclosing all the node-related primitives
- const AABBTreeBuildNode* mPos; //!< "Positive" & "Negative" children
-
- PxU32 mNodeIndex; //!< Index of node-related primitives (in the tree's mIndices array)
- PxU32 mNbPrimitives; //!< Number of primitives for this node
-
- // Data access
- PX_FORCE_INLINE PxU32 getNbPrimitives() const { return mNbPrimitives; }
-
- PX_FORCE_INLINE PxU32 getNbRuntimePrimitives() const { return mNbPrimitives; }
- PX_FORCE_INLINE void setNbRunTimePrimitives(PxU32 val) { mNbPrimitives = val; }
- PX_FORCE_INLINE const PxU32* getPrimitives(const PxU32* base) const { return base+mNodeIndex; }
- PX_FORCE_INLINE PxU32* getPrimitives(PxU32* base) { return base+mNodeIndex; }
-
- // Internal methods
- void subdivide(const AABBTreeBuildParams& params, BuildStats& stats, NodeAllocator& allocator, PxU32* const indices);
- void _buildHierarchy(AABBTreeBuildParams& params, BuildStats& stats, NodeAllocator& allocator, PxU32* const indices);
- };
//! AABB tree node used for runtime (smaller than for build)
class AABBTreeRuntimeNode : public Ps::UserAllocated
@@ -254,38 +181,6 @@ namespace Sq
class FIFOStack;
//~Progressive building
- //! For complete trees we can predict the final number of nodes and preallocate them. For incomplete trees we can't.
- //! But we don't want to allocate nodes one by one (which would be quite slow), so we use this helper class to
- //! allocate N nodes at once, while minimizing the amount of nodes allocated for nothing. An initial amount of
- //! nodes is estimated using the max number for a complete tree, and the user-defined number of primitives per leaf.
- //! In ideal cases this estimated number will be quite close to the final number of nodes. When that number is not
- //! enough though, slabs of N=1024 extra nodes are allocated until the build is complete.
- class NodeAllocator : public Ps::UserAllocated
- {
- public:
- NodeAllocator();
- ~NodeAllocator();
-
- void release();
- void init(PxU32 nbPrimitives, PxU32 limit);
- void flatten(AABBTreeRuntimeNode* dest);
- AABBTreeBuildNode* getBiNode();
-
- AABBTreeBuildNode* mPool;
-
- struct Slab
- {
- PX_FORCE_INLINE Slab() {}
- PX_FORCE_INLINE Slab(AABBTreeBuildNode* pool, PxU32 nbUsedNodes, PxU32 maxNbNodes) : mPool(pool), mNbUsedNodes(nbUsedNodes), mMaxNbNodes(maxNbNodes) {}
- AABBTreeBuildNode* mPool;
- PxU32 mNbUsedNodes;
- PxU32 mMaxNbNodes;
- };
- Ps::Array<Slab> mSlabs;
- PxU32 mCurrentSlabIndex;
- PxU32 mTotalNbNodes;
- };
-
//! AABB-tree, N primitives/leaf
class AABBTree : public Ps::UserAllocated
{
diff --git a/PhysX_3.4/Source/SceneQuery/src/SqAABBTreeBuild.cpp b/PhysX_3.4/Source/SceneQuery/src/SqAABBTreeBuild.cpp
new file mode 100644
index 00000000..3a8fb9ab
--- /dev/null
+++ b/PhysX_3.4/Source/SceneQuery/src/SqAABBTreeBuild.cpp
@@ -0,0 +1,256 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2017 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#include "SqAABBTreeBuild.h"
+#include "SqBounds.h"
+
+#include "PsMathUtils.h"
+#include "PsFoundation.h"
+#include "GuInternal.h"
+
+using namespace physx;
+using namespace Sq;
+
+NodeAllocator::NodeAllocator() : mPool(NULL), mCurrentSlabIndex(0), mTotalNbNodes(0)
+{
+}
+
+NodeAllocator::~NodeAllocator()
+{
+ release();
+}
+
+void NodeAllocator::release()
+{
+ const PxU32 nbSlabs = mSlabs.size();
+ for (PxU32 i = 0; i<nbSlabs; i++)
+ {
+ Slab& s = mSlabs[i];
+ PX_DELETE_ARRAY(s.mPool);
+ }
+
+ mSlabs.reset();
+ mCurrentSlabIndex = 0;
+ mTotalNbNodes = 0;
+}
+
+void NodeAllocator::init(PxU32 nbPrimitives, PxU32 limit)
+{
+ const PxU32 maxSize = nbPrimitives * 2 - 1; // PT: max possible #nodes for a complete tree
+ const PxU32 estimatedFinalSize = maxSize <= 1024 ? maxSize : maxSize / limit;
+ mPool = PX_NEW(AABBTreeBuildNode)[estimatedFinalSize];
+ PxMemZero(mPool, sizeof(AABBTreeBuildNode)*estimatedFinalSize);
+
+ // Setup initial node. Here we have a complete permutation of the app's primitives.
+ mPool->mNodeIndex = 0;
+ mPool->mNbPrimitives = nbPrimitives;
+
+ mSlabs.pushBack(Slab(mPool, 1, estimatedFinalSize));
+ mCurrentSlabIndex = 0;
+ mTotalNbNodes = 1;
+}
+
+// PT: TODO: inline this?
+AABBTreeBuildNode* NodeAllocator::getBiNode()
+{
+ mTotalNbNodes += 2;
+ Slab& currentSlab = mSlabs[mCurrentSlabIndex];
+ if (currentSlab.mNbUsedNodes + 2 <= currentSlab.mMaxNbNodes)
+ {
+ AABBTreeBuildNode* biNode = currentSlab.mPool + currentSlab.mNbUsedNodes;
+ currentSlab.mNbUsedNodes += 2;
+ return biNode;
+ }
+ else
+ {
+ // Allocate new slab
+ const PxU32 size = 1024;
+ AABBTreeBuildNode* pool = PX_NEW(AABBTreeBuildNode)[size];
+ PxMemZero(pool, sizeof(AABBTreeBuildNode)*size);
+
+ mSlabs.pushBack(Slab(pool, 2, size));
+ mCurrentSlabIndex++;
+ return pool;
+ }
+}
+
+static PX_FORCE_INLINE float getSplittingValue(const PxBounds3& global_box, PxU32 axis)
+{
+ // Default split value = middle of the axis (using only the box)
+ return global_box.getCenter(axis);
+}
+
+static PxU32 split(const PxBounds3& box, PxU32 nb, PxU32* const PX_RESTRICT prims, PxU32 axis, const AABBTreeBuildParams& params)
+{
+ // Get node split value
+ const float splitValue = getSplittingValue(box, axis);
+
+ PxU32 nbPos = 0;
+ // Loop through all node-related primitives. Their indices range from "mNodePrimitives[0]" to "mNodePrimitives[mNbPrimitives-1]",
+ // with mNodePrimitives = mIndices + mNodeIndex (i.e. those indices map the global list in the tree params).
+
+ // PT: to avoid calling the unsafe [] operator
+ const size_t ptrValue = size_t(params.mCache) + axis * sizeof(float);
+ const PxVec3* /*PX_RESTRICT*/ cache = reinterpret_cast<const PxVec3*>(ptrValue);
+
+ for (PxU32 i = 0; i<nb; i++)
+ {
+ // Get index in global list
+ const PxU32 index = prims[i];
+
+ // Test against the splitting value. The primitive value is tested against the enclosing-box center.
+ // [We only need an approximate partition of the enclosing box here.]
+ const float primitiveValue = cache[index].x;
+ PX_ASSERT(primitiveValue == params.mCache[index][axis]);
+
+ // Reorganize the list of indices in this order: positive - negative.
+ if (primitiveValue > splitValue)
+ {
+ // Swap entries
+ prims[i] = prims[nbPos];
+ prims[nbPos] = index;
+ // Count primitives assigned to positive space
+ nbPos++;
+ }
+ }
+ return nbPos;
+}
+
+void AABBTreeBuildNode::subdivide(const AABBTreeBuildParams& params, BuildStats& stats, NodeAllocator& allocator, PxU32* const indices)
+{
+ PxU32* const PX_RESTRICT primitives = indices + mNodeIndex;
+ const PxU32 nbPrims = mNbPrimitives;
+
+ // Compute global box & means for current node. The box is stored in mBV.
+ Vec4V meansV;
+ {
+ const PxBounds3* PX_RESTRICT boxes = params.mAABBArray;
+ PX_ASSERT(boxes);
+ PX_ASSERT(primitives);
+ PX_ASSERT(nbPrims);
+
+ Vec4V minV = V4LoadU(&boxes[primitives[0]].minimum.x);
+ Vec4V maxV = V4LoadU(&boxes[primitives[0]].maximum.x);
+
+ meansV = V4LoadU(&params.mCache[primitives[0]].x);
+
+ for (PxU32 i = 1; i<nbPrims; i++)
+ {
+ const PxU32 index = primitives[i];
+ const Vec4V curMinV = V4LoadU(&boxes[index].minimum.x);
+ const Vec4V curMaxV = V4LoadU(&boxes[index].maximum.x);
+ meansV = V4Add(meansV, V4LoadU(&params.mCache[index].x));
+ minV = V4Min(minV, curMinV);
+ maxV = V4Max(maxV, curMaxV);
+ }
+
+ StoreBounds(mBV, minV, maxV);
+
+ const float coeff = 1.0f / float(nbPrims);
+ meansV = V4Scale(meansV, FLoad(coeff));
+ }
+
+ // Check the user-defined limit. Also ensures we stop subdividing if we reach a leaf node.
+ if (nbPrims <= params.mLimit)
+ return;
+
+ bool validSplit = true;
+ PxU32 nbPos;
+ {
+ // Compute variances
+ Vec4V varsV = V4Zero();
+ for (PxU32 i = 0; i<nbPrims; i++)
+ {
+ const PxU32 index = primitives[i];
+ Vec4V centerV = V4LoadU(&params.mCache[index].x);
+ centerV = V4Sub(centerV, meansV);
+ centerV = V4Mul(centerV, centerV);
+ varsV = V4Add(varsV, centerV);
+ }
+ const float coeffNb1 = 1.0f / float(nbPrims - 1);
+ varsV = V4Scale(varsV, FLoad(coeffNb1));
+ PX_ALIGN(16, PxVec4) vars;
+ V4StoreA(varsV, &vars.x);
+
+ // Choose axis with greatest variance
+ const PxU32 axis = Ps::largestAxis(PxVec3(vars.x, vars.y, vars.z));
+
+ // Split along the axis
+ nbPos = split(mBV, nbPrims, primitives, axis, params);
+
+ // Check split validity
+ if (!nbPos || nbPos == nbPrims)
+ validSplit = false;
+ }
+
+ // Check the subdivision has been successful
+ if (!validSplit)
+ {
+ // Here, all boxes lie in the same sub-space. Two strategies:
+ // - if we are over the split limit, make an arbitrary 50-50 split
+ // - else stop subdividing
+ if (nbPrims>params.mLimit)
+ {
+ nbPos = nbPrims >> 1;
+ }
+ else return;
+ }
+
+ // Now create children and assign their pointers.
+ mPos = allocator.getBiNode();
+
+ stats.increaseCount(2);
+
+ // Assign children
+ PX_ASSERT(!isLeaf());
+ AABBTreeBuildNode* Pos = const_cast<AABBTreeBuildNode*>(mPos);
+ AABBTreeBuildNode* Neg = Pos + 1;
+ Pos->mNodeIndex = mNodeIndex;
+ Pos->mNbPrimitives = nbPos;
+ Neg->mNodeIndex = mNodeIndex + nbPos;
+ Neg->mNbPrimitives = mNbPrimitives - nbPos;
+}
+
+void AABBTreeBuildNode::_buildHierarchy(AABBTreeBuildParams& params, BuildStats& stats, NodeAllocator& nodeBase, PxU32* const indices)
+{
+ // Subdivide current node
+ subdivide(params, stats, nodeBase, indices);
+
+ // Recurse
+ if (!isLeaf())
+ {
+ AABBTreeBuildNode* Pos = const_cast<AABBTreeBuildNode*>(getPos());
+ PX_ASSERT(Pos);
+ AABBTreeBuildNode* Neg = Pos + 1;
+ Pos->_buildHierarchy(params, stats, nodeBase, indices);
+ Neg->_buildHierarchy(params, stats, nodeBase, indices);
+ }
+
+ stats.mTotalPrims += mNbPrimitives;
+}
diff --git a/PhysX_3.4/Source/SceneQuery/src/SqAABBTreeBuild.h b/PhysX_3.4/Source/SceneQuery/src/SqAABBTreeBuild.h
new file mode 100644
index 00000000..a8ab94f3
--- /dev/null
+++ b/PhysX_3.4/Source/SceneQuery/src/SqAABBTreeBuild.h
@@ -0,0 +1,162 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2017 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#ifndef SQ_AABBTREE_BUILD_H
+#define SQ_AABBTREE_BUILD_H
+
+#include "foundation/PxMemory.h"
+#include "foundation/PxBounds3.h"
+#include "PsUserAllocated.h"
+#include "PsVecMath.h"
+#include "SqTypedef.h"
+#include "PsArray.h"
+
+namespace physx
+{
+
+ using namespace shdfnd::aos;
+
+ namespace Sq
+ {
+ //! Contains AABB-tree build statistics
+ struct BuildStats
+ {
+ BuildStats() : mCount(0), mTotalPrims(0) {}
+
+ PxU32 mCount; //!< Number of nodes created
+ PxU32 mTotalPrims; //!< Total accumulated number of primitives. Should be much higher than the source
+ //!< number of prims, since it accumulates all prims covered by each node (i.e. internal
+ //!< nodes too, not just leaf ones)
+
+ PX_FORCE_INLINE void reset() { mCount = mTotalPrims = 0; }
+
+ PX_FORCE_INLINE void setCount(PxU32 nb) { mCount = nb; }
+ PX_FORCE_INLINE void increaseCount(PxU32 nb) { mCount += nb; }
+ PX_FORCE_INLINE PxU32 getCount() const { return mCount; }
+ };
+
+ //! Contains AABB-tree build parameters
+ class AABBTreeBuildParams : public Ps::UserAllocated
+ {
+ public:
+ AABBTreeBuildParams(PxU32 limit = 1, PxU32 nb_prims = 0, const PxBounds3* boxes = NULL) :
+ mLimit(limit), mNbPrimitives(nb_prims), mAABBArray(boxes), mCache(NULL) {}
+ ~AABBTreeBuildParams()
+ {
+ reset();
+ }
+
+ PX_FORCE_INLINE void reset()
+ {
+ mLimit = mNbPrimitives = 0;
+ mAABBArray = NULL;
+ PX_FREE_AND_RESET(mCache);
+ }
+
+ PxU32 mLimit; //!< Limit number of primitives / node. If limit is 1, build a complete tree (2*N-1 nodes)
+ PxU32 mNbPrimitives; //!< Number of (source) primitives.
+ const PxBounds3* mAABBArray; //!< Shortcut to an app-controlled array of AABBs.
+ PxVec3* mCache; //!< Cache for AABB centers - managed by build code.
+ };
+
+ class NodeAllocator;
+
+ //! AABB tree node used for building
+ class AABBTreeBuildNode : public Ps::UserAllocated
+ {
+ public:
+ PX_FORCE_INLINE AABBTreeBuildNode() {}
+ PX_FORCE_INLINE ~AABBTreeBuildNode() {}
+
+ PX_FORCE_INLINE const PxBounds3& getAABB() const { return mBV; }
+ PX_FORCE_INLINE const AABBTreeBuildNode* getPos() const { return mPos; }
+ PX_FORCE_INLINE const AABBTreeBuildNode* getNeg() const { const AABBTreeBuildNode* P = mPos; return P ? P + 1 : NULL; }
+
+ PX_FORCE_INLINE bool isLeaf() const { return !getPos(); }
+
+ PxBounds3 mBV; //!< Global bounding-volume enclosing all the node-related primitives
+ const AABBTreeBuildNode* mPos; //!< "Positive" & "Negative" children
+
+ PxU32 mNodeIndex; //!< Index of node-related primitives (in the tree's mIndices array)
+ PxU32 mNbPrimitives; //!< Number of primitives for this node
+
+ // Data access
+ PX_FORCE_INLINE PxU32 getNbPrimitives() const { return mNbPrimitives; }
+
+ PX_FORCE_INLINE PxU32 getNbRuntimePrimitives() const { return mNbPrimitives; }
+ PX_FORCE_INLINE void setNbRunTimePrimitives(PxU32 val) { mNbPrimitives = val; }
+ PX_FORCE_INLINE const PxU32* getPrimitives(const PxU32* base) const { return base + mNodeIndex; }
+ PX_FORCE_INLINE PxU32* getPrimitives(PxU32* base) { return base + mNodeIndex; }
+
+ // Internal methods
+ void subdivide(const AABBTreeBuildParams& params, BuildStats& stats, NodeAllocator& allocator, PxU32* const indices);
+ void _buildHierarchy(AABBTreeBuildParams& params, BuildStats& stats, NodeAllocator& allocator, PxU32* const indices);
+ };
+
+ // Progressive building
+ class FIFOStack;
+ //~Progressive building
+
+ //! For complete trees we can predict the final number of nodes and preallocate them. For incomplete trees we can't.
+ //! But we don't want to allocate nodes one by one (which would be quite slow), so we use this helper class to
+ //! allocate N nodes at once, while minimizing the amount of nodes allocated for nothing. An initial amount of
+ //! nodes is estimated using the max number for a complete tree, and the user-defined number of primitives per leaf.
+ //! In ideal cases this estimated number will be quite close to the final number of nodes. When that number is not
+ //! enough though, slabs of N=1024 extra nodes are allocated until the build is complete.
+ class NodeAllocator : public Ps::UserAllocated
+ {
+ public:
+ NodeAllocator();
+ ~NodeAllocator();
+
+ void release();
+ void init(PxU32 nbPrimitives, PxU32 limit);
+ AABBTreeBuildNode* getBiNode();
+
+ AABBTreeBuildNode* mPool;
+
+ struct Slab
+ {
+ PX_FORCE_INLINE Slab() {}
+ PX_FORCE_INLINE Slab(AABBTreeBuildNode* pool, PxU32 nbUsedNodes, PxU32 maxNbNodes) : mPool(pool), mNbUsedNodes(nbUsedNodes), mMaxNbNodes(maxNbNodes) {}
+ AABBTreeBuildNode* mPool;
+ PxU32 mNbUsedNodes;
+ PxU32 mMaxNbNodes;
+ };
+ Ps::Array<Slab> mSlabs;
+ PxU32 mCurrentSlabIndex;
+ PxU32 mTotalNbNodes;
+ };
+
+
+ } // namespace Sq
+
+}
+
+#endif // SQ_AABBTREE_BUILD_H
diff --git a/PhysX_3.4/Source/SceneQuery/src/SqAABBTreeQuery.h b/PhysX_3.4/Source/SceneQuery/src/SqAABBTreeQuery.h
index 062212a2..ec1ccb46 100644
--- a/PhysX_3.4/Source/SceneQuery/src/SqAABBTreeQuery.h
+++ b/PhysX_3.4/Source/SceneQuery/src/SqAABBTreeQuery.h
@@ -54,22 +54,22 @@ namespace physx
//////////////////////////////////////////////////////////////////////////
- template<typename Test>
+ template<typename Test, typename Tree, typename Node>
class AABBTreeOverlap
{
public:
- bool operator()(const PrunerPayload* objects, const PxBounds3* boxes, const AABBTree& tree, const Test& test, PrunerCallback& visitor)
+ bool operator()(const PrunerPayload* objects, const PxBounds3* boxes, const Tree& tree, const Test& test, PrunerCallback& visitor)
{
using namespace Cm;
- const AABBTreeRuntimeNode* stack[RAW_TRAVERSAL_STACK_SIZE];
- const AABBTreeRuntimeNode* const nodeBase = tree.getNodes();
+ const Node* stack[RAW_TRAVERSAL_STACK_SIZE];
+ const Node* const nodeBase = tree.getNodes();
stack[0] = nodeBase;
PxU32 stackIndex = 1;
while (stackIndex > 0)
{
- const AABBTreeRuntimeNode* node = stack[--stackIndex];
+ const Node* node = stack[--stackIndex];
Vec3V center, extents;
node->getAABBCenterExtentsV(&center, &extents);
while (test(center, extents))
@@ -107,7 +107,7 @@ namespace physx
break;
}
- const AABBTreeRuntimeNode* children = node->getPos(nodeBase);
+ const Node* children = node->getPos(nodeBase);
node = children;
stack[stackIndex++] = children + 1;
@@ -121,9 +121,9 @@ namespace physx
//////////////////////////////////////////////////////////////////////////
- template <bool tInflate> // use inflate=true for sweeps, inflate=false for raycasts
- static PX_FORCE_INLINE bool doLeafTest(const AABBTreeRuntimeNode* node, Gu::RayAABBTest& test, PxReal& md, PxReal oldMaxDist,
- const PrunerPayload* objects, const PxBounds3* boxes, const AABBTree& tree,
+ template <bool tInflate, typename Tree, typename Node> // use inflate=true for sweeps, inflate=false for raycasts
+ static PX_FORCE_INLINE bool doLeafTest(const Node* node, Gu::RayAABBTest& test, PxReal& md, PxReal oldMaxDist,
+ const PrunerPayload* objects, const PxBounds3* boxes, const Tree& tree,
PxReal& maxDist, PrunerCallback& pcb)
{
PxU32 nbPrims = node->getNbPrimitives();
@@ -158,12 +158,12 @@ namespace physx
//////////////////////////////////////////////////////////////////////////
- template <bool tInflate> // use inflate=true for sweeps, inflate=false for raycasts
+ template <bool tInflate, typename Tree, typename Node> // use inflate=true for sweeps, inflate=false for raycasts
class AABBTreeRaycast
{
public:
bool operator()(
- const PrunerPayload* objects, const PxBounds3* boxes, const AABBTree& tree,
+ const PrunerPayload* objects, const PxBounds3* boxes, const Tree& tree,
const PxVec3& origin, const PxVec3& unitDir, PxReal& maxDist, const PxVec3& inflation,
PrunerCallback& pcb)
{
@@ -173,15 +173,15 @@ namespace physx
// So we initialize the test with values multiplied by 2 as well, to get correct results
Gu::RayAABBTest test(origin*2.0f, unitDir*2.0f, maxDist, inflation*2.0f);
- const AABBTreeRuntimeNode* stack[RAW_TRAVERSAL_STACK_SIZE]; // stack always contains PPU addresses
- const AABBTreeRuntimeNode* const nodeBase = tree.getNodes();
+ const Node* stack[RAW_TRAVERSAL_STACK_SIZE]; // stack always contains PPU addresses
+ const Node* const nodeBase = tree.getNodes();
stack[0] = nodeBase;
PxU32 stackIndex = 1;
PxReal oldMaxDist;
while (stackIndex--)
{
- const AABBTreeRuntimeNode* node = stack[stackIndex];
+ const Node* node = stack[stackIndex];
Vec3V center, extents;
node->getAABBCenterExtentsV2(&center, &extents);
if (test.check<tInflate>(center, extents)) // TODO: try timestamp ray shortening to skip this
@@ -189,7 +189,7 @@ namespace physx
PxReal md = maxDist; // has to be before the goto below to avoid compile error
while (!node->isLeaf())
{
- const AABBTreeRuntimeNode* children = node->getPos(nodeBase);
+ const Node* children = node->getPos(nodeBase);
Vec3V c0, e0;
children[0].getAABBCenterExtentsV2(&c0, &e0);
@@ -217,7 +217,7 @@ namespace physx
oldMaxDist = maxDist; // we copy since maxDist can be updated in the callback and md<maxDist test below can fail
- if (!doLeafTest<tInflate>(node, test, md, oldMaxDist,
+ if (!doLeafTest<tInflate, Tree, Node>(node, test, md, oldMaxDist,
objects, boxes, tree,
maxDist,
pcb))
diff --git a/PhysX_3.4/Source/SceneQuery/src/SqExtendedBucketPruner.cpp b/PhysX_3.4/Source/SceneQuery/src/SqExtendedBucketPruner.cpp
index a9a7b2ef..beaa6e01 100644
--- a/PhysX_3.4/Source/SceneQuery/src/SqExtendedBucketPruner.cpp
+++ b/PhysX_3.4/Source/SceneQuery/src/SqExtendedBucketPruner.cpp
@@ -45,7 +45,13 @@ using namespace Ps;
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Constructor, preallocate trees, bounds
ExtendedBucketPruner::ExtendedBucketPruner(const PruningPool* pool)
- : mBucketCore(false), mPruningPool(pool), mMainTree(NULL), mBounds(NULL), mMergedTrees(NULL),
+ :
+#if USE_INCREMENTAL_PRUNER
+ mPrunerCore(pool),
+#else
+ mPrunerCore(false),
+#endif
+ mPruningPool(pool), mMainTree(NULL), mBounds(NULL), mMergedTrees(NULL),
mCurrentTreeIndex(0), mTreesDirty(false)
{
// preallocated size for bounds, trees
@@ -92,7 +98,7 @@ ExtendedBucketPruner::~ExtendedBucketPruner()
void ExtendedBucketPruner::release()
{
// release core bucket pruner
- mBucketCore.release();
+ mPrunerCore.release();
mMainTreeUpdateMap.release();
mMergeTreeUpdateMap.release();
@@ -216,14 +222,20 @@ void ExtendedBucketPruner::resize(PxU32 size)
//////////////////////////////////////////////////////////////////////////
// Update object
-bool ExtendedBucketPruner::updateObject(const PxBounds3& worldAABB, const PrunerPayload& object)
+bool ExtendedBucketPruner::updateObject(const PxBounds3& worldAABB, const PrunerPayload& object, const PoolIndex poolIndex)
{
const ExtendedBucketPrunerMap::Entry* extendedPrunerEntry = mExtendedBucketPrunerMap.find(object);
// if object is not in tree of trees, it is in bucket pruner core
if(!extendedPrunerEntry)
{
- return mBucketCore.updateObject(worldAABB, object);
+#if USE_INCREMENTAL_PRUNER
+ PX_UNUSED(worldAABB);
+ return mPrunerCore.updateObject(poolIndex);
+#else
+ PX_UNUSED(poolIndex);
+ return mPrunerCore.updateObject(worldAABB, object);
+#endif
}
else
{
@@ -352,8 +364,13 @@ bool ExtendedBucketPruner::removeObject(const PrunerPayload& object, PxU32 objec
// we need to call invalidateObjects, it might happen that the swapped object
// does belong to the extended bucket pruner, in that case the objects index
// needs to be swapped.
- swapIndex(objectIndex, swapObject, swapObjectIndex);
- return mBucketCore.removeObject(object, timeStamp);
+ // do not call additional bucket pruner swap, that does happen during remove
+ swapIndex(objectIndex, swapObject, swapObjectIndex, false);
+#if USE_INCREMENTAL_PRUNER
+ return mPrunerCore.removeObject(objectIndex, swapObjectIndex, timeStamp);
+#else
+ return mPrunerCore.removeObject(object, timeStamp);
+#endif
}
else
{
@@ -424,8 +441,9 @@ void ExtendedBucketPruner::invalidateObject(const ExtendedBucketPrunerData& data
// Swap object index
// if swapObject is in a merged tree its index needs to be swapped with objectIndex
-void ExtendedBucketPruner::swapIndex(PxU32 objectIndex, const PrunerPayload& swapObject, PxU32 swapObjectIndex)
+void ExtendedBucketPruner::swapIndex(PxU32 objectIndex, const PrunerPayload& swapObject, PxU32 swapObjectIndex, bool corePrunerIncluded)
{
+ PX_UNUSED(corePrunerIncluded);
if (objectIndex == swapObjectIndex)
return;
@@ -463,6 +481,13 @@ void ExtendedBucketPruner::swapIndex(PxU32 objectIndex, const PrunerPayload& swa
PX_ASSERT(foundIt);
PX_UNUSED(foundIt);
}
+#if USE_INCREMENTAL_PRUNER
+ else
+ {
+ if(corePrunerIncluded)
+ mPrunerCore.swapIndex(objectIndex, swapObjectIndex);
+ }
+#endif
}
//////////////////////////////////////////////////////////////////////////
@@ -470,7 +495,7 @@ void ExtendedBucketPruner::swapIndex(PxU32 objectIndex, const PrunerPayload& swa
PxU32 ExtendedBucketPruner::removeMarkedObjects(PxU32 timeStamp)
{
// remove objects from the core bucket pruner
- PxU32 retVal = mBucketCore.removeMarkedObjects(timeStamp);
+ PxU32 retVal = mPrunerCore.removeMarkedObjects(timeStamp);
// nothing to be removed
if(!mCurrentTreeIndex)
@@ -591,7 +616,7 @@ void ExtendedBucketPruner::shiftOrigin(const PxVec3& shift)
mMergedTrees[i].mTree->shiftOrigin(shift);
}
- mBucketCore.shiftOrigin(shift);
+ mPrunerCore.shiftOrigin(shift);
}
//////////////////////////////////////////////////////////////////////////
@@ -611,7 +636,7 @@ struct MainTreeRaycastPrunerCallback: public PrunerCallback
// payload data match merged tree data MergedTree, we can cast it
const AABBTree* aabbTree = reinterpret_cast<const AABBTree*> (payload.data[0]);
// raycast the merged tree
- return AABBTreeRaycast<tInflate>()(mPruningPool->getObjects(), mPruningPool->getCurrentWorldBoxes(), *aabbTree, mOrigin, mUnitDir, distance, mExtent, mPrunerCallback);
+ return AABBTreeRaycast<tInflate, AABBTree, AABBTreeRuntimeNode>()(mPruningPool->getObjects(), mPruningPool->getCurrentWorldBoxes(), *aabbTree, mOrigin, mUnitDir, distance, mExtent, mPrunerCallback);
}
PX_NOCOPY(MainTreeRaycastPrunerCallback)
@@ -631,8 +656,8 @@ PxAgain ExtendedBucketPruner::raycast(const PxVec3& origin, const PxVec3& unitDi
PxAgain again = true;
	// search the bucket pruner first
- if (mBucketCore.getNbObjects())
- again = mBucketCore.raycast(origin, unitDir, inOutDistance, prunerCallback);
+ if (mPrunerCore.getNbObjects())
+ again = mPrunerCore.raycast(origin, unitDir, inOutDistance, prunerCallback);
if (again && mExtendedBucketPrunerMap.size())
{
@@ -640,7 +665,7 @@ PxAgain ExtendedBucketPruner::raycast(const PxVec3& origin, const PxVec3& unitDi
// main tree callback
MainTreeRaycastPrunerCallback<false> pcb(origin, unitDir, extent, prunerCallback, mPruningPool);
// traverse the main tree
- again = AABBTreeRaycast<false>()(reinterpret_cast<const PrunerPayload*>(mMergedTrees), mBounds, *mMainTree, origin, unitDir, inOutDistance, extent, pcb);
+ again = AABBTreeRaycast<false, AABBTree, AABBTreeRuntimeNode>()(reinterpret_cast<const PrunerPayload*>(mMergedTrees), mBounds, *mMainTree, origin, unitDir, inOutDistance, extent, pcb);
}
return again;
@@ -661,7 +686,7 @@ struct MainTreeOverlapPrunerCallback : public PrunerCallback
// payload data match merged tree data MergedTree, we can cast it
const AABBTree* aabbTree = reinterpret_cast<const AABBTree*> (payload.data[0]);
// overlap the merged tree
- return AABBTreeOverlap<Test>()(mPruningPool->getObjects(), mPruningPool->getCurrentWorldBoxes(), *aabbTree, mTest, mPrunerCallback);
+ return AABBTreeOverlap<Test, AABBTree, AABBTreeRuntimeNode>()(mPruningPool->getObjects(), mPruningPool->getCurrentWorldBoxes(), *aabbTree, mTest, mPrunerCallback);
}
PX_NOCOPY(MainTreeOverlapPrunerCallback)
@@ -679,8 +704,8 @@ PxAgain ExtendedBucketPruner::overlap(const Gu::ShapeData& queryVolume, PrunerCa
PxAgain again = true;
// core bucket pruner overlap
- if (mBucketCore.getNbObjects())
- again = mBucketCore.overlap(queryVolume, prunerCallback);
+ if (mPrunerCore.getNbObjects())
+ again = mPrunerCore.overlap(queryVolume, prunerCallback);
if(again && mExtendedBucketPrunerMap.size())
{
@@ -692,13 +717,13 @@ PxAgain ExtendedBucketPruner::overlap(const Gu::ShapeData& queryVolume, PrunerCa
{
const Gu::OBBAABBTest test(queryVolume.getPrunerWorldPos(), queryVolume.getPrunerWorldRot33(), queryVolume.getPrunerBoxGeomExtentsInflated());
MainTreeOverlapPrunerCallback<Gu::OBBAABBTest> pcb(test, prunerCallback, mPruningPool);
- again = AABBTreeOverlap<Gu::OBBAABBTest>()(reinterpret_cast<const PrunerPayload*>(mMergedTrees), mBounds, *mMainTree, test, pcb);
+ again = AABBTreeOverlap<Gu::OBBAABBTest, AABBTree, AABBTreeRuntimeNode>()(reinterpret_cast<const PrunerPayload*>(mMergedTrees), mBounds, *mMainTree, test, pcb);
}
else
{
const Gu::AABBAABBTest test(queryVolume.getPrunerInflatedWorldAABB());
MainTreeOverlapPrunerCallback<Gu::AABBAABBTest> pcb(test, prunerCallback, mPruningPool);
- again = AABBTreeOverlap<Gu::AABBAABBTest>()(reinterpret_cast<const PrunerPayload*>(mMergedTrees), mBounds, *mMainTree, test, pcb);
+ again = AABBTreeOverlap<Gu::AABBAABBTest, AABBTree, AABBTreeRuntimeNode>()(reinterpret_cast<const PrunerPayload*>(mMergedTrees), mBounds, *mMainTree, test, pcb);
}
}
break;
@@ -708,7 +733,7 @@ PxAgain ExtendedBucketPruner::overlap(const Gu::ShapeData& queryVolume, PrunerCa
const Gu::CapsuleAABBTest test(capsule.p1, queryVolume.getPrunerWorldRot33().column0,
queryVolume.getCapsuleHalfHeight()*2.0f, PxVec3(capsule.radius*SQ_PRUNER_INFLATION));
MainTreeOverlapPrunerCallback<Gu::CapsuleAABBTest> pcb(test, prunerCallback, mPruningPool);
- again = AABBTreeOverlap<Gu::CapsuleAABBTest>()(reinterpret_cast<const PrunerPayload*>(mMergedTrees), mBounds, *mMainTree, test, pcb);
+ again = AABBTreeOverlap<Gu::CapsuleAABBTest, AABBTree, AABBTreeRuntimeNode>()(reinterpret_cast<const PrunerPayload*>(mMergedTrees), mBounds, *mMainTree, test, pcb);
}
break;
case PxGeometryType::eSPHERE:
@@ -716,14 +741,14 @@ PxAgain ExtendedBucketPruner::overlap(const Gu::ShapeData& queryVolume, PrunerCa
const Gu::Sphere& sphere = queryVolume.getGuSphere();
Gu::SphereAABBTest test(sphere.center, sphere.radius);
MainTreeOverlapPrunerCallback<Gu::SphereAABBTest> pcb(test, prunerCallback, mPruningPool);
- again = AABBTreeOverlap<Gu::SphereAABBTest>()(reinterpret_cast<const PrunerPayload*>(mMergedTrees), mBounds, *mMainTree, test, pcb);
+ again = AABBTreeOverlap<Gu::SphereAABBTest, AABBTree, AABBTreeRuntimeNode>()(reinterpret_cast<const PrunerPayload*>(mMergedTrees), mBounds, *mMainTree, test, pcb);
}
break;
case PxGeometryType::eCONVEXMESH:
{
const Gu::OBBAABBTest test(queryVolume.getPrunerWorldPos(), queryVolume.getPrunerWorldRot33(), queryVolume.getPrunerBoxGeomExtentsInflated());
MainTreeOverlapPrunerCallback<Gu::OBBAABBTest> pcb(test, prunerCallback, mPruningPool);
- again = AABBTreeOverlap<Gu::OBBAABBTest>()(reinterpret_cast<const PrunerPayload*>(mMergedTrees), mBounds, *mMainTree, test, pcb);
+ again = AABBTreeOverlap<Gu::OBBAABBTest, AABBTree, AABBTreeRuntimeNode>()(reinterpret_cast<const PrunerPayload*>(mMergedTrees), mBounds, *mMainTree, test, pcb);
}
break;
case PxGeometryType::ePLANE:
@@ -745,8 +770,8 @@ PxAgain ExtendedBucketPruner::sweep(const Gu::ShapeData& queryVolume, const PxVe
PxAgain again = true;
// core bucket pruner sweep
- if (mBucketCore.getNbObjects())
- again = mBucketCore.sweep(queryVolume, unitDir, inOutDistance, prunerCallback);
+ if (mPrunerCore.getNbObjects())
+ again = mPrunerCore.sweep(queryVolume, unitDir, inOutDistance, prunerCallback);
if(again && mExtendedBucketPrunerMap.size())
{
@@ -754,7 +779,7 @@ PxAgain ExtendedBucketPruner::sweep(const Gu::ShapeData& queryVolume, const PxVe
const PxVec3 extents = aabb.getExtents();
const PxVec3 center = aabb.getCenter();
MainTreeRaycastPrunerCallback<true> pcb(center, unitDir, extents, prunerCallback, mPruningPool);
- again = AABBTreeRaycast<true>()(reinterpret_cast<const PrunerPayload*>(mMergedTrees), mBounds, *mMainTree, center, unitDir, inOutDistance, extents, pcb);
+ again = AABBTreeRaycast<true, AABBTree, AABBTreeRuntimeNode>()(reinterpret_cast<const PrunerPayload*>(mMergedTrees), mBounds, *mMainTree, center, unitDir, inOutDistance, extents, pcb);
}
return again;
}
@@ -794,7 +819,7 @@ void ExtendedBucketPruner::visualize(Cm::RenderOutput& out, PxU32 color) const
visualizeTree(out, color, mMergedTrees[i].mTree);
}
- mBucketCore.visualize(out, color);
+ mPrunerCore.visualize(out, color);
}
//////////////////////////////////////////////////////////////////////////
diff --git a/PhysX_3.4/Source/SceneQuery/src/SqExtendedBucketPruner.h b/PhysX_3.4/Source/SceneQuery/src/SqExtendedBucketPruner.h
index 923501a0..4106519a 100644
--- a/PhysX_3.4/Source/SceneQuery/src/SqExtendedBucketPruner.h
+++ b/PhysX_3.4/Source/SceneQuery/src/SqExtendedBucketPruner.h
@@ -32,9 +32,12 @@
#include "SqTypedef.h"
#include "SqBucketPruner.h"
+#include "SqIncrementalAABBPrunerCore.h"
#include "SqAABBTreeUpdateMap.h"
#include "PsHashMap.h"
+#define USE_INCREMENTAL_PRUNER 0
+
namespace physx
{
namespace Sq
@@ -42,6 +45,12 @@ namespace Sq
struct AABBPrunerMergeData;
class AABBTreeMergeData;
+#if USE_INCREMENTAL_PRUNER
+ typedef IncrementalAABBPrunerCore PrunerCore;
+#else
+ typedef BucketPrunerCore PrunerCore;
+#endif
+
// Extended bucket pruner data, if an object belongs to the tree of trees, we need to
// remember node for the sub tree, the tree it belongs to and the main tree node
struct ExtendedBucketPrunerData
@@ -99,32 +108,40 @@ namespace Sq
void release();
// add single object into a bucket pruner directly
- PX_FORCE_INLINE bool addObject(const PrunerPayload& object, const PxBounds3& worldAABB, PxU32 timeStamp)
+ PX_FORCE_INLINE bool addObject(const PrunerPayload& object, const PxBounds3& worldAABB, PxU32 timeStamp, const PoolIndex poolIndex)
{
- return mBucketCore.addObject(object, worldAABB, timeStamp);
+#if USE_INCREMENTAL_PRUNER
+ PX_UNUSED(worldAABB);
+ PX_UNUSED(object);
+ return mPrunerCore.addObject(poolIndex, timeStamp);
+#else
+ PX_UNUSED(poolIndex);
+ return mPrunerCore.addObject(object, worldAABB, timeStamp);
+#endif
}
// add AABB tree from pruning structure - adds new primitive into main AABB tree
void addTree(const AABBTreeMergeData& mergeData, PxU32 timeStamp);
// update object
- bool updateObject(const PxBounds3& worldAABB, const PrunerPayload& object);
+ bool updateObject(const PxBounds3& worldAABB, const PrunerPayload& object, const PoolIndex poolIndex);
// remove object, removed object is replaced in pruning pool by swapped object, indices needs to be updated
bool removeObject(const PrunerPayload& object, PxU32 objectIndex, const PrunerPayload& swapObject,
PxU32 swapObjectIndex, PxU32& timeStamp);
- // separate call for indices invalidation, object can be either in AABBPruner or Bucket pruner, but the swapped object can be
- // in the tree of trees
- void invalidateObject(const ExtendedBucketPrunerData& object, PxU32 objectIndex, const PrunerPayload& swapObject,
- PxU32 swapObjectIndex);
- // swap object index, the object index can be in bucket pruner or tree of trees
- void swapIndex(PxU32 objectIndex, const PrunerPayload& swapObject, PxU32 swapObjectIndex);
+ // swap object index, the object index can be in core pruner or tree of trees
+ void swapIndex(PxU32 objectIndex, const PrunerPayload& swapObject, PxU32 swapObjectIndex, bool corePrunerIncluded = true);
// refit marked nodes in tree of trees
void refitMarkedNodes(const PxBounds3* boxes);
+#if USE_INCREMENTAL_PRUNER
+ // notify timestampChange - swap trees in incremental pruner
+ void timeStampChange() { mPrunerCore.timeStampChange(); }
+#endif
+
// look for objects marked with input timestamp everywhere in the structure, and remove them. This is the same
// as calling 'removeObject' individually for all these objects, but much more efficient. Returns number of removed objects.
@@ -141,11 +158,17 @@ namespace Sq
// debug visualize
void visualize(Cm::RenderOutput& out, PxU32 color) const;
- PX_FORCE_INLINE void build() { mBucketCore.build(); }
+ PX_FORCE_INLINE void build() { mPrunerCore.build(); }
- PX_FORCE_INLINE PxU32 getNbObjects() const { return mBucketCore.getNbObjects() + mExtendedBucketPrunerMap.size(); }
+ PX_FORCE_INLINE PxU32 getNbObjects() const { return mPrunerCore.getNbObjects() + mExtendedBucketPrunerMap.size(); }
private:
+
+ // separate call for indices invalidation, object can be either in AABBPruner or Bucket pruner, but the swapped object can be
+ // in the tree of trees
+ void invalidateObject(const ExtendedBucketPrunerData& object, PxU32 objectIndex, const PrunerPayload& swapObject,
+ PxU32 swapObjectIndex);
+
void resize(PxU32 size);
void buildMainAABBTree();
void copyTree(AABBTree& destTree, const AABBPrunerMergeData& inputData);
@@ -156,7 +179,7 @@ namespace Sq
bool checkValidity();
#endif
private:
- BucketPrunerCore mBucketCore; // Bucket pruner for single objects
+ PrunerCore mPrunerCore; // pruner for single objects
const PruningPool* mPruningPool; // Pruning pool from AABB pruner
ExtendedBucketPrunerMap mExtendedBucketPrunerMap; // Map holding objects from tree merge - objects in tree of trees
AABBTree* mMainTree; // Main tree holding merged trees
diff --git a/PhysX_3.4/Source/SceneQuery/src/SqIncrementalAABBPrunerCore.cpp b/PhysX_3.4/Source/SceneQuery/src/SqIncrementalAABBPrunerCore.cpp
new file mode 100644
index 00000000..bf2f9b41
--- /dev/null
+++ b/PhysX_3.4/Source/SceneQuery/src/SqIncrementalAABBPrunerCore.cpp
@@ -0,0 +1,436 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2017 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#include "SqIncrementalAABBPrunerCore.h"
+#include "SqIncrementalAABBTree.h"
+#include "SqPruningPool.h"
+#include "SqAABBTree.h"
+#include "SqAABBTreeQuery.h"
+#include "GuSphere.h"
+#include "GuBox.h"
+#include "GuCapsule.h"
+#include "GuBounds.h"
+
+using namespace physx;
+using namespace Gu;
+using namespace Sq;
+using namespace Cm;
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+#define PARANOIA_CHECKS 0
+
+IncrementalAABBPrunerCore::IncrementalAABBPrunerCore(const PruningPool* pool) :
+ mCurrentTree (1),
+ mLastTree (0),
+ mPool (pool)
+{
+ mAABBTree[0].mapping.reserve(256);
+ mAABBTree[1].mapping.reserve(256);
+}
+
+IncrementalAABBPrunerCore::~IncrementalAABBPrunerCore()
+{
+ release();
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+void IncrementalAABBPrunerCore::release() // this can be called from purge()
+{
+ for(PxU32 i = 0; i < NUM_TREES; i++)
+ {
+ if(mAABBTree[i].tree)
+ {
+ PX_DELETE(mAABBTree[i].tree);
+ mAABBTree[i].tree = NULL;
+ }
+ mAABBTree[i].mapping.clear();
+ mAABBTree[i].timeStamp = 0;
+ }
+ mCurrentTree = 1;
+ mLastTree = 0;
+}
+
+bool IncrementalAABBPrunerCore::addObject(const PoolIndex poolIndex, PxU32 timeStamp)
+{
+ CoreTree& tree = mAABBTree[mCurrentTree];
+ if(!tree.tree || !tree.tree->getNodes())
+ {
+ if(!tree.tree)
+ tree.tree = PX_NEW(IncrementalAABBTree)();
+ tree.timeStamp = timeStamp;
+ }
+ PX_ASSERT(tree.timeStamp == timeStamp);
+
+ bool split = false;
+ IncrementalAABBTreeNode* node = tree.tree->insert(poolIndex, mPool->getCurrentWorldBoxes(), split);
+ updateMapping(split, tree.mapping, poolIndex, node);
+
+#if PARANOIA_CHECKS
+ test();
+#endif
+
+ return true;
+}
+
+void IncrementalAABBPrunerCore::updateMapping(bool split, IncrementalPrunerMap& mapping, const PoolIndex poolIndex, IncrementalAABBTreeNode* node)
+{
+ // if a node was split we need to update the node indices and also the sibling indices
+ if(split)
+ {
+ for(PxU32 j = 0; j < node->getNbPrimitives(); j++)
+ {
+ const PoolIndex index = node->getPrimitives(NULL)[j];
+ mapping[index] = node;
+ }
+ // sibling
+ if(node->mParent)
+ {
+ IncrementalAABBTreeNode* sibling = (node->mParent->mChilds[0] == node) ? node->mParent->mChilds[1] : node->mParent->mChilds[0];
+ if(sibling->isLeaf())
+ {
+ for(PxU32 j = 0; j < sibling->getNbPrimitives(); j++)
+ {
+ const PoolIndex index = sibling->getPrimitives(NULL)[j];
+ mapping[index] = sibling;
+ }
+ }
+ }
+ }
+ else
+ {
+ mapping[poolIndex] = node;
+ }
+}
+
+bool IncrementalAABBPrunerCore::removeObject(const PoolIndex poolIndex, const PoolIndex poolRelocatedLastIndex, PxU32& timeStamp)
+{
+ // erase the entry and get the data
+ IncrementalPrunerMap::Entry entry;
+ bool foundEntry = true;
+ const PxU32 treeIndex = mAABBTree[mLastTree].mapping.erase(poolIndex, entry) ? mLastTree : mCurrentTree;
+ // if it was not found in the last tree look at the current tree
+ if(treeIndex == mCurrentTree)
+ foundEntry = mAABBTree[mCurrentTree].mapping.erase(poolIndex, entry);
+
+ // exit, something is wrong here: the entry was not found
+ PX_ASSERT(foundEntry);
+ if(!foundEntry)
+ return false;
+
+ // tree must exist
+ PX_ASSERT(mAABBTree[treeIndex].tree);
+ CoreTree& tree = mAABBTree[treeIndex];
+ timeStamp = tree.timeStamp;
+
+ // remove the poolIndex from the tree, update the tree bounds immediately
+ IncrementalAABBTreeNode* node = tree.tree->remove(entry.second, poolIndex, mPool->getCurrentWorldBoxes());
+ if(node && node->isLeaf())
+ {
+ for(PxU32 j = 0; j < node->getNbPrimitives(); j++)
+ {
+ const PoolIndex index = node->getPrimitives(NULL)[j];
+ tree.mapping[index] = node;
+ }
+ }
+
+ // nothing to swap, last object, early exit
+ if(poolIndex == poolRelocatedLastIndex)
+ {
+#if PARANOIA_CHECKS
+ test();
+#endif
+ return true;
+ }
+
+ // fix the indices, we need to swap the index with last index
+ // erase the relocated index from the tree it is in
+ IncrementalPrunerMap::Entry relocatedEntry;
+ const PxU32 treeRelocatedIndex = mAABBTree[mCurrentTree].mapping.erase(poolRelocatedLastIndex, relocatedEntry) ? mCurrentTree : mLastTree;
+ foundEntry = true;
+ if(treeRelocatedIndex == mLastTree)
+ foundEntry = mAABBTree[mLastTree].mapping.erase(poolRelocatedLastIndex, relocatedEntry);
+
+ if(foundEntry)
+ {
+ CoreTree& relocatedTree = mAABBTree[treeRelocatedIndex];
+
+ // set the new mapping
+ relocatedTree.mapping[poolIndex] = relocatedEntry.second;
+ // update the tree indices - swap
+ relocatedTree.tree->fixupTreeIndices(relocatedEntry.second, poolRelocatedLastIndex, poolIndex);
+ }
+
+#if PARANOIA_CHECKS
+ test();
+#endif
+ return true;
+}
+
+void IncrementalAABBPrunerCore::swapIndex(const PoolIndex poolIndex, const PoolIndex poolRelocatedLastIndex)
+{
+ // fix the indices, we need to swap the index with last index
+ // erase the relocated index from the tree it is in
+ IncrementalPrunerMap::Entry relocatedEntry;
+ const PxU32 treeRelocatedIndex = mAABBTree[mCurrentTree].mapping.erase(poolRelocatedLastIndex, relocatedEntry) ? mCurrentTree : mLastTree;
+ bool foundEntry = true;
+ if(treeRelocatedIndex == mLastTree)
+ foundEntry = mAABBTree[mLastTree].mapping.erase(poolRelocatedLastIndex, relocatedEntry);
+
+ // relocated index is not here
+ if(!foundEntry)
+ return;
+
+ CoreTree& relocatedTree = mAABBTree[treeRelocatedIndex];
+
+ // set the new mapping
+ relocatedTree.mapping[poolIndex] = relocatedEntry.second;
+ // update the tree indices - swap
+ relocatedTree.tree->fixupTreeIndices(relocatedEntry.second, poolRelocatedLastIndex, poolIndex);
+}
+
+bool IncrementalAABBPrunerCore::updateObject(const PoolIndex poolIndex)
+{
+ const IncrementalPrunerMap::Entry* entry = mAABBTree[mLastTree].mapping.find(poolIndex);
+ const PxU32 treeIndex = entry ? mLastTree : mCurrentTree;
+ if(!entry)
+ entry = mAABBTree[mCurrentTree].mapping.find(poolIndex);
+
+ // we have not found it
+ PX_ASSERT(entry);
+ if(!entry)
+ return false;
+
+ CoreTree& tree = mAABBTree[treeIndex];
+ bool split;
+ IncrementalAABBTreeNode* removedNode = NULL;
+ IncrementalAABBTreeNode* node = tree.tree->updateFast(entry->second, poolIndex, mPool->getCurrentWorldBoxes(), split, removedNode);
+ // we removed a node during the update, need to update the mapping
+ if(removedNode && removedNode->isLeaf())
+ {
+ for(PxU32 j = 0; j < removedNode->getNbPrimitives(); j++)
+ {
+ const PoolIndex index = removedNode->getPrimitives(NULL)[j];
+ tree.mapping[index] = removedNode;
+ }
+ }
+ if(split || node != entry->second)
+ updateMapping(split, tree.mapping, poolIndex, node);
+
+#if PARANOIA_CHECKS
+ test();
+#endif
+
+ return true;
+}
+
+PxU32 IncrementalAABBPrunerCore::removeMarkedObjects(PxU32 timeStamp)
+{
+ // early exit if no tree exists
+ if(!mAABBTree[mLastTree].tree || !mAABBTree[mLastTree].tree->getNodes())
+ {
+ PX_ASSERT(mAABBTree[mLastTree].mapping.size() == 0);
+ PX_ASSERT(!mAABBTree[mCurrentTree].tree || mAABBTree[mCurrentTree].timeStamp != timeStamp);
+ return 0;
+ }
+
+ PX_UNUSED(timeStamp);
+ PX_ASSERT(timeStamp == mAABBTree[mLastTree].timeStamp);
+
+ // release the last tree
+ CoreTree& tree = mAABBTree[mLastTree];
+ PxU32 nbObjects = tree.mapping.size();
+ tree.mapping.clear();
+ tree.timeStamp = 0;
+
+ tree.tree->release();
+
+ return nbObjects;
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+/**
+ * Query Implementation
+ */
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+PxAgain IncrementalAABBPrunerCore::overlap(const ShapeData& queryVolume, PrunerCallback& pcb) const
+{
+ PxAgain again = true;
+
+ for(PxU32 i = 0; i < NUM_TREES; i++)
+ {
+ const CoreTree& tree = mAABBTree[i];
+ if(tree.tree && tree.tree->getNodes() && again)
+ {
+ switch(queryVolume.getType())
+ {
+ case PxGeometryType::eBOX:
+ {
+ if(queryVolume.isOBB())
+ {
+ const Gu::OBBAABBTest test(queryVolume.getPrunerWorldPos(), queryVolume.getPrunerWorldRot33(), queryVolume.getPrunerBoxGeomExtentsInflated());
+ again = AABBTreeOverlap<Gu::OBBAABBTest, IncrementalAABBTree, IncrementalAABBTreeNode>()(mPool->getObjects(), mPool->getCurrentWorldBoxes(), *tree.tree, test, pcb);
+ }
+ else
+ {
+ const Gu::AABBAABBTest test(queryVolume.getPrunerInflatedWorldAABB());
+ again = AABBTreeOverlap<Gu::AABBAABBTest, IncrementalAABBTree, IncrementalAABBTreeNode>()(mPool->getObjects(), mPool->getCurrentWorldBoxes(), *tree.tree, test, pcb);
+ }
+ }
+ break;
+ case PxGeometryType::eCAPSULE:
+ {
+ const Gu::Capsule& capsule = queryVolume.getGuCapsule();
+ const Gu::CapsuleAABBTest test( capsule.p1, queryVolume.getPrunerWorldRot33().column0,
+ queryVolume.getCapsuleHalfHeight()*2.0f, PxVec3(capsule.radius*SQ_PRUNER_INFLATION));
+ again = AABBTreeOverlap<Gu::CapsuleAABBTest, IncrementalAABBTree, IncrementalAABBTreeNode>()(mPool->getObjects(), mPool->getCurrentWorldBoxes(), *tree.tree, test, pcb);
+ }
+ break;
+ case PxGeometryType::eSPHERE:
+ {
+ const Gu::Sphere& sphere = queryVolume.getGuSphere();
+ Gu::SphereAABBTest test(sphere.center, sphere.radius);
+ again = AABBTreeOverlap<Gu::SphereAABBTest, IncrementalAABBTree, IncrementalAABBTreeNode>()(mPool->getObjects(), mPool->getCurrentWorldBoxes(), *tree.tree, test, pcb);
+ }
+ break;
+ case PxGeometryType::eCONVEXMESH:
+ {
+ const Gu::OBBAABBTest test(queryVolume.getPrunerWorldPos(), queryVolume.getPrunerWorldRot33(), queryVolume.getPrunerBoxGeomExtentsInflated());
+ again = AABBTreeOverlap<Gu::OBBAABBTest, IncrementalAABBTree, IncrementalAABBTreeNode>()(mPool->getObjects(), mPool->getCurrentWorldBoxes(), *tree.tree, test, pcb);
+ }
+ break;
+ case PxGeometryType::ePLANE:
+ case PxGeometryType::eTRIANGLEMESH:
+ case PxGeometryType::eHEIGHTFIELD:
+ case PxGeometryType::eGEOMETRY_COUNT:
+ case PxGeometryType::eINVALID:
+ PX_ALWAYS_ASSERT_MESSAGE("unsupported overlap query volume geometry type");
+ }
+ }
+ }
+
+ return again;
+}
+
+PxAgain IncrementalAABBPrunerCore::sweep(const ShapeData& queryVolume, const PxVec3& unitDir, PxReal& inOutDistance, PrunerCallback& pcb) const
+{
+ PxAgain again = true;
+
+ for(PxU32 i = 0; i < NUM_TREES; i++)
+ {
+ const CoreTree& tree = mAABBTree[i];
+ if(tree.tree && tree.tree->getNodes() && again)
+ {
+ const PxBounds3& aabb = queryVolume.getPrunerInflatedWorldAABB();
+ const PxVec3 extents = aabb.getExtents();
+ again = AABBTreeRaycast<true, IncrementalAABBTree, IncrementalAABBTreeNode>()(mPool->getObjects(), mPool->getCurrentWorldBoxes(), *tree.tree, aabb.getCenter(), unitDir, inOutDistance, extents, pcb);
+ }
+ }
+
+ return again;
+}
+
+PxAgain IncrementalAABBPrunerCore::raycast(const PxVec3& origin, const PxVec3& unitDir, PxReal& inOutDistance, PrunerCallback& pcb) const
+{
+ PxAgain again = true;
+
+ for(PxU32 i = 0; i < NUM_TREES; i++)
+ {
+ const CoreTree& tree = mAABBTree[i];
+ if(tree.tree && tree.tree->getNodes() && again)
+ {
+ again = AABBTreeRaycast<false, IncrementalAABBTree, IncrementalAABBTreeNode>()(mPool->getObjects(), mPool->getCurrentWorldBoxes(), *tree.tree, origin, unitDir, inOutDistance, PxVec3(0.0f), pcb);
+ }
+ }
+ return again;
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+void IncrementalAABBPrunerCore::shiftOrigin(const PxVec3& shift)
+{
+ for(PxU32 i = 0; i < NUM_TREES; i++)
+ {
+ if(mAABBTree[i].tree)
+ {
+ mAABBTree[i].tree->shiftOrigin(shift);
+ }
+ }
+}
+
+
+#include "CmRenderOutput.h"
+void IncrementalAABBPrunerCore::visualize(Cm::RenderOutput& out, PxU32 color) const
+{
+ for(PxU32 i = 0; i < NUM_TREES; i++)
+ {
+ if(mAABBTree[i].tree && mAABBTree[i].tree->getNodes())
+ {
+ struct Local
+ {
+ static void _Draw(const IncrementalAABBTreeNode* root, const IncrementalAABBTreeNode* node, Cm::RenderOutput& out_)
+ {
+ PxBounds3 bounds;
+ V4StoreU(node->mBVMin, &bounds.minimum.x);
+ V4StoreU(node->mBVMax, &bounds.maximum.x);
+ out_ << Cm::DebugBox(bounds, true);
+ if (node->isLeaf())
+ return;
+ _Draw(root, node->getPos(root), out_);
+ _Draw(root, node->getNeg(root), out_);
+ }
+ };
+ out << PxTransform(PxIdentity);
+ out << color;
+ Local::_Draw(mAABBTree[i].tree->getNodes(), mAABBTree[i].tree->getNodes(), out);
+
+
+ // Render added objects not yet in the tree
+ out << PxTransform(PxIdentity);
+ out << PxU32(PxDebugColor::eARGB_WHITE);
+ }
+ }
+}
+
+void IncrementalAABBPrunerCore::test()
+{
+ for(PxU32 i = 0; i < NUM_TREES; i++)
+ {
+ if(mAABBTree[i].tree)
+ {
+ //mAABBTree[i].tree->hierarchyCheck(mPool->getNbActiveObjects(), mPool->getCurrentWorldBoxes());
+ for (IncrementalPrunerMap::Iterator iter = mAABBTree[i].mapping.getIterator(); !iter.done(); ++iter)
+ {
+ mAABBTree[i].tree->checkTreeLeaf(iter->second, iter->first);
+ }
+ }
+ }
+}
diff --git a/PhysX_3.4/Source/SceneQuery/src/SqIncrementalAABBPrunerCore.h b/PhysX_3.4/Source/SceneQuery/src/SqIncrementalAABBPrunerCore.h
new file mode 100644
index 00000000..bd2b7219
--- /dev/null
+++ b/PhysX_3.4/Source/SceneQuery/src/SqIncrementalAABBPrunerCore.h
@@ -0,0 +1,114 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2017 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#ifndef SQ_INCREMENTAL_AABB_PRUNER_CORE_H
+#define SQ_INCREMENTAL_AABB_PRUNER_CORE_H
+
+#include "SqPruner.h"
+#include "SqPruningPool.h"
+#include "SqIncrementalAABBTree.h"
+#include "SqAABBTreeUpdateMap.h"
+#include "PsHashMap.h"
+
+namespace physx
+{
+
+namespace Sq
+{
+ ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+ typedef Ps::HashMap<PoolIndex, IncrementalAABBTreeNode*> IncrementalPrunerMap;
+
+ struct CoreTree
+ {
+ CoreTree():
+ timeStamp(0),
+ tree(NULL)
+ {
+ }
+
+ PxU32 timeStamp;
+ IncrementalAABBTree* tree;
+ IncrementalPrunerMap mapping;
+ };
+
+ ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+ class IncrementalAABBPrunerCore : public Ps::UserAllocated
+ {
+ public:
+ IncrementalAABBPrunerCore(const PruningPool* pool);
+ ~IncrementalAABBPrunerCore();
+
+ void release();
+
+ bool addObject(const PoolIndex poolIndex, PxU32 timeStamp);
+ bool removeObject(const PoolIndex poolIndex, const PoolIndex poolRelocatedLastIndex, PxU32& timeStamp);
+
+ // if we swap object from bucket pruner index with an index in the regular AABB pruner
+ void swapIndex(const PoolIndex poolIndex, const PoolIndex poolRelocatedLastIndex);
+
+ bool updateObject(const PoolIndex poolIndex);
+
+ PxU32 removeMarkedObjects(PxU32 timeStamp);
+
+ PxAgain raycast(const PxVec3& origin, const PxVec3& unitDir, PxReal& inOutDistance, PrunerCallback&) const;
+ PxAgain overlap(const Gu::ShapeData& queryVolume, PrunerCallback&) const;
+ PxAgain sweep(const Gu::ShapeData& queryVolume, const PxVec3& unitDir, PxReal& inOutDistance, PrunerCallback&) const;
+
+ void shiftOrigin(const PxVec3& shift);
+
+ void visualize(Cm::RenderOutput& out, PxU32 color) const;
+
+ PX_FORCE_INLINE void timeStampChange()
+ {
+ // swap current and last tree
+ mLastTree = (mLastTree + 1) % 2;
+ mCurrentTree = (mCurrentTree + 1) % 2;
+ }
+
+ void build() {}
+
+ PX_FORCE_INLINE PxU32 getNbObjects() const { return mAABBTree[0].mapping.size() + mAABBTree[1].mapping.size(); }
+
+ private:
+ void updateMapping(bool split, IncrementalPrunerMap& mapping, const PoolIndex poolIndex, IncrementalAABBTreeNode* node);
+ void test();
+
+ private:
+ static const PxU32 NUM_TREES = 2;
+
+ PxU32 mCurrentTree;
+ PxU32 mLastTree;
+ CoreTree mAABBTree[NUM_TREES];
+ const PruningPool* mPool; // Pruning pool from AABB pruner
+ };
+
+}}
+
+#endif
+
diff --git a/PhysX_3.4/Source/SceneQuery/src/SqIncrementalAABBTree.cpp b/PhysX_3.4/Source/SceneQuery/src/SqIncrementalAABBTree.cpp
new file mode 100644
index 00000000..be5fccf4
--- /dev/null
+++ b/PhysX_3.4/Source/SceneQuery/src/SqIncrementalAABBTree.cpp
@@ -0,0 +1,764 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2017 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#include "foundation/PxMemory.h"
+#include "SqIncrementalAABBTree.h"
+#include "SqAABBTree.h"
+#include "SqAABBTreeUpdateMap.h"
+#include "SqBounds.h"
+#include "PsVecMath.h"
+#include "PsFPU.h"
+
+using namespace physx;
+using namespace Sq;
+using namespace shdfnd::aos;
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+IncrementalAABBTree::IncrementalAABBTree():
+ mIndicesPool("AABBTreeIndicesPool", 256),
+ mNodesPool("AABBTreeNodesPool", 256 ),
+ mRoot(NULL)
+{
+
+}
+
+IncrementalAABBTree::~IncrementalAABBTree()
+{
+ release();
+}
+
+void IncrementalAABBTree::release()
+{
+ if(mRoot)
+ {
+ releaseNode(mRoot);
+ mRoot = NULL;
+ }
+}
+
+void IncrementalAABBTree::releaseNode(IncrementalAABBTreeNode* node)
+{
+ PX_ASSERT(node);
+ if(node->isLeaf())
+ {
+ mIndicesPool.deallocate(node->mIndices);
+ }
+ else
+ {
+ releaseNode(node->mChilds[0]);
+ releaseNode(node->mChilds[1]);
+ }
+ if(!node->mParent)
+ {
+ mNodesPool.deallocate(reinterpret_cast<IncrementalAABBTreeNodePair*>(node));
+ return;
+ }
+ if(node->mParent->mChilds[1] == node)
+ {
+ mNodesPool.deallocate(reinterpret_cast<IncrementalAABBTreeNodePair*>(node->mParent->mChilds[0]));
+ }
+}
+
+
+// check if node is inside the given bounds
+PX_FORCE_INLINE static bool nodeInsideBounds(const Vec4V& nodeMin, const Vec4V& nodeMax, const Vec4V& parentMin, const Vec4V& parentMax)
+{
+ return !(Ps::IntBool(V4AnyGrtr3(parentMin, nodeMin)) || Ps::IntBool(V4AnyGrtr3(nodeMax, parentMax)));
+}
+
+// update the node parent hierarchy; when an insert happens, we can early exit when the node is inside its parent
+// no further update is needed
+PX_FORCE_INLINE static void updateHierarchyAfterInsert(IncrementalAABBTreeNode* node)
+{
+ IncrementalAABBTreeNode* parent = node->mParent;
+ IncrementalAABBTreeNode* testNode = node;
+ while(parent)
+ {
+ // check if we can early exit
+ if(!nodeInsideBounds(testNode->mBVMin, testNode->mBVMax, parent->mBVMin, parent->mBVMax))
+ {
+ parent->mBVMin = V4Min(parent->mChilds[0]->mBVMin, parent->mChilds[1]->mBVMin);
+ parent->mBVMax = V4Max(parent->mChilds[0]->mBVMax, parent->mChilds[1]->mBVMax);
+ }
+ else
+ break;
+ testNode = parent;
+ parent = parent->mParent;
+ }
+}
+
+// add an index into the leaf indices list and update the node bounds
+PX_FORCE_INLINE static void addPrimitiveIntoNode(IncrementalAABBTreeNode* node, const PoolIndex index, const Vec4V& minV, const Vec4V& maxV)
+{
+ PX_ASSERT(node->isLeaf());
+ AABBTreeIndices& nodeIndices = *node->mIndices;
+ PX_ASSERT(nodeIndices.nbIndices < NB_OBJECTS_PER_NODE);
+
+ // store the new handle
+ nodeIndices.indices[nodeIndices.nbIndices++] = index;
+
+ // increase the node bounds
+ node->mBVMin = V4Min(node->mBVMin, minV);
+ node->mBVMax = V4Max(node->mBVMax, maxV);
+
+ updateHierarchyAfterInsert(node);
+}
+
+// check if node does intersect with given bounds
+PX_FORCE_INLINE static bool nodeIntersection(IncrementalAABBTreeNode& node, const Vec4V& minV, const Vec4V& maxV)
+{
+ return !(Ps::IntBool(V4AnyGrtr3(node.mBVMin, maxV)) || Ps::IntBool(V4AnyGrtr3(minV, node.mBVMax)));
+}
+
+// traversal strategy
+PX_FORCE_INLINE static PxU32 traversalDirection(IncrementalAABBTreeNode& child0, IncrementalAABBTreeNode& child1, const Vec4V& testCenterV)
+{
+ // traverse in the direction of a node which is closer
+ // we compare the node and object centers
+ const Vec4V centerCh0V = V4Add(child0.mBVMax, child0.mBVMin);
+ const Vec4V centerCh1V = V4Add(child1.mBVMax, child1.mBVMin);
+
+ const Vec4V ch0D = V4Sub(testCenterV, centerCh0V);
+ const Vec4V ch1D = V4Sub(testCenterV, centerCh1V);
+
+ const BoolV con = FIsGrtr(V4Dot3(ch0D, ch0D), V4Dot3(ch1D, ch1D));
+ return (BAllEqTTTT(con) == 1) ? PxU32(1) : PxU32(0);
+}
+
+// remove an index from the leaf
+PX_FORCE_INLINE static void removePrimitiveFromNode(IncrementalAABBTreeNode* node, const PoolIndex index)
+{
+ AABBTreeIndices& indices = *node->mIndices;
+ PX_ASSERT(indices.nbIndices > 1);
+
+ for (PxU32 i = indices.nbIndices; i--; )
+ {
+ if(node->mIndices->indices[i] == index)
+ {
+ node->mIndices->indices[i] = node->mIndices->indices[--indices.nbIndices];
+ return;
+ }
+ }
+ // if handle was not found something is wrong here
+ PX_ASSERT(0);
+}
+
+// check if bounds are equal with given node min/max
+PX_FORCE_INLINE static bool boundsEqual(const Vec4V& testMin, const Vec4V& testMax, const Vec4V& nodeMin, const Vec4V& nodeMax)
+{
+ return (Ps::IntBool(V4AllEq(nodeMin, testMin)) && Ps::IntBool(V4AllEq(testMax, nodeMax)));
+}
+
+// update the node hierarchy bounds when a remove happens; we can early exit if the bounds are equal and no bounds update
+// did happen
+PX_FORCE_INLINE static void updateHierarchyAfterRemove(IncrementalAABBTreeNode* node, const PxBounds3* bounds)
+{
+ if(node->isLeaf())
+ {
+ const AABBTreeIndices& indices = *node->mIndices;
+ PX_ASSERT(indices.nbIndices > 0);
+
+ Vec4V bvMin = V4LoadU(&bounds[indices.indices[0]].minimum.x);
+ Vec4V bvMax = V4LoadU(&bounds[indices.indices[0]].maximum.x);
+ for(PxU32 i = 1; i < indices.nbIndices; i++)
+ {
+ const Vec4V minV = V4LoadU(&bounds[indices.indices[i]].minimum.x);
+ const Vec4V maxV = V4LoadU(&bounds[indices.indices[i]].maximum.x);
+
+ bvMin = V4Min(bvMin, minV);
+ bvMax = V4Max(bvMax, maxV);
+ }
+
+ node->mBVMin = V4ClearW(bvMin);
+ node->mBVMax = V4ClearW(bvMax);
+ }
+ else
+ {
+ node->mBVMin = V4Min(node->mChilds[0]->mBVMin, node->mChilds[1]->mBVMin);
+ node->mBVMax = V4Max(node->mChilds[0]->mBVMax, node->mChilds[1]->mBVMax);
+ }
+
+ IncrementalAABBTreeNode* parent = node->mParent;
+ while(parent)
+ {
+ const Vec4V newMinV = V4Min(parent->mChilds[0]->mBVMin, parent->mChilds[1]->mBVMin);
+ const Vec4V newMaxV = V4Max(parent->mChilds[0]->mBVMax, parent->mChilds[1]->mBVMax);
+
+ const bool earlyExit = boundsEqual(newMinV, newMaxV, parent->mBVMin, parent->mBVMax);
+ if(earlyExit)
+ break;
+
+ parent->mBVMin = newMinV;
+ parent->mBVMax = newMaxV;
+
+ parent = parent->mParent;
+ }
+}
+
+// split the leaf node along the most significant axis
+IncrementalAABBTreeNode* IncrementalAABBTree::splitLeafNode(IncrementalAABBTreeNode* node, const PoolIndex index, const Vec4V& minV, const Vec4V& maxV, const PxBounds3* bounds)
+{
+ PX_ASSERT(node->isLeaf());
+
+ IncrementalAABBTreeNode* returnNode = NULL;
+
+ // create new pairs of nodes, parent will remain the node (the one we split)
+ IncrementalAABBTreeNode* child0 = reinterpret_cast<IncrementalAABBTreeNode*>(mNodesPool.allocate());
+ IncrementalAABBTreeNode* child1 = child0 + 1;
+ AABBTreeIndices* newIndices = mIndicesPool.allocate();
+
+ // get the split axis
+ PX_ALIGN(16, PxVec4) vars;
+ PX_ALIGN(16, PxVec4) center;
+ const float half = 0.5f;
+ const FloatV halfV = FLoad(half);
+ const Vec4V newMinV = V4Min(node->mBVMin, minV);
+ const Vec4V newMaxV = V4Max(node->mBVMax, maxV);
+ const Vec4V centerV = V4Scale(V4Add(newMaxV, newMinV), halfV);
+ const Vec4V varsV = V4Sub(newMaxV, newMinV);
+ V4StoreA(varsV, &vars.x);
+ V4StoreA(centerV, &center.x);
+ const PxU32 axis = Ps::largestAxis(PxVec3(vars.x, vars.y, vars.z));
+
+ // setup parent
+ child0->mParent = node;
+ child1->mParent = node;
+ child0->mIndices = node->mIndices;
+ child0->mChilds[1] = NULL;
+ child1->mIndices = newIndices;
+ child1->mChilds[1] = NULL;
+
+ AABBTreeIndices& child0Indices = *child0->mIndices; // the original node indices
+ AABBTreeIndices& child1Indices = *child1->mIndices; // new empty indices
+ child1Indices.nbIndices = 0;
+
+ // split the node
+ for(PxU32 i = child0Indices.nbIndices; i--;)
+ {
+ const PxBounds3& primitiveBounds = bounds[child0Indices.indices[i]];
+ const float pCenter = primitiveBounds.getCenter(axis);
+ if(center[axis] > pCenter)
+ {
+ // move to new node
+ child1Indices.indices[child1Indices.nbIndices++] = child0Indices.indices[i];
+ child0Indices.nbIndices--;
+ child0Indices.indices[i] = child0Indices.indices[child0Indices.nbIndices];
+ }
+ }
+
+ // check where to put the new node, if there is still a free space
+ if(child0Indices.nbIndices == 0 || child1Indices.nbIndices == NB_OBJECTS_PER_NODE)
+ {
+ child0Indices.nbIndices = 1;
+ child0Indices.indices[0] = index;
+ returnNode = child0;
+ }
+ else
+ {
+ if(child0Indices.nbIndices == NB_OBJECTS_PER_NODE)
+ {
+ child1Indices.nbIndices = 1;
+ child1Indices.indices[0] = index;
+ returnNode = child1;
+ }
+ else
+ {
+ const PxBounds3& primitiveBounds = bounds[index];
+ const float pCenter = primitiveBounds.getCenter(axis);
+ if(center[axis] > pCenter)
+ {
+ // move to new node
+ child1Indices.indices[child1Indices.nbIndices++] = index;
+ returnNode = child1;
+ }
+ else
+ {
+ // move to old node
+ child0Indices.indices[child0Indices.nbIndices++] = index;
+ returnNode = child0;
+ }
+ }
+ }
+
+ // update bounds for the new nodes
+ Vec4V bvMin = V4LoadU(&bounds[child0Indices.indices[0]].minimum.x);
+ Vec4V bvMax = V4LoadU(&bounds[child0Indices.indices[0]].maximum.x);
+ for(PxU32 i = 1; i < child0Indices.nbIndices; i++)
+ {
+ const Vec4V nodeMinV = V4LoadU(&bounds[child0Indices.indices[i]].minimum.x);
+ const Vec4V nodeMaxV = V4LoadU(&bounds[child0Indices.indices[i]].maximum.x);
+
+ bvMin = V4Min(bvMin, nodeMinV);
+ bvMax = V4Max(bvMax, nodeMaxV);
+ }
+ child0->mBVMin = V4ClearW(bvMin);
+ child0->mBVMax = V4ClearW(bvMax);
+
+ bvMin = V4LoadU(&bounds[child1Indices.indices[0]].minimum.x);
+ bvMax = V4LoadU(&bounds[child1Indices.indices[0]].maximum.x);
+ for(PxU32 i = 1; i < child1Indices.nbIndices; i++)
+ {
+ const Vec4V nodeMinV = V4LoadU(&bounds[child1Indices.indices[i]].minimum.x);
+ const Vec4V nodeMaxV = V4LoadU(&bounds[child1Indices.indices[i]].maximum.x);
+
+ bvMin = V4Min(bvMin, nodeMinV);
+ bvMax = V4Max(bvMax, nodeMaxV);
+ }
+ child1->mBVMin = V4ClearW(bvMin);
+ child1->mBVMax = V4ClearW(bvMax);
+
+ // node parent is the same, setup the new childs
+ node->mChilds[0] = child0;
+ node->mChilds[1] = child1;
+ node->mBVMin = newMinV;
+ node->mBVMax = newMaxV;
+
+ updateHierarchyAfterInsert(node);
+
+ PX_ASSERT(returnNode);
+ return returnNode;
+}
+
+
+// insert new bounds into tree
+IncrementalAABBTreeNode* IncrementalAABBTree::insert(const PoolIndex index, const PxBounds3* bounds, bool& split)
+{
+ PX_SIMD_GUARD;
+
+ // get the bounds, reset the W value
+ const Vec4V minV = V4ClearW(V4LoadU(&bounds[index].minimum.x));
+ const Vec4V maxV = V4ClearW(V4LoadU(&bounds[index].maximum.x));
+
+ split = false;
+
+ // check if tree is empty
+ if(!mRoot)
+ {
+ // make it a leaf
+ AABBTreeIndices* indices = mIndicesPool.construct(index);
+ mRoot = reinterpret_cast<IncrementalAABBTreeNode*> (mNodesPool.allocate());
+ mRoot->mBVMin = minV;
+ mRoot->mBVMax = maxV;
+ mRoot->mIndices = indices;
+ mRoot->mChilds[1] = NULL;
+ mRoot->mParent = NULL;
+
+ return mRoot;
+ }
+ else
+ {
+ // check if root is a leaf
+ if(mRoot->isLeaf())
+ {
+ // if we still can insert the primitive into the leaf, or we need to split
+ if(mRoot->getNbPrimitives() < NB_OBJECTS_PER_NODE)
+ {
+ // simply add the primitive into the current leaf
+ addPrimitiveIntoNode(mRoot, index, minV, maxV);
+ return mRoot;
+ }
+ else
+ {
+ // need to split the node
+ IncrementalAABBTreeNode* retNode = splitLeafNode(mRoot, index, minV, maxV, bounds);
+ mRoot = retNode->mParent;
+ split = true;
+ return retNode;
+ }
+ }
+ else
+ {
+ const Vec4V testCenterV = V4Add(maxV, minV);
+			// we don't need to modify root, let's traverse the tree to find the right spot
+ PxU32 traversalIndex = traversalDirection(*mRoot->mChilds[0], *mRoot->mChilds[1], testCenterV);
+ IncrementalAABBTreeNode* baseNode = mRoot->mChilds[traversalIndex];
+ while(!baseNode->isLeaf())
+ {
+ Ps::prefetchLine(baseNode->mChilds[0]->mChilds[0]);
+ Ps::prefetchLine(baseNode->mChilds[1]->mChilds[0]);
+
+ traversalIndex = traversalDirection(*baseNode->mChilds[0], *baseNode->mChilds[1], testCenterV);
+ baseNode = baseNode->mChilds[traversalIndex];
+ }
+
+ // if we still can insert the primitive into the leaf, or we need to split
+ if(baseNode->getNbPrimitives() < NB_OBJECTS_PER_NODE)
+ {
+ // simply add the primitive into the current leaf
+ addPrimitiveIntoNode(baseNode, index, minV, maxV);
+ return baseNode;
+ }
+ else
+ {
+ // split
+ IncrementalAABBTreeNode* retNode = splitLeafNode(baseNode, index, minV, maxV, bounds);
+ split = true;
+ return retNode;
+ }
+ }
+ }
+}
+
+// update the index, do a full remove/insert update
+IncrementalAABBTreeNode* IncrementalAABBTree::update(IncrementalAABBTreeNode* node, const PoolIndex index, const PxBounds3* bounds, bool& split, IncrementalAABBTreeNode*& removedNode)
+{
+ PX_SIMD_GUARD;
+
+ removedNode = remove(node, index, bounds);
+ return insert(index, bounds, split);
+}
+
+// update the index, faster version with a lazy update of objects that moved just a bit
+IncrementalAABBTreeNode* IncrementalAABBTree::updateFast(IncrementalAABBTreeNode* node, const PoolIndex index, const PxBounds3* bounds, bool& split, IncrementalAABBTreeNode*& removedNode)
+{
+ PX_SIMD_GUARD;
+
+ const Vec4V minV = V4ClearW(V4LoadU(&bounds[index].minimum.x));
+ const Vec4V maxV = V4ClearW(V4LoadU(&bounds[index].maximum.x));
+
+	// for update fast, we don't care if the tree gets slowly unbalanced, we are building a new tree already
+ if(nodeIntersection(*node, minV, maxV))
+ {
+ updateHierarchyAfterRemove(node, bounds);
+ return node;
+ }
+ else
+ {
+ removedNode = remove(node, index, bounds);
+ return insert(index, bounds, split);
+ }
+}
+
+// remove primitive from the tree, return a node if it moved to its parent
+IncrementalAABBTreeNode* IncrementalAABBTree::remove(IncrementalAABBTreeNode* node, const PoolIndex index, const PxBounds3* bounds)
+{
+ PX_SIMD_GUARD;
+ PX_ASSERT(node->isLeaf());
+ // if we just remove the primitive from the list
+ if(node->getNbPrimitives() > 1)
+ {
+ removePrimitiveFromNode(node, index);
+
+ // update the hierarchy
+ updateHierarchyAfterRemove(node, bounds);
+ return NULL;
+ }
+ else
+ {
+ // if root node and the last primitive remove root
+ if(node == mRoot)
+ {
+ mNodesPool.deallocate(reinterpret_cast<IncrementalAABBTreeNodePair*>(node));
+ mRoot = NULL;
+ return NULL;
+ }
+ else
+ {
+ // create new parent and remove the current leaf
+ IncrementalAABBTreeNode* parent = node->mParent;
+ IncrementalAABBTreeNodePair* removedPair = reinterpret_cast<IncrementalAABBTreeNodePair*>(parent->mChilds[0]);
+ PX_ASSERT(!parent->isLeaf());
+
+ // copy the remaining child into parent
+ IncrementalAABBTreeNode* remainingChild = (parent->mChilds[0] == node) ? parent->mChilds[1] : parent->mChilds[0];
+ parent->mBVMax = remainingChild->mBVMax;
+ parent->mBVMin = remainingChild->mBVMin;
+ if(remainingChild->isLeaf())
+ {
+ parent->mIndices = remainingChild->mIndices;
+ parent->mChilds[1] = NULL;
+ }
+ else
+ {
+ parent->mChilds[0] = remainingChild->mChilds[0];
+ parent->mChilds[0]->mParent = parent;
+ parent->mChilds[1] = remainingChild->mChilds[1];
+ parent->mChilds[1]->mParent = parent;
+ }
+
+ if(parent->mParent)
+ {
+ updateHierarchyAfterRemove(parent->mParent, bounds);
+ }
+
+ mIndicesPool.deallocate(node->mIndices);
+ mNodesPool.deallocate(removedPair);
+ return parent;
+ }
+ }
+}
+
+// fixup the indices
+void IncrementalAABBTree::fixupTreeIndices(IncrementalAABBTreeNode* node, const PoolIndex index, const PoolIndex newIndex)
+{
+ PX_ASSERT(node->isLeaf());
+
+ AABBTreeIndices& indices = *node->mIndices;
+ for(PxU32 i = 0; i < indices.nbIndices; i++)
+ {
+ if(indices.indices[i] == index)
+ {
+ indices.indices[i] = newIndex;
+ return;
+ }
+ }
+ PX_ASSERT(0);
+}
+
+// shift node
+static void shiftNode(IncrementalAABBTreeNode* node, const Vec4V& shiftV)
+{
+ node->mBVMax = V4Sub(node->mBVMax, shiftV);
+ node->mBVMin = V4Sub(node->mBVMin, shiftV);
+
+ if(!node->isLeaf())
+ {
+ shiftNode(node->mChilds[0], shiftV);
+ shiftNode(node->mChilds[1], shiftV);
+ }
+}
+
+// shift origin
+void IncrementalAABBTree::shiftOrigin(const PxVec3& shift)
+{
+ if(mRoot)
+ {
+ const Vec4V shiftV = V4ClearW(V4LoadU(&shift.x));
+
+ shiftNode(mRoot, shiftV);
+ }
+}
+
+static void checkNode(IncrementalAABBTreeNode* node, IncrementalAABBTreeNode* parent, const PxBounds3* bounds, PoolIndex maxIndex, PxU32& numIndices)
+{
+ PX_ASSERT(node->mParent == parent);
+ PX_ASSERT(!parent->isLeaf());
+ PX_ASSERT(parent->mChilds[0] == node || parent->mChilds[1] == node);
+
+ ASSERT_ISVALIDVEC3V(node->mBVMin);
+ ASSERT_ISVALIDVEC3V(node->mBVMax);
+
+ if(!node->isLeaf())
+ {
+ PX_ASSERT(nodeInsideBounds(node->mChilds[0]->mBVMin, node->mChilds[0]->mBVMax, node->mBVMin, node->mBVMax));
+ PX_ASSERT(nodeInsideBounds(node->mChilds[1]->mBVMin, node->mChilds[1]->mBVMax, node->mBVMin, node->mBVMax));
+
+ const Vec4V testMinV = V4Min(parent->mChilds[0]->mBVMin, parent->mChilds[1]->mBVMin);
+ const Vec4V testMaxV = V4Max(parent->mChilds[0]->mBVMax, parent->mChilds[1]->mBVMax);
+
+ PX_UNUSED(testMinV);
+ PX_UNUSED(testMaxV);
+ PX_ASSERT(boundsEqual(testMinV, testMaxV, node->mBVMin, node->mBVMax));
+
+ checkNode(node->mChilds[0], node, bounds, maxIndex, numIndices);
+ checkNode(node->mChilds[1], node, bounds, maxIndex, numIndices);
+ }
+ else
+ {
+ const AABBTreeIndices& indices = *node->mIndices;
+ PX_ASSERT(indices.nbIndices);
+ Vec4V testMinV = V4ClearW(V4LoadU(&bounds[indices.indices[0]].minimum.x));
+ Vec4V testMaxV = V4ClearW(V4LoadU(&bounds[indices.indices[0]].maximum.x));
+ for(PxU32 i = 0; i < indices.nbIndices; i++)
+ {
+ PX_ASSERT(indices.indices[i] < maxIndex);
+ numIndices++;
+
+ const Vec4V minV = V4ClearW(V4LoadU(&bounds[indices.indices[i]].minimum.x));
+ const Vec4V maxV = V4ClearW(V4LoadU(&bounds[indices.indices[i]].maximum.x));
+
+ testMinV = V4Min(testMinV, minV);
+ testMaxV = V4Max(testMaxV, maxV);
+
+ PX_ASSERT(nodeInsideBounds(minV, maxV, node->mBVMin, node->mBVMax));
+ }
+
+ PX_ASSERT(boundsEqual(testMinV, testMaxV, node->mBVMin, node->mBVMax));
+ }
+}
+
+void IncrementalAABBTree::hierarchyCheck(PoolIndex maxIndex, const PxBounds3* bounds)
+{
+ PxU32 numHandles = 0;
+ if(mRoot && !mRoot->isLeaf())
+ {
+ checkNode(mRoot->mChilds[0], mRoot, bounds, maxIndex, numHandles);
+ checkNode(mRoot->mChilds[1], mRoot, bounds, maxIndex, numHandles);
+
+ PX_ASSERT(numHandles == maxIndex);
+ }
+}
+
+void IncrementalAABBTree::checkTreeLeaf(IncrementalAABBTreeNode* leaf, PoolIndex h)
+{
+ PX_ASSERT(leaf->isLeaf());
+
+ const AABBTreeIndices& indices = *leaf->mIndices;
+ bool found = false;
+ for(PxU32 i = 0; i < indices.nbIndices; i++)
+ {
+ if(indices.indices[i] == h)
+ {
+ found = true;
+ break;
+ }
+ }
+ PX_UNUSED(found);
+ PX_ASSERT(found);
+}
+
+// build the tree from given bounds
+bool IncrementalAABBTree::build(AABBTreeBuildParams& params, Ps::Array<IncrementalAABBTreeNode*>& mapping)
+{
+ // Init stats
+ BuildStats stats;
+ const PxU32 nbPrimitives = params.mNbPrimitives;
+ if (!nbPrimitives)
+ return false;
+
+ // Init stats
+ stats.setCount(1);
+
+ // Initialize indices. This list will be modified during build.
+ PxU32* indices = reinterpret_cast<PxU32*>(PX_ALLOC(sizeof(PxU32)*nbPrimitives, "AABB tree indices"));
+ // Identity permutation
+ for (PxU32 i = 0; i<nbPrimitives; i++)
+ indices[i] = i;
+
+ // Allocate a pool of nodes
+ mNodeAllocator.init(nbPrimitives, params.mLimit);
+
+ // Compute box centers only once and cache them
+ params.mCache = reinterpret_cast<PxVec3*>(PX_ALLOC(sizeof(PxVec3)*(nbPrimitives + 1), "cache"));
+ const float half = 0.5f;
+ const FloatV halfV = FLoad(half);
+ for (PxU32 i = 0; i<nbPrimitives; i++)
+ {
+ const Vec4V curMinV = V4LoadU(&params.mAABBArray[i].minimum.x);
+ const Vec4V curMaxV = V4LoadU(&params.mAABBArray[i].maximum.x);
+ const Vec4V centerV = V4Scale(V4Add(curMaxV, curMinV), halfV);
+ V4StoreU(centerV, &params.mCache[i].x);
+ }
+
+ // Build the hierarchy
+ mNodeAllocator.mPool->_buildHierarchy(params, stats, mNodeAllocator, indices);
+
+ PX_FREE_AND_RESET(params.mCache);
+
+ IncrementalAABBTreeNode** treeNodes = reinterpret_cast<IncrementalAABBTreeNode**>(PX_ALLOC(sizeof(IncrementalAABBTreeNode*)*(stats.getCount()), "temp node helper array"));
+ PxMemSet(treeNodes, 0, sizeof(IncrementalAABBTreeNode*)*(stats.getCount()));
+
+ clone(mapping, indices, treeNodes);
+ mRoot = treeNodes[0];
+ mRoot->mParent = NULL;
+
+ PX_FREE_AND_RESET(indices);
+ PX_FREE_AND_RESET(treeNodes);
+
+ mNodeAllocator.release();
+ return true;
+}
+
+// clone the tree, the tree is computed in the NodeAllocator, similar to AABBTree flatten
+void IncrementalAABBTree::clone(Ps::Array<IncrementalAABBTreeNode*>& mapping, const PxU32* _indices, IncrementalAABBTreeNode** treeNodes)
+{
+ PxU32 offset = 0;
+ const PxU32 nbSlabs = mNodeAllocator.mSlabs.size();
+ for (PxU32 s = 0; s<nbSlabs; s++)
+ {
+ const NodeAllocator::Slab& currentSlab = mNodeAllocator.mSlabs[s];
+
+ AABBTreeBuildNode* pool = currentSlab.mPool;
+ for (PxU32 i = 0; i < currentSlab.mNbUsedNodes; i++)
+ {
+ IncrementalAABBTreeNode* destNode = treeNodes[offset];
+ if(!destNode)
+ {
+ destNode = reinterpret_cast<IncrementalAABBTreeNode*>(mNodesPool.allocate());
+ treeNodes[offset] = destNode;
+ }
+
+ destNode->mBVMin = V4ClearW(V4LoadU(&pool[i].mBV.minimum.x));
+ destNode->mBVMax = V4ClearW(V4LoadU(&pool[i].mBV.maximum.x));
+
+ if (pool[i].isLeaf())
+ {
+ AABBTreeIndices* indices = mIndicesPool.allocate();
+ destNode->mIndices = indices;
+ destNode->mChilds[1] = NULL;
+ indices->nbIndices = pool[i].getNbPrimitives();
+ PX_ASSERT(indices->nbIndices <= 16);
+ const PxU32* sourceIndices = _indices + pool[i].mNodeIndex;
+ for (PxU32 iIndices = 0; iIndices < indices->nbIndices; iIndices++)
+ {
+ const PxU32 sourceIndex = sourceIndices[iIndices];
+ indices->indices[iIndices] = sourceIndex;
+ PX_ASSERT(sourceIndex < mapping.size());
+ mapping[sourceIndex] = destNode;
+ }
+ }
+ else
+ {
+ PX_ASSERT(pool[i].mPos);
+ PxU32 localNodeIndex = 0xffffffff;
+ PxU32 nodeBase = 0;
+ for (PxU32 j = 0; j<nbSlabs; j++)
+ {
+ if (pool[i].mPos >= mNodeAllocator.mSlabs[j].mPool && pool[i].mPos < mNodeAllocator.mSlabs[j].mPool + mNodeAllocator.mSlabs[j].mNbUsedNodes)
+ {
+ localNodeIndex = PxU32(pool[i].mPos - mNodeAllocator.mSlabs[j].mPool);
+ break;
+ }
+ nodeBase += mNodeAllocator.mSlabs[j].mNbUsedNodes;
+ }
+ const PxU32 nodeIndex = nodeBase + localNodeIndex;
+
+ IncrementalAABBTreeNode* child0 = treeNodes[nodeIndex];
+ IncrementalAABBTreeNode* child1 = treeNodes[nodeIndex + 1];
+ if(!child0)
+ {
+ PX_ASSERT(!child1);
+ child0 = reinterpret_cast<IncrementalAABBTreeNode*>(mNodesPool.allocate());
+ child1 = child0 + 1;
+ treeNodes[nodeIndex] = child0;
+ treeNodes[nodeIndex + 1] = child1;
+ }
+
+ destNode->mChilds[0] = child0;
+ destNode->mChilds[1] = child1;
+ child0->mParent = destNode;
+ child1->mParent = destNode;
+ }
+ offset++;
+ }
+ }
+}
+
+
diff --git a/PhysX_3.4/Source/SceneQuery/src/SqIncrementalAABBTree.h b/PhysX_3.4/Source/SceneQuery/src/SqIncrementalAABBTree.h
new file mode 100644
index 00000000..040302a7
--- /dev/null
+++ b/PhysX_3.4/Source/SceneQuery/src/SqIncrementalAABBTree.h
@@ -0,0 +1,195 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2017 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#ifndef SQ_INCREMENTAL_AABB_TREE_H
+#define SQ_INCREMENTAL_AABB_TREE_H
+
+#include "foundation/PxBounds3.h"
+#include "PsUserAllocated.h"
+#include "PsVecMath.h"
+#include "SqPruner.h"
+#include "SqAABBTreeBuild.h"
+#include "PsPool.h"
+
+namespace physx
+{
+ using namespace shdfnd::aos;
+
+ namespace Sq
+ {
+ class AABBTree;
+
+ #define NB_OBJECTS_PER_NODE 4
+
+ ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+ // tree indices, can change in runtime
+ struct AABBTreeIndices
+ {
+ PX_FORCE_INLINE AABBTreeIndices(PoolIndex index) :
+ nbIndices(1)
+ {
+ indices[0] = index;
+ for(PxU32 i = 1; i < NB_OBJECTS_PER_NODE; i++)
+ {
+ indices[i] = 0;
+ }
+ }
+
+ PxU32 nbIndices;
+ PoolIndex indices[NB_OBJECTS_PER_NODE];
+ };
+
+ ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+ // tree node, has parent information
+ class IncrementalAABBTreeNode : public Ps::UserAllocated
+ {
+ public:
+ PX_FORCE_INLINE IncrementalAABBTreeNode():
+ mParent(NULL)
+ {
+ mChilds[0] = NULL;
+ mChilds[1] = NULL;
+ }
+ PX_FORCE_INLINE IncrementalAABBTreeNode(AABBTreeIndices* indices):
+ mParent(NULL)
+ {
+ mIndices = indices;
+ mChilds[1] = NULL;
+ }
+ PX_FORCE_INLINE ~IncrementalAABBTreeNode() {}
+
+ PX_FORCE_INLINE PxU32 isLeaf() const { return PxU32(mChilds[1]==0); }
+
+ PX_FORCE_INLINE const PxU32* getPrimitives(const PxU32* ) const { return &mIndices->indices[0]; }
+ PX_FORCE_INLINE PxU32* getPrimitives(PxU32* ) { return &mIndices->indices[0]; }
+ PX_FORCE_INLINE PxU32 getNbPrimitives() const { return mIndices->nbIndices; }
+
+ PX_FORCE_INLINE const IncrementalAABBTreeNode* getPos(const IncrementalAABBTreeNode* ) const { return mChilds[0]; }
+ PX_FORCE_INLINE const IncrementalAABBTreeNode* getNeg(const IncrementalAABBTreeNode* ) const { return mChilds[1]; }
+
+ PX_FORCE_INLINE IncrementalAABBTreeNode* getPos(IncrementalAABBTreeNode* ) { return mChilds[0]; }
+ PX_FORCE_INLINE IncrementalAABBTreeNode* getNeg(IncrementalAABBTreeNode* ) { return mChilds[1]; }
+
+ PX_FORCE_INLINE void getAABBCenterExtentsV(Vec4V* center, Vec4V* extents) const
+ {
+ const float half = 0.5f;
+ const FloatV halfV = FLoad(half);
+
+ *extents = V4Scale(V4Sub(mBVMax, mBVMin), halfV);
+ *center = V4Scale(V4Add(mBVMax, mBVMin), halfV);
+ }
+
+ PX_FORCE_INLINE void getAABBCenterExtentsV2(Vec4V* center, Vec4V* extents) const
+ {
+ *extents = V4Sub(mBVMax, mBVMin);
+ *center = V4Add(mBVMax, mBVMin);
+ }
+
+ PX_FORCE_INLINE void getAABBMinMaxV(Vec4V* minV, Vec4V* maxV) const
+ {
+ *minV = mBVMin;
+ *maxV = mBVMax;
+ }
+
+ Vec4V mBVMin; // Global bounding-volume min enclosing all the node-related primitives
+ Vec4V mBVMax; // Global bounding-volume max enclosing all the node-related primitives
+ IncrementalAABBTreeNode* mParent; // node parent
+ union
+ {
+ IncrementalAABBTreeNode* mChilds[2]; // childs of node if not a leaf
+ AABBTreeIndices* mIndices; // if leaf, indices information
+ };
+ };
+
+ struct IncrementalAABBTreeNodePair
+ {
+ IncrementalAABBTreeNode mNode0;
+ IncrementalAABBTreeNode mNode1;
+ };
+
+ ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+		// incremental AABB tree, all changes are immediately reflected to the tree
+ class IncrementalAABBTree : public Ps::UserAllocated
+ {
+ public:
+ IncrementalAABBTree();
+ ~IncrementalAABBTree();
+
+ // Build the tree for the first time
+ bool build(AABBTreeBuildParams& params, Ps::Array<IncrementalAABBTreeNode*>& mapping);
+
+			// insert a new index into the tree
+ IncrementalAABBTreeNode* insert(const PoolIndex index, const PxBounds3* bounds, bool& split);
+
+ // update the object in the tree - full update insert/remove
+ IncrementalAABBTreeNode* update(IncrementalAABBTreeNode* node, const PoolIndex index, const PxBounds3* bounds, bool& split, IncrementalAABBTreeNode*& removedNode);
+			// update the object in the tree, faster method, that may unbalance the tree
+ IncrementalAABBTreeNode* updateFast(IncrementalAABBTreeNode* node, const PoolIndex index, const PxBounds3* bounds, bool& split, IncrementalAABBTreeNode*& removedNode);
+
+ // remove object from the tree
+ IncrementalAABBTreeNode* remove(IncrementalAABBTreeNode* node, const PoolIndex index, const PxBounds3* bounds);
+
+ // fixup the tree indices, if we swapped the objects in the pruning pool
+ void fixupTreeIndices(IncrementalAABBTreeNode* node, const PoolIndex index, const PoolIndex newIndex);
+
+ // origin shift
+ void shiftOrigin(const PxVec3& shift);
+
+ // get the tree root node
+ const IncrementalAABBTreeNode* getNodes() const { return mRoot; }
+
+ // define this function so we can share the scene query code with regular AABBTree
+ const PxU32* getIndices() const { return NULL; }
+
+ // paranoia checks
+ void hierarchyCheck(PoolIndex maxIndex, const PxBounds3* bounds);
+ void checkTreeLeaf(IncrementalAABBTreeNode* leaf, PoolIndex h);
+
+ void release();
+
+ private:
+
+ // clone the tree from the generic AABB tree that was built
+ void clone(Ps::Array<IncrementalAABBTreeNode*>& mapping, const PxU32* indices, IncrementalAABBTreeNode** treeNodes);
+
+ // split leaf node, the newly added object does not fit in
+ IncrementalAABBTreeNode* splitLeafNode(IncrementalAABBTreeNode* node, const PoolIndex index, const Vec4V& minV, const Vec4V& maxV, const PxBounds3* bounds);
+
+ void releaseNode(IncrementalAABBTreeNode* node);
+ private:
+ Ps::Pool<AABBTreeIndices> mIndicesPool;
+ Ps::Pool<IncrementalAABBTreeNodePair> mNodesPool;
+ IncrementalAABBTreeNode* mRoot;
+
+ NodeAllocator mNodeAllocator;
+ };
+ }
+}
+
+#endif
diff --git a/PhysX_3.4/Source/SceneQuery/src/SqSceneQueryManager.cpp b/PhysX_3.4/Source/SceneQuery/src/SqSceneQueryManager.cpp
index 3bc88dba..b889d944 100644
--- a/PhysX_3.4/Source/SceneQuery/src/SqSceneQueryManager.cpp
+++ b/PhysX_3.4/Source/SceneQuery/src/SqSceneQueryManager.cpp
@@ -189,6 +189,8 @@ SceneQueryManager::SceneQueryManager( Scb::Scene& scene, PxPruningStructureType:
mDynamicBoundsSync.mPruner = mPrunerExt[PruningIndex::eDYNAMIC].pruner();
mDynamicBoundsSync.mTimestamp = &mPrunerExt[PruningIndex::eDYNAMIC].mTimestamp;
+
+ mPrunerNeedsUpdating = false;
}
SceneQueryManager::~SceneQueryManager()
@@ -203,6 +205,7 @@ void SceneQueryManager::flushMemory()
void SceneQueryManager::markForUpdate(PrunerData data)
{
+ mPrunerNeedsUpdating = true;
const PxU32 index = getPrunerIndex(data);
const PrunerHandle handle = getPrunerHandle(data);
@@ -217,6 +220,8 @@ void SceneQueryManager::preallocate(PxU32 staticShapes, PxU32 dynamicShapes)
PrunerData SceneQueryManager::addPrunerShape(const NpShape& shape, const PxRigidActor& actor, bool dynamic, const PxBounds3* bounds, bool hasPrunerStructure)
{
+ mPrunerNeedsUpdating = true;
+
PrunerPayload pp;
const Scb::Shape& scbShape = shape.getScbShape();
const Scb::Actor& scbActor = gOffsetTable.convertPxActor2Scb(actor);
@@ -249,6 +254,7 @@ const PrunerPayload& SceneQueryManager::getPayload(PrunerData data) const
void SceneQueryManager::removePrunerShape(PrunerData data)
{
+ mPrunerNeedsUpdating = true;
const PxU32 index = getPrunerIndex(data);
const PrunerHandle handle = getPrunerHandle(data);
@@ -334,110 +340,33 @@ void SceneQueryManager::validateSimUpdates()
}
}
-void SceneQueryManager::processSimUpdates()
+void SceneQueryManager::afterSync(PxSceneQueryUpdateMode::Enum updateMode)
{
- PX_PROFILE_ZONE("Sim.updatePruningTrees", mScene.getContextId());
+ PX_PROFILE_ZONE("Sim.sceneQueryBuildStep", mScene.getContextId());
+ if(updateMode == PxSceneQueryUpdateMode::eBUILD_DISABLED_COMMIT_DISABLED)
{
- PX_PROFILE_ZONE("SceneQuery.processActiveShapes", mScene.getContextId());
-
- // update all active objects
- BodyCore*const* activeBodies = mScene.getScScene().getActiveBodiesArray();
- PxU32 nbActiveBodies = mScene.getScScene().getNumActiveBodies();
-
-#define NB_BATCHED_OBJECTS 128
- PrunerHandle batchedHandles[NB_BATCHED_OBJECTS];
- PxU32 nbBatchedObjects = 0;
- Pruner* pruner = mPrunerExt[PruningIndex::eDYNAMIC].pruner();
-
- while(nbActiveBodies--)
- {
- // PT: TODO: don't put frozen objects in "active bodies" array? After all they
- // are also not included in the 'active transforms' or 'active actors' arrays.
- BodyCore* currentBody = *activeBodies++;
- if(currentBody->isFrozen())
- continue;
-
- PxActorType::Enum type;
- PxRigidBody* pxBody = static_cast<PxRigidBody*>(getPxActorFromBodyCore(currentBody, type));
- PX_ASSERT(pxBody->getConcreteType()==PxConcreteType::eRIGID_DYNAMIC || pxBody->getConcreteType()==PxConcreteType::eARTICULATION_LINK);
-
- NpShapeManager* shapeManager;
- if(type==PxActorType::eRIGID_DYNAMIC)
- {
- NpRigidDynamic* rigidDynamic = static_cast<NpRigidDynamic*>(pxBody);
- shapeManager = &rigidDynamic->getShapeManager();
- }
- else
- {
- NpArticulationLink* articulationLink = static_cast<NpArticulationLink*>(pxBody);
- shapeManager = &articulationLink->getShapeManager();
- }
-
- const PxU32 nbShapes = shapeManager->getNbShapes();
- for(PxU32 i=0; i<nbShapes; i++)
- {
- const PrunerData data = shapeManager->getPrunerData(i);
- if(data!=SQ_INVALID_PRUNER_DATA)
- {
- // PT: index can't be zero here!
- PX_ASSERT(getPrunerIndex(data)==PruningIndex::eDYNAMIC);
-
- const PrunerHandle handle = getPrunerHandle(data);
-
- if(!mPrunerExt[PruningIndex::eDYNAMIC].isDirty(handle)) // PT: if dirty, will be updated in "flushShapes"
- {
- batchedHandles[nbBatchedObjects] = handle;
-
- PxBounds3* bounds;
- const PrunerPayload& pp = pruner->getPayload(handle, bounds);
- computeDynamicWorldAABB(*bounds, *(reinterpret_cast<Scb::Shape*>(pp.data[0])), *(reinterpret_cast<Scb::Actor*>(pp.data[1])));
- nbBatchedObjects++;
-
- if(nbBatchedObjects==NB_BATCHED_OBJECTS)
- {
- mPrunerExt[PruningIndex::eDYNAMIC].invalidateTimestamp();
- pruner->updateObjectsAfterManualBoundsUpdates(batchedHandles, nbBatchedObjects);
- nbBatchedObjects = 0;
- }
- }
- }
- }
- }
- if(nbBatchedObjects)
- {
- mPrunerExt[PruningIndex::eDYNAMIC].invalidateTimestamp();
- pruner->updateObjectsAfterManualBoundsUpdates(batchedHandles, nbBatchedObjects);
- }
+ mPrunerNeedsUpdating = true;
+ return;
}
// flush user modified objects
flushShapes();
- for(PxU32 i=0;i<PruningIndex::eCOUNT;i++)
- {
- if(mPrunerExt[i].pruner() && mPrunerExt[i].type() == PxPruningStructureType::eDYNAMIC_AABB_TREE)
- static_cast<AABBPruner*>(mPrunerExt[i].pruner())->buildStep();
-
- mPrunerExt[i].pruner()->commit();
- }
-}
-
-void SceneQueryManager::afterSync(bool commit)
-{
- PX_PROFILE_ZONE("Sim.sceneQueryBuildStep", mScene.getContextId());
+ bool commit = updateMode == PxSceneQueryUpdateMode::eBUILD_ENABLED_COMMIT_ENABLED;
- // flush user modified objects
- flushShapes();
-
- for (PxU32 i = 0; i<2; i++)
+ for(PxU32 i = 0; i<2; i++)
{
- if (mPrunerExt[i].pruner() && mPrunerExt[i].type() == PxPruningStructureType::eDYNAMIC_AABB_TREE)
- static_cast<AABBPruner*>(mPrunerExt[i].pruner())->buildStep();
+ if(mPrunerExt[i].pruner() && mPrunerExt[i].type() == PxPruningStructureType::eDYNAMIC_AABB_TREE)
+ static_cast<AABBPruner*>(mPrunerExt[i].pruner())->buildStep(true);
- if (commit)
+ if(commit)
mPrunerExt[i].pruner()->commit();
}
+
+ //If we didn't commit changes, then the next query must perform that operation so this bool must be set to true, otherwise there should be
+ //no outstanding work required by queries at this time
+ mPrunerNeedsUpdating = !commit;
}
void SceneQueryManager::flushShapes()
@@ -454,17 +383,30 @@ void SceneQueryManager::flushUpdates()
{
PX_PROFILE_ZONE("SceneQuery.flushUpdates", mScene.getContextId());
+ if (mPrunerNeedsUpdating)
+ {
// no need to take lock if manual sq update is enabled
// as flushUpdates will only be called from NpScene::flushQueryUpdates()
- mSceneQueryLock.lock();
+ mSceneQueryLock.lock();
- flushShapes();
+ //Check again because another thread may have already performed all pending updated (this secondary check might be unnecessary)
+ if (mPrunerNeedsUpdating)
+ {
- for(PxU32 i=0;i<PruningIndex::eCOUNT;i++)
- if(mPrunerExt[i].pruner())
- mPrunerExt[i].pruner()->commit();
+ flushShapes();
+
+ for (PxU32 i = 0; i < PruningIndex::eCOUNT; i++)
+ if (mPrunerExt[i].pruner())
+ mPrunerExt[i].pruner()->commit();
- mSceneQueryLock.unlock();
+ //KS - force memory writes to have completed before updating the volatile mPrunerNeedsUpdating member. This should ensure that, if another thread
+ //reads this value and finds it is false, that all the modifications we made to the pruner are visible in memory.
+ physx::shdfnd::memoryBarrier();
+
+ mPrunerNeedsUpdating = false;
+ }
+ mSceneQueryLock.unlock();
+ }
}
void SceneQueryManager::forceDynamicTreeRebuild(bool rebuildStaticStructure, bool rebuildDynamicStructure)
@@ -484,6 +426,30 @@ void SceneQueryManager::forceDynamicTreeRebuild(bool rebuildStaticStructure, boo
}
}
+void SceneQueryManager::sceneQueryBuildStep(PruningIndex::Enum index)
+{
+ PX_PROFILE_ZONE("SceneQuery.sceneQueryBuildStep", mScene.getContextId());
+
+ if (mPrunerExt[index].pruner() && mPrunerExt[index].type() == PxPruningStructureType::eDYNAMIC_AABB_TREE)
+ {
+ const bool buildFinished = static_cast<AABBPruner*>(mPrunerExt[index].pruner())->buildStep(false);
+ if(buildFinished)
+ {
+ mPrunerNeedsUpdating = true;
+ }
+ }
+}
+
+bool SceneQueryManager::prepareSceneQueriesUpdate(PruningIndex::Enum index)
+{
+ bool retVal = false;
+ if (mPrunerExt[index].pruner() && mPrunerExt[index].type() == PxPruningStructureType::eDYNAMIC_AABB_TREE)
+ {
+ retVal = static_cast<AABBPruner*>(mPrunerExt[index].pruner())->prepareBuild();
+ }
+ return retVal;
+}
+
void SceneQueryManager::shiftOrigin(const PxVec3& shift)
{
for(PxU32 i=0; i<PruningIndex::eCOUNT; i++)