aboutsummaryrefslogtreecommitdiff
path: root/PhysX_3.4/Source/LowLevel/common/include/utils
diff options
context:
space:
mode:
authorgit perforce import user <a@b>2016-10-25 12:29:14 -0600
committerSheikh Dawood Abdul Ajees <Sheikh Dawood Abdul Ajees>2016-10-25 18:56:37 -0500
commit3dfe2108cfab31ba3ee5527e217d0d8e99a51162 (patch)
treefa6485c169e50d7415a651bf838f5bcd0fd3bfbd /PhysX_3.4/Source/LowLevel/common/include/utils
downloadphysx-3.4-3dfe2108cfab31ba3ee5527e217d0d8e99a51162.tar.xz
physx-3.4-3dfe2108cfab31ba3ee5527e217d0d8e99a51162.zip
Initial commit:
PhysX 3.4.0 Update @ 21294896 APEX 1.4.0 Update @ 21275617 [CL 21300167]
Diffstat (limited to 'PhysX_3.4/Source/LowLevel/common/include/utils')
-rw-r--r--PhysX_3.4/Source/LowLevel/common/include/utils/PxcScratchAllocator.h138
-rw-r--r--PhysX_3.4/Source/LowLevel/common/include/utils/PxcThreadCoherentCache.h150
2 files changed, 288 insertions, 0 deletions
diff --git a/PhysX_3.4/Source/LowLevel/common/include/utils/PxcScratchAllocator.h b/PhysX_3.4/Source/LowLevel/common/include/utils/PxcScratchAllocator.h
new file mode 100644
index 00000000..aaf43626
--- /dev/null
+++ b/PhysX_3.4/Source/LowLevel/common/include/utils/PxcScratchAllocator.h
@@ -0,0 +1,138 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2016 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+
+#ifndef PXC_SCRATCHALLOCATOR_H
+#define PXC_SCRATCHALLOCATOR_H
+
+#include "foundation/PxAssert.h"
+#include "PxvConfig.h"
+#include "PsMutex.h"
+#include "PsArray.h"
+#include "PsAllocator.h"
+
+namespace physx
+{
+// Scratch ("frame-temporary") memory manager. An external owner installs one
+// contiguous block via setBlock(); alloc() then carves 16-byte-aligned pieces
+// from the top of that block downwards, optionally falling back to the heap
+// when the block is exhausted. Live allocations are tracked on mStack as a
+// monotonically decreasing sequence of start addresses, so free() can release
+// them in any order with a short linear scan.
+// Thread-safety: alloc/allocAll/free are serialized by mLock; setBlock is NOT
+// locked and must only be called while no other thread is using the allocator
+// (e.g. between simulation steps).
+class PxcScratchAllocator
+{
+ PX_NOCOPY(PxcScratchAllocator)
+public:
+ PxcScratchAllocator() : mStack(PX_DEBUG_EXP("PxcScratchAllocator")), mStart(NULL), mSize(0)
+ {
+ mStack.reserve(64);
+ // Sentinel entry: keeps the stack non-empty so back() is valid and the
+ // size()==1 assertion in setBlock() holds before any block is installed.
+ mStack.pushBack(0);
+ }
+
+ // Install the scratch block for the coming frame. addr/size may be NULL/0,
+ // in which case every alloc() either fails or falls back to the heap.
+ void setBlock(void* addr, PxU32 size)
+ {
+ // if the stack is not empty then some scratch memory was not freed on the previous frame. That's
+ // likely indicative of a problem, because when the scratch block is too small the memory will have
+ // come from the heap
+
+ PX_ASSERT(mStack.size()==1);
+ mStack.popBack();
+
+ mStart = reinterpret_cast<PxU8*>(addr);
+ mSize = size;
+ // Bottom stack entry is the current "top of free space": allocation
+ // proceeds downwards from mStart+size towards mStart.
+ mStack.pushBack(mStart + size);
+ }
+
+ // Claim ALL remaining scratch space as one allocation; 'size' receives the
+ // byte count handed out. Returns NULL (with size==0) when nothing is left.
+ // The returned pointer must be released with free() like any other.
+ void* allocAll(PxU32& size)
+ {
+ Ps::Mutex::ScopedLock lock(mLock);
+ PX_ASSERT(mStack.size()>0);
+ size = PxU32(mStack.back()-mStart);
+
+ if(size==0)
+ return NULL;
+
+ mStack.pushBack(mStart);
+ return mStart;
+ }
+
+
+ // Allocate requestedSize bytes (rounded up to a multiple of 16) from the
+ // scratch block. Returns NULL when the block cannot satisfy the request,
+ // unless fallBackToHeap is set, in which case the memory comes from
+ // PX_ALLOC instead (free() detects and routes that case to PX_FREE).
+ void* alloc(PxU32 requestedSize, bool fallBackToHeap = false)
+ {
+ requestedSize = (requestedSize+15)&~15;
+
+ Ps::Mutex::ScopedLock lock(mLock);
+ PX_ASSERT(mStack.size()>=1);
+
+ PxU8* top = mStack.back();
+
+ // Enough room between the block base and the current free-space top?
+ if(top - mStart >= ptrdiff_t(requestedSize))
+ {
+ PxU8* addr = top - requestedSize;
+ mStack.pushBack(addr);
+ return addr;
+ }
+
+ if(!fallBackToHeap)
+ return NULL;
+
+ return PX_ALLOC(requestedSize, "Scratch Block Fallback");
+ }
+
+ // Release an allocation obtained from alloc()/allocAll(). Allocations may
+ // be freed in any order; heap-fallback pointers are forwarded to PX_FREE.
+ void free(void* addr)
+ {
+ PX_ASSERT(addr!=NULL);
+ if(!isScratchAddr(addr))
+ {
+ PX_FREE(addr);
+ return;
+ }
+
+ Ps::Mutex::ScopedLock lock(mLock);
+ PX_ASSERT(mStack.size()>1);
+
+ // Entries were pushed in decreasing address order, so scan upwards from
+ // the back (smallest address) until we reach addr, then remove it
+ // (Ps::Array::remove preserves the order of the remaining entries).
+ PxU32 i=mStack.size()-1;
+ while(mStack[i]<addr)
+ i--;
+
+ PX_ASSERT(mStack[i]==addr);
+ mStack.remove(i);
+ }
+
+
+ // True if addr lies inside the current scratch block (i.e. it was NOT a
+ // heap-fallback allocation). Reads mStart/mSize without taking the lock:
+ // both are only mutated by setBlock(), which must not run concurrently
+ // with allocation (see class comment).
+ bool isScratchAddr(void* addr) const
+ {
+ PxU8* a = reinterpret_cast<PxU8*>(addr);
+ return a>= mStart && a<mStart+mSize;
+ }
+
+private:
+ Ps::Mutex mLock;
+ Ps::Array<PxU8*> mStack; // live allocation start addresses, decreasing; bottom entry = free-space top
+ PxU8* mStart; // base of the current scratch block (NULL until setBlock)
+ PxU32 mSize; // size of the current scratch block, in bytes
+};
+
+}
+
+#endif
diff --git a/PhysX_3.4/Source/LowLevel/common/include/utils/PxcThreadCoherentCache.h b/PhysX_3.4/Source/LowLevel/common/include/utils/PxcThreadCoherentCache.h
new file mode 100644
index 00000000..ed8a37a5
--- /dev/null
+++ b/PhysX_3.4/Source/LowLevel/common/include/utils/PxcThreadCoherentCache.h
@@ -0,0 +1,150 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2016 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+
+#ifndef PXC_THREADCOHERENTCACHE_H
+#define PXC_THREADCOHERENTCACHE_H
+
+#include "PsMutex.h"
+#include "PsAllocator.h"
+#include "PsSList.h"
+
+namespace physx
+{
+
+class PxsContext;
+/*!
+Controls a pool of large objects which must be thread safe.
+Tries to return the object most recently used by the thread (for better cache coherency).
+Assumes the object can be constructed from a single Params* argument.
+
+(Note the semantics are different to a pool because we don't want to construct/destroy each time
+an object is requested, which may be expensive.)
+
+TODO: add thread coherency.
+*/
+template<class T, class Params>
+class PxcThreadCoherentCache : public Ps::AlignedAllocator<16, Ps::ReflectionAllocator<T> >
+{
+	typedef Ps::AlignedAllocator<16, Ps::ReflectionAllocator<T> > Allocator;
+	PX_NOCOPY(PxcThreadCoherentCache)
+public:
+
+	typedef Ps::SListEntry EntryBase;
+
+	// The Params pointer is forwarded to the constructor of every T that
+	// get() has to create on a cache miss.
+	PX_INLINE PxcThreadCoherentCache(Params* params, const Allocator& alloc = Allocator()) : Allocator(alloc), mParams(params)
+	{
+	}
+
+	// Destroy and deallocate every object still parked in the free list.
+	PX_INLINE ~PxcThreadCoherentCache()
+	{
+		for(T* entry = static_cast<T*>(root.pop()); entry != NULL; entry = static_cast<T*>(root.pop()))
+		{
+			entry->~T();
+			Allocator::deallocate(entry);
+		}
+	}
+
+	// Pop a previously cached object if one is available; otherwise allocate
+	// 16-byte-aligned storage and placement-construct a fresh T(mParams).
+	PX_INLINE T* get()
+	{
+		T* cached = static_cast<T*>(root.pop());
+		if(cached != NULL)
+			return cached;
+
+		T* fresh = reinterpret_cast<T*>(Allocator::allocate(sizeof(T), __FILE__, __LINE__));
+		new (fresh) T(mParams);
+		return fresh;
+	}
+
+	// Hand an object back to the cache. It is NOT destroyed: the next get()
+	// returns it as-is, avoiding construct/destroy cost on every cycle.
+	PX_INLINE void put(T* item)
+	{
+		root.push(*item);
+	}
+
+
+private:
+	Ps::SList root;		// free list of cached objects
+	Params* mParams;	// construction argument for cache misses
+
+	template<class T2, class P2>
+	friend class PxcThreadCoherentCacheIterator;
+};
+
+/*!
+Used to iterate over all objects controlled by the cache.
+
+Note: The iterator flushes the cache (extracts all items on construction and adds them back on
+destruction) so we can iterate the list in a safe manner.
+*/
+template<class T, class Params>
+class PxcThreadCoherentCacheIterator
+{
+public:
+	// Detach the cache's entire free list up front so it can be walked
+	// without racing against concurrent get()/put() calls.
+	PxcThreadCoherentCacheIterator(PxcThreadCoherentCache<T, Params>& cache) : mCache(cache)
+	{
+		mFirst = cache.root.flush();
+		mNext = mFirst;
+	}
+
+	// Push every flushed entry back into the cache on destruction.
+	~PxcThreadCoherentCacheIterator()
+	{
+		Ps::SListEntry* entry = mFirst;
+		while(entry != NULL)
+		{
+			Ps::SListEntry* following = entry->next();
+			mCache.root.push(*entry);
+			entry = following;
+		}
+	}
+
+	// Return the next cached object, or NULL once the walk is complete.
+	PX_INLINE T* getNext()
+	{
+		Ps::SListEntry* current = mNext;
+		if(current == NULL)
+			return NULL;
+
+		mNext = current->next();
+		return static_cast<T*>(current);
+	}
+private:
+
+	PxcThreadCoherentCacheIterator<T, Params>& operator=(const PxcThreadCoherentCacheIterator<T, Params>&);
+	PxcThreadCoherentCache<T, Params> &mCache;	// cache whose list we borrowed
+	Ps::SListEntry* mNext;						// next entry to hand out
+	Ps::SListEntry* mFirst;						// head of the flushed list
+
+};
+
+}
+
+#endif