diff options
| author | git perforce import user <a@b> | 2016-10-25 12:29:14 -0600 |
|---|---|---|
| committer | Sheikh Dawood Abdul Ajees <Sheikh Dawood Abdul Ajees> | 2016-10-25 18:56:37 -0500 |
| commit | 3dfe2108cfab31ba3ee5527e217d0d8e99a51162 (patch) | |
| tree | fa6485c169e50d7415a651bf838f5bcd0fd3bfbd /PhysX_3.4/Source/Common/src/CmFlushPool.h | |
| download | physx-3.4-3dfe2108cfab31ba3ee5527e217d0d8e99a51162.tar.xz physx-3.4-3dfe2108cfab31ba3ee5527e217d0d8e99a51162.zip | |
Initial commit:
PhysX 3.4.0 Update @ 21294896
APEX 1.4.0 Update @ 21275617
[CL 21300167]
Diffstat (limited to 'PhysX_3.4/Source/Common/src/CmFlushPool.h')
| -rw-r--r-- | PhysX_3.4/Source/Common/src/CmFlushPool.h | 157 |
1 file changed, 157 insertions, 0 deletions
diff --git a/PhysX_3.4/Source/Common/src/CmFlushPool.h b/PhysX_3.4/Source/Common/src/CmFlushPool.h new file mode 100644 index 00000000..875d09f4 --- /dev/null +++ b/PhysX_3.4/Source/Common/src/CmFlushPool.h @@ -0,0 +1,157 @@ +// This code contains NVIDIA Confidential Information and is disclosed to you +// under a form of NVIDIA software license agreement provided separately to you. +// +// Notice +// NVIDIA Corporation and its licensors retain all intellectual property and +// proprietary rights in and to this software and related documentation and +// any modifications thereto. Any use, reproduction, disclosure, or +// distribution of this software and related documentation without an express +// license agreement from NVIDIA Corporation is strictly prohibited. +// +// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES +// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO +// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT, +// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE. +// +// Information and code furnished is believed to be accurate and reliable. +// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such +// information or for any infringement of patents or other rights of third parties that may +// result from its use. No license is granted by implication or otherwise under any patent +// or patent rights of NVIDIA Corporation. Details are subject to change without notice. +// This code supersedes and replaces all information previously supplied. +// NVIDIA Corporation products are not authorized for use as critical +// components in life support devices or systems without express written approval of +// NVIDIA Corporation. +// +// Copyright (c) 2008-2016 NVIDIA Corporation. All rights reserved. +// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. +// Copyright (c) 2001-2004 NovodeX AG. All rights reserved. 
+ + +#ifndef PX_PHYSICS_COMMON_FLUSHPOOL +#define PX_PHYSICS_COMMON_FLUSHPOOL + +#include "foundation/Px.h" +#include "PsUserAllocated.h" +#include "CmPhysXCommon.h" +#include "PsMutex.h" +#include "PsArray.h" +#include "PsBitUtils.h" + +/* +Pool used to allocate variable sized tasks. It's intended to be cleared after a short period (time step). +*/ + +namespace physx +{ +namespace Cm +{ + static const PxU32 sSpareChunkCount = 2; + + class FlushPool + { + PX_NOCOPY(FlushPool) + public: + FlushPool(PxU32 chunkSize) : mChunks(PX_DEBUG_EXP("FlushPoolChunk")), mChunkIndex(0), mOffset(0), mChunkSize(chunkSize) + { + mChunks.pushBack(static_cast<PxU8*>(PX_ALLOC(mChunkSize, "PxU8"))); + } + + ~FlushPool() + { + for (PxU32 i = 0; i < mChunks.size(); ++i) + PX_FREE(mChunks[i]); + } + + // alignment must be a power of two + void* allocate(PxU32 size, PxU32 alignment=16) + { + Ps::Mutex::ScopedLock lock(mMutex); + return allocateNotThreadSafe(size, alignment); + } + + // alignment must be a power of two + void* allocateNotThreadSafe(PxU32 size, PxU32 alignment=16) + { + PX_ASSERT(shdfnd::isPowerOfTwo(alignment)); + PX_ASSERT(size <= mChunkSize && !mChunks.empty()); + + // padding for alignment + size_t unalignedStart = reinterpret_cast<size_t>(mChunks[mChunkIndex]+mOffset); + PxU32 pad = PxU32(((unalignedStart+alignment-1)&~(size_t(alignment)-1)) - unalignedStart); + + if (mOffset + size + pad > mChunkSize) + { + mChunkIndex++; + mOffset = 0; + if (mChunkIndex >= mChunks.size()) + mChunks.pushBack(static_cast<PxU8*>(PX_ALLOC(mChunkSize, "PxU8"))); + + // update padding to ensure new alloc is aligned + unalignedStart = reinterpret_cast<size_t>(mChunks[mChunkIndex]); + pad = PxU32(((unalignedStart+alignment-1)&~(size_t(alignment)-1)) - unalignedStart); + } + + void* ptr = mChunks[mChunkIndex] + mOffset + pad; + PX_ASSERT((reinterpret_cast<size_t>(ptr)&(size_t(alignment)-1)) == 0); + mOffset += size + pad; + return ptr; + } + + void clear(PxU32 spareChunkCount = 
sSpareChunkCount) + { + Ps::Mutex::ScopedLock lock(mMutex); + + clearNotThreadSafe(spareChunkCount); + } + + void clearNotThreadSafe(PxU32 spareChunkCount = sSpareChunkCount) + { + PX_UNUSED(spareChunkCount); + + //release memory not used previously + PxU32 targetSize = mChunkIndex+sSpareChunkCount; + while (mChunks.size() > targetSize) + PX_FREE(mChunks.popBack()); + + mChunkIndex = 0; + mOffset = 0; + } + + void resetNotThreadSafe() + { + PxU8* firstChunk = mChunks[0]; + + for (PxU32 i = 1; i < mChunks.size(); ++i) + PX_FREE(mChunks[i]); + + mChunks.clear(); + mChunks.pushBack(firstChunk); + mChunkIndex = 0; + mOffset = 0; + } + + void lock() + { + mMutex.lock(); + } + + void unlock() + { + mMutex.unlock(); + } + + private: + Ps::Mutex mMutex; + Ps::Array<PxU8*> mChunks; + PxU32 mChunkIndex; + PxU32 mOffset; + PxU32 mChunkSize; + }; + + +} // namespace Cm + +} + +#endif |