aboutsummaryrefslogtreecommitdiff
path: root/PxShared/include
diff options
context:
space:
mode:
authorgit perforce import user <a@b>2016-10-25 12:29:14 -0600
committerSheikh Dawood Abdul Ajees <Sheikh Dawood Abdul Ajees>2016-10-25 18:56:37 -0500
commit3dfe2108cfab31ba3ee5527e217d0d8e99a51162 (patch)
treefa6485c169e50d7415a651bf838f5bcd0fd3bfbd /PxShared/include
downloadphysx-3.4-3dfe2108cfab31ba3ee5527e217d0d8e99a51162.tar.xz
physx-3.4-3dfe2108cfab31ba3ee5527e217d0d8e99a51162.zip
Initial commit:
PhysX 3.4.0 Update @ 21294896 APEX 1.4.0 Update @ 21275617 [CL 21300167]
Diffstat (limited to 'PxShared/include')
-rw-r--r--PxShared/include/cudamanager/PxCudaContextManager.h418
-rw-r--r--PxShared/include/cudamanager/PxCudaMemoryManager.h281
-rw-r--r--PxShared/include/cudamanager/PxGpuCopyDesc.h86
-rw-r--r--PxShared/include/cudamanager/PxGpuCopyDescQueue.h149
-rw-r--r--PxShared/include/filebuf/PxFileBuf.h337
-rw-r--r--PxShared/include/foundation/Px.h92
-rw-r--r--PxShared/include/foundation/PxAllocatorCallback.h95
-rw-r--r--PxShared/include/foundation/PxAssert.h95
-rw-r--r--PxShared/include/foundation/PxBitAndData.h87
-rw-r--r--PxShared/include/foundation/PxBounds3.h480
-rw-r--r--PxShared/include/foundation/PxErrorCallback.h73
-rw-r--r--PxShared/include/foundation/PxErrors.h93
-rw-r--r--PxShared/include/foundation/PxFlags.h375
-rw-r--r--PxShared/include/foundation/PxFoundation.h147
-rw-r--r--PxShared/include/foundation/PxFoundationVersion.h65
-rw-r--r--PxShared/include/foundation/PxIO.h138
-rw-r--r--PxShared/include/foundation/PxIntrinsics.h45
-rw-r--r--PxShared/include/foundation/PxMat33.h396
-rw-r--r--PxShared/include/foundation/PxMat44.h376
-rw-r--r--PxShared/include/foundation/PxMath.h338
-rw-r--r--PxShared/include/foundation/PxMathUtils.h73
-rw-r--r--PxShared/include/foundation/PxMemory.h110
-rw-r--r--PxShared/include/foundation/PxPlane.h145
-rw-r--r--PxShared/include/foundation/PxPreprocessor.h529
-rw-r--r--PxShared/include/foundation/PxProfiler.h116
-rw-r--r--PxShared/include/foundation/PxQuat.h403
-rw-r--r--PxShared/include/foundation/PxSimpleTypes.h112
-rw-r--r--PxShared/include/foundation/PxStrideIterator.h353
-rw-r--r--PxShared/include/foundation/PxTransform.h215
-rw-r--r--PxShared/include/foundation/PxUnionCast.h64
-rw-r--r--PxShared/include/foundation/PxVec2.h347
-rw-r--r--PxShared/include/foundation/PxVec3.h393
-rw-r--r--PxShared/include/foundation/PxVec4.h376
-rw-r--r--PxShared/include/foundation/unix/PxUnixIntrinsics.h181
-rw-r--r--PxShared/include/foundation/windows/PxWindowsFoundationDelayLoadHook.h71
-rw-r--r--PxShared/include/foundation/windows/PxWindowsIntrinsics.h188
-rw-r--r--PxShared/include/pvd/PxPvd.h191
-rw-r--r--PxShared/include/pvd/PxPvdTransport.h129
-rw-r--r--PxShared/include/pvd/windows/PxWindowsPvdDelayLoadHook.h60
-rw-r--r--PxShared/include/task/PxCpuDispatcher.h79
-rw-r--r--PxShared/include/task/PxGpuDispatcher.h248
-rw-r--r--PxShared/include/task/PxGpuTask.h118
-rw-r--r--PxShared/include/task/PxTask.h362
-rw-r--r--PxShared/include/task/PxTaskDefine.h37
-rw-r--r--PxShared/include/task/PxTaskManager.h234
45 files changed, 9300 insertions, 0 deletions
diff --git a/PxShared/include/cudamanager/PxCudaContextManager.h b/PxShared/include/cudamanager/PxCudaContextManager.h
new file mode 100644
index 00000000..8e84b3b4
--- /dev/null
+++ b/PxShared/include/cudamanager/PxCudaContextManager.h
@@ -0,0 +1,418 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2016 NVIDIA Corporation. All rights reserved.
+
+
+#ifndef PXCUDACONTEXTMANAGER_PXCUDACONTEXTMANAGER_H
+#define PXCUDACONTEXTMANAGER_PXCUDACONTEXTMANAGER_H
+
+#include "foundation/PxPreprocessor.h"
+
+#if PX_SUPPORT_GPU_PHYSX
+
+#include "foundation/PxSimpleTypes.h"
+#include "foundation/PxErrorCallback.h"
+#include "foundation/PxFlags.h"
+#include "task/PxTaskDefine.h"
+#include "cudamanager/PxCudaMemoryManager.h"
+
+/* Forward decl to avoid inclusion of cuda.h */
+typedef struct CUctx_st *CUcontext;
+typedef struct CUgraphicsResource_st *CUgraphicsResource;
+
+namespace physx
+{
+
+class PxGpuDispatcher;
+
+
+/** \brief Possible graphic/CUDA interoperability modes for context */
+struct PxCudaInteropMode
+{
+ /**
+ * \brief Possible graphic/CUDA interoperability modes for context
+ */
+ enum Enum
+ {
+ NO_INTEROP = 0,
+ D3D10_INTEROP,
+ D3D11_INTEROP,
+ OGL_INTEROP,
+
+ COUNT
+ };
+};
+
+struct PxCudaInteropRegisterFlag
+{
+ enum Enum
+ {
+ eNONE = 0x00,
+ eREAD_ONLY = 0x01,
+ eWRITE_DISCARD = 0x02,
+ eSURFACE_LDST = 0x04,
+ eTEXTURE_GATHER = 0x08
+ };
+};
+
+/**
+\brief collection of set bits defined in PxCudaInteropRegisterFlag.
+
+@see PxCudaInteropRegisterFlag
+*/
+typedef PxFlags<PxCudaInteropRegisterFlag::Enum, uint32_t> PxCudaInteropRegisterFlags;
+PX_FLAGS_OPERATORS(PxCudaInteropRegisterFlag::Enum, uint32_t)
+
+//! \brief Descriptor used to create a PxCudaContextManager
+class PxCudaContextManagerDesc
+{
+public:
+ /**
+ * \brief The CUDA context to manage
+ *
+ * If left NULL, the PxCudaContextManager will create a new context. If
+ * graphicsDevice is also not NULL, this new CUDA context will be bound to
+ * that graphics device, enabling the use of CUDA/Graphics interop features.
+ *
+ * If ctx is not NULL, the specified context must be applied to the thread
+ * that is allocating the PxCudaContextManager at creation time (aka, it
+ * cannot be popped). The PxCudaContextManager will take ownership of the
+ * context until the manager is released. All access to the context must be
+ * gated by lock acquisition.
+ *
+ * If the user provides a context for the PxCudaContextManager, the context
+ * _must_ have either been created on the GPU ordinal returned by
+ * PxGetSuggestedCudaDeviceOrdinal() or on your graphics device.
+ *
+ * It is perfectly acceptable to allocate device or host pinned memory from
+ * the context outside the scope of the PxCudaMemoryManager, so long as you
+ * manage its eventual cleanup.
+ */
+ CUcontext *ctx;
+
+ /**
+ * \brief D3D device pointer or OpenGl context handle
+ *
+ * Only applicable when ctx is NULL, thus forcing a new context to be
+ * created. In that case, the created context will be bound to this
+ * graphics device.
+ */
+ void *graphicsDevice;
+
+#if PX_SUPPORT_GPU_PHYSX
+ /**
+ * \brief Application-specific GUID
+ *
+ * If your application employs PhysX modules that use CUDA you need to use a GUID
+ * so that patches for new architectures can be released for your game. You can obtain a GUID for your
+ * application from Nvidia.
+ */
+ const char* appGUID;
+#endif
+ /**
+ * \brief The CUDA/Graphics interop mode of this context
+ *
+ * If ctx is NULL, this value describes the nature of the graphicsDevice
+ * pointer provided by the user. Else it describes the nature of the
+ * context provided by the user.
+ */
+ PxCudaInteropMode::Enum interopMode;
+
+
+ /**
+ * \brief Size of persistent memory
+ *
+ * This memory is allocated up front and stays allocated until the
+ * PxCudaContextManager is released. Size is in bytes, has to be power of two
+ * and bigger than the page size. Set to 0 to only use dynamic pages.
+ *
+ * Note: On Vista O/S and above, there is a per-memory allocation overhead
+ * to every CUDA work submission, so we recommend that you carefully tune
+ * this initial base memory size to closely approximate the amount of
+ * memory your application will consume.
+
+ Note: This is currently not used by PxSceneFlag::eENABLE_GPU_DYNAMICS. Memory allocation properties are configured
+ for GPU rigid bodies using PxSceneDesc::gpuDynamicsConfig.
+ */
+ uint32_t memoryBaseSize[PxCudaBufferMemorySpace::COUNT];
+
+ /**
+ * \brief Size of memory pages
+ *
+ * The memory manager will dynamically grow and shrink in blocks multiple of
+ * this page size. Size has to be power of two and bigger than 0.
+
+ Note: This is currently not used by PxSceneFlag::eENABLE_GPU_DYNAMICS. Memory allocation properties are configured
+ for GPU rigid bodies using PxSceneDesc::gpuDynamicsConfig.
+ */
+ uint32_t memoryPageSize[PxCudaBufferMemorySpace::COUNT];
+
+ /**
+ * \brief Maximum size of memory that the memory manager will allocate
+
+ Note: This is currently not used by PxSceneFlag::eENABLE_GPU_DYNAMICS. Memory allocation properties are configured
+ for GPU rigid bodies using PxSceneDesc::gpuDynamicsConfig.
+ */
+ uint32_t maxMemorySize[PxCudaBufferMemorySpace::COUNT];
+
+ PX_INLINE PxCudaContextManagerDesc()
+ {
+ ctx = NULL;
+ interopMode = PxCudaInteropMode::NO_INTEROP;
+ graphicsDevice = 0;
+#if PX_SUPPORT_GPU_PHYSX
+ appGUID = NULL;
+#endif
+ for(uint32_t i = 0; i < PxCudaBufferMemorySpace::COUNT; i++)
+ {
+ memoryBaseSize[i] = 0;
+ memoryPageSize[i] = 2 * 1024*1024;
+ maxMemorySize[i] = UINT32_MAX;
+ }
+ }
+};
+
+
+/**
+ * \brief Manages memory, thread locks, and task scheduling for a CUDA context
+ *
+ * A PxCudaContextManager manages access to a single CUDA context, allowing it to
+ * be shared between multiple scenes. Memory allocations are dynamic: starting
+ * with an initial heap size and growing on demand by a configurable page size.
+ * The context must be acquired from the manager before using any CUDA APIs.
+ *
+ * The PxCudaContextManager is based on the CUDA driver API and explicitly does not
+ * support the CUDA runtime API (aka, CUDART).
+ *
+ * To enable CUDA use by an APEX scene, a PxCudaContextManager must be created
+ * (supplying your own CUDA context, or allowing a new context to be allocated
+ * for you), the PxGpuDispatcher for that context is retrieved via the
+ * getGpuDispatcher() method, and this is assigned to the TaskManager that is
+ * given to the scene via its NxApexSceneDesc.
+ */
+class PxCudaContextManager
+{
+public:
+ /**
+ * \brief Acquire the CUDA context for the current thread
+ *
+ * Acquisitions are allowed to be recursive within a single thread.
+ * You can acquire the context multiple times so long as you release
+ * it the same count.
+ *
+ * The context must be acquired before using most CUDA functions.
+ *
+ * It is not necessary to acquire the CUDA context inside GpuTask
+ * launch functions, because the PxGpuDispatcher will have already
+ * acquired the context for its worker thread. However it is not
+ * harmful to (re)acquire the context in code that is shared between
+ * GpuTasks and non-task functions.
+ */
+ virtual void acquireContext() = 0;
+
+ /**
+ * \brief Release the CUDA context from the current thread
+ *
+ * The CUDA context should be released as soon as practically
+ * possible, to allow other CPU threads (including the
+ * PxGpuDispatcher) to work efficiently.
+ */
+ virtual void releaseContext() = 0;
+
+ /**
+ * \brief Return the PxCudaMemoryManager instance associated with this
+ * CUDA context
+ * Note: This is currently not used by PxSceneFlag::eENABLE_GPU_DYNAMICS. Memory allocation properties are configured
+ * for GPU rigid bodies using PxSceneDesc::gpuDynamicsConfig.
+ */
+ virtual PxCudaMemoryManager *getMemoryManager() = 0;
+
+ /**
+ * \brief Return the PxGpuDispatcher instance associated with this
+ * CUDA context
+ */
+ virtual class physx::PxGpuDispatcher *getGpuDispatcher() = 0;
+
+ /**
+ * \brief Context manager has a valid CUDA context
+ *
+ * This method should be called after creating a PxCudaContextManager,
+ * especially if the manager was responsible for allocating its own
+ * CUDA context (desc.ctx == NULL). If it returns false, there is
+ * no point in assigning this manager's PxGpuDispatcher to a
+ * TaskManager as it will be unable to execute GpuTasks.
+ */
+ virtual bool contextIsValid() const = 0;
+
+ /* Query CUDA context and device properties, without acquiring context */
+
+ virtual bool supportsArchSM10() const = 0; //!< G80
+ virtual bool supportsArchSM11() const = 0; //!< G92
+ virtual bool supportsArchSM12() const = 0; //!< GT200
+ virtual bool supportsArchSM13() const = 0; //!< GT260
+ virtual bool supportsArchSM20() const = 0; //!< GF100
+ virtual bool supportsArchSM30() const = 0; //!< GK100
+ virtual bool supportsArchSM35() const = 0; //!< GK110
+ virtual bool supportsArchSM50() const = 0; //!< GM100
+ virtual bool supportsArchSM52() const = 0; //!< GM200
+ virtual bool supportsArchSM60() const = 0; //!< GP100
+ virtual bool isIntegrated() const = 0; //!< true if GPU is an integrated (MCP) part
+ virtual bool canMapHostMemory() const = 0; //!< true if GPU can map host memory to GPU (0-copy)
+ virtual int getDriverVersion() const = 0; //!< returns cached value of cuGetDriverVersion()
+ virtual size_t getDeviceTotalMemBytes() const = 0; //!< returns cached value of device memory size
+ virtual int getMultiprocessorCount() const = 0; //!< returns cached value of SM unit count
+ virtual unsigned int getClockRate() const = 0; //!< returns cached value of SM clock frequency
+ virtual int getSharedMemPerBlock() const = 0; //!< returns total amount of shared memory available per block in bytes
+ virtual int getSharedMemPerMultiprocessor() const = 0; //!< returns total amount of shared memory available per multiprocessor in bytes
+ virtual unsigned int getMaxThreadsPerBlock() const = 0; //!< returns the maximum number of threads per block
+ virtual const char *getDeviceName() const = 0; //!< returns device name retrieved from driver
+ virtual PxCudaInteropMode::Enum getInteropMode() const = 0; //!< interop mode the context was created with
+
+ virtual void setUsingConcurrentStreams(bool) = 0; //!< turn on/off using concurrent streams for GPU work
+ virtual bool getUsingConcurrentStreams() const = 0; //!< true if GPU work can run in concurrent streams
+ /* End query methods that don't require context to be acquired */
+
+ /**
+ * \brief Register a rendering resource with CUDA
+ *
+ * This function is called to register render resources (allocated
+ * from OpenGL) with CUDA so that the memory may be shared
+ * between the two systems. This is only required for render
+ * resources that are designed for interop use. In APEX, each
+ * render resource descriptor that could support interop has a
+ * 'registerInCUDA' boolean variable.
+ *
+ * The function must be called again any time your graphics device
+ * is reset, to re-register the resource.
+ *
+ * Returns true if the registration succeeded. A registered
+ * resource must be unregistered before it can be released.
+ *
+ * \param resource [OUT] the handle to the resource that can be used with CUDA
+ * \param buffer [IN] GLuint buffer index to be mapped to cuda
+ * \param flags [IN] cuda interop registration flags
+ */
+ virtual bool registerResourceInCudaGL(CUgraphicsResource &resource, uint32_t buffer, PxCudaInteropRegisterFlags flags = PxCudaInteropRegisterFlags()) = 0;
+
+ /**
+ * \brief Register a rendering resource with CUDA
+ *
+ * This function is called to register render resources (allocated
+ * from Direct3D) with CUDA so that the memory may be shared
+ * between the two systems. This is only required for render
+ * resources that are designed for interop use. In APEX, each
+ * render resource descriptor that could support interop has a
+ * 'registerInCUDA' boolean variable.
+ *
+ * The function must be called again any time your graphics device
+ * is reset, to re-register the resource.
+ *
+ * Returns true if the registration succeeded. A registered
+ * resource must be unregistered before it can be released.
+ *
+ * \param resource [OUT] the handle to the resource that can be used with CUDA
+ * \param resourcePointer [IN] A pointer to either IDirect3DResource9, or ID3D10Device, or ID3D11Resource to be registered.
+ * \param flags [IN] cuda interop registration flags
+ */
+ virtual bool registerResourceInCudaD3D(CUgraphicsResource &resource, void *resourcePointer, PxCudaInteropRegisterFlags flags = PxCudaInteropRegisterFlags()) = 0;
+
+ /**
+ * \brief Unregister a rendering resource with CUDA
+ *
+ * If a render resource was successfully registered with CUDA using
+ * the registerResourceInCuda***() methods, this function must be called
+ * to unregister the resource before it can be released.
+ */
+ virtual bool unregisterResourceInCuda(CUgraphicsResource resource) = 0;
+
+ /**
+ * \brief Determine if the user has configured a dedicated PhysX GPU in the NV Control Panel
+ * \note If using CUDA Interop, this will always return false
+ * \returns 1 if there is a dedicated GPU
+ * 0 if there is NOT a dedicated GPU
+ * -1 if the routine is not implemented
+ */
+ virtual int usingDedicatedGPU() const = 0;
+
+ /**
+ * \brief Release the PxCudaContextManager
+ *
+ * When the manager instance is released, it also releases its
+ * PxGpuDispatcher instance and PxCudaMemoryManager. Before the memory
+ * manager is released, it frees all allocated memory pages. If the
+ * PxCudaContextManager created the CUDA context it was responsible
+ * for, it also frees that context.
+ *
+ * Do not release the PxCudaContextManager if there are any scenes
+ * using its PxGpuDispatcher. Those scenes must be released first
+ * since there is no safe way to remove a PxGpuDispatcher from a
+ * TaskManager once the TaskManager has been given to a scene.
+ *
+ */
+ virtual void release() = 0;
+
+protected:
+
+ /**
+ * \brief protected destructor, use release() method
+ */
+ virtual ~PxCudaContextManager() {}
+};
+
+/**
+ * \brief Convenience class for holding CUDA lock within a scope
+ */
+class PxScopedCudaLock
+{
+public:
+ /**
+ * \brief ScopedCudaLock constructor
+ */
+ PxScopedCudaLock(PxCudaContextManager& ctx) : mCtx(&ctx)
+ {
+ mCtx->acquireContext();
+ }
+
+ /**
+ * \brief ScopedCudaLock destructor
+ */
+ ~PxScopedCudaLock()
+ {
+ mCtx->releaseContext();
+ }
+
+protected:
+
+ /**
+ * \brief CUDA context manager pointer (initialized in the constructor)
+ */
+ PxCudaContextManager* mCtx;
+};
+
+} // end physx namespace
+
+#endif // PX_SUPPORT_GPU_PHYSX
+#endif // PXCUDACONTEXTMANAGER_PXCUDACONTEXTMANAGER_H
diff --git a/PxShared/include/cudamanager/PxCudaMemoryManager.h b/PxShared/include/cudamanager/PxCudaMemoryManager.h
new file mode 100644
index 00000000..6798faba
--- /dev/null
+++ b/PxShared/include/cudamanager/PxCudaMemoryManager.h
@@ -0,0 +1,281 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2016 NVIDIA Corporation. All rights reserved.
+
+#ifndef PXCUDACONTEXTMANAGER_PXCUDAMEMORYMANAGER_H
+#define PXCUDACONTEXTMANAGER_PXCUDAMEMORYMANAGER_H
+
+#include "foundation/PxPreprocessor.h"
+
+#if PX_SUPPORT_GPU_PHYSX
+
+#include "task/PxTaskDefine.h"
+
+// some macros to keep the source code more readable
+#define PX_ALLOC_INFO(name, ID) __FILE__, __LINE__, name, physx::PxAllocId::ID
+#define PX_ALLOC_INFO_PARAMS_DECL(p0, p1, p2, p3) const char* file = p0, int line = p1, const char* allocName = p2, physx::PxAllocId::Enum allocId = physx::PxAllocId::p3
+#define PX_ALLOC_INFO_PARAMS_DEF() const char* file, int line, const char* allocName, physx::PxAllocId::Enum allocId
+#define PX_ALLOC_INFO_PARAMS_INPUT() file, line, allocName, allocId
+#define PX_ALLOC_INFO_PARAMS_INPUT_INFO(info) info.getFileName(), info.getLine(), info.getAllocName(), info.getAllocId()
+
+#ifndef NULL // don't want to include <string.h>
+#define NULL 0
+#endif
+
+namespace physx
+{
+
+PX_PUSH_PACK_DEFAULT
+
+/** \brief ID of the Feature which owns/allocated memory from the heap
+ *
+ * Maximum of 64k IDs allowed.
+ */
+struct PxAllocId
+{
+ /**
+ * \brief ID of the Feature which owns/allocated memory from the heap
+ */
+ enum Enum
+ {
+ UNASSIGNED, //!< default
+ APEX, //!< APEX stuff not further classified
+ PARTICLES, //!< all particle related
+ GPU_UTIL, //!< e.g. RadixSort (used in SPH and deformable self collision)
+ CLOTH, //!< all cloth related
+ NUM_IDS //!< number of IDs, be aware that ApexHeapStats contains PxAllocIdStats[NUM_IDS]
+ };
+};
+
+/// \brief memory type managed by a heap
+struct PxCudaBufferMemorySpace
+{
+ /**
+ * \brief memory type managed by a heap
+ */
+ enum Enum
+ {
+ T_GPU,
+ T_PINNED_HOST,
+ T_WRITE_COMBINED,
+ T_HOST,
+ COUNT
+ };
+};
+
+/// \brief class to track allocation statistics, see PxgMirrored
+class PxAllocInfo
+{
+public:
+ /**
+ * \brief AllocInfo default constructor
+ */
+ PxAllocInfo() {}
+
+ /**
+ * \brief AllocInfo constructor that initializes all of the members
+ */
+ PxAllocInfo(const char* file, int line, const char* allocName, PxAllocId::Enum allocId)
+ : mFileName(file)
+ , mLine(line)
+ , mAllocName(allocName)
+ , mAllocId(allocId)
+ {}
+
+ /// \brief get the allocation file name
+ inline const char* getFileName() const
+ {
+ return mFileName;
+ }
+
+ /// \brief get the allocation line
+ inline int getLine() const
+ {
+ return mLine;
+ }
+
+ /// \brief get the allocation name
+ inline const char* getAllocName() const
+ {
+ return mAllocName;
+ }
+
+ /// \brief get the allocation ID
+ inline PxAllocId::Enum getAllocId() const
+ {
+ return mAllocId;
+ }
+
+private:
+ const char* mFileName;
+ int mLine;
+ const char* mAllocName;
+ PxAllocId::Enum mAllocId;
+};
+
+/// \brief statistics collected per AllocationId by HeapManager.
+struct PxAllocIdStats
+{
+ size_t size; //!< currently allocated memory by this ID
+ size_t maxSize; //!< max allocated memory by this ID
+ size_t elements; //!< number of current allocations by this ID
+ size_t maxElements; //!< max number of allocations by this ID
+};
+
+class PxCudaMemoryManager;
+typedef size_t PxCudaBufferPtr;
+
+/// \brief Hint flag to tell how the buffer will be used
+struct PxCudaBufferFlags
+{
+/// \brief Enumerations for the hint flag to tell how the buffer will be used
+ enum Enum
+ {
+ F_READ = (1 << 0),
+ F_WRITE = (1 << 1),
+ F_READ_WRITE = F_READ | F_WRITE
+ };
+};
+
+
+/// \brief Memory statistics struct returned by CudaMemMgr::getStats()
+struct PxCudaMemoryManagerStats
+{
+
+ size_t heapSize; //!< Size of all pages allocated for this memory type (allocated + free).
+ size_t totalAllocated; //!< Size occupied by the current allocations.
+ size_t maxAllocated; //!< High water mark of allocations since the SDK was created.
+ PxAllocIdStats allocIdStats[PxAllocId::NUM_IDS]; //!< Stats for each allocation ID, see PxAllocIdStats
+};
+
+
+/// \brief Buffer type: made of hint flags and the memory space (Device Memory, Pinned Host Memory, ...)
+struct PxCudaBufferType
+{
+ /// \brief PxCudaBufferType copy constructor
+ PX_INLINE PxCudaBufferType(const PxCudaBufferType& t)
+ : memorySpace(t.memorySpace)
+ , flags(t.flags)
+ {}
+
+ /// \brief PxCudaBufferType constructor to explicitly assign members
+ PX_INLINE PxCudaBufferType(PxCudaBufferMemorySpace::Enum _memSpace, PxCudaBufferFlags::Enum _flags)
+ : memorySpace(_memSpace)
+ , flags(_flags)
+ {}
+
+ PxCudaBufferMemorySpace::Enum memorySpace; //!< specifies which memory space for the buffer
+ PxCudaBufferFlags::Enum flags; //!< specifies the usage flags for the buffer
+};
+
+
+/// \brief Buffer which keeps information about an allocated piece of memory.
+class PxCudaBuffer
+{
+public:
+ /// Retrieves the manager over which the buffer was allocated.
+ virtual PxCudaMemoryManager* getCudaMemoryManager() const = 0;
+
+ /// Releases the buffer and the memory it used, returns true if successful.
+ virtual bool free() = 0;
+
+ /// Realloc memory. Use to shrink or resize the allocated chunk of memory of this buffer.
+ /// Returns true if successful. Fails if the operation would change the address and need a memcopy.
+ /// In that case the user has to allocate, copy and free the memory with separate steps.
+ /// Realloc to size 0 always returns false and doesn't change the state.
+ virtual bool realloc(size_t size, PX_ALLOC_INFO_PARAMS_DECL(NULL, 0, NULL, UNASSIGNED)) = 0;
+
+ /// Returns the type of the allocated memory.
+ virtual const PxCudaBufferType& getType() const = 0;
+
+ /// Returns the pointer to the allocated memory.
+ virtual PxCudaBufferPtr getPtr() const = 0;
+
+ /// Returns the size of the allocated memory.
+ virtual size_t getSize() const = 0;
+
+protected:
+ /// \brief protected destructor
+ virtual ~PxCudaBuffer() {}
+};
+
+
+/// \brief Allocator class for different kinds of CUDA related memory.
+class PxCudaMemoryManager
+{
+public:
+ /// Allocate memory of given type and size. Returns a CudaBuffer if successful. Returns NULL if failed.
+ virtual PxCudaBuffer* alloc(const PxCudaBufferType& type, size_t size, PX_ALLOC_INFO_PARAMS_DECL(NULL, 0, NULL, UNASSIGNED)) = 0;
+
+ /// Basic heap allocator without PxCudaBuffer
+ virtual PxCudaBufferPtr alloc(PxCudaBufferMemorySpace::Enum memorySpace, size_t size, PX_ALLOC_INFO_PARAMS_DECL(NULL, 0, NULL, UNASSIGNED)) = 0;
+
+ /// Basic heap deallocator without PxCudaBuffer
+ virtual bool free(PxCudaBufferMemorySpace::Enum memorySpace, PxCudaBufferPtr addr) = 0;
+
+ /// Basic heap realloc without PxCudaBuffer
+ virtual bool realloc(PxCudaBufferMemorySpace::Enum memorySpace, PxCudaBufferPtr addr, size_t size, PX_ALLOC_INFO_PARAMS_DECL(NULL, 0, NULL, UNASSIGNED)) = 0;
+
+ /// Retrieve stats for the memory of given type. See PxCudaMemoryManagerStats.
+ virtual void getStats(const PxCudaBufferType& type, PxCudaMemoryManagerStats& outStats) = 0;
+
+ /// Ensure that a given amount of free memory is available. Triggers CUDA allocations in size of (2^n * pageSize) if necessary.
+ /// Returns false if page allocations failed.
+ virtual bool reserve(const PxCudaBufferType& type, size_t size) = 0;
+
+ /// Set the page size. The managed memory grows by blocks 2^n * pageSize. Page allocations trigger CUDA driver allocations,
+ /// so the page size should be reasonably big. Returns false if input size was invalid, i.e. not power of two.
+ /// Default is 2 MB.
+ virtual bool setPageSize(const PxCudaBufferType& type, size_t size) = 0;
+
+ /// Set the upper limit until which pages of a given memory type can be allocated.
+ /// Reducing the max when it is already hit does not shrink the memory until it is deallocated by releasing the buffers which own the memory.
+ virtual bool setMaxMemorySize(const PxCudaBufferType& type, size_t size) = 0;
+
+ /// Returns the base size. The base memory block stays persistently allocated over the SDKs life time.
+ virtual size_t getBaseSize(const PxCudaBufferType& type) = 0;
+
+ /// Returns the currently set page size. The memory grows and shrinks in blocks of size (2^n pageSize)
+ virtual size_t getPageSize(const PxCudaBufferType& type) = 0;
+
+ /// Returns the upper limit until which the manager is allowed to allocate additional pages from the CUDA driver.
+ virtual size_t getMaxMemorySize(const PxCudaBufferType& type) = 0;
+
+ /// Get device mapped pinned host mem ptr. Operation only valid for memory space PxCudaBufferMemorySpace::T_PINNED_HOST.
+ virtual PxCudaBufferPtr getMappedPinnedPtr(PxCudaBufferPtr hostPtr) = 0;
+
+protected:
+ /// \brief protected destructor
+ virtual ~PxCudaMemoryManager() {}
+};
+
+PX_POP_PACK
+
+
+} // end physx namespace
+
+#endif // PX_SUPPORT_GPU_PHYSX
+#endif // PXCUDACONTEXTMANAGER_PXCUDAMEMORYMANAGER_H
diff --git a/PxShared/include/cudamanager/PxGpuCopyDesc.h b/PxShared/include/cudamanager/PxGpuCopyDesc.h
new file mode 100644
index 00000000..5704e327
--- /dev/null
+++ b/PxShared/include/cudamanager/PxGpuCopyDesc.h
@@ -0,0 +1,86 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2016 NVIDIA Corporation. All rights reserved.
+
+#ifndef PXCUDACONTEXTMANAGER_PXGPUCOPYDESC_H
+#define PXCUDACONTEXTMANAGER_PXGPUCOPYDESC_H
+
+#include "foundation/PxPreprocessor.h"
+
+#if PX_SUPPORT_GPU_PHYSX
+
+#include "task/PxTaskDefine.h"
+
+namespace physx
+{
+
+PX_PUSH_PACK_DEFAULT
+
+/**
+ * \brief Input descriptor for the GpuDispatcher's built-in copy kernel
+ *
+ * All host memory involved in copy transactions must be page-locked.
+ * If more than one descriptor is passed to the copy kernel in one launch,
+ * the descriptors themselves must be in page-locked memory.
+ */
+struct PxGpuCopyDesc
+{
+ /**
+ * \brief Input descriptor for the GpuDispatcher's built-in copy kernel
+ */
+ enum CopyType
+ {
+ HostToDevice,
+ DeviceToHost,
+ DeviceToDevice,
+ DeviceMemset32
+ };
+
+ size_t dest; //!< the destination
+	size_t	 source;	//!< the source (32bit value when type == DeviceMemset32)
+ size_t bytes; //!< the size in bytes
+ CopyType type; //!< the memory transaction type
+
+ /**
+ * \brief Copy is optimally performed as 64bit words, requires 64bit alignment. But it can
+ * gracefully degrade to 32bit copies if necessary
+ */
+ PX_INLINE bool isValid()
+ {
+ bool ok = true;
+ ok &= ((dest & 0x3) == 0);
+ ok &= ((type == DeviceMemset32) || (source & 0x3) == 0);
+ ok &= ((bytes & 0x3) == 0);
+ return ok;
+ }
+};
+
+PX_POP_PACK
+
+} // end physx namespace
+
+#endif // PX_SUPPORT_GPU_PHYSX
+#endif // PXCUDACONTEXTMANAGER_PXGPUCOPYDESC_H
diff --git a/PxShared/include/cudamanager/PxGpuCopyDescQueue.h b/PxShared/include/cudamanager/PxGpuCopyDescQueue.h
new file mode 100644
index 00000000..3cb0067a
--- /dev/null
+++ b/PxShared/include/cudamanager/PxGpuCopyDescQueue.h
@@ -0,0 +1,149 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2016 NVIDIA Corporation. All rights reserved.
+
+#ifndef PXCUDACONTEXTMANAGER_PXGPUCOPYDESCQUEUE_H
+#define PXCUDACONTEXTMANAGER_PXGPUCOPYDESCQUEUE_H
+
+#include "foundation/PxPreprocessor.h"
+
+#if PX_SUPPORT_GPU_PHYSX
+
+#include "foundation/PxAssert.h"
+#include "task/PxTaskDefine.h"
+#include "task/PxGpuDispatcher.h"
+#include "cudamanager/PxGpuCopyDesc.h"
+#include "cudamanager/PxCudaContextManager.h"
+
+/* forward decl to avoid including <cuda.h> */
+typedef struct CUstream_st* CUstream;
+
+namespace physx
+{
+
+PX_PUSH_PACK_DEFAULT
+
+/// \brief Container class for queueing PxGpuCopyDesc instances in pinned (non-pageable) CPU memory
+class PxGpuCopyDescQueue
+{
+public:
+ /// \brief PxGpuCopyDescQueue constructor
+ PxGpuCopyDescQueue(PxGpuDispatcher& d)
+ : mDispatcher(d)
+ , mBuffer(0)
+ , mStream(0)
+ , mReserved(0)
+ , mOccupancy(0)
+ , mFlushed(0)
+ {
+ }
+
+ /// \brief PxGpuCopyDescQueue destructor
+ ~PxGpuCopyDescQueue()
+ {
+ if (mBuffer)
+ {
+ mDispatcher.getCudaContextManager()->getMemoryManager()->free(PxCudaBufferMemorySpace::T_PINNED_HOST, (size_t) mBuffer);
+ }
+ }
+
+ /// \brief Reset the enqueued copy descriptor list
+ ///
+ /// Must be called at least once before any copies are enqueued, and each time the launched
+ /// copies are known to have been completed. The recommended use case is to call this at the
+ /// start of each simulation step.
+ void reset(CUstream stream, uint32_t reserveSize)
+ {
+ if (reserveSize > mReserved)
+ {
+ if (mBuffer)
+ {
+ mDispatcher.getCudaContextManager()->getMemoryManager()->free(
+ PxCudaBufferMemorySpace::T_PINNED_HOST,
+ (size_t) mBuffer);
+ mReserved = 0;
+ }
+ mBuffer = (PxGpuCopyDesc*) mDispatcher.getCudaContextManager()->getMemoryManager()->alloc(
+ PxCudaBufferMemorySpace::T_PINNED_HOST,
+ reserveSize * sizeof(PxGpuCopyDesc),
+ PX_ALLOC_INFO("PxGpuCopyDescQueue", GPU_UTIL));
+ if (mBuffer)
+ {
+ mReserved = reserveSize;
+ }
+ }
+
+ mOccupancy = 0;
+ mFlushed = 0;
+ mStream = stream;
+ }
+
+ /// \brief Enqueue the specified copy descriptor, or launch immediately if no room is available
+ void enqueue(PxGpuCopyDesc& desc)
+ {
+ PX_ASSERT(desc.isValid());
+ if (desc.bytes == 0)
+ {
+ return;
+ }
+
+ if (mOccupancy < mReserved)
+ {
+ mBuffer[ mOccupancy++ ] = desc;
+ }
+ else
+ {
+ mDispatcher.launchCopyKernel(&desc, 1, mStream);
+ }
+ }
+
+ /// \brief Launch all copies queued since the last flush or reset
+ void flushEnqueued()
+ {
+ if (mOccupancy > mFlushed)
+ {
+ mDispatcher.launchCopyKernel(mBuffer + mFlushed, mOccupancy - mFlushed, mStream);
+ mFlushed = mOccupancy;
+ }
+ }
+
+private:
+ PxGpuDispatcher& mDispatcher;
+ PxGpuCopyDesc* mBuffer;
+ CUstream mStream;
+ uint32_t mReserved;
+ uint32_t mOccupancy;
+ uint32_t mFlushed;
+
+ void operator=(const PxGpuCopyDescQueue&); // prevent a warning...
+};
+
+PX_POP_PACK
+
+} // end physx namespace
+
+#endif // PX_SUPPORT_GPU_PHYSX
+#endif // PXCUDACONTEXTMANAGER_PXGPUCOPYDESCQUEUE_H
diff --git a/PxShared/include/filebuf/PxFileBuf.h b/PxShared/include/filebuf/PxFileBuf.h
new file mode 100644
index 00000000..5be7bcd8
--- /dev/null
+++ b/PxShared/include/filebuf/PxFileBuf.h
@@ -0,0 +1,337 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2016 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+
+#ifndef PSFILEBUFFER_PXFILEBUF_H
+#define PSFILEBUFFER_PXFILEBUF_H
+
+/** \addtogroup foundation
+ @{
+*/
+
+#if !PX_DOXYGEN
+namespace physx
+{
+
+namespace general_PxIOStream2
+{
+#endif
+
+PX_PUSH_PACK_DEFAULT
+
+/**
+\brief Callback class for data serialization.
+
+The user needs to supply an PxFileBuf implementation to a number of methods to allow the SDK to read or write
+chunks of binary data. This allows flexibility for the source/destination of the data. For example the PxFileBuf
+could store data in a file, memory buffer or custom file format.
+
+\note It is the user's responsibility to ensure that the data is written to the appropriate offset.
+
+*/
+class PxFileBuf
+{
+public:
+
+ enum EndianMode
+ {
+ ENDIAN_NONE = 0, // do no conversion for endian mode
+ ENDIAN_BIG = 1, // always read/write data as natively big endian (Power PC, etc.)
+ ENDIAN_LITTLE = 2 // always read/write data as natively little endian (Intel, etc.) Default Behavior!
+ };
+
+ PxFileBuf(EndianMode mode=ENDIAN_LITTLE)
+ {
+ setEndianMode(mode);
+ }
+
+ virtual ~PxFileBuf(void)
+ {
+
+ }
+
+ /**
+ \brief Declares a constant to seek to the end of the stream.
+ *
+ * Does not support streams longer than 32 bits
+ */
+ static const uint32_t STREAM_SEEK_END=0xFFFFFFFF;
+
+ enum OpenMode
+ {
+ OPEN_FILE_NOT_FOUND,
+ OPEN_READ_ONLY, // open file buffer stream for read only access
+ OPEN_WRITE_ONLY, // open file buffer stream for write only access
+ OPEN_READ_WRITE_NEW, // open a new file for both read/write access
+ OPEN_READ_WRITE_EXISTING // open an existing file for both read/write access
+ };
+
+ virtual OpenMode getOpenMode(void) const = 0;
+
+ bool isOpen(void) const
+ {
+ return getOpenMode()!=OPEN_FILE_NOT_FOUND;
+ }
+
+ enum SeekType
+ {
+ SEEKABLE_NO = 0,
+ SEEKABLE_READ = 0x1,
+ SEEKABLE_WRITE = 0x2,
+ SEEKABLE_READWRITE = 0x3
+ };
+
+ virtual SeekType isSeekable(void) const = 0;
+
+ void setEndianMode(EndianMode e)
+ {
+ mEndianMode = e;
+ if ( (e==ENDIAN_BIG && !isBigEndian() ) ||
+ (e==ENDIAN_LITTLE && isBigEndian() ) )
+ {
+ mEndianSwap = true;
+ }
+ else
+ {
+ mEndianSwap = false;
+ }
+ }
+
+ EndianMode getEndianMode(void) const
+ {
+ return mEndianMode;
+ }
+
+ virtual uint32_t getFileLength(void) const = 0;
+
+ /**
+ \brief Seeks the stream to a particular location for reading
+ *
+ * If the location passed exceeds the length of the stream, then it will seek to the end.
+ * Returns the location it ended up at (useful if you seek to the end) to get the file position
+ */
+ virtual uint32_t seekRead(uint32_t loc) = 0;
+
+ /**
+ \brief Seeks the stream to a particular location for writing
+ *
+ * If the location passed exceeds the length of the stream, then it will seek to the end.
+ * Returns the location it ended up at (useful if you seek to the end) to get the file position
+ */
+ virtual uint32_t seekWrite(uint32_t loc) = 0;
+
+ /**
+ \brief Reads from the stream into a buffer.
+
+ \param[out] mem The buffer to read the stream into.
+ \param[in] len The number of bytes to stream into the buffer
+
+	\return Returns the actual number of bytes read. If not equal to the length requested, the end of the stream has been reached.
+ */
+ virtual uint32_t read(void *mem,uint32_t len) = 0;
+
+
+ /**
+ \brief Reads from the stream into a buffer but does not advance the read location.
+
+ \param[out] mem The buffer to read the stream into.
+ \param[in] len The number of bytes to stream into the buffer
+
+	\return Returns the actual number of bytes read. If not equal to the length requested, the end of the stream has been reached.
+ */
+ virtual uint32_t peek(void *mem,uint32_t len) = 0;
+
+ /**
+ \brief Writes a buffer of memory to the stream
+
+ \param[in] mem The address of a buffer of memory to send to the stream.
+ \param[in] len The number of bytes to send to the stream.
+
+	\return Returns the actual number of bytes sent to the stream. If not equal to the length specified, then the stream is full or unable to write for some reason.
+ */
+ virtual uint32_t write(const void *mem,uint32_t len) = 0;
+
+ /**
+	\brief Reports the current stream location for read access.
+
+ \return Returns the current stream read location.
+ */
+ virtual uint32_t tellRead(void) const = 0;
+
+ /**
+ \brief Reports the current stream location for write access.
+
+ \return Returns the current stream write location.
+ */
+ virtual uint32_t tellWrite(void) const = 0;
+
+ /**
+ \brief Causes any temporarily cached data to be flushed to the stream.
+ */
+ virtual void flush(void) = 0;
+
+ /**
+ \brief Close the stream.
+ */
+ virtual void close(void) {}
+
+ void release(void)
+ {
+ delete this;
+ }
+
+ static PX_INLINE bool isBigEndian()
+ {
+ int32_t i = 1;
+ return *(reinterpret_cast<char*>(&i))==0;
+ }
+
+ PX_INLINE void swap2Bytes(void* _data) const
+ {
+ char *data = static_cast<char *>(_data);
+ char one_byte;
+ one_byte = data[0]; data[0] = data[1]; data[1] = one_byte;
+ }
+
+ PX_INLINE void swap4Bytes(void* _data) const
+ {
+ char *data = static_cast<char *>(_data);
+ char one_byte;
+ one_byte = data[0]; data[0] = data[3]; data[3] = one_byte;
+ one_byte = data[1]; data[1] = data[2]; data[2] = one_byte;
+ }
+
+ PX_INLINE void swap8Bytes(void *_data) const
+ {
+ char *data = static_cast<char *>(_data);
+ char one_byte;
+ one_byte = data[0]; data[0] = data[7]; data[7] = one_byte;
+ one_byte = data[1]; data[1] = data[6]; data[6] = one_byte;
+ one_byte = data[2]; data[2] = data[5]; data[5] = one_byte;
+ one_byte = data[3]; data[3] = data[4]; data[4] = one_byte;
+ }
+
+
+ PX_INLINE void storeDword(uint32_t v)
+ {
+ if ( mEndianSwap )
+ swap4Bytes(&v);
+
+ write(&v,sizeof(v));
+ }
+
+ PX_INLINE void storeFloat(float v)
+ {
+ if ( mEndianSwap )
+ swap4Bytes(&v);
+ write(&v,sizeof(v));
+ }
+
+ PX_INLINE void storeDouble(double v)
+ {
+ if ( mEndianSwap )
+ swap8Bytes(&v);
+ write(&v,sizeof(v));
+ }
+
+ PX_INLINE void storeByte(uint8_t b)
+ {
+ write(&b,sizeof(b));
+ }
+
+ PX_INLINE void storeWord(uint16_t w)
+ {
+ if ( mEndianSwap )
+ swap2Bytes(&w);
+ write(&w,sizeof(w));
+ }
+
+ uint8_t readByte(void)
+ {
+ uint8_t v=0;
+ read(&v,sizeof(v));
+ return v;
+ }
+
+ uint16_t readWord(void)
+ {
+ uint16_t v=0;
+ read(&v,sizeof(v));
+ if ( mEndianSwap )
+ swap2Bytes(&v);
+ return v;
+ }
+
+ uint32_t readDword(void)
+ {
+ uint32_t v=0;
+ read(&v,sizeof(v));
+ if ( mEndianSwap )
+ swap4Bytes(&v);
+ return v;
+ }
+
+ float readFloat(void)
+ {
+ float v=0;
+ read(&v,sizeof(v));
+ if ( mEndianSwap )
+ swap4Bytes(&v);
+ return v;
+ }
+
+ double readDouble(void)
+ {
+ double v=0;
+ read(&v,sizeof(v));
+ if ( mEndianSwap )
+ swap8Bytes(&v);
+ return v;
+ }
+
+private:
+ bool mEndianSwap; // whether or not the endian should be swapped on the current platform
+ EndianMode mEndianMode; // the current endian mode behavior for the stream
+};
+
+PX_POP_PACK
+
+#if !PX_DOXYGEN
+} // end of namespace
+
+using namespace general_PxIOStream2;
+
+namespace general_PxIOStream = general_PxIOStream2;
+
+} // end of namespace
+#endif
+
+/** @} */
+
+#endif // PSFILEBUFFER_PXFILEBUF_H
diff --git a/PxShared/include/foundation/Px.h b/PxShared/include/foundation/Px.h
new file mode 100644
index 00000000..6dc74aa2
--- /dev/null
+++ b/PxShared/include/foundation/Px.h
@@ -0,0 +1,92 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2016 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#ifndef PXFOUNDATION_PX_H
+#define PXFOUNDATION_PX_H
+
+/** \addtogroup foundation
+@{
+*/
+
+#include "foundation/PxSimpleTypes.h"
+
+/** files to always include */
+#include <string.h>
+#include <stdlib.h>
+
+#if !PX_DOXYGEN
+namespace physx
+{
+#endif
+
+typedef uint32_t PxU32;
+
+class PxAllocatorCallback;
+class PxErrorCallback;
+struct PxErrorCode;
+class PxAssertHandler;
+
+class PxInputStream;
+class PxInputData;
+class PxOutputStream;
+
+class PxVec2;
+class PxVec3;
+class PxVec4;
+class PxMat33;
+class PxMat44;
+class PxPlane;
+class PxQuat;
+class PxTransform;
+class PxBounds3;
+
+/** enum for empty constructor tag*/
+enum PxEMPTY
+{
+ PxEmpty
+};
+
+/** enum for zero constructor tag for vectors and matrices */
+enum PxZERO
+{
+ PxZero
+};
+
+/** enum for identity constructor flag for quaternions, transforms, and matrices */
+enum PxIDENTITY
+{
+ PxIdentity
+};
+
+#if !PX_DOXYGEN
+} // namespace physx
+#endif
+
+/** @} */
+#endif // #ifndef PXFOUNDATION_PX_H
diff --git a/PxShared/include/foundation/PxAllocatorCallback.h b/PxShared/include/foundation/PxAllocatorCallback.h
new file mode 100644
index 00000000..e5b36c6f
--- /dev/null
+++ b/PxShared/include/foundation/PxAllocatorCallback.h
@@ -0,0 +1,95 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2016 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#ifndef PXFOUNDATION_PXALLOCATORCALLBACK_H
+#define PXFOUNDATION_PXALLOCATORCALLBACK_H
+
+/** \addtogroup foundation
+@{
+*/
+
+#include "foundation/Px.h"
+#if !PX_DOXYGEN
+namespace physx
+{
+#endif
+
+/**
+\brief Abstract base class for an application defined memory allocator that can be used by the Nv library.
+
+\note The SDK state should not be modified from within any allocation/free function.
+
+<b>Threading:</b> All methods of this class should be thread safe as it can be called from the user thread
+or the physics processing thread(s).
+*/
+
+class PxAllocatorCallback
+{
+ public:
+ /**
+ \brief destructor
+ */
+ virtual ~PxAllocatorCallback()
+ {
+ }
+
+ /**
+ \brief Allocates size bytes of memory, which must be 16-byte aligned.
+
+ This method should never return NULL. If you run out of memory, then
+ you should terminate the app or take some other appropriate action.
+
+ <b>Threading:</b> This function should be thread safe as it can be called in the context of the user thread
+ and physics processing thread(s).
+
+ \param size Number of bytes to allocate.
+ \param typeName Name of the datatype that is being allocated
+ \param filename The source file which allocated the memory
+ \param line The source line which allocated the memory
+ \return The allocated block of memory.
+ */
+ virtual void* allocate(size_t size, const char* typeName, const char* filename, int line) = 0;
+
+ /**
+ \brief Frees memory previously allocated by allocate().
+
+ <b>Threading:</b> This function should be thread safe as it can be called in the context of the user thread
+ and physics processing thread(s).
+
+ \param ptr Memory to free.
+ */
+ virtual void deallocate(void* ptr) = 0;
+};
+
+#if !PX_DOXYGEN
+} // namespace physx
+#endif
+
+/** @} */
+#endif // #ifndef PXFOUNDATION_PXALLOCATORCALLBACK_H
diff --git a/PxShared/include/foundation/PxAssert.h b/PxShared/include/foundation/PxAssert.h
new file mode 100644
index 00000000..dbd5c9f9
--- /dev/null
+++ b/PxShared/include/foundation/PxAssert.h
@@ -0,0 +1,95 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2016 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#ifndef PXFOUNDATION_PXASSERT_H
+#define PXFOUNDATION_PXASSERT_H
+
+/** \addtogroup foundation
+@{ */
+
+#include "foundation/Px.h"
+
+#if !PX_DOXYGEN
+namespace physx
+{
+#endif
+
+/* Base class to handle assert failures */
+class PxAssertHandler
+{
+ public:
+ virtual ~PxAssertHandler()
+ {
+ }
+ virtual void operator()(const char* exp, const char* file, int line, bool& ignore) = 0;
+};
+
+PX_FOUNDATION_API PxAssertHandler& PxGetAssertHandler();
+PX_FOUNDATION_API void PxSetAssertHandler(PxAssertHandler& handler);
+
+#if !PX_DOXYGEN
+} // namespace physx
+#endif
+
+#if !PX_ENABLE_ASSERTS
+#define PX_ASSERT(exp) ((void)0)
+#define PX_ALWAYS_ASSERT_MESSAGE(exp) ((void)0)
+#define PX_ASSERT_WITH_MESSAGE(condition, message) ((void)0)
+#else
+#if PX_VC
+#define PX_CODE_ANALYSIS_ASSUME(exp) \
+ __analysis_assume(!!(exp)) // This macro will be used to get rid of analysis warning messages if a PX_ASSERT is used
+// to "guard" illegal mem access, for example.
+#else
+#define PX_CODE_ANALYSIS_ASSUME(exp)
+#endif
+#define PX_ASSERT(exp) \
+ { \
+ static bool _ignore = false; \
+ ((void)((!!(exp)) || (!_ignore && (physx::PxGetAssertHandler()(#exp, __FILE__, __LINE__, _ignore), false)))); \
+ PX_CODE_ANALYSIS_ASSUME(exp); \
+ }
+#define PX_ALWAYS_ASSERT_MESSAGE(exp) \
+ { \
+ static bool _ignore = false; \
+ if(!_ignore) \
+ physx::PxGetAssertHandler()(exp, __FILE__, __LINE__, _ignore); \
+ }
+#define PX_ASSERT_WITH_MESSAGE(exp, message) \
+ { \
+ static bool _ignore = false; \
+ ((void)((!!(exp)) || (!_ignore && (physx::PxGetAssertHandler()(message, __FILE__, __LINE__, _ignore), false)))); \
+ PX_CODE_ANALYSIS_ASSUME(exp); \
+ }
+#endif
+
+#define PX_ALWAYS_ASSERT() PX_ASSERT(0)
+
+/** @} */
+#endif // #ifndef PXFOUNDATION_PXASSERT_H
diff --git a/PxShared/include/foundation/PxBitAndData.h b/PxShared/include/foundation/PxBitAndData.h
new file mode 100644
index 00000000..2327f925
--- /dev/null
+++ b/PxShared/include/foundation/PxBitAndData.h
@@ -0,0 +1,87 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2016 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#ifndef PXFOUNDATION_PXBITANDDATA_H
+#define PXFOUNDATION_PXBITANDDATA_H
+
+#include "foundation/Px.h"
+
+/** \addtogroup foundation
+ @{
+*/
+#if !PX_DOXYGEN
+namespace physx
+{
+#endif
+
+template <typename storageType, storageType bitMask>
+class PxBitAndDataT
+{
+ public:
+ PX_FORCE_INLINE PxBitAndDataT(const PxEMPTY)
+ {
+ }
+ PX_FORCE_INLINE PxBitAndDataT() : mData(0)
+ {
+ }
+ PX_FORCE_INLINE PxBitAndDataT(storageType data, bool bit = false)
+ {
+ mData = bit ? storageType(data | bitMask) : data;
+ }
+
+ PX_CUDA_CALLABLE PX_FORCE_INLINE operator storageType() const
+ {
+ return storageType(mData & ~bitMask);
+ }
+ PX_CUDA_CALLABLE PX_FORCE_INLINE void setBit()
+ {
+ mData |= bitMask;
+ }
+ PX_CUDA_CALLABLE PX_FORCE_INLINE void clearBit()
+ {
+ mData &= ~bitMask;
+ }
+ PX_CUDA_CALLABLE PX_FORCE_INLINE storageType isBitSet() const
+ {
+ return storageType(mData & bitMask);
+ }
+
+ protected:
+ storageType mData;
+};
+typedef PxBitAndDataT<unsigned char, 0x80> PxBitAndByte;
+typedef PxBitAndDataT<unsigned short, 0x8000> PxBitAndWord;
+typedef PxBitAndDataT<unsigned int, 0x80000000> PxBitAndDword;
+
+#if !PX_DOXYGEN
+} // namespace physx
+#endif
+
+/** @} */
+#endif // PXFOUNDATION_PXBITANDDATA_H
diff --git a/PxShared/include/foundation/PxBounds3.h b/PxShared/include/foundation/PxBounds3.h
new file mode 100644
index 00000000..e3fbc9ec
--- /dev/null
+++ b/PxShared/include/foundation/PxBounds3.h
@@ -0,0 +1,480 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2016 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#ifndef PXFOUNDATION_PXBOUNDS3_H
+#define PXFOUNDATION_PXBOUNDS3_H
+
+/** \addtogroup foundation
+@{
+*/
+
+#include "foundation/PxTransform.h"
+#include "foundation/PxMat33.h"
+
+#if !PX_DOXYGEN
+namespace physx
+{
+#endif
+
+// maximum extents defined such that floating point exceptions are avoided for standard use cases
+#define PX_MAX_BOUNDS_EXTENTS (PX_MAX_REAL * 0.25f)
+
+/**
+\brief Class representing 3D range or axis aligned bounding box.
+
+Stored as minimum and maximum extent corners. Alternate representation
+would be center and dimensions.
+May be empty or nonempty. For nonempty bounds, minimum <= maximum has to hold for all axes.
+Empty bounds have to be represented as minimum = PX_MAX_BOUNDS_EXTENTS and maximum = -PX_MAX_BOUNDS_EXTENTS for all
+axes.
+All other representations are invalid and the behavior is undefined.
+*/
+class PxBounds3
+{
+ public:
+ /**
+ \brief Default constructor, not performing any initialization for performance reason.
+ \remark Use empty() function below to construct empty bounds.
+ */
+ PX_CUDA_CALLABLE PX_FORCE_INLINE PxBounds3()
+ {
+ }
+
+ /**
+ \brief Construct from two bounding points
+ */
+ PX_CUDA_CALLABLE PX_FORCE_INLINE PxBounds3(const PxVec3& minimum, const PxVec3& maximum);
+
+ /**
+ \brief Return empty bounds.
+ */
+ static PX_CUDA_CALLABLE PX_FORCE_INLINE PxBounds3 empty();
+
+ /**
+ \brief returns the AABB containing v0 and v1.
+ \param v0 first point included in the AABB.
+ \param v1 second point included in the AABB.
+ */
+ static PX_CUDA_CALLABLE PX_FORCE_INLINE PxBounds3 boundsOfPoints(const PxVec3& v0, const PxVec3& v1);
+
+ /**
+ \brief returns the AABB from center and extents vectors.
+ \param center Center vector
+ \param extent Extents vector
+ */
+ static PX_CUDA_CALLABLE PX_FORCE_INLINE PxBounds3 centerExtents(const PxVec3& center, const PxVec3& extent);
+
+ /**
+ \brief Construct from center, extent, and (not necessarily orthogonal) basis
+ */
+ static PX_CUDA_CALLABLE PX_INLINE PxBounds3
+ basisExtent(const PxVec3& center, const PxMat33& basis, const PxVec3& extent);
+
+ /**
+ \brief Construct from pose and extent
+ */
+ static PX_CUDA_CALLABLE PX_INLINE PxBounds3 poseExtent(const PxTransform& pose, const PxVec3& extent);
+
+ /**
+ \brief gets the transformed bounds of the passed AABB (resulting in a bigger AABB).
+
+ This version is safe to call for empty bounds.
+
+ \param[in] matrix Transform to apply, can contain scaling as well
+ \param[in] bounds The bounds to transform.
+ */
+ static PX_CUDA_CALLABLE PX_INLINE PxBounds3 transformSafe(const PxMat33& matrix, const PxBounds3& bounds);
+
+ /**
+ \brief gets the transformed bounds of the passed AABB (resulting in a bigger AABB).
+
+ Calling this method for empty bounds leads to undefined behavior. Use #transformSafe() instead.
+
+ \param[in] matrix Transform to apply, can contain scaling as well
+ \param[in] bounds The bounds to transform.
+ */
+ static PX_CUDA_CALLABLE PX_INLINE PxBounds3 transformFast(const PxMat33& matrix, const PxBounds3& bounds);
+
+ /**
+ \brief gets the transformed bounds of the passed AABB (resulting in a bigger AABB).
+
+ This version is safe to call for empty bounds.
+
+ \param[in] transform Transform to apply, can contain scaling as well
+ \param[in] bounds The bounds to transform.
+ */
+ static PX_CUDA_CALLABLE PX_INLINE PxBounds3 transformSafe(const PxTransform& transform, const PxBounds3& bounds);
+
+ /**
+ \brief gets the transformed bounds of the passed AABB (resulting in a bigger AABB).
+
+ Calling this method for empty bounds leads to undefined behavior. Use #transformSafe() instead.
+
+ \param[in] transform Transform to apply, can contain scaling as well
+ \param[in] bounds The bounds to transform.
+ */
+ static PX_CUDA_CALLABLE PX_INLINE PxBounds3 transformFast(const PxTransform& transform, const PxBounds3& bounds);
+
+ /**
+ \brief Sets empty to true
+ */
+ PX_CUDA_CALLABLE PX_FORCE_INLINE void setEmpty();
+
+ /**
+ \brief Sets the bounds to maximum size [-PX_MAX_BOUNDS_EXTENTS, PX_MAX_BOUNDS_EXTENTS].
+ */
+ PX_CUDA_CALLABLE PX_FORCE_INLINE void setMaximal();
+
+ /**
+ \brief expands the volume to include v
+ \param v Point to expand to.
+ */
+ PX_CUDA_CALLABLE PX_FORCE_INLINE void include(const PxVec3& v);
+
+ /**
+ \brief expands the volume to include b.
+ \param b Bounds to perform union with.
+ */
+ PX_CUDA_CALLABLE PX_FORCE_INLINE void include(const PxBounds3& b);
+
+ PX_CUDA_CALLABLE PX_FORCE_INLINE bool isEmpty() const;
+
+ /**
+ \brief indicates whether the intersection of this and b is empty or not.
+ \param b Bounds to test for intersection.
+ */
+ PX_CUDA_CALLABLE PX_FORCE_INLINE bool intersects(const PxBounds3& b) const;
+
+ /**
+ \brief computes the 1D-intersection between two AABBs, on a given axis.
+ \param a the other AABB
+ \param axis the axis (0, 1, 2)
+ */
+ PX_CUDA_CALLABLE PX_FORCE_INLINE bool intersects1D(const PxBounds3& a, uint32_t axis) const;
+
+ /**
+ \brief indicates if these bounds contain v.
+ \param v Point to test against bounds.
+ */
+ PX_CUDA_CALLABLE PX_FORCE_INLINE bool contains(const PxVec3& v) const;
+
+ /**
+ \brief checks a box is inside another box.
+ \param box the other AABB
+ */
+ PX_CUDA_CALLABLE PX_FORCE_INLINE bool isInside(const PxBounds3& box) const;
+
+ /**
+ \brief returns the center of this axis aligned box.
+ */
+ PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3 getCenter() const;
+
+ /**
+ \brief get component of the box's center along a given axis
+ */
+ PX_CUDA_CALLABLE PX_FORCE_INLINE float getCenter(uint32_t axis) const;
+
+ /**
+ \brief get component of the box's extents along a given axis
+ */
+ PX_CUDA_CALLABLE PX_FORCE_INLINE float getExtents(uint32_t axis) const;
+
+ /**
+ \brief returns the dimensions (width/height/depth) of this axis aligned box.
+ */
+ PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3 getDimensions() const;
+
+ /**
+ \brief returns the extents, which are half of the width/height/depth.
+ */
+ PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3 getExtents() const;
+
+ /**
+ \brief scales the AABB.
+
+ This version is safe to call for empty bounds.
+
+ \param scale Factor to scale AABB by.
+ */
+ PX_CUDA_CALLABLE PX_FORCE_INLINE void scaleSafe(float scale);
+
+ /**
+ \brief scales the AABB.
+
+ Calling this method for empty bounds leads to undefined behavior. Use #scaleSafe() instead.
+
+ \param scale Factor to scale AABB by.
+ */
+ PX_CUDA_CALLABLE PX_FORCE_INLINE void scaleFast(float scale);
+
+ /**
+ fattens the AABB in all 3 dimensions by the given distance.
+
+ This version is safe to call for empty bounds.
+ */
+ PX_CUDA_CALLABLE PX_FORCE_INLINE void fattenSafe(float distance);
+
+ /**
+ fattens the AABB in all 3 dimensions by the given distance.
+
+ Calling this method for empty bounds leads to undefined behavior. Use #fattenSafe() instead.
+ */
+ PX_CUDA_CALLABLE PX_FORCE_INLINE void fattenFast(float distance);
+
+ /**
+ checks that the AABB values are not NaN
+ */
+ PX_CUDA_CALLABLE PX_FORCE_INLINE bool isFinite() const;
+
+ /**
+ checks that the AABB values describe a valid configuration.
+ */
+ PX_CUDA_CALLABLE PX_FORCE_INLINE bool isValid() const;
+
+ PxVec3 minimum, maximum;
+};
+
+PX_CUDA_CALLABLE PX_FORCE_INLINE PxBounds3::PxBounds3(const PxVec3& minimum_, const PxVec3& maximum_)
+: minimum(minimum_), maximum(maximum_)
+{
+}
+
+PX_CUDA_CALLABLE PX_FORCE_INLINE PxBounds3 PxBounds3::empty()
+{
+ return PxBounds3(PxVec3(PX_MAX_BOUNDS_EXTENTS), PxVec3(-PX_MAX_BOUNDS_EXTENTS));
+}
+
+PX_CUDA_CALLABLE PX_FORCE_INLINE bool PxBounds3::isFinite() const
+{
+ return minimum.isFinite() && maximum.isFinite();
+}
+
+PX_CUDA_CALLABLE PX_FORCE_INLINE PxBounds3 PxBounds3::boundsOfPoints(const PxVec3& v0, const PxVec3& v1)
+{
+ return PxBounds3(v0.minimum(v1), v0.maximum(v1));
+}
+
+PX_CUDA_CALLABLE PX_FORCE_INLINE PxBounds3 PxBounds3::centerExtents(const PxVec3& center, const PxVec3& extent)
+{
+ return PxBounds3(center - extent, center + extent);
+}
+
+PX_CUDA_CALLABLE PX_INLINE PxBounds3
+PxBounds3::basisExtent(const PxVec3& center, const PxMat33& basis, const PxVec3& extent)
+{
+ // extended basis vectors
+ PxVec3 c0 = basis.column0 * extent.x;
+ PxVec3 c1 = basis.column1 * extent.y;
+ PxVec3 c2 = basis.column2 * extent.z;
+
+ PxVec3 w;
+ // find combination of base vectors that produces max. distance for each component = sum of abs()
+ w.x = PxAbs(c0.x) + PxAbs(c1.x) + PxAbs(c2.x);
+ w.y = PxAbs(c0.y) + PxAbs(c1.y) + PxAbs(c2.y);
+ w.z = PxAbs(c0.z) + PxAbs(c1.z) + PxAbs(c2.z);
+
+ return PxBounds3(center - w, center + w);
+}
+
+PX_CUDA_CALLABLE PX_INLINE PxBounds3 PxBounds3::poseExtent(const PxTransform& pose, const PxVec3& extent)
+{
+ return basisExtent(pose.p, PxMat33(pose.q), extent);
+}
+
+PX_CUDA_CALLABLE PX_FORCE_INLINE void PxBounds3::setEmpty()
+{
+ minimum = PxVec3(PX_MAX_BOUNDS_EXTENTS);
+ maximum = PxVec3(-PX_MAX_BOUNDS_EXTENTS);
+}
+
+PX_CUDA_CALLABLE PX_FORCE_INLINE void PxBounds3::setMaximal()
+{
+ minimum = PxVec3(-PX_MAX_BOUNDS_EXTENTS);
+ maximum = PxVec3(PX_MAX_BOUNDS_EXTENTS);
+}
+
+PX_CUDA_CALLABLE PX_FORCE_INLINE void PxBounds3::include(const PxVec3& v)
+{
+ PX_ASSERT(isValid());
+ minimum = minimum.minimum(v);
+ maximum = maximum.maximum(v);
+}
+
+PX_CUDA_CALLABLE PX_FORCE_INLINE void PxBounds3::include(const PxBounds3& b)
+{
+ PX_ASSERT(isValid());
+ minimum = minimum.minimum(b.minimum);
+ maximum = maximum.maximum(b.maximum);
+}
+
+PX_CUDA_CALLABLE PX_FORCE_INLINE bool PxBounds3::isEmpty() const
+{
+ PX_ASSERT(isValid());
+ return minimum.x > maximum.x;
+}
+
+PX_CUDA_CALLABLE PX_FORCE_INLINE bool PxBounds3::intersects(const PxBounds3& b) const
+{
+ PX_ASSERT(isValid() && b.isValid());
+ return !(b.minimum.x > maximum.x || minimum.x > b.maximum.x || b.minimum.y > maximum.y || minimum.y > b.maximum.y ||
+ b.minimum.z > maximum.z || minimum.z > b.maximum.z);
+}
+
+PX_CUDA_CALLABLE PX_FORCE_INLINE bool PxBounds3::intersects1D(const PxBounds3& a, uint32_t axis) const
+{
+ PX_ASSERT(isValid() && a.isValid());
+ return maximum[axis] >= a.minimum[axis] && a.maximum[axis] >= minimum[axis];
+}
+
+PX_CUDA_CALLABLE PX_FORCE_INLINE bool PxBounds3::contains(const PxVec3& v) const
+{
+ PX_ASSERT(isValid());
+
+ return !(v.x < minimum.x || v.x > maximum.x || v.y < minimum.y || v.y > maximum.y || v.z < minimum.z ||
+ v.z > maximum.z);
+}
+
+PX_CUDA_CALLABLE PX_FORCE_INLINE bool PxBounds3::isInside(const PxBounds3& box) const
+{
+ PX_ASSERT(isValid() && box.isValid());
+ if(box.minimum.x > minimum.x)
+ return false;
+ if(box.minimum.y > minimum.y)
+ return false;
+ if(box.minimum.z > minimum.z)
+ return false;
+ if(box.maximum.x < maximum.x)
+ return false;
+ if(box.maximum.y < maximum.y)
+ return false;
+ if(box.maximum.z < maximum.z)
+ return false;
+ return true;
+}
+
+PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3 PxBounds3::getCenter() const
+{
+ PX_ASSERT(isValid());
+ return (minimum + maximum) * 0.5f;
+}
+
+PX_CUDA_CALLABLE PX_FORCE_INLINE float PxBounds3::getCenter(uint32_t axis) const
+{
+ PX_ASSERT(isValid());
+ return (minimum[axis] + maximum[axis]) * 0.5f;
+}
+
+PX_CUDA_CALLABLE PX_FORCE_INLINE float PxBounds3::getExtents(uint32_t axis) const
+{
+ PX_ASSERT(isValid());
+ return (maximum[axis] - minimum[axis]) * 0.5f;
+}
+
+PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3 PxBounds3::getDimensions() const
+{
+ PX_ASSERT(isValid());
+ return maximum - minimum;
+}
+
+PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3 PxBounds3::getExtents() const
+{
+ PX_ASSERT(isValid());
+ return getDimensions() * 0.5f;
+}
+
+PX_CUDA_CALLABLE PX_FORCE_INLINE void PxBounds3::scaleSafe(float scale)
+{
+ PX_ASSERT(isValid());
+ if(!isEmpty())
+ scaleFast(scale);
+}
+
+PX_CUDA_CALLABLE PX_FORCE_INLINE void PxBounds3::scaleFast(float scale)
+{
+ PX_ASSERT(isValid());
+ *this = centerExtents(getCenter(), getExtents() * scale);
+}
+
+PX_CUDA_CALLABLE PX_FORCE_INLINE void PxBounds3::fattenSafe(float distance)
+{
+ PX_ASSERT(isValid());
+ if(!isEmpty())
+ fattenFast(distance);
+}
+
+PX_CUDA_CALLABLE PX_FORCE_INLINE void PxBounds3::fattenFast(float distance)
+{
+ PX_ASSERT(isValid());
+ minimum.x -= distance;
+ minimum.y -= distance;
+ minimum.z -= distance;
+
+ maximum.x += distance;
+ maximum.y += distance;
+ maximum.z += distance;
+}
+
+PX_CUDA_CALLABLE PX_INLINE PxBounds3 PxBounds3::transformSafe(const PxMat33& matrix, const PxBounds3& bounds)
+{
+ PX_ASSERT(bounds.isValid());
+ return !bounds.isEmpty() ? transformFast(matrix, bounds) : bounds;
+}
+
+PX_CUDA_CALLABLE PX_INLINE PxBounds3 PxBounds3::transformFast(const PxMat33& matrix, const PxBounds3& bounds)
+{
+ PX_ASSERT(bounds.isValid());
+ return PxBounds3::basisExtent(matrix * bounds.getCenter(), matrix, bounds.getExtents());
+}
+
+PX_CUDA_CALLABLE PX_INLINE PxBounds3 PxBounds3::transformSafe(const PxTransform& transform, const PxBounds3& bounds)
+{
+ PX_ASSERT(bounds.isValid());
+ return !bounds.isEmpty() ? transformFast(transform, bounds) : bounds;
+}
+
+PX_CUDA_CALLABLE PX_INLINE PxBounds3 PxBounds3::transformFast(const PxTransform& transform, const PxBounds3& bounds)
+{
+ PX_ASSERT(bounds.isValid());
+ return PxBounds3::basisExtent(transform.transform(bounds.getCenter()), PxMat33(transform.q), bounds.getExtents());
+}
+
+PX_CUDA_CALLABLE PX_FORCE_INLINE bool PxBounds3::isValid() const
+{
+ return (isFinite() && (((minimum.x <= maximum.x) && (minimum.y <= maximum.y) && (minimum.z <= maximum.z)) ||
+ ((minimum.x == PX_MAX_BOUNDS_EXTENTS) && (minimum.y == PX_MAX_BOUNDS_EXTENTS) &&
+ (minimum.z == PX_MAX_BOUNDS_EXTENTS) && (maximum.x == -PX_MAX_BOUNDS_EXTENTS) &&
+ (maximum.y == -PX_MAX_BOUNDS_EXTENTS) && (maximum.z == -PX_MAX_BOUNDS_EXTENTS))));
+}
+
+#if !PX_DOXYGEN
+} // namespace physx
+#endif
+
+/** @} */
+#endif // #ifndef PXFOUNDATION_PXBOUNDS3_H
diff --git a/PxShared/include/foundation/PxErrorCallback.h b/PxShared/include/foundation/PxErrorCallback.h
new file mode 100644
index 00000000..27869d39
--- /dev/null
+++ b/PxShared/include/foundation/PxErrorCallback.h
@@ -0,0 +1,73 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2016 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#ifndef PXFOUNDATION_PXERRORCALLBACK_H
+#define PXFOUNDATION_PXERRORCALLBACK_H
+
+/** \addtogroup foundation
+@{
+*/
+
+#include "foundation/PxErrors.h"
+#if !PX_DOXYGEN
+namespace physx
+{
+#endif
+
+/**
+\brief User defined interface class. Used by the library to emit debug information.
+
+\note The SDK state should not be modified from within any error reporting functions.
+
+<b>Threading:</b> The SDK sequences its calls to the output stream using a mutex, so the class need not
+be implemented in a thread-safe manner if the SDK is the only client.
+*/
+class PxErrorCallback
+{
+ public:
+ virtual ~PxErrorCallback()
+ {
+ }
+
+ /**
+ \brief Reports an error code.
+ \param code Error code, see #PxErrorCode
+ \param message Message to display.
+	\param file File error occurred in.
+	\param line Line number error occurred on.
+ */
+ virtual void reportError(PxErrorCode::Enum code, const char* message, const char* file, int line) = 0;
+};
+
+#if !PX_DOXYGEN
+} // namespace physx
+#endif
+
+/** @} */
+#endif // #ifndef PXFOUNDATION_PXERRORCALLBACK_H
diff --git a/PxShared/include/foundation/PxErrors.h b/PxShared/include/foundation/PxErrors.h
new file mode 100644
index 00000000..00745e3b
--- /dev/null
+++ b/PxShared/include/foundation/PxErrors.h
@@ -0,0 +1,93 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2016 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#ifndef PXFOUNDATION_PXERRORS_H
+#define PXFOUNDATION_PXERRORS_H
+/** \addtogroup foundation
+@{
+*/
+
+#include "foundation/Px.h"
+
+#if !PX_DOXYGEN
+namespace physx
+{
+#endif
+
+/**
+\brief Error codes
+
+These error codes are passed to #PxErrorCallback
+
+@see PxErrorCallback
+*/
+
+struct PxErrorCode
+{
+ enum Enum
+ {
+ eNO_ERROR = 0,
+
+ //! \brief An informational message.
+ eDEBUG_INFO = 1,
+
+ //! \brief a warning message for the user to help with debugging
+ eDEBUG_WARNING = 2,
+
+ //! \brief method called with invalid parameter(s)
+ eINVALID_PARAMETER = 4,
+
+ //! \brief method was called at a time when an operation is not possible
+ eINVALID_OPERATION = 8,
+
+ //! \brief method failed to allocate some memory
+ eOUT_OF_MEMORY = 16,
+
+ /** \brief The library failed for some reason.
+ Possibly you have passed invalid values like NaNs, which are not checked for.
+ */
+ eINTERNAL_ERROR = 32,
+
+ //! \brief An unrecoverable error, execution should be halted and log output flushed
+ eABORT = 64,
+
+ //! \brief The SDK has determined that an operation may result in poor performance.
+ ePERF_WARNING = 128,
+
+ //! \brief A bit mask for including all errors
+ eMASK_ALL = -1
+ };
+};
+
+#if !PX_DOXYGEN
+} // namespace physx
+#endif
+
+/** @} */
+#endif // #ifndef PXFOUNDATION_PXERRORS_H
diff --git a/PxShared/include/foundation/PxFlags.h b/PxShared/include/foundation/PxFlags.h
new file mode 100644
index 00000000..0fa82783
--- /dev/null
+++ b/PxShared/include/foundation/PxFlags.h
@@ -0,0 +1,375 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2016 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#ifndef PXFOUNDATION_PXFLAGS_H
+#define PXFOUNDATION_PXFLAGS_H
+
+/** \addtogroup foundation
+ @{
+*/
+
+#include "foundation/Px.h"
+
+#if !PX_DOXYGEN
+namespace physx
+{
+#endif
+/**
+\brief Container for bitfield flag variables associated with a specific enum type.
+
+This allows for type safe manipulation for bitfields.
+
+<h3>Example</h3>
+ // enum that defines each bit...
+ struct MyEnum
+ {
+ enum Enum
+ {
+ eMAN = 1,
+ eBEAR = 2,
+ ePIG = 4,
+ };
+ };
+
+ // implements some convenient global operators.
+ PX_FLAGS_OPERATORS(MyEnum::Enum, uint8_t);
+
+ PxFlags<MyEnum::Enum, uint8_t> myFlags;
+ myFlags |= MyEnum::eMAN;
+ myFlags |= MyEnum::eBEAR | MyEnum::ePIG;
+ if(myFlags & MyEnum::eBEAR)
+ {
+ doSomething();
+ }
+*/
+
+template <typename enumtype, typename storagetype = uint32_t>
+class PxFlags
+{
+ public:
+ typedef storagetype InternalType;
+
+ PX_CUDA_CALLABLE PX_INLINE explicit PxFlags(const PxEMPTY)
+ {
+ }
+ PX_CUDA_CALLABLE PX_INLINE PxFlags(void);
+ PX_CUDA_CALLABLE PX_INLINE PxFlags(enumtype e);
+ PX_CUDA_CALLABLE PX_INLINE PxFlags(const PxFlags<enumtype, storagetype>& f);
+ PX_CUDA_CALLABLE PX_INLINE explicit PxFlags(storagetype b);
+
+ PX_CUDA_CALLABLE PX_INLINE bool isSet(enumtype e) const;
+ PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype>& set(enumtype e);
+ PX_CUDA_CALLABLE PX_INLINE bool operator==(enumtype e) const;
+ PX_CUDA_CALLABLE PX_INLINE bool operator==(const PxFlags<enumtype, storagetype>& f) const;
+ PX_CUDA_CALLABLE PX_INLINE bool operator==(bool b) const;
+ PX_CUDA_CALLABLE PX_INLINE bool operator!=(enumtype e) const;
+ PX_CUDA_CALLABLE PX_INLINE bool operator!=(const PxFlags<enumtype, storagetype>& f) const;
+
+ PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype>& operator=(const PxFlags<enumtype, storagetype>& f);
+ PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype>& operator=(enumtype e);
+
+ PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype>& operator|=(enumtype e);
+ PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype>& operator|=(const PxFlags<enumtype, storagetype>& f);
+ PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype> operator|(enumtype e) const;
+ PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype> operator|(const PxFlags<enumtype, storagetype>& f) const;
+
+ PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype>& operator&=(enumtype e);
+ PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype>& operator&=(const PxFlags<enumtype, storagetype>& f);
+ PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype> operator&(enumtype e) const;
+ PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype> operator&(const PxFlags<enumtype, storagetype>& f) const;
+
+ PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype>& operator^=(enumtype e);
+ PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype>& operator^=(const PxFlags<enumtype, storagetype>& f);
+ PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype> operator^(enumtype e) const;
+ PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype> operator^(const PxFlags<enumtype, storagetype>& f) const;
+
+ PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype> operator~(void) const;
+
+ PX_CUDA_CALLABLE PX_INLINE operator bool(void) const;
+ PX_CUDA_CALLABLE PX_INLINE operator uint8_t(void) const;
+ PX_CUDA_CALLABLE PX_INLINE operator uint16_t(void) const;
+ PX_CUDA_CALLABLE PX_INLINE operator uint32_t(void) const;
+
+ PX_CUDA_CALLABLE PX_INLINE void clear(enumtype e);
+
+ public:
+ friend PX_INLINE PxFlags<enumtype, storagetype> operator&(enumtype a, PxFlags<enumtype, storagetype>& b)
+ {
+ PxFlags<enumtype, storagetype> out;
+ out.mBits = a & b.mBits;
+ return out;
+ }
+
+ private:
+ storagetype mBits;
+};
+
+#define PX_FLAGS_OPERATORS(enumtype, storagetype) \
+ PX_INLINE PxFlags<enumtype, storagetype> operator|(enumtype a, enumtype b) \
+ { \
+ PxFlags<enumtype, storagetype> r(a); \
+ r |= b; \
+ return r; \
+ } \
+ PX_INLINE PxFlags<enumtype, storagetype> operator&(enumtype a, enumtype b) \
+ { \
+ PxFlags<enumtype, storagetype> r(a); \
+ r &= b; \
+ return r; \
+ } \
+ PX_INLINE PxFlags<enumtype, storagetype> operator~(enumtype a) \
+ { \
+ return ~PxFlags<enumtype, storagetype>(a); \
+ }
+
+#define PX_FLAGS_TYPEDEF(x, y) \
+ typedef PxFlags<x::Enum, y> x##s; \
+ PX_FLAGS_OPERATORS(x::Enum, y)
+
+template <typename enumtype, typename storagetype>
+PX_INLINE PxFlags<enumtype, storagetype>::PxFlags(void)
+{
+ mBits = 0;
+}
+
+template <typename enumtype, typename storagetype>
+PX_INLINE PxFlags<enumtype, storagetype>::PxFlags(enumtype e)
+{
+ mBits = static_cast<storagetype>(e);
+}
+
+template <typename enumtype, typename storagetype>
+PX_INLINE PxFlags<enumtype, storagetype>::PxFlags(const PxFlags<enumtype, storagetype>& f)
+{
+ mBits = f.mBits;
+}
+
+template <typename enumtype, typename storagetype>
+PX_INLINE PxFlags<enumtype, storagetype>::PxFlags(storagetype b)
+{
+ mBits = b;
+}
+
+template <typename enumtype, typename storagetype>
+PX_INLINE bool PxFlags<enumtype, storagetype>::isSet(enumtype e) const
+{
+ return (mBits & static_cast<storagetype>(e)) == static_cast<storagetype>(e);
+}
+
+template <typename enumtype, typename storagetype>
+PX_INLINE PxFlags<enumtype, storagetype>& PxFlags<enumtype, storagetype>::set(enumtype e)
+{
+ mBits = static_cast<storagetype>(e);
+ return *this;
+}
+
+template <typename enumtype, typename storagetype>
+PX_INLINE bool PxFlags<enumtype, storagetype>::operator==(enumtype e) const
+{
+ return mBits == static_cast<storagetype>(e);
+}
+
+template <typename enumtype, typename storagetype>
+PX_INLINE bool PxFlags<enumtype, storagetype>::operator==(const PxFlags<enumtype, storagetype>& f) const
+{
+ return mBits == f.mBits;
+}
+
+template <typename enumtype, typename storagetype>
+PX_INLINE bool PxFlags<enumtype, storagetype>::operator==(bool b) const
+{
+ return bool(*this) == b;
+}
+
+template <typename enumtype, typename storagetype>
+PX_INLINE bool PxFlags<enumtype, storagetype>::operator!=(enumtype e) const
+{
+ return mBits != static_cast<storagetype>(e);
+}
+
+template <typename enumtype, typename storagetype>
+PX_INLINE bool PxFlags<enumtype, storagetype>::operator!=(const PxFlags<enumtype, storagetype>& f) const
+{
+ return mBits != f.mBits;
+}
+
+template <typename enumtype, typename storagetype>
+PX_INLINE PxFlags<enumtype, storagetype>& PxFlags<enumtype, storagetype>::operator=(enumtype e)
+{
+ mBits = static_cast<storagetype>(e);
+ return *this;
+}
+
+template <typename enumtype, typename storagetype>
+PX_INLINE PxFlags<enumtype, storagetype>& PxFlags<enumtype, storagetype>::operator=(const PxFlags<enumtype, storagetype>& f)
+{
+ mBits = f.mBits;
+ return *this;
+}
+
+template <typename enumtype, typename storagetype>
+PX_INLINE PxFlags<enumtype, storagetype>& PxFlags<enumtype, storagetype>::operator|=(enumtype e)
+{
+ mBits |= static_cast<storagetype>(e);
+ return *this;
+}
+
+template <typename enumtype, typename storagetype>
+PX_INLINE PxFlags<enumtype, storagetype>& PxFlags<enumtype, storagetype>::
+operator|=(const PxFlags<enumtype, storagetype>& f)
+{
+ mBits |= f.mBits;
+ return *this;
+}
+
+template <typename enumtype, typename storagetype>
+PX_INLINE PxFlags<enumtype, storagetype> PxFlags<enumtype, storagetype>::operator|(enumtype e) const
+{
+ PxFlags<enumtype, storagetype> out(*this);
+ out |= e;
+ return out;
+}
+
+template <typename enumtype, typename storagetype>
+PX_INLINE PxFlags<enumtype, storagetype> PxFlags<enumtype, storagetype>::
+operator|(const PxFlags<enumtype, storagetype>& f) const
+{
+ PxFlags<enumtype, storagetype> out(*this);
+ out |= f;
+ return out;
+}
+
+template <typename enumtype, typename storagetype>
+PX_INLINE PxFlags<enumtype, storagetype>& PxFlags<enumtype, storagetype>::operator&=(enumtype e)
+{
+ mBits &= static_cast<storagetype>(e);
+ return *this;
+}
+
+template <typename enumtype, typename storagetype>
+PX_INLINE PxFlags<enumtype, storagetype>& PxFlags<enumtype, storagetype>::
+operator&=(const PxFlags<enumtype, storagetype>& f)
+{
+ mBits &= f.mBits;
+ return *this;
+}
+
+template <typename enumtype, typename storagetype>
+PX_INLINE PxFlags<enumtype, storagetype> PxFlags<enumtype, storagetype>::operator&(enumtype e) const
+{
+ PxFlags<enumtype, storagetype> out = *this;
+ out.mBits &= static_cast<storagetype>(e);
+ return out;
+}
+
+template <typename enumtype, typename storagetype>
+PX_INLINE PxFlags<enumtype, storagetype> PxFlags<enumtype, storagetype>::
+operator&(const PxFlags<enumtype, storagetype>& f) const
+{
+ PxFlags<enumtype, storagetype> out = *this;
+ out.mBits &= f.mBits;
+ return out;
+}
+
+template <typename enumtype, typename storagetype>
+PX_INLINE PxFlags<enumtype, storagetype>& PxFlags<enumtype, storagetype>::operator^=(enumtype e)
+{
+ mBits ^= static_cast<storagetype>(e);
+ return *this;
+}
+
+template <typename enumtype, typename storagetype>
+PX_INLINE PxFlags<enumtype, storagetype>& PxFlags<enumtype, storagetype>::
+operator^=(const PxFlags<enumtype, storagetype>& f)
+{
+ mBits ^= f.mBits;
+ return *this;
+}
+
+template <typename enumtype, typename storagetype>
+PX_INLINE PxFlags<enumtype, storagetype> PxFlags<enumtype, storagetype>::operator^(enumtype e) const
+{
+ PxFlags<enumtype, storagetype> out = *this;
+ out.mBits ^= static_cast<storagetype>(e);
+ return out;
+}
+
+template <typename enumtype, typename storagetype>
+PX_INLINE PxFlags<enumtype, storagetype> PxFlags<enumtype, storagetype>::
+operator^(const PxFlags<enumtype, storagetype>& f) const
+{
+ PxFlags<enumtype, storagetype> out = *this;
+ out.mBits ^= f.mBits;
+ return out;
+}
+
+template <typename enumtype, typename storagetype>
+PX_INLINE PxFlags<enumtype, storagetype> PxFlags<enumtype, storagetype>::operator~(void) const
+{
+ PxFlags<enumtype, storagetype> out;
+ out.mBits = storagetype(~mBits);
+ return out;
+}
+
+template <typename enumtype, typename storagetype>
+PX_INLINE PxFlags<enumtype, storagetype>::operator bool(void) const
+{
+ return mBits ? true : false;
+}
+
+template <typename enumtype, typename storagetype>
+PX_INLINE PxFlags<enumtype, storagetype>::operator uint8_t(void) const
+{
+ return static_cast<uint8_t>(mBits);
+}
+
+template <typename enumtype, typename storagetype>
+PX_INLINE PxFlags<enumtype, storagetype>::operator uint16_t(void) const
+{
+ return static_cast<uint16_t>(mBits);
+}
+
+template <typename enumtype, typename storagetype>
+PX_INLINE PxFlags<enumtype, storagetype>::operator uint32_t(void) const
+{
+ return static_cast<uint32_t>(mBits);
+}
+
+template <typename enumtype, typename storagetype>
+PX_INLINE void PxFlags<enumtype, storagetype>::clear(enumtype e)
+{
+ mBits &= ~static_cast<storagetype>(e);
+}
+
+#if !PX_DOXYGEN
+} // namespace physx
+#endif
+
+/** @} */
+#endif // #ifndef PXFOUNDATION_PXFLAGS_H
diff --git a/PxShared/include/foundation/PxFoundation.h b/PxShared/include/foundation/PxFoundation.h
new file mode 100644
index 00000000..deaeffd1
--- /dev/null
+++ b/PxShared/include/foundation/PxFoundation.h
@@ -0,0 +1,147 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2016 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#ifndef PX_FOUNDATION_PX_FOUNDATION_H
+#define PX_FOUNDATION_PX_FOUNDATION_H
+
+/** \addtogroup foundation
+ @{
+*/
+
+#include "foundation/Px.h"
+#include "foundation/PxErrors.h"
+
+#if !PX_DOXYGEN
+namespace physx
+{
+#endif
+
+/**
+\brief Foundation SDK singleton class.
+
+You need to have an instance of this class to instance the higher level SDKs.
+*/
+class PX_FOUNDATION_API PxFoundation
+{
+ public:
+ /**
+ \brief Destroys the instance it is called on.
+
+ The operation will fail if there are still modules referencing the foundation object.
+ Release all dependent modules
+ prior to calling this method.
+
+ @see PxCreateFoundation()
+ */
+ virtual void release() = 0;
+
+ /**
+ retrieves error callback
+ */
+ virtual PxErrorCallback& getErrorCallback() = 0;
+
+ /**
+ Sets mask of errors to report.
+ */
+ virtual void setErrorLevel(PxErrorCode::Enum mask = PxErrorCode::eMASK_ALL) = 0;
+
+ /**
+ Retrieves mask of errors to be reported.
+ */
+ virtual PxErrorCode::Enum getErrorLevel() const = 0;
+
+ /**
+ Retrieves the allocator this object was created with.
+ */
+ virtual PxAllocatorCallback& getAllocatorCallback() = 0;
+
+ /**
+ Retrieves if allocation names are being passed to allocator callback.
+ */
+ virtual bool getReportAllocationNames() const = 0;
+
+ /**
+ Set if allocation names are being passed to allocator callback.
+ \details Enabled by default in debug and checked build, disabled by default in profile and release build.
+ */
+ virtual void setReportAllocationNames(bool value) = 0;
+
+ protected:
+ virtual ~PxFoundation() // protected: lifetime is managed through release(), not delete
+ {
+ }
+};
+
+#if !PX_DOXYGEN
+} // namespace physx
+#endif
+
+/**
+\brief Creates an instance of the foundation class
+
+The foundation class is needed to initialize higher level SDKs. There may be only one instance per process.
+Calling this method after an instance has been created already will result in an error message and NULL will be
+returned.
+
+\param version Version number we are expecting (should be PX_FOUNDATION_VERSION)
+\param allocator User supplied interface for allocating memory(see #PxAllocatorCallback)
+\param errorCallback User supplied interface for reporting errors and displaying messages(see #PxErrorCallback)
+\return Foundation instance on success, NULL if operation failed
+
+@see PxFoundation
+*/
+
+PX_C_EXPORT PX_FOUNDATION_API physx::PxFoundation* PX_CALL_CONV
+PxCreateFoundation(physx::PxU32 version, physx::PxAllocatorCallback& allocator, physx::PxErrorCallback& errorCallback);
+/**
+\brief Retrieves the Foundation SDK after it has been created.
+
+\note The behavior of this method is undefined if the foundation instance has not been created already.
+
+@see PxCreateFoundation()
+*/
+PX_C_EXPORT PX_FOUNDATION_API physx::PxFoundation& PX_CALL_CONV PxGetFoundation();
+
+namespace physx
+{
+class PxProfilerCallback;
+}
+
+/**
+\brief Get the callback that will be used for all profiling.
+*/
+PX_C_EXPORT PX_FOUNDATION_API physx::PxProfilerCallback* PX_CALL_CONV PxGetProfilerCallback();
+
+/**
+\brief Set the callback that will be used for all profiling.
+*/
+PX_C_EXPORT PX_FOUNDATION_API void PX_CALL_CONV PxSetProfilerCallback(physx::PxProfilerCallback* profiler);
+
+/** @} */
+#endif // PX_FOUNDATION_PX_FOUNDATION_H
diff --git a/PxShared/include/foundation/PxFoundationVersion.h b/PxShared/include/foundation/PxFoundationVersion.h
new file mode 100644
index 00000000..7766142d
--- /dev/null
+++ b/PxShared/include/foundation/PxFoundationVersion.h
@@ -0,0 +1,65 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2016 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#ifndef PX_FOUNDATION_VERSION_NUMBER_H
+#define PX_FOUNDATION_VERSION_NUMBER_H
+
+/*
+VersionNumbers: The combination of these
+numbers uniquely identifies the API, and should
+be incremented when the SDK API changes. This may
+include changes to file formats.
+
+This header is included in the main SDK header files
+so that the entire SDK and everything that builds on it
+is completely rebuilt when this file changes. Thus,
+this file is not to include a frequently changing
+build number. See BuildNumber.h for that.
+
+Each of these three values should stay below 255 because
+sometimes they are stored in a byte.
+*/
+/** \addtogroup foundation
+ @{
+*/
+
+#define PX_FOUNDATION_VERSION_MAJOR 1
+#define PX_FOUNDATION_VERSION_MINOR 0
+#define PX_FOUNDATION_VERSION_BUGFIX 0
+
+/**
+The constant PX_FOUNDATION_VERSION is used when creating certain PhysX module objects.
+This is to ensure that the application is using the same header version as the library was built with.
+*/
+#define PX_FOUNDATION_VERSION \
+ ((PX_FOUNDATION_VERSION_MAJOR << 24) + (PX_FOUNDATION_VERSION_MINOR << 16) + (PX_FOUNDATION_VERSION_BUGFIX << 8) + 0)
+
+#endif // PX_FOUNDATION_VERSION_NUMBER_H
+
+/** @} */
diff --git a/PxShared/include/foundation/PxIO.h b/PxShared/include/foundation/PxIO.h
new file mode 100644
index 00000000..d7b75749
--- /dev/null
+++ b/PxShared/include/foundation/PxIO.h
@@ -0,0 +1,138 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2016 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#ifndef PXFOUNDATION_PXIO_H
+#define PXFOUNDATION_PXIO_H
+
+/** \addtogroup common
+ @{
+*/
+
+#include "foundation/PxSimpleTypes.h"
+
+#if !PX_DOXYGEN
+namespace physx
+{
+#endif
+
+/**
+\brief Input stream class for I/O.
+
+The user needs to supply a PxInputStream implementation to a number of methods to allow the SDK to read data.
+*/
+
+class PxInputStream
+{
+ public:
+ /**
+ \brief read from the stream. The number of bytes read may be less than the number requested.
+
+ \param[in] dest the destination address to which the data will be read
+ \param[in] count the number of bytes requested
+
+ \return the number of bytes read from the stream.
+ */
+
+ virtual uint32_t read(void* dest, uint32_t count) = 0;
+
+ virtual ~PxInputStream() // virtual destructor: implementations may be deleted through a PxInputStream pointer
+ {
+ }
+};
+
+/**
+\brief Input data class for I/O which provides random read access.
+
+The user needs to supply a PxInputData implementation to a number of methods to allow the SDK to read data.
+*/
+
+class PxInputData : public PxInputStream
+{
+ public:
+ /**
+ \brief return the length of the input data
+
+ \return size in bytes of the input data
+ */
+
+ virtual uint32_t getLength() const = 0;
+
+ /**
+ \brief seek to the given offset from the start of the data.
+
+ \param[in] offset the offset to seek to. If greater than the length of the data, this call is equivalent to
+ seek(length);
+ */
+
+ virtual void seek(uint32_t offset) = 0;
+
+ /**
+ \brief return the current offset from the start of the data
+
+ \return the current offset in bytes from the start of the data.
+ */
+
+ virtual uint32_t tell() const = 0;
+
+ virtual ~PxInputData() // virtual destructor: implementations may be deleted through a PxInputData pointer
+ {
+ }
+};
+
+/**
+\brief Output stream class for I/O.
+
+The user needs to supply a PxOutputStream implementation to a number of methods to allow the SDK to write data.
+*/
+
+class PxOutputStream
+{
+ public:
+ /**
+ \brief write to the stream. The number of bytes written may be less than the number sent.
+
+ \param[in] src the source address from which the data will be written
+ \param[in] count the number of bytes to be written
+
+ \return the number of bytes written to the stream by this call.
+ */
+
+ virtual uint32_t write(const void* src, uint32_t count) = 0;
+
+ virtual ~PxOutputStream() // virtual destructor: implementations may be deleted through a PxOutputStream pointer
+ {
+ }
+};
+
+#if !PX_DOXYGEN
+} // namespace physx
+#endif
+
+/** @} */
+#endif // #ifndef PXFOUNDATION_PXIO_H
diff --git a/PxShared/include/foundation/PxIntrinsics.h b/PxShared/include/foundation/PxIntrinsics.h
new file mode 100644
index 00000000..ad354850
--- /dev/null
+++ b/PxShared/include/foundation/PxIntrinsics.h
@@ -0,0 +1,45 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2016 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#ifndef PXFOUNDATION_PXINTRINSICS_H
+#define PXFOUNDATION_PXINTRINSICS_H
+
+#include "foundation/PxPreprocessor.h"
+
+#if PX_WINDOWS_FAMILY
+#include "foundation/windows/PxWindowsIntrinsics.h"
+#elif(PX_LINUX || PX_ANDROID || PX_APPLE_FAMILY || PX_PS4)
+#include "foundation/unix/PxUnixIntrinsics.h"
+#elif PX_XBOXONE
+#include "foundation/XboxOne/PxXboxOneIntrinsics.h"
+#else
+#error "Platform not supported!"
+#endif
+
+#endif // #ifndef PXFOUNDATION_PXINTRINSICS_H
diff --git a/PxShared/include/foundation/PxMat33.h b/PxShared/include/foundation/PxMat33.h
new file mode 100644
index 00000000..53720bba
--- /dev/null
+++ b/PxShared/include/foundation/PxMat33.h
@@ -0,0 +1,396 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2016 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#ifndef PXFOUNDATION_PXMAT33_H
+#define PXFOUNDATION_PXMAT33_H
+/** \addtogroup foundation
+@{
+*/
+
+#include "foundation/PxVec3.h"
+#include "foundation/PxQuat.h"
+
+#if !PX_DOXYGEN
+namespace physx
+{
+#endif
+/*!
+\brief 3x3 matrix class
+
+Some clarifications, as there have been much confusion about matrix formats etc in the past.
+
+Short:
+- Matrix have base vectors in columns (vectors are column matrices, 3x1 matrices).
+- Matrix is physically stored in column major format
+- Matrices are concatenated from the left
+
+Long:
+Given three base vectors a, b and c the matrix is stored as
+
+|a.x b.x c.x|
+|a.y b.y c.y|
+|a.z b.z c.z|
+
+Vectors are treated as columns, so the vector v is
+
+|x|
+|y|
+|z|
+
+And matrices are applied _before_ the vector (pre-multiplication)
+v' = M*v
+
+|x'| |a.x b.x c.x| |x| |a.x*x + b.x*y + c.x*z|
+|y'| = |a.y b.y c.y| * |y| = |a.y*x + b.y*y + c.y*z|
+|z'| |a.z b.z c.z| |z| |a.z*x + b.z*y + c.z*z|
+
+
+Physical storage and indexing:
+To be compatible with popular 3d rendering APIs (read D3d and OpenGL)
+the physical indexing is
+
+|0 3 6|
+|1 4 7|
+|2 5 8|
+
+index = column*3 + row
+
+which in C++ translates to M[column][row]
+
+The mathematical indexing is M_row,column and this is what is used for _-notation
+so _12 is 1st row, second column and operator(row, column)!
+
+*/
+class PxMat33
+{
+ public:
+ //! Default constructor; empty body, so the columns are not initialized here (PxVec3 default behavior applies)
+ PX_CUDA_CALLABLE PX_FORCE_INLINE PxMat33()
+ {
+ }
+
+ //! identity constructor
+ PX_CUDA_CALLABLE PX_INLINE PxMat33(PxIDENTITY r)
+ : column0(1.0f, 0.0f, 0.0f), column1(0.0f, 1.0f, 0.0f), column2(0.0f, 0.0f, 1.0f)
+ {
+ PX_UNUSED(r);
+ }
+
+ //! zero constructor
+ PX_CUDA_CALLABLE PX_INLINE PxMat33(PxZERO r) : column0(0.0f), column1(0.0f), column2(0.0f)
+ {
+ PX_UNUSED(r);
+ }
+
+ //! Construct from three base vectors
+ PX_CUDA_CALLABLE PxMat33(const PxVec3& col0, const PxVec3& col1, const PxVec3& col2)
+ : column0(col0), column1(col1), column2(col2)
+ {
+ }
+
+ //! constructor from a scalar, which generates a multiple of the identity matrix
+ explicit PX_CUDA_CALLABLE PX_INLINE PxMat33(float r)
+ : column0(r, 0.0f, 0.0f), column1(0.0f, r, 0.0f), column2(0.0f, 0.0f, r)
+ {
+ }
+
+ //! Construct from float[9]; values are consumed in column-major order (see class comment)
+ explicit PX_CUDA_CALLABLE PX_INLINE PxMat33(float values[])
+ : column0(values[0], values[1], values[2])
+ , column1(values[3], values[4], values[5])
+ , column2(values[6], values[7], values[8])
+ {
+ }
+
+ //! Construct from a quaternion; standard quaternion-to-rotation-matrix expansion (assumes q is unit length — TODO confirm caller contract)
+ explicit PX_CUDA_CALLABLE PX_FORCE_INLINE PxMat33(const PxQuat& q)
+ {
+ const float x = q.x;
+ const float y = q.y;
+ const float z = q.z;
+ const float w = q.w;
+
+ const float x2 = x + x;
+ const float y2 = y + y;
+ const float z2 = z + z;
+
+ const float xx = x2 * x;
+ const float yy = y2 * y;
+ const float zz = z2 * z;
+
+ const float xy = x2 * y;
+ const float xz = x2 * z;
+ const float xw = x2 * w;
+
+ const float yz = y2 * z;
+ const float yw = y2 * w;
+ const float zw = z2 * w;
+
+ column0 = PxVec3(1.0f - yy - zz, xy + zw, xz - yw);
+ column1 = PxVec3(xy - zw, 1.0f - xx - zz, yz + xw);
+ column2 = PxVec3(xz + yw, yz - xw, 1.0f - xx - yy);
+ }
+
+ //! Copy constructor
+ PX_CUDA_CALLABLE PX_INLINE PxMat33(const PxMat33& other)
+ : column0(other.column0), column1(other.column1), column2(other.column2)
+ {
+ }
+
+ //! Assignment operator
+ PX_CUDA_CALLABLE PX_FORCE_INLINE PxMat33& operator=(const PxMat33& other)
+ {
+ column0 = other.column0;
+ column1 = other.column1;
+ column2 = other.column2;
+ return *this;
+ }
+
+ //! Construct from diagonal, off-diagonals are zero.
+ PX_CUDA_CALLABLE PX_INLINE static const PxMat33 createDiagonal(const PxVec3& d)
+ {
+ return PxMat33(PxVec3(d.x, 0.0f, 0.0f), PxVec3(0.0f, d.y, 0.0f), PxVec3(0.0f, 0.0f, d.z));
+ }
+
+ /**
+ \brief returns true if the two matrices are exactly equal
+ */
+ PX_CUDA_CALLABLE PX_INLINE bool operator==(const PxMat33& m) const // exact float comparison, no epsilon
+ {
+ return column0 == m.column0 && column1 == m.column1 && column2 == m.column2;
+ }
+
+ //! Get transposed matrix
+ PX_CUDA_CALLABLE PX_FORCE_INLINE const PxMat33 getTranspose() const
+ {
+ const PxVec3 v0(column0.x, column1.x, column2.x);
+ const PxVec3 v1(column0.y, column1.y, column2.y);
+ const PxVec3 v2(column0.z, column1.z, column2.z);
+
+ return PxMat33(v0, v1, v2);
+ }
+
+ //! Get the real inverse (cofactor expansion divided by the determinant)
+ PX_CUDA_CALLABLE PX_INLINE const PxMat33 getInverse() const
+ {
+ const float det = getDeterminant();
+ PxMat33 inverse;
+
+ if(det != 0) // exact zero test; near-singular matrices still invert, possibly with large entries
+ {
+ const float invDet = 1.0f / det;
+
+ inverse.column0.x = invDet * (column1.y * column2.z - column2.y * column1.z);
+ inverse.column0.y = invDet * -(column0.y * column2.z - column2.y * column0.z);
+ inverse.column0.z = invDet * (column0.y * column1.z - column0.z * column1.y);
+
+ inverse.column1.x = invDet * -(column1.x * column2.z - column1.z * column2.x);
+ inverse.column1.y = invDet * (column0.x * column2.z - column0.z * column2.x);
+ inverse.column1.z = invDet * -(column0.x * column1.z - column0.z * column1.x);
+
+ inverse.column2.x = invDet * (column1.x * column2.y - column1.y * column2.x);
+ inverse.column2.y = invDet * -(column0.x * column2.y - column0.y * column2.x);
+ inverse.column2.z = invDet * (column0.x * column1.y - column1.x * column0.y);
+
+ return inverse;
+ }
+ else
+ {
+ return PxMat33(PxIdentity); // singular matrix: fall back to identity rather than dividing by zero
+ }
+ }
+
+ //! Get determinant (scalar triple product of the three columns)
+ PX_CUDA_CALLABLE PX_INLINE float getDeterminant() const
+ {
+ return column0.dot(column1.cross(column2));
+ }
+
+ //! Unary minus
+ PX_CUDA_CALLABLE PX_INLINE const PxMat33 operator-() const
+ {
+ return PxMat33(-column0, -column1, -column2);
+ }
+
+ //! Add
+ PX_CUDA_CALLABLE PX_INLINE const PxMat33 operator+(const PxMat33& other) const
+ {
+ return PxMat33(column0 + other.column0, column1 + other.column1, column2 + other.column2);
+ }
+
+ //! Subtract
+ PX_CUDA_CALLABLE PX_INLINE const PxMat33 operator-(const PxMat33& other) const
+ {
+ return PxMat33(column0 - other.column0, column1 - other.column1, column2 - other.column2);
+ }
+
+ //! Scalar multiplication
+ PX_CUDA_CALLABLE PX_INLINE const PxMat33 operator*(float scalar) const
+ {
+ return PxMat33(column0 * scalar, column1 * scalar, column2 * scalar);
+ }
+
+ friend PxMat33 operator*(float, const PxMat33&); // scalar-on-the-left form, defined elsewhere
+
+ //! Matrix vector multiplication (returns 'this->transform(vec)')
+ PX_CUDA_CALLABLE PX_INLINE const PxVec3 operator*(const PxVec3& vec) const
+ {
+ return transform(vec);
+ }
+
+ // a <op>= b operators
+
+ //! Matrix multiplication
+ PX_CUDA_CALLABLE PX_FORCE_INLINE const PxMat33 operator*(const PxMat33& other) const
+ {
+ // Rows from this <dot> columns from other
+ // column0 = transform(other.column0) etc
+ return PxMat33(transform(other.column0), transform(other.column1), transform(other.column2));
+ }
+
+ //! Equals-add
+ PX_CUDA_CALLABLE PX_INLINE PxMat33& operator+=(const PxMat33& other)
+ {
+ column0 += other.column0;
+ column1 += other.column1;
+ column2 += other.column2;
+ return *this;
+ }
+
+ //! Equals-sub
+ PX_CUDA_CALLABLE PX_INLINE PxMat33& operator-=(const PxMat33& other)
+ {
+ column0 -= other.column0;
+ column1 -= other.column1;
+ column2 -= other.column2;
+ return *this;
+ }
+
+ //! Equals scalar multiplication
+ PX_CUDA_CALLABLE PX_INLINE PxMat33& operator*=(float scalar)
+ {
+ column0 *= scalar;
+ column1 *= scalar;
+ column2 *= scalar;
+ return *this;
+ }
+
+ //! Equals matrix multiplication
+ PX_CUDA_CALLABLE PX_INLINE PxMat33& operator*=(const PxMat33& other)
+ {
+ *this = *this * other;
+ return *this;
+ }
+
+ //! Element access, mathematical way!
+ PX_DEPRECATED PX_CUDA_CALLABLE PX_FORCE_INLINE float operator()(unsigned int row, unsigned int col) const
+ {
+ return (*this)[col][row]; // storage is column-major: (row, col) maps to [col][row]
+ }
+
+ //! Element access, mathematical way!
+ PX_DEPRECATED PX_CUDA_CALLABLE PX_FORCE_INLINE float& operator()(unsigned int row, unsigned int col)
+ {
+ return (*this)[col][row]; // storage is column-major: (row, col) maps to [col][row]
+ }
+
+ // Transform etc
+
+ //! Transform vector by matrix, equal to v' = M*v
+ PX_CUDA_CALLABLE PX_FORCE_INLINE const PxVec3 transform(const PxVec3& other) const
+ {
+ return column0 * other.x + column1 * other.y + column2 * other.z;
+ }
+
+ //! Transform vector by matrix transpose, v' = M^t*v
+ PX_CUDA_CALLABLE PX_INLINE const PxVec3 transformTranspose(const PxVec3& other) const
+ {
+ return PxVec3(column0.dot(other), column1.dot(other), column2.dot(other));
+ }
+
+ //! Raw float pointer to the first element; assumes the three PxVec3 columns are laid out contiguously without padding
+ PX_CUDA_CALLABLE PX_FORCE_INLINE const float* front() const
+ {
+ return &column0.x;
+ }
+
+ //! Column access by index (0..2); relies on column0..column2 being consecutive members
+ PX_DEPRECATED PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3& operator[](unsigned int num)
+ {
+ return (&column0)[num];
+ }
+ PX_DEPRECATED PX_CUDA_CALLABLE PX_FORCE_INLINE const PxVec3& operator[](unsigned int num) const
+ {
+ return (&column0)[num];
+ }
+
+ // Data, see above for format!
+
+ PxVec3 column0, column1, column2; // the three base vectors
+};
+
+// implementation from PxQuat.h
+PX_CUDA_CALLABLE PX_INLINE PxQuat::PxQuat(const PxMat33& m) // rotation matrix to quaternion conversion
+{
+ if(m.column2.z < 0) // branch on diagonal signs so that t below stays positive and well away from zero
+ {
+ if(m.column0.x > m.column1.y)
+ {
+ float t = 1 + m.column0.x - m.column1.y - m.column2.z; // t = 4*x^2 for the dominant x component
+ *this = PxQuat(t, m.column0.y + m.column1.x, m.column2.x + m.column0.z, m.column1.z - m.column2.y) *
+ (0.5f / PxSqrt(t));
+ }
+ else
+ {
+ float t = 1 - m.column0.x + m.column1.y - m.column2.z; // dominant y component
+ *this = PxQuat(m.column0.y + m.column1.x, t, m.column1.z + m.column2.y, m.column2.x - m.column0.z) *
+ (0.5f / PxSqrt(t));
+ }
+ }
+ else
+ {
+ if(m.column0.x < -m.column1.y)
+ {
+ float t = 1 - m.column0.x - m.column1.y + m.column2.z; // dominant z component
+ *this = PxQuat(m.column2.x + m.column0.z, m.column1.z + m.column2.y, t, m.column0.y - m.column1.x) *
+ (0.5f / PxSqrt(t));
+ }
+ else
+ {
+ float t = 1 + m.column0.x + m.column1.y + m.column2.z; // dominant w component (t = 1 + trace)
+ *this = PxQuat(m.column1.z - m.column2.y, m.column2.x - m.column0.z, m.column0.y - m.column1.x, t) *
+ (0.5f / PxSqrt(t));
+ }
+ }
+}
+
+#if !PX_DOXYGEN
+} // namespace physx
+#endif
+
+/** @} */
+#endif // #ifndef PXFOUNDATION_PXMAT33_H
diff --git a/PxShared/include/foundation/PxMat44.h b/PxShared/include/foundation/PxMat44.h
new file mode 100644
index 00000000..f6445468
--- /dev/null
+++ b/PxShared/include/foundation/PxMat44.h
@@ -0,0 +1,376 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2016 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#ifndef PXFOUNDATION_PXMAT44_H
+#define PXFOUNDATION_PXMAT44_H
+/** \addtogroup foundation
+@{
+*/
+
+#include "foundation/PxQuat.h"
+#include "foundation/PxVec4.h"
+#include "foundation/PxMat33.h"
+#include "foundation/PxTransform.h"
+
+#if !PX_DOXYGEN
+namespace physx
+{
+#endif
+
+/*!
+\brief 4x4 matrix class
+
+This class is layout-compatible with D3D and OpenGL matrices. More notes on layout are given in the PxMat33
+
+@see PxMat33 PxTransform
+*/
+
+class PxMat44
+{
+  public:
+	//! Default constructor; leaves all elements uninitialized.
+	PX_CUDA_CALLABLE PX_INLINE PxMat44()
+	{
+	}
+
+	//! identity constructor
+	PX_CUDA_CALLABLE PX_INLINE PxMat44(PxIDENTITY r)
+	: column0(1.0f, 0.0f, 0.0f, 0.0f)
+	, column1(0.0f, 1.0f, 0.0f, 0.0f)
+	, column2(0.0f, 0.0f, 1.0f, 0.0f)
+	, column3(0.0f, 0.0f, 0.0f, 1.0f)
+	{
+		PX_UNUSED(r);
+	}
+
+	//! zero constructor
+	PX_CUDA_CALLABLE PX_INLINE PxMat44(PxZERO r) : column0(PxZero), column1(PxZero), column2(PxZero), column3(PxZero)
+	{
+		PX_UNUSED(r);
+	}
+
+	//! Construct from four 4-vectors
+	PX_CUDA_CALLABLE PxMat44(const PxVec4& col0, const PxVec4& col1, const PxVec4& col2, const PxVec4& col3)
+	: column0(col0), column1(col1), column2(col2), column3(col3)
+	{
+	}
+
+	//! constructor that generates a multiple of the identity matrix
+	explicit PX_CUDA_CALLABLE PX_INLINE PxMat44(float r)
+	: column0(r, 0.0f, 0.0f, 0.0f)
+	, column1(0.0f, r, 0.0f, 0.0f)
+	, column2(0.0f, 0.0f, r, 0.0f)
+	, column3(0.0f, 0.0f, 0.0f, r)
+	{
+	}
+
+	//! Construct from three base vectors and a translation.
+	//! The w components are set to 0 for the basis columns and 1 for the
+	//! translation column, giving a standard affine transform.
+	PX_CUDA_CALLABLE PxMat44(const PxVec3& col0, const PxVec3& col1, const PxVec3& col2, const PxVec3& col3)
+	: column0(col0, 0), column1(col1, 0), column2(col2, 0), column3(col3, 1.0f)
+	{
+	}
+
+	//! Construct from float[16]; values are consumed in column-major order
+	//! (values[0..3] become column0, etc.), matching D3D/OpenGL layout.
+	explicit PX_CUDA_CALLABLE PX_INLINE PxMat44(float values[])
+	: column0(values[0], values[1], values[2], values[3])
+	, column1(values[4], values[5], values[6], values[7])
+	, column2(values[8], values[9], values[10], values[11])
+	, column3(values[12], values[13], values[14], values[15])
+	{
+	}
+
+	//! Construct a rotation matrix from a quaternion.
+	//! NOTE(review): q is presumably expected to be a unit quaternion — not checked here.
+	explicit PX_CUDA_CALLABLE PX_INLINE PxMat44(const PxQuat& q)
+	{
+		const float x = q.x;
+		const float y = q.y;
+		const float z = q.z;
+		const float w = q.w;
+
+		// standard expansion of R(q): precompute the doubled products
+		const float x2 = x + x;
+		const float y2 = y + y;
+		const float z2 = z + z;
+
+		const float xx = x2 * x;
+		const float yy = y2 * y;
+		const float zz = z2 * z;
+
+		const float xy = x2 * y;
+		const float xz = x2 * z;
+		const float xw = x2 * w;
+
+		const float yz = y2 * z;
+		const float yw = y2 * w;
+		const float zw = z2 * w;
+
+		column0 = PxVec4(1.0f - yy - zz, xy + zw, xz - yw, 0.0f);
+		column1 = PxVec4(xy - zw, 1.0f - xx - zz, yz + xw, 0.0f);
+		column2 = PxVec4(xz + yw, yz - xw, 1.0f - xx - yy, 0.0f);
+		column3 = PxVec4(0.0f, 0.0f, 0.0f, 1.0f);
+	}
+
+	//! Construct from a diagonal vector
+	explicit PX_CUDA_CALLABLE PX_INLINE PxMat44(const PxVec4& diagonal)
+	: column0(diagonal.x, 0.0f, 0.0f, 0.0f)
+	, column1(0.0f, diagonal.y, 0.0f, 0.0f)
+	, column2(0.0f, 0.0f, diagonal.z, 0.0f)
+	, column3(0.0f, 0.0f, 0.0f, diagonal.w)
+	{
+	}
+
+	//! Construct an affine transform from a 3x3 rotation/scale part and a translation
+	PX_CUDA_CALLABLE PxMat44(const PxMat33& axes, const PxVec3& position)
+	: column0(axes.column0, 0.0f), column1(axes.column1, 0.0f), column2(axes.column2, 0.0f), column3(position, 1.0f)
+	{
+	}
+
+	//! Construct from a rigid transform (quaternion rotation + translation)
+	PX_CUDA_CALLABLE PxMat44(const PxTransform& t)
+	{
+		*this = PxMat44(PxMat33(t.q), t.p);
+	}
+
+	/**
+	\brief returns true if the two matrices are exactly equal
+	(element-wise float comparison; no epsilon)
+	*/
+	PX_CUDA_CALLABLE PX_INLINE bool operator==(const PxMat44& m) const
+	{
+		return column0 == m.column0 && column1 == m.column1 && column2 == m.column2 && column3 == m.column3;
+	}
+
+	//! Copy constructor
+	PX_CUDA_CALLABLE PX_INLINE PxMat44(const PxMat44& other)
+	: column0(other.column0), column1(other.column1), column2(other.column2), column3(other.column3)
+	{
+	}
+
+	//! Assignment operator
+	PX_CUDA_CALLABLE PX_INLINE PxMat44& operator=(const PxMat44& other)
+	{
+		column0 = other.column0;
+		column1 = other.column1;
+		column2 = other.column2;
+		column3 = other.column3;
+		return *this;
+	}
+
+	//! Get transposed matrix (returns a new matrix; *this is unchanged)
+	PX_CUDA_CALLABLE PX_INLINE const PxMat44 getTranspose() const
+	{
+		return PxMat44(
+		    PxVec4(column0.x, column1.x, column2.x, column3.x), PxVec4(column0.y, column1.y, column2.y, column3.y),
+		    PxVec4(column0.z, column1.z, column2.z, column3.z), PxVec4(column0.w, column1.w, column2.w, column3.w));
+	}
+
+	//! Unary minus
+	PX_CUDA_CALLABLE PX_INLINE const PxMat44 operator-() const
+	{
+		return PxMat44(-column0, -column1, -column2, -column3);
+	}
+
+	//! Add
+	PX_CUDA_CALLABLE PX_INLINE const PxMat44 operator+(const PxMat44& other) const
+	{
+		return PxMat44(column0 + other.column0, column1 + other.column1, column2 + other.column2,
+		               column3 + other.column3);
+	}
+
+	//! Subtract
+	PX_CUDA_CALLABLE PX_INLINE const PxMat44 operator-(const PxMat44& other) const
+	{
+		return PxMat44(column0 - other.column0, column1 - other.column1, column2 - other.column2,
+		               column3 - other.column3);
+	}
+
+	//! Scalar multiplication
+	PX_CUDA_CALLABLE PX_INLINE const PxMat44 operator*(float scalar) const
+	{
+		return PxMat44(column0 * scalar, column1 * scalar, column2 * scalar, column3 * scalar);
+	}
+
+	friend PxMat44 operator*(float, const PxMat44&);
+
+	//! Matrix multiplication, this * other
+	PX_CUDA_CALLABLE PX_INLINE const PxMat44 operator*(const PxMat44& other) const
+	{
+		// Rows from this <dot> columns from other
+		// column0 = transform(other.column0) etc
+		return PxMat44(transform(other.column0), transform(other.column1), transform(other.column2),
+		               transform(other.column3));
+	}
+
+	// a <op>= b operators
+
+	//! Equals-add
+	PX_CUDA_CALLABLE PX_INLINE PxMat44& operator+=(const PxMat44& other)
+	{
+		column0 += other.column0;
+		column1 += other.column1;
+		column2 += other.column2;
+		column3 += other.column3;
+		return *this;
+	}
+
+	//! Equals-sub
+	PX_CUDA_CALLABLE PX_INLINE PxMat44& operator-=(const PxMat44& other)
+	{
+		column0 -= other.column0;
+		column1 -= other.column1;
+		column2 -= other.column2;
+		column3 -= other.column3;
+		return *this;
+	}
+
+	//! Equals scalar multiplication
+	PX_CUDA_CALLABLE PX_INLINE PxMat44& operator*=(float scalar)
+	{
+		column0 *= scalar;
+		column1 *= scalar;
+		column2 *= scalar;
+		column3 *= scalar;
+		return *this;
+	}
+
+	//! Equals matrix multiplication
+	PX_CUDA_CALLABLE PX_INLINE PxMat44& operator*=(const PxMat44& other)
+	{
+		*this = *this * other;
+		return *this;
+	}
+
+	//! Element access, mathematical way! Note (row, col) order: the storage is
+	//! column-major, so this indexes column `col` then component `row`.
+	PX_DEPRECATED PX_CUDA_CALLABLE PX_FORCE_INLINE float operator()(unsigned int row, unsigned int col) const
+	{
+		return (*this)[col][row];
+	}
+
+	//! Element access, mathematical way! (mutable overload, see above)
+	PX_DEPRECATED PX_CUDA_CALLABLE PX_FORCE_INLINE float& operator()(unsigned int row, unsigned int col)
+	{
+		return (*this)[col][row];
+	}
+
+	//! Transform vector by matrix, equal to v' = M*v
+	PX_CUDA_CALLABLE PX_INLINE const PxVec4 transform(const PxVec4& other) const
+	{
+		return column0 * other.x + column1 * other.y + column2 * other.z + column3 * other.w;
+	}
+
+	//! Transform point by matrix, equal to v' = M*v with w = 1 (translation applied)
+	PX_CUDA_CALLABLE PX_INLINE const PxVec3 transform(const PxVec3& other) const
+	{
+		return transform(PxVec4(other, 1.0f)).getXYZ();
+	}
+
+	//! Rotate vector by matrix, equal to v' = M*v ignoring the translation column
+	PX_CUDA_CALLABLE PX_INLINE const PxVec4 rotate(const PxVec4& other) const
+	{
+		return column0 * other.x + column1 * other.y + column2 * other.z; // + column3*0;
+	}
+
+	//! Rotate vector by matrix, equal to v' = M*v (w is irrelevant here since
+	//! rotate(PxVec4) never reads column3)
+	PX_CUDA_CALLABLE PX_INLINE const PxVec3 rotate(const PxVec3& other) const
+	{
+		return rotate(PxVec4(other, 1.0f)).getXYZ();
+	}
+
+	//! Returns basis vector `num` (xyz of columns 0..2); asserts in debug on out-of-range index
+	PX_CUDA_CALLABLE PX_INLINE const PxVec3 getBasis(int num) const
+	{
+		PX_ASSERT(num >= 0 && num < 3);
+		return (&column0)[num].getXYZ();
+	}
+
+	//! Returns the translation part (xyz of column3)
+	PX_CUDA_CALLABLE PX_INLINE const PxVec3 getPosition() const
+	{
+		return column3.getXYZ();
+	}
+
+	//! Sets the translation part; column3.w is left unchanged
+	PX_CUDA_CALLABLE PX_INLINE void setPosition(const PxVec3& position)
+	{
+		column3.x = position.x;
+		column3.y = position.y;
+		column3.z = position.z;
+	}
+
+	//! Pointer to the first element; 16 contiguous floats in column-major order
+	PX_CUDA_CALLABLE PX_FORCE_INLINE const float* front() const
+	{
+		return &column0.x;
+	}
+
+	//! Deprecated mutable column access; num must be 0..3 — no bounds check
+	PX_DEPRECATED PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec4& operator[](unsigned int num)
+	{
+		return (&column0)[num];
+	}
+	//! Deprecated const column access; num must be 0..3 — no bounds check
+	PX_DEPRECATED PX_CUDA_CALLABLE PX_FORCE_INLINE const PxVec4& operator[](unsigned int num) const
+	{
+		return (&column0)[num];
+	}
+
+	//! Scales each column by the corresponding component of p (in place)
+	PX_CUDA_CALLABLE PX_INLINE void scale(const PxVec4& p)
+	{
+		column0 *= p.x;
+		column1 *= p.y;
+		column2 *= p.z;
+		column3 *= p.w;
+	}
+
+	//! Inverse of a rotation+translation matrix: transposes the upper 3x3 and
+	//! back-rotates the negated translation.
+	//! NOTE(review): only correct when the upper 3x3 is orthonormal (pure rotation) — not checked.
+	PX_CUDA_CALLABLE PX_INLINE const PxMat44 inverseRT(void) const
+	{
+		PxVec3 r0(column0.x, column1.x, column2.x), r1(column0.y, column1.y, column2.y),
+		    r2(column0.z, column1.z, column2.z);
+
+		return PxMat44(r0, r1, r2, -(r0 * column3.x + r1 * column3.y + r2 * column3.z));
+	}
+
+	//! Returns true if every element is a finite float (no INF/NaN)
+	PX_CUDA_CALLABLE PX_INLINE bool isFinite() const
+	{
+		return column0.isFinite() && column1.isFinite() && column2.isFinite() && column3.isFinite();
+	}
+
+	// Data, see above for format!
+
+	PxVec4 column0, column1, column2, column3; // the four base vectors
+};
+
+// implementation from PxTransform.h
+// implementation from PxTransform.h
+// Builds a rigid transform from an affine matrix: the rotation is extracted
+// from the upper 3x3 via the PxMat33 -> PxQuat conversion, the position from
+// column3. NOTE(review): presumably assumes the upper 3x3 is a pure rotation
+// (no scale/shear) — nothing here normalizes or checks it.
+PX_CUDA_CALLABLE PX_FORCE_INLINE PxTransform::PxTransform(const PxMat44& m)
+{
+	PxVec3 column0 = PxVec3(m.column0.x, m.column0.y, m.column0.z);
+	PxVec3 column1 = PxVec3(m.column1.x, m.column1.y, m.column1.z);
+	PxVec3 column2 = PxVec3(m.column2.x, m.column2.y, m.column2.z);
+
+	q = PxQuat(PxMat33(column0, column1, column2));
+	p = PxVec3(m.column3.x, m.column3.y, m.column3.z);
+}
+
+#if !PX_DOXYGEN
+} // namespace physx
+#endif
+
+/** @} */
+#endif // #ifndef PXFOUNDATION_PXMAT44_H
diff --git a/PxShared/include/foundation/PxMath.h b/PxShared/include/foundation/PxMath.h
new file mode 100644
index 00000000..c3434dcc
--- /dev/null
+++ b/PxShared/include/foundation/PxMath.h
@@ -0,0 +1,338 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2016 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#ifndef PXFOUNDATION_PXMATH_H
+#define PXFOUNDATION_PXMATH_H
+
+/** \addtogroup foundation
+@{
+*/
+
+#include "foundation/PxPreprocessor.h"
+
+#if PX_VC
+#pragma warning(push)
+#pragma warning(disable : 4985) // 'symbol name': attributes not present on previous declaration
+#endif
+#include <math.h>
+#if PX_VC
+#pragma warning(pop)
+#endif
+
+#include <float.h>
+#include "foundation/PxIntrinsics.h"
+#include "foundation/PxAssert.h"
+
+#if !PX_DOXYGEN
+namespace physx
+{
+#endif
+
+// constants
+// NOTE(review): PxHalfPi and PxPiDivTwo hold the same value; both names appear
+// to be kept in the public API — confirm before consolidating.
+static const float PxPi = float(3.141592653589793);
+static const float PxHalfPi = float(1.57079632679489661923);
+static const float PxTwoPi = float(6.28318530717958647692);
+static const float PxInvPi = float(0.31830988618379067154);
+static const float PxInvTwoPi = float(0.15915494309189533577);
+static const float PxPiDivTwo = float(1.57079632679489661923);
+static const float PxPiDivFour = float(0.78539816339744830962);
+
+/**
+\brief The return value is the greater of the two specified values.
+(If the values compare equal, a is returned.)
+*/
+template <class T>
+PX_CUDA_CALLABLE PX_FORCE_INLINE T PxMax(T a, T b)
+{
+	return a < b ? b : a;
+}
+
+//! overload for float to use fsel on xbox (branchless select via intrinsics)
+template <>
+PX_CUDA_CALLABLE PX_FORCE_INLINE float PxMax(float a, float b)
+{
+	return intrinsics::selectMax(a, b);
+}
+
+/**
+\brief The return value is the lesser of the two specified values.
+(If the values compare equal, a is returned.)
+*/
+template <class T>
+PX_CUDA_CALLABLE PX_FORCE_INLINE T PxMin(T a, T b)
+{
+	return a < b ? a : b;
+}
+
+template <>
+//! overload for float to use fsel on xbox (branchless select via intrinsics)
+PX_CUDA_CALLABLE PX_FORCE_INLINE float PxMin(float a, float b)
+{
+	return intrinsics::selectMin(a, b);
+}
+
+/*
+Many of these are just implemented as PX_CUDA_CALLABLE PX_FORCE_INLINE calls to the C lib right now,
+but later we could replace some of them with some approximations or more
+clever stuff.
+*/
+
+/**
+\brief abs returns the absolute value of its argument.
+*/
+PX_CUDA_CALLABLE PX_FORCE_INLINE float PxAbs(float a)
+{
+	return intrinsics::abs(a);
+}
+
+//! Approximate equality: |a - b| < eps. Note the comparison is strict and the
+//! tolerance is absolute, not relative to the magnitudes of a and b.
+PX_CUDA_CALLABLE PX_FORCE_INLINE bool PxEquals(float a, float b, float eps)
+{
+	return (PxAbs(a - b) < eps);
+}
+
+/**
+\brief abs returns the absolute value of its argument.
+*/
+PX_CUDA_CALLABLE PX_FORCE_INLINE double PxAbs(double a)
+{
+	return ::fabs(a);
+}
+
+/**
+\brief abs returns the absolute value of its argument.
+*/
+PX_CUDA_CALLABLE PX_FORCE_INLINE int32_t PxAbs(int32_t a)
+{
+	return ::abs(a);
+}
+
+/**
+\brief Clamps v to the range [lo,hi]; asserts in debug if lo > hi
+*/
+template <class T>
+PX_CUDA_CALLABLE PX_FORCE_INLINE T PxClamp(T v, T lo, T hi)
+{
+	PX_ASSERT(lo <= hi);
+	return PxMin(hi, PxMax(lo, v));
+}
+
+//! \brief Square root.
+PX_CUDA_CALLABLE PX_FORCE_INLINE float PxSqrt(float a)
+{
+	return intrinsics::sqrt(a);
+}
+
+//! \brief Square root.
+PX_CUDA_CALLABLE PX_FORCE_INLINE double PxSqrt(double a)
+{
+	return ::sqrt(a);
+}
+
+//! \brief reciprocal square root, 1/sqrt(a). The float overload may use a
+//! platform approximation via intrinsics; the double overload is exact.
+PX_CUDA_CALLABLE PX_FORCE_INLINE float PxRecipSqrt(float a)
+{
+	return intrinsics::recipSqrt(a);
+}
+
+//! \brief reciprocal square root.
+PX_CUDA_CALLABLE PX_FORCE_INLINE double PxRecipSqrt(double a)
+{
+	return 1 / ::sqrt(a);
+}
+
+//! trigonometry -- all angles are in radians.
+
+//! \brief Sine of an angle ( <b>Unit:</b> Radians )
+PX_CUDA_CALLABLE PX_FORCE_INLINE float PxSin(float a)
+{
+	return intrinsics::sin(a);
+}
+
+//! \brief Sine of an angle ( <b>Unit:</b> Radians )
+PX_CUDA_CALLABLE PX_FORCE_INLINE double PxSin(double a)
+{
+	return ::sin(a);
+}
+
+//! \brief Cosine of an angle (<b>Unit:</b> Radians)
+PX_CUDA_CALLABLE PX_FORCE_INLINE float PxCos(float a)
+{
+	return intrinsics::cos(a);
+}
+
+//! \brief Cosine of an angle (<b>Unit:</b> Radians)
+PX_CUDA_CALLABLE PX_FORCE_INLINE double PxCos(double a)
+{
+	return ::cos(a);
+}
+
+/**
+\brief Tangent of an angle.
+<b>Unit:</b> Radians
+*/
+PX_CUDA_CALLABLE PX_FORCE_INLINE float PxTan(float a)
+{
+	return ::tanf(a);
+}
+
+/**
+\brief Tangent of an angle.
+<b>Unit:</b> Radians
+*/
+PX_CUDA_CALLABLE PX_FORCE_INLINE double PxTan(double a)
+{
+	return ::tan(a);
+}
+
+/**
+\brief Arcsine.
+Returns angle between -PI/2 and PI/2 in radians
+The input is clamped to [-1,1], so slightly out-of-range values
+(e.g. from rounding error in a dot product) do not produce NaN.
+<b>Unit:</b> Radians
+*/
+PX_CUDA_CALLABLE PX_FORCE_INLINE float PxAsin(float f)
+{
+	return ::asinf(PxClamp(f, -1.0f, 1.0f));
+}
+
+/**
+\brief Arcsine.
+Returns angle between -PI/2 and PI/2 in radians
+The input is clamped to [-1,1] (see float overload).
+<b>Unit:</b> Radians
+*/
+PX_CUDA_CALLABLE PX_FORCE_INLINE double PxAsin(double f)
+{
+	return ::asin(PxClamp(f, -1.0, 1.0));
+}
+
+/**
+\brief Arccosine.
+Returns angle between 0 and PI in radians
+The input is clamped to [-1,1], so slightly out-of-range values do not produce NaN.
+<b>Unit:</b> Radians
+*/
+PX_CUDA_CALLABLE PX_FORCE_INLINE float PxAcos(float f)
+{
+	return ::acosf(PxClamp(f, -1.0f, 1.0f));
+}
+
+/**
+\brief Arccosine.
+Returns angle between 0 and PI in radians
+The input is clamped to [-1,1] (see float overload).
+<b>Unit:</b> Radians
+*/
+PX_CUDA_CALLABLE PX_FORCE_INLINE double PxAcos(double f)
+{
+	return ::acos(PxClamp(f, -1.0, 1.0));
+}
+
+/**
+\brief ArcTangent.
+Returns angle between -PI/2 and PI/2 in radians
+<b>Unit:</b> Radians
+*/
+PX_CUDA_CALLABLE PX_FORCE_INLINE float PxAtan(float a)
+{
+	return ::atanf(a);
+}
+
+/**
+\brief ArcTangent.
+Returns angle between -PI/2 and PI/2 in radians
+<b>Unit:</b> Radians
+*/
+PX_CUDA_CALLABLE PX_FORCE_INLINE double PxAtan(double a)
+{
+	return ::atan(a);
+}
+
+/**
+\brief Arctangent of (x/y) with correct sign.
+Returns angle between -PI and PI in radians
+Note the argument order: x is the numerator here, i.e. this forwards to
+::atan2(x, y) — the opposite naming from the usual atan2(y, x) convention.
+<b>Unit:</b> Radians
+*/
+PX_CUDA_CALLABLE PX_FORCE_INLINE float PxAtan2(float x, float y)
+{
+	return ::atan2f(x, y);
+}
+
+/**
+\brief Arctangent of (x/y) with correct sign.
+Returns angle between -PI and PI in radians
+(see float overload for the argument-order note)
+<b>Unit:</b> Radians
+*/
+PX_CUDA_CALLABLE PX_FORCE_INLINE double PxAtan2(double x, double y)
+{
+	return ::atan2(x, y);
+}
+
+//! \brief returns true if the passed number is a finite floating point number as opposed to INF, NAN, etc.
+PX_CUDA_CALLABLE PX_FORCE_INLINE bool PxIsFinite(float f)
+{
+	return intrinsics::isFinite(f);
+}
+
+//! \brief returns true if the passed number is a finite floating point number as opposed to INF, NAN, etc.
+PX_CUDA_CALLABLE PX_FORCE_INLINE bool PxIsFinite(double f)
+{
+	return intrinsics::isFinite(f);
+}
+
+//! \brief largest integer value not greater than a
+PX_CUDA_CALLABLE PX_FORCE_INLINE float PxFloor(float a)
+{
+	return ::floorf(a);
+}
+
+//! \brief e raised to the power a
+PX_CUDA_CALLABLE PX_FORCE_INLINE float PxExp(float a)
+{
+	return ::expf(a);
+}
+
+//! \brief smallest integer value not less than a
+PX_CUDA_CALLABLE PX_FORCE_INLINE float PxCeil(float a)
+{
+	return ::ceilf(a);
+}
+
+//! \brief sign of a, as computed by the platform intrinsic
+PX_CUDA_CALLABLE PX_FORCE_INLINE float PxSign(float a)
+{
+	return physx::intrinsics::sign(a);
+}
+
+//! \brief x raised to the power y
+PX_CUDA_CALLABLE PX_FORCE_INLINE float PxPow(float x, float y)
+{
+	return ::powf(x, y);
+}
+
+//! \brief natural logarithm of x
+PX_CUDA_CALLABLE PX_FORCE_INLINE float PxLog(float x)
+{
+	return ::logf(x);
+}
+
+#if !PX_DOXYGEN
+} // namespace physx
+#endif
+
+/** @} */
+#endif // #ifndef PXFOUNDATION_PXMATH_H
diff --git a/PxShared/include/foundation/PxMathUtils.h b/PxShared/include/foundation/PxMathUtils.h
new file mode 100644
index 00000000..6897dad2
--- /dev/null
+++ b/PxShared/include/foundation/PxMathUtils.h
@@ -0,0 +1,73 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2016 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#ifndef PXFOUNDATION_PXMATHUTILS_H
+#define PXFOUNDATION_PXMATHUTILS_H
+
+/** \addtogroup common
+ @{
+*/
+
+#include "foundation/Px.h"
+
+#if !PX_DOXYGEN
+namespace physx
+{
+#endif
+
+/**
+\brief finds the shortest rotation between two vectors.
+
+\param[in] from the vector to start from
+\param[in] target the vector to rotate to
+\return a rotation about an axis normal to the two vectors which takes one to the other via the shortest path
+*/
+
+PX_FOUNDATION_API PxQuat PxShortestRotation(const PxVec3& from, const PxVec3& target);
+
+/* \brief diagonalizes a 3x3 symmetric matrix
+
+The returned matrix satisfies M = R * D * R', where R is the rotation matrix for the output quaternion, R' its
+transpose, and D the diagonal matrix
+
+If the matrix is not symmetric, the result is undefined.
+
+\param[in] m the matrix to diagonalize
+\param[out] axes a quaternion rotation which diagonalizes the matrix
+\return the vector diagonal of the diagonalized matrix.
+*/
+
+PX_FOUNDATION_API PxVec3 PxDiagonalize(const PxMat33& m, PxQuat& axes);
+
+#if !PX_DOXYGEN
+} // namespace physx
+#endif
+
+/** @} */
+#endif
diff --git a/PxShared/include/foundation/PxMemory.h b/PxShared/include/foundation/PxMemory.h
new file mode 100644
index 00000000..9bf108cc
--- /dev/null
+++ b/PxShared/include/foundation/PxMemory.h
@@ -0,0 +1,110 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2016 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#ifndef PXFOUNDATION_PXMEMORY_H
+#define PXFOUNDATION_PXMEMORY_H
+
+/** \addtogroup foundation
+@{
+*/
+
+#include "foundation/Px.h"
+#include "foundation/PxIntrinsics.h"
+#include "foundation/PxSimpleTypes.h"
+
+#if !PX_DOXYGEN
+namespace physx
+{
+#endif
+
+/**
+\brief Sets the bytes of the provided buffer to zero.
+Thin wrapper over the platform memory intrinsic; count is in bytes.
+
+\param dest Pointer to block of memory to set zero.
+\param count Number of bytes to set to zero.
+
+\return Pointer to memory block (same as input)
+*/
+PX_FORCE_INLINE void* PxMemZero(void* dest, PxU32 count)
+{
+	return physx::intrinsics::memZero(dest, count);
+}
+
+/**
+\brief Sets the bytes of the provided buffer to the specified value.
+Thin wrapper over the platform memory intrinsic; count is in bytes.
+
+\param dest Pointer to block of memory to set to the specified value.
+\param c Value to set the bytes of the block of memory to.
+\param count Number of bytes to set to the specified value.
+
+\return Pointer to memory block (same as input)
+*/
+PX_FORCE_INLINE void* PxMemSet(void* dest, PxI32 c, PxU32 count)
+{
+	return physx::intrinsics::memSet(dest, c, count);
+}
+
+/**
+\brief Copies the bytes of one memory block to another. The memory blocks must not overlap.
+Thin wrapper over the platform memory intrinsic; count is in bytes.
+
+\note Use #PxMemMove if memory blocks overlap.
+
+\param dest Pointer to block of memory to copy to.
+\param src Pointer to block of memory to copy from.
+\param count Number of bytes to copy.
+
+\return Pointer to destination memory block
+*/
+PX_FORCE_INLINE void* PxMemCopy(void* dest, const void* src, PxU32 count)
+{
+	return physx::intrinsics::memCopy(dest, src, count);
+}
+
+/**
+\brief Copies the bytes of one memory block to another. The memory blocks can overlap.
+Thin wrapper over the platform memory intrinsic; count is in bytes.
+
+\note Use #PxMemCopy if memory blocks do not overlap.
+
+\param dest Pointer to block of memory to copy to.
+\param src Pointer to block of memory to copy from.
+\param count Number of bytes to copy.
+
+\return Pointer to destination memory block
+*/
+PX_FORCE_INLINE void* PxMemMove(void* dest, const void* src, PxU32 count)
+{
+	return physx::intrinsics::memMove(dest, src, count);
+}
+
+#if !PX_DOXYGEN
+} // namespace physx
+#endif
+
+/** @} */
+#endif // PXFOUNDATION_PXMEMORY_H
diff --git a/PxShared/include/foundation/PxPlane.h b/PxShared/include/foundation/PxPlane.h
new file mode 100644
index 00000000..2b4671f7
--- /dev/null
+++ b/PxShared/include/foundation/PxPlane.h
@@ -0,0 +1,145 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2016 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#ifndef PXFOUNDATION_PXPLANE_H
+#define PXFOUNDATION_PXPLANE_H
+
+/** \addtogroup foundation
+@{
+*/
+
+#include "foundation/PxMath.h"
+#include "foundation/PxVec3.h"
+
+#if !PX_DOXYGEN
+namespace physx
+{
+#endif
+
+/**
+\brief Representation of a plane.
+
+ Plane equation used: n.dot(v) + d = 0
+*/
+class PxPlane
+{
+  public:
+	/**
+	\brief Constructor; leaves n and d uninitialized.
+	*/
+	PX_CUDA_CALLABLE PX_FORCE_INLINE PxPlane()
+	{
+	}
+
+	/**
+	\brief Constructor from a normal and a distance
+	*/
+	PX_CUDA_CALLABLE PX_FORCE_INLINE PxPlane(float nx, float ny, float nz, float distance) : n(nx, ny, nz), d(distance)
+	{
+	}
+
+	/**
+	\brief Constructor from a normal and a distance
+	*/
+	PX_CUDA_CALLABLE PX_FORCE_INLINE PxPlane(const PxVec3& normal, float distance) : n(normal), d(distance)
+	{
+	}
+
+	/**
+	\brief Constructor from a point on the plane and a normal
+	*/
+	PX_CUDA_CALLABLE PX_FORCE_INLINE PxPlane(const PxVec3& point, const PxVec3& normal)
+	: n(normal), d(-point.dot(n)) // p satisfies normal.dot(p) + d = 0
+	{
+	}
+
+	/**
+	\brief Constructor from three points.
+	The normal follows the winding order of p0, p1, p2 (cross of the two edges
+	from p0) and is normalized.
+	*/
+	PX_CUDA_CALLABLE PX_FORCE_INLINE PxPlane(const PxVec3& p0, const PxVec3& p1, const PxVec3& p2)
+	{
+		n = (p1 - p0).cross(p2 - p0).getNormalized();
+		d = -p0.dot(n);
+	}
+
+	/**
+	\brief returns true if the two planes are exactly equal
+	(element-wise float comparison; no epsilon)
+	*/
+	PX_CUDA_CALLABLE PX_INLINE bool operator==(const PxPlane& p) const
+	{
+		return n == p.n && d == p.d;
+	}
+
+	//! Signed distance from p to the plane; positive on the side the normal points to
+	PX_CUDA_CALLABLE PX_FORCE_INLINE float distance(const PxVec3& p) const
+	{
+		return p.dot(n) + d;
+	}
+
+	//! True if p lies on the plane within a fixed absolute tolerance of 1e-7
+	PX_CUDA_CALLABLE PX_FORCE_INLINE bool contains(const PxVec3& p) const
+	{
+		return PxAbs(distance(p)) < (1.0e-7f);
+	}
+
+	/**
+	\brief projects p into the plane
+	*/
+	PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3 project(const PxVec3& p) const
+	{
+		return p - n * distance(p);
+	}
+
+	/**
+	\brief find an arbitrary point in the plane
+	(the projection of the origin onto the plane)
+	*/
+	PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3 pointInPlane() const
+	{
+		return -n * d;
+	}
+
+	/**
+	\brief equivalent plane with unit normal
+	Scales both n and d by 1/|n|, so the plane equation is unchanged.
+	*/
+
+	PX_CUDA_CALLABLE PX_FORCE_INLINE void normalize()
+	{
+		float denom = 1.0f / n.magnitude();
+		n *= denom;
+		d *= denom;
+	}
+
+	PxVec3 n; //!< The normal to the plane
+	float d;  //!< The distance from the origin
+};
+
+#if !PX_DOXYGEN
+} // namespace physx
+#endif
+
+/** @} */
+#endif // #ifndef PXFOUNDATION_PXPLANE_H
diff --git a/PxShared/include/foundation/PxPreprocessor.h b/PxShared/include/foundation/PxPreprocessor.h
new file mode 100644
index 00000000..c763fb2c
--- /dev/null
+++ b/PxShared/include/foundation/PxPreprocessor.h
@@ -0,0 +1,529 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2016 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#ifndef PXFOUNDATION_PXPREPROCESSOR_H
+#define PXFOUNDATION_PXPREPROCESSOR_H
+
+#include <stddef.h>
+#if !(defined(__clang__) && (defined(_WIN32) || defined(_WIN64)))
+#include <ciso646> // detect std::lib, unless clang on windows is used (PxMetaDataGenerator issue)
+#endif
+/** \addtogroup foundation
+ @{
+*/
+
+/*
+The following preprocessor identifiers specify compiler, OS, and architecture.
+All definitions have a value of 1 or 0, use '#if' instead of '#ifdef'.
+*/
+
+/**
+Compiler defines, see http://sourceforge.net/p/predef/wiki/Compilers/
+*/
+#if defined(_MSC_VER)
+#if _MSC_VER >= 1900
+#define PX_VC 14
+#elif _MSC_VER >= 1800
+#define PX_VC 12
+#elif _MSC_VER >= 1700
+#define PX_VC 11
+#elif _MSC_VER >= 1600
+#define PX_VC 10
+#elif _MSC_VER >= 1500
+#define PX_VC 9
+#else
+#error "Unknown VC version"
+#endif
+#elif defined(__clang__)
+#define PX_CLANG 1
+#elif defined(__GNUC__) // note: __clang__ implies __GNUC__
+#define PX_GCC 1
+#else
+#error "Unknown compiler"
+#endif
+
+/**
+Operating system defines, see http://sourceforge.net/p/predef/wiki/OperatingSystems/
+*/
+#if defined(_XBOX_ONE)
+#define PX_XBOXONE 1
+#elif defined(_WIN64) // note: _XBOX_ONE implies _WIN64
+#define PX_WIN64 1
+#elif defined(_WIN32) // note: _M_PPC implies _WIN32
+#define PX_WIN32 1
+#elif defined(__ANDROID__)
+#define PX_ANDROID 1
+#elif defined(__linux__) || defined (__EMSCRIPTEN__) // note: __ANDROID__ implies __linux__
+#define PX_LINUX 1
+#elif defined(__APPLE__) && (defined(__arm__) || defined(__arm64__))
+#define PX_IOS 1
+#elif defined(__APPLE__)
+#define PX_OSX 1
+#elif defined(__ORBIS__)
+#define PX_PS4 1
+#else
+#error "Unknown operating system"
+#endif
+
+/**
+Architecture defines, see http://sourceforge.net/p/predef/wiki/Architectures/
+*/
+#if defined(__x86_64__) || defined(_M_X64) // ps4 compiler defines _M_X64 without value
+#define PX_X64 1
+#elif defined(__i386__) || defined(_M_IX86) || defined (__EMSCRIPTEN__)
+#define PX_X86 1
+#elif defined(__arm64__) || defined(__aarch64__)
+#define PX_A64 1
+#elif defined(__arm__) || defined(_M_ARM)
+#define PX_ARM 1
+#elif defined(__ppc__) || defined(_M_PPC) || defined(__CELLOS_LV2__)
+#define PX_PPC 1
+#else
+#error "Unknown architecture"
+#endif
+
+/**
+SIMD defines
+*/
+#if defined(__i386__) || defined(_M_IX86) || defined(__x86_64__) || defined(_M_X64) || defined (__EMSCRIPTEN__)
+#define PX_SSE2 1
+#endif
+#if defined(_M_ARM) || defined(__ARM_NEON__)
+#define PX_NEON 1
+#endif
+#if defined(_M_PPC) || defined(__CELLOS_LV2__)
+#define PX_VMX 1
+#endif
+
+/**
+define anything not defined on this platform to 0
+*/
+#ifndef PX_VC
+#define PX_VC 0
+#endif
+#ifndef PX_CLANG
+#define PX_CLANG 0
+#endif
+#ifndef PX_GCC
+#define PX_GCC 0
+#endif
+#ifndef PX_XBOXONE
+#define PX_XBOXONE 0
+#endif
+#ifndef PX_WIN64
+#define PX_WIN64 0
+#endif
+#ifndef PX_WIN32
+#define PX_WIN32 0
+#endif
+#ifndef PX_ANDROID
+#define PX_ANDROID 0
+#endif
+#ifndef PX_LINUX
+#define PX_LINUX 0
+#endif
+#ifndef PX_IOS
+#define PX_IOS 0
+#endif
+#ifndef PX_OSX
+#define PX_OSX 0
+#endif
+#ifndef PX_PS4
+#define PX_PS4 0
+#endif
+#ifndef PX_X64
+#define PX_X64 0
+#endif
+#ifndef PX_X86
+#define PX_X86 0
+#endif
+#ifndef PX_A64
+#define PX_A64 0
+#endif
+#ifndef PX_ARM
+#define PX_ARM 0
+#endif
+#ifndef PX_PPC
+#define PX_PPC 0
+#endif
+#ifndef PX_SSE2
+#define PX_SSE2 0
+#endif
+#ifndef PX_NEON
+#define PX_NEON 0
+#endif
+#ifndef PX_VMX
+#define PX_VMX 0
+#endif
+
+/*
+define anything not defined through the command line to 0
+*/
+#ifndef PX_DEBUG
+#define PX_DEBUG 0
+#endif
+#ifndef PX_CHECKED
+#define PX_CHECKED 0
+#endif
+#ifndef PX_PROFILE
+#define PX_PROFILE 0
+#endif
+#ifndef PX_NVTX
+#define PX_NVTX 0
+#endif
+#ifndef PX_DOXYGEN
+#define PX_DOXYGEN 0
+#endif
+
+/**
+family shortcuts
+*/
+// compiler
+#define PX_GCC_FAMILY (PX_CLANG || PX_GCC)
+// os
+#define PX_WINDOWS_FAMILY (PX_WIN32 || PX_WIN64)
+#define PX_MICROSOFT_FAMILY (PX_XBOXONE || PX_WINDOWS_FAMILY)
+#define PX_LINUX_FAMILY (PX_LINUX || PX_ANDROID)
+#define PX_APPLE_FAMILY (PX_IOS || PX_OSX) // equivalent to #if __APPLE__
+#define PX_UNIX_FAMILY (PX_LINUX_FAMILY || PX_APPLE_FAMILY) // shortcut for unix/posix platforms
+#if defined(__EMSCRIPTEN__)
+#define PX_EMSCRIPTEN 1
+#else
+#define PX_EMSCRIPTEN 0
+#endif
+// architecture
+#define PX_INTEL_FAMILY (PX_X64 || PX_X86)
+#define PX_ARM_FAMILY (PX_ARM || PX_A64)
+#define PX_P64_FAMILY (PX_X64 || PX_A64) // shortcut for 64-bit architectures
+
+/**
+C++ standard library defines
+*/
+#if defined(_LIBCPP_VERSION) || PX_WIN64 || PX_WIN32 || PX_PS4 || PX_XBOXONE || PX_EMSCRIPTEN
+#define PX_LIBCPP 1
+#else
+#define PX_LIBCPP 0
+#endif
+
+// legacy define for PhysX
+#define PX_WINDOWS (PX_WINDOWS_FAMILY && !PX_ARM_FAMILY)
+
+/**
+Assert macro
+*/
+#ifndef PX_ENABLE_ASSERTS
+#if PX_DEBUG && !defined(__CUDACC__)
+#define PX_ENABLE_ASSERTS 1
+#else
+#define PX_ENABLE_ASSERTS 0
+#endif
+#endif
+
+/**
+DLL export macros
+*/
+#ifndef PX_C_EXPORT
+#if PX_WINDOWS_FAMILY || PX_LINUX
+#define PX_C_EXPORT extern "C"
+#else
+#define PX_C_EXPORT
+#endif
+#endif
+
+#if PX_UNIX_FAMILY&& __GNUC__ >= 4
+#define PX_UNIX_EXPORT __attribute__((visibility("default")))
+#else
+#define PX_UNIX_EXPORT
+#endif
+
+#if PX_WINDOWS_FAMILY
+#define PX_DLL_EXPORT __declspec(dllexport)
+#define PX_DLL_IMPORT __declspec(dllimport)
+#else
+#define PX_DLL_EXPORT PX_UNIX_EXPORT
+#define PX_DLL_IMPORT
+#endif
+
+/**
+Define API function declaration
+
+PX_FOUNDATION_DLL=1 - used by the DLL library (PhysXCommon) to export the API
+PX_FOUNDATION_DLL=0 - for windows configurations where the PX_FOUNDATION_API is linked through standard static linking
+no definition - this will allow DLLs and libraries to use the exported API from PhysXCommon
+
+*/
+
+#if PX_WINDOWS_FAMILY && !PX_ARM_FAMILY
+#ifndef PX_FOUNDATION_DLL
+#define PX_FOUNDATION_API PX_DLL_IMPORT
+#elif PX_FOUNDATION_DLL
+#define PX_FOUNDATION_API PX_DLL_EXPORT
+#endif
+#elif PX_UNIX_FAMILY
+#ifdef PX_FOUNDATION_DLL
+#define PX_FOUNDATION_API PX_UNIX_EXPORT
+#endif
+#endif
+
+#ifndef PX_FOUNDATION_API
+#define PX_FOUNDATION_API
+#endif
+
+/**
+Calling convention
+*/
+#ifndef PX_CALL_CONV
+#if PX_MICROSOFT_FAMILY
+#define PX_CALL_CONV __cdecl
+#else
+#define PX_CALL_CONV
+#endif
+#endif
+
+/**
+Pack macros - disabled on SPU because they are not supported
+*/
+#if PX_VC
+#define PX_PUSH_PACK_DEFAULT __pragma(pack(push, 8))
+#define PX_POP_PACK __pragma(pack(pop))
+#elif PX_GCC_FAMILY
+#define PX_PUSH_PACK_DEFAULT _Pragma("pack(push, 8)")
+#define PX_POP_PACK _Pragma("pack(pop)")
+#else
+#define PX_PUSH_PACK_DEFAULT
+#define PX_POP_PACK
+#endif
+
+/**
+Inline macro
+*/
+#define PX_INLINE inline
+#if PX_MICROSOFT_FAMILY
+#pragma inline_depth(255)
+#endif
+
+/**
+Force inline macro
+*/
+#if PX_VC
+#define PX_FORCE_INLINE __forceinline
+#elif PX_LINUX // Workaround; Fedora Core 3 do not agree with force inline and PxcPool
+#define PX_FORCE_INLINE inline
+#elif PX_GCC_FAMILY
+#define PX_FORCE_INLINE inline __attribute__((always_inline))
+#else
+#define PX_FORCE_INLINE inline
+#endif
+
+/**
+Noinline macro
+*/
+#if PX_MICROSOFT_FAMILY
+#define PX_NOINLINE __declspec(noinline)
+#elif PX_GCC_FAMILY
+#define PX_NOINLINE __attribute__((noinline))
+#else
+#define PX_NOINLINE
+#endif
+
+/**
+Restrict macro
+*/
+#if defined(__CUDACC__)
+#define PX_RESTRICT __restrict__
+#else
+#define PX_RESTRICT __restrict
+#endif
+
+/**
+Noalias macro
+*/
+#if PX_MICROSOFT_FAMILY
+#define PX_NOALIAS __declspec(noalias)
+#else
+#define PX_NOALIAS
+#endif
+
+/**
+Alignment macros
+
+PX_ALIGN_PREFIX and PX_ALIGN_SUFFIX can be used for type alignment instead of aligning individual variables as follows:
+PX_ALIGN_PREFIX(16)
+struct A {
+...
+} PX_ALIGN_SUFFIX(16);
+This declaration style is parsed correctly by Visual Assist.
+
+*/
+#ifndef PX_ALIGN
+#if PX_MICROSOFT_FAMILY
+#define PX_ALIGN(alignment, decl) __declspec(align(alignment)) decl
+#define PX_ALIGN_PREFIX(alignment) __declspec(align(alignment))
+#define PX_ALIGN_SUFFIX(alignment)
+#elif PX_GCC_FAMILY
+#define PX_ALIGN(alignment, decl) decl __attribute__((aligned(alignment)))
+#define PX_ALIGN_PREFIX(alignment)
+#define PX_ALIGN_SUFFIX(alignment) __attribute__((aligned(alignment)))
+#elif defined __CUDACC__
+#define PX_ALIGN(alignment, decl) __align__(alignment) decl
+#define PX_ALIGN_PREFIX(alignment)
+// Fixed: the suffix previously expanded to "__align__(alignment))" with an unbalanced
+// trailing ')', which broke every PX_ALIGN_PREFIX/PX_ALIGN_SUFFIX declaration under nvcc.
+#define PX_ALIGN_SUFFIX(alignment) __align__(alignment)
+#else
+#define PX_ALIGN(alignment, decl)
+#define PX_ALIGN_PREFIX(alignment)
+#define PX_ALIGN_SUFFIX(alignment)
+#endif
+#endif
+
+/**
+Deprecated macro
+- To deprecate a function: Place PX_DEPRECATED at the start of the function header (leftmost word).
+- To deprecate a 'typedef', a 'struct' or a 'class': Place PX_DEPRECATED directly after the keywords ('typedef',
+'struct', 'class').
+
+Use these macro definitions to create warnings for deprecated functions
+\#define PX_DEPRECATED __declspec(deprecated) // Microsoft
+\#define PX_DEPRECATED __attribute__((deprecated())) // GCC
+*/
+#define PX_DEPRECATED
+
+/**
+General defines
+*/
+
+// static assert
+#if(defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 7))) || (PX_PS4) || (PX_APPLE_FAMILY)
+#define PX_COMPILE_TIME_ASSERT(exp) typedef char PxCompileTimeAssert_Dummy[(exp) ? 1 : -1] __attribute__((unused))
+#else
+#define PX_COMPILE_TIME_ASSERT(exp) typedef char PxCompileTimeAssert_Dummy[(exp) ? 1 : -1]
+#endif
+
+#if PX_GCC_FAMILY
+#define PX_OFFSET_OF(X, Y) __builtin_offsetof(X, Y)
+#else
+#define PX_OFFSET_OF(X, Y) offsetof(X, Y)
+#endif
+
+#define PX_OFFSETOF_BASE 0x100 // casting the null ptr takes a special-case code path, which we don't want
+#define PX_OFFSET_OF_RT(Class, Member) \
+ (reinterpret_cast<size_t>(&reinterpret_cast<Class*>(PX_OFFSETOF_BASE)->Member) - size_t(PX_OFFSETOF_BASE))
+
+// check that exactly one of NDEBUG and _DEBUG is defined
+#if !defined(NDEBUG) ^ defined(_DEBUG)
+#error Exactly one of NDEBUG and _DEBUG needs to be defined!
+#endif
+
+// make sure PX_CHECKED is defined in all _DEBUG configurations as well
+#if !PX_CHECKED && PX_DEBUG
+#error PX_CHECKED must be defined when PX_DEBUG is defined
+#endif
+
+#ifdef __CUDACC__
+#define PX_CUDA_CALLABLE __host__ __device__
+#else
+#define PX_CUDA_CALLABLE
+#endif
+
+// avoid unreferenced parameter warning
+// preferred solution: omit the parameter's name from the declaration
+// Swallows any single argument at zero runtime cost (empty inline body);
+// callable from both host and device code via PX_CUDA_CALLABLE.
+template <class T>
+PX_CUDA_CALLABLE PX_INLINE void PX_UNUSED(T const&)
+{
+}
+
+// Ensure that the application hasn't tweaked the pack value to less than 8, which would break
+// matching between the API headers and the binaries
+// This assert works on win32/win64, but may need further specialization on other platforms.
+// Some GCC compilers need the compiler flag -malign-double to be set.
+// Apparently the apple-clang-llvm compiler doesn't support malign-double.
+#if PX_PS4 || PX_APPLE_FAMILY
+// PS4/Apple: probe packing with a 'long' member
+struct PxPackValidation
+{
+ char _;
+ long a;
+};
+#elif PX_ANDROID
+// Android: probe packing with a 'double' member
+struct PxPackValidation
+{
+ char _;
+ double a;
+};
+#else
+// all other platforms: probe packing with a 'long long' member
+struct PxPackValidation
+{
+ char _;
+ long long a;
+};
+#endif
+#if !PX_APPLE_FAMILY
+// 'a' must land at offset 8, i.e. packing has not been reduced below 8 bytes.
+PX_COMPILE_TIME_ASSERT(PX_OFFSET_OF(PxPackValidation, a) == 8);
+#endif
+
+// use in a cpp file to suppress LNK4221
+#if PX_VC
+#define PX_DUMMY_SYMBOL \
+ namespace \
+ { \
+ char PxDummySymbol; \
+ }
+#else
+#define PX_DUMMY_SYMBOL
+#endif
+
+#if PX_GCC_FAMILY
+#define PX_WEAK_SYMBOL __attribute__((weak)) // this is to support SIMD constant merging in template specialization
+#else
+#define PX_WEAK_SYMBOL
+#endif
+
+// Macro for avoiding default assignment and copy, because doing this by inheritance can increase class size on some
+// platforms.
+#define PX_NOCOPY(Class) \
+ \
+protected: \
+ Class(const Class&); \
+ Class& operator=(const Class&);
+
+#define PX_STRINGIZE_HELPER(X) #X
+#define PX_STRINGIZE(X) PX_STRINGIZE_HELPER(X)
+
+#define PX_CONCAT_HELPER(X, Y) X##Y
+#define PX_CONCAT(X, Y) PX_CONCAT_HELPER(X, Y)
+
+#ifndef DISABLE_CUDA_PHYSX
+// CUDA support is currently compiled in for Windows and for 64-bit Linux (see the platform test below)
+#define PX_SUPPORT_GPU_PHYSX ((PX_WINDOWS_FAMILY) || (PX_LINUX && PX_X64))
+#else
+#define PX_SUPPORT_GPU_PHYSX 0
+#endif
+
+#define PX_SUPPORT_COMPUTE_PHYSX 0
+
+/** @} */
+#endif // #ifndef PXFOUNDATION_PXPREPROCESSOR_H
diff --git a/PxShared/include/foundation/PxProfiler.h b/PxShared/include/foundation/PxProfiler.h
new file mode 100644
index 00000000..6b181c09
--- /dev/null
+++ b/PxShared/include/foundation/PxProfiler.h
@@ -0,0 +1,116 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2016 NVIDIA Corporation. All rights reserved.
+
+#ifndef PXFOUNDATION_PXPROFILER_H
+#define PXFOUNDATION_PXPROFILER_H
+
+#include "foundation/PxFoundation.h"
+
+namespace physx
+{
+
+/**
+\brief The pure virtual callback interface for general purpose instrumentation and profiling of GameWorks modules as
+well as applications
+
+\note implementations must be prepared for zoneStart/zoneEnd to be invoked from multiple
+threads — NOTE(review): the 'detached' cross-thread flag suggests this; confirm against callers.
+*/
+class PxProfilerCallback
+{
+protected:
+	// non-virtual-dispatch destruction path; the callback is never deleted through this interface
+	virtual ~PxProfilerCallback() {}
+
+public:
+	/**************************************************************************************************************************
+	Instrumented profiling events
+	***************************************************************************************************************************/
+
+	/**
+	\brief Mark the beginning of a nested profile block
+	\param[in] eventName Event name. Must be a persistent const char *
+	\param[in] detached True for cross thread events
+	\param[in] contextId the context id of this zone. Zones with the same id belong to the same group. 0 is used for no specific group.
+	\return Returns implementation-specific profiler data for this event
+	*/
+	virtual void* zoneStart(const char* eventName, bool detached, uint64_t contextId) = 0;
+
+	/**
+	\brief Mark the end of a nested profile block
+	\param[in] profilerData The data returned by the corresponding zoneStart call (or NULL if not available)
+	\param[in] eventName The name of the zone ending, must match the corresponding name passed with 'zoneStart'. Must be a persistent const char *.
+	\param[in] detached True for cross thread events. Should match the value passed to zoneStart.
+	\param[in] contextId The context of this zone. Should match the value passed to zoneStart.
+
+	\note eventName plus contextId can be used to uniquely match up start and end of a zone.
+	*/
+	virtual void zoneEnd(void* profilerData, const char* eventName, bool detached, uint64_t contextId) = 0;
+};
+
+/**
+\brief RAII helper: starts a profiler zone on construction and ends it on destruction.
+
+If no profiler callback is registered (PxGetProfilerCallback() returns NULL), both
+construction and destruction are no-ops.
+*/
+class PxProfileScoped
+{
+  public:
+	/**
+	\param[in] eventName zone name, must be a persistent const char* (see PxProfilerCallback::zoneStart)
+	\param[in] detached true for cross-thread events
+	\param[in] contextId zone grouping id, 0 for no specific group
+	*/
+	PX_FORCE_INLINE PxProfileScoped(const char* eventName, bool detached, uint64_t contextId)
+	// Fixed: previously mEventName/mContextId/mDetached were left uninitialized when no
+	// callback was registered; initialize every member so the object never holds
+	// indeterminate values.
+	: mCallback(PxGetProfilerCallback()), mEventName(NULL), mProfilerData(NULL), mContextId(0), mDetached(false)
+	{
+		if(mCallback)
+		{
+			mEventName = eventName;
+			mContextId = contextId;
+			mDetached = detached;
+			mProfilerData = mCallback->zoneStart(eventName, detached, contextId);
+		}
+	}
+
+	PX_FORCE_INLINE ~PxProfileScoped()
+	{
+		// ends the zone started in the constructor; no-op when no callback was registered
+		if(mCallback)
+			mCallback->zoneEnd(mProfilerData, mEventName, mDetached, mContextId);
+	}
+	physx::PxProfilerCallback* mCallback;    //!< registered callback, NULL when profiling is off
+	const char* mEventName;                  //!< zone name passed to zoneStart (NULL if no callback)
+	void* mProfilerData;                     //!< opaque data returned by zoneStart
+	uint64_t mContextId;                     //!< zone grouping id passed to zoneStart
+	bool mDetached;                          //!< cross-thread flag passed to zoneStart
+};
+
+} // end of physx namespace
+
+#if PX_DEBUG || PX_CHECKED || PX_PROFILE
+ #define PX_PROFILE_ZONE(x, y) \
+ physx::PxProfileScoped PX_CONCAT(_scoped, __LINE__)(x, false, y)
+ #define PX_PROFILE_START_CROSSTHREAD(x, y) \
+ if(PxGetProfilerCallback()) \
+ PxGetProfilerCallback()->zoneStart(x, true, y)
+ #define PX_PROFILE_STOP_CROSSTHREAD(x, y) \
+ if(PxGetProfilerCallback()) \
+ PxGetProfilerCallback()->zoneEnd(NULL, x, true, y)
+#else
+ #define PX_PROFILE_ZONE(x, y)
+ #define PX_PROFILE_START_CROSSTHREAD(x, y)
+ #define PX_PROFILE_STOP_CROSSTHREAD(x, y)
+#endif
+
+#define PX_PROFILE_POINTER_TO_U64(pointer) static_cast<uint64_t>(reinterpret_cast<size_t>(pointer))
+
+#endif // PXFOUNDATION_PXPROFILER_H
diff --git a/PxShared/include/foundation/PxQuat.h b/PxShared/include/foundation/PxQuat.h
new file mode 100644
index 00000000..bf55c6d2
--- /dev/null
+++ b/PxShared/include/foundation/PxQuat.h
@@ -0,0 +1,403 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2016 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#ifndef PXFOUNDATION_PXQUAT_H
+#define PXFOUNDATION_PXQUAT_H
+
+/** \addtogroup foundation
+@{
+*/
+
+#include "foundation/PxVec3.h"
+#if !PX_DOXYGEN
+namespace physx
+{
+#endif
+
+/**
+\brief This is a quaternion class. For more information on quaternion mathematics
+consult a mathematics source on complex numbers.
+
+*/
+
+class PxQuat
+{
+ public:
+ /**
+ \brief Default constructor, does not do any initialization.
+ */
+ PX_CUDA_CALLABLE PX_FORCE_INLINE PxQuat()
+ {
+ }
+
+ //! identity constructor
+ PX_CUDA_CALLABLE PX_INLINE PxQuat(PxIDENTITY r) : x(0.0f), y(0.0f), z(0.0f), w(1.0f)
+ {
+ PX_UNUSED(r);
+ }
+
+ /**
+ \brief Constructor from a scalar: sets the real part w to the scalar value, and the imaginary parts (x,y,z) to zero
+ */
+ explicit PX_CUDA_CALLABLE PX_FORCE_INLINE PxQuat(float r) : x(0.0f), y(0.0f), z(0.0f), w(r)
+ {
+ }
+
+ /**
+ \brief Constructor. Take note of the order of the elements!
+ */
+ PX_CUDA_CALLABLE PX_FORCE_INLINE PxQuat(float nx, float ny, float nz, float nw) : x(nx), y(ny), z(nz), w(nw)
+ {
+ }
+
+ /**
+ \brief Creates from angle-axis representation.
+
+ Axis must be normalized!
+
+ Angle is in radians!
+
+ <b>Unit:</b> Radians
+ */
+ PX_CUDA_CALLABLE PX_INLINE PxQuat(float angleRadians, const PxVec3& unitAxis)
+ {
+ // debug-only sanity check that the axis really is (close to) unit length
+ PX_ASSERT(PxAbs(1.0f - unitAxis.magnitude()) < 1e-3f);
+ const float a = angleRadians * 0.5f; // half angle: w = cos(a), imaginary part = axis * sin(a)
+ const float s = PxSin(a);
+ w = PxCos(a);
+ x = unitAxis.x * s;
+ y = unitAxis.y * s;
+ z = unitAxis.z * s;
+ }
+
+ /**
+ \brief Copy ctor.
+ */
+ PX_CUDA_CALLABLE PX_FORCE_INLINE PxQuat(const PxQuat& v) : x(v.x), y(v.y), z(v.z), w(v.w)
+ {
+ }
+
+ /**
+ \brief Creates from orientation matrix.
+
+ \param[in] m Rotation matrix to extract quaternion from.
+ */
+ PX_CUDA_CALLABLE PX_INLINE explicit PxQuat(const PxMat33& m); /* defined in PxMat33.h */
+
+ /**
+ \brief returns true if quat is identity
+ \note exact floating-point comparison against (0,0,0,1)
+ */
+ PX_CUDA_CALLABLE PX_FORCE_INLINE bool isIdentity() const
+ {
+ return x==0.0f && y==0.0f && z==0.0f && w==1.0f;
+ }
+
+ /**
+ \brief returns true if all elements are finite (not NAN or INF, etc.)
+ */
+ PX_CUDA_CALLABLE bool isFinite() const
+ {
+ return PxIsFinite(x) && PxIsFinite(y) && PxIsFinite(z) && PxIsFinite(w);
+ }
+
+ /**
+ \brief returns true if finite and magnitude is close to unit
+ \note tolerance is 1e-4 on the magnitude
+ */
+ PX_CUDA_CALLABLE bool isUnit() const
+ {
+ const float unitTolerance = 1e-4f;
+ return isFinite() && PxAbs(magnitude() - 1) < unitTolerance;
+ }
+
+ /**
+ \brief returns true if finite and magnitude is reasonably close to unit to allow for some accumulation of error vs
+ isValid
+ \note looser tolerance (1e-2) than isUnit
+ */
+ PX_CUDA_CALLABLE bool isSane() const
+ {
+ const float unitTolerance = 1e-2f;
+ return isFinite() && PxAbs(magnitude() - 1) < unitTolerance;
+ }
+
+ /**
+ \brief returns true if the two quaternions are exactly equal
+ \note exact floating-point comparison; q and -q represent the same rotation but compare unequal
+ */
+ PX_CUDA_CALLABLE PX_INLINE bool operator==(const PxQuat& q) const
+ {
+ return x == q.x && y == q.y && z == q.z && w == q.w;
+ }
+
+ /**
+ \brief converts this quaternion to angle-axis representation
+ \note for a near-zero imaginary part (angle ~0) a default axis (1,0,0) and angle 0 are returned
+ */
+ PX_CUDA_CALLABLE PX_INLINE void toRadiansAndUnitAxis(float& angle, PxVec3& axis) const
+ {
+ const float quatEpsilon = 1.0e-8f;
+ const float s2 = x * x + y * y + z * z;
+ if(s2 < quatEpsilon * quatEpsilon) // can't extract a sensible axis
+ {
+ angle = 0.0f;
+ axis = PxVec3(1.0f, 0.0f, 0.0f);
+ }
+ else
+ {
+ const float s = PxRecipSqrt(s2);
+ axis = PxVec3(x, y, z) * s;
+ angle = PxAbs(w) < quatEpsilon ? PxPi : PxAtan2(s2 * s, w) * 2.0f;
+ }
+ }
+
+ /**
+ \brief Gets the angle between this quat and the identity quaternion.
+
+ \note assumes a unit quaternion: for unit quats w = cos(angle/2) (see the angle-axis constructor)
+
+ <b>Unit:</b> Radians
+ */
+ PX_CUDA_CALLABLE PX_INLINE float getAngle() const
+ {
+ return PxAcos(w) * 2.0f;
+ }
+
+ /**
+ \brief Gets the angle between this quat and the argument
+
+ \note assumes both quaternions are unit length
+
+ <b>Unit:</b> Radians
+ */
+ PX_CUDA_CALLABLE PX_INLINE float getAngle(const PxQuat& q) const
+ {
+ return PxAcos(dot(q)) * 2.0f;
+ }
+
+ /**
+ \brief This is the squared 4D vector length, should be 1 for unit quaternions.
+ */
+ PX_CUDA_CALLABLE PX_FORCE_INLINE float magnitudeSquared() const
+ {
+ return x * x + y * y + z * z + w * w;
+ }
+
+ /**
+ \brief returns the scalar product of this and other.
+ */
+ PX_CUDA_CALLABLE PX_FORCE_INLINE float dot(const PxQuat& v) const
+ {
+ return x * v.x + y * v.y + z * v.z + w * v.w;
+ }
+
+ /**
+ \brief returns a unit-length copy of this quaternion
+ \note no guard against zero magnitude (unlike normalize()); a zero quaternion yields NaNs
+ */
+ PX_CUDA_CALLABLE PX_INLINE PxQuat getNormalized() const
+ {
+ const float s = 1.0f / magnitude();
+ return PxQuat(x * s, y * s, z * s, w * s);
+ }
+
+ /**
+ \brief the 4D vector length of this quaternion
+ */
+ PX_CUDA_CALLABLE PX_INLINE float magnitude() const
+ {
+ return PxSqrt(magnitudeSquared());
+ }
+
+ // modifiers:
+ /**
+ \brief maps to the closest unit quaternion.
+ \return the magnitude the quaternion had before normalization; 0 if it was the zero
+ quaternion, in which case it is left unchanged.
+ */
+ PX_CUDA_CALLABLE PX_INLINE float normalize() // convert this PxQuat to a unit quaternion
+ {
+ const float mag = magnitude();
+ if(mag != 0.0f)
+ {
+ const float imag = 1.0f / mag;
+
+ x *= imag;
+ y *= imag;
+ z *= imag;
+ w *= imag;
+ }
+ return mag;
+ }
+
+ /**
+ \brief returns the conjugate.
+
+ \note for unit quaternions, this is the inverse.
+ */
+ PX_CUDA_CALLABLE PX_INLINE PxQuat getConjugate() const
+ {
+ return PxQuat(-x, -y, -z, w);
+ }
+
+ /**
+ \brief returns imaginary part.
+ */
+ PX_CUDA_CALLABLE PX_INLINE PxVec3 getImaginaryPart() const
+ {
+ return PxVec3(x, y, z);
+ }
+
+ /** \brief computes rotation of x-axis */
+ PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3 getBasisVector0() const
+ {
+ const float x2 = x * 2.0f;
+ const float w2 = w * 2.0f;
+ return PxVec3((w * w2) - 1.0f + x * x2, (z * w2) + y * x2, (-y * w2) + z * x2);
+ }
+
+ /** \brief computes rotation of y-axis */
+ PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3 getBasisVector1() const
+ {
+ const float y2 = y * 2.0f;
+ const float w2 = w * 2.0f;
+ return PxVec3((-z * w2) + x * y2, (w * w2) - 1.0f + y * y2, (x * w2) + z * y2);
+ }
+
+ /** \brief computes rotation of z-axis */
+ PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3 getBasisVector2() const
+ {
+ const float z2 = z * 2.0f;
+ const float w2 = w * 2.0f;
+ return PxVec3((y * w2) + x * z2, (-x * w2) + y * z2, (w * w2) - 1.0f + z * z2);
+ }
+
+ /**
+ rotates passed vec by this (assumed unitary)
+ */
+ PX_CUDA_CALLABLE PX_FORCE_INLINE const PxVec3 rotate(const PxVec3& v) const
+ {
+ const float vx = 2.0f * v.x;
+ const float vy = 2.0f * v.y;
+ const float vz = 2.0f * v.z;
+ const float w2 = w * w - 0.5f;
+ const float dot2 = (x * vx + y * vy + z * vz);
+ return PxVec3((vx * w2 + (y * vz - z * vy) * w + x * dot2), (vy * w2 + (z * vx - x * vz) * w + y * dot2),
+ (vz * w2 + (x * vy - y * vx) * w + z * dot2));
+ }
+
+ /**
+ inverse rotates passed vec by this (assumed unitary)
+ */
+ PX_CUDA_CALLABLE PX_FORCE_INLINE const PxVec3 rotateInv(const PxVec3& v) const
+ {
+ const float vx = 2.0f * v.x;
+ const float vy = 2.0f * v.y;
+ const float vz = 2.0f * v.z;
+ const float w2 = w * w - 0.5f;
+ const float dot2 = (x * vx + y * vy + z * vz);
+ return PxVec3((vx * w2 - (y * vz - z * vy) * w + x * dot2), (vy * w2 - (z * vx - x * vz) * w + y * dot2),
+ (vz * w2 - (x * vy - y * vx) * w + z * dot2));
+ }
+
+ /**
+ \brief Assignment operator
+ */
+ PX_CUDA_CALLABLE PX_FORCE_INLINE PxQuat& operator=(const PxQuat& p)
+ {
+ x = p.x;
+ y = p.y;
+ z = p.z;
+ w = p.w;
+ return *this;
+ }
+
+ /** in-place quaternion multiplication (Hamilton product, this = this * q) */
+ PX_CUDA_CALLABLE PX_FORCE_INLINE PxQuat& operator*=(const PxQuat& q)
+ {
+ // temporaries are required because every component reads the old values
+ const float tx = w * q.x + q.w * x + y * q.z - q.y * z;
+ const float ty = w * q.y + q.w * y + z * q.x - q.z * x;
+ const float tz = w * q.z + q.w * z + x * q.y - q.x * y;
+
+ w = w * q.w - q.x * x - y * q.y - q.z * z;
+ x = tx;
+ y = ty;
+ z = tz;
+
+ return *this;
+ }
+
+ /** component-wise addition */
+ PX_CUDA_CALLABLE PX_FORCE_INLINE PxQuat& operator+=(const PxQuat& q)
+ {
+ x += q.x;
+ y += q.y;
+ z += q.z;
+ w += q.w;
+ return *this;
+ }
+
+ /** component-wise subtraction */
+ PX_CUDA_CALLABLE PX_FORCE_INLINE PxQuat& operator-=(const PxQuat& q)
+ {
+ x -= q.x;
+ y -= q.y;
+ z -= q.z;
+ w -= q.w;
+ return *this;
+ }
+
+ /** component-wise scaling */
+ PX_CUDA_CALLABLE PX_FORCE_INLINE PxQuat& operator*=(const float s)
+ {
+ x *= s;
+ y *= s;
+ z *= s;
+ w *= s;
+ return *this;
+ }
+
+ /** quaternion multiplication */
+ PX_CUDA_CALLABLE PX_INLINE PxQuat operator*(const PxQuat& q) const
+ {
+ return PxQuat(w * q.x + q.w * x + y * q.z - q.y * z, w * q.y + q.w * y + z * q.x - q.z * x,
+ w * q.z + q.w * z + x * q.y - q.x * y, w * q.w - x * q.x - y * q.y - z * q.z);
+ }
+
+ /** quaternion addition */
+ PX_CUDA_CALLABLE PX_FORCE_INLINE PxQuat operator+(const PxQuat& q) const
+ {
+ return PxQuat(x + q.x, y + q.y, z + q.z, w + q.w);
+ }
+
+ /** unary quaternion negation (negates every component; represents the same rotation) */
+ PX_CUDA_CALLABLE PX_FORCE_INLINE PxQuat operator-() const
+ {
+ return PxQuat(-x, -y, -z, -w);
+ }
+
+ /** quaternion subtraction */
+ PX_CUDA_CALLABLE PX_FORCE_INLINE PxQuat operator-(const PxQuat& q) const
+ {
+ return PxQuat(x - q.x, y - q.y, z - q.z, w - q.w);
+ }
+
+ /** scalar multiplication */
+ PX_CUDA_CALLABLE PX_FORCE_INLINE PxQuat operator*(float r) const
+ {
+ return PxQuat(x * r, y * r, z * r, w * r);
+ }
+
+ /** the quaternion elements */
+ float x, y, z, w;
+};
+
+#if !PX_DOXYGEN
+} // namespace physx
+#endif
+
+/** @} */
+#endif // #ifndef PXFOUNDATION_PXQUAT_H
diff --git a/PxShared/include/foundation/PxSimpleTypes.h b/PxShared/include/foundation/PxSimpleTypes.h
new file mode 100644
index 00000000..a88f5acd
--- /dev/null
+++ b/PxShared/include/foundation/PxSimpleTypes.h
@@ -0,0 +1,112 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2016 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#ifndef PXFOUNDATION_PXSIMPLETYPES_H
+#define PXFOUNDATION_PXSIMPLETYPES_H
+
+/** \addtogroup foundation
+ @{
+*/
+
+// Platform specific types:
+// Design note: Its OK to use int for general loop variables and temps.
+
+#include "foundation/PxPreprocessor.h"
+#if PX_VC
+#pragma warning(push)
+#pragma warning(disable : 4668) // suppressing warning generated by Microsoft Visual Studio when including this standard
+// header
+#endif
+
+#if PX_LINUX
+#define __STDC_LIMIT_MACROS
+#endif
+
+#include <stdint.h>
+#if PX_VC
+#pragma warning(pop)
+#endif
+
+#if PX_VC // we could use inttypes.h starting with VC12
+#define PX_PRIu64 "I64u"
+#else
+#if !PX_PS4 && !PX_APPLE_FAMILY
+#define __STDC_FORMAT_MACROS
+#endif
+#include <inttypes.h>
+#define PX_PRIu64 PRIu64
+#endif
+
namespace physx
{
// Fixed-width scalar aliases used throughout the SDK.
typedef int64_t PxI64;
typedef uint64_t PxU64;
typedef int32_t PxI32;
typedef uint32_t PxU32;
typedef int16_t PxI16;
typedef uint16_t PxU16;
typedef int8_t PxI8;
typedef uint8_t PxU8;
typedef float PxF32;
typedef double PxF64;
typedef float PxReal; // default floating-point type for the SDK
}

// Type ranges

// These are here because we sometimes have non-IEEE compliant platforms to deal with.
// Removal is under consideration (issue GWSD-34)

#define PX_MAX_F32 3.4028234663852885981170418348452e+38F
// maximum possible float value
// NOTE(review): DBL_MAX / FLT_EPSILON / DBL_EPSILON below expand at the point of
// use and require <float.h> (or <cfloat>) to be included there; this header does
// not include it itself — confirm all users pull it in.
#define PX_MAX_F64 DBL_MAX // maximum possible double value

#define PX_EPS_F32 FLT_EPSILON // maximum relative error of float rounding
#define PX_EPS_F64 DBL_EPSILON // maximum relative error of double rounding

#define PX_MAX_REAL PX_MAX_F32
#define PX_EPS_REAL PX_EPS_F32
#define PX_NORMALIZATION_EPSILON float(1e-20f)

// Legacy type ranges used by PhysX.
// <stdint.h> defines no UINT*_MIN macros (the minimum of any unsigned type is 0),
// so the unsigned minima are spelled out literally.
#define PX_MAX_I8 INT8_MAX
#define PX_MIN_I8 INT8_MIN
#define PX_MAX_U8 UINT8_MAX
#define PX_MIN_U8 0
#define PX_MAX_I16 INT16_MAX
#define PX_MIN_I16 INT16_MIN
#define PX_MAX_U16 UINT16_MAX
#define PX_MIN_U16 0
#define PX_MAX_I32 INT32_MAX
#define PX_MIN_I32 INT32_MIN
#define PX_MAX_U32 UINT32_MAX
#define PX_MIN_U32 0
+
+/** @} */
+#endif // #ifndef PXFOUNDATION_PXSIMPLETYPES_H
diff --git a/PxShared/include/foundation/PxStrideIterator.h b/PxShared/include/foundation/PxStrideIterator.h
new file mode 100644
index 00000000..70229956
--- /dev/null
+++ b/PxShared/include/foundation/PxStrideIterator.h
@@ -0,0 +1,353 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2016 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#ifndef PXFOUNDATION_PXSTRIDEITERATOR_H
+#define PXFOUNDATION_PXSTRIDEITERATOR_H
+
+#include "foundation/Px.h"
+#include "foundation/PxAssert.h"
+
+/** \addtogroup foundation
+ @{
+*/
+
+#if !PX_DOXYGEN
+namespace physx
+{
+#endif
+
+/**
+\brief Iterator class for iterating over arrays of data that may be interleaved with other data.
+
+This class is used for iterating over arrays of elements that may have a larger element to element
+offset, called the stride, than the size of the element itself (non-contiguous).
+
+The template parameter T denotes the type of the element accessed. The stride itself
+is stored as a member field so multiple instances of a PxStrideIterator class can have
+different strides. This is useful for cases were the stride depends on runtime configuration.
+
+The stride iterator can be used for index based access, e.g.:
+\code
+ PxStrideIterator<PxVec3> strideArray(...);
+ for (unsigned i = 0; i < 10; ++i)
+ {
+ PxVec3& vec = strideArray[i];
+ ...
+ }
+\endcode
+or iteration by increment, e.g.:
+\code
+ PxStrideIterator<PxVec3> strideBegin(...);
+ PxStrideIterator<PxVec3> strideEnd(strideBegin + 10);
+ for (PxStrideIterator<PxVec3> it = strideBegin; it < strideEnd; ++it)
+ {
+ PxVec3& vec = *it;
+ ...
+ }
+\endcode
+
+Two special cases:
+- A stride of sizeof(T) represents a regular c array of type T.
+- A stride of 0 can be used to describe re-occurrence of the same element multiple times.
+
+*/
+template <typename T>
+class PxStrideIterator
+{
+
+#if !PX_DOXYGEN
+ template <typename X>
+ struct StripConst
+ {
+ typedef X Type;
+ };
+
+ template <typename X>
+ struct StripConst<const X>
+ {
+ typedef X Type;
+ };
+#endif
+
+ public:
+ /**
+ \brief Constructor.
+
+ Optionally takes a pointer to an element and a stride.
+
+ \param[in] ptr pointer to element, defaults to NULL.
+ \param[in] stride stride for accessing consecutive elements, defaults to the size of one element.
+ */
+ explicit PX_INLINE PxStrideIterator(T* ptr = NULL, PxU32 stride = sizeof(T)) : mPtr(ptr), mStride(stride)
+ {
+ PX_ASSERT(mStride == 0 || sizeof(T) <= mStride);
+ }
+
+ /**
+ \brief Copy constructor.
+
+ \param[in] strideIterator PxStrideIterator to be copied.
+ */
+ PX_INLINE PxStrideIterator(const PxStrideIterator<typename StripConst<T>::Type>& strideIterator)
+ : mPtr(strideIterator.ptr()), mStride(strideIterator.stride())
+ {
+ PX_ASSERT(mStride == 0 || sizeof(T) <= mStride);
+ }
+
+ /**
+ \brief Get pointer to element.
+ */
+ PX_INLINE T* ptr() const
+ {
+ return mPtr;
+ }
+
+ /**
+ \brief Get stride.
+ */
+ PX_INLINE PxU32 stride() const
+ {
+ return mStride;
+ }
+
+ /**
+ \brief Indirection operator.
+ */
+ PX_INLINE T& operator*() const
+ {
+ return *mPtr;
+ }
+
+ /**
+ \brief Dereferencing operator.
+ */
+ PX_INLINE T* operator->() const
+ {
+ return mPtr;
+ }
+
+ /**
+ \brief Indexing operator.
+ */
+ PX_INLINE T& operator[](unsigned int i) const
+ {
+ return *byteAdd(mPtr, i * stride());
+ }
+
+ /**
+ \brief Pre-increment operator.
+ */
+ PX_INLINE PxStrideIterator& operator++()
+ {
+ mPtr = byteAdd(mPtr, stride());
+ return *this;
+ }
+
+ /**
+ \brief Post-increment operator.
+ */
+ PX_INLINE PxStrideIterator operator++(int)
+ {
+ PxStrideIterator tmp = *this;
+ mPtr = byteAdd(mPtr, stride());
+ return tmp;
+ }
+
+ /**
+ \brief Pre-decrement operator.
+ */
+ PX_INLINE PxStrideIterator& operator--()
+ {
+ mPtr = byteSub(mPtr, stride());
+ return *this;
+ }
+
+ /**
+ \brief Post-decrement operator.
+ */
+ PX_INLINE PxStrideIterator operator--(int)
+ {
+ PxStrideIterator tmp = *this;
+ mPtr = byteSub(mPtr, stride());
+ return tmp;
+ }
+
+ /**
+ \brief Addition operator.
+ */
+ PX_INLINE PxStrideIterator operator+(unsigned int i) const
+ {
+ return PxStrideIterator(byteAdd(mPtr, i * stride()), stride());
+ }
+
+ /**
+ \brief Subtraction operator.
+ */
+ PX_INLINE PxStrideIterator operator-(unsigned int i) const
+ {
+ return PxStrideIterator(byteSub(mPtr, i * stride()), stride());
+ }
+
+ /**
+ \brief Addition compound assignment operator.
+ */
+ PX_INLINE PxStrideIterator& operator+=(unsigned int i)
+ {
+ mPtr = byteAdd(mPtr, i * stride());
+ return *this;
+ }
+
+ /**
+ \brief Subtraction compound assignment operator.
+ */
+ PX_INLINE PxStrideIterator& operator-=(unsigned int i)
+ {
+ mPtr = byteSub(mPtr, i * stride());
+ return *this;
+ }
+
+ /**
+ \brief Iterator difference.
+ */
+ PX_INLINE int operator-(const PxStrideIterator& other) const
+ {
+ PX_ASSERT(isCompatible(other));
+ int byteDiff = static_cast<int>(reinterpret_cast<const PxU8*>(mPtr) - reinterpret_cast<const PxU8*>(other.mPtr));
+ return byteDiff / static_cast<int>(stride());
+ }
+
+ /**
+ \brief Equality operator.
+ */
+ PX_INLINE bool operator==(const PxStrideIterator& other) const
+ {
+ PX_ASSERT(isCompatible(other));
+ return mPtr == other.mPtr;
+ }
+
+ /**
+ \brief Inequality operator.
+ */
+ PX_INLINE bool operator!=(const PxStrideIterator& other) const
+ {
+ PX_ASSERT(isCompatible(other));
+ return mPtr != other.mPtr;
+ }
+
+ /**
+ \brief Less than operator.
+ */
+ PX_INLINE bool operator<(const PxStrideIterator& other) const
+ {
+ PX_ASSERT(isCompatible(other));
+ return mPtr < other.mPtr;
+ }
+
+ /**
+ \brief Greater than operator.
+ */
+ PX_INLINE bool operator>(const PxStrideIterator& other) const
+ {
+ PX_ASSERT(isCompatible(other));
+ return mPtr > other.mPtr;
+ }
+
+ /**
+ \brief Less or equal than operator.
+ */
+ PX_INLINE bool operator<=(const PxStrideIterator& other) const
+ {
+ PX_ASSERT(isCompatible(other));
+ return mPtr <= other.mPtr;
+ }
+
+ /**
+ \brief Greater or equal than operator.
+ */
+ PX_INLINE bool operator>=(const PxStrideIterator& other) const
+ {
+ PX_ASSERT(isCompatible(other));
+ return mPtr >= other.mPtr;
+ }
+
+ private:
+ PX_INLINE static T* byteAdd(T* ptr, PxU32 bytes)
+ {
+ return const_cast<T*>(reinterpret_cast<const T*>(reinterpret_cast<const PxU8*>(ptr) + bytes));
+ }
+
+ PX_INLINE static T* byteSub(T* ptr, PxU32 bytes)
+ {
+ return const_cast<T*>(reinterpret_cast<const T*>(reinterpret_cast<const PxU8*>(ptr) - bytes));
+ }
+
+ PX_INLINE bool isCompatible(const PxStrideIterator& other) const
+ {
+ int byteDiff = static_cast<int>(reinterpret_cast<const PxU8*>(mPtr) - reinterpret_cast<const PxU8*>(other.mPtr));
+ return (stride() == other.stride()) && (abs(byteDiff) % stride() == 0);
+ }
+
+ T* mPtr;
+ PxU32 mStride;
+};
+
+/**
+\brief Addition operator.
+*/
+template <typename T>
+PX_INLINE PxStrideIterator<T> operator+(int i, PxStrideIterator<T> it)
+{
+ it += i;
+ return it;
+}
+
+/**
+\brief Stride iterator factory function which infers the iterator type.
+*/
+template <typename T>
+PX_INLINE PxStrideIterator<T> PxMakeIterator(T* ptr, PxU32 stride = sizeof(T))
+{
+ return PxStrideIterator<T>(ptr, stride);
+}
+
+/**
+\brief Stride iterator factory function which infers the iterator type.
+*/
+template <typename T>
+PX_INLINE PxStrideIterator<const T> PxMakeIterator(const T* ptr, PxU32 stride = sizeof(T))
+{
+ return PxStrideIterator<const T>(ptr, stride);
+}
+
+#if !PX_DOXYGEN
+} // namespace physx
+#endif
+
+/** @} */
+#endif // PXFOUNDATION_PXSTRIDEITERATOR_H
diff --git a/PxShared/include/foundation/PxTransform.h b/PxShared/include/foundation/PxTransform.h
new file mode 100644
index 00000000..cffc1d3e
--- /dev/null
+++ b/PxShared/include/foundation/PxTransform.h
@@ -0,0 +1,215 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2016 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#ifndef PXFOUNDATION_PXTRANSFORM_H
+#define PXFOUNDATION_PXTRANSFORM_H
+/** \addtogroup foundation
+ @{
+*/
+
+#include "foundation/PxQuat.h"
+#include "foundation/PxPlane.h"
+
+#if !PX_DOXYGEN
+namespace physx
+{
+#endif
+
+/*!
+\brief class representing a rigid euclidean transform as a quaternion and a vector
+*/
+
+class PxTransform
+{
+ public:
+ PxQuat q;
+ PxVec3 p;
+
+ PX_CUDA_CALLABLE PX_FORCE_INLINE PxTransform()
+ {
+ }
+
+ PX_CUDA_CALLABLE PX_FORCE_INLINE explicit PxTransform(const PxVec3& position) : q(PxIdentity), p(position)
+ {
+ }
+
+ PX_CUDA_CALLABLE PX_FORCE_INLINE explicit PxTransform(PxIDENTITY r) : q(PxIdentity), p(PxZero)
+ {
+ PX_UNUSED(r);
+ }
+
+ PX_CUDA_CALLABLE PX_FORCE_INLINE explicit PxTransform(const PxQuat& orientation) : q(orientation), p(0)
+ {
+ PX_ASSERT(orientation.isSane());
+ }
+
+ PX_CUDA_CALLABLE PX_FORCE_INLINE PxTransform(float x, float y, float z, PxQuat aQ = PxQuat(PxIdentity))
+ : q(aQ), p(x, y, z)
+ {
+ }
+
+ PX_CUDA_CALLABLE PX_FORCE_INLINE PxTransform(const PxVec3& p0, const PxQuat& q0) : q(q0), p(p0)
+ {
+ PX_ASSERT(q0.isSane());
+ }
+
+ PX_CUDA_CALLABLE PX_FORCE_INLINE explicit PxTransform(const PxMat44& m); // defined in PxMat44.h
+
+ /**
+ \brief returns true if the two transforms are exactly equal
+ */
+ PX_CUDA_CALLABLE PX_INLINE bool operator==(const PxTransform& t) const
+ {
+ return p == t.p && q == t.q;
+ }
+
+ PX_CUDA_CALLABLE PX_FORCE_INLINE PxTransform operator*(const PxTransform& x) const
+ {
+ PX_ASSERT(x.isSane());
+ return transform(x);
+ }
+
+ //! Equals matrix multiplication
+ PX_CUDA_CALLABLE PX_INLINE PxTransform& operator*=(PxTransform& other)
+ {
+ *this = *this * other;
+ return *this;
+ }
+
+ PX_CUDA_CALLABLE PX_FORCE_INLINE PxTransform getInverse() const
+ {
+ PX_ASSERT(isFinite());
+ return PxTransform(q.rotateInv(-p), q.getConjugate());
+ }
+
+ PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3 transform(const PxVec3& input) const
+ {
+ PX_ASSERT(isFinite());
+ return q.rotate(input) + p;
+ }
+
+ PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3 transformInv(const PxVec3& input) const
+ {
+ PX_ASSERT(isFinite());
+ return q.rotateInv(input - p);
+ }
+
+ PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3 rotate(const PxVec3& input) const
+ {
+ PX_ASSERT(isFinite());
+ return q.rotate(input);
+ }
+
+ PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3 rotateInv(const PxVec3& input) const
+ {
+ PX_ASSERT(isFinite());
+ return q.rotateInv(input);
+ }
+
+ //! Transform transform to parent (returns compound transform: first src, then *this)
+ PX_CUDA_CALLABLE PX_FORCE_INLINE PxTransform transform(const PxTransform& src) const
+ {
+ PX_ASSERT(src.isSane());
+ PX_ASSERT(isSane());
+ // src = [srct, srcr] -> [r*srct + t, r*srcr]
+ return PxTransform(q.rotate(src.p) + p, q * src.q);
+ }
+
+ /**
+ \brief returns true if finite and q is a unit quaternion
+ */
+
+ PX_CUDA_CALLABLE bool isValid() const
+ {
+ return p.isFinite() && q.isFinite() && q.isUnit();
+ }
+
+ /**
+ \brief returns true if finite and quat magnitude is reasonably close to unit to allow for some accumulation of error
+ vs isValid
+ */
+
+ PX_CUDA_CALLABLE bool isSane() const
+ {
+ return isFinite() && q.isSane();
+ }
+
+ /**
+ \brief returns true if all elems are finite (not NAN or INF, etc.)
+ */
+ PX_CUDA_CALLABLE PX_FORCE_INLINE bool isFinite() const
+ {
+ return p.isFinite() && q.isFinite();
+ }
+
+ //! Transform transform from parent (returns compound transform: first src, then this->inverse)
+ PX_CUDA_CALLABLE PX_FORCE_INLINE PxTransform transformInv(const PxTransform& src) const
+ {
+ PX_ASSERT(src.isSane());
+ PX_ASSERT(isFinite());
+ // src = [srct, srcr] -> [r^-1*(srct-t), r^-1*srcr]
+ PxQuat qinv = q.getConjugate();
+ return PxTransform(qinv.rotate(src.p - p), qinv * src.q);
+ }
+
+ /**
+ \brief transform plane
+ */
+
+ PX_CUDA_CALLABLE PX_FORCE_INLINE PxPlane transform(const PxPlane& plane) const
+ {
+ PxVec3 transformedNormal = rotate(plane.n);
+ return PxPlane(transformedNormal, plane.d - p.dot(transformedNormal));
+ }
+
+ /**
+ \brief inverse-transform plane
+ */
+
+ PX_CUDA_CALLABLE PX_FORCE_INLINE PxPlane inverseTransform(const PxPlane& plane) const
+ {
+ PxVec3 transformedNormal = rotateInv(plane.n);
+ return PxPlane(transformedNormal, plane.d + p.dot(plane.n));
+ }
+
+ /**
+ \brief return a normalized transform (i.e. one in which the quaternion has unit magnitude)
+ */
+ PX_CUDA_CALLABLE PX_FORCE_INLINE PxTransform getNormalized() const
+ {
+ return PxTransform(p, q.getNormalized());
+ }
+};
+
+#if !PX_DOXYGEN
+} // namespace physx
+#endif
+
+/** @} */
+#endif // #ifndef PXFOUNDATION_PXTRANSFORM_H
diff --git a/PxShared/include/foundation/PxUnionCast.h b/PxShared/include/foundation/PxUnionCast.h
new file mode 100644
index 00000000..2b622033
--- /dev/null
+++ b/PxShared/include/foundation/PxUnionCast.h
@@ -0,0 +1,64 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2016 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#ifndef PXFOUNDATION_PXUNIONCAST_H
+#define PXFOUNDATION_PXUNIONCAST_H
+
+#include "foundation/Px.h"
+
+/** \addtogroup foundation
+@{
+*/
+
+#if !PX_DOXYGEN
+namespace physx
+{
+#endif
+
/**
\brief Reinterprets the bit pattern of a value of type B as a value of type A
(bit cast), without performing any numeric conversion.

\param[in] b the source value whose bits are reused.
\return a value of type A sharing the bit representation of \p b.
*/
// NOTE(review): this reads a union member other than the one last written.
// That is well-defined in C and supported as an extension by the compilers
// PhysX targets, but is technically unspecified in standard C++ — confirm
// against the supported-toolchain list before porting elsewhere.
// NOTE(review): if sizeof(A) > sizeof(B), the extra bytes of _a are read
// uninitialized — callers presumably only use equally-sized types; verify.
template <class A, class B>
PX_FORCE_INLINE A PxUnionCast(B b)
{
	union AB
	{
		// construct directly from the source so _b is the active member
		AB(B bb) : _b(bb)
		{
		}
		B _b; // source bits
		A _a; // destination view of the same storage
	} u(b);
	return u._a;
}
+
+#if !PX_DOXYGEN
+} // namespace physx
+#endif
+
+/** @} */
+
+#endif // PXFOUNDATION_PXUNIONCAST_H
diff --git a/PxShared/include/foundation/PxVec2.h b/PxShared/include/foundation/PxVec2.h
new file mode 100644
index 00000000..91db0865
--- /dev/null
+++ b/PxShared/include/foundation/PxVec2.h
@@ -0,0 +1,347 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2016 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#ifndef PXFOUNDATION_PXVEC2_H
+#define PXFOUNDATION_PXVEC2_H
+
+/** \addtogroup foundation
+@{
+*/
+
+#include "foundation/PxMath.h"
+
+#if !PX_DOXYGEN
+namespace physx
+{
+#endif
+
/**
\brief 2 Element vector class.

This is a 2-dimensional vector class with public data members.
All arithmetic is plain IEEE single-precision; no operation checks for
division by zero or overflow.
*/
class PxVec2
{
  public:
	/**
	\brief default constructor leaves data uninitialized.
	*/
	PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec2()
	{
	}

	/**
	\brief zero constructor.
	*/
	PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec2(PxZERO r) : x(0.0f), y(0.0f)
	{
		PX_UNUSED(r);
	}

	/**
	\brief Assigns scalar parameter to all elements.

	Useful to initialize to zero or one.

	\param[in] a Value to assign to elements.
	*/
	explicit PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec2(float a) : x(a), y(a)
	{
	}

	/**
	\brief Initializes from 2 scalar parameters.

	\param[in] nx Value to initialize X component.
	\param[in] ny Value to initialize Y component.
	*/
	PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec2(float nx, float ny) : x(nx), y(ny)
	{
	}

	/**
	\brief Copy ctor.
	*/
	PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec2(const PxVec2& v) : x(v.x), y(v.y)
	{
	}

	// Operators

	/**
	\brief Assignment operator
	*/
	PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec2& operator=(const PxVec2& p)
	{
		x = p.x;
		y = p.y;
		return *this;
	}

	/**
	\brief element access (0 = x, 1 = y)

	Relies on x and y being laid out contiguously at the start of the object.
	*/
	PX_DEPRECATED PX_CUDA_CALLABLE PX_FORCE_INLINE float& operator[](int index)
	{
		PX_ASSERT(index >= 0 && index <= 1);

		return reinterpret_cast<float*>(this)[index];
	}

	/**
	\brief element access (0 = x, 1 = y)
	*/
	PX_DEPRECATED PX_CUDA_CALLABLE PX_FORCE_INLINE const float& operator[](int index) const
	{
		PX_ASSERT(index >= 0 && index <= 1);

		return reinterpret_cast<const float*>(this)[index];
	}

	/**
	\brief returns true if the two vectors are exactly equal.
	*/
	PX_CUDA_CALLABLE PX_FORCE_INLINE bool operator==(const PxVec2& v) const
	{
		return x == v.x && y == v.y;
	}

	/**
	\brief returns true if the two vectors are not exactly equal.
	*/
	PX_CUDA_CALLABLE PX_FORCE_INLINE bool operator!=(const PxVec2& v) const
	{
		return x != v.x || y != v.y;
	}

	/**
	\brief tests for exact zero vector
	*/
	PX_CUDA_CALLABLE PX_FORCE_INLINE bool isZero() const
	{
		return x == 0.0f && y == 0.0f;
	}

	/**
	\brief returns true if all 2 elems of the vector are finite (not NAN or INF, etc.)
	*/
	PX_CUDA_CALLABLE PX_INLINE bool isFinite() const
	{
		return PxIsFinite(x) && PxIsFinite(y);
	}

	/**
	\brief is normalized - used by API parameter validation

	Allows a relative tolerance of 1e-4 around unit magnitude.
	*/
	PX_CUDA_CALLABLE PX_FORCE_INLINE bool isNormalized() const
	{
		const float unitTolerance = 1e-4f;
		return isFinite() && PxAbs(magnitude() - 1) < unitTolerance;
	}

	/**
	\brief returns the squared magnitude

	Avoids calling PxSqrt()!
	*/
	PX_CUDA_CALLABLE PX_FORCE_INLINE float magnitudeSquared() const
	{
		return x * x + y * y;
	}

	/**
	\brief returns the magnitude
	*/
	PX_CUDA_CALLABLE PX_FORCE_INLINE float magnitude() const
	{
		return PxSqrt(magnitudeSquared());
	}

	/**
	\brief negation
	*/
	PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec2 operator-() const
	{
		return PxVec2(-x, -y);
	}

	/**
	\brief vector addition
	*/
	PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec2 operator+(const PxVec2& v) const
	{
		return PxVec2(x + v.x, y + v.y);
	}

	/**
	\brief vector difference
	*/
	PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec2 operator-(const PxVec2& v) const
	{
		return PxVec2(x - v.x, y - v.y);
	}

	/**
	\brief scalar post-multiplication
	*/
	PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec2 operator*(float f) const
	{
		return PxVec2(x * f, y * f);
	}

	/**
	\brief scalar division

	No zero check: f == 0 yields +/-inf or NaN components.
	*/
	PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec2 operator/(float f) const
	{
		f = 1.0f / f; // PT: inconsistent notation with operator /= (multiplies by the reciprocal rather than dividing)
		return PxVec2(x * f, y * f);
	}

	/**
	\brief vector addition
	*/
	PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec2& operator+=(const PxVec2& v)
	{
		x += v.x;
		y += v.y;
		return *this;
	}

	/**
	\brief vector difference
	*/
	PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec2& operator-=(const PxVec2& v)
	{
		x -= v.x;
		y -= v.y;
		return *this;
	}

	/**
	\brief scalar multiplication
	*/
	PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec2& operator*=(float f)
	{
		x *= f;
		y *= f;
		return *this;
	}
	/**
	\brief scalar division

	No zero check: f == 0 yields +/-inf or NaN components.
	*/
	PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec2& operator/=(float f)
	{
		f = 1.0f / f; // PT: inconsistent notation with operator / (multiplies by the reciprocal rather than dividing)
		x *= f;
		y *= f;
		return *this;
	}

	/**
	\brief returns the scalar product of this and other.
	*/
	PX_CUDA_CALLABLE PX_FORCE_INLINE float dot(const PxVec2& v) const
	{
		return x * v.x + y * v.y;
	}

	/** return a unit vector; returns the zero vector if this has zero magnitude */

	PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec2 getNormalized() const
	{
		const float m = magnitudeSquared();
		return m > 0.0f ? *this * PxRecipSqrt(m) : PxVec2(0, 0);
	}

	/**
	\brief normalizes the vector in place

	\return the magnitude the vector had before normalization; a zero vector is left unchanged.
	*/
	PX_CUDA_CALLABLE PX_FORCE_INLINE float normalize()
	{
		const float m = magnitude();
		if(m > 0.0f)
			*this /= m;
		return m;
	}

	/**
	\brief a[i] * b[i], for all i. (component-wise product)
	*/
	PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec2 multiply(const PxVec2& a) const
	{
		return PxVec2(x * a.x, y * a.y);
	}

	/**
	\brief element-wise minimum
	*/
	PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec2 minimum(const PxVec2& v) const
	{
		return PxVec2(PxMin(x, v.x), PxMin(y, v.y));
	}

	/**
	\brief returns MIN(x, y);
	*/
	PX_CUDA_CALLABLE PX_FORCE_INLINE float minElement() const
	{
		return PxMin(x, y);
	}

	/**
	\brief element-wise maximum
	*/
	PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec2 maximum(const PxVec2& v) const
	{
		return PxVec2(PxMax(x, v.x), PxMax(y, v.y));
	}

	/**
	\brief returns MAX(x, y);
	*/
	PX_CUDA_CALLABLE PX_FORCE_INLINE float maxElement() const
	{
		return PxMax(x, y);
	}

	// the vector elements
	float x, y;
};
+
+PX_CUDA_CALLABLE static PX_FORCE_INLINE PxVec2 operator*(float f, const PxVec2& v)
+{
+ return PxVec2(f * v.x, f * v.y);
+}
+
+#if !PX_DOXYGEN
+} // namespace physx
+#endif
+
+/** @} */
+#endif // #ifndef PXFOUNDATION_PXVEC2_H
diff --git a/PxShared/include/foundation/PxVec3.h b/PxShared/include/foundation/PxVec3.h
new file mode 100644
index 00000000..b4fb07d4
--- /dev/null
+++ b/PxShared/include/foundation/PxVec3.h
@@ -0,0 +1,393 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2016 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#ifndef PXFOUNDATION_PXVEC3_H
+#define PXFOUNDATION_PXVEC3_H
+
+/** \addtogroup foundation
+@{
+*/
+
+#include "foundation/PxMath.h"
+
+#if !PX_DOXYGEN
+namespace physx
+{
+#endif
+
+/**
+\brief 3 Element vector class.
+
+This is a 3-dimensional vector class with public data members.
+*/
+class PxVec3
+{
+ public:
+ /**
+ \brief default constructor leaves data uninitialized.
+ */
+ PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3()
+ {
+ }
+
+ /**
+ \brief zero constructor.
+ */
+ PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3(PxZERO r) : x(0.0f), y(0.0f), z(0.0f)
+ {
+ PX_UNUSED(r);
+ }
+
+ /**
+ \brief Assigns scalar parameter to all elements.
+
+ Useful to initialize to zero or one.
+
+ \param[in] a Value to assign to elements.
+ */
+ explicit PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3(float a) : x(a), y(a), z(a)
+ {
+ }
+
+ /**
+ \brief Initializes from 3 scalar parameters.
+
+ \param[in] nx Value to initialize X component.
+ \param[in] ny Value to initialize Y component.
+ \param[in] nz Value to initialize Z component.
+ */
+ PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3(float nx, float ny, float nz) : x(nx), y(ny), z(nz)
+ {
+ }
+
+ /**
+ \brief Copy ctor.
+ */
+ PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3(const PxVec3& v) : x(v.x), y(v.y), z(v.z)
+ {
+ }
+
+ // Operators
+
+ /**
+ \brief Assignment operator
+ */
+ PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3& operator=(const PxVec3& p)
+ {
+ x = p.x;
+ y = p.y;
+ z = p.z;
+ return *this;
+ }
+
+ /**
+ \brief element access
+ */
+ PX_DEPRECATED PX_CUDA_CALLABLE PX_FORCE_INLINE float& operator[](unsigned int index)
+ {
+ PX_ASSERT(index <= 2);
+
+ return reinterpret_cast<float*>(this)[index];
+ }
+
+ /**
+ \brief element access
+ */
+ PX_DEPRECATED PX_CUDA_CALLABLE PX_FORCE_INLINE const float& operator[](unsigned int index) const
+ {
+ PX_ASSERT(index <= 2);
+
+ return reinterpret_cast<const float*>(this)[index];
+ }
+ /**
+ \brief returns true if the two vectors are exactly equal.
+ */
+ PX_CUDA_CALLABLE PX_FORCE_INLINE bool operator==(const PxVec3& v) const
+ {
+ return x == v.x && y == v.y && z == v.z;
+ }
+
+ /**
+ \brief returns true if the two vectors are not exactly equal.
+ */
+ PX_CUDA_CALLABLE PX_FORCE_INLINE bool operator!=(const PxVec3& v) const
+ {
+ return x != v.x || y != v.y || z != v.z;
+ }
+
+ /**
+ \brief tests for exact zero vector
+ */
+ PX_CUDA_CALLABLE PX_FORCE_INLINE bool isZero() const
+ {
+ return x == 0.0f && y == 0.0f && z == 0.0f;
+ }
+
+ /**
+ \brief returns true if all 3 elems of the vector are finite (not NAN or INF, etc.)
+ */
+ PX_CUDA_CALLABLE PX_INLINE bool isFinite() const
+ {
+ return PxIsFinite(x) && PxIsFinite(y) && PxIsFinite(z);
+ }
+
+ /**
+ \brief is normalized - used by API parameter validation
+ */
+ PX_CUDA_CALLABLE PX_FORCE_INLINE bool isNormalized() const
+ {
+ const float unitTolerance = 1e-4f;
+ return isFinite() && PxAbs(magnitude() - 1) < unitTolerance;
+ }
+
+ /**
+ \brief returns the squared magnitude
+
+ Avoids calling PxSqrt()!
+ */
+ PX_CUDA_CALLABLE PX_FORCE_INLINE float magnitudeSquared() const
+ {
+ return x * x + y * y + z * z;
+ }
+
+ /**
+ \brief returns the magnitude
+ */
+ PX_CUDA_CALLABLE PX_FORCE_INLINE float magnitude() const
+ {
+ return PxSqrt(magnitudeSquared());
+ }
+
+ /**
+ \brief negation
+ */
+ PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3 operator-() const
+ {
+ return PxVec3(-x, -y, -z);
+ }
+
+ /**
+ \brief vector addition
+ */
+ PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3 operator+(const PxVec3& v) const
+ {
+ return PxVec3(x + v.x, y + v.y, z + v.z);
+ }
+
+ /**
+ \brief vector difference
+ */
+ PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3 operator-(const PxVec3& v) const
+ {
+ return PxVec3(x - v.x, y - v.y, z - v.z);
+ }
+
+ /**
+ \brief scalar post-multiplication
+ */
+ PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3 operator*(float f) const
+ {
+ return PxVec3(x * f, y * f, z * f);
+ }
+
+ /**
+ \brief scalar division
+ */
+ PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3 operator/(float f) const
+ {
+ f = 1.0f / f;
+ return PxVec3(x * f, y * f, z * f);
+ }
+
+ /**
+ \brief vector addition
+ */
+ PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3& operator+=(const PxVec3& v)
+ {
+ x += v.x;
+ y += v.y;
+ z += v.z;
+ return *this;
+ }
+
+ /**
+ \brief vector difference
+ */
+ PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3& operator-=(const PxVec3& v)
+ {
+ x -= v.x;
+ y -= v.y;
+ z -= v.z;
+ return *this;
+ }
+
+ /**
+ \brief scalar multiplication
+ */
+ PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3& operator*=(float f)
+ {
+ x *= f;
+ y *= f;
+ z *= f;
+ return *this;
+ }
+ /**
+ \brief scalar division
+ */
+ PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3& operator/=(float f)
+ {
+ f = 1.0f / f;
+ x *= f;
+ y *= f;
+ z *= f;
+ return *this;
+ }
+
+ /**
+ \brief returns the scalar product of this and other.
+ */
+ PX_CUDA_CALLABLE PX_FORCE_INLINE float dot(const PxVec3& v) const
+ {
+ return x * v.x + y * v.y + z * v.z;
+ }
+
+ /**
+ \brief cross product
+ */
+ PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3 cross(const PxVec3& v) const
+ {
+ return PxVec3(y * v.z - z * v.y, z * v.x - x * v.z, x * v.y - y * v.x);
+ }
+
+ /** return a unit vector */
+
+ PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3 getNormalized() const
+ {
+ const float m = magnitudeSquared();
+ return m > 0.0f ? *this * PxRecipSqrt(m) : PxVec3(0, 0, 0);
+ }
+
+ /**
+ \brief normalizes the vector in place
+ */
+ PX_CUDA_CALLABLE PX_FORCE_INLINE float normalize()
+ {
+ const float m = magnitude();
+ if(m > 0.0f)
+ *this /= m;
+ return m;
+ }
+
+ /**
+ \brief normalizes the vector in place. Does nothing if vector magnitude is under PX_NORMALIZATION_EPSILON.
+ Returns vector magnitude if >= PX_NORMALIZATION_EPSILON and 0.0f otherwise.
+ */
+ PX_CUDA_CALLABLE PX_FORCE_INLINE float normalizeSafe()
+ {
+ const float mag = magnitude();
+ if(mag < PX_NORMALIZATION_EPSILON)
+ return 0.0f;
+ *this *= 1.0f / mag;
+ return mag;
+ }
+
+ /**
+ \brief normalizes the vector in place. Asserts if vector magnitude is under PX_NORMALIZATION_EPSILON.
+ returns vector magnitude.
+ */
+ PX_CUDA_CALLABLE PX_FORCE_INLINE float normalizeFast()
+ {
+ const float mag = magnitude();
+ PX_ASSERT(mag >= PX_NORMALIZATION_EPSILON);
+ *this *= 1.0f / mag;
+ return mag;
+ }
+
+ /**
+ \brief a[i] * b[i], for all i.
+ */
+ PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3 multiply(const PxVec3& a) const
+ {
+ return PxVec3(x * a.x, y * a.y, z * a.z);
+ }
+
+ /**
+ \brief element-wise minimum
+ */
+ PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3 minimum(const PxVec3& v) const
+ {
+ return PxVec3(PxMin(x, v.x), PxMin(y, v.y), PxMin(z, v.z));
+ }
+
+ /**
+ \brief returns MIN(x, y, z);
+ */
+ PX_CUDA_CALLABLE PX_FORCE_INLINE float minElement() const
+ {
+ return PxMin(x, PxMin(y, z));
+ }
+
+ /**
+ \brief element-wise maximum
+ */
+ PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3 maximum(const PxVec3& v) const
+ {
+ return PxVec3(PxMax(x, v.x), PxMax(y, v.y), PxMax(z, v.z));
+ }
+
+ /**
+ \brief returns MAX(x, y, z);
+ */
+ PX_CUDA_CALLABLE PX_FORCE_INLINE float maxElement() const
+ {
+ return PxMax(x, PxMax(y, z));
+ }
+
+ /**
+ \brief returns absolute values of components;
+ */
+ PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3 abs() const
+ {
+ return PxVec3(PxAbs(x), PxAbs(y), PxAbs(z));
+ }
+
+ float x, y, z;
+};
+
+PX_CUDA_CALLABLE static PX_FORCE_INLINE PxVec3 operator*(float f, const PxVec3& v)
+{
+ return PxVec3(f * v.x, f * v.y, f * v.z);
+}
+
+#if !PX_DOXYGEN
+} // namespace physx
+#endif
+
+/** @} */
+#endif // #ifndef PXFOUNDATION_PXVEC3_H
diff --git a/PxShared/include/foundation/PxVec4.h b/PxShared/include/foundation/PxVec4.h
new file mode 100644
index 00000000..883fdf7e
--- /dev/null
+++ b/PxShared/include/foundation/PxVec4.h
@@ -0,0 +1,376 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2016 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#ifndef PXFOUNDATION_PXVEC4_H
+#define PXFOUNDATION_PXVEC4_H
+/** \addtogroup foundation
+@{
+*/
+#include "foundation/PxMath.h"
+#include "foundation/PxVec3.h"
+#include "foundation/PxAssert.h"
+
+/**
+\brief 4 Element vector class.
+
+This is a 4-dimensional vector class with public data members.
+*/
+#if !PX_DOXYGEN
+namespace physx
+{
+#endif
+
+class PxVec4
+{
+ public:
+ /**
+ \brief default constructor leaves data uninitialized.
+ */
+ PX_CUDA_CALLABLE PX_INLINE PxVec4()
+ {
+ }
+
+ /**
+ \brief zero constructor.
+ */
+ PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec4(PxZERO r) : x(0.0f), y(0.0f), z(0.0f), w(0.0f)
+ {
+ PX_UNUSED(r);
+ }
+
+ /**
+ \brief Assigns scalar parameter to all elements.
+
+ Useful to initialize to zero or one.
+
+ \param[in] a Value to assign to elements.
+ */
+ explicit PX_CUDA_CALLABLE PX_INLINE PxVec4(float a) : x(a), y(a), z(a), w(a)
+ {
+ }
+
+ /**
+ \brief Initializes from 3 scalar parameters.
+
+ \param[in] nx Value to initialize X component.
+ \param[in] ny Value to initialize Y component.
+ \param[in] nz Value to initialize Z component.
+ \param[in] nw Value to initialize W component.
+ */
+ PX_CUDA_CALLABLE PX_INLINE PxVec4(float nx, float ny, float nz, float nw) : x(nx), y(ny), z(nz), w(nw)
+ {
+ }
+
+ /**
+ \brief Initializes from 3 scalar parameters.
+
+ \param[in] v Value to initialize the X, Y, and Z components.
+ \param[in] nw Value to initialize W component.
+ */
+ PX_CUDA_CALLABLE PX_INLINE PxVec4(const PxVec3& v, float nw) : x(v.x), y(v.y), z(v.z), w(nw)
+ {
+ }
+
+ /**
+ \brief Initializes from an array of scalar parameters.
+
+ \param[in] v Value to initialize with.
+ */
+ explicit PX_CUDA_CALLABLE PX_INLINE PxVec4(const float v[]) : x(v[0]), y(v[1]), z(v[2]), w(v[3])
+ {
+ }
+
+ /**
+ \brief Copy ctor.
+ */
+ PX_CUDA_CALLABLE PX_INLINE PxVec4(const PxVec4& v) : x(v.x), y(v.y), z(v.z), w(v.w)
+ {
+ }
+
+ // Operators
+
+ /**
+ \brief Assignment operator
+ */
+ PX_CUDA_CALLABLE PX_INLINE PxVec4& operator=(const PxVec4& p)
+ {
+ x = p.x;
+ y = p.y;
+ z = p.z;
+ w = p.w;
+ return *this;
+ }
+
+ /**
+ \brief element access
+ */
+ PX_DEPRECATED PX_CUDA_CALLABLE PX_INLINE float& operator[](unsigned int index)
+ {
+ PX_ASSERT(index <= 3);
+
+ return reinterpret_cast<float*>(this)[index];
+ }
+
+ /**
+ \brief element access
+ */
+ PX_DEPRECATED PX_CUDA_CALLABLE PX_INLINE const float& operator[](unsigned int index) const
+ {
+ PX_ASSERT(index <= 3);
+
+ return reinterpret_cast<const float*>(this)[index];
+ }
+
+ /**
+ \brief returns true if the two vectors are exactly equal.
+ */
+ PX_CUDA_CALLABLE PX_INLINE bool operator==(const PxVec4& v) const
+ {
+ return x == v.x && y == v.y && z == v.z && w == v.w;
+ }
+
+ /**
+ \brief returns true if the two vectors are not exactly equal.
+ */
+ PX_CUDA_CALLABLE PX_INLINE bool operator!=(const PxVec4& v) const
+ {
+ return x != v.x || y != v.y || z != v.z || w != v.w;
+ }
+
+ /**
+ \brief tests for exact zero vector
+ */
+ PX_CUDA_CALLABLE PX_INLINE bool isZero() const
+ {
+ return x == 0 && y == 0 && z == 0 && w == 0;
+ }
+
+ /**
+ \brief returns true if all 3 elems of the vector are finite (not NAN or INF, etc.)
+ */
+ PX_CUDA_CALLABLE PX_INLINE bool isFinite() const
+ {
+ return PxIsFinite(x) && PxIsFinite(y) && PxIsFinite(z) && PxIsFinite(w);
+ }
+
+ /**
+ \brief is normalized - used by API parameter validation
+ */
+ PX_CUDA_CALLABLE PX_INLINE bool isNormalized() const
+ {
+ const float unitTolerance = 1e-4f;
+ return isFinite() && PxAbs(magnitude() - 1) < unitTolerance;
+ }
+
+ /**
+ \brief returns the squared magnitude
+
+ Avoids calling PxSqrt()!
+ */
+ PX_CUDA_CALLABLE PX_INLINE float magnitudeSquared() const
+ {
+ return x * x + y * y + z * z + w * w;
+ }
+
+ /**
+ \brief returns the magnitude
+ */
+ PX_CUDA_CALLABLE PX_INLINE float magnitude() const
+ {
+ return PxSqrt(magnitudeSquared());
+ }
+
+ /**
+ \brief negation
+ */
+ PX_CUDA_CALLABLE PX_INLINE PxVec4 operator-() const
+ {
+ return PxVec4(-x, -y, -z, -w);
+ }
+
+ /**
+ \brief vector addition
+ */
+ PX_CUDA_CALLABLE PX_INLINE PxVec4 operator+(const PxVec4& v) const
+ {
+ return PxVec4(x + v.x, y + v.y, z + v.z, w + v.w);
+ }
+
+ /**
+ \brief vector difference
+ */
+ PX_CUDA_CALLABLE PX_INLINE PxVec4 operator-(const PxVec4& v) const
+ {
+ return PxVec4(x - v.x, y - v.y, z - v.z, w - v.w);
+ }
+
+ /**
+ \brief scalar post-multiplication
+ */
+
+ PX_CUDA_CALLABLE PX_INLINE PxVec4 operator*(float f) const
+ {
+ return PxVec4(x * f, y * f, z * f, w * f);
+ }
+
+ /**
+ \brief scalar division
+ */
+ PX_CUDA_CALLABLE PX_INLINE PxVec4 operator/(float f) const
+ {
+ f = 1.0f / f;
+ return PxVec4(x * f, y * f, z * f, w * f);
+ }
+
+ /**
+ \brief vector addition
+ */
+ PX_CUDA_CALLABLE PX_INLINE PxVec4& operator+=(const PxVec4& v)
+ {
+ x += v.x;
+ y += v.y;
+ z += v.z;
+ w += v.w;
+ return *this;
+ }
+
+ /**
+ \brief vector difference
+ */
+ PX_CUDA_CALLABLE PX_INLINE PxVec4& operator-=(const PxVec4& v)
+ {
+ x -= v.x;
+ y -= v.y;
+ z -= v.z;
+ w -= v.w;
+ return *this;
+ }
+
+ /**
+ \brief scalar multiplication
+ */
+ PX_CUDA_CALLABLE PX_INLINE PxVec4& operator*=(float f)
+ {
+ x *= f;
+ y *= f;
+ z *= f;
+ w *= f;
+ return *this;
+ }
+ /**
+ \brief scalar division
+ */
+ PX_CUDA_CALLABLE PX_INLINE PxVec4& operator/=(float f)
+ {
+ f = 1.0f / f;
+ x *= f;
+ y *= f;
+ z *= f;
+ w *= f;
+ return *this;
+ }
+
+ /**
+ \brief returns the scalar product of this and other.
+ */
+ PX_CUDA_CALLABLE PX_INLINE float dot(const PxVec4& v) const
+ {
+ return x * v.x + y * v.y + z * v.z + w * v.w;
+ }
+
+ /** return a unit vector */
+
+ PX_CUDA_CALLABLE PX_INLINE PxVec4 getNormalized() const
+ {
+ float m = magnitudeSquared();
+ return m > 0.0f ? *this * PxRecipSqrt(m) : PxVec4(0, 0, 0, 0);
+ }
+
+ /**
+ \brief normalizes the vector in place
+ */
+ PX_CUDA_CALLABLE PX_INLINE float normalize()
+ {
+ float m = magnitude();
+ if(m > 0.0f)
+ *this /= m;
+ return m;
+ }
+
+ /**
+ \brief a[i] * b[i], for all i.
+ */
+ PX_CUDA_CALLABLE PX_INLINE PxVec4 multiply(const PxVec4& a) const
+ {
+ return PxVec4(x * a.x, y * a.y, z * a.z, w * a.w);
+ }
+
+ /**
+ \brief element-wise minimum
+ */
+ PX_CUDA_CALLABLE PX_INLINE PxVec4 minimum(const PxVec4& v) const
+ {
+ return PxVec4(PxMin(x, v.x), PxMin(y, v.y), PxMin(z, v.z), PxMin(w, v.w));
+ }
+
+ /**
+ \brief element-wise maximum
+ */
+ PX_CUDA_CALLABLE PX_INLINE PxVec4 maximum(const PxVec4& v) const
+ {
+ return PxVec4(PxMax(x, v.x), PxMax(y, v.y), PxMax(z, v.z), PxMax(w, v.w));
+ }
+
+ PX_CUDA_CALLABLE PX_INLINE PxVec3 getXYZ() const
+ {
+ return PxVec3(x, y, z);
+ }
+
+ /**
+ \brief set vector elements to zero
+ */
+ PX_CUDA_CALLABLE PX_INLINE void setZero()
+ {
+ x = y = z = w = 0.0f;
+ }
+
+ float x, y, z, w;
+};
+
+PX_CUDA_CALLABLE static PX_INLINE PxVec4 operator*(float f, const PxVec4& v)
+{
+ return PxVec4(f * v.x, f * v.y, f * v.z, f * v.w);
+}
+
+#if !PX_DOXYGEN
+} // namespace physx
+#endif
+
+/** @} */
+#endif // #ifndef PXFOUNDATION_PXVEC4_H
diff --git a/PxShared/include/foundation/unix/PxUnixIntrinsics.h b/PxShared/include/foundation/unix/PxUnixIntrinsics.h
new file mode 100644
index 00000000..7c0916f0
--- /dev/null
+++ b/PxShared/include/foundation/unix/PxUnixIntrinsics.h
@@ -0,0 +1,181 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2016 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#ifndef PXFOUNDATION_PXUNIXINTRINSICS_H
+#define PXFOUNDATION_PXUNIXINTRINSICS_H
+
+#include "foundation/Px.h"
+#include "foundation/PxAssert.h"
+
+#if !(PX_LINUX || PX_ANDROID || PX_PS4 || PX_APPLE_FAMILY)
+#error "This file should only be included by Unix builds!!"
+#endif
+
+#if PX_LINUX && PX_ARM
+ // Linux ARM toolchain can somehow end up including cmath header after math.h, which will undef a bunch of macros and place them in std namespace
+ #ifndef isfinite
+ #define isfinite std::isfinite
+ #endif
+#endif
+
+#include <math.h>
+#include <float.h>
+
+namespace physx
+{
+namespace intrinsics
+{
+//! \brief platform-specific absolute value
+PX_CUDA_CALLABLE PX_FORCE_INLINE float abs(float a)
+{
+ return ::fabsf(a);
+}
+
+//! \brief platform-specific select float
+PX_CUDA_CALLABLE PX_FORCE_INLINE float fsel(float a, float b, float c)
+{
+ return (a >= 0.0f) ? b : c;
+}
+
+//! \brief platform-specific sign
+PX_CUDA_CALLABLE PX_FORCE_INLINE float sign(float a)
+{
+ return (a >= 0.0f) ? 1.0f : -1.0f;
+}
+
+//! \brief platform-specific reciprocal
+PX_CUDA_CALLABLE PX_FORCE_INLINE float recip(float a)
+{
+ return 1.0f / a;
+}
+
+//! \brief platform-specific reciprocal estimate
+PX_CUDA_CALLABLE PX_FORCE_INLINE float recipFast(float a)
+{
+ return 1.0f / a;
+}
+
+//! \brief platform-specific square root
+PX_CUDA_CALLABLE PX_FORCE_INLINE float sqrt(float a)
+{
+ return ::sqrtf(a);
+}
+
+//! \brief platform-specific reciprocal square root
+PX_CUDA_CALLABLE PX_FORCE_INLINE float recipSqrt(float a)
+{
+ return 1.0f / ::sqrtf(a);
+}
+
+PX_CUDA_CALLABLE PX_FORCE_INLINE float recipSqrtFast(float a)
+{
+ return 1.0f / ::sqrtf(a);
+}
+
+//! \brief platform-specific sine
+PX_CUDA_CALLABLE PX_FORCE_INLINE float sin(float a)
+{
+ return ::sinf(a);
+}
+
+//! \brief platform-specific cosine
+PX_CUDA_CALLABLE PX_FORCE_INLINE float cos(float a)
+{
+ return ::cosf(a);
+}
+
+//! \brief platform-specific minimum
+PX_CUDA_CALLABLE PX_FORCE_INLINE float selectMin(float a, float b)
+{
+ return a < b ? a : b;
+}
+
+//! \brief platform-specific maximum
+PX_CUDA_CALLABLE PX_FORCE_INLINE float selectMax(float a, float b)
+{
+ return a > b ? a : b;
+}
+
+//! \brief platform-specific finiteness check (not INF or NAN)
+PX_CUDA_CALLABLE PX_FORCE_INLINE bool isFinite(float a)
+{
+ return !!isfinite(a);
+}
+
+//! \brief platform-specific finiteness check (not INF or NAN)
+PX_CUDA_CALLABLE PX_FORCE_INLINE bool isFinite(double a)
+{
+ return !!isfinite(a);
+}
+
+/*!
+Sets \c count bytes starting at \c dst to zero.
+*/
+PX_FORCE_INLINE void* memZero(void* dest, uint32_t count)
+{
+ return memset(dest, 0, count);
+}
+
+/*!
+Sets \c count bytes starting at \c dst to \c c.
+*/
+PX_FORCE_INLINE void* memSet(void* dest, int32_t c, uint32_t count)
+{
+ return memset(dest, c, count);
+}
+
+/*!
+Copies \c count bytes from \c src to \c dst. User memMove if regions overlap.
+*/
+PX_FORCE_INLINE void* memCopy(void* dest, const void* src, uint32_t count)
+{
+ return memcpy(dest, src, count);
+}
+
+/*!
+Copies \c count bytes from \c src to \c dst. Supports overlapping regions.
+*/
+PX_FORCE_INLINE void* memMove(void* dest, const void* src, uint32_t count)
+{
+ return memmove(dest, src, count);
+}
+
+/*!
+Set 128B to zero starting at \c dst+offset. Must be aligned.
+*/
+PX_FORCE_INLINE void memZero128(void* dest, uint32_t offset = 0)
+{
+ PX_ASSERT(((size_t(dest) + offset) & 0x7f) == 0);
+ memSet(reinterpret_cast<char*>(dest) + offset, 0, 128);
+}
+
+} // namespace intrinsics
+} // namespace physx
+
+#endif // #ifndef PXFOUNDATION_PXUNIXINTRINSICS_H
diff --git a/PxShared/include/foundation/windows/PxWindowsFoundationDelayLoadHook.h b/PxShared/include/foundation/windows/PxWindowsFoundationDelayLoadHook.h
new file mode 100644
index 00000000..f328e529
--- /dev/null
+++ b/PxShared/include/foundation/windows/PxWindowsFoundationDelayLoadHook.h
@@ -0,0 +1,71 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2016 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#ifndef PX_FOUNDATION_DELAY_LOAD_HOOK
+#define PX_FOUNDATION_DELAY_LOAD_HOOK
+
+#include "foundation/PxPreprocessor.h"
+
+/** \addtogroup foundation
+@{
+*/
+
+#if !PX_DOXYGEN
+namespace physx
+{
+#endif
+ /**
+ \brief PxFoundationDelayLoadHook
+
+ This is a helper class for delay loading the PxFoundation dll.
+ If a PxFoundation dll with a non-default file name needs to be loaded,
+ PxFoundationDelayLoadHook can be sub-classed to provide the custom file names.
+
+ Once the names are set, the instance must be set for use by the loading dll.
+ */
+ class PxFoundationDelayLoadHook
+ {
+ public:
+ PxFoundationDelayLoadHook() {}
+ virtual ~PxFoundationDelayLoadHook() {}
+
+ virtual const char* getPxFoundationDEBUGDllName() const = 0;
+ virtual const char* getPxFoundationCHECKEDDllName() const = 0;
+ virtual const char* getPxFoundationPROFILEDllName() const = 0;
+ virtual const char* getPxFoundationDllName() const = 0;
+
+ protected:
+ private:
+ };
+
+#if !PX_DOXYGEN
+} // namespace physx
+#endif
+/** @} */
+#endif
diff --git a/PxShared/include/foundation/windows/PxWindowsIntrinsics.h b/PxShared/include/foundation/windows/PxWindowsIntrinsics.h
new file mode 100644
index 00000000..bff41375
--- /dev/null
+++ b/PxShared/include/foundation/windows/PxWindowsIntrinsics.h
@@ -0,0 +1,188 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2016 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#ifndef PXFOUNDATION_PXWINDOWSINTRINSICS_H
+#define PXFOUNDATION_PXWINDOWSINTRINSICS_H
+
+#include "foundation/Px.h"
+#include "foundation/PxAssert.h"
+
+#if !PX_WINDOWS_FAMILY
+#error "This file should only be included by Windows builds!!"
+#endif
+
+#include <math.h>
+#include <float.h>
+
+#if !PX_DOXYGEN
+namespace physx
+{
+namespace intrinsics
+{
+#endif
+
+//! \brief platform-specific absolute value
+PX_CUDA_CALLABLE PX_FORCE_INLINE float abs(float a)
+{
+ return ::fabsf(a);
+}
+
+//! \brief platform-specific select float
+PX_CUDA_CALLABLE PX_FORCE_INLINE float fsel(float a, float b, float c)
+{
+ return (a >= 0.0f) ? b : c;
+}
+
+//! \brief platform-specific sign
+PX_CUDA_CALLABLE PX_FORCE_INLINE float sign(float a)
+{
+ return (a >= 0.0f) ? 1.0f : -1.0f;
+}
+
+//! \brief platform-specific reciprocal
+PX_CUDA_CALLABLE PX_FORCE_INLINE float recip(float a)
+{
+ return 1.0f / a;
+}
+
+//! \brief platform-specific reciprocal estimate
+PX_CUDA_CALLABLE PX_FORCE_INLINE float recipFast(float a)
+{
+ return 1.0f / a;
+}
+
+//! \brief platform-specific square root
+PX_CUDA_CALLABLE PX_FORCE_INLINE float sqrt(float a)
+{
+ return ::sqrtf(a);
+}
+
+//! \brief platform-specific reciprocal square root
+PX_CUDA_CALLABLE PX_FORCE_INLINE float recipSqrt(float a)
+{
+ return 1.0f / ::sqrtf(a);
+}
+
+//! \brief platform-specific reciprocal square root estimate
+PX_CUDA_CALLABLE PX_FORCE_INLINE float recipSqrtFast(float a)
+{
+ return 1.0f / ::sqrtf(a);
+}
+
+//! \brief platform-specific sine
+PX_CUDA_CALLABLE PX_FORCE_INLINE float sin(float a)
+{
+ return ::sinf(a);
+}
+
+//! \brief platform-specific cosine
+PX_CUDA_CALLABLE PX_FORCE_INLINE float cos(float a)
+{
+ return ::cosf(a);
+}
+
+//! \brief platform-specific minimum
+PX_CUDA_CALLABLE PX_FORCE_INLINE float selectMin(float a, float b)
+{
+ return a < b ? a : b;
+}
+
+//! \brief platform-specific maximum
+PX_CUDA_CALLABLE PX_FORCE_INLINE float selectMax(float a, float b)
+{
+ return a > b ? a : b;
+}
+
+//! \brief platform-specific finiteness check (not INF or NAN)
+PX_CUDA_CALLABLE PX_FORCE_INLINE bool isFinite(float a)
+{
+#ifdef __CUDACC__
+ return !!isfinite(a);
+#else
+ return (0 == ((_FPCLASS_SNAN | _FPCLASS_QNAN | _FPCLASS_NINF | _FPCLASS_PINF) & _fpclass(a)));
+#endif
+}
+
+//! \brief platform-specific finiteness check (not INF or NAN)
+PX_CUDA_CALLABLE PX_FORCE_INLINE bool isFinite(double a)
+{
+#ifdef __CUDACC__
+ return !!isfinite(a);
+#else
+ return (0 == ((_FPCLASS_SNAN | _FPCLASS_QNAN | _FPCLASS_NINF | _FPCLASS_PINF) & _fpclass(a)));
+#endif
+}
+
+/*!
+Sets \c count bytes starting at \c dst to zero.
+*/
+PX_FORCE_INLINE void* memZero(void* dest, uint32_t count)
+{
+ return memset(dest, 0, count);
+}
+
+/*!
+Sets \c count bytes starting at \c dst to \c c.
+*/
+PX_FORCE_INLINE void* memSet(void* dest, int32_t c, uint32_t count)
+{
+ return memset(dest, c, count);
+}
+
+/*!
+Copies \c count bytes from \c src to \c dst. User memMove if regions overlap.
+*/
+PX_FORCE_INLINE void* memCopy(void* dest, const void* src, uint32_t count)
+{
+ return memcpy(dest, src, count);
+}
+
+/*!
+Copies \c count bytes from \c src to \c dst. Supports overlapping regions.
+*/
+PX_FORCE_INLINE void* memMove(void* dest, const void* src, uint32_t count)
+{
+ return memmove(dest, src, count);
+}
+
+/*!
+Set 128B to zero starting at \c dst+offset. Must be aligned.
+*/
+PX_FORCE_INLINE void memZero128(void* dest, uint32_t offset = 0)
+{
+ PX_ASSERT(((size_t(dest) + offset) & 0x7f) == 0);
+ memSet(reinterpret_cast<char*>(dest) + offset, 0, 128);
+}
+
+#if !PX_DOXYGEN
+} // namespace intrinsics
+} // namespace physx
+#endif
+
+#endif // #ifndef PXFOUNDATION_PXWINDOWSINTRINSICS_H
diff --git a/PxShared/include/pvd/PxPvd.h b/PxShared/include/pvd/PxPvd.h
new file mode 100644
index 00000000..0ed17fde
--- /dev/null
+++ b/PxShared/include/pvd/PxPvd.h
@@ -0,0 +1,191 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2016 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#ifndef PXPVDSDK_PXPVD_H
+#define PXPVDSDK_PXPVD_H
+
+/** \addtogroup pvd
+@{
+*/
+#include "foundation/PxFlags.h"
+#include "foundation/PxProfiler.h"
+
+#if !PX_DOXYGEN
+namespace physx
+{
+#endif
+
+class PxPvdTransport;
+
+#if PX_WINDOWS_FAMILY && !PX_ARM_FAMILY
+#ifndef PX_PVDSDK_DLL
+#define PX_PVDSDK_API PX_DLL_IMPORT
+#elif PX_PVDSDK_DLL
+#define PX_PVDSDK_API PX_DLL_EXPORT
+#endif
+#elif PX_UNIX_FAMILY
+#ifdef PX_PVDSDK_DLL
+#define PX_PVDSDK_API PX_UNIX_EXPORT
+#endif
+#endif
+
+#ifndef PX_PVDSDK_API
+#define PX_PVDSDK_API
+#endif
+
+/**
+\brief types of instrumentation that PVD can do.
+*/
+struct PxPvdInstrumentationFlag
+{
+ enum Enum
+ {
+ /**
+ \brief Send debugging information to PVD.
+
+ This information is the actual object data
+ of the rigid statics, shapes, articulations, etc. Sending this information has
+ a noticeable impact on performance and thus this flag should not be set
+ if you want an accurate performance profile.
+ */
+ eDEBUG = 1 << 0,
+ /**
+ \brief Send profile information to PVD.
+
+ This information populates PVD's profile view. It has (at this time) negligible cost
+ compared to Debug information and makes PVD *much* more useful so it is quite
+ highly recommended.
+
+ This flag works together with a PxCreatePhysics parameter.
+ Using it allows the SDK to send profile
+ events to PVD.
+ */
+ ePROFILE = 1 << 1,
+ /**
+ \brief Send memory information to PVD.
+
+ The PVD sdk side hooks into the Foundation memory controller and listens to
+ allocation/deallocation events. This has a noticeable hit on the first frame,
+ however, this data is somewhat compressed and the PhysX SDK doesn't allocate much
+ once it hits a steady state. This information also has a fairly negligible
+ impact and thus is also highly recommended.
+
+ This flag works together with a PxCreatePhysics parameter,
+ trackOutstandingAllocations. Using both of them together allows users to have
+ an accurate view of the overall memory usage of the simulation at the cost of
+ a hashtable lookup per allocation/deallocation. Again, PhysX makes a best effort
+ attempt not to allocate or deallocate during simulation so this hashtable lookup
+ tends to have no effect past the first frame.
+
+ Sending memory information without tracking outstanding allocations means that
+ PVD will only have accurate information about the state of the memory system from the
+ point the actual connection happened.
+ */
+ eMEMORY = 1 << 2,
+ eALL = (eDEBUG | ePROFILE | eMEMORY)
+ };
+};
+
+/**
+\brief Bitfield that contains a set of raised flags defined in PxPvdInstrumentationFlag.
+
+@see PxPvdInstrumentationFlag
+*/
+typedef PxFlags<PxPvdInstrumentationFlag::Enum, uint8_t> PxPvdInstrumentationFlags;
+PX_FLAGS_OPERATORS(PxPvdInstrumentationFlag::Enum, uint8_t)
+
+/**
+\brief PxPvd is the top-level class for the PVD framework, and the main customer interface for PVD
+configuration. It is a singleton class, instantiated and owned by the application.
+*/
+class PxPvd : public physx::PxProfilerCallback
+{
+ public:
+ /**
+ Connects the SDK to the PhysX Visual Debugger application.
+ \param transport transport for pvd captured data.
+ \param flags Flags to set.
+ \return True if success
+ */
+ virtual bool connect(PxPvdTransport& transport, PxPvdInstrumentationFlags flags) = 0;
+
+ /**
+ Disconnects the SDK from the PhysX Visual Debugger application.
+ If we are still connected, this will kill the entire debugger connection.
+ */
+ virtual void disconnect() = 0;
+
+ /**
+ * Return if connection to PVD is created.
+ \param useCachedStatus
+ 1> When useCachedStatus is false, isConnected() checks the lowlevel network status.
+ This can be slow because it needs to lock the lowlevel network stream. If isConnected() is
+ called frequently, the expense of locking can be significant.
+ 2> When useCachedStatus is true, isConnected() checks the highlevel cached status with atomic access.
+ It is faster than locking, but the status may be different from the lowlevel network with latency of up to
+ one frame.
+ The reason for this is that the cached status is changed inside the socket listener, which is not
+ called immediately when the lowlevel connection status changes.
+ \return True if a connection exists.
+ */
+ virtual bool isConnected(bool useCachedStatus = true) = 0;
+
+ /**
+ \return the PVD data transport, or NULL if no transport is present.
+ */
+ virtual PxPvdTransport* getTransport() = 0;
+
+ /**
+ Retrieves the PVD flags. See PxPvdInstrumentationFlags.
+ */
+ virtual PxPvdInstrumentationFlags getInstrumentationFlags() = 0;
+
+ /**
+ \brief Releases the pvd instance.
+ */
+ virtual void release() = 0;
+
+ protected:
+ // Non-public destructor: instances are destroyed through release(), not deleted directly.
+ virtual ~PxPvd()
+ {
+ }
+};
+
+/**
+ \brief Create a pvd instance.
+ \param foundation is the foundation instance that stores the allocator and error callbacks.
+ \return the new PxPvd instance; release it via PxPvd::release().
+*/
+PX_PVDSDK_API PxPvd* PX_CALL_CONV PxCreatePvd(PxFoundation& foundation);
+
+#if !PX_DOXYGEN
+} // namespace physx
+#endif
+
+/** @} */
+#endif // PXPVDSDK_PXPVD_H
diff --git a/PxShared/include/pvd/PxPvdTransport.h b/PxShared/include/pvd/PxPvdTransport.h
new file mode 100644
index 00000000..726a194a
--- /dev/null
+++ b/PxShared/include/pvd/PxPvdTransport.h
@@ -0,0 +1,129 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2016 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#ifndef PXPVDSDK_PXPVDTRANSPORT_H
+#define PXPVDSDK_PXPVDTRANSPORT_H
+
+/** \addtogroup pvd
+@{
+*/
+#include "foundation/PxErrors.h"
+#include "foundation/PxFlags.h"
+#include "pvd/PxPvd.h"
+
+#if !PX_DOXYGEN
+namespace physx
+{
+#endif
+
+/**
+\brief PxPvdTransport is an interface representing the data transport mechanism.
+This class defines all services associated with the transport: configuration, connection, reading, writing etc.
+It is owned by the application, and can be realized as a file or a socket (using one-line PxDefault<...> methods in
+PhysXExtensions) or in a custom implementation. This is a class that is intended for use by PVD, not by the
+application; the application entry points are PxPvd and PvdClient.
+*/
+
+class PxPvdTransport
+{
+ public:
+ // connect, isConnected, disconnect, read, write, flush
+
+ /**
+ Connects to the Visual Debugger application.
+ \return True if success
+ */
+ virtual bool connect() = 0;
+
+ /**
+ Disconnects from the Visual Debugger application.
+ If we are still connected, this will kill the entire debugger connection.
+ */
+ virtual void disconnect() = 0;
+
+ /**
+ * \return whether a connection to PVD has been created.
+ */
+ virtual bool isConnected() = 0;
+
+ /**
+ * Write bytes to the other endpoint of the connection. Callers should lock() before write(). If an error occurs
+ * the connection will be assumed to be dead.
+ */
+ virtual bool write(const uint8_t* inBytes, uint32_t inLength) = 0;
+
+ /*
+ lock this transport and return it
+ */
+ virtual PxPvdTransport& lock() = 0;
+
+ /*
+ unlock this transport
+ */
+ virtual void unlock() = 0;
+
+ /**
+ * send any data and block until we know it is at least on the wire.
+ */
+ virtual void flush() = 0;
+
+ /**
+ * \return size of written data.
+ */
+ virtual uint64_t getWrittenDataSize() = 0;
+
+ /**
+ \brief Releases the transport instance.
+ */
+ virtual void release() = 0;
+
+ protected:
+ // Non-public destructor: instances are destroyed through release(), not deleted directly.
+ virtual ~PxPvdTransport()
+ {
+ }
+};
+
+/**
+ \brief Create a default socket transport.
+ \param host host address of the pvd application.
+ \param port IP port used for pvd; should be the same as the port setting in the pvd application.
+ \param timeoutInMilliseconds timeout when connecting to the pvd host.
+*/
+PX_PVDSDK_API PxPvdTransport* PX_CALL_CONV
+PxDefaultPvdSocketTransportCreate(const char* host, int port, unsigned int timeoutInMilliseconds);
+
+/**
+ \brief Create a default file transport.
+ \param name full path filename used to save captured pvd data.
+*/
+PX_PVDSDK_API PxPvdTransport* PX_CALL_CONV PxDefaultPvdFileTransportCreate(const char* name);
+
+#if !PX_DOXYGEN
+} // namespace physx
+#endif
+
+/** @} */
+#endif // PXPVDSDK_PXPVDTRANSPORT_H
diff --git a/PxShared/include/pvd/windows/PxWindowsPvdDelayLoadHook.h b/PxShared/include/pvd/windows/PxWindowsPvdDelayLoadHook.h
new file mode 100644
index 00000000..f5096a32
--- /dev/null
+++ b/PxShared/include/pvd/windows/PxWindowsPvdDelayLoadHook.h
@@ -0,0 +1,60 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2016 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+
+#ifndef PX_PVD_DELAY_LOAD_HOOK
+#define PX_PVD_DELAY_LOAD_HOOK
+
+#include "foundation/PxPreprocessor.h"
+#include "foundation/windows/PxWindowsFoundationDelayLoadHook.h"
+#include "../PxPvd.h"
+
+/** \addtogroup pvd
+@{
+*/
+
+#if !PX_DOXYGEN
+namespace physx
+{
+#endif
+
+ /**
+ \brief Sets delay load hook instance for Foundation dll.
+
+ \param[in] hook Delay load hook.
+
+ @see PxFoundationDelayLoadHook
+ */
+ PX_C_EXPORT PX_PVDSDK_API void PX_CALL_CONV PxPvdSetFoundationDelayLoadHook(const physx::PxFoundationDelayLoadHook* hook);
+
+#if !PX_DOXYGEN
+} // namespace physx
+#endif
+/** @} */
+#endif
diff --git a/PxShared/include/task/PxCpuDispatcher.h b/PxShared/include/task/PxCpuDispatcher.h
new file mode 100644
index 00000000..360a9123
--- /dev/null
+++ b/PxShared/include/task/PxCpuDispatcher.h
@@ -0,0 +1,79 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2016 NVIDIA Corporation. All rights reserved.
+
+#ifndef PXTASK_PXCPUDISPATCHER_H
+#define PXTASK_PXCPUDISPATCHER_H
+
+#include "task/PxTaskDefine.h"
+#include "foundation/PxSimpleTypes.h"
+
+namespace physx
+{
+
+class PxBaseTask;
+
+/**
+ \brief A CpuDispatcher is responsible for scheduling the execution of tasks passed to it by the SDK.
+
+ A typical implementation would for example use a thread pool with the dispatcher
+ pushing tasks onto worker thread queues or a global queue.
+
+ @see PxBaseTask
+ @see PxTask
+ @see PxTaskManager
+*/
+class PxCpuDispatcher
+{
+public:
+ /**
+ \brief Called by the TaskManager when a task is to be queued for execution.
+
+ Upon receiving a task, the dispatcher should schedule the task
+ to run when resources are available. After the task has been run,
+ it should call the release() method and discard its pointer.
+
+ \param[in] task The task to be run.
+
+ @see PxBaseTask
+ */
+ virtual void submitTask( PxBaseTask& task ) = 0;
+
+ /**
+ \brief Returns the number of available worker threads for this dispatcher.
+
+ The SDK will use this count to control how many tasks are submitted. By
+ matching the number of tasks with the number of execution units task
+ overhead can be reduced.
+ */
+ virtual uint32_t getWorkerCount() const = 0;
+
+ /// \brief Virtual destructor so implementations may be deleted through this interface.
+ virtual ~PxCpuDispatcher() {}
+};
+
+} // end physx namespace
+
+#endif // PXTASK_PXCPUDISPATCHER_H
diff --git a/PxShared/include/task/PxGpuDispatcher.h b/PxShared/include/task/PxGpuDispatcher.h
new file mode 100644
index 00000000..5fff9d0f
--- /dev/null
+++ b/PxShared/include/task/PxGpuDispatcher.h
@@ -0,0 +1,248 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2016 NVIDIA Corporation. All rights reserved.
+
+#ifndef PXTASK_PXGPUDISPATCHER_H
+#define PXTASK_PXGPUDISPATCHER_H
+
+#include "task/PxTaskDefine.h"
+#include "task/PxTask.h"
+
+/* forward decl to avoid including <cuda.h> */
+typedef struct CUstream_st* CUstream;
+
+namespace physx
+{
+
+struct PxGpuCopyDesc;
+class PxCudaContextManager;
+
+PX_PUSH_PACK_DEFAULT
+
+class PxTaskManager;
+
+/** \brief A GpuTask dispatcher
+ *
+ * A PxGpuDispatcher executes GpuTasks submitted by one or more TaskManagers (one
+ * or more scenes). It maintains a CPU worker thread which waits on GpuTask
+ * "groups" to be submitted. The submission API is explicitly sessioned so that
+ * GpuTasks are dispatched together as a group whenever possible to improve
+ * parallelism on the GPU.
+ *
+ * A PxGpuDispatcher cannot be allocated ad-hoc, they are created as a result of
+ * creating a PxCudaContextManager. Every PxCudaContextManager has a PxGpuDispatcher
+ * instance that can be queried. In this way, each PxGpuDispatcher is tied to
+ * exactly one CUDA context.
+ *
+ * A scene will use CPU fallback Tasks for GpuTasks if the PxTaskManager provided
+ * to it does not have a PxGpuDispatcher. For this reason, the PxGpuDispatcher must
+ * be assigned to the PxTaskManager before the PxTaskManager is given to a scene.
+ *
+ * Multiple TaskManagers may safely share a single PxGpuDispatcher instance, thus
+ * enabling scenes to share a CUDA context.
+ *
+ * Only failureDetected() is intended for use by the user. The rest of the
+ * PxGpuDispatcher public methods are reserved for internal use by
+ * TaskManagers and GpuTasks.
+ */
+class PxGpuDispatcher
+{
+public:
+ /** \brief Record the start of a simulation step
+ *
+ * A PxTaskManager calls this function to record the beginning of a simulation
+ * step. The PxGpuDispatcher uses this notification to initialize the
+ * profiler state.
+ */
+ virtual void startSimulation() = 0;
+
+ /** \brief Record the start of a GpuTask batch submission
+ *
+ * A PxTaskManager calls this function to notify the PxGpuDispatcher that one or
+ * more GpuTasks are about to be submitted for execution. The PxGpuDispatcher
+ * will not read the incoming task queue until it receives one finishGroup()
+ * call for each startGroup() call. This is to ensure as many GpuTasks as
+ * possible are executed together as a group, generating optimal parallelism
+ * on the GPU.
+ */
+ virtual void startGroup() = 0;
+
+ /** \brief Submit a GpuTask for execution
+ *
+ * Submitted tasks are pushed onto an incoming queue. The PxGpuDispatcher
+ * will take the contents of this queue every time the pending group count
+ * reaches 0 and run the group of submitted GpuTasks as an interleaved
+ * group.
+ */
+ virtual void submitTask(PxTask& task) = 0;
+
+ /** \brief Record the end of a GpuTask batch submission
+ *
+ * A PxTaskManager calls this function to notify the PxGpuDispatcher that it is
+ * done submitting a group of GpuTasks (GpuTasks which were all made ready
+ * to run by the same prerequisite dependency becoming resolved). If no
+ * other group submissions are in progress, the PxGpuDispatcher will execute
+ * the set of ready tasks.
+ */
+ virtual void finishGroup() = 0;
+
+ /** \brief Add a CUDA completion prerequisite dependency to a task
+ *
+ * A GpuTask calls this function to add a prerequisite dependency on another
+ * task (usually a CpuTask) preventing that task from starting until all of
+ * the CUDA kernels and copies already launched have been completed. The
+ * PxGpuDispatcher will increment that task's reference count, blocking its
+ * execution, until the CUDA work is complete.
+ *
+ * This is generally only required when a CPU task is expecting the results
+ * of the CUDA kernels to have been copied into host memory.
+ *
+ * This mechanism is not required to ensure CUDA kernels and
+ * copies are issued in the correct order. Kernel issue order is determined
+ * by normal task dependencies. The rule of thumb is to only use a blocking
+ * completion prerequisite if the task in question depends on a completed
+ * GPU->Host DMA.
+ *
+ * The PxGpuDispatcher issues a blocking event record to CUDA for the purposes
+ * of tracking the already submitted CUDA work. When this event is
+ * resolved, the PxGpuDispatcher manually decrements the reference count of
+ * the specified task, allowing it to execute (assuming it does not have
+ * other pending prerequisites).
+ */
+ virtual void addCompletionPrereq(PxBaseTask& task) = 0;
+
+ /** \brief Retrieve the PxCudaContextManager associated with this
+ * PxGpuDispatcher
+ *
+ * Every PxCudaContextManager has one PxGpuDispatcher, and every PxGpuDispatcher
+ * has one PxCudaContextManager.
+ */
+ virtual PxCudaContextManager* getCudaContextManager() = 0;
+
+ /** \brief Record the end of a simulation frame
+ *
+ * A PxTaskManager calls this function to record the completion of its
+ * dependency graph. If profiling is enabled, the PxGpuDispatcher will
+ * trigger the retrieval of profiling data from the GPU at this point.
+ */
+ virtual void stopSimulation() = 0;
+
+ /** \brief Returns true if a CUDA call has returned a non-recoverable error
+ *
+ * A return value of true indicates a fatal error has occurred. To protect
+ * itself, the PxGpuDispatcher enters a fall through mode that allows GpuTasks
+ * to complete without being executed. This allows simulations to continue
+ * but leaves GPU content static or corrupted.
+ *
+ * The user may try to recover from these failures by deleting GPU content
+ * so the visual artifacts are minimized. But there is no way to recover
+ * the state of the GPU actors before the failure. Once a CUDA context is
+ * in this state, the only recourse is to create a new CUDA context, a new
+ * scene, and start over.
+ *
+ * This is our "Best Effort" attempt to not turn a soft failure into a hard
+ * failure because continued use of a CUDA context after it has returned an
+ * error will usually result in a driver reset. However if the initial
+ * failure was serious enough, a reset may have already occurred by the time
+ * we learn of it.
+ */
+ virtual bool failureDetected() const = 0;
+
+ /** \brief Force the PxGpuDispatcher into failure mode
+ *
+ * This API should be used if user code detects a non-recoverable CUDA
+ * error. This ensures the PxGpuDispatcher does not launch any further
+ * CUDA work. Subsequent calls to failureDetected() will return true.
+ */
+ virtual void forceFailureMode() = 0;
+
+ /** \brief Launch a copy kernel with arbitrary number of copy commands
+ *
+ * This method is intended to be called from Kernel GpuTasks, but it can
+ * function outside of that context as well.
+ *
+ * If count is 1, the descriptor is passed to the kernel as arguments, so it
+ * may be declared on the stack.
+ *
+ * If count is greater than 1, the kernel will read the descriptors out of
+ * host memory. Because of this, the descriptor array must be located in
+ * page locked (pinned) memory. The provided descriptors may be modified by
+ * this method (converting host pointers to their GPU mapped equivalents)
+ * and should be considered *owned* by CUDA until the current batch of work
+ * has completed, so descriptor arrays should not be freed or modified until
+ * you have received a completion notification.
+ *
+ * If your GPU does not support mapping of page locked memory (SM>=1.1),
+ * this function degrades to calling CUDA copy methods.
+ */
+ virtual void launchCopyKernel(PxGpuCopyDesc* desc, uint32_t count, CUstream stream) = 0;
+
+ /** \brief Query pre launch task that runs before launching gpu kernels.
+ *
+ * This is part of an optional feature to schedule multiple gpu features
+ * at the same time to get kernels to run in parallel.
+ * \note Do *not* set the continuation on the returned task, but use addPreLaunchDependent().
+ */
+ virtual PxBaseTask& getPreLaunchTask() = 0;
+
+ /** \brief Adds a gpu launch task that gets executed after the pre launch task.
+ *
+ * This is part of an optional feature to schedule multiple gpu features
+ * at the same time to get kernels to run in parallel.
+ * \note Each call adds a reference to the pre-launch task.
+ */
+ virtual void addPreLaunchDependent(PxBaseTask& dependent) = 0;
+
+ /** \brief Query post launch task that runs after the gpu is done.
+ *
+ * This is part of an optional feature to schedule multiple gpu features
+ * at the same time to get kernels to run in parallel.
+ * \note Do *not* set the continuation on the returned task, but use addPostLaunchDependent().
+ */
+ virtual PxBaseTask& getPostLaunchTask() = 0;
+
+ /** \brief Adds a task that gets executed after the post launch task.
+ *
+ * This is part of an optional feature to schedule multiple gpu features
+ * at the same time to get kernels to run in parallel.
+ * \note Each call adds a reference to the post launch task.
+ */
+ virtual void addPostLaunchDependent(PxBaseTask& dependent) = 0;
+
+protected:
+ /** \brief protected destructor
+ *
+ * GpuDispatchers are allocated and freed by their PxCudaContextManager.
+ */
+ virtual ~PxGpuDispatcher() {}
+};
+
+PX_POP_PACK
+
+} // end physx namespace
+
+
+#endif // PXTASK_PXGPUDISPATCHER_H
diff --git a/PxShared/include/task/PxGpuTask.h b/PxShared/include/task/PxGpuTask.h
new file mode 100644
index 00000000..f96e1a40
--- /dev/null
+++ b/PxShared/include/task/PxGpuTask.h
@@ -0,0 +1,118 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2016 NVIDIA Corporation. All rights reserved.
+
+
+#ifndef PXTASK_PXGPUTASK_H
+#define PXTASK_PXGPUTASK_H
+
+#include "task/PxTaskDefine.h"
+#include "task/PxTask.h"
+#include "task/PxGpuDispatcher.h"
+
+namespace physx
+{
+
+PX_PUSH_PACK_DEFAULT
+
+/** \brief Define the 'flavor' of a PxGpuTask
+ *
+ * Each PxGpuTask should have a specific function; either copying data to the
+ * device, running kernels on that data, or copying data from the device.
+ *
+ * For optimal performance, the dispatcher should run all available HtoD tasks
+ * before running all Kernel tasks, and all Kernel tasks before running any DtoH
+ * tasks. This provides maximal kernel overlap and the least number of CUDA
+ * flushes.
+ */
+struct PxGpuTaskHint
+{
+ /// \brief Enums for the type of GPU task
+ enum Enum
+ {
+ HostToDevice, //!< task copies data to the device
+ Kernel, //!< task runs kernels on device data
+ DeviceToHost, //!< task copies data from the device
+
+ NUM_GPU_TASK_HINTS //!< number of hint values (not a valid hint)
+ };
+};
+
+/**
+ * \brief PxTask implementation for launching CUDA work
+ */
+class PxGpuTask : public PxTask
+{
+public:
+ /// \brief Constructor; starts with no completion task attached.
+ PxGpuTask() : mComp(NULL) {}
+
+ /**
+ * \brief iterative "run" function for a PxGpuTask
+ *
+ * The GpuDispatcher acquires the CUDA context for the duration of this
+ * function call, and it is highly recommended that the PxGpuTask use the
+ * provided CUstream for all kernels.
+ *
+ * kernelIndex will be 0 for the initial call and incremented before each
+ * subsequent call. Once launchInstance() returns false, its PxGpuTask is
+ * considered completed and is released.
+ */
+ virtual bool launchInstance(CUstream stream, int kernelIndex) = 0;
+
+ /**
+ * \brief Returns a hint indicating the function of this task
+ */
+ virtual PxGpuTaskHint::Enum getTaskHint() const = 0;
+
+ /**
+ * \brief Specify a task that will have its reference count decremented
+ * when this task is released
+ */
+ void setCompletionTask(PxBaseTask& task)
+ {
+ mComp = &task;
+ }
+
+ /// \brief Drops the completion task's reference (if one was set) and then
+ /// releases this task via PxTask::release().
+ /// NOTE(review): presumably overrides a virtual PxTask::release() — confirm against PxTask.h.
+ void release()
+ {
+ if (mComp)
+ {
+ mComp->removeReference();
+ mComp = NULL;
+ }
+ PxTask::release();
+ }
+
+protected:
+ /// \brief A pointer to the completion task (NULL if none was set)
+ PxBaseTask* mComp;
+};
+
+PX_POP_PACK
+
+} // end physx namespace
+
+#endif // PXTASK_PXGPUTASK_H
diff --git a/PxShared/include/task/PxTask.h b/PxShared/include/task/PxTask.h
new file mode 100644
index 00000000..ac2c189b
--- /dev/null
+++ b/PxShared/include/task/PxTask.h
@@ -0,0 +1,362 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2016 NVIDIA Corporation. All rights reserved.
+
+#ifndef PXTASK_PXTASK_H
+#define PXTASK_PXTASK_H
+
+#include "task/PxTaskDefine.h"
+#include "task/PxTaskManager.h"
+#include "task/PxCpuDispatcher.h"
+#include "task/PxGpuDispatcher.h"
+#include "foundation/PxAssert.h"
+
+namespace physx
+{
+
+/**
+ * \brief Base class of all task types
+ *
+ * PxBaseTask defines a runnable reference counted task with built-in profiling.
+ */
+class PxBaseTask
+{
+public:
+ PxBaseTask() : mEventID(0xFFFF), mProfileStat(0), mTm(0) {}
+ virtual ~PxBaseTask() {}
+
+ /**
+ * \brief The user-implemented run method where the task's work should be performed
+ *
+ * run() methods must be thread safe, stack friendly (no alloca, etc), and
+ * must never block.
+ */
+ virtual void run() = 0;
+
+ /**
+ * \brief Return a user-provided task name for profiling purposes.
+ *
+ * It does not have to be unique, but unique names are helpful.
+ *
+ * \return The name of this task
+ */
+ virtual const char* getName() const = 0;
+
+ //! \brief Implemented by derived implementation classes
+ virtual void addReference() = 0;
+ //! \brief Implemented by derived implementation classes
+ virtual void removeReference() = 0;
+ //! \brief Implemented by derived implementation classes
+ virtual int32_t getReference() const = 0;
+
+ /** \brief Implemented by derived implementation classes
+ *
+ * A task may assume in its release() method that the task system no longer holds
+ * references to it - so it may safely run its destructor, recycle itself, etc.
+ * provided no additional user references to the task exist
+ */
+
+ virtual void release() = 0;
+
+ /**
+ * \brief Execute user run method with wrapping profiling events.
+ *
+ * Optional entry point for use by CpuDispatchers.
+ *
+ * \param[in] threadId The threadId of the thread that executed the task.
+ */
+ PX_INLINE void runProfiled(uint32_t threadId=0)
+ {
+ mTm->emitStartEvent(*this, threadId);
+ run();
+ mTm->emitStopEvent(*this, threadId);
+ }
+
+ /**
+ * \brief Specify stop event statistic
+ *
+ * If called before or while the task is executing, the given value
+ * will appear in the task's event bar in the profile viewer
+ *
+ * \param[in] stat The stat to signal when the task is finished
+ */
+ PX_INLINE void setProfileStat( uint16_t stat )
+ {
+ mProfileStat = stat;
+ }
+
+ /**
+ * \brief Return PxTaskManager to which this task was submitted
+ *
+ * Note, can return NULL if task was not submitted, or has been
+ * completed.
+ */
+ PX_INLINE PxTaskManager* getTaskManager() const
+ {
+ return mTm;
+ }
+
+protected:
+ uint16_t mEventID; //!< Registered profile event ID
+ uint16_t mProfileStat; //!< Profiling statistic
+ PxTaskManager* mTm; //!< Owning PxTaskManager instance
+
+ friend class PxTaskMgr;
+};
+
+
+/**
+ * \brief A PxBaseTask implementation with deferred execution and full dependencies
+ *
+ * A PxTask must be submitted to a PxTaskManager to be executed. Tasks may
+ * optionally be named when they are submitted.
+ */
+class PxTask : public PxBaseTask
+{
+public:
+ PxTask() : mTaskID(0) {}
+ virtual ~PxTask() {}
+
+ //! \brief Release method implementation
+ virtual void release()
+ {
+ PX_ASSERT(mTm);
+
+ // clear mTm before calling taskCompleted() for safety
+ PxTaskManager* save = mTm;
+ mTm = NULL;
+ save->taskCompleted( *this );
+ }
+
+ //! \brief Inform the PxTaskManager this task must finish before the given
+ // task is allowed to start.
+ PX_INLINE void finishBefore( PxTaskID taskID )
+ {
+ PX_ASSERT(mTm);
+ mTm->finishBefore( *this, taskID);
+ }
+
+ //! \brief Inform the PxTaskManager this task cannot start until the given
+ // task has completed.
+ PX_INLINE void startAfter( PxTaskID taskID )
+ {
+ PX_ASSERT(mTm);
+ mTm->startAfter( *this, taskID );
+ }
+
+ /**
+ * \brief Manually increment this task's reference count. The task will
+ * not be allowed to run until removeReference() is called.
+ */
+ PX_INLINE void addReference()
+ {
+ PX_ASSERT(mTm);
+ mTm->addReference( mTaskID );
+ }
+
+ /**
+ * \brief Manually decrement this task's reference count. If the reference
+ * count reaches zero, the task will be dispatched.
+ */
+ PX_INLINE void removeReference()
+ {
+ PX_ASSERT(mTm);
+ mTm->decrReference( mTaskID );
+ }
+
+ /**
+ * \brief Return the ref-count for this task
+ */
+ PX_INLINE int32_t getReference() const
+ {
+ return mTm->getReference( mTaskID );
+ }
+
+ /**
+ * \brief Return the unique ID for this task
+ */
+ PX_INLINE PxTaskID getTaskID() const
+ {
+ return mTaskID;
+ }
+
+ /**
+ * \brief Called by PxTaskManager at submission time for initialization
+ *
+ * Perform simulation step initialization here.
+ */
+ virtual void submitted()
+ {
+ mStreamIndex = 0;
+ mPreSyncRequired = false;
+ mProfileStat = 0;
+ }
+
+ /**
+ * \brief Specify that the GpuTask sync flag be set
+ */
+ PX_INLINE void requestSyncPoint()
+ {
+ mPreSyncRequired = true;
+ }
+
+
+protected:
+ PxTaskID mTaskID; //!< ID assigned at submission
+ uint32_t mStreamIndex; //!< GpuTask CUDA stream index
+ bool mPreSyncRequired; //!< GpuTask sync flag
+
+ friend class PxTaskMgr;
+ friend class PxGpuWorkerThread;
+};
+
+
+/**
+ * \brief A PxBaseTask implementation with immediate execution and simple dependencies
+ *
+ * A PxLightCpuTask bypasses the PxTaskManager launch dependencies and will be
+ * submitted directly to your scene's CpuDispatcher. When the run() function
+ * completes, it will decrement the reference count of the specified
+ * continuation task.
+ *
+ * You must use a full-blown PxTask if you want your task to be resolved
+ * by another PxTask, or you need more than a single dependency to be
+ * resolved when your task completes, or your task will not run on the
+ * CpuDispatcher.
+ */
+class PxLightCpuTask : public PxBaseTask
+{
+public:
+ PxLightCpuTask()
+ : mCont( NULL )
+ , mRefCount( 0 )
+ {
+ }
+ virtual ~PxLightCpuTask()
+ {
+ mTm = NULL;
+ }
+
+ /**
+ * \brief Initialize this task and specify the task that will have its ref count decremented on completion.
+ *
+ * Submission is deferred until the task's mRefCount is decremented to zero.
+ * Note that we only use the PxTaskManager to query the appropriate dispatcher.
+ *
+ * \param[in] tm The PxTaskManager this task is managed by
+ * \param[in] c The task to be executed when this task has finished running
+ */
+ PX_INLINE void setContinuation(PxTaskManager& tm, PxBaseTask* c)
+ {
+ PX_ASSERT( mRefCount == 0 );
+ mRefCount = 1;
+ mCont = c;
+ mTm = &tm;
+ if( mCont )
+ {
+ mCont->addReference();
+ }
+ }
+
+ /**
+ * \brief Initialize this task and specify the task that will have its ref count decremented on completion.
+ *
+ * This overload of setContinuation() queries the PxTaskManager from the continuation
+ * task, which cannot be NULL.
+ * \param[in] c The task to be executed after this task has finished running
+ */
+ PX_INLINE void setContinuation( PxBaseTask* c )
+ {
+ PX_ASSERT( c );
+ PX_ASSERT( mRefCount == 0 );
+ mRefCount = 1;
+ mCont = c;
+ if( mCont )
+ {
+ mCont->addReference();
+ mTm = mCont->getTaskManager();
+ PX_ASSERT( mTm );
+ }
+ }
+
+ /**
+ * \brief Retrieves continuation task
+ */
+ PX_INLINE PxBaseTask* getContinuation() const
+ {
+ return mCont;
+ }
+
+ /**
+ * \brief Manually decrement this task's reference count. If the reference
+ * count reaches zero, the task will be dispatched.
+ */
+ PX_INLINE void removeReference()
+ {
+ mTm->decrReference(*this);
+ }
+
+ /** \brief Return the ref-count for this task */
+ PX_INLINE int32_t getReference() const
+ {
+ return mRefCount;
+ }
+
+ /**
+ * \brief Manually increment this task's reference count. The task will
+ * not be allowed to run until removeReference() is called.
+ */
+ PX_INLINE void addReference()
+ {
+ mTm->addReference(*this);
+ }
+
+ /**
+ * \brief called by CpuDispatcher after run method has completed
+ *
+ * Decrements the continuation task's reference count, if specified.
+ */
+ PX_INLINE void release()
+ {
+ if( mCont )
+ {
+ mCont->removeReference();
+ }
+ }
+
+protected:
+
+ PxBaseTask* mCont; //!< Continuation task, can be NULL
+ volatile int32_t mRefCount; //!< PxTask is dispatched when reaches 0
+
+ friend class PxTaskMgr;
+};
+
+
+}// end physx namespace
+
+
+#endif // PXTASK_PXTASK_H
diff --git a/PxShared/include/task/PxTaskDefine.h b/PxShared/include/task/PxTaskDefine.h
new file mode 100644
index 00000000..a63ed050
--- /dev/null
+++ b/PxShared/include/task/PxTaskDefine.h
@@ -0,0 +1,37 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2016 NVIDIA Corporation. All rights reserved.
+
+#ifndef PXTASK_PXTASKDEFINE_H
+#define PXTASK_PXTASKDEFINE_H
+
+#include "foundation/PxPreprocessor.h"
+
+#ifndef PX_SUPPORT_PXTASK_PROFILING
+#define PX_SUPPORT_PXTASK_PROFILING 1
+#endif
+
+#endif // PXTASK_PXTASKDEFINE_H
diff --git a/PxShared/include/task/PxTaskManager.h b/PxShared/include/task/PxTaskManager.h
new file mode 100644
index 00000000..fbf2aabb
--- /dev/null
+++ b/PxShared/include/task/PxTaskManager.h
@@ -0,0 +1,234 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2016 NVIDIA Corporation. All rights reserved.
+
+#ifndef PXTASK_PXTASKMANAGER_H
+#define PXTASK_PXTASKMANAGER_H
+
+#include "task/PxTaskDefine.h"
+#include "foundation/PxSimpleTypes.h"
+#include "foundation/PxErrorCallback.h"
+
+namespace physx
+{
+
+PX_PUSH_PACK_DEFAULT
+
+class PxBaseTask;
+class PxTask;
+class PxLightCpuTask;
+typedef unsigned int PxTaskID;
+
+/**
+\brief Identifies the type of each heavyweight PxTask object
+
+\note This enum type is only used by PxTask and GpuTask objects; LightCpuTasks do not use this enum.
+
+@see PxTask
+@see PxLightCpuTask
+*/
+struct PxTaskType
+{
+ /**
+ * \brief Identifies the type of each heavyweight PxTask object
+ */
+ enum Enum
+ {
+ TT_CPU, //!< PxTask will be run on the CPU
+ TT_GPU, //!< PxTask will be run on the GPU
+ TT_NOT_PRESENT, //!< Return code when attempting to find a task that does not exist
+ TT_COMPLETED //!< PxTask execution has been completed
+ };
+};
+
+class PxCpuDispatcher;
+class PxGpuDispatcher;
+
+/**
+ \brief The PxTaskManager interface
+
+ A PxTaskManager instance holds references to user-provided dispatcher objects, when tasks are
+ submitted the PxTaskManager routes them to the appropriate dispatcher and handles task profiling if enabled.
+ Users should not implement the PxTaskManager interface, the SDK creates its own concrete PxTaskManager object
+ per-scene which users can configure by passing dispatcher objects into the PxSceneDesc.
+
+
+ @see CpuDispatcher
+ @see PxGpuDispatcher
+
+*/
+class PxTaskManager
+{
+public:
+
+ /**
+ \brief Set the user-provided dispatcher object for CPU tasks
+
+ \param[in] ref The dispatcher object.
+
+ @see CpuDispatcher
+ */
+ virtual void setCpuDispatcher(PxCpuDispatcher& ref) = 0;
+
+ /**
+ \brief Set the user-provided dispatcher object for GPU tasks
+
+ \param[in] ref The dispatcher object.
+
+ @see PxGpuDispatcher
+ */
+ virtual void setGpuDispatcher(PxGpuDispatcher& ref) = 0;
+
+ /**
+ \brief Get the user-provided dispatcher object for CPU tasks
+
+ \return The CPU dispatcher object.
+
+ @see CpuDispatcher
+ */
+ virtual PxCpuDispatcher* getCpuDispatcher() const = 0;
+
+ /**
+ \brief Get the user-provided dispatcher object for GPU tasks
+
+ \return The GPU dispatcher object.
+
+ @see PxGpuDispatcher
+ */
+ virtual PxGpuDispatcher* getGpuDispatcher() const = 0;
+
+ /**
+ \brief Reset any dependencies between Tasks
+
+ \note Will be called at the start of every frame before tasks are submitted.
+
+ @see PxTask
+ */
+ virtual void resetDependencies() = 0;
+
+ /**
+ \brief Called by the owning scene to start the task graph.
+
+ \note All tasks with a ref count of 1 will be dispatched.
+
+ @see PxTask
+ */
+ virtual void startSimulation() = 0;
+
+ /**
+ \brief Called by the owning scene at the end of a simulation step to synchronize the PxGpuDispatcher
+
+ @see PxGpuDispatcher
+ */
+ virtual void stopSimulation() = 0;
+
+ /**
+ \brief Called by the worker threads to inform the PxTaskManager that a task has completed processing
+
+ \param[in] task The task which has been completed
+ */
+ virtual void taskCompleted(PxTask& task) = 0;
+
+ /**
+ \brief Retrieve a task by name
+
+ \param[in] name The unique name of a task
+ \return The ID of the task with that name, or TT_NOT_PRESENT if not found
+ */
+ virtual PxTaskID getNamedTask(const char* name) = 0;
+
+ /**
+ \brief Submit a task with a unique name.
+
+ \param[in] task The task to be executed
+ \param[in] name The unique name of a task
+ \param[in] type The type of the task (default TT_CPU)
+ \return The ID assigned to the submitted task
+
+ */
+ virtual PxTaskID submitNamedTask(PxTask* task, const char* name, PxTaskType::Enum type = PxTaskType::TT_CPU) = 0;
+
+ /**
+ \brief Submit an unnamed task.
+
+ \param[in] task The task to be executed
+ \param[in] type The type of the task (default TT_CPU)
+
+ \return The ID assigned to the submitted task
+ */
+ virtual PxTaskID submitUnnamedTask(PxTask& task, PxTaskType::Enum type = PxTaskType::TT_CPU) = 0;
+
+ /**
+ \brief Retrieve a task given a task ID
+
+ \param[in] id The ID of the task to return, a valid ID must be passed or results are undefined
+
+ \return The task associated with the ID
+ */
+ virtual PxTask* getTaskFromID(PxTaskID id) = 0;
+
+ /**
+ \brief Release the PxTaskManager object, referenced dispatchers will not be released
+ */
+ virtual void release() = 0;
+
+ /**
+ \brief Construct a new PxTaskManager instance with the given [optional] dispatchers
+ */
+ static PxTaskManager* createTaskManager(PxErrorCallback& errorCallback, PxCpuDispatcher* = 0, PxGpuDispatcher* = 0);
+
+protected:
+ virtual ~PxTaskManager() {}
+
+ /*! \cond PRIVATE */
+
+ virtual void finishBefore(PxTask& task, PxTaskID taskID) = 0;
+ virtual void startAfter(PxTask& task, PxTaskID taskID) = 0;
+
+ virtual void addReference(PxTaskID taskID) = 0;
+ virtual void decrReference(PxTaskID taskID) = 0;
+ virtual int32_t getReference(PxTaskID taskID) const = 0;
+
+ virtual void decrReference(PxLightCpuTask&) = 0;
+ virtual void addReference(PxLightCpuTask&) = 0;
+
+ virtual void emitStartEvent(PxBaseTask&, uint32_t threadId=0) = 0;
+ virtual void emitStopEvent(PxBaseTask&, uint32_t threadId=0) = 0;
+
+ /*! \endcond */
+
+ friend class PxBaseTask;
+ friend class PxTask;
+ friend class PxLightCpuTask;
+ friend class PxGpuWorkerThread;
+};
+
+PX_POP_PACK
+
+} // end physx namespace
+
+
+#endif // PXTASK_PXTASKMANAGER_H