path: root/APEX_1.4/common/include
author    git perforce import user <a@b>  2016-10-25 12:29:14 -0600
committer Sheikh Dawood Abdul Ajees <Sheikh Dawood Abdul Ajees>  2016-10-25 18:56:37 -0500
commit    3dfe2108cfab31ba3ee5527e217d0d8e99a51162 (patch)
tree      fa6485c169e50d7415a651bf838f5bcd0fd3bfbd /APEX_1.4/common/include
Initial commit:
PhysX 3.4.0 Update @ 21294896
APEX 1.4.0 Update @ 21275617
[CL 21300167]
Diffstat (limited to 'APEX_1.4/common/include')
-rw-r--r--  APEX_1.4/common/include/ApexActor.h  410
-rw-r--r--  APEX_1.4/common/include/ApexAssetAuthoring.h  34
-rw-r--r--  APEX_1.4/common/include/ApexAssetTracker.h  80
-rw-r--r--  APEX_1.4/common/include/ApexAuthorableObject.h  322
-rw-r--r--  APEX_1.4/common/include/ApexBinaryHeap.h  141
-rw-r--r--  APEX_1.4/common/include/ApexCollision.h  69
-rw-r--r--  APEX_1.4/common/include/ApexConstrainedDistributor.h  170
-rw-r--r--  APEX_1.4/common/include/ApexContext.h  86
-rw-r--r--  APEX_1.4/common/include/ApexCuda.h  375
-rw-r--r--  APEX_1.4/common/include/ApexCudaDefs.h  310
-rw-r--r--  APEX_1.4/common/include/ApexCudaProfile.h  131
-rw-r--r--  APEX_1.4/common/include/ApexCudaSource.h  50
-rw-r--r--  APEX_1.4/common/include/ApexCudaTest.h  363
-rw-r--r--  APEX_1.4/common/include/ApexCudaWrapper.h  1232
-rw-r--r--  APEX_1.4/common/include/ApexCutil.h  39
-rw-r--r--  APEX_1.4/common/include/ApexFIFO.h  136
-rw-r--r--  APEX_1.4/common/include/ApexFind.h  63
-rw-r--r--  APEX_1.4/common/include/ApexGeneralizedCubeTemplates.h  133
-rw-r--r--  APEX_1.4/common/include/ApexGeneralizedMarchingCubes.h  153
-rw-r--r--  APEX_1.4/common/include/ApexGroupsFiltering.h  158
-rw-r--r--  APEX_1.4/common/include/ApexIsoMesh.h  206
-rw-r--r--  APEX_1.4/common/include/ApexLegacyModule.h  170
-rw-r--r--  APEX_1.4/common/include/ApexMarchingCubes.h  380
-rw-r--r--  APEX_1.4/common/include/ApexMath.h  69
-rw-r--r--  APEX_1.4/common/include/ApexMerge.h  84
-rw-r--r--  APEX_1.4/common/include/ApexMeshContractor.h  133
-rw-r--r--  APEX_1.4/common/include/ApexMeshHash.h  110
-rw-r--r--  APEX_1.4/common/include/ApexMirrored.h  507
-rw-r--r--  APEX_1.4/common/include/ApexMirroredArray.h  259
-rw-r--r--  APEX_1.4/common/include/ApexPermute.h  69
-rw-r--r--  APEX_1.4/common/include/ApexPreview.h  54
-rw-r--r--  APEX_1.4/common/include/ApexPvdClient.h  168
-rw-r--r--  APEX_1.4/common/include/ApexQuadricSimplifier.h  352
-rw-r--r--  APEX_1.4/common/include/ApexQuickSelectSmallestK.h  81
-rw-r--r--  APEX_1.4/common/include/ApexRWLockable.h  126
-rw-r--r--  APEX_1.4/common/include/ApexRand.h  133
-rw-r--r--  APEX_1.4/common/include/ApexRenderable.h  79
-rw-r--r--  APEX_1.4/common/include/ApexResource.h  105
-rw-r--r--  APEX_1.4/common/include/ApexResourceHelper.h  95
-rw-r--r--  APEX_1.4/common/include/ApexSDKCachedDataImpl.h  85
-rw-r--r--  APEX_1.4/common/include/ApexSDKHelpers.h  236
-rw-r--r--  APEX_1.4/common/include/ApexSDKIntl.h  265
-rw-r--r--  APEX_1.4/common/include/ApexShape.h  263
-rw-r--r--  APEX_1.4/common/include/ApexSharedUtils.h  2364
-rw-r--r--  APEX_1.4/common/include/ApexSimdMath.h  127
-rw-r--r--  APEX_1.4/common/include/ApexSubdivider.h  200
-rw-r--r--  APEX_1.4/common/include/ApexTest.h  24
-rw-r--r--  APEX_1.4/common/include/ApexTetrahedralizer.h  422
-rw-r--r--  APEX_1.4/common/include/AuthorableObjectIntl.h  92
-rw-r--r--  APEX_1.4/common/include/Cof44.h  135
-rw-r--r--  APEX_1.4/common/include/CurveImpl.h  83
-rw-r--r--  APEX_1.4/common/include/DebugColorParamsEx.h  65
-rw-r--r--  APEX_1.4/common/include/DeclareArray.h  25
-rw-r--r--  APEX_1.4/common/include/FieldBoundaryIntl.h  72
-rw-r--r--  APEX_1.4/common/include/FieldSamplerIntl.h  144
-rw-r--r--  APEX_1.4/common/include/FieldSamplerManagerIntl.h  57
-rw-r--r--  APEX_1.4/common/include/FieldSamplerQueryIntl.h  124
-rw-r--r--  APEX_1.4/common/include/FieldSamplerSceneIntl.h  85
-rw-r--r--  APEX_1.4/common/include/InplaceStorage.h  985
-rw-r--r--  APEX_1.4/common/include/InplaceTypes.h  548
-rw-r--r--  APEX_1.4/common/include/InplaceTypesBuilder.h  86
-rw-r--r--  APEX_1.4/common/include/InstancedObjectSimulationIntl.h  160
-rw-r--r--  APEX_1.4/common/include/IofxManagerIntl.h  208
-rw-r--r--  APEX_1.4/common/include/ModuleBase.h  46
-rw-r--r--  APEX_1.4/common/include/ModuleFieldSamplerIntl.h  34
-rw-r--r--  APEX_1.4/common/include/ModuleIntl.h  264
-rw-r--r--  APEX_1.4/common/include/ModuleIofxIntl.h  35
-rw-r--r--  APEX_1.4/common/include/ModuleUpdateLoader.h  53
-rw-r--r--  APEX_1.4/common/include/P4Info.h  45
-rw-r--r--  APEX_1.4/common/include/PVDParameterizedHandler.h  113
-rw-r--r--  APEX_1.4/common/include/PhysXObjectDescIntl.h  116
-rw-r--r--  APEX_1.4/common/include/ProfilerCallback.h  19
-rw-r--r--  APEX_1.4/common/include/RandState.h  185
-rw-r--r--  APEX_1.4/common/include/RandStateHelpers.h  60
-rw-r--r--  APEX_1.4/common/include/ReadCheck.h  57
-rw-r--r--  APEX_1.4/common/include/RenderAPIIntl.h  515
-rw-r--r--  APEX_1.4/common/include/RenderMeshAssetIntl.h  124
-rw-r--r--  APEX_1.4/common/include/ResourceProviderIntl.h  165
-rw-r--r--  APEX_1.4/common/include/SceneIntl.h  133
-rw-r--r--  APEX_1.4/common/include/SimplexNoise.h  132
-rw-r--r--  APEX_1.4/common/include/Spline.h  168
-rw-r--r--  APEX_1.4/common/include/TableLookup.h  154
-rw-r--r--  APEX_1.4/common/include/WriteCheck.h  65
-rw-r--r--  APEX_1.4/common/include/autogen/ConvexHullParameters.h  276
-rw-r--r--  APEX_1.4/common/include/autogen/DebugColorParams.h  265
-rw-r--r--  APEX_1.4/common/include/autogen/DebugRenderParams.h  248
-rw-r--r--  APEX_1.4/common/include/autogen/ModuleCommonRegistration.h  120
-rw-r--r--  APEX_1.4/common/include/variable_oscillator.h  48
88 files changed, 17930 insertions, 0 deletions
diff --git a/APEX_1.4/common/include/ApexActor.h b/APEX_1.4/common/include/ApexActor.h
new file mode 100644
index 00000000..08ad7b83
--- /dev/null
+++ b/APEX_1.4/common/include/ApexActor.h
@@ -0,0 +1,410 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+
+#ifndef APEX_ACTOR_H
+#define APEX_ACTOR_H
+
+#include "ApexContext.h"
+#include "ApexRenderable.h"
+#include "ApexResource.h"
+#include "ResourceProviderIntl.h"
+#include "ApexSDK.h"
+
+#if PX_PHYSICS_VERSION_MAJOR == 3
+#include "PxActor.h"
+#include "PxShape.h"
+#include "PxFiltering.h"
+#include "PxRigidDynamic.h"
+#include "PxTransform.h"
+#include "PxRigidBodyExt.h"
+#include "../../include/Actor.h"
+#endif
+
+#define UNIQUE_ACTOR_ID 1
+
+namespace nvidia
+{
+namespace apex
+{
+
+class ApexContext;
+class RenderDebugInterface;
+class SceneIntl;
+class Asset;
+class Actor;
+
+/**
+Class that implements the actor interface and tracks the context(s) it belongs to
+*/
+class ApexActor : public ApexRenderable, public ApexContext
+{
+public:
+ ApexActor();
+ ~ApexActor();
+
+ void addSelfToContext(ApexContext& ctx, ApexActor* actorPtr = NULL);
+ void updateIndex(ApexContext& ctx, uint32_t index);
+ bool findSelfInContext(ApexContext& ctx);
+
+ // Each class that derives from ApexActor should implement the following functions
+ // if it wants ActorCreationNotification and Deletion callbacks
+ virtual Asset* getAsset(void)
+ {
+ return NULL;
+ }
+ virtual void ContextActorCreationNotification(AuthObjTypeID authorableObjectType,
+ ApexActor* actorPtr)
+ {
+ PX_UNUSED(authorableObjectType);
+ PX_UNUSED(actorPtr);
+ return;
+ }
+ virtual void ContextActorDeletionNotification(AuthObjTypeID authorableObjectType,
+ ApexActor* actorPtr)
+ {
+ PX_UNUSED(authorableObjectType);
+ PX_UNUSED(actorPtr);
+ return;
+ }
+
+ // Each class that derives from ApexActor may optionally implement these functions
+ virtual Renderable* getRenderable()
+ {
+ return NULL;
+ }
+ virtual Actor* getActor()
+ {
+ return NULL;
+ }
+
+ virtual void release() = 0;
+ void destroy();
+
+#if PX_PHYSICS_VERSION_MAJOR == 3
+ virtual void setPhysXScene(PxScene* s) = 0;
+ virtual PxScene* getPhysXScene() const = 0;
+#endif
+
+ enum ActorState
+ {
+ StateEnabled,
+ StateDisabled,
+ StateEnabling,
+ StateDisabling,
+ };
+
+ /**
+ \brief Selectively enables/disables debug visualization of a specific APEX actor. The default value is true.
+ */
+ virtual void setEnableDebugVisualization(bool state)
+ {
+ mEnableDebugVisualization = state;
+ }
+
+protected:
+ bool mInRelease;
+
+ struct ContextTrack
+ {
+ uint32_t index;
+ ApexContext* ctx;
+ };
+ physx::Array<ContextTrack> mContexts;
+
+#if UNIQUE_ACTOR_ID
+ static int32_t mUniqueActorIdCounter;
+ int32_t mUniqueActorId;
+#endif
+
+ bool mEnableDebugVisualization;
+ friend class ApexContext;
+};
+
+
+
+#if PX_PHYSICS_VERSION_MAJOR == 3
+
+#define APEX_ACTOR_TEMPLATE_PARAM(_type, _name, _variable) \
+bool set##_name(_type x) \
+{ \
+ _variable = x; \
+ return is##_name##Valid(x); \
+} \
+_type get##_name() const { return _variable; }
+
+#define APEX_ACTOR_TEMPLATE_PARAM_VALID_OR_RETURN(_name, _variable) \
+if (!is##_name##Valid(_variable)) \
+{ \
+ return false; \
+}
+
+#define APEX_ACTOR_TEMPLATE_PARAM_SET_DEFAULT(_name) set##_name( getDefault##_name() );
+
+// template for PhysX3.0 actor, body and shape.
+class PhysX3DescTemplateImpl : public nvidia::PhysX3DescTemplate, public UserAllocated
+{
+public:
+ PhysX3DescTemplateImpl()
+ {
+ SetToDefault();
+ }
+ void apply(PxActor* actor) const
+ {
+ actor->setActorFlags(static_cast<physx::PxActorFlags>(actorFlags));
+ actor->setDominanceGroup(dominanceGroup);
+ actor->setOwnerClient(ownerClient);
+ PX_ASSERT(clientBehaviorBits < UINT8_MAX);
+ actor->setClientBehaviorFlags(physx::PxActorClientBehaviorFlags((uint8_t)clientBehaviorBits));
+ //actor->contactReportFlags; // must be set via call PhysX3Interface::setContactReportFlags
+ actor->userData = userData;
+ if (name)
+ {
+ actor->setName(name);
+ }
+
+ // body
+ PxRigidBody* rb = actor->is<physx::PxRigidBody>();
+ if (rb)
+ {
+ // density, user should call updateMassAndInertia when shapes are created.
+ }
+
+ PxRigidDynamic* rd = actor->is<physx::PxRigidDynamic>();
+ if (rd)
+ {
+ rd->setRigidBodyFlags(physx::PxRigidBodyFlags(bodyFlags));
+ rd->setWakeCounter(wakeUpCounter);
+ rd->setLinearDamping(linearDamping);
+ rd->setAngularDamping(angularDamping);
+ rd->setMaxAngularVelocity(maxAngularVelocity);
+ // sleepLinearVelocity attribute for deformable/cloth, see below.
+ rd->setSolverIterationCounts(solverIterationCount, velocityIterationCount);
+ rd->setContactReportThreshold(contactReportThreshold);
+ rd->setSleepThreshold(sleepThreshold);
+ }
+ }
+ void apply(PxShape* shape) const
+ {
+ shape->setFlags((physx::PxShapeFlags)shapeFlags);
+ shape->setMaterials(materials.begin(), static_cast<uint16_t>(materials.size()));
+ shape->userData = shapeUserData;
+ if (shapeName)
+ {
+ shape->setName(shapeName);
+ }
+ shape->setSimulationFilterData(simulationFilterData);
+ shape->setQueryFilterData(queryFilterData);
+ shape->setContactOffset(contactOffset);
+ shape->setRestOffset(restOffset);
+ }
+
+ APEX_ACTOR_TEMPLATE_PARAM(physx::PxDominanceGroup, DominanceGroup, dominanceGroup)
+ APEX_ACTOR_TEMPLATE_PARAM(uint8_t, ActorFlags, actorFlags)
+ APEX_ACTOR_TEMPLATE_PARAM(physx::PxClientID, OwnerClient, ownerClient)
+ APEX_ACTOR_TEMPLATE_PARAM(uint32_t, ClientBehaviorBits, clientBehaviorBits)
+ APEX_ACTOR_TEMPLATE_PARAM(uint16_t, ContactReportFlags, contactReportFlags)
+ APEX_ACTOR_TEMPLATE_PARAM(void*, UserData, userData)
+ APEX_ACTOR_TEMPLATE_PARAM(const char*, Name, name)
+
+ APEX_ACTOR_TEMPLATE_PARAM(float, Density, density)
+ APEX_ACTOR_TEMPLATE_PARAM(uint8_t, BodyFlags, bodyFlags)
+ APEX_ACTOR_TEMPLATE_PARAM(float, WakeUpCounter, wakeUpCounter)
+ APEX_ACTOR_TEMPLATE_PARAM(float, LinearDamping, linearDamping)
+ APEX_ACTOR_TEMPLATE_PARAM(float, AngularDamping, angularDamping)
+ APEX_ACTOR_TEMPLATE_PARAM(float, MaxAngularVelocity, maxAngularVelocity)
+ APEX_ACTOR_TEMPLATE_PARAM(float, SleepLinearVelocity, sleepLinearVelocity)
+ APEX_ACTOR_TEMPLATE_PARAM(uint32_t, SolverIterationCount, solverIterationCount)
+ APEX_ACTOR_TEMPLATE_PARAM(uint32_t, VelocityIterationCount, velocityIterationCount)
+ APEX_ACTOR_TEMPLATE_PARAM(float, ContactReportThreshold, contactReportThreshold)
+ APEX_ACTOR_TEMPLATE_PARAM(float, SleepThreshold, sleepThreshold)
+
+ APEX_ACTOR_TEMPLATE_PARAM(uint8_t, ShapeFlags, shapeFlags)
+ APEX_ACTOR_TEMPLATE_PARAM(void*, ShapeUserData, shapeUserData)
+ APEX_ACTOR_TEMPLATE_PARAM(const char*, ShapeName, shapeName)
+ APEX_ACTOR_TEMPLATE_PARAM(physx::PxFilterData, SimulationFilterData, simulationFilterData)
+ APEX_ACTOR_TEMPLATE_PARAM(physx::PxFilterData, QueryFilterData, queryFilterData)
+ APEX_ACTOR_TEMPLATE_PARAM(float, ContactOffset, contactOffset)
+ APEX_ACTOR_TEMPLATE_PARAM(float, RestOffset, restOffset)
+ physx::PxMaterial** getMaterials(uint32_t& materialCount) const
+ {
+ materialCount = materials.size();
+ return const_cast<physx::PxMaterial**>(materials.begin());
+ }
+ bool setMaterials(physx::PxMaterial** materialArray, uint32_t materialCount)
+ {
+ const bool valid = materialArray != NULL && materialCount > 0;
+ if (!valid)
+ {
+ materials.reset();
+ }
+ else
+ {
+ materials = Array<PxMaterial*>(materialArray, materialArray + materialCount);
+ }
+ return valid;
+ }
+
+ bool isValid()
+ {
+ APEX_ACTOR_TEMPLATE_PARAM_VALID_OR_RETURN(DominanceGroup, dominanceGroup)
+ APEX_ACTOR_TEMPLATE_PARAM_VALID_OR_RETURN(ActorFlags, actorFlags)
+ APEX_ACTOR_TEMPLATE_PARAM_VALID_OR_RETURN(OwnerClient, ownerClient)
+ APEX_ACTOR_TEMPLATE_PARAM_VALID_OR_RETURN(ClientBehaviorBits, clientBehaviorBits)
+ APEX_ACTOR_TEMPLATE_PARAM_VALID_OR_RETURN(ContactReportFlags, contactReportFlags)
+ APEX_ACTOR_TEMPLATE_PARAM_VALID_OR_RETURN(UserData, userData)
+ APEX_ACTOR_TEMPLATE_PARAM_VALID_OR_RETURN(Name, name)
+
+ APEX_ACTOR_TEMPLATE_PARAM_VALID_OR_RETURN(Density, density)
+ APEX_ACTOR_TEMPLATE_PARAM_VALID_OR_RETURN(BodyFlags, bodyFlags)
+ APEX_ACTOR_TEMPLATE_PARAM_VALID_OR_RETURN(WakeUpCounter, wakeUpCounter)
+ APEX_ACTOR_TEMPLATE_PARAM_VALID_OR_RETURN(LinearDamping, linearDamping)
+ APEX_ACTOR_TEMPLATE_PARAM_VALID_OR_RETURN(AngularDamping, angularDamping)
+ APEX_ACTOR_TEMPLATE_PARAM_VALID_OR_RETURN(MaxAngularVelocity, maxAngularVelocity)
+ APEX_ACTOR_TEMPLATE_PARAM_VALID_OR_RETURN(SleepLinearVelocity, sleepLinearVelocity)
+ APEX_ACTOR_TEMPLATE_PARAM_VALID_OR_RETURN(SolverIterationCount, solverIterationCount)
+ APEX_ACTOR_TEMPLATE_PARAM_VALID_OR_RETURN(VelocityIterationCount, velocityIterationCount)
+ APEX_ACTOR_TEMPLATE_PARAM_VALID_OR_RETURN(ContactReportThreshold, contactReportThreshold)
+ APEX_ACTOR_TEMPLATE_PARAM_VALID_OR_RETURN(SleepThreshold, sleepThreshold)
+
+ APEX_ACTOR_TEMPLATE_PARAM_VALID_OR_RETURN(ShapeFlags, shapeFlags)
+ APEX_ACTOR_TEMPLATE_PARAM_VALID_OR_RETURN(ShapeUserData, shapeUserData)
+ APEX_ACTOR_TEMPLATE_PARAM_VALID_OR_RETURN(ShapeName, shapeName)
+ APEX_ACTOR_TEMPLATE_PARAM_VALID_OR_RETURN(SimulationFilterData, simulationFilterData)
+ APEX_ACTOR_TEMPLATE_PARAM_VALID_OR_RETURN(QueryFilterData, queryFilterData)
+ APEX_ACTOR_TEMPLATE_PARAM_VALID_OR_RETURN(ContactOffset, contactOffset)
+ APEX_ACTOR_TEMPLATE_PARAM_VALID_OR_RETURN(RestOffset, restOffset)
+ if (materials.size() == 0)
+ {
+ return false;
+ }
+
+ return true;
+ }
+
+ void SetToDefault()
+ {
+ APEX_ACTOR_TEMPLATE_PARAM_SET_DEFAULT(DominanceGroup)
+ APEX_ACTOR_TEMPLATE_PARAM_SET_DEFAULT(ActorFlags)
+ APEX_ACTOR_TEMPLATE_PARAM_SET_DEFAULT(OwnerClient)
+ APEX_ACTOR_TEMPLATE_PARAM_SET_DEFAULT(ClientBehaviorBits)
+ APEX_ACTOR_TEMPLATE_PARAM_SET_DEFAULT(ContactReportFlags)
+ APEX_ACTOR_TEMPLATE_PARAM_SET_DEFAULT(UserData)
+ APEX_ACTOR_TEMPLATE_PARAM_SET_DEFAULT(Name)
+
+ APEX_ACTOR_TEMPLATE_PARAM_SET_DEFAULT(Density)
+ APEX_ACTOR_TEMPLATE_PARAM_SET_DEFAULT(BodyFlags)
+ APEX_ACTOR_TEMPLATE_PARAM_SET_DEFAULT(WakeUpCounter)
+ APEX_ACTOR_TEMPLATE_PARAM_SET_DEFAULT(LinearDamping)
+ APEX_ACTOR_TEMPLATE_PARAM_SET_DEFAULT(AngularDamping)
+ APEX_ACTOR_TEMPLATE_PARAM_SET_DEFAULT(MaxAngularVelocity)
+ APEX_ACTOR_TEMPLATE_PARAM_SET_DEFAULT(SleepLinearVelocity)
+ APEX_ACTOR_TEMPLATE_PARAM_SET_DEFAULT(SolverIterationCount)
+ APEX_ACTOR_TEMPLATE_PARAM_SET_DEFAULT(VelocityIterationCount)
+ APEX_ACTOR_TEMPLATE_PARAM_SET_DEFAULT(ContactReportThreshold)
+ APEX_ACTOR_TEMPLATE_PARAM_SET_DEFAULT(SleepThreshold)
+
+ APEX_ACTOR_TEMPLATE_PARAM_SET_DEFAULT(ShapeFlags)
+ APEX_ACTOR_TEMPLATE_PARAM_SET_DEFAULT(ShapeUserData)
+ APEX_ACTOR_TEMPLATE_PARAM_SET_DEFAULT(ShapeName)
+ APEX_ACTOR_TEMPLATE_PARAM_SET_DEFAULT(SimulationFilterData)
+ APEX_ACTOR_TEMPLATE_PARAM_SET_DEFAULT(QueryFilterData)
+ APEX_ACTOR_TEMPLATE_PARAM_SET_DEFAULT(ContactOffset)
+ APEX_ACTOR_TEMPLATE_PARAM_SET_DEFAULT(RestOffset)
+ materials.reset();
+ }
+
+ void release()
+ {
+ delete this;
+ }
+
+public:
+ // actor
+ physx::PxDominanceGroup dominanceGroup;
+ uint8_t actorFlags;
+ physx::PxClientID ownerClient;
+ uint32_t clientBehaviorBits;
+ uint16_t contactReportFlags;
+ void* userData;
+ const char* name;
+
+ // body
+ float density;
+
+ uint8_t bodyFlags;
+ float wakeUpCounter;
+ float linearDamping;
+ float angularDamping;
+ float maxAngularVelocity;
+ float sleepLinearVelocity;
+ uint32_t solverIterationCount;
+ uint32_t velocityIterationCount;
+ float contactReportThreshold;
+ float sleepThreshold;
+
+ // shape
+ uint8_t shapeFlags;
+ Array<PxMaterial*> materials;
+ void* shapeUserData;
+ const char* shapeName;
+ PxFilterData simulationFilterData;
+ PxFilterData queryFilterData;
+ float contactOffset;
+ float restOffset;
+}; // PhysX3DescTemplateImpl
+
+class ApexActorSource
+{
+public:
+
+ // ActorSource methods
+
+ void setPhysX3Template(const PhysX3DescTemplate* desc)
+ {
+ physX3Template.set(static_cast<const PhysX3DescTemplateImpl*>(desc));
+ }
+ bool getPhysX3Template(PhysX3DescTemplate& dest) const
+ {
+ return physX3Template.get(static_cast<PhysX3DescTemplateImpl&>(dest));
+ }
+ PhysX3DescTemplate* createPhysX3DescTemplate() const
+ {
+ return PX_NEW(PhysX3DescTemplateImpl);
+ }
+
+ void modifyActor(PxRigidActor* actor) const
+ {
+ if (physX3Template.isSet)
+ {
+ physX3Template.data.apply(actor);
+ }
+ }
+ void modifyShape(PxShape* shape) const
+ {
+ if (physX3Template.isSet)
+ {
+ physX3Template.data.apply(shape);
+ }
+ }
+
+
+
+protected:
+
+ InitTemplate<PhysX3DescTemplateImpl> physX3Template;
+};
+
+#endif // PX_PHYSICS_VERSION_MAJOR == 3
+
+}
+} // end namespace nvidia::apex
+
+#endif // APEX_ACTOR_H
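Note on the setter/getter macro above: APEX_ACTOR_TEMPLATE_PARAM mechanically generates one set/get pair per template field of PhysX3DescTemplateImpl. A minimal sketch of what a single invocation expands to, using the Density/density pair from the list above (the isDensityValid() predicate is assumed to be declared by the PhysX3DescTemplate interface, which is not part of this diff):

    // Approximate expansion of APEX_ACTOR_TEMPLATE_PARAM(float, Density, density) -- sketch only
    bool setDensity(float x)
    {
        density = x;                 // store the value unconditionally
        return isDensityValid(x);    // report whether the new value passes validation
    }
    float getDensity() const { return density; }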
diff --git a/APEX_1.4/common/include/ApexAssetAuthoring.h b/APEX_1.4/common/include/ApexAssetAuthoring.h
new file mode 100644
index 00000000..50ab4c12
--- /dev/null
+++ b/APEX_1.4/common/include/ApexAssetAuthoring.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+
+#ifndef APEX_ASSET_AUTHORING_H
+#define APEX_ASSET_AUTHORING_H
+
+#include "ApexUsingNamespace.h"
+
+namespace nvidia
+{
+namespace apex
+{
+
+
+class ApexAssetAuthoring
+{
+public:
+ virtual void setToolString(const char* toolName, const char* toolVersion, uint32_t toolChangelist);
+
+ virtual void setToolString(const char* toolString);
+};
+
+} // namespace apex
+} // namespace nvidia
+
+#endif // APEX_ASSET_AUTHORING_H \ No newline at end of file
diff --git a/APEX_1.4/common/include/ApexAssetTracker.h b/APEX_1.4/common/include/ApexAssetTracker.h
new file mode 100644
index 00000000..d3c3d94f
--- /dev/null
+++ b/APEX_1.4/common/include/ApexAssetTracker.h
@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+
+#ifndef APEX_ASSET_TRACKER_H
+#define APEX_ASSET_TRACKER_H
+
+#include "ApexString.h"
+#include "ApexSDKIntl.h"
+#include "ResourceProviderIntl.h"
+#include "PsArray.h"
+
+namespace nvidia
+{
+namespace apex
+{
+
+class IosAsset;
+
+/* There are a couple of confusing details here:
+ * 1. If you are tracking particle system or material assets, use an *authoringTypeName*
+ *    of "".
+ * 2. When loading, this class first checks whether *authoringTypeName* == "" [A], and
+ *    then whether nameIdList.assetTypeName == "" [B].
+ *    If A && B, use the particle system namespace defined in B
+ *    If A && !B, use the material namespace
+ *    If !A && B, use the namespace specified in the constructor
+ */
+class ApexAssetTracker
+{
+public:
+ ApexAssetTracker() : mSdk(0) {}
+
+ ApexAssetTracker(ApexSDKIntl* sdk, const char* authoringTypeName)
+ : mAuthoringTypeName(authoringTypeName),
+ mSdk(sdk)
+ {}
+
+ ApexAssetTracker(ApexSDKIntl* sdk)
+ : mAuthoringTypeName(""),
+ mSdk(sdk)
+ {}
+
+ ~ApexAssetTracker();
+
+ IosAsset* getIosAssetFromName(const char* iosTypeName, const char* assetName);
+ Asset* getAssetFromName(const char* assetName);
+ Asset* getMeshAssetFromName(const char* assetName, bool isOpaqueMesh);
+ bool addAssetName(const char* assetName, bool isOpaqueMesh);
+ bool addAssetName(const char* iosTypeName, const char* assetName);
+ void removeAllAssetNames();
+
+ ResID getResourceIdFromName(const char* assetName, bool isOpaqueMesh);
+
+ uint32_t forceLoadAssets();
+
+ /* one function must be implemented to fill in the name-to-ID mapping lists */
+ //virtual void initializeAssetNameTable() = 0;
+
+ physx::Array<AssetNameIDMapping*>& getNameIdList()
+ {
+ return mNameIdList;
+ }
+
+ ApexSimpleString mAuthoringTypeName;
+ ApexSDKIntl* mSdk;
+ physx::Array<AssetNameIDMapping*> mNameIdList;
+};
+
+}
+} // end namespace nvidia::apex
+
+#endif // APEX_ASSET_TRACKER_H
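ApexAssetTracker resolves queued asset names through the named resource provider when forceLoadAssets() is called. A minimal usage sketch under assumed conditions (sdk, the authoring type name, and the asset name are hypothetical placeholders, not values taken from this commit):

    // Hypothetical usage; sdk is assumed to be a valid ApexSDKIntl*.
    ApexAssetTracker tracker(sdk, "DestructibleAsset");
    tracker.addAssetName("MyAsset", /*isOpaqueMesh=*/false);  // queue the name for resolution
    uint32_t loaded = tracker.forceLoadAssets();               // resolve queued names now
    Asset* asset = tracker.getAssetFromName("MyAsset");        // look up the resolved asset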
diff --git a/APEX_1.4/common/include/ApexAuthorableObject.h b/APEX_1.4/common/include/ApexAuthorableObject.h
new file mode 100644
index 00000000..6d4f1ebe
--- /dev/null
+++ b/APEX_1.4/common/include/ApexAuthorableObject.h
@@ -0,0 +1,322 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+
+#ifndef __APEXAUTHORABLEOBJECT_H__
+#define __APEXAUTHORABLEOBJECT_H__
+
+#include "Asset.h"
+
+#include "AuthorableObjectIntl.h"
+#include "nvparameterized/NvParameterizedTraits.h"
+#include "nvparameterized/NvParameterized.h"
+
+namespace nvidia
+{
+namespace apex
+{
+
+class ResourceList;
+
+/**
+ * ApexAuthorableObject
+ * This class is the implementation for AuthorableObjectIntl (except for the
+ * ApexResource stuff). It's disappointing that it has to be templated like
+ * this, but there were issues with multiple inheritance (an Resource ptr
+ * cannot be cast to an Asset ptr - Asset should inherit from Resource
+ * in the future.
+ *
+ * Template expectations:
+ * T_Module - must inherit from ModuleIntl
+ * the T_Asset type typically uses T_Module->mSdk
+ *
+ * T_Asset - T_Asset( T_Module *m, ResourceList &list, const char *name )
+ * must inherit from Asset
+ *
+ * T_AssetAuthoring - T_AssetAuthoring( T_Module *m, ResourceList &list )
+ * must inherit from AssetAuthoring
+ */
+
+template <class T_Module, class T_Asset, class T_AssetAuthoring>
+class ApexAuthorableObject : public AuthorableObjectIntl
+{
+public:
+ ApexAuthorableObject(ModuleIntl* m, ResourceList& list)
+ : AuthorableObjectIntl(m, list, T_Asset::getClassName())
+ {
+ // Register the authorable object type name in the NRP
+ mAOResID = GetInternalApexSDK()->getInternalResourceProvider()->createNameSpace(mAOTypeName.c_str());
+ mAOPtrResID = GetInternalApexSDK()->registerAuthObjType(mAOTypeName.c_str(), this);
+
+ PX_ASSERT(!"This constructor is no longer valid, you MUST provide a parameterizedName!");
+ }
+
+ // This constructor is for assets that are based on NvParameterized, they provide the string
+ // defined in the NvParameterized structure. This will be used to map the NvParameterized object
+ // to the AuthorableObject class to create the assets after they are deserialized
+ ApexAuthorableObject(ModuleIntl* m, ResourceList& list, const char* parameterizedName)
+ : AuthorableObjectIntl(m, list, T_Asset::getClassName())
+ {
+ mParameterizedName = parameterizedName;
+
+ // Register the authorable object type name in the NRP
+ mAOResID = GetInternalApexSDK()->getInternalResourceProvider()->createNameSpace(mAOTypeName.c_str());
+ mAOPtrResID = GetInternalApexSDK()->registerAuthObjType(mAOTypeName.c_str(), this);
+
+ // Register the parameterized name in the NRP to point to this authorable object
+ GetInternalApexSDK()->registerNvParamAuthType(mParameterizedName.c_str(), this);
+ }
+
+ virtual Asset* createAsset(AssetAuthoring& author, const char* name);
+ virtual Asset* createAsset(NvParameterized::Interface* params, const char* name);
+ virtual void releaseAsset(Asset& nxasset);
+
+ virtual AssetAuthoring* createAssetAuthoring();
+ virtual AssetAuthoring* createAssetAuthoring(const char* name);
+ virtual AssetAuthoring* createAssetAuthoring(NvParameterized::Interface* params, const char* name);
+ virtual void releaseAssetAuthoring(AssetAuthoring& nxauthor);
+
+ virtual uint32_t forceLoadAssets();
+
+ virtual uint32_t getAssetCount()
+ {
+ return mAssets.getSize();
+ }
+ virtual bool getAssetList(Asset** outAssets, uint32_t& outAssetCount, uint32_t inAssetCount);
+
+ virtual ResID getResID()
+ {
+ return mAOResID;
+ }
+
+ virtual ApexSimpleString& getName()
+ {
+ return mAOTypeName;
+ }
+
+ // Resource methods
+ virtual void release();
+
+ virtual void destroy()
+ {
+ delete this;
+ }
+
+};
+
+template <class T_Module, class T_Asset, class T_AssetAuthoring>
+Asset* ApexAuthorableObject<T_Module, T_Asset, T_AssetAuthoring>::
+createAsset(AssetAuthoring& author, const char* name)
+{
+ if (mParameterizedName.len())
+ {
+ NvParameterized::Interface* params = 0;
+ NvParameterized::Traits* traits = GetInternalApexSDK()->getParameterizedTraits();
+ params = traits->createNvParameterized(mParameterizedName.c_str());
+ PX_ASSERT(params);
+ if (params)
+ {
+ NvParameterized::Interface* authorParams = author.getNvParameterized();
+ PX_ASSERT(authorParams);
+ if (authorParams)
+ {
+ if (NvParameterized::ERROR_NONE != authorParams->callPreSerializeCallback())
+ {
+ return NULL;
+ }
+
+ NvParameterized::ErrorType err = params->copy(*authorParams);
+
+ PX_ASSERT(err == NvParameterized::ERROR_NONE);
+
+ if (err == NvParameterized::ERROR_NONE)
+ {
+ return createAsset(params, name);
+ }
+ else
+ {
+ return NULL;
+ }
+ }
+ else
+ {
+ return NULL;
+ }
+ }
+ else
+ {
+ return NULL;
+ }
+ }
+ else
+ {
+ APEX_INVALID_OPERATION("Authorable Asset needs a parameterized name");
+ return NULL;
+ }
+}
+
+template <class T_Module, class T_Asset, class T_AssetAuthoring>
+void ApexAuthorableObject<T_Module, T_Asset, T_AssetAuthoring>::
+release()
+{
+ // test this by releasing the module before the individual assets
+
+ // remove all assets that we loaded (must do now else we cannot unregister)
+ mAssets.clear();
+ mAssetAuthors.clear();
+
+ // remove this AO's name from the authorable namespace
+ GetInternalApexSDK()->unregisterAuthObjType(mAOTypeName.c_str());
+
+ if (mParameterizedName.len())
+ {
+ GetInternalApexSDK()->unregisterNvParamAuthType(mParameterizedName.c_str());
+ }
+ destroy();
+}
+
+template <class T_Module, class T_Asset, class T_AssetAuthoring>
+Asset* ApexAuthorableObject<T_Module, T_Asset, T_AssetAuthoring>::
+createAsset(NvParameterized::Interface* params, const char* name)
+{
+ T_Asset* asset = PX_NEW(T_Asset)(DYNAMIC_CAST(T_Module*)(mModule), mAssets, params, name);
+ if (asset)
+ {
+ GetInternalApexSDK()->getNamedResourceProvider()->setResource(mAOTypeName.c_str(), name, asset);
+ }
+ return asset;
+}
+
+template <class T_Module, class T_Asset, class T_AssetAuthoring>
+void ApexAuthorableObject<T_Module, T_Asset, T_AssetAuthoring>::
+releaseAsset(Asset& nxasset)
+{
+ T_Asset* asset = DYNAMIC_CAST(T_Asset*)(&nxasset);
+
+ GetInternalApexSDK()->getInternalResourceProvider()->setResource(mAOTypeName.c_str(), nxasset.getName(), NULL, false, false);
+ asset->destroy();
+}
+
+#ifdef WITHOUT_APEX_AUTHORING
+
+// this should no longer be called now that we're auto-assigning names in createAssetAuthoring()
+template <class T_Module, class T_Asset, class T_AssetAuthoring>
+AssetAuthoring* ApexAuthorableObject<T_Module, T_Asset, T_AssetAuthoring>::
+createAssetAuthoring()
+{
+ APEX_INVALID_OPERATION("Asset authoring has been disabled");
+ return NULL;
+}
+
+template <class T_Module, class T_Asset, class T_AssetAuthoring>
+AssetAuthoring* ApexAuthorableObject<T_Module, T_Asset, T_AssetAuthoring>::
+createAssetAuthoring(const char*)
+{
+ APEX_INVALID_OPERATION("Asset authoring has been disabled");
+ return NULL;
+}
+
+template <class T_Module, class T_Asset, class T_AssetAuthoring>
+AssetAuthoring* ApexAuthorableObject<T_Module, T_Asset, T_AssetAuthoring>::
+createAssetAuthoring(NvParameterized::Interface*, const char*)
+{
+ APEX_INVALID_OPERATION("Asset authoring has been disabled");
+ return NULL;
+}
+
+#else // WITHOUT_APEX_AUTHORING
+
+// this should no longer be called now that we're auto-assigning names in createAssetAuthoring()
+template <class T_Module, class T_Asset, class T_AssetAuthoring>
+AssetAuthoring* ApexAuthorableObject<T_Module, T_Asset, T_AssetAuthoring>::
+createAssetAuthoring()
+{
+ return PX_NEW(T_AssetAuthoring)(DYNAMIC_CAST(T_Module*)(mModule), mAssetAuthors);
+}
+
+template <class T_Module, class T_Asset, class T_AssetAuthoring>
+AssetAuthoring* ApexAuthorableObject<T_Module, T_Asset, T_AssetAuthoring>::
+createAssetAuthoring(const char* name)
+{
+ T_AssetAuthoring* assetAuthor = PX_NEW(T_AssetAuthoring)(DYNAMIC_CAST(T_Module*)(mModule), mAssetAuthors, name);
+
+ if (assetAuthor)
+ {
+ GetInternalApexSDK()->getNamedResourceProvider()->setResource(mAOTypeName.c_str(), name, assetAuthor);
+ }
+ return assetAuthor;
+}
+
+template <class T_Module, class T_Asset, class T_AssetAuthoring>
+AssetAuthoring* ApexAuthorableObject<T_Module, T_Asset, T_AssetAuthoring>::
+createAssetAuthoring(NvParameterized::Interface* params, const char* name)
+{
+ T_AssetAuthoring* assetAuthor = PX_NEW(T_AssetAuthoring)(DYNAMIC_CAST(T_Module*)(mModule), mAssetAuthors, params, name);
+
+ if (assetAuthor)
+ {
+ GetInternalApexSDK()->getNamedResourceProvider()->setResource(mAOTypeName.c_str(), name, assetAuthor);
+ }
+ return assetAuthor;
+}
+
+#endif // WITHOUT_APEX_AUTHORING
+
+template <class T_Module, class T_Asset, class T_AssetAuthoring>
+void ApexAuthorableObject<T_Module, T_Asset, T_AssetAuthoring>::
+releaseAssetAuthoring(AssetAuthoring& nxauthor)
+{
+ T_AssetAuthoring* author = DYNAMIC_CAST(T_AssetAuthoring*)(&nxauthor);
+
+ GetInternalApexSDK()->getInternalResourceProvider()->setResource(mAOTypeName.c_str(), nxauthor.getName(), NULL, false, false);
+ author->destroy();
+}
+
+template <class T_Module, class T_Asset, class T_AssetAuthoring>
+uint32_t ApexAuthorableObject<T_Module, T_Asset, T_AssetAuthoring>::
+forceLoadAssets()
+{
+ uint32_t loadedAssetCount = 0;
+
+ for (uint32_t i = 0; i < mAssets.getSize(); i++)
+ {
+ T_Asset* asset = DYNAMIC_CAST(T_Asset*)(mAssets.getResource(i));
+ loadedAssetCount += asset->forceLoadAssets();
+ }
+ return loadedAssetCount;
+}
+
+template <class T_Module, class T_Asset, class T_AssetAuthoring>
+bool ApexAuthorableObject<T_Module, T_Asset, T_AssetAuthoring>::
+getAssetList(Asset** outAssets, uint32_t& outAssetCount, uint32_t inAssetCount)
+{
+ PX_ASSERT(outAssets);
+ PX_ASSERT(inAssetCount >= mAssets.getSize());
+
+ if (!outAssets || inAssetCount < mAssets.getSize())
+ {
+ outAssetCount = 0;
+ return false;
+ }
+
+ outAssetCount = mAssets.getSize();
+ for (uint32_t i = 0; i < mAssets.getSize(); i++)
+ {
+ T_Asset* asset = DYNAMIC_CAST(T_Asset*)(mAssets.getResource(i));
+ outAssets[i] = static_cast<Asset*>(asset);
+ }
+
+ return true;
+}
+
+}
+} // end namespace nvidia::apex
+
+#endif // __APEXAUTHORABLEOBJECT_H__
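To make the "Template expectations" comment at the top of this file concrete, a module would typically instantiate the template roughly as sketched below. MyModule, MyAsset, MyAssetAuthoring, and the parameterized class name are hypothetical placeholders; note that createAsset() above also passes an NvParameterized::Interface* to the T_Asset constructor, so the asset type must accept it:

    // Hypothetical instantiation sketch; none of these types exist in this commit.
    // MyModule         : public ModuleIntl
    // MyAsset          : public Asset           (MyModule*, ResourceList&, NvParameterized::Interface*, const char*)
    // MyAssetAuthoring : public AssetAuthoring  (MyModule*, ResourceList&) and (MyModule*, ResourceList&, const char*)
    typedef ApexAuthorableObject<MyModule, MyAsset, MyAssetAuthoring> MyAssetAO;

    // Usually created by the module, passing the NvParameterized class name so that
    // deserialized parameter objects can be routed back to this authorable object:
    MyAssetAO* ao = PX_NEW(MyAssetAO)(module, moduleResourceList, "MyAssetParameters");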
diff --git a/APEX_1.4/common/include/ApexBinaryHeap.h b/APEX_1.4/common/include/ApexBinaryHeap.h
new file mode 100644
index 00000000..e772b900
--- /dev/null
+++ b/APEX_1.4/common/include/ApexBinaryHeap.h
@@ -0,0 +1,141 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+
+#ifndef APEX_BINARY_HEAP_H
+#define APEX_BINARY_HEAP_H
+
+#include "ApexDefs.h"
+
+namespace nvidia
+{
+namespace apex
+{
+
+template <class Comparable>
+class ApexBinaryHeap
+{
+public:
+ explicit ApexBinaryHeap(int capacity = 100) : mCurrentSize(0)
+ {
+ if (capacity > 0)
+ {
+ mArray.reserve((uint32_t)capacity + 1);
+ }
+
+ mArray.insert();
+ }
+
+
+
+ bool isEmpty() const
+ {
+ return mCurrentSize == 0;
+ }
+
+
+
+ const Comparable& peek() const
+ {
+ PX_ASSERT(mArray.size() > 1);
+ return mArray[1];
+ }
+
+
+
+ void push(const Comparable& x)
+ {
+ mArray.insert();
+ // Percolate up
+ mCurrentSize++;
+ uint32_t hole = mCurrentSize;
+ while (hole > 1)
+ {
+ uint32_t parent = hole >> 1;
+ if (!(x < mArray[parent]))
+ {
+ break;
+ }
+ mArray[hole] = mArray[parent];
+ hole = parent;
+ }
+ mArray[hole] = x;
+ }
+
+
+
+ void pop()
+ {
+ if (mArray.size() > 1)
+ {
+ mArray[1] = mArray[mCurrentSize--];
+ percolateDown(1);
+ mArray.popBack();
+ }
+ }
+
+
+
+ void pop(Comparable& minItem)
+ {
+ if (mArray.size() > 1)
+ {
+ minItem = mArray[1];
+ mArray[1] = mArray[mCurrentSize--];
+ percolateDown(1);
+ mArray.popBack();
+ }
+ }
+
+private:
+ uint32_t mCurrentSize; // Number of elements in heap
+ physx::Array<Comparable> mArray;
+
+ void buildHeap()
+ {
+ for (uint32_t i = mCurrentSize / 2; i > 0; i--)
+ {
+ percolateDown(i);
+ }
+ }
+
+
+
+ void percolateDown(uint32_t hole)
+ {
+ Comparable tmp = mArray[hole];
+
+ while (hole * 2 <= mCurrentSize)
+ {
+ uint32_t child = hole * 2;
+ if (child != mCurrentSize && mArray[child + 1] < mArray[child])
+ {
+ child++;
+ }
+ if (mArray[child] < tmp)
+ {
+ mArray[hole] = mArray[child];
+ }
+ else
+ {
+ break;
+ }
+
+ hole = child;
+ }
+
+ mArray[hole] = tmp;
+ }
+};
+
+}
+} // end namespace nvidia::apex
+
+#endif // APEX_BINARY_HEAP_H
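ApexBinaryHeap is a min-heap ordered by the element type's operator<, stored with 1-based indexing (the constructor inserts a dummy element at index 0). A short usage sketch:

    // Minimal usage sketch (values are illustrative).
    ApexBinaryHeap<int> heap;
    heap.push(5);
    heap.push(1);
    heap.push(3);
    int smallest = 0;
    heap.pop(smallest);            // smallest == 1; peek() before the pop would also return 1
    bool empty = heap.isEmpty();   // false, two elements remain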
diff --git a/APEX_1.4/common/include/ApexCollision.h b/APEX_1.4/common/include/ApexCollision.h
new file mode 100644
index 00000000..ccc4b8b4
--- /dev/null
+++ b/APEX_1.4/common/include/ApexCollision.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+
+#ifndef APEX_COLLISION_H
+#define APEX_COLLISION_H
+
+#include "ApexDefs.h"
+
+#include "ApexUsingNamespace.h"
+#include "PxVec3.h"
+#include "PxMat33.h"
+
+namespace nvidia
+{
+namespace apex
+{
+
+
+
+struct Segment
+{
+ PxVec3 p0;
+ PxVec3 p1;
+};
+
+struct Capsule : Segment
+{
+ float radius;
+};
+
+struct Triangle
+{
+ Triangle(const PxVec3& a, const PxVec3& b, const PxVec3& c) : v0(a), v1(b), v2(c) {}
+ PxVec3 v0;
+ PxVec3 v1;
+ PxVec3 v2;
+};
+
+struct Box
+{
+ PxVec3 center;
+ PxVec3 extents;
+ PxMat33 rot;
+};
+
+
+bool capsuleCapsuleIntersection(const Capsule& worldCaps0, const Capsule& worldCaps1, float tolerance = 1.2);
+bool boxBoxIntersection(const Box& worldBox0, const Box& worldBox1);
+
+float APEX_pointTriangleSqrDst(const Triangle& triangle, const PxVec3& position);
+float APEX_segmentSegmentSqrDist(const Segment& seg0, const Segment& seg1, float* s, float* t);
+float APEX_pointSegmentSqrDist(const Segment& seg, const PxVec3& point, float* param = 0);
+uint32_t APEX_RayCapsuleIntersect(const PxVec3& origin, const PxVec3& dir, const Capsule& capsule, float s[2]);
+
+bool APEX_RayTriangleIntersect(const PxVec3& orig, const PxVec3& dir, const PxVec3& a, const PxVec3& b, const PxVec3& c, float& t, float& u, float& v);
+
+} // namespace apex
+} // namespace nvidia
+
+
+#endif // APEX_COLLISION_H
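A small usage sketch for the declarations above; coordinates and radii are illustrative, and the exact meaning of the tolerance parameter is defined by the implementation file, which is not part of this header:

    // Two capsules that overlap near (0.3, 0.5, 0).
    Capsule a, b;
    a.p0 = PxVec3(0.0f, 0.0f, 0.0f);  a.p1 = PxVec3(0.0f, 1.0f, 0.0f);  a.radius = 0.5f;
    b.p0 = PxVec3(0.3f, 0.5f, 0.0f);  b.p1 = PxVec3(1.3f, 0.5f, 0.0f);  b.radius = 0.5f;
    bool overlapping = capsuleCapsuleIntersection(a, b);  // uses the default tolerance of 1.2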
diff --git a/APEX_1.4/common/include/ApexConstrainedDistributor.h b/APEX_1.4/common/include/ApexConstrainedDistributor.h
new file mode 100644
index 00000000..b9d7474e
--- /dev/null
+++ b/APEX_1.4/common/include/ApexConstrainedDistributor.h
@@ -0,0 +1,170 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+
+#ifndef __APEX_CONSTRAINED_DISTRIBUTOR_H__
+#define __APEX_CONSTRAINED_DISTRIBUTOR_H__
+
+#include "Apex.h"
+#include "PsUserAllocated.h"
+
+namespace nvidia
+{
+namespace apex
+{
+
+template <typename T = uint32_t>
+class ApexConstrainedDistributor
+{
+public:
+ ApexConstrainedDistributor()
+ {
+ }
+
+ PX_INLINE void resize(uint32_t size)
+ {
+ mConstraintDataArray.resize(size);
+ }
+ PX_INLINE void setBenefit(uint32_t index, float benefit)
+ {
+ PX_ASSERT(index < mConstraintDataArray.size());
+ mConstraintDataArray[index].benefit = benefit;
+ }
+ PX_INLINE void setTargetValue(uint32_t index, T targetValue)
+ {
+ PX_ASSERT(index < mConstraintDataArray.size());
+ mConstraintDataArray[index].targetValue = targetValue;
+ }
+ PX_INLINE T getResultValue(uint32_t index) const
+ {
+ PX_ASSERT(index < mConstraintDataArray.size());
+ return mConstraintDataArray[index].resultValue;
+ }
+
+ void solve(T totalValueLimit)
+ {
+ uint32_t size = mConstraintDataArray.size();
+ if (size == 0)
+ {
+ return;
+ }
+ if (size == 1)
+ {
+ ConstraintData& data = mConstraintDataArray.front();
+ data.resultValue = PxMin(data.targetValue, totalValueLimit);
+ return;
+ }
+
+ float totalBenefit = 0;
+ T totalValue = 0;
+ for (uint32_t i = 0; i < size; i++)
+ {
+ ConstraintData& data = mConstraintDataArray[i];
+
+ totalBenefit += data.benefit;
+ totalValue += data.targetValue;
+
+ data.resultValue = data.targetValue;
+ }
+ if (totalValue <= totalValueLimit)
+ {
+ //resultValue was already set in the loop above
+ return;
+ }
+
+ mConstraintSortPairArray.resize(size);
+ for (uint32_t i = 0; i < size; i++)
+ {
+ ConstraintData& data = mConstraintDataArray[i];
+
+ data.weight = (totalValueLimit * data.benefit / totalBenefit);
+ if (data.weight > 0)
+ {
+ mConstraintSortPairArray[i].key = (data.targetValue / data.weight);
+ }
+ else
+ {
+ mConstraintSortPairArray[i].key = FLT_MAX;
+ data.resultValue = 0; //reset resultValue
+ }
+ mConstraintSortPairArray[i].index = i;
+ }
+
+ nvidia::sort(mConstraintSortPairArray.begin(), size, ConstraintSortPredicate());
+
+ for (uint32_t k = 0; k < size; k++)
+ {
+ float firstKey = mConstraintSortPairArray[k].key;
+ if (firstKey == FLT_MAX)
+ {
+ break;
+ }
+ ConstraintData& firstData = mConstraintDataArray[mConstraintSortPairArray[k].index];
+
+ //special case when k == i
+ float sumWeight = firstData.weight;
+ T sum = firstData.targetValue;
+ for (uint32_t i = k + 1; i < size; i++)
+ {
+ const ConstraintData& data = mConstraintDataArray[mConstraintSortPairArray[i].index];
+
+ sumWeight += data.weight;
+ const T value = static_cast<T>(firstKey * data.weight);
+ PX_ASSERT(value <= data.targetValue);
+ sum += value;
+ }
+
+ if (sum > totalValueLimit)
+ {
+ for (uint32_t i = k; i < size; i++)
+ {
+ ConstraintData& data = mConstraintDataArray[mConstraintSortPairArray[i].index];
+
+ const T value = static_cast<T>(totalValueLimit * data.weight / sumWeight);
+ PX_ASSERT(value <= data.targetValue);
+ data.resultValue = value;
+ }
+ break;
+ }
+ //already set above: firstData.resultValue = firstData.targetValue
+ totalValueLimit -= firstData.targetValue;
+ }
+ }
+
+private:
+ struct ConstraintData
+ {
+ float benefit; //input benefit
+ T targetValue; //input constraint on value
+ float weight; //temp
+ T resultValue; //output
+ };
+ struct ConstraintSortPair
+ {
+ float key;
+ uint32_t index;
+ };
+ class ConstraintSortPredicate
+ {
+ public:
+ PX_INLINE bool operator()(const ConstraintSortPair& a, const ConstraintSortPair& b) const
+ {
+ return a.key < b.key;
+ }
+ };
+
+ physx::Array<ConstraintData> mConstraintDataArray;
+ physx::Array<ConstraintSortPair> mConstraintSortPairArray;
+};
+
+}
+} // end namespace nvidia::apex
+
+#endif
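solve() distributes a limited total among the entries in proportion to their benefit while never exceeding any entry's target value (see the PX_ASSERTs above). A short usage sketch with illustrative numbers:

    // Distribute a budget of 100 among three entries; each result stays <= its target
    // and the results together stay within the budget.
    ApexConstrainedDistributor<uint32_t> dist;
    dist.resize(3);
    dist.setBenefit(0, 1.0f);  dist.setTargetValue(0, 80);
    dist.setBenefit(1, 2.0f);  dist.setTargetValue(1, 20);
    dist.setBenefit(2, 1.0f);  dist.setTargetValue(2, 50);
    dist.solve(100);
    uint32_t r0 = dist.getResultValue(0);
    uint32_t r1 = dist.getResultValue(1);
    uint32_t r2 = dist.getResultValue(2);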
diff --git a/APEX_1.4/common/include/ApexContext.h b/APEX_1.4/common/include/ApexContext.h
new file mode 100644
index 00000000..d991dd3a
--- /dev/null
+++ b/APEX_1.4/common/include/ApexContext.h
@@ -0,0 +1,86 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+
+#ifndef APEX_CONTEXT_H
+#define APEX_CONTEXT_H
+
+#include "ApexUsingNamespace.h"
+#include "Context.h"
+#include "PsMutex.h"
+#include "PsArray.h"
+#include "PsUserAllocated.h"
+#include "ApexRWLockable.h"
+
+namespace nvidia
+{
+namespace apex
+{
+
+class ApexActor;
+class ApexRenderableIterator;
+
+class ApexContext
+{
+public:
+ ApexContext() : mIterator(NULL) {}
+ virtual ~ApexContext();
+
+ virtual uint32_t addActor(ApexActor& actor, ApexActor* actorPtr = NULL);
+ virtual void callContextCreationCallbacks(ApexActor* actorPtr);
+ virtual void callContextDeletionCallbacks(ApexActor* actorPtr);
+ virtual void removeActorAtIndex(uint32_t index);
+
+ void renderLockAllActors();
+ void renderUnLockAllActors();
+
+ void removeAllActors();
+ RenderableIterator* createRenderableIterator();
+ void releaseRenderableIterator(RenderableIterator&);
+
+protected:
+ physx::Array<ApexActor*> mActorArray;
+ physx::Array<ApexActor*> mActorArrayCallBacks;
+ nvidia::ReadWriteLock mActorListLock;
+ ApexRenderableIterator* mIterator;
+
+ friend class ApexRenderableIterator;
+ friend class ApexActor;
+};
+
+class ApexRenderableIterator : public RenderableIterator, public ApexRWLockable, public UserAllocated
+{
+public:
+ APEX_RW_LOCKABLE_BOILERPLATE
+
+ Renderable* getFirst();
+ Renderable* getNext();
+ void reset();
+ void release();
+
+protected:
+ void destroy();
+ ApexRenderableIterator(ApexContext&);
+ virtual ~ApexRenderableIterator() {}
+ void removeCachedActor(ApexActor&);
+
+ ApexContext* ctx;
+ uint32_t curActor;
+ ApexActor* mLockedActor;
+ physx::Array<ApexActor*> mCachedActors;
+ physx::Array<ApexActor*> mSkippedActors;
+
+ friend class ApexContext;
+};
+
+}
+} // end namespace nvidia::apex
+
+#endif // APEX_CONTEXT_H \ No newline at end of file
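ApexContext hands out renderable iterators over the actors it owns. A minimal iteration sketch (ctx is assumed to be an object of an ApexContext-derived class, and getFirst()/getNext() are assumed to return NULL when there is nothing left to visit):

    // Hypothetical iteration over the renderables of a context.
    RenderableIterator* it = ctx.createRenderableIterator();
    for (Renderable* r = it->getFirst(); r != NULL; r = it->getNext())
    {
        // render or inspect r here
    }
    ctx.releaseRenderableIterator(*it);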
diff --git a/APEX_1.4/common/include/ApexCuda.h b/APEX_1.4/common/include/ApexCuda.h
new file mode 100644
index 00000000..c4c476dd
--- /dev/null
+++ b/APEX_1.4/common/include/ApexCuda.h
@@ -0,0 +1,375 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+
+#ifndef APEX_CUDA_H
+#define APEX_CUDA_H
+
+#include <cuda.h>
+#include "ApexCudaDefs.h"
+
+#define APEX_CUDA_CONCAT_I(arg1, arg2) arg1 ## arg2
+#define APEX_CUDA_CONCAT(arg1, arg2) APEX_CUDA_CONCAT_I(arg1, arg2)
+
+#define APEX_CUDA_TO_STR_I(arg) # arg
+#define APEX_CUDA_TO_STR(arg) APEX_CUDA_TO_STR_I(arg)
+
+const unsigned int APEX_CUDA_SINGLE_BLOCK_LAUNCH = 0xFFFFFFFF;
+
+#define APEX_CUDA_KERNEL_DEFAULT_CONFIG ()
+#define APEX_CUDA_KERNEL_2D_CONFIG(x, y) (0, 0, nvidia::apex::DimBlock(x, y, 1))
+#define APEX_CUDA_KERNEL_3D_CONFIG(x, y, z) (0, 0, nvidia::apex::DimBlock(x, y, z))
+
+#define APEX_CUDA_TEX_FILTER_POINT CU_TR_FILTER_MODE_POINT
+#define APEX_CUDA_TEX_FILTER_LINEAR CU_TR_FILTER_MODE_LINEAR
+
+
+#include <boost/preprocessor/seq.hpp>
+#include <boost/preprocessor/seq/for_each.hpp>
+#include <boost/preprocessor/seq/for_each_i.hpp>
+#include <boost/preprocessor/tuple/elem.hpp>
+#include <boost/preprocessor/punctuation/comma_if.hpp>
+#include <boost/preprocessor/cat.hpp>
+
+
+#define __APEX_CUDA_FUNC_ARG_NAME(elem) BOOST_PP_TUPLE_ELEM(2, 1, elem)
+
+#define __APEX_CUDA_FUNC_ARG_I(r, data, i, elem) BOOST_PP_COMMA_IF(i) BOOST_PP_TUPLE_ELEM(2, 0, elem) __APEX_CUDA_FUNC_ARG_NAME(elem)
+#define __APEX_CUDA_FUNC_ARGS(argseq) BOOST_PP_SEQ_FOR_EACH_I(__APEX_CUDA_FUNC_ARG_I, _, argseq)
+
+#define __APEX_CUDA_FUNC_ARG_NAME_I(r, data, i, elem) BOOST_PP_COMMA_IF(i) __APEX_CUDA_FUNC_ARG_NAME(elem)
+#define __APEX_CUDA_FUNC_ARG_NAMES(argseq) BOOST_PP_SEQ_FOR_EACH_I(__APEX_CUDA_FUNC_ARG_NAME_I, _, argseq)
+
+
+#define __APEX_CUDA_FUNC_$ARG_NAME(elem) BOOST_PP_CAT(_$arg_, BOOST_PP_TUPLE_ELEM(2, 1, elem))
+
+#define __APEX_CUDA_FUNC_$ARG_I(r, data, i, elem) BOOST_PP_COMMA_IF(i) BOOST_PP_TUPLE_ELEM(2, 0, elem) __APEX_CUDA_FUNC_$ARG_NAME(elem)
+#define __APEX_CUDA_FUNC_$ARGS(argseq) BOOST_PP_SEQ_FOR_EACH_I(__APEX_CUDA_FUNC_$ARG_I, _, argseq)
+
+#define __APEX_CUDA_FUNC_$ARG_NAME_I(r, data, i, elem) BOOST_PP_COMMA_IF(i) __APEX_CUDA_FUNC_$ARG_NAME(elem)
+#define __APEX_CUDA_FUNC_$ARG_NAMES(argseq) BOOST_PP_SEQ_FOR_EACH_I(__APEX_CUDA_FUNC_$ARG_NAME_I, _, argseq)
+
+
+#define __APEX_CUDA_FUNC_SET_PARAM(r, data, elem) setParam( data, __APEX_CUDA_FUNC_$ARG_NAME(elem) );
+
+#define __APEX_CUDA_FUNC_COPY_PARAM(r, data, elem) copyParam( APEX_CUDA_TO_STR(__APEX_CUDA_FUNC_ARG_NAME(elem)), __APEX_CUDA_FUNC_$ARG_NAME(elem) );
+
+#define APEX_CUDA_NAME(name) APEX_CUDA_CONCAT(APEX_CUDA_MODULE_PREFIX, name)
+#define APEX_CUDA_NAME_STR(name) APEX_CUDA_TO_STR( APEX_CUDA_NAME(name) )
+
+#define APEX_CUDA_TEX_REF_NAME(name) APEX_CUDA_NAME( APEX_CUDA_CONCAT(texRef, name) )
+#define APEX_CUDA_SURF_REF_NAME(name) APEX_CUDA_NAME( APEX_CUDA_CONCAT(surfRef, name) )
+
+#define APEX_CUDA_STORAGE(name) APEX_CUDA_STORAGE_SIZE(name, MAX_CONST_MEM_SIZE)
+
+#ifdef __CUDACC__
+
+#define APEX_MEM_BLOCK(format) format*
+
+#define APEX_CUDA_TEXTURE_1D(name, format) texture<format, cudaTextureType1D, cudaReadModeElementType> APEX_CUDA_NAME(name);
+#define APEX_CUDA_TEXTURE_2D(name, format) texture<format, cudaTextureType2D, cudaReadModeElementType> APEX_CUDA_NAME(name);
+#define APEX_CUDA_TEXTURE_3D(name, format) texture<format, cudaTextureType3D, cudaReadModeElementType> APEX_CUDA_NAME(name);
+#define APEX_CUDA_TEXTURE_3D_FILTER(name, format, filter) APEX_CUDA_TEXTURE_3D(name, format)
+
+#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ < 200
+
+#define APEX_CUDA_SURFACE_1D(name)
+#define APEX_CUDA_SURFACE_2D(name)
+#define APEX_CUDA_SURFACE_3D(name)
+
+#else
+
+#define APEX_CUDA_SURFACE_1D(name) surface<void, cudaSurfaceType1D> APEX_CUDA_NAME(name);
+#define APEX_CUDA_SURFACE_2D(name) surface<void, cudaSurfaceType2D> APEX_CUDA_NAME(name);
+#define APEX_CUDA_SURFACE_3D(name) surface<void, cudaSurfaceType3D> APEX_CUDA_NAME(name);
+
+#endif
+
+#define APEX_CUDA_STORAGE_SIZE(name, size) \
+ __constant__ unsigned char APEX_CUDA_NAME( APEX_CUDA_CONCAT(name, _ConstMem) )[size]; \
+ texture<int, 1, cudaReadModeElementType> APEX_CUDA_NAME( APEX_CUDA_CONCAT(name, _Texture) );
+
+
+#define APEX_CUDA_FREE_KERNEL(kernelWarps, kernelName, argseq) \
+ extern "C" __global__ void APEX_CUDA_NAME(kernelName)(int* _extMem, uint16_t _kernelEnum, uint32_t _threadCount, __APEX_CUDA_FUNC_ARGS(argseq) );
+
+#define APEX_CUDA_FREE_KERNEL_2D(kernelDim, kernelName, argseq) \
+ extern "C" __global__ void APEX_CUDA_NAME(kernelName)(int* _extMem, uint16_t _kernelEnum, uint32_t _threadCountX, uint32_t _threadCountY, __APEX_CUDA_FUNC_ARGS(argseq) );
+
+#define APEX_CUDA_FREE_KERNEL_3D(kernelDim, kernelName, argseq) \
+ extern "C" __global__ void APEX_CUDA_NAME(kernelName)(int* _extMem, uint16_t _kernelEnum, uint32_t _threadCountX, uint32_t _threadCountY, uint32_t _threadCountZ, uint32_t _blockCountY, __APEX_CUDA_FUNC_ARGS(argseq) );
+
+#define APEX_CUDA_BOUND_KERNEL(kernelWarps, kernelName, argseq) \
+ extern "C" __global__ void APEX_CUDA_NAME(kernelName)(int* _extMem, uint16_t _kernelEnum, uint32_t _threadCount, __APEX_CUDA_FUNC_ARGS(argseq) );
+
+#define APEX_CUDA_SYNC_KERNEL(kernelWarps, kernelName, argseq) \
+ extern "C" __global__ void APEX_CUDA_NAME(kernelName)(int* _extMem, uint16_t _kernelEnum, __APEX_CUDA_FUNC_ARGS(argseq) );
+
+#else
+
+#define APEX_CUDA_CLASS_NAME(name) APEX_CUDA_CONCAT(CudaClass_, APEX_CUDA_NAME(name) )
+#define APEX_CUDA_OBJ_NAME(name) APEX_CUDA_CONCAT(cudaObj_, APEX_CUDA_NAME(name) )
+
+#define APEX_MEM_BLOCK(format) const ApexCudaMemRef<format>&
+
+#define __APEX_CUDA_TEXTURE(name, filter) \
+ class APEX_CUDA_CLASS_NAME(name) : public ApexCudaTexRef { \
+ public: \
+ APEX_CUDA_CLASS_NAME(name) () : ApexCudaTexRef( APEX_CUDA_NAME_STR(name), filter ) {} \
+ } APEX_CUDA_OBJ_NAME(name); \
+
+#define APEX_CUDA_TEXTURE_1D(name, format) __APEX_CUDA_TEXTURE(name, APEX_CUDA_TEX_FILTER_POINT)
+#define APEX_CUDA_TEXTURE_2D(name, format) __APEX_CUDA_TEXTURE(name, APEX_CUDA_TEX_FILTER_POINT)
+#define APEX_CUDA_TEXTURE_3D(name, format) __APEX_CUDA_TEXTURE(name, APEX_CUDA_TEX_FILTER_POINT)
+#define APEX_CUDA_TEXTURE_3D_FILTER(name, format, filter) __APEX_CUDA_TEXTURE(name, filter)
+
+
+#define __APEX_CUDA_SURFACE(name) \
+ class APEX_CUDA_CLASS_NAME(name) : public ApexCudaSurfRef { \
+ public: \
+ APEX_CUDA_CLASS_NAME(name) () : ApexCudaSurfRef( APEX_CUDA_NAME_STR(name) ) {} \
+ } APEX_CUDA_OBJ_NAME(name); \
+
+#define APEX_CUDA_SURFACE_1D(name) __APEX_CUDA_SURFACE(name)
+#define APEX_CUDA_SURFACE_2D(name) __APEX_CUDA_SURFACE(name)
+#define APEX_CUDA_SURFACE_3D(name) __APEX_CUDA_SURFACE(name)
+
+#define APEX_CUDA_STORAGE_SIZE(name, size) \
+ class APEX_CUDA_CLASS_NAME(name) : public ApexCudaConstStorage { \
+ public: \
+ APEX_CUDA_CLASS_NAME(name) () : ApexCudaConstStorage( APEX_CUDA_NAME_STR( APEX_CUDA_CONCAT(name, _ConstMem) ), APEX_CUDA_NAME_STR( APEX_CUDA_CONCAT(name, _Texture) ) ) {} \
+ } APEX_CUDA_OBJ_NAME(name); \
+
+
+#define __APEX_CUDA_KERNEL_START($blocksPerSM, $config, name, argseq) \
+ class APEX_CUDA_CLASS_NAME(name) : public ApexCudaFunc \
+ { \
+ public: \
+ APEX_CUDA_CLASS_NAME(name) () : ApexCudaFunc( APEX_CUDA_NAME_STR(name) ) {} \
+ PX_INLINE bool isSingleBlock(unsigned int threadCount) const \
+ { \
+ const FuncInstData& fid = getFuncInstData(); \
+ return (threadCount <= fid.mWarpsPerBlock * WARP_SIZE); \
+ } \
+ protected: \
+ void launch1(const FuncInstData& fid, ApexCudaFuncParams& params, CUstream stream) \
+ { \
+ PX_UNUSED(fid); \
+ PX_ASSERT( isValid() ); \
+ setParam(params, (int*)NULL); \
+ setParam(params, uint16_t(0)); \
+ mCTContext = mManager->getCudaTestManager()->isTestKernel(mName, mManager->getModule()->getName());\
+ if (mCTContext) \
+ { \
+ mCTContext->setCuStream(stream); \
+ resolveContext(); \
+ } \
+ } \
+ void launch2(const FuncInstData& fid, const DimBlock& blockDim, uint32_t sharedSize, ApexCudaFuncParams& params, CUstream stream, const DimGrid& gridDim, __APEX_CUDA_FUNC_$ARGS(argseq) ) \
+ { \
+ if (mCTContext) { \
+ mCTContext->setGridDim(gridDim.x, gridDim.y); \
+ mCTContext->setBlockDim(blockDim.x, blockDim.y, blockDim.z); \
+ mCTContext->setSharedSize(sharedSize); \
+ mCTContext->setFuncInstId(int(&fid - mFuncInstData)); \
+ BOOST_PP_SEQ_FOR_EACH(__APEX_CUDA_FUNC_COPY_PARAM, , argseq); \
+ } \
+ BOOST_PP_SEQ_FOR_EACH(__APEX_CUDA_FUNC_SET_PARAM, params, argseq); \
+ void *config[5] = { \
+ CU_LAUNCH_PARAM_BUFFER_POINTER, params.mParams, \
+ CU_LAUNCH_PARAM_BUFFER_SIZE, &params.mOffset, \
+ CU_LAUNCH_PARAM_END \
+ }; \
+ onBeforeLaunch(stream); \
+ CUT_SAFE_CALL(cuLaunchKernel(fid.mCuFunc, gridDim.x, gridDim.y, 1, blockDim.x, blockDim.y, blockDim.z, sharedSize, stream, 0, (void **)config)); \
+ onAfterLaunch(stream); \
+ if (mCTContext) mCTContext->setKernelStatus(); \
+ } \
+ PX_INLINE void evalLaunchParams(const ApexKernelConfig& kernelConfig, const FuncInstData& fid, uint32_t &outWarpsPerBlock, uint32_t &outDynamicShared) \
+ { \
+ const ApexCudaDeviceTraits& devTraits = mManager->getDeviceTraits(); \
+ const uint32_t fixedSharedMem = (kernelConfig.fixedSharedMemDWords << 2); \
+ const uint32_t sharedMemPerWarp = (kernelConfig.sharedMemDWordsPerWarp << 2); \
+ const uint32_t staticSharedMem = fid.mStaticSharedSize + fixedSharedMem; \
+ PX_ASSERT(staticSharedMem + sharedMemPerWarp * 1 <= devTraits.mMaxSharedMemPerBlock); \
+ if (kernelConfig.blockDim.x == 0) \
+ { \
+ const uint32_t maxThreadsPerSM = physx::PxMin(devTraits.mMaxRegistersPerSM / fid.mNumRegsPerThread, devTraits.mMaxThreadsPerSM); \
+ outWarpsPerBlock = physx::PxMin(fid.mMaxThreadsPerBlock, maxThreadsPerSM / $blocksPerSM) >> LOG2_WARP_SIZE; \
+ if (sharedMemPerWarp > 0) \
+ { \
+ const uint32_t sharedMemLimit4SM = (devTraits.mMaxSharedMemPerSM - staticSharedMem * $blocksPerSM) / (sharedMemPerWarp * $blocksPerSM); \
+ const uint32_t sharedMemLimit4Block = (devTraits.mMaxSharedMemPerBlock - staticSharedMem) / sharedMemPerWarp; \
+ const uint32_t sharedMemLimit = physx::PxMin(sharedMemLimit4SM, sharedMemLimit4Block); \
+ outWarpsPerBlock = PxMin<uint32_t>(outWarpsPerBlock, sharedMemLimit); \
+ } \
+ PX_ASSERT(outWarpsPerBlock > 0); \
+ } \
+ else \
+ { \
+ outWarpsPerBlock = (kernelConfig.blockDim.x * kernelConfig.blockDim.y * kernelConfig.blockDim.z + WARP_SIZE-1) / WARP_SIZE; \
+ } \
+ outDynamicShared = fixedSharedMem + sharedMemPerWarp * outWarpsPerBlock; \
+ PX_ASSERT(fid.mStaticSharedSize + outDynamicShared <= devTraits.mMaxSharedMemPerBlock); \
+ PX_ASSERT(outWarpsPerBlock * WARP_SIZE <= fid.mMaxThreadsPerBlock); \
+ PX_ASSERT(outWarpsPerBlock >= kernelConfig.minWarpsPerBlock); \
+ } \
+ virtual void init( PxCudaContextManager* ctx, int funcInstIndex ) \
+ { \
+ PX_UNUSED(ctx); \
+ ApexKernelConfig kernelConfig = ApexKernelConfig $config; \
+ FuncInstData& fid = mFuncInstData[(uint32_t)funcInstIndex]; \
+ evalLaunchParams(kernelConfig, fid, fid.mWarpsPerBlock, fid.mDynamicShared); \
+
+
+#define __APEX_CUDA_KERNEL_WARPS_END(name, argseq) \
+ } \
+ private: \
+ uint32_t mMaxBlocksPerGrid; \
+ uint32_t launch(const FuncInstData& fid, uint32_t warpsPerBlock, uint32_t dynamicShared, CUstream stream, unsigned int _threadCount, __APEX_CUDA_FUNC_$ARGS(argseq) ) \
+ { \
+ warpsPerBlock = PxMin(warpsPerBlock, MAX_WARPS_PER_BLOCK); /* TODO: refactor old kernels to avoid this */ \
+ uint32_t threadsPerBlock = warpsPerBlock * WARP_SIZE; \
+ uint32_t blocksPerGrid = 1; \
+ if (_threadCount == APEX_CUDA_SINGLE_BLOCK_LAUNCH) \
+ { \
+ _threadCount = threadsPerBlock; \
+ } \
+ else \
+ { \
+ if (_threadCount > threadsPerBlock) \
+ { \
+ blocksPerGrid = PxMin((_threadCount + threadsPerBlock - 1) / threadsPerBlock, mMaxBlocksPerGrid); \
+ } \
+ else \
+ { \
+ threadsPerBlock = APEX_CUDA_ALIGN_UP(_threadCount, WARP_SIZE); \
+ } \
+ } \
+ ApexCudaFuncParams params; \
+ launch1(fid, params, stream); \
+ if (mCTContext) mCTContext->setBoundKernel(_threadCount); \
+ setParam(params, _threadCount); \
+ launch2(fid, DimBlock(threadsPerBlock), dynamicShared, params, stream, DimGrid(blocksPerGrid), __APEX_CUDA_FUNC_$ARG_NAMES(argseq) ); \
+ return blocksPerGrid; \
+ } \
+ public: \
+ uint32_t operator() ( CUstream stream, unsigned int _threadCount, __APEX_CUDA_FUNC_$ARGS(argseq) ) \
+ { \
+ const FuncInstData& fid = getFuncInstData(); \
+ return launch(fid, fid.mWarpsPerBlock, fid.mDynamicShared, stream, _threadCount, __APEX_CUDA_FUNC_$ARG_NAMES(argseq) ); \
+ } \
+ uint32_t operator() ( const ApexKernelConfig& kernelConfig, CUstream stream, unsigned int _threadCount, __APEX_CUDA_FUNC_$ARGS(argseq) ) \
+ { \
+ const FuncInstData& fid = getFuncInstData(); \
+ uint32_t warpsPerBlock; \
+ uint32_t dynamicShared; \
+ evalLaunchParams(kernelConfig, fid, warpsPerBlock, dynamicShared); \
+ return launch(fid, warpsPerBlock, dynamicShared, stream, _threadCount, __APEX_CUDA_FUNC_$ARG_NAMES(argseq) ); \
+ } \
+ } APEX_CUDA_OBJ_NAME(name); \
+
+
+#define APEX_CUDA_SYNC_KERNEL(config, name, argseq) \
+ __APEX_CUDA_KERNEL_START(1, config, name, argseq) \
+ mBlocksPerGrid = (uint32_t)ctx->getMultiprocessorCount(); \
+ } \
+ private: \
+ uint32_t mBlocksPerGrid; \
+ public: \
+ void operator() ( CUstream stream, __APEX_CUDA_FUNC_$ARGS(argseq) ) \
+ { \
+ const FuncInstData& fid = getFuncInstData(); \
+ ApexCudaFuncParams params; \
+ launch1(fid, params, stream); \
+ if (mCTContext) mCTContext->setSyncKernel(); \
+		/* allocate the full dynamic shared memory for correct block distribution on GF100 */ \
+ uint32_t dynamicSharedSize = mManager->getDeviceTraits().mMaxSharedMemPerBlock - fid.mStaticSharedSize; \
+ launch2(fid, DimBlock(fid.mWarpsPerBlock * WARP_SIZE), dynamicSharedSize, params, stream, DimGrid(mBlocksPerGrid), __APEX_CUDA_FUNC_$ARG_NAMES(argseq) ); \
+ } \
+ } APEX_CUDA_OBJ_NAME(name); \
+
+
+#define APEX_CUDA_BOUND_KERNEL(config, name, argseq) \
+ __APEX_CUDA_KERNEL_START(mManager->getDeviceTraits().mBlocksPerSM, config, name, argseq) \
+ mMaxBlocksPerGrid = PxMin(mManager->getDeviceTraits().mMaxBlocksPerGrid, kernelConfig.maxGridSize); \
+ __APEX_CUDA_KERNEL_WARPS_END(name, argseq) \
+
+
+#define APEX_CUDA_FREE_KERNEL(config, name, argseq) \
+ __APEX_CUDA_KERNEL_START(mManager->getDeviceTraits().mBlocksPerSM, config, name, argseq) \
+ mMaxBlocksPerGrid = UINT_MAX; \
+ __APEX_CUDA_KERNEL_WARPS_END(name, argseq) \
+
+
+#define APEX_CUDA_FREE_KERNEL_2D($config, name, argseq) \
+ __APEX_CUDA_KERNEL_START(mManager->getDeviceTraits().mBlocksPerSM_2D, $config, name, argseq) \
+ } \
+ public: \
+ void operator() ( CUstream stream, unsigned int _threadCountX, unsigned int _threadCountY, __APEX_CUDA_FUNC_$ARGS(argseq) ) \
+ { \
+ const FuncInstData& fid = getFuncInstData(); \
+ DimBlock blockDim = (ApexKernelConfig $config).blockDim; \
+ if (blockDim.x == 0) \
+ { \
+ uint32_t threadsPerBlock = fid.mWarpsPerBlock * WARP_SIZE; \
+ blockDim.x = PxMin(_threadCountX, threadsPerBlock); \
+ threadsPerBlock /= blockDim.x; \
+ blockDim.y = PxMin(_threadCountY, threadsPerBlock); \
+ } \
+ DimGrid gridDim; \
+ gridDim.x = (_threadCountX + blockDim.x - 1) / blockDim.x; \
+ gridDim.y = (_threadCountY + blockDim.y - 1) / blockDim.y; \
+ ApexCudaFuncParams params; \
+ launch1(fid, params, stream); \
+ if (mCTContext) mCTContext->setFreeKernel(_threadCountX, _threadCountY); \
+ setParam(params, _threadCountX); \
+ setParam(params, _threadCountY); \
+ launch2(fid, blockDim, fid.mDynamicShared, params, stream, gridDim, __APEX_CUDA_FUNC_$ARG_NAMES(argseq) ); \
+ } \
+ } APEX_CUDA_OBJ_NAME(name); \
+
+
+#define APEX_CUDA_FREE_KERNEL_3D($config, name, argseq) \
+ __APEX_CUDA_KERNEL_START(mManager->getDeviceTraits().mBlocksPerSM_3D, $config, name, argseq) \
+ } \
+ public: \
+ void operator() ( CUstream stream, unsigned int _threadCountX, unsigned int _threadCountY, unsigned int _threadCountZ, __APEX_CUDA_FUNC_$ARGS(argseq) ) \
+ { \
+ const FuncInstData& fid = getFuncInstData(); \
+ DimBlock blockDim = (ApexKernelConfig $config).blockDim; \
+ if (blockDim.x == 0) \
+ { \
+ uint32_t threadsPerBlock = fid.mWarpsPerBlock * WARP_SIZE; \
+ blockDim.x = PxMin(_threadCountX, threadsPerBlock); \
+ threadsPerBlock /= blockDim.x; \
+ blockDim.y = PxMin(_threadCountY, threadsPerBlock); \
+ threadsPerBlock /= blockDim.y; \
+ blockDim.z = PxMin(_threadCountZ, threadsPerBlock); \
+ } \
+ const uint32_t blockCountX = (_threadCountX + blockDim.x - 1) / blockDim.x; \
+ const uint32_t blockCountY = (_threadCountY + blockDim.y - 1) / blockDim.y; \
+ const uint32_t blockCountZ = (_threadCountZ + blockDim.z - 1) / blockDim.z; \
+ DimGrid gridDim(blockCountX, blockCountY * blockCountZ); \
+ ApexCudaFuncParams params; \
+ launch1(fid, params, stream); \
+ if (mCTContext) mCTContext->setFreeKernel(_threadCountX, _threadCountY, _threadCountZ, blockCountY); \
+ setParam(params, _threadCountX); \
+ setParam(params, _threadCountY); \
+ setParam(params, _threadCountZ); \
+ setParam(params, blockCountY); \
+ launch2(fid, blockDim, fid.mDynamicShared, params, stream, gridDim, __APEX_CUDA_FUNC_$ARG_NAMES(argseq) ); \
+ } \
+ } APEX_CUDA_OBJ_NAME(name); \
+
+
+#endif // #ifdef __CUDACC__
+
+#endif //APEX_CUDA_H
diff --git a/APEX_1.4/common/include/ApexCudaDefs.h b/APEX_1.4/common/include/ApexCudaDefs.h
new file mode 100644
index 00000000..6a089265
--- /dev/null
+++ b/APEX_1.4/common/include/ApexCudaDefs.h
@@ -0,0 +1,310 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+
+#ifndef APEX_CUDA_DEFS_H
+#define APEX_CUDA_DEFS_H
+
+#include <cuda.h>
+
+const unsigned int MAX_CONST_MEM_SIZE = 65536;
+
+const unsigned int APEX_CUDA_MEM_ALIGNMENT = 256;
+const unsigned int APEX_CUDA_TEX_MEM_ALIGNMENT = 512;
+
+const unsigned int MAX_SMEM_BANKS = 32;
+
+
+#define APEX_CUDA_ALIGN_UP(value, alignment) (((value) + (alignment)-1) & ~((alignment)-1))
+#define APEX_CUDA_MEM_ALIGN_UP_32BIT(count) APEX_CUDA_ALIGN_UP(count, APEX_CUDA_MEM_ALIGNMENT >> 2)
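+// For example, APEX_CUDA_ALIGN_UP(100, 32) == 128, and APEX_CUDA_MEM_ALIGN_UP_32BIT(100) == 128:
+// the latter rounds a 32-bit word count up to the 256-byte memory alignment (256 >> 2 == 64 words).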
+
+const unsigned int LOG2_WARP_SIZE = 5;
+const unsigned int WARP_SIZE = (1U << LOG2_WARP_SIZE);
+
+//if you would like to make this value larger than 32 for future GPUs,
+//then you'll need to fix some kernels (like reduce and scan) to support more than 32 warps per block!!!
+const unsigned int MAX_WARPS_PER_BLOCK = 32;
+const unsigned int MAX_THREADS_PER_BLOCK = (MAX_WARPS_PER_BLOCK << LOG2_WARP_SIZE);
+
+const unsigned int MAX_BOUND_BLOCKS = 64;
+
+//uncomment this line to force bound kernels to use a defined number of CTAs
+//#define APEX_CUDA_FORCED_BLOCKS 60
+
+
+namespace nvidia
+{
+namespace apex
+{
+
+struct ApexCudaMemFlags
+{
+ enum Enum
+ {
+ UNUSED = 0,
+ IN = 0x01,
+ OUT = 0x02,
+ IN_OUT = IN | OUT
+ };
+};
+
+#ifndef __CUDACC__
+
+class ApexCudaArray : public UserAllocated
+{
+ PX_NOCOPY(ApexCudaArray)
+
+ void init()
+ {
+ switch (mDesc.Format)
+ {
+ case CU_AD_FORMAT_UNSIGNED_INT8:
+ case CU_AD_FORMAT_SIGNED_INT8:
+ mElemSize = 1;
+ break;
+ case CU_AD_FORMAT_UNSIGNED_INT16:
+ case CU_AD_FORMAT_SIGNED_INT16:
+ case CU_AD_FORMAT_HALF:
+ mElemSize = 2;
+ break;
+ case CU_AD_FORMAT_UNSIGNED_INT32:
+ case CU_AD_FORMAT_SIGNED_INT32:
+ case CU_AD_FORMAT_FLOAT:
+ mElemSize = 4;
+ break;
+ default:
+ PX_ALWAYS_ASSERT();
+ mElemSize = 0;
+ break;
+ };
+ mElemSize *= mDesc.NumChannels;
+ }
+
+public:
+ ApexCudaArray() : mCuArray(NULL), mHasOwnership(false), mElemSize(0) {}
+ ~ApexCudaArray() { release(); }
+
+ void assign(CUarray cuArray, bool bTakeOwnership)
+ {
+ release();
+
+ mCuArray = cuArray;
+ mHasOwnership = bTakeOwnership;
+ CUT_SAFE_CALL(cuArray3DGetDescriptor(&mDesc, mCuArray));
+ init();
+ }
+
+ void create(CUDA_ARRAY3D_DESCRIPTOR desc)
+ {
+ if (mCuArray != NULL && mHasOwnership &&
+ mDesc.Width == desc.Width && mDesc.Height == desc.Height && mDesc.Depth == desc.Depth &&
+ mDesc.Format == desc.Format && mDesc.NumChannels == desc.NumChannels && mDesc.Flags == desc.Flags)
+ {
+ return;
+ }
+ release();
+
+ // Allocate CUDA 3d array in device memory
+ mDesc = desc;
+ CUT_SAFE_CALL(cuArray3DCreate(&mCuArray, &mDesc));
+ mHasOwnership = true;
+ init();
+ }
+
+ void create(CUarray_format format, unsigned int numChannels, unsigned int width, unsigned int height, unsigned int depth = 0, bool surfUsage = false)
+ {
+ CUDA_ARRAY3D_DESCRIPTOR desc;
+ desc.Format = format;
+ desc.NumChannels = numChannels;
+ desc.Width = width;
+ desc.Height = height;
+ desc.Depth = depth;
+ desc.Flags = surfUsage ? CUDA_ARRAY3D_SURFACE_LDST : 0u;
+
+ create(desc);
+ }
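+
+	// Usage sketch (array name, sizes, stream and host pointer are illustrative): create a 128x64
+	// single-channel float 2D array with surface load/store enabled, then fill it from host memory:
+	//     ApexCudaArray velocityArray;
+	//     velocityArray.create(CU_AD_FORMAT_FLOAT, 1, 128, 64, 0, true);
+	//     velocityArray.copyFromHost(stream, hostData);   // hostData holds 128*64 floats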
+
+ void release()
+ {
+ if (mCuArray != NULL)
+ {
+ if (mHasOwnership)
+ {
+ CUT_SAFE_CALL(cuArrayDestroy(mCuArray));
+ }
+ mCuArray = NULL;
+ mHasOwnership = false;
+ mElemSize = 0;
+ }
+ }
+
+ void copyToHost(CUstream stream, void* dstHost, size_t dstPitch = 0, size_t dstHeight = 0,
+ size_t copyWidth = 0, size_t copyHeight = 0, size_t copyDepth = 0)
+ {
+ if (mDesc.Width > 0)
+ {
+ if (mDesc.Height > 0)
+ {
+ if (mDesc.Depth > 0)
+ {
+ //3D
+ CUDA_MEMCPY3D copyDesc;
+ copyDesc.WidthInBytes = size_t(copyWidth ? copyWidth : mDesc.Width) * mElemSize;
+ copyDesc.Height = copyHeight ? copyHeight : mDesc.Height;
+ copyDesc.Depth = copyDepth ? copyDepth : mDesc.Depth;
+
+ copyDesc.srcXInBytes = copyDesc.srcY = copyDesc.srcZ = copyDesc.srcLOD = 0;
+ copyDesc.srcMemoryType = CU_MEMORYTYPE_ARRAY;
+ copyDesc.srcArray = mCuArray;
+
+ copyDesc.dstXInBytes = copyDesc.dstY = copyDesc.dstZ = copyDesc.dstLOD = 0;
+ copyDesc.dstMemoryType = CU_MEMORYTYPE_HOST;
+ copyDesc.dstHost = dstHost;
+ copyDesc.dstPitch = (dstPitch > 0) ? dstPitch : copyDesc.WidthInBytes;
+ copyDesc.dstHeight = (dstHeight > 0) ? dstHeight : copyDesc.Height;
+ CUT_SAFE_CALL(cuMemcpy3DAsync(&copyDesc, stream));
+ }
+ else
+ {
+ //2D
+ CUDA_MEMCPY2D copyDesc;
+ copyDesc.WidthInBytes = size_t(copyWidth ? copyWidth : mDesc.Width) * mElemSize;
+ copyDesc.Height = copyHeight ? copyHeight : mDesc.Height;
+
+ copyDesc.srcXInBytes = copyDesc.srcY = 0;
+ copyDesc.srcMemoryType = CU_MEMORYTYPE_ARRAY;
+ copyDesc.srcArray = mCuArray;
+
+ copyDesc.dstXInBytes = copyDesc.dstY = 0;
+ copyDesc.dstMemoryType = CU_MEMORYTYPE_HOST;
+ copyDesc.dstHost = dstHost;
+ copyDesc.dstPitch = (dstPitch > 0) ? dstPitch : copyDesc.WidthInBytes;
+ CUT_SAFE_CALL(cuMemcpy2DAsync(&copyDesc, stream));
+ }
+ }
+ else
+ {
+ //1D
+ CUT_SAFE_CALL(cuMemcpyAtoHAsync(dstHost, mCuArray, 0, size_t(copyWidth ? copyWidth : mDesc.Width) * mElemSize, stream));
+ }
+ }
+ }
+
+ void copyFromHost(CUstream stream, const void* srcHost, size_t srcPitch = 0, size_t srcHeight = 0)
+ {
+ if (mDesc.Width > 0)
+ {
+ if (mDesc.Height > 0)
+ {
+ if (mDesc.Depth > 0)
+ {
+ //3D
+ CUDA_MEMCPY3D copyDesc;
+ copyDesc.WidthInBytes = size_t(mDesc.Width) * mElemSize;
+ copyDesc.Height = mDesc.Height;
+ copyDesc.Depth = mDesc.Depth;
+
+ copyDesc.srcXInBytes = copyDesc.srcY = copyDesc.srcZ = copyDesc.srcLOD = 0;
+ copyDesc.srcMemoryType = CU_MEMORYTYPE_HOST;
+ copyDesc.srcHost = srcHost;
+ copyDesc.srcPitch = (srcPitch > 0) ? srcPitch : copyDesc.WidthInBytes;
+ copyDesc.srcHeight = (srcHeight > 0) ? srcHeight : copyDesc.Height;
+
+ copyDesc.dstXInBytes = copyDesc.dstY = copyDesc.dstZ = copyDesc.dstLOD = 0;
+ copyDesc.dstMemoryType = CU_MEMORYTYPE_ARRAY;
+ copyDesc.dstArray = mCuArray;
+
+ CUT_SAFE_CALL(cuMemcpy3DAsync(&copyDesc, stream));
+ }
+ else
+ {
+ //2D
+ CUDA_MEMCPY2D copyDesc;
+ copyDesc.WidthInBytes = size_t(mDesc.Width) * mElemSize;
+ copyDesc.Height = mDesc.Height;
+
+ copyDesc.srcXInBytes = copyDesc.srcY = 0;
+ copyDesc.srcMemoryType = CU_MEMORYTYPE_HOST;
+ copyDesc.srcHost = srcHost;
+ copyDesc.srcPitch = (srcPitch > 0) ? srcPitch : copyDesc.WidthInBytes;
+
+ copyDesc.dstXInBytes = copyDesc.dstY = 0;
+ copyDesc.dstMemoryType = CU_MEMORYTYPE_ARRAY;
+ copyDesc.dstArray = mCuArray;
+
+ CUT_SAFE_CALL(cuMemcpy2DAsync(&copyDesc, stream));
+ }
+ }
+ else
+ {
+ //1D
+ CUT_SAFE_CALL(cuMemcpyHtoAAsync(mCuArray, 0, srcHost, size_t(mDesc.Width) * mElemSize, stream));
+ }
+ }
+ }
+
+ void copyToArray(CUstream stream, CUarray dstArray)
+ {
+ //copy array to array
+ CUDA_MEMCPY3D desc;
+ desc.srcXInBytes = desc.srcY = desc.srcZ = desc.srcLOD = 0;
+ desc.srcMemoryType = CU_MEMORYTYPE_ARRAY;
+ desc.srcArray = mCuArray;
+
+ desc.dstXInBytes = desc.dstY = desc.dstZ = desc.dstLOD = 0;
+ desc.dstMemoryType = CU_MEMORYTYPE_ARRAY;
+ desc.dstArray = dstArray;
+
+ desc.WidthInBytes = size_t(mDesc.Width) * mElemSize;
+ desc.Height = mDesc.Height;
+ desc.Depth = mDesc.Depth;
+ CUT_SAFE_CALL(cuMemcpy3DAsync(&desc, stream));
+ }
+
+ PX_INLINE CUarray getCuArray() const
+ {
+ return mCuArray;
+ }
+ PX_INLINE bool isValid() const
+ {
+ return (mCuArray != NULL);
+ }
+
+ PX_INLINE unsigned int getWidth() const { return (unsigned int)mDesc.Width; }
+ PX_INLINE unsigned int getHeight() const { return (unsigned int)mDesc.Height; }
+ PX_INLINE unsigned int getDepth() const { return (unsigned int)mDesc.Depth; }
+ PX_INLINE CUarray_format getFormat() const { return mDesc.Format; }
+ PX_INLINE unsigned int getNumChannels() const { return mDesc.NumChannels; }
+
+ PX_INLINE bool hasOwnership() const { return mHasOwnership; }
+
+ PX_INLINE const CUDA_ARRAY3D_DESCRIPTOR& getDesc() const { return mDesc; }
+
+ PX_INLINE size_t getByteSize() const
+ {
+ size_t size = mDesc.Width * mElemSize;
+ if (mDesc.Height > 0) size *= mDesc.Height;
+ if (mDesc.Depth > 0) size *= mDesc.Depth;
+ return size;
+ }
+
+private:
+ CUarray mCuArray;
+ bool mHasOwnership;
+ unsigned int mElemSize;
+ CUDA_ARRAY3D_DESCRIPTOR mDesc;
+};
+
+#endif //__CUDACC__
+
+}
+} // end namespace nvidia::apex
+
+#endif //APEX_CUDA_DEFS_H
diff --git a/APEX_1.4/common/include/ApexCudaProfile.h b/APEX_1.4/common/include/ApexCudaProfile.h
new file mode 100644
index 00000000..05ee188e
--- /dev/null
+++ b/APEX_1.4/common/include/ApexCudaProfile.h
@@ -0,0 +1,131 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+
+#ifndef __APEX_CUDA_KERNEL_MANAGER__
+#define __APEX_CUDA_KERNEL_MANAGER__
+
+#include "ApexDefs.h"
+#include "CudaProfileManager.h"
+
+#include "PsMemoryBuffer.h"
+#include "ApexString.h"
+#include "SceneIntl.h"
+#include "PsMutex.h"
+
+namespace nvidia
+{
+namespace apex
+{
+
+class ApexCudaObj;
+class ApexCudaFunc;
+class ApexCudaProfileManager;
+
+class ApexCudaProfileSession
+{
+ struct ProfileData
+ {
+ uint32_t id;
+ void* start;
+ void* stop;
+ };
+public:
+ ApexCudaProfileSession();
+ ~ApexCudaProfileSession();
+
+ PX_INLINE void init(ApexCudaProfileManager* manager)
+ {
+ mManager = manager;
+ }
+ void nextFrame();
+ void start();
+ bool stopAndSave();
+ uint32_t getProfileId(const char* name, const char* moduleName);
+
+ void onFuncStart(uint32_t id, void* stream);
+ void onFuncFinish(uint32_t id, void* stream);
+
+protected:
+ float flushProfileInfo(ProfileData& pd);
+
+ ApexCudaProfileManager* mManager;
+ void* mTimer;
+ nvidia::PsMemoryBuffer mMemBuf;
+ nvidia::Mutex mLock;
+ Array <ProfileData> mProfileDataList;
+ float mFrameStart;
+ float mFrameFinish;
+};
+
+/** Manages profiling of CUDA kernels: selects which kernels to profile and drives the profiling session.
+ */
+class ApexCudaProfileManager : public CudaProfileManager, public UserAllocated
+{
+public:
+ struct KernelInfo
+ {
+ ApexSimpleString functionName;
+ ApexSimpleString moduleName;
+ uint32_t id;
+
+ KernelInfo(const char* functionName, const char* moduleName, uint32_t id = 0)
+ : functionName(functionName), moduleName(moduleName), id(id) {}
+
+ bool operator!= (const KernelInfo& ki)
+ {
+ return (this->functionName != "*" && this->functionName != ki.functionName)
+ || (this->moduleName != ki.moduleName);
+ }
+ };
+
+ ApexCudaProfileManager();
+
+ virtual ~ApexCudaProfileManager();
+
+ PX_INLINE void setInternalApexScene(SceneIntl* scene)
+ {
+ mApexScene = scene;
+ }
+ void nextFrame();
+
+ // interface for CudaProfileManager
+ PX_INLINE void setPath(const char* path)
+ {
+ mPath = ApexSimpleString(path);
+ enable(false);
+ }
+ void setKernel(const char* functionName, const char* moduleName);
+ PX_INLINE void setTimeFormat(TimeFormat tf)
+ {
+ mTimeFormat = tf;
+ }
+ void enable(bool state);
+ PX_INLINE bool isEnabled() const
+ {
+ return mState;
+ }
+
+private:
+ bool mState;
+ uint32_t mSessionCount;
+ TimeFormat mTimeFormat;
+ uint32_t mReservedId;
+ ApexSimpleString mPath;
+ Array <KernelInfo> mKernels;
+ ApexCudaProfileSession mSession;
+ SceneIntl* mApexScene;
+ friend class ApexCudaProfileSession;
+};
+
+}
+} // namespace nvidia::apex
+
+#endif // __APEX_CUDA_KERNEL_MANAGER__
diff --git a/APEX_1.4/common/include/ApexCudaSource.h b/APEX_1.4/common/include/ApexCudaSource.h
new file mode 100644
index 00000000..a5caeed7
--- /dev/null
+++ b/APEX_1.4/common/include/ApexCudaSource.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+
+#ifndef __APEX_CUDA_SOURCE_H__
+#define __APEX_CUDA_SOURCE_H__
+
+
+#undef APEX_CUDA_TEXTURE_1D
+#undef APEX_CUDA_TEXTURE_2D
+#undef APEX_CUDA_TEXTURE_3D
+#undef APEX_CUDA_TEXTURE_3D_FILTER
+#undef APEX_CUDA_SURFACE_1D
+#undef APEX_CUDA_SURFACE_2D
+#undef APEX_CUDA_SURFACE_3D
+#undef APEX_CUDA_STORAGE_SIZE
+#undef APEX_CUDA_FREE_KERNEL
+#undef APEX_CUDA_FREE_KERNEL_2D
+#undef APEX_CUDA_FREE_KERNEL_3D
+#undef APEX_CUDA_SYNC_KERNEL
+#undef APEX_CUDA_BOUND_KERNEL
+
+#define __APEX_CUDA_OBJ(name) initCudaObj( APEX_CUDA_OBJ_NAME(name) );
+
+#define APEX_CUDA_TEXTURE_1D(name, format) __APEX_CUDA_OBJ(name)
+#define APEX_CUDA_TEXTURE_2D(name, format) __APEX_CUDA_OBJ(name)
+#define APEX_CUDA_TEXTURE_3D(name, format) __APEX_CUDA_OBJ(name)
+#define APEX_CUDA_TEXTURE_3D_FILTER(name, format, filter) __APEX_CUDA_OBJ(name)
+
+#define APEX_CUDA_SURFACE_1D(name) __APEX_CUDA_OBJ(name)
+#define APEX_CUDA_SURFACE_2D(name) __APEX_CUDA_OBJ(name)
+#define APEX_CUDA_SURFACE_3D(name) __APEX_CUDA_OBJ(name)
+
+#define APEX_CUDA_STORAGE_SIZE(name, size) __APEX_CUDA_OBJ(name)
+
+#define APEX_CUDA_FREE_KERNEL(warps, name, argseq) __APEX_CUDA_OBJ(name)
+#define APEX_CUDA_FREE_KERNEL_2D(warps, name, argseq) __APEX_CUDA_OBJ(name)
+#define APEX_CUDA_FREE_KERNEL_3D(warps, name, argseq) __APEX_CUDA_OBJ(name)
+#define APEX_CUDA_SYNC_KERNEL(warps, name, argseq) __APEX_CUDA_OBJ(name)
+#define APEX_CUDA_BOUND_KERNEL(warps, name, argseq) __APEX_CUDA_OBJ(name)
+
+
+#endif //__APEX_CUDA_SOURCE_H__
diff --git a/APEX_1.4/common/include/ApexCudaTest.h b/APEX_1.4/common/include/ApexCudaTest.h
new file mode 100644
index 00000000..43a0c6ea
--- /dev/null
+++ b/APEX_1.4/common/include/ApexCudaTest.h
@@ -0,0 +1,363 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+
+#ifndef __APEX_CUDA_TEST__
+#define __APEX_CUDA_TEST__
+
+#include "ApexDefs.h"
+#include "CudaTestManager.h"
+
+#include "PsMemoryBuffer.h"
+#include "ApexString.h"
+#include "ApexMirroredArray.h"
+#include "SceneIntl.h"
+
+#include "ApexCudaDefs.h"
+
+namespace nvidia
+{
+namespace apex
+{
+
+struct ApexCudaMemRefBase;
+class ApexCudaObj;
+class ApexCudaFunc;
+struct ApexCudaFuncParams;
+class ApexCudaTexRef;
+class ApexCudaSurfRef;
+
+const uint32_t ApexCudaTestFileVersion = 103;
+
+namespace apexCudaTest
+{
+
+struct MemRef
+{
+ ApexSimpleString name;
+ const void* gpuPtr;
+ size_t size;
+ int32_t dataOffset;
+ uint32_t bufferOffset;
+	uint32_t fpType; // floating-point type: 0 - not a float, 4 - float, 8 - double
+
+ MemRef(const void* gpuPtr, size_t size, int32_t dataOffset, uint32_t bufferOffset, uint32_t fpType = 0)
+ : gpuPtr(gpuPtr), size(size), dataOffset(dataOffset), bufferOffset(bufferOffset), fpType(fpType) {}
+};
+
+enum ObjTypeEnum
+{
+ OBJ_TYPE_NONE = 0,
+ OBJ_TYPE_TEX_REF_MEM = 1,
+ OBJ_TYPE_CONST_MEM = 2,
+ OBJ_TYPE_SURF_REF = 4,
+ OBJ_TYPE_TEX_REF_ARR = 5
+};
+
+enum KernelTypeEnum
+{
+ KT_SYNC,
+ KT_FREE,
+ KT_FREE2D,
+ KT_FREE3D,
+ KT_BOUND
+};
+
+}
+
+/** Reads a CUDA kernel context from the specified file, runs the kernel, and compares the output with the results stored in the file
+*/
+class ApexCudaTestKernelContextReader : public UserAllocated
+{
+ struct Dim3
+ {
+ int x,y,z;
+ };
+
+ struct TexRef
+ {
+ ApexCudaTexRef* cudaTexRef;
+ uint32_t memRefIdx;
+ ApexCudaArray* cudaArray;
+ };
+
+ struct SurfRef
+ {
+ ApexCudaSurfRef* cudaSurfRef;
+ ApexCudaArray* cudaArray;
+ ApexCudaMemFlags::Enum flags;
+ };
+
+ struct ArrayRef
+ {
+ ApexSimpleString name;
+ ApexCudaArray* cudaArray;
+ const uint8_t* bufferPtr;
+ uint32_t size;
+
+ ArrayRef(const char* name, ApexCudaArray* cudaArray, const uint8_t* bufferPtr, uint32_t size)
+ {
+ this->name = name;
+ this->cudaArray = cudaArray;
+ this->bufferPtr = bufferPtr;
+ this->size = size;
+ }
+ };
+
+ struct ParamRef
+ {
+ ApexSimpleString name;
+ uint32_t value;
+ };
+
+public:
+ ApexCudaTestKernelContextReader(const char* path, SceneIntl* scene);
+ ~ApexCudaTestKernelContextReader();
+
+ bool runKernel();
+
+private:
+ ApexCudaArray* loadCudaArray();
+
+ void loadContext(ApexCudaFuncParams& params);
+ void loadTexRef(uint32_t& memOffset, bool bBindToArray);
+ void loadSurfRef();
+ void loadConstMem();
+ uint32_t getParamSize();
+ void loadParam(uint32_t& memOffset, ApexCudaFuncParams& params);
+
+ bool compare(const uint8_t* resData, const uint8_t* refData, size_t size, uint32_t fpType, const char* name);
+ void dumpParams(char* str);
+
+ nvidia::PsMemoryBuffer* mMemBuf;
+
+ uint32_t mCudaObjOffset;
+ uint32_t mParamOffset;
+
+ int mCuOffset;
+ void* mCuStream;
+
+ ApexSimpleString mName;
+ ApexSimpleString mModuleName;
+ uint32_t mFrame;
+ uint32_t mCallPerFrame;
+
+ uint32_t mFuncInstId;
+ uint32_t mSharedSize;
+ Dim3 mBlockDim;
+ Dim3 mGridDim;
+ apexCudaTest::KernelTypeEnum mKernelType;
+ uint32_t mThreadCount[3];
+ uint32_t mBlockCountY;
+
+ ApexCudaObj* mHeadCudaObj;
+ ApexCudaFunc* mFunc;
+
+ SceneIntl* mApexScene;
+ Array <uint8_t*> mArgSeq;
+ ApexMirroredArray <uint8_t> mTmpArray;
+ PxGpuCopyDescQueue mCopyQueue;
+
+ Array <apexCudaTest::MemRef> mInMemRefs;
+ Array <apexCudaTest::MemRef> mOutMemRefs;
+ Array <ArrayRef> mInArrayRefs;
+ Array <ArrayRef> mOutArrayRefs;
+
+ Array <TexRef> mTexRefs;
+ Array <SurfRef> mSurfRefs;
+
+ uint32_t mCudaArrayCount;
+ ApexCudaArray* mCudaArrayList;
+
+ Array <ParamRef> mParamRefs;
+};
+
+/** Extracts CUDA kernel context data about a CUDA kernel from a CudaModuleScene and saves it to the specified file
+*/
+class ApexCudaTestKernelContext : public UserAllocated
+{
+ struct ArrayRef
+ {
+ CUarray cuArray;
+ uint32_t bufferOffset;
+
+ ArrayRef(CUarray cuArray, uint32_t bufferOffset)
+ {
+ this->cuArray = cuArray;
+ this->bufferOffset = bufferOffset;
+ }
+ };
+
+public:
+ ApexCudaTestKernelContext(const char* path, const char* functionName, const char* moduleName, uint32_t frame, uint32_t callPerFrame, bool isWriteForNonSuccessfulKernel, bool isKernelForSave);
+ ~ApexCudaTestKernelContext();
+
+ bool saveToFile();
+
+ PX_INLINE void setCuStream(void* cuStream)
+ {
+ mCuStream = cuStream;
+ }
+
+ void startObjList();
+ void finishObjList();
+
+ void setFreeKernel(uint32_t threadCount);
+ void setFreeKernel(uint32_t threadCountX, uint32_t threadCountY);
+ void setFreeKernel(uint32_t threadCountX, uint32_t threadCountY, uint32_t threadCountZ, uint32_t blockCountY);
+ void setBoundKernel(uint32_t threadCount);
+ void setSyncKernel();
+
+ void setBlockDim(uint32_t x, uint32_t y, uint32_t z);
+ void setGridDim(uint32_t x, uint32_t y);
+
+ void setSharedSize(uint32_t size);
+ void setFuncInstId(int id);
+
+ void addParam(const char* name, uint32_t align, const void *val, size_t size, int isMemRef = 0, int dataOffset = 0, uint32_t fpType = 0);
+ void addTexRef(const char* name, const void* mem, size_t size, CUarray arr);
+ void addSurfRef(const char* name, CUarray arr, ApexCudaMemFlags::Enum flags);
+ void addConstMem(const char* name, const void* mem, size_t size);
+ void setKernelStatus();
+
+private:
+ void copyMemRefs();
+ void copyArrayRefs();
+
+ uint32_t addCuArray(CUarray cuArray);
+
+ void completeCudaObjsBlock();
+ void completeCallParamsBlock();
+
+ PX_INLINE uint32_t advanceMemBuf(uint32_t size, uint32_t align = 4);
+ PX_INLINE void copyToMemBuf(const apexCudaTest::MemRef& memRef);
+ PX_INLINE void copyToMemBuf(const ArrayRef& arrayRef);
+
+ void* mCuStream;
+
+ uint32_t mVersion;
+ uint32_t mFrame;
+ uint32_t mCallPerFrame;
+ ApexSimpleString mName;
+ ApexSimpleString mErrorCode;
+ ApexSimpleString mModuleName;
+ ApexSimpleString mPath;
+ nvidia::PsMemoryBuffer mMemBuf;
+
+ uint32_t mCudaObjsOffset;
+ uint32_t mCallParamsOffset;
+
+ uint32_t mCudaObjsCounter;
+ uint32_t mCallParamsCounter;
+
+ Array <ArrayRef> mArrayRefs;
+ Array <apexCudaTest::MemRef> mMemRefs;
+
+ bool mIsCompleteContext;
+ bool mIsWriteForNonSuccessfulKernel;
+ bool mIsContextForSave;
+};
+
+
+/** Determines which kernels should be tested and directs the creation of ApexCudaTestKernelContext objects
+ */
+class ApexCudaTestManager : public CudaTestManager, public UserAllocated
+{
+ struct KernelInfo
+ {
+ ApexSimpleString functionName;
+ ApexSimpleString moduleName;
+ uint32_t callCount;
+
+ KernelInfo(const char* functionName, const char* moduleName)
+ : functionName(functionName), moduleName(moduleName), callCount(0) {}
+
+ bool operator!= (const KernelInfo& ki)
+ {
+ return this->functionName != ki.functionName || this->moduleName != ki.moduleName;
+ }
+ };
+
+public:
+
+ ApexCudaTestManager();
+ virtual ~ApexCudaTestManager();
+
+ PX_INLINE void setInternalApexScene(SceneIntl* scene)
+ {
+ mApexScene = scene;
+ }
+ void nextFrame();
+ ApexCudaTestKernelContext* isTestKernel(const char* functionName, const char* moduleName);
+
+ // interface for CudaTestManager
+ PX_INLINE void setWritePath(const char* path)
+ {
+ mPath = ApexSimpleString(path);
+ }
+ void setWriteForFunction(const char* functionName, const char* moduleName);
+
+ PX_INLINE void setMaxSamples(uint32_t maxSamples)
+ {
+ mMaxSamples = maxSamples;
+ }
+ void setFrames(uint32_t numFrames, const uint32_t* frames)
+ {
+ for(uint32_t i = 0; i < numFrames && mSampledFrames.size() < mMaxSamples; i++)
+ {
+ if (frames == NULL) // write next numFrames frames after current
+ {
+ mSampledFrames.pushBack(mCurrentFrame + i + 1);
+ }
+ else
+ {
+ mSampledFrames.pushBack(frames[i]);
+ }
+ }
+ }
+ void setFramePeriod(uint32_t period)
+ {
+ mFramePeriod = period;
+ }
+ void setCallPerFrameMaxCount(uint32_t cpfMaxCount)
+ {
+ mCallPerFrameMaxCount = cpfMaxCount;
+ }
+ void setWriteForNotSuccessfulKernel(bool flag)
+ {
+ mIsWriteForNonSuccessfulKernel = flag;
+ }
+/* void setCallPerFrameSeries(uint32_t callsCount, const uint32_t* calls)
+ {
+ for(uint32_t i = 0; i < callsCount && mSampledCallsPerFrame.size() < mCallPerFrameMaxCount; i++)
+ {
+ mSampledCallsPerFrame.pushBack(calls[i]);
+ }
+ }*/
+ bool runKernel(const char* path);
+
+private:
+ SceneIntl* mApexScene;
+ uint32_t mCurrentFrame;
+ uint32_t mMaxSamples;
+ uint32_t mFramePeriod;
+ uint32_t mCallPerFrameMaxCount;
+ bool mIsWriteForNonSuccessfulKernel;
+ ApexSimpleString mPath;
+ Array <uint32_t> mSampledFrames;
+ //Array <uint32_t> mSampledCallsPerFrame;
+ Array <KernelInfo> mKernels;
+ Array <ApexCudaTestKernelContext*> mContexts;
+};
+
+}
+} // namespace nvidia::apex
+
+#endif // __APEX_CUDA_TEST__
diff --git a/APEX_1.4/common/include/ApexCudaWrapper.h b/APEX_1.4/common/include/ApexCudaWrapper.h
new file mode 100644
index 00000000..c455fbaf
--- /dev/null
+++ b/APEX_1.4/common/include/ApexCudaWrapper.h
@@ -0,0 +1,1232 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+
+#ifndef __APEX_CUDA_WRAPPER_H__
+#define __APEX_CUDA_WRAPPER_H__
+
+#include <cuda.h>
+#include "ApexCutil.h"
+#include "vector_types.h"
+#include "ApexMirroredArray.h"
+#include "InplaceStorage.h"
+#include "PsMutex.h"
+#include "ApexCudaTest.h"
+#include "ApexCudaProfile.h"
+#include "ApexCudaDefs.h"
+
+namespace nvidia
+{
+namespace apex
+{
+
+struct DimGrid
+{
+ uint32_t x, y;
+
+ DimGrid() {}
+ DimGrid(uint32_t x, uint32_t y = 1)
+ {
+ this->x = x;
+ this->y = y;
+ }
+};
+struct DimBlock
+{
+ uint32_t x, y, z;
+
+ DimBlock() {}
+ DimBlock(uint32_t x, uint32_t y = 1, uint32_t z = 1)
+ {
+ this->x = x;
+ this->y = y;
+ this->z = z;
+ }
+};
+
+struct ApexKernelConfig
+{
+ uint32_t fixedSharedMemDWords;
+ uint32_t sharedMemDWordsPerWarp;
+ DimBlock blockDim;
+ uint32_t minWarpsPerBlock;
+ uint32_t maxGridSize;
+
+ ApexKernelConfig() { fixedSharedMemDWords = sharedMemDWordsPerWarp = 0; blockDim = DimBlock(0, 0, 0); minWarpsPerBlock = 1; maxGridSize = MAX_BOUND_BLOCKS; }
+ ApexKernelConfig(uint32_t fixedSharedMemDWords, uint32_t sharedMemDWordsPerWarp, int fixedWarpsPerBlock = 0, uint32_t minWarpsPerBlock = 1, uint32_t maxGridSize = MAX_BOUND_BLOCKS)
+ {
+ this->fixedSharedMemDWords = fixedSharedMemDWords;
+ this->sharedMemDWordsPerWarp = sharedMemDWordsPerWarp;
+ this->blockDim = DimBlock(fixedWarpsPerBlock * WARP_SIZE);
+ this->minWarpsPerBlock = minWarpsPerBlock;
+ this->maxGridSize = maxGridSize;
+ }
+ ApexKernelConfig(uint32_t fixedSharedMemDWords, uint32_t sharedMemDWordsPerWarp, const DimBlock& blockDim)
+ {
+ this->fixedSharedMemDWords = fixedSharedMemDWords;
+ this->sharedMemDWordsPerWarp = sharedMemDWordsPerWarp;
+ this->blockDim = blockDim;
+ this->minWarpsPerBlock = 1;
+ this->maxGridSize = MAX_BOUND_BLOCKS;
+ }
+};
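+
+// Usage sketch (shared-memory sizes are illustrative): a kernel needing 16 DWords of fixed shared
+// memory plus 32 DWords per warp, letting evalLaunchParams pick warpsPerBlock from the device traits:
+//     ApexKernelConfig autoConfig(16, 32);
+// or a kernel with an explicitly fixed 8x8x4 block shape:
+//     ApexKernelConfig fixedConfig(0, 0, DimBlock(8, 8, 4));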
+
+struct ApexCudaMemRefBase
+{
+ typedef ApexCudaMemFlags::Enum Intent;
+
+ const void* ptr;
+ size_t size; //size in bytes
+ int32_t offset; //data offset for ptr
+ Intent intent;
+
+ ApexCudaMemRefBase(const void* ptr, size_t byteSize, int32_t offset, Intent intent)
+ : ptr(ptr), size(byteSize), offset(offset), intent(intent) {}
+ virtual ~ApexCudaMemRefBase() {}
+};
+
+template <class T>
+struct ApexCudaMemRef : public ApexCudaMemRefBase
+{
+ ApexCudaMemRef(T* ptr, size_t byteSize, Intent intent = ApexCudaMemFlags::IN_OUT)
+ : ApexCudaMemRefBase(ptr, byteSize, 0, intent) {}
+
+ ApexCudaMemRef(T* ptr, size_t byteSize, int32_t offset, Intent intent)
+ : ApexCudaMemRefBase(ptr, byteSize, offset, intent) {}
+
+ inline T* getPtr() const
+ {
+ return (T*)ptr;
+ }
+
+ virtual ~ApexCudaMemRef() {}
+};
+
+template <class T>
+inline ApexCudaMemRef<T> createApexCudaMemRef(T* ptr, size_t size, ApexCudaMemRefBase::Intent intent = ApexCudaMemFlags::IN_OUT)
+{
+ return ApexCudaMemRef<T>(ptr, sizeof(T) * size, intent);
+}
+
+template <class T>
+inline ApexCudaMemRef<T> createApexCudaMemRef(T* ptr, size_t size, int32_t offset, ApexCudaMemRefBase::Intent intent)
+{
+ return ApexCudaMemRef<T>(ptr, sizeof(T) * size, sizeof(T) * offset, intent);
+}
+
+template <class T>
+inline ApexCudaMemRef<T> createApexCudaMemRef(const ApexMirroredArray<T>& ma, ApexCudaMemRefBase::Intent intent = ApexCudaMemFlags::IN_OUT)
+{
+ return ApexCudaMemRef<T>(ma.getGpuPtr(), ma.getByteSize(), intent);
+}
+
+template <class T>
+inline ApexCudaMemRef<T> createApexCudaMemRef(const ApexMirroredArray<T>& ma, size_t size, ApexCudaMemRefBase::Intent intent = ApexCudaMemFlags::IN_OUT)
+{
+ return ApexCudaMemRef<T>(ma.getGpuPtr(), sizeof(T) * size, intent);
+}
+
+template <class T>
+inline ApexCudaMemRef<T> createApexCudaMemRef(const ApexMirroredArray<T>& ma, size_t size, int32_t offset, ApexCudaMemRefBase::Intent intent = ApexCudaMemFlags::IN_OUT)
+{
+ return ApexCudaMemRef<T>(ma.getGpuPtr(), sizeof(T) * size, sizeof(T) * offset, intent);
+}
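+
+// Usage sketch (the mirrored array name is hypothetical): pass the first count elements of an
+// ApexMirroredArray<float> to a kernel as an input-only argument:
+//     ApexCudaMemRef<float> inRef = createApexCudaMemRef(positions, count, ApexCudaMemFlags::IN);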
+
+#ifndef ALIGN_OFFSET
+#define ALIGN_OFFSET(offset, alignment) (offset) = ((offset) + (alignment) - 1) & ~((alignment) - 1)
+#endif
+
+#define CUDA_MAX_PARAM_SIZE 256
+
+
+class ApexCudaTestKernelContext;
+
+
+class ApexCudaConstStorage;
+
+class ApexCudaModule
+{
+public:
+ ApexCudaModule()
+ : mCuModule(0), mStorage(0)
+ {
+ }
+
+ PX_INLINE void init(const void* image)
+ {
+ if (mCuModule == 0)
+ {
+ CUT_SAFE_CALL(cuModuleLoadDataEx(&mCuModule, image, 0, NULL, NULL));
+ }
+ }
+ PX_INLINE void release()
+ {
+ if (mCuModule != 0)
+ {
+ CUT_SAFE_CALL(cuModuleUnload(mCuModule));
+ mCuModule = 0;
+ }
+ }
+
+ PX_INLINE bool isValid() const
+ {
+ return (mCuModule != 0);
+ }
+
+ PX_INLINE CUmodule getCuModule() const
+ {
+ return mCuModule;
+ }
+
+ PX_INLINE ApexCudaConstStorage* getStorage() const
+ {
+ return mStorage;
+ }
+
+private:
+ CUmodule mCuModule;
+ ApexCudaConstStorage* mStorage;
+
+ friend class ApexCudaConstStorage;
+};
+
+class ApexCudaObjManager;
+
+class ApexCudaObj
+{
+ friend class ApexCudaObjManager;
+ ApexCudaObj* mObjListNext;
+
+protected:
+ const char* mName;
+ ApexCudaModule* mCudaModule;
+ ApexCudaObjManager* mManager;
+
+ ApexCudaObj(const char* name) : mObjListNext(0), mName(name), mCudaModule(NULL), mManager(NULL) {}
+ virtual ~ApexCudaObj() {}
+
+ PX_INLINE void init(ApexCudaObjManager* manager, ApexCudaModule* cudaModule);
+
+public:
+ const char* getName() const
+ {
+ return mName;
+ }
+ const ApexCudaModule* getCudaModule() const
+ {
+ return mCudaModule;
+ }
+
+ enum ApexCudaObjType
+ {
+ UNKNOWN,
+ FUNCTION,
+ TEXTURE,
+ CONST_STORAGE,
+ SURFACE
+ };
+ virtual ApexCudaObjType getType()
+ {
+ return UNKNOWN;
+ }
+
+ PX_INLINE ApexCudaObj* next()
+ {
+ return mObjListNext;
+ }
+ virtual void release() = 0;
+ virtual void formContext(ApexCudaTestKernelContext*) = 0;
+};
+
+struct ApexCudaDeviceTraits
+{
+ uint32_t mMaxSharedMemPerBlock;
+ uint32_t mMaxSharedMemPerSM;
+ uint32_t mMaxRegistersPerSM;
+ uint32_t mMaxThreadsPerSM;
+
+ uint32_t mBlocksPerSM;
+ uint32_t mBlocksPerSM_2D;
+ uint32_t mBlocksPerSM_3D;
+ uint32_t mMaxBlocksPerGrid;
+};
+
+class ApexCudaObjManager
+{
+ ApexCudaObj* mObjListHead;
+
+ Module* mNxModule;
+ ApexCudaTestManager* mCudaTestManager;
+ PxGpuDispatcher* mGpuDispatcher;
+
+ ApexCudaDeviceTraits mDeviceTraits;
+
+protected:
+ friend class ApexCudaFunc;
+ ApexCudaProfileSession* mCudaProfileSession;
+
+public:
+ ApexCudaObjManager() : mObjListHead(0), mNxModule(0), mCudaTestManager(0), mGpuDispatcher(0), mCudaProfileSession(0) {}
+
+ void init(Module* nxModule, ApexCudaTestManager* cudaTestManager, PxGpuDispatcher* gpuDispatcher)
+ {
+ mNxModule = nxModule;
+ mCudaTestManager = cudaTestManager;
+ mGpuDispatcher = gpuDispatcher;
+
+ //get device traits
+ CUdevice device;
+ CUT_SAFE_CALL(cuCtxGetDevice(&device));
+ CUT_SAFE_CALL(cuDeviceGetAttribute((int*)&mDeviceTraits.mMaxSharedMemPerBlock, CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK, device));
+ CUT_SAFE_CALL(cuDeviceGetAttribute((int*)&mDeviceTraits.mMaxSharedMemPerSM, CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_MULTIPROCESSOR, device));
+ CUT_SAFE_CALL(cuDeviceGetAttribute((int*)&mDeviceTraits.mMaxRegistersPerSM, CU_DEVICE_ATTRIBUTE_MAX_REGISTERS_PER_MULTIPROCESSOR, device));
+ CUT_SAFE_CALL(cuDeviceGetAttribute((int*)&mDeviceTraits.mMaxThreadsPerSM, CU_DEVICE_ATTRIBUTE_MAX_THREADS_PER_MULTIPROCESSOR, device));
+
+#ifdef APEX_CUDA_FORCED_BLOCKS
+ mDeviceTraits.mBlocksPerSM = (APEX_CUDA_FORCED_BLOCKS > 32) ? 2u : 1u;
+ mDeviceTraits.mMaxBlocksPerGrid = APEX_CUDA_FORCED_BLOCKS;
+#else
+ int computeMajor;
+ int smCount;
+ CUT_SAFE_CALL(cuDeviceGetAttribute(&smCount, CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT, device));
+ CUT_SAFE_CALL(cuDeviceGetAttribute(&computeMajor, CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR, device));
+
+ mDeviceTraits.mBlocksPerSM = 2;//(computeMajor >= 5) ? 2u : 1u;
+ mDeviceTraits.mMaxBlocksPerGrid = uint32_t(smCount) * mDeviceTraits.mBlocksPerSM;
+#endif
+ mDeviceTraits.mBlocksPerSM_2D = 4;
+ mDeviceTraits.mBlocksPerSM_3D = 4;
+ }
+
+ PX_INLINE const ApexCudaDeviceTraits& getDeviceTraits() const
+ {
+ return mDeviceTraits;
+ }
+
+ PX_INLINE void addToObjList(ApexCudaObj* obj)
+ {
+ obj->mObjListNext = mObjListHead;
+ mObjListHead = obj;
+ }
+
+ PX_INLINE ApexCudaObj* getObjListHead()
+ {
+ return mObjListHead;
+ }
+
+ void releaseAll()
+ {
+ for (ApexCudaObj* obj = mObjListHead; obj != 0; obj = obj->mObjListNext)
+ {
+ obj->release();
+ }
+ }
+
+ PX_INLINE Module* getModule() const
+ {
+ return mNxModule;
+ }
+ PX_INLINE ApexCudaTestManager* getCudaTestManager() const
+ {
+ return mCudaTestManager;
+ }
+ PX_INLINE PxGpuDispatcher* getGpuDispatcher() const
+ {
+ return mGpuDispatcher;
+ }
+
+public:
+ virtual void onBeforeLaunchApexCudaFunc(const ApexCudaFunc& func, CUstream stream) = 0;
+ virtual void onAfterLaunchApexCudaFunc(const ApexCudaFunc& func, CUstream stream) = 0;
+
+};
+
+PX_INLINE void ApexCudaObj::init(ApexCudaObjManager* manager, ApexCudaModule* cudaModule)
+{
+ mManager = manager;
+ mManager->addToObjList(this);
+ mCudaModule = cudaModule;
+}
+
+
+class ApexCudaTexRef : public ApexCudaObj
+{
+public:
+ void init(ApexCudaObjManager* manager, CUtexref texRef, ApexCudaModule* cudaModule, CUarray_format format, int numChannels, int dim, int flags)
+ {
+ ApexCudaObj::init(manager, cudaModule);
+
+ mTexRef = texRef;
+ mDim = dim;
+ mFormat = format;
+ mNumChannels = numChannels;
+ mFlags = flags;
+ mIsBinded = false;
+
+ CUT_SAFE_CALL(cuTexRefSetFilterMode(mTexRef, mFilterMode));
+
+ for (int d = 0; d < dim; ++d)
+ {
+ CUT_SAFE_CALL(cuTexRefSetAddressMode(mTexRef, d, CU_TR_ADDRESS_MODE_CLAMP));
+ }
+ }
+
+ ApexCudaTexRef(const char* name, CUfilter_mode filterMode = CU_TR_FILTER_MODE_POINT)
+ : ApexCudaObj(name), mTexRef(0), mFilterMode(filterMode)
+ {
+ }
+
+ void setNormalizedCoords()
+ {
+ mFlags |= CU_TRSF_NORMALIZED_COORDINATES;
+ }
+
+ void bindTo(const void* ptr, size_t bytes, size_t* retByteOffset = 0)
+ {
+ CUT_SAFE_CALL(cuTexRefSetFormat(mTexRef, mFormat, mNumChannels));
+ CUT_SAFE_CALL(cuTexRefSetFlags(mTexRef, (uint32_t)mFlags));
+
+ size_t byteOffset;
+ CUT_SAFE_CALL(cuTexRefSetAddress(&byteOffset, mTexRef, CUT_TODEVICE(ptr), static_cast<unsigned int>(bytes)));
+
+ if (retByteOffset != 0)
+ {
+ *retByteOffset = byteOffset;
+ }
+ else
+ {
+ PX_ASSERT(byteOffset == 0);
+ }
+
+ mBindedSize = bytes;
+ mBindedPtr = ptr;
+ mBindedArray = NULL;
+ mIsBinded = true;
+ }
+
+ template <typename T>
+ void bindTo(ApexMirroredArray<T>& mem, size_t* retByteOffset = 0)
+ {
+ bindTo(mem.getGpuPtr(), mem.getByteSize(), retByteOffset);
+ }
+
+ template <typename T>
+ void bindTo(ApexMirroredArray<T>& mem, size_t size, size_t* retByteOffset = 0)
+ {
+ bindTo(mem.getGpuPtr(), sizeof(T) * size, retByteOffset);
+ }
+
+ void bindTo(CUarray cuArray)
+ {
+ CUT_SAFE_CALL(cuTexRefSetFlags(mTexRef, (uint32_t)mFlags));
+
+ CUT_SAFE_CALL(cuTexRefSetArray(mTexRef, cuArray, CU_TRSA_OVERRIDE_FORMAT));
+
+ mBindedSize = 0;
+ mBindedPtr = NULL;
+ mBindedArray = cuArray;
+ mIsBinded = true;
+ }
+
+ void bindTo(const ApexCudaArray& cudaArray)
+ {
+ bindTo(cudaArray.getCuArray());
+ }
+
+ void unbind()
+ {
+ size_t byteOffset;
+ CUT_SAFE_CALL(cuTexRefSetAddress(&byteOffset, mTexRef, CUdeviceptr(0), 0));
+ mIsBinded = false;
+ }
+
+ virtual ApexCudaObjType getType()
+ {
+ return TEXTURE;
+ }
+
+ virtual void release() {}
+
+ virtual void formContext(ApexCudaTestKernelContext* context)
+ {
+ if (mIsBinded)
+ {
+ context->addTexRef(mName, mBindedPtr, mBindedSize, mBindedArray);
+ }
+ }
+
+private:
+ CUtexref mTexRef;
+ CUfilter_mode mFilterMode;
+
+ CUarray_format mFormat;
+ int mNumChannels;
+ int mDim;
+ int mFlags;
+
+ bool mIsBinded;
+ size_t mBindedSize;
+ const void* mBindedPtr;
+ CUarray mBindedArray;
+};
+
+
+class ApexCudaSurfRef : public ApexCudaObj
+{
+public:
+ void init(ApexCudaObjManager* manager, CUsurfref surfRef, ApexCudaModule* cudaModule)
+ {
+ ApexCudaObj::init(manager, cudaModule);
+
+ mSurfRef = surfRef;
+
+ mIsBinded = false;
+ }
+
+ ApexCudaSurfRef(const char* name) : ApexCudaObj(name), mSurfRef(0)
+ {
+ }
+
+ void bindTo(CUarray cuArray, ApexCudaMemFlags::Enum flags)
+ {
+ CUDA_ARRAY3D_DESCRIPTOR desc;
+ CUT_SAFE_CALL(cuArray3DGetDescriptor(&desc, cuArray));
+
+ CUT_SAFE_CALL(cuSurfRefSetArray(mSurfRef, cuArray, 0));
+
+ mIsBinded = true;
+ mBindedArray = cuArray;
+ mBindedFlags = flags;
+ }
+
+ void bindTo(const ApexCudaArray& cudaArray, ApexCudaMemFlags::Enum flags)
+ {
+ bindTo(cudaArray.getCuArray(), flags);
+ }
+
+ void unbind()
+ {
+ mIsBinded = false;
+ }
+
+ virtual ApexCudaObjType getType()
+ {
+ return SURFACE;
+ }
+
+ virtual void release() {}
+
+ virtual void formContext(ApexCudaTestKernelContext* context)
+ {
+ if (mIsBinded)
+ {
+ context->addSurfRef(mName, mBindedArray, mBindedFlags);
+ }
+ }
+
+private:
+ CUsurfref mSurfRef;
+
+ bool mIsBinded;
+ CUarray mBindedArray;
+ ApexCudaMemFlags::Enum mBindedFlags;
+};
+
+class ApexCudaTexRefScopeBind
+{
+private:
+ ApexCudaTexRefScopeBind& operator=(const ApexCudaTexRefScopeBind&);
+ ApexCudaTexRef& mTexRef;
+
+public:
+ ApexCudaTexRefScopeBind(ApexCudaTexRef& texRef, void* ptr, size_t bytes, size_t* retByteOffset = 0)
+ : mTexRef(texRef)
+ {
+ mTexRef.bindTo(ptr, bytes, retByteOffset);
+ }
+ template <typename T>
+ ApexCudaTexRefScopeBind(ApexCudaTexRef& texRef, ApexMirroredArray<T>& mem, size_t* retByteOffset = 0)
+ : mTexRef(texRef)
+ {
+ mTexRef.bindTo(mem, retByteOffset);
+ }
+ template <typename T>
+ ApexCudaTexRefScopeBind(ApexCudaTexRef& texRef, ApexMirroredArray<T>& mem, size_t size, size_t* retByteOffset = 0)
+ : mTexRef(texRef)
+ {
+ mTexRef.bindTo(mem, size, retByteOffset);
+ }
+ ApexCudaTexRefScopeBind(ApexCudaTexRef& texRef, const ApexCudaArray& cudaArray)
+ : mTexRef(texRef)
+ {
+ mTexRef.bindTo(cudaArray);
+ }
+ ~ApexCudaTexRefScopeBind()
+ {
+ mTexRef.unbind();
+ }
+};
+
+#define APEX_CUDA_TEXTURE_SCOPE_BIND(texRef, mem) ApexCudaTexRefScopeBind texRefScopeBind_##texRef (CUDA_OBJ(texRef), mem);
+#define APEX_CUDA_TEXTURE_SCOPE_BIND_SIZE(texRef, mem, size) ApexCudaTexRefScopeBind texRefScopeBind_##texRef (CUDA_OBJ(texRef), mem, size);
+#define APEX_CUDA_TEXTURE_SCOPE_BIND_PTR(texRef, ptr, count) ApexCudaTexRefScopeBind texRefScopeBind_##texRef (CUDA_OBJ(texRef), ptr, sizeof(*ptr) * count);
+#define APEX_CUDA_TEXTURE_BIND(texRef, mem) CUDA_OBJ(texRef).bindTo(mem);
+#define APEX_CUDA_TEXTURE_BIND_PTR(texRef, ptr, count) CUDA_OBJ(texRef).bindTo(ptr, sizeof(*ptr) * count);
+#define APEX_CUDA_TEXTURE_UNBIND(texRef) CUDA_OBJ(texRef).unbind();
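+
+// Usage sketch (texRefPositions and positions are hypothetical names): bind a mirrored array to a
+// texture reference for the lifetime of the enclosing scope; the destructor unbinds it automatically:
+//     {
+//         APEX_CUDA_TEXTURE_SCOPE_BIND(texRefPositions, positions);
+//         /* ... launch kernels that sample texRefPositions ... */
+//     }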
+
+
+class ApexCudaSurfRefScopeBind
+{
+private:
+ ApexCudaSurfRefScopeBind& operator=(const ApexCudaSurfRefScopeBind&);
+ ApexCudaSurfRef& mSurfRef;
+
+public:
+ ApexCudaSurfRefScopeBind(ApexCudaSurfRef& surfRef, ApexCudaArray& cudaArray, ApexCudaMemFlags::Enum flags)
+ : mSurfRef(surfRef)
+ {
+ mSurfRef.bindTo(cudaArray, flags);
+ }
+ ApexCudaSurfRefScopeBind(ApexCudaSurfRef& surfRef, CUarray cuArray, ApexCudaMemFlags::Enum flags)
+ : mSurfRef(surfRef)
+ {
+ mSurfRef.bindTo(cuArray, flags);
+ }
+ ~ApexCudaSurfRefScopeBind()
+ {
+ mSurfRef.unbind();
+ }
+};
+
+#define APEX_CUDA_SURFACE_SCOPE_BIND(surfRef, mem, flags) ApexCudaSurfRefScopeBind surfRefScopeBind_##surfRef (CUDA_OBJ(surfRef), mem, flags);
+#define APEX_CUDA_SURFACE_BIND(surfRef, mem, flags) CUDA_OBJ(surfRef).bindTo(mem, flags);
+#define APEX_CUDA_SURFACE_UNBIND(surfRef) CUDA_OBJ(surfRef).unbind();
+
+
+class ApexCudaVar : public ApexCudaObj
+{
+public:
+ size_t getSize() const
+ {
+ return mSize;
+ }
+
+ void init(ApexCudaObjManager* manager, ApexCudaModule* cudaModule, CUdeviceptr devPtr, size_t size, PxCudaContextManager* ctx)
+ {
+ ApexCudaObj::init(manager, cudaModule);
+
+ mDevPtr = devPtr;
+ mSize = size;
+ init(manager, ctx);
+ }
+
+ virtual void release() {}
+ virtual void formContext(ApexCudaTestKernelContext*) {}
+
+protected:
+ virtual void init(ApexCudaObjManager* , PxCudaContextManager*) = 0;
+
+ ApexCudaVar(const char* name) : ApexCudaObj(name), mDevPtr(0), mSize(0)
+ {
+ }
+
+protected:
+ CUdeviceptr mDevPtr;
+ size_t mSize;
+};
+
+
+class ApexCudaConstStorage : public ApexCudaVar, public InplaceStorage
+{
+public:
+ ApexCudaConstStorage(const char* nameVar, const char* nameTexRef)
+ : ApexCudaVar(nameVar), mCudaTexRef(nameTexRef), mStoreInTexture(false)
+ {
+ mStorageSize = 0;
+ mStoragePtr = 0;
+
+ mHostBuffer = 0;
+ mDeviceBuffer = 0;
+ }
+
+ virtual ApexCudaObjType getType()
+ {
+ return CONST_STORAGE;
+ }
+
+ virtual void formContext(ApexCudaTestKernelContext* context)
+ {
+ if (!mStoreInTexture && mHostBuffer != 0)
+ {
+ PX_ASSERT(mHostBuffer->getSize() >= ApexCudaVar::getSize());
+ void* hostPtr = reinterpret_cast<void*>(mHostBuffer->getPtr());
+ context->addConstMem(mName, hostPtr, ApexCudaVar::getSize());
+ }
+ }
+
+ virtual void init(ApexCudaObjManager* manager, PxCudaContextManager* ctx)
+ {
+ PX_ASSERT(mCudaModule != 0);
+ PX_ASSERT(mCudaModule->mStorage == 0);
+ mCudaModule->mStorage = this;
+
+ CUtexref cuTexRef;
+ CUT_SAFE_CALL(cuModuleGetTexRef(&cuTexRef, mCudaModule->getCuModule(), mCudaTexRef.getName()));
+
+ mCudaTexRef.init(manager, cuTexRef, mCudaModule, CU_AD_FORMAT_SIGNED_INT32, 1, 1, CU_TRSF_READ_AS_INTEGER);
+
+ //prealloc. host buffer for Apex Cuda Test framework
+ reallocHostBuffer(ctx, ApexCudaVar::getSize());
+ }
+
+ virtual void release()
+ {
+ InplaceStorage::release();
+
+ if (mDeviceBuffer != 0)
+ {
+ mDeviceBuffer->free();
+ mDeviceBuffer = 0;
+ }
+ if (mHostBuffer != 0)
+ {
+ mHostBuffer->free();
+ mHostBuffer = 0;
+ }
+
+ if (mStoragePtr != 0)
+ {
+ getAllocator().deallocate(mStoragePtr);
+ mStoragePtr = 0;
+ mStorageSize = 0;
+ }
+ }
+
+ bool copyToDevice(PxCudaContextManager* ctx, CUstream stream)
+ {
+ if (mStoragePtr == 0)
+ {
+ return false;
+ }
+
+ bool result = false;
+
+ InplaceStorage* storage = static_cast<InplaceStorage*>(this);
+ mMutex.lock();
+ if (storage->isChanged())
+ {
+ if (!reallocHostBuffer(ctx, mStorageSize))
+ {
+ return false;
+ }
+
+ CUdeviceptr copyDevPtr = 0;
+ if (mStoreInTexture)
+ {
+ if (mDeviceBuffer == 0)
+ {
+ mDeviceBuffer = ctx->getMemoryManager()->alloc(
+ PxCudaBufferType(PxCudaBufferMemorySpace::T_GPU, PxCudaBufferFlags::F_READ_WRITE),
+ mStorageSize);
+ if (mDeviceBuffer == 0)
+ {
+ APEX_INTERNAL_ERROR("ApexCudaConstStorage failed to allocate GPU Memory!");
+ return false;
+ }
+ }
+ else if (mDeviceBuffer->getSize() < mStorageSize)
+ {
+ mDeviceBuffer->realloc(mStorageSize);
+ }
+ copyDevPtr = mDeviceBuffer->getPtr();
+ }
+ else
+ {
+ if (mDeviceBuffer != 0)
+ {
+ mDeviceBuffer->free();
+ mDeviceBuffer = 0;
+ }
+ copyDevPtr = mDevPtr;
+ }
+
+ uint8_t* hostPtr = reinterpret_cast<uint8_t*>(mHostBuffer->getPtr());
+
+ size_t size = storage->mapTo(hostPtr);
+			// pad the size up to a multiple of 8 bytes
+ size = (size + 7) & ~7;
+ if (size > mStorageSize) size = mStorageSize;
+
+ CUT_SAFE_CALL(cuMemcpyHtoDAsync(copyDevPtr, hostPtr, size, stream));
+
+ storage->setUnchanged();
+ result = true;
+ }
+ mMutex.unlock();
+
+ return result;
+ }
+
+ PX_INLINE bool getStoreInTexture() const
+ {
+ return mStoreInTexture;
+ }
+
+ PX_INLINE void onBeforeLaunch()
+ {
+ if (mStoreInTexture)
+ {
+ mCudaTexRef.bindTo( mDeviceBuffer ? reinterpret_cast<void*>(mDeviceBuffer->getPtr()) : 0, mStorageSize );
+ }
+ }
+
+ PX_INLINE void onAfterLaunch()
+ {
+ if (mStoreInTexture)
+ {
+ mCudaTexRef.unbind();
+ }
+ }
+
+protected:
+ bool reallocHostBuffer(PxCudaContextManager* ctx, size_t size)
+ {
+ if (mHostBuffer == 0)
+ {
+ mHostBuffer = ctx->getMemoryManager()->alloc(
+ PxCudaBufferType(PxCudaBufferMemorySpace::T_PINNED_HOST, PxCudaBufferFlags::F_READ_WRITE),
+ size);
+ if (mHostBuffer == 0)
+ {
+ APEX_INTERNAL_ERROR("ApexCudaConstStorage failed to allocate Pinned Host Memory!");
+ return false;
+ }
+ }
+ else if (mHostBuffer->getSize() < size)
+ {
+ mHostBuffer->realloc(size);
+ }
+ return true;
+ }
+
+ virtual uint8_t* storageResizeBuffer(uint32_t newSize)
+ {
+ if (!mStoreInTexture && newSize > ApexCudaVar::getSize())
+ {
+#if 0
+ APEX_INTERNAL_ERROR("Out of CUDA constant memory");
+ PX_ALWAYS_ASSERT();
+ return 0;
+#else
+ //switch to texture
+ mStoreInTexture = true;
+#endif
+ }
+ else if (mStoreInTexture && newSize <= ApexCudaVar::getSize())
+ {
+ //switch back to const mem.
+ mStoreInTexture = false;
+ }
+
+ const uint32_t PageSize = 4096;
+ size_t allocSize = mStoreInTexture ? (newSize + (PageSize - 1)) & ~(PageSize - 1) : ApexCudaVar::getSize();
+
+ if (allocSize > mStorageSize)
+ {
+ uint8_t* allocStoragePtr = static_cast<uint8_t*>(getAllocator().allocate(allocSize, "ApexCudaConstStorage", __FILE__, __LINE__));
+ if (allocStoragePtr == 0)
+ {
+ APEX_INTERNAL_ERROR("ApexCudaConstStorage failed to allocate memory!");
+ return 0;
+ }
+ if (mStoragePtr != 0)
+ {
+ memcpy(allocStoragePtr, mStoragePtr, mStorageSize);
+ getAllocator().deallocate(mStoragePtr);
+ }
+ mStorageSize = allocSize;
+ mStoragePtr = allocStoragePtr;
+ }
+ return mStoragePtr;
+ }
+
+ virtual void storageLock()
+ {
+ mMutex.lock();
+ }
+ virtual void storageUnlock()
+ {
+ mMutex.unlock();
+ }
+
+private:
+ bool mStoreInTexture;
+ ApexCudaTexRef mCudaTexRef;
+
+ size_t mStorageSize;
+ uint8_t* mStoragePtr;
+
+ PxCudaBuffer* mHostBuffer;
+ PxCudaBuffer* mDeviceBuffer;
+
+ nvidia::Mutex mMutex;
+
+ friend class ApexCudaTestKernelContextReader;
+};
+
+typedef InplaceStorageGroup ApexCudaConstMemGroup;
+
+#define APEX_CUDA_CONST_MEM_GROUP_SCOPE(group) INPLACE_STORAGE_GROUP_SCOPE(group)
+
+
+
+struct ApexCudaFuncParams
+{
+ int mOffset;
+ char mParams[CUDA_MAX_PARAM_SIZE];
+
+ ApexCudaFuncParams() : mOffset(0) {}
+
+
+};
+
+class ApexCudaFunc : public ApexCudaObj
+{
+public:
+ PX_INLINE bool testNameMatch(const char* name) const
+ {
+ if (const char* name$ = strrchr(name, '$'))
+ {
+ if (const char* name_ = strrchr(name, '_'))
+ {
+ return (nvidia::strncmp(name, mName, (uint32_t)(name_ - name)) == 0);
+ }
+ }
+ return (nvidia::strcmp(name, mName) == 0);
+ }
+
+ void init(ApexCudaObjManager* manager, const char* name, CUfunction cuFunc, ApexCudaModule* cudaModule)
+ {
+ int funcInstIndex = 0;
+ if (const char* name$ = strrchr(name, '$'))
+ {
+ funcInstIndex = atoi(name$ + 1);
+ }
+ if (funcInstIndex >= MAX_INST_COUNT)
+ {
+ PX_ALWAYS_ASSERT();
+ return;
+ }
+
+ if (mFuncInstCount == 0)
+ {
+ ApexCudaObj::init(manager, cudaModule);
+ }
+
+ PxCudaContextManager* ctx = mManager->mGpuDispatcher->getCudaContextManager();
+ {
+ int funcMaxThreadsPerBlock;
+ cuFuncGetAttribute(&funcMaxThreadsPerBlock, CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK, cuFunc);
+
+ int funcNumRegsPerThread;
+ cuFuncGetAttribute(&funcNumRegsPerThread, CU_FUNC_ATTRIBUTE_NUM_REGS, cuFunc);
+
+ int funcSharedMemSize;
+ cuFuncGetAttribute(&funcSharedMemSize, CU_FUNC_ATTRIBUTE_SHARED_SIZE_BYTES, cuFunc);
+ const int sharedMemGranularity = (ctx->supportsArchSM20() ? 128 : 512) - 1;
+ funcSharedMemSize = (funcSharedMemSize + sharedMemGranularity) & ~sharedMemGranularity;
+
+ FuncInstData& fid = mFuncInstData[funcInstIndex];
+ fid.mName = name;
+ fid.mCuFunc = cuFunc;
+ fid.mMaxThreadsPerBlock = (uint32_t)funcMaxThreadsPerBlock;
+
+ fid.mNumRegsPerThread = (uint32_t)funcNumRegsPerThread;
+ fid.mStaticSharedSize = (uint32_t)funcSharedMemSize;
+ PX_ASSERT(fid.mStaticSharedSize <= mManager->getDeviceTraits().mMaxSharedMemPerBlock);
+
+ fid.mWarpsPerBlock = 0;
+ fid.mDynamicShared = 0;
+ }
+
+ init(ctx, funcInstIndex);
+ mFuncInstCount = PxMax(mFuncInstCount, uint32_t(funcInstIndex) + 1);
+ }
+
+ virtual ApexCudaObjType getType()
+ {
+ return FUNCTION;
+ }
+ virtual void release() {}
+
+ virtual void formContext(ApexCudaTestKernelContext*) {}
+
+	/** This function forces CUDA stream synchronization, which may slow down the application
+ */
+ PX_INLINE void setProfileSession(ApexCudaProfileSession* cudaProfileSession)
+ {
+ mManager->mCudaProfileSession = cudaProfileSession;
+ mProfileId = cudaProfileSession ? cudaProfileSession->getProfileId(mName, mManager->mNxModule->getName()) : 0;
+ }
+
+ PX_INLINE uint32_t getProfileId() const
+ {
+ return mProfileId;
+ }
+
+protected:
+ static const int MAX_INST_COUNT = 2;
+
+ struct FuncInstData
+ {
+ const char* mName;
+ CUfunction mCuFunc;
+
+ uint32_t mMaxThreadsPerBlock;
+ uint32_t mNumRegsPerThread;
+ uint32_t mStaticSharedSize;
+
+ uint32_t mWarpsPerBlock;
+ uint32_t mDynamicShared;
+ };
+
+ uint32_t mFuncInstCount;
+ FuncInstData mFuncInstData[MAX_INST_COUNT];
+
+ uint32_t mProfileId;
+ ApexCudaTestKernelContext* mCTContext;
+
+ ApexCudaFunc(const char* name)
+ : ApexCudaObj(name), mFuncInstCount(0), mProfileId(0), mCTContext(0)
+ {
+ }
+ virtual void init(PxCudaContextManager* , int /*funcInstIndex*/) {}
+
+ bool isValid() const
+ {
+ return (mFuncInstCount != 0) && (mCudaModule != 0);
+ }
+
+ const FuncInstData& getFuncInstData() const
+ {
+ PX_ASSERT(isValid());
+
+ ApexCudaConstStorage* storage = mCudaModule->getStorage();
+ if (storage != 0 && mFuncInstCount > 1)
+ {
+ PX_ASSERT(mFuncInstCount == 2);
+ return mFuncInstData[ storage->getStoreInTexture() ? 1 : 0 ];
+ }
+ else
+ {
+ PX_ASSERT(mFuncInstCount == 1);
+ return mFuncInstData[0];
+ }
+ }
+
+ PX_INLINE void onBeforeLaunch(CUstream stream)
+ {
+ if (ApexCudaConstStorage* storage = mCudaModule->getStorage())
+ {
+ storage->onBeforeLaunch();
+ }
+
+ mManager->onBeforeLaunchApexCudaFunc(*this, stream);
+ }
+ PX_INLINE void onAfterLaunch(CUstream stream)
+ {
+ mManager->onAfterLaunchApexCudaFunc(*this, stream);
+
+ if (ApexCudaConstStorage* storage = mCudaModule->getStorage())
+ {
+ storage->onAfterLaunch();
+ }
+ }
+
+ template <typename T>
+ void setParam(ApexCudaFuncParams& params, T* ptr)
+ {
+ ALIGN_OFFSET(params.mOffset, (int)__alignof(ptr));
+ PX_ASSERT(params.mOffset + sizeof(ptr) <= CUDA_MAX_PARAM_SIZE);
+ memcpy(params.mParams + params.mOffset, &ptr, sizeof(ptr));
+ params.mOffset += sizeof(ptr);
+		mCTContext = NULL; // the test context cannot capture raw pointers; use ApexCudaMemRef instead
+ }
+
+ template <typename T>
+ void setParam(ApexCudaFuncParams& params, const ApexCudaMemRef<T>& memRef)
+ {
+ T* ptr = memRef.getPtr();
+ ALIGN_OFFSET(params.mOffset, (int)__alignof(ptr));
+ PX_ASSERT(params.mOffset + sizeof(ptr) <= CUDA_MAX_PARAM_SIZE);
+ memcpy(params.mParams + params.mOffset, &ptr, sizeof(ptr));
+ params.mOffset += sizeof(ptr);
+ }
+
+ template <typename T>
+ void setParam(ApexCudaFuncParams& params, const T& val)
+ {
+ ALIGN_OFFSET(params.mOffset, (int)__alignof(val));
+ PX_ASSERT(params.mOffset + sizeof(val) <= CUDA_MAX_PARAM_SIZE);
+ memcpy(params.mParams + params.mOffset, (void*)&val, sizeof(val));
+ params.mOffset += sizeof(val);
+ }
+
+ void resolveContext()
+ {
+ mCTContext->startObjList();
+ ApexCudaObj* obj = mManager->getObjListHead();
+ while(obj)
+ {
+ if ((CUmodule)obj->getCudaModule()->getCuModule() == mCudaModule->getCuModule())
+ {
+ obj->formContext(mCTContext);
+ }
+ obj = obj->next();
+ }
+ mCTContext->finishObjList();
+ }
+
+ template <typename T>
+ void copyParam(const char* name, const ApexCudaMemRef<T>& memRef)
+ {
+ mCTContext->addParam(name, __alignof(void*), memRef.ptr, memRef.size, memRef.intent, memRef.offset);
+ }
+
+ template <typename T>
+ void copyParam(const char* name, const T& val)
+ {
+ mCTContext->addParam(name, __alignof(val), (void*)&val, sizeof(val));
+ }
+
+private:
+ template <typename T>
+ void copyParam(const char* name, const ApexCudaMemRef<T>& memRef, uint32_t fpType)
+ {
+ mCTContext->addParam(name, __alignof(void*), memRef.ptr, memRef.size, memRef.intent, memRef.offset, fpType);
+ }
+ void setParam(ApexCudaFuncParams& params, unsigned align, unsigned size, void* ptr)
+ {
+ ALIGN_OFFSET(params.mOffset, (int)align);
+ PX_ASSERT(params.mOffset + size <= CUDA_MAX_PARAM_SIZE);
+ memcpy(params.mParams + params.mOffset, ptr, (uint32_t)size);
+ params.mOffset += size;
+ }
+ friend class ApexCudaTestKernelContextReader;
+};
+
+template <>
+inline void ApexCudaFunc::copyParam<float>(const char* name, const ApexCudaMemRef<float>& memRef)
+{
+ copyParam(name, memRef, 4);
+}
+
+template <>
+inline void ApexCudaFunc::copyParam<float2>(const char* name, const ApexCudaMemRef<float2>& memRef)
+{
+ copyParam(name, memRef, 4);
+}
+
+template <>
+inline void ApexCudaFunc::copyParam<float3>(const char* name, const ApexCudaMemRef<float3>& memRef)
+{
+ copyParam(name, memRef, 4);
+}
+
+template <>
+inline void ApexCudaFunc::copyParam<float4>(const char* name, const ApexCudaMemRef<float4>& memRef)
+{
+ copyParam(name, memRef, 4);
+}
+
+template <>
+inline void ApexCudaFunc::copyParam<double>(const char* name, const ApexCudaMemRef<double>& memRef)
+{
+ copyParam(name, memRef, 8);
+}
+
+
+class ApexCudaTimer
+{
+public:
+ ApexCudaTimer()
+ : mIsStarted(false)
+ , mIsFinished(false)
+ , mStart(NULL)
+ , mFinish(NULL)
+ {
+ }
+ ~ApexCudaTimer()
+ {
+ if (mStart != NULL)
+ {
+ CUT_SAFE_CALL(cuEventDestroy(mStart));
+ }
+ if (mFinish != NULL)
+ {
+ CUT_SAFE_CALL(cuEventDestroy(mFinish));
+ }
+ }
+ void init()
+ {
+ if (mStart == NULL)
+ {
+ CUT_SAFE_CALL(cuEventCreate(&mStart, CU_EVENT_DEFAULT));
+ }
+ if (mFinish == NULL)
+ {
+ CUT_SAFE_CALL(cuEventCreate(&mFinish, CU_EVENT_DEFAULT));
+ }
+ }
+
+ void onStart(CUstream stream)
+ {
+ if (mStart != NULL)
+ {
+ mIsStarted = true;
+ CUT_SAFE_CALL(cuEventRecord(mStart, stream));
+ }
+ }
+ void onFinish(CUstream stream)
+ {
+ if (mFinish != NULL && mIsStarted)
+ {
+ mIsFinished = true;
+ CUT_SAFE_CALL(cuEventRecord(mFinish, stream));
+ }
+ }
+
+ float getElapsedTime()
+ {
+ if (mIsStarted && mIsFinished)
+ {
+ mIsStarted = false;
+ mIsFinished = false;
+ CUT_SAFE_CALL(cuEventSynchronize(mStart));
+ CUT_SAFE_CALL(cuEventSynchronize(mFinish));
+ float time;
+ CUT_SAFE_CALL(cuEventElapsedTime(&time, mStart, mFinish));
+ return time;
+ }
+ else
+ {
+ return 0.0f;
+ }
+ }
+private:
+ CUevent mStart, mFinish;
+ bool mIsStarted;
+ bool mIsFinished;
+};
+
+}
+} // end namespace nvidia::apex
+
+#endif //__APEX_CUDA_WRAPPER_H__
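For reference, a minimal sketch (not part of the diff) of how ApexCudaTimer is meant to bracket work on a CUDA stream; the stream and the launched kernel are placeholders, and a valid CUDA driver context is assumed to be current.

// Illustrative sketch only -- 'stream' and the enqueued kernel are assumed to exist.
ApexCudaTimer timer;
timer.init();                      // lazily creates the two CUevent objects

timer.onStart(stream);             // records the start event on the stream
// ... enqueue the kernel(s) to be measured on the same stream ...
timer.onFinish(stream);            // records the finish event on the stream

// Synchronizes on both events and returns the elapsed time in milliseconds,
// or 0.0f if either event was never recorded.
float ms = timer.getElapsedTime();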
diff --git a/APEX_1.4/common/include/ApexCutil.h b/APEX_1.4/common/include/ApexCutil.h
new file mode 100644
index 00000000..c6a545e1
--- /dev/null
+++ b/APEX_1.4/common/include/ApexCutil.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+
+#ifndef APEX_CUTIL_H
+#define APEX_CUTIL_H
+
+#if defined(__CUDACC__)
+# define CUT_SAFE_CALL(call) call
+# define CUT_CHECK_ERROR(errorMessage)
+#else
+# include "ApexSDKIntl.h"
+
+# define CUT_SAFE_CALL(call) { CUresult ret = call; \
+ if( CUDA_SUCCESS != ret ) { \
+ APEX_INTERNAL_ERROR("Cuda Error %d", ret); \
+ PX_ASSERT(!ret); } }
+
+# if _DEBUG
+# define CUT_CHECK_ERROR(errorMessage) \
+ if( CUDA_SUCCESS != cuCtxSynchronize() ) { \
+ APEX_INTERNAL_ERROR(errorMessage); \
+ PX_ASSERT(0); }
+# else
+# define CUT_CHECK_ERROR(errorMessage)
+# endif
+
+#endif
+
+#define CUT_TODEVICE(gpuptr) (CUdeviceptr)(size_t)(gpuptr)
+
+#endif
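A short, hedged example of how these macros are typically used from host code; the allocation size is arbitrary and a current CUDA driver context is assumed.

// Illustrative host-side usage of the macros above.
CUdeviceptr dptr;
CUT_SAFE_CALL(cuMemAlloc(&dptr, 1024));   // logs an APEX internal error and asserts on failure
// ... use the buffer, launch kernels ...
CUT_SAFE_CALL(cuMemFree(dptr));
CUT_CHECK_ERROR("post-launch check");     // expands to a cuCtxSynchronize() check in _DEBUG builds only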
diff --git a/APEX_1.4/common/include/ApexFIFO.h b/APEX_1.4/common/include/ApexFIFO.h
new file mode 100644
index 00000000..b087ac0e
--- /dev/null
+++ b/APEX_1.4/common/include/ApexFIFO.h
@@ -0,0 +1,136 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+
+#ifndef __APEX_FIFO_H__
+#define __APEX_FIFO_H__
+
+#include "Apex.h"
+#include "PsUserAllocated.h"
+
+namespace nvidia
+{
+namespace apex
+{
+
+template <typename T>
+struct FIFOEntry
+{
+ T data;
+ uint32_t next;
+ bool isValidEntry;
+};
+
+template<typename T>
+class ApexFIFO : public UserAllocated
+{
+public:
+ ApexFIFO() : first((uint32_t) - 1), last((uint32_t) - 1), count(0) {}
+
+ bool popFront(T& frontElement)
+ {
+ if (first == (uint32_t)-1)
+ {
+ return false;
+ }
+
+ PX_ASSERT(first < list.size());
+ frontElement = list[first].data;
+
+ if (first == last)
+ {
+ list.clear();
+ first = (uint32_t) - 1;
+ last = (uint32_t) - 1;
+ }
+ else
+ {
+ list[first].isValidEntry = false;
+
+ if (list[last].next == (uint32_t)-1)
+ {
+ list[last].next = first;
+ }
+ first = list[first].next;
+ }
+
+ count--;
+ return true;
+ }
+
+
+ void pushBack(const T& newElement)
+ {
+ if (list.size() == 0 || list[last].next == (uint32_t)-1)
+ {
+ FIFOEntry<T> newEntry;
+ newEntry.data = newElement;
+ newEntry.next = (uint32_t) - 1;
+ newEntry.isValidEntry = true;
+ list.pushBack(newEntry);
+
+ if (first == (uint32_t) - 1)
+ {
+ PX_ASSERT(last == (uint32_t) - 1);
+ first = list.size() - 1;
+ }
+ else
+ {
+ PX_ASSERT(last != (uint32_t) - 1);
+ list[last].next = list.size() - 1;
+ }
+
+ last = list.size() - 1;
+ }
+ else
+ {
+ uint32_t freeIndex = list[last].next;
+ PX_ASSERT(freeIndex < list.size());
+
+ FIFOEntry<T>& freeEntry = list[freeIndex];
+ freeEntry.data = newElement;
+ freeEntry.isValidEntry = true;
+
+ if (freeEntry.next == first)
+ {
+ freeEntry.next = (uint32_t) - 1;
+ }
+
+ last = freeIndex;
+ }
+ count++;
+ }
+
+ uint32_t size()
+ {
+ return count;
+ }
+
+ PX_INLINE void reserve(const uint32_t capacity)
+ {
+ list.reserve(capacity);
+ }
+
+ PX_INLINE uint32_t capacity() const
+ {
+ return list.capacity();
+ }
+
+private:
+ uint32_t first;
+ uint32_t last;
+ uint32_t count;
+ physx::Array<FIFOEntry<T> > list;
+};
+
+}
+} // end namespace nvidia::apex
+
+#endif
\ No newline at end of file
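A minimal usage sketch of ApexFIFO (illustrative only): elements are appended with pushBack() and consumed with popFront(), and freed slots in the backing array are recycled by later pushes rather than released.

// Illustrative sketch only.
ApexFIFO<uint32_t> queue;
queue.reserve(16);
queue.pushBack(1);
queue.pushBack(2);

uint32_t value;
while (queue.popFront(value))   // returns false once the FIFO is empty
{
    // ... consume 'value' in FIFO order (1, then 2) ...
}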
diff --git a/APEX_1.4/common/include/ApexFind.h b/APEX_1.4/common/include/ApexFind.h
new file mode 100644
index 00000000..32801944
--- /dev/null
+++ b/APEX_1.4/common/include/ApexFind.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+
+#ifndef APEX_FIND_H
+#define APEX_FIND_H
+
+namespace nvidia
+{
+namespace apex
+{
+	// binary search in a buffer sorted in increasing order; returns the index of 'element', or -1 if it is not found
+ template<class Sortable>
+ int32_t ApexFind(const Sortable* buffer, uint32_t numEntries, const Sortable& element, int (*compare)(const void*, const void*))
+ {
+
+#if PX_CHECKED
+ if (numEntries > 0)
+ {
+ for (uint32_t i = 1; i < numEntries; ++i)
+ {
+ PX_ASSERT(compare(buffer + i - 1, buffer + i) <= 0);
+ }
+ }
+#endif
+
+ int32_t curMin = 0;
+ int32_t curMax = (int32_t)numEntries;
+ int32_t testIndex = 0;
+
+ while (curMin < curMax)
+ {
+ testIndex = (curMin + curMax) / 2;
+ int32_t compResult = compare(&element, buffer+testIndex);
+ if (compResult < 0)
+ {
+ curMax = testIndex;
+ }
+ else if (compResult > 0)
+ {
+				curMin = testIndex + 1;	// advance past testIndex so the search interval always shrinks
+ }
+ else
+ {
+ return testIndex;
+ }
+
+ }
+
+ return -1;
+ }
+
+} // namespace apex
+} // namespace nvidia
+
+#endif // APEX_FIND_H
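A hedged usage sketch of ApexFind: the buffer must already be sorted in increasing order with respect to the qsort-style comparator; the comparator and data below are illustrative only.

// Illustrative comparator and call; not part of the header.
static int compareU32(const void* a, const void* b)
{
    const uint32_t va = *static_cast<const uint32_t*>(a);
    const uint32_t vb = *static_cast<const uint32_t*>(b);
    return va < vb ? -1 : (va > vb ? 1 : 0);
}

const uint32_t sorted[] = { 2, 5, 9, 17, 40 };
const uint32_t key = 17;
int32_t index = ApexFind(sorted, 5, key, compareU32);   // yields 3; -1 if the key is absent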
diff --git a/APEX_1.4/common/include/ApexGeneralizedCubeTemplates.h b/APEX_1.4/common/include/ApexGeneralizedCubeTemplates.h
new file mode 100644
index 00000000..c9b18f5a
--- /dev/null
+++ b/APEX_1.4/common/include/ApexGeneralizedCubeTemplates.h
@@ -0,0 +1,133 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+
+#ifndef __APEX_GENERALIZED_CUBE_TEMPLATES_H__
+#define __APEX_GENERALIZED_CUBE_TEMPLATES_H__
+
+#include "ApexUsingNamespace.h"
+#include "PsUserAllocated.h"
+#include "PsArray.h"
+
+#include "PxVec3.h"
+
+namespace nvidia
+{
+namespace apex
+{
+
+class ApexGeneralizedCubeTemplates : public UserAllocated
+{
+public:
+ ApexGeneralizedCubeTemplates();
+
+ void getTriangles(const int groups[8], physx::Array<int32_t> &indices);
+
+private:
+
+ enum AllConsts
+ {
+ GEN_NUM_SUB_CELLS = 6,
+ SUB_GRID_LEN = GEN_NUM_SUB_CELLS,
+ SUB_GRID_LEN_2 = GEN_NUM_SUB_CELLS * GEN_NUM_SUB_CELLS,
+ NUM_SUB_CELLS = GEN_NUM_SUB_CELLS * GEN_NUM_SUB_CELLS * GEN_NUM_SUB_CELLS,
+ NUM_CUBE_VERTS = 19,
+		NUM_CASES_3 = 6561, // 3^8: each of the 8 cube corners can belong to one of 3 groups
+ };
+ struct GenSubCell
+ {
+ inline void init()
+ {
+ group = -1;
+ marked = false;
+ }
+ int32_t group;
+ bool marked;
+ };
+
+ struct GenCoord
+ {
+ void init(int32_t xi, int32_t yi, int32_t zi)
+ {
+ this->xi = xi;
+ this->yi = yi;
+ this->zi = zi;
+ }
+ bool operator == (const GenCoord& c) const
+ {
+ return xi == c.xi && yi == c.yi && zi == c.zi;
+ }
+
+ int32_t xi, yi, zi;
+ };
+
+
+ void createLookupTable3();
+ void setCellGroups(const int32_t groups[8]);
+ void splitDisconnectedGroups();
+ void findVertices();
+ void createTriangles(physx::Array<int32_t>& currentIndices);
+ bool isEdge(const GenCoord& c, int32_t dim, int32_t group0, int32_t group1);
+
+
+ inline uint32_t cellNr(uint32_t x, uint32_t y, uint32_t z)
+ {
+ return x * SUB_GRID_LEN_2 + y * SUB_GRID_LEN + z;
+ }
+
+ inline int32_t groupAt(int32_t x, int32_t y, int32_t z)
+ {
+ if (x < 0 || x >= SUB_GRID_LEN || y < 0 || y >= SUB_GRID_LEN || z < 0 || z >= SUB_GRID_LEN)
+ {
+ return -1;
+ }
+ return mSubGrid[x * SUB_GRID_LEN_2 + y * SUB_GRID_LEN + z].group;
+ }
+
+ inline bool vertexMarked(const GenCoord& c)
+ {
+ if (c.xi < 0 || c.xi > SUB_GRID_LEN || c.yi < 0 || c.yi > SUB_GRID_LEN || c.zi < 0 || c.zi > SUB_GRID_LEN)
+ {
+ return false;
+ }
+ return mVertexMarked[c.xi][c.yi][c.zi];
+ }
+
+ inline void markVertex(const GenCoord& c)
+ {
+ if (c.xi < 0 || c.xi > SUB_GRID_LEN || c.yi < 0 || c.yi > SUB_GRID_LEN || c.zi < 0 || c.zi > SUB_GRID_LEN)
+ {
+ return;
+ }
+ mVertexMarked[c.xi][c.yi][c.zi] = true;
+ }
+
+
+
+
+ float mBasis[NUM_SUB_CELLS][8];
+ PxVec3 mVertPos[NUM_CUBE_VERTS];
+ int mVertexAt[SUB_GRID_LEN + 1][SUB_GRID_LEN + 1][SUB_GRID_LEN + 1];
+ bool mVertexMarked[SUB_GRID_LEN + 1][SUB_GRID_LEN + 1][SUB_GRID_LEN + 1];
+
+ GenSubCell mSubGrid[NUM_SUB_CELLS];
+ int32_t mFirst3[NUM_CASES_3]; // 3^8
+
+ physx::Array<int32_t> mLookupIndices3;
+
+ int32_t mFirstPairVertex[8][8];
+ GenCoord mFirstPairCoord[8][8];
+
+};
+
+}
+} // end namespace nvidia::apex
+
+#endif
diff --git a/APEX_1.4/common/include/ApexGeneralizedMarchingCubes.h b/APEX_1.4/common/include/ApexGeneralizedMarchingCubes.h
new file mode 100644
index 00000000..b1b153cf
--- /dev/null
+++ b/APEX_1.4/common/include/ApexGeneralizedMarchingCubes.h
@@ -0,0 +1,153 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+
+#ifndef __APEX_GENERALIZED_MARCHING_CUBES_H__
+#define __APEX_GENERALIZED_MARCHING_CUBES_H__
+
+#include "ApexUsingNamespace.h"
+#include "PsUserAllocated.h"
+#include "PsArray.h"
+
+#include "PxBounds3.h"
+
+namespace nvidia
+{
+namespace apex
+{
+
+class IProgressListener;
+class ApexGeneralizedCubeTemplates;
+
+class ApexGeneralizedMarchingCubes : public UserAllocated
+{
+public:
+ ApexGeneralizedMarchingCubes(const PxBounds3& bound, uint32_t subdivision);
+ ~ApexGeneralizedMarchingCubes();
+
+ void release();
+
+ void registerTriangle(const PxVec3& p0, const PxVec3& p1, const PxVec3& p2);
+	bool endRegistration(uint32_t bubbleSizeToRemove, IProgressListener* progress);
+
+ uint32_t getNumVertices()
+ {
+ return mVertices.size();
+ }
+ uint32_t getNumIndices()
+ {
+ return mIndices.size();
+ }
+ const PxVec3* getVertices()
+ {
+ return mVertices.begin();
+ }
+ const uint32_t* getIndices()
+ {
+ return mIndices.begin();
+ }
+private:
+
+ struct GeneralizedVertRef
+ {
+ void init()
+ {
+ vertNr = -1;
+ dangling = false;
+ deleted = false;
+ }
+ int32_t vertNr;
+ bool dangling;
+ bool deleted;
+ };
+
+ struct GeneralizedCube
+ {
+ void init(int32_t xi, int32_t yi, int32_t zi)
+ {
+ this->xi = xi;
+ this->yi = yi;
+ this->zi = zi;
+ next = -1;
+ vertRefs[0].init();
+ vertRefs[1].init();
+ vertRefs[2].init();
+ sideVertexNr[0] = -1;
+ sideVertexNr[1] = -1;
+ sideVertexNr[2] = -1;
+ sideBounds[0].setEmpty();
+ sideBounds[1].setEmpty();
+ sideBounds[2].setEmpty();
+ firstTriangle = -1;
+ numTriangles = 0;
+ deleted = false;
+ }
+ int32_t xi, yi, zi;
+ int32_t next;
+ GeneralizedVertRef vertRefs[3];
+ int32_t sideVertexNr[3];
+ PxBounds3 sideBounds[3];
+ int32_t firstTriangle;
+ uint32_t numTriangles;
+ bool deleted;
+ };
+
+	inline int hashFunction(int xi, int yi, int zi)
+	{
+		// hash on an unsigned value so the modulo result can never be negative
+		uint32_t h = (uint32_t)((xi * 92837111) ^(yi * 689287499) ^(zi * 283923481));
+		return (int32_t)(h % HASH_INDEX_SIZE);
+	}
+ int32_t createCube(int32_t xi, int32_t yi, int32_t zi);
+ int32_t findCube(int32_t xi, int32_t yi, int32_t zi);
+ void completeCells();
+ void createTrianglesForCube(int32_t cellNr);
+ void createNeighbourInfo();
+ void getCubeEdgesAndGroups(int32_t cellNr, GeneralizedVertRef* vertRefs[12], int32_t groups[8]);
+ void determineGroups();
+ void removeBubbles(int32_t minGroupSize);
+ void fixOrientations();
+ void compress();
+
+
+ PxBounds3 mBound;
+
+ float mSpacing;
+ float mInvSpacing;
+
+ physx::Array<GeneralizedCube> mCubes;
+
+ enum { HASH_INDEX_SIZE = 170111 };
+
+ int32_t mFirstCube[HASH_INDEX_SIZE];
+
+ physx::Array<PxVec3> mVertices;
+ physx::Array<uint32_t> mIndices;
+
+ physx::Array<int32_t> mFirstNeighbour;
+ physx::Array<int32_t> mNeighbours;
+ physx::Array<uint8_t> mTriangleDeleted;
+ physx::Array<int32_t> mGeneralizedTriangles;
+ physx::Array<int32_t> mCubeQueue;
+
+ physx::Array<int32_t> mTriangleGroup;
+ physx::Array<int32_t> mGroupFirstTriangle;
+ physx::Array<int32_t> mGroupTriangles;
+
+ ApexGeneralizedCubeTemplates* mTemplates;
+
+ // for debugging only
+public:
+ physx::Array<PxVec3> mDebugLines;
+};
+
+}
+} // end namespace nvidia::apex
+
+#endif
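A hedged sketch of the intended call sequence for ApexGeneralizedMarchingCubes; the bound, subdivision, triangle data, and bubble-size threshold below are placeholders.

// Illustrative sketch only; p0..p2 stand for the caller's triangle vertices.
PxBounds3 bounds(PxVec3(-1.0f), PxVec3(1.0f));
ApexGeneralizedMarchingCubes gmc(bounds, 32);

gmc.registerTriangle(p0, p1, p2);        // repeat for every triangle of the input surface
gmc.endRegistration(10, NULL);           // closes registration and removes small disconnected bubbles

const PxVec3*   verts   = gmc.getVertices();
const uint32_t* indices = gmc.getIndices();   // getNumIndices()/3 triangles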
diff --git a/APEX_1.4/common/include/ApexGroupsFiltering.h b/APEX_1.4/common/include/ApexGroupsFiltering.h
new file mode 100644
index 00000000..8ea397e1
--- /dev/null
+++ b/APEX_1.4/common/include/ApexGroupsFiltering.h
@@ -0,0 +1,158 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+
+#ifndef __APEX_GROUPS_FILTERING_H__
+#define __APEX_GROUPS_FILTERING_H__
+
+#include "ApexDefs.h"
+
+namespace nvidia
+{
+namespace apex
+{
+
+template <typename G>
+class ApexGroupsFiltering
+{
+ typedef void (*FilterOp)(const G& mask0, const G& mask1, G& result);
+
+ static void filterOp_AND(const G& mask0, const G& mask1, G& result)
+ {
+ result = (mask0 & mask1);
+ }
+ static void filterOp_OR(const G& mask0, const G& mask1, G& result)
+ {
+ result = (mask0 | mask1);
+ }
+ static void filterOp_XOR(const G& mask0, const G& mask1, G& result)
+ {
+ result = (mask0 ^ mask1);
+ }
+ static void filterOp_NAND(const G& mask0, const G& mask1, G& result)
+ {
+ result = ~(mask0 & mask1);
+ }
+ static void filterOp_NOR(const G& mask0, const G& mask1, G& result)
+ {
+ result = ~(mask0 | mask1);
+ }
+ static void filterOp_NXOR(const G& mask0, const G& mask1, G& result)
+ {
+ result = ~(mask0 ^ mask1);
+ }
+ static void filterOp_SWAP_AND(const G& mask0, const G& mask1, G& result)
+ {
+ result = SWAP_AND(mask0, mask1);
+ }
+
+ GroupsFilterOp::Enum mFilterOp0, mFilterOp1, mFilterOp2;
+ bool mFilterBool;
+ G mFilterConstant0;
+ G mFilterConstant1;
+
+public:
+ ApexGroupsFiltering()
+ {
+ mFilterOp0 = mFilterOp1 = mFilterOp2 = GroupsFilterOp::AND;
+ mFilterBool = false;
+ setZero(mFilterConstant0);
+ setZero(mFilterConstant1);
+ }
+
+ bool setFilterOps(GroupsFilterOp::Enum op0, GroupsFilterOp::Enum op1, GroupsFilterOp::Enum op2)
+ {
+ if (mFilterOp0 != op0 || mFilterOp1 != op1 || mFilterOp2 != op2)
+ {
+ mFilterOp0 = op0;
+ mFilterOp1 = op1;
+ mFilterOp2 = op2;
+ return true;
+ }
+ return false;
+ }
+ void getFilterOps(GroupsFilterOp::Enum& op0, GroupsFilterOp::Enum& op1, GroupsFilterOp::Enum& op2) const
+ {
+ op0 = mFilterOp0;
+ op1 = mFilterOp1;
+ op2 = mFilterOp2;
+ }
+
+ bool setFilterBool(bool flag)
+ {
+ if (mFilterBool != flag)
+ {
+ mFilterBool = flag;
+ return true;
+ }
+ return false;
+ }
+ bool getFilterBool() const
+ {
+ return mFilterBool;
+ }
+
+ bool setFilterConstant0(const G& mask)
+ {
+ if (mFilterConstant0 != mask)
+ {
+ mFilterConstant0 = mask;
+ return true;
+ }
+ return false;
+ }
+ G getFilterConstant0() const
+ {
+ return mFilterConstant0;
+ }
+ bool setFilterConstant1(const G& mask)
+ {
+ if (mFilterConstant1 != mask)
+ {
+ mFilterConstant1 = mask;
+ return true;
+ }
+ return false;
+ }
+ G getFilterConstant1() const
+ {
+ return mFilterConstant1;
+ }
+
+ bool operator()(const G& mask0, const G& mask1) const
+ {
+ static const FilterOp sFilterOpList[] =
+ {
+ &filterOp_AND,
+ &filterOp_OR,
+ &filterOp_XOR,
+ &filterOp_NAND,
+ &filterOp_NOR,
+ &filterOp_NXOR,
+ &filterOp_SWAP_AND,
+ };
+
+ if (hasBits(mask0) & hasBits(mask1))
+ {
+ G result0, result1, result;
+ sFilterOpList[mFilterOp0](mask0, mFilterConstant0, result0);
+ sFilterOpList[mFilterOp1](mask1, mFilterConstant1, result1);
+ sFilterOpList[mFilterOp2](result0, result1, result);
+ return (hasBits(result) == mFilterBool);
+ }
+ return true;
+ }
+};
+
+
+}
+} // end namespace nvidia::apex
+
+#endif
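A sketch of how the filter template might be instantiated; GroupsMask32, setZero, hasBits, and SWAP_AND below are stand-ins for whatever mask type a module supplies (they are not APEX API) and are shown only to make the required hooks explicit.

// Hypothetical 32-bit mask type providing the operators and free functions the template expects.
struct GroupsMask32
{
    uint32_t bits;
    GroupsMask32(uint32_t b = 0) : bits(b) {}
    GroupsMask32 operator&(const GroupsMask32& o) const { return GroupsMask32(bits & o.bits); }
    GroupsMask32 operator|(const GroupsMask32& o) const { return GroupsMask32(bits | o.bits); }
    GroupsMask32 operator^(const GroupsMask32& o) const { return GroupsMask32(bits ^ o.bits); }
    GroupsMask32 operator~() const                      { return GroupsMask32(~bits); }
    bool operator!=(const GroupsMask32& o) const        { return bits != o.bits; }
};
inline void setZero(GroupsMask32& m)       { m.bits = 0; }
inline bool hasBits(const GroupsMask32& m) { return m.bits != 0; }
inline GroupsMask32 SWAP_AND(const GroupsMask32& a, const GroupsMask32& b) { return a & b; }

// The filter evaluates op2(op0(mask0, constant0), op1(mask1, constant1)) and passes the
// pair when hasBits(result) equals the configured filter bool (false by default).
ApexGroupsFiltering<GroupsMask32> filter;
filter.setFilterOps(GroupsFilterOp::AND, GroupsFilterOp::AND, GroupsFilterOp::AND);
filter.setFilterConstant0(GroupsMask32(0x0F));
filter.setFilterConstant1(GroupsMask32(0x0F));
bool pass = filter(GroupsMask32(0x01), GroupsMask32(0x02));   // true: the filtered masks share no set bits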
diff --git a/APEX_1.4/common/include/ApexIsoMesh.h b/APEX_1.4/common/include/ApexIsoMesh.h
new file mode 100644
index 00000000..d8644e87
--- /dev/null
+++ b/APEX_1.4/common/include/ApexIsoMesh.h
@@ -0,0 +1,206 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+
+#ifndef APEX_ISO_MESH_H
+#define APEX_ISO_MESH_H
+
+#include "ApexUsingNamespace.h"
+#include "PsUserAllocated.h"
+#include "PsArray.h"
+#include "PxBounds3.h"
+
+namespace nvidia
+{
+namespace apex
+{
+
+class IProgressListener;
+
+class ApexIsoMesh : public UserAllocated
+{
+public:
+ ApexIsoMesh(uint32_t isoGridSubdivision, uint32_t keepNBiggestMeshes, bool discardInnerMeshes);
+ ~ApexIsoMesh();
+
+ void setBound(const PxBounds3& bound);
+ void clear();
+ void clearTemp();
+ void addTriangle(const PxVec3& v0, const PxVec3& v1, const PxVec3& v2);
+ bool update(IProgressListener* progress);
+
+
+ uint32_t getNumVertices() const
+ {
+ return mIsoVertices.size();
+ }
+ const PxVec3& getVertex(uint32_t index) const
+ {
+ PX_ASSERT(index < mIsoVertices.size());
+ return mIsoVertices[index];
+ }
+
+ uint32_t getNumTriangles() const
+ {
+ return mIsoTriangles.size();
+ }
+ void getTriangle(uint32_t index, uint32_t& v0, uint32_t& v1, uint32_t& v2) const;
+private:
+ // settable parameters
+ uint32_t mIsoGridSubdivision;
+ uint32_t mKeepNBiggestMeshes;
+ bool mDiscardInnerMeshes;
+ PxBounds3 mBound;
+
+ bool generateMesh(IProgressListener* progress);
+ bool interpolate(float d0, float d1, const PxVec3& pos0, const PxVec3& pos1, PxVec3& pos);
+ bool findNeighbors(IProgressListener* progress);
+ void removeLayers();
+ uint32_t floodFill(uint32_t triangleNr, uint32_t groupNr);
+
+ void removeTrisAndVerts();
+
+	// derived parameters (computed from the settable ones above)
+ float mCellSize;
+ float mThickness;
+ PxVec3 mOrigin;
+ int32_t mNumX, mNumY, mNumZ;
+ const float mIsoValue;
+
+
+ struct IsoCell
+ {
+ void init()
+ {
+ density = 0.0f;
+ vertNrX = -1;
+ vertNrY = -1;
+ vertNrZ = -1;
+ firstTriangle = -1;
+ numTriangles = 0;
+ }
+ float density;
+ int32_t vertNrX;
+ int32_t vertNrY;
+ int32_t vertNrZ;
+ int32_t firstTriangle;
+ int32_t numTriangles;
+ };
+ physx::Array<IsoCell> mGrid;
+ inline IsoCell& cellAt(int xi, int yi, int zi)
+ {
+ uint32_t index = (uint32_t)(((xi * mNumY) + yi) * mNumZ + zi);
+ PX_ASSERT(index < mGrid.size());
+ return mGrid[index];
+ }
+
+
+ struct IsoTriangle
+ {
+ void init()
+ {
+ vertexNr[0] = -1;
+ vertexNr[1] = -1;
+ vertexNr[2] = -1;
+ adjTriangles[0] = -1;
+ adjTriangles[1] = -1;
+ adjTriangles[2] = -1;
+ groupNr = -1;
+ deleted = false;
+ }
+ void set(int32_t v0, int32_t v1, int32_t v2, int32_t cubeX, int32_t cubeY, int32_t cubeZ)
+ {
+ init();
+ vertexNr[0] = v0;
+ vertexNr[1] = v1;
+ vertexNr[2] = v2;
+ this->cubeX = cubeX;
+ this->cubeY = cubeY;
+ this->cubeZ = cubeZ;
+ }
+ void addNeighbor(int32_t triangleNr)
+ {
+ if (adjTriangles[0] == -1)
+ {
+ adjTriangles[0] = triangleNr;
+ }
+ else if (adjTriangles[1] == -1)
+ {
+ adjTriangles[1] = triangleNr;
+ }
+ else if (adjTriangles[2] == -1)
+ {
+ adjTriangles[2] = triangleNr;
+ }
+ }
+
+ int32_t vertexNr[3];
+ int32_t cubeX, cubeY, cubeZ;
+ int32_t adjTriangles[3];
+ int32_t groupNr;
+ bool deleted;
+ };
+
+ struct IsoEdge
+ {
+ void set(int newV0, int newV1, int newTriangle)
+ {
+ if (newV0 < newV1)
+ {
+ v0 = newV0;
+ v1 = newV1;
+ }
+ else
+ {
+ v0 = newV1;
+ v1 = newV0;
+ }
+ triangleNr = newTriangle;
+ }
+ PX_INLINE bool operator < (const IsoEdge& e) const
+ {
+ if (v0 < e.v0)
+ {
+ return true;
+ }
+ if (v0 > e.v0)
+ {
+ return false;
+ }
+ return (v1 < e.v1);
+ }
+
+ PX_INLINE bool operator()(const IsoEdge& e1, const IsoEdge& e2) const
+ {
+ return e1 < e2;
+ }
+
+ PX_INLINE bool operator == (const IsoEdge& e) const
+ {
+ return v0 == e.v0 && v1 == e.v1;
+ }
+
+ int v0, v1;
+ int triangleNr;
+ };
+
+ physx::Array<PxVec3> mIsoVertices;
+ physx::Array<IsoTriangle> mIsoTriangles;
+ physx::Array<IsoEdge> mIsoEdges;
+
+ // evil, should not be used
+ ApexIsoMesh& operator=(const ApexIsoMesh&);
+};
+
+}
+} // end namespace nvidia::apex
+
+#endif // APEX_ISO_MESH_H
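A hedged sketch of the typical build sequence for ApexIsoMesh; the bounds, triangle inputs, and parameter values are placeholders.

// Illustrative sketch only; inputBounds and v0/v1/v2 stand for the caller's data.
ApexIsoMesh isoMesh(32 /*isoGridSubdivision*/, 1 /*keepNBiggestMeshes*/, true /*discardInnerMeshes*/);
isoMesh.setBound(inputBounds);
isoMesh.addTriangle(v0, v1, v2);             // repeat for every input triangle
if (isoMesh.update(NULL))                    // voxelize and extract the iso-surface
{
    for (uint32_t i = 0; i < isoMesh.getNumTriangles(); ++i)
    {
        uint32_t a, b, c;
        isoMesh.getTriangle(i, a, b, c);
        // ... consume isoMesh.getVertex(a), getVertex(b), getVertex(c) ...
    }
}
isoMesh.clearTemp();                         // release temporary working data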
diff --git a/APEX_1.4/common/include/ApexLegacyModule.h b/APEX_1.4/common/include/ApexLegacyModule.h
new file mode 100644
index 00000000..fef29118
--- /dev/null
+++ b/APEX_1.4/common/include/ApexLegacyModule.h
@@ -0,0 +1,170 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+
+#ifndef APEX_LEGACY_MODULE
+#define APEX_LEGACY_MODULE
+
+#include "nvparameterized/NvParameterizedTraits.h"
+
+#include "ModuleIntl.h"
+#include "ModuleBase.h"
+
+namespace nvidia
+{
+namespace apex
+{
+
+struct LegacyClassEntry
+{
+ uint32_t version;
+ uint32_t nextVersion;
+ NvParameterized::Factory* factory;
+ void (*freeParameterDefinitionTable)(NvParameterized::Traits* t);
+ NvParameterized::Conversion* (*createConv)(NvParameterized::Traits*);
+ NvParameterized::Conversion* conv;
+};
+
+template<typename IFaceT>
+class TApexLegacyModule : public IFaceT, public ModuleIntl, public ModuleBase
+{
+public:
+ virtual ~TApexLegacyModule() {}
+
+ // base class methods
+ void init(NvParameterized::Interface&) {}
+
+ NvParameterized::Interface* getDefaultModuleDesc()
+ {
+ return 0;
+ }
+
+ void release()
+ {
+ ModuleBase::release();
+ }
+ void destroy()
+ {
+ releaseLegacyObjects();
+ ModuleBase::destroy();
+ delete this;
+ }
+
+ const char* getName() const
+ {
+ return ModuleBase::getName();
+ }
+
+ ModuleSceneIntl* createInternalModuleScene(SceneIntl&, RenderDebugInterface*)
+ {
+ return NULL;
+ }
+ void releaseModuleSceneIntl(ModuleSceneIntl&) {}
+ uint32_t forceLoadAssets()
+ {
+ return 0;
+ }
+ AuthObjTypeID getModuleID() const
+ {
+ return UINT32_MAX;
+ }
+ RenderableIterator* createRenderableIterator(const Scene&)
+ {
+ return NULL;
+ }
+
+protected:
+ virtual void releaseLegacyObjects() = 0;
+
+ void registerLegacyObjects(LegacyClassEntry* e)
+ {
+ NvParameterized::Traits* t = mSdk->getParameterizedTraits();
+ if (!t)
+ {
+ return;
+ }
+
+ for (; e->factory; ++e)
+ {
+ t->registerFactory(*e->factory);
+
+ e->conv = e->createConv(t);
+ t->registerConversion(e->factory->getClassName(), e->version, e->nextVersion, *e->conv);
+ }
+ }
+
+ void unregisterLegacyObjects(LegacyClassEntry* e)
+ {
+ NvParameterized::Traits* t = mSdk->getParameterizedTraits();
+ if (!t)
+ {
+ return;
+ }
+
+ for (; e->factory; ++e)
+ {
+ t->removeConversion(
+ e->factory->getClassName(),
+ e->version,
+ e->nextVersion
+ );
+ e->conv->release();
+
+ t->removeFactory(e->factory->getClassName(), e->factory->getVersion());
+
+ e->freeParameterDefinitionTable(t);
+ }
+ }
+};
+
+typedef TApexLegacyModule<Module> ApexLegacyModule;
+
+} // namespace apex
+} // namespace nvidia
+
+#define DEFINE_CREATE_MODULE(ModuleBase) \
+ ApexSDKIntl* gApexSdk = 0; \
+ ApexSDK* GetApexSDK() { return gApexSdk; } \
+ ApexSDKIntl* GetInternalApexSDK() { return gApexSdk; } \
+ APEX_API Module* CALL_CONV createModule( \
+ ApexSDKIntl* inSdk, \
+ ModuleIntl** niRef, \
+ uint32_t APEXsdkVersion, \
+ uint32_t PhysXsdkVersion, \
+ ApexCreateError* errorCode) \
+ { \
+ if (APEXsdkVersion != APEX_SDK_VERSION) \
+ { \
+ if (errorCode) *errorCode = APEX_CE_WRONG_VERSION; \
+ return NULL; \
+ } \
+ \
+ if (PhysXsdkVersion != PHYSICS_SDK_VERSION) \
+ { \
+ if (errorCode) *errorCode = APEX_CE_WRONG_VERSION; \
+ return NULL; \
+ } \
+ \
+ gApexSdk = inSdk; \
+ \
+ ModuleBase *impl = PX_NEW(ModuleBase)(inSdk); \
+ *niRef = (ModuleIntl *) impl; \
+ return (Module *) impl; \
+ }
+
+#define DEFINE_INSTANTIATE_MODULE(ModuleBase) \
+ void instantiate##ModuleBase() \
+ { \
+ ApexSDKIntl *sdk = GetInternalApexSDK(); \
+ ModuleBase *impl = PX_NEW(ModuleBase)(sdk); \
+ sdk->registerExternalModule((Module *) impl, (ModuleIntl *) impl); \
+ }
+
+#endif // APEX_LEGACY_MODULE
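For illustration, the two macros above are meant to be invoked once per legacy module; ModuleFooLegacy below is a hypothetical class derived from ApexLegacyModule.

// Hypothetical usage; ModuleFooLegacy is a placeholder class name.
DEFINE_CREATE_MODULE(ModuleFooLegacy)        // exports createModule() for a dynamically loaded module
DEFINE_INSTANTIATE_MODULE(ModuleFooLegacy)   // defines instantiateModuleFooLegacy() for static linking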
diff --git a/APEX_1.4/common/include/ApexMarchingCubes.h b/APEX_1.4/common/include/ApexMarchingCubes.h
new file mode 100644
index 00000000..4fc59347
--- /dev/null
+++ b/APEX_1.4/common/include/ApexMarchingCubes.h
@@ -0,0 +1,380 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+
+#ifndef _APEX_MARCHING_CUBES_H__
+#define _APEX_MARCHING_CUBES_H__
+
+namespace MarchingCubes
+{
+
+// point numbering
+
+// 7-----------6
+// /| /|
+// / | / |
+// / | / |
+// 4-----------5 |
+// | | | |
+// | 3-------|---2
+// | / | /
+// | / | /
+// |/ |/
+// 0-----------1
+
+// edge numbering
+
+// *-----6-----*
+// /| /|
+// 7 | 5 |
+// / 11 / 10
+// *-----4-----* |
+// | | | |
+// | *-----2-|---*
+// 8 / 9 /
+// | 3 | 1
+// |/ |/
+// *-----0-----*
+
+
+// z
+// | y
+// | /
+// |/
+// 0---- x
+
+// dirs: 0:+x, 1:-x, 2:+y, 3:-y, 4:+z, 5:-z
+
+static const int cubeAdjacency[8][3][2] =
+{
+ {{3, 3}, {4, 8}, {1, 0}},
+ {{0, 0}, {5, 9}, {2, 1}},
+ {{1, 1}, {6, 10}, {3, 2}},
+ {{2, 2}, {7, 11}, {0, 3}},
+ {{7, 7}, {0, 8}, {5, 4}},
+ {{4, 4}, {1, 9}, {6, 5}},
+ {{5, 5}, {2, 10}, {7, 6}},
+ {{6, 6}, {3, 11}, {4, 7}}
+};
+
+static const int cubeEdges[12][3] =
+{
+	{0, 1}, {1, 2}, {2, 3}, {3, 0},
+ {4, 5}, {5, 6}, {6, 7}, {7, 4},
+ {0, 4}, {1, 5}, {2, 6}, {3, 7}
+};
+
+static const int cubeEdgeDirs[12][3] =
+{
+ {0, 1}, {2, 3}, {1, 0}, {3, 2},
+ {0, 1}, {2, 3}, {1, 0}, {3, 2},
+ {4, 5}, {4, 5}, {4, 5}, {4, 5}
+};
+
+static const int edgeTable[256] =
+{
+ 0x0 , 0x109, 0x203, 0x30a, 0x406, 0x50f, 0x605, 0x70c,
+ 0x80c, 0x905, 0xa0f, 0xb06, 0xc0a, 0xd03, 0xe09, 0xf00,
+ 0x190, 0x99 , 0x393, 0x29a, 0x596, 0x49f, 0x795, 0x69c,
+ 0x99c, 0x895, 0xb9f, 0xa96, 0xd9a, 0xc93, 0xf99, 0xe90,
+ 0x230, 0x339, 0x33 , 0x13a, 0x636, 0x73f, 0x435, 0x53c,
+ 0xa3c, 0xb35, 0x83f, 0x936, 0xe3a, 0xf33, 0xc39, 0xd30,
+ 0x3a0, 0x2a9, 0x1a3, 0xaa , 0x7a6, 0x6af, 0x5a5, 0x4ac,
+ 0xbac, 0xaa5, 0x9af, 0x8a6, 0xfaa, 0xea3, 0xda9, 0xca0,
+ 0x460, 0x569, 0x663, 0x76a, 0x66 , 0x16f, 0x265, 0x36c,
+ 0xc6c, 0xd65, 0xe6f, 0xf66, 0x86a, 0x963, 0xa69, 0xb60,
+ 0x5f0, 0x4f9, 0x7f3, 0x6fa, 0x1f6, 0xff , 0x3f5, 0x2fc,
+ 0xdfc, 0xcf5, 0xfff, 0xef6, 0x9fa, 0x8f3, 0xbf9, 0xaf0,
+ 0x650, 0x759, 0x453, 0x55a, 0x256, 0x35f, 0x55 , 0x15c,
+ 0xe5c, 0xf55, 0xc5f, 0xd56, 0xa5a, 0xb53, 0x859, 0x950,
+ 0x7c0, 0x6c9, 0x5c3, 0x4ca, 0x3c6, 0x2cf, 0x1c5, 0xcc ,
+ 0xfcc, 0xec5, 0xdcf, 0xcc6, 0xbca, 0xac3, 0x9c9, 0x8c0,
+ 0x8c0, 0x9c9, 0xac3, 0xbca, 0xcc6, 0xdcf, 0xec5, 0xfcc,
+ 0xcc , 0x1c5, 0x2cf, 0x3c6, 0x4ca, 0x5c3, 0x6c9, 0x7c0,
+ 0x950, 0x859, 0xb53, 0xa5a, 0xd56, 0xc5f, 0xf55, 0xe5c,
+ 0x15c, 0x55 , 0x35f, 0x256, 0x55a, 0x453, 0x759, 0x650,
+ 0xaf0, 0xbf9, 0x8f3, 0x9fa, 0xef6, 0xfff, 0xcf5, 0xdfc,
+ 0x2fc, 0x3f5, 0xff , 0x1f6, 0x6fa, 0x7f3, 0x4f9, 0x5f0,
+ 0xb60, 0xa69, 0x963, 0x86a, 0xf66, 0xe6f, 0xd65, 0xc6c,
+ 0x36c, 0x265, 0x16f, 0x66 , 0x76a, 0x663, 0x569, 0x460,
+ 0xca0, 0xda9, 0xea3, 0xfaa, 0x8a6, 0x9af, 0xaa5, 0xbac,
+ 0x4ac, 0x5a5, 0x6af, 0x7a6, 0xaa , 0x1a3, 0x2a9, 0x3a0,
+ 0xd30, 0xc39, 0xf33, 0xe3a, 0x936, 0x83f, 0xb35, 0xa3c,
+ 0x53c, 0x435, 0x73f, 0x636, 0x13a, 0x33 , 0x339, 0x230,
+ 0xe90, 0xf99, 0xc93, 0xd9a, 0xa96, 0xb9f, 0x895, 0x99c,
+ 0x69c, 0x795, 0x49f, 0x596, 0x29a, 0x393, 0x99 , 0x190,
+ 0xf00, 0xe09, 0xd03, 0xc0a, 0xb06, 0xa0f, 0x905, 0x80c,
+ 0x70c, 0x605, 0x50f, 0x406, 0x30a, 0x203, 0x109, 0x0
+};
+
+static const int triTable[256][16] =
+{
+ { -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
+ {0, 8, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
+ {0, 1, 9, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
+ {1, 8, 3, 9, 8, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
+ {1, 2, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
+ {0, 8, 3, 1, 2, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
+ {9, 2, 10, 0, 2, 9, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
+ {2, 8, 3, 2, 10, 8, 10, 9, 8, -1, -1, -1, -1, -1, -1, -1},
+ {3, 11, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
+ {0, 11, 2, 8, 11, 0, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
+ {1, 9, 0, 2, 3, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
+ {1, 11, 2, 1, 9, 11, 9, 8, 11, -1, -1, -1, -1, -1, -1, -1},
+ {3, 10, 1, 11, 10, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
+ {0, 10, 1, 0, 8, 10, 8, 11, 10, -1, -1, -1, -1, -1, -1, -1},
+ {3, 9, 0, 3, 11, 9, 11, 10, 9, -1, -1, -1, -1, -1, -1, -1},
+ {9, 8, 10, 10, 8, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
+ {4, 7, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
+ {4, 3, 0, 7, 3, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
+ {0, 1, 9, 8, 4, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
+ {4, 1, 9, 4, 7, 1, 7, 3, 1, -1, -1, -1, -1, -1, -1, -1},
+ {1, 2, 10, 8, 4, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
+ {3, 4, 7, 3, 0, 4, 1, 2, 10, -1, -1, -1, -1, -1, -1, -1},
+ {9, 2, 10, 9, 0, 2, 8, 4, 7, -1, -1, -1, -1, -1, -1, -1},
+ {2, 10, 9, 2, 9, 7, 2, 7, 3, 7, 9, 4, -1, -1, -1, -1},
+ {8, 4, 7, 3, 11, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
+ {11, 4, 7, 11, 2, 4, 2, 0, 4, -1, -1, -1, -1, -1, -1, -1},
+ {9, 0, 1, 8, 4, 7, 2, 3, 11, -1, -1, -1, -1, -1, -1, -1},
+ {4, 7, 11, 9, 4, 11, 9, 11, 2, 9, 2, 1, -1, -1, -1, -1},
+ {3, 10, 1, 3, 11, 10, 7, 8, 4, -1, -1, -1, -1, -1, -1, -1},
+ {1, 11, 10, 1, 4, 11, 1, 0, 4, 7, 11, 4, -1, -1, -1, -1},
+ {4, 7, 8, 9, 0, 11, 9, 11, 10, 11, 0, 3, -1, -1, -1, -1},
+ {4, 7, 11, 4, 11, 9, 9, 11, 10, -1, -1, -1, -1, -1, -1, -1},
+ {9, 5, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
+ {9, 5, 4, 0, 8, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
+ {0, 5, 4, 1, 5, 0, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
+ {8, 5, 4, 8, 3, 5, 3, 1, 5, -1, -1, -1, -1, -1, -1, -1},
+ {1, 2, 10, 9, 5, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
+ {3, 0, 8, 1, 2, 10, 4, 9, 5, -1, -1, -1, -1, -1, -1, -1},
+ {5, 2, 10, 5, 4, 2, 4, 0, 2, -1, -1, -1, -1, -1, -1, -1},
+ {2, 10, 5, 3, 2, 5, 3, 5, 4, 3, 4, 8, -1, -1, -1, -1},
+ {9, 5, 4, 2, 3, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
+ {0, 11, 2, 0, 8, 11, 4, 9, 5, -1, -1, -1, -1, -1, -1, -1},
+ {0, 5, 4, 0, 1, 5, 2, 3, 11, -1, -1, -1, -1, -1, -1, -1},
+ {2, 1, 5, 2, 5, 8, 2, 8, 11, 4, 8, 5, -1, -1, -1, -1},
+ {10, 3, 11, 10, 1, 3, 9, 5, 4, -1, -1, -1, -1, -1, -1, -1},
+ {4, 9, 5, 0, 8, 1, 8, 10, 1, 8, 11, 10, -1, -1, -1, -1},
+ {5, 4, 0, 5, 0, 11, 5, 11, 10, 11, 0, 3, -1, -1, -1, -1},
+ {5, 4, 8, 5, 8, 10, 10, 8, 11, -1, -1, -1, -1, -1, -1, -1},
+ {9, 7, 8, 5, 7, 9, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
+ {9, 3, 0, 9, 5, 3, 5, 7, 3, -1, -1, -1, -1, -1, -1, -1},
+ {0, 7, 8, 0, 1, 7, 1, 5, 7, -1, -1, -1, -1, -1, -1, -1},
+ {1, 5, 3, 3, 5, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
+ {9, 7, 8, 9, 5, 7, 10, 1, 2, -1, -1, -1, -1, -1, -1, -1},
+ {10, 1, 2, 9, 5, 0, 5, 3, 0, 5, 7, 3, -1, -1, -1, -1},
+ {8, 0, 2, 8, 2, 5, 8, 5, 7, 10, 5, 2, -1, -1, -1, -1},
+ {2, 10, 5, 2, 5, 3, 3, 5, 7, -1, -1, -1, -1, -1, -1, -1},
+ {7, 9, 5, 7, 8, 9, 3, 11, 2, -1, -1, -1, -1, -1, -1, -1},
+ {9, 5, 7, 9, 7, 2, 9, 2, 0, 2, 7, 11, -1, -1, -1, -1},
+ {2, 3, 11, 0, 1, 8, 1, 7, 8, 1, 5, 7, -1, -1, -1, -1},
+ {11, 2, 1, 11, 1, 7, 7, 1, 5, -1, -1, -1, -1, -1, -1, -1},
+ {9, 5, 8, 8, 5, 7, 10, 1, 3, 10, 3, 11, -1, -1, -1, -1},
+ {5, 7, 0, 5, 0, 9, 7, 11, 0, 1, 0, 10, 11, 10, 0, -1},
+ {11, 10, 0, 11, 0, 3, 10, 5, 0, 8, 0, 7, 5, 7, 0, -1},
+ {11, 10, 5, 7, 11, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
+ {10, 6, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
+ {0, 8, 3, 5, 10, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
+ {9, 0, 1, 5, 10, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
+ {1, 8, 3, 1, 9, 8, 5, 10, 6, -1, -1, -1, -1, -1, -1, -1},
+ {1, 6, 5, 2, 6, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
+ {1, 6, 5, 1, 2, 6, 3, 0, 8, -1, -1, -1, -1, -1, -1, -1},
+ {9, 6, 5, 9, 0, 6, 0, 2, 6, -1, -1, -1, -1, -1, -1, -1},
+ {5, 9, 8, 5, 8, 2, 5, 2, 6, 3, 2, 8, -1, -1, -1, -1},
+ {2, 3, 11, 10, 6, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
+ {11, 0, 8, 11, 2, 0, 10, 6, 5, -1, -1, -1, -1, -1, -1, -1},
+ {0, 1, 9, 2, 3, 11, 5, 10, 6, -1, -1, -1, -1, -1, -1, -1},
+ {5, 10, 6, 1, 9, 2, 9, 11, 2, 9, 8, 11, -1, -1, -1, -1},
+ {6, 3, 11, 6, 5, 3, 5, 1, 3, -1, -1, -1, -1, -1, -1, -1},
+ {0, 8, 11, 0, 11, 5, 0, 5, 1, 5, 11, 6, -1, -1, -1, -1},
+ {3, 11, 6, 0, 3, 6, 0, 6, 5, 0, 5, 9, -1, -1, -1, -1},
+ {6, 5, 9, 6, 9, 11, 11, 9, 8, -1, -1, -1, -1, -1, -1, -1},
+ {5, 10, 6, 4, 7, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
+ {4, 3, 0, 4, 7, 3, 6, 5, 10, -1, -1, -1, -1, -1, -1, -1},
+ {1, 9, 0, 5, 10, 6, 8, 4, 7, -1, -1, -1, -1, -1, -1, -1},
+ {10, 6, 5, 1, 9, 7, 1, 7, 3, 7, 9, 4, -1, -1, -1, -1},
+ {6, 1, 2, 6, 5, 1, 4, 7, 8, -1, -1, -1, -1, -1, -1, -1},
+ {1, 2, 5, 5, 2, 6, 3, 0, 4, 3, 4, 7, -1, -1, -1, -1},
+ {8, 4, 7, 9, 0, 5, 0, 6, 5, 0, 2, 6, -1, -1, -1, -1},
+ {7, 3, 9, 7, 9, 4, 3, 2, 9, 5, 9, 6, 2, 6, 9, -1},
+ {3, 11, 2, 7, 8, 4, 10, 6, 5, -1, -1, -1, -1, -1, -1, -1},
+ {5, 10, 6, 4, 7, 2, 4, 2, 0, 2, 7, 11, -1, -1, -1, -1},
+ {0, 1, 9, 4, 7, 8, 2, 3, 11, 5, 10, 6, -1, -1, -1, -1},
+ {9, 2, 1, 9, 11, 2, 9, 4, 11, 7, 11, 4, 5, 10, 6, -1},
+ {8, 4, 7, 3, 11, 5, 3, 5, 1, 5, 11, 6, -1, -1, -1, -1},
+ {5, 1, 11, 5, 11, 6, 1, 0, 11, 7, 11, 4, 0, 4, 11, -1},
+ {0, 5, 9, 0, 6, 5, 0, 3, 6, 11, 6, 3, 8, 4, 7, -1},
+ {6, 5, 9, 6, 9, 11, 4, 7, 9, 7, 11, 9, -1, -1, -1, -1},
+ {10, 4, 9, 6, 4, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
+ {4, 10, 6, 4, 9, 10, 0, 8, 3, -1, -1, -1, -1, -1, -1, -1},
+ {10, 0, 1, 10, 6, 0, 6, 4, 0, -1, -1, -1, -1, -1, -1, -1},
+ {8, 3, 1, 8, 1, 6, 8, 6, 4, 6, 1, 10, -1, -1, -1, -1},
+ {1, 4, 9, 1, 2, 4, 2, 6, 4, -1, -1, -1, -1, -1, -1, -1},
+ {3, 0, 8, 1, 2, 9, 2, 4, 9, 2, 6, 4, -1, -1, -1, -1},
+ {0, 2, 4, 4, 2, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
+ {8, 3, 2, 8, 2, 4, 4, 2, 6, -1, -1, -1, -1, -1, -1, -1},
+ {10, 4, 9, 10, 6, 4, 11, 2, 3, -1, -1, -1, -1, -1, -1, -1},
+ {0, 8, 2, 2, 8, 11, 4, 9, 10, 4, 10, 6, -1, -1, -1, -1},
+ {3, 11, 2, 0, 1, 6, 0, 6, 4, 6, 1, 10, -1, -1, -1, -1},
+ {6, 4, 1, 6, 1, 10, 4, 8, 1, 2, 1, 11, 8, 11, 1, -1},
+ {9, 6, 4, 9, 3, 6, 9, 1, 3, 11, 6, 3, -1, -1, -1, -1},
+ {8, 11, 1, 8, 1, 0, 11, 6, 1, 9, 1, 4, 6, 4, 1, -1},
+ {3, 11, 6, 3, 6, 0, 0, 6, 4, -1, -1, -1, -1, -1, -1, -1},
+ {6, 4, 8, 11, 6, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
+ {7, 10, 6, 7, 8, 10, 8, 9, 10, -1, -1, -1, -1, -1, -1, -1},
+ {0, 7, 3, 0, 10, 7, 0, 9, 10, 6, 7, 10, -1, -1, -1, -1},
+ {10, 6, 7, 1, 10, 7, 1, 7, 8, 1, 8, 0, -1, -1, -1, -1},
+ {10, 6, 7, 10, 7, 1, 1, 7, 3, -1, -1, -1, -1, -1, -1, -1},
+ {1, 2, 6, 1, 6, 8, 1, 8, 9, 8, 6, 7, -1, -1, -1, -1},
+ {2, 6, 9, 2, 9, 1, 6, 7, 9, 0, 9, 3, 7, 3, 9, -1},
+ {7, 8, 0, 7, 0, 6, 6, 0, 2, -1, -1, -1, -1, -1, -1, -1},
+ {7, 3, 2, 6, 7, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
+ {2, 3, 11, 10, 6, 8, 10, 8, 9, 8, 6, 7, -1, -1, -1, -1},
+ {2, 0, 7, 2, 7, 11, 0, 9, 7, 6, 7, 10, 9, 10, 7, -1},
+ {1, 8, 0, 1, 7, 8, 1, 10, 7, 6, 7, 10, 2, 3, 11, -1},
+ {11, 2, 1, 11, 1, 7, 10, 6, 1, 6, 7, 1, -1, -1, -1, -1},
+ {8, 9, 6, 8, 6, 7, 9, 1, 6, 11, 6, 3, 1, 3, 6, -1},
+ {0, 9, 1, 11, 6, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
+ {7, 8, 0, 7, 0, 6, 3, 11, 0, 11, 6, 0, -1, -1, -1, -1},
+ {7, 11, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
+ {7, 6, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
+ {3, 0, 8, 11, 7, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
+ {0, 1, 9, 11, 7, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
+ {8, 1, 9, 8, 3, 1, 11, 7, 6, -1, -1, -1, -1, -1, -1, -1},
+ {10, 1, 2, 6, 11, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
+ {1, 2, 10, 3, 0, 8, 6, 11, 7, -1, -1, -1, -1, -1, -1, -1},
+ {2, 9, 0, 2, 10, 9, 6, 11, 7, -1, -1, -1, -1, -1, -1, -1},
+ {6, 11, 7, 2, 10, 3, 10, 8, 3, 10, 9, 8, -1, -1, -1, -1},
+ {7, 2, 3, 6, 2, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
+ {7, 0, 8, 7, 6, 0, 6, 2, 0, -1, -1, -1, -1, -1, -1, -1},
+ {2, 7, 6, 2, 3, 7, 0, 1, 9, -1, -1, -1, -1, -1, -1, -1},
+ {1, 6, 2, 1, 8, 6, 1, 9, 8, 8, 7, 6, -1, -1, -1, -1},
+ {10, 7, 6, 10, 1, 7, 1, 3, 7, -1, -1, -1, -1, -1, -1, -1},
+ {10, 7, 6, 1, 7, 10, 1, 8, 7, 1, 0, 8, -1, -1, -1, -1},
+ {0, 3, 7, 0, 7, 10, 0, 10, 9, 6, 10, 7, -1, -1, -1, -1},
+ {7, 6, 10, 7, 10, 8, 8, 10, 9, -1, -1, -1, -1, -1, -1, -1},
+ {6, 8, 4, 11, 8, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
+ {3, 6, 11, 3, 0, 6, 0, 4, 6, -1, -1, -1, -1, -1, -1, -1},
+ {8, 6, 11, 8, 4, 6, 9, 0, 1, -1, -1, -1, -1, -1, -1, -1},
+ {9, 4, 6, 9, 6, 3, 9, 3, 1, 11, 3, 6, -1, -1, -1, -1},
+ {6, 8, 4, 6, 11, 8, 2, 10, 1, -1, -1, -1, -1, -1, -1, -1},
+ {1, 2, 10, 3, 0, 11, 0, 6, 11, 0, 4, 6, -1, -1, -1, -1},
+ {4, 11, 8, 4, 6, 11, 0, 2, 9, 2, 10, 9, -1, -1, -1, -1},
+ {10, 9, 3, 10, 3, 2, 9, 4, 3, 11, 3, 6, 4, 6, 3, -1},
+ {8, 2, 3, 8, 4, 2, 4, 6, 2, -1, -1, -1, -1, -1, -1, -1},
+ {0, 4, 2, 4, 6, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
+ {1, 9, 0, 2, 3, 4, 2, 4, 6, 4, 3, 8, -1, -1, -1, -1},
+ {1, 9, 4, 1, 4, 2, 2, 4, 6, -1, -1, -1, -1, -1, -1, -1},
+ {8, 1, 3, 8, 6, 1, 8, 4, 6, 6, 10, 1, -1, -1, -1, -1},
+ {10, 1, 0, 10, 0, 6, 6, 0, 4, -1, -1, -1, -1, -1, -1, -1},
+ {4, 6, 3, 4, 3, 8, 6, 10, 3, 0, 3, 9, 10, 9, 3, -1},
+ {10, 9, 4, 6, 10, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
+ {4, 9, 5, 7, 6, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
+ {0, 8, 3, 4, 9, 5, 11, 7, 6, -1, -1, -1, -1, -1, -1, -1},
+ {5, 0, 1, 5, 4, 0, 7, 6, 11, -1, -1, -1, -1, -1, -1, -1},
+ {11, 7, 6, 8, 3, 4, 3, 5, 4, 3, 1, 5, -1, -1, -1, -1},
+ {9, 5, 4, 10, 1, 2, 7, 6, 11, -1, -1, -1, -1, -1, -1, -1},
+ {6, 11, 7, 1, 2, 10, 0, 8, 3, 4, 9, 5, -1, -1, -1, -1},
+ {7, 6, 11, 5, 4, 10, 4, 2, 10, 4, 0, 2, -1, -1, -1, -1},
+ {3, 4, 8, 3, 5, 4, 3, 2, 5, 10, 5, 2, 11, 7, 6, -1},
+ {7, 2, 3, 7, 6, 2, 5, 4, 9, -1, -1, -1, -1, -1, -1, -1},
+ {9, 5, 4, 0, 8, 6, 0, 6, 2, 6, 8, 7, -1, -1, -1, -1},
+ {3, 6, 2, 3, 7, 6, 1, 5, 0, 5, 4, 0, -1, -1, -1, -1},
+ {6, 2, 8, 6, 8, 7, 2, 1, 8, 4, 8, 5, 1, 5, 8, -1},
+ {9, 5, 4, 10, 1, 6, 1, 7, 6, 1, 3, 7, -1, -1, -1, -1},
+ {1, 6, 10, 1, 7, 6, 1, 0, 7, 8, 7, 0, 9, 5, 4, -1},
+ {4, 0, 10, 4, 10, 5, 0, 3, 10, 6, 10, 7, 3, 7, 10, -1},
+ {7, 6, 10, 7, 10, 8, 5, 4, 10, 4, 8, 10, -1, -1, -1, -1},
+ {6, 9, 5, 6, 11, 9, 11, 8, 9, -1, -1, -1, -1, -1, -1, -1},
+ {3, 6, 11, 0, 6, 3, 0, 5, 6, 0, 9, 5, -1, -1, -1, -1},
+ {0, 11, 8, 0, 5, 11, 0, 1, 5, 5, 6, 11, -1, -1, -1, -1},
+ {6, 11, 3, 6, 3, 5, 5, 3, 1, -1, -1, -1, -1, -1, -1, -1},
+ {1, 2, 10, 9, 5, 11, 9, 11, 8, 11, 5, 6, -1, -1, -1, -1},
+ {0, 11, 3, 0, 6, 11, 0, 9, 6, 5, 6, 9, 1, 2, 10, -1},
+ {11, 8, 5, 11, 5, 6, 8, 0, 5, 10, 5, 2, 0, 2, 5, -1},
+ {6, 11, 3, 6, 3, 5, 2, 10, 3, 10, 5, 3, -1, -1, -1, -1},
+ {5, 8, 9, 5, 2, 8, 5, 6, 2, 3, 8, 2, -1, -1, -1, -1},
+ {9, 5, 6, 9, 6, 0, 0, 6, 2, -1, -1, -1, -1, -1, -1, -1},
+ {1, 5, 8, 1, 8, 0, 5, 6, 8, 3, 8, 2, 6, 2, 8, -1},
+ {1, 5, 6, 2, 1, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
+ {1, 3, 6, 1, 6, 10, 3, 8, 6, 5, 6, 9, 8, 9, 6, -1},
+ {10, 1, 0, 10, 0, 6, 9, 5, 0, 5, 6, 0, -1, -1, -1, -1},
+ {0, 3, 8, 5, 6, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
+ {10, 5, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
+ {11, 5, 10, 7, 5, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
+ {11, 5, 10, 11, 7, 5, 8, 3, 0, -1, -1, -1, -1, -1, -1, -1},
+ {5, 11, 7, 5, 10, 11, 1, 9, 0, -1, -1, -1, -1, -1, -1, -1},
+ {10, 7, 5, 10, 11, 7, 9, 8, 1, 8, 3, 1, -1, -1, -1, -1},
+ {11, 1, 2, 11, 7, 1, 7, 5, 1, -1, -1, -1, -1, -1, -1, -1},
+ {0, 8, 3, 1, 2, 7, 1, 7, 5, 7, 2, 11, -1, -1, -1, -1},
+ {9, 7, 5, 9, 2, 7, 9, 0, 2, 2, 11, 7, -1, -1, -1, -1},
+ {7, 5, 2, 7, 2, 11, 5, 9, 2, 3, 2, 8, 9, 8, 2, -1},
+ {2, 5, 10, 2, 3, 5, 3, 7, 5, -1, -1, -1, -1, -1, -1, -1},
+ {8, 2, 0, 8, 5, 2, 8, 7, 5, 10, 2, 5, -1, -1, -1, -1},
+ {9, 0, 1, 5, 10, 3, 5, 3, 7, 3, 10, 2, -1, -1, -1, -1},
+ {9, 8, 2, 9, 2, 1, 8, 7, 2, 10, 2, 5, 7, 5, 2, -1},
+ {1, 3, 5, 3, 7, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
+ {0, 8, 7, 0, 7, 1, 1, 7, 5, -1, -1, -1, -1, -1, -1, -1},
+ {9, 0, 3, 9, 3, 5, 5, 3, 7, -1, -1, -1, -1, -1, -1, -1},
+ {9, 8, 7, 5, 9, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
+ {5, 8, 4, 5, 10, 8, 10, 11, 8, -1, -1, -1, -1, -1, -1, -1},
+ {5, 0, 4, 5, 11, 0, 5, 10, 11, 11, 3, 0, -1, -1, -1, -1},
+ {0, 1, 9, 8, 4, 10, 8, 10, 11, 10, 4, 5, -1, -1, -1, -1},
+ {10, 11, 4, 10, 4, 5, 11, 3, 4, 9, 4, 1, 3, 1, 4, -1},
+ {2, 5, 1, 2, 8, 5, 2, 11, 8, 4, 5, 8, -1, -1, -1, -1},
+ {0, 4, 11, 0, 11, 3, 4, 5, 11, 2, 11, 1, 5, 1, 11, -1},
+ {0, 2, 5, 0, 5, 9, 2, 11, 5, 4, 5, 8, 11, 8, 5, -1},
+ {9, 4, 5, 2, 11, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
+ {2, 5, 10, 3, 5, 2, 3, 4, 5, 3, 8, 4, -1, -1, -1, -1},
+ {5, 10, 2, 5, 2, 4, 4, 2, 0, -1, -1, -1, -1, -1, -1, -1},
+ {3, 10, 2, 3, 5, 10, 3, 8, 5, 4, 5, 8, 0, 1, 9, -1},
+ {5, 10, 2, 5, 2, 4, 1, 9, 2, 9, 4, 2, -1, -1, -1, -1},
+ {8, 4, 5, 8, 5, 3, 3, 5, 1, -1, -1, -1, -1, -1, -1, -1},
+ {0, 4, 5, 1, 0, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
+ {8, 4, 5, 8, 5, 3, 9, 0, 5, 0, 3, 5, -1, -1, -1, -1},
+ {9, 4, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
+ {4, 11, 7, 4, 9, 11, 9, 10, 11, -1, -1, -1, -1, -1, -1, -1},
+ {0, 8, 3, 4, 9, 7, 9, 11, 7, 9, 10, 11, -1, -1, -1, -1},
+ {1, 10, 11, 1, 11, 4, 1, 4, 0, 7, 4, 11, -1, -1, -1, -1},
+ {3, 1, 4, 3, 4, 8, 1, 10, 4, 7, 4, 11, 10, 11, 4, -1},
+ {4, 11, 7, 9, 11, 4, 9, 2, 11, 9, 1, 2, -1, -1, -1, -1},
+ {9, 7, 4, 9, 11, 7, 9, 1, 11, 2, 11, 1, 0, 8, 3, -1},
+ {11, 7, 4, 11, 4, 2, 2, 4, 0, -1, -1, -1, -1, -1, -1, -1},
+ {11, 7, 4, 11, 4, 2, 8, 3, 4, 3, 2, 4, -1, -1, -1, -1},
+ {2, 9, 10, 2, 7, 9, 2, 3, 7, 7, 4, 9, -1, -1, -1, -1},
+ {9, 10, 7, 9, 7, 4, 10, 2, 7, 8, 7, 0, 2, 0, 7, -1},
+ {3, 7, 10, 3, 10, 2, 7, 4, 10, 1, 10, 0, 4, 0, 10, -1},
+ {1, 10, 2, 8, 7, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
+ {4, 9, 1, 4, 1, 7, 7, 1, 3, -1, -1, -1, -1, -1, -1, -1},
+ {4, 9, 1, 4, 1, 7, 0, 8, 1, 8, 7, 1, -1, -1, -1, -1},
+ {4, 0, 3, 7, 4, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
+ {4, 8, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
+ {9, 10, 8, 10, 11, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
+ {3, 0, 9, 3, 9, 11, 11, 9, 10, -1, -1, -1, -1, -1, -1, -1},
+ {0, 1, 10, 0, 10, 8, 8, 10, 11, -1, -1, -1, -1, -1, -1, -1},
+ {3, 1, 10, 11, 3, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
+ {1, 2, 11, 1, 11, 9, 9, 11, 8, -1, -1, -1, -1, -1, -1, -1},
+ {3, 0, 9, 3, 9, 11, 1, 2, 9, 2, 11, 9, -1, -1, -1, -1},
+ {0, 2, 11, 8, 0, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
+ {3, 2, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
+ {2, 3, 8, 2, 8, 10, 10, 8, 9, -1, -1, -1, -1, -1, -1, -1},
+ {9, 10, 2, 0, 9, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
+ {2, 3, 8, 2, 8, 10, 0, 1, 8, 1, 10, 8, -1, -1, -1, -1},
+ {1, 10, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
+ {1, 3, 8, 9, 1, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
+ {0, 9, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
+ {0, 3, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
+ { -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}
+};
+
+} // namespace MarchingCubes
+
+
+#endif
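As a reference for how these tables are conventionally consumed (not something this header provides), the sketch below classifies one cube against an iso-value, uses edgeTable to find the crossed edges, and walks triTable to emit triangles; density, isoValue, interpolateEdge, and emitTriangle are placeholders.

// Illustrative marching-cubes cell processing using the tables above.
int cubeIndex = 0;
for (int i = 0; i < 8; ++i)
{
    if (density[i] < isoValue)              // density[8]: field values at the cube corners
        cubeIndex |= (1 << i);
}

if (MarchingCubes::edgeTable[cubeIndex] != 0)   // 0 means the cube is entirely inside or outside
{
    PxVec3 edgeVertex[12];
    for (int e = 0; e < 12; ++e)
    {
        if (MarchingCubes::edgeTable[cubeIndex] & (1 << e))
            edgeVertex[e] = interpolateEdge(e); // placeholder: surface crossing on cube edge e
    }
    for (int t = 0; MarchingCubes::triTable[cubeIndex][t] != -1; t += 3)
    {
        emitTriangle(edgeVertex[MarchingCubes::triTable[cubeIndex][t + 0]],
                     edgeVertex[MarchingCubes::triTable[cubeIndex][t + 1]],
                     edgeVertex[MarchingCubes::triTable[cubeIndex][t + 2]]);
    }
}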
diff --git a/APEX_1.4/common/include/ApexMath.h b/APEX_1.4/common/include/ApexMath.h
new file mode 100644
index 00000000..19d4af4c
--- /dev/null
+++ b/APEX_1.4/common/include/ApexMath.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+
+#ifndef APEX_MATH_H
+#define APEX_MATH_H
+
+#include "PxMat44.h"
+#include "PsMathUtils.h"
+
+#include "PsVecMath.h"
+namespace nvidia
+{
+
+#define APEX_ALIGN_UP(offset, alignment) (((offset) + (alignment)-1) & ~((alignment)-1))
+
+/**
+ * Blends two transforms, approximately computing weight * _origin + (1.0f - weight) * _target:
+ * the rotation is interpolated with a slerp, the translation and per-axis scale linearly.
+ */
+PX_INLINE PxMat44 interpolateMatrix(float weight, const PxMat44& _origin, const PxMat44& _target)
+{
+ // target: normalize, save scale, transform to quat
+ PxMat33 target(_target.column0.getXYZ(),
+ _target.column1.getXYZ(),
+ _target.column2.getXYZ());
+ PxVec3 axis0 = target.column0;
+ PxVec3 axis1 = target.column1;
+ PxVec3 axis2 = target.column2;
+ const PxVec4 targetScale(axis0.normalize(), axis1.normalize(), axis2.normalize(), 1.0f);
+ target.column0 = axis0;
+ target.column1 = axis1;
+ target.column2 = axis2;
+ const PxQuat targetQ = PxQuat(target);
+
+ // origin: normalize, save scale, transform to quat
+ PxMat33 origin(_origin.column0.getXYZ(),
+ _origin.column1.getXYZ(),
+ _origin.column2.getXYZ());
+	axis0 = origin.column0;
+	axis1 = origin.column1;
+	axis2 = origin.column2;
+	const PxVec4 originScale(axis0.normalize(), axis1.normalize(), axis2.normalize(), 1.0f);
+ origin.column0 = axis0;
+ origin.column1 = axis1;
+ origin.column2 = axis2;
+ const PxQuat originQ = PxQuat(origin);
+
+ // interpolate
+ PxQuat relativeQ = physx::shdfnd::slerp(1.0f - weight, originQ, targetQ);
+ PxMat44 relative(relativeQ);
+ relative.setPosition(weight * _origin.getPosition() + (1.0f - weight) * _target.getPosition());
+
+ PxMat44 _relative = relative;
+ const PxVec4 scale = weight * originScale + (1.0f - weight) * targetScale;
+ _relative.scale(scale);
+
+ return _relative;
+}
+
+bool operator != (const PxMat44& a, const PxMat44& b);
+
+}
+
+
+#endif // APEX_MATH_H
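Two small, hedged examples of the utilities above; originPose and targetPose are placeholder PxMat44 transforms.

// interpolateMatrix: weight = 0.25f gives a pose that is 25% origin, 75% target.
PxMat44 blended = interpolateMatrix(0.25f, originPose, targetPose);

// APEX_ALIGN_UP rounds an offset up to the next multiple of a power-of-two alignment.
uint32_t aligned = APEX_ALIGN_UP(13u, 16u);   // == 16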
diff --git a/APEX_1.4/common/include/ApexMerge.h b/APEX_1.4/common/include/ApexMerge.h
new file mode 100644
index 00000000..3821d290
--- /dev/null
+++ b/APEX_1.4/common/include/ApexMerge.h
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+
+#ifndef APEX_MERGE_H
+#define APEX_MERGE_H
+
+namespace nvidia
+{
+namespace apex
+{
+	// merge two arrays that are each sorted in increasing order
+	// one of the input buffers may also serve as the result buffer
+ template<class Sortable>
+ bool ApexMerge(Sortable* bufferA, uint32_t numEntriesA, Sortable* bufferB, uint32_t numEntriesB, Sortable* resultBuffer, uint32_t numEntriesResult, int (*compare)(const void*, const void*))
+ {
+ if (numEntriesResult != numEntriesA + numEntriesB)
+ return false;
+
+#if PX_CHECKED
+ if (numEntriesA > 0)
+ {
+ for (uint32_t i = 1; i < numEntriesA; ++i)
+ {
+ PX_ASSERT(compare(bufferA + i - 1, bufferA + i) <= 0);
+ }
+ }
+
+ if (numEntriesB > 0)
+ {
+ for (uint32_t i = 1; i < numEntriesB; ++i)
+ {
+ PX_ASSERT(compare(bufferB + i - 1, bufferB + i) <= 0);
+ }
+ }
+#endif
+
+ int32_t iA = (int32_t)numEntriesA-1;
+ int32_t iB = (int32_t)numEntriesB-1;
+ uint32_t iResult = numEntriesA + numEntriesB - 1;
+
+ while (iA >= 0 && iB >= 0)
+ {
+ if (compare(&bufferA[iA], &bufferB[iB]) > 0)
+ {
+ resultBuffer[iResult] = bufferA[iA--];
+ }
+ else
+ {
+ resultBuffer[iResult] = bufferB[iB--];
+ }
+
+ --iResult;
+ }
+
+ if (iA < 0)
+ {
+ if (resultBuffer != bufferB)
+ {
+ memcpy(resultBuffer, bufferB, (iB + 1) * sizeof(Sortable));
+ }
+ }
+ else
+ {
+ if (resultBuffer != bufferA)
+ {
+ memcpy(resultBuffer, bufferA, (iA + 1) * sizeof(Sortable));
+ }
+ }
+
+ return true;
+ }
+
+} // namespace apex
+} // namespace nvidia
+
+#endif // APEX_MERGE_H
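A usage sketch of ApexMerge, reusing the qsort-style compareU32 comparator sketched for ApexFind above; the arrays are illustrative and the result buffer must hold exactly numEntriesA + numEntriesB elements.

// Illustrative sketch only.
uint32_t a[] = { 1, 4, 9 };
uint32_t b[] = { 2, 3, 10 };
uint32_t result[6];
bool ok = ApexMerge(a, 3, b, 3, result, 6, compareU32);   // result: 1 2 3 4 9 10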
diff --git a/APEX_1.4/common/include/ApexMeshContractor.h b/APEX_1.4/common/include/ApexMeshContractor.h
new file mode 100644
index 00000000..5a638031
--- /dev/null
+++ b/APEX_1.4/common/include/ApexMeshContractor.h
@@ -0,0 +1,133 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+
+#ifndef APEX_MESH_CONTRACTOR_H
+#define APEX_MESH_CONTRACTOR_H
+
+#include "ApexDefs.h"
+#include "ApexUsingNamespace.h"
+#include "PsArray.h"
+#include "PxVec3.h"
+#include "PsUserAllocated.h"
+
+namespace nvidia
+{
+namespace apex
+{
+
+class IProgressListener;
+
+class ApexMeshContractor : public UserAllocated
+{
+public:
+ ApexMeshContractor();
+
+ void registerVertex(const PxVec3& pos);
+ void registerTriangle(uint32_t v0, uint32_t v1, uint32_t v2);
+ bool endRegistration(uint32_t subdivision, IProgressListener* progress);
+
+ uint32_t contract(int32_t steps, float abortionRatio, float& volumeRatio, IProgressListener* progress);
+ void expandBorder();
+
+ uint32_t getNumVertices()
+ {
+ return mVertices.size();
+ }
+ uint32_t getNumIndices()
+ {
+ return mIndices.size();
+ }
+ const PxVec3* getVertices()
+ {
+ return mVertices.begin();
+ }
+ const uint32_t* getIndices()
+ {
+ return mIndices.begin();
+ }
+private:
+
+ void computeNeighbours();
+ void computeSignedDistanceField();
+ void contractionStep();
+ void computeAreaAndVolume(float& area, float& volume);
+
+ void addTriangle(const PxVec3& v0, const PxVec3& v1, const PxVec3& v2);
+ bool updateDistance(uint32_t xi, uint32_t yi, uint32_t zi);
+ void setInsideOutside();
+ void interpolateGradientAt(const PxVec3& pos, PxVec3& grad);
+ void subdivide(float spacing);
+ void collapse(float spacing);
+
+ void getButterfly(uint32_t triNr, uint32_t v0, uint32_t v1, int32_t& adj, int32_t& t0, int32_t& t1, int32_t& t2, int32_t& t3) const;
+ int32_t getOppositeVertex(int32_t t, uint32_t v0, uint32_t v1) const;
+ void replaceVertex(int32_t t, uint32_t vOld, uint32_t vNew);
+ void replaceNeighbor(int32_t t, int32_t nOld, uint32_t nNew);
+ bool triangleContains(int32_t t, uint32_t v) const;
+ bool legalCollapse(int32_t triNr, uint32_t v0, uint32_t v1) const;
+ void advanceAdjTriangle(uint32_t v, int32_t& t, int32_t& prev) const;
+ bool areNeighbors(int32_t t0, int32_t t1) const;
+ float findMin(const PxVec3& p, const PxVec3& maxDisp) const;
+ float interpolateDistanceAt(const PxVec3& pos) const;
+ void collectNeighborhood(int32_t triNr, float radius, uint32_t newMark, physx::Array<int32_t> &tris, physx::Array<float> &dists, uint32_t* triMarks) const;
+ void getTriangleCenter(int32_t triNr, PxVec3& center) const;
+ float curvatureAt(int triNr, int v);
+
+ struct ContractorCell
+ {
+ ContractorCell() : inside(0), distance(PX_MAX_F32), marked(false)
+ {
+ numCuts[0] = numCuts[1] = numCuts[2] = 0;
+ }
+ /*
+ void init() {
+ distance = PX_MAX_F32;
+ inside = 0;
+ marked = false;
+ numCuts[0] = 0;
+ numCuts[1] = 0;
+ numCuts[2] = 0;
+ }
+ */
+ uint32_t inside;
+ float distance;
+ uint8_t numCuts[3];
+ bool marked;
+ };
+ inline ContractorCell& cellAt(int32_t xi, int32_t yi, int32_t zi)
+ {
+ return mGrid[(((uint32_t)xi * mNumY) + (uint32_t)yi) * mNumZ + (uint32_t)zi];
+ }
+
+ inline const ContractorCell& constCellAt(int32_t xi, int32_t yi, int32_t zi) const
+ {
+ return mGrid[(((uint32_t)xi * mNumY) + (uint32_t)yi) * mNumZ + (uint32_t)zi];
+ }
+ float mCellSize;
+ PxVec3 mOrigin;
+
+ uint32_t mNumX, mNumY, mNumZ;
+
+ physx::Array<PxVec3> mVertices;
+ physx::Array<uint32_t> mIndices;
+ physx::Array<int32_t> mNeighbours;
+
+ physx::Array<ContractorCell> mGrid;
+ physx::Array<float> mVertexCurvatures;
+
+ float mInitialVolume;
+ float mCurrentVolume;
+};
+
+}
+} // end namespace nvidia::apex
+
+#endif // APEX_MESH_CONTRACTOR_H
diff --git a/APEX_1.4/common/include/ApexMeshHash.h b/APEX_1.4/common/include/ApexMeshHash.h
new file mode 100644
index 00000000..97a015ad
--- /dev/null
+++ b/APEX_1.4/common/include/ApexMeshHash.h
@@ -0,0 +1,110 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+
+#ifndef APEX_MESH_HASH_H
+#define APEX_MESH_HASH_H
+
+#include "ApexDefs.h"
+
+#include "ApexUsingNamespace.h"
+#include "PsUserAllocated.h"
+#include "PsArray.h"
+
+#include "PxVec3.h"
+
+namespace nvidia
+{
+namespace apex
+{
+
+struct MeshHashRoot
+{
+ int32_t first;
+ uint32_t timeStamp;
+};
+
+struct MeshHashEntry
+{
+ int32_t next;
+ uint32_t itemIndex;
+};
+
+
+class ApexMeshHash : public UserAllocated
+{
+public:
+ ApexMeshHash();
+ ~ApexMeshHash();
+
+ void setGridSpacing(float spacing);
+ float getGridSpacing()
+ {
+ return 1.0f / mInvSpacing;
+ }
+ void reset();
+ void add(const PxBounds3& bounds, uint32_t itemIndex);
+ void add(const PxVec3& pos, uint32_t itemIndex);
+
+ void query(const PxBounds3& bounds, physx::Array<uint32_t>& itemIndices, int32_t maxIndices = -1);
+ void queryUnique(const PxBounds3& bounds, physx::Array<uint32_t>& itemIndices, int32_t maxIndices = -1);
+
+ void query(const PxVec3& pos, physx::Array<uint32_t>& itemIndices, int32_t maxIndices = -1);
+ void queryUnique(const PxVec3& pos, physx::Array<uint32_t>& itemIndices, int32_t maxIndices = -1);
+
+	// convenience query; only valid when the inserted objects are single points (not bounds)
+ int32_t getClosestPointNr(const PxVec3* points, uint32_t numPoints, uint32_t pointStride, const PxVec3& pos);
+
+private:
+ enum
+ {
+ HashIndexSize = 170111
+ };
+
+ void compressIndices(physx::Array<uint32_t>& itemIndices);
+ float mSpacing;
+ float mInvSpacing;
+ uint32_t mTime;
+
+ inline uint32_t hashFunction(int32_t xi, int32_t yi, int32_t zi)
+ {
+ uint32_t h = (uint32_t)((xi * 92837111) ^(yi * 689287499) ^(zi * 283923481));
+ return h % HashIndexSize;
+ }
+
+ inline void cellCoordOf(const PxVec3& v, int& xi, int& yi, int& zi)
+ {
+ xi = (int)(v.x * mInvSpacing);
+ if (v.x < 0.0f)
+ {
+ xi--;
+ }
+ yi = (int)(v.y * mInvSpacing);
+ if (v.y < 0.0f)
+ {
+ yi--;
+ }
+ zi = (int)(v.z * mInvSpacing);
+ if (v.z < 0.0f)
+ {
+ zi--;
+ }
+ }
+
+ MeshHashRoot* mHashIndex;
+ physx::Array<MeshHashEntry> mEntries;
+
+ physx::Array<uint32_t> mTempIndices;
+};
+
+}
+} // end namespace nvidia::apex
+
+#endif
\ No newline at end of file
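A hedged sketch of using ApexMeshHash in point mode; points, numPoints, queryBounds, and queryPos are placeholders supplied by the caller.

// Illustrative sketch only.
ApexMeshHash hash;
hash.setGridSpacing(0.1f);
for (uint32_t i = 0; i < numPoints; ++i)
{
    hash.add(points[i], i);                 // insert each point with its index as the item id
}

physx::Array<uint32_t> hits;
hash.query(queryBounds, hits);              // indices of items whose cells overlap queryBounds

// only valid because single points (not bounds) were inserted:
int32_t nearest = hash.getClosestPointNr(points, numPoints, sizeof(PxVec3), queryPos);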
diff --git a/APEX_1.4/common/include/ApexMirrored.h b/APEX_1.4/common/include/ApexMirrored.h
new file mode 100644
index 00000000..3ae808f7
--- /dev/null
+++ b/APEX_1.4/common/include/ApexMirrored.h
@@ -0,0 +1,507 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+
+#ifndef APEX_MIRRORED_H
+#define APEX_MIRRORED_H
+
+#include "ApexDefs.h"
+
+#include "Apex.h"
+#include "ApexCutil.h"
+#include "SceneIntl.h"
+
+#include "PxTaskManager.h"
+#include "PxGpuDispatcher.h"
+#include "PxGpuCopyDesc.h"
+#include "PxGpuCopyDescQueue.h"
+#include "PxCudaContextManager.h"
+#include "PxCudaMemoryManager.h"
+//#include <cuda.h>
+
+#if defined(__CUDACC__)
+#error "Mirrored arrays should not be visible to CUDA code. Send device pointers to CUDA kernels."
+#endif
+
+
+#if !PX_SUPPORT_GPU_PHYSX
+#define PX_ALLOC_INFO(name, ID) __FILE__, __LINE__, name, physx::PxAllocId::ID
+#define PX_ALLOC_INFO_PARAMS_DECL(p0, p1, p2, p3) const char* file = p0, int line = p1, const char* allocName = p2, physx::PxAllocId::Enum allocId = physx::PxAllocId::p3
+#define PX_ALLOC_INFO_PARAMS_DEF() const char* file, int line, const char* allocName, physx::PxAllocId::Enum allocId
+#define PX_ALLOC_INFO_PARAMS_INPUT() file, line, allocName, allocId
+#define PX_ALLOC_INFO_PARAMS_INPUT_INFO(info) info.getFileName(), info.getLine(), info.getAllocName(), info.getAllocId()
+
+namespace physx
+{
+
+struct PxAllocId
+{
+ /**
+ * \brief ID of the Feature which owns/allocated memory from the heap
+ */
+ enum Enum
+ {
+ UNASSIGNED, //!< default
+ APEX, //!< APEX stuff not further classified
+ PARTICLES, //!< all particle related
+ GPU_UTIL, //!< e.g. RadixSort (used in SPH and deformable self collision)
+ CLOTH, //!< all cloth related
+ NUM_IDS //!< number of IDs, be aware that ApexHeapStats contains PxAllocIdStats[NUM_IDS]
+ };
+};
+
+/// \brief class to track allocation statistics, see PxgMirrored
+class PxAllocInfo
+{
+public:
+ /**
+ * \brief AllocInfo default constructor
+ */
+ PxAllocInfo() {}
+
+ /**
+ * \brief AllocInfo constructor that initializes all of the members
+ */
+ PxAllocInfo(const char* file, int line, const char* allocName, PxAllocId::Enum allocId)
+ : mFileName(file)
+ , mLine(line)
+ , mAllocName(allocName)
+ , mAllocId(allocId)
+ {
+ }
+
+ /// \brief get the allocation file name
+ inline const char* getFileName() const
+ {
+ return mFileName;
+ }
+
+ /// \brief get the allocation line
+ inline int getLine() const
+ {
+ return mLine;
+ }
+
+ /// \brief get the allocation name
+ inline const char* getAllocName() const
+ {
+ return mAllocName;
+ }
+
+ /// \brief get the allocation ID
+ inline PxAllocId::Enum getAllocId() const
+ {
+ return mAllocId;
+ }
+
+private:
+ const char* mFileName;
+ int mLine;
+ const char* mAllocName;
+ PxAllocId::Enum mAllocId;
+};
+
+}
+
+#endif
+
+namespace nvidia
+{
+namespace apex
+{
+
+struct ApexMirroredPlace
+{
+ enum Enum
+ {
+ DEFAULT = 0,
+ CPU = 0x01,
+#if APEX_CUDA_SUPPORT
+ GPU = 0x02,
+ CPU_GPU = (CPU | GPU),
+#endif
+ };
+};
+
+
+template <class T>
+class ApexMirrored
+{
+ PX_NOCOPY(ApexMirrored);
+
+public:
+ ApexMirrored(SceneIntl& scene, PX_ALLOC_INFO_PARAMS_DECL(NULL, 0, NULL, UNASSIGNED))
+ : mCpuPtr(0)
+ , mByteCount(0)
+ , mPlace(ApexMirroredPlace::CPU)
+ , mAllocInfo(PX_ALLOC_INFO_PARAMS_INPUT())
+#if APEX_CUDA_SUPPORT
+ , mCpuBuffer(NULL)
+ , mGpuPtr(0)
+ , mGpuBuffer(NULL)
+#endif
+ {
+ PX_UNUSED(scene);
+#if APEX_CUDA_SUPPORT
+ PxGpuDispatcher* gd = scene.getTaskManager()->getGpuDispatcher();
+ if (gd)
+ {
+ mCtx = gd->getCudaContextManager();
+ }
+ else
+ {
+ mCtx = NULL;
+ return;
+ }
+#endif
+ };
+
+ ~ApexMirrored()
+ {
+ }
+
+ //Operators for accessing the data pointed to on the host. Using these operators is guaranteed
+ //to maintain the class invariants. Note that these operators are only ever called on the host.
+ //The GPU never sees this class as instances are converted to regular pointers upon kernel
+ //invocation.
+
+ PX_INLINE T& operator*()
+ {
+ return *getCpuPtr();
+ }
+
+ PX_INLINE const T& operator*() const
+ {
+ return *getCpuPtr();
+ }
+
+ PX_INLINE T* operator->()
+ {
+ return getCpuPtr();
+ }
+
+ PX_INLINE const T* operator->() const
+ {
+ return getCpuPtr();
+ }
+
+ PX_INLINE T& operator[](unsigned int i)
+ {
+ return getCpuPtr()[i];
+ }
+
+	//Methods for converting this object to a regular pointer for use on
+	//the CPU. After a pointer has been obtained with these methods, the
+	//data can be accessed multiple times with no extra cost. This is the
+	//fastest way of accessing the data on the CPU.
+
+ PX_INLINE T* getCpuPtr() const
+ {
+ return mCpuPtr;
+ }
+
+ /*!
+ \return
+ returns whether CPU buffer has been allocated for this array
+ */
+ PX_INLINE bool cpuPtrIsValid() const
+ {
+ return mCpuPtr != 0;
+ }
+
+ PX_INLINE size_t* getCpuHandle() const
+ {
+ return reinterpret_cast<size_t*>(&mCpuPtr);
+ }
+
+ PX_INLINE size_t getByteSize() const
+ {
+ return mByteCount;
+ }
+
+#if APEX_CUDA_SUPPORT
+ /*!
+ \return
+ returns whether GPU buffer has been allocated for this array
+ */
+ PX_INLINE bool gpuPtrIsValid() const
+ {
+ return mGpuPtr != 0;
+ }
+
+ PX_INLINE T* getGpuPtr() const
+ {
+ return mGpuPtr;
+ }
+
+	/*!
+	Get an opaque handle to the underlying GPU or CPU memory. These handles must
+	not be cast to a pointer or dereferenced; they should only be used to
+	identify the memory region to the allocator.
+	*/
+ PX_INLINE size_t* getGpuHandle() const
+ {
+ return reinterpret_cast<size_t*>(&mGpuPtr);
+ }
+
+ PX_INLINE void copyDeviceToHostDesc(PxGpuCopyDesc& desc, size_t byteSize, size_t byteOffset) const
+ {
+ PX_ASSERT(mCpuPtr && mGpuPtr && mByteCount);
+ desc.type = PxGpuCopyDesc::DeviceToHost;
+ desc.bytes = byteSize;
+ desc.source = ((size_t) mGpuPtr) + byteOffset;
+ desc.dest = ((size_t) mCpuPtr) + byteOffset;
+ }
+
+ PX_INLINE void copyHostToDeviceDesc(PxGpuCopyDesc& desc, size_t byteSize, size_t byteOffset) const
+ {
+ PX_ASSERT(mCpuPtr && mGpuPtr && mByteCount);
+ desc.type = PxGpuCopyDesc::HostToDevice;
+ desc.bytes = byteSize;
+ desc.source = ((size_t) mCpuPtr) + byteOffset;
+ desc.dest = ((size_t) mGpuPtr) + byteOffset;
+ }
+
+ PX_INLINE void mallocGpu(size_t byteSize)
+ {
+ PxCudaBufferType bufferType(PxCudaBufferMemorySpace::T_GPU, PxCudaBufferFlags::F_READ_WRITE);
+ PxCudaBuffer* buffer = mCtx->getMemoryManager()->alloc(bufferType, (uint32_t)byteSize);
+ if (buffer)
+ {
+ // in case of realloc
+ if (mGpuBuffer)
+ {
+ mGpuBuffer->free();
+ }
+ mGpuBuffer = buffer;
+ mGpuPtr = reinterpret_cast<T*>(mGpuBuffer->getPtr());
+ PX_ASSERT(mGpuPtr);
+ }
+ else
+ {
+ PX_ASSERT(!"Out of GPU Memory!");
+ }
+ }
+
+ PX_INLINE void freeGpu()
+ {
+ if (mGpuBuffer)
+ {
+ bool success = mGpuBuffer->free();
+ mGpuBuffer = NULL;
+ mGpuPtr = NULL;
+ PX_UNUSED(success);
+ PX_ASSERT(success);
+ }
+ }
+
+ PX_INLINE void mallocHost(size_t byteSize)
+ {
+ PxCudaBufferType bufferType(PxCudaBufferMemorySpace::T_PINNED_HOST, PxCudaBufferFlags::F_READ_WRITE);
+ PxCudaBuffer* buffer = mCtx->getMemoryManager()->alloc(bufferType, (uint32_t)byteSize);
+ if (buffer)
+ {
+ // in case of realloc
+ if (mCpuBuffer)
+ {
+ mCpuBuffer->free();
+ }
+ mCpuBuffer = buffer;
+ mCpuPtr = reinterpret_cast<T*>(mCpuBuffer->getPtr());
+ PX_ASSERT(mCpuPtr);
+ }
+ else
+ {
+ PX_ASSERT(!"Out of Pinned Host Memory!");
+ }
+ }
+ PX_INLINE void freeHost()
+ {
+ if (mCpuBuffer)
+ {
+ bool success = mCpuBuffer->free();
+ mCpuBuffer = NULL;
+ mCpuPtr = NULL;
+ PX_UNUSED(success);
+ PX_ASSERT(success);
+ }
+ }
+ PX_INLINE void swapGpuPtr(ApexMirrored<T>& other)
+ {
+ nvidia::swap(mGpuPtr, other.mGpuPtr);
+ nvidia::swap(mGpuBuffer, other.mGpuBuffer);
+ }
+#endif
+
+ PX_INLINE const PxAllocInfo& getAllocInfo() const
+ {
+ return mAllocInfo;
+ }
+
+ PX_INLINE void mallocCpu(size_t byteSize)
+ {
+ mCpuPtr = (T*)getAllocator().allocate(byteSize, mAllocInfo.getAllocName(), mAllocInfo.getFileName(), mAllocInfo.getLine());
+ PX_ASSERT(mCpuPtr && "Out of CPU Memory!");
+ }
+ PX_INLINE void freeCpu()
+ {
+ if (mCpuPtr)
+ {
+ getAllocator().deallocate(mCpuPtr);
+ mCpuPtr = NULL;
+ }
+ }
+
+
+ PX_INLINE const char* getName() const
+ {
+ return mAllocInfo.getAllocName();
+ }
+
+ void realloc(size_t byteCount, ApexMirroredPlace::Enum place)
+ {
+ ApexMirroredPlace::Enum oldPlace = mPlace;
+ ApexMirroredPlace::Enum newPlace = (place != ApexMirroredPlace::DEFAULT) ? place : oldPlace;
+ if (oldPlace == newPlace && byteCount <= mByteCount)
+ {
+ return;
+ }
+
+ size_t newSize = PxMax(byteCount, mByteCount);
+
+#if APEX_CUDA_SUPPORT
+ if (oldPlace != ApexMirroredPlace::CPU && newPlace != ApexMirroredPlace::CPU)
+ {
+ PX_ASSERT(oldPlace != ApexMirroredPlace::CPU);
+ PX_ASSERT(newPlace != ApexMirroredPlace::CPU);
+
+ if ((mCpuPtr != NULL && byteCount > mByteCount) ||
+ (mCpuPtr == NULL && (place & ApexMirroredPlace::CPU) != 0))
+ {
+ PxCudaBuffer* oldCpuBuffer = mCpuBuffer;
+ T* oldCpuPtr = mCpuPtr;
+
+ mCpuBuffer = NULL;
+
+ mallocHost(newSize);
+
+ PxCudaBuffer* newCpuBuffer = mCpuBuffer;
+ T* newCpuPtr = mCpuPtr;
+
+
+ if (oldCpuPtr != NULL && newCpuPtr != NULL && mByteCount > 0)
+ {
+ memcpy(mCpuPtr, oldCpuPtr, mByteCount);
+ }
+
+				// temporarily point back at the old buffer so freeHost() releases it,
+				// then reinstate the new allocation
+				mCpuBuffer = oldCpuBuffer;
+				mCpuPtr = oldCpuPtr;
+
+				freeHost();
+
+				mCpuBuffer = newCpuBuffer;
+				mCpuPtr = newCpuPtr;
+ }
+ if ((mGpuPtr != NULL && byteCount > mByteCount) ||
+ (mGpuPtr == NULL && (place & ApexMirroredPlace::GPU) != 0))
+ {
+ // we explicitly do not move old data to the new buffer
+
+ freeGpu();
+ mallocGpu(newSize);
+ }
+ }
+ else
+#endif
+ {
+ T* oldCpuPtr = mCpuPtr;
+#if APEX_CUDA_SUPPORT
+ if (newPlace != ApexMirroredPlace::CPU)
+ {
+ if (newPlace == ApexMirroredPlace::CPU_GPU)
+ {
+ mallocHost(newSize);
+ }
+ else
+ {
+ mCpuPtr = NULL;
+ }
+ mallocGpu(newSize);
+ }
+ else
+#endif
+ {
+ mallocCpu(newSize);
+ }
+ T* newCpuPtr = mCpuPtr;
+
+ if (oldCpuPtr != NULL && newCpuPtr != NULL && mByteCount > 0)
+ {
+ memcpy(newCpuPtr, oldCpuPtr, mByteCount);
+ }
+
+ mCpuPtr = oldCpuPtr;
+#if APEX_CUDA_SUPPORT
+ if (oldPlace != ApexMirroredPlace::CPU)
+ {
+ if (oldPlace == ApexMirroredPlace::CPU_GPU)
+ {
+ freeHost();
+ }
+ freeGpu();
+ }
+ else
+#endif
+ {
+ freeCpu();
+ }
+ mCpuPtr = newCpuPtr;
+ }
+ mByteCount = newSize;
+ mPlace = newPlace;
+ }
+
+ void free()
+ {
+ PX_ASSERT(mPlace != ApexMirroredPlace::DEFAULT);
+#if APEX_CUDA_SUPPORT
+ if (mPlace != ApexMirroredPlace::CPU)
+ {
+ freeHost();
+ freeGpu();
+ }
+ else
+#endif
+ {
+ freeCpu();
+ }
+ mByteCount = 0;
+ }
+
+private:
+ mutable T* mCpuPtr;
+ size_t mByteCount;
+
+ ApexMirroredPlace::Enum mPlace;
+ PxAllocInfo mAllocInfo;
+
+#if APEX_CUDA_SUPPORT
+ mutable PxCudaBuffer* mCpuBuffer;
+ mutable T* mGpuPtr;
+ mutable PxCudaBuffer* mGpuBuffer;
+ PxCudaContextManager* mCtx;
+#endif
+};
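+// Illustrative host-side usage sketch (not part of the original header); it
+// assumes a valid SceneIntl reference named 'scene':
+//
+//     ApexMirrored<float> buffer(scene, PX_ALLOC_INFO("exampleBuffer", APEX));
+//     buffer.realloc(128 * sizeof(float), ApexMirroredPlace::CPU);
+//     buffer[0] = 1.0f;                  // host access through operator[]
+//     float* raw = buffer.getCpuPtr();   // fastest repeated CPU access
+//     buffer.free();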
+
+
+}
+} // end namespace nvidia::apex
+
+#endif
diff --git a/APEX_1.4/common/include/ApexMirroredArray.h b/APEX_1.4/common/include/ApexMirroredArray.h
new file mode 100644
index 00000000..cb34b26b
--- /dev/null
+++ b/APEX_1.4/common/include/ApexMirroredArray.h
@@ -0,0 +1,259 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+
+#ifndef APEX_MIRRORED_ARRAY_H
+#define APEX_MIRRORED_ARRAY_H
+
+#include "ApexDefs.h"
+
+#include "ApexMirrored.h"
+#include <new>
+
+#if defined(__CUDACC__) || PX_ANDROID || PX_PS4 || PX_LINUX_FAMILY || PX_OSX
+#define DEFAULT_NAME "unassigned"
+#else
+#include <typeinfo>
+#define DEFAULT_NAME typeid(T).name()
+#endif
+
+#pragma warning(push)
+#pragma warning(disable:4348)
+
+
+namespace nvidia
+{
+namespace apex
+{
+
+template <class T>
+class ApexMirroredArray
+{
+ PX_NOCOPY(ApexMirroredArray);
+
+public:
+ /*!
+ Default array constructor.
+	Initializes an empty array.
+ */
+ explicit PX_INLINE ApexMirroredArray(SceneIntl& scene, PX_ALLOC_INFO_PARAMS_DECL("", 0, DEFAULT_NAME, UNASSIGNED)) :
+ mData(scene, PX_ALLOC_INFO_PARAMS_INPUT()), mCapacity(0), mSize(0) {};
+
+ /*!
+ Default destructor
+ */
+ PX_INLINE ~ApexMirroredArray()
+ {
+ mData.free();
+ }
+
+ /*!
+ Return an element from this array. Operation is O(1).
+ \param i
+ The index of the element that will be returned.
+ \return
+ Element i in the array.
+ */
+ PX_INLINE const T& get(uint32_t i) const
+ {
+ return mData.getCpuPtr()[i];
+ }
+
+ /*!
+ Return an element from this array. Operation is O(1).
+ \param i
+ The index of the element that will be returned.
+ \return
+ Element i in the array.
+ */
+ PX_INLINE T& get(uint32_t i)
+ {
+ return mData.getCpuPtr()[i];
+ }
+
+ /*!
+ Array indexing operator.
+ \param i
+ The index of the element that will be returned.
+ \return
+ The element i in the array.
+ */
+ PX_INLINE const T& operator[](uint32_t i) const
+ {
+ return get(i);
+ }
+
+ /*!
+ Array indexing operator.
+ \param i
+ The index of the element that will be returned.
+ \return
+ The element i in the array.
+ */
+ PX_INLINE T& operator[](uint32_t i)
+ {
+ return get(i);
+ }
+
+ /*!
+ \return
+	returns whether a CPU buffer has been allocated for this array
+ */
+ PX_INLINE bool cpuPtrIsValid() const
+ {
+ return mData.cpuPtrIsValid();
+ }
+
+ /*!
+ Returns the plain array representation.
+ \return
+	A pointer to the underlying CPU data.
+ */
+ PX_INLINE T* getPtr() const
+ {
+ return mData.getCpuPtr();
+ }
+
+#if APEX_CUDA_SUPPORT
+ /*!
+ \return
+ returns whether GPU buffer has been allocated for this array
+ */
+ PX_INLINE bool gpuPtrIsValid() const
+ {
+ return mData.gpuPtrIsValid();
+ }
+
+ PX_INLINE T* getGpuPtr() const
+ {
+ return mData.getGpuPtr();
+ }
+
+ PX_INLINE void copyDeviceToHostDesc(PxGpuCopyDesc& desc, uint32_t size, uint32_t offset) const
+ {
+ PX_ASSERT(gpuPtrIsValid() && cpuPtrIsValid());
+ if (size == 0)
+ {
+ size = mSize;
+ }
+ mData.copyDeviceToHostDesc(desc, sizeof(T) * size, sizeof(T) * offset);
+ }
+ PX_INLINE void copyDeviceToHostQ(PxGpuCopyDescQueue& queue, uint32_t size = 0, uint32_t offset = 0) const
+ {
+ PxGpuCopyDesc desc;
+ copyDeviceToHostDesc(desc, size, offset);
+ queue.enqueue(desc);
+ }
+
+ PX_INLINE void copyHostToDeviceDesc(PxGpuCopyDesc& desc, uint32_t size, uint32_t offset) const
+ {
+ PX_ASSERT(gpuPtrIsValid() && cpuPtrIsValid());
+ if (size == 0)
+ {
+ size = mSize;
+ }
+ mData.copyHostToDeviceDesc(desc, sizeof(T) * size, sizeof(T) * offset);
+ }
+ PX_INLINE void copyHostToDeviceQ(PxGpuCopyDescQueue& queue, uint32_t size = 0, uint32_t offset = 0) const
+ {
+ PxGpuCopyDesc desc;
+ copyHostToDeviceDesc(desc, size, offset);
+ queue.enqueue(desc);
+ }
+ PX_INLINE void swapGpuPtr(ApexMirroredArray<T>& other)
+ {
+ PX_ASSERT(mCapacity == other.mCapacity);
+
+ mData.swapGpuPtr(other.mData);
+ }
+#endif /* APEX_CUDA_SUPPORT */
+
+ /*!
+ Returns the number of entries in the array. This can, and probably will,
+	differ from the array capacity.
+	\return
+	The number of entries in the array.
+ */
+ PX_INLINE uint32_t getSize() const
+ {
+ return mSize;
+ }
+
+ PX_INLINE size_t getByteSize() const
+ {
+ return mData.getByteSize();
+ }
+
+	PX_INLINE const char* getName() const
+ {
+ return mData.getName();
+ }
+
+ /*!
+ Clears the array.
+ */
+ PX_INLINE void clear()
+ {
+ mSize = 0;
+ mData.free();
+ mCapacity = 0;
+ }
+
+ //////////////////////////////////////////////////////////////////////////
+ /*!
+ Resize array
+ */
+ //////////////////////////////////////////////////////////////////////////
+ PX_INLINE void setSize(const uint32_t size, ApexMirroredPlace::Enum place = ApexMirroredPlace::DEFAULT)
+ {
+ if (size > mCapacity)
+ {
+ mCapacity = size;
+ }
+ mData.realloc(sizeof(T) * mCapacity, place);
+ mSize = size;
+ }
+
+ //////////////////////////////////////////////////////////////////////////
+ /*!
+	Ensure that the array has room for at least 'capacity' elements.
+ */
+ //////////////////////////////////////////////////////////////////////////
+ PX_INLINE void reserve(const uint32_t capacity, ApexMirroredPlace::Enum place = ApexMirroredPlace::DEFAULT)
+ {
+ if (capacity > mCapacity)
+ {
+ mCapacity = capacity;
+ }
+ mData.realloc(sizeof(T) * mCapacity, place);
+ }
+
+ //////////////////////////////////////////////////////////////////////////
+ /*!
+	Query the capacity (allocated storage) of the array.
+ */
+ //////////////////////////////////////////////////////////////////////////
+ PX_INLINE uint32_t getCapacity()
+ {
+ return mCapacity;
+ }
+
+private:
+ ApexMirrored<T> mData;
+ uint32_t mCapacity;
+ uint32_t mSize;
+};
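+// Illustrative usage sketch (not part of the original header); 'scene' is
+// assumed to be a valid SceneIntl reference:
+//
+//     ApexMirroredArray<PxVec3> positions(scene, PX_ALLOC_INFO("positions", APEX));
+//     positions.setSize(1024, ApexMirroredPlace::CPU);   // allocate CPU storage for 1024 elements
+//     positions[0] = PxVec3(0.0f);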
+
+}
+} // end namespace nvidia::apex
+
+#pragma warning(pop)
+
+#endif
diff --git a/APEX_1.4/common/include/ApexPermute.h b/APEX_1.4/common/include/ApexPermute.h
new file mode 100644
index 00000000..ef0001f2
--- /dev/null
+++ b/APEX_1.4/common/include/ApexPermute.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+
+#ifndef APEX_PERMUTE_H
+#define APEX_PERMUTE_H
+
+namespace nvidia
+{
+namespace apex
+{
+
+// permutationBuffer has to contain the indices that map from the new to the old index
+template<class Sortable>
+inline void ApexPermute(Sortable* sortBuffer, const uint32_t* permutationBuffer, uint32_t numElements, uint32_t numElementsPerPermutation = 1)
+{
+ nvidia::Array<Sortable> temp;
+ temp.resize(numElementsPerPermutation);
+
+ // TODO remove used buffer
+ nvidia::Array<bool> used(numElements, false);
+
+ for (uint32_t i = 0; i < numElements; i++)
+ {
+ //if (permutationBuffer[i] == (uint32_t)-1 || permutationBuffer[i] == i)
+ if (used[i] || permutationBuffer[i] == i)
+ {
+ continue;
+ }
+
+ uint32_t dst = i;
+ uint32_t src = permutationBuffer[i];
+ for (uint32_t j = 0; j < numElementsPerPermutation; j++)
+ {
+ temp[j] = sortBuffer[numElementsPerPermutation * dst + j];
+ }
+ do
+ {
+ for (uint32_t j = 0; j < numElementsPerPermutation; j++)
+ {
+ sortBuffer[numElementsPerPermutation * dst + j] = sortBuffer[numElementsPerPermutation * src + j];
+ }
+ //permutationBuffer[dst] = (uint32_t)-1;
+ used[dst] = true;
+ dst = src;
+ src = permutationBuffer[src];
+ //} while (permutationBuffer[src] != (uint32_t)-1);
+ }
+ while (!used[src]);
+ for (uint32_t j = 0; j < numElementsPerPermutation; j++)
+ {
+ sortBuffer[numElementsPerPermutation * dst + j] = temp[j];
+ }
+ //permutationBuffer[dst] = (uint32_t)-1;
+ used[dst] = true;
+ }
+}
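+// Illustrative example (not part of the original header): with
+//     uint32_t perm[3] = { 2, 0, 1 };
+//     float    data[3] = { 10.0f, 20.0f, 30.0f };
+//     ApexPermute(data, perm, 3);
+// data becomes { 30.0f, 10.0f, 20.0f }, i.e. new index i receives the element
+// that used to live at old index perm[i].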
+
+} // namespace apex
+} // namespace nvidia
+
+#endif // APEX_PERMUTE_H
diff --git a/APEX_1.4/common/include/ApexPreview.h b/APEX_1.4/common/include/ApexPreview.h
new file mode 100644
index 00000000..1d8ff77d
--- /dev/null
+++ b/APEX_1.4/common/include/ApexPreview.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+
+#ifndef __APEX_PREVIEW_H__
+#define __APEX_PREVIEW_H__
+
+#include "ApexRenderable.h"
+
+namespace nvidia
+{
+namespace apex
+{
+
+class ApexContext;
+
+/**
+ Class that implements preview interface
+*/
+class ApexPreview : public ApexRenderable
+{
+public:
+ ApexPreview();
+ virtual ~ApexPreview();
+
+ // Each class that derives from ApexPreview may optionally implement this function
+ virtual Renderable* getRenderable()
+ {
+ return NULL;
+ }
+
+ virtual void setPose(const PxMat44& pose);
+ virtual const PxMat44 getPose() const;
+
+ virtual void release() = 0;
+ void destroy();
+
+protected:
+ bool mInRelease;
+
+ PxMat44 mPose;
+};
+
+}
+} // end namespace nvidia::apex
+
+#endif // __APEX_PREVIEW_H__
diff --git a/APEX_1.4/common/include/ApexPvdClient.h b/APEX_1.4/common/include/ApexPvdClient.h
new file mode 100644
index 00000000..f2da76d3
--- /dev/null
+++ b/APEX_1.4/common/include/ApexPvdClient.h
@@ -0,0 +1,168 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+
+#ifndef APEX_PVD_CLIENT_H
+#define APEX_PVD_CLIENT_H
+
+#include "Px.h"
+#include "ApexDefs.h"
+
+#define APEX_PVD_NAMESPACE "Apex"
+
+//#define WITHOUT_PVD 1
+#ifdef WITHOUT_PVD
+namespace physx
+{
+ class PxPvd;
+ namespace pvdsdk
+ {
+ class PvdDataStream;
+ class PvdUserRenderer;
+ }
+}
+#else
+#include "PsPvd.h"
+#include "PxPvdClient.h"
+#include "PxPvdObjectModelBaseTypes.h"
+#include "PxPvdDataStream.h"
+#include "PxPvdUserRenderer.h"
+#endif
+
+namespace NvParameterized
+{
+ class Interface;
+ class Definition;
+ class Handle;
+}
+
+namespace physx
+{
+namespace pvdsdk
+{
+ /**
+ \brief Define what action needs to be done when updating pvd with an NvParameterized object.
+ */
+ struct PvdAction
+ {
+ /**
+ \brief Enum
+ */
+ enum Enum
+ {
+ /**
+ \brief Create instances and update properties.
+ */
+ UPDATE,
+
+ /**
+ \brief Destroy instances.
+ */
+ DESTROY
+ };
+ };
+
+
+
+ /**
+ \brief The ApexPvdClient class allows APEX and PhysX to both connect to the PhysX Visual Debugger (PVD)
+ */
+ class ApexPvdClient : public PvdClient
+ {
+ public:
+ /**
+ \brief Check if the PVD connection is active
+ */
+ virtual bool isConnected() const = 0;
+
+ /**
+ \brief Called when PVD connection established
+ */
+ virtual void onPvdConnected() = 0;
+
+ /**
+ \brief Called when PVD connection finished
+ */
+ virtual void onPvdDisconnected() = 0;
+
+ /**
+ \brief Flush data streams etc.
+ */
+ virtual void flush() = 0;
+
+ /**
+ \brief Retrieve the PxPvd
+ */
+ virtual PxPvd& getPxPvd() = 0;
+
+ /**
+ \brief Returns the data stream if Pvd is connected.
+ */
+ virtual PvdDataStream* getDataStream() = 0;
+
+ /**
+ \brief Returns the PvdUserRenderer if Pvd is connected.
+ */
+ virtual PvdUserRenderer* getUserRender() = 0;
+
+ //virtial PvdMetaDataBinding* getMetaDataBinding() = 0;
+
+ /**
+ \brief Initializes the classes sent to pvd.
+ */
+ virtual void initPvdClasses() = 0;
+
+ /**
+ \brief Sends the existing instances to pvd.
+ */
+ virtual void initPvdInstances() = 0;
+
+ /**
+ \brief Adds properties of an NvParameterized object to the provided class and creates necessary subclasses for structs.
+
+		\note The PVD class pvdClassName must already exist. PVD classes are created for structs, but not for references.
+ */
+ virtual void initPvdClasses(const NvParameterized::Definition& paramsHandle, const char* pvdClassName) = 0;
+
+ /**
+ \brief Creates or destroys pvdInstances and/or updates properties.
+ */
+ virtual void updatePvd(const void* pvdInstance, NvParameterized::Interface& params, PvdAction::Enum pvdAction = PvdAction::UPDATE) = 0;
+
+ //////////////////
+
+ /**
+ \brief Start the profiling frame
+ \note inInstanceId must *not* be used already by pvd
+ */
+ virtual void beginFrame( void* inInstanceId ) = 0;
+
+ /**
+ \brief End the profiling frame
+ */
+ virtual void endFrame( void* inInstanceId ) = 0;
+
+ /**
+ \brief Destroy this instance
+ */
+ virtual void release() = 0;
+
+ /**
+ * Assumes foundation is already booted up.
+ */
+ static ApexPvdClient* create( PxPvd* pvd );
+ };
+
+}
+}
+
+
+
+#endif // APEX_PVD_CLIENT_H
diff --git a/APEX_1.4/common/include/ApexQuadricSimplifier.h b/APEX_1.4/common/include/ApexQuadricSimplifier.h
new file mode 100644
index 00000000..75b050cf
--- /dev/null
+++ b/APEX_1.4/common/include/ApexQuadricSimplifier.h
@@ -0,0 +1,352 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+
+#ifndef __APEX_QUADRIC_SIMPLIFIER_H__
+#define __APEX_QUADRIC_SIMPLIFIER_H__
+
+#include "ApexUsingNamespace.h"
+#include "PsArray.h"
+#include "PsUserAllocated.h"
+
+namespace nvidia
+{
+namespace apex
+{
+
+class ApexQuadricSimplifier : public UserAllocated
+{
+public:
+ ApexQuadricSimplifier();
+
+ ~ApexQuadricSimplifier();
+ void clear();
+
+ // registration
+ void registerVertex(const PxVec3& pos);
+ void registerTriangle(uint32_t v0, uint32_t v1, uint32_t v2);
+ bool endRegistration(bool mergeCloseVertices, IProgressListener* progress);
+
+ // manipulation
+ uint32_t simplify(uint32_t subdivision, int32_t maxSteps, float maxError, IProgressListener* progress);
+
+ // accessors
+ uint32_t getNumVertices() const
+ {
+ return mVertices.size();
+ }
+ uint32_t getNumDeletedVertices() const
+ {
+ return mNumDeletedVertices;
+ }
+
+ bool getVertexPosition(uint32_t vertexNr, PxVec3& pos) const
+ {
+ PX_ASSERT(vertexNr < mVertices.size());
+ if (mVertices[vertexNr]->bDeleted == 1)
+ {
+ return false;
+ }
+
+ pos = mVertices[vertexNr]->pos;
+ return true;
+ }
+ int32_t getTriangleNr(uint32_t v0, uint32_t v1, uint32_t v2) const;
+ uint32_t getNumTriangles() const
+ {
+ return mTriangles.size() - mNumDeletedTriangles;
+ }
+ bool getTriangle(uint32_t i, uint32_t& v0, uint32_t& v1, uint32_t& v2) const;
+
+private:
+
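+	// Symmetric 4x4 error quadric in the style of Garland-Heckbert quadric error
+	// metrics; only the upper triangle is stored. outerProduct(v) evaluates the
+	// quadratic form [v 1]^T Q [v 1], i.e. the accumulated squared distance of v
+	// to the planes that were added to the quadric.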
+ class Quadric
+ {
+ public:
+ void zero()
+ {
+ a00 = 0.0f;
+ a01 = 0.0f;
+ a02 = 0.0f;
+ a03 = 0.0f;
+ a11 = 0.0f;
+ a12 = 0.0f;
+ a13 = 0.0f;
+ a22 = 0.0f;
+ a23 = 0.0f;
+ a33 = 0.0f;
+ }
+
+ // generate quadric from plane
+ void setFromPlane(const PxVec3& v0, const PxVec3& v1, const PxVec3& v2)
+ {
+ PxVec3 n = (v1 - v0).cross(v2 - v0);
+ n.normalize();
+ float d = -n.dot(v0);
+ a00 = n.x * n.x;
+ a01 = n.x * n.y;
+ a02 = n.x * n.z;
+ a03 = n.x * d;
+ a11 = n.y * n.y;
+ a12 = n.y * n.z;
+ a13 = n.y * d;
+ a22 = n.z * n.z;
+ a23 = n.z * d;
+ a33 = d * d;
+ }
+
+ Quadric operator +(const Quadric& q) const
+ {
+ Quadric sum;
+ sum.a00 = a00 + q.a00;
+ sum.a01 = a01 + q.a01;
+ sum.a02 = a02 + q.a02;
+ sum.a03 = a03 + q.a03;
+ sum.a11 = a11 + q.a11;
+ sum.a12 = a12 + q.a12;
+ sum.a13 = a13 + q.a13;
+ sum.a22 = a22 + q.a22;
+ sum.a23 = a23 + q.a23;
+ sum.a33 = a33 + q.a33;
+ return sum;
+ }
+
+ void operator +=(const Quadric& q)
+ {
+ a00 += q.a00;
+ a01 += q.a01;
+ a02 += q.a02;
+ a03 += q.a03;
+ a11 += q.a11;
+ a12 += q.a12;
+ a13 += q.a13;
+ a22 += q.a22;
+ a23 += q.a23;
+ a33 += q.a33;
+ }
+
+ float outerProduct(const PxVec3& v)
+ {
+ return a00 * v.x * v.x + 2.0f * a01 * v.x * v.y + 2.0f * a02 * v.x * v.z + 2.0f * a03 * v.x +
+ a11 * v.y * v.y + 2.0f * a12 * v.y * v.z + 2.0f * a13 * v.y +
+ a22 * v.z * v.z + 2.0f * a23 * v.z + a33;
+ }
+ private:
+ float a00, a01, a02, a03;
+ float a11, a12, a13;
+ float a22, a23;
+ float a33;
+
+ };
+
+ struct QuadricVertex : public UserAllocated
+ {
+ QuadricVertex(const PxVec3& newPos)
+ {
+ pos = newPos;
+ q.zero();
+ bDeleted = 0;
+ bReferenced = 0;
+ bBorder = 0;
+ }
+ void removeEdge(int32_t edgeNr);
+ void addTriangle(int32_t triangleNr);
+ void removeTriangle(int32_t triangleNr);
+ PxVec3 pos;
+ Quadric q;
+ physx::Array<uint32_t> mEdges;
+ physx::Array<uint32_t> mTriangles;
+ uint32_t bDeleted : 1;
+ uint32_t bReferenced : 1;
+ uint32_t bBorder : 1;
+ };
+
+ struct QuadricEdge
+ {
+ void init(int32_t v0, int32_t v1)
+ {
+ vertexNr[0] = (uint32_t)PxMin(v0, v1);
+ vertexNr[1] = (uint32_t)PxMax(v0, v1);
+ cost = -1.0f;
+ lengthSquared = -1.0f;
+ ratio = -1.0f;
+ heapPos = -1;
+ border = false;
+ deleted = false;
+ }
+ bool operator < (QuadricEdge& e) const
+ {
+ if (vertexNr[0] < e.vertexNr[0])
+ {
+ return true;
+ }
+ if (vertexNr[0] > e.vertexNr[0])
+ {
+ return false;
+ }
+ return vertexNr[1] < e.vertexNr[1];
+ }
+ bool operator == (QuadricEdge& e) const
+ {
+ return vertexNr[0] == e.vertexNr[0] && vertexNr[1] == e.vertexNr[1];
+ }
+ uint32_t otherVertex(uint32_t vNr) const
+ {
+ if (vertexNr[0] == vNr)
+ {
+ return vertexNr[1];
+ }
+ else
+ {
+ PX_ASSERT(vertexNr[1] == vNr);
+ return vertexNr[0];
+ }
+ }
+ void replaceVertex(uint32_t vOld, uint32_t vNew)
+ {
+ if (vertexNr[0] == vOld)
+ {
+ vertexNr[0] = vNew;
+ }
+ else if (vertexNr[1] == vOld)
+ {
+ vertexNr[1] = vNew;
+ }
+ else
+ {
+ PX_ASSERT(0);
+ }
+ if (vertexNr[0] > vertexNr[1])
+ {
+ unsigned v = vertexNr[0];
+ vertexNr[0] = vertexNr[1];
+ vertexNr[1] = v;
+ }
+ }
+ uint32_t vertexNr[2];
+ float cost;
+ float lengthSquared;
+ float ratio;
+ int32_t heapPos;
+ bool border;
+ bool deleted;
+ };
+
+ struct QuadricTriangle
+ {
+ void init(uint32_t v0, uint32_t v1, uint32_t v2)
+ {
+ vertexNr[0] = v0;
+ vertexNr[1] = v1;
+ vertexNr[2] = v2;
+ deleted = false;
+ }
+ bool containsVertex(uint32_t vNr) const
+ {
+ return vertexNr[0] == vNr || vertexNr[1] == vNr || vertexNr[2] == vNr;
+ }
+ uint32_t otherVertex(uint32_t v0, uint32_t v1)
+ {
+ if (vertexNr[0] != v0 && vertexNr[0] != v1)
+ {
+ PX_ASSERT(v0 == vertexNr[1] || v0 == vertexNr[2]);
+ PX_ASSERT(v1 == vertexNr[1] || v1 == vertexNr[2]);
+ return vertexNr[0];
+ }
+ else if (vertexNr[1] != v0 && vertexNr[1] != v1)
+ {
+ PX_ASSERT(v0 == vertexNr[0] || v0 == vertexNr[2]);
+ PX_ASSERT(v1 == vertexNr[0] || v1 == vertexNr[2]);
+ return vertexNr[1];
+ }
+ else
+ {
+ PX_ASSERT(vertexNr[2] != v0 && vertexNr[2] != v1);
+ PX_ASSERT(v0 == vertexNr[0] || v0 == vertexNr[1]);
+ PX_ASSERT(v1 == vertexNr[0] || v1 == vertexNr[1]);
+ return vertexNr[2];
+ }
+ }
+ void replaceVertex(uint32_t vOld, uint32_t vNew)
+ {
+ if (vertexNr[0] == vOld)
+ {
+ vertexNr[0] = vNew;
+ }
+ else if (vertexNr[1] == vOld)
+ {
+ vertexNr[1] = vNew;
+ }
+ else if (vertexNr[2] == vOld)
+ {
+ vertexNr[2] = vNew;
+ }
+ else
+ {
+ PX_ASSERT(0);
+ }
+ }
+ bool operator == (QuadricTriangle& t) const
+ {
+ return t.containsVertex(vertexNr[0]) &&
+ t.containsVertex(vertexNr[1]) &&
+ t.containsVertex(vertexNr[2]);
+ }
+ uint32_t vertexNr[3];
+ bool deleted;
+ };
+
+ struct QuadricVertexRef
+ {
+ void init(const PxVec3& p, int32_t vNr)
+ {
+ pos = p;
+ vertexNr = (uint32_t)vNr;
+ }
+ bool operator < (const QuadricVertexRef& vr)
+ {
+ return pos.x < vr.pos.x;
+ }
+ PxVec3 pos;
+ uint32_t vertexNr;
+ };
+
+
+ void computeCost(QuadricEdge& edge);
+ bool legalCollapse(QuadricEdge& edge, float maxLength);
+ void collapseEdge(QuadricEdge& edge);
+ void quickSortEdges(int32_t l, int32_t r);
+ void quickSortVertexRefs(int32_t l, int32_t r);
+ void mergeVertices();
+
+ bool heapElementSmaller(QuadricEdge* e0, QuadricEdge* e1);
+ void heapUpdate(uint32_t i);
+ void heapSift(uint32_t i);
+ void heapRemove(uint32_t i, bool append);
+ void testHeap();
+ void testMesh();
+
+ PxBounds3 mBounds;
+
+ physx::Array<QuadricVertex*> mVertices;
+ physx::Array<QuadricEdge> mEdges;
+ physx::Array<QuadricTriangle> mTriangles;
+ physx::Array<QuadricEdge*> mHeap;
+ physx::Array<QuadricVertexRef> mVertexRefs;
+
+ uint32_t mNumDeletedTriangles;
+ uint32_t mNumDeletedVertices;
+ uint32_t mNumDeletedHeapElements;
+};
+
+}
+} // end namespace nvidia::apex
+
+#endif \ No newline at end of file
diff --git a/APEX_1.4/common/include/ApexQuickSelectSmallestK.h b/APEX_1.4/common/include/ApexQuickSelectSmallestK.h
new file mode 100644
index 00000000..fabd8c88
--- /dev/null
+++ b/APEX_1.4/common/include/ApexQuickSelectSmallestK.h
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+
+#ifndef APEX_QUICK_SELECT_SMALLEST_K_H
+#define APEX_QUICK_SELECT_SMALLEST_K_H
+
+namespace nvidia
+{
+namespace apex
+{
+//A variant of quick sort to move the smallest k members of a sequence to its start.
+//Does much less work than a full sort.
+
+template<class Sortable, class Predicate>
+PX_INLINE void ApexQuickSelectSmallestK(Sortable* start, Sortable* end, uint32_t k, const Predicate& p = Predicate())
+{
+ Sortable* origStart = start;
+ Sortable* i;
+ Sortable* j;
+ Sortable m;
+
+ for (;;)
+ {
+ i = start;
+ j = end;
+ m = *(i + ((j - i) >> 1));
+
+ while (i <= j)
+ {
+ while (p(*i, m))
+ {
+ i++;
+ }
+ while (p(m, *j))
+ {
+ j--;
+ }
+ if (i <= j)
+ {
+ if (i != j)
+ {
+ nvidia::swap(*i, *j);
+ }
+ i++;
+ j--;
+ }
+ }
+
+
+
+ if (start < j
+			&& k + origStart - 1 < j)	// The elements [origStart..j] are now the (j - origStart + 1) smallest; keep partitioning them only if k < (j - origStart + 1).
+										// If we work on this side we definitely won't need to touch the right-hand side.
+ {
+ end = j;
+ }
+ else if (i < end
+			&& k + origStart > i)	// Only keep partitioning the right side if the elements left of i do not already include the k smallest.
+									// We loop here instead of recursing.
+ {
+ start = i;
+ }
+ else
+ {
+ return;
+ }
+ }
+}
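+// Illustrative usage (not part of the original header). Note that 'end' is
+// dereferenced, so it must point at the last element, not one past it:
+//
+//     struct LessF { bool operator()(float a, float b) const { return a < b; } };
+//     float vals[8] = { 5, 1, 7, 3, 9, 2, 8, 4 };
+//     ApexQuickSelectSmallestK(vals, vals + 7, 3, LessF());
+//     // vals[0..2] now hold 1, 2 and 3 in some order.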
+
+} // namespace apex
+} // namespace nvidia
+
+#endif // APEX_QUICK_SELECT_SMALLEST_K_H
diff --git a/APEX_1.4/common/include/ApexRWLockable.h b/APEX_1.4/common/include/ApexRWLockable.h
new file mode 100644
index 00000000..2820db72
--- /dev/null
+++ b/APEX_1.4/common/include/ApexRWLockable.h
@@ -0,0 +1,126 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+
+#ifndef APEX_RW_LOCKABLE_H
+#define APEX_RW_LOCKABLE_H
+
+#include "RWLockable.h"
+#include "PsThread.h"
+#include "PsMutex.h"
+#include "PsHashMap.h"
+
+namespace nvidia
+{
+namespace apex
+{
+
+struct ThreadReadWriteCount
+{
+ ThreadReadWriteCount() : value(0) {}
+ union {
+ struct {
+ uint8_t readDepth; // depth of re-entrant reads
+ uint8_t writeDepth; // depth of re-entrant writes
+ uint8_t readLockDepth; // depth of read-locks
+ uint8_t writeLockDepth; // depth of write-locks
+ } counters;
+ uint32_t value;
+ };
+};
+
+class ApexRWLockable : public RWLockable
+{
+public:
+ ApexRWLockable();
+ virtual ~ApexRWLockable();
+
+ virtual void acquireReadLock(const char *fileName, const uint32_t lineno) const;
+ virtual void acquireWriteLock(const char *fileName, const uint32_t lineno)const;
+ virtual void releaseReadLock(void) const;
+ virtual void releaseWriteLock(void) const;
+ virtual uint32_t getReadWriteErrorCount() const;
+ bool startWrite(bool allowReentry);
+ void stopWrite(bool allowReentry);
+ nvidia::Thread::Id getCurrentWriter() const;
+
+ bool startRead() const;
+ void stopRead() const;
+
+ void setEnabled(bool);
+ bool isEnabled() const;
+private:
+ bool mEnabled;
+ mutable volatile nvidia::Thread::Id mCurrentWriter;
+ mutable nvidia::ReadWriteLock mRWLock;
+ volatile int32_t mConcurrentWriteCount;
+ mutable volatile int32_t mConcurrentReadCount;
+ mutable volatile int32_t mConcurrentErrorCount;
+ nvidia::Mutex mDataLock;
+ typedef nvidia::HashMap<nvidia::ThreadImpl::Id, ThreadReadWriteCount> DepthsHashMap_t;
+ mutable DepthsHashMap_t mData;
+};
+
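+// Convenience macro: paste into the public section of a class that derives from
+// ApexRWLockable to forward the RWLockable interface to the base implementation.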
+#define APEX_RW_LOCKABLE_BOILERPLATE \
+ virtual void acquireReadLock(const char *fileName, const uint32_t lineno) const \
+ { \
+ ApexRWLockable::acquireReadLock(fileName, lineno); \
+ } \
+ virtual void acquireWriteLock(const char *fileName, const uint32_t lineno) const\
+ { \
+ ApexRWLockable::acquireWriteLock(fileName, lineno); \
+ } \
+ virtual void releaseReadLock(void) const\
+ { \
+ ApexRWLockable::releaseReadLock(); \
+ } \
+ virtual void releaseWriteLock(void) const\
+ { \
+ ApexRWLockable::releaseWriteLock(); \
+ } \
+ virtual uint32_t getReadWriteErrorCount() const \
+ { \
+ return ApexRWLockable::getReadWriteErrorCount(); \
+ } \
+ bool startWrite(bool allowReentry) \
+ { \
+ return ApexRWLockable::startWrite(allowReentry); \
+ } \
+ void stopWrite(bool allowReentry) \
+ { \
+ ApexRWLockable::stopWrite(allowReentry); \
+ } \
+ bool startRead() const \
+ { \
+ return ApexRWLockable::startRead(); \
+ } \
+ void stopRead() const \
+ { \
+ ApexRWLockable::stopRead(); \
+ } \
+
+class ApexRWLockableScopedDisable
+{
+public:
+ ApexRWLockableScopedDisable(RWLockable*);
+ ~ApexRWLockableScopedDisable();
+private:
+ ApexRWLockableScopedDisable(const ApexRWLockableScopedDisable&);
+ ApexRWLockableScopedDisable& operator=(const ApexRWLockableScopedDisable&);
+
+ ApexRWLockable* mLockable;
+};
+
+#define APEX_RW_LOCKABLE_SCOPED_DISABLE(lockable) ApexRWLockableScopedDisable __temporaryDisable(lockable);
+
+}
+}
+
+#endif // APEX_RW_LOCKABLE_H
diff --git a/APEX_1.4/common/include/ApexRand.h b/APEX_1.4/common/include/ApexRand.h
new file mode 100644
index 00000000..1c075f7e
--- /dev/null
+++ b/APEX_1.4/common/include/ApexRand.h
@@ -0,0 +1,133 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+
+#ifndef APEX_RAND_H
+#define APEX_RAND_H
+
+#include "PxMath.h"
+#include "PxVec3.h"
+#include "ApexUsingNamespace.h"
+
+namespace nvidia
+{
+namespace apex
+{
+
+// "Quick and Dirty Symmetric Random number generator" - returns a uniform deviate in [-1.0,1.0)
+class QDSRand
+{
+ uint32_t mSeed;
+
+public:
+
+ PX_CUDA_CALLABLE PX_INLINE QDSRand(uint32_t seed = 0) : mSeed(seed) {}
+
+ PX_CUDA_CALLABLE PX_INLINE void setSeed(uint32_t seed = 0)
+ {
+ mSeed = seed;
+ }
+
+ PX_CUDA_CALLABLE PX_INLINE uint32_t seed() const
+ {
+ return mSeed;
+ }
+
+ PX_CUDA_CALLABLE PX_INLINE uint32_t nextSeed()
+ {
+ mSeed = mSeed * 1664525L + 1013904223L;
+ return mSeed;
+ }
+
+ PX_CUDA_CALLABLE PX_INLINE float getNext()
+ {
+ union U32F32
+ {
+ uint32_t u;
+ float f;
+ } r;
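+		// Set the exponent bits for the range [2.0, 4.0), fill the mantissa with
+		// random bits, then shift the result down into [-1.0, 1.0).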
+ r.u = 0x40000000 | (nextSeed() >> 9);
+ return r.f - 3.0f;
+ }
+
+ PX_CUDA_CALLABLE PX_INLINE float getScaled(const float min, const float max)
+ {
+ const float scale = (max - min) / 2.0f;
+ return ((getNext() + 1.0f) * scale) + min;
+ }
+
+ PX_CUDA_CALLABLE PX_INLINE PxVec3 getScaled(const PxVec3& min, const PxVec3& max)
+ {
+ return PxVec3(getScaled(min.x, max.x), getScaled(min.y, max.y), getScaled(min.z, max.z));
+ }
+
+ PX_CUDA_CALLABLE PX_INLINE float getUnit()
+ {
+ union U32F32
+ {
+ uint32_t u;
+ float f;
+ } r;
+ r.u = 0x3F800000 | (nextSeed() >> 9);
+ return r.f - 1.0f;
+ }
+
+};
+
+// "Quick and Dirty Normal Random number generator" - returns normally-distributed values
+class QDNormRand
+{
+ QDSRand mBase;
+
+public:
+
+ PX_CUDA_CALLABLE PX_INLINE QDNormRand(uint32_t seed = 0) : mBase(seed) {}
+
+ PX_CUDA_CALLABLE PX_INLINE void setSeed(uint32_t seed = 0)
+ {
+ mBase.setSeed(seed);
+ }
+
+ PX_CUDA_CALLABLE PX_INLINE uint32_t setSeed() const
+ {
+ return mBase.seed();
+ }
+
+ PX_CUDA_CALLABLE PX_INLINE uint32_t nextSeed()
+ {
+ return mBase.nextSeed();
+ }
+
+ PX_CUDA_CALLABLE PX_INLINE float getNext()
+ {
+		//Using the polar (Marsaglia) variant of the Box-Muller transform (see http://en.wikipedia.org/wiki/Box_Muller_transform)
+
+ float u, v, s;
+ do
+ {
+ u = mBase.getNext();
+ v = mBase.getNext();
+ s = u * u + v * v;
+ }
+		while (s >= 1.0f || s == 0.0f);	// also reject the degenerate origin, where PxLog(s) / s is undefined
+
+ return u * PxSqrt(-2.0f * PxLog(s) / s);
+ }
+
+ PX_CUDA_CALLABLE PX_INLINE float getScaled(const float m, const float s)
+ {
+ return m + s * getNext();
+ }
+};
+
+}
+} // end namespace nvidia::apex
+
+#endif
diff --git a/APEX_1.4/common/include/ApexRenderable.h b/APEX_1.4/common/include/ApexRenderable.h
new file mode 100644
index 00000000..ad4bfdfa
--- /dev/null
+++ b/APEX_1.4/common/include/ApexRenderable.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+
+#ifndef APEX_RENDERABLE_H
+#define APEX_RENDERABLE_H
+
+#include "ApexUsingNamespace.h"
+#include "PsMutex.h"
+
+#include "PxBounds3.h"
+
+namespace nvidia
+{
+namespace apex
+{
+
+/**
+ Base class for implementations of Renderable classes
+*/
+
+class ApexRenderable
+{
+public:
+ ApexRenderable()
+ {
+ mRenderBounds.setEmpty();
+ }
+ ~ApexRenderable()
+ {
+ // the PS3 Mutex cannot be unlocked without first being locked, so grab the lock
+ if (renderDataTryLock())
+ {
+ renderDataUnLock();
+ }
+ else
+ {
+ // someone is holding the lock and should not be, so assert
+ PX_ALWAYS_ASSERT();
+ }
+ }
+ void renderDataLock()
+ {
+ mRenderDataLock.lock();
+ }
+ void renderDataUnLock()
+ {
+ mRenderDataLock.unlock();
+ }
+ bool renderDataTryLock()
+ {
+ return mRenderDataLock.trylock();
+ }
+ const PxBounds3& getBounds() const
+ {
+ return mRenderBounds;
+ }
+
+protected:
+ //nvidia::Mutex mRenderDataLock;
+ // Converting to be a PS3 SPU-friendly lock
+ // On PC this is the same as a Mutex, on PS3 it is a 128b (!) aligned U32. Subclasses might get bigger on PS3 and they
+ // are most likely distributed over more than one cache line.
+ AtomicLock mRenderDataLock;
+
+ PxBounds3 mRenderBounds;
+};
+
+}
+} // end namespace nvidia::apex
+
+#endif // APEX_RENDERABLE_H
diff --git a/APEX_1.4/common/include/ApexResource.h b/APEX_1.4/common/include/ApexResource.h
new file mode 100644
index 00000000..d2ebda70
--- /dev/null
+++ b/APEX_1.4/common/include/ApexResource.h
@@ -0,0 +1,105 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+
+#ifndef APEX_RESOURCE_H
+#define APEX_RESOURCE_H
+
+#include "ApexUsingNamespace.h"
+#include "PsUserAllocated.h"
+
+namespace physx
+{
+ namespace pvdsdk
+ {
+ class PvdDataStream;
+ }
+}
+namespace nvidia
+{
+namespace apex
+{
+
+/**
+ * Class that defines the semi-public interface to ApexResource objects.
+ * A resource gets added to a list and is deleted when the list is deleted.
+ */
+class ApexResourceInterface
+{
+public:
+ virtual void release() = 0;
+ virtual void setListIndex(class ResourceList& list, uint32_t index) = 0;
+ virtual uint32_t getListIndex() const = 0;
+ virtual void initPvdInstances(pvdsdk::PvdDataStream& /*pvdStream*/) {};
+};
+
+/**
+Class that implements resource ID and bank
+*/
+class ApexResource : public UserAllocated
+{
+public:
+ ApexResource() : m_listIndex(0xFFFFFFFF), m_list(NULL) {}
+ void removeSelf();
+ virtual ~ApexResource();
+
+ uint32_t m_listIndex;
+ class ResourceList* m_list;
+};
+
+
+/**
+Initialized Template class.
+*/
+template <class DescType>class InitTemplate
+{
+	// we have to make a derived class because of the protected ctor
+public:
+ InitTemplate() : isSet(false) {}
+
+ bool isSet;
+ DescType data;
+
+
+ void set(const DescType* desc)
+ {
+ if (desc)
+ {
+ isSet = true;
+ //memcpy(this,desc, sizeof(DescType));
+ data = *desc;
+ }
+ else
+ {
+ isSet = false;
+ }
+ }
+
+
+ bool get(DescType& dest) const
+ {
+ if (isSet)
+ {
+ //memcpy(&dest,this, sizeof(DescType));
+ dest = data;
+ return true;
+ }
+ else
+ {
+ return false;
+ }
+
+ }
+};
+
+} // namespace apex
+} // namespace nvidia
+
+#endif // APEX_RESOURCE_H
diff --git a/APEX_1.4/common/include/ApexResourceHelper.h b/APEX_1.4/common/include/ApexResourceHelper.h
new file mode 100644
index 00000000..c35aced7
--- /dev/null
+++ b/APEX_1.4/common/include/ApexResourceHelper.h
@@ -0,0 +1,95 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+
+#ifndef __APEX_RESOURCE_HELPER_H__
+#define __APEX_RESOURCE_HELPER_H__
+
+#include "Apex.h"
+#include "ResourceProviderIntl.h"
+
+#if PX_PHYSICS_VERSION_MAJOR == 3
+#include <PxFiltering.h>
+#endif
+
+namespace nvidia
+{
+namespace apex
+{
+
+class ApexResourceHelper
+{
+ ApexResourceHelper() {}
+public:
+
+
+#if PX_PHYSICS_VERSION_MAJOR == 3
+ static PX_INLINE PxFilterData resolveCollisionGroup128(const char* collisionGroup128Name)
+ {
+ PxFilterData result; //default constructor sets all words to 0
+
+ if (collisionGroup128Name)
+ {
+ /* create namespace for Collision Group (if it has not already been created) */
+ ResourceProviderIntl* nrp = GetInternalApexSDK()->getInternalResourceProvider();
+ ResID collisionGroup128NS = GetInternalApexSDK()->getCollisionGroup128NameSpace();
+ ResID id = nrp->createResource(collisionGroup128NS, collisionGroup128Name);
+ const uint32_t* resourcePtr = static_cast<const uint32_t*>(nrp->getResource(id));
+ if (resourcePtr)
+ {
+ result.word0 = resourcePtr[0];
+ result.word1 = resourcePtr[1];
+ result.word2 = resourcePtr[2];
+ result.word3 = resourcePtr[3];
+ }
+ }
+ return result;
+ }
+#endif
+
+ static PX_INLINE GroupsMask64 resolveCollisionGroup64(const char* collisionGroup64Name)
+ {
+ GroupsMask64 result(0, 0);
+
+ if (collisionGroup64Name)
+ {
+ /* create namespace for Collision Group (if it has not already been created) */
+ ResourceProviderIntl* nrp = GetInternalApexSDK()->getInternalResourceProvider();
+ ResID collisionGroup64NS = GetInternalApexSDK()->getCollisionGroup64NameSpace();
+
+ ResID id = nrp->createResource(collisionGroup64NS, collisionGroup64Name);
+ const uint32_t* resourcePtr = static_cast<const uint32_t*>(nrp->getResource(id));
+ if (resourcePtr)
+ {
+ result.bits0 = resourcePtr[0];
+ result.bits1 = resourcePtr[1];
+ }
+ }
+ return result;
+ }
+
+ static PX_INLINE uint32_t resolveCollisionGroupMask(const char* collisionGroupMaskName, uint32_t defGroupMask = 0xFFFFFFFFu)
+ {
+ uint32_t groupMask = defGroupMask;
+ if (collisionGroupMaskName)
+ {
+ ResourceProviderIntl* nrp = GetInternalApexSDK()->getInternalResourceProvider();
+ ResID collisionGroupMaskNS = GetInternalApexSDK()->getCollisionGroupMaskNameSpace();
+ ResID id = nrp->createResource(collisionGroupMaskNS, collisionGroupMaskName);
+ groupMask = (uint32_t)(size_t)(nrp->getResource(id));
+ }
+ return groupMask;
+ }
+};
+
+}
+} // end namespace nvidia::apex
+
+#endif // __APEX_RESOURCE_HELPER_H__
diff --git a/APEX_1.4/common/include/ApexSDKCachedDataImpl.h b/APEX_1.4/common/include/ApexSDKCachedDataImpl.h
new file mode 100644
index 00000000..2f4e27d7
--- /dev/null
+++ b/APEX_1.4/common/include/ApexSDKCachedDataImpl.h
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+
+#ifndef __APEX_SCENE_CACHED_DATA_H__
+
+#define __APEX_SCENE_CACHED_DATA_H__
+
+#include "ApexUsingNamespace.h"
+#include "PxSimpleTypes.h"
+#include "PxFileBuf.h"
+#include "PsUserAllocated.h"
+#include "PsArray.h"
+
+#include "ApexSDKCachedData.h"
+
+namespace nvidia
+{
+namespace apex
+{
+
+class ApexScene;
+class ModuleSceneIntl;
+
+/**
+ Cached data is stored per-module.
+*/
+class ModuleCachedDataIntl : public ModuleCachedData
+{
+public:
+ virtual AuthObjTypeID getModuleID() const = 0;
+
+ virtual NvParameterized::Interface* getCachedDataForAssetAtScale(Asset& asset, const PxVec3& scale) = 0;
+ virtual PxFileBuf& serialize(PxFileBuf& stream) const = 0;
+ virtual PxFileBuf& deserialize(PxFileBuf& stream) = 0;
+ virtual void clear(bool force = true) = 0; // If force == false, data in use by actors will not be deleted
+};
+
+//**************************************************************************************************************************
+//**************************************************************************************************************************
+//**** APEX SCENE CACHED DATA
+//**************************************************************************************************************************
+//**************************************************************************************************************************
+
+class ApexSDKCachedDataImpl : public ApexSDKCachedData, public UserAllocated
+{
+public:
+ bool registerModuleDataCache(ModuleCachedDataIntl* cache);
+ bool unregisterModuleDataCache(ModuleCachedDataIntl* cache);
+
+ // ApexSDKCachedData interface
+ ApexSDKCachedDataImpl();
+ virtual ~ApexSDKCachedDataImpl();
+
+ virtual ModuleCachedData* getCacheForModule(AuthObjTypeID moduleID);
+ virtual PxFileBuf& serialize(PxFileBuf& stream) const;
+ virtual PxFileBuf& deserialize(PxFileBuf& stream);
+ virtual void clear(bool force = true);
+
+ struct Version
+ {
+ enum Enum
+ {
+ First = 0,
+
+ Count,
+ Current = Count - 1
+ };
+ };
+
+ // Data
+ physx::Array<ModuleCachedDataIntl*> mModuleCaches;
+};
+
+}
+} // end namespace nvidia::apex
+
+#endif // __APEX_SCENE_CACHED_DATA_H__
diff --git a/APEX_1.4/common/include/ApexSDKHelpers.h b/APEX_1.4/common/include/ApexSDKHelpers.h
new file mode 100644
index 00000000..a3ca7529
--- /dev/null
+++ b/APEX_1.4/common/include/ApexSDKHelpers.h
@@ -0,0 +1,236 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+
+#ifndef __APEXSDKHELPERS_H__
+#define __APEXSDKHELPERS_H__
+
+
+#include "PsArray.h"
+#include "PsSort.h"
+#include "ApexString.h"
+#include "ApexSDKIntl.h"
+#include "ApexResource.h"
+#include "ResourceProviderIntl.h"
+#include "PxMat33.h"
+#include "PsMutex.h"
+
+namespace physx
+{
+ namespace pvdsdk
+ {
+ class PvdDataStream;
+ }
+}
+namespace nvidia
+{
+namespace apex
+{
+
+enum StreamPointerToken
+{
+ SPT_INVALID_PTR,
+ SPT_VALID_PTR
+};
+
+
+/*
+ Resource list - holds a list of ApexResourceInterface objects, for quick removal
+ */
+class ResourceList: public nvidia::UserAllocated
+{
+ physx::Array<ApexResourceInterface*> mArray;
+ nvidia::ReadWriteLock mRWLock;
+
+#ifndef WITHOUT_PVD
+ // for PVD
+ const void* mOwner;
+ ApexSimpleString mListName;
+ ApexSimpleString mEntryName;
+#endif
+
+public:
+
+ ResourceList()
+#ifndef WITHOUT_PVD
+ : mOwner(NULL)
+#endif
+ {}
+ ~ResourceList();
+
+	void clear();				// explicitly free children
+
+ void add(ApexResourceInterface& resource);
+ void remove(uint32_t index);
+ uint32_t getSize() const
+ {
+ ScopedReadLock scopedLock(const_cast<nvidia::ReadWriteLock&>(mRWLock));
+ return mArray.size();
+ }
+ ApexResourceInterface* getResource(uint32_t index) const
+ {
+ ScopedReadLock scopedLock(const_cast<nvidia::ReadWriteLock&>(mRWLock));
+ return mArray[index];
+ }
+
+ template<typename Predicate>
+ void sort(const Predicate& compare)
+ {
+ ScopedWriteLock scopedLock(mRWLock);
+ uint32_t size = mArray.size();
+ if (size > 0)
+ {
+ nvidia::sort(&mArray[0], size, compare);
+ }
+
+ for (uint32_t i = 0; i < size; ++i)
+ {
+ mArray[i]->setListIndex(*this, i);
+ }
+ }
+
+#ifndef WITHOUT_PVD
+ void setupForPvd(const void* owner, const char* listName, const char* entryName);
+ void initPvdInstances(pvdsdk::PvdDataStream& pvdStream);
+#endif
+};
+
+
+#ifndef M_SQRT1_2 //1/sqrt(2)
+#define M_SQRT1_2 double(0.7071067811865475244008443621048490)
+#endif
+
+
+/*
+ Creates a rotation matrix which rotates about the axisAngle vector. The length of
+ the axisAngle vector is the desired rotation angle. In this approximation, however,
+ this is only the case for small angles. As the length of axisAngle grows (is no
+ longer very much less than 1 radian) the approximation becomes worse. As the length
+ of axisAngle approaches infinity, the rotation angle approaches pi. The exact
+ relation is:
+
+ rotation_angle = 2*atan( axisAngle.magnitude()/2 )
+
+ One use for this construction is the rotation applied to mesh particle system particles. With a
+ decent frame rate, the rotation angle should be small, unless the particle is going
+ very fast or has very small radius. In that case, or if the frame rate is poor,
+ the inaccuracy in this construction probably won't be noticed.
+
+ Error: The rotation angle is accurate to:
+ 1% up to 20 degrees
+ 10% up to 70 degrees
+*/
+PX_INLINE void approxAxisAngleToMat33(const PxVec3& axisAngle, PxMat33& rot)
+{
+ const float x = 0.5f * axisAngle.x;
+ const float y = 0.5f * axisAngle.y;
+ const float z = 0.5f * axisAngle.z;
+ const float xx = x * x;
+ const float yy = y * y;
+ const float zz = z * z;
+ const float xy = x * y;
+ const float yz = y * z;
+ const float zx = z * x;
+ const float twoRecipNorm2 = 2.0f / (1.0f + xx + yy + zz); // w = 1
+ rot(0, 0) = 1.0f - twoRecipNorm2 * (yy + zz);
+ rot(0, 1) = twoRecipNorm2 * (xy - z);
+ rot(0, 2) = twoRecipNorm2 * (zx + y);
+ rot(1, 0) = twoRecipNorm2 * (xy + z);
+ rot(1, 1) = 1.0f - twoRecipNorm2 * (zz + xx);
+ rot(1, 2) = twoRecipNorm2 * (yz - x);
+ rot(2, 0) = twoRecipNorm2 * (zx - y);
+ rot(2, 1) = twoRecipNorm2 * (yz + x);
+ rot(2, 2) = 1.0f - twoRecipNorm2 * (xx + yy);
+}
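+// For example, axisAngle = PxVec3(0.0f, 0.0f, 0.1f) yields a rotation about the
+// z axis of 2*atan(0.05) ~= 0.0999 rad instead of the intended 0.1 rad, well
+// within the 1% error bound quoted above.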
+
+
+// stl hash
+PX_INLINE uint32_t hash(const char* str, uint32_t len)
+{
+ uint32_t hash = 0;
+
+ for (uint32_t i = 0; i < len; i++)
+ {
+ hash = 5 * hash + str[i];
+ }
+
+ return hash;
+}
+
+PX_INLINE uint32_t GetStamp(ApexSimpleString& name)
+{
+ return hash(name.c_str(), name.len());
+}
+
+#if 0
+// these are poison
+void writeStreamHeader(PxFileBuf& stream, ApexSimpleString& streamName, uint32_t versionStamp);
+uint32_t readStreamHeader(const PxFileBuf& stream, ApexSimpleString& streamName);
+#endif
+
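+// Branchless selection of a component index (0 = x, 1 = y, 2 = z): m01 picks the
+// winner between x and y, m2 then compares z against that winner, and the final
+// expression combines both choices into a single index.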
+PX_INLINE uint32_t MaxElementIndex(const PxVec3& v)
+{
+ const uint32_t m01 = (uint32_t)(v.y > v.x);
+ const uint32_t m2 = (uint32_t)(v.z > v[m01]);
+ return m2 << 1 | m01 >> m2;
+}
+
+PX_INLINE uint32_t MinElementIndex(const PxVec3& v)
+{
+ const uint32_t m01 = (uint32_t)(v.y < v.x);
+ const uint32_t m2 = (uint32_t)(v.z < v[m01]);
+ return m2 << 1 | m01 >> m2;
+}
+
+PX_INLINE uint32_t MaxAbsElementIndex(const PxVec3& v)
+{
+ const PxVec3 a(PxAbs(v.x), PxAbs(v.y), PxAbs(v.z));
+ const uint32_t m01 = (uint32_t)(a.y > a.x);
+ const uint32_t m2 = (uint32_t)(a.z > a[m01]);
+ return m2 << 1 | m01 >> m2;
+}
+
+PX_INLINE uint32_t MinAbsElementIndex(const PxVec3& v)
+{
+ const PxVec3 a(PxAbs(v.x), PxAbs(v.y), PxAbs(v.z));
+ const uint32_t m01 = (uint32_t)(a.y < a.x);
+ const uint32_t m2 = (uint32_t)(a.z < a[m01]);
+ return m2 << 1 | m01 >> m2;
+}
+
+
+/******************************************************************************
+ * Helper functions for loading assets
+ *****************************************************************************/
+class ApexAssetHelper
+{
+public:
+ static void* getAssetFromName(ApexSDKIntl* sdk,
+ const char* authoringTypeName,
+ const char* assetName,
+ ResID& inOutResID,
+ ResID optionalPsID = INVALID_RESOURCE_ID);
+
+ static void* getAssetFromNameList(ApexSDKIntl* sdk,
+ const char* authoringTypeName,
+ physx::Array<AssetNameIDMapping*>& nameIdList,
+ const char* assetName,
+ ResID assetPsId = INVALID_RESOURCE_ID);
+
+ static void* getIosAssetFromName(ApexSDKIntl* sdk,
+ const char* iosTypeName,
+ const char* iosAssetName);
+
+};
+
+}
+} // end namespace nvidia::apex
+
+#endif // __APEXSDKHELPERS_H__
diff --git a/APEX_1.4/common/include/ApexSDKIntl.h b/APEX_1.4/common/include/ApexSDKIntl.h
new file mode 100644
index 00000000..6bcc91bb
--- /dev/null
+++ b/APEX_1.4/common/include/ApexSDKIntl.h
@@ -0,0 +1,265 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+
+#ifndef APEX_SDK_INTL_H
+#define APEX_SDK_INTL_H
+
+/* Framework-internal interface class */
+
+#include "ApexSDK.h"
+#include "ResourceProviderIntl.h"
+#include "PhysXObjectDescIntl.h"
+
+#include "PsString.h"
+#include "PxErrors.h"
+
+#if APEX_CUDA_SUPPORT
+namespace nvidia
+{
+ class PhysXGpuIndicator;
+}
+#endif
+
+#include <stdarg.h>
+#include <stdio.h>
+#include <string.h>
+
+namespace NvParameterized
+{
+class Traits;
+};
+
+namespace physx
+{
+namespace pvdsdk
+{
+ class ApexPvdClient;
+}
+namespace profile
+{
+ class PxProfileZone;
+}
+}
+
+namespace nvidia
+{
+namespace apex
+{
+
+class ModuleIntl;
+class Actor;
+class ApexActor;
+class AuthorableObjectIntl;
+class UserOpaqueMesh;
+
+/**
+Internal interface to the ApexSDK, available to Modules and Scenes
+*/
+class ApexSDKIntl : public ApexSDK
+{
+public:
+ /**
+	Register an authorable object type with the SDK. These IDs should be accessible
+ from the Module* public interface so users can compare them against game objects
+ in callbacks.
+ */
+ virtual AuthObjTypeID registerAuthObjType(const char*, ResID nsid) = 0;
+ virtual AuthObjTypeID registerAuthObjType(const char*, AuthorableObjectIntl* authObjPtr) = 0;
+ virtual AuthObjTypeID registerNvParamAuthType(const char*, AuthorableObjectIntl* authObjPtr) = 0;
+ virtual void unregisterAuthObjType(const char*) = 0;
+ virtual void unregisterNvParamAuthType(const char*) = 0;
+ /**
+ Query the ResID of an authorable object namespace. This is useful if you have
+ an authorable object class name, but not a module pointer.
+ */
+ virtual AuthorableObjectIntl* getAuthorableObject(const char*) = 0;
+ virtual AuthorableObjectIntl* getParamAuthObject(const char*) = 0;
+
+ virtual AssetAuthoring* createAssetAuthoring(const char* aoTypeName) = 0;
+ virtual AssetAuthoring* createAssetAuthoring(const char* aoTypeName, const char* name) = 0;
+ virtual Asset* createAsset(AssetAuthoring&, const char*) = 0;
+ virtual Asset* createAsset(NvParameterized::Interface*, const char*) = 0;
+ virtual Asset* createAsset(const char* opaqueMeshName, UserOpaqueMesh* om) = 0;
+ virtual void releaseAsset(Asset&) = 0;
+ virtual void releaseAssetAuthoring(AssetAuthoring&) = 0;
+ /**
+ Request an ApexActor pointer for an Actor
+ */
+ virtual ApexActor* getApexActor(Actor*) const = 0;
+
+ /**
+ When APEX creates a PhysX object and injects it into a PhysX scene, it must
+ register that object with the ApexSDK so that the end user can differentiate between
+ their own PhysX objects and those created by APEX. The object descriptor will also
+ provide a method for working their way back to the containing APEX data structure.
+ */
+#if PX_PHYSICS_VERSION_MAJOR == 3
+ virtual PhysXObjectDescIntl* createObjectDesc(const Actor*, const PxActor*) = 0;
+ virtual PhysXObjectDescIntl* createObjectDesc(const Actor*, const PxShape*) = 0;
+ virtual PhysXObjectDescIntl* createObjectDesc(const Actor*, const PxJoint*) = 0;
+ virtual PhysXObjectDescIntl* createObjectDesc(const Actor*, const PxCloth*) = 0;
+#endif // PX_PHYSICS_VERSION_MAJOR == 3
+
+ /**
+ Retrieve an object desc created by createObjectDesc.
+ */
+ virtual PhysXObjectDescIntl* getGenericPhysXObjectInfo(const void*) const = 0;
+
+ /**
+ When APEX deletes a PhysX object or otherwise removes it from the PhysX scene, it must
+ call this function to remove it from the ApexSDK object description cache.
+ */
+ virtual void releaseObjectDesc(void*) = 0;
+
+ /* Utility functions intended to be used internally */
+ virtual void reportError(PxErrorCode::Enum code, const char* file, int line, const char* functionName, const char* message, ...) = 0;
+
+ virtual uint32_t getCookingVersion() const = 0;
+
+ virtual void* getTempMemory(uint32_t size) = 0;
+
+ virtual void releaseTempMemory(void* data) = 0;
+
+ /**
+ * ApexScenes and Modules can query the ModuleIntl interfaces from the ApexSDK
+ */
+ virtual ModuleIntl** getInternalModules() = 0;
+
+ /**
+ * Returns the internal named resource provider. The internal interface allows
+ * resource creation and deletion
+ */
+ virtual ResourceProviderIntl* getInternalResourceProvider() = 0;
+
+ /**
+ * Allow 3rd party modules distributed as static libraries to link
+ * into the ApexSDK such that the normal ApexSDK::createModule()
+ * will still work correctly. The 3rd party lib must export a C
+ * function that instantiates their module and calls this function.
+ * The user must call that instantiation function before calling
+ * createModule() for it.
+ */
+ virtual void registerExternalModule(Module* nx, ModuleIntl* ni) = 0;
+
+ /**
+ * Allow modules to fetch these ApexSDK global name spaces
+ */
+ virtual ResID getMaterialNameSpace() const = 0;
+ virtual ResID getOpaqueMeshNameSpace() const = 0;
+ virtual ResID getCustomVBNameSpace() const = 0;
+ virtual ResID getApexMeshNameSpace() = 0;
+ virtual ResID getCollisionGroupNameSpace() const = 0;
+ virtual ResID getCollisionGroup128NameSpace() const = 0;
+ virtual ResID getCollisionGroup64NameSpace() const = 0;
+ virtual ResID getCollisionGroupMaskNameSpace() const = 0;
+ virtual ResID getPhysicalMaterialNameSpace() const = 0;
+ virtual ResID getAuthorableTypesNameSpace() const = 0;
+
+ /**
+ * Retrieve the user provided render resource manager
+ */
+ virtual UserRenderResourceManager* getUserRenderResourceManager() const = 0;
+
+ virtual NvParameterized::Traits* getParameterizedTraits() = 0;
+
+ virtual ModuleIntl* getInternalModuleByName(const char* name) = 0;
+
+ /**
+ * Update debug renderer color tables in each apex scene with NvParameterized color table.
+ */
+ virtual void updateDebugColorParams(const char* color, uint32_t val) = 0;
+
+ virtual bool getRMALoadMaterialsLazily() = 0;
+
+ virtual pvdsdk::ApexPvdClient* getApexPvdClient() = 0;
+ virtual profile::PxProfileZoneManager * getProfileZoneManager() = 0;
+ virtual profile::PxProfileZone * getProfileZone() = 0;
+
+ // applications can append strings to the APEX DLL filenames
+ virtual const char* getCustomDllNamePostfix() const = 0;
+#if PX_WINDOWS_FAMILY
+ /**
+ * Return the user-provided appGuid, or the default appGuid if the user didn't provide one.
+ */
+ virtual const char* getAppGuid() = 0;
+#endif
+
+#if APEX_CUDA_SUPPORT
+ virtual PhysXGpuIndicator* registerPhysXIndicatorGpuClient() = 0;
+ virtual void unregisterPhysXIndicatorGpuClient(PhysXGpuIndicator* gpuIndicator) = 0;
+#endif
+
+ virtual ModuleIntl *getInternalModule(Module *module) = 0;
+ virtual Module *getModule(ModuleIntl *module) = 0;
+
+ virtual void enterURR() = 0;
+ virtual void leaveURR() = 0;
+ virtual void checkURR() = 0;
+
+ PX_FORCE_INLINE PxU64 getContextId() const { return PxU64(reinterpret_cast<size_t>(this)); }
+
+protected:
+ virtual ~ApexSDKIntl() {}
+
+};
+
+/**
+Returns global SDK pointer. Not sure if we can do this.
+*/
+APEX_API ApexSDKIntl* CALL_CONV GetInternalApexSDK();
+
+}
+} // end namespace nvidia::apex
+
+#define APEX_SPRINTF_S(dest_buf, n, str_fmt, ...) \
+ shdfnd::snprintf((char *) dest_buf, n, str_fmt, ##__VA_ARGS__);
+
+// Variadic reporting macros; ##__VA_ARGS__ swallows the trailing comma when no extra arguments are passed
+#define APEX_INVALID_PARAMETER(_A, ...) \
+ nvidia::GetInternalApexSDK()->reportError(PxErrorCode::eINVALID_PARAMETER, __FILE__, __LINE__, __FUNCTION__, _A, ##__VA_ARGS__)
+#define APEX_INVALID_OPERATION(_A, ...) \
+ nvidia::GetInternalApexSDK()->reportError(PxErrorCode::eINVALID_OPERATION, __FILE__, __LINE__, __FUNCTION__, _A, ##__VA_ARGS__)
+#define APEX_INTERNAL_ERROR(_A, ...) \
+ nvidia::GetInternalApexSDK()->reportError(PxErrorCode::eINTERNAL_ERROR , __FILE__, __LINE__, __FUNCTION__, _A, ##__VA_ARGS__)
+#define APEX_DEBUG_INFO(_A, ...) \
+ nvidia::GetInternalApexSDK()->reportError(PxErrorCode::eDEBUG_INFO , __FILE__, __LINE__, __FUNCTION__, _A, ##__VA_ARGS__)
+#define APEX_DEBUG_WARNING(_A, ...) \
+ nvidia::GetInternalApexSDK()->reportError(PxErrorCode::eDEBUG_WARNING , __FILE__, __LINE__, __FUNCTION__, _A, ##__VA_ARGS__)
+#define APEX_DEPRECATED() \
+ nvidia::GetInternalApexSDK()->reportError(PxErrorCode::eINVALID_PARAMETER, __FILE__, __LINE__, __FUNCTION__, "This method is deprecated")
+#define APEX_DEPRECATED_ONCE() \
+ { static bool firstTime = true; if (firstTime) { firstTime = false; APEX_DEPRECATED(); } }
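+
+/*
+ Illustrative usage (sketch, not part of the original header): the reporting macros above
+ take printf-style format arguments, forwarded to ApexSDKIntl::reportError(). 'radius' and
+ 'assetName' below are assumed caller-side values.
+
+	APEX_INVALID_PARAMETER("radius must be positive, got %f", radius);
+	APEX_DEBUG_WARNING("asset '%s' not found", assetName);
+*/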
+
+
+#if PX_DEBUG || PX_CHECKED
+
+namespace nvidia
+{
+ namespace apex
+ {
+ class UpdateRenderResourcesScope
+ {
+ public:
+ PX_INLINE UpdateRenderResourcesScope() { GetInternalApexSDK()->enterURR(); }
+ PX_INLINE ~UpdateRenderResourcesScope() { GetInternalApexSDK()->leaveURR(); }
+ };
+ }
+}
+
+# define URR_SCOPE UpdateRenderResourcesScope updateRenderResourcesScope
+# define URR_CHECK GetInternalApexSDK()->checkURR()
+#else
+# define URR_SCOPE
+# define URR_CHECK
+#endif
+
+
+#endif // APEX_SDK_INTL_H
diff --git a/APEX_1.4/common/include/ApexShape.h b/APEX_1.4/common/include/ApexShape.h
new file mode 100644
index 00000000..f0e87b34
--- /dev/null
+++ b/APEX_1.4/common/include/ApexShape.h
@@ -0,0 +1,263 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+
+#ifndef __APEX_SHAPE_H__
+#define __APEX_SHAPE_H__
+
+#include "Apex.h"
+#include "ApexUsingNamespace.h"
+#include "PsUserAllocated.h"
+#include "Shape.h"
+
+namespace nvidia
+{
+namespace apex
+{
+
+class ApexSphereShape : public SphereShape, public UserAllocated
+{
+private:
+ float mRadius;
+ PxMat44 mTransform4x4;
+ PxMat44 mOldTransform4x4;
+ PxBounds3 mBounds;
+
+ void calculateAABB();
+
+public:
+
+ ApexSphereShape();
+
+ virtual ~ApexSphereShape() {};
+
+ virtual void releaseApexShape()
+ {
+ delete this;
+ }
+
+ const ApexSphereShape* isSphereGeom() const
+ {
+ return this;
+ }
+
+ bool intersectAgainstAABB(PxBounds3 bounds);
+
+ PxBounds3 getAABB() const
+ {
+ return mBounds;
+ }
+
+ void setRadius(float radius);
+
+ float getRadius() const
+ {
+ return mRadius;
+ };
+
+ void setPose(PxMat44 pose);
+
+ PxMat44 getPose() const
+ {
+ return mTransform4x4;
+ }
+
+ PxMat44 getPreviousPose() const
+ {
+ return mOldTransform4x4;
+ }
+
+ void visualize(RenderDebugInterface* renderer) const;
+};
+
+//the capsule is oriented along the y axis by default and its total height is height+2*radius
+class ApexCapsuleShape : public CapsuleShape, public UserAllocated
+{
+private:
+
+ float mRadius;
+ float mHeight;
+ PxMat44 mTransform4x4;
+ PxMat44 mOldTransform4x4;
+ PxBounds3 mBounds;
+ void calculateAABB();
+
+public:
+
+ ApexCapsuleShape();
+
+ virtual ~ApexCapsuleShape() {};
+
+ virtual void releaseApexShape()
+ {
+ delete this;
+ }
+
+ const ApexCapsuleShape* isCapsuleGeom() const
+ {
+ return this;
+ }
+
+ bool intersectAgainstAABB(PxBounds3 bounds);
+
+ PxBounds3 getAABB() const
+ {
+ return mBounds;
+ }
+
+ void setDimensions(float height, float radius);
+
+ void getDimensions(float& height, float& radius) const
+ {
+ radius = mRadius;
+ height = mHeight;
+ };
+
+ void setPose(PxMat44 pose);
+
+ PxMat44 getPose() const
+ {
+ return mTransform4x4;
+ }
+
+ PxMat44 getPreviousPose() const
+ {
+ return mOldTransform4x4;
+ }
+
+ void visualize(RenderDebugInterface* renderer) const;
+};
+
+class ApexBoxShape : public BoxShape, public UserAllocated
+{
+private:
+
+ PxVec3 mSize;
+ PxMat44 mTransform4x4;
+ PxMat44 mOldTransform4x4;
+ PxBounds3 mBounds;
+ void calculateAABB();
+
+public:
+
+ ApexBoxShape();
+
+ virtual ~ApexBoxShape() {};
+
+ virtual void releaseApexShape()
+ {
+ delete this;
+ }
+
+ const ApexBoxShape* isBoxGeom() const
+ {
+ return this;
+ }
+
+ bool intersectAgainstAABB(PxBounds3 bounds);
+
+ PxBounds3 getAABB() const
+ {
+ return mBounds;
+ }
+
+ void setSize(PxVec3 size);
+
+ void setPose(PxMat44 pose);
+
+ PxMat44 getPose() const
+ {
+ return mTransform4x4;
+ }
+
+ PxMat44 getPreviousPose() const
+ {
+ return mOldTransform4x4;
+ }
+
+ PxVec3 getSize() const
+ {
+ return mSize;
+ }
+
+ void visualize(RenderDebugInterface* renderer) const;
+};
+
+class ApexHalfSpaceShape : public HalfSpaceShape, public UserAllocated
+{
+private:
+ PxVec3 mOrigin;
+ PxVec3 mPreviousOrigin;
+ PxVec3 mNormal;
+ PxVec3 mPreviousNormal;
+ bool isPointInside(PxVec3 pos);
+
+public:
+ ApexHalfSpaceShape();
+
+ virtual ~ApexHalfSpaceShape() {};
+
+ virtual void releaseApexShape()
+ {
+ delete this;
+ }
+
+ const ApexHalfSpaceShape* isHalfSpaceGeom() const
+ {
+ return this;
+ }
+
+ bool intersectAgainstAABB(PxBounds3 bounds);
+
+ PxBounds3 getAABB() const
+ {
+ return PxBounds3(PxVec3(0), PxVec3(PX_MAX_F32));
+ }
+
+ void setOriginAndNormal(PxVec3 origin, PxVec3 normal);
+
+ PxVec3 getNormal() const
+ {
+ return mNormal;
+ };
+
+ PxVec3 getPreviousNormal() const
+ {
+ return mPreviousNormal;
+ };
+
+ PxVec3 getOrigin() const
+ {
+ return mOrigin;
+ };
+
+ PxVec3 getPreviousOrigin() const
+ {
+ return mPreviousOrigin;
+ };
+
+ PxMat44 getPose() const;
+
+ PxMat44 getPreviousPose() const;
+
+ void visualize(RenderDebugInterface* renderer) const;
+
+ virtual void setPose(PxMat44 pose)
+ {
+ PX_UNUSED(pose);
+ //dummy
+ }
+};
+
+
+}
+} // end namespace nvidia::apex
+
+#endif
diff --git a/APEX_1.4/common/include/ApexSharedUtils.h b/APEX_1.4/common/include/ApexSharedUtils.h
new file mode 100644
index 00000000..337f40cb
--- /dev/null
+++ b/APEX_1.4/common/include/ApexSharedUtils.h
@@ -0,0 +1,2364 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+
+#ifndef APEXSHAREDUTILS_H
+#define APEXSHAREDUTILS_H
+
+#include "ApexUsingNamespace.h"
+#include "ApexDefs.h"
+#include "IProgressListener.h"
+#include "RenderMeshAsset.h"
+
+#include "PxStreamFromFileBuf.h"
+
+#include "ApexString.h"
+#include "PsArray.h"
+#include "ConvexHullMethod.h"
+#include "PxPlane.h"
+
+#include "ConvexHullParameters.h"
+
+#include "Cof44.h"
+
+#if PX_PHYSICS_VERSION_MAJOR == 3
+namespace physx
+{
+class PxConvexMesh;
+}
+typedef physx::PxConvexMesh ConvexMesh;
+#endif
+
+namespace nvidia
+{
+namespace apex
+{
+
+PX_INLINE PxPlane toPxPlane(const ConvexHullParametersNS::Plane_Type& plane)
+{
+ return PxPlane(plane.normal.x, plane.normal.y, plane.normal.z, plane.d);
+}
+
+/*
+File-local functions and definitions
+*/
+
+
+/*
+Global utilities
+*/
+
+// Diagonalize a symmetric 3x3 matrix. Returns the eigenvectors in the first parameter, eigenvalues as the return value.
+PxVec3 diagonalizeSymmetric(PxMat33& eigenvectors, const PxMat33& m);
+
+
+
+PX_INLINE bool worldToLocalRay(PxVec3& localorig, PxVec3& localdir,
+ const PxVec3& worldorig, const PxVec3& worlddir,
+ const physx::PxTransform& localToWorldRT, const PxVec3& scale)
+{
+ // Invert scales
+ const float detS = scale.x * scale.y * scale.z;
+ if (detS == 0.0f)
+ {
+ return false; // Not handling singular TMs
+ }
+ const float recipDetS = 1.0f / detS;
+
+ // Is it faster to do a bunch of multiplies, or a few divides?
+ const PxVec3 invS(scale.y * scale.z * recipDetS, scale.z * scale.x * recipDetS, scale.x * scale.y * recipDetS);
+
+ // Create hull-local ray
+ localorig = localToWorldRT.transformInv(worldorig);
+ localorig = invS.multiply(localorig);
+ localdir = localToWorldRT.rotateInv(worlddir);
+ localdir = invS.multiply(localdir);
+
+ return true;
+}
+
+// barycentric utilities
+/**
+Computes the barycentric coordinates of a point with respect to a triangle.
+\param pa [in] first vertex of triangle
+\param pb [in] second vertex of triangle
+\param pc [in] third vertex of triangle
+\param p [in] point to generate barycentric coordinates for
+\param s [out] the first barycentric coordinate
+\param t [out] the second barycentric coordinate
+\note the third barycentric coordinate is defined as (1 - s - t)
+*/
+void generateBarycentricCoordinatesTri(const PxVec3& pa, const PxVec3& pb, const PxVec3& pc, const PxVec3& p, float& s, float& t);
+void generateBarycentricCoordinatesTet(const PxVec3& pa, const PxVec3& pb, const PxVec3& pc, const PxVec3& pd, const PxVec3& p, PxVec3& bary);
+
+struct OverlapLineSegmentAABBCache
+{
+ PxVec3 sgnDir;
+ PxVec3 invDir;
+};
+
+PX_INLINE void computeOverlapLineSegmentAABBCache(OverlapLineSegmentAABBCache& cache, const PxVec3& segmentDisp)
+{
+ cache.sgnDir = PxVec3((float)(1 - (((int)(segmentDisp.x < 0.0f)) << 1)), (float)(1 - (((int)(segmentDisp.y < 0.0f)) << 1)), (float)(1 - (((int)(segmentDisp.z < 0.0f)) << 1)));
+ PxVec3 absDir = cache.sgnDir.multiply(segmentDisp);
+ absDir += PxVec3(PX_EPS_F32); // To avoid divide-by-zero
+ cache.invDir = PxVec3(absDir.y * absDir.z, absDir.z * absDir.x, absDir.x * absDir.y);
+ cache.invDir *= 1.0f / (absDir.x * cache.invDir.x);
+}
+
+PX_INLINE bool overlapLineSegmentAABBCached(const PxVec3& segmentOrig, const OverlapLineSegmentAABBCache& cache, const PxBounds3& aabb)
+{
+ const PxVec3 center = 0.5f * (aabb.maximum + aabb.minimum);
+ const PxVec3 radii = 0.5f * (aabb.maximum - aabb.minimum);
+ PxVec3 disp = (center - segmentOrig).multiply(cache.sgnDir);
+ PxVec3 tMin = (disp - radii).multiply(cache.invDir);
+ PxVec3 tMax = (disp + radii).multiply(cache.invDir);
+ int maxMinIndex = tMin.y > tMin.x;
+ const int maxMinIndexIs2 = tMin.z > tMin[(unsigned int)maxMinIndex];
+ maxMinIndex = (maxMinIndex | maxMinIndexIs2) << maxMinIndexIs2;
+ int minMaxIndex = tMax.y < tMax.x;
+ const int minMaxIndexIs2 = tMax.z > tMax[(unsigned int)minMaxIndex];
+ minMaxIndex = (minMaxIndex | minMaxIndexIs2) << minMaxIndexIs2;
+ const float tIn = tMin[(unsigned int)maxMinIndex];
+ const float tOut = tMax[(unsigned int)minMaxIndex];
+ return tIn < tOut && tOut > 0.0f && tIn < 1.0f;
+}
+
+PX_INLINE bool overlapLineSegmentAABB(const PxVec3& segmentOrig, const PxVec3& segmentDisp, const PxBounds3& aabb)
+{
+ OverlapLineSegmentAABBCache cache;
+ computeOverlapLineSegmentAABBCache(cache, segmentDisp);
+ return overlapLineSegmentAABBCached(segmentOrig, cache, aabb);
+}
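+
+/*
+ Illustrative usage (sketch): when testing one segment against many AABBs, compute the
+ cache once and reuse it for every box. 'segmentOrig', 'segmentDisp' and 'boxes' are
+ assumed caller-side values.
+
+	OverlapLineSegmentAABBCache cache;
+	computeOverlapLineSegmentAABBCache(cache, segmentDisp);
+	for (uint32_t i = 0; i < boxes.size(); ++i)
+	{
+		if (overlapLineSegmentAABBCached(segmentOrig, cache, boxes[i]))
+		{
+			// segment overlaps boxes[i]
+		}
+	}
+*/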
+
+struct IntPair
+{
+ void set(int32_t _i0, int32_t _i1)
+ {
+ i0 = _i0;
+ i1 = _i1;
+ }
+
+ int32_t i0, i1;
+
+ static int compare(const void* a, const void* b)
+ {
+ const int32_t diff0 = ((IntPair*)a)->i0 - ((IntPair*)b)->i0;
+ return diff0 ? diff0 : (((IntPair*)a)->i1 - ((IntPair*)b)->i1);
+ }
+};
+
+PX_INLINE PxFileBuf& operator >> (PxFileBuf& stream, IntPair& p)
+{
+ p.i0 = (int32_t)stream.readDword();
+ p.i1 = (int32_t)stream.readDword();
+ return stream;
+}
+PX_INLINE PxFileBuf& operator << (PxFileBuf& stream, const IntPair& p)
+{
+ stream.storeDword((uint32_t)p.i0);
+ stream.storeDword((uint32_t)p.i1);
+ return stream;
+}
+
+struct BoundsRep
+{
+ BoundsRep() : type(0)
+ {
+ aabb.setEmpty();
+ }
+
+ PxBounds3 aabb;
+	uint32_t type;	// Interaction group; by default overlaps are reported only between bounds of the same type (configurable via BoundsInteractions). Valid range {0...7}
+};
+
+struct BoundsInteractions
+{
+ BoundsInteractions() : bits(0x8040201008040201ULL) {}
+ BoundsInteractions(bool setAll) : bits(setAll ? 0xFFFFFFFFFFFFFFFFULL : 0x0000000000000000ULL) {}
+
+ bool set(unsigned group1, unsigned group2, bool interacts)
+ {
+ if (group1 >= 8 || group2 >= 8)
+ {
+ return false;
+ }
+ const uint64_t mask = (uint64_t)1 << ((group1 << 3) + group2) | (uint64_t)1 << ((group2 << 3) + group1);
+ if (interacts)
+ {
+ bits |= mask;
+ }
+ else
+ {
+ bits &= ~mask;
+ }
+ return true;
+ }
+
+ uint64_t bits;
+};
+
+enum Bounds3Axes
+{
+ Bounds3X = 1,
+ Bounds3Y = 2,
+ Bounds3Z = 4,
+
+ Bounds3XY = Bounds3X | Bounds3Y,
+ Bounds3YZ = Bounds3Y | Bounds3Z,
+ Bounds3ZX = Bounds3Z | Bounds3X,
+
+ Bounds3XYZ = Bounds3X | Bounds3Y | Bounds3Z
+};
+
+void boundsCalculateOverlaps(physx::Array<IntPair>& overlaps, Bounds3Axes axesToUse, const BoundsRep* bounds, uint32_t boundsCount, uint32_t boundsByteStride,
+ const BoundsInteractions& interactions = BoundsInteractions(), bool append = false);
+
+
+/*
+Descriptor for building a ConvexHullImpl, below
+*/
+class ConvexHullDesc
+{
+public:
+ const void* vertices;
+ uint32_t numVertices;
+ uint32_t vertexStrideBytes;
+ uint32_t* indices;
+ uint32_t numIndices;
+ uint8_t* faceIndexCounts;
+ uint32_t numFaces;
+
+ ConvexHullDesc() :
+ vertices(NULL),
+ numVertices(0),
+ vertexStrideBytes(0),
+ indices(NULL),
+ numIndices(0),
+ faceIndexCounts(NULL),
+ numFaces(0)
+ {
+ }
+
+ bool isValid() const
+ {
+ return
+ vertices != NULL &&
+ numVertices != 0 &&
+ vertexStrideBytes != 0 &&
+ indices != NULL &&
+ numIndices != 0 &&
+ faceIndexCounts != NULL &&
+ numFaces != 0;
+ }
+};
+
+/*
+ConvexHullImpl - precomputed (redundant) information about a convex hull: vertices, hull planes, etc.
+*/
+class ConvexHullImpl
+{
+public:
+ struct Separation
+ {
+ PxPlane plane;
+ float min0, max0, min1, max1;
+
+ float getDistance()
+ {
+ return PxMax(min0 - max1, min1 - max0);
+ }
+ };
+
+ ConvexHullImpl();
+ ConvexHullImpl(const ConvexHullImpl& hull) : mParams(NULL), mOwnsParams(false)
+ {
+ *this = hull;
+ }
+ virtual ~ConvexHullImpl();
+
+ // If params == NULL, this will build (and own) its own internal parameters
+ void init(NvParameterized::Interface* params = NULL);
+
+ ConvexHullImpl& operator = (const ConvexHullImpl& hull)
+ {
+ mParams = hull.mParams;
+ mOwnsParams = false;
+ return *this;
+ }
+
+ // Only returns non-NULL value if this object owns its parameters.
+ NvParameterized::Interface* giveOwnersipOfNvParameters();
+
+ // Releases parameters if it owns them
+ void term();
+
+ void buildFromDesc(const ConvexHullDesc& desc);
+ void buildFromPoints(const void* points, uint32_t numPoints, uint32_t pointStrideBytes);
+ void buildFromPlanes(const PxPlane* planes, uint32_t numPlanes, float eps);
+#if PX_PHYSICS_VERSION_MAJOR == 3
+ void buildFromConvexMesh(const ConvexMesh* mesh);
+#endif
+ void buildFromAABB(const PxBounds3& aabb);
+ void buildKDOP(const void* points, uint32_t numPoints, uint32_t pointStrideBytes, const PxVec3* directions, uint32_t numDirections);
+
+ void intersectPlaneSide(const PxPlane& plane);
+ void intersectHull(const ConvexHullImpl& hull);
+
+ // If the distance between the hulls exceeds maxDistance, false is returned.
+ // Otherwise, true is returned. In this case, if 'separation' is not NULL, then separation plane
+ // and projected extents are returned in *separation.
+ static bool hullsInProximity(const ConvexHullImpl& hull0, const physx::PxTransform& localToWorldRT0, const PxVec3& scale0,
+ const ConvexHullImpl& hull1, const physx::PxTransform& localToWorldRT1, const PxVec3& scale1,
+ float maxDistance, Separation* separation = NULL);
+
+ // If the distance between this hull and the given sphere exceeds maxDistance, false is returned.
+ // Otherwise, true is returned. In this case, if 'separation' is not NULL, then separation plane
+ // and projected extents are returned in *separation. The '0' values will correspond to the hull,
+ // and the '1' values to the sphere.
+ bool sphereInProximity(const physx::PxTransform& hullLocalToWorldRT, const PxVec3& hullScale,
+ const PxVec3& sphereWorldCenter, float sphereRadius,
+ float maxDistance, Separation* separation = NULL);
+
+#if PX_PHYSICS_VERSION_MAJOR == 3
+ bool intersects(const PxShape& shape, const physx::PxTransform& localToWorldRT, const PxVec3& scale, float padding) const;
+#endif
+
+	// dir need not be normalized.  in & out times are relative to the length of dir
+ // N.B. in & out are both input and output variables:
+ // input: in = minimum possible ray intersect time
+ // out = maximum possible ray intersect time
+ // output: in = time ray enters hull
+ // out = time ray exits hull
+ bool rayCast(float& in, float& out, const PxVec3& orig, const PxVec3& dir,
+ const physx::PxTransform& localToWorldRT, const PxVec3& scale, PxVec3* normal = NULL) const;
+
+ // in & out times are relative to worldDisp length
+ // N.B. in & out are both input and output variables:
+ // input: in = minimum possible ray intersect time
+ // out = maximum possible ray intersect time
+ // output: in = time ray enters hull
+ // out = time ray exits hull
+ bool obbSweep(float& in, float& out, const PxVec3& worldBoxCenter, const PxVec3& worldBoxExtents, const PxVec3 worldBoxAxes[3],
+ const PxVec3& worldDisp, const physx::PxTransform& localToWorldRT, const PxVec3& scale, PxVec3* normal = NULL) const;
+
+ // Returns the min and max dot product of the vertices with the given normal
+ void extent(float& min, float& max, const PxVec3& normal) const;
+
+ void fill(physx::Array<PxVec3>& outPoints, const physx::PxTransform& localToWorldRT, const PxVec3& scale,
+ float spacing, float jitter, uint32_t maxPoints, bool adjustSpacing) const;
+
+ void setEmpty()
+ {
+ NvParameterized::Handle handle(*mParams);
+ mParams->getParameterHandle("vertices", handle);
+ mParams->resizeArray(handle, 0);
+ mParams->getParameterHandle("uniquePlanes", handle);
+ mParams->resizeArray(handle, 0);
+ mParams->getParameterHandle("widths", handle);
+ mParams->resizeArray(handle, 0);
+ mParams->getParameterHandle("edges", handle);
+ mParams->resizeArray(handle, 0);
+ mParams->bounds.setEmpty();
+ mParams->volume = 0.0f;
+ mParams->uniqueEdgeDirectionCount = 0;
+ mParams->planeCount = 0;
+ }
+
+ bool isEmpty() const
+ {
+ PX_ASSERT(mParams->bounds.isEmpty() == (mParams->vertices.arraySizes[0] == 0));
+ PX_ASSERT(mParams->bounds.isEmpty() == (mParams->uniquePlanes.arraySizes[0] == 0));
+ PX_ASSERT(mParams->bounds.isEmpty() == (mParams->widths.arraySizes[0] == 0));
+ PX_ASSERT(mParams->bounds.isEmpty() == (mParams->edges.arraySizes[0] == 0));
+ PX_ASSERT(mParams->bounds.isEmpty() == (mParams->volume == 0.0f));
+ return mParams->bounds.isEmpty();
+ }
+
+ uint32_t getVertexCount() const
+ {
+ return (uint32_t)mParams->vertices.arraySizes[0];
+ }
+ const PxVec3& getVertex(uint32_t index) const
+ {
+ return mParams->vertices.buf[index];
+ }
+
+ uint32_t getPlaneCount() const
+ {
+ return mParams->planeCount;
+ }
+ uint32_t getUniquePlaneNormalCount() const
+ {
+ return (uint32_t)mParams->uniquePlanes.arraySizes[0];
+ }
+ PxPlane getPlane(uint32_t index) const
+ {
+ PX_ASSERT(index < getPlaneCount());
+ if (index < (uint32_t)mParams->uniquePlanes.arraySizes[0])
+ {
+ return toPxPlane(mParams->uniquePlanes.buf[index]);
+ }
+ index -= mParams->uniquePlanes.arraySizes[0];
+ PxPlane plane = toPxPlane(mParams->uniquePlanes.buf[index]);
+ plane.n = -plane.n;
+ plane.d = -plane.d - mParams->widths.buf[index];
+ return plane;
+ }
+
+ uint32_t getWidthCount() const
+ {
+ return (uint32_t)mParams->widths.arraySizes[0];
+ }
+ float getWidth(uint32_t index) const
+ {
+ return mParams->widths.buf[index];
+ }
+
+ uint32_t getEdgeCount() const
+ {
+ return (uint32_t)mParams->edges.arraySizes[0];
+ }
+ uint32_t getEdgeEndpointIndex(uint32_t edgeIndex, uint32_t endpointIndex) const // endpointIndex = 0 or 1
+ {
+ PX_ASSERT(edgeIndex < getEdgeCount());
+ PX_ASSERT((endpointIndex & 1) == endpointIndex);
+ endpointIndex &= 1;
+ const uint32_t edge = mParams->edges.buf[edgeIndex];
+ return (endpointIndex & 1) ? (edge & 0x0000FFFF) : (edge >> 16);
+ }
+ uint32_t getEdgeAdjacentFaceIndex(uint32_t edgeIndex, uint32_t adjacencyIndex) const // adjacencyIndex = 0 or 1
+ {
+ PX_ASSERT(edgeIndex < getEdgeCount());
+ PX_ASSERT((adjacencyIndex & 1) == adjacencyIndex);
+ adjacencyIndex &= 1;
+ const uint32_t adj = mParams->adjacentFaces.buf[edgeIndex];
+ return (adjacencyIndex & 1) ? (adj & 0x0000FFFF) : (adj >> 16);
+ }
+ uint32_t getUniqueEdgeDirectionCount() const
+ {
+ return mParams->uniqueEdgeDirectionCount;
+ }
+ PxVec3 getEdgeDirection(uint32_t index) const
+ {
+ PX_ASSERT(index < getEdgeCount());
+ uint32_t edge = mParams->edges.buf[index];
+ return mParams->vertices.buf[edge & 0xFFFF] - mParams->vertices.buf[edge >> 16];
+ }
+
+ const PxBounds3& getBounds() const
+ {
+ return mParams->bounds;
+ }
+ float getVolume() const
+ {
+ return (float)mParams->volume;
+ }
+
+ // transform may include an arbitrary 3x3 block and a translation
+ void applyTransformation(const PxMat44& tm)
+ {
+ PX_ASSERT(mParams);
+
+ const float det3 = PxMat33(tm.getBasis(0), tm.getBasis(1), tm.getBasis(2)).getDeterminant();
+ PX_ASSERT(det3 > 0.0f); // mirroring or degeneracy won't work well here
+
+ // planes and slab widths
+ const Cof44 cof(tm);
+ const uint32_t numPlanes = (uint32_t)mParams->uniquePlanes.arraySizes[0];
+ ConvexHullParametersNS::Plane_Type* planes = mParams->uniquePlanes.buf;
+ PX_ASSERT(planes);
+ PX_ASSERT(numPlanes == (uint32_t)mParams->widths.arraySizes[0]);
+ float* widths = mParams->widths.buf;
+ PX_ASSERT(widths);
+ for (uint32_t i = 0; i < numPlanes; i++)
+ {
+ PxPlane src(planes[i].normal, planes[i].d);
+ PxPlane dst;
+ cof.transform(src, dst);
+ planes[i].normal = dst.n;
+ planes[i].d = dst.d;
+ const float n2 = dst.n.magnitudeSquared();
+ if (n2 > 0.0f)
+ {
+ const float recipN = PxRecipSqrt(n2);
+ planes[i].normal *= recipN;
+ planes[i].d *= recipN;
+ widths[i] *= det3*recipN;
+ }
+ }
+
+ // vertices
+ const uint32_t numVertices = (uint32_t)mParams->vertices.arraySizes[0];
+ PxVec3* vertices = mParams->vertices.buf;
+ PX_ASSERT(vertices);
+
+ mParams->bounds.setEmpty();
+ for (uint32_t i = 0; i < numVertices; i++)
+ {
+ vertices[i] = tm.transform(vertices[i]);
+ mParams->bounds.include(vertices[i]);
+ }
+
+ // volume
+ mParams->volume *= det3;
+ }
+
+ // Special case - transformation must be a pure rotation plus translation, and we only allow a positive, uniform scale
+ // Note, we could implement this with applyTransformation(const PxMat44& tm), above, but we will keep this
+ // old implementation to ensure that behavior doesn't change
+ void applyTransformation(const PxMat44& transformation, float scale)
+ {
+ PX_ASSERT(mParams);
+ PX_ASSERT(scale > 0.0f); // negative scale won't work well here
+
+ // planes
+ const uint32_t numPlanes = (uint32_t)mParams->uniquePlanes.arraySizes[0];
+ ConvexHullParametersNS::Plane_Type* planes = mParams->uniquePlanes.buf;
+ PX_ASSERT(planes);
+ for (uint32_t i = 0; i < numPlanes; i++)
+ {
+ planes[i].normal = transformation.rotate(planes[i].normal);
+ planes[i].d *= scale;
+ }
+
+ // slab widths
+ const uint32_t numWidths = (uint32_t)mParams->widths.arraySizes[0];
+ float* widths = mParams->widths.buf;
+ PX_ASSERT(widths);
+ for (uint32_t i = 0; i < numWidths; i++)
+ {
+ widths[i] *= scale;
+ }
+
+ // vertices
+ const uint32_t numVertices = (uint32_t)mParams->vertices.arraySizes[0];
+ PxVec3* vertices = mParams->vertices.buf;
+ PX_ASSERT(vertices);
+
+ mParams->bounds.setEmpty();
+ for (uint32_t i = 0; i < numVertices; i++)
+ {
+ vertices[i] = transformation.transform(vertices[i]) * scale; // Works since scale is uniform
+ mParams->bounds.include(vertices[i]);
+ }
+
+ // volume
+ mParams->volume *= scale*scale*scale;
+ }
+
+#if PX_PHYSICS_VERSION_MAJOR == 3
+ // Returns the number of vertices and faces of the cooked mesh. If inflated = false,
+ // these should be the same as the values returned by getVertexCount() and getPlaneCount().
+ // However, the numerical properties of the cooker could result in different values. If inflated = true,
+ // then sharp edges will be beveled by the cooker, resulting in more vertices and faces.
+ // Note: the number of edges E may be calculated from the number of vertices V and faces F using E = V + F - 2.
+ // Return value = size in bytes of the cooked convex mesh
+ uint32_t calculateCookedSizes(uint32_t& vertexCount, uint32_t& faceCount, bool inflated) const;
+
+ // Removes vertices from the hull until the bounds given in the function's parameters are met.
+ // If inflated = true, then the maximum counts given are compared with the cooked hull, which may have higher counts due to beveling.
+ // Note: a value of zero indicates no limit, effectively infinite.
+ // Return value: true if successful, i.e. the limits were met. False otherwise.
+ bool reduceHull(uint32_t maxVertexCount, uint32_t maxEdgeCount, uint32_t maxFaceCount, bool inflated);
+
+ // Replaces vertices with cooked, un-inflated vertices, if the latter set is smaller. Returns true if the number of vertices is reduced.
+ bool reduceByCooking();
+#endif
+
+ // Utility function
+ static bool createKDOPDirections(physx::Array<PxVec3>& directions, ConvexHullMethod::Enum method);
+
+// DeclareArray(PxVec3) vertices;
+// DeclareArray(PxPlane) uniquePlanes; // These are the unique face directions. If there is an opposite face, the corresponding widths[i] will give its distance
+// physx::Array<float> widths; // Same size as uniquePlanes. Gives width of hull in uniquePlane direction
+// physx::Array<uint32_t> edges; // Vertex indices stored in high/low words. The first uniqueEdgeDirectionCount elements give the unique directions.
+// PxBounds3 bounds;
+// float volume;
+// uint32_t uniqueEdgeDirectionCount;
+// uint32_t planeCount; // Total number of faces. Greater than or equal to size of uniquePlanes.
+
+ ConvexHullParameters* mParams;
+ bool mOwnsParams;
+};
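+
+/*
+ Illustrative usage (a sketch, not part of the original header): building a hull from a
+ point cloud and ray casting against it. 'points', 'numPoints', 'rayOrig', 'rayDir' and
+ 'pose' are assumed caller-side values; tIn/tOut must be seeded with the allowed
+ parameter range, as documented on rayCast above.
+
+	ConvexHullImpl hull;
+	hull.init();	// hull builds and owns its own parameters
+	hull.buildFromPoints(points, numPoints, sizeof(PxVec3));
+	float tIn = 0.0f, tOut = PX_MAX_F32;
+	PxVec3 normal;
+	if (hull.rayCast(tIn, tOut, rayOrig, rayDir, pose, PxVec3(1.0f), &normal))
+	{
+		// ray enters the hull at rayOrig + tIn * rayDir
+	}
+	hull.term();	// releases the owned parameters
+*/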
+
+
+/*
+ConvexMeshBuilder - creates triangles for a convex hull defined by a set of planes. Copied from physx samples (RenderClothActor)
+*/
+struct ConvexMeshBuilder
+{
+ ConvexMeshBuilder(const PxVec4* planes)
+ : mPlanes(planes)
+ {}
+
+ void operator()(uint32_t mask, float scale=1.0f);
+
+ const PxVec4* mPlanes;
+ Array<PxVec3> mVertices;
+ Array<uint16_t> mIndices;
+};
+
+
+// Fast reciprocal square root; uses inline SSE assembly when APEX_SUPPORT_SSE is defined, otherwise falls back to 1/sqrtf
+PX_INLINE float RecipSqrt(float x)
+{
+#if defined( APEX_SUPPORT_SSE )
+ const float three = 3.0f;
+ const float oneHalf = 0.5f;
+ float y;
+ _asm
+ {
+ movss xmm2, three;
+ rsqrtss xmm0, x
+ movss xmm1, xmm0
+ mulss xmm1, oneHalf
+ mulss xmm0, xmm0
+ mulss xmm0, x
+ subss xmm2, xmm0
+ mulss xmm1, xmm2
+ movss y, xmm1
+ }
+ return y;
+#else
+ return 1.0f / sqrtf(x);
+#endif
+}
+
+/*
+ Array find utility
+ */
+
+// If t is found in array, index is set to the array element and the function returns true
+// If t is not found in the array, index is not modified and the function returns false
+template<class T>
+bool arrayFind(uint32_t& index, const T& t, const physx::Array<T>& array)
+{
+ const uint32_t size = array.size();
+ for (uint32_t i = 0; i < size; ++i)
+ {
+ if (array[i] == t)
+ {
+ index = i;
+ return true;
+ }
+ }
+ return false;
+}
+
+#include "ApexUsingNamespace.h"
+#if PX_X64
+#pragma warning(push)
+#pragma warning(disable: 4324) // 'IndexBank<IndexType>' : structure was padded due to __declspec(align())
+#endif
+
+/*
+ Index bank - double-sided free list for O(1) borrow/return of unique IDs
+
+ Type IndexType should be an unsigned integer type or something that can be cast to and from
+ an integer
+ */
+template <class IndexType>
+class IndexBank
+{
+ public:
+ IndexBank<IndexType>(uint32_t capacity = 0) : indexCount(0), capacityLocked(false)
+ {
+ maxCapacity = calculateMaxCapacity();
+ reserve_internal(capacity);
+ }
+
+ // Copy constructor
+ IndexBank<IndexType>(const IndexBank<IndexType>& other)
+ {
+ *this = other;
+ }
+
+ virtual ~IndexBank<IndexType>() {}
+
+ // Assignment operator
+ IndexBank<IndexType>& operator = (const IndexBank<IndexType>& other)
+ {
+ indices = other.indices;
+ ranks = other.ranks;
+ maxCapacity = other.maxCapacity;
+ indexCount = other.indexCount;
+ capacityLocked = other.capacityLocked;
+ return *this;
+ }
+
+ void setIndicesAndRanks(uint16_t* indicesIn, uint16_t* ranksIn, uint32_t capacityIn, uint32_t usedCountIn)
+ {
+ indexCount = usedCountIn;
+ reserve_internal(capacityIn);
+ for (uint32_t i = 0; i < capacityIn; ++i)
+ {
+ indices[i] = indicesIn[i];
+ ranks[i] = ranksIn[i];
+ }
+ }
+
+ void clear(uint32_t capacity = 0, bool used = false)
+ {
+ capacityLocked = false;
+ indices.reset();
+ ranks.reset();
+ reserve_internal(capacity);
+ if (used)
+ {
+ indexCount = capacity;
+ indices.resize(capacity);
+ for (IndexType i = (IndexType)0; i < (IndexType)capacity; ++i)
+ {
+ indices[i] = i;
+ }
+ }
+ else
+ {
+ indexCount = 0;
+ }
+ }
+
+ // Equivalent to calling freeLastUsed() until the used list is empty.
+ void clearFast()
+ {
+ indexCount = 0;
+ }
+
+ // This is the reserve size. The bank can only grow, due to shuffling of indices
+ virtual void reserve(uint32_t capacity)
+ {
+ reserve_internal(capacity);
+ }
+
+ // If lock = true, keeps bank from automatically resizing
+ void lockCapacity(bool lock)
+ {
+ capacityLocked = lock;
+ }
+
+ bool isCapacityLocked() const
+ {
+ return capacityLocked;
+ }
+
+ void setMaxCapacity(uint32_t inMaxCapacity)
+ {
+ // Cannot drop below current capacity, nor above max set by data types
+ maxCapacity = PxClamp(inMaxCapacity, capacity(), calculateMaxCapacity());
+ }
+
+ uint32_t capacity() const
+ {
+ return indices.size();
+ }
+ uint32_t usedCount() const
+ {
+ return indexCount;
+ }
+ uint32_t freeCount() const
+ {
+ return capacity() - usedCount();
+ }
+
+	// valid from [0] to [usedCount()-1]
+ const IndexType* usedIndices() const
+ {
+ return indices.begin();
+ }
+
+	// valid from [0] to [freeCount()-1]
+ const IndexType* freeIndices() const
+ {
+ return indices.begin() + usedCount();
+ }
+
+ bool isValid(IndexType index) const
+ {
+ return index < (IndexType)capacity();
+ }
+ bool isUsed(IndexType index) const
+ {
+ return isValid(index) && (ranks[index] < (IndexType)usedCount());
+ }
+ bool isFree(IndexType index) const
+ {
+		return isValid(index) && !isUsed(index);
+ }
+
+ IndexType getRank(IndexType index) const
+ {
+ return ranks[index];
+ }
+
+ // Gets the next available index, if any
+ bool useNextFree(IndexType& index)
+ {
+ if (freeCount() == 0)
+ {
+ if (capacityLocked)
+ {
+ return false;
+ }
+ if (capacity() >= maxCapacity)
+ {
+ return false;
+ }
+ reserve(PxClamp(capacity() * 2, (uint32_t)1, maxCapacity));
+ PX_ASSERT(freeCount() > 0);
+ }
+ index = indices[indexCount++];
+ return true;
+ }
+
+ // Frees the last used index, if any
+ bool freeLastUsed(IndexType& index)
+ {
+ if (usedCount() == 0)
+ {
+ return false;
+ }
+ index = indices[--indexCount];
+ return true;
+ }
+
+ // Requests a particular index. If that index is available, it is borrowed and the function
+ // returns true. Otherwise nothing happens and the function returns false.
+ bool use(IndexType index)
+ {
+ if (!indexIsValidForUse(index))
+ {
+ return false;
+ }
+ IndexType oldRank;
+ placeIndexAtRank(index, (IndexType)indexCount++, oldRank);
+ return true;
+ }
+
+ bool free(IndexType index)
+ {
+ if (!indexIsValidForFreeing(index))
+ {
+ return false;
+ }
+ IndexType oldRank;
+ placeIndexAtRank(index, (IndexType)--indexCount, oldRank);
+ return true;
+ }
+
+ bool useAndReturnRanks(IndexType index, IndexType& newRank, IndexType& oldRank)
+ {
+ if (!indexIsValidForUse(index))
+ {
+ return false;
+ }
+ newRank = (IndexType)indexCount++;
+ placeIndexAtRank(index, newRank, oldRank);
+ return true;
+ }
+
+ bool freeAndReturnRanks(IndexType index, IndexType& newRank, IndexType& oldRank)
+ {
+ if (!indexIsValidForFreeing(index))
+ {
+ return false;
+ }
+ newRank = (IndexType)--indexCount;
+ placeIndexAtRank(index, newRank, oldRank);
+ return true;
+ }
+
+ protected:
+
+ bool indexIsValidForUse(IndexType index)
+ {
+ if (!isValid(index))
+ {
+ if (capacityLocked)
+ {
+ return false;
+ }
+ if (capacity() >= maxCapacity)
+ {
+ return false;
+ }
+ reserve(PxClamp(2*(uint32_t)index, (uint32_t)1, maxCapacity));
+ PX_ASSERT(isValid(index));
+ }
+ return !isUsed(index);
+ }
+
+ bool indexIsValidForFreeing(IndexType index)
+ {
+ if (!isValid(index))
+ {
+ // Invalid index
+ return false;
+ }
+ return isUsed(index);
+ }
+
+ // This is the reserve size. The bank can only grow, due to shuffling of indices
+ void reserve_internal(uint32_t capacity)
+ {
+ capacity = PxMin(capacity, maxCapacity);
+ const uint32_t oldCapacity = indices.size();
+ if (capacity > oldCapacity)
+ {
+ indices.resize(capacity);
+ ranks.resize(capacity);
+ for (IndexType i = (IndexType)oldCapacity; i < (IndexType)capacity; ++i)
+ {
+ indices[i] = i;
+ ranks[i] = i;
+ }
+ }
+ }
+
+ private:
+
+ void placeIndexAtRank(IndexType index, IndexType newRank, IndexType& oldRank) // returns old rank
+ {
+ const IndexType replacementIndex = indices[newRank];
+ oldRank = ranks[index];
+ indices[oldRank] = replacementIndex;
+ indices[newRank] = index;
+ ranks[replacementIndex] = oldRank;
+ ranks[index] = newRank;
+ }
+
+ uint32_t calculateMaxCapacity()
+ {
+#pragma warning(push)
+#pragma warning(disable: 4127) // conditional expression is constant
+ if (sizeof(IndexType) >= sizeof(uint32_t))
+ {
+ return 0xFFFFFFFF; // Limited by data type we use to report capacity
+ }
+ else
+ {
+ return (1u << (8 * PxMin((uint32_t)sizeof(IndexType), 3u))) - 1; // Limited by data type we use for indices
+ }
+#pragma warning(pop)
+ }
+
+ protected:
+
+ Array<IndexType> indices;
+ Array<IndexType> ranks;
+ uint32_t maxCapacity;
+ uint32_t indexCount;
+ bool capacityLocked;
+};
+
+#if PX_X64
+#pragma warning(pop)
+#endif
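+
+/*
+ Illustrative usage (sketch): borrowing and returning IDs in O(1). Only indices currently
+ in use are visited by the loop below.
+
+	IndexBank<uint16_t> bank(16);
+	uint16_t id = 0;
+	if (bank.useNextFree(id))
+	{
+		// id is now marked used; bank.getRank(id) gives its position among used indices
+	}
+	bank.use(7);	// request a specific index, if it is free
+	for (uint32_t rank = 0; rank < bank.usedCount(); ++rank)
+	{
+		const uint16_t usedIndex = bank.usedIndices()[rank];	// iterate used indices
+		PX_UNUSED(usedIndex);
+	}
+	bank.free(id);	// return the borrowed index
+*/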
+
+/*
+ Bank - Index bank of type IndexType with an associated object array of type T
+ */
+template <class T, class IndexType>
+class Bank : public IndexBank<IndexType>
+{
+ public:
+ Bank<T, IndexType>(uint32_t capacity = 0) : IndexBank<IndexType>(capacity)
+ {
+ objects = (T*)PX_ALLOC(IndexBank<IndexType>::indices.size() * sizeof(T), PX_DEBUG_EXP("Bank"));
+ if (objects != NULL)
+ {
+ PX_ASSERT(memset(objects, 0, IndexBank<IndexType>::indices.size() * sizeof(T)));
+ }
+ }
+ Bank<T, IndexType>(const Bank<T, IndexType>& bank) : objects(NULL)
+ {
+ *this = bank;
+ }
+
+ ~Bank<T, IndexType>()
+ {
+ clear();
+ }
+
+ Bank<T, IndexType>& operator = (const Bank<T, IndexType>& bank)
+ {
+ if (&bank == this)
+ {
+ return *this;
+ }
+
+ this->clear();
+
+ this->indices = bank.indices;
+ this->ranks = bank.ranks;
+ this->maxCapacity = bank.maxCapacity;
+ this->indexCount = bank.indexCount;
+ this->capacityLocked = bank.capacityLocked;
+
+ if (this->indices.size())
+ {
+ objects = (T*)PX_ALLOC(IndexBank<IndexType>::indices.size() * sizeof(T), PX_DEBUG_EXP("Bank"));
+ PX_ASSERT(memset(objects, 0, IndexBank<IndexType>::indices.size() * sizeof(T)));
+ for (uint32_t i = 0; i < this->indexCount; ++i)
+ {
+ uint32_t index = this->indices[i];
+ new(objects + index) T();
+ objects[index] = bank.objects[index];
+ }
+ }
+ return *this;
+ }
+
+ // This is the reserve size. The bank can only grow, due to shuffling of indices
+ virtual void reserve(uint32_t capacity)
+ {
+ const uint32_t oldSize = IndexBank<IndexType>::indices.size();
+ IndexBank<IndexType>::reserve_internal(capacity);
+ if (IndexBank<IndexType>::indices.size() > oldSize)
+ {
+ T* nb = (T*)PX_ALLOC(IndexBank<IndexType>::indices.size() * sizeof(T), PX_DEBUG_EXP("Bank"));
+ if (nb)
+ {
+ PX_ASSERT(memset(nb, 0, IndexBank<IndexType>::indices.size() * sizeof(T)));
+
+ const IndexType* usedIndices = IndexBank<IndexType>::usedIndices();
+ uint32_t numIndices = IndexBank<IndexType>::usedCount();
+
+ // this copy needs to be correct for nonPOD type T's
+ for (int32_t i = (int32_t)numIndices - 1; i >= 0; i--)
+ {
+ IndexType index = usedIndices[i];
+ new(nb + index) T(objects[index]);
+ objects[index].~T();
+ }
+ //memcpy( nb, objects, IndexBank<IndexType>::indices.size()*sizeof(T) );
+ }
+ PX_FREE(objects);
+ objects = nb;
+ }
+ }
+
+ // Indirect array accessors: rank in [0,usedCount()-1] returns all "used" indexed objects
+ const T& getUsed(IndexType rank) const
+ {
+ return objects[ IndexBank<IndexType>::indices[rank] ];
+ }
+ T& getUsed(IndexType rank)
+ {
+ return objects[ IndexBank<IndexType>::indices[rank] ];
+ }
+
+ // Direct array accessors
+ const T& direct(IndexType index) const
+ {
+ return objects[index];
+ }
+ T& direct(IndexType index)
+ {
+ return objects[index];
+ }
+
+ // Wrappers for base class, which call appropriate constructors and destructors of objects
+ bool useNextFree(IndexType& index)
+ {
+ if (IndexBank<IndexType>::useNextFree(index))
+ {
+ new(objects + index) T();
+ return true;
+ }
+ return false;
+ }
+
+ bool freeLastUsed(IndexType& index)
+ {
+ if (IndexBank<IndexType>::freeLastUsed(index))
+ {
+ objects[index].~T();
+ return true;
+ }
+ return false;
+ }
+
+ bool use(IndexType index)
+ {
+ if (IndexBank<IndexType>::use(index))
+ {
+ new(objects + index) T();
+ return true;
+ }
+ return false;
+ }
+
+ bool free(IndexType index)
+ {
+ if (IndexBank<IndexType>::free(index))
+ {
+ objects[index].~T();
+ return true;
+ }
+ return false;
+ }
+
+ bool useAndReturnRanks(IndexType index, IndexType& newRank, IndexType& oldRank)
+ {
+ if (IndexBank<IndexType>::useAndReturnRanks(index, newRank, oldRank))
+ {
+ new(objects + index) T();
+ return true;
+ }
+ return false;
+ }
+
+ bool freeAndReturnRanks(IndexType index, IndexType& newRank, IndexType& oldRank)
+ {
+ if (IndexBank<IndexType>::freeAndReturnRanks(index, newRank, oldRank))
+ {
+ objects[index].~T();
+ return true;
+ }
+ return false;
+ }
+
+ // Erases all object, index, and rank arrays (complete deallocation)
+ void clear()
+ {
+ const IndexType* usedIndices = IndexBank<IndexType>::usedIndices();
+ uint32_t numIndices = IndexBank<IndexType>::usedCount();
+
+ for (int32_t i = (int32_t)numIndices - 1; i >= 0; i--)
+ {
+ bool test = free(usedIndices[i]);
+ PX_UNUSED(test);
+ PX_ASSERT(test);
+ }
+
+ IndexBank<IndexType>::clear();
+ PX_FREE(objects);
+ objects = NULL;
+ }
+
+ // Re-arranges objects internally into rank-order, afterwards rank = index
+ void clean()
+ {
+ for (IndexType i = 0; i < IndexBank<IndexType>::capacity(); ++i)
+ {
+ const IndexType index = IndexBank<IndexType>::indices[i];
+ if (index != i)
+ {
+ nvidia::swap(objects[i], objects[index]);
+ const IndexType displacedRank = IndexBank<IndexType>::ranks[i];
+ IndexBank<IndexType>::indices[i] = i;
+ IndexBank<IndexType>::ranks[i] = i;
+ IndexBank<IndexType>::indices[displacedRank] = index;
+ IndexBank<IndexType>::ranks[index] = displacedRank;
+ }
+ }
+ }
+
+ protected:
+ T* objects;
+};
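+
+/*
+ Illustrative usage (sketch): Bank pairs the index free list with an object array,
+ constructing and destroying elements as indices are borrowed and returned. 'Particle'
+ and its 'position' member are an assumed user type.
+
+	Bank<Particle, uint16_t> particles(64);
+	uint16_t id = 0;
+	if (particles.useNextFree(id))	// default-constructs particles.direct(id)
+	{
+		particles.direct(id).position = PxVec3(0.0f);
+	}
+	particles.free(id);	// runs the destructor and recycles the index
+*/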
+
+
+/*
+ Ring buffer
+*/
+template <class T>
+class RingBuffer
+{
+ public:
+ RingBuffer() : frontIndex(0), backIndex(0xFFFFFFFF), usedSize(0), bufferSize(0), buffer(NULL) {}
+ ~RingBuffer()
+ {
+ erase();
+ }
+
+ uint32_t size() const
+ {
+ return usedSize;
+ }
+
+ T& operator [](uint32_t i)
+ {
+ PX_ASSERT(i < usedSize);
+ i += frontIndex;
+ return buffer[ i < bufferSize ? i : i - bufferSize ];
+ }
+
+ const T& operator [](uint32_t i) const
+ {
+ return (const T&)(const_cast<RingBuffer<T>*>(this)->operator[](i));
+ }
+
+ T& back() const
+ {
+ return buffer[backIndex];
+ }
+ T& front() const
+ {
+ return buffer[frontIndex];
+ }
+
+ T& pushBack()
+ {
+ if (bufferSize == usedSize)
+ {
+ reserve(2 * (bufferSize + 1));
+ }
+ ++usedSize;
+ if (++backIndex == bufferSize)
+ {
+ backIndex = 0;
+ }
+ T& back = buffer[backIndex];
+ PX_PLACEMENT_NEW(&back, T)();
+ return back;
+ }
+
+ void popBack()
+ {
+ PX_ASSERT(size() != 0);
+ if (size() == 0)
+ {
+ return;
+ }
+ buffer[backIndex].~T();
+ --usedSize;
+ if (backIndex-- == 0)
+ {
+ backIndex += bufferSize;
+ }
+ }
+
+ T& pushFront()
+ {
+ if (bufferSize == usedSize)
+ {
+ reserve(2 * (bufferSize + 1));
+ }
+ ++usedSize;
+ if (frontIndex-- == 0)
+ {
+ frontIndex += bufferSize;
+ }
+ T& front = buffer[frontIndex];
+ PX_PLACEMENT_NEW(&front, T)();
+ return front;
+ }
+
+ void popFront()
+ {
+ PX_ASSERT(size() != 0);
+ if (size() == 0)
+ {
+ return;
+ }
+ buffer[frontIndex].~T();
+ --usedSize;
+ if (++frontIndex == bufferSize)
+ {
+ frontIndex = 0;
+ }
+ }
+
+ void clear()
+ {
+ while (size() != 0)
+ {
+ popBack();
+ }
+ frontIndex = 0;
+ backIndex = 0xFFFFFFFF;
+ }
+
+ void erase()
+ {
+ clear();
+ if (buffer != NULL)
+ {
+ PX_FREE(buffer);
+ buffer = NULL;
+ }
+ bufferSize = 0;
+ }
+
+ void reserve(uint32_t newBufferSize)
+ {
+ if (newBufferSize <= bufferSize)
+ {
+ return;
+ }
+ T* newBuffer = (T*)PX_ALLOC(newBufferSize * sizeof(T), PX_DEBUG_EXP("RingBuffer"));
+ const uint32_t lastIndex = frontIndex + usedSize;
+ if (lastIndex <= bufferSize)
+ {
+ for (uint32_t i = 0; i < usedSize; i++)
+ {
+ PX_PLACEMENT_NEW(newBuffer + i, T)(buffer[i]);
+ buffer[i].~T();
+ }
+ //memcpy( newBuffer, buffer+frontIndex, usedSize*sizeof( T ) );
+ }
+ else
+ {
+ for (uint32_t i = 0; i < (bufferSize - frontIndex); i++)
+ {
+ PX_PLACEMENT_NEW(newBuffer + i, T)(buffer[i + frontIndex]);
+ buffer[i + frontIndex].~T();
+ }
+ //memcpy( newBuffer, buffer+frontIndex, (bufferSize-frontIndex)*sizeof( T ) );
+
+ for (uint32_t i = 0; i < (lastIndex - bufferSize); i++)
+ {
+ PX_PLACEMENT_NEW(newBuffer + i + (bufferSize - frontIndex), T)(buffer[i]);
+ buffer[i].~T();
+ }
+ //memcpy( newBuffer + (bufferSize-frontIndex), buffer, (lastIndex-bufferSize)*sizeof( T ) );
+ }
+ bufferSize = newBufferSize;
+ frontIndex = 0;
+ backIndex = frontIndex + usedSize - 1;
+ if (buffer)
+ {
+ PX_FREE(buffer);
+ }
+ buffer = newBuffer;
+ }
+
+ class It
+ {
+ public:
+ It(const RingBuffer<T>& buffer) :
+ m_bufferStart(buffer.buffer), m_bufferStop(buffer.buffer + buffer.bufferSize),
+ m_current(buffer.usedSize > 0 ? buffer.buffer + buffer.frontIndex : NULL), m_remaining(buffer.usedSize) {}
+
+ operator T* () const
+ {
+ return m_current;
+ }
+ T* operator ++ ()
+ {
+ inc();
+ return m_current;
+ }
+ T* operator ++ (int)
+ {
+ T* prev = m_current;
+ inc();
+ return prev;
+ }
+
+ private:
+ void inc()
+ {
+ if (m_remaining > 1)
+ {
+ --m_remaining;
+ if (++m_current == m_bufferStop)
+ {
+ m_current = m_bufferStart;
+ }
+ }
+ else
+ {
+ m_remaining = 0;
+ m_current = NULL;
+ }
+ }
+
+ T* m_bufferStart;
+ T* m_bufferStop;
+ T* m_current;
+ uint32_t m_remaining;
+ };
+
+ friend class It;
+
+ protected:
+ uint32_t frontIndex;
+ uint32_t backIndex;
+ uint32_t usedSize;
+ uint32_t bufferSize;
+ T* buffer;
+};
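+
+/*
+ Illustrative usage (sketch): FIFO-style use of the ring buffer with the nested iterator.
+ Elements are constructed by pushBack()/pushFront() and destroyed by the matching pop.
+
+	RingBuffer<uint32_t> queue;
+	queue.pushBack() = 1;
+	queue.pushBack() = 2;
+	for (RingBuffer<uint32_t>::It it(queue); it; ++it)
+	{
+		const uint32_t value = *it;	// visits elements front to back
+		PX_UNUSED(value);
+	}
+	queue.popFront();	// removes the oldest element
+*/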
+
+
+template<class T>
+class Pool
+{
+ enum { DefaultBlockSizeInBytes = 1024 }; // This must be positive
+
+ public:
+ Pool(uint32_t objectsPerBlock = 0) : m_head(NULL), m_inUse(0)
+ {
+ PX_ASSERT(sizeof(T) >= sizeof(void*));
+ setBlockSize(objectsPerBlock);
+ }
+
+ ~Pool()
+ {
+ empty();
+ }
+
+ void setBlockSize(uint32_t objectsPerBlock)
+ {
+ m_objectsPerBlock = objectsPerBlock > 0 ? objectsPerBlock : ((uint32_t)DefaultBlockSizeInBytes + sizeof(T) - 1) / sizeof(T);
+ }
+
+ /* Gives a single object, allocating if necessary */
+ T* borrow()
+ {
+ if (m_head == NULL)
+ {
+ allocateBlock();
+ }
+ T* ptr = (T*)m_head;
+ m_head = *(void**)m_head;
+ new(ptr) T();
+ ++m_inUse;
+ return ptr;
+ }
+
+ /* Return a single object */
+ void replace(T* ptr)
+ {
+ if (ptr != NULL)
+ {
+ ptr->~T();
+ *(void**)ptr = m_head;
+ m_head = (void*)ptr;
+ --m_inUse;
+ }
+ }
+
+ void allocateBlock()
+ {
+ T* block = (T*)PX_ALLOC(sizeof(T) * m_objectsPerBlock, PX_DEBUG_EXP("ApexSharedUtils::Pool"));
+ m_blocks.pushBack(block);
+ for (T* ptr = block + m_objectsPerBlock; ptr-- != block;)
+ {
+ *(void**)ptr = m_head;
+ m_head = (void*)ptr;
+ }
+ }
+
+ int32_t empty()
+ {
+ while (m_blocks.size())
+ {
+ PX_FREE(m_blocks.back());
+ m_blocks.popBack();
+ }
+ m_blocks.reset();
+ m_head = NULL;
+ const int32_t inUse = m_inUse;
+ m_inUse = 0;
+ return inUse;
+ }
+
+ protected:
+
+ void* m_head;
+ uint32_t m_objectsPerBlock;
+ physx::Array<T*>m_blocks;
+ int32_t m_inUse;
+};
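+
+/*
+ Illustrative usage (sketch): Pool hands out default-constructed objects from block
+ allocations; replace() runs the destructor and recycles the slot. Note the requirement
+ sizeof(T) >= sizeof(void*), asserted in the constructor.
+
+	Pool<PxBounds3> boundsPool;
+	PxBounds3* b = boundsPool.borrow();	// default-constructed
+	b->setEmpty();
+	boundsPool.replace(b);	// destroyed and returned to the free list
+*/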
+
+
+// Progress listener implementation for hierarchical progress reporting
+class HierarchicalProgressListener : public IProgressListener
+{
+ public:
+ HierarchicalProgressListener(int totalWork, IProgressListener* parent) :
+ m_work(0), m_subtaskWork(1), m_totalWork(PxMax(totalWork, 1)), m_taskName(NULL), m_parent(parent) {}
+
+ void setSubtaskWork(int subtaskWork, const char* taskName = NULL)
+ {
+ if (subtaskWork < 0)
+ {
+ subtaskWork = m_totalWork - m_work;
+ }
+
+ m_subtaskWork = subtaskWork;
+ PX_ASSERT(m_work + m_subtaskWork <= m_totalWork);
+ m_taskName = taskName;
+ setProgress(0, m_taskName);
+ }
+
+ void completeSubtask()
+ {
+ setProgress(100, m_taskName);
+ m_work += m_subtaskWork;
+ }
+
+ void setProgress(int progress, const char* taskName = NULL)
+ {
+ PX_ASSERT(progress >= 0);
+ PX_ASSERT(progress <= 100);
+
+ if (taskName == NULL)
+ {
+ taskName = m_taskName;
+ }
+
+ if (m_parent != NULL)
+ {
+ const int parentProgress = m_totalWork > 0 ? (m_work * 100 + m_subtaskWork * progress) / m_totalWork : 100;
+ m_parent->setProgress(PxClamp(parentProgress, 0, 100), taskName);
+ }
+ }
+
+ protected:
+ int m_work;
+ int m_subtaskWork;
+ int m_totalWork;
+ const char* m_taskName;
+ IProgressListener* m_parent;
+};
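+
+/*
+ Illustrative usage (sketch): splitting a parent progress range between two subtasks.
+ 'userListener' is an assumed caller-provided IProgressListener.
+
+	HierarchicalProgressListener progress(100, userListener);
+	progress.setSubtaskWork(30, "cooking");
+	// report 0-100 for this subtask via progress.setProgress(p)
+	progress.completeSubtask();	// parent advances by 30 units
+	progress.setSubtaskWork(-1, "building");	// takes the remaining 70 units
+	progress.completeSubtask();
+*/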
+
+void createIndexStartLookup(physx::Array<uint32_t>& lookup, int32_t indexBase, uint32_t indexRange, int32_t* indexSource, uint32_t indexCount, uint32_t indexByteStride);
+
+void findIslands(physx::Array< physx::Array<uint32_t> >& islands, const physx::Array<IntPair>& overlaps, uint32_t indexRange);
+
+// Neighbor-finding utility class
+class NeighborLookup
+{
+public:
+ void setBounds(const BoundsRep* bounds, uint32_t boundsCount, uint32_t boundsByteStride);
+
+ uint32_t getNeighborCount(const uint32_t index) const;
+ const uint32_t* getNeighbors(const uint32_t index) const;
+
+protected:
+ physx::Array<uint32_t> m_neighbors;
+ physx::Array<uint32_t> m_firstNeighbor;
+};
+
+
+// TriangleFrame - calculates interpolation data for triangle quantities
+class TriangleFrame
+{
+ public:
+
+ enum VertexField
+ {
+// Position_x, Position_y, Position_z, // Not interpolating positions
+ Normal_x, Normal_y, Normal_z,
+ Tangent_x, Tangent_y, Tangent_z,
+ Binormal_x, Binormal_y, Binormal_z,
+ UV0_u, UV0_v, UV1_u, UV1_v, UV2_u, UV2_v, UV3_u, UV3_v,
+ Color_r, Color_g, Color_b, Color_a,
+
+ VertexFieldCount
+ };
+
+ TriangleFrame() : m_fieldMask(0) {}
+ TriangleFrame(const ExplicitRenderTriangle& tri, uint64_t fieldMask = 0xFFFFFFFFFFFFFFFFULL)
+ {
+ setFromTriangle(tri, fieldMask);
+ }
+ TriangleFrame(const PxMat44& tm, const PxVec2& uvScale, const PxVec2& uvOffset, uint64_t fieldMask = 0xFFFFFFFFFFFFFFFFULL)
+ {
+ setFlat(tm, uvScale, uvOffset, fieldMask);
+ }
+
+ PX_INLINE void setFromTriangle(const ExplicitRenderTriangle& tri, uint64_t fieldMask = 0xFFFFFFFFFFFFFFFFULL);
+ PX_INLINE void setFlat(const PxMat44& tm, const PxVec2& uvScale, const PxVec2& uvOffset, uint64_t fieldMask = 0xFFFFFFFFFFFFFFFFULL);
+
+ PX_INLINE void interpolateVertexData(Vertex& vertex) const;
+
+ private:
+
+ static size_t s_offsets[VertexFieldCount];
+ PxPlane m_frames[VertexFieldCount];
+ uint64_t m_fieldMask;
+
+ friend class TriangleFrameBuilder;
+};
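+
+/*
+ Illustrative usage (sketch): building a frame from a render triangle and interpolating
+ the fields of a vertex whose position lies on (or near) that triangle. 'triangle' and
+ 'vertex' are assumed caller-side values; vertex.position must already be set before
+ interpolateVertexData() is called.
+
+	TriangleFrame frame(triangle);	// all vertex fields interpolated by default
+	frame.interpolateVertexData(vertex);	// fills the masked fields (normal, tangent, UVs, color)
+*/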
+
+PX_INLINE void
+TriangleFrame::setFromTriangle(const ExplicitRenderTriangle& tri, uint64_t fieldMask)
+{
+ m_fieldMask = fieldMask;
+
+ PxVec3 p0, p1, p2;
+ p0 = tri.vertices[0].position;
+ p1 = tri.vertices[1].position;
+ p2 = tri.vertices[2].position;
+ const PxVec3 p1xp2 = p1.cross(p2);
+ const PxVec3 p2xp0 = p2.cross(p0);
+ const PxVec3 p0xp1 = p0.cross(p1);
+ const PxVec3 n = p1xp2 + p2xp0 + p0xp1;
+ const float n2 = n.dot(n);
+ if (n2 < PX_EPS_F32 * PX_EPS_F32)
+ {
+ for (uint32_t i = 0; fieldMask != 0 && i < VertexFieldCount; ++i, (fieldMask >>= 1))
+ {
+ if (fieldMask & 1)
+ {
+ m_frames[i] = PxPlane(0, 0, 0, 0);
+ }
+ }
+ return;
+ }
+
+ // Calculate inverse 4x4 matrix (only need first three columns):
+ const PxVec3 nP = n / n2; // determinant is -n2
+ const PxVec3 Q0(nP.z * (p1.y - p2.y) - nP.y * (p1.z - p2.z), nP.z * (p2.y - p0.y) - nP.y * (p2.z - p0.z), nP.z * (p0.y - p1.y) - nP.y * (p0.z - p1.z));
+ const PxVec3 Q1(nP.x * (p1.z - p2.z) - nP.z * (p1.x - p2.x), nP.x * (p2.z - p0.z) - nP.z * (p2.x - p0.x), nP.x * (p0.z - p1.z) - nP.z * (p0.x - p1.x));
+ const PxVec3 Q2(nP.y * (p1.x - p2.x) - nP.x * (p1.y - p2.y), nP.y * (p2.x - p0.x) - nP.x * (p2.y - p0.y), nP.y * (p0.x - p1.x) - nP.x * (p0.y - p1.y));
+ const PxVec3 r(nP.dot(p1xp2), nP.dot(p2xp0), nP.dot(p0xp1));
+
+ for (uint32_t i = 0; fieldMask != 0 && i < VertexFieldCount; ++i, (fieldMask >>= 1))
+ {
+ if (fieldMask & 1)
+ {
+ const size_t offset = s_offsets[i];
+ const PxVec3 vi(*(float*)(((uint8_t*)&tri.vertices[0]) + offset), *(float*)(((uint8_t*)&tri.vertices[1]) + offset), *(float*)(((uint8_t*)&tri.vertices[2]) + offset));
+ m_frames[i] = PxPlane(Q0.dot(vi), Q1.dot(vi), Q2.dot(vi), r.dot(vi));
+ }
+ }
+}
+
+PX_INLINE void
+TriangleFrame::setFlat(const PxMat44& tm, const PxVec2& uvScale, const PxVec2& uvOffset, uint64_t fieldMask)
+{
+ m_fieldMask = fieldMask;
+
+ // Local z ~ normal = tangents[2], x ~ u and tangent = tangents[0], y ~ v and binormal = tangents[1]
+ if ((fieldMask >> Normal_x) & 1)
+ {
+ m_frames[Normal_x] = PxPlane(PxVec3((float)0), tm(0, 2));
+ }
+ if ((fieldMask >> Normal_y) & 1)
+ {
+ m_frames[Normal_y] = PxPlane(PxVec3((float)0), tm(1, 2));
+ }
+ if ((fieldMask >> Normal_z) & 1)
+ {
+ m_frames[Normal_z] = PxPlane(PxVec3((float)0), tm(2, 2));
+ }
+ if ((fieldMask >> Tangent_x) & 1)
+ {
+ m_frames[Tangent_x] = PxPlane(PxVec3((float)0), tm(0, 0));
+ }
+ if ((fieldMask >> Tangent_y) & 1)
+ {
+ m_frames[Tangent_y] = PxPlane(PxVec3((float)0), tm(1, 0));
+ }
+ if ((fieldMask >> Tangent_z) & 1)
+ {
+ m_frames[Tangent_z] = PxPlane(PxVec3((float)0), tm(2, 0));
+ }
+ if ((fieldMask >> Binormal_x) & 1)
+ {
+ m_frames[Binormal_x] = PxPlane(PxVec3((float)0), tm(0, 1));
+ }
+ if ((fieldMask >> Binormal_y) & 1)
+ {
+ m_frames[Binormal_y] = PxPlane(PxVec3((float)0), tm(1, 1));
+ }
+ if ((fieldMask >> Binormal_z) & 1)
+ {
+ m_frames[Binormal_z] = PxPlane(PxVec3((float)0), tm(2, 1));
+ }
+ const PxVec3 psu = (uvScale[0] ? 1 / uvScale[0] : (float)0) * tm.column0.getXYZ();
+ const PxVec3 psv = (uvScale[1] ? 1 / uvScale[1] : (float)0) * tm.column1.getXYZ();
+ if ((fieldMask >> UV0_u) & 1)
+ {
+ m_frames[UV0_u] = PxPlane(psu, uvOffset[0]);
+ }
+ if ((fieldMask >> UV0_v) & 1)
+ {
+ m_frames[UV0_v] = PxPlane(psv, uvOffset[1]);
+ }
+ if ((fieldMask >> UV1_u) & 1)
+ {
+ m_frames[UV1_u] = PxPlane(psu, uvOffset[0]);
+ }
+ if ((fieldMask >> UV1_v) & 1)
+ {
+ m_frames[UV1_v] = PxPlane(psv, uvOffset[1]);
+ }
+ if ((fieldMask >> UV2_u) & 1)
+ {
+ m_frames[UV2_u] = PxPlane(psu, uvOffset[0]);
+ }
+ if ((fieldMask >> UV2_v) & 1)
+ {
+ m_frames[UV2_v] = PxPlane(psv, uvOffset[1]);
+ }
+ if ((fieldMask >> UV3_u) & 1)
+ {
+ m_frames[UV3_u] = PxPlane(psu, uvOffset[0]);
+ }
+ if ((fieldMask >> UV3_v) & 1)
+ {
+ m_frames[UV3_v] = PxPlane(psv, uvOffset[1]);
+ }
+ if ((fieldMask >> Color_r) & 1)
+ {
+ m_frames[Color_r] = PxPlane(PxVec3((float)0), (float)1);
+ }
+ if ((fieldMask >> Color_g) & 1)
+ {
+ m_frames[Color_g] = PxPlane(PxVec3((float)0), (float)1);
+ }
+ if ((fieldMask >> Color_b) & 1)
+ {
+ m_frames[Color_b] = PxPlane(PxVec3((float)0), (float)1);
+ }
+ if ((fieldMask >> Color_a) & 1)
+ {
+ m_frames[Color_a] = PxPlane(PxVec3((float)0), (float)1);
+ }
+}
+
+PX_INLINE void
+TriangleFrame::interpolateVertexData(Vertex& vertex) const
+{
+ uint64_t fieldMask = m_fieldMask;
+ for (uint32_t i = 0; fieldMask != 0 && i < VertexFieldCount; ++i, (fieldMask >>= 1))
+ {
+ if (fieldMask & 1)
+ {
+ float& value = *(float*)(((uint8_t*)&vertex) + s_offsets[i]);
+ value = m_frames[i].distance(vertex.position);
+ }
+ }
+}
+
+class TriangleFrameBuilder
+{
+ public:
+ TriangleFrameBuilder()
+ {
+#define CREATE_TF_OFFSET( field, element ) (size_t)((uintptr_t)&vertex.field.element-(uintptr_t)&vertex)
+#define CREATE_TF_OFFSET_IDX( field, element, index ) (size_t)((uintptr_t)&vertex.field[index].element-(uintptr_t)&vertex)
+
+ Vertex vertex;
+// TriangleFrame::s_offsets[TriangleFrame::Position_x] = CREATE_TF_OFFSET( position, x );
+// TriangleFrame::s_offsets[TriangleFrame::Position_y] = CREATE_TF_OFFSET( position, y );
+// TriangleFrame::s_offsets[TriangleFrame::Position_z] = CREATE_TF_OFFSET( position, z );
+ TriangleFrame::s_offsets[TriangleFrame::Normal_x] = CREATE_TF_OFFSET(normal, x);
+ TriangleFrame::s_offsets[TriangleFrame::Normal_y] = CREATE_TF_OFFSET(normal, y);
+ TriangleFrame::s_offsets[TriangleFrame::Normal_z] = CREATE_TF_OFFSET(normal, z);
+ TriangleFrame::s_offsets[TriangleFrame::Tangent_x] = CREATE_TF_OFFSET(tangent, x);
+ TriangleFrame::s_offsets[TriangleFrame::Tangent_y] = CREATE_TF_OFFSET(tangent, y);
+ TriangleFrame::s_offsets[TriangleFrame::Tangent_z] = CREATE_TF_OFFSET(tangent, z);
+ TriangleFrame::s_offsets[TriangleFrame::Binormal_x] = CREATE_TF_OFFSET(binormal, x);
+ TriangleFrame::s_offsets[TriangleFrame::Binormal_y] = CREATE_TF_OFFSET(binormal, y);
+ TriangleFrame::s_offsets[TriangleFrame::Binormal_z] = CREATE_TF_OFFSET(binormal, z);
+ TriangleFrame::s_offsets[TriangleFrame::UV0_u] = CREATE_TF_OFFSET_IDX(uv, u, 0);
+ TriangleFrame::s_offsets[TriangleFrame::UV0_v] = CREATE_TF_OFFSET_IDX(uv, v, 0);
+ TriangleFrame::s_offsets[TriangleFrame::UV1_u] = CREATE_TF_OFFSET_IDX(uv, u, 1);
+ TriangleFrame::s_offsets[TriangleFrame::UV1_v] = CREATE_TF_OFFSET_IDX(uv, v, 1);
+ TriangleFrame::s_offsets[TriangleFrame::UV2_u] = CREATE_TF_OFFSET_IDX(uv, u, 2);
+ TriangleFrame::s_offsets[TriangleFrame::UV2_v] = CREATE_TF_OFFSET_IDX(uv, v, 2);
+ TriangleFrame::s_offsets[TriangleFrame::UV3_u] = CREATE_TF_OFFSET_IDX(uv, u, 3);
+ TriangleFrame::s_offsets[TriangleFrame::UV3_v] = CREATE_TF_OFFSET_IDX(uv, v, 3);
+ TriangleFrame::s_offsets[TriangleFrame::Color_r] = CREATE_TF_OFFSET(color, r);
+ TriangleFrame::s_offsets[TriangleFrame::Color_g] = CREATE_TF_OFFSET(color, g);
+ TriangleFrame::s_offsets[TriangleFrame::Color_b] = CREATE_TF_OFFSET(color, b);
+ TriangleFrame::s_offsets[TriangleFrame::Color_a] = CREATE_TF_OFFSET(color, a);
+ }
+};
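+
+// Usage sketch (assumes an ExplicitRenderTriangle 'tri' and a Vertex 'v' whose position lies on the triangle):
+//
+//     static TriangleFrameBuilder s_frameBuilder;   // one-time setup of TriangleFrame::s_offsets
+//     TriangleFrame frame(tri);                     // one interpolation plane per requested vertex field
+//     frame.interpolateVertexData(v);               // fills v's normal/tangent/binormal/UVs/color from v.position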
+
+
+// Format conversion utilities
+
+// Explicit data layouts, used for data conversion
+
+typedef uint8_t UBYTE1_TYPE;
+typedef uint8_t UBYTE2_TYPE[2];
+typedef uint8_t UBYTE3_TYPE[3];
+typedef uint8_t UBYTE4_TYPE[4];
+
+typedef uint16_t USHORT1_TYPE;
+typedef uint16_t USHORT2_TYPE[2];
+typedef uint16_t USHORT3_TYPE[3];
+typedef uint16_t USHORT4_TYPE[4];
+
+typedef int16_t SHORT1_TYPE;
+typedef int16_t SHORT2_TYPE[2];
+typedef int16_t SHORT3_TYPE[3];
+typedef int16_t SHORT4_TYPE[4];
+
+typedef uint32_t UINT1_TYPE;
+typedef uint32_t UINT2_TYPE[2];
+typedef uint32_t UINT3_TYPE[3];
+typedef uint32_t UINT4_TYPE[4];
+
+struct R8G8B8A8_TYPE
+{
+ uint8_t r, g, b, a;
+};
+struct B8G8R8A8_TYPE
+{
+ uint8_t b, g, r, a;
+};
+struct R32G32B32A32_FLOAT_TYPE
+{
+ float r, g, b, a;
+};
+struct B32G32R32A32_FLOAT_TYPE
+{
+ float b, g, r, a;
+};
+
+typedef uint8_t BYTE_UNORM1_TYPE;
+typedef uint8_t BYTE_UNORM2_TYPE[2];
+typedef uint8_t BYTE_UNORM3_TYPE[3];
+typedef uint8_t BYTE_UNORM4_TYPE[4];
+
+typedef uint16_t SHORT_UNORM1_TYPE;
+typedef uint16_t SHORT_UNORM2_TYPE[2];
+typedef uint16_t SHORT_UNORM3_TYPE[3];
+typedef uint16_t SHORT_UNORM4_TYPE[4];
+
+typedef int8_t BYTE_SNORM1_TYPE;
+typedef int8_t BYTE_SNORM2_TYPE[2];
+typedef int8_t BYTE_SNORM3_TYPE[3];
+typedef int8_t BYTE_SNORM4_TYPE[4];
+
+typedef int16_t SHORT_SNORM1_TYPE;
+typedef int16_t SHORT_SNORM2_TYPE[2];
+typedef int16_t SHORT_SNORM3_TYPE[3];
+typedef int16_t SHORT_SNORM4_TYPE[4];
+
+typedef uint16_t HALF1_TYPE;
+typedef uint16_t HALF2_TYPE[2];
+typedef uint16_t HALF3_TYPE[3];
+typedef uint16_t HALF4_TYPE[4];
+
+typedef float FLOAT1_TYPE;
+typedef float FLOAT2_TYPE[2];
+typedef float FLOAT3_TYPE[3];
+typedef float FLOAT4_TYPE[4];
+
+typedef PxMat44 FLOAT4x4_TYPE;
+typedef PxMat33 FLOAT3x3_TYPE;
+
+typedef PxQuat FLOAT4_QUAT_TYPE;
+typedef int8_t BYTE_SNORM4_QUATXYZW_TYPE[4];
+typedef int16_t SHORT_SNORM4_QUATXYZW_TYPE[4];
+
+
+// Data converters
+
+// USHORT1_TYPE -> UINT1_TYPE
+PX_INLINE void convert_UINT1_from_USHORT1(UINT1_TYPE& dst, const USHORT1_TYPE& src)
+{
+ dst = (uint32_t)src;
+}
+
+// USHORT2_TYPE -> UINT2_TYPE
+PX_INLINE void convert_UINT2_from_USHORT2(UINT2_TYPE& dst, const USHORT2_TYPE& src)
+{
+ convert_UINT1_from_USHORT1(dst[0], src[0]);
+ convert_UINT1_from_USHORT1(dst[1], src[1]);
+}
+
+// USHORT3_TYPE -> UINT3_TYPE
+PX_INLINE void convert_UINT3_from_USHORT3(UINT3_TYPE& dst, const USHORT3_TYPE& src)
+{
+ convert_UINT1_from_USHORT1(dst[0], src[0]);
+ convert_UINT1_from_USHORT1(dst[1], src[1]);
+ convert_UINT1_from_USHORT1(dst[2], src[2]);
+}
+
+// USHORT4_TYPE -> UINT4_TYPE
+PX_INLINE void convert_UINT4_from_USHORT4(UINT4_TYPE& dst, const USHORT4_TYPE& src)
+{
+ convert_UINT1_from_USHORT1(dst[0], src[0]);
+ convert_UINT1_from_USHORT1(dst[1], src[1]);
+ convert_UINT1_from_USHORT1(dst[2], src[2]);
+ convert_UINT1_from_USHORT1(dst[3], src[3]);
+}
+
+// UINT1_TYPE -> USHORT1_TYPE
+PX_INLINE void convert_USHORT1_from_UINT1(USHORT1_TYPE& dst, const UINT1_TYPE& src)
+{
+ dst = (uint16_t)src;
+}
+
+// UINT2_TYPE -> USHORT2_TYPE
+PX_INLINE void convert_USHORT2_from_UINT2(USHORT2_TYPE& dst, const UINT2_TYPE& src)
+{
+ convert_USHORT1_from_UINT1(dst[0], src[0]);
+ convert_USHORT1_from_UINT1(dst[1], src[1]);
+}
+
+// UINT3_TYPE -> USHORT3_TYPE
+PX_INLINE void convert_USHORT3_from_UINT3(USHORT3_TYPE& dst, const UINT3_TYPE& src)
+{
+ convert_USHORT1_from_UINT1(dst[0], src[0]);
+ convert_USHORT1_from_UINT1(dst[1], src[1]);
+ convert_USHORT1_from_UINT1(dst[2], src[2]);
+}
+
+// UINT4_TYPE -> USHORT4_TYPE
+PX_INLINE void convert_USHORT4_from_UINT4(USHORT4_TYPE& dst, const UINT4_TYPE& src)
+{
+ convert_USHORT1_from_UINT1(dst[0], src[0]);
+ convert_USHORT1_from_UINT1(dst[1], src[1]);
+ convert_USHORT1_from_UINT1(dst[2], src[2]);
+ convert_USHORT1_from_UINT1(dst[3], src[3]);
+}
+
+// BYTE_SNORM1_TYPE -> FLOAT1_TYPE
+PX_INLINE void convert_FLOAT1_from_BYTE_SNORM1(FLOAT1_TYPE& dst, const BYTE_SNORM1_TYPE& src)
+{
+ dst = (float)src / 127.0f;
+}
+
+// BYTE_SNORM2_TYPE -> FLOAT2_TYPE
+PX_INLINE void convert_FLOAT2_from_BYTE_SNORM2(FLOAT2_TYPE& dst, const BYTE_SNORM2_TYPE& src)
+{
+ convert_FLOAT1_from_BYTE_SNORM1(dst[0], src[0]);
+ convert_FLOAT1_from_BYTE_SNORM1(dst[1], src[1]);
+}
+
+// BYTE_SNORM3_TYPE -> FLOAT3_TYPE
+PX_INLINE void convert_FLOAT3_from_BYTE_SNORM3(FLOAT3_TYPE& dst, const BYTE_SNORM3_TYPE& src)
+{
+ convert_FLOAT1_from_BYTE_SNORM1(dst[0], src[0]);
+ convert_FLOAT1_from_BYTE_SNORM1(dst[1], src[1]);
+ convert_FLOAT1_from_BYTE_SNORM1(dst[2], src[2]);
+}
+
+// BYTE_SNORM4_TYPE -> FLOAT4_TYPE
+PX_INLINE void convert_FLOAT4_from_BYTE_SNORM4(FLOAT4_TYPE& dst, const BYTE_SNORM4_TYPE& src)
+{
+ convert_FLOAT1_from_BYTE_SNORM1(dst[0], src[0]);
+ convert_FLOAT1_from_BYTE_SNORM1(dst[1], src[1]);
+ convert_FLOAT1_from_BYTE_SNORM1(dst[2], src[2]);
+ convert_FLOAT1_from_BYTE_SNORM1(dst[3], src[3]);
+}
+
+// BYTE_SNORM4_QUATXYZW_TYPE -> FLOAT4_QUAT_TYPE
+PX_INLINE void convert_FLOAT4_QUAT_from_BYTE_SNORM4_QUATXYZW(FLOAT4_QUAT_TYPE& dst, const BYTE_SNORM4_QUATXYZW_TYPE& src)
+{
+ convert_FLOAT1_from_BYTE_SNORM1(dst.x, src[0]);
+ convert_FLOAT1_from_BYTE_SNORM1(dst.y, src[1]);
+ convert_FLOAT1_from_BYTE_SNORM1(dst.z, src[2]);
+ convert_FLOAT1_from_BYTE_SNORM1(dst.w, src[3]);
+}
+
+// SHORT_SNORM1_TYPE -> FLOAT1_TYPE
+PX_INLINE void convert_FLOAT1_from_SHORT_SNORM1(FLOAT1_TYPE& dst, const SHORT_SNORM1_TYPE& src)
+{
+ dst = (float)src / 32767.0f;
+}
+
+// SHORT_SNORM2_TYPE -> FLOAT2_TYPE
+PX_INLINE void convert_FLOAT2_from_SHORT_SNORM2(FLOAT2_TYPE& dst, const SHORT_SNORM2_TYPE& src)
+{
+ convert_FLOAT1_from_SHORT_SNORM1(dst[0], src[0]);
+ convert_FLOAT1_from_SHORT_SNORM1(dst[1], src[1]);
+}
+
+// SHORT_SNORM3_TYPE -> FLOAT3_TYPE
+PX_INLINE void convert_FLOAT3_from_SHORT_SNORM3(FLOAT3_TYPE& dst, const SHORT_SNORM3_TYPE& src)
+{
+ convert_FLOAT1_from_SHORT_SNORM1(dst[0], src[0]);
+ convert_FLOAT1_from_SHORT_SNORM1(dst[1], src[1]);
+ convert_FLOAT1_from_SHORT_SNORM1(dst[2], src[2]);
+}
+
+// SHORT_SNORM4_TYPE -> FLOAT4_TYPE
+PX_INLINE void convert_FLOAT4_from_SHORT_SNORM4(FLOAT4_TYPE& dst, const SHORT_SNORM4_TYPE& src)
+{
+ convert_FLOAT1_from_SHORT_SNORM1(dst[0], src[0]);
+ convert_FLOAT1_from_SHORT_SNORM1(dst[1], src[1]);
+ convert_FLOAT1_from_SHORT_SNORM1(dst[2], src[2]);
+ convert_FLOAT1_from_SHORT_SNORM1(dst[3], src[3]);
+}
+
+// SHORT_SNORM4_QUATXYZW_TYPE -> FLOAT4_QUAT_TYPE
+PX_INLINE void convert_FLOAT4_QUAT_from_SHORT_SNORM4_QUATXYZW(FLOAT4_QUAT_TYPE& dst, const SHORT_SNORM4_QUATXYZW_TYPE& src)
+{
+ convert_FLOAT1_from_SHORT_SNORM1(dst.x, src[0]);
+ convert_FLOAT1_from_SHORT_SNORM1(dst.y, src[1]);
+ convert_FLOAT1_from_SHORT_SNORM1(dst.z, src[2]);
+ convert_FLOAT1_from_SHORT_SNORM1(dst.w, src[3]);
+}
+
+// FLOAT1_TYPE -> BYTE_SNORM1_TYPE
+PX_INLINE void convert_BYTE_SNORM1_from_FLOAT1(BYTE_SNORM1_TYPE& dst, const FLOAT1_TYPE& src)
+{
+ dst = (int8_t)((int16_t)(src * 127.0f + 127.5f) - 127); // Doing it this way to avoid nonuniform mapping near zero
+}
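+// For example, src = -1.0f, 0.0f and 1.0f map to dst = -127, 0 and 127. Adding 127.5f before truncating makes
+// the truncation behave as round-to-nearest over the whole [-1, 1] range, so every quantization bucket
+// (including the one around zero) has the same width.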
+
+// FLOAT2_TYPE -> BYTE_SNORM2_TYPE
+PX_INLINE void convert_BYTE_SNORM2_from_FLOAT2(BYTE_SNORM2_TYPE& dst, const FLOAT2_TYPE& src)
+{
+ convert_BYTE_SNORM1_from_FLOAT1(dst[0], src[0]);
+ convert_BYTE_SNORM1_from_FLOAT1(dst[1], src[1]);
+}
+
+// FLOAT3_TYPE -> BYTE_SNORM3_TYPE
+PX_INLINE void convert_BYTE_SNORM3_from_FLOAT3(BYTE_SNORM3_TYPE& dst, const FLOAT3_TYPE& src)
+{
+ convert_BYTE_SNORM1_from_FLOAT1(dst[0], src[0]);
+ convert_BYTE_SNORM1_from_FLOAT1(dst[1], src[1]);
+ convert_BYTE_SNORM1_from_FLOAT1(dst[2], src[2]);
+}
+
+// FLOAT4_TYPE -> BYTE_SNORM4_TYPE
+PX_INLINE void convert_BYTE_SNORM4_from_FLOAT4(BYTE_SNORM4_TYPE& dst, const FLOAT4_TYPE& src)
+{
+ convert_BYTE_SNORM1_from_FLOAT1(dst[0], src[0]);
+ convert_BYTE_SNORM1_from_FLOAT1(dst[1], src[1]);
+ convert_BYTE_SNORM1_from_FLOAT1(dst[2], src[2]);
+ convert_BYTE_SNORM1_from_FLOAT1(dst[3], src[3]);
+}
+
+// FLOAT4_QUAT_TYPE -> BYTE_SNORM4_QUATXYZW_TYPE
+PX_INLINE void convert_BYTE_SNORM4_QUATXYZW_from_FLOAT4_QUAT(BYTE_SNORM4_QUATXYZW_TYPE& dst, const FLOAT4_QUAT_TYPE& src)
+{
+ convert_BYTE_SNORM1_from_FLOAT1(dst[0], src.x);
+ convert_BYTE_SNORM1_from_FLOAT1(dst[1], src.y);
+ convert_BYTE_SNORM1_from_FLOAT1(dst[2], src.z);
+ convert_BYTE_SNORM1_from_FLOAT1(dst[3], src.w);
+}
+
+// FLOAT1_TYPE -> SHORT_SNORM1_TYPE
+PX_INLINE void convert_SHORT_SNORM1_from_FLOAT1(SHORT_SNORM1_TYPE& dst, const FLOAT1_TYPE& src)
+{
+ dst = (int16_t)((int32_t)(src * 32767.0f + 32767.5f) - 32767); // Doing it this way to avoid nonuniform mapping near zero
+}
+
+// FLOAT2_TYPE -> SHORT_SNORM2_TYPE
+PX_INLINE void convert_SHORT_SNORM2_from_FLOAT2(SHORT_SNORM2_TYPE& dst, const FLOAT2_TYPE& src)
+{
+ convert_SHORT_SNORM1_from_FLOAT1(dst[0], src[0]);
+ convert_SHORT_SNORM1_from_FLOAT1(dst[1], src[1]);
+}
+
+// FLOAT3_TYPE -> SHORT_SNORM3_TYPE
+PX_INLINE void convert_SHORT_SNORM3_from_FLOAT3(SHORT_SNORM3_TYPE& dst, const FLOAT3_TYPE& src)
+{
+ convert_SHORT_SNORM1_from_FLOAT1(dst[0], src[0]);
+ convert_SHORT_SNORM1_from_FLOAT1(dst[1], src[1]);
+ convert_SHORT_SNORM1_from_FLOAT1(dst[2], src[2]);
+}
+
+// FLOAT4_TYPE -> SHORT_SNORM4_TYPE
+PX_INLINE void convert_SHORT_SNORM4_from_FLOAT4(SHORT_SNORM4_TYPE& dst, const FLOAT4_TYPE& src)
+{
+ convert_SHORT_SNORM1_from_FLOAT1(dst[0], src[0]);
+ convert_SHORT_SNORM1_from_FLOAT1(dst[1], src[1]);
+ convert_SHORT_SNORM1_from_FLOAT1(dst[2], src[2]);
+ convert_SHORT_SNORM1_from_FLOAT1(dst[3], src[3]);
+}
+
+// FLOAT4_QUAT_TYPE -> SHORT_SNORM4_QUATXYZW_TYPE
+PX_INLINE void convert_SHORT_SNORM4_QUATXYZW_from_FLOAT4_QUAT(SHORT_SNORM4_QUATXYZW_TYPE& dst, const FLOAT4_QUAT_TYPE& src)
+{
+ convert_SHORT_SNORM1_from_FLOAT1(dst[0], src.x);
+ convert_SHORT_SNORM1_from_FLOAT1(dst[1], src.y);
+ convert_SHORT_SNORM1_from_FLOAT1(dst[2], src.z);
+ convert_SHORT_SNORM1_from_FLOAT1(dst[3], src.w);
+}
+
+// Color format conversions
+PX_INLINE void convert_B8G8R8A8_from_R8G8B8A8(B8G8R8A8_TYPE& dst, const R8G8B8A8_TYPE& src)
+{
+ dst.r = src.r;
+ dst.g = src.g;
+ dst.b = src.b;
+ dst.a = src.a;
+}
+
+PX_INLINE void convert_R8G8B8A8_from_B8G8R8A8(R8G8B8A8_TYPE& dst, const B8G8R8A8_TYPE& src)
+{
+ dst.r = src.r;
+ dst.g = src.g;
+ dst.b = src.b;
+ dst.a = src.a;
+}
+
+PX_INLINE void convert_R32G32B32A32_FLOAT_from_R8G8B8A8(R32G32B32A32_FLOAT_TYPE& dst, const R8G8B8A8_TYPE& src)
+{
+ (VertexColor&)dst = VertexColor((const ColorRGBA&)src);
+}
+
+PX_INLINE void convert_R8G8B8A8_from_R32G32B32A32_FLOAT(R8G8B8A8_TYPE& dst, const R32G32B32A32_FLOAT_TYPE& src)
+{
+ (ColorRGBA&)dst = ((const VertexColor&)src).toColorRGBA();
+}
+
+PX_INLINE void convert_B32G32R32A32_FLOAT_from_R8G8B8A8(B32G32R32A32_FLOAT_TYPE& dst, const R8G8B8A8_TYPE& src)
+{
+ (VertexColor&)dst = VertexColor((const ColorRGBA&)src);
+ float t = dst.r;
+ dst.r = dst.b;
+ dst.b = t;
+}
+
+PX_INLINE void convert_R8G8B8A8_from_B32G32R32A32_FLOAT(R8G8B8A8_TYPE& dst, const B32G32R32A32_FLOAT_TYPE& src)
+{
+ (ColorRGBA&)dst = ((const VertexColor&)src).toColorRGBA();
+ uint8_t t = dst.r;
+ dst.r = dst.b;
+ dst.b = t;
+}
+
+PX_INLINE void convert_R32G32B32A32_FLOAT_from_B8G8R8A8(R32G32B32A32_FLOAT_TYPE& dst, const B8G8R8A8_TYPE& src)
+{
+ (VertexColor&)dst = VertexColor((const ColorRGBA&)src);
+ float t = dst.r;
+ dst.r = dst.b;
+ dst.b = t;
+}
+
+PX_INLINE void convert_B8G8R8A8_from_R32G32B32A32_FLOAT(B8G8R8A8_TYPE& dst, const R32G32B32A32_FLOAT_TYPE& src)
+{
+ (ColorRGBA&)dst = ((const VertexColor&)src).toColorRGBA();
+ uint8_t t = dst.r;
+ dst.r = dst.b;
+ dst.b = t;
+}
+
+PX_INLINE void convert_B32G32R32A32_FLOAT_from_B8G8R8A8(B32G32R32A32_FLOAT_TYPE& dst, const B8G8R8A8_TYPE& src)
+{
+ (VertexColor&)dst = VertexColor((const ColorRGBA&)src);
+}
+
+PX_INLINE void convert_B8G8R8A8_from_B32G32R32A32_FLOAT(B8G8R8A8_TYPE& dst, const B32G32R32A32_FLOAT_TYPE& src)
+{
+ (ColorRGBA&)dst = ((const VertexColor&)src).toColorRGBA();
+}
+
+PX_INLINE void convert_B32G32R32A32_FLOAT_from_R32G32B32A32_FLOAT(B32G32R32A32_FLOAT_TYPE& dst, const R32G32B32A32_FLOAT_TYPE& src)
+{
+ dst.r = src.r;
+ dst.g = src.g;
+ dst.b = src.b;
+ dst.a = src.a;
+}
+
+PX_INLINE void convert_R32G32B32A32_FLOAT_from_B32G32R32A32_FLOAT(R32G32B32A32_FLOAT_TYPE& dst, const B32G32R32A32_FLOAT_TYPE& src)
+{
+ dst.r = src.r;
+ dst.g = src.g;
+ dst.b = src.b;
+ dst.a = src.a;
+}
+
+// Data conversion macros
+#define HANDLE_CONVERT1( _DstFormat, _SrcFormat ) \
+ case RenderDataFormat::_DstFormat : \
+ if( srcFormat == RenderDataFormat::_SrcFormat ) \
+ { \
+ convert_##_DstFormat##_from_##_SrcFormat( ((_DstFormat##_TYPE*)dst)[dstIndex], ((const _SrcFormat##_TYPE*)src)[srcIndex] ); \
+ } \
+ break;
+
+#define HANDLE_CONVERT2( _DstFormat, _SrcFormat1, _SrcFormat2 ) \
+ case RenderDataFormat::_DstFormat : \
+ if( srcFormat == RenderDataFormat::_SrcFormat1 ) \
+ { \
+ convert_##_DstFormat##_from_##_SrcFormat1( ((_DstFormat##_TYPE*)dst)[dstIndex], ((const _SrcFormat1##_TYPE*)src)[srcIndex] ); \
+ } \
+ else if( srcFormat == RenderDataFormat::_SrcFormat2 ) \
+ { \
+ convert_##_DstFormat##_from_##_SrcFormat2( ((_DstFormat##_TYPE*)dst)[dstIndex], ((const _SrcFormat2##_TYPE*)src)[srcIndex] ); \
+ } \
+ break;
+
+#define HANDLE_CONVERT3( _DstFormat, _SrcFormat1, _SrcFormat2, _SrcFormat3 ) \
+ case RenderDataFormat::_DstFormat : \
+ if( srcFormat == RenderDataFormat::_SrcFormat1 ) \
+ { \
+ convert_##_DstFormat##_from_##_SrcFormat1( ((_DstFormat##_TYPE*)dst)[dstIndex], ((const _SrcFormat1##_TYPE*)src)[srcIndex] ); \
+ } \
+ else if( srcFormat == RenderDataFormat::_SrcFormat2 ) \
+ { \
+ convert_##_DstFormat##_from_##_SrcFormat2( ((_DstFormat##_TYPE*)dst)[dstIndex], ((const _SrcFormat2##_TYPE*)src)[srcIndex] ); \
+ } \
+ else if( srcFormat == RenderDataFormat::_SrcFormat3 ) \
+ { \
+ convert_##_DstFormat##_from_##_SrcFormat3( ((_DstFormat##_TYPE*)dst)[dstIndex], ((const _SrcFormat3##_TYPE*)src)[srcIndex] ); \
+ } \
+ break;
+
+// ... etc.
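+
+// For reference, HANDLE_CONVERT1(UINT1, USHORT1) expands (inside the switch in copyRenderVertexData below) to:
+//
+//     case RenderDataFormat::UINT1 :
+//         if( srcFormat == RenderDataFormat::USHORT1 )
+//         {
+//             convert_UINT1_from_USHORT1( ((UINT1_TYPE*)dst)[dstIndex], ((const USHORT1_TYPE*)src)[srcIndex] );
+//         }
+//         break;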
+
+PX_INLINE bool copyRenderVertexData(void* dst, RenderDataFormat::Enum dstFormat, uint32_t dstIndex, const void* src, RenderDataFormat::Enum srcFormat, uint32_t srcIndex)
+{
+ if (dstFormat == srcFormat)
+ {
+ // Direct data copy
+ if (dstFormat != RenderDataFormat::UNSPECIFIED)
+ {
+ uint8_t* srcPtr = (uint8_t*)src;
+ uint8_t* dstPtr = (uint8_t*)dst;
+
+ const uint32_t size = RenderDataFormat::getFormatDataSize(dstFormat);
+ memcpy(dstPtr + (dstIndex * size), srcPtr + (srcIndex * size), size);
+ }
+ return true;
+ }
+
+ switch (dstFormat)
+ {
+ case RenderDataFormat::UNSPECIFIED:
+ break; // The simplest case, do nothing
+
+ // Put format converters here
+
+ HANDLE_CONVERT1(USHORT1, UINT1)
+ HANDLE_CONVERT1(USHORT2, UINT2)
+ HANDLE_CONVERT1(USHORT3, UINT3)
+ HANDLE_CONVERT1(USHORT4, UINT4)
+
+ HANDLE_CONVERT1(UINT1, USHORT1)
+ HANDLE_CONVERT1(UINT2, USHORT2)
+ HANDLE_CONVERT1(UINT3, USHORT3)
+ HANDLE_CONVERT1(UINT4, USHORT4)
+
+ HANDLE_CONVERT1(BYTE_SNORM1, FLOAT1)
+ HANDLE_CONVERT1(BYTE_SNORM2, FLOAT2)
+ HANDLE_CONVERT1(BYTE_SNORM3, FLOAT3)
+ HANDLE_CONVERT1(BYTE_SNORM4, FLOAT4)
+ HANDLE_CONVERT1(BYTE_SNORM4_QUATXYZW, FLOAT4_QUAT)
+ HANDLE_CONVERT1(SHORT_SNORM1, FLOAT1)
+ HANDLE_CONVERT1(SHORT_SNORM2, FLOAT2)
+ HANDLE_CONVERT1(SHORT_SNORM3, FLOAT3)
+ HANDLE_CONVERT1(SHORT_SNORM4, FLOAT4)
+ HANDLE_CONVERT1(SHORT_SNORM4_QUATXYZW, FLOAT4_QUAT)
+
+ HANDLE_CONVERT2(FLOAT1, BYTE_SNORM1, SHORT_SNORM1)
+ HANDLE_CONVERT2(FLOAT2, BYTE_SNORM2, SHORT_SNORM2)
+ HANDLE_CONVERT2(FLOAT3, BYTE_SNORM3, SHORT_SNORM3)
+ HANDLE_CONVERT2(FLOAT4, BYTE_SNORM4, SHORT_SNORM4)
+ HANDLE_CONVERT2(FLOAT4_QUAT, BYTE_SNORM4_QUATXYZW, SHORT_SNORM4_QUATXYZW)
+
+ HANDLE_CONVERT3(R8G8B8A8, B8G8R8A8, R32G32B32A32_FLOAT, B32G32R32A32_FLOAT)
+ HANDLE_CONVERT3(B8G8R8A8, R8G8B8A8, R32G32B32A32_FLOAT, B32G32R32A32_FLOAT)
+ HANDLE_CONVERT3(R32G32B32A32_FLOAT, R8G8B8A8, B8G8R8A8, B32G32R32A32_FLOAT)
+ HANDLE_CONVERT3(B32G32R32A32_FLOAT, R8G8B8A8, B8G8R8A8, R32G32B32A32_FLOAT)
+
+ default:
+ {
+ PX_ALWAYS_ASSERT(); // Format conversion not handled
+ return false;
+ }
+ }
+
+ return true;
+}
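+
+// Usage sketch (assumes 'srcNormals' holds FLOAT3 normals and 'dstNormals' has room for BYTE_SNORM3 values):
+//
+//     copyRenderVertexData(dstNormals, RenderDataFormat::BYTE_SNORM3, i,
+//                          srcNormals, RenderDataFormat::FLOAT3, i);      // quantize one normal to signed bytes
+//
+// copyRenderVertexBuffer (declared below) applies the same per-element conversion across a range of vertices.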
+
+bool copyRenderVertexBuffer(void* dst, RenderDataFormat::Enum dstFormat, uint32_t dstStride, uint32_t dstStart,
+ const void* src, RenderDataFormat::Enum srcFormat, uint32_t srcStride, uint32_t srcStart,
+ uint32_t numVertices, int32_t* invMap = NULL);
+
+/*
+ Local utilities
+ */
+PX_INLINE bool vertexSemanticFormatValid(RenderVertexSemantic::Enum semantic, RenderDataFormat::Enum format)
+{
+ switch (semantic)
+ {
+ case RenderVertexSemantic::POSITION:
+ return format == RenderDataFormat::FLOAT3;
+ case RenderVertexSemantic::NORMAL:
+ case RenderVertexSemantic::BINORMAL:
+ return format == RenderDataFormat::FLOAT3 || format == RenderDataFormat::BYTE_SNORM3;
+ case RenderVertexSemantic::TANGENT:
+ return format == RenderDataFormat::FLOAT3 || format == RenderDataFormat::BYTE_SNORM3 ||
+ format == RenderDataFormat::FLOAT4 || format == RenderDataFormat::BYTE_SNORM4;
+ case RenderVertexSemantic::COLOR:
+ return format == RenderDataFormat::R8G8B8A8 || format == RenderDataFormat::B8G8R8A8;
+ case RenderVertexSemantic::TEXCOORD0:
+ case RenderVertexSemantic::TEXCOORD1:
+ case RenderVertexSemantic::TEXCOORD2:
+ case RenderVertexSemantic::TEXCOORD3:
+ return format == RenderDataFormat::FLOAT2; // Not supporting other formats yet
+ case RenderVertexSemantic::DISPLACEMENT_TEXCOORD:
+ return format == RenderDataFormat::FLOAT2 || format == RenderDataFormat::FLOAT3;
+ case RenderVertexSemantic::DISPLACEMENT_FLAGS:
+ return format == RenderDataFormat::UINT1 || format == RenderDataFormat::USHORT1;
+ case RenderVertexSemantic::BONE_INDEX:
+ return format == RenderDataFormat::USHORT1 ||
+ format == RenderDataFormat::USHORT2 ||
+ format == RenderDataFormat::USHORT3 ||
+ format == RenderDataFormat::USHORT4; // Not supporting other formats yet
+ case RenderVertexSemantic::BONE_WEIGHT:
+ return format == RenderDataFormat::FLOAT1 ||
+ format == RenderDataFormat::FLOAT2 ||
+ format == RenderDataFormat::FLOAT3 ||
+ format == RenderDataFormat::FLOAT4; // Not supporting other formats yet
+ default:
+ return false;
+ }
+}
+
+PX_INLINE uint32_t vertexSemanticFormatElementCount(RenderVertexSemantic::Enum semantic, RenderDataFormat::Enum format)
+{
+ switch (semantic)
+ {
+ case RenderVertexSemantic::CUSTOM:
+ case RenderVertexSemantic::POSITION:
+ case RenderVertexSemantic::NORMAL:
+ case RenderVertexSemantic::TANGENT:
+ case RenderVertexSemantic::BINORMAL:
+ case RenderVertexSemantic::COLOR:
+ case RenderVertexSemantic::TEXCOORD0:
+ case RenderVertexSemantic::TEXCOORD1:
+ case RenderVertexSemantic::TEXCOORD2:
+ case RenderVertexSemantic::TEXCOORD3:
+ case RenderVertexSemantic::DISPLACEMENT_TEXCOORD:
+ case RenderVertexSemantic::DISPLACEMENT_FLAGS:
+ return 1;
+ case RenderVertexSemantic::BONE_INDEX:
+ switch (format)
+ {
+ case RenderDataFormat::USHORT1:
+ return 1;
+ case RenderDataFormat::USHORT2:
+ return 2;
+ case RenderDataFormat::USHORT3:
+ return 3;
+ case RenderDataFormat::USHORT4:
+ return 4;
+ default:
+ break;
+ }
+ return 0;
+ case RenderVertexSemantic::BONE_WEIGHT:
+ switch (format)
+ {
+ case RenderDataFormat::FLOAT1:
+ return 1;
+ case RenderDataFormat::FLOAT2:
+ return 2;
+ case RenderDataFormat::FLOAT3:
+ return 3;
+ case RenderDataFormat::FLOAT4:
+ return 4;
+ default:
+ break;
+ }
+ return 0;
+ default:
+ return 0;
+ }
+}
+
+
+}
+} // end namespace nvidia::apex
+
+
+#endif // __APEXSHAREDUTILS_H__
diff --git a/APEX_1.4/common/include/ApexSimdMath.h b/APEX_1.4/common/include/ApexSimdMath.h
new file mode 100644
index 00000000..9c807046
--- /dev/null
+++ b/APEX_1.4/common/include/ApexSimdMath.h
@@ -0,0 +1,127 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+#ifndef APEX_SIMD_MATH_H
+#define APEX_SIMD_MATH_H
+
+#include "PxMat44.h"
+#include "PxVec3.h"
+#include "PsMathUtils.h"
+#include "NvSimd4f.h"
+
+namespace nvidia
+{
+	/** Normalize the (a[0], a[1], a[2]) part of a vector
+	 * @param a input vector
+	 * @return normalized vector (the w component is scaled by the same factor as x, y, z)
+	 */
+ inline Simd4f normalizeSimd3f(const Simd4f& a)
+ {
+ return a * rsqrt(dot3(a, a));
+ }
+
+	/** Create a simd 4-float tuple from a vec3 and a w component
+	 * @param vec3 vector with 3 components
+	 * @param wComponent value used to initialize the final (w) element
+	 * @return filled simd 4-float tuple
+	 */
+ inline Simd4f createSimd3f(const physx::PxVec3& vec3, float wComponent = 0.0f)
+ {
+ return Simd4fLoad3SetWFactory(&vec3.x, wComponent);
+ }
+
+	/** Apply an affine transform to a position. The algorithm ignores pos.w.
+	 * @param transformAlignMemLayout transform (expected in an aligned memory layout)
+	 * @param pos input position
+	 * @return transformed position, with the w component set to one
+	 */
+ inline Simd4f applyAffineTransform(const physx::PxMat44& transformAlignMemLayout, const Simd4f& pos)
+ {
+ const physx::PxMat44& tr = transformAlignMemLayout;
+
+ const Simd4f& col0 = Simd4fAlignedLoadFactory(&tr.column0.x);
+ const Simd4f xMultiplier = splat<0>(pos);
+
+ const Simd4f& col1 = Simd4fAlignedLoadFactory(&tr.column1.x);
+ const Simd4f yMultiplier = splat<1>(pos);
+
+ const Simd4f& col2 = Simd4fAlignedLoadFactory(&tr.column2.x);
+ const Simd4f zMultiplier = splat<2>(pos);
+
+ Simd4f result = xMultiplier * col0 + yMultiplier * col1 + zMultiplier * col2 + Simd4fAlignedLoadFactory(&tr.column3.x);
+
+ array(result)[3] = 1.0f;
+
+ return result;
+ }
+
+	/** Apply the linear part of the transform to a vector (typically a direction rather than a position)
+	 * @param transformAlignMemLayout transform (expected in an aligned memory layout)
+	 * @param direction input vector; the algorithm ignores direction.w
+	 * @return transformed vector, with the w component set to zero
+	 */
+ inline Simd4f applyLinearTransform(const physx::PxMat44& transformAlignMemLayout, const Simd4f& direction)
+ {
+ const physx::PxMat44& tr = transformAlignMemLayout;
+ const Simd4f& col0 = Simd4fAlignedLoadFactory(&tr.column0.x);
+ const Simd4f xMultiplier = splat<0>(direction);
+ const Simd4f& col1 = Simd4fAlignedLoadFactory(&tr.column1.x);
+ const Simd4f yMultiplier = splat<1>(direction);
+ const Simd4f& col2 = Simd4fAlignedLoadFactory(&tr.column2.x);
+ const Simd4f zMultiplier = splat<2>(direction);
+ Simd4f result = xMultiplier * col0 + yMultiplier * col1 + zMultiplier * col2;
+ result = result & gSimd4fMaskXYZ;
+
+ return result;
+ }
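+
+	// Usage sketch (assumes 'tm' is a physx::PxMat44 stored with the aligned layout expected above, and 'p' a physx::PxVec3):
+	//
+	//     const Simd4f pos      = createSimd3f(p, 1.0f);              // (p.x, p.y, p.z, 1)
+	//     const Simd4f worldPos = applyAffineTransform(tm, pos);      // rotation/scale + translation, w set to 1
+	//     const Simd4f worldDir = applyLinearTransform(tm, pos);      // same xyz treated as a direction: rotation/scale only, w set to 0
+	//     const float  x        = array(worldPos)[0];                 // read back an individual component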
+
+	/** Transpose, in place, the matrix consisting of columns col0, col1, col2, col3.
+ * Ported version of V4Transpose() from PxShared\*\foundation\include\PsVecMathAoSScalarInline.h
+ * @param col0 input column of the matrix, and output column of the result matrix
+ * @param col1 input column of the matrix, and output column of the result matrix
+ * @param col2 input column of the matrix, and output column of the result matrix
+ * @param col3 input column of the matrix, and output column of the result matrix
+ * @return None
+ */
+ inline void applyTranspose(Simd4f& col0, Simd4f& col1, Simd4f& col2, Simd4f& col3)
+ {
+		/*
+		     col0     col1     col2     col3
+		0   col0[0]  col1[0]  col2[0]  col3[0]
+		1   col0[1]  col1[1]  col2[1]  col3[1]
+		2   col0[2]  col1[2]  col2[2]  col3[2]
+		3   col0[3]  col1[3]  col2[3]  col3[3]
+		*/
+
+ float* arrayCol0 = array(col0);
+ float* arrayCol1 = array(col1);
+ float* arrayCol2 = array(col2);
+ float* arrayCol3 = array(col3);
+
+ using physx::PxF32;
+ const PxF32 t01 = arrayCol0[1];
+ const PxF32 t02 = arrayCol0[2];
+ const PxF32 t03 = arrayCol0[3];
+ const PxF32 t12 = arrayCol1[2];
+ const PxF32 t13 = arrayCol1[3];
+ const PxF32 t23 = arrayCol2[3];
+
+ // x -- 0, y -- 1, z -- 2, w -- 3
+ arrayCol0[1] = arrayCol1[0]; arrayCol0[2] = arrayCol2[0]; arrayCol0[3] = arrayCol3[0];
+ arrayCol1[2] = arrayCol2[1]; arrayCol1[3] = arrayCol3[1];
+ arrayCol2[3] = arrayCol3[2];
+
+ arrayCol1[0] = t01; arrayCol2[0] = t02; arrayCol3[0] = t03;
+ arrayCol2[1] = t12; arrayCol3[1] = t13;
+ arrayCol3[2] = t23;
+ }
+}
+
+#endif // APEX_SIMD_MATH_H
diff --git a/APEX_1.4/common/include/ApexSubdivider.h b/APEX_1.4/common/include/ApexSubdivider.h
new file mode 100644
index 00000000..a91d2a74
--- /dev/null
+++ b/APEX_1.4/common/include/ApexSubdivider.h
@@ -0,0 +1,200 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+
+#ifndef APEX_SUBDIVIDER_H
+#define APEX_SUBDIVIDER_H
+
+#include "ApexDefs.h"
+#include "PsUserAllocated.h"
+#include "ApexRand.h"
+#include "PsArray.h"
+#include "ApexUsingNamespace.h"
+
+#include "PxBounds3.h"
+
+namespace nvidia
+{
+namespace apex
+{
+
+class IProgressListener;
+
+class ApexSubdivider : public UserAllocated
+{
+public:
+ ApexSubdivider();
+
+ void clear();
+
+ void registerVertex(const PxVec3& v, uint32_t bitFlagPayload);
+ void registerTriangle(uint32_t i0, uint32_t i1, uint32_t i2);
+
+ void endRegistration();
+
+ void mergeVertices(IProgressListener* progress);
+ void closeMesh(IProgressListener* progress);
+ void subdivide(uint32_t subdivisionGridSize, IProgressListener* progress);
+
+ uint32_t getNumVertices() const;
+ uint32_t getNumTriangles() const;
+
+ void getVertex(uint32_t i, PxVec3& v, uint32_t& bitFlagPayload) const;
+ void getTriangle(uint32_t i, uint32_t& i0, uint32_t& i1, uint32_t& i2) const;
+
+private:
+ void compress();
+ void closeHole(uint32_t* indices, uint32_t numIndices);
+ float qualityOfTriangle(uint32_t v0, uint32_t v1, uint32_t v2) const;
+ int32_t getTriangleNr(const uint32_t v0, const uint32_t v1, const uint32_t v2) const;
+
+ PxBounds3 mBound;
+
+ struct SubdividerVertex
+ {
+ SubdividerVertex() : pos(0.0f, 0.0f, 0.0f), firstTriangle(-1), payload(0), marked(false) {}
+ SubdividerVertex(const PxVec3& newPos, uint32_t bitFlagPayload) : pos(newPos), firstTriangle(-1), payload(bitFlagPayload), marked(false) {}
+
+ PxVec3 pos;
+ int32_t firstTriangle;
+ uint32_t payload;
+ bool marked;
+ };
+
+ struct SubdividerVertexRef
+ {
+ SubdividerVertexRef() : pos(0.0f, 0.0f, 0.0f), vertexNr(0) {}
+ SubdividerVertexRef(const PxVec3& p, uint32_t vNr) : pos(p), vertexNr(vNr) {}
+ PX_INLINE bool operator < (const SubdividerVertexRef& vr) const
+ {
+ return pos.x < vr.pos.x;
+ }
+
+ PX_INLINE bool operator()(const SubdividerVertexRef& v1, const SubdividerVertexRef& v2) const
+ {
+ return v1 < v2;
+ }
+
+ PxVec3 pos;
+ uint32_t vertexNr;
+ };
+
+
+
+ struct SubdividerEdge
+ {
+ void init(uint32_t newV0, uint32_t newV1, uint32_t newTriangleNr)
+ {
+ v0 = PxMax(newV0, newV1);
+ v1 = PxMin(newV0, newV1);
+ triangleNr = newTriangleNr;
+ }
+ PX_INLINE bool operator < (const SubdividerEdge& e) const
+ {
+ if (v0 < e.v0)
+ {
+ return true;
+ }
+ if (v0 > e.v0)
+ {
+ return false;
+ }
+ return (v1 < e.v1);
+ }
+ PX_INLINE bool operator()(const SubdividerEdge& e1, const SubdividerEdge& e2) const
+ {
+ return e1 < e2;
+ }
+ PX_INLINE bool operator == (const SubdividerEdge& e) const
+ {
+ return v0 == e.v0 && v1 == e.v1;
+ }
+ uint32_t v0, v1;
+ uint32_t triangleNr;
+ };
+
+ int32_t binarySearchEdges(const Array<SubdividerEdge>& edges, uint32_t v0, uint32_t v1, uint32_t triangleNr) const;
+
+ struct SubdividerTriangle
+ {
+ void init(uint32_t v0, uint32_t v1, uint32_t v2)
+ {
+ vertexNr[0] = v0;
+ vertexNr[1] = v1;
+ vertexNr[2] = v2;
+ }
+
+ bool containsVertex(uint32_t vNr) const
+ {
+ return vertexNr[0] == vNr || vertexNr[1] == vNr || vertexNr[2] == vNr;
+ }
+
+ void replaceVertex(uint32_t vOld, uint32_t vNew)
+ {
+ if (vertexNr[0] == vOld)
+ {
+ vertexNr[0] = vNew;
+ }
+ else if (vertexNr[1] == vOld)
+ {
+ vertexNr[1] = vNew;
+ }
+ else if (vertexNr[2] == vOld)
+ {
+ vertexNr[2] = vNew;
+ }
+ else
+ {
+ PX_ASSERT(0 && "replaceVertex failed");
+ }
+ }
+
+ bool operator == (SubdividerTriangle& t) const
+ {
+ return
+ t.containsVertex(vertexNr[0]) &&
+ t.containsVertex(vertexNr[1]) &&
+ t.containsVertex(vertexNr[2]);
+ }
+ bool isValid() const
+ {
+ return (vertexNr[0] != vertexNr[1] && vertexNr[0] != vertexNr[2] && vertexNr[1] != vertexNr[2]);
+ }
+
+ uint32_t vertexNr[3];
+ };
+
+ struct TriangleList
+ {
+ TriangleList() : triangleNumber(0), nextTriangle(-1) {}
+ TriangleList(uint32_t tNr) : triangleNumber(tNr), nextTriangle(-1) {}
+
+ uint32_t triangleNumber;
+ int32_t nextTriangle;
+ };
+
+ Array<SubdividerVertex> mVertices;
+ Array<SubdividerTriangle> mTriangles;
+ uint32_t mMarkedVertices;
+
+ QDSRand mRand;
+
+ Array<TriangleList> mTriangleList;
+ int32_t mTriangleListEmptyElement;
+ void addTriangleToVertex(uint32_t vertexNumber, uint32_t triangleNumber);
+ void removeTriangleFromVertex(uint32_t vertexNumber, uint32_t triangleNumber);
+ TriangleList& allocateTriangleElement();
+ void freeTriangleElement(uint32_t index);
+};
+
+}
+} // end namespace nvidia::apex
+
+#endif
diff --git a/APEX_1.4/common/include/ApexTest.h b/APEX_1.4/common/include/ApexTest.h
new file mode 100644
index 00000000..d20267f7
--- /dev/null
+++ b/APEX_1.4/common/include/ApexTest.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+
+#ifndef APEX_TEST_H
+#define APEX_TEST_H
+
+#define WARNING(exp, msg) if (!(exp)) {ret &= (exp); APEX_DEBUG_WARNING(msg);}
+#define EXPECT_TRUE(exp) WARNING(exp, "Expected true: " #exp)
+#define EXPECT_EQ(v1, v2) WARNING(v1 == v2, "Expected: " #v1 " == " #v2)
+#define EXPECT_NE(v1, v2) WARNING(v1 != v2, "Expected: " #v1 " != " #v2)
+#define EXPECT_GE(v1, v2) WARNING(v1 >= v2, "Expected: " #v1 " >= " #v2)
+#define EXPECT_GT(v1, v2) WARNING(v1 > v2, "Expected: " #v1 " > " #v2)
+#define EXPECT_LE(v1, v2) WARNING(v1 <= v2, "Expected: " #v1 " <= " #v2)
+#define EXPECT_LT(v1, v2) WARNING(v1 < v2, "Expected: " #v1 " < " #v2)
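+
+// Usage sketch: each EXPECT_* macro folds its result into a local 'bool ret' that the enclosing test provides, e.g.
+//
+//     bool ret = true;
+//     EXPECT_EQ(vertexCount, 4u);       // hypothetical values under test
+//     EXPECT_TRUE(indices != NULL);
+//     return ret;                       // false if any expectation failed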
+
+#endif //APEX_TEST_H
diff --git a/APEX_1.4/common/include/ApexTetrahedralizer.h b/APEX_1.4/common/include/ApexTetrahedralizer.h
new file mode 100644
index 00000000..77fc67b2
--- /dev/null
+++ b/APEX_1.4/common/include/ApexTetrahedralizer.h
@@ -0,0 +1,422 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+
+#ifndef APEX_TETRAHEDRALIZER_H
+#define APEX_TETRAHEDRALIZER_H
+
+#include "ApexDefs.h"
+#include "PxBounds3.h"
+
+#include "ApexUsingNamespace.h"
+#include "PsArray.h"
+#include "PsUserAllocated.h"
+
+#define TETRAHEDRALIZER_DEBUG_RENDERING 0
+
+namespace nvidia
+{
+namespace apex
+{
+
+class ApexMeshHash;
+class IProgressListener;
+
+class ApexTetrahedralizer : public UserAllocated
+{
+public:
+ ApexTetrahedralizer(uint32_t subdivision);
+ ~ApexTetrahedralizer();
+
+
+ void registerVertex(const PxVec3& v);
+ void registerTriangle(uint32_t i0, uint32_t i1, uint32_t i2);
+ void endRegistration(IProgressListener* progress);
+
+ uint32_t getNumVertices() const
+ {
+ return mVertices.size();
+ }
+ void getVertices(PxVec3* data);
+ uint32_t getNumIndices() const
+ {
+ return mTetras.size() * 4;
+ }
+ void getIndices(uint32_t* data);
+
+private:
+ struct FullTetrahedron;
+
+ void weldVertices();
+ void delaunayTetrahedralization(IProgressListener* progress);
+ int32_t findSurroundingTetra(uint32_t startTetra, const PxVec3& p) const;
+ float retriangulate(const uint32_t tetraNr, uint32_t vertexNr);
+ uint32_t swapTetrahedra(uint32_t startTet, IProgressListener* progress);
+ bool swapEdge(uint32_t v0, uint32_t v1);
+ bool removeOuterTetrahedra(IProgressListener* progress);
+
+ void updateCircumSphere(FullTetrahedron& tetra) const;
+ bool pointInCircumSphere(FullTetrahedron& tetra, const PxVec3& p) const;
+ bool pointInTetra(const FullTetrahedron& tetra, const PxVec3& p) const;
+
+ float getTetraVolume(const FullTetrahedron& tetra) const;
+ float getTetraVolume(int32_t v0, int32_t v1, int32_t v2, int32_t v3) const;
+ float getTetraQuality(const FullTetrahedron& tetra) const;
+ float getTetraLongestEdge(const FullTetrahedron& tetra) const;
+
+ bool triangleContainsVertexNr(uint32_t* triangle, uint32_t* vertexNumber, uint32_t nbVertices);
+
+ void compressTetrahedra(bool trashNeighbours);
+ void compressVertices();
+
+ inline bool isFarVertex(uint32_t vertNr) const
+ {
+ return mFirstFarVertex <= vertNr && vertNr <= mLastFarVertex;
+ }
+
+ ApexMeshHash* mMeshHash;
+
+ uint32_t mSubdivision;
+
+ PxBounds3 mBound;
+ float mBoundDiagonal;
+
+ struct TetraVertex
+ {
+ inline void init(const PxVec3& pos, uint32_t flags)
+ {
+ this->pos = pos;
+ this->flags = flags;
+ }
+ inline bool isDeleted() const
+ {
+ return flags == (uint32_t)0xdeadf00d;
+ }
+ inline void markDeleted()
+ {
+ flags = 0xdeadf00d;
+ }
+
+ PxVec3 pos;
+ uint32_t flags;
+ };
+ class LessInOneAxis
+ {
+ uint32_t mAxis;
+ public:
+ LessInOneAxis(uint32_t axis) : mAxis(axis) {}
+ bool operator()(const TetraVertex& v1, const TetraVertex& v2) const
+ {
+ return v1.pos[mAxis] < v2.pos[mAxis];
+ }
+ };
+
+ struct TetraEdge
+ {
+ void init(int32_t v0, int32_t v1, int32_t tetra, int neighborNr = -1)
+ {
+ this->tetraNr = (uint32_t)tetra;
+ this->neighborNr = neighborNr;
+ PX_ASSERT(v0 != v1);
+ vNr0 = (uint32_t)PxMin(v0, v1);
+ vNr1 = (uint32_t)PxMax(v0, v1);
+ }
+ PX_INLINE bool operator <(const TetraEdge& e) const
+ {
+ if (vNr0 < e.vNr0)
+ {
+ return true;
+ }
+ if (vNr0 > e.vNr0)
+ {
+ return false;
+ }
+ if (vNr1 < e.vNr1)
+ {
+ return true;
+ }
+ if (vNr1 > e.vNr1)
+ {
+ return false;
+ }
+ return (neighborNr < e.neighborNr);
+ }
+ PX_INLINE bool operator()(const TetraEdge& e1, const TetraEdge& e2) const
+ {
+ return e1 < e2;
+ }
+ bool operator ==(TetraEdge& e) const
+ {
+ return vNr0 == e.vNr0 && vNr1 == e.vNr1;
+ }
+
+ bool allEqual(TetraEdge& e) const
+ {
+ return (vNr0 == e.vNr0) && (vNr1 == e.vNr1) && (neighborNr == e.neighborNr);
+ }
+ uint32_t vNr0, vNr1;
+ uint32_t tetraNr;
+ int32_t neighborNr;
+ };
+
+
+ struct TetraEdgeList
+ {
+ void add(TetraEdge& edge)
+ {
+ mEdges.pushBack(edge);
+ }
+ void insert(uint32_t pos, TetraEdge& edge);
+ uint32_t numEdges()
+ {
+ return mEdges.size();
+ }
+ void sort();
+ int findEdge(int v0, int v1);
+ int findEdgeTetra(int v0, int v1, int tetraNr);
+ TetraEdge& operator[](unsigned i)
+ {
+ return mEdges[i];
+ }
+ const TetraEdge& operator[](unsigned i) const
+ {
+ return mEdges[i];
+ }
+
+ physx::Array<TetraEdge> mEdges;
+ };
+
+
+ struct FullTetrahedron
+ {
+ void init()
+ {
+ vertexNr[0] = vertexNr[1] = vertexNr[2] = vertexNr[3] = -1;
+ neighborNr[0] = neighborNr[1] = neighborNr[2] = neighborNr[3] = -1;
+ center = PxVec3(0.0f);
+ radiusSquared = 0.0f;
+ quality = 0;
+ bCircumSphereDirty = 1;
+ bDeleted = 0;
+ }
+ void set(int32_t v0, int32_t v1, int32_t v2, int32_t v3)
+ {
+ vertexNr[0] = v0;
+ vertexNr[1] = v1;
+ vertexNr[2] = v2;
+ vertexNr[3] = v3;
+ neighborNr[0] = neighborNr[1] = neighborNr[2] = neighborNr[3] = -1;
+ center = PxVec3(0.0f);
+ radiusSquared = 0.0f;
+ quality = 0;
+ bCircumSphereDirty = 1;
+ bDeleted = 0;
+ }
+ bool operator==(const FullTetrahedron& t) const
+ {
+ return
+ (vertexNr[0] == t.vertexNr[0]) &&
+ (vertexNr[1] == t.vertexNr[1]) &&
+ (vertexNr[2] == t.vertexNr[2]) &&
+ (vertexNr[3] == t.vertexNr[3]);
+ }
+
+ bool containsVertex(int32_t nr) const
+ {
+ return (vertexNr[0] == nr || vertexNr[1] == nr || vertexNr[2] == nr || vertexNr[3] == nr);
+ }
+ void replaceVertex(int nr, int newNr)
+ {
+ if (vertexNr[0] == nr)
+ {
+ vertexNr[0] = newNr;
+ }
+ else if (vertexNr[1] == nr)
+ {
+ vertexNr[1] = newNr;
+ }
+ else if (vertexNr[2] == nr)
+ {
+ vertexNr[2] = newNr;
+ }
+ else if (vertexNr[3] == nr)
+ {
+ vertexNr[3] = newNr;
+ }
+ else
+ {
+ PX_ASSERT(0);
+ }
+ }
+ void get2OppositeVertices(int vNr0, int vNr1, int& vNr2, int& vNr3)
+ {
+ int v[4], p = 0;
+ if (vertexNr[0] != vNr0 && vertexNr[0] != vNr1)
+ {
+ v[p++] = vertexNr[0];
+ }
+ if (vertexNr[1] != vNr0 && vertexNr[1] != vNr1)
+ {
+ v[p++] = vertexNr[1];
+ }
+ if (vertexNr[2] != vNr0 && vertexNr[2] != vNr1)
+ {
+ v[p++] = vertexNr[2];
+ }
+ if (vertexNr[3] != vNr0 && vertexNr[3] != vNr1)
+ {
+ v[p++] = vertexNr[3];
+ }
+ PX_ASSERT(p == 2);
+ vNr2 = v[0];
+ vNr3 = v[1];
+ }
+ void get3OppositeVertices(int vNr, int& vNr0, int& vNr1, int& vNr2)
+ {
+ if (vNr == vertexNr[0])
+ {
+ vNr0 = vertexNr[1];
+ vNr1 = vertexNr[2];
+ vNr2 = vertexNr[3];
+ }
+ else if (vNr == vertexNr[1])
+ {
+ vNr0 = vertexNr[2];
+ vNr1 = vertexNr[0];
+ vNr2 = vertexNr[3];
+ }
+ else if (vNr == vertexNr[2])
+ {
+ vNr0 = vertexNr[0];
+ vNr1 = vertexNr[1];
+ vNr2 = vertexNr[3];
+ }
+ else if (vNr == vertexNr[3])
+ {
+ vNr0 = vertexNr[2];
+ vNr1 = vertexNr[1];
+ vNr2 = vertexNr[0];
+ }
+ else
+ {
+ PX_ASSERT(0);
+ }
+ }
+ int getOppositeVertex(int vNr0, int vNr1, int vNr2)
+ {
+ if (vertexNr[0] != vNr0 && vertexNr[0] != vNr1 && vertexNr[0] != vNr2)
+ {
+ return vertexNr[0];
+ }
+ if (vertexNr[1] != vNr0 && vertexNr[1] != vNr1 && vertexNr[1] != vNr2)
+ {
+ return vertexNr[1];
+ }
+ if (vertexNr[2] != vNr0 && vertexNr[2] != vNr1 && vertexNr[2] != vNr2)
+ {
+ return vertexNr[2];
+ }
+ if (vertexNr[3] != vNr0 && vertexNr[3] != vNr1 && vertexNr[3] != vNr2)
+ {
+ return vertexNr[3];
+ }
+ PX_ASSERT(0);
+ return -1;
+ }
+ int sideOf(int vNr0, int vNr1, int vNr2)
+ {
+ if (vertexNr[0] != vNr0 && vertexNr[0] != vNr1 && vertexNr[0] != vNr2)
+ {
+ return 0;
+ }
+ if (vertexNr[1] != vNr0 && vertexNr[1] != vNr1 && vertexNr[1] != vNr2)
+ {
+ return 1;
+ }
+ if (vertexNr[2] != vNr0 && vertexNr[2] != vNr1 && vertexNr[2] != vNr2)
+ {
+ return 2;
+ }
+ if (vertexNr[3] != vNr0 && vertexNr[3] != vNr1 && vertexNr[3] != vNr2)
+ {
+ return 3;
+ }
+ PX_ASSERT(0);
+ return -1;
+ }
+ inline int neighborNrOf(int vNr0, int vNr1, int vNr2)
+ {
+ PX_ASSERT(containsVertex(vNr0));
+ PX_ASSERT(containsVertex(vNr1));
+ PX_ASSERT(containsVertex(vNr2));
+ if (vertexNr[0] != vNr0 && vertexNr[0] != vNr1 && vertexNr[0] != vNr2)
+ {
+ return 0;
+ }
+ if (vertexNr[1] != vNr0 && vertexNr[1] != vNr1 && vertexNr[1] != vNr2)
+ {
+ return 1;
+ }
+ if (vertexNr[2] != vNr0 && vertexNr[2] != vNr1 && vertexNr[2] != vNr2)
+ {
+ return 2;
+ }
+ if (vertexNr[3] != vNr0 && vertexNr[3] != vNr1 && vertexNr[3] != vNr2)
+ {
+ return 3;
+ }
+ PX_ASSERT(0);
+ return 0;
+ }
+ inline int& neighborOf(int vNr0, int vNr1, int vNr2)
+ {
+ return neighborNr[neighborNrOf(vNr0, vNr1, vNr2)];
+ }
+
+ inline bool onSurface()
+ {
+ return neighborNr[0] < 0 || neighborNr[1] < 0 || neighborNr[2] < 0 || neighborNr[3] < 0;
+ }
+ // representation
+ PxVec3 center;
+ int32_t vertexNr[4];
+ int32_t neighborNr[4];
+ float radiusSquared;
+ uint32_t quality : 10;
+ uint32_t bDeleted : 1;
+ uint32_t bCircumSphereDirty : 1;
+
+ // static
+ static const uint32_t sideIndices[4][3];
+ };
+
+ physx::Array<FullTetrahedron> mTetras;
+
+ uint32_t mFirstFarVertex;
+ uint32_t mLastFarVertex;
+
+ physx::Array<TetraVertex> mVertices;
+ physx::Array<uint32_t> mIndices;
+
+	// temporary indices, kept as a member so this buffer isn't reallocated every time
+ physx::Array<uint32_t> mTempItemIndices;
+
+#if TETRAHEDRALIZER_DEBUG_RENDERING
+public:
+ physx::Array<PxVec3> debugLines;
+ physx::Array<PxVec3> debugBounds;
+ physx::Array<PxVec3> debugTetras;
+#endif
+};
+
+}
+} // end namespace nvidia::apex
+
+#endif // APEX_TETRAHEDRALIZER_H
diff --git a/APEX_1.4/common/include/AuthorableObjectIntl.h b/APEX_1.4/common/include/AuthorableObjectIntl.h
new file mode 100644
index 00000000..89012a1d
--- /dev/null
+++ b/APEX_1.4/common/include/AuthorableObjectIntl.h
@@ -0,0 +1,92 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+
+#ifndef AUTHORABLE_OBJECT_INTL_H
+#define AUTHORABLE_OBJECT_INTL_H
+
+#include "ApexString.h"
+#include "ApexSDKIntl.h"
+#include "ApexSDKHelpers.h"
+#include "ResourceProviderIntl.h"
+#include "ApexResource.h"
+
+class ResourceList;
+
+namespace NvParameterized
+{
+class Interface;
+};
+
+namespace nvidia
+{
+namespace apex
+{
+
+// This class currently contains implementation; this will be removed and put in APEXAuthorableObject
+class AuthorableObjectIntl : public ApexResourceInterface, public ApexResource
+{
+public:
+
+ AuthorableObjectIntl(ModuleIntl* m, ResourceList& list, const char* aoTypeName)
+ : mAOTypeName(aoTypeName),
+ mModule(m)
+ {
+ list.add(*this);
+ }
+
+ virtual Asset* createAsset(AssetAuthoring& author, const char* name) = 0;
+ virtual Asset* createAsset(NvParameterized::Interface* params, const char* name) = 0;
+ virtual void releaseAsset(Asset& nxasset) = 0;
+
+ virtual AssetAuthoring* createAssetAuthoring() = 0;
+ virtual AssetAuthoring* createAssetAuthoring(const char* name) = 0;
+ virtual AssetAuthoring* createAssetAuthoring(NvParameterized::Interface* params, const char* name) = 0;
+ virtual void releaseAssetAuthoring(AssetAuthoring& nxauthor) = 0;
+
+ virtual uint32_t forceLoadAssets() = 0;
+ virtual uint32_t getAssetCount() = 0;
+ virtual bool getAssetList(Asset** outAssets, uint32_t& outAssetCount, uint32_t inAssetCount) = 0;
+
+
+ virtual ResID getResID() = 0;
+ virtual ApexSimpleString& getName() = 0;
+
+ // ApexResourceInterface methods
+ virtual void release() = 0;
+ virtual void destroy() = 0;
+
+ // ApexResourceInterface methods
+ uint32_t getListIndex() const
+ {
+ return m_listIndex;
+ }
+
+ void setListIndex(ResourceList& list, uint32_t index)
+ {
+ m_listIndex = index;
+ m_list = &list;
+ }
+
+ ResID mAOResID;
+ ResID mAOPtrResID;
+ ApexSimpleString mAOTypeName;
+ ApexSimpleString mParameterizedName;
+
+ ResourceList mAssets;
+ ResourceList mAssetAuthors;
+
+ ModuleIntl* mModule;
+};
+
+}
+} // end namespace nvidia::apex
+
+#endif // AUTHORABLE_OBJECT_INTL_H
diff --git a/APEX_1.4/common/include/Cof44.h b/APEX_1.4/common/include/Cof44.h
new file mode 100644
index 00000000..88bcdff3
--- /dev/null
+++ b/APEX_1.4/common/include/Cof44.h
@@ -0,0 +1,135 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+
+#ifndef COF44_H
+#define COF44_H
+
+#include "Apex.h"
+#include "PxMat44.h"
+#include "PxPlane.h"
+
+namespace nvidia
+{
+namespace apex
+{
+
+/**
+\brief Stores the info needed for the cofactor matrix of a 4x4 homogeneous transformation matrix (implicit last row = 0 0 0 1)
+*/
+class Cof44
+{
+public:
+ /**
+	\param [in] m can be an arbitrary homogeneous transformation matrix
+ */
+ Cof44(const PxMat44& m)
+ {
+ _block33(0, 0) = m(1, 1) * m(2, 2) - m(1, 2) * m(2, 1);
+ _block33(0, 1) = m(1, 2) * m(2, 0) - m(1, 0) * m(2, 2);
+ _block33(0, 2) = m(1, 0) * m(2, 1) - m(1, 1) * m(2, 0);
+ _block33(1, 0) = m(2, 1) * m(0, 2) - m(2, 2) * m(0, 1);
+ _block33(1, 1) = m(2, 2) * m(0, 0) - m(2, 0) * m(0, 2);
+ _block33(1, 2) = m(2, 0) * m(0, 1) - m(2, 1) * m(0, 0);
+ _block33(2, 0) = m(0, 1) * m(1, 2) - m(0, 2) * m(1, 1);
+ _block33(2, 1) = m(0, 2) * m(1, 0) - m(0, 0) * m(1, 2);
+ _block33(2, 2) = m(0, 0) * m(1, 1) - m(0, 1) * m(1, 0);
+ _44 = _block33(0, 0) * m(0, 0) + _block33(0, 1) * m(0, 1) + _block33(0, 2) * m(0, 2);
+
+ initCommon(m.getPosition());
+ }
+
+ /**
+ \param [in] rt must be pure (proper) rotation and translation
+ \param [in] s is any diagonal matrix (typically scale).
+ \note The combined transform is assumed to be (rt)*s, i.e. s is applied first
+ */
+ Cof44(const PxMat44& rt, const PxVec3 s)
+ {
+ const PxVec3 cofS(s.y * s.z, s.z * s.x, s.x * s.y);
+ _block33(0, 0) = rt(0, 0) * cofS.x;
+ _block33(0, 1) = rt(0, 1) * cofS.y;
+ _block33(0, 2) = rt(0, 2) * cofS.z;
+ _block33(1, 0) = rt(1, 0) * cofS.x;
+ _block33(1, 1) = rt(1, 1) * cofS.y;
+ _block33(1, 2) = rt(1, 2) * cofS.z;
+ _block33(2, 0) = rt(2, 0) * cofS.x;
+ _block33(2, 1) = rt(2, 1) * cofS.y;
+ _block33(2, 2) = rt(2, 2) * cofS.z;
+ _44 = cofS.x * s.x;
+
+ initCommon(rt.getPosition());
+ }
+
+ Cof44(const PxTransform rt, const PxVec3 s)
+ {
+ _block33 = PxMat33(rt.q);
+ const PxVec3 cofS(s.y * s.z, s.z * s.x, s.x * s.y);
+ _block33.column0 *= cofS.x;
+ _block33.column1 *= cofS.y;
+ _block33.column2 *= cofS.z;
+ _44 = cofS.x * s.x;
+
+ initCommon(rt.p);
+ }
+
+ /**
+ \brief Transforms a plane equation correctly even when the transformation is not a rotation
+ \note If the transformation is not a rotation then the length of the plane's normal vector is not preserved in general.
+ */
+ void transform(const PxPlane& src, PxPlane& dst) const
+ {
+ dst.n = _block33.transform(src.n);
+ dst.d = (_block13.dot(src.n)) + _44 * src.d;
+ }
+
+ /**
+	\brief Returns the 3x3 cofactor block, which transforms a normal correctly even when the transformation is not a rotation
+ \note If the transformation is not a rotation then the normal's length is not preserved in general.
+ */
+ const PxMat33& getBlock33() const
+ {
+ return _block33;
+ }
+
+ /**
+ \brief The determinant of the original matrix.
+ */
+ float getDeterminant() const
+ {
+ return _44;
+ }
+
+private:
+
+ void initCommon(const PxVec3& pos)
+ {
+ _block13 = _block33.transformTranspose(-pos);
+ if (_44 < 0)
+ {
+ // det is < 0, we need to negate all values
+			// The cofactor matrix divided by the determinant equals the inverse transpose of an affine transformation.
+			// For rotating normals, dividing by the determinant is unnecessary, since the normal gets renormalized afterwards anyway.
+			// If the determinant is negative, though, it is important to negate all values to get the right results.
+ _block33 *= -1;
+ _block13 *= -1;
+ _44 = -_44;
+ }
+
+ }
+ PxMat33 _block33;
+ PxVec3 _block13;
+ float _44;
+};
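+
+/*
+	Usage sketch (assumes 'scaledTM' is an arbitrary homogeneous PxMat44, e.g. a rotation times a nonuniform scale,
+	and 'localPlane' is a PxPlane in the untransformed space):
+
+	    Cof44 cof(scaledTM);
+	    PxPlane worldPlane;
+	    cof.transform(localPlane, worldPlane);                  // correct plane transform even under nonuniform scale
+	    const float invLen = 1.0f / worldPlane.n.magnitude();
+	    worldPlane.n *= invLen;                                 // renormalize, since the normal's length
+	    worldPlane.d *= invLen;                                 // is generally not preserved
+*/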
+
+}
+} // end namespace nvidia::apex
+
+#endif \ No newline at end of file
diff --git a/APEX_1.4/common/include/CurveImpl.h b/APEX_1.4/common/include/CurveImpl.h
new file mode 100644
index 00000000..0d58721b
--- /dev/null
+++ b/APEX_1.4/common/include/CurveImpl.h
@@ -0,0 +1,83 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+
+#ifndef __CURVE_IMPL_H__
+#define __CURVE_IMPL_H__
+
+#include "Apex.h"
+#include "PxAssert.h"
+#include "nvparameterized/NvParameterized.h"
+#include "PsArray.h"
+#include "Curve.h"
+#include <ApexUsingNamespace.h>
+
+namespace nvidia
+{
+namespace apex
+{
+
+typedef physx::Array<Vec2R> Vec2RPointArray;
+
+/**
+ The CurveImpl is a class for storing control points on a curve and evaluating the results later.
+*/
+class CurveImpl : public Curve
+{
+public:
+ CurveImpl()
+ {}
+
+ ~CurveImpl()
+ {}
+
+ /**
+ Retrieve the output Y for the specified input x, based on the properties of the stored curve described
+ by mControlPoints.
+ */
+ float evaluate(float x) const;
+
+ /**
+ Add a control point to the list of control points, returning the index of the new point.
+ */
+ uint32_t addControlPoint(const Vec2R& controlPoint);
+
+ /**
+	Add control points to the list of control points, assuming
+	hPoints points to a list of vec2s.
+ */
+ void addControlPoints(::NvParameterized::Interface* param, ::NvParameterized::Handle& hPoints);
+
+ /**
+	Locates the pair of control points that bracket x, placing them in the two
+	out parameters. Returns true if the points were found, false otherwise. If the points were not
+	found, the output variables are left untouched.
+ */
+ bool calculateControlPoints(float x, Vec2R& outXPoints, Vec2R& outYPoints) const;
+
+ /**
+	Locates the first control point with x larger than xValue, or returns the number of control points if no such point exists.
+ */
+ uint32_t calculateFollowingControlPoint(float xValue) const;
+
+ ///get the array of control points
+ const Vec2R* getControlPoints(uint32_t& outCount) const;
+
+private:
+	// mControlPoints is a sorted list of control points for a curve. Currently, the curve is a simple
+	// lerp'd (piecewise-linear) curve. We could add support for other curve types in the future, e.g. Bezier
+	// curves, splines, etc.
+ Vec2RPointArray mControlPoints;
+};
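+
+/*
+	Worked example (assuming Vec2R is an (x, y) pair): with control points (0, 0) and (1, 2) stored,
+	evaluate(0.5f) linearly interpolates between the two bracketing points and returns 1.0f.
+*/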
+
+}
+} // namespace apex
+
+#endif /* __CURVE_IMPL_H__ */
diff --git a/APEX_1.4/common/include/DebugColorParamsEx.h b/APEX_1.4/common/include/DebugColorParamsEx.h
new file mode 100644
index 00000000..9cb6c070
--- /dev/null
+++ b/APEX_1.4/common/include/DebugColorParamsEx.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+
+#ifndef HEADER_DebugColorParamsListener_h
+#define HEADER_DebugColorParamsListener_h
+
+#include "NvParameters.h"
+#include "nvparameterized/NvParameterized.h"
+#include "nvparameterized/NvParameterizedTraits.h"
+
+#include "DebugColorParams.h"
+#include "ApexSDKIntl.h"
+
+namespace nvidia
+{
+namespace apex
+{
+
+#define MAX_COLOR_NAME_LENGTH 32
+
+class DebugColorParamsEx : public DebugColorParams
+{
+public:
+ DebugColorParamsEx(NvParameterized::Traits* traits, ApexSDKIntl* mSdk) :
+ DebugColorParams(traits),
+ mApexSdk(mSdk) {}
+
+ ~DebugColorParamsEx()
+ {
+ }
+
+ void destroy()
+ {
+ this->~DebugColorParamsEx();
+ this->DebugColorParams::destroy();
+ }
+
+ NvParameterized::ErrorType setParamU32(const NvParameterized::Handle& handle, uint32_t val)
+ {
+ NvParameterized::ErrorType err = NvParameterized::NvParameters::setParamU32(handle, val);
+
+ NvParameterized::Handle& h = const_cast<NvParameterized::Handle&>(handle);
+ char color[MAX_COLOR_NAME_LENGTH];
+ h.getLongName(color, MAX_COLOR_NAME_LENGTH);
+ mApexSdk->updateDebugColorParams(color, val);
+
+ return err;
+ }
+
+private:
+ ApexSDKIntl* mApexSdk;
+};
+
+}
+} // namespace nvidia::apex::
+
+#endif \ No newline at end of file
diff --git a/APEX_1.4/common/include/DeclareArray.h b/APEX_1.4/common/include/DeclareArray.h
new file mode 100644
index 00000000..aca822d5
--- /dev/null
+++ b/APEX_1.4/common/include/DeclareArray.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+
+#ifndef DECLARE_ARRAY_H
+
+#define DECLARE_ARRAY_H
+
+#error "Do not include DeclareArray.h anywhere!"
+// PH: Also, don't use DeclareArray anymore, use physx::Array< > or Array< > directly
+
+#include "ApexUsingNamespace.h"
+#include "PsArray.h"
+
+#define DeclareArray(x) physx::Array< x >
+
+
+#endif
diff --git a/APEX_1.4/common/include/FieldBoundaryIntl.h b/APEX_1.4/common/include/FieldBoundaryIntl.h
new file mode 100644
index 00000000..b799b8ac
--- /dev/null
+++ b/APEX_1.4/common/include/FieldBoundaryIntl.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+
+#ifndef FIELD_BOUNDARY_INTL_H
+#define FIELD_BOUNDARY_INTL_H
+
+#include "InplaceTypes.h"
+
+namespace nvidia
+{
+namespace apex
+{
+
+
+struct FieldShapeTypeIntl
+{
+ enum Enum
+ {
+ NONE = 0,
+ SPHERE,
+ BOX,
+ CAPSULE,
+
+ FORCE_DWORD = 0xFFFFFFFFu
+ };
+};
+
+// struct FieldShapeDescIntl
+// dimensions for:
+//   SPHERE:  x = radius
+//   BOX:     (x,y,z) = 1/2 size
+//   CAPSULE: x = radius, y = height
+#define INPLACE_TYPE_STRUCT_NAME FieldShapeDescIntl
+#define INPLACE_TYPE_STRUCT_FIELDS \
+ INPLACE_TYPE_FIELD(InplaceEnum<FieldShapeTypeIntl::Enum>, type) \
+ INPLACE_TYPE_FIELD(PxTransform, worldToShape) \
+ INPLACE_TYPE_FIELD(PxVec3, dimensions) \
+ INPLACE_TYPE_FIELD(float, weight)
+#include INPLACE_TYPE_BUILD()
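+
+//The include above runs InplaceTypesBuilder.h over the field list and generates
+//the struct plus a reflectSelf() method that visits each field at its offset.
+//A hand-written equivalent would look roughly like this sketch:
+#if 0
+struct FieldShapeDescIntl
+{
+ InplaceEnum<FieldShapeTypeIntl::Enum> type;
+ PxTransform worldToShape;
+ PxVec3 dimensions;
+ float weight;
+
+ template <int _inplace_offset_, typename R, typename RA>
+ APEX_CUDA_CALLABLE PX_INLINE void reflectSelf(R& r, RA ra)
+ {
+  InplaceTypeHelper::reflectType<APEX_OFFSETOF(FieldShapeDescIntl, type) + _inplace_offset_>(r, ra, type, InplaceTypeMemberDefaultTraits());
+  InplaceTypeHelper::reflectType<APEX_OFFSETOF(FieldShapeDescIntl, worldToShape) + _inplace_offset_>(r, ra, worldToShape, InplaceTypeMemberDefaultTraits());
+  InplaceTypeHelper::reflectType<APEX_OFFSETOF(FieldShapeDescIntl, dimensions) + _inplace_offset_>(r, ra, dimensions, InplaceTypeMemberDefaultTraits());
+  InplaceTypeHelper::reflectType<APEX_OFFSETOF(FieldShapeDescIntl, weight) + _inplace_offset_>(r, ra, weight, InplaceTypeMemberDefaultTraits());
+ }
+};
+#endif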
+
+
+#ifndef __CUDACC__
+
+struct FieldBoundaryDescIntl
+{
+#if PX_PHYSICS_VERSION_MAJOR == 3
+ PxFilterData boundaryFilterData;
+#endif
+};
+
+class FieldBoundaryIntl
+{
+public:
+ virtual bool updateFieldBoundary(physx::Array<FieldShapeDescIntl>& shapes) = 0;
+
+protected:
+ virtual ~FieldBoundaryIntl() {}
+};
+#endif
+
+}
+} // end namespace nvidia::apex
+
+#endif // #ifndef FIELD_BOUNDARY_INTL_H
diff --git a/APEX_1.4/common/include/FieldSamplerIntl.h b/APEX_1.4/common/include/FieldSamplerIntl.h
new file mode 100644
index 00000000..b4a7cae9
--- /dev/null
+++ b/APEX_1.4/common/include/FieldSamplerIntl.h
@@ -0,0 +1,144 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+
+#ifndef FIELD_SAMPLER_INTL_H
+#define FIELD_SAMPLER_INTL_H
+
+#include "InplaceTypes.h"
+#include "FieldBoundaryIntl.h"
+
+#ifndef __CUDACC__
+#include "ApexSDKIntl.h"
+#endif
+
+namespace nvidia
+{
+namespace apex
+{
+
+
+struct FieldSamplerTypeIntl
+{
+ enum Enum
+ {
+ FORCE,
+ ACCELERATION,
+ VELOCITY_DRAG,
+ VELOCITY_DIRECT,
+ };
+};
+
+struct FieldSamplerGridSupportTypeIntl
+{
+ enum Enum
+ {
+ NONE = 0,
+ SINGLE_VELOCITY,
+ VELOCITY_PER_CELL,
+ };
+};
+
+#ifndef __CUDACC__
+
+struct FieldSamplerDescIntl
+{
+ FieldSamplerTypeIntl::Enum type;
+ FieldSamplerGridSupportTypeIntl::Enum gridSupportType;
+ bool cpuSimulationSupport;
+#if PX_PHYSICS_VERSION_MAJOR == 3
+ PxFilterData samplerFilterData;
+ PxFilterData boundaryFilterData;
+#endif
+ float boundaryFadePercentage;
+
+ float dragCoeff; //only used when type is VELOCITY_DRAG
+
+ void* userData;
+
+ FieldSamplerDescIntl()
+ {
+ type = FieldSamplerTypeIntl::FORCE;
+ gridSupportType = FieldSamplerGridSupportTypeIntl::NONE;
+ cpuSimulationSupport = true;
+#if PX_PHYSICS_VERSION_MAJOR == 3
+ samplerFilterData.word0 = 0xFFFFFFFF;
+ samplerFilterData.word1 = 0xFFFFFFFF;
+ samplerFilterData.word2 = 0xFFFFFFFF;
+ samplerFilterData.word3 = 0xFFFFFFFF;
+ boundaryFilterData.word0 = 0xFFFFFFFF;
+ boundaryFilterData.word1 = 0xFFFFFFFF;
+ boundaryFilterData.word2 = 0xFFFFFFFF;
+ boundaryFilterData.word3 = 0xFFFFFFFF;
+#endif
+ boundaryFadePercentage = 0.1f;
+ dragCoeff = 0;
+ userData = NULL;
+ }
+};
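+
+// Illustrative sketch (not part of the original header): filling out a
+// descriptor for a drag-type sampler. The values are arbitrary examples.
+#if 0
+static FieldSamplerDescIntl makeDragSamplerDesc()
+{
+ FieldSamplerDescIntl desc; // defaults are set by the constructor above
+ desc.type = FieldSamplerTypeIntl::VELOCITY_DRAG;
+ desc.dragCoeff = 0.5f; // only meaningful when type is VELOCITY_DRAG
+ desc.boundaryFadePercentage = 0.25f;
+ return desc;
+}
+#endif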
+
+
+class FieldSamplerIntl
+{
+public:
+ //returns true if the shape/params were changed
+ //required to return true on the first call!
+ virtual bool updateFieldSampler(FieldShapeDescIntl& shapeDesc, bool& isEnabled) = 0;
+
+ struct ExecuteData
+ {
+ uint32_t count;
+ uint32_t positionStride;
+ uint32_t velocityStride;
+ uint32_t massStride;
+ uint32_t indicesMask;
+ const float* position;
+ const float* velocity;
+ const float* mass;
+ const uint32_t* indices;
+ PxVec3* resultField;
+ };
+
+ virtual void executeFieldSampler(const ExecuteData& data)
+ {
+ PX_UNUSED(data);
+ APEX_INVALID_OPERATION("not implemented");
+ }
+
+#if APEX_CUDA_SUPPORT
+ struct CudaExecuteInfo
+ {
+ uint32_t executeType;
+ InplaceHandleBase executeParamsHandle;
+ };
+
+ virtual void getFieldSamplerCudaExecuteInfo(CudaExecuteInfo& info) const
+ {
+ PX_UNUSED(info);
+ APEX_INVALID_OPERATION("not implemented");
+ }
+#endif
+
+ virtual PxVec3 queryFieldSamplerVelocity() const
+ {
+ APEX_INVALID_OPERATION("not implemented");
+ return PxVec3(0.0f);
+ }
+
+protected:
+ virtual ~FieldSamplerIntl() {}
+};
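+
+// Illustrative sketch (not part of the original header): a minimal CPU sampler
+// that writes a constant acceleration for every processed object. The
+// indices/indicesMask remapping convention is assumed from the module
+// implementations; the gravity value is an arbitrary example.
+#if 0
+class ConstantAccelerationSampler : public FieldSamplerIntl
+{
+public:
+ virtual bool updateFieldSampler(FieldShapeDescIntl& shapeDesc, bool& isEnabled)
+ {
+  shapeDesc.type = FieldShapeTypeIntl::NONE; // shape handling simplified for this sketch
+  isEnabled = true;
+  return true; // must return true on the first call
+ }
+
+ virtual void executeFieldSampler(const ExecuteData& data)
+ {
+  const PxVec3 acceleration(0.0f, -9.8f, 0.0f);
+  for (uint32_t i = 0; i < data.count; ++i)
+  {
+   // assumed index remapping through indices/indicesMask
+   const uint32_t index = data.indices[i & data.indicesMask] + (i & ~data.indicesMask);
+   data.resultField[index] = acceleration;
+  }
+ }
+};
+#endif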
+
+#endif // __CUDACC__
+
+}
+} // end namespace nvidia::apex
+
+#endif // #ifndef FIELD_SAMPLER_INTL_H
diff --git a/APEX_1.4/common/include/FieldSamplerManagerIntl.h b/APEX_1.4/common/include/FieldSamplerManagerIntl.h
new file mode 100644
index 00000000..3822496e
--- /dev/null
+++ b/APEX_1.4/common/include/FieldSamplerManagerIntl.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+
+#ifndef FIELD_SAMPLER_MANAGER_INTL_H
+#define FIELD_SAMPLER_MANAGER_INTL_H
+
+#include "Apex.h"
+#include "ApexSDKHelpers.h"
+
+namespace nvidia
+{
+namespace apex
+{
+
+struct FieldSamplerQueryDescIntl;
+class FieldSamplerQueryIntl;
+
+class FieldSamplerSceneIntl;
+
+struct FieldSamplerDescIntl;
+class FieldSamplerIntl;
+
+struct FieldBoundaryDescIntl;
+class FieldBoundaryIntl;
+
+class FieldSamplerManagerIntl
+{
+public:
+ virtual FieldSamplerQueryIntl* createFieldSamplerQuery(const FieldSamplerQueryDescIntl&) = 0;
+
+ virtual void registerFieldSampler(FieldSamplerIntl* , const FieldSamplerDescIntl& , FieldSamplerSceneIntl*) = 0;
+ virtual void unregisterFieldSampler(FieldSamplerIntl*) = 0;
+
+ virtual void registerFieldBoundary(FieldBoundaryIntl* , const FieldBoundaryDescIntl&) = 0;
+ virtual void unregisterFieldBoundary(FieldBoundaryIntl*) = 0;
+
+#if PX_PHYSICS_VERSION_MAJOR == 3
+ virtual void registerUnhandledParticleSystem(PxActor*) = 0;
+ virtual void unregisterUnhandledParticleSystem(PxActor*) = 0;
+ virtual bool isUnhandledParticleSystem(PxActor*) = 0;
+
+ virtual bool doFieldSamplerFiltering(const PxFilterData &o1, const PxFilterData &o2, float &weight) const = 0;
+#endif
+};
+
+}
+} // end namespace nvidia::apex
+
+#endif // #ifndef FIELD_SAMPLER_MANAGER_INTL_H
diff --git a/APEX_1.4/common/include/FieldSamplerQueryIntl.h b/APEX_1.4/common/include/FieldSamplerQueryIntl.h
new file mode 100644
index 00000000..a72c7511
--- /dev/null
+++ b/APEX_1.4/common/include/FieldSamplerQueryIntl.h
@@ -0,0 +1,124 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+
+#ifndef FIELD_SAMPLER_QUERY_INTL_H
+#define FIELD_SAMPLER_QUERY_INTL_H
+
+#include "ApexDefs.h"
+#include "ApexMirroredArray.h"
+
+#include "PxTask.h"
+#include "ApexActor.h"
+#include "PxMat44.h"
+
+namespace nvidia
+{
+namespace apex
+{
+
+
+class FieldSamplerSceneIntl;
+
+struct FieldSamplerQueryDescIntl
+{
+ uint32_t maxCount;
+#if PX_PHYSICS_VERSION_MAJOR == 3
+ PxFilterData samplerFilterData;
+#endif
+ FieldSamplerSceneIntl* ownerFieldSamplerScene;
+
+
+ FieldSamplerQueryDescIntl()
+ {
+ maxCount = 0;
+#if PX_PHYSICS_VERSION_MAJOR == 3
+ samplerFilterData.word0 = 0xFFFFFFFF;
+ samplerFilterData.word1 = 0xFFFFFFFF;
+ samplerFilterData.word2 = 0xFFFFFFFF;
+ samplerFilterData.word3 = 0xFFFFFFFF;
+#endif
+ ownerFieldSamplerScene = 0;
+ }
+};
+
+struct FieldSamplerQueryDataIntl
+{
+ float timeStep;
+ uint32_t count;
+ bool isDataOnDevice;
+
+ uint32_t positionStrideBytes; //Stride for position
+ uint32_t velocityStrideBytes; //Stride for velocity
+ float* pmaInPosition;
+ float* pmaInVelocity;
+ PxVec4* pmaOutField;
+
+ uint32_t massStrideBytes; //if massStrideBytes is set to 0, a single mass is assumed for all objects
+ float* pmaInMass;
+
+ uint32_t* pmaInIndices;
+};
+
+
+#if APEX_CUDA_SUPPORT
+
+class ApexCudaArray;
+
+struct FieldSamplerQueryGridDataIntl
+{
+ uint32_t numX, numY, numZ;
+
+ PxMat44 gridToWorld;
+
+ float mass;
+
+ float timeStep;
+
+ PxVec3 cellSize;
+
+ ApexCudaArray* resultVelocity; //x, y, z = velocity vector, w = weight
+
+ CUstream stream;
+};
+#endif
+
+class FieldSamplerCallbackIntl
+{
+public:
+ virtual void operator()(void* stream = NULL) = 0;
+};
+
+class FieldSamplerQueryIntl : public ApexActor
+{
+public:
+ virtual PxTaskID submitFieldSamplerQuery(const FieldSamplerQueryDataIntl& data, PxTaskID taskID) = 0;
+
+ //! FieldSamplerCallbackIntl will be called before FieldSampler computations
+ virtual void setOnStartCallback(FieldSamplerCallbackIntl*) = 0;
+ //! FieldSamplerCallbackIntl will be called after FieldSampler computations
+ virtual void setOnFinishCallback(FieldSamplerCallbackIntl*) = 0;
+
+#if APEX_CUDA_SUPPORT
+ virtual PxVec3 executeFieldSamplerQueryOnGrid(const FieldSamplerQueryGridDataIntl&)
+ {
+ APEX_INVALID_OPERATION("not implemented");
+ return PxVec3(0.0f);
+ }
+#endif
+
+protected:
+ virtual ~FieldSamplerQueryIntl() {}
+};
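+
+// Illustrative sketch (not part of the original header): creating a query via
+// FieldSamplerManagerIntl (declared in FieldSamplerManagerIntl.h) and submitting
+// per-frame particle data. Buffer pointers and task wiring are assumed to be set
+// up by the caller.
+#if 0
+static PxTaskID runFieldSamplerQuery(FieldSamplerManagerIntl* manager,
+                                     FieldSamplerSceneIntl* ownerScene,
+                                     FieldSamplerQueryDataIntl& frameData,
+                                     PxTaskID followingTask)
+{
+ FieldSamplerQueryDescIntl desc; // defaults are set by the constructor above
+ desc.maxCount = 4096; // example capacity
+ desc.ownerFieldSamplerScene = ownerScene;
+ FieldSamplerQueryIntl* query = manager->createFieldSamplerQuery(desc);
+
+ // frameData.pmaInPosition / pmaInVelocity / pmaOutField etc. must be filled
+ // in before submission.
+ return query->submitFieldSamplerQuery(frameData, followingTask);
+}
+#endif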
+
+}
+} // end namespace nvidia::apex
+
+#endif // #ifndef FIELD_SAMPLER_QUERY_INTL_H
diff --git a/APEX_1.4/common/include/FieldSamplerSceneIntl.h b/APEX_1.4/common/include/FieldSamplerSceneIntl.h
new file mode 100644
index 00000000..bce7f960
--- /dev/null
+++ b/APEX_1.4/common/include/FieldSamplerSceneIntl.h
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+
+#ifndef FIELD_SAMPLER_SCENE_INTL_H
+#define FIELD_SAMPLER_SCENE_INTL_H
+
+#include "ApexDefs.h"
+#include "PxTask.h"
+
+#include "ModuleIntl.h"
+#include "ApexSDKIntl.h"
+
+namespace nvidia
+{
+
+namespace fieldsampler
+{
+ struct FieldSamplerKernelLaunchDataIntl;
+}
+
+namespace apex
+{
+
+class ApexCudaConstStorage;
+
+struct FieldSamplerSceneDescIntl
+{
+ bool isPrimary;
+
+ FieldSamplerSceneDescIntl()
+ {
+ isPrimary = false;
+ }
+};
+
+struct FieldSamplerQueryDataIntl;
+
+class FieldSamplerSceneIntl : public ModuleSceneIntl
+{
+public:
+ virtual void getFieldSamplerSceneDesc(FieldSamplerSceneDescIntl& desc) const = 0;
+
+ virtual const PxTask* onSubmitFieldSamplerQuery(const FieldSamplerQueryDataIntl& data, const PxTask* )
+ {
+ PX_UNUSED(data);
+ return 0;
+ }
+
+#if APEX_CUDA_SUPPORT
+ virtual ApexCudaConstStorage* getFieldSamplerCudaConstStorage()
+ {
+ APEX_INVALID_OPERATION("not implemented");
+ return 0;
+ }
+
+ virtual bool launchFieldSamplerCudaKernel(const nvidia::fieldsampler::FieldSamplerKernelLaunchDataIntl&)
+ {
+ APEX_INVALID_OPERATION("not implemented");
+ return false;
+ }
+#endif
+
+ virtual SceneStats* getStats()
+ {
+ return 0;
+ }
+
+};
+
+#define FSST_PHYSX_MONITOR_LOAD "FieldSamplerScene::PhysXMonitorLoad"
+#define FSST_PHYSX_MONITOR_FETCH "FieldSamplerScene::PhysXMonitorFetch"
+#define FSST_PHYSX_MONITOR_UPDATE "FieldSamplerPhysXMonitor::Update"
+}
+
+} // end namespace nvidia::apex
+
+#endif // FIELD_SAMPLER_SCENE_INTL_H
diff --git a/APEX_1.4/common/include/InplaceStorage.h b/APEX_1.4/common/include/InplaceStorage.h
new file mode 100644
index 00000000..d2d81912
--- /dev/null
+++ b/APEX_1.4/common/include/InplaceStorage.h
@@ -0,0 +1,985 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+
+#ifndef __APEX_INPLACE_STORAGE_H__
+#define __APEX_INPLACE_STORAGE_H__
+
+#include "ApexUsingNamespace.h"
+#include "PsAllocator.h"
+#include "InplaceTypes.h"
+
+namespace nvidia
+{
+namespace apex
+{
+
+
+class InplaceStorage;
+
+class InplaceStorageGroup
+{
+ friend class InplaceStorage;
+
+ InplaceStorage* _storage;
+ uint32_t _lastBlockIndex;
+
+ InplaceStorageGroup* _groupListPrev;
+ InplaceStorageGroup* _groupListNext;
+
+ PX_INLINE void reset(InplaceStorage* storage)
+ {
+ PX_UNUSED(storage);
+ PX_ASSERT(_storage == storage);
+ _storage = 0;
+ }
+
+public:
+ PX_INLINE InplaceStorageGroup() : _storage(0) {}
+ PX_INLINE InplaceStorageGroup(InplaceStorage& storage) : _storage(0)
+ {
+ init(storage);
+ }
+ PX_INLINE ~InplaceStorageGroup()
+ {
+ release();
+ }
+
+ PX_INLINE void init(InplaceStorage& storage);
+ PX_INLINE void release();
+
+ PX_INLINE void begin();
+ PX_INLINE void end();
+
+ PX_INLINE InplaceStorage& getStorage()
+ {
+ PX_ASSERT(_storage != 0);
+ return *_storage;
+ }
+};
+
+class InplaceStorage
+{
+ struct ReflectorArg
+ {
+ };
+
+ class Reflector
+ {
+ InplaceStorage* _storage;
+ uint32_t _blockIndex;
+ uint8_t* _blockPtr;
+
+ public:
+ Reflector(InplaceStorage* storage, uint32_t blockIndex, uint8_t* blockPtr)
+ : _storage(storage), _blockIndex(blockIndex), _blockPtr(blockPtr)
+ {
+ }
+
+ template <int _inplace_offset_, typename MT>
+ PX_INLINE void processType(ReflectorArg, InplaceHandleBase& handle, MT )
+ {
+ size_t offset = size_t(reinterpret_cast<uint8_t*>(&handle) - _blockPtr);
+ _storage->addHandleRef(_blockIndex, offset, MT::AutoFreeValue);
+ }
+ template <int _inplace_offset_, typename T, typename MT>
+ PX_INLINE void processType(ReflectorArg ra, InplaceHandle<T>& handle, MT mt)
+ {
+ processType<_inplace_offset_>(ra, static_cast<InplaceHandleBase&>(handle), mt);
+ }
+ template <int _inplace_offset_, typename T, typename MT>
+ PX_INLINE void processType(ReflectorArg, T& , MT )
+ {
+ ; //do nothing
+ }
+
+ template <int _inplace_offset_, typename T>
+ PX_INLINE void processPrimitiveType(ReflectorArg, T& )
+ {
+ ; //do nothing
+ }
+ };
+ friend class Reflector;
+
+ static const uint32_t NULL_INDEX = InplaceHandleBase::NULL_VALUE;
+
+ struct Block
+ {
+ uint32_t _size;
+ uint32_t _alignment;
+ uint32_t _offset;
+ uint32_t _prevIndex;
+ union
+ {
+ uint32_t _nextIndex;
+ uint32_t _nextFreeBlockIndex;
+ };
+ uint32_t _firstRefIndex;
+ InplaceStorageGroup* _group;
+
+ void reset()
+ {
+ _alignment = 0;
+ _size = 0;
+ _offset = uint32_t(-1);
+ _prevIndex = _nextIndex = NULL_INDEX;
+ _firstRefIndex = NULL_INDEX;
+ _group = NULL;
+ }
+ };
+
+ struct HandleRef
+ {
+ enum Flags
+ {
+ AUTO_FREE = 0x01,
+ };
+ uint32_t flags;
+ uint32_t ownerBlockIndex;
+ uint32_t offsetInBlock;
+ union
+ {
+ uint32_t nextIndex;
+ uint32_t nextFreeRefIndex;
+ };
+
+ void reset()
+ {
+ flags = 0;
+ ownerBlockIndex = NULL_INDEX;
+ offsetInBlock = 0;
+ }
+ };
+
+ void addHandleRef(uint32_t blockIndex, size_t offset, bool autoFree)
+ {
+ //find free handleRef
+ if (_firstFreeRefIndex == NULL_INDEX)
+ {
+ _firstFreeRefIndex = _handleRefs.size();
+ _handleRefs.resize(_firstFreeRefIndex + 1);
+
+ _handleRefs.back().nextFreeRefIndex = NULL_INDEX;
+ }
+ uint32_t thisRefIndex = _firstFreeRefIndex;
+ HandleRef& handleRef = _handleRefs[thisRefIndex];
+ _firstFreeRefIndex = handleRef.nextFreeRefIndex;
+
+ Block& block = _blocks[blockIndex];
+ handleRef.nextIndex = block._firstRefIndex;
+ block._firstRefIndex = thisRefIndex;
+
+ handleRef.ownerBlockIndex = blockIndex;
+ handleRef.offsetInBlock = (uint32_t) offset;
+ handleRef.flags = 0;
+ if (autoFree)
+ {
+ handleRef.flags |= HandleRef::AUTO_FREE;
+ }
+ }
+
+ template <typename F>
+ void removeHandleRefs(F func, uint32_t blockIndex, uint32_t minOffset = 0)
+ {
+ Block& block = _blocks[blockIndex];
+
+ uint32_t prevRefIndex = NULL_INDEX;
+ uint32_t currRefIndex = block._firstRefIndex;
+ while (currRefIndex != NULL_INDEX)
+ {
+ HandleRef& handleRef = _handleRefs[currRefIndex];
+ PX_ASSERT(handleRef.ownerBlockIndex == blockIndex);
+
+ uint32_t nextRefIndex = handleRef.nextIndex;
+ if (handleRef.offsetInBlock >= minOffset)
+ {
+ //remove
+ if (handleRef.flags & HandleRef::AUTO_FREE)
+ {
+ uint32_t blockOffset = block._offset;
+ PX_ASSERT(blockOffset != uint32_t(-1));
+ InplaceHandleBase handle = *reinterpret_cast<InplaceHandleBase*>(getBufferPtr() + blockOffset + handleRef.offsetInBlock);
+
+ (this->*func)(block, handle);
+ }
+
+ if (prevRefIndex != NULL_INDEX)
+ {
+ _handleRefs[prevRefIndex].nextIndex = nextRefIndex;
+ }
+ else
+ {
+ block._firstRefIndex = nextRefIndex;
+ }
+
+ handleRef.nextFreeRefIndex = _firstFreeRefIndex;
+ _firstFreeRefIndex = currRefIndex;
+
+ handleRef.reset();
+ }
+ else
+ {
+ prevRefIndex = currRefIndex;
+ }
+ currRefIndex = nextRefIndex;
+ }
+ }
+
+
+ PX_INLINE void mapHandle(InplaceHandleBase& handle) const
+ {
+ if (handle._value != NULL_INDEX)
+ {
+ handle._value = _blocks[handle._value]._offset;
+ }
+ }
+ PX_INLINE uint8_t* getBufferPtr()
+ {
+ PX_ASSERT(_bufferPtr != 0);
+ _isChanged = true;
+ return _bufferPtr;
+ }
+ PX_INLINE const uint8_t* getBufferPtr() const
+ {
+ PX_ASSERT(_bufferPtr != 0);
+ return _bufferPtr;
+ }
+
+ template <typename T>
+ PX_INLINE const T* resolveType(InplaceHandleBase handle) const
+ {
+ if (handle._value != NULL_INDEX)
+ {
+ const Block& block = _blocks[handle._value];
+ PX_ASSERT(block._offset != uint32_t(-1));
+ return reinterpret_cast<const T*>(getBufferPtr() + block._offset);
+ }
+ return 0;
+ }
+ template <typename T>
+ PX_INLINE T* resolveType(InplaceHandleBase handle)
+ {
+ if (handle._value != NULL_INDEX)
+ {
+ const Block& block = _blocks[handle._value];
+ PX_ASSERT(block._offset != uint32_t(-1));
+ return reinterpret_cast<T*>(getBufferPtr() + block._offset);
+ }
+ return 0;
+ }
+
+protected:
+ //buffer API
+ virtual uint8_t* storageResizeBuffer(uint32_t newSize) = 0;
+
+ virtual void storageLock() {}
+ virtual void storageUnlock() {}
+
+public:
+ InplaceStorage()
+ {
+ _bufferPtr = 0;
+ _isChanged = false;
+
+ _firstFreeBlockIndex = NULL_INDEX;
+ _lastAllocatedBlockIndex = NULL_INDEX;
+ _allocatedSize = 0;
+
+ _groupListHead = 0;
+ _activeGroup = NULL;
+
+ _firstFreeRefIndex = NULL_INDEX;
+ }
+ virtual ~InplaceStorage()
+ {
+ release();
+ }
+
+ void release()
+ {
+ releaseGroups();
+ }
+
+ bool isChanged() const
+ {
+ return _isChanged;
+ }
+ void setUnchanged()
+ {
+ _isChanged = false;
+ }
+
+ template <typename T>
+ PX_INLINE bool fetch(InplaceHandleBase handle, T& out, uint32_t index = 0) const
+ {
+ const T* ptr = resolveType<T>(handle);
+ if (ptr != 0)
+ {
+ out = ptr[index];
+ return true;
+ }
+ return false;
+ }
+ template <typename T>
+ PX_INLINE bool update(InplaceHandleBase handle, const T& in, uint32_t index = 0)
+ {
+ T* ptr = resolveType<T>(handle);
+ if (ptr != 0)
+ {
+ ptr[index] = in;
+ return true;
+ }
+ return false;
+ }
+ template <typename T>
+ PX_INLINE bool updateRange(InplaceHandleBase handle, const T* in, uint32_t count, uint32_t start = 0)
+ {
+ T* ptr = resolveType<T>(handle);
+ if (ptr != 0)
+ {
+ ::memcpy(ptr + start, in, sizeof(T) * count);
+ return true;
+ }
+ return false;
+ }
+
+
+ template <typename T>
+ PX_INLINE bool alloc(InplaceHandleBase& handle, uint32_t count = 1)
+ {
+ PX_ASSERT(count > 0);
+ handle._value = allocBlock(sizeof(T) * count, __alignof(T));
+ if (handle._value != NULL_INDEX)
+ {
+ reflectElems<T>(handle, count);
+ return true;
+ }
+ return false;
+ }
+
+ template <typename T>
+ PX_INLINE bool alloc(InplaceHandle<T>& handle, uint32_t count = 1)
+ {
+ return alloc<T>(static_cast<InplaceHandleBase&>(handle), count);
+ }
+
+ PX_INLINE void free(InplaceHandleBase& handle)
+ {
+ if (handle._value != NULL_INDEX)
+ {
+ freeBlock(handle._value);
+ handle._value = NULL_INDEX;
+ }
+ }
+
+ template <typename T>
+ bool realloc(InplaceHandleBase& handle, uint32_t oldCount, uint32_t newCount)
+ {
+ if (handle._value != NULL_INDEX)
+ {
+ PX_ASSERT(oldCount > 0);
+ if (oldCount != newCount)
+ {
+ if (newCount > 0)
+ {
+ if (resizeBlock(handle._value, sizeof(T) * newCount))
+ {
+ if (newCount > oldCount)
+ {
+ reflectElems<T>(handle, newCount, oldCount);
+ }
+ return true;
+ }
+ return false;
+ }
+ free(handle);
+ }
+ }
+ else
+ {
+ PX_ASSERT(oldCount == 0);
+ if (newCount > 0)
+ {
+ return alloc<T>(handle, newCount);
+ }
+ }
+ return true;
+ }
+
+
+ template <typename T>
+ PX_INLINE InplaceHandle<T> mappedHandle(InplaceHandle<T> handle) const
+ {
+ mapHandle(handle);
+ return handle;
+ }
+
+ uint32_t mapTo(uint8_t* destPtr) const
+ {
+ PX_ASSERT(_lastAllocatedBlockIndex == NULL_INDEX || _blocks[_lastAllocatedBlockIndex]._offset + _blocks[_lastAllocatedBlockIndex]._size == _allocatedSize);
+
+ memcpy(destPtr, getBufferPtr(), _allocatedSize);
+
+ //iterate all blocks
+ for (uint32_t blockIndex = _lastAllocatedBlockIndex; blockIndex != NULL_INDEX; blockIndex = _blocks[blockIndex]._prevIndex)
+ {
+ const Block& block = _blocks[blockIndex];
+ //iterate all refs in current block
+ for (uint32_t refIndex = block._firstRefIndex; refIndex != NULL_INDEX; refIndex = _handleRefs[refIndex].nextIndex)
+ {
+ const HandleRef& handleRef = _handleRefs[refIndex];
+ PX_ASSERT(handleRef.ownerBlockIndex == blockIndex);
+
+ uint32_t blockOffset = block._offset;
+ PX_ASSERT(blockOffset != uint32_t(-1));
+ InplaceHandleBase& handle = *reinterpret_cast<InplaceHandleBase*>(destPtr + blockOffset + handleRef.offsetInBlock);
+
+ mapHandle(handle);
+ }
+ }
+ return _allocatedSize;
+ }
+
+ uint32_t getAllocatedSize() const
+ {
+ return _allocatedSize;
+ }
+
+private:
+ template <typename T>
+ T* reflectElems(InplaceHandleBase handle, uint32_t newCount, uint32_t oldCount = 0)
+ {
+ const Block& block = _blocks[handle._value];
+
+ uint8_t* ptr = (getBufferPtr() + block._offset);
+ T* ptrT0 = reinterpret_cast<T*>(ptr);
+ T* ptrT = ptrT0 + oldCount;
+ Reflector r(this, handle._value, ptr);
+ for (uint32_t index = oldCount; index < newCount; ++index)
+ {
+ ::new(ptrT) T;
+ InplaceTypeHelper::reflectType<0>(r, ReflectorArg(), *ptrT, InplaceTypeMemberDefaultTraits());
+ ++ptrT;
+ }
+ return ptrT0;
+ }
+
+ static PX_INLINE uint32_t alignUp(uint32_t size, uint32_t alignment)
+ {
+ PX_ASSERT(alignment > 0);
+ return (size + (alignment - 1)) & ~(alignment - 1);
+ }
+
+ PX_INLINE int32_t getMoveDelta(uint32_t blockIndex, uint32_t moveOffset) const
+ {
+ PX_ASSERT(blockIndex != NULL_INDEX);
+ const uint32_t currOffset = _blocks[blockIndex]._offset;
+
+ //calculate max alignment for all subsequent blocks
+ uint32_t alignment = 0;
+ do
+ {
+ const Block& block = _blocks[blockIndex];
+ alignment = PxMax(alignment, block._alignment);
+
+ blockIndex = block._nextIndex;
+ }
+ while (blockIndex != NULL_INDEX);
+
+ int32_t moveDelta = (int32_t)moveOffset - (int32_t)currOffset;
+ //align moveDelta
+ if (moveDelta >= 0)
+ {
+ moveDelta += (alignment - 1);
+ moveDelta &= ~(alignment - 1);
+ }
+ else
+ {
+ moveDelta = -moveDelta;
+ moveDelta &= ~(alignment - 1);
+ moveDelta = -moveDelta;
+ }
+ PX_ASSERT(currOffset + moveDelta >= moveOffset);
+ return moveDelta;
+ }
+
+ uint32_t getPrevAllocatedSize(uint32_t prevBlockIndex) const
+ {
+ uint32_t prevAllocatedSize = 0;
+ if (prevBlockIndex != NULL_INDEX)
+ {
+ const Block& prevBlock = _blocks[prevBlockIndex];
+ prevAllocatedSize = prevBlock._offset + prevBlock._size;
+ }
+ return prevAllocatedSize;
+ }
+
+ void moveBlocks(uint32_t moveBlockIndex, int32_t moveDelta)
+ {
+ if (moveDelta != 0)
+ {
+ const uint32_t currOffset = _blocks[moveBlockIndex]._offset;
+ const uint32_t moveOffset = currOffset + moveDelta;
+
+ uint32_t moveSize = _allocatedSize - currOffset;
+ uint8_t* moveFromPtr = getBufferPtr() + currOffset;
+ uint8_t* moveToPtr = getBufferPtr() + moveOffset;
+ memmove(moveToPtr, moveFromPtr, moveSize);
+
+ _allocatedSize += moveDelta;
+ //update moved blocks
+ do
+ {
+ Block& moveBlock = _blocks[moveBlockIndex];
+ moveBlock._offset += moveDelta;
+ PX_ASSERT((moveBlock._offset & (moveBlock._alignment - 1)) == 0);
+
+ moveBlockIndex = moveBlock._nextIndex;
+ }
+ while (moveBlockIndex != NULL_INDEX);
+ }
+ }
+
+ void removeBlocks(uint32_t prevBlockIndex, uint32_t nextBlockIndex, uint32_t lastBlockIndex)
+ {
+ PX_UNUSED(lastBlockIndex);
+
+ uint32_t prevAllocatedSize = getPrevAllocatedSize(prevBlockIndex);
+ if (prevBlockIndex != NULL_INDEX)
+ {
+ _blocks[prevBlockIndex]._nextIndex = nextBlockIndex;
+ }
+ if (nextBlockIndex != NULL_INDEX)
+ {
+ _blocks[nextBlockIndex]._prevIndex = prevBlockIndex;
+
+ const int32_t moveDelta = getMoveDelta(nextBlockIndex, prevAllocatedSize);
+ moveBlocks(nextBlockIndex, moveDelta);
+ }
+ else
+ {
+ //last block
+ PX_ASSERT(lastBlockIndex == _lastAllocatedBlockIndex);
+ _lastAllocatedBlockIndex = prevBlockIndex;
+
+ _allocatedSize = prevAllocatedSize;
+ }
+ shrinkBuffer();
+ }
+
+ PX_INLINE bool growBuffer(uint32_t newAllocatedSize)
+ {
+ PX_ASSERT(newAllocatedSize >= _allocatedSize);
+
+ uint8_t* newBufferPtr = storageResizeBuffer(newAllocatedSize);
+ if (newBufferPtr == 0)
+ {
+ PX_ASSERT(0 && "Out of memory!");
+ return false;
+ }
+ _bufferPtr = newBufferPtr;
+ return true;
+ }
+
+ PX_INLINE void shrinkBuffer()
+ {
+ uint8_t* newBufferPtr = storageResizeBuffer(_allocatedSize);
+ PX_ASSERT(newBufferPtr != 0);
+ _bufferPtr = newBufferPtr;
+ }
+
+ uint32_t allocBlock(uint32_t size, uint32_t alignment)
+ {
+ uint32_t insertBlockIndex;
+ uint32_t offset;
+ int32_t moveDelta;
+ uint32_t newAllocatedSize;
+
+ PX_ASSERT(_activeGroup != NULL);
+ if (_activeGroup->_lastBlockIndex == NULL_INDEX || _activeGroup->_lastBlockIndex == _lastAllocatedBlockIndex)
+ {
+ //push_back new block
+ insertBlockIndex = NULL_INDEX;
+ offset = alignUp(_allocatedSize, alignment);
+ moveDelta = 0;
+ newAllocatedSize = offset + size;
+ }
+ else
+ {
+ //insert new block
+ insertBlockIndex = _blocks[_activeGroup->_lastBlockIndex]._nextIndex;
+ PX_ASSERT(insertBlockIndex != NULL_INDEX);
+
+ uint32_t prevAllocatedSize = getPrevAllocatedSize(_blocks[insertBlockIndex]._prevIndex);
+ offset = alignUp(prevAllocatedSize, alignment);
+ const uint32_t moveOffset = offset + size;
+ moveDelta = getMoveDelta(insertBlockIndex, moveOffset);
+ newAllocatedSize = _allocatedSize + moveDelta;
+ }
+
+ if (growBuffer(newAllocatedSize) == false)
+ {
+ return NULL_INDEX;
+ }
+
+ //find free block
+ if (_firstFreeBlockIndex == NULL_INDEX)
+ {
+ _firstFreeBlockIndex = _blocks.size();
+ _blocks.resize(_firstFreeBlockIndex + 1);
+
+ _blocks.back()._nextFreeBlockIndex = NULL_INDEX;
+ }
+ uint32_t blockIndex = _firstFreeBlockIndex;
+ Block& block = _blocks[blockIndex];
+ _firstFreeBlockIndex = block._nextFreeBlockIndex;
+
+ //init block
+ block._size = size;
+ block._alignment = alignment;
+ block._offset = offset;
+ block._firstRefIndex = NULL_INDEX;
+ block._group = _activeGroup;
+
+ PX_ASSERT((block._offset & (block._alignment - 1)) == 0);
+
+ if (insertBlockIndex == NULL_INDEX)
+ {
+ //add new block after the _lastAllocatedBlockIndex
+ block._prevIndex = _lastAllocatedBlockIndex;
+ block._nextIndex = NULL_INDEX;
+
+ if (_lastAllocatedBlockIndex != NULL_INDEX)
+ {
+ PX_ASSERT(_blocks[_lastAllocatedBlockIndex]._nextIndex == NULL_INDEX);
+ _blocks[_lastAllocatedBlockIndex]._nextIndex = blockIndex;
+ }
+ _lastAllocatedBlockIndex = blockIndex;
+ }
+ else
+ {
+ PX_ASSERT(_activeGroup->_lastBlockIndex != NULL_INDEX);
+ //insert new block before the insertBlockIndex
+ block._prevIndex = _activeGroup->_lastBlockIndex;
+ _blocks[_activeGroup->_lastBlockIndex]._nextIndex = blockIndex;
+
+ block._nextIndex = insertBlockIndex;
+ _blocks[insertBlockIndex]._prevIndex = blockIndex;
+
+ moveBlocks(insertBlockIndex, moveDelta);
+ PX_ASSERT(_allocatedSize == newAllocatedSize);
+ }
+ _allocatedSize = newAllocatedSize;
+
+ //update group
+ _activeGroup->_lastBlockIndex = blockIndex;
+
+ return blockIndex;
+ }
+
+
+ PX_INLINE void onRemoveHandle(const Block& block, InplaceHandleBase handle)
+ {
+ PX_UNUSED(block);
+ if (handle._value != InplaceHandleBase::NULL_VALUE)
+ {
+ PX_ASSERT(handle._value < _blocks.size());
+ PX_ASSERT(_blocks[handle._value]._group == block._group);
+
+ freeBlock(handle._value);
+ }
+ }
+ PX_INLINE void onRemoveHandleEmpty(const Block& , InplaceHandleBase )
+ {
+ }
+
+ void freeBlock(uint32_t blockIndex)
+ {
+ PX_ASSERT(blockIndex != NULL_INDEX);
+ PX_ASSERT(_activeGroup != NULL);
+ PX_ASSERT(_blocks[blockIndex]._group == _activeGroup);
+
+ removeHandleRefs(&InplaceStorage::onRemoveHandle, blockIndex);
+
+ Block& block = _blocks[blockIndex];
+
+ removeBlocks(block._prevIndex, block._nextIndex, blockIndex);
+
+ //update group
+ if (_activeGroup->_lastBlockIndex == blockIndex)
+ {
+ _activeGroup->_lastBlockIndex =
+ (block._prevIndex != NULL_INDEX) &&
+ (_blocks[block._prevIndex]._group == _activeGroup) ? block._prevIndex : NULL_INDEX;
+ }
+
+ block.reset();
+ //add block to free list
+ block._nextFreeBlockIndex = _firstFreeBlockIndex;
+ _firstFreeBlockIndex = blockIndex;
+ }
+
+ bool resizeBlock(uint32_t blockIndex, uint32_t newSize)
+ {
+ if (newSize < _blocks[blockIndex]._size)
+ {
+ //remove refs
+ removeHandleRefs(&InplaceStorage::onRemoveHandle, blockIndex, newSize);
+ }
+
+ Block& block = _blocks[blockIndex];
+ const uint32_t nextBlockIndex = block._nextIndex;
+
+ uint32_t newAllocatedSize = block._offset + newSize;
+ int32_t moveDelta = 0;
+ if (nextBlockIndex != NULL_INDEX)
+ {
+ moveDelta = getMoveDelta(nextBlockIndex, newAllocatedSize);
+ newAllocatedSize = _allocatedSize + moveDelta;
+ }
+
+ const bool bGrow = (newAllocatedSize > _allocatedSize);
+ if (bGrow)
+ {
+ if (growBuffer(newAllocatedSize) == false)
+ {
+ return false;
+ }
+ }
+
+ block._size = newSize;
+
+ if (nextBlockIndex != NULL_INDEX)
+ {
+ moveBlocks(nextBlockIndex, moveDelta);
+ PX_ASSERT(_allocatedSize == newAllocatedSize);
+ }
+ _allocatedSize = newAllocatedSize;
+ if (!bGrow)
+ {
+ shrinkBuffer();
+ }
+ return true;
+ }
+
+ void groupInit(InplaceStorageGroup* group)
+ {
+ storageLock();
+
+ //init new group
+ group->_lastBlockIndex = NULL_INDEX;
+ group->_groupListPrev = 0;
+ group->_groupListNext = _groupListHead;
+ if (_groupListHead != NULL)
+ {
+ _groupListHead->_groupListPrev = group;
+ }
+ _groupListHead = group;
+
+ storageUnlock();
+ }
+
+ void groupFree(InplaceStorageGroup* group)
+ {
+ storageLock();
+
+ if (group->_lastBlockIndex != NULL_INDEX)
+ {
+ uint32_t prevBlockIndex = group->_lastBlockIndex;
+ uint32_t nextBlockIndex = _blocks[group->_lastBlockIndex]._nextIndex;
+ do
+ {
+ uint32_t freeBlockIndex = prevBlockIndex;
+ Block& freeBlock = _blocks[freeBlockIndex];
+ prevBlockIndex = freeBlock._prevIndex;
+
+ removeHandleRefs(&InplaceStorage::onRemoveHandleEmpty, freeBlockIndex);
+
+ freeBlock.reset();
+ //add block to free list
+ freeBlock._nextFreeBlockIndex = _firstFreeBlockIndex;
+ _firstFreeBlockIndex = freeBlockIndex;
+ }
+ while (prevBlockIndex != NULL_INDEX && _blocks[prevBlockIndex]._group == group);
+
+ PX_ASSERT(prevBlockIndex == NULL_INDEX || _blocks[prevBlockIndex]._group != group);
+ PX_ASSERT(nextBlockIndex == NULL_INDEX || _blocks[nextBlockIndex]._group != group);
+
+ removeBlocks(prevBlockIndex, nextBlockIndex, group->_lastBlockIndex);
+ }
+
+ //remove from GroupList
+ if (group->_groupListNext != 0)
+ {
+ group->_groupListNext->_groupListPrev = group->_groupListPrev;
+ }
+ if (group->_groupListPrev != 0)
+ {
+ group->_groupListPrev->_groupListNext = group->_groupListNext;
+ }
+ else
+ {
+ PX_ASSERT(_groupListHead == group);
+ _groupListHead = group->_groupListNext;
+ }
+
+ storageUnlock();
+ }
+
+ void groupBegin(InplaceStorageGroup* group)
+ {
+ storageLock();
+
+ PX_ASSERT(_activeGroup == NULL);
+ _activeGroup = group;
+ }
+
+ void groupEnd(InplaceStorageGroup* group)
+ {
+ PX_UNUSED(group);
+ PX_ASSERT(group == _activeGroup);
+ _activeGroup = NULL;
+
+ storageUnlock();
+ }
+
+ void releaseGroups()
+ {
+ storageLock();
+
+ while (_groupListHead != 0)
+ {
+ InplaceStorageGroup* group = _groupListHead;
+ _groupListHead = _groupListHead->_groupListNext;
+
+ group->reset(this);
+ }
+
+ storageUnlock();
+ }
+
+ uint8_t* _bufferPtr;
+ bool _isChanged;
+
+ uint32_t _allocatedSize;
+
+ uint32_t _firstFreeBlockIndex;
+ uint32_t _lastAllocatedBlockIndex;
+ physx::Array<Block> _blocks;
+
+ uint32_t _firstFreeRefIndex;
+ physx::Array<HandleRef> _handleRefs;
+
+ InplaceStorageGroup* _groupListHead;
+ InplaceStorageGroup* _activeGroup;
+
+ friend class InplaceStorageGroup;
+};
+
+PX_INLINE void InplaceStorageGroup::init(InplaceStorage& storage)
+{
+ PX_ASSERT(_storage == 0);
+ _storage = &storage;
+ getStorage().groupInit(this);
+}
+PX_INLINE void InplaceStorageGroup::release()
+{
+ if (_storage != 0)
+ {
+ getStorage().groupFree(this);
+ _storage = 0;
+ }
+}
+PX_INLINE void InplaceStorageGroup::begin()
+{
+ getStorage().groupBegin(this);
+}
+PX_INLINE void InplaceStorageGroup::end()
+{
+ getStorage().groupEnd(this);
+}
+
+class InplaceStorageGroupScope
+{
+private:
+ InplaceStorageGroupScope& operator=(const InplaceStorageGroupScope&);
+ InplaceStorageGroup& _group;
+
+public:
+ InplaceStorageGroupScope(InplaceStorageGroup& group) : _group(group)
+ {
+ _group.begin();
+ }
+ ~InplaceStorageGroupScope()
+ {
+ _group.end();
+ }
+};
+
+#define INPLACE_STORAGE_GROUP_SCOPE(group) InplaceStorageGroupScope scopeAccess_##group ( group ); InplaceStorage& _storage_ = group.getStorage();
+
+////
+class ApexCpuInplaceStorage : public InplaceStorage
+{
+public:
+ ApexCpuInplaceStorage(uint32_t allocStep = 4096)
+ : mAllocStep(allocStep)
+ {
+ mSize = 0;
+ mStoragePtr = 0;
+ }
+ ~ApexCpuInplaceStorage()
+ {
+ release();
+ }
+
+ void release()
+ {
+ if (mStoragePtr)
+ {
+ PX_FREE(mStoragePtr);
+ mSize = 0;
+ mStoragePtr = 0;
+ }
+ }
+
+protected:
+ //interface for InplaceStorage
+ uint8_t* storageResizeBuffer(uint32_t newSize)
+ {
+ if (newSize > mSize)
+ {
+ newSize = ((newSize + mAllocStep - 1) / mAllocStep) * mAllocStep;
+ PX_ASSERT(newSize > mSize && (newSize % mAllocStep) == 0);
+ uint8_t* newStoragePtr = (uint8_t*)PX_ALLOC(newSize, PX_DEBUG_EXP("ApexCpuInplaceStorage"));
+ if (!newStoragePtr)
+ {
+ return 0;
+ }
+ memcpy(newStoragePtr, mStoragePtr, mSize);
+ PX_FREE(mStoragePtr);
+ mSize = newSize;
+ mStoragePtr = newStoragePtr;
+ }
+ return mStoragePtr;
+ }
+
+private:
+ uint32_t mAllocStep;
+ uint32_t mSize;
+ uint8_t* mStoragePtr;
+};
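+
+// Illustrative usage sketch (not part of the original header): allocating,
+// writing and reading data in an ApexCpuInplaceStorage. Allocations must happen
+// inside an active group scope; releasing the group frees all of its blocks.
+#if 0
+static void inplaceStorageExample()
+{
+ ApexCpuInplaceStorage storage;
+ InplaceStorageGroup group(storage);
+
+ InplaceHandle<float> handle;
+ {
+  // The scope macro calls group.begin()/end() and exposes the storage as _storage_.
+  INPLACE_STORAGE_GROUP_SCOPE(group);
+  _storage_.alloc(handle, 4); // room for 4 floats
+  _storage_.update(handle, 1.0f, 0); // write element 0
+ }
+
+ float value = 0.0f;
+ storage.fetch(handle, value, 0); // read element 0 back
+
+ group.release(); // frees the group's blocks (also done by the destructor)
+}
+#endif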
+
+
+
+}
+} // end namespace nvidia::apex
+
+#endif // __APEX_INPLACE_STORAGE_H__
diff --git a/APEX_1.4/common/include/InplaceTypes.h b/APEX_1.4/common/include/InplaceTypes.h
new file mode 100644
index 00000000..31f7d3a3
--- /dev/null
+++ b/APEX_1.4/common/include/InplaceTypes.h
@@ -0,0 +1,548 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+
+#ifndef __APEX_INPLACE_TYPES_H__
+#define __APEX_INPLACE_TYPES_H__
+
+#include "ApexUsingNamespace.h"
+#include "PxVec3.h"
+#include "PxVec4.h"
+#include "PxBounds3.h"
+#include "PxMat44.h"
+
+
+namespace nvidia
+{
+namespace apex
+{
+
+#define INPLACE_TYPE_BUILD() "InplaceTypesBuilder.h"
+#define INPLACE_TYPE_FIELD(_field_type_, _field_name_) ( (2, (_field_type_, _field_name_)) )
+#define INPLACE_TYPE_FIELD_N(_field_type_, _field_name_, _field_size_) ( (3, (_field_type_, _field_name_, _field_size_)) )
+
+#define APEX_OFFSETOF(type, member) offsetof(type, member)
+
+#ifdef __CUDACC__
+#define APEX_CUDA_CALLABLE __device__
+#else
+#define APEX_CUDA_CALLABLE
+#endif
+
+
+#ifdef __CUDACC__
+#define INPLACE_TEMPL_ARGS_DEF template <bool GpuInplaceStorageTemplArg>
+#define INPLACE_TEMPL_ARGS_VAL <GpuInplaceStorageTemplArg>
+#define INPLACE_TEMPL_VA_ARGS_DEF(...) template <bool GpuInplaceStorageTemplArg, __VA_ARGS__>
+#define INPLACE_TEMPL_VA_ARGS_VAL(...) <GpuInplaceStorageTemplArg, __VA_ARGS__>
+#define INPLACE_STORAGE_SUB_ARGS_DEF const uint8_t* _arg_constMem_, texture<int, 1, cudaReadModeElementType> _arg_texRef_
+#define INPLACE_STORAGE_ARGS_DEF InplaceHandleBase::StorageSelector<GpuInplaceStorageTemplArg> _arg_selector_, INPLACE_STORAGE_SUB_ARGS_DEF
+#define INPLACE_STORAGE_ARGS_VAL _arg_selector_, _arg_constMem_, _arg_texRef_
+#define CPU_INPLACE_STORAGE_ARGS_UNUSED
+#else
+#define INPLACE_TEMPL_ARGS_DEF template <typename CpuInplaceStorageTemplArg>
+#define INPLACE_TEMPL_ARGS_VAL <CpuInplaceStorageTemplArg>
+#define INPLACE_TEMPL_VA_ARGS_DEF(...) template <typename CpuInplaceStorageTemplArg, __VA_ARGS__>
+#define INPLACE_TEMPL_VA_ARGS_VAL(...) <CpuInplaceStorageTemplArg, __VA_ARGS__>
+#define INPLACE_STORAGE_ARGS_DEF const CpuInplaceStorageTemplArg& _arg_storage_
+#define INPLACE_STORAGE_ARGS_VAL _arg_storage_
+#define CPU_INPLACE_STORAGE_ARGS_UNUSED PX_UNUSED(_arg_storage_);
+#endif
+
+
+template <bool AutoFree>
+struct InplaceTypeMemberTraits
+{
+ enum { AutoFreeValue = AutoFree };
+};
+
+typedef InplaceTypeMemberTraits<false> InplaceTypeMemberDefaultTraits;
+
+template <typename T>
+struct InplaceTypeTraits;
+
+class InplaceTypeHelper
+{
+ template <int n>
+ struct ArrayIterator
+ {
+ template <int _inplace_offset_, typename R, typename RA, typename T, int N, typename MT>
+ APEX_CUDA_CALLABLE PX_INLINE static void reflectArray(R& r, RA ra, T (& arr)[N], MT mt)
+ {
+ InplaceTypeHelper::reflectType<(N - n)*sizeof(T) + _inplace_offset_>(r, ra, arr[ (N - n) ], mt);
+
+ ArrayIterator<n - 1>::reflectArray<_inplace_offset_>(r, ra, arr, mt);
+ }
+ };
+
+public:
+ template <int _inplace_offset_, typename R, typename RA, typename T, typename MT>
+ APEX_CUDA_CALLABLE PX_INLINE static void reflectType(R& r, RA ra, T& t, MT mt)
+ {
+ r.processType<_inplace_offset_>(ra, t, mt);
+ InplaceTypeTraits<T>::reflectType<_inplace_offset_>(r, ra, t);
+ }
+
+ template <int _inplace_offset_, typename R, typename RA, typename T, int N, typename MT>
+ APEX_CUDA_CALLABLE PX_INLINE static void reflectType(R& r, RA ra, T (& arr)[N], MT mt)
+ {
+ ArrayIterator<N>::reflectArray<_inplace_offset_>(r, ra, arr, mt);
+ }
+
+#ifdef __CUDACC__
+ class FetchReflector4ConstMem
+ {
+ public:
+ APEX_CUDA_CALLABLE PX_INLINE FetchReflector4ConstMem() {}
+
+ template <int _inplace_offset_, typename T, typename MT>
+ APEX_CUDA_CALLABLE PX_INLINE void processType(const uint8_t* constMem, T const& value, MT )
+ {
+ ; //do nothing
+ }
+
+ template <int _inplace_offset_, typename T>
+ APEX_CUDA_CALLABLE PX_INLINE void processPrimitiveType(const uint8_t* constMem, T& out)
+ {
+ out = *reinterpret_cast<const T*>(constMem + _inplace_offset_);
+ }
+ };
+ template <typename T>
+ APEX_CUDA_CALLABLE PX_INLINE static void fetchType(T& out, const uint8_t* constMem, uint32_t offset0)
+ {
+ FetchReflector4ConstMem r;
+ InplaceTypeHelper::reflectType<0>(r, constMem + offset0, out, InplaceTypeMemberDefaultTraits());
+ }
+
+ class FetchReflector4TexRef
+ {
+ uint32_t texIdx0;
+
+ APEX_CUDA_CALLABLE PX_INLINE void convertValue(int value, int32_t& out)
+ {
+ out = int32_t(value);
+ }
+ APEX_CUDA_CALLABLE PX_INLINE void convertValue(int value, uint32_t& out)
+ {
+ out = uint32_t(value);
+ }
+ APEX_CUDA_CALLABLE PX_INLINE void convertValue(int value, float& out)
+ {
+ out = __int_as_float(value);
+ }
+ template <typename T>
+ APEX_CUDA_CALLABLE PX_INLINE void convertValue(int value, T*& out)
+ {
+ out = reinterpret_cast<T*>(value);
+ }
+
+ template <int _inplace_offset_, typename T>
+ APEX_CUDA_CALLABLE PX_INLINE void processValue(texture<int, 1, cudaReadModeElementType> texRef, T& out)
+ {
+ PX_COMPILE_TIME_ASSERT((_inplace_offset_ & 3) == 0);
+ //PX_COMPILE_TIME_ASSERT(sizeof(T) == 4); this fails to compile
+ const int texIdx = (_inplace_offset_ >> 2);
+
+ const int value = tex1Dfetch(texRef, texIdx0 + texIdx);
+ convertValue(value, out);
+ }
+
+ template <int _inplace_offset_, typename T>
+ APEX_CUDA_CALLABLE PX_INLINE void processValue64(texture<int, 1, cudaReadModeElementType> texRef, T& out)
+ {
+ PX_COMPILE_TIME_ASSERT((_inplace_offset_ & 7) == 0);
+ PX_COMPILE_TIME_ASSERT(sizeof(T) == 8);
+ const int texIdx = (_inplace_offset_ >> 2);
+ union
+ {
+ struct
+ {
+ int value0;
+ int value1;
+ };
+ long long value;
+ } u;
+ u.value0 = tex1Dfetch(texRef, texIdx0 + texIdx + 0);
+ u.value1 = tex1Dfetch(texRef, texIdx0 + texIdx + 1);
+ out = reinterpret_cast<T>(u.value);
+ }
+
+ public:
+ APEX_CUDA_CALLABLE PX_INLINE FetchReflector4TexRef(uint32_t offset0) : texIdx0(offset0 >> 2) {}
+
+ template <int _inplace_offset_, typename T, typename MT>
+ APEX_CUDA_CALLABLE PX_INLINE void processType(texture<int, 1, cudaReadModeElementType> texRef, T const& value, MT )
+ {
+ ; //do nothing
+ }
+
+ template <int _inplace_offset_, typename T>
+ APEX_CUDA_CALLABLE PX_INLINE void processPrimitiveType(texture<int, 1, cudaReadModeElementType> texRef, T& out)
+ {
+ processValue<_inplace_offset_>(texRef, out);
+ }
+
+ template <int _inplace_offset_, typename T>
+ APEX_CUDA_CALLABLE PX_INLINE void processPrimitiveType(texture<int, 1, cudaReadModeElementType> texRef, T*& out)
+ {
+#if PX_X64
+ processValue64<_inplace_offset_>(texRef, out);
+#else
+ processValue<_inplace_offset_>(texRef, out);
+#endif
+ }
+ };
+ template <typename T>
+ APEX_CUDA_CALLABLE PX_INLINE static void fetchType(T& out, texture<int, 1, cudaReadModeElementType> texRef, uint32_t offset0)
+ {
+ FetchReflector4TexRef r(offset0);
+ InplaceTypeHelper::reflectType<0>(r, texRef, out, InplaceTypeMemberDefaultTraits());
+ }
+#endif
+};
+
+template <>
+struct InplaceTypeHelper::ArrayIterator<0>
+{
+ template <int _inplace_offset_, typename R, typename RA, typename T, int N, typename MT>
+ APEX_CUDA_CALLABLE PX_INLINE static void reflectArray(R& , RA , T (& )[N], MT )
+ {
+ ; // do nothing
+ }
+};
+
+template <typename T>
+struct InplaceTypeTraits
+{
+ template <int _inplace_offset_, typename R, typename RA>
+ APEX_CUDA_CALLABLE PX_INLINE static void reflectType(R& r, RA ra, T& t)
+ {
+ t.reflectSelf<_inplace_offset_>(r, ra);
+ }
+};
+
+
+class InplacePrimitive
+{
+protected:
+ int _value;
+
+ APEX_CUDA_CALLABLE PX_INLINE InplacePrimitive() {}
+
+public:
+ template <int _inplace_offset_, typename R, typename RA>
+ APEX_CUDA_CALLABLE PX_INLINE void reflectSelf(R& r, RA ra)
+ {
+ //r.processPrimitiveType<APEX_OFFSETOF(InplacePrimitive, _value) + _inplace_offset_>(ra, _value);
+ InplaceTypeHelper::reflectType<APEX_OFFSETOF(InplacePrimitive, _value) + _inplace_offset_>(r, ra, _value, InplaceTypeMemberDefaultTraits());
+ }
+};
+
+class InplaceBool : public InplacePrimitive
+{
+public:
+ APEX_CUDA_CALLABLE PX_INLINE InplaceBool() {}
+ APEX_CUDA_CALLABLE PX_INLINE InplaceBool(bool b) { _value = (b ? 1 : 0); }
+ APEX_CUDA_CALLABLE PX_INLINE InplaceBool& operator= (bool b) { _value = (b ? 1 : 0); return *this; }
+ APEX_CUDA_CALLABLE PX_INLINE operator bool () const { return (_value != 0); }
+};
+
+template <typename ET>
+class InplaceEnum : public InplacePrimitive
+{
+public:
+ APEX_CUDA_CALLABLE PX_INLINE InplaceEnum() {}
+ APEX_CUDA_CALLABLE PX_INLINE InplaceEnum(ET value) { _value = value; }
+ APEX_CUDA_CALLABLE PX_INLINE InplaceEnum<ET>& operator=(ET value) { _value = value; return *this; }
+ APEX_CUDA_CALLABLE PX_INLINE operator ET () const { return ET(_value); }
+};
+
+
+template <typename T> struct InplaceTypeTraits<T*>
+{
+ template <int _inplace_offset_, typename R, typename RA>
+ APEX_CUDA_CALLABLE PX_INLINE static void reflectType(R& r, RA ra, T*& t) { r.processPrimitiveType<_inplace_offset_>(ra, t); }
+};
+template <> struct InplaceTypeTraits<int32_t>
+{
+ template <int _inplace_offset_, typename R, typename RA>
+ APEX_CUDA_CALLABLE PX_INLINE static void reflectType(R& r, RA ra, int32_t& t) { r.processPrimitiveType<_inplace_offset_>(ra, t); }
+};
+template <> struct InplaceTypeTraits<uint32_t>
+{
+ template <int _inplace_offset_, typename R, typename RA>
+ APEX_CUDA_CALLABLE PX_INLINE static void reflectType(R& r, RA ra, uint32_t& t) { r.processPrimitiveType<_inplace_offset_>(ra, t); }
+};
+template <> struct InplaceTypeTraits<float>
+{
+ template <int _inplace_offset_, typename R, typename RA>
+ APEX_CUDA_CALLABLE PX_INLINE static void reflectType(R& r, RA ra, float& t) { r.processPrimitiveType<_inplace_offset_>(ra, t); }
+};
+template <> struct InplaceTypeTraits<PxVec3>
+{
+ template <int _inplace_offset_, typename R, typename RA>
+ APEX_CUDA_CALLABLE PX_INLINE static void reflectType(R& r, RA ra, PxVec3& t)
+ {
+ InplaceTypeHelper::reflectType<APEX_OFFSETOF(PxVec3, x) + _inplace_offset_>(r, ra, t.x, InplaceTypeMemberDefaultTraits());
+ InplaceTypeHelper::reflectType<APEX_OFFSETOF(PxVec3, y) + _inplace_offset_>(r, ra, t.y, InplaceTypeMemberDefaultTraits());
+ InplaceTypeHelper::reflectType<APEX_OFFSETOF(PxVec3, z) + _inplace_offset_>(r, ra, t.z, InplaceTypeMemberDefaultTraits());
+ }
+};
+template <> struct InplaceTypeTraits<PxVec4>
+{
+ template <int _inplace_offset_, typename R, typename RA>
+ APEX_CUDA_CALLABLE PX_INLINE static void reflectType(R& r, RA ra, PxVec4& t)
+ {
+ InplaceTypeHelper::reflectType<APEX_OFFSETOF(PxVec4, x) + _inplace_offset_>(r, ra, t.x, InplaceTypeMemberDefaultTraits());
+ InplaceTypeHelper::reflectType<APEX_OFFSETOF(PxVec4, y) + _inplace_offset_>(r, ra, t.y, InplaceTypeMemberDefaultTraits());
+ InplaceTypeHelper::reflectType<APEX_OFFSETOF(PxVec4, z) + _inplace_offset_>(r, ra, t.z, InplaceTypeMemberDefaultTraits());
+ InplaceTypeHelper::reflectType<APEX_OFFSETOF(PxVec4, w) + _inplace_offset_>(r, ra, t.w, InplaceTypeMemberDefaultTraits());
+ }
+};
+template <> struct InplaceTypeTraits<PxTransform>
+{
+ template <int _inplace_offset_, typename R, typename RA>
+ APEX_CUDA_CALLABLE PX_INLINE static void reflectType(R& r, RA ra, PxTransform& t)
+ {
+ InplaceTypeHelper::reflectType<APEX_OFFSETOF(PxTransform, q) + _inplace_offset_>(r, ra, t.q, InplaceTypeMemberDefaultTraits());
+ InplaceTypeHelper::reflectType<APEX_OFFSETOF(PxTransform, p) + _inplace_offset_>(r, ra, t.p, InplaceTypeMemberDefaultTraits());
+ }
+};
+template <> struct InplaceTypeTraits<PxQuat>
+{
+ template <int _inplace_offset_, typename R, typename RA>
+ APEX_CUDA_CALLABLE PX_INLINE static void reflectType(R& r, RA ra, PxQuat& t)
+ {
+ InplaceTypeHelper::reflectType<APEX_OFFSETOF(PxQuat, x) + _inplace_offset_>(r, ra, t.x, InplaceTypeMemberDefaultTraits());
+ InplaceTypeHelper::reflectType<APEX_OFFSETOF(PxQuat, y) + _inplace_offset_>(r, ra, t.y, InplaceTypeMemberDefaultTraits());
+ InplaceTypeHelper::reflectType<APEX_OFFSETOF(PxQuat, z) + _inplace_offset_>(r, ra, t.z, InplaceTypeMemberDefaultTraits());
+ InplaceTypeHelper::reflectType<APEX_OFFSETOF(PxQuat, w) + _inplace_offset_>(r, ra, t.w, InplaceTypeMemberDefaultTraits());
+ }
+};
+template <> struct InplaceTypeTraits<PxBounds3>
+{
+ template <int _inplace_offset_, typename R, typename RA>
+ APEX_CUDA_CALLABLE PX_INLINE static void reflectType(R& r, RA ra, PxBounds3& t)
+ {
+ InplaceTypeHelper::reflectType<APEX_OFFSETOF(PxBounds3, minimum) + _inplace_offset_>(r, ra, t.minimum, InplaceTypeMemberDefaultTraits());
+ InplaceTypeHelper::reflectType<APEX_OFFSETOF(PxBounds3, maximum) + _inplace_offset_>(r, ra, t.maximum, InplaceTypeMemberDefaultTraits());
+ }
+};
+template <> struct InplaceTypeTraits<PxPlane>
+{
+ template <int _inplace_offset_, typename R, typename RA>
+ APEX_CUDA_CALLABLE PX_INLINE static void reflectType(R& r, RA ra, PxPlane& t)
+ {
+ InplaceTypeHelper::reflectType<APEX_OFFSETOF(PxPlane, n) + _inplace_offset_>(r, ra, t.n, InplaceTypeMemberDefaultTraits());
+ InplaceTypeHelper::reflectType<APEX_OFFSETOF(PxPlane, d) + _inplace_offset_>(r, ra, t.d, InplaceTypeMemberDefaultTraits());
+ }
+};
+template <> struct InplaceTypeTraits<PxMat33>
+{
+ template <int _inplace_offset_, typename R, typename RA>
+ APEX_CUDA_CALLABLE PX_INLINE static void reflectType(R& r, RA ra, PxMat33& t)
+ {
+ InplaceTypeHelper::reflectType<APEX_OFFSETOF(PxMat33, column0) + _inplace_offset_>(r, ra, t.column0, InplaceTypeMemberDefaultTraits());
+ InplaceTypeHelper::reflectType<APEX_OFFSETOF(PxMat33, column1) + _inplace_offset_>(r, ra, t.column1, InplaceTypeMemberDefaultTraits());
+ InplaceTypeHelper::reflectType<APEX_OFFSETOF(PxMat33, column2) + _inplace_offset_>(r, ra, t.column2, InplaceTypeMemberDefaultTraits());
+ }
+};
+template <> struct InplaceTypeTraits<PxMat44>
+{
+ template <int _inplace_offset_, typename R, typename RA>
+ APEX_CUDA_CALLABLE PX_INLINE static void reflectType(R& r, RA ra, PxMat44& t)
+ {
+ InplaceTypeHelper::reflectType<APEX_OFFSETOF(PxMat44, column0) + _inplace_offset_>(r, ra, t.column0, InplaceTypeMemberDefaultTraits());
+ InplaceTypeHelper::reflectType<APEX_OFFSETOF(PxMat44, column1) + _inplace_offset_>(r, ra, t.column1, InplaceTypeMemberDefaultTraits());
+ InplaceTypeHelper::reflectType<APEX_OFFSETOF(PxMat44, column2) + _inplace_offset_>(r, ra, t.column2, InplaceTypeMemberDefaultTraits());
+ InplaceTypeHelper::reflectType<APEX_OFFSETOF(PxMat44, column3) + _inplace_offset_>(r, ra, t.column3, InplaceTypeMemberDefaultTraits());
+ }
+};
+
+
+class InplaceHandleBase
+{
+protected:
+ friend class InplaceStorage;
+ friend class InplaceArrayBase;
+
+ static const uint32_t NULL_VALUE = uint32_t(-1);
+ uint32_t _value;
+
+public:
+ APEX_CUDA_CALLABLE PX_INLINE InplaceHandleBase()
+ {
+ _value = NULL_VALUE;
+ }
+
+ PX_INLINE void setNull()
+ {
+ _value = NULL_VALUE;
+ }
+
+ APEX_CUDA_CALLABLE PX_INLINE bool isNull() const
+ {
+ return _value == NULL_VALUE;
+ }
+
+#ifdef __CUDACC__
+ template <bool B>
+ struct StorageSelector
+ {
+ static const bool value = B;
+ };
+
+ template <typename T>
+ APEX_CUDA_CALLABLE PX_INLINE void fetch(StorageSelector<false>, INPLACE_STORAGE_SUB_ARGS_DEF, T& out, uint32_t index = 0) const
+ {
+ //out = reinterpret_cast<const T*>(_arg_constMem_ + _value)[index];
+ InplaceTypeHelper::fetchType<T>(out, _arg_constMem_, _value + sizeof(T) * index);
+ }
+
+ template <typename T>
+ APEX_CUDA_CALLABLE PX_INLINE void fetch(StorageSelector<true>, INPLACE_STORAGE_SUB_ARGS_DEF, T& out, uint32_t index = 0) const
+ {
+ InplaceTypeHelper::fetchType<T>(out, _arg_texRef_, _value + sizeof(T) * index);
+ }
+#else
+ template <typename S, typename T>
+ PX_INLINE void fetch(const S& storage, T& out, uint32_t index = 0) const
+ {
+ storage.fetch(*this, out, index);
+ }
+
+ template <typename S, typename T>
+ PX_INLINE void update(S& storage, const T& in, uint32_t index = 0) const
+ {
+ storage.update(*this, in, index);
+ }
+
+ template <typename S, typename T>
+ PX_INLINE bool allocOrFetch(S& storage, T& out)
+ {
+ if (isNull())
+ {
+ storage.template alloc<T>(*this);
+ return true;
+ }
+ else
+ {
+ storage.fetch(*this, out);
+ return false;
+ }
+ }
+#endif
+
+ template <int _inplace_offset_, typename R, typename RA>
+ APEX_CUDA_CALLABLE PX_INLINE void reflectSelf(R& r, RA ra)
+ {
+ InplaceTypeHelper::reflectType<APEX_OFFSETOF(InplaceHandleBase, _value) + _inplace_offset_>(r, ra, _value, InplaceTypeMemberDefaultTraits());
+ }
+};
+
+template <typename T>
+class InplaceHandle : public InplaceHandleBase
+{
+public:
+ APEX_CUDA_CALLABLE PX_INLINE InplaceHandle() {}
+
+ template <typename S>
+ PX_INLINE bool alloc(S& storage)
+ {
+ return storage.alloc(*this);
+ }
+};
+
+
+class InplaceArrayBase
+{
+protected:
+ uint32_t _size;
+ InplaceHandleBase _elems;
+
+#ifndef __CUDACC__
+ PX_INLINE InplaceArrayBase()
+ {
+ _size = 0;
+ }
+#endif
+
+ template <int _inplace_offset_, typename R, typename RA, typename MT>
+ APEX_CUDA_CALLABLE PX_INLINE void reflectSelf(R& r, RA ra, MT mt)
+ {
+ InplaceTypeHelper::reflectType<APEX_OFFSETOF(InplaceArrayBase, _size) + _inplace_offset_>(r, ra, _size, InplaceTypeMemberDefaultTraits());
+ InplaceTypeHelper::reflectType<APEX_OFFSETOF(InplaceArrayBase, _elems) + _inplace_offset_>(r, ra, _elems, mt);
+ }
+};
+
+
+// if AutoFreeElems is true, then InplaceHandles in deleted elements are automatically freed on array resize!
+template <typename T, bool AutoFreeElems = false>
+class InplaceArray : public InplaceArrayBase
+{
+public:
+ APEX_CUDA_CALLABLE PX_INLINE uint32_t getSize() const
+ {
+ return _size;
+ }
+
+#ifndef __CUDACC__
+ PX_INLINE InplaceArray()
+ {
+ }
+
+ template <typename S>
+ PX_INLINE void fetchElem(const S& storage, T& out, uint32_t index) const
+ {
+ storage.fetch(_elems, out, index);
+ }
+ template <typename S>
+ PX_INLINE void updateElem(S& storage, const T& in, uint32_t index) const
+ {
+ storage.update(_elems, in, index);
+ }
+ template <typename S>
+ PX_INLINE void updateRange(S& storage, const T* in, uint32_t count, uint32_t start = 0) const
+ {
+ storage.updateRange(_elems, in, count, start);
+ }
+
+ template <typename S>
+ PX_INLINE bool resize(S& storage, uint32_t size)
+ {
+ if (storage.template realloc<T>(_elems, _size, size))
+ {
+ _size = size;
+ return true;
+ }
+ return false;
+ }
+
+#else
+ INPLACE_TEMPL_ARGS_DEF
+ APEX_CUDA_CALLABLE PX_INLINE void fetchElem(INPLACE_STORAGE_ARGS_DEF, T& out, uint32_t index) const
+ {
+ _elems.fetch(INPLACE_STORAGE_ARGS_VAL, out, index);
+ }
+#endif
+
+ template <int _inplace_offset_, typename R, typename RA>
+ APEX_CUDA_CALLABLE PX_INLINE void reflectSelf(R& r, RA ra)
+ {
+ InplaceArrayBase::reflectSelf<_inplace_offset_>(r, ra, InplaceTypeMemberTraits<AutoFreeElems>());
+ }
+
+};
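+
+// Illustrative sketch (not part of the original header): resizing and filling an
+// InplaceArray. The storage must have an active group when resize() is called;
+// with AutoFreeElems = true, handles held by removed elements are freed
+// automatically when the array shrinks.
+#if 0
+template <typename Storage>
+static void fillExampleArray(Storage& storage, InplaceArray<float>& array)
+{
+ if (array.resize(storage, 3))
+ {
+  array.updateElem(storage, 10.0f, 0);
+  array.updateElem(storage, 20.0f, 1);
+  array.updateElem(storage, 30.0f, 2);
+ }
+}
+#endif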
+
+
+}
+} // end namespace nvidia::apex
+
+#endif // __APEX_INPLACE_TYPES_H__
diff --git a/APEX_1.4/common/include/InplaceTypesBuilder.h b/APEX_1.4/common/include/InplaceTypesBuilder.h
new file mode 100644
index 00000000..a7277ff5
--- /dev/null
+++ b/APEX_1.4/common/include/InplaceTypesBuilder.h
@@ -0,0 +1,86 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+
+#include <boost/preprocessor/seq.hpp>
+#include <boost/preprocessor/seq/for_each_i.hpp>
+#include <boost/preprocessor/seq/filter.hpp>
+#include <boost/preprocessor/iteration/local.hpp>
+#include <boost/preprocessor/tuple/elem.hpp>
+#include <boost/preprocessor/control/if.hpp>
+#include <boost/preprocessor/comparison/equal.hpp>
+#include <boost/preprocessor/facilities/empty.hpp>
+#include <boost/preprocessor/array/size.hpp>
+#include <boost/preprocessor/array/elem.hpp>
+#include <boost/preprocessor/punctuation/comma.hpp>
+
+#pragma warning(push)
+#pragma warning(disable: 4355)
+
+#define _PP_GET_FIELD_TYPE(n) BOOST_PP_ARRAY_ELEM(0, BOOST_PP_SEQ_ELEM(n, INPLACE_TYPE_STRUCT_FIELDS))
+#define _PP_GET_FIELD_NAME(n) BOOST_PP_ARRAY_ELEM(1, BOOST_PP_SEQ_ELEM(n, INPLACE_TYPE_STRUCT_FIELDS))
+
+#define _PP_HAS_FIELD_SIZE(n) BOOST_PP_EQUAL(BOOST_PP_ARRAY_SIZE(BOOST_PP_SEQ_ELEM(n, INPLACE_TYPE_STRUCT_FIELDS)), 3)
+#define _PP_GET_FIELD_SIZE(n) BOOST_PP_ARRAY_ELEM(2, BOOST_PP_SEQ_ELEM(n, INPLACE_TYPE_STRUCT_FIELDS))
+
+
+#ifdef INPLACE_TYPE_STRUCT_NAME
+
+struct INPLACE_TYPE_STRUCT_NAME
+#ifdef INPLACE_TYPE_STRUCT_BASE
+ : INPLACE_TYPE_STRUCT_BASE
+#endif
+{
+//fields
+#ifdef INPLACE_TYPE_STRUCT_FIELDS
+#define BOOST_PP_LOCAL_LIMITS (0, BOOST_PP_SEQ_SIZE(INPLACE_TYPE_STRUCT_FIELDS) - 1)
+#define BOOST_PP_LOCAL_MACRO(n) \
+ _PP_GET_FIELD_TYPE(n) _PP_GET_FIELD_NAME(n) BOOST_PP_IF(_PP_HAS_FIELD_SIZE(n), [##_PP_GET_FIELD_SIZE(n)##], BOOST_PP_EMPTY());
+#include BOOST_PP_LOCAL_ITERATE()
+#endif
+
+//reflectSelf
+ template <int _inplace_offset_, typename R, typename RA>
+#if defined(INPLACE_TYPE_STRUCT_BASE) | defined(INPLACE_TYPE_STRUCT_FIELDS)
+ APEX_CUDA_CALLABLE PX_INLINE void reflectSelf(R& r, RA ra)
+ {
+#ifdef INPLACE_TYPE_STRUCT_BASE
+ INPLACE_TYPE_STRUCT_BASE::reflectSelf<_inplace_offset_>(r, ra);
+#endif
+#ifdef INPLACE_TYPE_STRUCT_FIELDS
+#define BOOST_PP_LOCAL_LIMITS (0, BOOST_PP_SEQ_SIZE(INPLACE_TYPE_STRUCT_FIELDS) - 1)
+#define BOOST_PP_LOCAL_MACRO(n) \
+ InplaceTypeHelper::reflectType<APEX_OFFSETOF( INPLACE_TYPE_STRUCT_NAME, _PP_GET_FIELD_NAME(n) ) + _inplace_offset_>(r, ra, _PP_GET_FIELD_NAME(n), InplaceTypeMemberDefaultTraits());
+#include BOOST_PP_LOCAL_ITERATE()
+#endif
+ }
+#else
+ APEX_CUDA_CALLABLE PX_INLINE void reflectSelf(R& , RA )
+ {
+ }
+#endif
+
+#ifndef INPLACE_TYPE_STRUCT_LEAVE_OPEN
+};
+#endif
+
+#endif
+
+#undef _PP_GET_FIELD_TYPE
+#undef _PP_GET_FIELD_NAME
+#undef _PP_HAS_FIELD_SIZE
+#undef _PP_GET_FIELD_SIZE
+
+#undef INPLACE_TYPE_STRUCT_NAME
+#undef INPLACE_TYPE_STRUCT_BASE
+#undef INPLACE_TYPE_STRUCT_FIELDS
+#undef INPLACE_TYPE_STRUCT_LEAVE_OPEN
+
+#pragma warning(pop)
diff --git a/APEX_1.4/common/include/InstancedObjectSimulationIntl.h b/APEX_1.4/common/include/InstancedObjectSimulationIntl.h
new file mode 100644
index 00000000..d631589d
--- /dev/null
+++ b/APEX_1.4/common/include/InstancedObjectSimulationIntl.h
@@ -0,0 +1,160 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+
+#ifndef INSTANCED_OBJECT_SIMULATION_INTL_H
+#define INSTANCED_OBJECT_SIMULATION_INTL_H
+
+#include "ApexDefs.h"
+
+#include "PxTask.h"
+#include "ApexActor.h"
+#include "IofxManagerIntl.h"
+
+namespace nvidia
+{
+namespace apex
+{
+
+class RenderVolume;
+
+/* Input data structure from Emitter ==> (injector) IOS */
+struct IosNewObject
+{
+ PxVec3 initialPosition;
+ PxVec3 initialVelocity;
+ float lifetime; // in seconds
+ float lodBenefit; // filled in by injector
+ IofxActorIDIntl iofxActorID; // filled in by injector
+
+ uint32_t userData;
+};
+
+/**
+ * Note the difference between how lifetimes are provided by the emitter (in seconds) and how
+ * they are reported to the IOFX (as a fraction of time remaining). This implies two IOS
+ * requirements. 1) The IOS must remember the initial total lifetime and, each frame, divide the
+ * remaining lifetime by the total in order to report the fraction remaining. 2) In order for the
+ * remaining-life value to report 1.0 on the object's initial frame, new objects must not have
+ * their remaining lifetime decremented on the frame they are spawned.
+ */
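+
+// Illustrative sketch (not part of the original source) of one way an IOS could satisfy the two
+// requirements above; the struct and field names are hypothetical.
+struct IosLifetimeExampleIntl
+{
+	float lifeTotal;	// total lifetime in seconds, as provided by the emitter at spawn
+	float lifeRemain;	// decremented by deltaTime every step except the spawn step
+
+	// Fraction of lifetime remaining, as reported to the IOFX; equals 1.0 on the spawn frame.
+	float lifeRemainFraction() const
+	{
+		return (lifeTotal > 0.0f) ? (lifeRemain / lifeTotal) : 0.0f;
+	}
+};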
+
+/**
+ * An emitter will acquire an instance of this class from each IOS actor it instantiates. The IOS
+ * will hold an array of these instances to manage all of its emitters and object ID ranges.
+ */
+class IosInjectorIntl : public ApexActor
+{
+public:
+ /**
+ * An emitter calls createObjects() at the end of its tick/step function to register its newly
+ * spawned objects with the IOS. If the IOS has limits on the number of objects it can spawn each
+ * simulation step, it must provide buffering beneath this API. The IOS must copy this data if it
+ * cannot create the objects within this function call. Note that the IOFX is unaware of the
+	 * object creation path; it discovers spawned objects when they show up with a remaining-life
+	 * value of 1.0. An IOS may have built-in emitters that do not call this API (Turbulence).
+ */
+ virtual void createObjects(uint32_t count, const IosNewObject* createList) = 0;
+
+ /**
+	 * An emitter calls setLODWeights() as often as it needs to adjust the LOD parameters for its
+ * particles.
+ */
+ // distanceWeight minimum squared distance from camera before distance is included in LOD weight
+ // speedWeight minimum velocity parameter. Particles slower than this are culled more aggressively.
+ // lifeWeight lifetime minimum limit. Particles with less lifetime than this remaining will be culled.
+ virtual void setLODWeights(float maxDistance, float distanceWeight, float speedWeight, float lifeWeight, float separationWeight, float bias) = 0;
+
+ /**
+ * When an emitter is being destroyed, it must call this release method on all of its injectors
+ * so those IOS instances can reclaim those ID ranges and destroy any active objects.
+ */
+ virtual void release() = 0;
+
+ virtual PxTaskID getCompletionTaskID() const = 0;
+
+ virtual void setPreferredRenderVolume(nvidia::apex::RenderVolume* volume) = 0;
+
+ /**
+ * Return the value of the least benefit particle to survive last frame's LOD culling.
+ * An emitter can query this value to voluntarily throttle itself. However, to prevent
+ * feedback loops it should always try to emit at least a few particles when it is
+ * throttled.
+ */
+ virtual float getLeastBenefitValue() const = 0;
+
+ virtual uint32_t getSimParticlesCount() const = 0;
+
+ /**
+	 * This injector has particles that could not be inserted at the last simulation
+	 * step because of an insertion limit in the IOS. The emitter may choose to throttle its
+ * emissions when this returns true.
+ */
+ virtual bool isBacklogged() const = 0;
+
+ /**
+ Returns the current number of particles/objects active in the simulation.
+ */
+ virtual uint32_t getActivePaticleCount() const = 0;
+
+ virtual void setObjectScale(float objectScale) = 0;
+
+protected:
+ virtual ~IosInjectorIntl() {}
+};
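+
+// Emitter-side usage sketch (illustrative, not part of the original header): filling one
+// IosNewObject and registering it with an injector at the end of an emitter tick. The function
+// name and the velocity/lifetime values are hypothetical.
+PX_INLINE void exampleEmitterSpawn(IosInjectorIntl& injector, const PxVec3& spawnPos)
+{
+	IosNewObject obj;
+	obj.initialPosition = spawnPos;
+	obj.initialVelocity = PxVec3(0.0f, 1.0f, 0.0f);
+	obj.lifetime = 2.0f;	// in seconds; lodBenefit and iofxActorID are filled in by the injector
+	obj.userData = 0;
+
+	if (!injector.isBacklogged())	// voluntarily throttle when the IOS could not keep up last step
+	{
+		injector.createObjects(1, &obj);
+	}
+}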
+
+
+/**
+ * Base class for all particle simulation systems and other systems that can efficiently simulate
+ * instanced geometry. This is the interface to the IOS Actor (instance).
+ */
+class InstancedObjectSimulationIntl : public ApexActor
+{
+public:
+ /**
+ * An emitter calls allocateInjector() to create an injector targeted at a particular
+ * IOFX Asset. The IOS will allocate an IOFX actor as necessary. The emitter has no
+ * knowledge of the size of the IOFX actor, or how many emitters are also using it.
+ */
+ virtual IosInjectorIntl* allocateInjector(IofxAsset* iofxAsset) = 0;
+
+ /**
+ * Query the authored radius of the instanced objects simulated by this IOS. Emitters need this
+ * value for volume fill effects and an IOFX may need it for rendering purposes.
+ */
+ virtual float getObjectRadius() const = 0;
+
+ /**
+ * Query the authored density of the instanced objects simulated by this IOS. Emitters need this
+ * value for constant density emitter effects.
+ */
+ virtual float getObjectDensity() const = 0;
+
+ /**
+	 * An emitter may use this API function to query particle positions from the most recent simulation step.
+ * This IOS output buffer is updated each frame during fetchResults.
+ */
+ virtual const PxVec3* getRecentPositions(uint32_t& count, uint32_t& stride) const = 0;
+
+ /**
+	 * Sets the origin of the density grid; this is implemented for BasicIOS and ParticleIOS
+ */
+ virtual void setDensityOrigin(const PxVec3& v)
+ {
+ PX_UNUSED(v);
+ }
+
+protected:
+ virtual ~InstancedObjectSimulationIntl() {}
+};
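+
+// Allocation sketch (illustrative, not part of the original header): an emitter typically obtains
+// an injector for a given IOFX asset and releases it when the emitter itself is destroyed.
+//
+//   IosInjectorIntl* injector = ios.allocateInjector(iofxAsset);
+//   injector->setPreferredRenderVolume(volume);
+//   ...
+//   injector->release();	// reclaims the ID range and destroys any active objects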
+
+}
+} // end namespace nvidia::apex
+
+#endif // #ifndef INSTANCED_OBJECT_SIMULATION_INTL_H
diff --git a/APEX_1.4/common/include/IofxManagerIntl.h b/APEX_1.4/common/include/IofxManagerIntl.h
new file mode 100644
index 00000000..4ea3967d
--- /dev/null
+++ b/APEX_1.4/common/include/IofxManagerIntl.h
@@ -0,0 +1,208 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+
+#ifndef IOFX_MANAGER_INTL_H
+#define IOFX_MANAGER_INTL_H
+
+#include "PsArray.h"
+#include "PxVec3.h"
+#include "PxVec4.h"
+#include "PxTaskManager.h"
+
+namespace physx
+{
+ class PxGpuCopyDescQueue;
+}
+
+namespace nvidia
+{
+namespace apex
+{
+class IofxAsset;
+class RenderVolume;
+
+template <class T>
+class ApexMirroredArray;
+
+
+class IofxManagerDescIntl
+{
+public:
+ IofxManagerDescIntl() :
+ iosAssetName(NULL),
+ iosOutputsOnDevice(false),
+ iosSupportsDensity(false),
+ iosSupportsCollision(false),
+ iosSupportsUserData(false),
+ maxObjectCount(0),
+ maxInputCount(0),
+ maxInStateCount(0)
+ {
+ }
+
+ const char* iosAssetName;
+ bool iosOutputsOnDevice;
+ bool iosSupportsDensity;
+ bool iosSupportsCollision;
+ bool iosSupportsUserData;
+ uint32_t maxObjectCount;
+ uint32_t maxInputCount;
+ uint32_t maxInStateCount;
+};
+
+/// The IOFX will update the volumeID each simulation step; the IOS must
+/// persist this output. The IOS provides the initial volumeID, based on the
+/// emitter's preferred volume.
+struct IofxActorIDIntl
+{
+ uint32_t value;
+
+ PX_CUDA_CALLABLE PX_INLINE IofxActorIDIntl() {}
+ PX_CUDA_CALLABLE PX_INLINE explicit IofxActorIDIntl(uint32_t arg)
+ {
+ value = arg;
+ }
+
+ PX_CUDA_CALLABLE PX_INLINE void set(uint16_t volumeID, uint16_t actorClassID)
+ {
+ value = (uint32_t(volumeID) << 16) | uint32_t(actorClassID);
+ }
+
+ PX_CUDA_CALLABLE PX_INLINE uint16_t getVolumeID() const
+ {
+ return uint16_t(value >> 16);
+ }
+ PX_CUDA_CALLABLE PX_INLINE void setVolumeID(uint16_t volumeID)
+ {
+ value &= 0x0000FFFFu;
+ value |= (uint32_t(volumeID) << 16);
+ }
+
+ PX_CUDA_CALLABLE PX_INLINE uint16_t getActorClassID() const
+ {
+ return uint16_t(value & 0xFFFFu);
+ }
+ PX_CUDA_CALLABLE PX_INLINE void setActorClassID(uint16_t actorClassID)
+ {
+ value &= 0xFFFF0000u;
+ value |= uint32_t(actorClassID);
+ }
+
+ static const uint16_t NO_VOLUME = 0xFFFFu;
+ static const uint16_t IPX_ACTOR = 0xFFFFu;
+};
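+
+// Packing illustration (sketch, not part of the original header): set(3, 7) stores
+// (3 << 16) | 7 == 0x00030007, so the two halves can be read back independently.
+PX_CUDA_CALLABLE PX_INLINE bool iofxActorIDPackingExample()
+{
+	IofxActorIDIntl id;
+	id.set(3, 7);
+	return id.getVolumeID() == 3 && id.getActorClassID() == 7;
+}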
+
+
+/* IOFX Manager returned pointers for simulation data */
+class IosBufferDescIntl
+{
+public:
+ /* All arrays are indexed by input ID */
+ ApexMirroredArray<PxVec4>* pmaPositionMass;
+ ApexMirroredArray<PxVec4>* pmaVelocityLife;
+ ApexMirroredArray<PxVec4>* pmaCollisionNormalFlags;
+ ApexMirroredArray<float>* pmaDensity;
+ ApexMirroredArray<IofxActorIDIntl>* pmaActorIdentifiers;
+ ApexMirroredArray<uint32_t>* pmaInStateToInput;
+ ApexMirroredArray<uint32_t>* pmaOutStateToInput;
+
+ ApexMirroredArray<uint32_t>* pmaUserData;
+
+	//! Value in the inStateToInput field that indicates a dead particle; input to the IOFX
+ static const uint32_t NOT_A_PARTICLE = 0xFFFFFFFFu;
+
+	//! Flag in the inStateToInput field that indicates a new particle; input to the IOFX
+ static const uint32_t NEW_PARTICLE_FLAG = 0x80000000u;
+};
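+
+// Decode sketch (illustrative, not part of the original header): how a consumer might interpret
+// one inStateToInput entry using the constants above; the helper name is hypothetical.
+PX_INLINE bool exampleDecodeInState(uint32_t inState, uint32_t& inputID, bool& isNewParticle)
+{
+	if (inState == IosBufferDescIntl::NOT_A_PARTICLE)
+	{
+		return false;	// dead slot, nothing to read
+	}
+	isNewParticle = (inState & IosBufferDescIntl::NEW_PARTICLE_FLAG) != 0;
+	inputID = (inState & ~IosBufferDescIntl::NEW_PARTICLE_FLAG);	// strip the flag to get the input index
+	return true;
+}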
+
+// This is a host-side representation of the CUDA uint4 type
+struct IofxSlice
+{
+ uint32_t x, y, z, w;
+};
+
+typedef void (*EventCallback)(void*);
+
+class IofxManagerCallbackIntl
+{
+public:
+ virtual void operator()(void* stream = NULL) = 0;
+};
+
+class IofxManagerClientIntl
+{
+public:
+ struct Params
+ {
+ float objectScale;
+
+ Params()
+ {
+ setDefaults();
+ }
+
+ void setDefaults()
+ {
+ objectScale = 1.0f;
+ }
+ };
+ virtual void getParams(IofxManagerClientIntl::Params& params) const = 0;
+ virtual void setParams(const IofxManagerClientIntl::Params& params) = 0;
+};
+
+
+class IofxManagerIntl
+{
+public:
+ //! An IOS Actor will call this once, at creation
+ virtual void createSimulationBuffers(IosBufferDescIntl& outDesc) = 0;
+
+ //! An IOS actor will call this once, when it creates its fluid simulation
+ virtual void setSimulationParameters(float radius, const PxVec3& up, float gravity, float restDensity) = 0;
+
+ //! An IOS Actor will call this method after each simulation step
+ virtual void updateEffectsData(float deltaTime, uint32_t numObjects, uint32_t maxInputID, uint32_t maxStateID, void* extraData = 0) = 0;
+
+	//! An IOS Actor will call this method at the start of each step in which the IOFX will run
+ virtual PxTaskID getUpdateEffectsTaskID(PxTaskID) = 0;
+
+ virtual uint16_t getActorClassID(IofxManagerClientIntl* client, uint16_t meshID) = 0;
+
+ virtual IofxManagerClientIntl* createClient(nvidia::apex::IofxAsset* asset, const IofxManagerClientIntl::Params& params) = 0;
+ virtual void releaseClient(IofxManagerClientIntl* client) = 0;
+
+ virtual uint16_t getVolumeID(nvidia::apex::RenderVolume* vol) = 0;
+
+ //! Triggers the IOFX Manager to copy host buffers to the device
+	//! This is intended for use in an IOS post-update task, if the IOS
+	//! needs the output buffers on the device.
+ virtual void outputHostToDevice(PxGpuCopyDescQueue& copyQueue) = 0;
+
+ //! IofxManagerCallbackIntl will be called before Iofx computations
+ virtual void setOnStartCallback(IofxManagerCallbackIntl*) = 0;
+ //! IofxManagerCallbackIntl will be called after Iofx computations
+ virtual void setOnFinishCallback(IofxManagerCallbackIntl*) = 0;
+
+ //! Called when IOS is being deleted
+ virtual void release() = 0;
+
+ //get bounding box
+ virtual PxBounds3 getBounds() const = 0;
+
+protected:
+ virtual ~IofxManagerIntl() {}
+};
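+
+// Per-step call sketch (illustrative, derived from the comments above, not part of the original
+// header):
+//
+//   mgr->createSimulationBuffers(desc);                                   // once, at IOS actor creation
+//   PxTaskID iofxTaskID = mgr->getUpdateEffectsTaskID(postUpdateTaskID);  // each step, before IOFX runs
+//   mgr->updateEffectsData(dt, numObjects, maxInputID, maxStateID);       // each step, after simulation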
+
+
+}
+} // end namespace nvidia::apex
+
+#endif // IOFX_MANAGER_INTL_H
diff --git a/APEX_1.4/common/include/ModuleBase.h b/APEX_1.4/common/include/ModuleBase.h
new file mode 100644
index 00000000..f58f6c1a
--- /dev/null
+++ b/APEX_1.4/common/include/ModuleBase.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+
+#ifndef MODULE_BASE_H
+#define MODULE_BASE_H
+
+#include "ApexResource.h"
+#include "ApexString.h"
+#include "ApexSDKIntl.h"
+#include "PsArray.h"
+
+namespace nvidia
+{
+namespace apex
+{
+
+class ModuleBase : public UserAllocated
+{
+public:
+ ModuleBase();
+ void release();
+
+ const char* getName() const;
+
+ /* Framework internal ModuleIntl class methods */
+ void destroy();
+
+ ApexSDKIntl* mSdk;
+
+protected:
+ ApexSimpleString mName;
+ Module* mApiProxy;
+};
+
+}
+} // end namespace nvidia::apex
+
+#endif // MODULE_BASE_H
diff --git a/APEX_1.4/common/include/ModuleFieldSamplerIntl.h b/APEX_1.4/common/include/ModuleFieldSamplerIntl.h
new file mode 100644
index 00000000..4781fd06
--- /dev/null
+++ b/APEX_1.4/common/include/ModuleFieldSamplerIntl.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+
+#ifndef MODULE_FIELD_SAMPLER_INTL_H
+#define MODULE_FIELD_SAMPLER_INTL_H
+
+#include "ModuleIntl.h"
+
+namespace nvidia
+{
+namespace apex
+{
+
+class FieldSamplerManagerIntl;
+class Scene;
+
+class ModuleFieldSamplerIntl : public ModuleIntl
+{
+public:
+ virtual FieldSamplerManagerIntl* getInternalFieldSamplerManager(const Scene& apexScene) = 0;
+};
+
+}
+} // end namespace nvidia::apex
+
+#endif // MODULE_FIELD_SAMPLER_INTL_H
diff --git a/APEX_1.4/common/include/ModuleIntl.h b/APEX_1.4/common/include/ModuleIntl.h
new file mode 100644
index 00000000..601564bb
--- /dev/null
+++ b/APEX_1.4/common/include/ModuleIntl.h
@@ -0,0 +1,264 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+
+#ifndef MODULE_INTL_H
+#define MODULE_INTL_H
+
+#include "ApexSDK.h"
+
+typedef struct CUgraphicsResource_st* CUgraphicsResource;
+
+// REMOVE OLD REGISTERS/UNREGISTER FACTORY PREPROCESSOR APPROACHES
+// #define PARAM_CLASS_DECLARE_FACTORY(clas) clas ## Factory m ## clas ## Factory;
+// #define PARAM_CLASS_REGISTER_FACTORY(t, clas) t->registerFactory(m ## clas ## Factory);
+// #define PARAM_CLASS_REMOVE_FACTORY(t, clas) t->removeFactory(clas::staticClassName()); clas::freeParameterDefinitionTable(t);
+
+namespace physx
+{
+ namespace pvdsdk
+ {
+ class PvdDataStream;
+ }
+}
+
+namespace nvidia
+{
+namespace apex
+{
+
+class SceneIntl;
+class ModuleSceneIntl;
+class RenderDebugInterface;
+class ApexActor;
+class Actor;
+class ModuleCachedDataIntl;
+struct SceneStats;
+
+/**
+Framework interface to modules for use by ApexScenes and the ApexSDK
+*/
+class ModuleIntl
+{
+public:
+ ModuleIntl(void)
+ {
+ mParent = NULL;
+ mCreateOk = true;
+ }
+ virtual ~ModuleIntl(void)
+ {
+ if ( mParent )
+ {
+ mParent->notifyChildGone(this);
+ }
+ }
+ /**
+ Cause a module to free all of its resources. Only callable from ApexSDK::releaseModule()
+ */
+ virtual void destroy() = 0;
+
+ /**
+ Notification from ApexSDK when it is being destructed and will, therefore, be releasing all modules
+ */
+ virtual void notifyReleaseSDK(void)
+ {
+
+ }
+
+ /**
+ Inits Classes sent to Pvd from this module
+ */
+ virtual void initPvdClasses(pvdsdk::PvdDataStream& /*pvdDataStream*/)
+ {
+ }
+
+ /**
+ Inits Instances when Pvd connects
+ */
+ virtual void initPvdInstances(pvdsdk::PvdDataStream& /*pvdDataStream*/)
+ {
+ }
+
+ /**
+	Called by a newly created Scene to instantiate a ModuleSceneIntl. Can also be
+ called when modules are created after scenes. If your module does
+ not create ApexActors, this function can return NULL.
+
+ The debug render that the scene is to use is also passed.
+ */
+ virtual ModuleSceneIntl* createInternalModuleScene(SceneIntl& apexScene, RenderDebugInterface*) = 0;
+
+ /**
+	Release a ModuleSceneIntl. Only called when an ApexScene has been released.
+ All actors and other resources in the context should be released.
+ */
+ virtual void releaseModuleSceneIntl(ModuleSceneIntl& moduleScene) = 0;
+
+ /**
+ Module can provide a data cache for its objects. It is valid to return NULL.
+ */
+ virtual ModuleCachedDataIntl* getModuleDataCache()
+ {
+ return NULL;
+ }
+
+ /**
+	Returns the number of assets force-loaded by all of the module's loaded assets.
+	The default implementation returns 0 (arguably it should return something conspicuously invalid instead).
+ */
+ virtual uint32_t forceLoadAssets()
+ {
+ return 0;
+ }
+
+ virtual ApexActor* getApexActor(Actor*, AuthObjTypeID) const
+ {
+ return NULL;
+ }
+
+ virtual void setParent(ModuleIntl *parent)
+ {
+ mParent = parent;
+ }
+
+ virtual void notifyChildGone(ModuleIntl *child)
+ {
+ PX_UNUSED(child);
+ }
+
+ void setCreateOk(bool state)
+ {
+ mCreateOk = state;
+ }
+
+ bool isCreateOk(void) const
+ {
+ return mCreateOk;
+ }
+
+ bool mCreateOk;
+ ModuleIntl *mParent;
+};
+
+class ModuleSceneIntl
+{
+public:
+
+ /**
+ ModuleSceneIntl::simulate() is called by ApexScene::simulate() from the context of the
+ APEX API call (typically the main game thread). Context sensitive code should run here.
+ Note that the task manager will be executing tasks while simulate() is running, so it must
+ be thread safe.
+ \param elapsedTime The time passed to the Scene::simulate call
+ */
+ virtual void simulate(float elapsedTime)
+ {
+ PX_UNUSED(elapsedTime);
+ }
+
+ /**
+ \brief If the PhysX scene runs with multiple substeps, modules can request manual substepping
+ */
+ virtual bool needsManualSubstepping() const
+ {
+ return false;
+ }
+
+ virtual void interStep(uint32_t substepNumber, uint32_t maxSubSteps)
+ {
+ PX_UNUSED(substepNumber);
+ PX_UNUSED(maxSubSteps);
+ }
+
+ /**
+ ModuleSceneIntl::submitTasks() is called by ApexScene::simulate() at the start of every
+ simulation step. Each module should submit tasks within this function call, though
+	they are not restricted from submitting tasks later if they need to.
+	\param elapsedTime The time passed to the Scene::simulate call
+	\param substepSize The duration of a single substep
+	\param numSubSteps Will be >1 if manual substepping is turned on, 1 otherwise
+ */
+ virtual void submitTasks(float elapsedTime, float substepSize, uint32_t numSubSteps) = 0;
+
+ /**
+ ModuleSceneIntl::setTaskDependencies() is called by ApexScene::simulate() after every
+ module has had the opportunity to submit their tasks to the task manager. Therefore it
+ is safe to set dependencies in this function based on cross-module TaskID APIs.
+ */
+ virtual void setTaskDependencies() {}
+
+ /**
+ ModuleSceneIntl::fetchResults() is called by ApexScene::fetchResults() from the context of
+ the APEX API call (typically the main game thread). All renderable actors are locked by
+ the scene for the length of this function call.
+ */
+ virtual void fetchResults() = 0;
+
+ virtual void fetchResultsPreRenderLock() {}
+ virtual void fetchResultsPostRenderUnlock() {}
+
+#if PX_PHYSICS_VERSION_MAJOR == 3
+ /**
+ Called by ApexScene when its PxScene reference has changed. Provided pointer can be NULL.
+ */
+ virtual void setModulePhysXScene(PxScene* s) = 0;
+ virtual PxScene* getModulePhysXScene() const = 0;
+#endif // PX_PHYSICS_VERSION_MAJOR == 3
+
+ /**
+ Called by ApexScene when it has been released. The ModuleSceneIntl must call its
+ module's releaseModuleSceneIntl() method.
+ */
+ virtual void release() = 0;
+
+ /**
+ \brief Visualize the module's contents, using the new debug rendering facilities.
+
+ This gets called from Scene::updateRenderResources
+ */
+ virtual void visualize() = 0;
+
+ /**
+ \brief Returns the corresponding Module.
+
+	This allows access to information such as the module name.
+ */
+ virtual Module* getModule() = 0;
+
+ /**
+ \brief Lock render resources according to module scene-defined behavior.
+
+ Returns true iff successful.
+ */
+ virtual bool lockRenderResources() { return false; }
+
+ /**
+ \brief Unlock render resources according to module scene-defined behavior.
+
+ Returns true iff successful.
+ */
+ virtual bool unlockRenderResources() { return false; }
+
+ virtual SceneStats* getStats() = 0;
+
+ /**
+	\brief Return the ApexCudaObj from CudaModuleScene, or NULL for non-CUDA scenes.
+	Should be implemented only by scenes that inherit from CudaModuleScene.
+ */
+ virtual void* getHeadCudaObj()
+ {
+ return NULL;
+ }
+};
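+
+// Per-frame call-order sketch (illustrative, pieced together from the comments above; the exact
+// interleaving is up to ApexScene):
+//
+//   moduleScene->submitTasks(dt, substepSize, numSubSteps);   // from ApexScene::simulate()
+//   moduleScene->setTaskDependencies();                       // after all modules have submitted tasks
+//   moduleScene->simulate(dt);                                // main-thread work while tasks execute
+//   ...
+//   moduleScene->fetchResults();                              // from ApexScene::fetchResults()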
+
+}
+} // end namespace nvidia::apex
+
+#endif // MODULE_INTL_H
diff --git a/APEX_1.4/common/include/ModuleIofxIntl.h b/APEX_1.4/common/include/ModuleIofxIntl.h
new file mode 100644
index 00000000..bb23cb04
--- /dev/null
+++ b/APEX_1.4/common/include/ModuleIofxIntl.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+
+#ifndef MODULE_IOFX_INTL_H
+#define MODULE_IOFX_INTL_H
+
+#include "ModuleIntl.h"
+
+namespace nvidia
+{
+namespace apex
+{
+
+class IofxManagerDescIntl;
+class IofxManagerIntl;
+class Scene;
+
+class ModuleIofxIntl : public ModuleIntl
+{
+public:
+ virtual IofxManagerIntl* createActorManager(const Scene& scene, const nvidia::apex::IofxAsset& asset, const IofxManagerDescIntl& desc) = 0;
+};
+
+}
+} // end namespace nvidia::apex
+
+#endif // MODULE_IOFX_INTL_H
diff --git a/APEX_1.4/common/include/ModuleUpdateLoader.h b/APEX_1.4/common/include/ModuleUpdateLoader.h
new file mode 100644
index 00000000..d64d1eef
--- /dev/null
+++ b/APEX_1.4/common/include/ModuleUpdateLoader.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+
+#ifndef MODULEUPDATELOADER_H
+#define MODULEUPDATELOADER_H
+
+#ifdef WIN32
+
+#if PX_X64
+#define UPDATE_LOADER_DLL_NAME "PhysXUpdateLoader64.dll"
+#else
+#define UPDATE_LOADER_DLL_NAME "PhysXUpdateLoader.dll"
+#endif
+
+// This GUID should change any time we release APEX with a public interface change.
+#if PX_PHYSICS_VERSION_MAJOR == 0
+#define DEFAULT_APP_GUID "165F143C-15CB-47FA-ACE3-2002B3684026"
+#else
+#define DEFAULT_APP_GUID "1AE7180B-79E5-4234-91A7-E387331B5993"
+#endif
+
+//#include "PsWindowsInclude.h"
+#include <windows.h>
+
+class ModuleUpdateLoader
+{
+public:
+ ModuleUpdateLoader(const char* updateLoaderDllName);
+ ~ModuleUpdateLoader();
+
+ // Loads the given module through the update loader. Loads it from the path if
+	// the update loader doesn't find the requested module. Returns NULL if no
+	// module is found.
+ HMODULE loadModule(const char* moduleName, const char* appGuid);
+
+protected:
+ HMODULE mUpdateLoaderDllHandle;
+ FARPROC mGetUpdatedModuleFunc;
+
+ // unit test fixture
+ friend class ModuleUpdateLoaderTest;
+};
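+
+// Usage sketch (illustrative, not part of the original header); the module DLL name below is
+// hypothetical:
+//
+//   ModuleUpdateLoader loader(UPDATE_LOADER_DLL_NAME);
+//   HMODULE module = loader.loadModule("APEX_Legacy_x64.dll", DEFAULT_APP_GUID);
+//   if (module == NULL) { /* the DLL was found neither by the update loader nor on the path */ }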
+
+#endif // WIN32
+#endif // MODULEUPDATELOADER_H
diff --git a/APEX_1.4/common/include/P4Info.h b/APEX_1.4/common/include/P4Info.h
new file mode 100644
index 00000000..16ad7efb
--- /dev/null
+++ b/APEX_1.4/common/include/P4Info.h
@@ -0,0 +1,45 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2015 NVIDIA Corporation. All rights reserved.
+
+#ifndef P4_INFO_H
+#define P4_INFO_H
+
+#define P4_CHANGELIST 1111
+
+#define P4_TOOLS_CHANGELIST 1111
+
+#define P4_APEX_VERSION_STRING "1.4"
+
+#define P4_APEX_BRANCH "trunk"
+
+#define P4_BUILD_TIME "12:20:57, Fri Oct 21, 2016"
+
+#define AUTHOR_DISTRO "empty"
+
+#define REASON_DISTRO "empty"
+
+#endif // P4_INFO_H
diff --git a/APEX_1.4/common/include/PVDParameterizedHandler.h b/APEX_1.4/common/include/PVDParameterizedHandler.h
new file mode 100644
index 00000000..4ac59273
--- /dev/null
+++ b/APEX_1.4/common/include/PVDParameterizedHandler.h
@@ -0,0 +1,113 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+
+#ifndef PVD_PARAMETERIZED_HANDLER
+#define PVD_PARAMETERIZED_HANDLER
+
+#include "ApexUsingNamespace.h"
+#ifndef WITHOUT_PVD
+
+#include "PsUserAllocated.h"
+#include "ApexPvdClient.h"
+
+#include "PsHashSet.h"
+#include "PsHashMap.h"
+
+namespace NvParameterized
+{
+class Definition;
+class Handle;
+}
+
+namespace physx
+{
+namespace pvdsdk
+{
+ struct NamespacedName;
+
+ class PvdDataStream;
+
+ class StructId
+ {
+ public:
+ StructId(void* address, const char* name) :
+ mAddress(address),
+ mName(name)
+ {}
+
+ bool operator<(const StructId& other) const
+ {
+ if (mAddress < other.mAddress)
+ return true;
+ else
+ return (mAddress == other.mAddress) && strcmp(mName, other.mName) < 0;
+ }
+
+ bool operator==(const StructId& other) const
+ {
+ return (mAddress == other.mAddress) && strcmp(mName, other.mName) == 0;
+ }
+
+ operator size_t() const
+ {
+ return (size_t)mAddress;
+ }
+
+ private:
+ void* mAddress;
+ const char* mName;
+ };
+
+ class PvdParameterizedHandler : public nvidia::UserAllocated
+ {
+ public:
+
+ PvdParameterizedHandler(pvdsdk::PvdDataStream& pvdStream) :
+ mPvdStream(&pvdStream)
+ ,mNextStructId(1)
+ {
+ }
+
+ /**
+ \brief Adds properties to the provided pvdClassName and creates classes for Structs that are inside the paramDefinition tree (not for references, though)
+ */
+ void initPvdClasses(const NvParameterized::Definition& paramDefinition, const char* pvdClassName);
+
+ /**
+ \brief Updates the provided pvd instance properties with the values in the provided handle, recursively.
+ pvdAction specifies if only properties are updated, if pvd instances for structs should be created (for initialization) or if they should be destroyed.
+ */
+ void updatePvd(const void* pvdInstance, NvParameterized::Handle& paramsHandle, PvdAction::Enum pvdAction = PvdAction::UPDATE);
+
+ protected:
+
+ bool createClass(const NamespacedName& className);
+ bool getPvdType(const NvParameterized::Definition& def, pvdsdk::NamespacedName& pvdTypeName);
+ size_t getStructId(void* structAddress, const char* structName, bool deleteId);
+ const void* getPvdId(const NvParameterized::Handle& handle, bool deleteId);
+ bool setProperty(const void* pvdInstance, NvParameterized::Handle& propertyHandle, bool isArrayElement, PvdAction::Enum pvdAction);
+
+
+ pvdsdk::PvdDataStream* mPvdStream;
+
+ physx::shdfnd::HashSet<const char*> mCreatedClasses;
+ physx::shdfnd::HashSet<const void*> mInstanceIds;
+
+ size_t mNextStructId;
+		nvidia::HashMap<StructId, size_t> mStructIdMap;
+ };
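+
+	// Usage sketch (illustrative, not part of the original header); paramDefinition, pvdInstance
+	// and paramsHandle stand for objects the caller already has:
+	//
+	//   PvdParameterizedHandler handler(pvdStream);
+	//   handler.initPvdClasses(paramDefinition, "MyPvdClass");
+	//   handler.updatePvd(pvdInstance, paramsHandle);	// defaults to PvdAction::UPDATE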
+
+} // namespace pvdsdk
+}
+
+#endif //WITHOUT_PVD
+
+#endif // #ifndef PVD_PARAMETERIZED_HANDLER
\ No newline at end of file
diff --git a/APEX_1.4/common/include/PhysXObjectDescIntl.h b/APEX_1.4/common/include/PhysXObjectDescIntl.h
new file mode 100644
index 00000000..1fd999d0
--- /dev/null
+++ b/APEX_1.4/common/include/PhysXObjectDescIntl.h
@@ -0,0 +1,116 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+
+#ifndef PHYSX_OBJECT_DESC_INTL_H
+#define PHYSX_OBJECT_DESC_INTL_H
+
+#include "PhysXObjectDesc.h"
+
+namespace nvidia
+{
+namespace apex
+{
+
+/**
+ * Module/Asset interface to actor info structure. This allows the asset to
+ * set the various flags without knowing their implementation.
+ */
+class PhysXObjectDescIntl : public PhysXObjectDesc
+{
+public:
+ void setIgnoreTransform(bool b)
+ {
+ if (b)
+ {
+ mFlags |= TRANSFORM;
+ }
+ else
+ {
+ mFlags &= ~(uint32_t)TRANSFORM;
+ }
+ };
+ void setIgnoreRaycasts(bool b)
+ {
+ if (b)
+ {
+ mFlags |= RAYCASTS;
+ }
+ else
+ {
+ mFlags &= ~(uint32_t)RAYCASTS;
+ }
+ };
+ void setIgnoreContacts(bool b)
+ {
+ if (b)
+ {
+ mFlags |= CONTACTS;
+ }
+ else
+ {
+ mFlags &= ~(uint32_t)CONTACTS;
+ }
+ };
+ void setUserDefinedFlag(uint32_t index, bool b)
+ {
+ if (b)
+ {
+ mFlags |= (1 << index);
+ }
+ else
+ {
+ mFlags &= ~(1 << index);
+ }
+ }
+
+ /**
+ \brief Implementation of pure virtual functions in PhysXObjectDesc, used for external (read-only)
+ access to the Actor list
+ */
+ uint32_t getApexActorCount() const
+ {
+ return mApexActors.size();
+ }
+ const Actor* getApexActor(uint32_t i) const
+ {
+ return mApexActors[i];
+ }
+
+
+ void swap(PhysXObjectDescIntl& rhs)
+ {
+ mApexActors.swap(rhs.mApexActors);
+ nvidia::swap(mPhysXObject, rhs.mPhysXObject);
+
+ nvidia::swap(userData, rhs.userData);
+ nvidia::swap(mFlags, rhs.mFlags);
+ }
+
+ /**
+	\brief Array of pointers to APEX actors associated with this PhysX object
+
+ Pointers may be NULL in cases where the APEX actor has been deleted
+ but PhysX actor cleanup has been deferred
+ */
+ physx::Array<const Actor*> mApexActors;
+
+ /**
+ \brief the PhysX object which uses this descriptor
+ */
+ const void* mPhysXObject;
+protected:
+ virtual ~PhysXObjectDescIntl(void) {}
+};
+
+}
+} // end namespace nvidia::apex
+
+#endif // PHYSX_OBJECT_DESC_INTL_H
diff --git a/APEX_1.4/common/include/ProfilerCallback.h b/APEX_1.4/common/include/ProfilerCallback.h
new file mode 100644
index 00000000..bf36aefe
--- /dev/null
+++ b/APEX_1.4/common/include/ProfilerCallback.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+#ifndef PX_PROFILER_CALLBACK_H
+#define PX_PROFILER_CALLBACK_H
+
+#include "PxProfiler.h"
+
+#endif
+
diff --git a/APEX_1.4/common/include/RandState.h b/APEX_1.4/common/include/RandState.h
new file mode 100644
index 00000000..315adde0
--- /dev/null
+++ b/APEX_1.4/common/include/RandState.h
@@ -0,0 +1,185 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+
+#ifndef RAND_STATE_H
+#define RAND_STATE_H
+
+// This is shared by legacy IOFX and shaders
+
+namespace nvidia
+{
+namespace apex
+{
+
+struct LCG_PRNG
+{
+ unsigned int a, c;
+
+ PX_CUDA_CALLABLE PX_INLINE LCG_PRNG()
+ {
+ }
+ PX_CUDA_CALLABLE PX_INLINE LCG_PRNG(unsigned int a, unsigned int c)
+ {
+ this->a = a;
+ this->c = c;
+ }
+
+ static PX_CUDA_CALLABLE PX_INLINE LCG_PRNG getIdentity()
+ {
+ return LCG_PRNG(1, 0);
+ }
+
+ static PX_CUDA_CALLABLE PX_INLINE LCG_PRNG getDefault()
+ {
+ return LCG_PRNG(1103515245u, 12345u);
+ }
+
+ PX_CUDA_CALLABLE PX_INLINE LCG_PRNG& operator *= (const LCG_PRNG& rhs)
+ {
+ a *= rhs.a;
+ c *= rhs.a; c += rhs.c;
+ return *this;
+ }
+
+ PX_CUDA_CALLABLE PX_INLINE LCG_PRNG leapFrog(unsigned int leap) const
+ {
+ LCG_PRNG ret = getIdentity();
+ for (unsigned int i = 0; i < leap; ++i)
+ {
+ ret *= (*this);
+ }
+ return ret;
+ }
+
+ PX_CUDA_CALLABLE PX_INLINE unsigned int operator()(unsigned int x) const
+ {
+ return x * a + c;
+ }
+};
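+
+// Composition sketch (illustrative, not part of the original header): operator*= composes two
+// affine steps so that (p *= q) applied to x equals q(p(x)); leapFrog(n) therefore yields an LCG
+// that advances the state by n single steps at once.
+PX_CUDA_CALLABLE PX_INLINE bool lcgLeapFrogExample()
+{
+	const LCG_PRNG step = LCG_PRNG::getDefault();
+	const LCG_PRNG leap3 = step.leapFrog(3);
+	const unsigned int x = 12345u;
+	return leap3(x) == step(step(step(x)));	// same state as three single steps
+}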
+
+struct RandState
+{
+ explicit PX_CUDA_CALLABLE PX_INLINE RandState(unsigned int seed)
+ {
+ curr = seed;
+ }
+
+ PX_CUDA_CALLABLE PX_INLINE unsigned int next()
+ {
+ return (curr = LCG_PRNG::getDefault()(curr));
+ }
+
+	PX_CUDA_CALLABLE PX_INLINE float nextFloat()
+	{
+		// scale by 2^-32 to map the 32-bit state into [0, 1)
+		return float(next()) * 0.00000000023283064365386962890625f;
+	}
+ PX_CUDA_CALLABLE PX_INLINE float nextFloat(float min, float max)
+ {
+ return min + nextFloat() * (max - min);
+ }
+
+private:
+ unsigned int curr;
+};
+
+// For CUDA PRNG
+struct PRNGInfo
+{
+ unsigned int* g_stateSpawnSeed;
+ nvidia::LCG_PRNG* g_randBlock;
+ unsigned int seed;
+ nvidia::LCG_PRNG randThread;
+ nvidia::LCG_PRNG randGrid;
+};
+
+// For CUDA PRNG: device part
+#ifdef __CUDACC__
+//*
+#define RAND_SCAN_OP(ofs) \
+ { \
+ unsigned int a = aData[scanIdx], c = cData[scanIdx]; \
+ unsigned int aOfs = aData[scanIdx - ofs], cOfs = cData[scanIdx - ofs]; \
+ aData[scanIdx] = a * aOfs; \
+ cData[scanIdx] = c * aOfs + cOfs; \
+ }
+/*/
+//THIS CODE CRASHES ON CUDA 5.0.35
+#define RAND_SCAN_OP(ofs) \
+ { \
+ nvidia::LCG_PRNG val(aData[scanIdx], cData[scanIdx]); \
+ nvidia::LCG_PRNG valOfs(aData[scanIdx - ofs], cData[scanIdx - ofs]); \
+ val *= valOfs; \
+ aData[scanIdx] = val.a; cData[scanIdx] = val.c; \
+ }
+//*/
+PX_INLINE __device__ void randScanWarp(unsigned int scanIdx, volatile unsigned int* aData, volatile unsigned int* cData)
+{
+ RAND_SCAN_OP(1);
+ RAND_SCAN_OP(2);
+ RAND_SCAN_OP(4);
+ RAND_SCAN_OP(8);
+ RAND_SCAN_OP(16);
+}
+
+PX_INLINE __device__ nvidia::LCG_PRNG randScanBlock(nvidia::LCG_PRNG val, volatile unsigned int* aData, volatile unsigned int* cData)
+{
+ const unsigned int idx = threadIdx.x;
+ const unsigned int idxInWarp = idx & (WARP_SIZE-1);
+ const unsigned int warpIdx = (idx >> LOG2_WARP_SIZE);
+
+ //setup scan
+ unsigned int scanIdx = (warpIdx << (LOG2_WARP_SIZE + 1)) + idxInWarp;
+ //write identity
+ aData[scanIdx] = 1;
+ cData[scanIdx] = 0;
+
+ scanIdx += WARP_SIZE;
+ //write value
+ aData[scanIdx] = val.a;
+ cData[scanIdx] = val.c;
+
+ randScanWarp(scanIdx, aData, cData);
+
+ //read value
+ val.a = aData[scanIdx];
+ val.c = cData[scanIdx];
+
+ __syncthreads();
+
+ if (idxInWarp == WARP_SIZE-1)
+ {
+ const unsigned int idxWrite = warpIdx + WARP_SIZE;
+ aData[idxWrite] = val.a;
+ cData[idxWrite] = val.c;
+ }
+ __syncthreads();
+
+ if (warpIdx == 0)
+ {
+ randScanWarp(scanIdx, aData, cData);
+ }
+ __syncthreads();
+
+ if (warpIdx > 0)
+ {
+ const unsigned int idxRead = warpIdx + WARP_SIZE - 1;
+ const nvidia::LCG_PRNG valWarp(aData[idxRead], cData[idxRead]);
+ val *= valWarp;
+ }
+ return val;
+}
+
+#endif
+
+}
+} // nvidia::apex::
+
+#endif // RAND_STATE_H
\ No newline at end of file
diff --git a/APEX_1.4/common/include/RandStateHelpers.h b/APEX_1.4/common/include/RandStateHelpers.h
new file mode 100644
index 00000000..afd79801
--- /dev/null
+++ b/APEX_1.4/common/include/RandStateHelpers.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+
+#ifndef RAND_STATE_HELPERS_H
+#define RAND_STATE_HELPERS_H
+
+#include "PxTask.h"
+#include "ApexMirroredArray.h"
+
+namespace nvidia
+{
+namespace apex
+{
+
+// For CUDA PRNG: host part
+PX_INLINE void InitDevicePRNGs(
+ SceneIntl& scene,
+ unsigned int blockSize,
+ LCG_PRNG& threadLeap,
+ LCG_PRNG& gridLeap,
+ ApexMirroredArray<LCG_PRNG>& blockPRNGs)
+{
+ threadLeap = LCG_PRNG::getDefault().leapFrog(16);
+
+ LCG_PRNG randBlock = LCG_PRNG::getIdentity();
+ LCG_PRNG randBlockLeap = threadLeap.leapFrog(blockSize);
+
+ const uint32_t numBlocks = 32; //Max Multiprocessor count
+ blockPRNGs.setSize(numBlocks, ApexMirroredPlace::CPU_GPU);
+ for (uint32_t i = 0; i < numBlocks; ++i)
+ {
+ blockPRNGs[i] = randBlock;
+ randBlock *= randBlockLeap;
+ }
+ gridLeap = randBlock;
+
+ {
+ PxTaskManager* tm = scene.getTaskManager();
+ PxCudaContextManager* ctx = tm->getGpuDispatcher()->getCudaContextManager();
+
+ PxScopedCudaLock s(*ctx);
+
+ PxGpuCopyDesc desc;
+ blockPRNGs.copyHostToDeviceDesc(desc, 0, 0);
+ tm->getGpuDispatcher()->launchCopyKernel(&desc, 1, 0);
+ }
+}
+
+}
+} // nvidia::apex::
+
+#endif
diff --git a/APEX_1.4/common/include/ReadCheck.h b/APEX_1.4/common/include/ReadCheck.h
new file mode 100644
index 00000000..21529ae5
--- /dev/null
+++ b/APEX_1.4/common/include/ReadCheck.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+
+#ifndef READ_CHECK_H
+#define READ_CHECK_H
+
+#include "PxSimpleTypes.h"
+#include "ApexUsingNamespace.h"
+
+namespace nvidia
+{
+namespace apex
+{
+
+class ApexRWLockable;
+
+// RAII wrapper around the Scene::apexStartRead() method. Note that this
+// object does not acquire any scene locks; it is an error-checking mechanism only.
+class ReadCheck
+{
+public:
+ ReadCheck(const ApexRWLockable* scene, const char* functionName);
+ ~ReadCheck();
+
+private:
+ const ApexRWLockable* mLockable;
+ const char* mName;
+ uint32_t mErrorCount;
+};
+
+#if (PX_DEBUG || PX_CHECKED)
+	// Creates a scoped read-check object that detects whether the appropriate scene locks
+	// have been acquired and checks whether reads and writes overlap. This macro should typically
+	// be placed at the beginning of any const API method that is not multi-thread safe.
+	// The error conditions checked can be summarized as:
+
+ // 1. Other threads were already writing, or began writing during the object lifetime
+ #define READ_ZONE() ReadCheck __readCheck(static_cast<const ApexRWLockable*>(this), __FUNCTION__);
+#else
+ #define READ_ZONE()
+#endif
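+
+// Usage sketch (illustrative, not part of the original header), assuming the class derives from
+// ApexRWLockable so the static_cast inside READ_ZONE() is valid:
+//
+//   float ExampleActorImpl::getRadius() const
+//   {
+//       READ_ZONE();	// flags overlapping writes from other threads in debug/checked builds
+//       return mRadius;
+//   }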
+
+}
+}
+
+#endif // READ_CHECK_H
diff --git a/APEX_1.4/common/include/RenderAPIIntl.h b/APEX_1.4/common/include/RenderAPIIntl.h
new file mode 100644
index 00000000..481712be
--- /dev/null
+++ b/APEX_1.4/common/include/RenderAPIIntl.h
@@ -0,0 +1,515 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+
+#ifndef RENDER_API_INTL_H
+#define RENDER_API_INTL_H
+
+#include "PsArray.h"
+#include "PsMutex.h"
+#include "UserRenderCallback.h"
+#if APEX_CUDA_SUPPORT
+#include <cuda.h>
+#endif
+
+namespace nvidia
+{
+namespace apex
+{
+
+struct RenderEntityMapStateIntl
+{
+ enum Enum
+ {
+ UNMAPPED = 0,
+ PENDING_MAP,
+ MAPPED,
+ PENDING_UNMAP
+ };
+};
+
+class RenderStorageStateIntl
+{
+public:
+ PX_INLINE bool isMapped() const { return (mMapFlags & MAPPED) != 0; }
+
+ virtual bool map(UserRenderStorage* renderStorage, RenderMapType::Enum mapType) = 0;
+ virtual void unmap(UserRenderStorage* renderStorage) = 0;
+
+#if APEX_CUDA_SUPPORT
+ PX_INLINE bool isCudaMapped() const { return (mMapFlags & CUDA_MAPPED) != 0; }
+ PX_INLINE void setCudaMapped() { mMapFlags |= CUDA_MAPPED; }
+ PX_INLINE void resetCudaMapped() { mMapFlags &= ~CUDA_MAPPED; }
+
+ virtual CUgraphicsResource getCudaHandle(UserRenderStorage* renderStorage) = 0;
+ virtual bool getCudaMapppedResult(UserRenderStorage* renderStorage) = 0;
+
+ PX_INLINE bool checkCudaHandle() const { return mInteropHandle != NULL; }
+ PX_INLINE void resetCudaHandle() { mInteropHandle = NULL; }
+#endif
+
+protected:
+ RenderStorageStateIntl()
+ : mMapFlags(0)
+#if APEX_CUDA_SUPPORT
+ , mInteropHandle(NULL)
+#endif
+ {
+ }
+
+ enum MapFlag
+ {
+ MAPPED = 0x01,
+#if APEX_CUDA_SUPPORT
+ CUDA_MAPPED = 0x02,
+#endif
+ };
+ uint32_t mMapFlags;
+
+#if APEX_CUDA_SUPPORT
+ CUgraphicsResource mInteropHandle;
+#endif
+};
+
+class RenderBufferStateIntl : public RenderStorageStateIntl
+{
+public:
+ RenderBufferStateIntl()
+ : mMappedPtr(NULL)
+#if APEX_CUDA_SUPPORT
+ , mMappedCudaPtr(NULL)
+#endif
+ , mMapOffset(0)
+ , mMapSize(SIZE_MAX)
+ {
+ }
+
+ PX_INLINE void* getMappedPtr() const
+ {
+ return mMappedPtr;
+ }
+
+ PX_INLINE void setMapRange(size_t offset, size_t size)
+ {
+ mMapOffset = offset;
+ mMapSize = size;
+ }
+
+ virtual bool map(UserRenderStorage* renderStorage, RenderMapType::Enum mapType)
+ {
+ if ((mMapFlags & MAPPED) == 0)
+ {
+ PX_ASSERT(renderStorage->getType() == UserRenderStorage::BUFFER);
+ UserRenderBuffer* renderBuffer = static_cast<UserRenderBuffer*>(renderStorage);
+ mMappedPtr = renderBuffer->map(mapType, mMapOffset, mMapSize);
+ if (mMappedPtr != NULL)
+ {
+ mMapFlags += MAPPED;
+ }
+ }
+ return (mMapFlags & MAPPED) != 0;
+ }
+ virtual void unmap(UserRenderStorage* renderStorage)
+ {
+ if ((mMapFlags & MAPPED) != 0)
+ {
+ PX_ASSERT(renderStorage->getType() == UserRenderStorage::BUFFER);
+ UserRenderBuffer* renderBuffer = static_cast<UserRenderBuffer*>(renderStorage);
+ renderBuffer->unmap();
+ mMappedPtr = NULL;
+ mMapFlags -= MAPPED;
+ //reset map range
+ mMapOffset = 0;
+ mMapSize = SIZE_MAX;
+ }
+ }
+
+#if APEX_CUDA_SUPPORT
+ PX_INLINE CUdeviceptr getMappedCudaPtr() const
+ {
+ return mMappedCudaPtr;
+ }
+
+ virtual CUgraphicsResource getCudaHandle(UserRenderStorage* renderStorage)
+ {
+ if (mInteropHandle == NULL)
+ {
+ PX_ASSERT(renderStorage->getType() == UserRenderStorage::BUFFER);
+ UserRenderBuffer* renderBuffer = static_cast<UserRenderBuffer*>(renderStorage);
+ renderBuffer->getCUDAgraphicsResource(mInteropHandle);
+ }
+ return mInteropHandle;
+ }
+
+ virtual bool getCudaMapppedResult(UserRenderStorage* renderStorage)
+ {
+ PX_UNUSED(renderStorage);
+ if (mInteropHandle != NULL)
+ {
+ size_t size = 0;
+ if (cuGraphicsResourceGetMappedPointer(&mMappedCudaPtr, &size, mInteropHandle) == CUDA_SUCCESS)
+ {
+ return true;
+ }
+ mMappedCudaPtr = NULL;
+ }
+ return false;
+ }
+#endif
+
+private:
+ void* mMappedPtr;
+#if APEX_CUDA_SUPPORT
+ CUdeviceptr mMappedCudaPtr;
+#endif
+ size_t mMapOffset;
+ size_t mMapSize;
+};
+
+class RenderSurfaceStateIntl : public RenderStorageStateIntl
+{
+public:
+ RenderSurfaceStateIntl()
+#if APEX_CUDA_SUPPORT
+ : mMappedCudaArray(NULL)
+#endif
+ {
+ }
+
+ PX_INLINE const UserRenderSurface::MappedInfo& getMappedInfo() const
+ {
+ return mMappedInfo;
+ }
+
+ virtual bool map(UserRenderStorage* renderStorage, RenderMapType::Enum mapType)
+ {
+ if ((mMapFlags & MAPPED) == 0)
+ {
+ PX_ASSERT(renderStorage->getType() == UserRenderStorage::SURFACE);
+ UserRenderSurface* renderSurface = static_cast<UserRenderSurface*>(renderStorage);
+ if (renderSurface->map(mapType, mMappedInfo))
+ {
+ mMapFlags += MAPPED;
+ }
+ }
+ return (mMapFlags & MAPPED) != 0;
+ }
+ virtual void unmap(UserRenderStorage* renderStorage)
+ {
+ if ((mMapFlags & MAPPED) != 0)
+ {
+ PX_ASSERT(renderStorage->getType() == UserRenderStorage::SURFACE);
+ UserRenderSurface* renderSurface = static_cast<UserRenderSurface*>(renderStorage);
+ renderSurface->unmap();
+ mMapFlags -= MAPPED;
+ }
+ }
+#if APEX_CUDA_SUPPORT
+ PX_INLINE CUarray getMappedCudaArray() const
+ {
+ return mMappedCudaArray;
+ }
+
+ virtual CUgraphicsResource getCudaHandle(UserRenderStorage* renderStorage)
+ {
+ if (mInteropHandle == NULL)
+ {
+ PX_ASSERT(renderStorage->getType() == UserRenderStorage::SURFACE);
+ UserRenderSurface* renderSurface = static_cast<UserRenderSurface*>(renderStorage);
+ renderSurface->getCUDAgraphicsResource(mInteropHandle);
+ }
+ return mInteropHandle;
+ }
+
+ virtual bool getCudaMapppedResult(UserRenderStorage* renderStorage)
+ {
+ PX_UNUSED(renderStorage);
+ if (mInteropHandle != NULL)
+ {
+ //TODO: cuGraphicsSubResourceGetMappedArray arrayIndex & mipLevel???
+ if (cuGraphicsSubResourceGetMappedArray(&mMappedCudaArray, mInteropHandle, 0, 0) == CUDA_SUCCESS)
+ {
+ return true;
+ }
+ mMappedCudaArray = NULL;
+ }
+ return false;
+ }
+#endif
+
+private:
+ UserRenderSurface::MappedInfo mMappedInfo;
+#if APEX_CUDA_SUPPORT
+ CUarray mMappedCudaArray;
+#endif
+};
+
+class RenderEntityIntl : public UserAllocated
+{
+public:
+ virtual ~RenderEntityIntl() { PX_ASSERT(mRefCount == 0); }
+
+ virtual void free() = 0;
+
+
+ virtual void map() = 0;
+ virtual void unmap() = 0;
+
+#if APEX_CUDA_SUPPORT
+ virtual void fillMapUnmapArraysForInterop(nvidia::Array<CUgraphicsResource> &toMapArray, nvidia::Array<CUgraphicsResource> &toUnmapArray) = 0;
+ virtual void mapBufferResultsForInterop(bool mapSuccess, bool unmapSuccess) = 0;
+#endif
+
+ //
+ virtual void release()
+ {
+ bool triggerDelete = false;
+ mRefCountLock.lock();
+ if (mRefCount > 0)
+ {
+ triggerDelete = (--mRefCount) == 0;
+ }
+ mRefCountLock.unlock();
+ if (triggerDelete)
+ {
+ destroy();
+ }
+ }
+
+ // Returns this if successful, NULL otherwise
+ RenderEntityIntl* incrementReferenceCount()
+ {
+ RenderEntityIntl* returnValue = NULL;
+ mRefCountLock.lock();
+ if (mRefCount > 0)
+ {
+ ++mRefCount;
+ returnValue = this;
+ }
+ mRefCountLock.unlock();
+ return returnValue;
+ }
+
+ PX_INLINE int32_t getReferenceCount() const
+ {
+ return mRefCount;
+ }
+
+protected:
+ RenderEntityIntl()
+ : mRefCount(1) // Ref count initialized to 1, assuming that whoever calls this constructor will store a reference
+ {
+ }
+
+ virtual void destroy()
+ {
+ PX_DELETE(this);
+ }
+
+ volatile int32_t mRefCount;
+ physx::shdfnd::Mutex mRefCountLock;
+
+
+private:
+ RenderEntityIntl& operator=(const RenderEntityIntl&);
+};
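+
+// Reference-count sketch (illustrative, not part of the original header): incrementReferenceCount()
+// only succeeds while the count is still positive, and release() destroys the entity once the last
+// reference is dropped.
+//
+//   RenderEntityIntl* shared = entity->incrementReferenceCount();	// NULL if already fully released
+//   ...
+//   if (shared != NULL)
+//   {
+//       shared->release();
+//   }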
+
+template <typename Impl, typename Base>
+class RenderEntityIntlImpl : public Base
+{
+protected:
+ PX_INLINE const Impl* getImpl() const { return static_cast<const Impl*>(this); }
+ PX_INLINE Impl* getImpl() { return static_cast<Impl*>(this); }
+
+public:
+ virtual ~RenderEntityIntlImpl()
+ {
+ getImpl()->freeAllRenderStorage();
+ }
+
+
+ virtual void free()
+ {
+ getImpl()->freeAllRenderStorage();
+ mMapState = RenderEntityMapStateIntl::UNMAPPED;
+ }
+
+ virtual void map()
+ {
+ if (mMapState == RenderEntityMapStateIntl::UNMAPPED)
+ {
+ if (mInteropFlags == RenderInteropFlags::CUDA_INTEROP)
+ {
+ mMapState = RenderEntityMapStateIntl::PENDING_MAP;
+ return;
+ }
+
+ bool result = false;
+ if (getImpl()->getRenderStorageCount() > 0)
+ {
+ //map render resources
+ result = true;
+ for (uint32_t i = 0; result && i < getImpl()->getRenderStorageCount(); ++i)
+ {
+ UserRenderStorage* renderStorage;
+ RenderInteropFlags::Enum interopFlags;
+ RenderStorageStateIntl& renderStorageState = getImpl()->getRenderStorage(i, renderStorage, interopFlags);
+ if (renderStorage)
+ {
+ getImpl()->onMapRenderStorage(i);
+ result &= renderStorageState.map(renderStorage, RenderMapType::MAP_WRITE_DISCARD);
+ }
+ }
+ if (!result)
+ {
+ //unmap in case of failure
+ unmapRenderResources();
+ }
+ }
+ if (result)
+ {
+ mMapState = RenderEntityMapStateIntl::MAPPED;
+ }
+ }
+ }
+
+ virtual void unmap()
+ {
+ if (mMapState == RenderEntityMapStateIntl::MAPPED)
+ {
+ if (mInteropFlags == RenderInteropFlags::CUDA_INTEROP)
+ {
+ mMapState = RenderEntityMapStateIntl::PENDING_UNMAP;
+ return;
+ }
+
+ unmapRenderResources();
+
+ mMapState = RenderEntityMapStateIntl::UNMAPPED;
+ }
+ }
+
+#if APEX_CUDA_SUPPORT
+ virtual void fillMapUnmapArraysForInterop(physx::Array<CUgraphicsResource> &toMapArray, physx::Array<CUgraphicsResource> &toUnmapArray)
+ {
+ if (mMapState == RenderEntityMapStateIntl::PENDING_MAP || mMapState == RenderEntityMapStateIntl::PENDING_UNMAP)
+ {
+ for (uint32_t i = 0; i < getImpl()->getRenderStorageCount(); ++i)
+ {
+ UserRenderStorage* renderStorage;
+ RenderInteropFlags::Enum interopFlags;
+ RenderStorageStateIntl& renderStorageState = getImpl()->getRenderStorage(i, renderStorage, interopFlags);
+ if (renderStorage && interopFlags == RenderInteropFlags::CUDA_INTEROP)
+ {
+ CUgraphicsResource interopHandle = renderStorageState.getCudaHandle(renderStorage);
+ if (interopHandle != NULL)
+ {
+ if (mMapState == RenderEntityMapStateIntl::PENDING_MAP && !renderStorageState.isCudaMapped())
+ {
+ toMapArray.pushBack(interopHandle);
+ }
+ if (mMapState == RenderEntityMapStateIntl::PENDING_UNMAP && renderStorageState.isCudaMapped())
+ {
+ toUnmapArray.pushBack(interopHandle);
+ }
+ }
+ }
+ }
+ }
+ }
+
+ virtual void mapBufferResultsForInterop(bool mapSuccess, bool unmapSuccess)
+ {
+ PX_UNUSED(unmapSuccess);
+ if (mMapState == RenderEntityMapStateIntl::PENDING_MAP || mMapState == RenderEntityMapStateIntl::PENDING_UNMAP)
+ {
+ bool result = false;
+ if (getImpl()->getRenderStorageCount() > 0)
+ {
+ //map render resources
+ result = true;
+ for (uint32_t i = 0; i < getImpl()->getRenderStorageCount(); ++i)
+ {
+ UserRenderStorage* renderStorage;
+ RenderInteropFlags::Enum interopFlags;
+ RenderStorageStateIntl& renderStorageState = getImpl()->getRenderStorage(i, renderStorage, interopFlags);
+ if (renderStorage)
+ {
+ bool mappedResult = false;
+ if (renderStorageState.checkCudaHandle())
+ {
+ if (mMapState == RenderEntityMapStateIntl::PENDING_MAP && mapSuccess)
+ {
+ renderStorageState.setCudaMapped();
+ mappedResult = renderStorageState.getCudaMapppedResult(renderStorage);
+ }
+ if (mMapState == RenderEntityMapStateIntl::PENDING_UNMAP && unmapSuccess)
+ {
+ renderStorageState.resetCudaMapped();
+ }
+ renderStorageState.resetCudaHandle();
+ }
+ if (mMapState == RenderEntityMapStateIntl::PENDING_MAP && !mappedResult)
+ {
+ //fall back to CPU mapping
+ if (result)
+ {
+ getImpl()->onMapRenderStorage(i);
+ result &= renderStorageState.map(renderStorage, RenderMapType::MAP_WRITE_DISCARD);
+ }
+ }
+ }
+ }
+ }
+
+ if (result && mMapState == RenderEntityMapStateIntl::PENDING_MAP)
+ {
+ mMapState = RenderEntityMapStateIntl::MAPPED;
+ }
+ else
+ {
+ unmapRenderResources();
+
+ mMapState = RenderEntityMapStateIntl::UNMAPPED;
+ }
+ }
+ }
+#endif
+
+protected:
+ RenderEntityIntlImpl(RenderInteropFlags::Enum interopFlags)
+ : mInteropFlags(interopFlags)
+ , mMapState(RenderEntityMapStateIntl::UNMAPPED)
+ {
+ }
+
+ void unmapRenderResources()
+ {
+ for (uint32_t i = 0; i < getImpl()->getRenderStorageCount(); ++i)
+ {
+ UserRenderStorage* renderStorage;
+ RenderInteropFlags::Enum interopFlags;
+ RenderStorageStateIntl& renderStorageState = getImpl()->getRenderStorage(i, renderStorage, interopFlags);
+ if (renderStorage)
+ {
+ renderStorageState.unmap(renderStorage);
+ }
+ }
+ }
+
+ RenderInteropFlags::Enum mInteropFlags;
+ RenderEntityMapStateIntl::Enum mMapState;
+};
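+
+// Interop flow sketch (illustrative, derived from the implementation above): with CUDA_INTEROP,
+// map() only moves the entity to PENDING_MAP; the owning scene then gathers interop handles via
+// fillMapUnmapArraysForInterop(), performs the CUDA graphics map/unmap for all handles in one
+// batch, and reports the outcome through mapBufferResultsForInterop(), which either records the
+// CUDA mapping or falls back to CPU mapping of the render storage.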
+
+
+}
+} // end namespace nvidia::apex
+
+#endif // #ifndef RENDER_API_INTL_H
diff --git a/APEX_1.4/common/include/RenderMeshAssetIntl.h b/APEX_1.4/common/include/RenderMeshAssetIntl.h
new file mode 100644
index 00000000..74c23923
--- /dev/null
+++ b/APEX_1.4/common/include/RenderMeshAssetIntl.h
@@ -0,0 +1,124 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+
+#ifndef RENDER_MESH_ASSET_INTL_H
+#define RENDER_MESH_ASSET_INTL_H
+
+#include "ApexUsingNamespace.h"
+#include "RenderMeshActor.h"
+#include "RenderMeshAsset.h"
+#include "PsArray.h"
+#include "PxMat44.h"
+
+
+/**
+\brief Describes how bones are to be assigned to render resources.
+*/
+struct RenderMeshActorSkinningMode
+{
+ enum Enum
+ {
+ Default, // Currently the same as OneBonePerPart
+ OneBonePerPart, // Used by destruction, default behavior
+ AllBonesPerPart, // All bones are written to each render resource, up to the maximum bones per material given by UserRenderResourceManager::getMaxBonesForMaterial
+
+ Count
+ };
+};
+
+
+namespace nvidia
+{
+namespace apex
+{
+
+// Forward declarations
+class DebugRenderParams;
+class RenderDebugInterface;
+
+class VertexBufferIntl : public VertexBuffer
+{
+public:
+ virtual ~VertexBufferIntl() {}
+ virtual VertexFormat& getFormatWritable() = 0;
+ virtual void build(const VertexFormat& format, uint32_t vertexCount) = 0;
+ virtual void applyTransformation(const PxMat44& transformation) = 0;
+ virtual void applyScale(float scale) = 0;
+ virtual bool mergeBinormalsIntoTangents() = 0;
+};
+
+class RenderSubmeshIntl : public RenderSubmesh
+{
+public:
+ virtual ~RenderSubmeshIntl() {}
+
+ virtual VertexBufferIntl& getVertexBufferWritable() = 0;
+ virtual uint32_t* getIndexBufferWritable(uint32_t partIndex) = 0;
+ virtual void applyPermutation(const Array<uint32_t>& old2new, const Array<uint32_t>& new2old) = 0;
+};
+
+
+/**
+* Framework interface to ApexRenderMesh for use by modules
+*/
+class RenderMeshAssetIntl : public RenderMeshAsset
+{
+public:
+ virtual RenderSubmeshIntl& getInternalSubmesh(uint32_t submeshIndex) = 0;
+ virtual void permuteBoneIndices(const physx::Array<int32_t>& old2new) = 0;
+ virtual void applyTransformation(const PxMat44& transformation, float scale) = 0;
+ virtual void reverseWinding() = 0;
+ virtual void applyScale(float scale) = 0;
+ virtual bool mergeBinormalsIntoTangents() = 0;
+ virtual void setOwnerModuleId(AuthObjTypeID id) = 0;
+ virtual TextureUVOrigin::Enum getTextureUVOrigin() const = 0;
+
+};
+
+class RenderMeshAssetAuthoringIntl : public RenderMeshAssetAuthoring
+{
+public:
+ virtual RenderSubmeshIntl& getInternalSubmesh(uint32_t submeshIndex) = 0;
+ virtual void permuteBoneIndices(const physx::Array<int32_t>& old2new) = 0;
+ virtual void applyTransformation(const PxMat44& transformation, float scale) = 0;
+ virtual void reverseWinding() = 0;
+ virtual void applyScale(float scale) = 0;
+};
+
+
+class RenderMeshActorIntl : public RenderMeshActor
+{
+public:
+ virtual void updateRenderResources(bool useBones, bool rewriteBuffers, void* userRenderData) = 0;
+
+ // add a buffer that will replace the dynamic buffer for the submesh
+ virtual void addVertexBuffer(uint32_t submeshIndex, bool alwaysDirty, PxVec3* position, PxVec3* normal, PxVec4* tangents) = 0;
+ virtual void removeVertexBuffer(uint32_t submeshIndex) = 0;
+
+ virtual void setStaticPositionReplacement(uint32_t submeshIndex, const PxVec3* staticPositions) = 0;
+ virtual void setStaticColorReplacement(uint32_t submeshIndex, const ColorRGBA* staticColors) = 0;
+
+ virtual void visualize(RenderDebugInterface& batcher, nvidia::apex::DebugRenderParams* debugParams, PxMat33* scaledRotations = NULL, PxVec3* translations = NULL, uint32_t stride = 0, uint32_t numberOfTransforms = 0) const = 0;
+
+ virtual void dispatchRenderResources(UserRenderer& renderer, const PxMat44& globalPose) = 0;
+
+ // Access to previous frame's transforms (if the buffer exists)
+ virtual void setLastFrameTM(const PxMat44& tm, uint32_t boneIndex = 0) = 0;
+ virtual void setLastFrameTM(const PxMat44& tm, const PxVec3& scale, uint32_t boneIndex = 0) = 0;
+
+ virtual void setSkinningMode(RenderMeshActorSkinningMode::Enum mode) = 0;
+ virtual RenderMeshActorSkinningMode::Enum getSkinningMode() const = 0;
+};
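+
+// A minimal usage sketch (illustrative only, not part of this interface; the name
+// 'actor' and the choice of AllBonesPerPart are assumptions about typical module-side code).
+#if 0
+void exampleSetSkinning(RenderMeshActorIntl* actor)
+{
+	// Write all bones to each render resource, up to the per-material bone limit.
+	actor->setSkinningMode(RenderMeshActorSkinningMode::AllBonesPerPart);
+	bool allBones = (actor->getSkinningMode() == RenderMeshActorSkinningMode::AllBonesPerPart);
+	PX_UNUSED(allBones);
+}
+#endif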
+
+} // end namespace apex
+} // end namespace nvidia
+
+#endif // RENDER_MESH_ASSET_INTL_H
diff --git a/APEX_1.4/common/include/ResourceProviderIntl.h b/APEX_1.4/common/include/ResourceProviderIntl.h
new file mode 100644
index 00000000..d4179f83
--- /dev/null
+++ b/APEX_1.4/common/include/ResourceProviderIntl.h
@@ -0,0 +1,165 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+
+#ifndef RESOURCE_PROVIDER_INTL_H
+#define RESOURCE_PROVIDER_INTL_H
+
+#include "ResourceProvider.h"
+#include "ApexString.h"
+#include "PsUserAllocated.h"
+
+/**
+Framework/Module interface to named resource provider
+*/
+
+namespace nvidia
+{
+namespace apex
+{
+
+typedef uint32_t ResID;
+
+const ResID INVALID_RESOURCE_ID = ResID(-1);
+
+/* Used for storing asset name/resID pairs */
+class AssetNameIDMapping : public UserAllocated
+{
+public:
+ AssetNameIDMapping()
+ {}
+
+ AssetNameIDMapping(const char* inAssetName, const char* inIosAssetTypeName, ResID inResID):
+ assetName(inAssetName),
+ iosAssetTypeName(inIosAssetTypeName),
+ resID(inResID),
+ isOpaqueMesh(false)
+ {}
+
+ AssetNameIDMapping(const char* inAssetName, const char* inIosAssetTypeName):
+ assetName(inAssetName),
+ iosAssetTypeName(inIosAssetTypeName),
+ resID(INVALID_RESOURCE_ID),
+ isOpaqueMesh(false)
+ {}
+
+ AssetNameIDMapping(const char* inAssetName, ResID inResID):
+ assetName(inAssetName),
+ iosAssetTypeName(""),
+ resID(inResID),
+ isOpaqueMesh(false)
+ {}
+
+ AssetNameIDMapping(const char* inAssetName, bool _isOpaqueMesh):
+ assetName(inAssetName),
+ iosAssetTypeName(""),
+ resID(INVALID_RESOURCE_ID),
+ isOpaqueMesh(_isOpaqueMesh)
+ {}
+
+ void setIsOpaqueMesh(bool state)
+ {
+ isOpaqueMesh = state;
+ }
+
+ ApexSimpleString assetName;
+ ApexSimpleString iosAssetTypeName;
+ ResID resID;
+ bool isOpaqueMesh;
+};
+
+
+class ResourceProviderIntl : public ResourceProvider
+{
+public:
+ /**
+ The Apex framework and modules can create name spaces in which unique names can be stored.
+	The user-facing setResource() interface can also implicitly create a new name space when a
+	resource is set in one; in that case this function returns the existing ID.
+ The calling code must store this handle and call releaseResource() when appropriate.
+ releaseAtExit determines whether the NRP will call releaseResource() on items in this
+ namespace when the ApexSDK exits.
+ */
+ virtual ResID createNameSpace(const char* nameSpace, bool releaseAtExit = true) = 0;
+
+ /**
+ The Apex Authorable Object needs a way to tell the resource provider that the resource
+ value is no longer set and the app's callback should be used once again if the name
+ is queried
+ */
+ virtual void setResource(const char* nameSpace, const char* name, void* resource, bool valueIsSet, bool incRefCount = false) = 0;
+
+ /**
+ The Apex framework and modules should use this function to release their reference to a named
+ resource when they no longer use it. If the named resource's reference count reaches zero,
+ ResourceCallback::releaseResource() will be called.
+ */
+ virtual void releaseResource(ResID id) = 0;
+
+ /**
+ Create a named resource inside a specific name space. Returns a resource ID which must be
+ stored by the calling module or framework code.
+ */
+ virtual ResID createResource(ResID nameSpace, const char* name, bool refCount = true) = 0;
+
+ /**
+	Returns true if the named resource has been assigned a pointer value.
+ */
+ virtual bool checkResource(ResID nameSpace, const char* name) = 0;
+
+ /**
+	Returns true if the resource with the given ID has been assigned a pointer value.
+ */
+ virtual bool checkResource(ResID id) = 0;
+
+ /**
+ Modifies name parameter such that it is unique in its namespace
+ */
+ virtual void generateUniqueName(ResID nameSpace, ApexSimpleString& name) = 0;
+
+ /**
+ Retrieve the named resource pointer, which has been provided by the user interface. If the
+ named resource has never been set by the user API and the request callback has been specified,
+ the callback will be called to provide the void*. The user callback will be called only once
+ per named resource.
+ */
+ virtual void* getResource(ResID id) = 0;
+
+ /**
+ Retrieve the named resource name.
+ */
+ virtual const char* getResourceName(ResID id) = 0;
+
+ /**
+ Given a namespace name, this method will fill in all of the resource IDs registered
+ in the namespace. 'inCount' contains the total space allocated for the 'outResIDs' list.
+ 'outCount' will contain how many IDs are in the 'outResIDs' list. If 'inCount' is not
+ large enough, the method will return false
+	large enough, the method will return false.
+ virtual bool getResourceIDs(const char* nameSpace, ResID* outResIDs, uint32_t& outCount, uint32_t inCount) = 0;
+
+ /**
+	\brief Returns whether the resource provider is operating in case-sensitive mode.
+
+	\note By default the resource provider is NOT case sensitive.
+ */
+ virtual bool isCaseSensitive() = 0;
+
+ /**
+ Retrieve the named resource name space.
+ */
+ virtual const char* getResourceNameSpace(ResID id) = 0;
+
+};
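+
+// A minimal usage sketch of the name-space / resource workflow described above
+// (illustrative only; 'nrp' and the name strings are assumptions, not part of this header).
+#if 0
+void exampleResourceLookup(ResourceProviderIntl* nrp)
+{
+	// Create (or fetch) a name space, then a named resource inside it.
+	ResID nameSpaceId = nrp->createNameSpace("MyModuleNameSpace");
+	ResID resourceId  = nrp->createResource(nameSpaceId, "MyAssetName");
+
+	// getResource() may invoke the application's request callback the first time.
+	void* resource = nrp->getResource(resourceId);
+	PX_UNUSED(resource);
+
+	// Release the reference when the module no longer needs it.
+	nrp->releaseResource(resourceId);
+}
+#endif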
+
+} // namespace apex
+} // namespace nvidia
+
+#endif // RESOURCE_PROVIDER_INTL_H
diff --git a/APEX_1.4/common/include/SceneIntl.h b/APEX_1.4/common/include/SceneIntl.h
new file mode 100644
index 00000000..7f5a5077
--- /dev/null
+++ b/APEX_1.4/common/include/SceneIntl.h
@@ -0,0 +1,133 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+
+#ifndef SCENE_INTL_H
+#define SCENE_INTL_H
+
+#define APEX_CHECK_STAT_TIMER(name)// { PX_PROFILE_ZONE(name, GetInternalApexSDK()->getContextId()); }
+
+#include "Scene.h"
+#include "ApexUsingNamespace.h"
+
+#if PX_PHYSICS_VERSION_MAJOR == 3
+// PH: prevent PxScene.h from including PxPhysX.h; otherwise it pulls in so many files that it breaks the clothing embedded branch
+#define PX_PHYSICS_NX_PHYSICS
+#include "PxScene.h"
+#undef PX_PHYSICS_NX_PHYSICS
+#endif
+
+namespace nvidia
+{
+namespace apex
+{
+
+class ModuleSceneIntl;
+class ApexContext;
+class RenderDebugInterface;
+class PhysX3Interface;
+class ApexCudaTestManager;
+
+/**
+ * Framework interface to ApexScenes for use by modules
+ */
+class SceneIntl : public Scene
+{
+public:
+ /**
+	When a module has been released by the end user, the module must release
+	its ModuleSceneIntl objects and notify those Scenes that their module
+	scenes no longer exist.
+ */
+ virtual void moduleReleased(ModuleSceneIntl& moduleScene) = 0;
+
+#if PX_PHYSICS_VERSION_MAJOR == 3
+	virtual void lockRead(const char *fileName,uint32_t lineno) = 0;
+ virtual void lockWrite(const char *fileName,uint32_t lineno) = 0;
+ virtual void unlockRead() = 0;
+ virtual void unlockWrite() = 0;
+
+ virtual void addModuleUserNotifier(physx::PxSimulationEventCallback& notify) = 0;
+ virtual void removeModuleUserNotifier(physx::PxSimulationEventCallback& notify) = 0;
+ virtual void addModuleUserContactModify(physx::PxContactModifyCallback& notify) = 0;
+ virtual void removeModuleUserContactModify(physx::PxContactModifyCallback& notify) = 0;
+ virtual PhysX3Interface* getApexPhysX3Interface() const = 0;
+#endif
+
+ virtual ApexContext* getApexContext() = 0;
+ virtual float getElapsedTime() const = 0;
+
+ /* Get total elapsed simulation time, in integer milliseconds */
+ virtual uint32_t getTotalElapsedMS() const = 0;
+
+ virtual bool isSimulating() const = 0;
+ virtual bool physXElapsedTime(float& dt) const = 0;
+
+ virtual float getPhysXSimulateTime() const = 0;
+
+ virtual bool isFinalStep() const = 0;
+
+ virtual uint32_t getSeed() = 0; // Not necessarily const
+
+ enum ApexStatsDataEnum
+ {
+ NumberOfActors,
+ NumberOfShapes,
+ NumberOfAwakeShapes,
+ NumberOfCpuShapePairs,
+ ApexBeforeTickTime,
+ ApexDuringTickTime,
+ ApexPostTickTime,
+ PhysXSimulationTime,
+ ClothingSimulationTime,
+ ParticleSimulationTime,
+ TurbulenceSimulationTime,
+ PhysXFetchResultTime,
+ UserDelayedFetchTime,
+ RbThroughput,
+ SimulatedSpriteParticlesCount,
+ SimulatedMeshParticlesCount,
+ VisibleDestructibleChunkCount,
+ DynamicDestructibleChunkIslandCount,
+
+ // insert new items before this line
+ NumberOfApexStats // The number of stats
+ };
+
+ virtual void setApexStatValue(int32_t index, StatValue dataVal) = 0;
+
+#if APEX_CUDA_SUPPORT
+ virtual ApexCudaTestManager& getApexCudaTestManager() = 0;
+ virtual bool isUsingCuda() const = 0;
+#endif
+ virtual ModuleSceneIntl* getInternalModuleScene(const char* moduleName) = 0;
+};
+
+/* ApexScene task names */
+#define APEX_DURING_TICK_TIMING_FIX 1
+
+#define AST_PHYSX_SIMULATE "ApexScene::PhysXSimulate"
+#define AST_PHYSX_BETWEEN_STEPS "ApexScene::PhysXBetweenSteps"
+
+#if APEX_DURING_TICK_TIMING_FIX
+# define AST_DURING_TICK_COMPLETE "ApexScene::DuringTickComplete"
+#endif
+
+#define AST_PHYSX_CHECK_RESULTS "ApexScene::CheckResults"
+#define AST_PHYSX_FETCH_RESULTS "ApexScene::FetchResults"
+
+
+
+
+}
+} // end namespace nvidia::apex
+
+
+#endif // SCENE_INTL_H
diff --git a/APEX_1.4/common/include/SimplexNoise.h b/APEX_1.4/common/include/SimplexNoise.h
new file mode 100644
index 00000000..352c542b
--- /dev/null
+++ b/APEX_1.4/common/include/SimplexNoise.h
@@ -0,0 +1,132 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+
+#ifndef SIMPLEX_NOISE_H
+#define SIMPLEX_NOISE_H
+
+#include "PxVec4.h"
+
+namespace nvidia
+{
+namespace apex
+{
+
+class SimplexNoise
+{
+ static const int X_NOISE_GEN = 1619;
+ static const int Y_NOISE_GEN = 31337;
+ static const int Z_NOISE_GEN = 6971;
+ static const int W_NOISE_GEN = 1999;
+ static const int SEED_NOISE_GEN = 1013;
+ static const int SHIFT_NOISE_GEN = 8;
+
+ PX_CUDA_CALLABLE static PX_INLINE int fastfloor(float x)
+ {
+ return (x >= 0) ? (int)x : (int)(x - 1);
+ }
+
+public:
+
+ // 4D simplex noise
+ // returns: (x,y,z) = noise grad, w = noise value
+ PX_CUDA_CALLABLE static physx::PxVec4 eval4D(float x, float y, float z, float w, int seed)
+ {
+ // The skewing and unskewing factors are hairy again for the 4D case
+ const float F4 = (physx::PxSqrt(5.0f) - 1.0f) / 4.0f;
+ const float G4 = (5.0f - physx::PxSqrt(5.0f)) / 20.0f;
+ // Skew the (x,y,z,w) space to determine which cell of 24 simplices we're in
+ float s = (x + y + z + w) * F4; // Factor for 4D skewing
+ int ix = fastfloor(x + s);
+ int iy = fastfloor(y + s);
+ int iz = fastfloor(z + s);
+ int iw = fastfloor(w + s);
+ float t = (ix + iy + iz + iw) * G4; // Factor for 4D unskewing
+ // Unskew the cell origin back to (x,y,z,w) space
+ float x0 = x - (ix - t); // The x,y,z,w distances from the cell origin
+ float y0 = y - (iy - t);
+ float z0 = z - (iz - t);
+ float w0 = w - (iw - t);
+
+ int c = (x0 > y0) ? (1 << 0) : (1 << 2);
+ c += (x0 > z0) ? (1 << 0) : (1 << 4);
+ c += (x0 > w0) ? (1 << 0) : (1 << 6);
+ c += (y0 > z0) ? (1 << 2) : (1 << 4);
+ c += (y0 > w0) ? (1 << 2) : (1 << 6);
+ c += (z0 > w0) ? (1 << 4) : (1 << 6);
+
+ physx::PxVec4 res;
+ res.setZero();
+
+ // Calculate the contribution from the five corners
+#ifdef __CUDACC__
+#pragma unroll
+#endif
+ for (int p = 4; p >= 0; --p)
+ {
+ int ixp = ((c >> 0) & 3) >= p ? 1 : 0;
+ int iyp = ((c >> 2) & 3) >= p ? 1 : 0;
+ int izp = ((c >> 4) & 3) >= p ? 1 : 0;
+ int iwp = ((c >> 6) & 3) >= p ? 1 : 0;
+
+ float xp = x0 - ixp + (4 - p) * G4;
+ float yp = y0 - iyp + (4 - p) * G4;
+ float zp = z0 - izp + (4 - p) * G4;
+ float wp = w0 - iwp + (4 - p) * G4;
+
+ float t = 0.6f - xp * xp - yp * yp - zp * zp - wp * wp;
+ if (t > 0)
+ {
+ //get index
+ int gradIndex = int((
+ X_NOISE_GEN * (ix + ixp)
+ + Y_NOISE_GEN * (iy + iyp)
+ + Z_NOISE_GEN * (iz + izp)
+ + W_NOISE_GEN * (iw + iwp)
+ + SEED_NOISE_GEN * seed)
+ & 0xffffffff);
+ gradIndex ^= (gradIndex >> SHIFT_NOISE_GEN);
+ gradIndex &= 31;
+
+ physx::PxVec4 g;
+ {
+ const int h = gradIndex;
+ const int hs = 2 - (h >> 4);
+ const int h1 = (h >> 3);
+ g.x = (h1 == 0) ? 0.0f : ((h & 4) ? -1.0f : 1.0f);
+ g.y = (h1 == 1) ? 0.0f : ((h & (hs << 1)) ? -1.0f : 1.0f);
+ g.z = (h1 == 2) ? 0.0f : ((h & hs) ? -1.0f : 1.0f);
+ g.w = (h1 == 3) ? 0.0f : ((h & 1) ? -1.0f : 1.0f);
+ }
+ float gdot = (g.x * xp + g.y * yp + g.z * zp + g.w * wp);
+
+ float t2 = t * t;
+ float t3 = t2 * t;
+ float t4 = t3 * t;
+
+ float dt4gdot = 8 * t3 * gdot;
+
+ res.x += t4 * g.x - dt4gdot * xp;
+ res.y += t4 * g.y - dt4gdot * yp;
+ res.z += t4 * g.z - dt4gdot * zp;
+ res.w += t4 * gdot;
+ }
+ }
+ // scale the result to cover the range [-1,1]
+ res *= 27;
+ return res;
+ }
+};
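+
+// A minimal usage sketch (illustrative only; the sample coordinates and variable names
+// are assumptions). eval4D() packs the analytic gradient in (x,y,z) and the noise value in w.
+#if 0
+void exampleSampleNoise()
+{
+	const int seed = 42;
+	physx::PxVec4 n = SimplexNoise::eval4D(1.0f, 2.0f, 3.0f, 0.5f, seed);
+	physx::PxVec3 gradient(n.x, n.y, n.z);	// spatial gradient of the noise field
+	float value = n.w;						// noise value, roughly in [-1,1]
+	PX_UNUSED(gradient);
+	PX_UNUSED(value);
+}
+#endif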
+
+
+}
+} // namespace nvidia::apex
+
+#endif
diff --git a/APEX_1.4/common/include/Spline.h b/APEX_1.4/common/include/Spline.h
new file mode 100644
index 00000000..39e3bf60
--- /dev/null
+++ b/APEX_1.4/common/include/Spline.h
@@ -0,0 +1,168 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+
+#ifndef SPLINE_H
+
+#define SPLINE_H
+
+
+/** @file spline.h
+ * @brief A utility class to manage 3d spline curves.
+ *
+ * This is used heavily by the terrain terraforming tools for roads, lakes, and flatten operations.
+ *
+ * @author John W. Ratcliff
+*/
+
+/** @file spline.cpp
+ * @brief A utility class to manage 3d spline curves.
+ *
+ * This is used heavily by the terrain terraforming tools for roads, lakes, and flatten operations.
+ *
+ * @author John W. Ratcliff
+*/
+
+
+#include "PsArray.h"
+#include "PsUserAllocated.h"
+#include "PxVec3.h"
+#include "ApexUsingNamespace.h"
+
+class SplineNode
+{
+public:
+ float GetY(void) const
+ {
+ return y;
+ };
+ float x; // time/distance x-axis component.
+ float y; // y component.
+ float u;
+ float p;
+};
+
+typedef nvidia::Array< SplineNode > SplineNodeVector;
+typedef nvidia::Array< physx::PxVec3 > PxVec3Vector;
+typedef nvidia::Array< uint32_t > uint32_tVector;
+
+class Spline : public nvidia::UserAllocated
+{
+public:
+ void Reserve(int32_t size)
+ {
+ mNodes.reserve((uint32_t)size);
+ };
+ void AddNode(float x,float y);
+ void ComputeSpline(void);
+ float Evaluate(float x,uint32_t &index,float &fraction) const; // evaluate Y component based on X
+ int32_t GetSize(void) const
+ {
+ return (int32_t)mNodes.size();
+ }
+ float GetEntry(int32_t i) const
+ {
+ return mNodes[(uint32_t)i].GetY();
+ };
+ void Clear(void)
+ {
+ mNodes.clear();
+ };
+private:
+ SplineNodeVector mNodes; // nodes.
+};
+
+class SplineCurve : public nvidia::UserAllocated
+{
+public:
+ void Reserve(int32_t size)
+ {
+ mXaxis.Reserve(size);
+ mYaxis.Reserve(size);
+ mZaxis.Reserve(size);
+ };
+
+ void setControlPoints(const PxVec3Vector &points)
+ {
+ Clear();
+ Reserve( (int32_t)points.size() );
+ for (uint32_t i=0; i<points.size(); i++)
+ {
+ AddControlPoint(points[i]);
+ }
+ ComputeSpline();
+ }
+
+	float AddControlPoint(const physx::PxVec3& p); // add control point; time is computed from the distance along the curve.
+ void AddControlPoint(const physx::PxVec3& p,float t); // add control point.
+
+ void GetPointOnSpline(float t,physx::PxVec3 &pos)
+ {
+ float d = t*mTime;
+ uint32_t index;
+ float fraction;
+ pos = Evaluate(d,index,fraction);
+ }
+
+ physx::PxVec3 Evaluate(float dist,uint32_t &index,float &fraction);
+
+ float GetLength(void) { return mTime; }; //total length of spline
+
+ int32_t GetSize(void) { return mXaxis.GetSize(); };
+
+ physx::PxVec3 GetEntry(int32_t i);
+
+ void ComputeSpline(void); // compute spline.
+
+ void Clear(void)
+ {
+ mXaxis.Clear();
+ mYaxis.Clear();
+ mZaxis.Clear();
+ mTime = 0;
+ };
+
+ float Set(const PxVec3Vector &vlist)
+ {
+ Clear();
+ int32_t count = (int32_t)vlist.size();
+ Reserve(count);
+ for (uint32_t i=0; i<vlist.size(); i++)
+ {
+ AddControlPoint( vlist[i] );
+ }
+ ComputeSpline();
+ return mTime;
+ };
+
+ void ResampleControlPoints(const PxVec3Vector &inputpoints,
+ PxVec3Vector &outputpoints,
+ uint32_tVector &outputIndex,
+ float dtime)
+ {
+ float length = Set(inputpoints);
+ for (float l=0; l<=length; l+=dtime)
+ {
+ uint32_t index;
+ float fraction;
+ physx::PxVec3 pos = Evaluate(l,index,fraction);
+ outputpoints.pushBack(pos);
+ outputIndex.pushBack(index);
+ }
+ };
+
+private:
+ float mTime; // time/distance traveled.
+ Spline mXaxis;
+ Spline mYaxis;
+ Spline mZaxis;
+};
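+
+// A minimal usage sketch (illustrative only; the control-point values are assumptions):
+// fit a curve through a few control points and sample it at the halfway point.
+#if 0
+void exampleSampleCurve()
+{
+	PxVec3Vector points;
+	points.pushBack(physx::PxVec3(0.0f, 0.0f, 0.0f));
+	points.pushBack(physx::PxVec3(1.0f, 2.0f, 0.0f));
+	points.pushBack(physx::PxVec3(2.0f, 0.0f, 1.0f));
+
+	SplineCurve curve;
+	curve.setControlPoints(points);	// computes the spline and its total length
+
+	physx::PxVec3 pos;
+	curve.GetPointOnSpline(0.5f, pos);	// t in [0,1] along the curve
+}
+#endif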
+
+#endif
diff --git a/APEX_1.4/common/include/TableLookup.h b/APEX_1.4/common/include/TableLookup.h
new file mode 100644
index 00000000..9b32d68e
--- /dev/null
+++ b/APEX_1.4/common/include/TableLookup.h
@@ -0,0 +1,154 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+
+#ifndef TABLE_LOOKUP_H
+#define TABLE_LOOKUP_H
+
+namespace nvidia
+{
+namespace apex
+{
+
+// Stored Tables
+const float custom[] = { 1.00f, 1.00f, 1.00f, 1.00f, 1.00f, 1.00f, 1.00f, 1.00f, 1.00f, 1.00f, 1.00f };
+const float linear[] = { 1.00f, 0.90f, 0.80f, 0.70f, 0.60f, 0.50f, 0.40f, 0.30f, 0.20f, 0.10f, 0.00f };
+const float steep[] = { 1.00f, 0.99f, 0.96f, 0.91f, 0.84f, 0.75f, 0.64f, 0.51f, 0.36f, 0.19f, 0.00f };
+const float scurve[] = { 1.00f, 0.99f, 0.96f, 0.91f, 0.80f, 0.50f, 0.20f, 0.09f, 0.04f, 0.01f, 0.00f };
+
+// Table sizes must be the same for all stored tables
+#define NUM_ELEMENTS(X) (sizeof(X)/sizeof(*(X)))
+#define TABLE_SIZE NUM_ELEMENTS(custom)
+
+// Stored Table enums
+struct TableName
+{
+ enum Enum
+ {
+ CUSTOM = 0,
+ LINEAR,
+ STEEP,
+ SCURVE,
+ };
+};
+
+struct TableLookup
+{
+ float xVals[TABLE_SIZE];
+ float yVals[TABLE_SIZE];
+ float x1;
+ float x2;
+ float multiplier;
+
+#ifdef __CUDACC__
+ __device__ TableLookup() {}
+#else
+ TableLookup():
+ x1(0),
+ x2(0),
+ multiplier(0)
+ {
+ zeroTable();
+ }
+#endif
+
+ PX_CUDA_CALLABLE void zeroTable()
+ {
+ for (size_t i = 0; i < TABLE_SIZE; ++i)
+ {
+ xVals[i] = 0.0f;
+ yVals[i] = 0.0f;
+ }
+ }
+
+ PX_CUDA_CALLABLE void applyStoredTable(TableName::Enum tableName)
+ {
+ // build y values
+ for (size_t i = 0; i < TABLE_SIZE; ++i)
+ {
+ if (tableName == TableName::LINEAR)
+ {
+ yVals[i] = linear[i];
+ }
+ else if (tableName == TableName::STEEP)
+ {
+ yVals[i] = steep[i];
+ }
+ else if (tableName == TableName::SCURVE)
+ {
+ yVals[i] = scurve[i];
+ }
+ else if (tableName == TableName::CUSTOM)
+ {
+ yVals[i] = custom[i];
+ }
+ }
+ }
+
+ PX_CUDA_CALLABLE void buildTable()
+ {
+ // build x values
+ float interval = (x2 - x1) / (TABLE_SIZE - 1);
+ for (size_t i = 0; i < TABLE_SIZE; ++i)
+ {
+ xVals[i] = x1 + i * interval;
+ }
+
+		// apply multiplier to y values
+ if (multiplier >= -1.0f && multiplier <= 1.0f)
+ {
+ for (size_t i = 0; i < TABLE_SIZE; ++i)
+ {
+ yVals[i] = yVals[i] * multiplier;
+ }
+
+ // offset = max y value in array
+ float max = yVals[0];
+ for (size_t i = 1; i < TABLE_SIZE; ++i)
+ {
+ if (yVals[i] > max)
+ {
+ max = yVals[i];
+ }
+ }
+
+ // apply offset
+ for (size_t i = 0; i < TABLE_SIZE; ++i)
+ {
+ yVals[i] = yVals[i] + (1.0f - max);
+ }
+ }
+ }
+
+ PX_CUDA_CALLABLE float lookupTableValue(float x) const
+ {
+ if (x <= xVals[0])
+ {
+ return yVals[0];
+ }
+ else if (x >= xVals[TABLE_SIZE-1])
+ {
+ return yVals[TABLE_SIZE-1];
+ }
+ else
+ {
+ // linear interpolation between x values
+ float interval = (xVals[TABLE_SIZE-1] - xVals[0]) / (TABLE_SIZE - 1);
+ uint32_t lerpPos = (uint32_t)((x - xVals[0]) / interval);
+ float yDiff = yVals[lerpPos+1] - yVals[lerpPos];
+ return yVals[lerpPos] + (x - xVals[lerpPos]) / interval * yDiff;
+ }
+ }
+};
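+
+// A minimal usage sketch (illustrative only; the chosen range and multiplier are assumptions):
+// build a lookup over [0,10] from the stored LINEAR table and interpolate a value.
+#if 0
+void exampleTableLookup()
+{
+	TableLookup table;
+	table.x1 = 0.0f;
+	table.x2 = 10.0f;
+	table.multiplier = 1.0f;
+	table.applyStoredTable(TableName::LINEAR);	// fill yVals from the stored table
+	table.buildTable();							// fill xVals, apply multiplier and offset
+	float y = table.lookupTableValue(2.5f);		// linear interpolation between samples
+	PX_UNUSED(y);
+}
+#endif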
+
+}
+} // namespace nvidia::apex
+
+#endif
diff --git a/APEX_1.4/common/include/WriteCheck.h b/APEX_1.4/common/include/WriteCheck.h
new file mode 100644
index 00000000..23710813
--- /dev/null
+++ b/APEX_1.4/common/include/WriteCheck.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+
+#ifndef WRITE_CHECK_H
+#define WRITE_CHECK_H
+
+#include "PxSimpleTypes.h"
+#include "ApexUsingNamespace.h"
+
+namespace nvidia
+{
+namespace apex
+{
+
+class ApexRWLockable;
+
+// RAII wrapper around the Scene::startWrite() method. Note that this
+// object does not acquire any scene locks; it is an error-checking mechanism only.
+class WriteCheck
+{
+public:
+ WriteCheck(ApexRWLockable* scene, const char* functionName, bool allowReentry=true);
+ ~WriteCheck();
+
+private:
+ ApexRWLockable* mLockable;
+ const char* mName;
+ bool mAllowReentry;
+ uint32_t mErrorCount;
+};
+
+#if PX_DEBUG || PX_CHECKED
+	// Creates a scoped write check object that detects whether appropriate scene locks
+	// have been acquired and checks whether reads/writes overlap. This macro should typically
+	// be placed at the beginning of any non-const API method that is not multi-thread safe.
+	// By default, re-entrant write calls by the same thread are allowed. The error conditions
+	// checked can be summarized as:
+
+ // 1. Other threads were already reading, or began reading during the object lifetime
+ // 2. Other threads were already writing, or began writing during the object lifetime
+ #define WRITE_ZONE() WriteCheck __writeCheck(static_cast<ApexRWLockable*>(this), __FUNCTION__);
+
+	// Creates a scoped write check object that disallows re-entrant writes. This is used by
+	// the Scene::simulate method to detect when callbacks make write calls to the API.
+ #define WRITE_ZONE_NOREENTRY() WriteCheck __writeCheck(static_cast<ApexRWLockable*>(this), __FUNCTION__, false);
+#else
+ #define WRITE_ZONE()
+ #define WRITE_ZONE_NOREENTRY()
+#endif
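+
+// A minimal usage sketch (illustrative only; 'MyActor' and 'setValue' are assumptions):
+// place WRITE_ZONE() at the top of a non-const, non-thread-safe API method.
+#if 0
+class MyActor : public ApexRWLockable
+{
+public:
+	void setValue(float v)
+	{
+		WRITE_ZONE();	// flags overlapping reads/writes in debug and checked builds
+		mValue = v;
+	}
+private:
+	float mValue;
+};
+#endif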
+
+}
+}
+
+#endif // WRITE_CHECK_H
diff --git a/APEX_1.4/common/include/autogen/ConvexHullParameters.h b/APEX_1.4/common/include/autogen/ConvexHullParameters.h
new file mode 100644
index 00000000..ba369ddd
--- /dev/null
+++ b/APEX_1.4/common/include/autogen/ConvexHullParameters.h
@@ -0,0 +1,276 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2015 NVIDIA Corporation. All rights reserved.
+
+// This file was generated by NvParameterized/scripts/GenParameterized.pl
+
+
+#ifndef HEADER_ConvexHullParameters_h
+#define HEADER_ConvexHullParameters_h
+
+#include "NvParametersTypes.h"
+
+#ifndef NV_PARAMETERIZED_ONLY_LAYOUTS
+#include "nvparameterized/NvParameterized.h"
+#include "nvparameterized/NvParameterizedTraits.h"
+#include "NvParameters.h"
+#include "NvTraitsInternal.h"
+#endif
+
+namespace nvidia
+{
+namespace apex
+{
+
+#if PX_VC
+#pragma warning(push)
+#pragma warning(disable: 4324) // structure was padded due to __declspec(align())
+#endif
+
+namespace ConvexHullParametersNS
+{
+
+struct Plane_Type;
+
+struct VEC3_DynamicArray1D_Type
+{
+ physx::PxVec3* buf;
+ bool isAllocated;
+ int32_t elementSize;
+ int32_t arraySizes[1];
+};
+
+struct Plane_DynamicArray1D_Type
+{
+ Plane_Type* buf;
+ bool isAllocated;
+ int32_t elementSize;
+ int32_t arraySizes[1];
+};
+
+struct F32_DynamicArray1D_Type
+{
+ float* buf;
+ bool isAllocated;
+ int32_t elementSize;
+ int32_t arraySizes[1];
+};
+
+struct U32_DynamicArray1D_Type
+{
+ uint32_t* buf;
+ bool isAllocated;
+ int32_t elementSize;
+ int32_t arraySizes[1];
+};
+
+struct Plane_Type
+{
+ physx::PxVec3 normal;
+ float d;
+};
+
+struct ParametersStruct
+{
+
+ VEC3_DynamicArray1D_Type vertices;
+ Plane_DynamicArray1D_Type uniquePlanes;
+ F32_DynamicArray1D_Type widths;
+ U32_DynamicArray1D_Type edges;
+ U32_DynamicArray1D_Type adjacentFaces;
+ physx::PxBounds3 bounds;
+ float volume;
+ uint32_t uniqueEdgeDirectionCount;
+ uint32_t planeCount;
+
+};
+
+static const uint32_t checksum[] = { 0x8a50b89e, 0xda2f896b, 0x82da2553, 0x3b609a3d, };
+
+} // namespace ConvexHullParametersNS
+
+#ifndef NV_PARAMETERIZED_ONLY_LAYOUTS
+class ConvexHullParameters : public NvParameterized::NvParameters, public ConvexHullParametersNS::ParametersStruct
+{
+public:
+ ConvexHullParameters(NvParameterized::Traits* traits, void* buf = 0, int32_t* refCount = 0);
+
+ virtual ~ConvexHullParameters();
+
+ virtual void destroy();
+
+ static const char* staticClassName(void)
+ {
+ return("ConvexHullParameters");
+ }
+
+ const char* className(void) const
+ {
+ return(staticClassName());
+ }
+
+ static const uint32_t ClassVersion = ((uint32_t)0 << 16) + (uint32_t)1;
+
+ static uint32_t staticVersion(void)
+ {
+ return ClassVersion;
+ }
+
+ uint32_t version(void) const
+ {
+ return(staticVersion());
+ }
+
+ static const uint32_t ClassAlignment = 8;
+
+ static const uint32_t* staticChecksum(uint32_t& bits)
+ {
+ bits = 8 * sizeof(ConvexHullParametersNS::checksum);
+ return ConvexHullParametersNS::checksum;
+ }
+
+ static void freeParameterDefinitionTable(NvParameterized::Traits* traits);
+
+ const uint32_t* checksum(uint32_t& bits) const
+ {
+ return staticChecksum(bits);
+ }
+
+ const ConvexHullParametersNS::ParametersStruct& parameters(void) const
+ {
+ ConvexHullParameters* tmpThis = const_cast<ConvexHullParameters*>(this);
+ return *(static_cast<ConvexHullParametersNS::ParametersStruct*>(tmpThis));
+ }
+
+ ConvexHullParametersNS::ParametersStruct& parameters(void)
+ {
+ return *(static_cast<ConvexHullParametersNS::ParametersStruct*>(this));
+ }
+
+ virtual NvParameterized::ErrorType getParameterHandle(const char* long_name, NvParameterized::Handle& handle) const;
+ virtual NvParameterized::ErrorType getParameterHandle(const char* long_name, NvParameterized::Handle& handle);
+
+ void initDefaults(void);
+
+protected:
+
+ virtual const NvParameterized::DefinitionImpl* getParameterDefinitionTree(void);
+ virtual const NvParameterized::DefinitionImpl* getParameterDefinitionTree(void) const;
+
+
+ virtual void getVarPtr(const NvParameterized::Handle& handle, void*& ptr, size_t& offset) const;
+
+private:
+
+ void buildTree(void);
+ void initDynamicArrays(void);
+ void initStrings(void);
+ void initReferences(void);
+ void freeDynamicArrays(void);
+ void freeStrings(void);
+ void freeReferences(void);
+
+ static bool mBuiltFlag;
+ static NvParameterized::MutexType mBuiltFlagMutex;
+};
+
+class ConvexHullParametersFactory : public NvParameterized::Factory
+{
+ static const char* const vptr;
+
+public:
+
+ virtual void freeParameterDefinitionTable(NvParameterized::Traits* traits)
+ {
+ ConvexHullParameters::freeParameterDefinitionTable(traits);
+ }
+
+ virtual NvParameterized::Interface* create(NvParameterized::Traits* paramTraits)
+ {
+ // placement new on this class using mParameterizedTraits
+
+ void* newPtr = paramTraits->alloc(sizeof(ConvexHullParameters), ConvexHullParameters::ClassAlignment);
+ if (!NvParameterized::IsAligned(newPtr, ConvexHullParameters::ClassAlignment))
+ {
+ NV_PARAM_TRAITS_WARNING(paramTraits, "Unaligned memory allocation for class ConvexHullParameters");
+ paramTraits->free(newPtr);
+ return 0;
+ }
+
+ memset(newPtr, 0, sizeof(ConvexHullParameters)); // always initialize memory allocated to zero for default values
+ return NV_PARAM_PLACEMENT_NEW(newPtr, ConvexHullParameters)(paramTraits);
+ }
+
+ virtual NvParameterized::Interface* finish(NvParameterized::Traits* paramTraits, void* bufObj, void* bufStart, int32_t* refCount)
+ {
+ if (!NvParameterized::IsAligned(bufObj, ConvexHullParameters::ClassAlignment)
+ || !NvParameterized::IsAligned(bufStart, ConvexHullParameters::ClassAlignment))
+ {
+ NV_PARAM_TRAITS_WARNING(paramTraits, "Unaligned memory allocation for class ConvexHullParameters");
+ return 0;
+ }
+
+ // Init NvParameters-part
+ // We used to call empty constructor of ConvexHullParameters here
+ // but it may call default constructors of members and spoil the data
+ NV_PARAM_PLACEMENT_NEW(bufObj, NvParameterized::NvParameters)(paramTraits, bufStart, refCount);
+
+ // Init vtable (everything else is already initialized)
+ *(const char**)bufObj = vptr;
+
+ return (ConvexHullParameters*)bufObj;
+ }
+
+ virtual const char* getClassName()
+ {
+ return (ConvexHullParameters::staticClassName());
+ }
+
+ virtual uint32_t getVersion()
+ {
+ return (ConvexHullParameters::staticVersion());
+ }
+
+ virtual uint32_t getAlignment()
+ {
+ return (ConvexHullParameters::ClassAlignment);
+ }
+
+ virtual const uint32_t* getChecksum(uint32_t& bits)
+ {
+ return (ConvexHullParameters::staticChecksum(bits));
+ }
+};
+#endif // NV_PARAMETERIZED_ONLY_LAYOUTS
+
+} // namespace apex
+} // namespace nvidia
+
+#if PX_VC
+#pragma warning(pop)
+#endif
+
+#endif
diff --git a/APEX_1.4/common/include/autogen/DebugColorParams.h b/APEX_1.4/common/include/autogen/DebugColorParams.h
new file mode 100644
index 00000000..bc650c9f
--- /dev/null
+++ b/APEX_1.4/common/include/autogen/DebugColorParams.h
@@ -0,0 +1,265 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2015 NVIDIA Corporation. All rights reserved.
+
+// This file was generated by NvParameterized/scripts/GenParameterized.pl
+
+
+#ifndef HEADER_DebugColorParams_h
+#define HEADER_DebugColorParams_h
+
+#include "NvParametersTypes.h"
+
+#ifndef NV_PARAMETERIZED_ONLY_LAYOUTS
+#include "nvparameterized/NvParameterized.h"
+#include "nvparameterized/NvParameterizedTraits.h"
+#include "NvParameters.h"
+#include "NvTraitsInternal.h"
+#endif
+
+namespace nvidia
+{
+namespace apex
+{
+
+#if PX_VC
+#pragma warning(push)
+#pragma warning(disable: 4324) // structure was padded due to __declspec(align())
+#endif
+
+namespace DebugColorParamsNS
+{
+
+
+
+struct ParametersStruct
+{
+
+ uint32_t Default;
+ uint32_t PoseArrows;
+ uint32_t MeshStatic;
+ uint32_t MeshDynamic;
+ uint32_t Shape;
+ uint32_t Text0;
+ uint32_t Text1;
+ uint32_t ForceArrowsLow;
+ uint32_t ForceArrowsNorm;
+ uint32_t ForceArrowsHigh;
+ uint32_t Color0;
+ uint32_t Color1;
+ uint32_t Color2;
+ uint32_t Color3;
+ uint32_t Color4;
+ uint32_t Color5;
+ uint32_t Red;
+ uint32_t Green;
+ uint32_t Blue;
+ uint32_t DarkRed;
+ uint32_t DarkGreen;
+ uint32_t DarkBlue;
+ uint32_t LightRed;
+ uint32_t LightGreen;
+ uint32_t LightBlue;
+ uint32_t Purple;
+ uint32_t DarkPurple;
+ uint32_t Yellow;
+ uint32_t Orange;
+ uint32_t Gold;
+ uint32_t Emerald;
+ uint32_t White;
+ uint32_t Black;
+ uint32_t Gray;
+ uint32_t LightGray;
+ uint32_t DarkGray;
+
+};
+
+static const uint32_t checksum[] = { 0x21b30efd, 0xaea10022, 0x72a4df62, 0x8fab217f, };
+
+} // namespace DebugColorParamsNS
+
+#ifndef NV_PARAMETERIZED_ONLY_LAYOUTS
+class DebugColorParams : public NvParameterized::NvParameters, public DebugColorParamsNS::ParametersStruct
+{
+public:
+ DebugColorParams(NvParameterized::Traits* traits, void* buf = 0, int32_t* refCount = 0);
+
+ virtual ~DebugColorParams();
+
+ virtual void destroy();
+
+ static const char* staticClassName(void)
+ {
+ return("DebugColorParams");
+ }
+
+ const char* className(void) const
+ {
+ return(staticClassName());
+ }
+
+ static const uint32_t ClassVersion = ((uint32_t)0 << 16) + (uint32_t)0;
+
+ static uint32_t staticVersion(void)
+ {
+ return ClassVersion;
+ }
+
+ uint32_t version(void) const
+ {
+ return(staticVersion());
+ }
+
+ static const uint32_t ClassAlignment = 8;
+
+ static const uint32_t* staticChecksum(uint32_t& bits)
+ {
+ bits = 8 * sizeof(DebugColorParamsNS::checksum);
+ return DebugColorParamsNS::checksum;
+ }
+
+ static void freeParameterDefinitionTable(NvParameterized::Traits* traits);
+
+ const uint32_t* checksum(uint32_t& bits) const
+ {
+ return staticChecksum(bits);
+ }
+
+ const DebugColorParamsNS::ParametersStruct& parameters(void) const
+ {
+ DebugColorParams* tmpThis = const_cast<DebugColorParams*>(this);
+ return *(static_cast<DebugColorParamsNS::ParametersStruct*>(tmpThis));
+ }
+
+ DebugColorParamsNS::ParametersStruct& parameters(void)
+ {
+ return *(static_cast<DebugColorParamsNS::ParametersStruct*>(this));
+ }
+
+ virtual NvParameterized::ErrorType getParameterHandle(const char* long_name, NvParameterized::Handle& handle) const;
+ virtual NvParameterized::ErrorType getParameterHandle(const char* long_name, NvParameterized::Handle& handle);
+
+ void initDefaults(void);
+
+protected:
+
+ virtual const NvParameterized::DefinitionImpl* getParameterDefinitionTree(void);
+ virtual const NvParameterized::DefinitionImpl* getParameterDefinitionTree(void) const;
+
+
+ virtual void getVarPtr(const NvParameterized::Handle& handle, void*& ptr, size_t& offset) const;
+
+private:
+
+ void buildTree(void);
+ void initDynamicArrays(void);
+ void initStrings(void);
+ void initReferences(void);
+ void freeDynamicArrays(void);
+ void freeStrings(void);
+ void freeReferences(void);
+
+ static bool mBuiltFlag;
+ static NvParameterized::MutexType mBuiltFlagMutex;
+};
+
+class DebugColorParamsFactory : public NvParameterized::Factory
+{
+ static const char* const vptr;
+
+public:
+
+ virtual void freeParameterDefinitionTable(NvParameterized::Traits* traits)
+ {
+ DebugColorParams::freeParameterDefinitionTable(traits);
+ }
+
+ virtual NvParameterized::Interface* create(NvParameterized::Traits* paramTraits)
+ {
+ // placement new on this class using mParameterizedTraits
+
+ void* newPtr = paramTraits->alloc(sizeof(DebugColorParams), DebugColorParams::ClassAlignment);
+ if (!NvParameterized::IsAligned(newPtr, DebugColorParams::ClassAlignment))
+ {
+ NV_PARAM_TRAITS_WARNING(paramTraits, "Unaligned memory allocation for class DebugColorParams");
+ paramTraits->free(newPtr);
+ return 0;
+ }
+
+ memset(newPtr, 0, sizeof(DebugColorParams)); // always initialize memory allocated to zero for default values
+ return NV_PARAM_PLACEMENT_NEW(newPtr, DebugColorParams)(paramTraits);
+ }
+
+ virtual NvParameterized::Interface* finish(NvParameterized::Traits* paramTraits, void* bufObj, void* bufStart, int32_t* refCount)
+ {
+ if (!NvParameterized::IsAligned(bufObj, DebugColorParams::ClassAlignment)
+ || !NvParameterized::IsAligned(bufStart, DebugColorParams::ClassAlignment))
+ {
+ NV_PARAM_TRAITS_WARNING(paramTraits, "Unaligned memory allocation for class DebugColorParams");
+ return 0;
+ }
+
+ // Init NvParameters-part
+ // We used to call empty constructor of DebugColorParams here
+ // but it may call default constructors of members and spoil the data
+ NV_PARAM_PLACEMENT_NEW(bufObj, NvParameterized::NvParameters)(paramTraits, bufStart, refCount);
+
+ // Init vtable (everything else is already initialized)
+ *(const char**)bufObj = vptr;
+
+ return (DebugColorParams*)bufObj;
+ }
+
+ virtual const char* getClassName()
+ {
+ return (DebugColorParams::staticClassName());
+ }
+
+ virtual uint32_t getVersion()
+ {
+ return (DebugColorParams::staticVersion());
+ }
+
+ virtual uint32_t getAlignment()
+ {
+ return (DebugColorParams::ClassAlignment);
+ }
+
+ virtual const uint32_t* getChecksum(uint32_t& bits)
+ {
+ return (DebugColorParams::staticChecksum(bits));
+ }
+};
+#endif // NV_PARAMETERIZED_ONLY_LAYOUTS
+
+} // namespace apex
+} // namespace nvidia
+
+#if PX_VC
+#pragma warning(pop)
+#endif
+
+#endif
diff --git a/APEX_1.4/common/include/autogen/DebugRenderParams.h b/APEX_1.4/common/include/autogen/DebugRenderParams.h
new file mode 100644
index 00000000..0c992de6
--- /dev/null
+++ b/APEX_1.4/common/include/autogen/DebugRenderParams.h
@@ -0,0 +1,248 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2015 NVIDIA Corporation. All rights reserved.
+
+// This file was generated by NvParameterized/scripts/GenParameterized.pl
+
+
+#ifndef HEADER_DebugRenderParams_h
+#define HEADER_DebugRenderParams_h
+
+#include "NvParametersTypes.h"
+
+#ifndef NV_PARAMETERIZED_ONLY_LAYOUTS
+#include "nvparameterized/NvParameterized.h"
+#include "nvparameterized/NvParameterizedTraits.h"
+#include "NvParameters.h"
+#include "NvTraitsInternal.h"
+#endif
+
+namespace nvidia
+{
+namespace apex
+{
+
+#if PX_VC
+#pragma warning(push)
+#pragma warning(disable: 4324) // structure was padded due to __declspec(align())
+#endif
+
+namespace DebugRenderParamsNS
+{
+
+
+struct REF_DynamicArray1D_Type
+{
+ NvParameterized::Interface** buf;
+ bool isAllocated;
+ int32_t elementSize;
+ int32_t arraySizes[1];
+};
+
+
+struct ParametersStruct
+{
+
+ bool Enable;
+ float Scale;
+ float LodBenefits;
+ float RelativeLodBenefitsScreenPos;
+ float RelativeLodBenefitsThickness;
+ float LodDistanceScale;
+ float RenderNormals;
+ float RenderTangents;
+ float RenderBitangents;
+ bool Bounds;
+ REF_DynamicArray1D_Type moduleName;
+
+};
+
+static const uint32_t checksum[] = { 0x0679a129, 0x4119501f, 0xde4ce2b2, 0xecb0049b, };
+
+} // namespace DebugRenderParamsNS
+
+#ifndef NV_PARAMETERIZED_ONLY_LAYOUTS
+class DebugRenderParams : public NvParameterized::NvParameters, public DebugRenderParamsNS::ParametersStruct
+{
+public:
+ DebugRenderParams(NvParameterized::Traits* traits, void* buf = 0, int32_t* refCount = 0);
+
+ virtual ~DebugRenderParams();
+
+ virtual void destroy();
+
+ static const char* staticClassName(void)
+ {
+ return("DebugRenderParams");
+ }
+
+ const char* className(void) const
+ {
+ return(staticClassName());
+ }
+
+ static const uint32_t ClassVersion = ((uint32_t)0 << 16) + (uint32_t)1;
+
+ static uint32_t staticVersion(void)
+ {
+ return ClassVersion;
+ }
+
+ uint32_t version(void) const
+ {
+ return(staticVersion());
+ }
+
+ static const uint32_t ClassAlignment = 8;
+
+ static const uint32_t* staticChecksum(uint32_t& bits)
+ {
+ bits = 8 * sizeof(DebugRenderParamsNS::checksum);
+ return DebugRenderParamsNS::checksum;
+ }
+
+ static void freeParameterDefinitionTable(NvParameterized::Traits* traits);
+
+ const uint32_t* checksum(uint32_t& bits) const
+ {
+ return staticChecksum(bits);
+ }
+
+ const DebugRenderParamsNS::ParametersStruct& parameters(void) const
+ {
+ DebugRenderParams* tmpThis = const_cast<DebugRenderParams*>(this);
+ return *(static_cast<DebugRenderParamsNS::ParametersStruct*>(tmpThis));
+ }
+
+ DebugRenderParamsNS::ParametersStruct& parameters(void)
+ {
+ return *(static_cast<DebugRenderParamsNS::ParametersStruct*>(this));
+ }
+
+ virtual NvParameterized::ErrorType getParameterHandle(const char* long_name, NvParameterized::Handle& handle) const;
+ virtual NvParameterized::ErrorType getParameterHandle(const char* long_name, NvParameterized::Handle& handle);
+
+ void initDefaults(void);
+
+protected:
+
+ virtual const NvParameterized::DefinitionImpl* getParameterDefinitionTree(void);
+ virtual const NvParameterized::DefinitionImpl* getParameterDefinitionTree(void) const;
+
+
+ virtual void getVarPtr(const NvParameterized::Handle& handle, void*& ptr, size_t& offset) const;
+
+private:
+
+ void buildTree(void);
+ void initDynamicArrays(void);
+ void initStrings(void);
+ void initReferences(void);
+ void freeDynamicArrays(void);
+ void freeStrings(void);
+ void freeReferences(void);
+
+ static bool mBuiltFlag;
+ static NvParameterized::MutexType mBuiltFlagMutex;
+};
+
+class DebugRenderParamsFactory : public NvParameterized::Factory
+{
+ static const char* const vptr;
+
+public:
+
+ virtual void freeParameterDefinitionTable(NvParameterized::Traits* traits)
+ {
+ DebugRenderParams::freeParameterDefinitionTable(traits);
+ }
+
+ virtual NvParameterized::Interface* create(NvParameterized::Traits* paramTraits)
+ {
+ // placement new on this class using mParameterizedTraits
+
+ void* newPtr = paramTraits->alloc(sizeof(DebugRenderParams), DebugRenderParams::ClassAlignment);
+ if (!NvParameterized::IsAligned(newPtr, DebugRenderParams::ClassAlignment))
+ {
+ NV_PARAM_TRAITS_WARNING(paramTraits, "Unaligned memory allocation for class DebugRenderParams");
+ paramTraits->free(newPtr);
+ return 0;
+ }
+
+ memset(newPtr, 0, sizeof(DebugRenderParams)); // always initialize memory allocated to zero for default values
+ return NV_PARAM_PLACEMENT_NEW(newPtr, DebugRenderParams)(paramTraits);
+ }
+
+ virtual NvParameterized::Interface* finish(NvParameterized::Traits* paramTraits, void* bufObj, void* bufStart, int32_t* refCount)
+ {
+ if (!NvParameterized::IsAligned(bufObj, DebugRenderParams::ClassAlignment)
+ || !NvParameterized::IsAligned(bufStart, DebugRenderParams::ClassAlignment))
+ {
+ NV_PARAM_TRAITS_WARNING(paramTraits, "Unaligned memory allocation for class DebugRenderParams");
+ return 0;
+ }
+
+ // Init NvParameters-part
+ // We used to call empty constructor of DebugRenderParams here
+ // but it may call default constructors of members and spoil the data
+ NV_PARAM_PLACEMENT_NEW(bufObj, NvParameterized::NvParameters)(paramTraits, bufStart, refCount);
+
+ // Init vtable (everything else is already initialized)
+ *(const char**)bufObj = vptr;
+
+ return (DebugRenderParams*)bufObj;
+ }
+
+ virtual const char* getClassName()
+ {
+ return (DebugRenderParams::staticClassName());
+ }
+
+ virtual uint32_t getVersion()
+ {
+ return (DebugRenderParams::staticVersion());
+ }
+
+ virtual uint32_t getAlignment()
+ {
+ return (DebugRenderParams::ClassAlignment);
+ }
+
+ virtual const uint32_t* getChecksum(uint32_t& bits)
+ {
+ return (DebugRenderParams::staticChecksum(bits));
+ }
+};
+#endif // NV_PARAMETERIZED_ONLY_LAYOUTS
+
+} // namespace apex
+} // namespace nvidia
+
+#if PX_VC
+#pragma warning(pop)
+#endif
+
+#endif
diff --git a/APEX_1.4/common/include/autogen/ModuleCommonRegistration.h b/APEX_1.4/common/include/autogen/ModuleCommonRegistration.h
new file mode 100644
index 00000000..febd3743
--- /dev/null
+++ b/APEX_1.4/common/include/autogen/ModuleCommonRegistration.h
@@ -0,0 +1,120 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+#ifndef MODULE_MODULECOMMONREGISTRATIONH_H
+#define MODULE_MODULECOMMONREGISTRATIONH_H
+
+#include "PsAllocator.h"
+#include "NvRegistrationsForTraitsBase.h"
+#include "nvparameterized/NvParameterizedTraits.h"
+#include "PxAssert.h"
+#include <stdint.h>
+
+// INCLUDE GENERATED FACTORIES
+#include "ConvexHullParameters.h"
+#include "DebugColorParams.h"
+#include "DebugRenderParams.h"
+
+
+// INCLUDE GENERATED CONVERSION
+
+
+namespace nvidia {
+namespace apex {
+
+
+class ModuleCommonRegistration : public NvParameterized::RegistrationsForTraitsBase
+{
+public:
+ static void invokeRegistration(NvParameterized::Traits* parameterizedTraits)
+ {
+ if (parameterizedTraits)
+ {
+ ModuleCommonRegistration().registerAll(*parameterizedTraits);
+ }
+ }
+
+ static void invokeUnregistration(NvParameterized::Traits* parameterizedTraits)
+ {
+ if (parameterizedTraits)
+ {
+ ModuleCommonRegistration().unregisterAll(*parameterizedTraits);
+ }
+ }
+
+ void registerAvailableFactories(NvParameterized::Traits& parameterizedTraits)
+ {
+ ::NvParameterized::Factory* factoriesToRegister[] = {
+// REGISTER GENERATED FACTORIES
+ new nvidia::apex::ConvexHullParametersFactory(),
+ new nvidia::apex::DebugColorParamsFactory(),
+ new nvidia::apex::DebugRenderParamsFactory(),
+
+ };
+
+ for (size_t i = 0; i < sizeof(factoriesToRegister)/sizeof(factoriesToRegister[0]); ++i)
+ {
+ parameterizedTraits.registerFactory(*factoriesToRegister[i]);
+ }
+ }
+
+ virtual void registerAvailableConverters(NvParameterized::Traits& parameterizedTraits)
+ {
+// REGISTER GENERATED CONVERSION
+PX_UNUSED(parameterizedTraits);
+
+ }
+
+ void unregisterAvailableFactories(NvParameterized::Traits& parameterizedTraits)
+ {
+ struct FactoryDesc
+ {
+ const char* name;
+ uint32_t version;
+ };
+
+ ::NvParameterized::Factory* factoriesToUnregister[] = {
+// UNREGISTER GENERATED FACTORIES
+ new nvidia::apex::ConvexHullParametersFactory(),
+ new nvidia::apex::DebugColorParamsFactory(),
+ new nvidia::apex::DebugRenderParamsFactory(),
+
+ };
+
+ for (size_t i = 0; i < sizeof(factoriesToUnregister)/sizeof(factoriesToUnregister[0]); ++i)
+ {
+ ::NvParameterized::Factory* removedFactory = parameterizedTraits.removeFactory(factoriesToUnregister[i]->getClassName(), factoriesToUnregister[i]->getVersion());
+ if (!removedFactory)
+ {
+ PX_ASSERT_WITH_MESSAGE(0, "Factory can not be removed!");
+ }
+ else
+ {
+ removedFactory->freeParameterDefinitionTable(&parameterizedTraits);
+ delete removedFactory;
+ delete factoriesToUnregister[i];
+ }
+ }
+ }
+
+ virtual void unregisterAvailableConverters(NvParameterized::Traits& parameterizedTraits)
+ {
+// UNREGISTER GENERATED CONVERSION
+PX_UNUSED(parameterizedTraits);
+
+ }
+
+};
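+
+// A minimal usage sketch (illustrative only; how 'traits' is obtained is an assumption):
+// register the generated factories with an NvParameterized traits object at module init,
+// and unregister them again at module release.
+#if 0
+void exampleRegisterCommonParams(NvParameterized::Traits* traits)
+{
+	nvidia::apex::ModuleCommonRegistration::invokeRegistration(traits);
+	// ... create / serialize ConvexHullParameters, DebugColorParams, DebugRenderParams ...
+	nvidia::apex::ModuleCommonRegistration::invokeUnregistration(traits);
+}
+#endif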
+
+
+}
+} //nvidia::apex
+
+#endif
diff --git a/APEX_1.4/common/include/variable_oscillator.h b/APEX_1.4/common/include/variable_oscillator.h
new file mode 100644
index 00000000..02d4aeea
--- /dev/null
+++ b/APEX_1.4/common/include/variable_oscillator.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+
+#ifndef VARIABLE_OSCILLATOR_H
+#define VARIABLE_OSCILLATOR_H
+
+#include "ApexUsingNamespace.h"
+#include "PsUserAllocated.h"
+
+namespace nvidia
+{
+namespace apex
+{
+
+class variableOscillator : public UserAllocated
+{
+public:
+ variableOscillator(float min, float max, float initial, float period);
+ ~variableOscillator();
+ float updateVariableOscillator(float deltaTime);
+
+private:
+ float computeEndVal(float current, float max_or_min);
+
+private:
+ float mMin;
+ float mMax;
+ float mPeriod;
+
+ float mCumTime;
+ float mStartVal;
+ float mEndVal;
+ float mLastVal;
+ bool mGoingUp;
+};
+
+}
+} // namespace nvidia::apex
+
+#endif