diff options
| author | git perforce import user <a@b> | 2016-10-25 12:29:14 -0600 |
|---|---|---|
| committer | Sheikh Dawood Abdul Ajees <Sheikh Dawood Abdul Ajees> | 2016-10-25 18:56:37 -0500 |
| commit | 3dfe2108cfab31ba3ee5527e217d0d8e99a51162 (patch) | |
| tree | fa6485c169e50d7415a651bf838f5bcd0fd3bfbd /APEX_1.4/framework | |
| download | physx-3.4-3dfe2108cfab31ba3ee5527e217d0d8e99a51162.tar.xz physx-3.4-3dfe2108cfab31ba3ee5527e217d0d8e99a51162.zip | |
Initial commit:
PhysX 3.4.0 Update @ 21294896
APEX 1.4.0 Update @ 21275617
[CL 21300167]
Diffstat (limited to 'APEX_1.4/framework')
92 files changed, 31124 insertions, 0 deletions
diff --git a/APEX_1.4/framework/include/ApexAssetPreviewScene.h b/APEX_1.4/framework/include/ApexAssetPreviewScene.h new file mode 100644 index 00000000..4a091bc4 --- /dev/null +++ b/APEX_1.4/framework/include/ApexAssetPreviewScene.h @@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+
+#ifndef APEX_ASSET_PREVIEW_SCENE_H
+#define APEX_ASSET_PREVIEW_SCENE_H
+
+#include "Apex.h"
+#include "ApexResource.h"
+#include "PsUserAllocated.h"
+#include "ApexSDKImpl.h"
+#include "AssetPreviewScene.h"
+#include "ModuleIntl.h"
+#include "ApexContext.h"
+#include "PsMutex.h"
+
+#if PX_PHYSICS_VERSION_MAJOR == 3
+#include "PxScene.h"
+#include "PxRenderBuffer.h"
+#endif
+
+#include "ApexSceneUserNotify.h"
+
+#include "PsSync.h"
+#include "PxTask.h"
+#include "PxTaskManager.h"
+
+#include "ApexGroupsFiltering.h"
+#include "ApexRWLockable.h"
+
+namespace nvidia
+{
+namespace apex
+{
+
// Concrete implementation of the AssetPreviewScene interface. Holds only the
// preview camera pose and a "show full info" flag on behalf of the owning SDK;
// all method bodies live in the corresponding .cpp (not visible here).
+class ApexAssetPreviewScene : public AssetPreviewScene, public ApexRWLockable, public UserAllocated
+{
+public:
+	APEX_RW_LOCKABLE_BOILERPLATE
+
+	ApexAssetPreviewScene(ApexSDKImpl* sdk);
+	virtual ~ApexAssetPreviewScene() {}
+
+	//Sets the view matrix. Should be called whenever the view matrix needs to be updated.
+	virtual void setCameraMatrix(const PxMat44& viewTransform);
+
+	//Returns the view matrix last set via setCameraMatrix().
+	virtual PxMat44 getCameraMatrix() const;
+
// Toggle/query whether previews render extended debug information.
+	virtual void setShowFullInfo(bool showFullInfo);
+
+	virtual bool getShowFullInfo() const;
+
// Public release entry point; presumably forwards to destroy() — confirm in the .cpp.
+	virtual void release();
+
+	void destroy();
+
+private:
// SDK that created this scene; not owned (raw back-pointer set in the constructor).
+	ApexSDKImpl*			mApexSDK;
+
+	PxMat44					mCameraMatrix;			// the pose for the preview rendering
+	bool					mShowFullInfo;
+};
+
+}
+} // end namespace nvidia::apex
+
+#endif // APEX_ASSET_PREVIEW_SCENE_H
diff --git a/APEX_1.4/framework/include/ApexCustomBufferIterator.h b/APEX_1.4/framework/include/ApexCustomBufferIterator.h new file mode 100644 index 00000000..14e388cf --- /dev/null +++ b/APEX_1.4/framework/include/ApexCustomBufferIterator.h @@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+
// NOTE(review): guard misspells ITERATOR as "ITERARTOR" (kept — #define/#endif
// must stay consistent) and uses a reserved double-underscore identifier.
+#ifndef __APEX_CUSTOM_BUFFER_ITERARTOR_H__
+#define __APEX_CUSTOM_BUFFER_ITERARTOR_H__
+
+#include "CustomBufferIterator.h"
+#include <PsUserAllocated.h>
+#include <PsArray.h>
+#include <ApexUsingNamespace.h>
+
+namespace nvidia
+{
+namespace apex
+{
+
// Iterates over per-triangle-vertex data and exposes named custom attribute
// channels registered via addCustomBuffer(). Presumably mData is an interleaved
// buffer of mElemSize bytes per vertex, 3 vertices per triangle — confirm in .cpp.
+class ApexCustomBufferIterator : public CustomBufferIterator, public UserAllocated
+{
+public:
+	ApexCustomBufferIterator();
+
+	// CustomBufferIterator methods
+
// Binds the raw buffer this iterator walks; does not take ownership.
+	virtual void setData(void* data, uint32_t elemSize, uint32_t maxTriangles);
+
// Registers a named attribute channel at a byte offset within each element.
+	virtual void addCustomBuffer(const char* name, RenderDataFormat::Enum format, uint32_t offset);
+
+	virtual void* getVertex(uint32_t triangleIndex, uint32_t vertexIndex) const;
+
// Returns the channel index for attributeName, or a negative value if absent — TODO confirm sentinel in .cpp.
+	virtual int32_t getAttributeIndex(const char* attributeName) const;
+
// Lookup by name; outFormat receives the channel's registered format.
+	virtual void* getVertexAttribute(uint32_t triangleIndex, uint32_t vertexIndex, const char* attributeName, RenderDataFormat::Enum& outFormat) const;
+
// Lookup by index; also returns the channel's name through outName.
+	virtual void* getVertexAttribute(uint32_t triangleIndex, uint32_t vertexIndex, uint32_t attributeIndex, RenderDataFormat::Enum& outFormat, const char*& outName) const;
+
+private:
+	uint8_t* mData;
+	uint32_t mElemSize;
+	uint32_t mMaxTriangles;
// Descriptor for one registered attribute channel; name is not owned.
+	struct CustomBuffer
+	{
+		const char* name;
+		uint32_t offset;
+		RenderDataFormat::Enum format;
+	};
+	physx::Array<CustomBuffer> mCustomBuffers;
+};
+
+}
+} // end namespace nvidia::apex
+
+
+#endif // __APEX_CUSTOM_BUFFER_ITERARTOR_H__
diff --git a/APEX_1.4/framework/include/ApexDefaultStream.h b/APEX_1.4/framework/include/ApexDefaultStream.h new file mode 100644 index 00000000..d1d4fc96 --- /dev/null +++ b/APEX_1.4/framework/include/ApexDefaultStream.h @@ -0,0 +1,16 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+
// NOTE(review): header is intentionally empty (contents presumably removed at
// some point); guard uses a reserved "__" prefix and lacks a trailing _H_.
+#ifndef __APEX_DEFAULT_STREAM_
+#define __APEX_DEFAULT_STREAM_
+
+
+#endif
\ No newline at end of file
diff --git a/APEX_1.4/framework/include/ApexInteropableBuffer.h b/APEX_1.4/framework/include/ApexInteropableBuffer.h new file mode 100644 index 00000000..a7608977 --- /dev/null +++ b/APEX_1.4/framework/include/ApexInteropableBuffer.h @@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+
+#ifndef __APEX_INTEROPABLE_BUFFER_H__
+#define __APEX_INTEROPABLE_BUFFER_H__
+
// Forward-declared CUDA driver-API handle so this header needs no CUDA includes.
+typedef struct CUgraphicsResource_st *CUgraphicsResource;
+
+namespace physx
+{
+	class PxCudaContextManager;
+}
+
+namespace nvidia
+{
+namespace apex
+{
+
// Mixin base ("Interopable" sic) for render buffers that may be registered with
// CUDA graphics interop. Tracks registration state and the interop handle;
// derived classes are expected to set m_registeredInCUDA/m_InteropHandle.
// NOTE(review): has a virtual method but no virtual destructor — deleting a
// derived buffer through this base would be UB; confirm deletion never happens
// through this type.
+class ApexInteropableBuffer
+{
+public:
+	ApexInteropableBuffer(bool mustBeRegistered = false, PxCudaContextManager *interopContext = NULL)
+		: m_mustBeRegisteredInCUDA(mustBeRegistered)
+		, m_registeredInCUDA(false)
+		, m_interopContext(interopContext)
+		, m_InteropHandle(NULL)
+	{
+	}
+
// Returns true and writes the handle only once registration has completed and
// the handle is non-null; otherwise leaves `handle` untouched.
+	virtual bool getInteropResourceHandle(CUgraphicsResource &handle)
+	{
+		if(m_registeredInCUDA && m_InteropHandle)
+		{
+			handle = m_InteropHandle;
+
+			return true;
+		}
+
+		return false;
+	}
+
+protected:
+
+	bool m_mustBeRegisteredInCUDA;
+	bool m_registeredInCUDA;
+	PxCudaContextManager *m_interopContext;	// not owned — presumably outlives this buffer; confirm with owner
+	CUgraphicsResource m_InteropHandle;
+};
+
+
+}
+} // end namespace nvidia::apex
+
+
+#endif // __APEX_INTEROPABLE_BUFFER_H__
diff --git a/APEX_1.4/framework/include/ApexPhysXObjectDesc.h b/APEX_1.4/framework/include/ApexPhysXObjectDesc.h new file mode 100644 index 00000000..781908c3 --- /dev/null +++ b/APEX_1.4/framework/include/ApexPhysXObjectDesc.h @@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION.
 All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+
+#ifndef __APEX_PHYSX_OBJECT_DESC_H__
+#define __APEX_PHYSX_OBJECT_DESC_H__
+
+#include "Apex.h"
+#include "PhysXObjectDescIntl.h"
+
+namespace nvidia
+{
+namespace apex
+{
+
// Descriptor associating a PhysX object with APEX bookkeeping. Stored by value
// in arrays inside ApexSDKImpl (hence the explicit copy semantics below);
// mNext/mPrev chain descriptors by index — presumably hash-bucket links keyed
// by makeHash(); confirm against ApexSDKImpl.
+class ApexPhysXObjectDesc : public PhysXObjectDescIntl
+{
+public:
+	typedef PhysXObjectDescIntl Parent;
+	ApexPhysXObjectDesc() : mNext(0), mPrev(0)
+	{
+		mFlags = 0;
+		userData = NULL;
+		mPhysXObject = NULL;
+	}
+
+	// Need a copy constructor because we contain an array, and we are in arrays
// Copy-construction delegates to operator= after copying the base.
+	ApexPhysXObjectDesc(const ApexPhysXObjectDesc& desc) : PhysXObjectDescIntl(desc)
+	{
+		*this = desc;
+	}
+
+	ApexPhysXObjectDesc& operator = (const ApexPhysXObjectDesc& desc)
+	{
+		mFlags = desc.mFlags;
+		userData = desc.userData;
+		mApexActors = desc.mApexActors;
+		mPhysXObject = desc.mPhysXObject;
+		mNext = desc.mNext;
+		mPrev = desc.mPrev;
+		return *this;
+	}
+
// Cheap exchange used when descriptors move within their containing array.
+	void swap(ApexPhysXObjectDesc& rhs)
+	{
+		Parent::swap(rhs);
+		shdfnd::swap(mNext, rhs.mNext);
+		shdfnd::swap(mPrev, rhs.mPrev);
+	}
+
// Folds a pointer-sized key down to 16 bits; defined in the .cpp.
+	static uint16_t makeHash(size_t hashable);
+
// Index-based links (not pointers) so chains survive array reallocation.
+	uint32_t mNext, mPrev;
+
+	friend class ApexSDKImpl;
+	virtual ~ApexPhysXObjectDesc(void)
+	{
+
+	}
+};
+
+}
+} // end namespace nvidia::apex
+
+#endif // __APEX_PHYSX_OBJECT_DESC_H__
diff --git a/APEX_1.4/framework/include/ApexRenderDebug.h b/APEX_1.4/framework/include/ApexRenderDebug.h new file mode 100644 index 00000000..5c8522d1 --- /dev/null +++ b/APEX_1.4/framework/include/ApexRenderDebug.h @@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+
+#ifndef APEX_RENDER_DEBUG_H
+#define APEX_RENDER_DEBUG_H
+
+#include "RenderDebugInterface.h"
+
+namespace nvidia
+{
+namespace apex
+{
+
+class ApexSDKImpl;
+
// Factory/teardown pair for the debug-render implementation; bodies live in the .cpp.
// NOTE(review): the parameter named `interface` collides with the `interface`
// macro defined by MSVC COM headers (combaseapi.h) — if this header is ever
// included after <windows.h>/COM headers it will fail to compile; consider
// renaming (comment-only note, not changed here).
+RenderDebugInterface* createApexRenderDebug(ApexSDKImpl* a, RENDER_DEBUG::RenderDebugInterface* interface, bool useRemote);
+void releaseApexRenderDebug(RenderDebugInterface* n);
+
+}
+} // end namespace nvidia::apex
+
+#endif
diff --git a/APEX_1.4/framework/include/ApexRenderMeshActor.h b/APEX_1.4/framework/include/ApexRenderMeshActor.h new file mode 100644 index 00000000..8092fbb7 --- /dev/null +++ b/APEX_1.4/framework/include/ApexRenderMeshActor.h @@ -0,0 +1,329 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+
+#ifndef APEX_RENDERMESH_ACTOR_H
+#define APEX_RENDERMESH_ACTOR_H
+
+#include "ApexUsingNamespace.h"
+#include "RenderMeshAssetIntl.h"
+#include "ApexActor.h"
+
+#include "ApexRenderMeshAsset.h"
+#include "ApexSharedUtils.h"
+#include "ApexRWLockable.h"
+#include "ReadCheck.h"
+#include "WriteCheck.h"
+
+namespace nvidia
+{
+namespace apex
+{
+
+// enables a hack that removes dead particles from the instance list prior to sending it to the application.
+// this is a bad hack because it requires significant additional memory and copies. +#define ENABLE_INSTANCED_MESH_CLEANUP_HACK 0 + + +/* + ApexRenderMeshActor - an instantiation of an ApexRenderMeshAsset + */ +class ApexRenderMeshActor : public RenderMeshActorIntl, public ApexResourceInterface, public ApexResource, public ApexActor, public ApexRWLockable +{ +public: + APEX_RW_LOCKABLE_BOILERPLATE + + void release(); + Asset* getOwner() const + { + READ_ZONE(); + return mRenderMeshAsset; + } + PxBounds3 getBounds() const + { + READ_ZONE(); + return ApexRenderable::getBounds(); + } + void lockRenderResources() + { + ApexRenderable::renderDataLock(); + } + void unlockRenderResources() + { + ApexRenderable::renderDataUnLock(); + } + + // RenderMeshActors have global context, ignore ApexActor scene methods +#if PX_PHYSICS_VERSION_MAJOR == 3 + void setPhysXScene(PxScene*) { } + PxScene* getPhysXScene() const + { + return NULL; + } +#endif + + bool getVisibilities(uint8_t* visibilityArray, uint32_t visibilityArraySize) const; + + bool setVisibility(bool visible, uint16_t partIndex = 0); + bool isVisible(uint16_t partIndex = 0) const + { + READ_ZONE(); + return mVisiblePartsForAPI.isUsed(partIndex); + } + + uint32_t visiblePartCount() const + { + READ_ZONE(); + return mVisiblePartsForAPI.usedCount(); + } + const uint32_t* getVisibleParts() const + { + READ_ZONE(); + return mVisiblePartsForAPI.usedIndices(); + } + + uint32_t getRenderVisiblePartCount() const + { + return mBufferVisibility ? mVisiblePartsForRendering.size() : mVisiblePartsForAPI.usedCount(); + } + const uint32_t* getRenderVisibleParts() const + { + return mBufferVisibility ? 
mVisiblePartsForRendering.begin() : mVisiblePartsForAPI.usedIndices(); + } + + virtual uint32_t getBoneCount() const + { + READ_ZONE(); + return mRenderMeshAsset->getBoneCount(); + } + + void setTM(const PxMat44& tm, uint32_t boneIndex = 0); + void setTM(const PxMat44& tm, const PxVec3& scale, uint32_t boneIndex = 0); + + const PxMat44 getTM(uint32_t boneIndex = 0) const + { + READ_ZONE(); + return accessTM(boneIndex); + } + + void setLastFrameTM(const PxMat44& tm, uint32_t boneIndex = 0); + void setLastFrameTM(const PxMat44& tm, const PxVec3& scale, uint32_t boneIndex = 0); + + void setSkinningMode(RenderMeshActorSkinningMode::Enum mode); + RenderMeshActorSkinningMode::Enum getSkinningMode() const; + + void syncVisibility(bool useLock = true); + + void updateBounds(); + void updateRenderResources(bool rewriteBuffers, void* userRenderData); + void updateRenderResources(bool useBones, bool rewriteBuffers, void* userRenderData); + void dispatchRenderResources(UserRenderer&); + void dispatchRenderResources(UserRenderer&, const PxMat44&); + + void setReleaseResourcesIfNothingToRender(bool value); + + void setBufferVisibility(bool bufferVisibility); + + void setOverrideMaterial(uint32_t index, const char* overrideMaterialName); + + //UserRenderVertexBuffer* getUserVertexBuffer(uint32_t submeshIndex) { if (submeshIndex < mSubmeshData.size()) return renderMeshAsset->vertexBuffers[submeshIndex]; return NULL; } + //UserRenderIndexBuffer* getUserIndexBuffer(uint32_t submeshIndex) { if (submeshIndex < mSubmeshData.size()) return mSubmeshData[submeshIndex].indexBuffer; return NULL; } + + void addVertexBuffer(uint32_t submeshIndex, bool alwaysDirty, PxVec3* position, PxVec3* normal, PxVec4* tangents); + void removeVertexBuffer(uint32_t submeshIndex); + + void setStaticPositionReplacement(uint32_t submeshIndex, const PxVec3* staticPositions); + void setStaticColorReplacement(uint32_t submeshIndex, const ColorRGBA* staticColors); + + virtual UserRenderInstanceBuffer* 
getInstanceBuffer() const + { + READ_ZONE(); + return mInstanceBuffer; + } + /// \sa RenderMeshActor::setInstanceBuffer + virtual void setInstanceBuffer(UserRenderInstanceBuffer* instBuf); + virtual void setMaxInstanceCount(uint32_t count); + /// \sa RenderMeshActor::setInstanceBufferRange + virtual void setInstanceBufferRange(uint32_t from, uint32_t count); + + // ApexResourceInterface methods + void setListIndex(ResourceList& list, uint32_t index) + { + m_listIndex = index; + m_list = &list; + } + uint32_t getListIndex() const + { + return m_listIndex; + } + + virtual void getLodRange(float& min, float& max, bool& intOnly) const; + virtual float getActiveLod() const; + virtual void forceLod(float lod); + /** + \brief Selectively enables/disables debug visualization of a specific APEX actor. Default value it true. + */ + virtual void setEnableDebugVisualization(bool state) + { + WRITE_ZONE(); + ApexActor::setEnableDebugVisualization(state); + } + + virtual bool rayCast(RenderMeshActorRaycastHitData& hitData, + const PxVec3& worldOrig, const PxVec3& worldDisp, + RenderMeshActorRaycastFlags::Enum flags = RenderMeshActorRaycastFlags::VISIBLE_PARTS, + RenderCullMode::Enum winding = RenderCullMode::CLOCKWISE, + int32_t partIndex = -1) const; + + virtual void visualize(RenderDebugInterface& batcher, nvidia::apex::DebugRenderParams* debugParams, PxMat33* scaledRotations, PxVec3* translations, uint32_t stride, uint32_t numberOfTransforms) const; + +protected: + ApexRenderMeshActor(const RenderMeshActorDesc& desc, ApexRenderMeshAsset& _renderMeshData, ResourceList& list); + virtual ~ApexRenderMeshActor(); + + struct SubmeshData; + void loadMaterial(SubmeshData& submeshData); + void init(const RenderMeshActorDesc& desc, uint16_t partCount, uint16_t boneCount); + void destroy(); + + PxMat44& accessTM(uint32_t boneIndex = 0) const + { + return (PxMat44&)mTransforms[mKeepVisibleBonesPacked ? 
mVisiblePartsForAPI.getRank(boneIndex) : boneIndex]; + } + + PxMat44& accessLastFrameTM(uint32_t boneIndex = 0) const + { + return (PxMat44&)mTransformsLastFrame[mKeepVisibleBonesPacked ? mVisiblePartsForAPILastFrame.getRank(boneIndex) : boneIndex]; + } + + /* Internal rendering APIs */ + void createRenderResources(bool useBones, void* userRenderData); + void updatePartVisibility(uint32_t submeshIndex, bool useBones, void* userRenderData); + void updateBonePoses(uint32_t submeshIndex); + void updateInstances(uint32_t submeshIndex); + void releaseSubmeshRenderResources(uint32_t submeshIndex); + void releaseRenderResources(); + bool submeshHasVisibleTriangles(uint32_t submeshIndex) const; + + // Fallback skinning + void createFallbackSkinning(uint32_t submeshIndex); + void distributeFallbackData(uint32_t submeshIndex); + void updateFallbackSkinning(uint32_t submeshIndex); + void writeUserBuffers(uint32_t submeshIndex); + + // Debug rendering + void visualizeTangentSpace(RenderDebugInterface& batcher, float normalScale, float tangentScale, float bitangentScale, PxMat33* scaledRotations, PxVec3* translations, uint32_t stride, uint32_t numberOfTransforms) const; + + ApexRenderMeshAsset* mRenderMeshAsset; + Array<PxMat44> mTransforms; + Array<PxMat44> mTransformsLastFrame; + + struct ResourceData + { + ResourceData() : resource(NULL), vertexCount(0), boneCount(0) {} + UserRenderResource* resource; + uint32_t vertexCount; + uint32_t boneCount; + }; + + struct SubmeshData + { + SubmeshData(); + ~SubmeshData(); + + Array<ResourceData> renderResources; + UserRenderIndexBuffer* indexBuffer; + void* fallbackSkinningMemory; + UserRenderVertexBuffer* userDynamicVertexBuffer; + UserRenderInstanceBuffer* instanceBuffer; + + PxVec3* userPositions; + PxVec3* userNormals; + PxVec4* userTangents4; + + // And now we have colors + const ColorRGBA* staticColorReplacement; + bool staticColorReplacementDirty; + + // These are needed if the positions vary from what the asset has stored. 
Can happen with morph targets. + // so far we only replace positions, more can be added here + const PxVec3* staticPositionReplacement; + UserRenderVertexBuffer* staticBufferReplacement; + UserRenderVertexBuffer* dynamicBufferReplacement; + + uint32_t fallbackSkinningMemorySize; + uint32_t visibleTriangleCount; + ResID materialID; + void* material; + bool isMaterialPointerValid; + uint32_t maxBonesPerMaterial; + uint32_t indexBufferRequestedSize; + bool userSpecifiedData; + bool userVertexBufferAlwaysDirty; + bool userIndexBufferChanged; + bool fallbackSkinningDirty; + }; + + nvidia::Array<SubmeshData> mSubmeshData; + IndexBank<uint32_t> mVisiblePartsForAPI; + IndexBank<uint32_t> mVisiblePartsForAPILastFrame; + RenderBufferHint::Enum mIndexBufferHint; + + Array<uint32_t> mVisiblePartsForRendering; + + Array<UserRenderVertexBuffer*> mPerActorVertexBuffers; + + // Instancing + uint32_t mMaxInstanceCount; + uint32_t mInstanceCount; + uint32_t mInstanceOffset; + UserRenderInstanceBuffer* mInstanceBuffer; + UserRenderResource* mRenderResource; + + // configuration + bool mRenderWithoutSkinning; + bool mForceBoneIndexChannel; + bool mApiVisibilityChanged; + bool mPartVisibilityChanged; + bool mInstanceCountChanged; + bool mKeepVisibleBonesPacked; + bool mForceFallbackSkinning; + bool mBonePosesDirty; + bool mOneUserVertexBufferChanged; // this will be true if one or more user vertex buffers changed + bool mBoneBufferInUse; + bool mReleaseResourcesIfNothingToRender; // is this ever set? I think we should remove it. 
+ bool mCreateRenderResourcesAfterInit; // only needed when mRenderWithoutSkinning is set to true + bool mBufferVisibility; + bool mKeepPreviousFrameBoneBuffer; + bool mPreviousFrameBoneBufferValid; + RenderMeshActorSkinningMode::Enum mSkinningMode; + Array<uint32_t> mTMSwapBuffer; + Array<PxMat44> mRemappedPreviousBoneTMs; + + // temporary + Array<uint16_t> mBoneIndexTempBuffer; + + // CM: For some reason the new operator doesn't see these members and allocates not enough memory for the class (on a 64 bit build) + // Even though ENABLE_INSTANCED_MESH_CLEANUP_HACK is defined to be 1 earlier in this file. + // For now, these members are permanently here, since it's a tiny amount of memory if the define is off, and it fixes the allocation problem. +//#if ENABLE_INSTANCED_MESH_CLEANUP_HACK + void* mOrderedInstanceTemp; + uint32_t mOrderedInstanceTempSize; +//#endif + + Array<uint32_t> mPartIndexTempBuffer; + + friend class ApexRenderMeshAsset; +}; + +} // namespace apex +} // namespace nvidia + +#endif // APEX_RENDERMESH_ACTOR_H diff --git a/APEX_1.4/framework/include/ApexRenderMeshAsset.h b/APEX_1.4/framework/include/ApexRenderMeshAsset.h new file mode 100644 index 00000000..8f2d613d --- /dev/null +++ b/APEX_1.4/framework/include/ApexRenderMeshAsset.h @@ -0,0 +1,237 @@ +/* + * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved. + * + * NVIDIA CORPORATION and its licensors retain all intellectual property + * and proprietary rights in and to this software, related documentation + * and any modifications thereto. Any use, reproduction, disclosure or + * distribution of this software and related documentation without an express + * license agreement from NVIDIA CORPORATION is strictly prohibited. 
+ */ + + +#ifndef APEX_RENDERMESH_ASSET_H +#define APEX_RENDERMESH_ASSET_H + +#include "RenderMeshAssetIntl.h" +#include "ApexResource.h" +#include "ApexSDKHelpers.h" +#include "ResourceProviderIntl.h" +#include "ApexRenderSubmesh.h" + +#include "RenderMeshAssetParameters.h" +#include "ApexRWLockable.h" + +namespace nvidia +{ +namespace apex +{ + +/** +ApexRenderMeshAsset - a collection of ApexRenderMeshParts and submesh extra data +*/ +class ApexRenderMeshAsset : public RenderMeshAssetIntl, public ApexResourceInterface, public ApexRWLockable, public ApexResource +{ +public: + APEX_RW_LOCKABLE_BOILERPLATE + + ApexRenderMeshAsset(ResourceList& list, const char* name, AuthObjTypeID ownerModuleID); + ~ApexRenderMeshAsset(); + + struct SubmeshData + { + UserRenderVertexBuffer* staticVertexBuffer; + UserRenderVertexBuffer* skinningVertexBuffer; + UserRenderVertexBuffer* dynamicVertexBuffer; + bool needsStaticData; + bool needsDynamicData; + }; + struct CustomSubmeshData + { + Array<RenderDataFormat::Enum> customBufferFormats; + Array<void*> customBufferVoidPtrs; + }; + + + void release() + { + GetApexSDK()->releaseAsset(*this); + } + + void destroy(); + + AuthObjTypeID getObjTypeID() const + { + return mObjTypeID; + } + const char* getObjTypeName() const + { + return getClassName(); + } + uint32_t forceLoadAssets(); + void deleteStaticBuffersAfterUse(bool set) + { + mParams->deleteStaticBuffersAfterUse = set; + } + + RenderMeshActor* createActor(const RenderMeshActorDesc& desc); + void releaseActor(RenderMeshActor& renderMeshActor); + + const char* getName(void) const + { + return mName.c_str(); + } + uint32_t getSubmeshCount() const + { + return (uint32_t)mParams->submeshes.arraySizes[0]; + } + uint32_t getPartCount() const + { + return (uint32_t)mParams->partBounds.arraySizes[0]; + } + uint32_t getBoneCount() const + { + return mParams->boneCount; + } + const RenderSubmesh& getSubmesh(uint32_t submeshIndex) const + { + return *mSubmeshes[submeshIndex]; + } + const 
PxBounds3& getBounds(uint32_t partIndex = 0) const + { + return mParams->partBounds.buf[partIndex]; + } + void getStats(RenderMeshAssetStats& stats) const; + + // from RenderMeshAssetIntl + RenderSubmeshIntl& getInternalSubmesh(uint32_t submeshIndex) + { + return *mSubmeshes[submeshIndex]; + } + void permuteBoneIndices(const Array<int32_t>& old2new); + void applyTransformation(const PxMat44& transformation, float scale); + void reverseWinding(); + void applyScale(float scale); + bool mergeBinormalsIntoTangents(); + void setOwnerModuleId(AuthObjTypeID id) + { + mOwnerModuleID = id; + } + TextureUVOrigin::Enum getTextureUVOrigin() const; + + const char* getMaterialName(uint32_t submeshIndex) const + { + return mParams->materialNames.buf[submeshIndex]; + } + + // ApexResourceInterface methods + void setListIndex(ResourceList& list, uint32_t index) + { + m_listIndex = index; + m_list = &list; + } + uint32_t getListIndex() const + { + return m_listIndex; + } + + /* Common data for all ApexRenderMeshAssets */ + static AuthObjTypeID mObjTypeID; + static const char* getClassName() + { + return RENDER_MESH_AUTHORING_TYPE_NAME; + }; + + const NvParameterized::Interface* getAssetNvParameterized() const + { + return mParams; + } + + /** + * \brief Releases the ApexAsset but returns the NvParameterized::Interface and *ownership* to the caller. 
+ */ + virtual NvParameterized::Interface* releaseAndReturnNvParameterizedInterface(void) + { + NvParameterized::Interface* ret = mParams; + mParams = NULL; + release(); + return ret; + } + NvParameterized::Interface* getDefaultActorDesc() + { + APEX_INVALID_OPERATION("Not yet implemented!"); + return NULL; + }; + + NvParameterized::Interface* getDefaultAssetPreviewDesc() + { + APEX_INVALID_OPERATION("Not yet implemented!"); + return NULL; + }; + + virtual Actor* createApexActor(const NvParameterized::Interface& /*parms*/, Scene& /*apexScene*/) + { + APEX_INVALID_OPERATION("Not yet implemented!"); + return NULL; + } + + virtual AssetPreview* createApexAssetPreview(const NvParameterized::Interface& /*params*/, AssetPreviewScene* /*previewScene*/) + { + APEX_INVALID_OPERATION("Not yet implemented!"); + return NULL; + } + + void setOpaqueMesh(UserOpaqueMesh* om) + { + mOpaqueMesh = om; + } + + virtual UserOpaqueMesh* getOpaqueMesh(void) const + { + return mOpaqueMesh; + } + + virtual bool isValidForActorCreation(const ::NvParameterized::Interface& /*parms*/, Scene& /*apexScene*/) const + { + return true; // TODO implement this method + } + + virtual bool isDirty() const + { + return false; + } + +protected: + ApexRenderMeshAsset() {} // Nop constructor for use by RenderMeshAssetAuthoring + + void setSubmeshCount(uint32_t submeshCount); + + + void createLocalData(); + + bool createFromParameters(RenderMeshAssetParameters* params); + + void updatePartBounds(); + + AuthObjTypeID mOwnerModuleID; + RenderMeshAssetParameters* mParams; + UserOpaqueMesh* mOpaqueMesh; + + // Name should not be serialized + ApexSimpleString mName; + + // Local (not serialized) data + Array<ApexRenderSubmesh*> mSubmeshes; + + Array<ResID> mMaterialIDs; + ResourceList mActorList; + Array<SubmeshData> mRuntimeSubmeshData; + Array<CustomSubmeshData> mRuntimeCustomSubmeshData; + + friend class ApexRenderMeshActor; + friend class RenderMeshAuthorableObject; +}; + +} // namespace apex +} // namespace 
nvidia + +#endif // APEX_RENDERMESH_ASSET_H diff --git a/APEX_1.4/framework/include/ApexRenderMeshAssetAuthoring.h b/APEX_1.4/framework/include/ApexRenderMeshAssetAuthoring.h new file mode 100644 index 00000000..9cb5ce51 --- /dev/null +++ b/APEX_1.4/framework/include/ApexRenderMeshAssetAuthoring.h @@ -0,0 +1,194 @@ +/* + * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved. + * + * NVIDIA CORPORATION and its licensors retain all intellectual property + * and proprietary rights in and to this software, related documentation + * and any modifications thereto. Any use, reproduction, disclosure or + * distribution of this software and related documentation without an express + * license agreement from NVIDIA CORPORATION is strictly prohibited. + */ + + +#ifndef APEX_RENDERMESH_ASSET_AUTHORING_H +#define APEX_RENDERMESH_ASSET_AUTHORING_H + +#include "ApexRenderMeshAsset.h" +#include "ApexSharedUtils.h" +#include "RenderMeshAssetIntl.h" +#include "ResourceProviderIntl.h" +#include "ApexResource.h" +#include "ApexActor.h" +#include "ApexAssetAuthoring.h" +#include "ApexString.h" +#include "ApexVertexFormat.h" +#include "ApexSDKImpl.h" +#include "ApexUsingNamespace.h" +#include "ApexRWLockable.h" + +#ifndef WITHOUT_APEX_AUTHORING + +namespace nvidia +{ +namespace apex +{ + +// PHTODO, put this into the authoring asset +struct VertexReductionExtraData +{ + void set(const ExplicitRenderTriangle& xTriangle) + { + smoothingMask = xTriangle.smoothingMask; + } + + bool canMerge(const VertexReductionExtraData& other) const + { + return (smoothingMask & other.smoothingMask) != 0 || smoothingMask == 0 || other.smoothingMask == 0; + } + + uint32_t smoothingMask; +}; + + +class ApexRenderMeshAssetAuthoring : public ApexRenderMeshAsset, public ApexAssetAuthoring, public RenderMeshAssetAuthoringIntl +{ +public: + APEX_RW_LOCKABLE_BOILERPLATE + + ApexRenderMeshAssetAuthoring(ResourceList& list, RenderMeshAssetParameters* params, const char* name); + + void release() + { + 
GetApexSDK()->releaseAssetAuthoring(*this); + } + + void createRenderMesh(const MeshDesc& desc, bool createMappingInformation); + uint32_t createReductionMap(uint32_t* map, const Vertex* vertices, const uint32_t* smoothingGroups, uint32_t vertexCount, + const PxVec3& positionTolerance, float normalTolerance, float UVTolerance); + + void deleteStaticBuffersAfterUse(bool set) + { + ApexRenderMeshAsset::deleteStaticBuffersAfterUse(set); + } + + const char* getName(void) const + { + return ApexRenderMeshAsset::getName(); + } + const char* getObjTypeName() const + { + return ApexRenderMeshAsset::getClassName(); + } + bool prepareForPlatform(nvidia::apex::PlatformTag) + { + APEX_INVALID_OPERATION("Not Implemented."); + return false; + } + void setToolString(const char* toolName, const char* toolVersion, uint32_t toolChangelist) + { + ApexAssetAuthoring::setToolString(toolName, toolVersion, toolChangelist); + } + uint32_t getSubmeshCount() const + { + return ApexRenderMeshAsset::getSubmeshCount(); + } + uint32_t getPartCount() const + { + return ApexRenderMeshAsset::getPartCount(); + } + const char* getMaterialName(uint32_t submeshIndex) const + { + return ApexRenderMeshAsset::getMaterialName(submeshIndex); + } + void setMaterialName(uint32_t submeshIndex, const char* name); + virtual void setWindingOrder(uint32_t submeshIndex, RenderCullMode::Enum winding); + virtual RenderCullMode::Enum getWindingOrder(uint32_t submeshIndex) const; + const RenderSubmesh& getSubmesh(uint32_t submeshIndex) const + { + return ApexRenderMeshAsset::getSubmesh(submeshIndex); + } + RenderSubmesh& getSubmeshWritable(uint32_t submeshIndex) + { + return *mSubmeshes[submeshIndex]; + } + const PxBounds3& getBounds(uint32_t partIndex = 0) const + { + return ApexRenderMeshAsset::getBounds(partIndex); + } + void getStats(RenderMeshAssetStats& stats) const + { + ApexRenderMeshAsset::getStats(stats); + } + + // From RenderMeshAssetAuthoringIntl + RenderSubmeshIntl& getInternalSubmesh(uint32_t 
submeshIndex) + { + return *ApexRenderMeshAsset::mSubmeshes[submeshIndex]; + } + void permuteBoneIndices(const physx::Array<int32_t>& old2new) + { + ApexRenderMeshAsset::permuteBoneIndices(old2new); + } + void applyTransformation(const PxMat44& transformation, float scale) + { + ApexRenderMeshAsset::applyTransformation(transformation, scale); + } + void applyScale(float scale) + { + ApexRenderMeshAsset::applyScale(scale); + } + void reverseWinding() + { + ApexRenderMeshAsset::reverseWinding(); + } + NvParameterized::Interface* getNvParameterized() const + { + return mParams; + } + /** + * \brief Releases the ApexAsset but returns the NvParameterized::Interface and *ownership* to the caller. + */ + virtual NvParameterized::Interface* releaseAndReturnNvParameterizedInterface(void) + { + NvParameterized::Interface* ret = mParams; + mParams = NULL; + release(); + return ret; + } + +protected: + // helper structs + struct VertexPart + { + uint32_t part, vertexIndex; + PX_INLINE bool operator()(const VertexPart& a, const VertexPart& b) const + { + if (a.part != b.part) + { + return a.part < b.part; + } + return a.vertexIndex < b.vertexIndex; + } + PX_INLINE static int cmp(const void* A, const void* B) + { + // Sorts by part, then vertexIndex + const int delta = (int)((VertexPart*)A)->part - (int)((VertexPart*)B)->part; + return delta != 0 ? 
delta : ((int)((VertexPart*)A)->vertexIndex - (int)((VertexPart*)B)->vertexIndex); + } + }; + + // helper methods + template<typename PxU> + bool fillSubmeshMap(physx::Array<VertexPart>& submeshMap, const void* const partIndicesVoid, uint32_t numParts, + const void* const vertexIndicesVoid, uint32_t numSubmeshIndices, uint32_t numSubmeshVertices); + + // protected constructors + ApexRenderMeshAssetAuthoring(ResourceList& list); + virtual ~ApexRenderMeshAssetAuthoring(); +}; + +} +} // end namespace nvidia::apex + +#endif // WITHOUT_APEX_AUTHORING + +#endif // APEX_RENDERMESH_ASSET_H diff --git a/APEX_1.4/framework/include/ApexRenderSubmesh.h b/APEX_1.4/framework/include/ApexRenderSubmesh.h new file mode 100644 index 00000000..f0a4e5c6 --- /dev/null +++ b/APEX_1.4/framework/include/ApexRenderSubmesh.h @@ -0,0 +1,107 @@ +/* + * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved. + * + * NVIDIA CORPORATION and its licensors retain all intellectual property + * and proprietary rights in and to this software, related documentation + * and any modifications thereto. Any use, reproduction, disclosure or + * distribution of this software and related documentation without an express + * license agreement from NVIDIA CORPORATION is strictly prohibited. 
 */


#ifndef APEX_RENDER_SUBMESH_H
#define APEX_RENDER_SUBMESH_H

#include "RenderMeshAssetIntl.h"
#include "ApexVertexBuffer.h"
#include "SubmeshParameters.h"

namespace nvidia
{
namespace apex
{

// One submesh of a render mesh asset: a shared vertex buffer plus per-part
// vertex/index partitions, all backed by the SubmeshParameters parameterized
// object held in mParams.  The partition arrays hold partCount+1 offsets, so
// a part's extent is the difference of adjacent entries.
class ApexRenderSubmesh : public RenderSubmeshIntl, public UserAllocated
{
public:
	ApexRenderSubmesh() : mParams(NULL) {}
	~ApexRenderSubmesh() {}

	// from RenderSubmesh

	// Number of vertices belonging to the given part.
	// NOTE(review): mParams is dereferenced unchecked here (and in the other
	// accessors) - this submesh must have been initialized through
	// createFromParameters()/setParams() before use.
	virtual uint32_t getVertexCount(uint32_t partIndex) const
	{
		return mParams->vertexPartition.buf[partIndex + 1] - mParams->vertexPartition.buf[partIndex];
	}

	virtual const VertexBufferIntl& getVertexBuffer() const
	{
		return mVertexBuffer;
	}

	// Offset of the part's first vertex within the shared vertex buffer.
	virtual uint32_t getFirstVertexIndex(uint32_t partIndex) const
	{
		return mParams->vertexPartition.buf[partIndex];
	}

	virtual uint32_t getIndexCount(uint32_t partIndex) const
	{
		return mParams->indexPartition.buf[partIndex + 1] - mParams->indexPartition.buf[partIndex];
	}

	virtual const uint32_t* getIndexBuffer(uint32_t partIndex) const
	{
		return mParams->indexBuffer.buf + mParams->indexPartition.buf[partIndex];
	}

	// Smoothing groups are stored one entry per triangle (hence the /3 on the
	// index-partition offset); returns NULL when the asset carries none.
	virtual const uint32_t* getSmoothingGroups(uint32_t partIndex) const
	{
		return mParams->smoothingGroups.buf != NULL ? (mParams->smoothingGroups.buf + mParams->indexPartition.buf[partIndex]/3) : NULL;
	}


	// from RenderSubmeshIntl
	virtual VertexBufferIntl& getVertexBufferWritable()
	{
		return mVertexBuffer;
	}

	virtual uint32_t* getIndexBufferWritable(uint32_t partIndex)
	{
		return mParams->indexBuffer.buf + mParams->indexPartition.buf[partIndex];
	}

	// Remaps this submesh after a vertex reordering described by the
	// old2new/new2old index maps.
	virtual void applyPermutation(const Array<uint32_t>& old2new, const Array<uint32_t>& new2old);

	// own methods

	// Total index count across all parts (full size of the index buffer).
	uint32_t getTotalIndexCount() const
	{
		return (uint32_t)mParams->indexBuffer.arraySizes[0];
	}

	// NOTE(review): const overload returning a non-const pointer into mParams;
	// duplicates the non-const accessor above and bypasses const-correctness.
	// Kept as-is for existing callers.
	uint32_t* getIndexBufferWritable(uint32_t partIndex) const
	{
		return mParams->indexBuffer.buf + mParams->indexPartition.buf[partIndex];
	}

	bool createFromParameters(SubmeshParameters* params);

	// Adopts the given parameterized objects for this submesh and its vertex buffer.
	void setParams(SubmeshParameters* submeshParams, VertexBufferParameters* vertexBufferParams);

	// Accumulates this submesh's contribution into the asset-level stats.
	void addStats(RenderMeshAssetStats& stats) const;

	void buildVertexBuffer(const VertexFormat& format, uint32_t vertexCount);

	// Backing parameterized data; public so the owning asset/authoring code
	// can access it directly.
	SubmeshParameters* mParams;

private:
	ApexVertexBuffer mVertexBuffer;

	// No assignment
	ApexRenderSubmesh& operator = (const ApexRenderSubmesh&);
};

} // namespace apex
} // namespace nvidia

#endif // APEX_RENDER_SUBMESH_H

diff --git a/APEX_1.4/framework/include/ApexResourceProvider.h b/APEX_1.4/framework/include/ApexResourceProvider.h new file mode 100644 index 00000000..1869aa75 --- /dev/null +++ b/APEX_1.4/framework/include/ApexResourceProvider.h @@ -0,0 +1,166 @@
/*
 * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
 *
 * NVIDIA CORPORATION and its licensors retain all intellectual property
 * and proprietary rights in and to this software, related documentation
 * and any modifications thereto. Any use, reproduction, disclosure or
 * distribution of this software and related documentation without an express
 * license agreement from NVIDIA CORPORATION is strictly prohibited.
 */


#ifndef __APEX_RESOURCE_PROVIDER_H__
#define __APEX_RESOURCE_PROVIDER_H__

#include "Apex.h"
#include "PsUserAllocated.h"
#include "PsHashMap.h"
#include "ResourceProviderIntl.h"
#include "ApexString.h"
#include "PsArray.h"

namespace nvidia
{
namespace apex
{

// Named-resource provider (NRP): maps (namespace, name) pairs to opaque
// resource pointers with reference counting, plus an optional user
// ResourceCallback.  Lookup can be case-sensitive or not (see
// setCaseSensitivity below).
class ApexResourceProvider : public ResourceProviderIntl, public UserAllocated
{
public:
	/* == Public ResourceProvider interface == */
	virtual void registerCallback(ResourceCallback* impl);
	virtual void setResource(const char* nameSpace, const char* name, void* resource, bool incRefCount);
	virtual void setResourceU32(const char* nameSpace, const char* name, uint32_t id, bool incRefCount);
	virtual void* getResource(const char* nameSpace, const char* name);
	virtual uint32_t releaseAllResourcesInNamespace(const char* nameSpace);
	virtual uint32_t releaseResource(const char* nameSpace, const char* name);
	virtual bool findRefCount(const char* nameSpace, const char* name, uint32_t& refCount);
	virtual void* findResource(const char* nameSpace, const char* name);
	virtual uint32_t findResourceU32(const char* nameSpace, const char* name); // find an existing resource.
	virtual void** findAllResources(const char* nameSpace, uint32_t& count); // find all resources in this namespace
	virtual const char** findAllResourceNames(const char* nameSpace, uint32_t& count); // find all resources in this namespace
	virtual const char** findNameSpaces(uint32_t& count);
	virtual void dumpResourceTable();

	/* == Internal ResourceProviderIntl interface == */
	void setResource(const char* nameSpace, const char* name, void* resource, bool valueIsSet, bool incRefCount);
	ResID createNameSpace(const char* nameSpace, bool releaseAtExit)
	{
		return createNameSpaceInternal(nameSpace, releaseAtExit);
	}
	ResID createResource(ResID nameSpace, const char* name, bool refCount);
	bool checkResource(ResID nameSpace, const char* name);
	bool checkResource(ResID id);
	void releaseResource(ResID id);
	void generateUniqueName(ResID nameSpace, ApexSimpleString& name);
	void* getResource(ResID id);
	const char* getResourceName(ResID id);
	const char* getResourceNameSpace(ResID id);
	bool getResourceIDs(const char* nameSpace, ResID* outResIDs, uint32_t& outCount, uint32_t inCount);

	// [PVD INTEGRATION CODE] ===========================================
	PX_INLINE uint32_t getResourceCount() const
	{
		return mResources.size();
	}
	// ==================================================================

	// the NRP can either be case sensitive or not, this method takes care of that option
	PX_INLINE bool isCaseSensitive()
	{
		return mCaseSensitive;
	}

	/**
	\brief Sets the resource provider's case sensitive mode.

	\note This must be done immediately after initialization so no names are hashed
	using the wrong mode.
	*/
	void setCaseSensitivity(bool caseSensitive)
	{
		mCaseSensitive = caseSensitive;
	}

	// uses the correct string matching function based on the case sensitivity mode
	bool stringsMatch(const char* str0, const char* str1);

protected:
	// Construction/destruction is restricted; ApexSDKImpl (a friend, below)
	// owns the provider's lifetime.
	ApexResourceProvider();
	virtual ~ApexResourceProvider();
	void destroy();

private:
	ResID createNameSpaceInternal(const char* &nameSpace, bool releaseAtExit);

	// One registered namespace: a fixed-size hash table mapping resource
	// names to ResIDs, with collision chains threaded through entryHeader.
	class NameSpace: public UserAllocated
	{
	public:
		NameSpace(ApexResourceProvider* arp, ResID nsid, bool releaseAtExit, const char* nameSpace);
		~NameSpace();

		// Looks up name in this namespace's hash, creating a new ResID on miss.
		ResID getOrCreateID(const char* &name, const char* NSName);
		ResID getID() const
		{
			return mId;
		}
		bool releaseAtExit() const
		{
			return mReleaseAtExit;
		}
		const char* getNameSpace(void) const
		{
			return mNameSpace;
		};

	private:
		// Header prepended to each stored name; entries in a hash bucket are
		// chained through nextEntry.
		struct entryHeader
		{
			const char* nextEntry;
			ResID id;
		};
		enum { HashSize = 1024 };
		bool mReleaseAtExit;
		uint16_t genHash(const char* name);
		const char* hash[HashSize];
		ApexResourceProvider* mArp;
		ResID mId;
		char* mNameSpace;
	};

	// NOTE: someone thinks this struct needs to be padded to nearest 16 or 32 bytes
	// it's not padded at the moment, but watch out for this
	struct resource
	{
		void* ptr;
		const char* name;
		const char* nameSpace;
		uint16_t refCount;
		uint8_t valueIsSet;
		uint8_t usedGetResource;
	};

	typedef physx::HashMap<ResID, ResID> HashMapNSID;

	ResID getNSID(const char* nsName);
	uint32_t getNSIndex(ResID nameSpace);

	HashMapNSID mNSID;

	// Namespace holding the names of all namespaces themselves.
	NameSpace mNSNames;
	physx::Array<resource> mResources;
	physx::Array<NameSpace*> mNameSpaces;
	ResourceCallback* mUserCallback;
	// Scratch array backing the pointer lists returned by findAllResourceNames()/findNameSpaces().
	physx::Array< const char* > mCharResults;
	bool mCaseSensitive;


	enum { UnknownValue = 0xFFFFFFFF };

	friend class ApexSDKImpl;
};

}
} // end namespace nvidia::apex

#endif // __APEX_RESOURCE_PROVIDER_H__

diff --git a/APEX_1.4/framework/include/ApexSDKImpl.h b/APEX_1.4/framework/include/ApexSDKImpl.h
new file mode 100644 index 00000000..ec977ae2 --- /dev/null +++ b/APEX_1.4/framework/include/ApexSDKImpl.h @@ -0,0 +1,507 @@
/*
 * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
 *
 * NVIDIA CORPORATION and its licensors retain all intellectual property
 * and proprietary rights in and to this software, related documentation
 * and any modifications thereto. Any use, reproduction, disclosure or
 * distribution of this software and related documentation without an express
 * license agreement from NVIDIA CORPORATION is strictly prohibited.
 */


#ifndef APEX_SDK_IMPL_H
#define APEX_SDK_IMPL_H

#include "Apex.h"
#include "nvparameterized/NvParameterizedTraits.h"
#include "ApexInterface.h"
#include "ApexSDKHelpers.h"
#include "ApexContext.h"
#include "ApexPhysXObjectDesc.h"
#include "FrameworkPerfScope.h"
#include "ApexSDKIntl.h"
#include "AuthorableObjectIntl.h"
#include "SceneIntl.h"
#include "RenderDebugInterface.h"
#include "ApexRenderMeshAsset.h"
#include "ApexAuthorableObject.h"
#include "ModuleIntl.h"
#include "DebugColorParamsEx.h"

#include "ModuleCommonRegistration.h"
#include "ModuleFrameworkRegistration.h"

#include "ApexSDKCachedDataImpl.h"

namespace physx
{
	class PxCudaContextManager;
	class PxCudaContextManagerDesc;

	namespace pvdsdk
	{
		class PvdDataStream;
		class ApexPvdClient;
	}
}

#if APEX_CUDA_SUPPORT
namespace nvidia
{
	class PhysXGpuIndicator;
}
#endif

namespace nvidia
{
	namespace apex
	{

		class ApexResourceProvider;
		class ApexScene;
		class RenderMeshAsset;

		// Factory signature used for stream creation (see ApexSDKImpl::createStream).
		typedef PxFileBuf* (CreateStreamFunc)(const char* filename, PxFileBuf::OpenMode mode);


		// ApexAuthorableObject needs a "module", so we'll give it one that
		// describes the APEX framework with regards to render mesh stuff
		// needed: ModuleIntl, no methods implemented
		// (createInternalModuleScene returns NULL: the framework "module" keeps
		// no per-scene data.)
		class ModuleFramework : public ModuleIntl
		{
			void destroy() {}

			ModuleSceneIntl* createInternalModuleScene(SceneIntl& apexScene, RenderDebugInterface* rd)
			{
				PX_UNUSED(apexScene);
				PX_UNUSED(rd);
				return NULL;
			}

			void releaseModuleSceneIntl(ModuleSceneIntl& moduleScene)
			{
				PX_UNUSED(moduleScene);
			}

		public:

			// Registers/unregisters the framework's NvParameterized factories
			// with the given traits.
			void init(NvParameterized::Traits* t);
			void release(NvParameterized::Traits* t);
		};


		// Implementation of the ApexSDKIntl interface.  Owns the loaded modules,
		// the scenes, the named-resource provider, PhysX object descriptors,
		// serialization entry points, temp-memory pools, and (optionally) the
		// PVD client and profiling zones.
		class ApexSDKImpl : public ApexSDKIntl, public ApexRWLockable, public UserAllocated
		{
		public:
			APEX_RW_LOCKABLE_BOILERPLATE

			ApexSDKImpl(ApexCreateError* errorCode, uint32_t APEXsdkVersion);
			void init(const ApexSDKDesc& desc);

			/* ApexSDK */
			Scene* createScene(const SceneDesc&);
			void releaseScene(Scene* scene);
			AssetPreviewScene* createAssetPreviewScene();
			void releaseAssetPreviewScene(AssetPreviewScene* nxScene);

			Module* createModule(const char* name, ApexCreateError* err);

#if PX_PHYSICS_VERSION_MAJOR == 0
			PxCpuDispatcher* createCpuDispatcher(uint32_t numThreads);
			PxCpuDispatcher* getDefaultThreadPool();
			void releaseCpuDispatcher(PxCpuDispatcher& cd);
#endif

			/**
			Creates/releases an ApexDebugRender interface
			*/
			virtual RenderDebugInterface* createApexRenderDebug(RENDER_DEBUG::RenderDebugInterface* iface, bool useRemoteDebugVisualization);
			virtual void releaseApexRenderDebug(RenderDebugInterface& debug);


			/**
			Create/release ApexShape interfaces
			*/

			virtual SphereShape* createApexSphereShape();
			virtual CapsuleShape* createApexCapsuleShape();
			virtual BoxShape* createApexBoxShape();
			virtual HalfSpaceShape* createApexHalfSpaceShape();

			virtual void releaseApexShape(Shape& shape);

#if PX_PHYSICS_VERSION_MAJOR == 3
			// Per-PhysX-object descriptor bookkeeping: one overload per PhysX
			// object type that APEX tracks.
			PhysXObjectDescIntl* createObjectDesc(const Actor*, const PxActor*);
			PhysXObjectDescIntl* createObjectDesc(const Actor*, const PxShape*);
			PhysXObjectDescIntl* createObjectDesc(const Actor*, const PxJoint*);
			PhysXObjectDescIntl* createObjectDesc(const Actor*, const PxCloth*);
			PhysXObjectDescIntl* createObjectDesc(const Actor*, const PxParticleSystem*);
			PhysXObjectDescIntl* createObjectDesc(const Actor*, const PxParticleFluid*);
			const PhysXObjectDesc* getPhysXObjectInfo(const PxActor*) const;
			const PhysXObjectDesc* getPhysXObjectInfo(const PxShape*) const;
			const PhysXObjectDesc* getPhysXObjectInfo(const PxJoint*) const;
			const PhysXObjectDesc* getPhysXObjectInfo(const PxCloth*) const;
			const PhysXObjectDesc* getPhysXObjectInfo(const PxParticleSystem*) const;
			const PhysXObjectDesc* getPhysXObjectInfo(const PxParticleFluid*) const;
			PxCooking* getCookingInterface();
			PxPhysics* getPhysXSDK();
#endif // PX_PHYSICS_VERSION_MAJOR == 3

			ApexActor* getApexActor(Actor*) const;

			// deprecated, use getErrorCallback()
			PxErrorCallback* getOutputStream();
			PxFoundation* getFoundation() const;
			PxErrorCallback* getErrorCallback() const;
			PxAllocatorCallback* getAllocator() const;
			ResourceProvider* getNamedResourceProvider();
			ResourceProviderIntl* getInternalResourceProvider();
			PxFileBuf* createStream(const char* filename, PxFileBuf::OpenMode mode);
			PxFileBuf* createMemoryReadStream(const void* mem, uint32_t len);
			PxFileBuf* createMemoryWriteStream(uint32_t alignment = 0);
			const void* getMemoryWriteBuffer(PxFileBuf& stream, uint32_t& len);
			void releaseMemoryReadStream(PxFileBuf& stream);
			void releaseMemoryWriteStream(PxFileBuf& stream);

			uint32_t getNbModules();
			Module** getModules();
			ModuleIntl** getInternalModules();
			void releaseModule(Module* module);
			ModuleIntl* getInternalModuleByName(const char* name);

			uint32_t forceLoadAssets();

			// Asset creation/release, dispatched through the registered
			// authorable-object types.
			const char* checkAssetName(AuthorableObjectIntl& ao, const char* inName, ApexSimpleString& autoNameStorage);
			AssetAuthoring* createAssetAuthoring(const char* authorTypeName);
			AssetAuthoring* createAssetAuthoring(const char* authorTypeName, const char* name);
			AssetAuthoring* createAssetAuthoring(NvParameterized::Interface* params, const char* name);
			Asset* createAsset(AssetAuthoring&, const char*);
			Asset* createAsset(NvParameterized::Interface* params, const char* name);
			virtual Asset* createAsset(const char* opaqueMeshName, UserOpaqueMesh* om);
			void releaseAsset(Asset& nxasset);
			void releaseAssetAuthoring(AssetAuthoring&);

			/* ApexSDKIntl */
			void reportError(PxErrorCode::Enum code, const char* file, int line, const char* functionName, const char* message, ...);
			void* getTempMemory(uint32_t size);
			void releaseTempMemory(void* data);
			NvParameterized::Traits* getParameterizedTraits()
			{
				return mParameterizedTraits;
			}
			uint32_t getCookingVersion() const
			{
				return cookingVersion;
			}
			void registerExternalModule(Module* nx, ModuleIntl* ni)
			{
				registerModule(nx, ni);
			}

			// Authorable-object type registry (name -> AuthorableObjectIntl).
			AuthObjTypeID registerAuthObjType(const char*, ResID nsid);
			AuthObjTypeID registerAuthObjType(const char*, AuthorableObjectIntl* authObjPtr);
			AuthObjTypeID registerNvParamAuthType(const char*, AuthorableObjectIntl* authObjPtr);
			void unregisterAuthObjType(const char*);
			void unregisterNvParamAuthType(const char*);
			AuthorableObjectIntl* getAuthorableObject(const char*);
			AuthorableObjectIntl* getParamAuthObject(const char*);
			bool getAuthorableObjectNames(const char** authTypeNames, uint32_t& outCount, uint32_t inCount);

			// Accessors for the well-known resource namespaces created by the SDK.
			ResID getMaterialNameSpace() const
			{
				return mMaterialNS;
			}
			ResID getOpaqueMeshNameSpace() const
			{
				return mOpaqueMeshNS;
			}
			ResID getCustomVBNameSpace() const
			{
				return mCustomVBNS;
			}
			ResID getApexMeshNameSpace();
			ResID getCollisionGroupNameSpace() const
			{
				return mCollGroupNS;
			}
			ResID getCollisionGroup128NameSpace() const
			{
				return mCollGroup128NS;
			}
			ResID getCollisionGroup64NameSpace() const
			{
				return mCollGroup64NS;
			}
			ResID getCollisionGroupMaskNameSpace() const
			{
				return mCollGroupMaskNS;
			}
			ResID getPhysicalMaterialNameSpace() const
			{
				return mPhysMatNS;
			}
			ResID getAuthorableTypesNameSpace() const
			{
				return mObjTypeNS;
			}

			void releaseObjectDesc(void*);
			// Prefers the wrapper (when one was installed) over the raw
			// user-supplied render resource manager.
			UserRenderResourceManager* getUserRenderResourceManager() const
			{
				return renderResourceManagerWrapper ? renderResourceManagerWrapper : renderResourceManager;
			}

			const char* getWireframeMaterial();
			const char* getSolidShadedMaterial();
			virtual pvdsdk::ApexPvdClient* getApexPvdClient();
			virtual profile::PxProfileZone * getProfileZone();
			virtual profile::PxProfileZoneManager * getProfileZoneManager();

			virtual void setEnableApexStats(bool enableApexStats)
			{
				mEnableApexStats = enableApexStats;
			}

			virtual void setEnableConcurrencyCheck(bool enableConcurrencyCheck)
			{
				mEnableConcurrencyCheck = enableConcurrencyCheck;
			}

			virtual bool isConcurrencyCheckEnabled()
			{
				return mEnableConcurrencyCheck;
			}

			bool isApexStatsEnabled() const
			{
				return mEnableApexStats;
			}

#if PX_WINDOWS_FAMILY
			virtual const char* getAppGuid();
#endif

#if APEX_CUDA_SUPPORT
			virtual PhysXGpuIndicator* registerPhysXIndicatorGpuClient();
			virtual void unregisterPhysXIndicatorGpuClient(PhysXGpuIndicator* gpuIndicator);
#endif

			ApexSDKCachedData& getCachedData() const
			{
				return *mCachedData;
			}

			// NvParameterized serialization helpers.
			NvParameterized::Serializer* createSerializer(NvParameterized::Serializer::SerializeType type);
			NvParameterized::Serializer* createSerializer(NvParameterized::Serializer::SerializeType type, NvParameterized::Traits* traits);

			NvParameterized::Serializer::SerializeType getSerializeType(const void* data, uint32_t dlen);
			NvParameterized::Serializer::SerializeType getSerializeType(PxFileBuf& stream);

			NvParameterized::Serializer::ErrorType getSerializePlatform(const void* data, uint32_t dlen, NvParameterized::SerializePlatform& platform);
			NvParameterized::Serializer::ErrorType getSerializePlatform(PxFileBuf& stream, NvParameterized::SerializePlatform& platform);
			void getCurrentPlatform(NvParameterized::SerializePlatform& platform) const;
			bool getPlatformFromString(const char* name, NvParameterized::SerializePlatform& platform) const;
			const char* getPlatformName(const NvParameterized::SerializePlatform& platform) const;

			NvParameterized::Interface* getDebugColorParams() const
			{
				return mDebugColorParams;
			}
			void updateDebugColorParams(const char* color, uint32_t val);

			bool getRMALoadMaterialsLazily();

			// applications can append strings to the APEX DLL filenames
			const char* getCustomDllNamePostfix() const
			{
				return mCustomDllNamePostfix.c_str();
			}

			virtual ModuleIntl *getInternalModule(Module *module);
			virtual Module *getModule(ModuleIntl *module);

			// URR = updateRenderResources depth tracking (see mURRdepthTLSslot).
			virtual void enterURR();
			virtual void leaveURR();
			virtual void checkURR();

		protected:
			virtual ~ApexSDKImpl();
			void registerModule(Module*, ModuleIntl*);
			void release();

		private:

			void debugAsset(Asset* asset, const char* name);

			ApexSimpleString mDllLoadPath;
			ApexSimpleString mCustomDllNamePostfix;

			ApexSimpleString mWireframeMaterial;
			ApexSimpleString mSolidShadedMaterial;

#if PX_WINDOWS_FAMILY
			ApexSimpleString mAppGuid;
#endif

			ResourceList* mAuthorableObjects;

			physx::Array<Module*> modules;
			physx::Array<Module*> moduleListForAPI;
			physx::Array<ModuleIntl*> imodules;
			physx::Array<Scene*> mScenes;

			PhysXObjectDescIntl* createObjectDesc(const Actor*, const void*);
			PhysXObjectDescIntl* getGenericPhysXObjectInfo(const void*) const;

			enum { DescHashSize = 1024U * 32U };

			uint32_t mBatchSeedSize;

			// Guards mPhysXObjDescs/mPhysXObjDescHash/mDescFreeList below.
			mutable nvidia::Mutex mPhysXObjDescsLock;

			physx::Array<ApexPhysXObjectDesc> mPhysXObjDescs;
			uint32_t mPhysXObjDescHash[DescHashSize];
			uint32_t mDescFreeList;
			ResID mObjTypeNS;
			ResID mNxParamObjTypeNS;

			char* mErrorString;

			//temp memories:
			struct TempMemory
			{
				TempMemory() : memory(NULL), size(0), used(0) {}
				void* memory;
				uint32_t size;
				uint32_t used;
			};
			physx::Array<TempMemory> mTempMemories;
			uint32_t mNumTempMemoriesActive; //temp memories are a LIFO, mNumTempMemoriesActive <= mTempMemories.size()
			nvidia::Mutex mTempMemoryLock;
			nvidia::Mutex mReportErrorLock;

			PxFoundation* foundation;

#if PX_PHYSICS_VERSION_MAJOR == 0
			PxCpuDispatcher* mApexThreadPool;
			physx::Array<PxCpuDispatcher*> mUserAllocThreadPools;
#else
			PxPhysics* physXSDK;
			PxCooking* cooking;
#endif // PX_PHYSICS_VERSION_MAJOR == 0

			UserRenderResourceManager* renderResourceManager;
			UserRenderResourceManager* renderResourceManagerWrapper;
			ApexResourceProvider* apexResourceProvider;
			uint32_t physXsdkVersion;
			uint32_t cookingVersion;

			NvParameterized::Traits* mParameterizedTraits;

			ResID mMaterialNS;
			ResID mOpaqueMeshNS;
			ResID mCustomVBNS;
			ResID mCollGroupNS;
			ResID mCollGroup128NS;
			ResID mCollGroup64NS;
			ResID mCollGroupMaskNS;
			ResID mPhysMatNS;

			ModuleFramework frameworkModule;

			ApexSDKCachedDataImpl* mCachedData;

			DebugColorParamsEx* mDebugColorParams;

			bool mRMALoadMaterialsLazily;

#ifdef PHYSX_PROFILE_SDK
			pvdsdk::ApexPvdClient* mApexPvdClient;
			profile::PxProfileZone *mProfileZone;
#endif

			uint32_t mURRdepthTLSslot;

			bool mEnableApexStats;
			bool mEnableConcurrencyCheck;
		};


		///////////////////////////////////////////////////////////////////////////////
		// ApexRenderMeshAssetAuthoring
		///////////////////////////////////////////////////////////////////////////////
		// Didn't use ApexAuthorableObject<> here because there were enough differences
		// from a "normal" asset to make it difficult. I could probably bend the will
		// of the APEX render mesh to get it to comply, but it wasn't worth it at the time.
		// -LRR

		// Authorable-object entry for render mesh assets: registers the type
		// (and its parameterized class name) with the named resource provider
		// and routes asset / asset-authoring creation and release.
		class RenderMeshAuthorableObject : public AuthorableObjectIntl
		{
		public:
			RenderMeshAuthorableObject(ModuleIntl* m, ResourceList& list, const char* parameterizedName) :
				AuthorableObjectIntl(m, list, ApexRenderMeshAsset::getClassName())
			{
				// Register the proper authorable object type in the NRP (override)
				mAOResID = GetInternalApexSDK()->getInternalResourceProvider()->createNameSpace(mAOTypeName.c_str());
				mAOPtrResID = GetInternalApexSDK()->registerAuthObjType(mAOTypeName.c_str(), this);

				mParameterizedName = parameterizedName;

				// Register the parameterized name in the NRP to point to this authorable object
				GetInternalApexSDK()->registerNvParamAuthType(mParameterizedName.c_str(), this);
			}

			Asset* createAsset(AssetAuthoring& author, const char* name);
			Asset* createAsset(NvParameterized::Interface* params, const char* name);

			void releaseAsset(Asset& nxasset);

			AssetAuthoring* createAssetAuthoring();
			AssetAuthoring* createAssetAuthoring(const char* name);
			AssetAuthoring* createAssetAuthoring(NvParameterized::Interface* params, const char* name);
			void releaseAssetAuthoring(AssetAuthoring& nxauthor);

			uint32_t forceLoadAssets();
			virtual uint32_t getAssetCount()
			{
				return mAssets.getSize();
			}
			// Copies all tracked asset pointers into outAssets.  Fails (returns
			// false and sets outAssetCount to 0) when outAssets is NULL or
			// inAssetCount is smaller than the tracked asset count.
			virtual bool getAssetList(Asset** outAssets, uint32_t& outAssetCount, uint32_t inAssetCount)
			{
				PX_ASSERT(outAssets);
				PX_ASSERT(inAssetCount >= mAssets.getSize());

				if (!outAssets || inAssetCount < mAssets.getSize())
				{
					outAssetCount = 0;
					return false;
				}

				outAssetCount = mAssets.getSize();
				for (uint32_t i = 0; i < mAssets.getSize(); i++)
				{
					ApexRenderMeshAsset* asset = static_cast<ApexRenderMeshAsset*>(mAssets.getResource(i));
					outAssets[i] = static_cast<Asset*>(asset);
				}

				return true;
			}


			ResID getResID()
			{
				return mAOResID;
			}

			ApexSimpleString& getName()
			{
				return mAOTypeName;
			}

			// Resource methods
			void release();
			void destroy();
		};

	}
} // end
namespace nvidia::apex + + +#endif // APEX_SDK_IMPL_H diff --git a/APEX_1.4/framework/include/ApexScene.h b/APEX_1.4/framework/include/ApexScene.h new file mode 100644 index 00000000..949a25ac --- /dev/null +++ b/APEX_1.4/framework/include/ApexScene.h @@ -0,0 +1,585 @@ +/* + * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved. + * + * NVIDIA CORPORATION and its licensors retain all intellectual property + * and proprietary rights in and to this software, related documentation + * and any modifications thereto. Any use, reproduction, disclosure or + * distribution of this software and related documentation without an express + * license agreement from NVIDIA CORPORATION is strictly prohibited. + */ + + +#ifndef APEX_SCENE_H +#define APEX_SCENE_H + +#include "Apex.h" +#include "ApexResource.h" +#include "PsUserAllocated.h" +#include "ApexSDKImpl.h" +#include "SceneIntl.h" +#include "ModuleIntl.h" +#include "ApexContext.h" + +#include "PsMutex.h" +#include "PsThread.h" +#include "PairFilter.h" + +#if PX_PHYSICS_VERSION_MAJOR == 3 +#include "PxScene.h" +#include "PxRenderBuffer.h" +#endif + +#include "MirrorSceneImpl.h" +#include "ApexSceneUserNotify.h" + +#include "PsSync.h" +#include "PxTask.h" +#include "PxTaskManager.h" + +#include "ApexGroupsFiltering.h" +#include "ApexRWLockable.h" +#include "ReadCheck.h" +#include "WriteCheck.h" + +class PxDefaultSpuDispatcher; + + +namespace nvidia +{ +namespace apex +{ + +class ApexCudaTestManager; +class ApexCudaProfileManager; + +// Tasks forward declarations +class PhysXSimulateTask; +class PhysXBetweenStepsTask; + +#if APEX_DURING_TICK_TIMING_FIX +class DuringTickCompleteTask; +#endif + +class CheckResultsTask; +class FetchResultsTask; + + +class ApexScene : public SceneIntl, public ApexContext, public ApexRWLockable, public UserAllocated +{ +public: + APEX_RW_LOCKABLE_BOILERPLATE + + /* == Public Scene interface == */ + void simulate(float elapsedTime, + bool finalStep = true, + PxBaseTask *completionTask = 
NULL, + void* scratchMemBlock = 0, + uint32_t scratchMemBlockSize = 0); + + bool fetchResults(bool block, uint32_t* errorState); + void fetchPhysXStats(); + void fetchApexStats(); + bool checkResults(bool block) const; + + void initDebugColorParams(); + void updateDebugColorParams(const char* color, uint32_t val); + NvParameterized::Interface* getDebugRenderParams() const; + NvParameterized::Interface* getModuleDebugRenderParams(const char* name) const; + + uint32_t allocViewMatrix(ViewMatrixType::Enum); + uint32_t allocProjMatrix(ProjMatrixType::Enum); + uint32_t getNumViewMatrices() const; + uint32_t getNumProjMatrices() const; + + void setViewMatrix(const PxMat44& viewTransform, const uint32_t viewID = 0); + PxMat44 getViewMatrix(const uint32_t viewID = 0) const; + void setProjMatrix(const PxMat44& projTransform, const uint32_t projID = 0); + PxMat44 getProjMatrix(const uint32_t projID = 0) const; + + void setUseViewProjMatrix(const uint32_t viewID = 0, const uint32_t projID = 0); +#if 0 //lionel: work in progress + const PxMat44& getViewProjMatrix() const; +#endif + void setViewParams(const PxVec3& eyePosition, + const PxVec3& eyeDirection, + const PxVec3& worldUpDirection, + const uint32_t viewID = 0); + + void setProjParams(float nearPlaneDistance, + float farPlaneDistance, + float fieldOfViewDegree, + uint32_t viewportWidth, + uint32_t viewportHeight, + const uint32_t projID = 0); + + PxVec3 getEyePosition(const uint32_t viewID = 0) const; + PxVec3 getEyeDirection(const uint32_t viewID = 0) const; + + +#if 0 //lionel: work in progress + const PxMat44& buildViewMatrix(const uint32_t viewID = 0); + const PxMat44& buildProjMatrix(const uint32_t projID = 0); + //viewportToWorld?, worldToViewport? 
(and screenspace) + const SceneCalculator* const calculate() + { + return mSceneCalculator; + } +#endif + + float getElapsedTime() const + { + return mElapsedTime; + } + + const SceneStats* getStats(void) const; + void createApexStats(void); + void destroyApexStats(void); + void setApexStatValue(int32_t index, StatValue dataVal); + + + bool isSimulating() const + { + return mSimulating; + } + bool physXElapsedTime(float& dt) const + { + dt = mPxLastElapsedTime; + return mPxStepWasValid; + } + float getPhysXSimulateTime() const + { + return mPhysXSimulateTime; + } + + PxVec3 getGravity() const + { +#if PX_PHYSICS_VERSION_MAJOR == 3 + if (mPhysXScene) + { + mPhysXScene->lockRead(); + PxVec3 ret = mPhysXScene->getGravity(); + mPhysXScene->unlockRead(); + return ret; + } +#endif + return mGravity; + } + + void setGravity(const PxVec3& gravity) + { + mGravity = gravity; +#if PX_PHYSICS_VERSION_MAJOR == 3 + if (mPhysXScene) + { + mPhysXScene->lockWrite(); + mPhysXScene->setGravity(gravity); + mPhysXScene->unlockWrite(); + } +#endif + } + + void release() + { + mApexSDK->releaseScene(this); + } + + /* == Public Context interface == */ + ApexContext* getApexContext() + { + return DYNAMIC_CAST(ApexContext*)(this); + } + void removeAllActors(); + RenderableIterator* createRenderableIterator() + { + return ApexContext::createRenderableIterator(); + } + void releaseRenderableIterator(RenderableIterator& iter) + { + ApexContext::releaseRenderableIterator(iter); + } + uint32_t addActor(ApexActor& actor, ApexActor* actorPtr = NULL); + + /* == Renderable interface == */ + void lockRenderResources(); + void unlockRenderResources(); + void updateRenderResources(bool rewriteBuffers = false, void* userRenderData = 0); + void dispatchRenderResources(UserRenderer& renderer); + PxBounds3 getBounds() const; + + void visualize(); + + /* == SceneIntl interface == */ + void moduleReleased(ModuleSceneIntl&); + virtual PxTaskManager* getTaskManager() const + { + READ_ZONE(); + return 
mTaskManager; + }; + + uint32_t getTotalElapsedMS() const + { + return mTotalElapsedMS; + } + + bool isFinalStep() const + { + return mFinalStep; + } + +#if PX_PHYSICS_VERSION_MAJOR == 3 + virtual void lockRead(const char *fileName,uint32_t lineno) + { + if (mPhysXScene != NULL) + { + mPhysXScene->lockRead(fileName,lineno); + } + } + + virtual void lockWrite(const char *fileName,uint32_t lineno) + { + if (mPhysXScene != NULL) + { + mPhysXScene->lockWrite(fileName,lineno); + } + } + + + virtual void unlockRead () + { + if (mPhysXScene != NULL) + { + mPhysXScene->unlockRead(); + } + } + + virtual void unlockWrite () + { + if (mPhysXScene != NULL) + { + mPhysXScene->unlockWrite(); + } + } + + virtual void addActorPair(PxActor *actor0,PxActor *actor1); + virtual void removeActorPair(PxActor *actor0,PxActor *actor1); + virtual bool findActorPair(PxActor *actor0,PxActor *actor1) const; +#endif + + virtual void addBoundingBox(const PxBounds3& bounds, UserBoundingBoxFlags::Enum flags) + { + WRITE_ZONE(); + mBBs.pushBack(UserDefinedBoundingBox(bounds, flags)); + } + + virtual const PxBounds3 getBoundingBox(const uint32_t index) const + { + READ_ZONE(); + PX_ASSERT(index < mBBs.size()); + if(index < mBBs.size()) + { + return mBBs[index].bb; + } + return PxBounds3(PxVec3(0.0f), PxVec3(0.0f)); + } + + virtual UserBoundingBoxFlags::Enum getBoundingBoxFlags(const uint32_t index) const + { + READ_ZONE(); + PX_ASSERT(index < mBBs.size()); + if(index < mBBs.size()) + { + return mBBs[index].flags; + } + return UserBoundingBoxFlags::NONE; + } + + virtual uint32_t getBoundingBoxCount() const + { + READ_ZONE(); + return mBBs.size(); + } + + virtual void removeBoundingBox(const uint32_t index) + { + WRITE_ZONE(); + PX_ASSERT(index < mBBs.size()); + if(index < mBBs.size()) + { + mBBs.remove(index); + } + } + + virtual void removeAllBoundingBoxes() + { + WRITE_ZONE(); + mBBs.clear(); + } + + void allocateTasks(); + void freeTasks(); + void setUseDebugRenderable(bool state); + + 
ApexScene(const SceneDesc& desc, ApexSDKImpl* sdk); + ~ApexScene(); + + void moduleCreated(ModuleIntl&); + void destroy(); + + const PxRenderBuffer* getRenderBuffer() const; + const PxRenderBuffer* getRenderBufferScreenSpace() const; + +#if PX_PHYSICS_VERSION_MAJOR == 3 + + virtual MirrorScene *createMirrorScene(nvidia::apex::Scene &mirrorScene, + MirrorScene::MirrorFilter &mirrorFilter, + float mirrorStaticDistance, + float mirrorDynamicDistance, + float mirrorDistanceThreshold); + + + void setPhysXScene(PxScene* s); + PxScene* getPhysXScene() const + { + READ_ZONE(); + return mPhysXScene; + } + + PxScene* mPhysXScene; + + mutable PhysXRenderBuffer mRenderBuffer; + mutable PhysXRenderBuffer mRenderBufferScreenSpace; + + void addModuleUserNotifier(physx::PxSimulationEventCallback& notify) + { + mUserNotify.addModuleNotifier(notify); + } + void removeModuleUserNotifier(physx::PxSimulationEventCallback& notify) + { + mUserNotify.removeModuleNotifier(notify); + } + void addModuleUserContactModify(physx::PxContactModifyCallback& contactModify) + { + mUserContactModify.addModuleContactModify(contactModify); + } + void removeModuleUserContactModify(physx::PxContactModifyCallback& contactModify) + { + mUserContactModify.removeModuleContactModify(contactModify); + } + + ApexSceneUserNotify mUserNotify; + ApexSceneUserContactModify mUserContactModify; + + PhysX3Interface* getApexPhysX3Interface() const + { + return mPhysX3Interface; + } + + PhysX3Interface* mPhysX3Interface; +#endif + + ModuleSceneIntl* getInternalModuleScene(const char* moduleName); + + PX_INLINE void* getCudaTestManager() const + { + return mCudaTestManager; + } + PX_INLINE ApexCudaTestManager& getApexCudaTestManager() + { + return *mCudaTestManager; + } + PX_INLINE void* getCudaProfileManager() const + { + return mCudaProfileManager; + } + bool isUsingCuda() const + { + return mUseCuda; + } + + virtual void setCudaKernelCheckEnabled(bool enabled) + { + mCudaKernelCheckEnabled = enabled; + } + virtual 
bool getCudaKernelCheckEnabled() const + { + return mCudaKernelCheckEnabled; + } + + float mElapsedTime; + ApexSDKImpl* mApexSDK; + Array<ModuleSceneIntl*> mModuleScenes; + RenderDebugInterface* mSceneRenderDebug; + + uint32_t mOrigSceneMaxIter; + float mOrigSceneSubstepSize; + + PxTaskManager* mTaskManager; + + Mutex mPhysXLock; + + bool mSimulating; + bool mUseDebugRenderable; + float mUsedResource; + float mSumBenefit; + mutable Sync mFetchResultsReady; + Sync mSimulationComplete; + + PhysXSimulateTask* mPhysXSimulate; + PhysXBetweenStepsTask* mBetweenstepTasks; +#if APEX_DURING_TICK_TIMING_FIX + DuringTickCompleteTask* mDuringTickComplete; +#endif + CheckResultsTask* mCheckResults; + FetchResultsTask* mFetchResults; + + uint32_t mTotalElapsedMS; + float mTimeRemainder; + float mPhysXRemainder; + float mPhysXSimulateTime; + + float mPxLastElapsedTime; + float mPxAccumElapsedTime; + bool mPxStepWasValid; + bool mFinalStep; + + bool mUseCuda; + + + static double mQPC2MilliSeconds; + static PX_INLINE float ticksToMilliseconds(uint64_t t0, uint64_t t1) + { + return (float)((double)(t1 - t0) * mQPC2MilliSeconds); + } + + uint64_t mApexSimulateTickCount; + uint64_t mPhysXSimulateTickCount; + + static const uint32_t IgnoredSeed = UINT32_MAX; + + uint32_t getSeed(); + +private: + void updateGravity(); + + SceneStats mApexSceneStats; + + mutable ApexCudaTestManager* mCudaTestManager; + mutable ApexCudaProfileManager* mCudaProfileManager; + bool mCudaKernelCheckEnabled; + + /* transforms info */ +#if 0 //lionel: work in progress + void getColMajColVecArray(const PxMat44& colVecMat44, float* const result); + void getColVecMat44(const float* const colMajColVecArray, PxMat44& result); + void multiplyColMajColVecArray(const float* const fromSpace, const float* const toSpace, float* const result); + float mViewColMajColVecArray[16]; + float mProjColMajColVecArray[16]; + float mViewProjColMajColVecArray[16]; +#endif + struct ViewMatrixProperties : public UserAllocated + { + 
ViewMatrixProperties() {} + ~ViewMatrixProperties() {} + ViewMatrixProperties(PxMat44 v, bool l) : + viewMatrix(v), isLookAt(l), pvdCreated(false) {} + + PxMat44 viewMatrix; + bool isUserCustomized; + bool isLookAt; + bool pvdCreated; + + ApexSimpleString cameraName; + }; + + struct ViewMatrixLookAt : public ViewMatrixProperties + { + ViewMatrixLookAt() {} + ~ViewMatrixLookAt() {} + ViewMatrixLookAt(PxMat44 v, bool l, bool r) : + ViewMatrixProperties(v, l), isRightHand(r) {} + + bool isRightHand; + }; + + struct ProjMatrixProperties : public UserAllocated + { + ProjMatrixProperties() {} + ~ProjMatrixProperties() {} + ProjMatrixProperties(PxMat44 p, bool u, bool f) : + projMatrix(p), isUserCustomized(u), isPerspectiveFOV(f) {} + + PxMat44 projMatrix; + bool isUserCustomized; + bool isPerspectiveFOV; + }; + + struct ProjMatrixUserCustomized : public ProjMatrixProperties + { + ProjMatrixUserCustomized() {} + ~ProjMatrixUserCustomized() {} + ProjMatrixUserCustomized(PxMat44 p, bool u, bool f, float near, float far, float fov, uint32_t w, uint32_t h) : + ProjMatrixProperties(p, u, f), nearPlaneDistance(near), farPlaneDistance(far), fieldOfViewDegree(fov), viewportWidth(w), viewportHeight(h) {} + + float nearPlaneDistance; + float farPlaneDistance; + float fieldOfViewDegree; + uint32_t viewportWidth; //only one instance? + uint32_t viewportHeight; //only one instance? 
+ }; + + struct ProjMatrixPerspectiveFOV : public ProjMatrixProperties + { + ProjMatrixPerspectiveFOV() {} + ~ProjMatrixPerspectiveFOV() {} + ProjMatrixPerspectiveFOV(PxMat44 p, bool u, bool f, bool i) : + ProjMatrixProperties(p, u, f), isZinvert(i) {} + + bool isZinvert; + }; + + Array<ViewMatrixProperties*> mViewMatrices; + Array<ProjMatrixProperties*> mProjMatrices; + PxMat44 mViewProjMatrix; +#if 0 //lionel: work in progress + uint32_t mCurrentViewID; + uint32_t mCurrentProjID; + SceneCalculator* mSceneCalculator; + friend class SceneCalculator; + //class SceneCalculator + //{ + //public: + // SceneCalculator():s(NULL) {} + // ~SceneCalculator() {s=NULL;} + // float distanceFromEye(PxVec3 to) {return (-s->getEyePosition(0) + to).magnitude();} //lionel: use currentIDs when multiple matrices allowed + // friend class ApexScene; + //private: + // void construct(const physx::ApexScene * scene) {s = scene;} + // void destruct() {s = NULL;} + // const physx::ApexScene * s; + //}; +#endif + + DebugRenderParams* mDebugRenderParams; + DebugColorParams* mDebugColorParams; + HashMap<const char*, RENDER_DEBUG::DebugColors::Enum> mColorMap; + + PxVec3 mGravity; + + PairFilter mPairFilter; + + struct UserDefinedBoundingBox + { + PxBounds3 bb; + UserBoundingBoxFlags::Enum flags; + + UserDefinedBoundingBox(const PxBounds3& _bb, UserBoundingBoxFlags::Enum _flags) : + bb(_bb), flags(_flags) {} + }; + Array<UserDefinedBoundingBox> mBBs; +}; + + +} +} // end namespace nvidia::apex + +#endif // APEX_SCENE_H diff --git a/APEX_1.4/framework/include/ApexSceneTasks.h b/APEX_1.4/framework/include/ApexSceneTasks.h new file mode 100644 index 00000000..b24bfba2 --- /dev/null +++ b/APEX_1.4/framework/include/ApexSceneTasks.h @@ -0,0 +1,148 @@ +/* + * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved. 
+ * + * NVIDIA CORPORATION and its licensors retain all intellectual property + * and proprietary rights in and to this software, related documentation + * and any modifications thereto. Any use, reproduction, disclosure or + * distribution of this software and related documentation without an express + * license agreement from NVIDIA CORPORATION is strictly prohibited. + */ + + +#ifndef APEX_SCENE_TASKS_H +#define APEX_SCENE_TASKS_H + +#include "ApexScene.h" + +#include "PsAllocator.h" + +namespace nvidia +{ +namespace apex +{ + +class PhysXSimulateTask : public PxTask, public UserAllocated +{ +public: + PhysXSimulateTask(ApexScene& scene, CheckResultsTask& checkResultsTask); + ~PhysXSimulateTask(); + + const char* getName() const; + void run(); + void setElapsedTime(float elapsedTime); + void setFollowingTask(PxBaseTask* following); + +#if PX_PHYSICS_VERSION_MAJOR == 3 + void setScratchBlock(void* scratchBlock, uint32_t size) + { + mScratchBlock = scratchBlock; + mScratchBlockSize = size; + } +#endif + +protected: + ApexScene* mScene; + float mElapsedTime; + + PxBaseTask* mFollowingTask; + CheckResultsTask& mCheckResultsTask; + +#if PX_PHYSICS_VERSION_MAJOR == 3 + void* mScratchBlock; + uint32_t mScratchBlockSize; +#endif + +private: + PhysXSimulateTask& operator=(const PhysXSimulateTask&); +}; + + + +class CheckResultsTask : public PxTask, public UserAllocated +{ +public: + CheckResultsTask(ApexScene& scene) : mScene(&scene) {} + + const char* getName() const; + void run(); + +protected: + ApexScene* mScene; +}; + + + +class FetchResultsTask : public PxTask, public UserAllocated +{ +public: + FetchResultsTask(ApexScene& scene) + : mScene(&scene) + , mFollowingTask(NULL) + {} + + const char* getName() const; + void run(); + + /** + * \brief Called by dispatcher after Task has been run. + * + * If you re-implement this method, you must call this base class + * version before returning. 
+ */ + void release(); + + void setFollowingTask(PxBaseTask* following); + +protected: + ApexScene* mScene; + PxBaseTask* mFollowingTask; +}; + + +/** +* This task is solely meant to record the duration of APEX's "during tick" tasks. +* It could be removed and replaced with only the check results task if it is found +* to be a performance issue. +*/ +#if APEX_DURING_TICK_TIMING_FIX +class DuringTickCompleteTask : public PxTask, public UserAllocated +{ +public: + DuringTickCompleteTask(ApexScene& scene) : mScene(&scene) {} + + const char* getName() const; + void run(); + +protected: + ApexScene* mScene; +}; +#endif + +/* This tasks loops all intermediate steps until the final fetchResults can be called */ +class PhysXBetweenStepsTask : public PxLightCpuTask, public UserAllocated +{ +public: + PhysXBetweenStepsTask(ApexScene& scene) : mScene(scene), mSubStepSize(0.0f), + mNumSubSteps(0), mSubStepNumber(0), mLast(NULL) {} + + const char* getName() const; + void run(); + void setSubstepSize(float substepSize, uint32_t numSubSteps); + void setFollower(uint32_t substepNumber, PxTask* last); + +protected: + ApexScene& mScene; + float mSubStepSize; + uint32_t mNumSubSteps; + + uint32_t mSubStepNumber; + PxTask* mLast; + +private: + PhysXBetweenStepsTask& operator=(const PhysXBetweenStepsTask&); +}; + +} +} + +#endif diff --git a/APEX_1.4/framework/include/ApexSceneUserNotify.h b/APEX_1.4/framework/include/ApexSceneUserNotify.h new file mode 100644 index 00000000..6cf54241 --- /dev/null +++ b/APEX_1.4/framework/include/ApexSceneUserNotify.h @@ -0,0 +1,156 @@ +/* + * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved. + * + * NVIDIA CORPORATION and its licensors retain all intellectual property + * and proprietary rights in and to this software, related documentation + * and any modifications thereto. 
Any use, reproduction, disclosure or + * distribution of this software and related documentation without an express + * license agreement from NVIDIA CORPORATION is strictly prohibited. + */ + + +#ifndef APEX_SCENE_USER_NOTIFY_H +#define APEX_SCENE_USER_NOTIFY_H + +#include "ApexDefs.h" + +#if PX_PHYSICS_VERSION_MAJOR == 3 + +#include <ApexUsingNamespace.h> + +#include <PxSimulationEventCallback.h> +#include <PxContactModifyCallback.h> + +#include "PxSimpleTypes.h" +#include <PsArray.h> +#include <PsAllocator.h> + + +namespace nvidia +{ +namespace apex +{ + +class ApexSceneUserNotify : public physx::PxSimulationEventCallback +{ +public: + ApexSceneUserNotify() : mAppNotify(NULL), mBatchAppNotify(false) {} + virtual ~ApexSceneUserNotify(); + + void addModuleNotifier(physx::PxSimulationEventCallback& notify); + void removeModuleNotifier(physx::PxSimulationEventCallback& notify); + + void setApplicationNotifier(physx::PxSimulationEventCallback* notify) + { + mAppNotify = notify; + } + PxSimulationEventCallback* getApplicationNotifier() const + { + return mAppNotify; + } + + void setBatchAppNotify(bool enable) + { + mBatchAppNotify = enable; + } + void playBatchedNotifications(); + +private: + // from PxSimulationEventCallback + virtual void onConstraintBreak(physx::PxConstraintInfo* constraints, uint32_t count); + virtual void onWake(PxActor** actors, uint32_t count); + virtual void onSleep(PxActor** actors, uint32_t count); + virtual void onContact(const physx::PxContactPairHeader& pairHeader, const physx::PxContactPair* pairs, uint32_t nbPairs); + virtual void onTrigger(physx::PxTriggerPair* pairs, uint32_t count); + virtual void onAdvance(const PxRigidBody*const* bodyBuffer, const PxTransform* poseBuffer, const PxU32 count); + +private: + Array<physx::PxSimulationEventCallback*> mModuleNotifiers; + PxSimulationEventCallback* mAppNotify; + + + // for batch notification + bool mBatchAppNotify; + + // onConstraintBreak + Array<physx::PxConstraintInfo> 
mBatchedBreakNotifications; + + // onContact + struct BatchedContactNotification + { + BatchedContactNotification(const physx::PxContactPairHeader& _pairHeader, const physx::PxContactPair* _pairs, uint32_t _nbPairs) + { + pairHeader = _pairHeader; + nbPairs = _nbPairs; + + pairs = (physx::PxContactPair *)PX_ALLOC(sizeof(physx::PxContactPair) * nbPairs, PX_DEBUG_EXP("BatchedContactNotifications")); + PX_ASSERT(pairs != NULL); + for (uint32_t i=0; i<nbPairs; i++) + { + pairs[i] = _pairs[i]; + } + } + + ~BatchedContactNotification() + { + if (pairs) + { + PX_FREE(pairs); + pairs = NULL; + } + } + + physx::PxContactPairHeader pairHeader; + physx::PxContactPair * pairs; + uint32_t nbPairs; + }; + Array<BatchedContactNotification> mBatchedContactNotifications; + Array<uint32_t> mBatchedContactStreams; + + // onWake/onSleep + struct SleepWakeBorders + { + SleepWakeBorders(uint32_t s, uint32_t c, bool sleep) : start(s), count(c), sleepEvents(sleep) {} + uint32_t start; + uint32_t count; + bool sleepEvents; + }; + Array<SleepWakeBorders> mBatchedSleepWakeEventBorders; + Array<PxActor*> mBatchedSleepEvents; + Array<PxActor*> mBatchedWakeEvents; + + // onTrigger + Array<physx::PxTriggerPair> mBatchedTriggerReports; +}; + + +class ApexSceneUserContactModify : public PxContactModifyCallback +{ +public: + ApexSceneUserContactModify(); + virtual ~ApexSceneUserContactModify(); + + void addModuleContactModify(physx::PxContactModifyCallback& contactModify); + void removeModuleContactModify(physx::PxContactModifyCallback& contactModify); + + void setApplicationContactModify(physx::PxContactModifyCallback* contactModify); + PxContactModifyCallback* getApplicationContactModify() const + { + return mAppContactModify; + } + +private: + // from PxContactModifyCallback + virtual void onContactModify(physx::PxContactModifyPair* const pairs, uint32_t count); + +private: + Array<physx::PxContactModifyCallback*> mModuleContactModify; + PxContactModifyCallback* mAppContactModify; +}; + +} +} // 
namespace nvidia::apex + +#endif // PX_PHYSICS_VERSION_MAJOR == 3 + +#endif
\ No newline at end of file diff --git a/APEX_1.4/framework/include/ApexStubPxProfileZone.h b/APEX_1.4/framework/include/ApexStubPxProfileZone.h new file mode 100644 index 00000000..4e7cc2a6 --- /dev/null +++ b/APEX_1.4/framework/include/ApexStubPxProfileZone.h @@ -0,0 +1,81 @@ +/* + * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved. + * + * NVIDIA CORPORATION and its licensors retain all intellectual property + * and proprietary rights in and to this software, related documentation + * and any modifications thereto. Any use, reproduction, disclosure or + * distribution of this software and related documentation without an express + * license agreement from NVIDIA CORPORATION is strictly prohibited. + */ + + +#ifndef APEX_STUB_PX_PROFILE_ZONE_H +#define APEX_STUB_PX_PROFILE_ZONE_H + +#include "Px.h" +#include "PxProfileZone.h" +#include "PsUserAllocated.h" + +namespace physx +{ + namespace profile + { + class PxUserCustomProfiler; + } +} + +namespace nvidia +{ +using namespace physx::profile; + +namespace apex +{ + +// This class provides a stub implementation of PhysX's PxProfileZone. +// It would be nice to not be forced to do this, but our scoped profile event macros +// cannot have an if(gProfileZone) because it would ruin the scope. So here we just +// create a stub that will be called so that the user need not create a PxProfileZoneManager +// in debug mode (and suffer an assertion). 
+ +class ApexStubPxProfileZone : public PxProfileZone, public UserAllocated +{ +public: + + // PxProfileZone methods + virtual const char* getName() { return 0; } + virtual void release() { PX_DELETE(this); } + + virtual void setProfileZoneManager(PxProfileZoneManager* ) {} + virtual profile::PxProfileZoneManager* getProfileZoneManager() { return 0; } + + virtual uint16_t getEventIdForName( const char* ) { return 0; } + + virtual void flushEventIdNameMap() {} + + virtual uint16_t getEventIdsForNames( const char** , uint32_t ) { return 0; } + virtual void setUserCustomProfiler(PxUserCustomProfiler* ) {}; + + // physx::PxProfileEventBufferClientManager methods + virtual void addClient( PxProfileZoneClient& ) {} + virtual void removeClient( PxProfileZoneClient& ) {} + virtual bool hasClients() const { return false; } + + // physx::PxProfileNameProvider methods + virtual PxProfileNames getProfileNames() const { return PxProfileNames(); } + + // profile::PxProfileEventSender methods + virtual void startEvent( uint16_t , uint64_t ) {} + virtual void stopEvent( uint16_t , uint64_t ) {} + + virtual void startEvent( uint16_t , uint64_t , uint32_t ) {} + virtual void stopEvent( uint16_t , uint64_t , uint32_t ) {} + virtual void eventValue( uint16_t , uint64_t , int64_t ) {} + + // physx::PxProfileEventFlusher methods + virtual void flushProfileEvents() {} +}; + +} +} // end namespace nvidia::apex + +#endif // APEX_STUB_PX_PROFILE_ZONE_H diff --git a/APEX_1.4/framework/include/ApexVertexBuffer.h b/APEX_1.4/framework/include/ApexVertexBuffer.h new file mode 100644 index 00000000..8e44c8f0 --- /dev/null +++ b/APEX_1.4/framework/include/ApexVertexBuffer.h @@ -0,0 +1,103 @@ +/* + * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved. + * + * NVIDIA CORPORATION and its licensors retain all intellectual property + * and proprietary rights in and to this software, related documentation + * and any modifications thereto. 
Any use, reproduction, disclosure or + * distribution of this software and related documentation without an express + * license agreement from NVIDIA CORPORATION is strictly prohibited. + */ + + +#ifndef APEX_VERTEX_BUFFER_H +#define APEX_VERTEX_BUFFER_H + +#include "RenderMeshAssetIntl.h" +#include "ApexVertexFormat.h" +#include "VertexBufferParameters.h" +#include <nvparameterized/NvParameterized.h> +#include "ApexSharedUtils.h" +#include "ApexInteropableBuffer.h" + +namespace nvidia +{ +namespace apex +{ + +class ApexVertexBuffer : public VertexBufferIntl, public ApexInteropableBuffer, public NvParameterized::SerializationCallback +{ +public: + ApexVertexBuffer(); + ~ApexVertexBuffer(); + + // from VertexBuffer + const VertexFormat& getFormat() const + { + return mFormat; + } + uint32_t getVertexCount() const + { + return mParams->vertexCount; + } + void* getBuffer(uint32_t bufferIndex); + void* getBufferAndFormatWritable(RenderDataFormat::Enum& format, uint32_t bufferIndex) + { + return getBufferAndFormat(format, bufferIndex); + } + + void* getBufferAndFormat(RenderDataFormat::Enum& format, uint32_t bufferIndex) + { + format = getFormat().getBufferFormat(bufferIndex); + return getBuffer(bufferIndex); + } + bool getBufferData(void* dstBuffer, nvidia::RenderDataFormat::Enum dstBufferFormat, uint32_t dstBufferStride, uint32_t bufferIndex, + uint32_t startVertexIndex, uint32_t elementCount) const; + PX_INLINE const void* getBuffer(uint32_t bufferIndex) const + { + return (const void*)((ApexVertexBuffer*)this)->getBuffer(bufferIndex); + } + PX_INLINE const void* getBufferAndFormat(RenderDataFormat::Enum& format, uint32_t bufferIndex) const + { + return (const void*)((ApexVertexBuffer*)this)->getBufferAndFormat(format, bufferIndex); + } + + // from VertexBufferIntl + void build(const VertexFormat& format, uint32_t vertexCount); + + VertexFormat& getFormatWritable() + { + return mFormat; + } + void applyTransformation(const PxMat44& transformation); + void 
applyScale(float scale); + bool mergeBinormalsIntoTangents(); + + void copy(uint32_t dstIndex, uint32_t srcIndex, ApexVertexBuffer* srcBufferPtr = NULL); + void resize(uint32_t vertexCount); + + // from NvParameterized::SerializationCallback + + void preSerialize(void* userData_); + + void setParams(VertexBufferParameters* param); + VertexBufferParameters* getParams() + { + return mParams; + } + + uint32_t getAllocationSize() const; + + void applyPermutation(const Array<uint32_t>& permutation); + +protected: + VertexBufferParameters* mParams; + + ApexVertexFormat mFormat; // Wrapper class for mParams->vertexFormat +}; + + +} // namespace apex +} // namespace nvidia + + +#endif // APEX_VERTEX_BUFFER_H diff --git a/APEX_1.4/framework/include/ApexVertexFormat.h b/APEX_1.4/framework/include/ApexVertexFormat.h new file mode 100644 index 00000000..7192bee6 --- /dev/null +++ b/APEX_1.4/framework/include/ApexVertexFormat.h @@ -0,0 +1,171 @@ +/* + * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved. + * + * NVIDIA CORPORATION and its licensors retain all intellectual property + * and proprietary rights in and to this software, related documentation + * and any modifications thereto. Any use, reproduction, disclosure or + * distribution of this software and related documentation without an express + * license agreement from NVIDIA CORPORATION is strictly prohibited. 
+ */ + + +#ifndef __APEX_VERTEX_FORMAT_H__ +#define __APEX_VERTEX_FORMAT_H__ + + +#include "VertexFormat.h" +#include "PsUserAllocated.h" +#include "PsArray.h" +#include "VertexFormatParameters.h" +#include "ApexSharedUtils.h" + +namespace nvidia +{ +namespace apex +{ + + +class ApexVertexFormat : public VertexFormat, public UserAllocated +{ +public: + ApexVertexFormat(); + ApexVertexFormat(VertexFormatParameters* params); + ~ApexVertexFormat(); + explicit ApexVertexFormat(const ApexVertexFormat& f); + + // VertexFormat methods + + /** \brief Resets the format to the initial state */ + virtual void reset(); + + + /** \brief Sets the winding (cull mode) for this format */ + virtual void setWinding(RenderCullMode::Enum winding); + + /** \brief Sets whether or not a separate bone buffer is used */ + virtual void setHasSeparateBoneBuffer(bool hasSeparateBoneBuffer); + + /** \brief Accessor to read winding (cull mode) */ + virtual RenderCullMode::Enum getWinding() const; + + /** \brief Accessor to read if a seperate vertex buffer for bone indices and weights is generated */ + virtual bool hasSeparateBoneBuffer() const; + + /** \brief Returns a buffer name for a semantic. Returns NULL if the semantic is invalid. */ + virtual const char* getSemanticName(RenderVertexSemantic::Enum semantic) const; + + /** \brief Returns a buffer ID for a semantic. For custom buffers, use the getID() function. */ + virtual BufferID getSemanticID(RenderVertexSemantic::Enum semantic) const; + + /** \brief Returns a buffer ID for a named buffer. For standard semantics, the getSemanticID( semantic ) function is faster, but + is equivalent to getID( getSemanticName( semantic ) ). Returns 0 if name == NULL */ + virtual BufferID getID(const char* name) const; + + + /** \brief Adds a vertex buffer channel to this format + \param [in] name the name of a new buffer (use getSemanticName for standard semantics) + \return The buffer index. 
If the buffer for the semantic already exists, the index of the existing buffer is returned. Returns -1 if there is an error (e.g. name == NULL). + */ + virtual int32_t addBuffer(const char* name); + + /** \brief Removes a buffer + \param [in] index the buffer to remove + \return True if successful, false otherwise (if the buffer index was invalid) + */ + virtual bool bufferReplaceWithLast(uint32_t index); + + + /** \brief Set the format for a buffer + \return True if successful, false otherwise (if the buffer index was invalid) + */ + virtual bool setBufferFormat(uint32_t index, RenderDataFormat::Enum format); + + /** \brief Set the access type for a buffer (static, dynamic, etc.) + \return True if successful, false otherwise (if the buffer index was invalid) + */ + virtual bool setBufferAccess(uint32_t index, RenderDataAccess::Enum access); + + /** \brief Set whether or not the buffer should be serialized + \return True if successful, false otherwise (if the buffer index was invalid) + */ + virtual bool setBufferSerialize(uint32_t index, bool serialize); + + + /** \brief Accessor to read the name of a given buffer + \return The buffer name if successful, NULL otherwise. + */ + virtual const char* getBufferName(uint32_t index) const; + + /** \brief Accessor to read the semantic of a given buffer + \return The buffer semantic if successful, RenderVertexSemantic::NUM_SEMANTICS otherwise. + */ + virtual RenderVertexSemantic::Enum getBufferSemantic(uint32_t index) const; + + /** \brief Accessor to read the ID of a given buffer + \return The buffer semantic if successful, 0 otherwise. + */ + virtual BufferID getBufferID(uint32_t index) const; + + /** \brief Get the format for a buffer + \return The buffer format if successful, RenderDataFormat::UNSPECIFIED otherwise. + */ + virtual RenderDataFormat::Enum getBufferFormat(uint32_t index) const; + + /** \brief Get the access type for a buffer (static, dynamic, etc.) 
+ \return The buffer access if successful, RenderDataAccess::ACCESS_TYPE_COUNT otherwise. + */ + virtual RenderDataAccess::Enum getBufferAccess(uint32_t index) const; + + /** \brief Get whether or not the buffer should be serialized + \return Whether or not the buffer should be serialized if successful, false otherwise. + */ + virtual bool getBufferSerialize(uint32_t index) const; + + + /** \brief Accessor to read the number of buffers */ + virtual uint32_t getBufferCount() const; + + /** \brief Returns the number of buffers that are user-specified */ + virtual uint32_t getCustomBufferCount() const; + + /** \brief Accessor to get the buffer index + If the buffer is not found, -1 is returned + */ + virtual int32_t getBufferIndexFromID(BufferID id) const; + + // ApexVertexFormat internal methods + + bool operator == (const VertexFormat& format) const; + bool operator != (const VertexFormat& format) const + { + return !(*this == format); + } + + void copy(const ApexVertexFormat& other); + +private: + void clearBuffers(); + + ApexVertexFormat& operator = (const ApexVertexFormat&) + { + return *this; // No assignment + } + + struct CustomBuffer + { + char* name; + RenderDataFormat::Enum format; + bool serialize; + }; + + VertexFormatParameters* mParams; + bool mOwnsParams; + + friend class ApexVertexBuffer; +}; + +} +} // end namespace nvidia::apex + + +#endif // __APEX_VERTEX_FORMAT_H__ diff --git a/APEX_1.4/framework/include/CudaProfileManager.h b/APEX_1.4/framework/include/CudaProfileManager.h new file mode 100644 index 00000000..5811272a --- /dev/null +++ b/APEX_1.4/framework/include/CudaProfileManager.h @@ -0,0 +1,74 @@ +/* + * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved. + * + * NVIDIA CORPORATION and its licensors retain all intellectual property + * and proprietary rights in and to this software, related documentation + * and any modifications thereto. 
Any use, reproduction, disclosure or + * distribution of this software and related documentation without an express + * license agreement from NVIDIA CORPORATION is strictly prohibited. + */ + + +#ifndef CUDA_PROFILE_MANAGER_H +#define CUDA_PROFILE_MANAGER_H + +/*! +\file +\brief classes CudaProfileManager +*/ + +#include <ApexDefs.h> +#include <PxSimpleTypes.h> + +namespace nvidia +{ +namespace apex +{ + +PX_PUSH_PACK_DEFAULT + +/** +\brief Interface for options of ApexCudaProfileManager + */ +class CudaProfileManager +{ +public: + /** + * Normalized time unit for profile data + */ + enum TimeFormat + { + MILLISECOND = 1, + MICROSECOND = 1000, + NANOSECOND = 1000000 + }; + + /** + \brief Set path for writing results + */ + virtual void setPath(const char* path) = 0; + /** + \brief Set kernel for profile + */ + virtual void setKernel(const char* functionName, const char* moduleName) = 0; + /** + \brief Set normailized time unit + */ + virtual void setTimeFormat(TimeFormat tf) = 0; + /** + \brief Set state (on/off) for profile manager + */ + virtual void enable(bool state) = 0; + /** + \brief Get state (on/off) of profile manager + */ + virtual bool isEnabled() const = 0; +}; + +PX_POP_PACK + +} +} + +#endif // CUDA_PROFILE_MANAGER_H + diff --git a/APEX_1.4/framework/include/FrameworkEventDefs.h b/APEX_1.4/framework/include/FrameworkEventDefs.h new file mode 100644 index 00000000..ffd40640 --- /dev/null +++ b/APEX_1.4/framework/include/FrameworkEventDefs.h @@ -0,0 +1,39 @@ +/* + * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved. + * + * NVIDIA CORPORATION and its licensors retain all intellectual property + * and proprietary rights in and to this software, related documentation + * and any modifications thereto. Any use, reproduction, disclosure or + * distribution of this software and related documentation without an express + * license agreement from NVIDIA CORPORATION is strictly prohibited. 
+ */ + + +// This file is used to define a list of AgPerfMon events. +// +// This file should only contain event definitions, using the +// DEFINE_EVENT macro. E.g.: +// +// DEFINE_EVENT(sample_name_1) +// DEFINE_EVENT(sample_name_2) +// DEFINE_EVENT(sample_name_3) + +// Framework only event definitions + +DEFINE_EVENT(ApexScene_simulate) +DEFINE_EVENT(ApexScene_fetchResults) +DEFINE_EVENT(ApexScene_checkResults) +DEFINE_EVENT(ApexSceneManualSubstep) +DEFINE_EVENT(ModuleSceneManualSubstep) +DEFINE_EVENT(ApexSceneBeforeStep) +DEFINE_EVENT(ApexSceneDuringStep) +DEFINE_EVENT(ApexSceneAfterStep) +DEFINE_EVENT(ApexScenePostFetchResults) +DEFINE_EVENT(ApexSceneLODUsedResource) +DEFINE_EVENT(ApexSceneLODSumBenefit) +DEFINE_EVENT(ApexRenderMeshUpdateRenderResources) +DEFINE_EVENT(ApexRenderMeshCreateRenderResources) +DEFINE_EVENT(ApexRenderMeshDispatchRenderResources) +DEFINE_EVENT(ApexRenderMeshUpdateInstances) +DEFINE_EVENT(ApexRenderMeshUpdateInstancesWritePoses) +DEFINE_EVENT(ApexRenderMeshUpdateInstancesWriteScales)
\ No newline at end of file diff --git a/APEX_1.4/framework/include/FrameworkPerfScope.h b/APEX_1.4/framework/include/FrameworkPerfScope.h new file mode 100644 index 00000000..7e2dc09b --- /dev/null +++ b/APEX_1.4/framework/include/FrameworkPerfScope.h @@ -0,0 +1,40 @@ +/* + * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved. + * + * NVIDIA CORPORATION and its licensors retain all intellectual property + * and proprietary rights in and to this software, related documentation + * and any modifications thereto. Any use, reproduction, disclosure or + * distribution of this software and related documentation without an express + * license agreement from NVIDIA CORPORATION is strictly prohibited. + */ + +// Copyright (C) 2002-2008 AGEIA Technologies, Inc. All rights reserved. +// Copyright (C) 2001-2006 NovodeX. All rights reserved. + +#ifndef __FRAMEWORK_PERF_SCOPE_H___ +#define __FRAMEWORK_PERF_SCOPE_H___ + +#include "PxSimpleTypes.h" + +#ifndef PHYSX_PROFILE_SDK +#define PX_DISABLE_USER_PROFILER_CALLBACK +#endif + +#include "ProfilerCallback.h" + +namespace nvidia +{ +namespace apex +{ +class ApexSDKImpl; +}; +}; + +namespace Framework +{ +void initFrameworkProfiling(nvidia::apex::ApexSDKImpl*); +void releaseFrameworkProfiling(); +}; + + +#endif diff --git a/APEX_1.4/framework/include/MirrorSceneImpl.h b/APEX_1.4/framework/include/MirrorSceneImpl.h new file mode 100644 index 00000000..22a8de5f --- /dev/null +++ b/APEX_1.4/framework/include/MirrorSceneImpl.h @@ -0,0 +1,216 @@ +/* + * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved. + * + * NVIDIA CORPORATION and its licensors retain all intellectual property + * and proprietary rights in and to this software, related documentation + * and any modifications thereto. Any use, reproduction, disclosure or + * distribution of this software and related documentation without an express + * license agreement from NVIDIA CORPORATION is strictly prohibited. 
+ */ + + +#ifndef MIRROR_SCENE_IMPL_H + +#define MIRROR_SCENE_IMPL_H + +#include "PhysXSDKVersion.h" + +#if PX_PHYSICS_VERSION_MAJOR == 3 + +#include "MirrorScene.h" +#include "PsUserAllocated.h" +#include "PsMutex.h" +#include "PxSimulationEventCallback.h" +#include "PxClient.h" +#include "PsHashMap.h" +#include "PxDeletionListener.h" +#include "PxTransform.h" +#include "PsArray.h" + +namespace nvidia +{ + namespace apex + { + class MirrorSceneImpl; + class MirrorActor; + + enum MirrorCommandType + { + MCT_CREATE_ACTOR, + MCT_RELEASE_ACTOR, + MCT_UPDATE_POSE, + MCT_LAST + }; + + class MirrorCommand + { + public: + MirrorCommand(MirrorCommandType type,MirrorActor *ma) + { + mType = type; + mMirrorActor = ma; + } + MirrorCommand(MirrorCommandType type,MirrorActor *ma,const PxTransform &pose) + { + mType = type; + mMirrorActor = ma; + mPose = pose; + } + MirrorCommandType mType; + MirrorActor *mMirrorActor; + PxTransform mPose; + }; + + class MirrorActor : public shdfnd::UserAllocated + { + public: + + // The constructor is only ever called from the PrimaryScene thread + MirrorActor(size_t actorHash,physx::PxRigidActor &actor,MirrorSceneImpl &parentScene); + // The destructor is only ever called from the MirrorSceneImpl thread + virtual ~MirrorActor(void); + + // Increments the reference count for the number of shapes on this actor + // currently inside the trigger volume. + // Only ever called from the primary scene thread + void addShape(void) + { + mShapeCount++; + } + + // Decrements the reference count for the number of shapes on this actor + // which are currently in the trigger volume. 
+ // If the reference count goes to zero, then no part of this actor is + // any longer inside the trigger volume and it's mirror should in turn be released + // This is only ever called from the PrimaryScene thread + bool removeShape(void) + { + mShapeCount--; + return mShapeCount == 0; + } + + // This method is called when the reference count goes to zero and/or the primary + // actor is released. + // This method posts on the MirrorSceneImpl thread queue for this object to be deleted + // the next time it does an update. + // At this point the primary scene should remove this actor from the hash table. + void release(void); + + + // Required by the PxObserver class we inherited + virtual uint32_t getObjectSize()const + { + return sizeof(MirrorActor); + } + + // This method is only called by the PrimaryScene thread + // If the pose of the actor we are mirroring has + void synchronizePose(void); // called from the primary scene thread; see if the pose of the mirrored actor has changed. + + void createActor(PxScene &scene); + void updatePose(const PxTransform &pose); + + MirrorSceneImpl &mMirrorScene; + uint32_t mShapeCount; + uint32_t mMirrorShapeCount; + PxRigidActor *mPrimaryActor; + PxRigidActor *mMirrorActor; + bool mReleasePosted; + PxTransform mPrimaryGlobalPose; + size_t mActorHash; // hash in primary scene. 
+ + private: + MirrorActor& operator=(const MirrorActor&); + }; + + typedef nvidia::Array< MirrorCommand > MirrorCommandArray; + + class MirrorSceneImpl : public MirrorScene, public shdfnd::UserAllocated, + public physx::PxSimulationEventCallback, physx::PxDeletionListener + { + public: + + typedef shdfnd::HashMap< size_t, MirrorActor * > ActorHash; + typedef shdfnd::HashMap< size_t, MirrorActor * > ShapeHash; + + enum ActorChange + { + AC_DELETED, + AC_FOUND, + AC_LOST + }; + + MirrorSceneImpl(physx::PxScene &primaryScene, + physx::PxScene &mirrorScene, + MirrorScene::MirrorFilter &mirrorFilter, + float mirrorStaticDistance, + float mirrorDynamicDistance, + float mirrorDistanceThreshold); + + MirrorScene::MirrorFilter & getMirrorFilter(void) const + { + return mMirrorFilter; + } + + virtual void synchronizePrimaryScene(const PxVec3 &cameraPos); + virtual void synchronizeMirrorScene(void); + + virtual void release(void); + + void postCommand(const MirrorCommand &mc); + + + /** + physx::PxSimulationEventCallback interface + */ + virtual void onConstraintBreak(physx::PxConstraintInfo* constraints, uint32_t count); + virtual void onWake(PxActor** actors, uint32_t count); + virtual void onSleep(PxActor** actors, uint32_t count); + virtual void onContact(const physx::PxContactPairHeader& pairHeader, const physx::PxContactPair* pairs, uint32_t nbPairs); + virtual void onTrigger(physx::PxTriggerPair* pairs, uint32_t count); + virtual void onAdvance(const PxRigidBody*const* bodyBuffer, const PxTransform* poseBuffer, const PxU32 count); + + // This is a notification even that the PrimayScene actor we are mirroring has been + // deleted. If this is the case, we need to zero out PrimaryActor pointer so we + // no longer attempt to access it. + // However...we do not call release, because we should get trigger events which cause + // the reference count to go to zero. 
+ virtual void onRelease(const PxBase* observed, + void* userData, + physx::PxDeletionEventFlag::Enum deletionEvent); + + + + protected: + virtual ~MirrorSceneImpl(void); + private: + + void mirrorShape(const physx::PxTriggerPair &tp); + + void createTriggerActor(const PxVec3 &cameraPosition); + + physx::PxScene &mPrimaryScene; + physx::PxScene &mMirrorScene; + MirrorScene::MirrorFilter &mMirrorFilter; + float mMirrorStaticDistance; + float mMirrorDynamicDistance; + float mMirrorDistanceThreshold; + PxVec3 mLastCameraLocation; + physx::PxRigidDynamic *mTriggerActor; + PxMaterial *mTriggerMaterial; + physx::PxShape *mTriggerShapeStatic; + physx::PxShape *mTriggerShapeDynamic; + ActorHash mActors; + ShapeHash mShapes; + shdfnd::Mutex mMirrorCommandMutex; + MirrorCommandArray mMirrorCommands; + physx::PxSimulationEventCallback *mSimulationEventCallback; + nvidia::Array< physx::PxTriggerPair > mTriggerPairs; + }; + + }; // end of apex namespace +}; // end of physx namespace + +#endif // PX_PHYSICS_VERSION_MAJOR + +#endif // MIRROR_SCENE_IMPL_H diff --git a/APEX_1.4/framework/include/PVDBindingErrorStream.h b/APEX_1.4/framework/include/PVDBindingErrorStream.h new file mode 100644 index 00000000..2de77bd0 --- /dev/null +++ b/APEX_1.4/framework/include/PVDBindingErrorStream.h @@ -0,0 +1,68 @@ +/* + * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved. + * + * NVIDIA CORPORATION and its licensors retain all intellectual property + * and proprietary rights in and to this software, related documentation + * and any modifications thereto. Any use, reproduction, disclosure or + * distribution of this software and related documentation without an express + * license agreement from NVIDIA CORPORATION is strictly prohibited. 
+ */ + + +#ifndef PVDBINDING_ERROR_STREAM_H +#define PVDBINDING_ERROR_STREAM_H + +#include "PxErrorCallback.h" +#include "PxProfileBase.h" + +#include <stdio.h> + +namespace physx { namespace profile { + +inline void printInfo(const char* format, ...) +{ + PX_UNUSED(format); +#if PRINT_TEST_INFO + va_list va; + va_start(va, format); + vprintf(format, va); + va_end(va); +#endif +} + +class PVDBindingErrorStream : public PxErrorCallback +{ +public: + + PVDBindingErrorStream() {} + void reportError(PxErrorCode::Enum e, const char* message, const char* file, int line) + { + PX_UNUSED(line); + PX_UNUSED(file); + switch (e) + { + case PxErrorCode::eINVALID_PARAMETER: + printf( "on invalid parameter: %s", message ); + break; + case PxErrorCode::eINVALID_OPERATION: + printf( "on invalid operation: %s", message ); + break; + case PxErrorCode::eOUT_OF_MEMORY: + printf( "on out of memory: %s", message ); + break; + case PxErrorCode::eDEBUG_INFO: + printf( "on debug info: %s", message ); + break; + case PxErrorCode::eDEBUG_WARNING: + printf( "on debug warning: %s", message ); + break; + default: + printf( "on unknown error: %s", message ); + break; + } + } +}; + +}} + +#endif // PVDBINDING_ERROR_STREAM_H diff --git a/APEX_1.4/framework/include/RenderResourceManagerWrapper.h b/APEX_1.4/framework/include/RenderResourceManagerWrapper.h new file mode 100644 index 00000000..def98e65 --- /dev/null +++ b/APEX_1.4/framework/include/RenderResourceManagerWrapper.h @@ -0,0 +1,134 @@ +/* + * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved. + * + * NVIDIA CORPORATION and its licensors retain all intellectual property + * and proprietary rights in and to this software, related documentation + * and any modifications thereto. Any use, reproduction, disclosure or + * distribution of this software and related documentation without an express + * license agreement from NVIDIA CORPORATION is strictly prohibited. 
+ */ + + +#ifndef RENDER_RESOURCE_MANAGER_WRAPPER_H +#define RENDER_RESOURCE_MANAGER_WRAPPER_H + +#include "ApexSDKImpl.h" + +namespace nvidia +{ +namespace apex +{ + + +class RenderResourceManagerWrapper : public UserRenderResourceManager, public UserAllocated +{ + PX_NOCOPY(RenderResourceManagerWrapper); +public: + RenderResourceManagerWrapper(UserRenderResourceManager& rrm) : + mRrm(rrm) + { + } + + + virtual UserRenderVertexBuffer* createVertexBuffer(const UserRenderVertexBufferDesc& desc) + { + URR_CHECK; + return mRrm.createVertexBuffer(desc); + } + + virtual void releaseVertexBuffer(UserRenderVertexBuffer& buffer) + { + mRrm.releaseVertexBuffer(buffer); + } + + virtual UserRenderIndexBuffer* createIndexBuffer(const UserRenderIndexBufferDesc& desc) + { + URR_CHECK; + return mRrm.createIndexBuffer(desc); + } + + virtual void releaseIndexBuffer(UserRenderIndexBuffer& buffer) + { + mRrm.releaseIndexBuffer(buffer); + } + + virtual UserRenderBoneBuffer* createBoneBuffer(const UserRenderBoneBufferDesc& desc) + { + URR_CHECK; + return mRrm.createBoneBuffer(desc); + } + + virtual void releaseBoneBuffer(UserRenderBoneBuffer& buffer) + { + mRrm.releaseBoneBuffer(buffer); + } + + virtual UserRenderInstanceBuffer* createInstanceBuffer(const UserRenderInstanceBufferDesc& desc) + { + URR_CHECK; + return mRrm.createInstanceBuffer(desc); + } + + virtual void releaseInstanceBuffer(UserRenderInstanceBuffer& buffer) + { + mRrm.releaseInstanceBuffer(buffer); + } + + virtual UserRenderSpriteBuffer* createSpriteBuffer(const UserRenderSpriteBufferDesc& desc) + { + URR_CHECK; + return mRrm.createSpriteBuffer(desc); + } + + virtual void releaseSpriteBuffer(UserRenderSpriteBuffer& buffer) + { + mRrm.releaseSpriteBuffer(buffer); + } + + virtual UserRenderSurfaceBuffer* createSurfaceBuffer(const UserRenderSurfaceBufferDesc& desc) + { + URR_CHECK; + return mRrm.createSurfaceBuffer(desc); + } + + virtual void releaseSurfaceBuffer(UserRenderSurfaceBuffer& buffer) + { + 
mRrm.releaseSurfaceBuffer(buffer); + } + + virtual UserRenderResource* createResource(const UserRenderResourceDesc& desc) + { + URR_CHECK; + return mRrm.createResource(desc); + } + + virtual void releaseResource(UserRenderResource& resource) + { + mRrm.releaseResource(resource); + } + + virtual uint32_t getMaxBonesForMaterial(void* material) + { + return mRrm.getMaxBonesForMaterial(material); + } + + virtual bool getSpriteLayoutData(uint32_t spriteCount, uint32_t spriteSemanticsBitmap, UserRenderSpriteBufferDesc* textureDescArray) + { + return mRrm.getSpriteLayoutData(spriteCount, spriteSemanticsBitmap, textureDescArray); + } + + virtual bool getInstanceLayoutData(uint32_t spriteCount, uint32_t spriteSemanticsBitmap, UserRenderInstanceBufferDesc* instanceDescArray) + { + return mRrm.getInstanceLayoutData(spriteCount, spriteSemanticsBitmap, instanceDescArray); + } + +private: + UserRenderResourceManager& mRrm; +}; + + +} +} // end namespace nvidia::apex + + +#endif // RENDER_RESOURCE_MANAGER_WRAPPER_H diff --git a/APEX_1.4/framework/include/ThreadPool.h b/APEX_1.4/framework/include/ThreadPool.h new file mode 100644 index 00000000..f6f4be69 --- /dev/null +++ b/APEX_1.4/framework/include/ThreadPool.h @@ -0,0 +1,220 @@ +/* + * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved. + * + * NVIDIA CORPORATION and its licensors retain all intellectual property + * and proprietary rights in and to this software, related documentation + * and any modifications thereto. Any use, reproduction, disclosure or + * distribution of this software and related documentation without an express + * license agreement from NVIDIA CORPORATION is strictly prohibited. 
+ */ + + +#ifndef _THREAD_POOL_H_ +#define _THREAD_POOL_H_ + +#include "PxTask.h" +#include "PxTaskManager.h" +#include "PxCpuDispatcher.h" +#include "PsSList.h" +#include "PsSync.h" +#include "PsThread.h" +#include "ApexUsingNamespace.h" + +namespace physx +{ + namespace pvdsdk + { + class ApexPvdClient; + } +} + +namespace nvidia +{ +namespace apex +{ + +PxCpuDispatcher* createDefaultThreadPool(unsigned int numThreads); + +class SharedQueueEntry : public shdfnd::SListEntry +{ +public: + SharedQueueEntry(void* objectRef) : mObjectRef(objectRef), mPooledEntry(false) {} + SharedQueueEntry() : mObjectRef(NULL), mPooledEntry(true) {} + +public: + void* mObjectRef; + bool mPooledEntry; // True if the entry was preallocated in a pool +}; + +template<class Alloc = typename shdfnd::AllocatorTraits<SharedQueueEntry>::Type > +class SharedQueueEntryPool : private Alloc +{ +public: + SharedQueueEntryPool(uint32_t poolSize, const Alloc& alloc = Alloc()); + ~SharedQueueEntryPool(); + + SharedQueueEntry* getEntry(void* objectRef); + void putEntry(SharedQueueEntry& entry); + +private: + SharedQueueEntry* mTaskEntryPool; + shdfnd::SList mTaskEntryPtrPool; +}; + +template <class Alloc> +SharedQueueEntryPool<Alloc>::SharedQueueEntryPool(uint32_t poolSize, const Alloc& alloc) + : Alloc(alloc) +{ + shdfnd::AlignedAllocator<PX_SLIST_ALIGNMENT, Alloc> alignedAlloc; + + mTaskEntryPool = poolSize ? 
(SharedQueueEntry*)alignedAlloc.allocate(sizeof(SharedQueueEntry) * poolSize, __FILE__, __LINE__) : NULL; + + if (mTaskEntryPool) + { + for (uint32_t i = 0; i < poolSize; i++) + { + PX_ASSERT((size_t(&mTaskEntryPool[i]) & (PX_SLIST_ALIGNMENT - 1)) == 0); // The SList entry must be aligned according to PX_SLIST_ALIGNMENT + + PX_PLACEMENT_NEW(&mTaskEntryPool[i], SharedQueueEntry)(); + PX_ASSERT(mTaskEntryPool[i].mPooledEntry == true); + mTaskEntryPtrPool.push(mTaskEntryPool[i]); + } + } +} + + +template <class Alloc> +SharedQueueEntryPool<Alloc>::~SharedQueueEntryPool() +{ + if (mTaskEntryPool) + { + shdfnd::AlignedAllocator<PX_SLIST_ALIGNMENT, Alloc> alignedAlloc; + alignedAlloc.deallocate(mTaskEntryPool); + } +} + + +template <class Alloc> +SharedQueueEntry* SharedQueueEntryPool<Alloc>::getEntry(void* objectRef) +{ + SharedQueueEntry* e = static_cast<SharedQueueEntry*>(mTaskEntryPtrPool.pop()); + if (e) + { + PX_ASSERT(e->mPooledEntry == true); + e->mObjectRef = objectRef; + return e; + } + else + { + shdfnd::AlignedAllocator<PX_SLIST_ALIGNMENT, Alloc> alignedAlloc; + e = (SharedQueueEntry*)alignedAlloc.allocate(sizeof(SharedQueueEntry), __FILE__, __LINE__); + if (e) + { + PX_PLACEMENT_NEW(e, SharedQueueEntry)(objectRef); + PX_ASSERT(e->mPooledEntry == false); + } + + return e; + } +} + + +template <class Alloc> +void SharedQueueEntryPool<Alloc>::putEntry(SharedQueueEntry& entry) +{ + if (entry.mPooledEntry) + { + entry.mObjectRef = NULL; + mTaskEntryPtrPool.push(entry); + } + else + { + shdfnd::AlignedAllocator<PX_SLIST_ALIGNMENT, Alloc> alignedAlloc; + alignedAlloc.deallocate(&entry); + } +} + +#define TASK_QUEUE_ENTRY_POOL_SIZE 128 + +class TaskQueueHelper +{ +public: + static PxBaseTask* fetchTask(shdfnd::SList& taskQueue, SharedQueueEntryPool<>& entryPool) + { + SharedQueueEntry* entry = static_cast<SharedQueueEntry*>(taskQueue.pop()); + if (entry) + { + PxBaseTask* task = reinterpret_cast<PxBaseTask*>(entry->mObjectRef); + entryPool.putEntry(*entry); + return 
task; + } + else + { + return NULL; + } + } +}; + + +class DefaultCpuDispatcher; + +class CpuWorkerThread : public shdfnd::Thread +{ +public: + CpuWorkerThread(); + ~CpuWorkerThread(); + + void initialize(DefaultCpuDispatcher* ownerDispatcher); + void execute(); + bool tryAcceptJobToLocalQueue(PxBaseTask& task, shdfnd::Thread::Id taskSubmitionThread); + PxBaseTask* giveUpJob(); + +protected: + SharedQueueEntryPool<> mQueueEntryPool; + DefaultCpuDispatcher* mOwner; + shdfnd::SList mLocalJobList; + shdfnd::Thread::Id mThreadId; + pvdsdk::ApexPvdClient *mApexPvdClient; +}; + +/* + * Default CpuDispatcher implementation, if none is provided + */ +class DefaultCpuDispatcher : public PxCpuDispatcher, public shdfnd::UserAllocated +{ + friend class TaskQueueHelper; + +private: + DefaultCpuDispatcher() : mQueueEntryPool(0) {} + ~DefaultCpuDispatcher(); + +public: + DefaultCpuDispatcher(uint32_t numThreads, uint32_t* affinityMasks); + void submitTask(PxBaseTask& task); + void flush( PxBaseTask& task, int32_t targetRef); + uint32_t getWorkerCount() const; + void release(); + + PxBaseTask* getJob(); + PxBaseTask* stealJob(); + void waitForWork() + { + mWorkReady.wait(); + } + void resetWakeSignal(); + static uint32_t getAffinityMask(uint32_t affinityMask); + +protected: + CpuWorkerThread* mWorkerThreads; + SharedQueueEntryPool<> mQueueEntryPool; + shdfnd::SList mJobList; + shdfnd::Sync mWorkReady; + uint32_t mNumThreads; + bool mShuttingDown; + pvdsdk::ApexPvdClient *mApexPvdClient; +}; + +} // end pxtask namespace +} // end physx namespace + +#endif // _THREAD_POOL_H_ diff --git a/APEX_1.4/framework/include/autogen/BufferF32x1.h b/APEX_1.4/framework/include/autogen/BufferF32x1.h new file mode 100644 index 00000000..3b2f372c --- /dev/null +++ b/APEX_1.4/framework/include/autogen/BufferF32x1.h @@ -0,0 +1,238 @@ +// This code contains NVIDIA Confidential Information and is disclosed to you +// under a form of NVIDIA software license agreement provided separately to you. 
+// +// Notice +// NVIDIA Corporation and its licensors retain all intellectual property and +// proprietary rights in and to this software and related documentation and +// any modifications thereto. Any use, reproduction, disclosure, or +// distribution of this software and related documentation without an express +// license agreement from NVIDIA Corporation is strictly prohibited. +// +// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES +// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO +// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT, +// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE. +// +// Information and code furnished is believed to be accurate and reliable. +// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such +// information or for any infringement of patents or other rights of third parties that may +// result from its use. No license is granted by implication or otherwise under any patent +// or patent rights of NVIDIA Corporation. Details are subject to change without notice. +// This code supersedes and replaces all information previously supplied. +// NVIDIA Corporation products are not authorized for use as critical +// components in life support devices or systems without express written approval of +// NVIDIA Corporation. +// +// Copyright (c) 2008-2015 NVIDIA Corporation. All rights reserved. 
+ +// This file was generated by NvParameterized/scripts/GenParameterized.pl + + +#ifndef HEADER_BufferF32x1_h +#define HEADER_BufferF32x1_h + +#include "NvParametersTypes.h" + +#ifndef NV_PARAMETERIZED_ONLY_LAYOUTS +#include "nvparameterized/NvParameterized.h" +#include "nvparameterized/NvParameterizedTraits.h" +#include "NvParameters.h" +#include "NvTraitsInternal.h" +#endif + +namespace nvidia +{ +namespace apex +{ + +#if PX_VC +#pragma warning(push) +#pragma warning(disable: 4324) // structure was padded due to __declspec(align()) +#endif + +namespace BufferF32x1NS +{ + + +struct F32_DynamicArray1D_Type +{ + float* buf; + bool isAllocated; + int32_t elementSize; + int32_t arraySizes[1]; +}; + + +struct ParametersStruct +{ + + F32_DynamicArray1D_Type data; + +}; + +static const uint32_t checksum[] = { 0xcdd104f7, 0x061ac76e, 0xc255d951, 0x1e7cffef, }; + +} // namespace BufferF32x1NS + +#ifndef NV_PARAMETERIZED_ONLY_LAYOUTS +class BufferF32x1 : public NvParameterized::NvParameters, public BufferF32x1NS::ParametersStruct +{ +public: + BufferF32x1(NvParameterized::Traits* traits, void* buf = 0, int32_t* refCount = 0); + + virtual ~BufferF32x1(); + + virtual void destroy(); + + static const char* staticClassName(void) + { + return("BufferF32x1"); + } + + const char* className(void) const + { + return(staticClassName()); + } + + static const uint32_t ClassVersion = ((uint32_t)0 << 16) + (uint32_t)0; + + static uint32_t staticVersion(void) + { + return ClassVersion; + } + + uint32_t version(void) const + { + return(staticVersion()); + } + + static const uint32_t ClassAlignment = 8; + + static const uint32_t* staticChecksum(uint32_t& bits) + { + bits = 8 * sizeof(BufferF32x1NS::checksum); + return BufferF32x1NS::checksum; + } + + static void freeParameterDefinitionTable(NvParameterized::Traits* traits); + + const uint32_t* checksum(uint32_t& bits) const + { + return staticChecksum(bits); + } + + const BufferF32x1NS::ParametersStruct& parameters(void) const + { + 
BufferF32x1* tmpThis = const_cast<BufferF32x1*>(this); + return *(static_cast<BufferF32x1NS::ParametersStruct*>(tmpThis)); + } + + BufferF32x1NS::ParametersStruct& parameters(void) + { + return *(static_cast<BufferF32x1NS::ParametersStruct*>(this)); + } + + virtual NvParameterized::ErrorType getParameterHandle(const char* long_name, NvParameterized::Handle& handle) const; + virtual NvParameterized::ErrorType getParameterHandle(const char* long_name, NvParameterized::Handle& handle); + + void initDefaults(void); + +protected: + + virtual const NvParameterized::DefinitionImpl* getParameterDefinitionTree(void); + virtual const NvParameterized::DefinitionImpl* getParameterDefinitionTree(void) const; + + + virtual void getVarPtr(const NvParameterized::Handle& handle, void*& ptr, size_t& offset) const; + +private: + + void buildTree(void); + void initDynamicArrays(void); + void initStrings(void); + void initReferences(void); + void freeDynamicArrays(void); + void freeStrings(void); + void freeReferences(void); + + static bool mBuiltFlag; + static NvParameterized::MutexType mBuiltFlagMutex; +}; + +class BufferF32x1Factory : public NvParameterized::Factory +{ + static const char* const vptr; + +public: + + virtual void freeParameterDefinitionTable(NvParameterized::Traits* traits) + { + BufferF32x1::freeParameterDefinitionTable(traits); + } + + virtual NvParameterized::Interface* create(NvParameterized::Traits* paramTraits) + { + // placement new on this class using mParameterizedTraits + + void* newPtr = paramTraits->alloc(sizeof(BufferF32x1), BufferF32x1::ClassAlignment); + if (!NvParameterized::IsAligned(newPtr, BufferF32x1::ClassAlignment)) + { + NV_PARAM_TRAITS_WARNING(paramTraits, "Unaligned memory allocation for class BufferF32x1"); + paramTraits->free(newPtr); + return 0; + } + + memset(newPtr, 0, sizeof(BufferF32x1)); // always initialize memory allocated to zero for default values + return NV_PARAM_PLACEMENT_NEW(newPtr, BufferF32x1)(paramTraits); + } + + virtual 
NvParameterized::Interface* finish(NvParameterized::Traits* paramTraits, void* bufObj, void* bufStart, int32_t* refCount) + { + if (!NvParameterized::IsAligned(bufObj, BufferF32x1::ClassAlignment) + || !NvParameterized::IsAligned(bufStart, BufferF32x1::ClassAlignment)) + { + NV_PARAM_TRAITS_WARNING(paramTraits, "Unaligned memory allocation for class BufferF32x1"); + return 0; + } + + // Init NvParameters-part + // We used to call empty constructor of BufferF32x1 here + // but it may call default constructors of members and spoil the data + NV_PARAM_PLACEMENT_NEW(bufObj, NvParameterized::NvParameters)(paramTraits, bufStart, refCount); + + // Init vtable (everything else is already initialized) + *(const char**)bufObj = vptr; + + return (BufferF32x1*)bufObj; + } + + virtual const char* getClassName() + { + return (BufferF32x1::staticClassName()); + } + + virtual uint32_t getVersion() + { + return (BufferF32x1::staticVersion()); + } + + virtual uint32_t getAlignment() + { + return (BufferF32x1::ClassAlignment); + } + + virtual const uint32_t* getChecksum(uint32_t& bits) + { + return (BufferF32x1::staticChecksum(bits)); + } +}; +#endif // NV_PARAMETERIZED_ONLY_LAYOUTS + +} // namespace apex +} // namespace nvidia + +#if PX_VC +#pragma warning(pop) +#endif + +#endif diff --git a/APEX_1.4/framework/include/autogen/BufferF32x2.h b/APEX_1.4/framework/include/autogen/BufferF32x2.h new file mode 100644 index 00000000..59a6b0d9 --- /dev/null +++ b/APEX_1.4/framework/include/autogen/BufferF32x2.h @@ -0,0 +1,244 @@ +// This code contains NVIDIA Confidential Information and is disclosed to you +// under a form of NVIDIA software license agreement provided separately to you. +// +// Notice +// NVIDIA Corporation and its licensors retain all intellectual property and +// proprietary rights in and to this software and related documentation and +// any modifications thereto. 
Any use, reproduction, disclosure, or +// distribution of this software and related documentation without an express +// license agreement from NVIDIA Corporation is strictly prohibited. +// +// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES +// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO +// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT, +// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE. +// +// Information and code furnished is believed to be accurate and reliable. +// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such +// information or for any infringement of patents or other rights of third parties that may +// result from its use. No license is granted by implication or otherwise under any patent +// or patent rights of NVIDIA Corporation. Details are subject to change without notice. +// This code supersedes and replaces all information previously supplied. +// NVIDIA Corporation products are not authorized for use as critical +// components in life support devices or systems without express written approval of +// NVIDIA Corporation. +// +// Copyright (c) 2008-2015 NVIDIA Corporation. All rights reserved. 
+ +// This file was generated by NvParameterized/scripts/GenParameterized.pl + + +#ifndef HEADER_BufferF32x2_h +#define HEADER_BufferF32x2_h + +#include "NvParametersTypes.h" + +#ifndef NV_PARAMETERIZED_ONLY_LAYOUTS +#include "nvparameterized/NvParameterized.h" +#include "nvparameterized/NvParameterizedTraits.h" +#include "NvParameters.h" +#include "NvTraitsInternal.h" +#endif + +namespace nvidia +{ +namespace apex +{ + +#if PX_VC +#pragma warning(push) +#pragma warning(disable: 4324) // structure was padded due to __declspec(align()) +#endif + +namespace BufferF32x2NS +{ + +struct F32x2_Type; + +struct F32x2_DynamicArray1D_Type +{ + F32x2_Type* buf; + bool isAllocated; + int32_t elementSize; + int32_t arraySizes[1]; +}; + +struct F32x2_Type +{ + float x; + float y; +}; + +struct ParametersStruct +{ + + F32x2_DynamicArray1D_Type data; + +}; + +static const uint32_t checksum[] = { 0x788349ee, 0x95c560e2, 0x9633945e, 0x8cc784a0, }; + +} // namespace BufferF32x2NS + +#ifndef NV_PARAMETERIZED_ONLY_LAYOUTS +class BufferF32x2 : public NvParameterized::NvParameters, public BufferF32x2NS::ParametersStruct +{ +public: + BufferF32x2(NvParameterized::Traits* traits, void* buf = 0, int32_t* refCount = 0); + + virtual ~BufferF32x2(); + + virtual void destroy(); + + static const char* staticClassName(void) + { + return("BufferF32x2"); + } + + const char* className(void) const + { + return(staticClassName()); + } + + static const uint32_t ClassVersion = ((uint32_t)0 << 16) + (uint32_t)0; + + static uint32_t staticVersion(void) + { + return ClassVersion; + } + + uint32_t version(void) const + { + return(staticVersion()); + } + + static const uint32_t ClassAlignment = 8; + + static const uint32_t* staticChecksum(uint32_t& bits) + { + bits = 8 * sizeof(BufferF32x2NS::checksum); + return BufferF32x2NS::checksum; + } + + static void freeParameterDefinitionTable(NvParameterized::Traits* traits); + + const uint32_t* checksum(uint32_t& bits) const + { + return staticChecksum(bits); + } + 
+ const BufferF32x2NS::ParametersStruct& parameters(void) const + { + BufferF32x2* tmpThis = const_cast<BufferF32x2*>(this); + return *(static_cast<BufferF32x2NS::ParametersStruct*>(tmpThis)); + } + + BufferF32x2NS::ParametersStruct& parameters(void) + { + return *(static_cast<BufferF32x2NS::ParametersStruct*>(this)); + } + + virtual NvParameterized::ErrorType getParameterHandle(const char* long_name, NvParameterized::Handle& handle) const; + virtual NvParameterized::ErrorType getParameterHandle(const char* long_name, NvParameterized::Handle& handle); + + void initDefaults(void); + +protected: + + virtual const NvParameterized::DefinitionImpl* getParameterDefinitionTree(void); + virtual const NvParameterized::DefinitionImpl* getParameterDefinitionTree(void) const; + + + virtual void getVarPtr(const NvParameterized::Handle& handle, void*& ptr, size_t& offset) const; + +private: + + void buildTree(void); + void initDynamicArrays(void); + void initStrings(void); + void initReferences(void); + void freeDynamicArrays(void); + void freeStrings(void); + void freeReferences(void); + + static bool mBuiltFlag; + static NvParameterized::MutexType mBuiltFlagMutex; +}; + +class BufferF32x2Factory : public NvParameterized::Factory +{ + static const char* const vptr; + +public: + + virtual void freeParameterDefinitionTable(NvParameterized::Traits* traits) + { + BufferF32x2::freeParameterDefinitionTable(traits); + } + + virtual NvParameterized::Interface* create(NvParameterized::Traits* paramTraits) + { + // placement new on this class using mParameterizedTraits + + void* newPtr = paramTraits->alloc(sizeof(BufferF32x2), BufferF32x2::ClassAlignment); + if (!NvParameterized::IsAligned(newPtr, BufferF32x2::ClassAlignment)) + { + NV_PARAM_TRAITS_WARNING(paramTraits, "Unaligned memory allocation for class BufferF32x2"); + paramTraits->free(newPtr); + return 0; + } + + memset(newPtr, 0, sizeof(BufferF32x2)); // always initialize memory allocated to zero for default values + return 
NV_PARAM_PLACEMENT_NEW(newPtr, BufferF32x2)(paramTraits); + } + + virtual NvParameterized::Interface* finish(NvParameterized::Traits* paramTraits, void* bufObj, void* bufStart, int32_t* refCount) + { + if (!NvParameterized::IsAligned(bufObj, BufferF32x2::ClassAlignment) + || !NvParameterized::IsAligned(bufStart, BufferF32x2::ClassAlignment)) + { + NV_PARAM_TRAITS_WARNING(paramTraits, "Unaligned memory allocation for class BufferF32x2"); + return 0; + } + + // Init NvParameters-part + // We used to call empty constructor of BufferF32x2 here + // but it may call default constructors of members and spoil the data + NV_PARAM_PLACEMENT_NEW(bufObj, NvParameterized::NvParameters)(paramTraits, bufStart, refCount); + + // Init vtable (everything else is already initialized) + *(const char**)bufObj = vptr; + + return (BufferF32x2*)bufObj; + } + + virtual const char* getClassName() + { + return (BufferF32x2::staticClassName()); + } + + virtual uint32_t getVersion() + { + return (BufferF32x2::staticVersion()); + } + + virtual uint32_t getAlignment() + { + return (BufferF32x2::ClassAlignment); + } + + virtual const uint32_t* getChecksum(uint32_t& bits) + { + return (BufferF32x2::staticChecksum(bits)); + } +}; +#endif // NV_PARAMETERIZED_ONLY_LAYOUTS + +} // namespace apex +} // namespace nvidia + +#if PX_VC +#pragma warning(pop) +#endif + +#endif diff --git a/APEX_1.4/framework/include/autogen/BufferF32x3.h b/APEX_1.4/framework/include/autogen/BufferF32x3.h new file mode 100644 index 00000000..79a0c924 --- /dev/null +++ b/APEX_1.4/framework/include/autogen/BufferF32x3.h @@ -0,0 +1,245 @@ +// This code contains NVIDIA Confidential Information and is disclosed to you +// under a form of NVIDIA software license agreement provided separately to you. +// +// Notice +// NVIDIA Corporation and its licensors retain all intellectual property and +// proprietary rights in and to this software and related documentation and +// any modifications thereto. 
Any use, reproduction, disclosure, or +// distribution of this software and related documentation without an express +// license agreement from NVIDIA Corporation is strictly prohibited. +// +// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES +// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO +// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT, +// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE. +// +// Information and code furnished is believed to be accurate and reliable. +// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such +// information or for any infringement of patents or other rights of third parties that may +// result from its use. No license is granted by implication or otherwise under any patent +// or patent rights of NVIDIA Corporation. Details are subject to change without notice. +// This code supersedes and replaces all information previously supplied. +// NVIDIA Corporation products are not authorized for use as critical +// components in life support devices or systems without express written approval of +// NVIDIA Corporation. +// +// Copyright (c) 2008-2015 NVIDIA Corporation. All rights reserved. 
+ +// This file was generated by NvParameterized/scripts/GenParameterized.pl + + +#ifndef HEADER_BufferF32x3_h +#define HEADER_BufferF32x3_h + +#include "NvParametersTypes.h" + +#ifndef NV_PARAMETERIZED_ONLY_LAYOUTS +#include "nvparameterized/NvParameterized.h" +#include "nvparameterized/NvParameterizedTraits.h" +#include "NvParameters.h" +#include "NvTraitsInternal.h" +#endif + +namespace nvidia +{ +namespace apex +{ + +#if PX_VC +#pragma warning(push) +#pragma warning(disable: 4324) // structure was padded due to __declspec(align()) +#endif + +namespace BufferF32x3NS +{ + +struct F32x3_Type; + +struct VEC3_DynamicArray1D_Type +{ + physx::PxVec3* buf; + bool isAllocated; + int32_t elementSize; + int32_t arraySizes[1]; +}; + +struct F32x3_Type +{ + float x; + float y; + float z; +}; + +struct ParametersStruct +{ + + VEC3_DynamicArray1D_Type data; + +}; + +static const uint32_t checksum[] = { 0x458b554a, 0x7ed3e930, 0x0299ff33, 0x9d69c11b, }; + +} // namespace BufferF32x3NS + +#ifndef NV_PARAMETERIZED_ONLY_LAYOUTS +class BufferF32x3 : public NvParameterized::NvParameters, public BufferF32x3NS::ParametersStruct +{ +public: + BufferF32x3(NvParameterized::Traits* traits, void* buf = 0, int32_t* refCount = 0); + + virtual ~BufferF32x3(); + + virtual void destroy(); + + static const char* staticClassName(void) + { + return("BufferF32x3"); + } + + const char* className(void) const + { + return(staticClassName()); + } + + static const uint32_t ClassVersion = ((uint32_t)0 << 16) + (uint32_t)0; + + static uint32_t staticVersion(void) + { + return ClassVersion; + } + + uint32_t version(void) const + { + return(staticVersion()); + } + + static const uint32_t ClassAlignment = 8; + + static const uint32_t* staticChecksum(uint32_t& bits) + { + bits = 8 * sizeof(BufferF32x3NS::checksum); + return BufferF32x3NS::checksum; + } + + static void freeParameterDefinitionTable(NvParameterized::Traits* traits); + + const uint32_t* checksum(uint32_t& bits) const + { + return 
staticChecksum(bits); + } + + const BufferF32x3NS::ParametersStruct& parameters(void) const + { + BufferF32x3* tmpThis = const_cast<BufferF32x3*>(this); + return *(static_cast<BufferF32x3NS::ParametersStruct*>(tmpThis)); + } + + BufferF32x3NS::ParametersStruct& parameters(void) + { + return *(static_cast<BufferF32x3NS::ParametersStruct*>(this)); + } + + virtual NvParameterized::ErrorType getParameterHandle(const char* long_name, NvParameterized::Handle& handle) const; + virtual NvParameterized::ErrorType getParameterHandle(const char* long_name, NvParameterized::Handle& handle); + + void initDefaults(void); + +protected: + + virtual const NvParameterized::DefinitionImpl* getParameterDefinitionTree(void); + virtual const NvParameterized::DefinitionImpl* getParameterDefinitionTree(void) const; + + + virtual void getVarPtr(const NvParameterized::Handle& handle, void*& ptr, size_t& offset) const; + +private: + + void buildTree(void); + void initDynamicArrays(void); + void initStrings(void); + void initReferences(void); + void freeDynamicArrays(void); + void freeStrings(void); + void freeReferences(void); + + static bool mBuiltFlag; + static NvParameterized::MutexType mBuiltFlagMutex; +}; + +class BufferF32x3Factory : public NvParameterized::Factory +{ + static const char* const vptr; + +public: + + virtual void freeParameterDefinitionTable(NvParameterized::Traits* traits) + { + BufferF32x3::freeParameterDefinitionTable(traits); + } + + virtual NvParameterized::Interface* create(NvParameterized::Traits* paramTraits) + { + // placement new on this class using mParameterizedTraits + + void* newPtr = paramTraits->alloc(sizeof(BufferF32x3), BufferF32x3::ClassAlignment); + if (!NvParameterized::IsAligned(newPtr, BufferF32x3::ClassAlignment)) + { + NV_PARAM_TRAITS_WARNING(paramTraits, "Unaligned memory allocation for class BufferF32x3"); + paramTraits->free(newPtr); + return 0; + } + + memset(newPtr, 0, sizeof(BufferF32x3)); // always initialize memory allocated to zero for 
default values + return NV_PARAM_PLACEMENT_NEW(newPtr, BufferF32x3)(paramTraits); + } + + virtual NvParameterized::Interface* finish(NvParameterized::Traits* paramTraits, void* bufObj, void* bufStart, int32_t* refCount) + { + if (!NvParameterized::IsAligned(bufObj, BufferF32x3::ClassAlignment) + || !NvParameterized::IsAligned(bufStart, BufferF32x3::ClassAlignment)) + { + NV_PARAM_TRAITS_WARNING(paramTraits, "Unaligned memory allocation for class BufferF32x3"); + return 0; + } + + // Init NvParameters-part + // We used to call empty constructor of BufferF32x3 here + // but it may call default constructors of members and spoil the data + NV_PARAM_PLACEMENT_NEW(bufObj, NvParameterized::NvParameters)(paramTraits, bufStart, refCount); + + // Init vtable (everything else is already initialized) + *(const char**)bufObj = vptr; + + return (BufferF32x3*)bufObj; + } + + virtual const char* getClassName() + { + return (BufferF32x3::staticClassName()); + } + + virtual uint32_t getVersion() + { + return (BufferF32x3::staticVersion()); + } + + virtual uint32_t getAlignment() + { + return (BufferF32x3::ClassAlignment); + } + + virtual const uint32_t* getChecksum(uint32_t& bits) + { + return (BufferF32x3::staticChecksum(bits)); + } +}; +#endif // NV_PARAMETERIZED_ONLY_LAYOUTS + +} // namespace apex +} // namespace nvidia + +#if PX_VC +#pragma warning(pop) +#endif + +#endif diff --git a/APEX_1.4/framework/include/autogen/BufferF32x4.h b/APEX_1.4/framework/include/autogen/BufferF32x4.h new file mode 100644 index 00000000..7e596c8e --- /dev/null +++ b/APEX_1.4/framework/include/autogen/BufferF32x4.h @@ -0,0 +1,246 @@ +// This code contains NVIDIA Confidential Information and is disclosed to you +// under a form of NVIDIA software license agreement provided separately to you. +// +// Notice +// NVIDIA Corporation and its licensors retain all intellectual property and +// proprietary rights in and to this software and related documentation and +// any modifications thereto. 
Any use, reproduction, disclosure, or +// distribution of this software and related documentation without an express +// license agreement from NVIDIA Corporation is strictly prohibited. +// +// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES +// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO +// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT, +// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE. +// +// Information and code furnished is believed to be accurate and reliable. +// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such +// information or for any infringement of patents or other rights of third parties that may +// result from its use. No license is granted by implication or otherwise under any patent +// or patent rights of NVIDIA Corporation. Details are subject to change without notice. +// This code supersedes and replaces all information previously supplied. +// NVIDIA Corporation products are not authorized for use as critical +// components in life support devices or systems without express written approval of +// NVIDIA Corporation. +// +// Copyright (c) 2008-2015 NVIDIA Corporation. All rights reserved. 
+ +// This file was generated by NvParameterized/scripts/GenParameterized.pl + + +#ifndef HEADER_BufferF32x4_h +#define HEADER_BufferF32x4_h + +#include "NvParametersTypes.h" + +#ifndef NV_PARAMETERIZED_ONLY_LAYOUTS +#include "nvparameterized/NvParameterized.h" +#include "nvparameterized/NvParameterizedTraits.h" +#include "NvParameters.h" +#include "NvTraitsInternal.h" +#endif + +namespace nvidia +{ +namespace apex +{ + +#if PX_VC +#pragma warning(push) +#pragma warning(disable: 4324) // structure was padded due to __declspec(align()) +#endif + +namespace BufferF32x4NS +{ + +struct F32x4_Type; + +struct F32x4_DynamicArray1D_Type +{ + F32x4_Type* buf; + bool isAllocated; + int32_t elementSize; + int32_t arraySizes[1]; +}; + +struct F32x4_Type +{ + PX_ALIGN(16, float x); + float y; + float z; + float w; +}; + +struct ParametersStruct +{ + + F32x4_DynamicArray1D_Type data; + +}; + +static const uint32_t checksum[] = { 0x80321851, 0xa99e95a1, 0xd26ec9a8, 0x14206b37, }; + +} // namespace BufferF32x4NS + +#ifndef NV_PARAMETERIZED_ONLY_LAYOUTS +class BufferF32x4 : public NvParameterized::NvParameters, public BufferF32x4NS::ParametersStruct +{ +public: + BufferF32x4(NvParameterized::Traits* traits, void* buf = 0, int32_t* refCount = 0); + + virtual ~BufferF32x4(); + + virtual void destroy(); + + static const char* staticClassName(void) + { + return("BufferF32x4"); + } + + const char* className(void) const + { + return(staticClassName()); + } + + static const uint32_t ClassVersion = ((uint32_t)0 << 16) + (uint32_t)1; + + static uint32_t staticVersion(void) + { + return ClassVersion; + } + + uint32_t version(void) const + { + return(staticVersion()); + } + + static const uint32_t ClassAlignment = 8; + + static const uint32_t* staticChecksum(uint32_t& bits) + { + bits = 8 * sizeof(BufferF32x4NS::checksum); + return BufferF32x4NS::checksum; + } + + static void freeParameterDefinitionTable(NvParameterized::Traits* traits); + + const uint32_t* checksum(uint32_t& bits) const + { 
+ return staticChecksum(bits); + } + + const BufferF32x4NS::ParametersStruct& parameters(void) const + { + BufferF32x4* tmpThis = const_cast<BufferF32x4*>(this); + return *(static_cast<BufferF32x4NS::ParametersStruct*>(tmpThis)); + } + + BufferF32x4NS::ParametersStruct& parameters(void) + { + return *(static_cast<BufferF32x4NS::ParametersStruct*>(this)); + } + + virtual NvParameterized::ErrorType getParameterHandle(const char* long_name, NvParameterized::Handle& handle) const; + virtual NvParameterized::ErrorType getParameterHandle(const char* long_name, NvParameterized::Handle& handle); + + void initDefaults(void); + +protected: + + virtual const NvParameterized::DefinitionImpl* getParameterDefinitionTree(void); + virtual const NvParameterized::DefinitionImpl* getParameterDefinitionTree(void) const; + + + virtual void getVarPtr(const NvParameterized::Handle& handle, void*& ptr, size_t& offset) const; + +private: + + void buildTree(void); + void initDynamicArrays(void); + void initStrings(void); + void initReferences(void); + void freeDynamicArrays(void); + void freeStrings(void); + void freeReferences(void); + + static bool mBuiltFlag; + static NvParameterized::MutexType mBuiltFlagMutex; +}; + +class BufferF32x4Factory : public NvParameterized::Factory +{ + static const char* const vptr; + +public: + + virtual void freeParameterDefinitionTable(NvParameterized::Traits* traits) + { + BufferF32x4::freeParameterDefinitionTable(traits); + } + + virtual NvParameterized::Interface* create(NvParameterized::Traits* paramTraits) + { + // placement new on this class using mParameterizedTraits + + void* newPtr = paramTraits->alloc(sizeof(BufferF32x4), BufferF32x4::ClassAlignment); + if (!NvParameterized::IsAligned(newPtr, BufferF32x4::ClassAlignment)) + { + NV_PARAM_TRAITS_WARNING(paramTraits, "Unaligned memory allocation for class BufferF32x4"); + paramTraits->free(newPtr); + return 0; + } + + memset(newPtr, 0, sizeof(BufferF32x4)); // always initialize memory allocated to 
zero for default values + return NV_PARAM_PLACEMENT_NEW(newPtr, BufferF32x4)(paramTraits); + } + + virtual NvParameterized::Interface* finish(NvParameterized::Traits* paramTraits, void* bufObj, void* bufStart, int32_t* refCount) + { + if (!NvParameterized::IsAligned(bufObj, BufferF32x4::ClassAlignment) + || !NvParameterized::IsAligned(bufStart, BufferF32x4::ClassAlignment)) + { + NV_PARAM_TRAITS_WARNING(paramTraits, "Unaligned memory allocation for class BufferF32x4"); + return 0; + } + + // Init NvParameters-part + // We used to call empty constructor of BufferF32x4 here + // but it may call default constructors of members and spoil the data + NV_PARAM_PLACEMENT_NEW(bufObj, NvParameterized::NvParameters)(paramTraits, bufStart, refCount); + + // Init vtable (everything else is already initialized) + *(const char**)bufObj = vptr; + + return (BufferF32x4*)bufObj; + } + + virtual const char* getClassName() + { + return (BufferF32x4::staticClassName()); + } + + virtual uint32_t getVersion() + { + return (BufferF32x4::staticVersion()); + } + + virtual uint32_t getAlignment() + { + return (BufferF32x4::ClassAlignment); + } + + virtual const uint32_t* getChecksum(uint32_t& bits) + { + return (BufferF32x4::staticChecksum(bits)); + } +}; +#endif // NV_PARAMETERIZED_ONLY_LAYOUTS + +} // namespace apex +} // namespace nvidia + +#if PX_VC +#pragma warning(pop) +#endif + +#endif diff --git a/APEX_1.4/framework/include/autogen/BufferU16x1.h b/APEX_1.4/framework/include/autogen/BufferU16x1.h new file mode 100644 index 00000000..b093c47c --- /dev/null +++ b/APEX_1.4/framework/include/autogen/BufferU16x1.h @@ -0,0 +1,238 @@ +// This code contains NVIDIA Confidential Information and is disclosed to you +// under a form of NVIDIA software license agreement provided separately to you. +// +// Notice +// NVIDIA Corporation and its licensors retain all intellectual property and +// proprietary rights in and to this software and related documentation and +// any modifications thereto. 
Any use, reproduction, disclosure, or +// distribution of this software and related documentation without an express +// license agreement from NVIDIA Corporation is strictly prohibited. +// +// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES +// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO +// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT, +// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE. +// +// Information and code furnished is believed to be accurate and reliable. +// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such +// information or for any infringement of patents or other rights of third parties that may +// result from its use. No license is granted by implication or otherwise under any patent +// or patent rights of NVIDIA Corporation. Details are subject to change without notice. +// This code supersedes and replaces all information previously supplied. +// NVIDIA Corporation products are not authorized for use as critical +// components in life support devices or systems without express written approval of +// NVIDIA Corporation. +// +// Copyright (c) 2008-2015 NVIDIA Corporation. All rights reserved. 
+ +// This file was generated by NvParameterized/scripts/GenParameterized.pl + + +#ifndef HEADER_BufferU16x1_h +#define HEADER_BufferU16x1_h + +#include "NvParametersTypes.h" + +#ifndef NV_PARAMETERIZED_ONLY_LAYOUTS +#include "nvparameterized/NvParameterized.h" +#include "nvparameterized/NvParameterizedTraits.h" +#include "NvParameters.h" +#include "NvTraitsInternal.h" +#endif + +namespace nvidia +{ +namespace apex +{ + +#if PX_VC +#pragma warning(push) +#pragma warning(disable: 4324) // structure was padded due to __declspec(align()) +#endif + +namespace BufferU16x1NS +{ + + +struct U16_DynamicArray1D_Type +{ + uint16_t* buf; + bool isAllocated; + int32_t elementSize; + int32_t arraySizes[1]; +}; + + +struct ParametersStruct +{ + + U16_DynamicArray1D_Type data; + +}; + +static const uint32_t checksum[] = { 0x3c20aee2, 0x4e6abe8c, 0x8c1e5625, 0x7cabf8fc, }; + +} // namespace BufferU16x1NS + +#ifndef NV_PARAMETERIZED_ONLY_LAYOUTS +class BufferU16x1 : public NvParameterized::NvParameters, public BufferU16x1NS::ParametersStruct +{ +public: + BufferU16x1(NvParameterized::Traits* traits, void* buf = 0, int32_t* refCount = 0); + + virtual ~BufferU16x1(); + + virtual void destroy(); + + static const char* staticClassName(void) + { + return("BufferU16x1"); + } + + const char* className(void) const + { + return(staticClassName()); + } + + static const uint32_t ClassVersion = ((uint32_t)0 << 16) + (uint32_t)0; + + static uint32_t staticVersion(void) + { + return ClassVersion; + } + + uint32_t version(void) const + { + return(staticVersion()); + } + + static const uint32_t ClassAlignment = 8; + + static const uint32_t* staticChecksum(uint32_t& bits) + { + bits = 8 * sizeof(BufferU16x1NS::checksum); + return BufferU16x1NS::checksum; + } + + static void freeParameterDefinitionTable(NvParameterized::Traits* traits); + + const uint32_t* checksum(uint32_t& bits) const + { + return staticChecksum(bits); + } + + const BufferU16x1NS::ParametersStruct& parameters(void) const + { + 
BufferU16x1* tmpThis = const_cast<BufferU16x1*>(this); + return *(static_cast<BufferU16x1NS::ParametersStruct*>(tmpThis)); + } + + BufferU16x1NS::ParametersStruct& parameters(void) + { + return *(static_cast<BufferU16x1NS::ParametersStruct*>(this)); + } + + virtual NvParameterized::ErrorType getParameterHandle(const char* long_name, NvParameterized::Handle& handle) const; + virtual NvParameterized::ErrorType getParameterHandle(const char* long_name, NvParameterized::Handle& handle); + + void initDefaults(void); + +protected: + + virtual const NvParameterized::DefinitionImpl* getParameterDefinitionTree(void); + virtual const NvParameterized::DefinitionImpl* getParameterDefinitionTree(void) const; + + + virtual void getVarPtr(const NvParameterized::Handle& handle, void*& ptr, size_t& offset) const; + +private: + + void buildTree(void); + void initDynamicArrays(void); + void initStrings(void); + void initReferences(void); + void freeDynamicArrays(void); + void freeStrings(void); + void freeReferences(void); + + static bool mBuiltFlag; + static NvParameterized::MutexType mBuiltFlagMutex; +}; + +class BufferU16x1Factory : public NvParameterized::Factory +{ + static const char* const vptr; + +public: + + virtual void freeParameterDefinitionTable(NvParameterized::Traits* traits) + { + BufferU16x1::freeParameterDefinitionTable(traits); + } + + virtual NvParameterized::Interface* create(NvParameterized::Traits* paramTraits) + { + // placement new on this class using mParameterizedTraits + + void* newPtr = paramTraits->alloc(sizeof(BufferU16x1), BufferU16x1::ClassAlignment); + if (!NvParameterized::IsAligned(newPtr, BufferU16x1::ClassAlignment)) + { + NV_PARAM_TRAITS_WARNING(paramTraits, "Unaligned memory allocation for class BufferU16x1"); + paramTraits->free(newPtr); + return 0; + } + + memset(newPtr, 0, sizeof(BufferU16x1)); // always initialize memory allocated to zero for default values + return NV_PARAM_PLACEMENT_NEW(newPtr, BufferU16x1)(paramTraits); + } + + virtual 
NvParameterized::Interface* finish(NvParameterized::Traits* paramTraits, void* bufObj, void* bufStart, int32_t* refCount) + { + if (!NvParameterized::IsAligned(bufObj, BufferU16x1::ClassAlignment) + || !NvParameterized::IsAligned(bufStart, BufferU16x1::ClassAlignment)) + { + NV_PARAM_TRAITS_WARNING(paramTraits, "Unaligned memory allocation for class BufferU16x1"); + return 0; + } + + // Init NvParameters-part + // We used to call empty constructor of BufferU16x1 here + // but it may call default constructors of members and spoil the data + NV_PARAM_PLACEMENT_NEW(bufObj, NvParameterized::NvParameters)(paramTraits, bufStart, refCount); + + // Init vtable (everything else is already initialized) + *(const char**)bufObj = vptr; + + return (BufferU16x1*)bufObj; + } + + virtual const char* getClassName() + { + return (BufferU16x1::staticClassName()); + } + + virtual uint32_t getVersion() + { + return (BufferU16x1::staticVersion()); + } + + virtual uint32_t getAlignment() + { + return (BufferU16x1::ClassAlignment); + } + + virtual const uint32_t* getChecksum(uint32_t& bits) + { + return (BufferU16x1::staticChecksum(bits)); + } +}; +#endif // NV_PARAMETERIZED_ONLY_LAYOUTS + +} // namespace apex +} // namespace nvidia + +#if PX_VC +#pragma warning(pop) +#endif + +#endif diff --git a/APEX_1.4/framework/include/autogen/BufferU16x2.h b/APEX_1.4/framework/include/autogen/BufferU16x2.h new file mode 100644 index 00000000..75931956 --- /dev/null +++ b/APEX_1.4/framework/include/autogen/BufferU16x2.h @@ -0,0 +1,244 @@ +// This code contains NVIDIA Confidential Information and is disclosed to you +// under a form of NVIDIA software license agreement provided separately to you. +// +// Notice +// NVIDIA Corporation and its licensors retain all intellectual property and +// proprietary rights in and to this software and related documentation and +// any modifications thereto. 
Any use, reproduction, disclosure, or +// distribution of this software and related documentation without an express +// license agreement from NVIDIA Corporation is strictly prohibited. +// +// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES +// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO +// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT, +// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE. +// +// Information and code furnished is believed to be accurate and reliable. +// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such +// information or for any infringement of patents or other rights of third parties that may +// result from its use. No license is granted by implication or otherwise under any patent +// or patent rights of NVIDIA Corporation. Details are subject to change without notice. +// This code supersedes and replaces all information previously supplied. +// NVIDIA Corporation products are not authorized for use as critical +// components in life support devices or systems without express written approval of +// NVIDIA Corporation. +// +// Copyright (c) 2008-2015 NVIDIA Corporation. All rights reserved. 
+ +// This file was generated by NvParameterized/scripts/GenParameterized.pl + + +#ifndef HEADER_BufferU16x2_h +#define HEADER_BufferU16x2_h + +#include "NvParametersTypes.h" + +#ifndef NV_PARAMETERIZED_ONLY_LAYOUTS +#include "nvparameterized/NvParameterized.h" +#include "nvparameterized/NvParameterizedTraits.h" +#include "NvParameters.h" +#include "NvTraitsInternal.h" +#endif + +namespace nvidia +{ +namespace apex +{ + +#if PX_VC +#pragma warning(push) +#pragma warning(disable: 4324) // structure was padded due to __declspec(align()) +#endif + +namespace BufferU16x2NS +{ + +struct U16x2_Type; + +struct U16x2_DynamicArray1D_Type +{ + U16x2_Type* buf; + bool isAllocated; + int32_t elementSize; + int32_t arraySizes[1]; +}; + +struct U16x2_Type +{ + uint16_t x; + uint16_t y; +}; + +struct ParametersStruct +{ + + U16x2_DynamicArray1D_Type data; + +}; + +static const uint32_t checksum[] = { 0x35087bb2, 0x98bdb0fd, 0x22e601f6, 0x79ba85e9, }; + +} // namespace BufferU16x2NS + +#ifndef NV_PARAMETERIZED_ONLY_LAYOUTS +class BufferU16x2 : public NvParameterized::NvParameters, public BufferU16x2NS::ParametersStruct +{ +public: + BufferU16x2(NvParameterized::Traits* traits, void* buf = 0, int32_t* refCount = 0); + + virtual ~BufferU16x2(); + + virtual void destroy(); + + static const char* staticClassName(void) + { + return("BufferU16x2"); + } + + const char* className(void) const + { + return(staticClassName()); + } + + static const uint32_t ClassVersion = ((uint32_t)0 << 16) + (uint32_t)0; + + static uint32_t staticVersion(void) + { + return ClassVersion; + } + + uint32_t version(void) const + { + return(staticVersion()); + } + + static const uint32_t ClassAlignment = 8; + + static const uint32_t* staticChecksum(uint32_t& bits) + { + bits = 8 * sizeof(BufferU16x2NS::checksum); + return BufferU16x2NS::checksum; + } + + static void freeParameterDefinitionTable(NvParameterized::Traits* traits); + + const uint32_t* checksum(uint32_t& bits) const + { + return staticChecksum(bits); 
+ } + + const BufferU16x2NS::ParametersStruct& parameters(void) const + { + BufferU16x2* tmpThis = const_cast<BufferU16x2*>(this); + return *(static_cast<BufferU16x2NS::ParametersStruct*>(tmpThis)); + } + + BufferU16x2NS::ParametersStruct& parameters(void) + { + return *(static_cast<BufferU16x2NS::ParametersStruct*>(this)); + } + + virtual NvParameterized::ErrorType getParameterHandle(const char* long_name, NvParameterized::Handle& handle) const; + virtual NvParameterized::ErrorType getParameterHandle(const char* long_name, NvParameterized::Handle& handle); + + void initDefaults(void); + +protected: + + virtual const NvParameterized::DefinitionImpl* getParameterDefinitionTree(void); + virtual const NvParameterized::DefinitionImpl* getParameterDefinitionTree(void) const; + + + virtual void getVarPtr(const NvParameterized::Handle& handle, void*& ptr, size_t& offset) const; + +private: + + void buildTree(void); + void initDynamicArrays(void); + void initStrings(void); + void initReferences(void); + void freeDynamicArrays(void); + void freeStrings(void); + void freeReferences(void); + + static bool mBuiltFlag; + static NvParameterized::MutexType mBuiltFlagMutex; +}; + +class BufferU16x2Factory : public NvParameterized::Factory +{ + static const char* const vptr; + +public: + + virtual void freeParameterDefinitionTable(NvParameterized::Traits* traits) + { + BufferU16x2::freeParameterDefinitionTable(traits); + } + + virtual NvParameterized::Interface* create(NvParameterized::Traits* paramTraits) + { + // placement new on this class using mParameterizedTraits + + void* newPtr = paramTraits->alloc(sizeof(BufferU16x2), BufferU16x2::ClassAlignment); + if (!NvParameterized::IsAligned(newPtr, BufferU16x2::ClassAlignment)) + { + NV_PARAM_TRAITS_WARNING(paramTraits, "Unaligned memory allocation for class BufferU16x2"); + paramTraits->free(newPtr); + return 0; + } + + memset(newPtr, 0, sizeof(BufferU16x2)); // always initialize memory allocated to zero for default values + return 
NV_PARAM_PLACEMENT_NEW(newPtr, BufferU16x2)(paramTraits); + } + + virtual NvParameterized::Interface* finish(NvParameterized::Traits* paramTraits, void* bufObj, void* bufStart, int32_t* refCount) + { + if (!NvParameterized::IsAligned(bufObj, BufferU16x2::ClassAlignment) + || !NvParameterized::IsAligned(bufStart, BufferU16x2::ClassAlignment)) + { + NV_PARAM_TRAITS_WARNING(paramTraits, "Unaligned memory allocation for class BufferU16x2"); + return 0; + } + + // Init NvParameters-part + // We used to call empty constructor of BufferU16x2 here + // but it may call default constructors of members and spoil the data + NV_PARAM_PLACEMENT_NEW(bufObj, NvParameterized::NvParameters)(paramTraits, bufStart, refCount); + + // Init vtable (everything else is already initialized) + *(const char**)bufObj = vptr; + + return (BufferU16x2*)bufObj; + } + + virtual const char* getClassName() + { + return (BufferU16x2::staticClassName()); + } + + virtual uint32_t getVersion() + { + return (BufferU16x2::staticVersion()); + } + + virtual uint32_t getAlignment() + { + return (BufferU16x2::ClassAlignment); + } + + virtual const uint32_t* getChecksum(uint32_t& bits) + { + return (BufferU16x2::staticChecksum(bits)); + } +}; +#endif // NV_PARAMETERIZED_ONLY_LAYOUTS + +} // namespace apex +} // namespace nvidia + +#if PX_VC +#pragma warning(pop) +#endif + +#endif diff --git a/APEX_1.4/framework/include/autogen/BufferU16x3.h b/APEX_1.4/framework/include/autogen/BufferU16x3.h new file mode 100644 index 00000000..c4e6675b --- /dev/null +++ b/APEX_1.4/framework/include/autogen/BufferU16x3.h @@ -0,0 +1,245 @@ +// This code contains NVIDIA Confidential Information and is disclosed to you +// under a form of NVIDIA software license agreement provided separately to you. +// +// Notice +// NVIDIA Corporation and its licensors retain all intellectual property and +// proprietary rights in and to this software and related documentation and +// any modifications thereto. 
Any use, reproduction, disclosure, or +// distribution of this software and related documentation without an express +// license agreement from NVIDIA Corporation is strictly prohibited. +// +// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES +// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO +// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT, +// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE. +// +// Information and code furnished is believed to be accurate and reliable. +// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such +// information or for any infringement of patents or other rights of third parties that may +// result from its use. No license is granted by implication or otherwise under any patent +// or patent rights of NVIDIA Corporation. Details are subject to change without notice. +// This code supersedes and replaces all information previously supplied. +// NVIDIA Corporation products are not authorized for use as critical +// components in life support devices or systems without express written approval of +// NVIDIA Corporation. +// +// Copyright (c) 2008-2015 NVIDIA Corporation. All rights reserved. 
+ +// This file was generated by NvParameterized/scripts/GenParameterized.pl + + +#ifndef HEADER_BufferU16x3_h +#define HEADER_BufferU16x3_h + +#include "NvParametersTypes.h" + +#ifndef NV_PARAMETERIZED_ONLY_LAYOUTS +#include "nvparameterized/NvParameterized.h" +#include "nvparameterized/NvParameterizedTraits.h" +#include "NvParameters.h" +#include "NvTraitsInternal.h" +#endif + +namespace nvidia +{ +namespace apex +{ + +#if PX_VC +#pragma warning(push) +#pragma warning(disable: 4324) // structure was padded due to __declspec(align()) +#endif + +namespace BufferU16x3NS +{ + +struct U16x3_Type; + +struct U16x3_DynamicArray1D_Type +{ + U16x3_Type* buf; + bool isAllocated; + int32_t elementSize; + int32_t arraySizes[1]; +}; + +struct U16x3_Type +{ + uint16_t x; + uint16_t y; + uint16_t z; +}; + +struct ParametersStruct +{ + + U16x3_DynamicArray1D_Type data; + +}; + +static const uint32_t checksum[] = { 0x904a928f, 0x9f3f0b91, 0x29168b39, 0x31638e80, }; + +} // namespace BufferU16x3NS + +#ifndef NV_PARAMETERIZED_ONLY_LAYOUTS +class BufferU16x3 : public NvParameterized::NvParameters, public BufferU16x3NS::ParametersStruct +{ +public: + BufferU16x3(NvParameterized::Traits* traits, void* buf = 0, int32_t* refCount = 0); + + virtual ~BufferU16x3(); + + virtual void destroy(); + + static const char* staticClassName(void) + { + return("BufferU16x3"); + } + + const char* className(void) const + { + return(staticClassName()); + } + + static const uint32_t ClassVersion = ((uint32_t)0 << 16) + (uint32_t)0; + + static uint32_t staticVersion(void) + { + return ClassVersion; + } + + uint32_t version(void) const + { + return(staticVersion()); + } + + static const uint32_t ClassAlignment = 8; + + static const uint32_t* staticChecksum(uint32_t& bits) + { + bits = 8 * sizeof(BufferU16x3NS::checksum); + return BufferU16x3NS::checksum; + } + + static void freeParameterDefinitionTable(NvParameterized::Traits* traits); + + const uint32_t* checksum(uint32_t& bits) const + { + return 
staticChecksum(bits); + } + + const BufferU16x3NS::ParametersStruct& parameters(void) const + { + BufferU16x3* tmpThis = const_cast<BufferU16x3*>(this); + return *(static_cast<BufferU16x3NS::ParametersStruct*>(tmpThis)); + } + + BufferU16x3NS::ParametersStruct& parameters(void) + { + return *(static_cast<BufferU16x3NS::ParametersStruct*>(this)); + } + + virtual NvParameterized::ErrorType getParameterHandle(const char* long_name, NvParameterized::Handle& handle) const; + virtual NvParameterized::ErrorType getParameterHandle(const char* long_name, NvParameterized::Handle& handle); + + void initDefaults(void); + +protected: + + virtual const NvParameterized::DefinitionImpl* getParameterDefinitionTree(void); + virtual const NvParameterized::DefinitionImpl* getParameterDefinitionTree(void) const; + + + virtual void getVarPtr(const NvParameterized::Handle& handle, void*& ptr, size_t& offset) const; + +private: + + void buildTree(void); + void initDynamicArrays(void); + void initStrings(void); + void initReferences(void); + void freeDynamicArrays(void); + void freeStrings(void); + void freeReferences(void); + + static bool mBuiltFlag; + static NvParameterized::MutexType mBuiltFlagMutex; +}; + +class BufferU16x3Factory : public NvParameterized::Factory +{ + static const char* const vptr; + +public: + + virtual void freeParameterDefinitionTable(NvParameterized::Traits* traits) + { + BufferU16x3::freeParameterDefinitionTable(traits); + } + + virtual NvParameterized::Interface* create(NvParameterized::Traits* paramTraits) + { + // placement new on this class using mParameterizedTraits + + void* newPtr = paramTraits->alloc(sizeof(BufferU16x3), BufferU16x3::ClassAlignment); + if (!NvParameterized::IsAligned(newPtr, BufferU16x3::ClassAlignment)) + { + NV_PARAM_TRAITS_WARNING(paramTraits, "Unaligned memory allocation for class BufferU16x3"); + paramTraits->free(newPtr); + return 0; + } + + memset(newPtr, 0, sizeof(BufferU16x3)); // always initialize memory allocated to zero for 
default values + return NV_PARAM_PLACEMENT_NEW(newPtr, BufferU16x3)(paramTraits); + } + + virtual NvParameterized::Interface* finish(NvParameterized::Traits* paramTraits, void* bufObj, void* bufStart, int32_t* refCount) + { + if (!NvParameterized::IsAligned(bufObj, BufferU16x3::ClassAlignment) + || !NvParameterized::IsAligned(bufStart, BufferU16x3::ClassAlignment)) + { + NV_PARAM_TRAITS_WARNING(paramTraits, "Unaligned memory allocation for class BufferU16x3"); + return 0; + } + + // Init NvParameters-part + // We used to call empty constructor of BufferU16x3 here + // but it may call default constructors of members and spoil the data + NV_PARAM_PLACEMENT_NEW(bufObj, NvParameterized::NvParameters)(paramTraits, bufStart, refCount); + + // Init vtable (everything else is already initialized) + *(const char**)bufObj = vptr; + + return (BufferU16x3*)bufObj; + } + + virtual const char* getClassName() + { + return (BufferU16x3::staticClassName()); + } + + virtual uint32_t getVersion() + { + return (BufferU16x3::staticVersion()); + } + + virtual uint32_t getAlignment() + { + return (BufferU16x3::ClassAlignment); + } + + virtual const uint32_t* getChecksum(uint32_t& bits) + { + return (BufferU16x3::staticChecksum(bits)); + } +}; +#endif // NV_PARAMETERIZED_ONLY_LAYOUTS + +} // namespace apex +} // namespace nvidia + +#if PX_VC +#pragma warning(pop) +#endif + +#endif diff --git a/APEX_1.4/framework/include/autogen/BufferU16x4.h b/APEX_1.4/framework/include/autogen/BufferU16x4.h new file mode 100644 index 00000000..19307114 --- /dev/null +++ b/APEX_1.4/framework/include/autogen/BufferU16x4.h @@ -0,0 +1,246 @@ +// This code contains NVIDIA Confidential Information and is disclosed to you +// under a form of NVIDIA software license agreement provided separately to you. +// +// Notice +// NVIDIA Corporation and its licensors retain all intellectual property and +// proprietary rights in and to this software and related documentation and +// any modifications thereto. 
Any use, reproduction, disclosure, or +// distribution of this software and related documentation without an express +// license agreement from NVIDIA Corporation is strictly prohibited. +// +// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES +// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO +// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT, +// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE. +// +// Information and code furnished is believed to be accurate and reliable. +// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such +// information or for any infringement of patents or other rights of third parties that may +// result from its use. No license is granted by implication or otherwise under any patent +// or patent rights of NVIDIA Corporation. Details are subject to change without notice. +// This code supersedes and replaces all information previously supplied. +// NVIDIA Corporation products are not authorized for use as critical +// components in life support devices or systems without express written approval of +// NVIDIA Corporation. +// +// Copyright (c) 2008-2015 NVIDIA Corporation. All rights reserved. 
+ +// This file was generated by NvParameterized/scripts/GenParameterized.pl + + +#ifndef HEADER_BufferU16x4_h +#define HEADER_BufferU16x4_h + +#include "NvParametersTypes.h" + +#ifndef NV_PARAMETERIZED_ONLY_LAYOUTS +#include "nvparameterized/NvParameterized.h" +#include "nvparameterized/NvParameterizedTraits.h" +#include "NvParameters.h" +#include "NvTraitsInternal.h" +#endif + +namespace nvidia +{ +namespace apex +{ + +#if PX_VC +#pragma warning(push) +#pragma warning(disable: 4324) // structure was padded due to __declspec(align()) +#endif + +namespace BufferU16x4NS +{ + +struct U16x4_Type; + +struct U16x4_DynamicArray1D_Type +{ + U16x4_Type* buf; + bool isAllocated; + int32_t elementSize; + int32_t arraySizes[1]; +}; + +struct U16x4_Type +{ + uint16_t x; + uint16_t y; + uint16_t z; + uint16_t w; +}; + +struct ParametersStruct +{ + + U16x4_DynamicArray1D_Type data; + +}; + +static const uint32_t checksum[] = { 0x17ce6b83, 0x32ba98aa, 0xd03f98f6, 0x26918369, }; + +} // namespace BufferU16x4NS + +#ifndef NV_PARAMETERIZED_ONLY_LAYOUTS +class BufferU16x4 : public NvParameterized::NvParameters, public BufferU16x4NS::ParametersStruct +{ +public: + BufferU16x4(NvParameterized::Traits* traits, void* buf = 0, int32_t* refCount = 0); + + virtual ~BufferU16x4(); + + virtual void destroy(); + + static const char* staticClassName(void) + { + return("BufferU16x4"); + } + + const char* className(void) const + { + return(staticClassName()); + } + + static const uint32_t ClassVersion = ((uint32_t)0 << 16) + (uint32_t)0; + + static uint32_t staticVersion(void) + { + return ClassVersion; + } + + uint32_t version(void) const + { + return(staticVersion()); + } + + static const uint32_t ClassAlignment = 8; + + static const uint32_t* staticChecksum(uint32_t& bits) + { + bits = 8 * sizeof(BufferU16x4NS::checksum); + return BufferU16x4NS::checksum; + } + + static void freeParameterDefinitionTable(NvParameterized::Traits* traits); + + const uint32_t* checksum(uint32_t& bits) const + { + 
return staticChecksum(bits); + } + + const BufferU16x4NS::ParametersStruct& parameters(void) const + { + BufferU16x4* tmpThis = const_cast<BufferU16x4*>(this); + return *(static_cast<BufferU16x4NS::ParametersStruct*>(tmpThis)); + } + + BufferU16x4NS::ParametersStruct& parameters(void) + { + return *(static_cast<BufferU16x4NS::ParametersStruct*>(this)); + } + + virtual NvParameterized::ErrorType getParameterHandle(const char* long_name, NvParameterized::Handle& handle) const; + virtual NvParameterized::ErrorType getParameterHandle(const char* long_name, NvParameterized::Handle& handle); + + void initDefaults(void); + +protected: + + virtual const NvParameterized::DefinitionImpl* getParameterDefinitionTree(void); + virtual const NvParameterized::DefinitionImpl* getParameterDefinitionTree(void) const; + + + virtual void getVarPtr(const NvParameterized::Handle& handle, void*& ptr, size_t& offset) const; + +private: + + void buildTree(void); + void initDynamicArrays(void); + void initStrings(void); + void initReferences(void); + void freeDynamicArrays(void); + void freeStrings(void); + void freeReferences(void); + + static bool mBuiltFlag; + static NvParameterized::MutexType mBuiltFlagMutex; +}; + +class BufferU16x4Factory : public NvParameterized::Factory +{ + static const char* const vptr; + +public: + + virtual void freeParameterDefinitionTable(NvParameterized::Traits* traits) + { + BufferU16x4::freeParameterDefinitionTable(traits); + } + + virtual NvParameterized::Interface* create(NvParameterized::Traits* paramTraits) + { + // placement new on this class using mParameterizedTraits + + void* newPtr = paramTraits->alloc(sizeof(BufferU16x4), BufferU16x4::ClassAlignment); + if (!NvParameterized::IsAligned(newPtr, BufferU16x4::ClassAlignment)) + { + NV_PARAM_TRAITS_WARNING(paramTraits, "Unaligned memory allocation for class BufferU16x4"); + paramTraits->free(newPtr); + return 0; + } + + memset(newPtr, 0, sizeof(BufferU16x4)); // always initialize memory allocated to 
zero for default values + return NV_PARAM_PLACEMENT_NEW(newPtr, BufferU16x4)(paramTraits); + } + + virtual NvParameterized::Interface* finish(NvParameterized::Traits* paramTraits, void* bufObj, void* bufStart, int32_t* refCount) + { + if (!NvParameterized::IsAligned(bufObj, BufferU16x4::ClassAlignment) + || !NvParameterized::IsAligned(bufStart, BufferU16x4::ClassAlignment)) + { + NV_PARAM_TRAITS_WARNING(paramTraits, "Unaligned memory allocation for class BufferU16x4"); + return 0; + } + + // Init NvParameters-part + // We used to call empty constructor of BufferU16x4 here + // but it may call default constructors of members and spoil the data + NV_PARAM_PLACEMENT_NEW(bufObj, NvParameterized::NvParameters)(paramTraits, bufStart, refCount); + + // Init vtable (everything else is already initialized) + *(const char**)bufObj = vptr; + + return (BufferU16x4*)bufObj; + } + + virtual const char* getClassName() + { + return (BufferU16x4::staticClassName()); + } + + virtual uint32_t getVersion() + { + return (BufferU16x4::staticVersion()); + } + + virtual uint32_t getAlignment() + { + return (BufferU16x4::ClassAlignment); + } + + virtual const uint32_t* getChecksum(uint32_t& bits) + { + return (BufferU16x4::staticChecksum(bits)); + } +}; +#endif // NV_PARAMETERIZED_ONLY_LAYOUTS + +} // namespace apex +} // namespace nvidia + +#if PX_VC +#pragma warning(pop) +#endif + +#endif diff --git a/APEX_1.4/framework/include/autogen/BufferU32x1.h b/APEX_1.4/framework/include/autogen/BufferU32x1.h new file mode 100644 index 00000000..3af9e699 --- /dev/null +++ b/APEX_1.4/framework/include/autogen/BufferU32x1.h @@ -0,0 +1,238 @@ +// This code contains NVIDIA Confidential Information and is disclosed to you +// under a form of NVIDIA software license agreement provided separately to you. +// +// Notice +// NVIDIA Corporation and its licensors retain all intellectual property and +// proprietary rights in and to this software and related documentation and +// any modifications thereto. 
Any use, reproduction, disclosure, or +// distribution of this software and related documentation without an express +// license agreement from NVIDIA Corporation is strictly prohibited. +// +// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES +// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO +// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT, +// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE. +// +// Information and code furnished is believed to be accurate and reliable. +// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such +// information or for any infringement of patents or other rights of third parties that may +// result from its use. No license is granted by implication or otherwise under any patent +// or patent rights of NVIDIA Corporation. Details are subject to change without notice. +// This code supersedes and replaces all information previously supplied. +// NVIDIA Corporation products are not authorized for use as critical +// components in life support devices or systems without express written approval of +// NVIDIA Corporation. +// +// Copyright (c) 2008-2015 NVIDIA Corporation. All rights reserved. 
+ +// This file was generated by NvParameterized/scripts/GenParameterized.pl + + +#ifndef HEADER_BufferU32x1_h +#define HEADER_BufferU32x1_h + +#include "NvParametersTypes.h" + +#ifndef NV_PARAMETERIZED_ONLY_LAYOUTS +#include "nvparameterized/NvParameterized.h" +#include "nvparameterized/NvParameterizedTraits.h" +#include "NvParameters.h" +#include "NvTraitsInternal.h" +#endif + +namespace nvidia +{ +namespace apex +{ + +#if PX_VC +#pragma warning(push) +#pragma warning(disable: 4324) // structure was padded due to __declspec(align()) +#endif + +namespace BufferU32x1NS +{ + + +struct U32_DynamicArray1D_Type +{ + uint32_t* buf; + bool isAllocated; + int32_t elementSize; + int32_t arraySizes[1]; +}; + + +struct ParametersStruct +{ + + U32_DynamicArray1D_Type data; + +}; + +static const uint32_t checksum[] = { 0xaea8bce2, 0x233c95f7, 0xdaa9d0d3, 0x43951dbe, }; + +} // namespace BufferU32x1NS + +#ifndef NV_PARAMETERIZED_ONLY_LAYOUTS +class BufferU32x1 : public NvParameterized::NvParameters, public BufferU32x1NS::ParametersStruct +{ +public: + BufferU32x1(NvParameterized::Traits* traits, void* buf = 0, int32_t* refCount = 0); + + virtual ~BufferU32x1(); + + virtual void destroy(); + + static const char* staticClassName(void) + { + return("BufferU32x1"); + } + + const char* className(void) const + { + return(staticClassName()); + } + + static const uint32_t ClassVersion = ((uint32_t)0 << 16) + (uint32_t)0; + + static uint32_t staticVersion(void) + { + return ClassVersion; + } + + uint32_t version(void) const + { + return(staticVersion()); + } + + static const uint32_t ClassAlignment = 8; + + static const uint32_t* staticChecksum(uint32_t& bits) + { + bits = 8 * sizeof(BufferU32x1NS::checksum); + return BufferU32x1NS::checksum; + } + + static void freeParameterDefinitionTable(NvParameterized::Traits* traits); + + const uint32_t* checksum(uint32_t& bits) const + { + return staticChecksum(bits); + } + + const BufferU32x1NS::ParametersStruct& parameters(void) const + { + 
BufferU32x1* tmpThis = const_cast<BufferU32x1*>(this); + return *(static_cast<BufferU32x1NS::ParametersStruct*>(tmpThis)); + } + + BufferU32x1NS::ParametersStruct& parameters(void) + { + return *(static_cast<BufferU32x1NS::ParametersStruct*>(this)); + } + + virtual NvParameterized::ErrorType getParameterHandle(const char* long_name, NvParameterized::Handle& handle) const; + virtual NvParameterized::ErrorType getParameterHandle(const char* long_name, NvParameterized::Handle& handle); + + void initDefaults(void); + +protected: + + virtual const NvParameterized::DefinitionImpl* getParameterDefinitionTree(void); + virtual const NvParameterized::DefinitionImpl* getParameterDefinitionTree(void) const; + + + virtual void getVarPtr(const NvParameterized::Handle& handle, void*& ptr, size_t& offset) const; + +private: + + void buildTree(void); + void initDynamicArrays(void); + void initStrings(void); + void initReferences(void); + void freeDynamicArrays(void); + void freeStrings(void); + void freeReferences(void); + + static bool mBuiltFlag; + static NvParameterized::MutexType mBuiltFlagMutex; +}; + +class BufferU32x1Factory : public NvParameterized::Factory +{ + static const char* const vptr; + +public: + + virtual void freeParameterDefinitionTable(NvParameterized::Traits* traits) + { + BufferU32x1::freeParameterDefinitionTable(traits); + } + + virtual NvParameterized::Interface* create(NvParameterized::Traits* paramTraits) + { + // placement new on this class using mParameterizedTraits + + void* newPtr = paramTraits->alloc(sizeof(BufferU32x1), BufferU32x1::ClassAlignment); + if (!NvParameterized::IsAligned(newPtr, BufferU32x1::ClassAlignment)) + { + NV_PARAM_TRAITS_WARNING(paramTraits, "Unaligned memory allocation for class BufferU32x1"); + paramTraits->free(newPtr); + return 0; + } + + memset(newPtr, 0, sizeof(BufferU32x1)); // always initialize memory allocated to zero for default values + return NV_PARAM_PLACEMENT_NEW(newPtr, BufferU32x1)(paramTraits); + } + + virtual 
NvParameterized::Interface* finish(NvParameterized::Traits* paramTraits, void* bufObj, void* bufStart, int32_t* refCount) + { + if (!NvParameterized::IsAligned(bufObj, BufferU32x1::ClassAlignment) + || !NvParameterized::IsAligned(bufStart, BufferU32x1::ClassAlignment)) + { + NV_PARAM_TRAITS_WARNING(paramTraits, "Unaligned memory allocation for class BufferU32x1"); + return 0; + } + + // Init NvParameters-part + // We used to call empty constructor of BufferU32x1 here + // but it may call default constructors of members and spoil the data + NV_PARAM_PLACEMENT_NEW(bufObj, NvParameterized::NvParameters)(paramTraits, bufStart, refCount); + + // Init vtable (everything else is already initialized) + *(const char**)bufObj = vptr; + + return (BufferU32x1*)bufObj; + } + + virtual const char* getClassName() + { + return (BufferU32x1::staticClassName()); + } + + virtual uint32_t getVersion() + { + return (BufferU32x1::staticVersion()); + } + + virtual uint32_t getAlignment() + { + return (BufferU32x1::ClassAlignment); + } + + virtual const uint32_t* getChecksum(uint32_t& bits) + { + return (BufferU32x1::staticChecksum(bits)); + } +}; +#endif // NV_PARAMETERIZED_ONLY_LAYOUTS + +} // namespace apex +} // namespace nvidia + +#if PX_VC +#pragma warning(pop) +#endif + +#endif diff --git a/APEX_1.4/framework/include/autogen/BufferU32x2.h b/APEX_1.4/framework/include/autogen/BufferU32x2.h new file mode 100644 index 00000000..2d051d1f --- /dev/null +++ b/APEX_1.4/framework/include/autogen/BufferU32x2.h @@ -0,0 +1,244 @@ +// This code contains NVIDIA Confidential Information and is disclosed to you +// under a form of NVIDIA software license agreement provided separately to you. +// +// Notice +// NVIDIA Corporation and its licensors retain all intellectual property and +// proprietary rights in and to this software and related documentation and +// any modifications thereto. 
Any use, reproduction, disclosure, or +// distribution of this software and related documentation without an express +// license agreement from NVIDIA Corporation is strictly prohibited. +// +// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES +// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO +// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT, +// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE. +// +// Information and code furnished is believed to be accurate and reliable. +// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such +// information or for any infringement of patents or other rights of third parties that may +// result from its use. No license is granted by implication or otherwise under any patent +// or patent rights of NVIDIA Corporation. Details are subject to change without notice. +// This code supersedes and replaces all information previously supplied. +// NVIDIA Corporation products are not authorized for use as critical +// components in life support devices or systems without express written approval of +// NVIDIA Corporation. +// +// Copyright (c) 2008-2015 NVIDIA Corporation. All rights reserved. 
+ +// This file was generated by NvParameterized/scripts/GenParameterized.pl + + +#ifndef HEADER_BufferU32x2_h +#define HEADER_BufferU32x2_h + +#include "NvParametersTypes.h" + +#ifndef NV_PARAMETERIZED_ONLY_LAYOUTS +#include "nvparameterized/NvParameterized.h" +#include "nvparameterized/NvParameterizedTraits.h" +#include "NvParameters.h" +#include "NvTraitsInternal.h" +#endif + +namespace nvidia +{ +namespace apex +{ + +#if PX_VC +#pragma warning(push) +#pragma warning(disable: 4324) // structure was padded due to __declspec(align()) +#endif + +namespace BufferU32x2NS +{ + +struct U32x2_Type; + +struct U32x2_DynamicArray1D_Type +{ + U32x2_Type* buf; + bool isAllocated; + int32_t elementSize; + int32_t arraySizes[1]; +}; + +struct U32x2_Type +{ + uint32_t x; + uint32_t y; +}; + +struct ParametersStruct +{ + + U32x2_DynamicArray1D_Type data; + +}; + +static const uint32_t checksum[] = { 0x8f8dc3d4, 0x45e914f5, 0xe6432284, 0xf7487471, }; + +} // namespace BufferU32x2NS + +#ifndef NV_PARAMETERIZED_ONLY_LAYOUTS +class BufferU32x2 : public NvParameterized::NvParameters, public BufferU32x2NS::ParametersStruct +{ +public: + BufferU32x2(NvParameterized::Traits* traits, void* buf = 0, int32_t* refCount = 0); + + virtual ~BufferU32x2(); + + virtual void destroy(); + + static const char* staticClassName(void) + { + return("BufferU32x2"); + } + + const char* className(void) const + { + return(staticClassName()); + } + + static const uint32_t ClassVersion = ((uint32_t)0 << 16) + (uint32_t)0; + + static uint32_t staticVersion(void) + { + return ClassVersion; + } + + uint32_t version(void) const + { + return(staticVersion()); + } + + static const uint32_t ClassAlignment = 8; + + static const uint32_t* staticChecksum(uint32_t& bits) + { + bits = 8 * sizeof(BufferU32x2NS::checksum); + return BufferU32x2NS::checksum; + } + + static void freeParameterDefinitionTable(NvParameterized::Traits* traits); + + const uint32_t* checksum(uint32_t& bits) const + { + return staticChecksum(bits); 
+ } + + const BufferU32x2NS::ParametersStruct& parameters(void) const + { + BufferU32x2* tmpThis = const_cast<BufferU32x2*>(this); + return *(static_cast<BufferU32x2NS::ParametersStruct*>(tmpThis)); + } + + BufferU32x2NS::ParametersStruct& parameters(void) + { + return *(static_cast<BufferU32x2NS::ParametersStruct*>(this)); + } + + virtual NvParameterized::ErrorType getParameterHandle(const char* long_name, NvParameterized::Handle& handle) const; + virtual NvParameterized::ErrorType getParameterHandle(const char* long_name, NvParameterized::Handle& handle); + + void initDefaults(void); + +protected: + + virtual const NvParameterized::DefinitionImpl* getParameterDefinitionTree(void); + virtual const NvParameterized::DefinitionImpl* getParameterDefinitionTree(void) const; + + + virtual void getVarPtr(const NvParameterized::Handle& handle, void*& ptr, size_t& offset) const; + +private: + + void buildTree(void); + void initDynamicArrays(void); + void initStrings(void); + void initReferences(void); + void freeDynamicArrays(void); + void freeStrings(void); + void freeReferences(void); + + static bool mBuiltFlag; + static NvParameterized::MutexType mBuiltFlagMutex; +}; + +class BufferU32x2Factory : public NvParameterized::Factory +{ + static const char* const vptr; + +public: + + virtual void freeParameterDefinitionTable(NvParameterized::Traits* traits) + { + BufferU32x2::freeParameterDefinitionTable(traits); + } + + virtual NvParameterized::Interface* create(NvParameterized::Traits* paramTraits) + { + // placement new on this class using mParameterizedTraits + + void* newPtr = paramTraits->alloc(sizeof(BufferU32x2), BufferU32x2::ClassAlignment); + if (!NvParameterized::IsAligned(newPtr, BufferU32x2::ClassAlignment)) + { + NV_PARAM_TRAITS_WARNING(paramTraits, "Unaligned memory allocation for class BufferU32x2"); + paramTraits->free(newPtr); + return 0; + } + + memset(newPtr, 0, sizeof(BufferU32x2)); // always initialize memory allocated to zero for default values + return 
NV_PARAM_PLACEMENT_NEW(newPtr, BufferU32x2)(paramTraits); + } + + virtual NvParameterized::Interface* finish(NvParameterized::Traits* paramTraits, void* bufObj, void* bufStart, int32_t* refCount) + { + if (!NvParameterized::IsAligned(bufObj, BufferU32x2::ClassAlignment) + || !NvParameterized::IsAligned(bufStart, BufferU32x2::ClassAlignment)) + { + NV_PARAM_TRAITS_WARNING(paramTraits, "Unaligned memory allocation for class BufferU32x2"); + return 0; + } + + // Init NvParameters-part + // We used to call empty constructor of BufferU32x2 here + // but it may call default constructors of members and spoil the data + NV_PARAM_PLACEMENT_NEW(bufObj, NvParameterized::NvParameters)(paramTraits, bufStart, refCount); + + // Init vtable (everything else is already initialized) + *(const char**)bufObj = vptr; + + return (BufferU32x2*)bufObj; + } + + virtual const char* getClassName() + { + return (BufferU32x2::staticClassName()); + } + + virtual uint32_t getVersion() + { + return (BufferU32x2::staticVersion()); + } + + virtual uint32_t getAlignment() + { + return (BufferU32x2::ClassAlignment); + } + + virtual const uint32_t* getChecksum(uint32_t& bits) + { + return (BufferU32x2::staticChecksum(bits)); + } +}; +#endif // NV_PARAMETERIZED_ONLY_LAYOUTS + +} // namespace apex +} // namespace nvidia + +#if PX_VC +#pragma warning(pop) +#endif + +#endif diff --git a/APEX_1.4/framework/include/autogen/BufferU32x3.h b/APEX_1.4/framework/include/autogen/BufferU32x3.h new file mode 100644 index 00000000..cfa74c7c --- /dev/null +++ b/APEX_1.4/framework/include/autogen/BufferU32x3.h @@ -0,0 +1,245 @@ +// This code contains NVIDIA Confidential Information and is disclosed to you +// under a form of NVIDIA software license agreement provided separately to you. +// +// Notice +// NVIDIA Corporation and its licensors retain all intellectual property and +// proprietary rights in and to this software and related documentation and +// any modifications thereto. 
Any use, reproduction, disclosure, or +// distribution of this software and related documentation without an express +// license agreement from NVIDIA Corporation is strictly prohibited. +// +// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES +// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO +// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT, +// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE. +// +// Information and code furnished is believed to be accurate and reliable. +// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such +// information or for any infringement of patents or other rights of third parties that may +// result from its use. No license is granted by implication or otherwise under any patent +// or patent rights of NVIDIA Corporation. Details are subject to change without notice. +// This code supersedes and replaces all information previously supplied. +// NVIDIA Corporation products are not authorized for use as critical +// components in life support devices or systems without express written approval of +// NVIDIA Corporation. +// +// Copyright (c) 2008-2015 NVIDIA Corporation. All rights reserved. 
+ +// This file was generated by NvParameterized/scripts/GenParameterized.pl + + +#ifndef HEADER_BufferU32x3_h +#define HEADER_BufferU32x3_h + +#include "NvParametersTypes.h" + +#ifndef NV_PARAMETERIZED_ONLY_LAYOUTS +#include "nvparameterized/NvParameterized.h" +#include "nvparameterized/NvParameterizedTraits.h" +#include "NvParameters.h" +#include "NvTraitsInternal.h" +#endif + +namespace nvidia +{ +namespace apex +{ + +#if PX_VC +#pragma warning(push) +#pragma warning(disable: 4324) // structure was padded due to __declspec(align()) +#endif + +namespace BufferU32x3NS +{ + +struct U32x3_Type; + +struct U32x3_DynamicArray1D_Type +{ + U32x3_Type* buf; + bool isAllocated; + int32_t elementSize; + int32_t arraySizes[1]; +}; + +struct U32x3_Type +{ + uint32_t x; + uint32_t y; + uint32_t z; +}; + +struct ParametersStruct +{ + + U32x3_DynamicArray1D_Type data; + +}; + +static const uint32_t checksum[] = { 0xc0b737db, 0x1cae35c4, 0xe957acc9, 0x68cb2306, }; + +} // namespace BufferU32x3NS + +#ifndef NV_PARAMETERIZED_ONLY_LAYOUTS +class BufferU32x3 : public NvParameterized::NvParameters, public BufferU32x3NS::ParametersStruct +{ +public: + BufferU32x3(NvParameterized::Traits* traits, void* buf = 0, int32_t* refCount = 0); + + virtual ~BufferU32x3(); + + virtual void destroy(); + + static const char* staticClassName(void) + { + return("BufferU32x3"); + } + + const char* className(void) const + { + return(staticClassName()); + } + + static const uint32_t ClassVersion = ((uint32_t)0 << 16) + (uint32_t)0; + + static uint32_t staticVersion(void) + { + return ClassVersion; + } + + uint32_t version(void) const + { + return(staticVersion()); + } + + static const uint32_t ClassAlignment = 8; + + static const uint32_t* staticChecksum(uint32_t& bits) + { + bits = 8 * sizeof(BufferU32x3NS::checksum); + return BufferU32x3NS::checksum; + } + + static void freeParameterDefinitionTable(NvParameterized::Traits* traits); + + const uint32_t* checksum(uint32_t& bits) const + { + return 
staticChecksum(bits); + } + + const BufferU32x3NS::ParametersStruct& parameters(void) const + { + BufferU32x3* tmpThis = const_cast<BufferU32x3*>(this); + return *(static_cast<BufferU32x3NS::ParametersStruct*>(tmpThis)); + } + + BufferU32x3NS::ParametersStruct& parameters(void) + { + return *(static_cast<BufferU32x3NS::ParametersStruct*>(this)); + } + + virtual NvParameterized::ErrorType getParameterHandle(const char* long_name, NvParameterized::Handle& handle) const; + virtual NvParameterized::ErrorType getParameterHandle(const char* long_name, NvParameterized::Handle& handle); + + void initDefaults(void); + +protected: + + virtual const NvParameterized::DefinitionImpl* getParameterDefinitionTree(void); + virtual const NvParameterized::DefinitionImpl* getParameterDefinitionTree(void) const; + + + virtual void getVarPtr(const NvParameterized::Handle& handle, void*& ptr, size_t& offset) const; + +private: + + void buildTree(void); + void initDynamicArrays(void); + void initStrings(void); + void initReferences(void); + void freeDynamicArrays(void); + void freeStrings(void); + void freeReferences(void); + + static bool mBuiltFlag; + static NvParameterized::MutexType mBuiltFlagMutex; +}; + +class BufferU32x3Factory : public NvParameterized::Factory +{ + static const char* const vptr; + +public: + + virtual void freeParameterDefinitionTable(NvParameterized::Traits* traits) + { + BufferU32x3::freeParameterDefinitionTable(traits); + } + + virtual NvParameterized::Interface* create(NvParameterized::Traits* paramTraits) + { + // placement new on this class using mParameterizedTraits + + void* newPtr = paramTraits->alloc(sizeof(BufferU32x3), BufferU32x3::ClassAlignment); + if (!NvParameterized::IsAligned(newPtr, BufferU32x3::ClassAlignment)) + { + NV_PARAM_TRAITS_WARNING(paramTraits, "Unaligned memory allocation for class BufferU32x3"); + paramTraits->free(newPtr); + return 0; + } + + memset(newPtr, 0, sizeof(BufferU32x3)); // always initialize memory allocated to zero for 
default values + return NV_PARAM_PLACEMENT_NEW(newPtr, BufferU32x3)(paramTraits); + } + + virtual NvParameterized::Interface* finish(NvParameterized::Traits* paramTraits, void* bufObj, void* bufStart, int32_t* refCount) + { + if (!NvParameterized::IsAligned(bufObj, BufferU32x3::ClassAlignment) + || !NvParameterized::IsAligned(bufStart, BufferU32x3::ClassAlignment)) + { + NV_PARAM_TRAITS_WARNING(paramTraits, "Unaligned memory allocation for class BufferU32x3"); + return 0; + } + + // Init NvParameters-part + // We used to call empty constructor of BufferU32x3 here + // but it may call default constructors of members and spoil the data + NV_PARAM_PLACEMENT_NEW(bufObj, NvParameterized::NvParameters)(paramTraits, bufStart, refCount); + + // Init vtable (everything else is already initialized) + *(const char**)bufObj = vptr; + + return (BufferU32x3*)bufObj; + } + + virtual const char* getClassName() + { + return (BufferU32x3::staticClassName()); + } + + virtual uint32_t getVersion() + { + return (BufferU32x3::staticVersion()); + } + + virtual uint32_t getAlignment() + { + return (BufferU32x3::ClassAlignment); + } + + virtual const uint32_t* getChecksum(uint32_t& bits) + { + return (BufferU32x3::staticChecksum(bits)); + } +}; +#endif // NV_PARAMETERIZED_ONLY_LAYOUTS + +} // namespace apex +} // namespace nvidia + +#if PX_VC +#pragma warning(pop) +#endif + +#endif diff --git a/APEX_1.4/framework/include/autogen/BufferU32x4.h b/APEX_1.4/framework/include/autogen/BufferU32x4.h new file mode 100644 index 00000000..81879495 --- /dev/null +++ b/APEX_1.4/framework/include/autogen/BufferU32x4.h @@ -0,0 +1,246 @@ +// This code contains NVIDIA Confidential Information and is disclosed to you +// under a form of NVIDIA software license agreement provided separately to you. +// +// Notice +// NVIDIA Corporation and its licensors retain all intellectual property and +// proprietary rights in and to this software and related documentation and +// any modifications thereto. 
Any use, reproduction, disclosure, or +// distribution of this software and related documentation without an express +// license agreement from NVIDIA Corporation is strictly prohibited. +// +// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES +// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO +// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT, +// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE. +// +// Information and code furnished is believed to be accurate and reliable. +// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such +// information or for any infringement of patents or other rights of third parties that may +// result from its use. No license is granted by implication or otherwise under any patent +// or patent rights of NVIDIA Corporation. Details are subject to change without notice. +// This code supersedes and replaces all information previously supplied. +// NVIDIA Corporation products are not authorized for use as critical +// components in life support devices or systems without express written approval of +// NVIDIA Corporation. +// +// Copyright (c) 2008-2015 NVIDIA Corporation. All rights reserved. 
+ +// This file was generated by NvParameterized/scripts/GenParameterized.pl + + +#ifndef HEADER_BufferU32x4_h +#define HEADER_BufferU32x4_h + +#include "NvParametersTypes.h" + +#ifndef NV_PARAMETERIZED_ONLY_LAYOUTS +#include "nvparameterized/NvParameterized.h" +#include "nvparameterized/NvParameterizedTraits.h" +#include "NvParameters.h" +#include "NvTraitsInternal.h" +#endif + +namespace nvidia +{ +namespace apex +{ + +#if PX_VC +#pragma warning(push) +#pragma warning(disable: 4324) // structure was padded due to __declspec(align()) +#endif + +namespace BufferU32x4NS +{ + +struct U32x4_Type; + +struct U32x4_DynamicArray1D_Type +{ + U32x4_Type* buf; + bool isAllocated; + int32_t elementSize; + int32_t arraySizes[1]; +}; + +struct U32x4_Type +{ + uint32_t x; + uint32_t y; + uint32_t z; + uint32_t w; +}; + +struct ParametersStruct +{ + + U32x4_DynamicArray1D_Type data; + +}; + +static const uint32_t checksum[] = { 0x8e5349d3, 0xbf3dae15, 0x22c57bb9, 0x7c602608, }; + +} // namespace BufferU32x4NS + +#ifndef NV_PARAMETERIZED_ONLY_LAYOUTS +class BufferU32x4 : public NvParameterized::NvParameters, public BufferU32x4NS::ParametersStruct +{ +public: + BufferU32x4(NvParameterized::Traits* traits, void* buf = 0, int32_t* refCount = 0); + + virtual ~BufferU32x4(); + + virtual void destroy(); + + static const char* staticClassName(void) + { + return("BufferU32x4"); + } + + const char* className(void) const + { + return(staticClassName()); + } + + static const uint32_t ClassVersion = ((uint32_t)0 << 16) + (uint32_t)0; + + static uint32_t staticVersion(void) + { + return ClassVersion; + } + + uint32_t version(void) const + { + return(staticVersion()); + } + + static const uint32_t ClassAlignment = 8; + + static const uint32_t* staticChecksum(uint32_t& bits) + { + bits = 8 * sizeof(BufferU32x4NS::checksum); + return BufferU32x4NS::checksum; + } + + static void freeParameterDefinitionTable(NvParameterized::Traits* traits); + + const uint32_t* checksum(uint32_t& bits) const + { + 
return staticChecksum(bits); + } + + const BufferU32x4NS::ParametersStruct& parameters(void) const + { + BufferU32x4* tmpThis = const_cast<BufferU32x4*>(this); + return *(static_cast<BufferU32x4NS::ParametersStruct*>(tmpThis)); + } + + BufferU32x4NS::ParametersStruct& parameters(void) + { + return *(static_cast<BufferU32x4NS::ParametersStruct*>(this)); + } + + virtual NvParameterized::ErrorType getParameterHandle(const char* long_name, NvParameterized::Handle& handle) const; + virtual NvParameterized::ErrorType getParameterHandle(const char* long_name, NvParameterized::Handle& handle); + + void initDefaults(void); + +protected: + + virtual const NvParameterized::DefinitionImpl* getParameterDefinitionTree(void); + virtual const NvParameterized::DefinitionImpl* getParameterDefinitionTree(void) const; + + + virtual void getVarPtr(const NvParameterized::Handle& handle, void*& ptr, size_t& offset) const; + +private: + + void buildTree(void); + void initDynamicArrays(void); + void initStrings(void); + void initReferences(void); + void freeDynamicArrays(void); + void freeStrings(void); + void freeReferences(void); + + static bool mBuiltFlag; + static NvParameterized::MutexType mBuiltFlagMutex; +}; + +class BufferU32x4Factory : public NvParameterized::Factory +{ + static const char* const vptr; + +public: + + virtual void freeParameterDefinitionTable(NvParameterized::Traits* traits) + { + BufferU32x4::freeParameterDefinitionTable(traits); + } + + virtual NvParameterized::Interface* create(NvParameterized::Traits* paramTraits) + { + // placement new on this class using mParameterizedTraits + + void* newPtr = paramTraits->alloc(sizeof(BufferU32x4), BufferU32x4::ClassAlignment); + if (!NvParameterized::IsAligned(newPtr, BufferU32x4::ClassAlignment)) + { + NV_PARAM_TRAITS_WARNING(paramTraits, "Unaligned memory allocation for class BufferU32x4"); + paramTraits->free(newPtr); + return 0; + } + + memset(newPtr, 0, sizeof(BufferU32x4)); // always initialize memory allocated to 
zero for default values + return NV_PARAM_PLACEMENT_NEW(newPtr, BufferU32x4)(paramTraits); + } + + virtual NvParameterized::Interface* finish(NvParameterized::Traits* paramTraits, void* bufObj, void* bufStart, int32_t* refCount) + { + if (!NvParameterized::IsAligned(bufObj, BufferU32x4::ClassAlignment) + || !NvParameterized::IsAligned(bufStart, BufferU32x4::ClassAlignment)) + { + NV_PARAM_TRAITS_WARNING(paramTraits, "Unaligned memory allocation for class BufferU32x4"); + return 0; + } + + // Init NvParameters-part + // We used to call empty constructor of BufferU32x4 here + // but it may call default constructors of members and spoil the data + NV_PARAM_PLACEMENT_NEW(bufObj, NvParameterized::NvParameters)(paramTraits, bufStart, refCount); + + // Init vtable (everything else is already initialized) + *(const char**)bufObj = vptr; + + return (BufferU32x4*)bufObj; + } + + virtual const char* getClassName() + { + return (BufferU32x4::staticClassName()); + } + + virtual uint32_t getVersion() + { + return (BufferU32x4::staticVersion()); + } + + virtual uint32_t getAlignment() + { + return (BufferU32x4::ClassAlignment); + } + + virtual const uint32_t* getChecksum(uint32_t& bits) + { + return (BufferU32x4::staticChecksum(bits)); + } +}; +#endif // NV_PARAMETERIZED_ONLY_LAYOUTS + +} // namespace apex +} // namespace nvidia + +#if PX_VC +#pragma warning(pop) +#endif + +#endif diff --git a/APEX_1.4/framework/include/autogen/BufferU8x1.h b/APEX_1.4/framework/include/autogen/BufferU8x1.h new file mode 100644 index 00000000..3466041b --- /dev/null +++ b/APEX_1.4/framework/include/autogen/BufferU8x1.h @@ -0,0 +1,238 @@ +// This code contains NVIDIA Confidential Information and is disclosed to you +// under a form of NVIDIA software license agreement provided separately to you. +// +// Notice +// NVIDIA Corporation and its licensors retain all intellectual property and +// proprietary rights in and to this software and related documentation and +// any modifications thereto. 
Any use, reproduction, disclosure, or +// distribution of this software and related documentation without an express +// license agreement from NVIDIA Corporation is strictly prohibited. +// +// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES +// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO +// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT, +// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE. +// +// Information and code furnished is believed to be accurate and reliable. +// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such +// information or for any infringement of patents or other rights of third parties that may +// result from its use. No license is granted by implication or otherwise under any patent +// or patent rights of NVIDIA Corporation. Details are subject to change without notice. +// This code supersedes and replaces all information previously supplied. +// NVIDIA Corporation products are not authorized for use as critical +// components in life support devices or systems without express written approval of +// NVIDIA Corporation. +// +// Copyright (c) 2008-2015 NVIDIA Corporation. All rights reserved. 
+ +// This file was generated by NvParameterized/scripts/GenParameterized.pl + + +#ifndef HEADER_BufferU8x1_h +#define HEADER_BufferU8x1_h + +#include "NvParametersTypes.h" + +#ifndef NV_PARAMETERIZED_ONLY_LAYOUTS +#include "nvparameterized/NvParameterized.h" +#include "nvparameterized/NvParameterizedTraits.h" +#include "NvParameters.h" +#include "NvTraitsInternal.h" +#endif + +namespace nvidia +{ +namespace apex +{ + +#if PX_VC +#pragma warning(push) +#pragma warning(disable: 4324) // structure was padded due to __declspec(align()) +#endif + +namespace BufferU8x1NS +{ + + +struct U8_DynamicArray1D_Type +{ + uint8_t* buf; + bool isAllocated; + int32_t elementSize; + int32_t arraySizes[1]; +}; + + +struct ParametersStruct +{ + + U8_DynamicArray1D_Type data; + +}; + +static const uint32_t checksum[] = { 0x9c4137b9, 0x1fe24c98, 0xae580b55, 0x7e908e02, }; + +} // namespace BufferU8x1NS + +#ifndef NV_PARAMETERIZED_ONLY_LAYOUTS +class BufferU8x1 : public NvParameterized::NvParameters, public BufferU8x1NS::ParametersStruct +{ +public: + BufferU8x1(NvParameterized::Traits* traits, void* buf = 0, int32_t* refCount = 0); + + virtual ~BufferU8x1(); + + virtual void destroy(); + + static const char* staticClassName(void) + { + return("BufferU8x1"); + } + + const char* className(void) const + { + return(staticClassName()); + } + + static const uint32_t ClassVersion = ((uint32_t)0 << 16) + (uint32_t)0; + + static uint32_t staticVersion(void) + { + return ClassVersion; + } + + uint32_t version(void) const + { + return(staticVersion()); + } + + static const uint32_t ClassAlignment = 8; + + static const uint32_t* staticChecksum(uint32_t& bits) + { + bits = 8 * sizeof(BufferU8x1NS::checksum); + return BufferU8x1NS::checksum; + } + + static void freeParameterDefinitionTable(NvParameterized::Traits* traits); + + const uint32_t* checksum(uint32_t& bits) const + { + return staticChecksum(bits); + } + + const BufferU8x1NS::ParametersStruct& parameters(void) const + { + BufferU8x1* 
tmpThis = const_cast<BufferU8x1*>(this); + return *(static_cast<BufferU8x1NS::ParametersStruct*>(tmpThis)); + } + + BufferU8x1NS::ParametersStruct& parameters(void) + { + return *(static_cast<BufferU8x1NS::ParametersStruct*>(this)); + } + + virtual NvParameterized::ErrorType getParameterHandle(const char* long_name, NvParameterized::Handle& handle) const; + virtual NvParameterized::ErrorType getParameterHandle(const char* long_name, NvParameterized::Handle& handle); + + void initDefaults(void); + +protected: + + virtual const NvParameterized::DefinitionImpl* getParameterDefinitionTree(void); + virtual const NvParameterized::DefinitionImpl* getParameterDefinitionTree(void) const; + + + virtual void getVarPtr(const NvParameterized::Handle& handle, void*& ptr, size_t& offset) const; + +private: + + void buildTree(void); + void initDynamicArrays(void); + void initStrings(void); + void initReferences(void); + void freeDynamicArrays(void); + void freeStrings(void); + void freeReferences(void); + + static bool mBuiltFlag; + static NvParameterized::MutexType mBuiltFlagMutex; +}; + +class BufferU8x1Factory : public NvParameterized::Factory +{ + static const char* const vptr; + +public: + + virtual void freeParameterDefinitionTable(NvParameterized::Traits* traits) + { + BufferU8x1::freeParameterDefinitionTable(traits); + } + + virtual NvParameterized::Interface* create(NvParameterized::Traits* paramTraits) + { + // placement new on this class using mParameterizedTraits + + void* newPtr = paramTraits->alloc(sizeof(BufferU8x1), BufferU8x1::ClassAlignment); + if (!NvParameterized::IsAligned(newPtr, BufferU8x1::ClassAlignment)) + { + NV_PARAM_TRAITS_WARNING(paramTraits, "Unaligned memory allocation for class BufferU8x1"); + paramTraits->free(newPtr); + return 0; + } + + memset(newPtr, 0, sizeof(BufferU8x1)); // always initialize memory allocated to zero for default values + return NV_PARAM_PLACEMENT_NEW(newPtr, BufferU8x1)(paramTraits); + } + + virtual 
NvParameterized::Interface* finish(NvParameterized::Traits* paramTraits, void* bufObj, void* bufStart, int32_t* refCount) + { + if (!NvParameterized::IsAligned(bufObj, BufferU8x1::ClassAlignment) + || !NvParameterized::IsAligned(bufStart, BufferU8x1::ClassAlignment)) + { + NV_PARAM_TRAITS_WARNING(paramTraits, "Unaligned memory allocation for class BufferU8x1"); + return 0; + } + + // Init NvParameters-part + // We used to call empty constructor of BufferU8x1 here + // but it may call default constructors of members and spoil the data + NV_PARAM_PLACEMENT_NEW(bufObj, NvParameterized::NvParameters)(paramTraits, bufStart, refCount); + + // Init vtable (everything else is already initialized) + *(const char**)bufObj = vptr; + + return (BufferU8x1*)bufObj; + } + + virtual const char* getClassName() + { + return (BufferU8x1::staticClassName()); + } + + virtual uint32_t getVersion() + { + return (BufferU8x1::staticVersion()); + } + + virtual uint32_t getAlignment() + { + return (BufferU8x1::ClassAlignment); + } + + virtual const uint32_t* getChecksum(uint32_t& bits) + { + return (BufferU8x1::staticChecksum(bits)); + } +}; +#endif // NV_PARAMETERIZED_ONLY_LAYOUTS + +} // namespace apex +} // namespace nvidia + +#if PX_VC +#pragma warning(pop) +#endif + +#endif diff --git a/APEX_1.4/framework/include/autogen/BufferU8x2.h b/APEX_1.4/framework/include/autogen/BufferU8x2.h new file mode 100644 index 00000000..c125d61b --- /dev/null +++ b/APEX_1.4/framework/include/autogen/BufferU8x2.h @@ -0,0 +1,244 @@ +// This code contains NVIDIA Confidential Information and is disclosed to you +// under a form of NVIDIA software license agreement provided separately to you. +// +// Notice +// NVIDIA Corporation and its licensors retain all intellectual property and +// proprietary rights in and to this software and related documentation and +// any modifications thereto. 
Any use, reproduction, disclosure, or +// distribution of this software and related documentation without an express +// license agreement from NVIDIA Corporation is strictly prohibited. +// +// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES +// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO +// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT, +// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE. +// +// Information and code furnished is believed to be accurate and reliable. +// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such +// information or for any infringement of patents or other rights of third parties that may +// result from its use. No license is granted by implication or otherwise under any patent +// or patent rights of NVIDIA Corporation. Details are subject to change without notice. +// This code supersedes and replaces all information previously supplied. +// NVIDIA Corporation products are not authorized for use as critical +// components in life support devices or systems without express written approval of +// NVIDIA Corporation. +// +// Copyright (c) 2008-2015 NVIDIA Corporation. All rights reserved. 
+ +// This file was generated by NvParameterized/scripts/GenParameterized.pl + + +#ifndef HEADER_BufferU8x2_h +#define HEADER_BufferU8x2_h + +#include "NvParametersTypes.h" + +#ifndef NV_PARAMETERIZED_ONLY_LAYOUTS +#include "nvparameterized/NvParameterized.h" +#include "nvparameterized/NvParameterizedTraits.h" +#include "NvParameters.h" +#include "NvTraitsInternal.h" +#endif + +namespace nvidia +{ +namespace apex +{ + +#if PX_VC +#pragma warning(push) +#pragma warning(disable: 4324) // structure was padded due to __declspec(align()) +#endif + +namespace BufferU8x2NS +{ + +struct U8x2_Type; + +struct U8x2_DynamicArray1D_Type +{ + U8x2_Type* buf; + bool isAllocated; + int32_t elementSize; + int32_t arraySizes[1]; +}; + +struct U8x2_Type +{ + uint8_t x; + uint8_t y; +}; + +struct ParametersStruct +{ + + U8x2_DynamicArray1D_Type data; + +}; + +static const uint32_t checksum[] = { 0x9857276c, 0xc24be4d6, 0x1aee732c, 0xf8979343, }; + +} // namespace BufferU8x2NS + +#ifndef NV_PARAMETERIZED_ONLY_LAYOUTS +class BufferU8x2 : public NvParameterized::NvParameters, public BufferU8x2NS::ParametersStruct +{ +public: + BufferU8x2(NvParameterized::Traits* traits, void* buf = 0, int32_t* refCount = 0); + + virtual ~BufferU8x2(); + + virtual void destroy(); + + static const char* staticClassName(void) + { + return("BufferU8x2"); + } + + const char* className(void) const + { + return(staticClassName()); + } + + static const uint32_t ClassVersion = ((uint32_t)0 << 16) + (uint32_t)0; + + static uint32_t staticVersion(void) + { + return ClassVersion; + } + + uint32_t version(void) const + { + return(staticVersion()); + } + + static const uint32_t ClassAlignment = 8; + + static const uint32_t* staticChecksum(uint32_t& bits) + { + bits = 8 * sizeof(BufferU8x2NS::checksum); + return BufferU8x2NS::checksum; + } + + static void freeParameterDefinitionTable(NvParameterized::Traits* traits); + + const uint32_t* checksum(uint32_t& bits) const + { + return staticChecksum(bits); + } + + const 
BufferU8x2NS::ParametersStruct& parameters(void) const + { + BufferU8x2* tmpThis = const_cast<BufferU8x2*>(this); + return *(static_cast<BufferU8x2NS::ParametersStruct*>(tmpThis)); + } + + BufferU8x2NS::ParametersStruct& parameters(void) + { + return *(static_cast<BufferU8x2NS::ParametersStruct*>(this)); + } + + virtual NvParameterized::ErrorType getParameterHandle(const char* long_name, NvParameterized::Handle& handle) const; + virtual NvParameterized::ErrorType getParameterHandle(const char* long_name, NvParameterized::Handle& handle); + + void initDefaults(void); + +protected: + + virtual const NvParameterized::DefinitionImpl* getParameterDefinitionTree(void); + virtual const NvParameterized::DefinitionImpl* getParameterDefinitionTree(void) const; + + + virtual void getVarPtr(const NvParameterized::Handle& handle, void*& ptr, size_t& offset) const; + +private: + + void buildTree(void); + void initDynamicArrays(void); + void initStrings(void); + void initReferences(void); + void freeDynamicArrays(void); + void freeStrings(void); + void freeReferences(void); + + static bool mBuiltFlag; + static NvParameterized::MutexType mBuiltFlagMutex; +}; + +class BufferU8x2Factory : public NvParameterized::Factory +{ + static const char* const vptr; + +public: + + virtual void freeParameterDefinitionTable(NvParameterized::Traits* traits) + { + BufferU8x2::freeParameterDefinitionTable(traits); + } + + virtual NvParameterized::Interface* create(NvParameterized::Traits* paramTraits) + { + // placement new on this class using mParameterizedTraits + + void* newPtr = paramTraits->alloc(sizeof(BufferU8x2), BufferU8x2::ClassAlignment); + if (!NvParameterized::IsAligned(newPtr, BufferU8x2::ClassAlignment)) + { + NV_PARAM_TRAITS_WARNING(paramTraits, "Unaligned memory allocation for class BufferU8x2"); + paramTraits->free(newPtr); + return 0; + } + + memset(newPtr, 0, sizeof(BufferU8x2)); // always initialize memory allocated to zero for default values + return 
NV_PARAM_PLACEMENT_NEW(newPtr, BufferU8x2)(paramTraits); + } + + virtual NvParameterized::Interface* finish(NvParameterized::Traits* paramTraits, void* bufObj, void* bufStart, int32_t* refCount) + { + if (!NvParameterized::IsAligned(bufObj, BufferU8x2::ClassAlignment) + || !NvParameterized::IsAligned(bufStart, BufferU8x2::ClassAlignment)) + { + NV_PARAM_TRAITS_WARNING(paramTraits, "Unaligned memory allocation for class BufferU8x2"); + return 0; + } + + // Init NvParameters-part + // We used to call empty constructor of BufferU8x2 here + // but it may call default constructors of members and spoil the data + NV_PARAM_PLACEMENT_NEW(bufObj, NvParameterized::NvParameters)(paramTraits, bufStart, refCount); + + // Init vtable (everything else is already initialized) + *(const char**)bufObj = vptr; + + return (BufferU8x2*)bufObj; + } + + virtual const char* getClassName() + { + return (BufferU8x2::staticClassName()); + } + + virtual uint32_t getVersion() + { + return (BufferU8x2::staticVersion()); + } + + virtual uint32_t getAlignment() + { + return (BufferU8x2::ClassAlignment); + } + + virtual const uint32_t* getChecksum(uint32_t& bits) + { + return (BufferU8x2::staticChecksum(bits)); + } +}; +#endif // NV_PARAMETERIZED_ONLY_LAYOUTS + +} // namespace apex +} // namespace nvidia + +#if PX_VC +#pragma warning(pop) +#endif + +#endif diff --git a/APEX_1.4/framework/include/autogen/BufferU8x3.h b/APEX_1.4/framework/include/autogen/BufferU8x3.h new file mode 100644 index 00000000..0d2e5030 --- /dev/null +++ b/APEX_1.4/framework/include/autogen/BufferU8x3.h @@ -0,0 +1,245 @@ +// This code contains NVIDIA Confidential Information and is disclosed to you +// under a form of NVIDIA software license agreement provided separately to you. +// +// Notice +// NVIDIA Corporation and its licensors retain all intellectual property and +// proprietary rights in and to this software and related documentation and +// any modifications thereto. 
Any use, reproduction, disclosure, or +// distribution of this software and related documentation without an express +// license agreement from NVIDIA Corporation is strictly prohibited. +// +// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES +// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO +// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT, +// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE. +// +// Information and code furnished is believed to be accurate and reliable. +// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such +// information or for any infringement of patents or other rights of third parties that may +// result from its use. No license is granted by implication or otherwise under any patent +// or patent rights of NVIDIA Corporation. Details are subject to change without notice. +// This code supersedes and replaces all information previously supplied. +// NVIDIA Corporation products are not authorized for use as critical +// components in life support devices or systems without express written approval of +// NVIDIA Corporation. +// +// Copyright (c) 2008-2015 NVIDIA Corporation. All rights reserved. 
+ +// This file was generated by NvParameterized/scripts/GenParameterized.pl + + +#ifndef HEADER_BufferU8x3_h +#define HEADER_BufferU8x3_h + +#include "NvParametersTypes.h" + +#ifndef NV_PARAMETERIZED_ONLY_LAYOUTS +#include "nvparameterized/NvParameterized.h" +#include "nvparameterized/NvParameterizedTraits.h" +#include "NvParameters.h" +#include "NvTraitsInternal.h" +#endif + +namespace nvidia +{ +namespace apex +{ + +#if PX_VC +#pragma warning(push) +#pragma warning(disable: 4324) // structure was padded due to __declspec(align()) +#endif + +namespace BufferU8x3NS +{ + +struct U8x3_Type; + +struct U8x3_DynamicArray1D_Type +{ + U8x3_Type* buf; + bool isAllocated; + int32_t elementSize; + int32_t arraySizes[1]; +}; + +struct U8x3_Type +{ + uint8_t x; + uint8_t y; + uint8_t z; +}; + +struct ParametersStruct +{ + + U8x3_DynamicArray1D_Type data; + +}; + +static const uint32_t checksum[] = { 0xf5cdaf40, 0x1d38c18b, 0x85082025, 0x4cefb88a, }; + +} // namespace BufferU8x3NS + +#ifndef NV_PARAMETERIZED_ONLY_LAYOUTS +class BufferU8x3 : public NvParameterized::NvParameters, public BufferU8x3NS::ParametersStruct +{ +public: + BufferU8x3(NvParameterized::Traits* traits, void* buf = 0, int32_t* refCount = 0); + + virtual ~BufferU8x3(); + + virtual void destroy(); + + static const char* staticClassName(void) + { + return("BufferU8x3"); + } + + const char* className(void) const + { + return(staticClassName()); + } + + static const uint32_t ClassVersion = ((uint32_t)0 << 16) + (uint32_t)0; + + static uint32_t staticVersion(void) + { + return ClassVersion; + } + + uint32_t version(void) const + { + return(staticVersion()); + } + + static const uint32_t ClassAlignment = 8; + + static const uint32_t* staticChecksum(uint32_t& bits) + { + bits = 8 * sizeof(BufferU8x3NS::checksum); + return BufferU8x3NS::checksum; + } + + static void freeParameterDefinitionTable(NvParameterized::Traits* traits); + + const uint32_t* checksum(uint32_t& bits) const + { + return staticChecksum(bits); + } 
+ + const BufferU8x3NS::ParametersStruct& parameters(void) const + { + BufferU8x3* tmpThis = const_cast<BufferU8x3*>(this); + return *(static_cast<BufferU8x3NS::ParametersStruct*>(tmpThis)); + } + + BufferU8x3NS::ParametersStruct& parameters(void) + { + return *(static_cast<BufferU8x3NS::ParametersStruct*>(this)); + } + + virtual NvParameterized::ErrorType getParameterHandle(const char* long_name, NvParameterized::Handle& handle) const; + virtual NvParameterized::ErrorType getParameterHandle(const char* long_name, NvParameterized::Handle& handle); + + void initDefaults(void); + +protected: + + virtual const NvParameterized::DefinitionImpl* getParameterDefinitionTree(void); + virtual const NvParameterized::DefinitionImpl* getParameterDefinitionTree(void) const; + + + virtual void getVarPtr(const NvParameterized::Handle& handle, void*& ptr, size_t& offset) const; + +private: + + void buildTree(void); + void initDynamicArrays(void); + void initStrings(void); + void initReferences(void); + void freeDynamicArrays(void); + void freeStrings(void); + void freeReferences(void); + + static bool mBuiltFlag; + static NvParameterized::MutexType mBuiltFlagMutex; +}; + +class BufferU8x3Factory : public NvParameterized::Factory +{ + static const char* const vptr; + +public: + + virtual void freeParameterDefinitionTable(NvParameterized::Traits* traits) + { + BufferU8x3::freeParameterDefinitionTable(traits); + } + + virtual NvParameterized::Interface* create(NvParameterized::Traits* paramTraits) + { + // placement new on this class using mParameterizedTraits + + void* newPtr = paramTraits->alloc(sizeof(BufferU8x3), BufferU8x3::ClassAlignment); + if (!NvParameterized::IsAligned(newPtr, BufferU8x3::ClassAlignment)) + { + NV_PARAM_TRAITS_WARNING(paramTraits, "Unaligned memory allocation for class BufferU8x3"); + paramTraits->free(newPtr); + return 0; + } + + memset(newPtr, 0, sizeof(BufferU8x3)); // always initialize memory allocated to zero for default values + return 
NV_PARAM_PLACEMENT_NEW(newPtr, BufferU8x3)(paramTraits); + } + + virtual NvParameterized::Interface* finish(NvParameterized::Traits* paramTraits, void* bufObj, void* bufStart, int32_t* refCount) + { + if (!NvParameterized::IsAligned(bufObj, BufferU8x3::ClassAlignment) + || !NvParameterized::IsAligned(bufStart, BufferU8x3::ClassAlignment)) + { + NV_PARAM_TRAITS_WARNING(paramTraits, "Unaligned memory allocation for class BufferU8x3"); + return 0; + } + + // Init NvParameters-part + // We used to call empty constructor of BufferU8x3 here + // but it may call default constructors of members and spoil the data + NV_PARAM_PLACEMENT_NEW(bufObj, NvParameterized::NvParameters)(paramTraits, bufStart, refCount); + + // Init vtable (everything else is already initialized) + *(const char**)bufObj = vptr; + + return (BufferU8x3*)bufObj; + } + + virtual const char* getClassName() + { + return (BufferU8x3::staticClassName()); + } + + virtual uint32_t getVersion() + { + return (BufferU8x3::staticVersion()); + } + + virtual uint32_t getAlignment() + { + return (BufferU8x3::ClassAlignment); + } + + virtual const uint32_t* getChecksum(uint32_t& bits) + { + return (BufferU8x3::staticChecksum(bits)); + } +}; +#endif // NV_PARAMETERIZED_ONLY_LAYOUTS + +} // namespace apex +} // namespace nvidia + +#if PX_VC +#pragma warning(pop) +#endif + +#endif diff --git a/APEX_1.4/framework/include/autogen/BufferU8x4.h b/APEX_1.4/framework/include/autogen/BufferU8x4.h new file mode 100644 index 00000000..0e9b9c7a --- /dev/null +++ b/APEX_1.4/framework/include/autogen/BufferU8x4.h @@ -0,0 +1,246 @@ +// This code contains NVIDIA Confidential Information and is disclosed to you +// under a form of NVIDIA software license agreement provided separately to you. +// +// Notice +// NVIDIA Corporation and its licensors retain all intellectual property and +// proprietary rights in and to this software and related documentation and +// any modifications thereto. 
Any use, reproduction, disclosure, or +// distribution of this software and related documentation without an express +// license agreement from NVIDIA Corporation is strictly prohibited. +// +// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES +// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO +// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT, +// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE. +// +// Information and code furnished is believed to be accurate and reliable. +// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such +// information or for any infringement of patents or other rights of third parties that may +// result from its use. No license is granted by implication or otherwise under any patent +// or patent rights of NVIDIA Corporation. Details are subject to change without notice. +// This code supersedes and replaces all information previously supplied. +// NVIDIA Corporation products are not authorized for use as critical +// components in life support devices or systems without express written approval of +// NVIDIA Corporation. +// +// Copyright (c) 2008-2015 NVIDIA Corporation. All rights reserved. 
+ +// This file was generated by NvParameterized/scripts/GenParameterized.pl + + +#ifndef HEADER_BufferU8x4_h +#define HEADER_BufferU8x4_h + +#include "NvParametersTypes.h" + +#ifndef NV_PARAMETERIZED_ONLY_LAYOUTS +#include "nvparameterized/NvParameterized.h" +#include "nvparameterized/NvParameterizedTraits.h" +#include "NvParameters.h" +#include "NvTraitsInternal.h" +#endif + +namespace nvidia +{ +namespace apex +{ + +#if PX_VC +#pragma warning(push) +#pragma warning(disable: 4324) // structure was padded due to __declspec(align()) +#endif + +namespace BufferU8x4NS +{ + +struct U8x4_Type; + +struct U8x4_DynamicArray1D_Type +{ + U8x4_Type* buf; + bool isAllocated; + int32_t elementSize; + int32_t arraySizes[1]; +}; + +struct U8x4_Type +{ + uint8_t x; + uint8_t y; + uint8_t z; + uint8_t w; +}; + +struct ParametersStruct +{ + + U8x4_DynamicArray1D_Type data; + +}; + +static const uint32_t checksum[] = { 0x8364dd0f, 0xbc7542d0, 0x1a22d2d2, 0x01fc1923, }; + +} // namespace BufferU8x4NS + +#ifndef NV_PARAMETERIZED_ONLY_LAYOUTS +class BufferU8x4 : public NvParameterized::NvParameters, public BufferU8x4NS::ParametersStruct +{ +public: + BufferU8x4(NvParameterized::Traits* traits, void* buf = 0, int32_t* refCount = 0); + + virtual ~BufferU8x4(); + + virtual void destroy(); + + static const char* staticClassName(void) + { + return("BufferU8x4"); + } + + const char* className(void) const + { + return(staticClassName()); + } + + static const uint32_t ClassVersion = ((uint32_t)0 << 16) + (uint32_t)0; + + static uint32_t staticVersion(void) + { + return ClassVersion; + } + + uint32_t version(void) const + { + return(staticVersion()); + } + + static const uint32_t ClassAlignment = 8; + + static const uint32_t* staticChecksum(uint32_t& bits) + { + bits = 8 * sizeof(BufferU8x4NS::checksum); + return BufferU8x4NS::checksum; + } + + static void freeParameterDefinitionTable(NvParameterized::Traits* traits); + + const uint32_t* checksum(uint32_t& bits) const + { + return 
staticChecksum(bits); + } + + const BufferU8x4NS::ParametersStruct& parameters(void) const + { + BufferU8x4* tmpThis = const_cast<BufferU8x4*>(this); + return *(static_cast<BufferU8x4NS::ParametersStruct*>(tmpThis)); + } + + BufferU8x4NS::ParametersStruct& parameters(void) + { + return *(static_cast<BufferU8x4NS::ParametersStruct*>(this)); + } + + virtual NvParameterized::ErrorType getParameterHandle(const char* long_name, NvParameterized::Handle& handle) const; + virtual NvParameterized::ErrorType getParameterHandle(const char* long_name, NvParameterized::Handle& handle); + + void initDefaults(void); + +protected: + + virtual const NvParameterized::DefinitionImpl* getParameterDefinitionTree(void); + virtual const NvParameterized::DefinitionImpl* getParameterDefinitionTree(void) const; + + + virtual void getVarPtr(const NvParameterized::Handle& handle, void*& ptr, size_t& offset) const; + +private: + + void buildTree(void); + void initDynamicArrays(void); + void initStrings(void); + void initReferences(void); + void freeDynamicArrays(void); + void freeStrings(void); + void freeReferences(void); + + static bool mBuiltFlag; + static NvParameterized::MutexType mBuiltFlagMutex; +}; + +class BufferU8x4Factory : public NvParameterized::Factory +{ + static const char* const vptr; + +public: + + virtual void freeParameterDefinitionTable(NvParameterized::Traits* traits) + { + BufferU8x4::freeParameterDefinitionTable(traits); + } + + virtual NvParameterized::Interface* create(NvParameterized::Traits* paramTraits) + { + // placement new on this class using mParameterizedTraits + + void* newPtr = paramTraits->alloc(sizeof(BufferU8x4), BufferU8x4::ClassAlignment); + if (!NvParameterized::IsAligned(newPtr, BufferU8x4::ClassAlignment)) + { + NV_PARAM_TRAITS_WARNING(paramTraits, "Unaligned memory allocation for class BufferU8x4"); + paramTraits->free(newPtr); + return 0; + } + + memset(newPtr, 0, sizeof(BufferU8x4)); // always initialize memory allocated to zero for default values 
+ return NV_PARAM_PLACEMENT_NEW(newPtr, BufferU8x4)(paramTraits); + } + + virtual NvParameterized::Interface* finish(NvParameterized::Traits* paramTraits, void* bufObj, void* bufStart, int32_t* refCount) + { + if (!NvParameterized::IsAligned(bufObj, BufferU8x4::ClassAlignment) + || !NvParameterized::IsAligned(bufStart, BufferU8x4::ClassAlignment)) + { + NV_PARAM_TRAITS_WARNING(paramTraits, "Unaligned memory allocation for class BufferU8x4"); + return 0; + } + + // Init NvParameters-part + // We used to call empty constructor of BufferU8x4 here + // but it may call default constructors of members and spoil the data + NV_PARAM_PLACEMENT_NEW(bufObj, NvParameterized::NvParameters)(paramTraits, bufStart, refCount); + + // Init vtable (everything else is already initialized) + *(const char**)bufObj = vptr; + + return (BufferU8x4*)bufObj; + } + + virtual const char* getClassName() + { + return (BufferU8x4::staticClassName()); + } + + virtual uint32_t getVersion() + { + return (BufferU8x4::staticVersion()); + } + + virtual uint32_t getAlignment() + { + return (BufferU8x4::ClassAlignment); + } + + virtual const uint32_t* getChecksum(uint32_t& bits) + { + return (BufferU8x4::staticChecksum(bits)); + } +}; +#endif // NV_PARAMETERIZED_ONLY_LAYOUTS + +} // namespace apex +} // namespace nvidia + +#if PX_VC +#pragma warning(pop) +#endif + +#endif diff --git a/APEX_1.4/framework/include/autogen/ModuleFrameworkRegistration.h b/APEX_1.4/framework/include/autogen/ModuleFrameworkRegistration.h new file mode 100644 index 00000000..3b357bd6 --- /dev/null +++ b/APEX_1.4/framework/include/autogen/ModuleFrameworkRegistration.h @@ -0,0 +1,174 @@ +/* + * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved. + * + * NVIDIA CORPORATION and its licensors retain all intellectual property + * and proprietary rights in and to this software, related documentation + * and any modifications thereto. 
Any use, reproduction, disclosure or + * distribution of this software and related documentation without an express + * license agreement from NVIDIA CORPORATION is strictly prohibited. + */ + +#ifndef MODULE_MODULEFRAMEWORKREGISTRATIONH_H +#define MODULE_MODULEFRAMEWORKREGISTRATIONH_H + +#include "PsAllocator.h" +#include "NvRegistrationsForTraitsBase.h" +#include "nvparameterized/NvParameterizedTraits.h" +#include "PxAssert.h" +#include <stdint.h> + +// INCLUDE GENERATED FACTORIES +#include "VertexFormatParameters.h" +#include "VertexBufferParameters.h" +#include "SurfaceBufferParameters.h" +#include "SubmeshParameters.h" +#include "RenderMeshAssetParameters.h" +#include "BufferU8x1.h" +#include "BufferU8x2.h" +#include "BufferU8x3.h" +#include "BufferU8x4.h" +#include "BufferU16x1.h" +#include "BufferU16x2.h" +#include "BufferU16x3.h" +#include "BufferU16x4.h" +#include "BufferU32x1.h" +#include "BufferU32x2.h" +#include "BufferU32x3.h" +#include "BufferU32x4.h" +#include "BufferF32x1.h" +#include "BufferF32x2.h" +#include "BufferF32x3.h" +#include "BufferF32x4.h" + + +// INCLUDE GENERATED CONVERSION + + +namespace nvidia { +namespace apex { + + +class ModuleFrameworkRegistration : public NvParameterized::RegistrationsForTraitsBase +{ +public: + static void invokeRegistration(NvParameterized::Traits* parameterizedTraits) + { + if (parameterizedTraits) + { + ModuleFrameworkRegistration().registerAll(*parameterizedTraits); + } + } + + static void invokeUnregistration(NvParameterized::Traits* parameterizedTraits) + { + if (parameterizedTraits) + { + ModuleFrameworkRegistration().unregisterAll(*parameterizedTraits); + } + } + + void registerAvailableFactories(NvParameterized::Traits& parameterizedTraits) + { + ::NvParameterized::Factory* factoriesToRegister[] = { +// REGISTER GENERATED FACTORIES + new nvidia::apex::VertexFormatParametersFactory(), + new nvidia::apex::VertexBufferParametersFactory(), + new nvidia::apex::SurfaceBufferParametersFactory(), + new 
nvidia::apex::SubmeshParametersFactory(), + new nvidia::apex::RenderMeshAssetParametersFactory(), + new nvidia::apex::BufferU8x1Factory(), + new nvidia::apex::BufferU8x2Factory(), + new nvidia::apex::BufferU8x3Factory(), + new nvidia::apex::BufferU8x4Factory(), + new nvidia::apex::BufferU16x1Factory(), + new nvidia::apex::BufferU16x2Factory(), + new nvidia::apex::BufferU16x3Factory(), + new nvidia::apex::BufferU16x4Factory(), + new nvidia::apex::BufferU32x1Factory(), + new nvidia::apex::BufferU32x2Factory(), + new nvidia::apex::BufferU32x3Factory(), + new nvidia::apex::BufferU32x4Factory(), + new nvidia::apex::BufferF32x1Factory(), + new nvidia::apex::BufferF32x2Factory(), + new nvidia::apex::BufferF32x3Factory(), + new nvidia::apex::BufferF32x4Factory(), + + }; + + for (size_t i = 0; i < sizeof(factoriesToRegister)/sizeof(factoriesToRegister[0]); ++i) + { + parameterizedTraits.registerFactory(*factoriesToRegister[i]); + } + } + + virtual void registerAvailableConverters(NvParameterized::Traits& parameterizedTraits) + { +// REGISTER GENERATED CONVERSION +PX_UNUSED(parameterizedTraits); + + } + + void unregisterAvailableFactories(NvParameterized::Traits& parameterizedTraits) + { + struct FactoryDesc + { + const char* name; + uint32_t version; + }; + + ::NvParameterized::Factory* factoriesToUnregister[] = { +// UNREGISTER GENERATED FACTORIES + new nvidia::apex::VertexFormatParametersFactory(), + new nvidia::apex::VertexBufferParametersFactory(), + new nvidia::apex::SurfaceBufferParametersFactory(), + new nvidia::apex::SubmeshParametersFactory(), + new nvidia::apex::RenderMeshAssetParametersFactory(), + new nvidia::apex::BufferU8x1Factory(), + new nvidia::apex::BufferU8x2Factory(), + new nvidia::apex::BufferU8x3Factory(), + new nvidia::apex::BufferU8x4Factory(), + new nvidia::apex::BufferU16x1Factory(), + new nvidia::apex::BufferU16x2Factory(), + new nvidia::apex::BufferU16x3Factory(), + new nvidia::apex::BufferU16x4Factory(), + new nvidia::apex::BufferU32x1Factory(), 
+ new nvidia::apex::BufferU32x2Factory(), + new nvidia::apex::BufferU32x3Factory(), + new nvidia::apex::BufferU32x4Factory(), + new nvidia::apex::BufferF32x1Factory(), + new nvidia::apex::BufferF32x2Factory(), + new nvidia::apex::BufferF32x3Factory(), + new nvidia::apex::BufferF32x4Factory(), + + }; + + for (size_t i = 0; i < sizeof(factoriesToUnregister)/sizeof(factoriesToUnregister[0]); ++i) + { + ::NvParameterized::Factory* removedFactory = parameterizedTraits.removeFactory(factoriesToUnregister[i]->getClassName(), factoriesToUnregister[i]->getVersion()); + if (!removedFactory) + { + PX_ASSERT_WITH_MESSAGE(0, "Factory can not be removed!"); + } + else + { + removedFactory->freeParameterDefinitionTable(&parameterizedTraits); + delete removedFactory; + delete factoriesToUnregister[i]; + } + } + } + + virtual void unregisterAvailableConverters(NvParameterized::Traits& parameterizedTraits) + { +// UNREGISTER GENERATED CONVERSION +PX_UNUSED(parameterizedTraits); + + } + +}; + + +} +} //nvidia::apex + +#endif diff --git a/APEX_1.4/framework/include/autogen/RenderMeshAssetParameters.h b/APEX_1.4/framework/include/autogen/RenderMeshAssetParameters.h new file mode 100644 index 00000000..bb56f5a0 --- /dev/null +++ b/APEX_1.4/framework/include/autogen/RenderMeshAssetParameters.h @@ -0,0 +1,260 @@ +// This code contains NVIDIA Confidential Information and is disclosed to you +// under a form of NVIDIA software license agreement provided separately to you. +// +// Notice +// NVIDIA Corporation and its licensors retain all intellectual property and +// proprietary rights in and to this software and related documentation and +// any modifications thereto. Any use, reproduction, disclosure, or +// distribution of this software and related documentation without an express +// license agreement from NVIDIA Corporation is strictly prohibited. +// +// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". 
NVIDIA MAKES +// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO +// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT, +// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE. +// +// Information and code furnished is believed to be accurate and reliable. +// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such +// information or for any infringement of patents or other rights of third parties that may +// result from its use. No license is granted by implication or otherwise under any patent +// or patent rights of NVIDIA Corporation. Details are subject to change without notice. +// This code supersedes and replaces all information previously supplied. +// NVIDIA Corporation products are not authorized for use as critical +// components in life support devices or systems without express written approval of +// NVIDIA Corporation. +// +// Copyright (c) 2008-2015 NVIDIA Corporation. All rights reserved. 
+ +// This file was generated by NvParameterized/scripts/GenParameterized.pl + + +#ifndef HEADER_RenderMeshAssetParameters_h +#define HEADER_RenderMeshAssetParameters_h + +#include "NvParametersTypes.h" + +#ifndef NV_PARAMETERIZED_ONLY_LAYOUTS +#include "nvparameterized/NvParameterized.h" +#include "nvparameterized/NvParameterizedTraits.h" +#include "NvParameters.h" +#include "NvTraitsInternal.h" +#endif + +namespace nvidia +{ +namespace apex +{ + +#if PX_VC +#pragma warning(push) +#pragma warning(disable: 4324) // structure was padded due to __declspec(align()) +#endif + +namespace RenderMeshAssetParametersNS +{ + + +struct REF_DynamicArray1D_Type +{ + NvParameterized::Interface** buf; + bool isAllocated; + int32_t elementSize; + int32_t arraySizes[1]; +}; + +struct STRING_DynamicArray1D_Type +{ + NvParameterized::DummyStringStruct* buf; + bool isAllocated; + int32_t elementSize; + int32_t arraySizes[1]; +}; + +struct BOUNDS3_DynamicArray1D_Type +{ + physx::PxBounds3* buf; + bool isAllocated; + int32_t elementSize; + int32_t arraySizes[1]; +}; + + +struct ParametersStruct +{ + + REF_DynamicArray1D_Type submeshes; + STRING_DynamicArray1D_Type materialNames; + BOUNDS3_DynamicArray1D_Type partBounds; + uint32_t textureUVOrigin; + uint32_t boneCount; + bool deleteStaticBuffersAfterUse; + bool isReferenced; + +}; + +static const uint32_t checksum[] = { 0x119d6f62, 0x8d1ff03d, 0x19864d20, 0x93421fd0, }; + +} // namespace RenderMeshAssetParametersNS + +#ifndef NV_PARAMETERIZED_ONLY_LAYOUTS +class RenderMeshAssetParameters : public NvParameterized::NvParameters, public RenderMeshAssetParametersNS::ParametersStruct +{ +public: + RenderMeshAssetParameters(NvParameterized::Traits* traits, void* buf = 0, int32_t* refCount = 0); + + virtual ~RenderMeshAssetParameters(); + + virtual void destroy(); + + static const char* staticClassName(void) + { + return("RenderMeshAssetParameters"); + } + + const char* className(void) const + { + return(staticClassName()); + } + + static 
const uint32_t ClassVersion = ((uint32_t)0 << 16) + (uint32_t)0; + + static uint32_t staticVersion(void) + { + return ClassVersion; + } + + uint32_t version(void) const + { + return(staticVersion()); + } + + static const uint32_t ClassAlignment = 8; + + static const uint32_t* staticChecksum(uint32_t& bits) + { + bits = 8 * sizeof(RenderMeshAssetParametersNS::checksum); + return RenderMeshAssetParametersNS::checksum; + } + + static void freeParameterDefinitionTable(NvParameterized::Traits* traits); + + const uint32_t* checksum(uint32_t& bits) const + { + return staticChecksum(bits); + } + + const RenderMeshAssetParametersNS::ParametersStruct& parameters(void) const + { + RenderMeshAssetParameters* tmpThis = const_cast<RenderMeshAssetParameters*>(this); + return *(static_cast<RenderMeshAssetParametersNS::ParametersStruct*>(tmpThis)); + } + + RenderMeshAssetParametersNS::ParametersStruct& parameters(void) + { + return *(static_cast<RenderMeshAssetParametersNS::ParametersStruct*>(this)); + } + + virtual NvParameterized::ErrorType getParameterHandle(const char* long_name, NvParameterized::Handle& handle) const; + virtual NvParameterized::ErrorType getParameterHandle(const char* long_name, NvParameterized::Handle& handle); + + void initDefaults(void); + +protected: + + virtual const NvParameterized::DefinitionImpl* getParameterDefinitionTree(void); + virtual const NvParameterized::DefinitionImpl* getParameterDefinitionTree(void) const; + + + virtual void getVarPtr(const NvParameterized::Handle& handle, void*& ptr, size_t& offset) const; + +private: + + void buildTree(void); + void initDynamicArrays(void); + void initStrings(void); + void initReferences(void); + void freeDynamicArrays(void); + void freeStrings(void); + void freeReferences(void); + + static bool mBuiltFlag; + static NvParameterized::MutexType mBuiltFlagMutex; +}; + +class RenderMeshAssetParametersFactory : public NvParameterized::Factory +{ + static const char* const vptr; + +public: + + virtual void 
freeParameterDefinitionTable(NvParameterized::Traits* traits) + { + RenderMeshAssetParameters::freeParameterDefinitionTable(traits); + } + + virtual NvParameterized::Interface* create(NvParameterized::Traits* paramTraits) + { + // placement new on this class using mParameterizedTraits + + void* newPtr = paramTraits->alloc(sizeof(RenderMeshAssetParameters), RenderMeshAssetParameters::ClassAlignment); + if (!NvParameterized::IsAligned(newPtr, RenderMeshAssetParameters::ClassAlignment)) + { + NV_PARAM_TRAITS_WARNING(paramTraits, "Unaligned memory allocation for class RenderMeshAssetParameters"); + paramTraits->free(newPtr); + return 0; + } + + memset(newPtr, 0, sizeof(RenderMeshAssetParameters)); // always initialize memory allocated to zero for default values + return NV_PARAM_PLACEMENT_NEW(newPtr, RenderMeshAssetParameters)(paramTraits); + } + + virtual NvParameterized::Interface* finish(NvParameterized::Traits* paramTraits, void* bufObj, void* bufStart, int32_t* refCount) + { + if (!NvParameterized::IsAligned(bufObj, RenderMeshAssetParameters::ClassAlignment) + || !NvParameterized::IsAligned(bufStart, RenderMeshAssetParameters::ClassAlignment)) + { + NV_PARAM_TRAITS_WARNING(paramTraits, "Unaligned memory allocation for class RenderMeshAssetParameters"); + return 0; + } + + // Init NvParameters-part + // We used to call empty constructor of RenderMeshAssetParameters here + // but it may call default constructors of members and spoil the data + NV_PARAM_PLACEMENT_NEW(bufObj, NvParameterized::NvParameters)(paramTraits, bufStart, refCount); + + // Init vtable (everything else is already initialized) + *(const char**)bufObj = vptr; + + return (RenderMeshAssetParameters*)bufObj; + } + + virtual const char* getClassName() + { + return (RenderMeshAssetParameters::staticClassName()); + } + + virtual uint32_t getVersion() + { + return (RenderMeshAssetParameters::staticVersion()); + } + + virtual uint32_t getAlignment() + { + return 
(RenderMeshAssetParameters::ClassAlignment); + } + + virtual const uint32_t* getChecksum(uint32_t& bits) + { + return (RenderMeshAssetParameters::staticChecksum(bits)); + } +}; +#endif // NV_PARAMETERIZED_ONLY_LAYOUTS + +} // namespace apex +} // namespace nvidia + +#if PX_VC +#pragma warning(pop) +#endif + +#endif diff --git a/APEX_1.4/framework/include/autogen/SubmeshParameters.h b/APEX_1.4/framework/include/autogen/SubmeshParameters.h new file mode 100644 index 00000000..feca716f --- /dev/null +++ b/APEX_1.4/framework/include/autogen/SubmeshParameters.h @@ -0,0 +1,242 @@ +// This code contains NVIDIA Confidential Information and is disclosed to you +// under a form of NVIDIA software license agreement provided separately to you. +// +// Notice +// NVIDIA Corporation and its licensors retain all intellectual property and +// proprietary rights in and to this software and related documentation and +// any modifications thereto. Any use, reproduction, disclosure, or +// distribution of this software and related documentation without an express +// license agreement from NVIDIA Corporation is strictly prohibited. +// +// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES +// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO +// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT, +// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE. +// +// Information and code furnished is believed to be accurate and reliable. +// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such +// information or for any infringement of patents or other rights of third parties that may +// result from its use. No license is granted by implication or otherwise under any patent +// or patent rights of NVIDIA Corporation. Details are subject to change without notice. +// This code supersedes and replaces all information previously supplied. 
+// NVIDIA Corporation products are not authorized for use as critical +// components in life support devices or systems without express written approval of +// NVIDIA Corporation. +// +// Copyright (c) 2008-2015 NVIDIA Corporation. All rights reserved. + +// This file was generated by NvParameterized/scripts/GenParameterized.pl + + +#ifndef HEADER_SubmeshParameters_h +#define HEADER_SubmeshParameters_h + +#include "NvParametersTypes.h" + +#ifndef NV_PARAMETERIZED_ONLY_LAYOUTS +#include "nvparameterized/NvParameterized.h" +#include "nvparameterized/NvParameterizedTraits.h" +#include "NvParameters.h" +#include "NvTraitsInternal.h" +#endif + +namespace nvidia +{ +namespace apex +{ + +#if PX_VC +#pragma warning(push) +#pragma warning(disable: 4324) // structure was padded due to __declspec(align()) +#endif + +namespace SubmeshParametersNS +{ + + +struct U32_DynamicArray1D_Type +{ + uint32_t* buf; + bool isAllocated; + int32_t elementSize; + int32_t arraySizes[1]; +}; + + +struct ParametersStruct +{ + + NvParameterized::Interface* vertexBuffer; + U32_DynamicArray1D_Type indexBuffer; + U32_DynamicArray1D_Type vertexPartition; + U32_DynamicArray1D_Type indexPartition; + U32_DynamicArray1D_Type smoothingGroups; + +}; + +static const uint32_t checksum[] = { 0xb2b4f308, 0x5f4b8da6, 0x4d45daeb, 0xbfc7d9b0, }; + +} // namespace SubmeshParametersNS + +#ifndef NV_PARAMETERIZED_ONLY_LAYOUTS +class SubmeshParameters : public NvParameterized::NvParameters, public SubmeshParametersNS::ParametersStruct +{ +public: + SubmeshParameters(NvParameterized::Traits* traits, void* buf = 0, int32_t* refCount = 0); + + virtual ~SubmeshParameters(); + + virtual void destroy(); + + static const char* staticClassName(void) + { + return("SubmeshParameters"); + } + + const char* className(void) const + { + return(staticClassName()); + } + + static const uint32_t ClassVersion = ((uint32_t)0 << 16) + (uint32_t)1; + + static uint32_t staticVersion(void) + { + return ClassVersion; + } + + uint32_t 
version(void) const + { + return(staticVersion()); + } + + static const uint32_t ClassAlignment = 8; + + static const uint32_t* staticChecksum(uint32_t& bits) + { + bits = 8 * sizeof(SubmeshParametersNS::checksum); + return SubmeshParametersNS::checksum; + } + + static void freeParameterDefinitionTable(NvParameterized::Traits* traits); + + const uint32_t* checksum(uint32_t& bits) const + { + return staticChecksum(bits); + } + + const SubmeshParametersNS::ParametersStruct& parameters(void) const + { + SubmeshParameters* tmpThis = const_cast<SubmeshParameters*>(this); + return *(static_cast<SubmeshParametersNS::ParametersStruct*>(tmpThis)); + } + + SubmeshParametersNS::ParametersStruct& parameters(void) + { + return *(static_cast<SubmeshParametersNS::ParametersStruct*>(this)); + } + + virtual NvParameterized::ErrorType getParameterHandle(const char* long_name, NvParameterized::Handle& handle) const; + virtual NvParameterized::ErrorType getParameterHandle(const char* long_name, NvParameterized::Handle& handle); + + void initDefaults(void); + +protected: + + virtual const NvParameterized::DefinitionImpl* getParameterDefinitionTree(void); + virtual const NvParameterized::DefinitionImpl* getParameterDefinitionTree(void) const; + + + virtual void getVarPtr(const NvParameterized::Handle& handle, void*& ptr, size_t& offset) const; + +private: + + void buildTree(void); + void initDynamicArrays(void); + void initStrings(void); + void initReferences(void); + void freeDynamicArrays(void); + void freeStrings(void); + void freeReferences(void); + + static bool mBuiltFlag; + static NvParameterized::MutexType mBuiltFlagMutex; +}; + +class SubmeshParametersFactory : public NvParameterized::Factory +{ + static const char* const vptr; + +public: + + virtual void freeParameterDefinitionTable(NvParameterized::Traits* traits) + { + SubmeshParameters::freeParameterDefinitionTable(traits); + } + + virtual NvParameterized::Interface* create(NvParameterized::Traits* paramTraits) + { + // 
placement new on this class using mParameterizedTraits + + void* newPtr = paramTraits->alloc(sizeof(SubmeshParameters), SubmeshParameters::ClassAlignment); + if (!NvParameterized::IsAligned(newPtr, SubmeshParameters::ClassAlignment)) + { + NV_PARAM_TRAITS_WARNING(paramTraits, "Unaligned memory allocation for class SubmeshParameters"); + paramTraits->free(newPtr); + return 0; + } + + memset(newPtr, 0, sizeof(SubmeshParameters)); // always initialize memory allocated to zero for default values + return NV_PARAM_PLACEMENT_NEW(newPtr, SubmeshParameters)(paramTraits); + } + + virtual NvParameterized::Interface* finish(NvParameterized::Traits* paramTraits, void* bufObj, void* bufStart, int32_t* refCount) + { + if (!NvParameterized::IsAligned(bufObj, SubmeshParameters::ClassAlignment) + || !NvParameterized::IsAligned(bufStart, SubmeshParameters::ClassAlignment)) + { + NV_PARAM_TRAITS_WARNING(paramTraits, "Unaligned memory allocation for class SubmeshParameters"); + return 0; + } + + // Init NvParameters-part + // We used to call empty constructor of SubmeshParameters here + // but it may call default constructors of members and spoil the data + NV_PARAM_PLACEMENT_NEW(bufObj, NvParameterized::NvParameters)(paramTraits, bufStart, refCount); + + // Init vtable (everything else is already initialized) + *(const char**)bufObj = vptr; + + return (SubmeshParameters*)bufObj; + } + + virtual const char* getClassName() + { + return (SubmeshParameters::staticClassName()); + } + + virtual uint32_t getVersion() + { + return (SubmeshParameters::staticVersion()); + } + + virtual uint32_t getAlignment() + { + return (SubmeshParameters::ClassAlignment); + } + + virtual const uint32_t* getChecksum(uint32_t& bits) + { + return (SubmeshParameters::staticChecksum(bits)); + } +}; +#endif // NV_PARAMETERIZED_ONLY_LAYOUTS + +} // namespace apex +} // namespace nvidia + +#if PX_VC +#pragma warning(pop) +#endif + +#endif diff --git a/APEX_1.4/framework/include/autogen/SurfaceBufferParameters.h 
b/APEX_1.4/framework/include/autogen/SurfaceBufferParameters.h new file mode 100644 index 00000000..63ba3766 --- /dev/null +++ b/APEX_1.4/framework/include/autogen/SurfaceBufferParameters.h @@ -0,0 +1,233 @@ +// This code contains NVIDIA Confidential Information and is disclosed to you +// under a form of NVIDIA software license agreement provided separately to you. +// +// Notice +// NVIDIA Corporation and its licensors retain all intellectual property and +// proprietary rights in and to this software and related documentation and +// any modifications thereto. Any use, reproduction, disclosure, or +// distribution of this software and related documentation without an express +// license agreement from NVIDIA Corporation is strictly prohibited. +// +// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES +// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO +// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT, +// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE. +// +// Information and code furnished is believed to be accurate and reliable. +// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such +// information or for any infringement of patents or other rights of third parties that may +// result from its use. No license is granted by implication or otherwise under any patent +// or patent rights of NVIDIA Corporation. Details are subject to change without notice. +// This code supersedes and replaces all information previously supplied. +// NVIDIA Corporation products are not authorized for use as critical +// components in life support devices or systems without express written approval of +// NVIDIA Corporation. +// +// Copyright (c) 2008-2015 NVIDIA Corporation. All rights reserved. 
+ +// This file was generated by NvParameterized/scripts/GenParameterized.pl + + +#ifndef HEADER_SurfaceBufferParameters_h +#define HEADER_SurfaceBufferParameters_h + +#include "NvParametersTypes.h" + +#ifndef NV_PARAMETERIZED_ONLY_LAYOUTS +#include "nvparameterized/NvParameterized.h" +#include "nvparameterized/NvParameterizedTraits.h" +#include "NvParameters.h" +#include "NvTraitsInternal.h" +#endif + +namespace nvidia +{ +namespace apex +{ + +#if PX_VC +#pragma warning(push) +#pragma warning(disable: 4324) // structure was padded due to __declspec(align()) +#endif + +namespace SurfaceBufferParametersNS +{ + + + +struct ParametersStruct +{ + + uint32_t width; + uint32_t height; + uint32_t surfaceFormat; + NvParameterized::Interface* buffer; + +}; + +static const uint32_t checksum[] = { 0x8c5efe70, 0x9563a3a7, 0x2a52ffa7, 0x43e1a4fb, }; + +} // namespace SurfaceBufferParametersNS + +#ifndef NV_PARAMETERIZED_ONLY_LAYOUTS +class SurfaceBufferParameters : public NvParameterized::NvParameters, public SurfaceBufferParametersNS::ParametersStruct +{ +public: + SurfaceBufferParameters(NvParameterized::Traits* traits, void* buf = 0, int32_t* refCount = 0); + + virtual ~SurfaceBufferParameters(); + + virtual void destroy(); + + static const char* staticClassName(void) + { + return("SurfaceBufferParameters"); + } + + const char* className(void) const + { + return(staticClassName()); + } + + static const uint32_t ClassVersion = ((uint32_t)0 << 16) + (uint32_t)1; + + static uint32_t staticVersion(void) + { + return ClassVersion; + } + + uint32_t version(void) const + { + return(staticVersion()); + } + + static const uint32_t ClassAlignment = 8; + + static const uint32_t* staticChecksum(uint32_t& bits) + { + bits = 8 * sizeof(SurfaceBufferParametersNS::checksum); + return SurfaceBufferParametersNS::checksum; + } + + static void freeParameterDefinitionTable(NvParameterized::Traits* traits); + + const uint32_t* checksum(uint32_t& bits) const + { + return staticChecksum(bits); + } 
+ + const SurfaceBufferParametersNS::ParametersStruct& parameters(void) const + { + SurfaceBufferParameters* tmpThis = const_cast<SurfaceBufferParameters*>(this); + return *(static_cast<SurfaceBufferParametersNS::ParametersStruct*>(tmpThis)); + } + + SurfaceBufferParametersNS::ParametersStruct& parameters(void) + { + return *(static_cast<SurfaceBufferParametersNS::ParametersStruct*>(this)); + } + + virtual NvParameterized::ErrorType getParameterHandle(const char* long_name, NvParameterized::Handle& handle) const; + virtual NvParameterized::ErrorType getParameterHandle(const char* long_name, NvParameterized::Handle& handle); + + void initDefaults(void); + +protected: + + virtual const NvParameterized::DefinitionImpl* getParameterDefinitionTree(void); + virtual const NvParameterized::DefinitionImpl* getParameterDefinitionTree(void) const; + + + virtual void getVarPtr(const NvParameterized::Handle& handle, void*& ptr, size_t& offset) const; + +private: + + void buildTree(void); + void initDynamicArrays(void); + void initStrings(void); + void initReferences(void); + void freeDynamicArrays(void); + void freeStrings(void); + void freeReferences(void); + + static bool mBuiltFlag; + static NvParameterized::MutexType mBuiltFlagMutex; +}; + +class SurfaceBufferParametersFactory : public NvParameterized::Factory +{ + static const char* const vptr; + +public: + + virtual void freeParameterDefinitionTable(NvParameterized::Traits* traits) + { + SurfaceBufferParameters::freeParameterDefinitionTable(traits); + } + + virtual NvParameterized::Interface* create(NvParameterized::Traits* paramTraits) + { + // placement new on this class using mParameterizedTraits + + void* newPtr = paramTraits->alloc(sizeof(SurfaceBufferParameters), SurfaceBufferParameters::ClassAlignment); + if (!NvParameterized::IsAligned(newPtr, SurfaceBufferParameters::ClassAlignment)) + { + NV_PARAM_TRAITS_WARNING(paramTraits, "Unaligned memory allocation for class SurfaceBufferParameters"); + 
paramTraits->free(newPtr); + return 0; + } + + memset(newPtr, 0, sizeof(SurfaceBufferParameters)); // always initialize memory allocated to zero for default values + return NV_PARAM_PLACEMENT_NEW(newPtr, SurfaceBufferParameters)(paramTraits); + } + + virtual NvParameterized::Interface* finish(NvParameterized::Traits* paramTraits, void* bufObj, void* bufStart, int32_t* refCount) + { + if (!NvParameterized::IsAligned(bufObj, SurfaceBufferParameters::ClassAlignment) + || !NvParameterized::IsAligned(bufStart, SurfaceBufferParameters::ClassAlignment)) + { + NV_PARAM_TRAITS_WARNING(paramTraits, "Unaligned memory allocation for class SurfaceBufferParameters"); + return 0; + } + + // Init NvParameters-part + // We used to call empty constructor of SurfaceBufferParameters here + // but it may call default constructors of members and spoil the data + NV_PARAM_PLACEMENT_NEW(bufObj, NvParameterized::NvParameters)(paramTraits, bufStart, refCount); + + // Init vtable (everything else is already initialized) + *(const char**)bufObj = vptr; + + return (SurfaceBufferParameters*)bufObj; + } + + virtual const char* getClassName() + { + return (SurfaceBufferParameters::staticClassName()); + } + + virtual uint32_t getVersion() + { + return (SurfaceBufferParameters::staticVersion()); + } + + virtual uint32_t getAlignment() + { + return (SurfaceBufferParameters::ClassAlignment); + } + + virtual const uint32_t* getChecksum(uint32_t& bits) + { + return (SurfaceBufferParameters::staticChecksum(bits)); + } +}; +#endif // NV_PARAMETERIZED_ONLY_LAYOUTS + +} // namespace apex +} // namespace nvidia + +#if PX_VC +#pragma warning(pop) +#endif + +#endif diff --git a/APEX_1.4/framework/include/autogen/VertexBufferParameters.h b/APEX_1.4/framework/include/autogen/VertexBufferParameters.h new file mode 100644 index 00000000..00cc33fb --- /dev/null +++ b/APEX_1.4/framework/include/autogen/VertexBufferParameters.h @@ -0,0 +1,240 @@ +// This code contains NVIDIA Confidential Information and is disclosed 
to you +// under a form of NVIDIA software license agreement provided separately to you. +// +// Notice +// NVIDIA Corporation and its licensors retain all intellectual property and +// proprietary rights in and to this software and related documentation and +// any modifications thereto. Any use, reproduction, disclosure, or +// distribution of this software and related documentation without an express +// license agreement from NVIDIA Corporation is strictly prohibited. +// +// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES +// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO +// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT, +// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE. +// +// Information and code furnished is believed to be accurate and reliable. +// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such +// information or for any infringement of patents or other rights of third parties that may +// result from its use. No license is granted by implication or otherwise under any patent +// or patent rights of NVIDIA Corporation. Details are subject to change without notice. +// This code supersedes and replaces all information previously supplied. +// NVIDIA Corporation products are not authorized for use as critical +// components in life support devices or systems without express written approval of +// NVIDIA Corporation. +// +// Copyright (c) 2008-2015 NVIDIA Corporation. All rights reserved. 
+ +// This file was generated by NvParameterized/scripts/GenParameterized.pl + + +#ifndef HEADER_VertexBufferParameters_h +#define HEADER_VertexBufferParameters_h + +#include "NvParametersTypes.h" + +#ifndef NV_PARAMETERIZED_ONLY_LAYOUTS +#include "nvparameterized/NvParameterized.h" +#include "nvparameterized/NvParameterizedTraits.h" +#include "NvParameters.h" +#include "NvTraitsInternal.h" +#endif + +namespace nvidia +{ +namespace apex +{ + +#if PX_VC +#pragma warning(push) +#pragma warning(disable: 4324) // structure was padded due to __declspec(align()) +#endif + +namespace VertexBufferParametersNS +{ + + +struct REF_DynamicArray1D_Type +{ + NvParameterized::Interface** buf; + bool isAllocated; + int32_t elementSize; + int32_t arraySizes[1]; +}; + + +struct ParametersStruct +{ + + uint32_t vertexCount; + NvParameterized::Interface* vertexFormat; + REF_DynamicArray1D_Type buffers; + +}; + +static const uint32_t checksum[] = { 0x14ae7314, 0xe50741cb, 0x15eb480c, 0x63f6c571, }; + +} // namespace VertexBufferParametersNS + +#ifndef NV_PARAMETERIZED_ONLY_LAYOUTS +class VertexBufferParameters : public NvParameterized::NvParameters, public VertexBufferParametersNS::ParametersStruct +{ +public: + VertexBufferParameters(NvParameterized::Traits* traits, void* buf = 0, int32_t* refCount = 0); + + virtual ~VertexBufferParameters(); + + virtual void destroy(); + + static const char* staticClassName(void) + { + return("VertexBufferParameters"); + } + + const char* className(void) const + { + return(staticClassName()); + } + + static const uint32_t ClassVersion = ((uint32_t)0 << 16) + (uint32_t)1; + + static uint32_t staticVersion(void) + { + return ClassVersion; + } + + uint32_t version(void) const + { + return(staticVersion()); + } + + static const uint32_t ClassAlignment = 8; + + static const uint32_t* staticChecksum(uint32_t& bits) + { + bits = 8 * sizeof(VertexBufferParametersNS::checksum); + return VertexBufferParametersNS::checksum; + } + + static void 
freeParameterDefinitionTable(NvParameterized::Traits* traits); + + const uint32_t* checksum(uint32_t& bits) const + { + return staticChecksum(bits); + } + + const VertexBufferParametersNS::ParametersStruct& parameters(void) const + { + VertexBufferParameters* tmpThis = const_cast<VertexBufferParameters*>(this); + return *(static_cast<VertexBufferParametersNS::ParametersStruct*>(tmpThis)); + } + + VertexBufferParametersNS::ParametersStruct& parameters(void) + { + return *(static_cast<VertexBufferParametersNS::ParametersStruct*>(this)); + } + + virtual NvParameterized::ErrorType getParameterHandle(const char* long_name, NvParameterized::Handle& handle) const; + virtual NvParameterized::ErrorType getParameterHandle(const char* long_name, NvParameterized::Handle& handle); + + void initDefaults(void); + +protected: + + virtual const NvParameterized::DefinitionImpl* getParameterDefinitionTree(void); + virtual const NvParameterized::DefinitionImpl* getParameterDefinitionTree(void) const; + + + virtual void getVarPtr(const NvParameterized::Handle& handle, void*& ptr, size_t& offset) const; + +private: + + void buildTree(void); + void initDynamicArrays(void); + void initStrings(void); + void initReferences(void); + void freeDynamicArrays(void); + void freeStrings(void); + void freeReferences(void); + + static bool mBuiltFlag; + static NvParameterized::MutexType mBuiltFlagMutex; +}; + +class VertexBufferParametersFactory : public NvParameterized::Factory +{ + static const char* const vptr; + +public: + + virtual void freeParameterDefinitionTable(NvParameterized::Traits* traits) + { + VertexBufferParameters::freeParameterDefinitionTable(traits); + } + + virtual NvParameterized::Interface* create(NvParameterized::Traits* paramTraits) + { + // placement new on this class using mParameterizedTraits + + void* newPtr = paramTraits->alloc(sizeof(VertexBufferParameters), VertexBufferParameters::ClassAlignment); + if (!NvParameterized::IsAligned(newPtr, 
VertexBufferParameters::ClassAlignment)) + { + NV_PARAM_TRAITS_WARNING(paramTraits, "Unaligned memory allocation for class VertexBufferParameters"); + paramTraits->free(newPtr); + return 0; + } + + memset(newPtr, 0, sizeof(VertexBufferParameters)); // always initialize memory allocated to zero for default values + return NV_PARAM_PLACEMENT_NEW(newPtr, VertexBufferParameters)(paramTraits); + } + + virtual NvParameterized::Interface* finish(NvParameterized::Traits* paramTraits, void* bufObj, void* bufStart, int32_t* refCount) + { + if (!NvParameterized::IsAligned(bufObj, VertexBufferParameters::ClassAlignment) + || !NvParameterized::IsAligned(bufStart, VertexBufferParameters::ClassAlignment)) + { + NV_PARAM_TRAITS_WARNING(paramTraits, "Unaligned memory allocation for class VertexBufferParameters"); + return 0; + } + + // Init NvParameters-part + // We used to call empty constructor of VertexBufferParameters here + // but it may call default constructors of members and spoil the data + NV_PARAM_PLACEMENT_NEW(bufObj, NvParameterized::NvParameters)(paramTraits, bufStart, refCount); + + // Init vtable (everything else is already initialized) + *(const char**)bufObj = vptr; + + return (VertexBufferParameters*)bufObj; + } + + virtual const char* getClassName() + { + return (VertexBufferParameters::staticClassName()); + } + + virtual uint32_t getVersion() + { + return (VertexBufferParameters::staticVersion()); + } + + virtual uint32_t getAlignment() + { + return (VertexBufferParameters::ClassAlignment); + } + + virtual const uint32_t* getChecksum(uint32_t& bits) + { + return (VertexBufferParameters::staticChecksum(bits)); + } +}; +#endif // NV_PARAMETERIZED_ONLY_LAYOUTS + +} // namespace apex +} // namespace nvidia + +#if PX_VC +#pragma warning(pop) +#endif + +#endif diff --git a/APEX_1.4/framework/include/autogen/VertexFormatParameters.h b/APEX_1.4/framework/include/autogen/VertexFormatParameters.h new file mode 100644 index 00000000..232a72d1 --- /dev/null +++ 
b/APEX_1.4/framework/include/autogen/VertexFormatParameters.h @@ -0,0 +1,250 @@ +// This code contains NVIDIA Confidential Information and is disclosed to you +// under a form of NVIDIA software license agreement provided separately to you. +// +// Notice +// NVIDIA Corporation and its licensors retain all intellectual property and +// proprietary rights in and to this software and related documentation and +// any modifications thereto. Any use, reproduction, disclosure, or +// distribution of this software and related documentation without an express +// license agreement from NVIDIA Corporation is strictly prohibited. +// +// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES +// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO +// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT, +// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE. +// +// Information and code furnished is believed to be accurate and reliable. +// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such +// information or for any infringement of patents or other rights of third parties that may +// result from its use. No license is granted by implication or otherwise under any patent +// or patent rights of NVIDIA Corporation. Details are subject to change without notice. +// This code supersedes and replaces all information previously supplied. +// NVIDIA Corporation products are not authorized for use as critical +// components in life support devices or systems without express written approval of +// NVIDIA Corporation. +// +// Copyright (c) 2008-2015 NVIDIA Corporation. All rights reserved. 
+ +// This file was generated by NvParameterized/scripts/GenParameterized.pl + + +#ifndef HEADER_VertexFormatParameters_h +#define HEADER_VertexFormatParameters_h + +#include "NvParametersTypes.h" + +#ifndef NV_PARAMETERIZED_ONLY_LAYOUTS +#include "nvparameterized/NvParameterized.h" +#include "nvparameterized/NvParameterizedTraits.h" +#include "NvParameters.h" +#include "NvTraitsInternal.h" +#endif + +namespace nvidia +{ +namespace apex +{ + +#if PX_VC +#pragma warning(push) +#pragma warning(disable: 4324) // structure was padded due to __declspec(align()) +#endif + +namespace VertexFormatParametersNS +{ + +struct BufferFormat_Type; + +struct BufferFormat_DynamicArray1D_Type +{ + BufferFormat_Type* buf; + bool isAllocated; + int32_t elementSize; + int32_t arraySizes[1]; +}; + +struct BufferFormat_Type +{ + NvParameterized::DummyStringStruct name; + int32_t semantic; + uint32_t id; + uint32_t format; + uint32_t access; + bool serialize; +}; + +struct ParametersStruct +{ + + uint32_t winding; + bool hasSeparateBoneBuffer; + BufferFormat_DynamicArray1D_Type bufferFormats; + +}; + +static const uint32_t checksum[] = { 0xa7c1ed95, 0x570ed2b1, 0x55717659, 0x9951d139, }; + +} // namespace VertexFormatParametersNS + +#ifndef NV_PARAMETERIZED_ONLY_LAYOUTS +class VertexFormatParameters : public NvParameterized::NvParameters, public VertexFormatParametersNS::ParametersStruct +{ +public: + VertexFormatParameters(NvParameterized::Traits* traits, void* buf = 0, int32_t* refCount = 0); + + virtual ~VertexFormatParameters(); + + virtual void destroy(); + + static const char* staticClassName(void) + { + return("VertexFormatParameters"); + } + + const char* className(void) const + { + return(staticClassName()); + } + + static const uint32_t ClassVersion = ((uint32_t)0 << 16) + (uint32_t)0; + + static uint32_t staticVersion(void) + { + return ClassVersion; + } + + uint32_t version(void) const + { + return(staticVersion()); + } + + static const uint32_t ClassAlignment = 8; + + static 
const uint32_t* staticChecksum(uint32_t& bits) + { + bits = 8 * sizeof(VertexFormatParametersNS::checksum); + return VertexFormatParametersNS::checksum; + } + + static void freeParameterDefinitionTable(NvParameterized::Traits* traits); + + const uint32_t* checksum(uint32_t& bits) const + { + return staticChecksum(bits); + } + + const VertexFormatParametersNS::ParametersStruct& parameters(void) const + { + VertexFormatParameters* tmpThis = const_cast<VertexFormatParameters*>(this); + return *(static_cast<VertexFormatParametersNS::ParametersStruct*>(tmpThis)); + } + + VertexFormatParametersNS::ParametersStruct& parameters(void) + { + return *(static_cast<VertexFormatParametersNS::ParametersStruct*>(this)); + } + + virtual NvParameterized::ErrorType getParameterHandle(const char* long_name, NvParameterized::Handle& handle) const; + virtual NvParameterized::ErrorType getParameterHandle(const char* long_name, NvParameterized::Handle& handle); + + void initDefaults(void); + +protected: + + virtual const NvParameterized::DefinitionImpl* getParameterDefinitionTree(void); + virtual const NvParameterized::DefinitionImpl* getParameterDefinitionTree(void) const; + + + virtual void getVarPtr(const NvParameterized::Handle& handle, void*& ptr, size_t& offset) const; + +private: + + void buildTree(void); + void initDynamicArrays(void); + void initStrings(void); + void initReferences(void); + void freeDynamicArrays(void); + void freeStrings(void); + void freeReferences(void); + + static bool mBuiltFlag; + static NvParameterized::MutexType mBuiltFlagMutex; +}; + +class VertexFormatParametersFactory : public NvParameterized::Factory +{ + static const char* const vptr; + +public: + + virtual void freeParameterDefinitionTable(NvParameterized::Traits* traits) + { + VertexFormatParameters::freeParameterDefinitionTable(traits); + } + + virtual NvParameterized::Interface* create(NvParameterized::Traits* paramTraits) + { + // placement new on this class using mParameterizedTraits + + void* 
newPtr = paramTraits->alloc(sizeof(VertexFormatParameters), VertexFormatParameters::ClassAlignment); + if (!NvParameterized::IsAligned(newPtr, VertexFormatParameters::ClassAlignment)) + { + NV_PARAM_TRAITS_WARNING(paramTraits, "Unaligned memory allocation for class VertexFormatParameters"); + paramTraits->free(newPtr); + return 0; + } + + memset(newPtr, 0, sizeof(VertexFormatParameters)); // always initialize memory allocated to zero for default values + return NV_PARAM_PLACEMENT_NEW(newPtr, VertexFormatParameters)(paramTraits); + } + + virtual NvParameterized::Interface* finish(NvParameterized::Traits* paramTraits, void* bufObj, void* bufStart, int32_t* refCount) + { + if (!NvParameterized::IsAligned(bufObj, VertexFormatParameters::ClassAlignment) + || !NvParameterized::IsAligned(bufStart, VertexFormatParameters::ClassAlignment)) + { + NV_PARAM_TRAITS_WARNING(paramTraits, "Unaligned memory allocation for class VertexFormatParameters"); + return 0; + } + + // Init NvParameters-part + // We used to call empty constructor of VertexFormatParameters here + // but it may call default constructors of members and spoil the data + NV_PARAM_PLACEMENT_NEW(bufObj, NvParameterized::NvParameters)(paramTraits, bufStart, refCount); + + // Init vtable (everything else is already initialized) + *(const char**)bufObj = vptr; + + return (VertexFormatParameters*)bufObj; + } + + virtual const char* getClassName() + { + return (VertexFormatParameters::staticClassName()); + } + + virtual uint32_t getVersion() + { + return (VertexFormatParameters::staticVersion()); + } + + virtual uint32_t getAlignment() + { + return (VertexFormatParameters::ClassAlignment); + } + + virtual const uint32_t* getChecksum(uint32_t& bits) + { + return (VertexFormatParameters::staticChecksum(bits)); + } +}; +#endif // NV_PARAMETERIZED_ONLY_LAYOUTS + +} // namespace apex +} // namespace nvidia + +#if PX_VC +#pragma warning(pop) +#endif + +#endif diff --git a/APEX_1.4/framework/src/ApexAssetPreviewScene.cpp 
b/APEX_1.4/framework/src/ApexAssetPreviewScene.cpp new file mode 100644 index 00000000..ea79202f --- /dev/null +++ b/APEX_1.4/framework/src/ApexAssetPreviewScene.cpp @@ -0,0 +1,72 @@ +/* + * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved. + * + * NVIDIA CORPORATION and its licensors retain all intellectual property + * and proprietary rights in and to this software, related documentation + * and any modifications thereto. Any use, reproduction, disclosure or + * distribution of this software and related documentation without an express + * license agreement from NVIDIA CORPORATION is strictly prohibited. + */ + + +#include "Apex.h" +#include "ApexDefs.h" +#include "ApexAssetPreviewScene.h" +#include "ApexSceneTasks.h" +#include "ApexSDKImpl.h" +#include "ApexActor.h" +#include "FrameworkPerfScope.h" +#include "ApexRenderDebug.h" +#include "ModuleIntl.h" +#include "ApexPvdClient.h" +#include "PsTime.h" +#include "ApexUsingNamespace.h" +#include "PsSync.h" +#include "PxTask.h" +#include "PxTaskManager.h" +#include "PxGpuDispatcher.h" +#include "PxCudaContextManager.h" + + +namespace nvidia +{ +namespace apex +{ + + ApexAssetPreviewScene::ApexAssetPreviewScene(ApexSDKImpl* sdk) : mApexSDK(sdk) + , mShowFullInfo(false) + { + mCameraMatrix = PxMat44(PxIdentity); + } + + void ApexAssetPreviewScene::setCameraMatrix(const PxMat44& cameraMatrix) + { + mCameraMatrix = cameraMatrix; + } + + PxMat44 ApexAssetPreviewScene::getCameraMatrix() const + { + return mCameraMatrix; + } + + void ApexAssetPreviewScene::setShowFullInfo(bool showFullInfo) + { + mShowFullInfo = showFullInfo; + } + + bool ApexAssetPreviewScene::getShowFullInfo() const + { + return mShowFullInfo; + } + + void ApexAssetPreviewScene::release() + { + mApexSDK->releaseAssetPreviewScene(this); + } + + void ApexAssetPreviewScene::destroy() + { + PX_DELETE(this); + } +} +} // end namespace nvidia::apex diff --git a/APEX_1.4/framework/src/ApexCreateSDK.cpp b/APEX_1.4/framework/src/ApexCreateSDK.cpp 
new file mode 100644 index 00000000..2f37d62d --- /dev/null +++ b/APEX_1.4/framework/src/ApexCreateSDK.cpp @@ -0,0 +1,160 @@ +/* + * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved. + * + * NVIDIA CORPORATION and its licensors retain all intellectual property + * and proprietary rights in and to this software, related documentation + * and any modifications thereto. Any use, reproduction, disclosure or + * distribution of this software and related documentation without an express + * license agreement from NVIDIA CORPORATION is strictly prohibited. + */ + + +#include "Apex.h" +#include "ApexSDKImpl.h" +#include "ProfilerCallback.h" +#include "PxTaskManager.h" +#include "PxCpuDispatcher.h" +#include "ApexCudaContextManager.h" +#if APEX_CUDA_SUPPORT && !defined(INSTALLER) +#include "CudaContextManager.h" +#include "PhysXDeviceSettings.h" +#endif +#include "PxCudaContextManager.h" +#include "PxErrorCallback.h" + +#if PX_PHYSICS_VERSION_MAJOR == 0 +#include "ThreadPool.h" +#endif + +namespace nvidia +{ +namespace apex +{ + +ApexSDKImpl* gApexSdk = NULL; // For an SDK singleton + +APEX_API ApexSDK* CALL_CONV GetApexSDK() +{ + return gApexSdk; +} +APEX_API ApexSDKIntl* CALL_CONV GetInternalApexSDK() +{ + return gApexSdk; +} + +ApexSDK* CreateApexSDK(const ApexSDKDesc& desc, ApexCreateError* errorCode, uint32_t APEXsdkVersion, PxAllocatorCallback* alloc) +{ + PX_UNUSED(alloc); + + if (errorCode) + { + *errorCode = APEX_CE_NO_ERROR; + } + + if (gApexSdk != NULL) + { + return gApexSdk; + } + + if (APEXsdkVersion != APEX_SDK_VERSION) + { + if (errorCode) + { + *errorCode = APEX_CE_WRONG_VERSION; + } + return NULL; + } + + if (!desc.isValid()) //this checks for SDK and cooking version mismatch! 
+ { + if (errorCode) + { + if (desc.physXSDKVersion != PX_PHYSICS_VERSION) + { + *errorCode = APEX_CE_WRONG_VERSION; + } + else + { + *errorCode = APEX_CE_DESCRIPTOR_INVALID; + } + } + return NULL; + } + + + PX_ASSERT(alloc == 0); + + gApexSdk = PX_NEW(ApexSDKImpl)(errorCode, APEXsdkVersion); + gApexSdk->init(desc); + + return gApexSdk; +} + +int GetSuggestedCudaDeviceOrdinal(PxErrorCallback& errc) +{ +#if APEX_CUDA_SUPPORT && !defined(INSTALLER) + return PhysXDeviceSettings::getSuggestedCudaDeviceOrdinal(errc); +#else + PX_UNUSED(errc); + return -1; +#endif +} + +PxCudaContextManager* CreateCudaContextManager(const PxCudaContextManagerDesc& desc, PxErrorCallback& errorCallback) +{ +#if APEX_CUDA_SUPPORT && !defined(INSTALLER) + return physx::createCudaContextManager(desc, errorCallback); +#else + PX_UNUSED(desc); + PX_UNUSED(errorCallback); + return NULL; +#endif +} + + +#if PX_PHYSICS_VERSION_MAJOR == 0 + +/* We route allocations of these objects through the APEX SDK because PXTASK objects + * require a foundation instance. 
+ */ + +PxCpuDispatcher* ApexSDKImpl::getDefaultThreadPool() +{ + if (!mApexThreadPool) + { + mApexThreadPool = createDefaultThreadPool(0); + } + + return mApexThreadPool; +} + +PxCpuDispatcher* ApexSDKImpl::createCpuDispatcher(uint32_t numThreads) +{ + PxCpuDispatcher* cd = createDefaultThreadPool(numThreads); + mUserAllocThreadPools.pushBack(cd); + return cd; +} + +void ApexSDKImpl::releaseCpuDispatcher(PxCpuDispatcher& cd) +{ + if (&cd == mApexThreadPool) + { + PX_DELETE(mApexThreadPool); + mApexThreadPool = 0; + return; + } + for (uint32_t i = 0; i < mUserAllocThreadPools.size(); i++) + { + if (&cd == mUserAllocThreadPools[i]) + { + PX_DELETE(&cd); + mUserAllocThreadPools.replaceWithLast(i); + return; + } + } +} + +#endif + +} +} // end namespace nvidia::apex diff --git a/APEX_1.4/framework/src/ApexCustomBufferIterator.cpp b/APEX_1.4/framework/src/ApexCustomBufferIterator.cpp new file mode 100644 index 00000000..c472c388 --- /dev/null +++ b/APEX_1.4/framework/src/ApexCustomBufferIterator.cpp @@ -0,0 +1,107 @@ +/* + * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved. + * + * NVIDIA CORPORATION and its licensors retain all intellectual property + * and proprietary rights in and to this software, related documentation + * and any modifications thereto. Any use, reproduction, disclosure or + * distribution of this software and related documentation without an express + * license agreement from NVIDIA CORPORATION is strictly prohibited. 
+ */ + + +#include "ApexCustomBufferIterator.h" +#include "PsString.h" + +namespace nvidia +{ +namespace apex +{ + +ApexCustomBufferIterator::ApexCustomBufferIterator() : + mData(NULL), + mElemSize(0), + mMaxTriangles(0) +{ +} + +void ApexCustomBufferIterator::setData(void* data, uint32_t elemSize, uint32_t maxTriangles) +{ + mData = (uint8_t*)data; + mElemSize = elemSize; + mMaxTriangles = maxTriangles; +} + +void ApexCustomBufferIterator::addCustomBuffer(const char* name, RenderDataFormat::Enum format, uint32_t offset) +{ + CustomBuffer buffer; + buffer.name = name; + buffer.offset = offset; + buffer.format = format; + + mCustomBuffers.pushBack(buffer); +} +void* ApexCustomBufferIterator::getVertex(uint32_t triangleIndex, uint32_t vertexIndex) const +{ + if (mData == NULL || triangleIndex >= mMaxTriangles) + { + return NULL; + } + + return mData + mElemSize * (triangleIndex * 3 + vertexIndex); +} +int32_t ApexCustomBufferIterator::getAttributeIndex(const char* attributeName) const +{ + if (attributeName == NULL || attributeName[0] == 0) + { + return -1; + } + + for (uint32_t i = 0; i < mCustomBuffers.size(); i++) + { + if (nvidia::strcmp(mCustomBuffers[i].name, attributeName) == 0) + { + return (int32_t)i; + } + } + return -1; +} +void* ApexCustomBufferIterator::getVertexAttribute(uint32_t triangleIndex, uint32_t vertexIndex, const char* attributeName, RenderDataFormat::Enum& outFormat) const +{ + outFormat = RenderDataFormat::UNSPECIFIED; + + uint8_t* elementData = (uint8_t*)getVertex(triangleIndex, vertexIndex); + if (elementData == NULL) + { + return NULL; + } + + + for (uint32_t i = 0; i < mCustomBuffers.size(); i++) + { + if (nvidia::strcmp(mCustomBuffers[i].name, attributeName) == 0) + { + outFormat = mCustomBuffers[i].format; + return elementData + mCustomBuffers[i].offset; + } + } + return NULL; +} + +void* ApexCustomBufferIterator::getVertexAttribute(uint32_t triangleIndex, uint32_t vertexIndex, uint32_t attributeIndex, RenderDataFormat::Enum& outFormat, 
const char*& outName) const +{ + outFormat = RenderDataFormat::UNSPECIFIED; + outName = NULL; + + uint8_t* elementData = (uint8_t*)getVertex(triangleIndex, vertexIndex); + if (elementData == NULL || attributeIndex >= mCustomBuffers.size()) + { + return NULL; + } + + outName = mCustomBuffers[attributeIndex].name; + outFormat = mCustomBuffers[attributeIndex].format; + return elementData + mCustomBuffers[attributeIndex].offset; +} + +} +} // end namespace nvidia::apex diff --git a/APEX_1.4/framework/src/ApexRenderDebug.cpp b/APEX_1.4/framework/src/ApexRenderDebug.cpp new file mode 100644 index 00000000..414be4d4 --- /dev/null +++ b/APEX_1.4/framework/src/ApexRenderDebug.cpp @@ -0,0 +1,567 @@ +/* + * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved. + * + * NVIDIA CORPORATION and its licensors retain all intellectual property + * and proprietary rights in and to this software, related documentation + * and any modifications thereto. Any use, reproduction, disclosure or + * distribution of this software and related documentation without an express + * license agreement from NVIDIA CORPORATION is strictly prohibited. 
+ */ + + +#include "ApexUsingNamespace.h" +#include "ApexRenderDebug.h" +#include "ApexRenderable.h" + +#pragma warning(disable:4996) +#pragma warning(disable:4100) +#pragma warning(disable:4189) + +#if PX_PHYSICS_VERSION_MAJOR == 3 +#include <PxRenderBuffer.h> +#endif // PX_PHYSICS_VERSION_MAJOR + +#include "RenderDebugInterface.h" +#include "UserRenderer.h" +#include "ApexSDKImpl.h" +#include "PxIntrinsics.h" +#include "PsString.h" +#include "RenderDebugTyped.h" + +namespace nvidia +{ +namespace apex +{ + +#if defined(WITHOUT_DEBUG_VISUALIZE) + +RenderDebugInterface* createApexRenderDebug(ApexSDKImpl* /*a*/) +{ + return NULL; +} +void releaseApexRenderDebug(RenderDebugInterface* /*n*/) +{ +} + +#else + +typedef physx::Array< RenderContext > RenderContextVector; +typedef physx::Array< UserRenderResource*> RenderResourceVector; + + +class ApexRenderDebug : public RenderDebugInterface, public ApexRWLockable, public UserAllocated +{ +public: + APEX_RW_LOCKABLE_BOILERPLATE + + ApexRenderDebug(ApexSDKImpl* sdk, RENDER_DEBUG::RenderDebugInterface* iface, bool useRemote) + : mRenderDebugIface(iface) + { + mApexSDK = sdk; + if (mRenderDebugUntyped == NULL) + { + RENDER_DEBUG::RenderDebug::Desc desc; + desc.runMode = useRemote ? 
RENDER_DEBUG::RenderDebug::RM_CLIENT : RENDER_DEBUG::RenderDebug::RM_LOCAL; + desc.foundation = mApexSDK->getFoundation(); + mRenderDebugUntyped = createRenderDebugExport(desc); + } + if (mRenderDebugUntyped) + { + mRenderDebug = mRenderDebugUntyped->getRenderDebugTyped(); + } + else + { + PX_ASSERT(0); + } + mWireFrameMaterial = INVALID_RESOURCE_ID; + mSolidShadedMaterial = INVALID_RESOURCE_ID; + mLastRenderSolidCount = 0; + mLastRenderLineCount = 0; + mRenderSolidCount = 0; + mRenderLineCount = 0; + mUseDebugRenderable = false; + } + + virtual RENDER_DEBUG::RenderDebugTyped* getRenderDebugInterface() + { + PX_ASSERT(mRenderDebugUntyped != NULL && mRenderDebug != NULL); + return mRenderDebug; + } + + virtual ~ApexRenderDebug(void) + { + if (mRenderDebug) + { + mRenderDebugUntyped->release(); + mRenderDebugUntyped = NULL; + } + // APEX specific stuff + { + RenderResourceVector::Iterator i; + for (i = mRenderLineResources.begin(); i != mRenderLineResources.end(); ++i) + { + UserRenderResource* resource = (*i); + PX_ASSERT(resource); + UserRenderVertexBuffer* vbuffer = resource->getVertexBuffer(0); + PX_ASSERT(vbuffer); + mApexSDK->getUserRenderResourceManager()->releaseResource(*resource); + mApexSDK->getUserRenderResourceManager()->releaseVertexBuffer(*vbuffer); + } + } + { + RenderResourceVector::Iterator i; + for (i = mRenderSolidResources.begin(); i != mRenderSolidResources.end(); ++i) + { + UserRenderResource* resource = (*i); + PX_ASSERT(resource); + UserRenderVertexBuffer* vbuffer = resource->getVertexBuffer(0); + PX_ASSERT(vbuffer); + mApexSDK->getUserRenderResourceManager()->releaseResource(*resource); + mApexSDK->getUserRenderResourceManager()->releaseVertexBuffer(*vbuffer); + } + } + mApexSDK->getInternalResourceProvider()->releaseResource(mWireFrameMaterial); + mApexSDK->getInternalResourceProvider()->releaseResource(mSolidShadedMaterial); + }; + + /* + \brief Method to support rendering to a legacy PhysX SDK DebugRenderable object instead + of to the 
APEX Render Resources API (i.e.: Renderable). + + This method is used to enable or disable the use of a legacy DebugRenderable. When enabled, + use the getDebugRenderable() method to get a legacy DebugRenerable object that will contain + all the debug output. + */ + virtual void setUseDebugRenderable(bool state) + { + mUseDebugRenderable = state; + if (state == false) + { +#if PX_PHYSICS_VERSION_MAJOR == 3 + mPxDebugTriangles.clear(); + mPxDebugLines.clear(); + mPxDebugTrianglesScreenSpace.clear(); + mPxDebugLinesScreenSpace.clear(); +#endif + } + } + + + +#if PX_PHYSICS_VERSION_MAJOR == 3 + + /* + \brief Method to support rendering to a legacy PhysX SDK PxRenderBuffer object instead + of to the APEX Render Resources API (i.e.: Renderable). + + When enabled with a call to setUseDebugRenderable(true), this method will return a legacy + DebugRenderable object that contains all of the output of the RenderDebug class. + */ + virtual void getRenderBuffer(PhysXRenderBuffer& renderable) + { + renderable.mNbPoints = 0; + renderable.mPoints = NULL; + renderable.mNbLines = mPxDebugLines.size(); + renderable.mLines = renderable.mNbLines ? &mPxDebugLines[0] : NULL; + renderable.mNbTriangles = mPxDebugTriangles.size(); + renderable.mTriangles = renderable.mNbTriangles ? &mPxDebugTriangles[0] : NULL; + renderable.mNbTexts = 0; + renderable.mTexts = NULL; + } + + /* + \brief Method to support rendering to a legacy PhysX SDK PxRenderBuffer object instead + of to the APEX Render Resources API (i.e.: Renderable). + + When enabled with a call to setUseDebugRenderable(true), this method will return a legacy + DebugRenderable object that contains all of the output of the RenderDebug class. + */ + virtual void getRenderBufferScreenSpace(PhysXRenderBuffer& renderable) + { + renderable.mNbPoints = 0; + renderable.mPoints = NULL; + renderable.mNbLines = mPxDebugLinesScreenSpace.size(); + renderable.mLines = renderable.mNbLines ? 
&mPxDebugLinesScreenSpace[0] : NULL; + renderable.mNbTriangles = mPxDebugTrianglesScreenSpace.size(); + renderable.mTriangles = renderable.mNbTriangles ? &mPxDebugTrianglesScreenSpace[0] : NULL; + renderable.mNbTexts = 0; + renderable.mTexts = NULL; + } + + + virtual void addDebugRenderable(const physx::PxRenderBuffer& renderBuffer) + { + // Points + mRenderDebug->pushRenderState(); + + const uint32_t color = mRenderDebug->getCurrentColor();; + const uint32_t arrowColor = mRenderDebug->getCurrentArrowColor(); + + const uint32_t numPoints = renderBuffer.getNbPoints(); + const physx::PxDebugPoint* points = renderBuffer.getPoints(); + for (uint32_t i = 0; i < numPoints; i++) + { + const physx::PxDebugPoint& point = points[i]; + mRenderDebug->setCurrentColor(point.color, arrowColor); + mRenderDebug->debugPoint(point.pos, 0.01f); + } + + // Lines + const uint32_t numLines = renderBuffer.getNbLines(); + const physx::PxDebugLine* lines = renderBuffer.getLines(); + for (uint32_t i = 0; i < numLines; i++) + { + const physx::PxDebugLine& line = lines[i]; + mRenderDebug->debugGradientLine(line.pos0, line.pos1, line.color0, line.color1); + } + + // Triangles + const uint32_t numTriangles = renderBuffer.getNbTriangles(); + const physx::PxDebugTriangle* triangles = renderBuffer.getTriangles(); + for (uint32_t i = 0; i < numTriangles; i++) + { + const physx::PxDebugTriangle& triangle = triangles[i]; + mRenderDebug->debugGradientTri(triangle.pos0, triangle.pos1, triangle.pos2, triangle.color0, triangle.color1, triangle.color2); + } + + // Texts + const uint32_t numTexts = renderBuffer.getNbTexts(); + const physx::PxDebugText* texts = renderBuffer.getTexts(); + for (uint32_t i = 0; i < numTexts; i++) + { + const physx::PxDebugText& text = texts[i]; + mRenderDebug->debugText(text.position, text.string); + } + + mRenderDebug->setCurrentColor(color, arrowColor); + + mRenderDebug->popRenderState(); + } +#endif // PX_PHYSICS_VERSION_MAJOR == 3 + + /** + \brief Release an object 
instance. + + Calling this will unhook the class and delete it from memory. + You should not keep any reference to this class instance after calling release + */ + virtual void release() + { + delete this; + } + + virtual void lockRenderResources() + { + + } + + /** + \brief Unlocks the renderable data of this Renderable actor. + + See locking semantics for xRenderDataProvider::lockRenderResources(). + */ + virtual void unlockRenderResources() + { + + } + + /** + \brief Update the renderable data of this Renderable actor. + + When called, this method will use the UserRenderResourceManager interface to inform the user + about its render resource needs. It will also call the writeBuffer() methods of various graphics + buffers. It must be called by the user each frame before any calls to dispatchRenderResources(). + If the actor is not being rendered, this function may also be skipped. + */ + virtual void updateRenderResources(bool /*rewriteBuffers*/ = false, void* /*userRenderData*/ = 0) + { + URR_SCOPE; + + mRenderSolidContexts.clear(); + mRenderLineContexts.clear(); + + // Free up the line draw vertex buffer resources if the debug renderer is now using a lot less memory than the last frame. + if (mRenderLineCount < (mLastRenderLineCount / 2)) + { + RenderResourceVector::Iterator i; + for (i = mRenderLineResources.begin(); i != mRenderLineResources.end(); ++i) + { + UserRenderResource* resource = (*i); + PX_ASSERT(resource); + UserRenderVertexBuffer* vbuffer = resource->getVertexBuffer(0); + PX_ASSERT(vbuffer); + mApexSDK->getUserRenderResourceManager()->releaseResource(*resource); + mApexSDK->getUserRenderResourceManager()->releaseVertexBuffer(*vbuffer); + } + mRenderLineResources.clear(); + } + // free up the solid shaded triangle vertex buffers if the debug renderer is now using a lot less memory than the last frame. 
+ if (mRenderSolidCount < mLastRenderSolidCount / 2) // if we have less than 1/2 the number of solid shaded triangles we did last frame, free up the resources. + { + RenderResourceVector::Iterator i; + for (i = mRenderSolidResources.begin(); i != mRenderSolidResources.end(); ++i) + { + UserRenderResource* resource = (*i); + PX_ASSERT(resource); + UserRenderVertexBuffer* vbuffer = resource->getVertexBuffer(0); + PX_ASSERT(vbuffer); + mApexSDK->getUserRenderResourceManager()->releaseResource(*resource); + mApexSDK->getUserRenderResourceManager()->releaseVertexBuffer(*vbuffer); + } + mRenderSolidResources.clear(); + } + + mLastRenderSolidCount = mRenderSolidCount; + mLastRenderLineCount = mRenderLineCount; + mRenderSolidCount = 0; + mRenderLineCount = 0; + +#if PX_PHYSICS_VERSION_MAJOR == 3 + mPxDebugLines.clear(); + mPxDebugTriangles.clear(); + mPxDebugLinesScreenSpace.clear(); + mPxDebugTrianglesScreenSpace.clear(); +#endif + } + + virtual void dispatchRenderResources(UserRenderer& renderer) + { + mRenderDebug->render(1.0f / 60.0f, mRenderDebugIface); + } + + virtual void debugRenderLines(uint32_t lcount, const RENDER_DEBUG::RenderDebugVertex* vertices, bool /*useZ*/, bool isScreenSpace) + { +#if PX_PHYSICS_VERSION_MAJOR == 3 + if (mUseDebugRenderable) + { + for (uint32_t i = 0; i < lcount; i++) + { + PxVec3 v1( vertices[0].mPos[0], vertices[0].mPos[1], vertices[0].mPos[2] ); + PxVec3 v2( vertices[1].mPos[0], vertices[1].mPos[1], vertices[1].mPos[2] ); + PxDebugLine l(v1,v2,vertices->mColor); + l.color1 = vertices[1].mColor; + if ( isScreenSpace ) + { + mPxDebugLinesScreenSpace.pushBack(l); + } + else + { + mPxDebugLines.pushBack(l); + } + vertices += 2; + } + } + else +#endif + { + mRenderLineCount += (lcount * 2); + if (mWireFrameMaterial == INVALID_RESOURCE_ID) + { + const char* mname = mApexSDK->getWireframeMaterial(); + ResID name_space = mApexSDK->getInternalResourceProvider()->createNameSpace(APEX_MATERIALS_NAME_SPACE); + mWireFrameMaterial = 
mApexSDK->getInternalResourceProvider()->createResource(name_space, mname, true); + } + + const uint32_t MAX_LINE_VERTEX = 2048; + + PX_ASSERT((lcount * 2) <= MAX_LINE_VERTEX); + + uint32_t rcount = (uint32_t)mRenderLineContexts.size(); + RenderContext context; + + UserRenderResource* resource; + + if (rcount < mRenderLineResources.size()) + { + resource = mRenderLineResources[rcount]; + } + else + { + UserRenderResourceDesc resourceDesc; + UserRenderVertexBufferDesc vbdesc; + vbdesc.hint = RenderBufferHint::DYNAMIC; + vbdesc.buffersRequest[RenderVertexSemantic::POSITION] = RenderDataFormat::FLOAT3; + vbdesc.buffersRequest[RenderVertexSemantic::COLOR] = RenderDataFormat::B8G8R8A8; + vbdesc.maxVerts = MAX_LINE_VERTEX; + resourceDesc.cullMode = RenderCullMode::NONE; + + for (uint32_t i = 0; i < RenderVertexSemantic::NUM_SEMANTICS; i++) + { + PX_ASSERT(vbdesc.buffersRequest[i] == RenderDataFormat::UNSPECIFIED || vertexSemanticFormatValid((RenderVertexSemantic::Enum)i, vbdesc.buffersRequest[i])); + } + + UserRenderVertexBuffer* vb = mApexSDK->getUserRenderResourceManager()->createVertexBuffer(vbdesc); + UserRenderVertexBuffer* vertexBuffers[1] = { vb }; + resourceDesc.vertexBuffers = vertexBuffers; + resourceDesc.numVertexBuffers = 1; + + resourceDesc.primitives = RenderPrimitiveType::LINES; + + resource = mApexSDK->getUserRenderResourceManager()->createResource(resourceDesc); + resource->setMaterial(mApexSDK->getInternalResourceProvider()->getResource(mWireFrameMaterial)); + mRenderLineResources.pushBack(resource); + } + + UserRenderVertexBuffer* vb = resource->getVertexBuffer(0); + + resource->setVertexBufferRange(0, lcount * 2); + + RenderVertexBufferData writeData; + writeData.setSemanticData(RenderVertexSemantic::POSITION, vertices[0].mPos, sizeof(RENDER_DEBUG::RenderDebugVertex), RenderDataFormat::FLOAT3); + writeData.setSemanticData(RenderVertexSemantic::COLOR, &vertices[0].mColor, sizeof(RENDER_DEBUG::RenderDebugVertex), RenderDataFormat::B8G8R8A8); + 
vb->writeBuffer(writeData, 0, lcount * 2); + + context.isScreenSpace = isScreenSpace; + context.local2world = PxMat44(PxIdentity); + context.renderResource = 0; + context.world2local = PxMat44(PxIdentity); + + mRenderLineContexts.pushBack(context); + } + + } + + virtual void debugRenderTriangles(uint32_t tcount, const RENDER_DEBUG::RenderDebugSolidVertex* vertices, bool /*useZ*/, bool isScreenSpace) + { +#if PX_PHYSICS_VERSION_MAJOR == 3 + if (mUseDebugRenderable) + { + for (uint32_t i = 0; i < tcount; i++) + { + PxVec3 v1( vertices[0].mPos[0], vertices[0].mPos[1], vertices[0].mPos[2] ); + PxVec3 v2( vertices[1].mPos[0], vertices[1].mPos[1], vertices[1].mPos[2] ); + PxVec3 v3( vertices[2].mPos[0], vertices[2].mPos[1], vertices[2].mPos[2] ); + PxDebugTriangle t( v1,v2, v3, vertices->mColor ); + t.color1 = vertices[1].mColor; + t.color2 = vertices[2].mColor; + if ( isScreenSpace ) + { + mPxDebugTrianglesScreenSpace.pushBack(t); + } + else + { + mPxDebugTriangles.pushBack(t); + } + vertices += 3; + } + } + else +#endif + { + + mRenderSolidCount += (tcount * 3); + + if (mSolidShadedMaterial == INVALID_RESOURCE_ID) + { + const char* mname = mApexSDK->getSolidShadedMaterial(); + ResID name_space = mApexSDK->getInternalResourceProvider()->createNameSpace(APEX_MATERIALS_NAME_SPACE); + mSolidShadedMaterial = mApexSDK->getInternalResourceProvider()->createResource(name_space, mname, true); + } + + const uint32_t MAX_SOLID_VERTEX = 2048; + PX_ASSERT((tcount * 3) <= MAX_SOLID_VERTEX); + + uint32_t rcount = (uint32_t)mRenderSolidContexts.size(); + RenderContext context; + + UserRenderResource* resource; + + if (rcount < mRenderSolidResources.size()) + { + resource = mRenderSolidResources[rcount]; + } + else + { + UserRenderResourceDesc renderResourceDesc; + UserRenderVertexBufferDesc vbdesc; + vbdesc.hint = RenderBufferHint::DYNAMIC; + vbdesc.buffersRequest[RenderVertexSemantic::POSITION] = RenderDataFormat::FLOAT3; + vbdesc.buffersRequest[RenderVertexSemantic::NORMAL] = 
RenderDataFormat::FLOAT3; + vbdesc.buffersRequest[RenderVertexSemantic::COLOR] = RenderDataFormat::B8G8R8A8; + vbdesc.maxVerts = MAX_SOLID_VERTEX; + renderResourceDesc.cullMode = RenderCullMode::COUNTER_CLOCKWISE; + + for (uint32_t i = 0; i < RenderVertexSemantic::NUM_SEMANTICS; i++) + { + PX_ASSERT(vbdesc.buffersRequest[i] == RenderDataFormat::UNSPECIFIED || vertexSemanticFormatValid((RenderVertexSemantic::Enum)i, vbdesc.buffersRequest[i])); + } + + UserRenderVertexBuffer* vb = mApexSDK->getUserRenderResourceManager()->createVertexBuffer(vbdesc); + UserRenderVertexBuffer* vertexBuffers[1] = { vb }; + renderResourceDesc.vertexBuffers = vertexBuffers; + renderResourceDesc.numVertexBuffers = 1; + + renderResourceDesc.primitives = RenderPrimitiveType::TRIANGLES; + resource = mApexSDK->getUserRenderResourceManager()->createResource(renderResourceDesc); + resource->setMaterial(mApexSDK->getInternalResourceProvider()->getResource(mSolidShadedMaterial)); + mRenderSolidResources.pushBack(resource); + } + + UserRenderVertexBuffer* vb = resource->getVertexBuffer(0); + + resource->setVertexBufferRange(0, tcount * 3); + + RenderVertexBufferData writeData; + writeData.setSemanticData(RenderVertexSemantic::POSITION, vertices[0].mPos, sizeof(RENDER_DEBUG::RenderDebugSolidVertex), RenderDataFormat::FLOAT3); + writeData.setSemanticData(RenderVertexSemantic::NORMAL, vertices[0].mNormal, sizeof(RENDER_DEBUG::RenderDebugSolidVertex), RenderDataFormat::FLOAT3); + writeData.setSemanticData(RenderVertexSemantic::COLOR, &vertices[0].mColor, sizeof(RENDER_DEBUG::RenderDebugSolidVertex), RenderDataFormat::B8G8R8A8); + vb->writeBuffer(writeData, 0, tcount * 3); + + context.isScreenSpace = isScreenSpace; + context.local2world = PxMat44(PxIdentity); + context.renderResource = 0; + context.world2local = PxMat44(PxIdentity); + + mRenderSolidContexts.pushBack(context); + } + } + + virtual PxBounds3 getBounds() const { return PxBounds3(); } + + virtual void releaseRenderDebug(void) + { + 
release(); + } +private: + static RENDER_DEBUG::RenderDebug* mRenderDebugUntyped; + RENDER_DEBUG::RenderDebugTyped* mRenderDebug; + uint32_t mRenderSolidCount; + uint32_t mRenderLineCount; + + uint32_t mLastRenderLineCount; + uint32_t mLastRenderSolidCount; + ApexSDKImpl* mApexSDK; + ResID mWireFrameMaterial; + ResID mSolidShadedMaterial; + RenderResourceVector mRenderLineResources; + RenderContextVector mRenderLineContexts; + + RenderResourceVector mRenderSolidResources; + RenderContextVector mRenderSolidContexts; + + bool mUseDebugRenderable; +#if PX_PHYSICS_VERSION_MAJOR == 3 + physx::Array<PxDebugLine> mPxDebugLines; + physx::Array<PxDebugTriangle> mPxDebugTriangles; + physx::Array<PxDebugLine> mPxDebugLinesScreenSpace; + physx::Array<PxDebugTriangle> mPxDebugTrianglesScreenSpace; +#endif + RENDER_DEBUG::RenderDebugInterface* mRenderDebugIface; +}; + +RENDER_DEBUG::RenderDebug* ApexRenderDebug::mRenderDebugUntyped = NULL; + +RenderDebugInterface* createApexRenderDebug(ApexSDKImpl* a, RENDER_DEBUG::RenderDebugInterface* iface, bool useRemote) +{ + return PX_NEW(ApexRenderDebug)(a, iface, useRemote); +} + +void releaseApexRenderDebug(RenderDebugInterface* n) +{ + delete static_cast< ApexRenderDebug*>(n); +} + +#endif // WITHOUT_DEBUG_VISUALIZE + +} +} // end namespace nvidia::apex diff --git a/APEX_1.4/framework/src/ApexRenderMeshActor.cpp b/APEX_1.4/framework/src/ApexRenderMeshActor.cpp new file mode 100644 index 00000000..a7bdc298 --- /dev/null +++ b/APEX_1.4/framework/src/ApexRenderMeshActor.cpp @@ -0,0 +1,2784 @@ +/* + * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved. + * + * NVIDIA CORPORATION and its licensors retain all intellectual property + * and proprietary rights in and to this software, related documentation + * and any modifications thereto. Any use, reproduction, disclosure or + * distribution of this software and related documentation without an express + * license agreement from NVIDIA CORPORATION is strictly prohibited. 
+ */ + + +#include "ApexRenderMeshActor.h" +#include "FrameworkPerfScope.h" +#include "PsAllocator.h" +#include "RenderDebugInterface.h" +#include "DebugRenderParams.h" + +#include "RenderContext.h" +#include "RenderMeshActorDesc.h" +#include "UserRenderBoneBuffer.h" +#include "UserRenderBoneBufferDesc.h" +#include "UserRenderIndexBuffer.h" +#include "UserRenderIndexBufferDesc.h" +#include "UserRenderResource.h" +#include "UserRenderResourceDesc.h" +#include "UserRenderVertexBuffer.h" +#include "UserRenderer.h" + +#include "PsIntrinsics.h" +#include "PxProfiler.h" + +#define VERBOSE 0 + +namespace nvidia +{ +namespace apex +{ +// ApexRenderMeshActor methods + +ApexRenderMeshActor::ApexRenderMeshActor(const RenderMeshActorDesc& desc, ApexRenderMeshAsset& asset, ResourceList& list) : + mRenderMeshAsset(&asset), + mIndexBufferHint(RenderBufferHint::STATIC), + mMaxInstanceCount(0), + mInstanceCount(0), + mInstanceOffset(0), + mInstanceBuffer(NULL), + mRenderResource(NULL), + mRenderWithoutSkinning(false), + mForceBoneIndexChannel(false), + mApiVisibilityChanged(false), + mPartVisibilityChanged(false), + mInstanceCountChanged(false), + mKeepVisibleBonesPacked(false), + mForceFallbackSkinning(false), + mBonePosesDirty(false), + mOneUserVertexBufferChanged(false), + mBoneBufferInUse(false), + mReleaseResourcesIfNothingToRender(true), + mCreateRenderResourcesAfterInit(false), + mBufferVisibility(false), + mKeepPreviousFrameBoneBuffer(false), + mPreviousFrameBoneBufferValid(false), + mSkinningMode(RenderMeshActorSkinningMode::Default) +{ +#if ENABLE_INSTANCED_MESH_CLEANUP_HACK + mOrderedInstanceTemp = 0; + mOrderedInstanceTempSize = 0; +#endif + list.add(*this); + init(desc, (uint16_t) asset.getPartCount(), (uint16_t) asset.getBoneCount()); +} + +ApexRenderMeshActor::~ApexRenderMeshActor() +{ +#if ENABLE_INSTANCED_MESH_CLEANUP_HACK + if (mOrderedInstanceTemp) + { + PX_FREE(mOrderedInstanceTemp); + mOrderedInstanceTemp = 0; + } +#endif +} + +void 
ApexRenderMeshActor::release() +{ + if (mInRelease) + { + return; + } + mInRelease = true; + mRenderMeshAsset->releaseActor(*this); +} + +void ApexRenderMeshActor::destroy() +{ + ApexActor::destroy(); + + releaseRenderResources(); + + // Release named resources + ResourceProviderIntl* resourceProvider = GetInternalApexSDK()->getInternalResourceProvider(); + if (resourceProvider != NULL) + { + for (uint32_t i = 0; i < mSubmeshData.size(); i++) + { + if (mSubmeshData[i].materialID != mRenderMeshAsset->mMaterialIDs[i]) + { + resourceProvider->releaseResource(mSubmeshData[i].materialID); + } + mSubmeshData[i].materialID = INVALID_RESOURCE_ID; + mSubmeshData[i].material = NULL; + mSubmeshData[i].isMaterialPointerValid = false; + } + } + + delete this; +} + + +void ApexRenderMeshActor::loadMaterial(SubmeshData& submeshData) +{ + ResourceProviderIntl* resourceProvider = GetInternalApexSDK()->getInternalResourceProvider(); + submeshData.material = resourceProvider->getResource(submeshData.materialID); + submeshData.isMaterialPointerValid = true; + + if (submeshData.material != NULL) + { + UserRenderResourceManager* rrm = GetInternalApexSDK()->getUserRenderResourceManager(); + submeshData.maxBonesPerMaterial = rrm->getMaxBonesForMaterial(submeshData.material); + + if (submeshData.maxBonesPerMaterial == 0) + { + submeshData.maxBonesPerMaterial = mRenderMeshAsset->getBoneCount(); + } + } +} + + +void ApexRenderMeshActor::init(const RenderMeshActorDesc& desc, uint16_t partCount, uint16_t boneCount) +{ + // TODO - LRR - This happened once, it shouldn't happen any more, let me know if it does + //PX_ASSERT(boneCount != 0); + //if (boneCount == 0) + // boneCount = partCount; + + mRenderWithoutSkinning = desc.renderWithoutSkinning; + mForceBoneIndexChannel = desc.forceBoneIndexChannel; + + if (mRenderWithoutSkinning) + { + boneCount = 1; + } + + const uint32_t oldBoneCount = mTransforms.size(); + mTransforms.resize(boneCount); + for (uint32_t i = oldBoneCount; i < boneCount; ++i) 
+ { + mTransforms[i] = PxMat44(PxIdentity); + } + + mVisiblePartsForAPI.reserve(partCount); + mVisiblePartsForAPI.lockCapacity(true); + mVisiblePartsForRendering.reserve(partCount); + mVisiblePartsForRendering.reset(); // size to 0, without reallocating + + mBufferVisibility = desc.bufferVisibility; + + mKeepPreviousFrameBoneBuffer = desc.keepPreviousFrameBoneBuffer; + mPreviousFrameBoneBufferValid = false; + + mApiVisibilityChanged = true; + mPartVisibilityChanged = !mBufferVisibility; + + for (uint32_t i = 0; i < partCount; ++i) + { + setVisibility(desc.visible, (uint16_t) i); + } + + mIndexBufferHint = desc.indexBufferHint; + mMaxInstanceCount = desc.maxInstanceCount; + mInstanceCount = 0; + mInstanceOffset = 0; + mInstanceBuffer = NULL; + + if (desc.keepVisibleBonesPacked && !mRenderWithoutSkinning) + { + if (mRenderMeshAsset->getBoneCount() == mRenderMeshAsset->getPartCount()) + { + mKeepVisibleBonesPacked = true; + } + else + { + APEX_INVALID_PARAMETER("RenderMeshActorDesc::keepVisibleBonesPacked is only allowed when the number of bones (%d) equals the number of parts (%d)\n", + mRenderMeshAsset->getBoneCount(), mRenderMeshAsset->getPartCount()); + } + } + + if (desc.forceFallbackSkinning) + { + if (mKeepVisibleBonesPacked) + { + APEX_INVALID_PARAMETER("RenderMeshActorDesc::forceFallbackSkinning can not be used when RenderMeshActorDesc::keepVisibleBonesPacked is also used!\n"); + } + else + { + mForceFallbackSkinning = true; + } + } + + ResourceProviderIntl* resourceProvider = GetInternalApexSDK()->getInternalResourceProvider(); + ResID materialNS = GetInternalApexSDK()->getMaterialNameSpace(); + + bool loadMaterialsOnActorCreation = !GetInternalApexSDK()->getRMALoadMaterialsLazily(); + + if (mRenderWithoutSkinning) + { + // make sure that createRenderResources() is called in this special case! 
+ mCreateRenderResourcesAfterInit = true; + } + + mSubmeshData.reserve(mRenderMeshAsset->getSubmeshCount()); + for (uint32_t submeshIndex = 0; submeshIndex < mRenderMeshAsset->getSubmeshCount(); submeshIndex++) + { + SubmeshData submeshData; + + // Resolve override material names using the NRP... + if (submeshIndex < desc.overrideMaterialCount && resourceProvider != NULL) + { + submeshData.materialID = resourceProvider->createResource(materialNS, desc.overrideMaterials[submeshIndex]);; + } + else + { + submeshData.materialID = mRenderMeshAsset->mMaterialIDs[submeshIndex]; + } + + submeshData.maxBonesPerMaterial = 0; + + if (loadMaterialsOnActorCreation) + { + loadMaterial(submeshData); + } + + mSubmeshData.pushBack(submeshData); + } +} + +bool ApexRenderMeshActor::setVisibility(bool visible, uint16_t partIndex) +{ + WRITE_ZONE(); + uint32_t oldRank = UINT32_MAX, newRank = UINT32_MAX; + + bool changed; + + if (visible) + { + changed = mVisiblePartsForAPI.useAndReturnRanks(partIndex, newRank, oldRank); + if (changed) + { + mApiVisibilityChanged = true; + if (!mBufferVisibility) + { + mPartVisibilityChanged = true; + } + if (mKeepVisibleBonesPacked && newRank != oldRank) + { + mTMSwapBuffer.pushBack(newRank << 16 | oldRank); + } + } + } + else + { + changed = mVisiblePartsForAPI.freeAndReturnRanks(partIndex, newRank, oldRank); + if (changed) + { + mApiVisibilityChanged = true; + if (!mBufferVisibility) + { + mPartVisibilityChanged = true; + } + if (mKeepVisibleBonesPacked && newRank != oldRank) + { + mTMSwapBuffer.pushBack(newRank << 16 | oldRank); + } + } + } + + return changed; +} + +bool ApexRenderMeshActor::getVisibilities(uint8_t* visibilityArray, uint32_t visibilityArraySize) const +{ + READ_ZONE(); + uint8_t changed = 0; + const uint32_t numParts = PxMin(mRenderMeshAsset->getPartCount(), visibilityArraySize); + for (uint32_t index = 0; index < numParts; ++index) + { + const uint8_t newVisibility = (uint8_t)isVisible((uint16_t) index); + changed |= newVisibility 
^(*visibilityArray); + *visibilityArray++ = newVisibility; + } + return changed != 0; +} + +void ApexRenderMeshActor::updateRenderResources(bool rewriteBuffers, void* userRenderData) +{ + updateRenderResources(!mRenderWithoutSkinning, rewriteBuffers, userRenderData); +} + +void ApexRenderMeshActor::updateRenderResources(bool useBones, bool rewriteBuffers, void* userRenderData) +{ + URR_SCOPE; + +#if VERBOSE > 1 + printf("updateRenderResources(useBones=%s, rewriteBuffers=%s, userRenderData=0x%p)\n", useBones ? "true" : "false", rewriteBuffers ? "true" : "false", userRenderData); +#endif + + UserRenderResourceManager* rrm = GetInternalApexSDK()->getUserRenderResourceManager(); + + // fill out maxBonesPerMaterial (if it hasn't already been filled out). Also create fallback skinning if necessary. + for (uint32_t submeshIndex = 0; submeshIndex < mSubmeshData.size(); submeshIndex++) + { + SubmeshData& submeshData = mSubmeshData[submeshIndex]; + + if (submeshData.maxBonesPerMaterial == 0 && rrm != NULL) + { + if (!submeshData.isMaterialPointerValid) + { + // this should only be reached, when renderMeshActorLoadMaterialsLazily is true. 
+ // URR may not be called asynchronously in that case (for example in a render thread) + ResourceProviderIntl* nrp = GetInternalApexSDK()->getInternalResourceProvider(); + submeshData.material = nrp->getResource(submeshData.materialID); + submeshData.isMaterialPointerValid = true; + } + + submeshData.maxBonesPerMaterial = rrm->getMaxBonesForMaterial(submeshData.material); + + if (submeshData.maxBonesPerMaterial == 0) + { + submeshData.maxBonesPerMaterial = mRenderMeshAsset->getBoneCount(); + } + } + + bool needsFallbackSkinning = mForceFallbackSkinning || submeshData.maxBonesPerMaterial < mTransforms.size(); + if (needsFallbackSkinning && !mKeepVisibleBonesPacked && submeshData.fallbackSkinningMemory == NULL) + { + createFallbackSkinning(submeshIndex); + } + } + + PX_PROFILE_ZONE("ApexRenderMesh::updateRenderResources", GetInternalApexSDK()->getContextId()); + + const bool invisible = visiblePartCount() == 0; + const bool instanceless = mMaxInstanceCount > 0 && mInstanceCount == 0; + if ((mReleaseResourcesIfNothingToRender && ((mPartVisibilityChanged && invisible) || (mInstanceCountChanged && instanceless))) || rewriteBuffers) + { + // First send out signals that the resource is no longer needed. 
+ for (uint32_t submeshIndex = 0; submeshIndex < mSubmeshData.size(); ++submeshIndex) + { + SubmeshData& submeshData = mSubmeshData[submeshIndex]; + for (uint32_t i = 0; i < submeshData.renderResources.size(); ++i) + { + UserRenderResource* renderResource = submeshData.renderResources[i].resource; + if (renderResource != NULL) + { + if (renderResource->getBoneBuffer() != NULL) + { + renderResource->setBoneBufferRange(0, 0); + } + if (renderResource->getInstanceBuffer() != NULL) + { + renderResource->setInstanceBufferRange(0, 0); + } + } + } + } + + // Now release the resources + releaseRenderResources(); + mPartVisibilityChanged = false; + mBonePosesDirty = false; + mInstanceCountChanged = false; + + // Rewrite buffers condition + if (rewriteBuffers) + { + mCreateRenderResourcesAfterInit = true; // createRenderResources + mPartVisibilityChanged = true; // writeBuffer for submesh data + } + + return; + } + + if (mCreateRenderResourcesAfterInit || mOneUserVertexBufferChanged || mPartVisibilityChanged || mBoneBufferInUse != useBones) + { + createRenderResources(useBones, userRenderData); + mCreateRenderResourcesAfterInit = false; + } + if (mRenderResource) + { + mRenderResource->setInstanceBufferRange(mInstanceOffset, mInstanceCount); + } + + PX_ASSERT(mSubmeshData.size() == mRenderMeshAsset->getSubmeshCount()); + + for (uint32_t submeshIndex = 0; submeshIndex < mRenderMeshAsset->getSubmeshCount(); ++submeshIndex) + { + SubmeshData& submeshData = mSubmeshData[submeshIndex]; + + if (submeshData.indexBuffer == NULL) + { + continue; + } + + if (mPartVisibilityChanged || submeshData.staticColorReplacementDirty) + { + updatePartVisibility(submeshIndex, useBones, userRenderData); + submeshData.staticColorReplacementDirty = false; + } + if (mBonePosesDirty) + { + if (submeshData.userDynamicVertexBuffer && !submeshData.userSpecifiedData) + { + updateFallbackSkinning(submeshIndex); + } + + // Set up the previous bone buffer, if requested and available. 
If we're packing bones, we need to do a remapping + PX_ASSERT(!mPreviousFrameBoneBufferValid || mTransformsLastFrame.size() == mTransforms.size()); + const uint32_t tmBufferSize = mKeepVisibleBonesPacked ? getRenderVisiblePartCount() : mTransforms.size(); + if (mPreviousFrameBoneBufferValid && mTransformsLastFrame.size() == mTransforms.size() && mKeepVisibleBonesPacked) + { + mRemappedPreviousBoneTMs.resize(tmBufferSize); + for (uint32_t tmNum = 0; tmNum < tmBufferSize; ++tmNum) + { + mRemappedPreviousBoneTMs[tmNum] = mTransformsLastFrame[mVisiblePartsForAPILastFrame.getRank(mVisiblePartsForAPI.usedIndices()[tmNum])]; + } + } + else + { + mRemappedPreviousBoneTMs.resize(0); + } + + updateBonePoses(submeshIndex); + + // move this under the render lock because the fracture buffer processing accesses these arrays + // this used to be at the end of dispatchRenderResources + if (mKeepPreviousFrameBoneBuffer) + { + PX_ASSERT(mTransforms.size() != 0); + mTransformsLastFrame = mTransforms; + mVisiblePartsForAPILastFrame = mVisiblePartsForAPI; + mPreviousFrameBoneBufferValid = true; + } + } + + if (submeshData.userDynamicVertexBuffer && (submeshData.fallbackSkinningDirty || submeshData.userVertexBufferAlwaysDirty)) + { + writeUserBuffers(submeshIndex); + submeshData.fallbackSkinningDirty = false; + } + + if (mMaxInstanceCount) + { + updateInstances(submeshIndex); + } + + if (!submeshData.isMaterialPointerValid) + { + // this should only be reached, when renderMeshActorLoadMaterialsLazily is true. 
+ // URR may not be called asynchronously in that case (for example in a render thread) + ResourceProviderIntl* nrp = GetInternalApexSDK()->getInternalResourceProvider(); + submeshData.material = nrp->getResource(submeshData.materialID); + submeshData.isMaterialPointerValid = true; + } + + for (uint32_t i = 0; i < submeshData.renderResources.size(); ++i) + { + UserRenderResource* res = submeshData.renderResources[i].resource; + if (res != NULL) + { + // LRR - poor workaround for http://nvbugs/534501, you'll crash here if you have more than 60 bones/material + // and keepVisibleBonesPacked == false + res->setMaterial(submeshData.material); + } + } + } + mBonePosesDirty = false; + mPartVisibilityChanged = false; +} + + + +void ApexRenderMeshActor::dispatchRenderResources(UserRenderer& renderer) +{ + dispatchRenderResources(renderer, PxMat44(PxIdentity)); +} + + + +void ApexRenderMeshActor::dispatchRenderResources(UserRenderer& renderer, const PxMat44& globalPose) +{ + PX_PROFILE_ZONE("ApexRenderMesh::dispatchRenderResources", GetInternalApexSDK()->getContextId()); + + RenderContext context; + + // Assign the transform to the context when there is 1 part and no instancing + + // if there are no parts to render, return early + // else if using instancing and there are not instances, return early + // else if not using instancing and there is just 1 part (no bone buffer), save the transform to the context + // else (using instancing and/or multiple parts), just assign identity to context + if (mRenderMeshAsset->getPartCount() == 0 && !mRenderMeshAsset->getOpaqueMesh()) + { + return; + } + else if (mInstanceCount == 0 && mMaxInstanceCount > 0) + { + return; + } + else if (mMaxInstanceCount == 0 && mTransforms.size() == 1) + { + context.local2world = globalPose * mTransforms[0]; // provide context for non-instanced ARMs with a single bone + context.world2local = context.local2world.inverseRT(); + } + else + { + context.local2world = globalPose; + context.world2local = 
globalPose.inverseRT(); + } + if (mRenderMeshAsset->getOpaqueMesh()) + { + if (mRenderResource) + { + context.renderResource = mRenderResource; + renderer.renderResource(context); + } + } + else + { + for (uint32_t submeshIndex = 0; submeshIndex < mSubmeshData.size(); ++submeshIndex) + { + for (uint32_t i = 0; i < mSubmeshData[submeshIndex].renderResources.size(); ++i) + { + context.renderResource = mSubmeshData[submeshIndex].renderResources[i].resource; + + // no reason to render if we don't have any indices + if ((mSubmeshData[submeshIndex].indexBuffer && (mSubmeshData[submeshIndex].visibleTriangleCount == 0)) || (mSubmeshData[submeshIndex].renderResources[i].vertexCount == 0)) + { + continue; + } + + if (context.renderResource) + { + context.renderMeshName = mRenderMeshAsset->getName(); + renderer.renderResource(context); + } + } + } + } +} + + + +void ApexRenderMeshActor::addVertexBuffer(uint32_t submeshIndex, bool alwaysDirty, PxVec3* position, PxVec3* normal, PxVec4* tangents) +{ +#if VERBOSE + GetInternalApexSDK()->getErrorCallback().reportError(PxErrorCode::eNO_ERROR, "addVertexBuffer\n", __FILE__, __LINE__); + printf("addVertexBuffer(submeshIndex=%d)\n", submeshIndex); +#endif + if (submeshIndex < mSubmeshData.size()) + { + SubmeshData& submeshData = mSubmeshData[submeshIndex]; + + if (submeshData.userSpecifiedData) + { + APEX_INVALID_PARAMETER("Cannot add user buffer to submesh %d, it's already assigned!", submeshIndex); + } + else + { + submeshData.userSpecifiedData = true; + submeshData.userPositions = position; + submeshData.userNormals = normal; + submeshData.userTangents4 = tangents; + + submeshData.userVertexBufferAlwaysDirty = alwaysDirty; + mOneUserVertexBufferChanged = true; + } + } +} + + + +void ApexRenderMeshActor::removeVertexBuffer(uint32_t submeshIndex) +{ +#if VERBOSE + GetInternalApexSDK()->getErrorCallback().reportError(PxErrorCode::eNO_ERROR, "removeVertexBuffer\n", __FILE__, __LINE__); + printf("removeVertexBuffer(submeshIndex=%d)\n", 
submeshIndex); +#endif + if (submeshIndex < mSubmeshData.size()) + { + SubmeshData& submeshData = mSubmeshData[submeshIndex]; + + if (!submeshData.userSpecifiedData) + { + APEX_INVALID_PARAMETER("Cannot remove user buffer to submesh %d, it's not assigned!", submeshIndex); + } + else + { + submeshData.userSpecifiedData = false; + submeshData.userPositions = NULL; + submeshData.userNormals = NULL; + submeshData.userTangents4 = NULL; + + submeshData.userVertexBufferAlwaysDirty = false; + mOneUserVertexBufferChanged = true; + + if (submeshData.fallbackSkinningMemory != NULL) + { + distributeFallbackData(submeshIndex); + } + } + } +} + + + +void ApexRenderMeshActor::setStaticPositionReplacement(uint32_t submeshIndex, const PxVec3* staticPositions) +{ + PX_ASSERT(staticPositions != NULL); + + PX_ASSERT(submeshIndex < mSubmeshData.size()); + if (submeshIndex < mSubmeshData.size()) + { + SubmeshData& submeshData = mSubmeshData[submeshIndex]; + + PX_ASSERT(submeshData.staticPositionReplacement == NULL); + submeshData.staticPositionReplacement = staticPositions; + + PX_ASSERT(submeshData.staticBufferReplacement == NULL); + PX_ASSERT(submeshData.dynamicBufferReplacement == NULL); + } +} + +void ApexRenderMeshActor::setStaticColorReplacement(uint32_t submeshIndex, const ColorRGBA* staticColors) +{ + PX_ASSERT(staticColors != NULL); + + PX_ASSERT(submeshIndex < mSubmeshData.size()); + if (submeshIndex < mSubmeshData.size()) + { + SubmeshData& submeshData = mSubmeshData[submeshIndex]; + + submeshData.staticColorReplacement = staticColors; + submeshData.staticColorReplacementDirty = true; + } +} + + + +void ApexRenderMeshActor::setInstanceBuffer(UserRenderInstanceBuffer* instBuf) +{ + WRITE_ZONE(); + mInstanceBuffer = instBuf; + + for (nvidia::Array<SubmeshData>::Iterator it = mSubmeshData.begin(), end = mSubmeshData.end(); it != end; ++it) + { + it->instanceBuffer = mInstanceBuffer; + it->userIndexBufferChanged = true; + } + + mOneUserVertexBufferChanged = true; +} + +void 
ApexRenderMeshActor::setMaxInstanceCount(uint32_t count) +{ + WRITE_ZONE(); + mMaxInstanceCount = count; +} + +void ApexRenderMeshActor::setInstanceBufferRange(uint32_t from, uint32_t count) +{ + WRITE_ZONE(); + mInstanceOffset = from; + mInstanceCountChanged = count != mInstanceCount; + mInstanceCount = count < mMaxInstanceCount ? count : mMaxInstanceCount; +} + + + +void ApexRenderMeshActor::getLodRange(float& min, float& max, bool& intOnly) const +{ + READ_ZONE(); + PX_UNUSED(min); + PX_UNUSED(max); + PX_UNUSED(intOnly); + APEX_INVALID_OPERATION("RenderMeshActor does not support this operation"); +} + + + +float ApexRenderMeshActor::getActiveLod() const +{ + READ_ZONE(); + APEX_INVALID_OPERATION("RenderMeshActor does not support this operation"); + return -1.0f; +} + +void ApexRenderMeshActor::forceLod(float lod) +{ + WRITE_ZONE(); + PX_UNUSED(lod); + APEX_INVALID_OPERATION("RenderMeshActor does not support this operation"); +} + + + +void ApexRenderMeshActor::createRenderResources(bool useBones, void* userRenderData) +{ +#if VERBOSE + printf("createRenderResources(useBones=%s, userRenderData=0x%p)\n", useBones ? 
"true" : "false", userRenderData); +#endif + + PX_PROFILE_ZONE("ApexRenderMesh::createRenderResources", GetInternalApexSDK()->getContextId()); + + if (mRenderMeshAsset->getOpaqueMesh()) + { + if (mRenderResource == NULL || mRenderResource->getInstanceBuffer() != mInstanceBuffer) + { + UserRenderResourceManager* rrm = GetInternalApexSDK()->getUserRenderResourceManager(); + if (mRenderResource != NULL) + { + rrm->releaseResource(*mRenderResource); + mRenderResource = NULL; + } + + UserRenderResourceDesc desc; + desc.instanceBuffer = mInstanceBuffer; + desc.opaqueMesh = mRenderMeshAsset->getOpaqueMesh(); + desc.userRenderData = userRenderData; + mRenderResource = rrm->createResource(desc); + } + } + + PX_ASSERT(mSubmeshData.size() == mRenderMeshAsset->getSubmeshCount()); + + UserRenderResourceManager* rrm = GetInternalApexSDK()->getUserRenderResourceManager(); + + bool createAndFillSharedVertexBuffersAll = mOneUserVertexBufferChanged; + if (mRenderMeshAsset->mRuntimeSubmeshData.empty()) + { + mRenderMeshAsset->mRuntimeSubmeshData.resize(mRenderMeshAsset->getSubmeshCount()); + memset(mRenderMeshAsset->mRuntimeSubmeshData.begin(), 0, sizeof(ApexRenderMeshAsset::SubmeshData) * mRenderMeshAsset->mRuntimeSubmeshData.size()); + createAndFillSharedVertexBuffersAll = true; + } + + bool fill2ndVertexBuffersAll = false; + if (!mPerActorVertexBuffers.size()) + { + // Create a separate (instanced) buffer for bone indices and/or colors + fill2ndVertexBuffersAll = mKeepVisibleBonesPacked; + for (uint32_t submeshIndex = 0; !fill2ndVertexBuffersAll && submeshIndex < mRenderMeshAsset->getSubmeshCount(); ++submeshIndex) + { + fill2ndVertexBuffersAll = (mSubmeshData[submeshIndex].staticColorReplacement != NULL); + } + if (fill2ndVertexBuffersAll) + { + mPerActorVertexBuffers.resize(mRenderMeshAsset->getSubmeshCount()); + } + } + + PX_ASSERT(mRenderMeshAsset->mRuntimeSubmeshData.size() == mSubmeshData.size()); + PX_ASSERT(mRenderMeshAsset->getSubmeshCount() == mSubmeshData.size()); + for 
(uint32_t submeshIndex = 0; submeshIndex < mRenderMeshAsset->getSubmeshCount(); ++submeshIndex) + { + ApexRenderSubmesh& submesh = *mRenderMeshAsset->mSubmeshes[submeshIndex]; + SubmeshData& submeshData = mSubmeshData[submeshIndex]; + + if (submesh.getVertexBuffer().getVertexCount() == 0 || !submeshHasVisibleTriangles(submeshIndex)) + { + for (uint32_t i = 0; i < submeshData.renderResources.size(); ++i) + { + if (submeshData.renderResources[i].resource != NULL) + { + UserRenderBoneBuffer* boneBuffer = submeshData.renderResources[i].resource->getBoneBuffer(); + rrm->releaseResource(*submeshData.renderResources[i].resource); + if (boneBuffer) + { + rrm->releaseBoneBuffer(*boneBuffer); + } + submeshData.renderResources[i].resource = NULL; + } + } + continue; + } + + bool fill2ndVertexBuffers = fill2ndVertexBuffersAll; + // Handling color replacement through "2nd vertex buffer" + if ((mKeepVisibleBonesPacked || submeshData.staticColorReplacement != NULL) && mPerActorVertexBuffers[submeshIndex] == NULL) + { + fill2ndVertexBuffers = true; + } + + bool createAndFillSharedVertexBuffers = createAndFillSharedVertexBuffersAll; + + // create vertex buffers if some buffer replacements are present in the actor + if (submeshData.staticPositionReplacement != NULL && submeshData.staticBufferReplacement == NULL && submeshData.dynamicBufferReplacement == NULL) + { + createAndFillSharedVertexBuffers = true; + } + + { + ApexRenderMeshAsset::SubmeshData& runtimeSubmeshData = mRenderMeshAsset->mRuntimeSubmeshData[submeshIndex]; + if (runtimeSubmeshData.staticVertexBuffer == NULL && runtimeSubmeshData.dynamicVertexBuffer == NULL && runtimeSubmeshData.skinningVertexBuffer == NULL) + { + createAndFillSharedVertexBuffers = true; + } + + // create vertex buffers if not all static vertex buffers have been created by the previous actors that were doing this + if (runtimeSubmeshData.needsStaticData && runtimeSubmeshData.staticVertexBuffer == NULL) + { + createAndFillSharedVertexBuffers = true; + 
} + + if (runtimeSubmeshData.needsDynamicData && runtimeSubmeshData.dynamicVertexBuffer == NULL) + { + createAndFillSharedVertexBuffers = true; + } + } + + + VertexBufferIntl& srcVB = submesh.getVertexBufferWritable(); + const VertexFormat& vf = srcVB.getFormat(); + + bool fillStaticSharedVertexBuffer = false; + bool fillDynamicSharedVertexBuffer = false; + bool fillSkinningSharedVertexBuffer = false; + + if (createAndFillSharedVertexBuffers) + { + UserRenderVertexBufferDesc staticBufDesc, dynamicBufDesc, boneBufDesc; + staticBufDesc.moduleIdentifier = mRenderMeshAsset->mOwnerModuleID; + staticBufDesc.maxVerts = srcVB.getVertexCount(); + staticBufDesc.hint = RenderBufferHint::STATIC; + staticBufDesc.uvOrigin = mRenderMeshAsset->getTextureUVOrigin(); + staticBufDesc.numCustomBuffers = 0; + staticBufDesc.canBeShared = true; + + dynamicBufDesc = staticBufDesc; + boneBufDesc = dynamicBufDesc; + bool useDynamicBuffer = false; + bool replaceStaticBuffer = false; + bool replaceDynamicBuffer = false; + + // extract all the buffers into one of the three descs + for (uint32_t i = 0; i < vf.getBufferCount(); ++i) + { + RenderVertexSemantic::Enum semantic = vf.getBufferSemantic(i); + if (semantic >= RenderVertexSemantic::POSITION && semantic <= RenderVertexSemantic::COLOR) + { + if (vf.getBufferAccess(i) == RenderDataAccess::STATIC) + { + staticBufDesc.buffersRequest[semantic] = vf.getBufferFormat(i); + + if (semantic == RenderVertexSemantic::POSITION && submeshData.staticPositionReplacement != NULL) + { + replaceStaticBuffer = true; + } + } + else + { + dynamicBufDesc.buffersRequest[semantic] = vf.getBufferFormat(i); + useDynamicBuffer = true; + + if (semantic == RenderVertexSemantic::POSITION && submeshData.staticPositionReplacement != NULL) + { + replaceDynamicBuffer = true; + } + } + } + else if (semantic == RenderVertexSemantic::CUSTOM) + { + ++staticBufDesc.numCustomBuffers; + } + } + + if (staticBufDesc.numCustomBuffers) + { + staticBufDesc.customBuffersIdents = 
&mRenderMeshAsset->mRuntimeCustomSubmeshData[submeshIndex].customBufferVoidPtrs[0]; + staticBufDesc.customBuffersRequest = &mRenderMeshAsset->mRuntimeCustomSubmeshData[submeshIndex].customBufferFormats[0]; + } + + // PH: only create bone indices/weights if more than one bone is present. one bone just needs local2world + if (mTransforms.size() > 1) + { + UserRenderVertexBufferDesc* boneDesc = vf.hasSeparateBoneBuffer() ? &boneBufDesc : &staticBufDesc; + + if (!fill2ndVertexBuffers) + { + uint32_t bufferIndex = (uint32_t)vf.getBufferIndexFromID(vf.getSemanticID(RenderVertexSemantic::BONE_INDEX)); + boneDesc->buffersRequest[RenderVertexSemantic::BONE_INDEX] = vf.getBufferFormat(bufferIndex); + bufferIndex = (uint32_t)vf.getBufferIndexFromID(vf.getSemanticID(RenderVertexSemantic::BONE_WEIGHT)); + boneDesc->buffersRequest[RenderVertexSemantic::BONE_WEIGHT] = vf.getBufferFormat(bufferIndex); + } + } + else + if (mForceBoneIndexChannel) + { + // Note, it is assumed here that this means there's an actor which will handle dynamic parts, and will require a shared bone index buffer + UserRenderVertexBufferDesc* boneDesc = vf.hasSeparateBoneBuffer() ? 
&boneBufDesc : &staticBufDesc; + + if (!fill2ndVertexBuffers) + { + uint32_t bufferIndex = (uint32_t)vf.getBufferIndexFromID(vf.getSemanticID(RenderVertexSemantic::BONE_INDEX)); + boneDesc->buffersRequest[RenderVertexSemantic::BONE_INDEX] = vf.getBufferFormat(bufferIndex); + } + } + + for (uint32_t semantic = RenderVertexSemantic::TEXCOORD0; semantic <= RenderVertexSemantic::TEXCOORD3; ++semantic) + { + uint32_t bufferIndex = (uint32_t)vf.getBufferIndexFromID(vf.getSemanticID((RenderVertexSemantic::Enum)semantic)); + staticBufDesc.buffersRequest[ semantic ] = vf.getBufferFormat(bufferIndex); + } + + { + uint32_t bufferIndex = (uint32_t)vf.getBufferIndexFromID(vf.getSemanticID(RenderVertexSemantic::DISPLACEMENT_TEXCOORD)); + staticBufDesc.buffersRequest[ RenderVertexSemantic::DISPLACEMENT_TEXCOORD ] = vf.getBufferFormat(bufferIndex); + bufferIndex = (uint32_t)vf.getBufferIndexFromID(vf.getSemanticID(RenderVertexSemantic::DISPLACEMENT_FLAGS)); + staticBufDesc.buffersRequest[ RenderVertexSemantic::DISPLACEMENT_FLAGS ] = vf.getBufferFormat(bufferIndex); + } + + // empty static buffer? + uint32_t numEntries = staticBufDesc.numCustomBuffers; + for (uint32_t i = 0; i < RenderVertexSemantic::NUM_SEMANTICS; i++) + { + numEntries += (staticBufDesc.buffersRequest[i] == RenderDataFormat::UNSPECIFIED) ? 
0 : 1; + + PX_ASSERT(staticBufDesc.buffersRequest[i] == RenderDataFormat::UNSPECIFIED || vertexSemanticFormatValid((RenderVertexSemantic::Enum)i, staticBufDesc.buffersRequest[i])); + PX_ASSERT(dynamicBufDesc.buffersRequest[i] == RenderDataFormat::UNSPECIFIED || vertexSemanticFormatValid((RenderVertexSemantic::Enum)i, dynamicBufDesc.buffersRequest[i])); + PX_ASSERT(boneBufDesc.buffersRequest[i] == RenderDataFormat::UNSPECIFIED || vertexSemanticFormatValid((RenderVertexSemantic::Enum)i, boneBufDesc.buffersRequest[i])); + } + + if (numEntries > 0) + { + if (replaceStaticBuffer) + { + submeshData.staticBufferReplacement = rrm->createVertexBuffer(staticBufDesc); + fillStaticSharedVertexBuffer = true; + } + else if (mRenderMeshAsset->mRuntimeSubmeshData[submeshIndex].staticVertexBuffer == NULL) + { + mRenderMeshAsset->mRuntimeSubmeshData[submeshIndex].staticVertexBuffer = rrm->createVertexBuffer(staticBufDesc); + fillStaticSharedVertexBuffer = true; + } + mRenderMeshAsset->mRuntimeSubmeshData[submeshIndex].needsStaticData = true; + } + + if (useDynamicBuffer) + { + // only create this if we don't create a per-actor dynamic buffer + if (submeshData.fallbackSkinningMemory == NULL && !submeshData.userSpecifiedData) + { + if (replaceDynamicBuffer) + { + submeshData.dynamicBufferReplacement = rrm->createVertexBuffer(dynamicBufDesc); + fillDynamicSharedVertexBuffer = true; + } + else if (mRenderMeshAsset->mRuntimeSubmeshData[submeshIndex].dynamicVertexBuffer == NULL) + { + mRenderMeshAsset->mRuntimeSubmeshData[submeshIndex].dynamicVertexBuffer = rrm->createVertexBuffer(dynamicBufDesc); + fillDynamicSharedVertexBuffer = true; + } + } + mRenderMeshAsset->mRuntimeSubmeshData[submeshIndex].needsDynamicData = true; + } + + uint32_t bufferIndex = (uint32_t)vf.getBufferIndexFromID(vf.getSemanticID(RenderVertexSemantic::BONE_INDEX)); + const uint32_t bonesPerVertex = vertexSemanticFormatElementCount(RenderVertexSemantic::BONE_INDEX, vf.getBufferFormat(bufferIndex)); + if 
(vf.hasSeparateBoneBuffer() && bonesPerVertex > 0 && mTransforms.size() > 1 && useBones && mRenderMeshAsset->mRuntimeSubmeshData[submeshIndex].skinningVertexBuffer == NULL) + { + mRenderMeshAsset->mRuntimeSubmeshData[submeshIndex].skinningVertexBuffer = rrm->createVertexBuffer(boneBufDesc); + fillSkinningSharedVertexBuffer = true; + } + } + + if ((submeshData.fallbackSkinningMemory != NULL || submeshData.userSpecifiedData) && submeshData.userDynamicVertexBuffer == NULL) + { + UserRenderVertexBufferDesc perActorDynamicBufDesc; + perActorDynamicBufDesc.moduleIdentifier = mRenderMeshAsset->mOwnerModuleID; + perActorDynamicBufDesc.maxVerts = srcVB.getVertexCount(); + perActorDynamicBufDesc.uvOrigin = mRenderMeshAsset->getTextureUVOrigin(); + perActorDynamicBufDesc.hint = RenderBufferHint::DYNAMIC; + perActorDynamicBufDesc.canBeShared = false; + + if (submeshData.userPositions != NULL) + { + perActorDynamicBufDesc.buffersRequest[RenderVertexSemantic::POSITION] = RenderDataFormat::FLOAT3; + } + + if (submeshData.userNormals != NULL) + { + perActorDynamicBufDesc.buffersRequest[RenderVertexSemantic::NORMAL] = RenderDataFormat::FLOAT3; + } + + if (submeshData.userTangents4 != NULL) + { + perActorDynamicBufDesc.buffersRequest[RenderVertexSemantic::TANGENT] = RenderDataFormat::FLOAT4; + } + + submeshData.userDynamicVertexBuffer = rrm->createVertexBuffer(perActorDynamicBufDesc); + } + + if (fill2ndVertexBuffers) + { + UserRenderVertexBufferDesc bufDesc; + bufDesc.moduleIdentifier = mRenderMeshAsset->mOwnerModuleID; + bufDesc.maxVerts = srcVB.getVertexCount(); + bufDesc.hint = RenderBufferHint::DYNAMIC; + if (mKeepVisibleBonesPacked) + { + bufDesc.buffersRequest[ RenderVertexSemantic::BONE_INDEX ] = RenderDataFormat::USHORT1; + } + if (submeshData.staticColorReplacement) + { + bufDesc.buffersRequest[ RenderVertexSemantic::COLOR ] = RenderDataFormat::R8G8B8A8; + } + bufDesc.uvOrigin = mRenderMeshAsset->getTextureUVOrigin(); + bufDesc.canBeShared = false; + for (uint32_t i = 0; i 
< RenderVertexSemantic::NUM_SEMANTICS; i++) + { + PX_ASSERT(bufDesc.buffersRequest[i] == RenderDataFormat::UNSPECIFIED || vertexSemanticFormatValid((RenderVertexSemantic::Enum)i, bufDesc.buffersRequest[i])); + } + mPerActorVertexBuffers[submeshIndex] = rrm->createVertexBuffer(bufDesc); + } + + // creates and/or fills index buffers + updatePartVisibility(submeshIndex, useBones, userRenderData); + + if (fillStaticSharedVertexBuffer || fillDynamicSharedVertexBuffer || fillSkinningSharedVertexBuffer) + { + const VertexFormat& vf = srcVB.getFormat(); + + RenderVertexBufferData dynamicWriteData; + RenderVertexBufferData staticWriteData; + RenderVertexBufferData skinningWriteData; + RenderVertexBufferData& skinningWriteDataRef = vf.hasSeparateBoneBuffer() ? skinningWriteData : staticWriteData; + + UserRenderVertexBuffer* staticVb = mRenderMeshAsset->mRuntimeSubmeshData[submeshIndex].staticVertexBuffer; + UserRenderVertexBuffer* dynamicVb = mRenderMeshAsset->mRuntimeSubmeshData[submeshIndex].dynamicVertexBuffer; + UserRenderVertexBuffer* skinningVb = mRenderMeshAsset->mRuntimeSubmeshData[submeshIndex].skinningVertexBuffer; + + if (submeshData.staticBufferReplacement != NULL) + { + staticVb = submeshData.staticBufferReplacement; + } + + if (submeshData.dynamicBufferReplacement != NULL) + { + dynamicVb = submeshData.dynamicBufferReplacement; + } + + for (uint32_t semantic = RenderVertexSemantic::POSITION; semantic <= RenderVertexSemantic::COLOR; ++semantic) + { + if (semantic == RenderVertexSemantic::COLOR && submeshData.staticColorReplacement != NULL) + { + // Gets done in updatePartVisibility if submeshData.staticColorReplacement is used + continue; + } + + RenderDataFormat::Enum format; + int32_t bufferIndex = vf.getBufferIndexFromID(vf.getSemanticID((RenderVertexSemantic::Enum)semantic)); + if (bufferIndex < 0) + { + continue; + } + const void* src = srcVB.getBufferAndFormat(format, (uint32_t)bufferIndex); + + if (semantic == RenderVertexSemantic::POSITION && 
submeshData.staticPositionReplacement != NULL) + { + src = submeshData.staticPositionReplacement; + } + + if (format != RenderDataFormat::UNSPECIFIED) + { + if (srcVB.getFormat().getBufferAccess((uint32_t)bufferIndex) == RenderDataAccess::STATIC) + { + staticWriteData.setSemanticData((RenderVertexSemantic::Enum)semantic, src, RenderDataFormat::getFormatDataSize(format), format); + } + else + { + dynamicWriteData.setSemanticData((RenderVertexSemantic::Enum)semantic, src, RenderDataFormat::getFormatDataSize(format), format); + } + } + } + + for (uint32_t semantic = RenderVertexSemantic::TEXCOORD0; semantic <= RenderVertexSemantic::TEXCOORD3; ++semantic) + { + RenderDataFormat::Enum format; + int32_t bufferIndex = vf.getBufferIndexFromID(vf.getSemanticID((RenderVertexSemantic::Enum)semantic)); + if (bufferIndex < 0) + { + continue; + } + const void* src = srcVB.getBufferAndFormat(format, (uint32_t)bufferIndex); + if (format != RenderDataFormat::UNSPECIFIED) + { + staticWriteData.setSemanticData((RenderVertexSemantic::Enum)semantic, src, RenderDataFormat::getFormatDataSize(format), format); + } + } + + for (uint32_t semantic = RenderVertexSemantic::DISPLACEMENT_TEXCOORD; semantic <= RenderVertexSemantic::DISPLACEMENT_FLAGS; ++semantic) + { + int32_t bufferIndex = vf.getBufferIndexFromID(vf.getSemanticID((RenderVertexSemantic::Enum)semantic)); + if (bufferIndex >= 0) + { + RenderDataFormat::Enum format; + const void* src = srcVB.getBufferAndFormat(format, (uint32_t)bufferIndex); + if (format != RenderDataFormat::UNSPECIFIED) + { + staticWriteData.setSemanticData((RenderVertexSemantic::Enum)semantic, src, RenderDataFormat::getFormatDataSize(format), format); + } + } + } + + nvidia::Array<RenderSemanticData> semanticData; + + const uint32_t numCustom = vf.getCustomBufferCount(); + if (numCustom) + { + // NvParameterized::Handle custom vertex buffer semantics + semanticData.resize(numCustom); + + uint32_t writeIndex = 0; + for (uint32_t i = 0; i < vf.getBufferCount(); i++) 
+ { + // Fill in a RenderSemanticData for each custom semantic + if (vf.getBufferSemantic(i) != RenderVertexSemantic::CUSTOM) + { + continue; + } + semanticData[writeIndex].data = srcVB.getBuffer(i); + RenderDataFormat::Enum fmt = mRenderMeshAsset->mRuntimeCustomSubmeshData[submeshIndex].customBufferFormats[writeIndex]; + semanticData[writeIndex].stride = RenderDataFormat::getFormatDataSize(fmt); + semanticData[writeIndex].format = fmt; + semanticData[writeIndex].ident = mRenderMeshAsset->mRuntimeCustomSubmeshData[submeshIndex].customBufferVoidPtrs[writeIndex]; + + writeIndex++; + } + PX_ASSERT(writeIndex == numCustom); + staticWriteData.setCustomSemanticData(&semanticData[0], numCustom); + } + + physx::Array<uint16_t> boneIndicesModuloMaxBoneCount; + + if (mTransforms.size() > 1 || mForceBoneIndexChannel) + { + uint32_t bufferIndex = (uint32_t)vf.getBufferIndexFromID(vf.getSemanticID(RenderVertexSemantic::BONE_INDEX)); + const uint32_t numBonesPerVertex = vertexSemanticFormatElementCount(RenderVertexSemantic::BONE_INDEX, vf.getBufferFormat(bufferIndex)); + if (numBonesPerVertex == 1) + { + // Gets done in updatePartVisibility if keepVisibleBonesPacked is true + if (!mKeepVisibleBonesPacked) + { + RenderDataFormat::Enum format; + const VertexFormat& vf = srcVB.getFormat(); + uint32_t bufferIndex = (uint32_t)vf.getBufferIndexFromID(vf.getSemanticID(RenderVertexSemantic::BONE_INDEX)); + const void* src = srcVB.getBufferAndFormat(format, bufferIndex); + if (format != RenderDataFormat::UNSPECIFIED) + { + if (mForceBoneIndexChannel && format == RenderDataFormat::USHORT1 && submeshData.maxBonesPerMaterial > 0 && submeshData.maxBonesPerMaterial < mRenderMeshAsset->getBoneCount()) + { + boneIndicesModuloMaxBoneCount.resize(srcVB.getVertexCount()); + uint16_t* srcBuf = (uint16_t*)src; + for (uint32_t vertexNum = 0; vertexNum < srcVB.getVertexCount(); ++vertexNum) + { + boneIndicesModuloMaxBoneCount[vertexNum] = *(srcBuf++)%submeshData.maxBonesPerMaterial; + } + src = 
&boneIndicesModuloMaxBoneCount[0]; + } + skinningWriteDataRef.setSemanticData(RenderVertexSemantic::BONE_INDEX, src, RenderDataFormat::getFormatDataSize(format), format); + } + } + } + else + { + RenderDataFormat::Enum format; + const void* src; + const VertexFormat& vf = srcVB.getFormat(); + uint32_t bufferIndex = (uint32_t)vf.getBufferIndexFromID(vf.getSemanticID(nvidia::RenderVertexSemantic::BONE_INDEX)); + src = srcVB.getBufferAndFormat(format, bufferIndex); + if (format != RenderDataFormat::UNSPECIFIED) + { + skinningWriteDataRef.setSemanticData(RenderVertexSemantic::BONE_INDEX, src, RenderDataFormat::getFormatDataSize(format), format); + } + bufferIndex = (uint32_t)vf.getBufferIndexFromID(vf.getSemanticID(nvidia::RenderVertexSemantic::BONE_WEIGHT)); + src = srcVB.getBufferAndFormat(format, bufferIndex); + if (format != RenderDataFormat::UNSPECIFIED) + { + skinningWriteDataRef.setSemanticData(RenderVertexSemantic::BONE_WEIGHT, src, RenderDataFormat::getFormatDataSize(format), format); + } + } + } + + if (staticVb != NULL && fillStaticSharedVertexBuffer) + { + staticVb->writeBuffer(staticWriteData, 0, srcVB.getVertexCount()); + } + if (dynamicVb != NULL && fillDynamicSharedVertexBuffer) + { + dynamicVb->writeBuffer(dynamicWriteData, 0, srcVB.getVertexCount()); + } + if (skinningVb != NULL && fillSkinningSharedVertexBuffer) + { + skinningVb->writeBuffer(skinningWriteData, 0, srcVB.getVertexCount()); + } + + // TODO - SJB - Beta2 - release submesh after updateRenderResources() returns. It requires acquiring the actor lock as game engine could delay these + // writes so long as it holds the render lock. Could be done in updateBounds(), which implictly has the lock. Perhaps we need to catch lock release + // so it happens immediately. 
		}

		// Delete static vertex buffers after writing them
		// (the asset param requests that CPU-side static data be discarded once uploaded;
		// srcVB is rebuilt with only the non-static channels kept)
		if (mRenderMeshAsset->mParams->deleteStaticBuffersAfterUse)
		{
			ApexVertexFormat dynamicFormats;
			dynamicFormats.copy((const ApexVertexFormat&)vf);
			for (uint32_t semantic = RenderVertexSemantic::POSITION; semantic < RenderVertexSemantic::NUM_SEMANTICS; ++semantic)
			{
				uint32_t bufferIndex = (uint32_t)vf.getBufferIndexFromID(vf.getSemanticID((RenderVertexSemantic::Enum)semantic));
				if (dynamicFormats.getBufferAccess(bufferIndex) == RenderDataAccess::STATIC)
				{
					// Drop the static channel from the rebuilt vertex buffer
					dynamicFormats.setBufferFormat(bufferIndex, RenderDataFormat::UNSPECIFIED);
				}
			}
			srcVB.build(dynamicFormats, srcVB.getVertexCount());
		}
	}
	// Dirty flags consumed by this update; record which bone-buffer mode the resources were built for.
	mOneUserVertexBufferChanged = false;
	mPartVisibilityChanged = false;
	mBoneBufferInUse = useBones;
}

/**
	Rebuilds the per-submesh index buffer and UserRenderResource list from the currently
	visible parts.

	\param submeshIndex   index of the submesh within the render mesh asset
	\param useBones       whether skinning (bone) buffers should be attached; forced off below
	                      when fallback skinning memory is in use
	\param userRenderData opaque user pointer passed through to the created render resources

	Render resources are (re)created in batches limited by submeshData.maxBonesPerMaterial;
	index data is staged in mPartIndexTempBuffer so the user index buffer is locked at most
	once per frame.
*/
void ApexRenderMeshActor::updatePartVisibility(uint32_t submeshIndex, bool useBones, void* userRenderData)
{
#if VERBOSE
	printf("updatePartVisibility(submeshIndex=%d, useBones=%s, userRenderData=0x%p)\n", submeshIndex, useBones ? "true" : "false", userRenderData);
	printf(" mPartVisibilityChanged=%s\n", mPartVisibilityChanged ? "true" : "false");
#endif

	const ApexRenderSubmesh& submesh = *mRenderMeshAsset->mSubmeshes[submeshIndex];
	SubmeshData& submeshData = mSubmeshData[ submeshIndex ];

	UserRenderResourceManager* rrm = GetInternalApexSDK()->getUserRenderResourceManager();
	PX_ASSERT(rrm != NULL);

	// we end up with a division by 0 otherwise :(
	PX_ASSERT(submeshData.maxBonesPerMaterial > 0);

	// With packed visible bones, only visible parts are counted; otherwise all parts.
	const uint32_t partCount = mKeepVisibleBonesPacked ? getRenderVisiblePartCount() : mRenderMeshAsset->getPartCount();

	// only use bones if there is no fallback skinning
	useBones &= submeshData.fallbackSkinningMemory == NULL;

	uint32_t resourceCount;
	if (!useBones)
	{
		// If we're not skinning, we only need one resource
		resourceCount = 1;
	}
	else
	{
		// LRR - poor workaround for http://nvbugs/534501
		if (mKeepVisibleBonesPacked)
		{
			// ceil(partCount / maxBonesPerMaterial)
			resourceCount = partCount == 0 ? 0 : (partCount + submeshData.maxBonesPerMaterial - 1) / submeshData.maxBonesPerMaterial;
		}
		else
		{
			// ceil(boneCount / maxBonesPerMaterial)
			resourceCount = partCount == 0 ? 0 : (mRenderMeshAsset->getBoneCount() + submeshData.maxBonesPerMaterial - 1) / submeshData.maxBonesPerMaterial;
		}
	}

	// Eliminate unneeded resources:
	// if the user index buffer changed or the bone-buffer mode flipped, every resource is stale (start = 0);
	// otherwise only resources beyond the new resourceCount are released.
	const uint32_t start = (submeshData.userIndexBufferChanged || useBones != mBoneBufferInUse) ? 0 : resourceCount;
	for (uint32_t i = start; i < submeshData.renderResources.size(); ++i)
	{
		if (submeshData.renderResources[i].resource != NULL)
		{
			// Grab the bone buffer before releasing the resource that owns the pointer
			UserRenderBoneBuffer* boneBuffer = submeshData.renderResources[i].resource->getBoneBuffer();
			rrm->releaseResource(*submeshData.renderResources[i].resource);

			if (boneBuffer)
			{
				rrm->releaseBoneBuffer(*boneBuffer);
			}

			submeshData.renderResources[i].resource = NULL;
		}
	}
	submeshData.userIndexBufferChanged = false;

	uint16_t resourceBoneCount = 0;
	uint32_t resourceNum = 0;
	uint32_t startIndex = 0;

	if (mKeepVisibleBonesPacked)
	{
		if (mBoneIndexTempBuffer.size() < submesh.getVertexBuffer().getVertexCount())
		{
			mBoneIndexTempBuffer.resize(submesh.getVertexBuffer().getVertexCount()); // A new index buffer to remap the bone indices to the smaller buffer
		}
	}

	// Range of vertices whose remapped bone indices get written; -1 sentinel means "none touched yet"
	uint32_t boneIndexStart = uint32_t(-1);
	uint32_t boneIndexEnd = 0;

	// Figure out how many indices we'll need
	uint32_t totalIndexCount = submesh.getTotalIndexCount(); // Worst case

	const uint32_t* visiblePartIndexPtr = getRenderVisibleParts();

	if (mKeepVisibleBonesPacked)
	{
		// We can do better
		totalIndexCount = 0;
		for (uint32_t partNum = 0; partNum < partCount; ++partNum)
		{
			totalIndexCount += submesh.getIndexCount(visiblePartIndexPtr[partNum]);
		}
	}

	uint32_t newIndexBufferRequestSize = submeshData.indexBufferRequestedSize;

	// If there has not already been an index buffer request, set to exact size
	if (newIndexBufferRequestSize == 0 || totalIndexCount >= 0x80000000) // special handling of potential overflow
	{
		newIndexBufferRequestSize = totalIndexCount;
	}
	else
	{
		// If the buffer has already been requested, see if we need to grow or shrink it
		// (doubling/halving schedule keeps reallocation amortized)
		while (totalIndexCount > newIndexBufferRequestSize)
		{
			newIndexBufferRequestSize *= 2;
		}
		while (2*totalIndexCount < newIndexBufferRequestSize)
		{
			newIndexBufferRequestSize /= 2;
		}
	}

	// In case our doubling schedule gave it a larger size than we'll ever need
	if (newIndexBufferRequestSize > submesh.getTotalIndexCount())
	{
		newIndexBufferRequestSize = submesh.getTotalIndexCount();
	}

	if (submeshData.indexBuffer != NULL && newIndexBufferRequestSize != submeshData.indexBufferRequestedSize)
	{
		// Release the old buffer
		rrm->releaseIndexBuffer(*submeshData.indexBuffer);
		submeshData.indexBuffer = NULL;
		releaseSubmeshRenderResources(submeshIndex);
	}

	// Create the index buffer now if needed
	if (submeshData.indexBuffer == NULL && newIndexBufferRequestSize > 0)
	{
		UserRenderIndexBufferDesc indexDesc;
		indexDesc.maxIndices = newIndexBufferRequestSize;
		indexDesc.hint = mIndexBufferHint;
		indexDesc.format = RenderDataFormat::UINT1;
		submeshData.indexBuffer = rrm->createIndexBuffer(indexDesc);
		submeshData.indexBufferRequestedSize = newIndexBufferRequestSize;
	}

	submeshData.renderResources.resize(resourceCount, ResourceData());

	submeshData.visibleTriangleCount = 0;
	// KHA - batch writes to temporary buffer so that index buffer is only locked once per frame
	if(mPartIndexTempBuffer.size() < totalIndexCount)
	{
		mPartIndexTempBuffer.resize(totalIndexCount);
	}
	for (uint32_t partNum = 0; partNum < partCount;)
	{
		uint32_t partIndex;
		bool partIsVisible;
		if (mKeepVisibleBonesPacked)
		{
			// Packed mode: iterate only visible parts; every part here is visible by construction
			partIndex = visiblePartIndexPtr[partNum++];
			partIsVisible = true;

			const uint32_t indexStart = submesh.getFirstVertexIndex(partIndex);
			const uint32_t vertexCount = submesh.getVertexCount(partIndex);

			uint16_t* boneIndex = mBoneIndexTempBuffer.begin() + indexStart;
			const uint16_t* boneIndexStop = boneIndex + vertexCount;

			boneIndexStart = PxMin(boneIndexStart, indexStart);
			boneIndexEnd = PxMax(boneIndexEnd, indexStart + vertexCount);

			// Remap all of this part's vertices to the current packed bone slot
			while (boneIndex < boneIndexStop)
			{
				*boneIndex++ = resourceBoneCount;
			}
		}
		else
		{
			partIndex = partNum++;
			partIsVisible = isVisible((uint16_t)partIndex);
		}

		if (partIsVisible)
		{
			const uint32_t indexCount = submesh.getIndexCount(partIndex);
			const uint32_t* indices = submesh.getIndexBuffer(partIndex);
			const uint32_t currentIndexNum = submeshData.visibleTriangleCount * 3;
			if (indexCount > 0 && mPartVisibilityChanged)
			{
				memcpy(mPartIndexTempBuffer.begin() + currentIndexNum, indices, indexCount * sizeof(uint32_t));
			}
			submeshData.visibleTriangleCount += indexCount / 3;
		}

		// LRR - poor workaround for http://nvbugs/534501
		bool generateNewRenderResource = false;

		const bool oneBonePerPart = mSkinningMode != RenderMeshActorSkinningMode::AllBonesPerPart;

		const uint16_t bonesToAdd = oneBonePerPart ? 1u : (uint16_t)mRenderMeshAsset->getBoneCount();
		resourceBoneCount = PxMin<uint16_t>((uint16_t)(resourceBoneCount + bonesToAdd), (uint16_t)submeshData.maxBonesPerMaterial);

		// Check if we exceed max bones limit or if this is the last part
		if ((useBones && resourceBoneCount == submeshData.maxBonesPerMaterial) || partNum == partCount)
		{
			generateNewRenderResource = true;
		}
		if (generateNewRenderResource)
		{
			submeshData.renderResources[resourceNum].boneCount = resourceBoneCount;
			submeshData.renderResources[resourceNum].vertexCount = submeshData.visibleTriangleCount * 3 - startIndex;
			UserRenderResource*& renderResource = submeshData.renderResources[resourceNum].resource; // Next resource
			++resourceNum;
			if (renderResource == NULL) // Create if needed
			{
				// Gather up to 5 vertex streams: static, dynamic, skinning, per-actor (see PX_ASSERT below)
				UserRenderVertexBuffer* vertexBuffers[5] = { NULL };
				uint32_t numVertexBuffers = 0;

				if (submeshData.staticBufferReplacement != NULL)
				{
					vertexBuffers[numVertexBuffers++] = submeshData.staticBufferReplacement;
				}
				else if (mRenderMeshAsset->mRuntimeSubmeshData[submeshIndex].staticVertexBuffer != NULL)
				{
					vertexBuffers[numVertexBuffers++] = mRenderMeshAsset->mRuntimeSubmeshData[ submeshIndex ].staticVertexBuffer;
				}

				// Dynamic stream priority: per-actor user buffer > per-actor replacement > shared asset buffer
				if (submeshData.userDynamicVertexBuffer != NULL && (submeshData.userSpecifiedData || submeshData.fallbackSkinningMemory != NULL))
				{
					vertexBuffers[numVertexBuffers++] = submeshData.userDynamicVertexBuffer;
				}
				else if (submeshData.dynamicBufferReplacement != NULL)
				{
					vertexBuffers[numVertexBuffers++] = submeshData.dynamicBufferReplacement;
				}
				else if (mRenderMeshAsset->mRuntimeSubmeshData[submeshIndex].dynamicVertexBuffer != NULL)
				{
					// The user dynamic buffer is unused in this configuration; optionally reclaim it
					if (submeshData.userDynamicVertexBuffer != NULL && mReleaseResourcesIfNothingToRender)
					{
						rrm->releaseVertexBuffer(*submeshData.userDynamicVertexBuffer);
						submeshData.userDynamicVertexBuffer = NULL;
					}

					vertexBuffers[numVertexBuffers++] = mRenderMeshAsset->mRuntimeSubmeshData[submeshIndex].dynamicVertexBuffer;
				}

				if (useBones && mRenderMeshAsset->mRuntimeSubmeshData[submeshIndex].skinningVertexBuffer != NULL)
				{
					vertexBuffers[numVertexBuffers++] = mRenderMeshAsset->mRuntimeSubmeshData[submeshIndex].skinningVertexBuffer;
				}

				// Separate (instanced) buffer for bone indices
				if (mPerActorVertexBuffers.size())
				{
					vertexBuffers[numVertexBuffers++] = mPerActorVertexBuffers[ submeshIndex ];
				}

				PX_ASSERT(numVertexBuffers <= 5);

				UserRenderResourceDesc resourceDesc;
				resourceDesc.primitives = RenderPrimitiveType::TRIANGLES;

				resourceDesc.vertexBuffers = vertexBuffers;
				resourceDesc.numVertexBuffers = numVertexBuffers;

				resourceDesc.numVerts = submesh.getVertexBuffer().getVertexCount();

				resourceDesc.indexBuffer = submeshData.indexBuffer;
				resourceDesc.firstIndex = startIndex;
				resourceDesc.numIndices = submeshData.visibleTriangleCount * 3 - startIndex;

				// not assuming partcount == bonecount anymore
				//if (mRenderMeshAsset->getPartCount() > 1)
				const uint32_t numBones = mRenderMeshAsset->getBoneCount();
				if (numBones > 1 && useBones)
				{
					UserRenderBoneBufferDesc boneDesc;
					// we don't need to use the minimum of numBones and max bones because the
					// bone buffer update contains the proper range
					boneDesc.maxBones = submeshData.maxBonesPerMaterial;
					boneDesc.hint = RenderBufferHint::DYNAMIC;
					boneDesc.buffersRequest[ RenderBoneSemantic::POSE ] = RenderDataFormat::FLOAT4x4;
					if (mKeepPreviousFrameBoneBuffer)
					{
						boneDesc.buffersRequest[ RenderBoneSemantic::PREVIOUS_POSE ] = RenderDataFormat::FLOAT4x4;
					}
					resourceDesc.boneBuffer = rrm->createBoneBuffer(boneDesc);
					PX_ASSERT(resourceDesc.boneBuffer);
					if (resourceDesc.boneBuffer)
					{
						resourceDesc.numBones = numBones;
					}
				}

				resourceDesc.instanceBuffer = submeshData.instanceBuffer;
				resourceDesc.numInstances = 0;

				if (!submeshData.isMaterialPointerValid)
				{
					// this should only be reached, when renderMeshActorLoadMaterialsLazily is true.
					// URR may not be called asynchronously in that case (for example in a render thread)
					ResourceProviderIntl* nrp = GetInternalApexSDK()->getInternalResourceProvider();
					if (nrp != NULL)
					{
						submeshData.material = nrp->getResource(submeshData.materialID);
						submeshData.isMaterialPointerValid = true;
					}
				}

				resourceDesc.material = submeshData.material;

				resourceDesc.submeshIndex = submeshIndex;

				resourceDesc.userRenderData = userRenderData;

				resourceDesc.cullMode = submesh.getVertexBuffer().getFormat().getWinding();

				if (resourceDesc.isValid()) // TODO: should probably make this an if-statement... -jgd // I did, -poh
				{
					renderResource = rrm->createResource(resourceDesc);
				}
			}

			if (renderResource != NULL)
			{
				renderResource->setIndexBufferRange(startIndex, submeshData.visibleTriangleCount * 3 - startIndex);
				startIndex = submeshData.visibleTriangleCount * 3;

				if (renderResource->getBoneBuffer() != NULL)
				{
					renderResource->setBoneBufferRange(0, resourceBoneCount);
					// TODO - LRR - make useBoneVisibilitySemantic work with >1 bone/part
					// if visible bone optimization enabled (as set in the actor desc)
					// {
					//	if (renderMesh->getBoneBuffer() != NULL)
					//		if we have a 1:1 bone:part mapping, as determined when asset is loaded (or authored)
					//			renderMesh->getBoneBuffer()->writeBuffer(RenderBoneSemantic::VISIBLE_INDEX, visibleParts.usedIndices(), sizeof(uint32_t), 0, visibleParts.usedCount());
					//	else
					//	{
					//		// run through index buffer, and find all bones referenced by visible verts and store in visibleBones
					//		renderMesh->getBoneBuffer()->writeBuffer(RenderBoneSemantic::VISIBLE_INDEX, visibleBones.usedIndices(), sizeof(uint32_t), 0, visibleBones.usedCount());
					//	}
					// }
				}
			}

			// Start a new bone batch for the next resource
			resourceBoneCount = 0;
		}
	}

	// No vertices were touched in packed mode (or packed mode is off): normalize the sentinel
	if (boneIndexStart == uint32_t(-1))
	{
		boneIndexStart = 0;
	}

	// KHA - Write temporary buffer to index buffer
	if(submeshData.indexBuffer != NULL && mPartVisibilityChanged)
	{
		submeshData.indexBuffer->writeBuffer(mPartIndexTempBuffer.begin(), sizeof(uint32_t), 0, submeshData.visibleTriangleCount*3);
	}

	// Write re-mapped bone indices
	if (mPerActorVertexBuffers.size())
	{
		if (mTransforms.size() > 1 && mKeepVisibleBonesPacked)
		{
			RenderVertexBufferData skinningWriteData;
			skinningWriteData.setSemanticData(RenderVertexSemantic::BONE_INDEX, mBoneIndexTempBuffer.begin() + boneIndexStart, sizeof(uint16_t), RenderDataFormat::USHORT1);
			if (submeshData.staticColorReplacement != NULL)
			{
				skinningWriteData.setSemanticData(RenderVertexSemantic::COLOR, submeshData.staticColorReplacement + boneIndexStart, sizeof(ColorRGBA), RenderDataFormat::R8G8B8A8);
			}
			mPerActorVertexBuffers[submeshIndex]->writeBuffer(skinningWriteData, boneIndexStart, boneIndexEnd - boneIndexStart);
		}
		else
		if (submeshData.staticColorReplacement != NULL)
		{
			// Color replacement only — write the full vertex range
			RenderVertexBufferData skinningWriteData;
			skinningWriteData.setSemanticData(RenderVertexSemantic::COLOR, submeshData.staticColorReplacement, sizeof(ColorRGBA), RenderDataFormat::R8G8B8A8);
			mPerActorVertexBuffers[submeshIndex]->writeBuffer(skinningWriteData, 0, submesh.getVertexBuffer().getVertexCount());
		}
	}

	mBonePosesDirty = true;

#if VERBOSE
	printf("-updatePartVisibility(submeshIndex=%d, useBones=%s, userRenderData=0x%p)\n", submeshIndex, useBones ? "true" : "false", userRenderData);
#endif
}

/**
	Writes current (and optionally previous-frame) bone pose matrices into each
	render resource's bone buffer for the given submesh.  No-op when the asset has
	at most one bone.  (Body continues past this edit's span.)
*/
void ApexRenderMeshActor::updateBonePoses(uint32_t submeshIndex)
{
// There can now be >1 bones per part
//	if (mRenderMeshAsset->getPartCount() > 1)
	if (mRenderMeshAsset->getBoneCount() > 1)
	{
		SubmeshData& submeshData = mSubmeshData[ submeshIndex ];
		PxMat44* boneTMs = mTransforms.begin();
		const uint32_t tmBufferSize = mKeepVisibleBonesPacked ?
		getRenderVisiblePartCount() : mTransforms.size();

		// Set up the previous bone buffer, if requested and available
		PxMat44* previousBoneTMs = NULL;
		if (!mPreviousFrameBoneBufferValid || mTransformsLastFrame.size() != mTransforms.size())
		{
			// No usable last-frame data — fall back to the current poses
			previousBoneTMs = boneTMs;
		}
		else
		if (!mKeepVisibleBonesPacked || mRemappedPreviousBoneTMs.size() == 0)
		{
			previousBoneTMs = mTransformsLastFrame.begin();
		}
		else
		{
			// Packed mode with remapping available
			previousBoneTMs = mRemappedPreviousBoneTMs.begin();
		}

		// Walk the batched render resources, advancing the TM cursors by each batch's bone count
		uint32_t tmsRemaining = tmBufferSize;
		for (uint32_t i = 0; i < submeshData.renderResources.size(); ++i)
		{
			UserRenderResource* renderResource = submeshData.renderResources[i].resource;
			const uint32_t resourceBoneCount = submeshData.renderResources[i].boneCount;
			if (renderResource && renderResource->getBoneBuffer() != NULL)
			{
				RenderBoneBufferData boneWriteData;
				boneWriteData.setSemanticData(RenderBoneSemantic::POSE, boneTMs, sizeof(PxMat44), RenderDataFormat::FLOAT4x4);
				if (mKeepPreviousFrameBoneBuffer)
				{
					boneWriteData.setSemanticData(RenderBoneSemantic::PREVIOUS_POSE, previousBoneTMs, sizeof(PxMat44), RenderDataFormat::FLOAT4x4);
				}
				// Last batch may hold fewer TMs than its bone capacity — clamp the write count
				renderResource->getBoneBuffer()->writeBuffer(boneWriteData, 0, PxMin(tmsRemaining, resourceBoneCount));
				tmsRemaining -= resourceBoneCount;
				boneTMs += resourceBoneCount;
				previousBoneTMs += resourceBoneCount;
			}
		}
	}
}

/**
	Releases all render resources (and their bone buffers) owned by one submesh,
	then clears the resource array.  Out-of-range submeshIndex is a no-op.
*/
void ApexRenderMeshActor::releaseSubmeshRenderResources(uint32_t submeshIndex)
{
#if VERBOSE
	printf("releaseSubmeshRenderResources()\n");
#endif

	if (submeshIndex >= mSubmeshData.size())
	{
		return;
	}

	UserRenderResourceManager* rrm = GetInternalApexSDK()->getUserRenderResourceManager();

	SubmeshData& submeshData = mSubmeshData[submeshIndex];
	for (uint32_t j = submeshData.renderResources.size(); j--;)
	{
		if (submeshData.renderResources[j].resource != NULL)
		{
			// Release the bone buffer before the resource that exposes it
			if (submeshData.renderResources[j].resource->getBoneBuffer() != NULL)
			{
				rrm->releaseBoneBuffer(*submeshData.renderResources[j].resource->getBoneBuffer());
			}
			rrm->releaseResource(*submeshData.renderResources[j].resource);
			submeshData.renderResources[j].resource = NULL;
		}
	}
	submeshData.renderResources.reset();
}


/**
	Releases every render resource this actor holds: per-submesh resources, index
	buffers, buffer replacements, user dynamic buffers, per-actor vertex buffers,
	and the actor-level render resource.  Resets the bone-buffer-in-use flag.
	Note: instanceBuffer is only nulled, not released here — presumably owned
	elsewhere (TODO(review): confirm ownership).
*/
void ApexRenderMeshActor::releaseRenderResources()
{
#if VERBOSE
	printf("releaseRenderResources()\n");
#endif

	UserRenderResourceManager* rrm = GetInternalApexSDK()->getUserRenderResourceManager();

	for (uint32_t i = mSubmeshData.size(); i--;)
	{
		releaseSubmeshRenderResources(i);

		SubmeshData& submeshData = mSubmeshData[i];

		if (submeshData.indexBuffer != NULL)
		{
			rrm->releaseIndexBuffer(*submeshData.indexBuffer);
			submeshData.indexBuffer = NULL;
		}
		submeshData.instanceBuffer = NULL;

		if (submeshData.staticBufferReplacement != NULL)
		{
			rrm->releaseVertexBuffer(*submeshData.staticBufferReplacement);
			submeshData.staticBufferReplacement = NULL;
		}

		if (submeshData.dynamicBufferReplacement != NULL)
		{
			rrm->releaseVertexBuffer(*submeshData.dynamicBufferReplacement);
			submeshData.dynamicBufferReplacement = NULL;
		}

		if (submeshData.userDynamicVertexBuffer != NULL)
		{
			rrm->releaseVertexBuffer(*submeshData.userDynamicVertexBuffer);
			submeshData.userDynamicVertexBuffer = NULL;
		}
		submeshData.userIndexBufferChanged = false;
	}

	for (uint32_t i = mPerActorVertexBuffers.size(); i--;)
	{
		if (mPerActorVertexBuffers[i] != NULL)
		{
			rrm->releaseVertexBuffer(*mPerActorVertexBuffers[i]);
			mPerActorVertexBuffers[i] = NULL;
		}
	}
	mPerActorVertexBuffers.reset();

	if (mRenderResource)
	{
		rrm->releaseResource(*mRenderResource);
		mRenderResource = NULL;
	}

	mBoneBufferInUse = false;
}



/**
	Returns true if any currently-visible part of the given submesh has a
	non-empty index buffer (i.e. at least one renderable triangle).
*/
bool ApexRenderMeshActor::submeshHasVisibleTriangles(uint32_t submeshIndex) const
{
	const ApexRenderSubmesh& submesh = *mRenderMeshAsset->mSubmeshes[submeshIndex];

	const uint32_t partCount = getRenderVisiblePartCount();
	const uint32_t* visiblePartIndexPtr = getRenderVisibleParts();

	for (uint32_t partNum = 0; partNum < partCount;)
	{
		const uint32_t partIndex = visiblePartIndexPtr[partNum++];
		const uint32_t indexCount = submesh.getIndexCount(partIndex);

		if (indexCount > 0)
		{
			return true;
		}
	}

	return false;
}



/**
	Allocates CPU-side memory for fallback (software) skinning of one submesh and,
	unless the user supplies the data, distributes the asset's dynamic channels into
	it.  Skipped entirely when the actor has a single transform (no skinning needed).
	Only dynamic POSITION/NORMAL/TANGENT channels in FLOAT3/FLOAT4 format contribute
	to the allocation size.
*/
void ApexRenderMeshActor::createFallbackSkinning(uint32_t submeshIndex)
{
	if (mTransforms.size() == 1)
	{
		return;
	}

#if VERBOSE
	printf("createFallbackSkinning(submeshIndex=%d)\n", submeshIndex);
#endif
	const VertexBuffer& vertexBuffer = mRenderMeshAsset->getSubmesh(submeshIndex).getVertexBuffer();
	const VertexFormat& format = vertexBuffer.getFormat();

	const uint32_t bufferCount = format.getBufferCount();

	// Per-vertex byte size of all skinnable dynamic channels
	uint32_t bufferSize = 0;
	for (uint32_t bufferIndex = 0; bufferIndex < bufferCount; bufferIndex++)
	{
		if (format.getBufferAccess(bufferIndex) == RenderDataAccess::DYNAMIC)
		{
			RenderDataFormat::Enum bufferFormat = format.getBufferFormat(bufferIndex);
			RenderVertexSemantic::Enum bufferSemantic = format.getBufferSemantic(bufferIndex);

			if (bufferSemantic == RenderVertexSemantic::POSITION ||
			        bufferSemantic == RenderVertexSemantic::NORMAL ||
			        bufferSemantic == RenderVertexSemantic::TANGENT)
			{
				if (bufferFormat == RenderDataFormat::FLOAT3)
				{
					bufferSize += sizeof(PxVec3);
				}
				else if (bufferFormat == RenderDataFormat::FLOAT4)
				{
					bufferSize += sizeof(PxVec4);
				}
			}
		}
	}

	if (bufferSize > 0)
	{
		PX_ASSERT(mSubmeshData[submeshIndex].fallbackSkinningMemory == NULL);
		mSubmeshData[submeshIndex].fallbackSkinningMemorySize = bufferSize * vertexBuffer.getVertexCount();
		mSubmeshData[submeshIndex].fallbackSkinningMemory = PX_ALLOC(mSubmeshData[submeshIndex].fallbackSkinningMemorySize, "fallbackSkinnningMemory");

		PX_ASSERT(mSubmeshData[submeshIndex].fallbackSkinningDirty == false);

		if (!mSubmeshData[submeshIndex].userSpecifiedData)
		{
			distributeFallbackData(submeshIndex);
			mOneUserVertexBufferChanged = true;
		}
	}
}
// Partitions the fallback-skinning allocation made by createFallbackSkinning()
// into the per-semantic user pointers (positions, normals, tangents), laid out
// as consecutive arrays of vertexCount elements each.
void ApexRenderMeshActor::distributeFallbackData(uint32_t submeshIndex)
{
	const VertexBuffer& vertexBuffer = mRenderMeshAsset->getSubmesh(submeshIndex).getVertexBuffer();
	const VertexFormat& format = vertexBuffer.getFormat();
	const uint32_t bufferCount = format.getBufferCount();
	const uint32_t vertexCount = vertexBuffer.getVertexCount();

	unsigned char* memoryIterator = (unsigned char*)mSubmeshData[submeshIndex].fallbackSkinningMemory;

	uint32_t sizeUsed = 0;
	for (uint32_t bufferIndex = 0; bufferIndex < bufferCount; bufferIndex++)
	{
		if (format.getBufferAccess(bufferIndex) == RenderDataAccess::DYNAMIC)
		{
			RenderDataFormat::Enum bufferFormat = format.getBufferFormat(bufferIndex);
			RenderVertexSemantic::Enum bufferSemantic = format.getBufferSemantic(bufferIndex);

			if (bufferSemantic == RenderVertexSemantic::POSITION && bufferFormat == RenderDataFormat::FLOAT3)
			{
				mSubmeshData[submeshIndex].userPositions = (PxVec3*)memoryIterator;
				memoryIterator += sizeof(PxVec3) * vertexCount;
				sizeUsed += sizeof(PxVec3);
			}
			else if (bufferSemantic == RenderVertexSemantic::NORMAL && bufferFormat == RenderDataFormat::FLOAT3)
			{
				mSubmeshData[submeshIndex].userNormals = (PxVec3*)memoryIterator;
				memoryIterator += sizeof(PxVec3) * vertexCount;
				sizeUsed += sizeof(PxVec3);
			}
			else if (bufferSemantic == RenderVertexSemantic::TANGENT && bufferFormat == RenderDataFormat::FLOAT4)
			{
				mSubmeshData[submeshIndex].userTangents4 = (PxVec4*)memoryIterator;
				memoryIterator += sizeof(PxVec4) * vertexCount;
				sizeUsed += sizeof(PxVec4);
			}
		}
	}

	// The per-vertex bytes handed out must match the allocation exactly.
	PX_ASSERT(sizeUsed * vertexCount == mSubmeshData[submeshIndex].fallbackSkinningMemorySize);
}



// CPU-skins the dynamic POSITION/NORMAL/TANGENT streams into the fallback
// memory using the asset's bone indices/weights and the actor's current
// transforms. No-op when there is no fallback memory or the user supplies
// their own data.
void ApexRenderMeshActor::updateFallbackSkinning(uint32_t submeshIndex)
{
	if (mSubmeshData[submeshIndex].fallbackSkinningMemory == NULL || mSubmeshData[submeshIndex].userSpecifiedData)
	{
		return;
	}

#if VERBOSE
	printf("updateFallbackSkinning(submeshIndex=%d)\n", submeshIndex);
#endif
	PX_PROFILE_ZONE("ApexRenderMesh::updateFallbackSkinning", GetInternalApexSDK()->getContextId());

	const VertexBuffer& vertexBuffer = mRenderMeshAsset->getSubmesh(submeshIndex).getVertexBuffer();
	const VertexFormat& format = vertexBuffer.getFormat();

	PxVec3* outPositions = mSubmeshData[submeshIndex].userPositions;
	PxVec3* outNormals = mSubmeshData[submeshIndex].userNormals;
	PxVec4* outTangents = mSubmeshData[submeshIndex].userTangents4;

	if (outPositions == NULL && outNormals == NULL && outTangents == NULL)
	{
		return;
	}

	// Look up the source streams; each assert documents the only formats handled.
	RenderDataFormat::Enum inFormat;
	const uint32_t positionIndex = (uint32_t)format.getBufferIndexFromID(format.getSemanticID(RenderVertexSemantic::POSITION));
	const PxVec3* inPositions = (const PxVec3*)vertexBuffer.getBufferAndFormat(inFormat, positionIndex);
	PX_ASSERT(inPositions == NULL || inFormat == RenderDataFormat::FLOAT3);

	if (inPositions != NULL && mSubmeshData[submeshIndex].staticPositionReplacement != NULL)
	{
		// Use the replacement position stream when one has been installed.
		inPositions = mSubmeshData[submeshIndex].staticPositionReplacement;
	}

	const uint32_t normalIndex = (uint32_t)format.getBufferIndexFromID(format.getSemanticID(RenderVertexSemantic::NORMAL));
	const PxVec3* inNormals = (const PxVec3*)vertexBuffer.getBufferAndFormat(inFormat, normalIndex);
	PX_ASSERT(inNormals == NULL || inFormat == RenderDataFormat::FLOAT3);

	const uint32_t tangentIndex = (uint32_t)format.getBufferIndexFromID(format.getSemanticID(RenderVertexSemantic::TANGENT));
	const PxVec4* inTangents = (const PxVec4*)vertexBuffer.getBufferAndFormat(inFormat, tangentIndex);
	PX_ASSERT(inTangents == NULL || inFormat == RenderDataFormat::FLOAT4);

	const uint32_t boneIndexIndex = (uint32_t)format.getBufferIndexFromID(format.getSemanticID(RenderVertexSemantic::BONE_INDEX));
	const uint16_t* inBoneIndices = (const uint16_t*)vertexBuffer.getBufferAndFormat(inFormat, boneIndexIndex);
	PX_ASSERT(inBoneIndices == NULL || inFormat == RenderDataFormat::USHORT1 || inFormat == RenderDataFormat::USHORT2 || inFormat == RenderDataFormat::USHORT3 || inFormat == RenderDataFormat::USHORT4);

	const uint32_t boneWeightIndex = (uint32_t)format.getBufferIndexFromID(format.getSemanticID(RenderVertexSemantic::BONE_WEIGHT));
	const float* inBoneWeights = (const float*)vertexBuffer.getBufferAndFormat(inFormat, boneWeightIndex);
	PX_ASSERT(inBoneWeights == NULL || inFormat == RenderDataFormat::FLOAT1 || inFormat == RenderDataFormat::FLOAT2 || inFormat == RenderDataFormat::FLOAT3 || inFormat == RenderDataFormat::FLOAT4);

	// Bones per vertex is derived from the bone-weight stream format.
	uint32_t numBonesPerVertex = 0;
	switch (inFormat)
	{
	case RenderDataFormat::FLOAT1:
		numBonesPerVertex = 1;
		break;
	case RenderDataFormat::FLOAT2:
		numBonesPerVertex = 2;
		break;
	case RenderDataFormat::FLOAT3:
		numBonesPerVertex = 3;
		break;
	case RenderDataFormat::FLOAT4:
		numBonesPerVertex = 4;
		break;
	default:
		break;
	}

	PX_ASSERT((inPositions != NULL) == (outPositions != NULL));
	PX_ASSERT((inNormals != NULL) == (outNormals != NULL));
	PX_ASSERT((inTangents != NULL) == (outTangents != NULL));

	if (inBoneWeights == NULL || inBoneIndices == NULL || numBonesPerVertex == 0)
	{
		return;
	}

	// clear all data
	nvidia::intrinsics::memSet(mSubmeshData[submeshIndex].fallbackSkinningMemory, 0, mSubmeshData[submeshIndex].fallbackSkinningMemorySize);

	// Weighted accumulation of each influencing bone's transform per vertex.
	const uint32_t vertexCount = vertexBuffer.getVertexCount();
	for (uint32_t i = 0; i < vertexCount; i++)
	{
		for (uint32_t k = 0; k < numBonesPerVertex; k++)
		{
			const uint32_t boneIndex = inBoneIndices[i * numBonesPerVertex + k];
			PX_ASSERT(boneIndex < mTransforms.size());
			PxMat44& transform = mTransforms[boneIndex];

			const float boneWeight = inBoneWeights[i * numBonesPerVertex + k];
			if (boneWeight > 0.0f)
			{
				if (outPositions != NULL)
				{
					outPositions[i] += transform.transform(inPositions[i]) * boneWeight;
				}

				if (outNormals != NULL)
				{
					outNormals[i] += transform.rotate(inNormals[i]) * boneWeight;
				}

				if (outTangents != NULL)
				{
					outTangents[i] += PxVec4(transform.rotate(inTangents[i].getXYZ()) * boneWeight, 0.0f);
				}
			}
		}
		if (outTangents != NULL)
		{
			// The tangent w (handedness) is copied, not skinned.
			outTangents[i].w = inTangents[i].w;
		}
	}

	mSubmeshData[submeshIndex].fallbackSkinningDirty = true;
}



// Writes the user-provided (or fallback-skinned) position/normal/tangent
// arrays into the submesh's dynamic user vertex buffer.
void ApexRenderMeshActor::writeUserBuffers(uint32_t submeshIndex)
{
	PxVec3* outPositions = mSubmeshData[submeshIndex].userPositions;
	PxVec3* outNormals = mSubmeshData[submeshIndex].userNormals;
	PxVec4* outTangents4 = mSubmeshData[submeshIndex].userTangents4;

	if (outPositions == NULL && outNormals == NULL && outTangents4 == NULL)
	{
		return;
	}

	RenderVertexBufferData dynamicWriteData;
	if (outPositions != NULL)
	{
		dynamicWriteData.setSemanticData(RenderVertexSemantic::POSITION, outPositions, sizeof(PxVec3), RenderDataFormat::FLOAT3);
	}

	if (outNormals != NULL)
	{
		dynamicWriteData.setSemanticData(RenderVertexSemantic::NORMAL, outNormals, sizeof(PxVec3), RenderDataFormat::FLOAT3);
	}

	if (outTangents4)
	{
		dynamicWriteData.setSemanticData(RenderVertexSemantic::TANGENT, outTangents4, sizeof(PxVec4), RenderDataFormat::FLOAT4);
	}

	const uint32_t vertexCount = mRenderMeshAsset->mSubmeshes[submeshIndex]->getVertexBuffer().getVertexCount();

	mSubmeshData[submeshIndex].userDynamicVertexBuffer->writeBuffer(dynamicWriteData, 0, vertexCount);
}



// Debug-draws the tangent space (normal = blue, tangent = red, bitangent =
// green) of every visible vertex, applying the same skinning (none, single
// bone, or weighted multi-bone) the renderer would. When instancing
// transforms are supplied, the frame is drawn once per instance.
void ApexRenderMeshActor::visualizeTangentSpace(RenderDebugInterface& batcher, float normalScale, float tangentScale, float bitangentScale, PxMat33* scaledRotations, PxVec3* translations, uint32_t stride, uint32_t numberOfTransforms) const
{
#ifdef WITHOUT_DEBUG_VISUALIZE
	PX_UNUSED(batcher);
	PX_UNUSED(normalScale);
	PX_UNUSED(tangentScale);
	PX_UNUSED(bitangentScale);
	PX_UNUSED(scaledRotations);
	PX_UNUSED(translations);
	PX_UNUSED(stride);
	PX_UNUSED(numberOfTransforms);
#else

	if (normalScale <= 0.0f && tangentScale <= 0.0f && bitangentScale <= 0.0f)
	{
		return;
	}

	using RENDER_DEBUG::DebugColors;
	uint32_t debugColorRed = RENDER_DEBUG_IFACE(&batcher)->getDebugColor(DebugColors::Red);
	uint32_t debugColorGreen = RENDER_DEBUG_IFACE(&batcher)->getDebugColor(DebugColors::Green);
	uint32_t debugColorBlue = RENDER_DEBUG_IFACE(&batcher)->getDebugColor(DebugColors::Blue);

	RENDER_DEBUG_IFACE(&batcher)->pushRenderState();

	const uint32_t submeshCount = mRenderMeshAsset->getSubmeshCount();
	PX_ASSERT(mSubmeshData.size() == submeshCount);
	for (uint32_t submeshIndex = 0; submeshIndex < submeshCount; ++submeshIndex)
	{
		const PxVec3* positions = NULL;
		const PxVec3* normals = NULL;
		const PxVec3* tangents = NULL;
		const PxVec4* tangents4 = NULL;

		const uint16_t* boneIndices = NULL;
		const float* boneWeights = NULL;

		uint32_t numBonesPerVertex = 0;

		if (mSubmeshData[submeshIndex].userSpecifiedData || mSubmeshData[submeshIndex].fallbackSkinningMemory != NULL)
		{
			// Already-skinned user/fallback data: draw it without re-skinning
			// (numBonesPerVertex stays 0).
			positions = mSubmeshData[submeshIndex].userPositions;
			normals = mSubmeshData[submeshIndex].userNormals;
			tangents4 = mSubmeshData[submeshIndex].userTangents4;
		}
		else
		{
			// Pull the raw asset streams and the skinning info.
			const VertexBuffer& vertexBuffer = mRenderMeshAsset->getSubmesh(submeshIndex).getVertexBuffer();
			const VertexFormat& format = vertexBuffer.getFormat();

			RenderDataFormat::Enum inFormat;
			const uint32_t positionIndex = (uint32_t)format.getBufferIndexFromID(format.getSemanticID(RenderVertexSemantic::POSITION));
			positions = (const PxVec3*)vertexBuffer.getBufferAndFormat(inFormat, positionIndex);
			PX_ASSERT(positions == NULL || inFormat == RenderDataFormat::FLOAT3);

			if (positions != NULL && mSubmeshData[submeshIndex].staticPositionReplacement != NULL)
			{
				positions = mSubmeshData[submeshIndex].staticPositionReplacement;
			}

			const uint32_t normalIndex = (uint32_t)format.getBufferIndexFromID(format.getSemanticID(RenderVertexSemantic::NORMAL));
			normals = (const PxVec3*)vertexBuffer.getBufferAndFormat(inFormat, normalIndex);
			PX_ASSERT(normals == NULL || inFormat == RenderDataFormat::FLOAT3);

			const uint32_t tangentIndex = (uint32_t)format.getBufferIndexFromID(format.getSemanticID(RenderVertexSemantic::TANGENT));
			tangents = (const PxVec3*)vertexBuffer.getBufferAndFormat(inFormat, tangentIndex);
			PX_ASSERT(tangents == NULL || inFormat == RenderDataFormat::FLOAT3 || inFormat == RenderDataFormat::FLOAT4);
			if (inFormat == RenderDataFormat::FLOAT4)
			{
				// FLOAT4 tangents carry handedness in w; reinterpret and use tangents4.
				tangents4 = (const PxVec4*)tangents;
				tangents = NULL;
			}

			const uint32_t boneIndexIndex = (uint32_t)format.getBufferIndexFromID(format.getSemanticID(RenderVertexSemantic::BONE_INDEX));
			boneIndices = (const uint16_t*)vertexBuffer.getBufferAndFormat(inFormat, boneIndexIndex);
			PX_ASSERT(boneIndices == NULL || inFormat == RenderDataFormat::USHORT1 || inFormat == RenderDataFormat::USHORT2 || inFormat == RenderDataFormat::USHORT3 || inFormat == RenderDataFormat::USHORT4);

			switch (inFormat)
			{
			case RenderDataFormat::USHORT1:
				numBonesPerVertex = 1;
				break;
			case RenderDataFormat::USHORT2:
				numBonesPerVertex = 2;
				break;
			case RenderDataFormat::USHORT3:
				numBonesPerVertex = 3;
				break;
			case RenderDataFormat::USHORT4:
				numBonesPerVertex = 4;
				break;
			default:
				break;
			}

			const uint32_t boneWeightIndex = (uint32_t)format.getBufferIndexFromID(format.getSemanticID(RenderVertexSemantic::BONE_WEIGHT));
			boneWeights = (const float*)vertexBuffer.getBufferAndFormat(inFormat, boneWeightIndex);
			PX_ASSERT(boneWeights == NULL || inFormat == RenderDataFormat::FLOAT1 || inFormat == RenderDataFormat::FLOAT2 || inFormat == RenderDataFormat::FLOAT3 || inFormat == RenderDataFormat::FLOAT4);
		}

		const uint32_t partCount = visiblePartCount();
		const uint32_t* visibleParts = getVisibleParts();
		for (uint32_t visiblePartIndex = 0; visiblePartIndex < partCount; visiblePartIndex++)
		{
			const uint32_t partIndex = visibleParts[visiblePartIndex];

			const RenderSubmesh& submesh = mRenderMeshAsset->getSubmesh(submeshIndex);
			const uint32_t vertexStart = submesh.getFirstVertexIndex(partIndex);
			const uint32_t vertexEnd = vertexStart + submesh.getVertexCount(partIndex);

			for (uint32_t i = vertexStart; i < vertexEnd; i++)
			{
				// Compute the skinned position/normal/tangent/bitangent for this vertex.
				PxVec3 position(0.0f), tangent(0.0f), bitangent(0.0f), normal(0.0f);
				if (numBonesPerVertex == 0)
				{
					// Data is already in world/actor space.
					position = positions[i];
					if (normals != NULL)
					{
						normal = normals[i].getNormalized();
					}
					if (tangents4 != NULL)
					{
						tangent = tangents4[i].getXYZ().getNormalized();
						bitangent = normal.cross(tangent) * tangents4[i].w;
					}
					else if (tangents != NULL)
					{
						tangent = tangents[i].getNormalized();
						bitangent = normal.cross(tangent);
					}

				}
				else if (numBonesPerVertex == 1)
				{
					// Rigid single-bone skinning.
					PX_ASSERT(boneIndices != NULL);
					uint32_t boneIndex = 0;
					if (mRenderWithoutSkinning)
					{
						boneIndex = 0;
					}
					else if (mKeepVisibleBonesPacked)
					{
						boneIndex = visiblePartIndex;
					}
					else
					{
						boneIndex = boneIndices[i];
					}

					const PxMat44& tm = mTransforms[boneIndex];
					position = tm.transform(positions[i]);
					if (normals != NULL)
					{
						normal = tm.rotate(normals[i].getNormalized());
					}
					if (tangents4 != NULL)
					{
						tangent = tm.rotate(tangents4[i].getXYZ().getNormalized());
						bitangent = normal.cross(tangent) * tangents4[i].w;
					}
					else if (tangents != NULL)
					{
						tangent = tm.rotate(tangents[i].getNormalized());
						bitangent = normal.cross(tangent);
					}
				}
				else
				{
					// Weighted multi-bone skinning.
					position = tangent = bitangent = normal = PxVec3(0.0f);
					for (uint32_t k = 0; k < numBonesPerVertex; k++)
					{
						const float weight = boneWeights[i * numBonesPerVertex + k];
						if (weight > 0.0f)
						{
							const PxMat44& tm = mTransforms[boneIndices[i * numBonesPerVertex + k]];
							position += tm.transform(positions[i]) * weight;
							if (normals != NULL)
							{
								normal += tm.rotate(normals[i]) * weight;
							}
							if (tangents4 != NULL)
							{
								tangent += tm.rotate(tangents4[i].getXYZ()) * weight;
							}
							else if (tangents != NULL)
							{
								tangent += tm.rotate(tangents[i]) * weight;
							}
						}
					}
					normal.normalize();
					tangent.normalize();
					if (tangents4 != NULL)
					{
						bitangent = normal.cross(tangent) * tangents4[i].w;
					}
					else if (tangents != NULL)
					{
						bitangent = normal.cross(tangent);
					}
				}

				if (numberOfTransforms == 0 || scaledRotations == NULL || translations == NULL)
				{
					// Non-instanced: draw the frame at the skinned position.
					if (!tangent.isZero() && tangentScale > 0.0f)
					{
						RENDER_DEBUG_IFACE(&batcher)->setCurrentColor(debugColorRed);
						RENDER_DEBUG_IFACE(&batcher)->debugLine(position, position + tangent * tangentScale);
					}

					if (!bitangent.isZero() && bitangentScale > 0.0f)
					{
						RENDER_DEBUG_IFACE(&batcher)->setCurrentColor(debugColorGreen);
						RENDER_DEBUG_IFACE(&batcher)->debugLine(position, position + bitangent * bitangentScale);
					}

					if (!normal.isZero() && normalScale > 0.0f)
					{
						RENDER_DEBUG_IFACE(&batcher)->setCurrentColor(debugColorBlue);
						RENDER_DEBUG_IFACE(&batcher)->debugLine(position, position + normal * normalScale);
					}
				}
				else //instancing
				{
					for (uint32_t k = 0; k < numberOfTransforms; k++)
					{
						// Per-instance transforms are laid out with a caller-supplied stride.
						PxMat33& scaledRotation = *(PxMat33*)((uint8_t*)scaledRotations + k*stride);
						PxVec3& translation = *(PxVec3*)((uint8_t*)translations + k*stride);

						PxVec3 newPos = scaledRotation.transform(position) + translation;	//full transform

						PxVec3 newTangent = scaledRotation.transform(tangent);	//without translation
						PxVec3 newBitangent = scaledRotation.transform(bitangent);

						// Normals transform by the inverse-transpose to stay perpendicular under non-uniform scale.
						PxVec3 newNormal = (scaledRotation.getInverse()).getTranspose().transform(normal);

						if (!tangent.isZero() && tangentScale > 0.0f)
						{
							RENDER_DEBUG_IFACE(&batcher)->setCurrentColor(debugColorRed);
							RENDER_DEBUG_IFACE(&batcher)->debugLine(newPos, newPos + newTangent * tangentScale);
						}

						if (!bitangent.isZero() && bitangentScale > 0.0f)
						{
							RENDER_DEBUG_IFACE(&batcher)->setCurrentColor(debugColorGreen);
							RENDER_DEBUG_IFACE(&batcher)->debugLine(newPos, newPos + newBitangent * bitangentScale);
						}

						if (!normal.isZero() && normalScale > 0.0f)
						{
							RENDER_DEBUG_IFACE(&batcher)->setCurrentColor(debugColorBlue);
							RENDER_DEBUG_IFACE(&batcher)->debugLine(newPos, newPos + newNormal * normalScale);
						}
					}
				}
			}
		}

	}

	RENDER_DEBUG_IFACE(&batcher)->popRenderState();
#endif
}




// Default-initializes all per-submesh render state to "no resource / clean".
ApexRenderMeshActor::SubmeshData::SubmeshData() :
	indexBuffer(NULL),
	fallbackSkinningMemory(NULL),
	userDynamicVertexBuffer(NULL),
	instanceBuffer(NULL),
	userPositions(NULL),
	userNormals(NULL),
	userTangents4(NULL),
	staticColorReplacement(NULL),
	staticColorReplacementDirty(false),
	staticPositionReplacement(NULL),
	staticBufferReplacement(NULL),
	dynamicBufferReplacement(NULL),
	fallbackSkinningMemorySize(0),
	visibleTriangleCount(0),
	materialID(INVALID_RESOURCE_ID),
	material(NULL),
	isMaterialPointerValid(false),
	maxBonesPerMaterial(0),
	indexBufferRequestedSize(0),
	userSpecifiedData(false),
	userVertexBufferAlwaysDirty(false),
	userIndexBufferChanged(false),
	fallbackSkinningDirty(false)
{
}



// Frees the fallback-skinning scratch memory; render resources are released
// separately via releaseSubmeshRenderResources()/releaseRenderResources().
ApexRenderMeshActor::SubmeshData::~SubmeshData()
{
	if (fallbackSkinningMemory != NULL)
	{
		PX_FREE(fallbackSkinningMemory);
		fallbackSkinningMemory = NULL;
	}
	fallbackSkinningMemorySize = 0;
}



// Sets the pose of one bone and marks bone poses dirty for re-upload.
void ApexRenderMeshActor::setTM(const PxMat44& tm, uint32_t boneIndex /* = 0 */)
{
	WRITE_ZONE();
	PX_ASSERT(boneIndex < mRenderMeshAsset->getBoneCount());
	mBonePosesDirty = true;
	PxMat44& boneTM = accessTM(boneIndex);
	boneTM.column0 = tm.column0;
	boneTM.column1 = tm.column1;
	boneTM.column2 = tm.column2;
	boneTM.column3 = tm.column3;
}



// Sets the pose of one bone with a per-axis scale applied on top of tm.
void ApexRenderMeshActor::setTM(const PxMat44& tm, const PxVec3& scale, uint32_t boneIndex /* = 0 */)
{
	// Assumes tm is pure rotation. This can allow some optimization.
	WRITE_ZONE();
	PX_ASSERT(boneIndex < mRenderMeshAsset->getBoneCount());
	mBonePosesDirty = true;
	PxMat44& boneTM = accessTM(boneIndex);
	boneTM.column0 = tm.column0;
	boneTM.column1 = tm.column1;
	boneTM.column2 = tm.column2;
	boneTM.column3 = tm.column3;
	boneTM.scale(PxVec4(scale, 1.f));
}


// Sets the previous-frame pose of one bone (used for motion-blur style
// previous-pose bone buffers). Ignored unless the previous-frame buffer is valid.
void ApexRenderMeshActor::setLastFrameTM(const PxMat44& tm, uint32_t boneIndex /* = 0 */)
{
	if (!mPreviousFrameBoneBufferValid)
	{
		return;
	}

	PX_ASSERT(boneIndex < mRenderMeshAsset->getBoneCount());
	PxMat44& boneTM = accessLastFrameTM(boneIndex);
	boneTM.column0 = tm.column0;
	boneTM.column1 = tm.column1;
	boneTM.column2 = tm.column2;
	boneTM.column3 = tm.column3;
}



// Scaled variant of setLastFrameTM; also a no-op without a valid previous-frame buffer.
void ApexRenderMeshActor::setLastFrameTM(const PxMat44& tm, const PxVec3& scale, uint32_t boneIndex /* = 0 */)
{
	if (!mPreviousFrameBoneBufferValid)
	{
		return;
	}

	// Assumes tm is pure rotation. This can allow some optimization.

	PX_ASSERT(boneIndex < mRenderMeshAsset->getBoneCount());
	PxMat44& boneTM = accessLastFrameTM(boneIndex);
	boneTM.column0 = tm.column0;
	boneTM.column1 = tm.column1;
	boneTM.column2 = tm.column2;
	boneTM.column3 = tm.column3;
	boneTM.scale(PxVec4(scale, 1.f));
}


// Sets the skinning mode; out-of-range values are silently ignored.
void ApexRenderMeshActor::setSkinningMode(RenderMeshActorSkinningMode::Enum mode)
{
	if (mode >= RenderMeshActorSkinningMode::Default && mode < RenderMeshActorSkinningMode::Count)
	{
		mSkinningMode = mode;
	}
}

RenderMeshActorSkinningMode::Enum ApexRenderMeshActor::getSkinningMode() const
{
	return mSkinningMode;
}


// Commits API-side visibility changes to the render-side part list when
// visibility buffering is enabled, applying any deferred transform swaps.
// Pass useLock=true to bracket the update with lock/unlockRenderResources().
void ApexRenderMeshActor::syncVisibility(bool useLock)
{
	WRITE_ZONE();
	if (mApiVisibilityChanged && mBufferVisibility)
	{
		if (useLock)
		{
			lockRenderResources();
		}
		mVisiblePartsForRendering.resize(mVisiblePartsForAPI.usedCount());
		memcpy(mVisiblePartsForRendering.begin(), mVisiblePartsForAPI.usedIndices(), mVisiblePartsForAPI.usedCount()*sizeof(uint32_t));
		// Apply deferred transform swaps; each entry packs two indices as (hi16, lo16).
		const uint32_t swapBufferSize = mTMSwapBuffer.size();
		for (uint32_t i = 0; i < swapBufferSize; ++i)
		{
			const uint32_t swapIndices = mTMSwapBuffer[i];
			nvidia::swap(mTransforms[swapIndices >> 16], mTransforms[swapIndices & 0xFFFF]);
		}
		mTMSwapBuffer.reset();
		mPartVisibilityChanged = true;
		mApiVisibilityChanged = false;
		if (useLock)
		{
			unlockRenderResources();
		}
	}
}

// TODO - LRR - update part bounds actor bounds to work with >1 bones per part
// Recomputes mRenderBounds as the union of each visible part's transformed bounds.
void ApexRenderMeshActor::updateBounds()
{
	mRenderBounds.setEmpty();
	const uint32_t* visiblePartIndexPtr = mVisiblePartsForAPI.usedIndices();
	const uint32_t* visiblePartIndexPtrStop = visiblePartIndexPtr + mVisiblePartsForAPI.usedCount();
	if (mTransforms.size() < mRenderMeshAsset->getPartCount())
	{
		// BRG - for static meshes. We should create a mapping for more generality.
		PX_ASSERT(mTransforms.size() == 1);
		PxMat44& tm = accessTM();
		while (visiblePartIndexPtr < visiblePartIndexPtrStop)
		{
			const uint32_t partIndex = *visiblePartIndexPtr++;
			PxBounds3 partBounds = mRenderMeshAsset->getBounds(partIndex);
			partBounds = PxBounds3::basisExtent(tm.transform(partBounds.getCenter()), PxMat33(tm.getBasis(0), tm.getBasis(1), tm.getBasis(2)), partBounds.getExtents());
			mRenderBounds.include(partBounds);
		}
	}
	else
	{
		// One transform per part: transform each part's bounds by its own TM.
		while (visiblePartIndexPtr < visiblePartIndexPtrStop)
		{
			const uint32_t partIndex = *visiblePartIndexPtr++;
			PxBounds3 partBounds = mRenderMeshAsset->getBounds(partIndex);
			PxMat44& tm = accessTM(partIndex);
			partBounds = PxBounds3::basisExtent(tm.transform(partBounds.getCenter()), PxMat33(tm.getBasis(0), tm.getBasis(1), tm.getBasis(2)), partBounds.getExtents());
			mRenderBounds.include(partBounds);
		}
	}
}

// Propagates the current instance-buffer range to every render resource of the submesh.
void ApexRenderMeshActor::updateInstances(uint32_t submeshIndex)
{
	PX_PROFILE_ZONE("ApexRenderMesh::updateInstances", GetInternalApexSDK()->getContextId());

	for (uint32_t i = 0; i < mSubmeshData[submeshIndex].renderResources.size(); ++i)
	{
		UserRenderResource* renderResource = mSubmeshData[submeshIndex].renderResources[i].resource;
		renderResource->setInstanceBufferRange(mInstanceOffset, mInstanceCount);
	}
}

// When set, render resources are torn down as soon as nothing is visible.
void ApexRenderMeshActor::setReleaseResourcesIfNothingToRender(bool value)
{
	WRITE_ZONE();
	mReleaseResourcesIfNothingToRender = value;
}

// Enables/disables buffered (deferred) visibility updates.
void ApexRenderMeshActor::setBufferVisibility(bool bufferVisibility)
{
	WRITE_ZONE();
	mBufferVisibility = bufferVisibility;
	mPartVisibilityChanged = true;
}

// Replaces the material of one submesh via the named resource provider;
// invalidates the cached material pointer and bone limit.
void ApexRenderMeshActor::setOverrideMaterial(uint32_t index, const char* overrideMaterialName)
{
	WRITE_ZONE();
	ResourceProviderIntl* nrp = GetInternalApexSDK()->getInternalResourceProvider();
	if (nrp != NULL && index < mSubmeshData.size())
	{
		// do create before release, so we don't release the resource if the newID is the same as the old
		ResID materialNS = GetInternalApexSDK()->getMaterialNameSpace();

		ResID newID = nrp->createResource(materialNS, overrideMaterialName);
		nrp->releaseResource(mSubmeshData[index].materialID);

		mSubmeshData[index].materialID = newID;
		mSubmeshData[index].material = NULL;
		mSubmeshData[index].isMaterialPointerValid = false;
		mSubmeshData[index].maxBonesPerMaterial = 0;

		if (!GetInternalApexSDK()->getRMALoadMaterialsLazily())
		{
			loadMaterial(mSubmeshData[index]);
		}
	}
}

// Need an inverse
// Inverts a rigid (rotation + translation) PxMat44 by inverting its 3x3 basis
// and back-transforming the position.
PX_INLINE PxMat44 inverse(const PxMat44& m)
{
	const PxMat33 invM33 = PxMat33(m.getBasis(0), m.getBasis(1), m.getBasis(2)).getInverse();
	return PxMat44(invM33, -(invM33.transform(m.getPosition())));
}

// Casts a world-space ray against the actor's (optionally skinned) triangles.
// flags select visible and/or invisible parts; winding selects back-face
// culling; partIndex >= 0 restricts the test to a single part. Returns true
// and fills hitData on the earliest intersection.
// NOTE: this function continues past the end of this file chunk.
bool ApexRenderMeshActor::rayCast(RenderMeshActorRaycastHitData& hitData,
                                  const PxVec3& worldOrig, const PxVec3& worldDisp,
                                  RenderMeshActorRaycastFlags::Enum flags,
                                  RenderCullMode::Enum winding,
                                  int32_t partIndex) const
{
	READ_ZONE();
	PX_ASSERT(mRenderMeshAsset != NULL);
	PX_ASSERT(worldOrig.isFinite() && worldDisp.isFinite() && !worldDisp.isZero());

	// Come up with a part range which matches the flags, and if partIndex > 0, ensure it lies within the part range
	uint32_t rankStart = (flags & RenderMeshActorRaycastFlags::VISIBLE_PARTS) != 0 ? 0 : mVisiblePartsForAPI.usedCount();
	uint32_t rankStop = (flags & RenderMeshActorRaycastFlags::INVISIBLE_PARTS) != 0 ? mRenderMeshAsset->getPartCount() : mVisiblePartsForAPI.usedCount();
	// We use the visibility index bank, since it holds visible and invisible parts contiguously
	if (rankStart >= rankStop)
	{
		return false;	// No parts selected for raycast
	}
	if (partIndex >= 0)
	{
		const uint32_t partRank = mVisiblePartsForAPI.getRank((uint32_t)partIndex);
		if (partRank < rankStart || partRank >= rankStop)
		{
			return false;
		}
		rankStart = partRank;
		rankStop = partRank + 1;
	}
	const uint32_t* partIndices = mVisiblePartsForAPI.usedIndices();

	// Allocate an inverse transform and local ray for each part and calculate them
	const uint32_t tmCount = mRenderWithoutSkinning ? 1 : rankStop - rankStart;	// Only need one transform if not skinning

	PX_ALLOCA(invTMs, PxMat44, tmCount);
	PX_ALLOCA(localOrigs, PxVec3, tmCount);
	PX_ALLOCA(localDisps, PxVec3, tmCount);

	if (mRenderWithoutSkinning)
	{
		invTMs[0] = inverse(mTransforms[0]);
		localOrigs[0] = invTMs[0].transform(worldOrig);
		localDisps[0] = invTMs[0].rotate(worldDisp);
	}
	else
	{
		for (uint32_t partRank = rankStart; partRank < rankStop; ++partRank)
		{
			invTMs[partRank - rankStart] = inverse(mTransforms[partRank - rankStart]);
			localOrigs[partRank - rankStart] = invTMs[partRank - rankStart].transform(worldOrig);
			localDisps[partRank - rankStart] = invTMs[partRank - rankStart].rotate(worldDisp);
		}
	}

	// Side "discriminant" - used to reduce branches in inner loops
	const float disc = winding == RenderCullMode::CLOCKWISE ? 1.0f : (winding == RenderCullMode::COUNTER_CLOCKWISE ? -1.0f : 0.0f);

	// Keeping hit time as a fraction
	float tNum = -1.0f;
	float tDen = 0.0f;

	// To do: handle multiple-weighted vertices, and other cases where the number of parts does not equal the number of bones (besides non-skinned, which we do handle)
//	if (single-weighted vertices)
	{
		// Traverse the selected parts:
		const uint32_t submeshCount = mRenderMeshAsset->getSubmeshCount();
		for (uint32_t submeshIndex = 0; submeshIndex < submeshCount; ++submeshIndex)
		{
			const RenderSubmesh& submesh = mRenderMeshAsset->getSubmesh(submeshIndex);
			const VertexBuffer& vertexBuffer = submesh.getVertexBuffer();
			const VertexFormat& vertexFormat = vertexBuffer.getFormat();
			RenderDataFormat::Enum positionFormat;
			const PxVec3* vertexPositions = (const PxVec3*)vertexBuffer.getBufferAndFormat(positionFormat, (uint32_t)vertexFormat.getBufferIndexFromID(vertexFormat.getSemanticID(nvidia::RenderVertexSemantic::POSITION)));
			if (positionFormat != RenderDataFormat::FLOAT3)
			{
				continue;	// Not handling any position format other than FLOAT3
			}
			for (uint32_t partRank = rankStart; partRank < rankStop; ++partRank)
			{
				const uint32_t cachedLocalIndex = mRenderWithoutSkinning ? 0 : partRank - rankStart;
				const PxVec3& localOrig = localOrigs[cachedLocalIndex];
				const PxVec3& localDisp = localDisps[cachedLocalIndex];
				const uint32_t partIndex = partIndices[partRank];
				const uint32_t* ib = submesh.getIndexBuffer(partIndex);
				const uint32_t* ibStop = ib + submesh.getIndexCount(partIndex);
				PX_ASSERT(submesh.getIndexCount(partIndex) % 3 == 0);
				for (; ib < ibStop; ib += 3)
				{
					// Work in the ray-origin-relative frame for numerical stability.
					const PxVec3 offsetVertices[3] = { vertexPositions[ib[0]] - localOrig, vertexPositions[ib[1]] - localOrig, vertexPositions[ib[2]] - localOrig };
					const PxVec3 triangleNormal = (offsetVertices[1] - offsetVertices[0]).cross(offsetVertices[2] - offsetVertices[0]);
					const float den = triangleNormal.dot(localDisp);
					if (den > -PX_EPS_F32 * PX_EPS_F32)
					{
						// Ray misses plane (or is too near parallel)
						continue;
					}
					const float sides[3] = { (offsetVertices[0].cross(offsetVertices[1])).dot(localDisp), (offsetVertices[1].cross(offsetVertices[2])).dot(localDisp), (offsetVertices[2].cross(offsetVertices[0])).dot(localDisp) };
					if ((int)(sides[0]*disc > 0.0f) | (int)(sides[1]*disc > 0.0f) | (int)(sides[2]*disc > 0.0f))
					{
						// Ray misses triangle
						continue;
					}
					// Ray has hit the triangle; calculate time of intersection
					const float num = offsetVertices[0].dot(triangleNormal);
					// Since den and tDen both have the same (negative) sign, this is equivalent to : if (num/den < tNum/tDen)
					if (num * tDen < tNum * den)
					{
						// This intersection is earliest
						tNum = num;
						tDen = den;
						hitData.partIndex = partIndex;
						hitData.submeshIndex = submeshIndex;
						hitData.vertexIndices[0] = ib[0];
						hitData.vertexIndices[1] = ib[1];
						hitData.vertexIndices[2] = ib[2];
					}
				}
			}
		}

		if (tDen == 0.0f)
		{
			// No intersection found
			return false;
		}

		// Found a triangle.  Fill in hit data
		hitData.time = tNum / tDen;

		// See if normal, tangent, or binormal can be found
		const RenderSubmesh& submesh = mRenderMeshAsset->getSubmesh(hitData.submeshIndex);
		const VertexBuffer& vertexBuffer = submesh.getVertexBuffer();
		const VertexFormat& vertexFormat = vertexBuffer.getFormat();

		const int32_t normalBufferIndex = vertexFormat.getBufferIndexFromID(vertexFormat.getSemanticID(nvidia::RenderVertexSemantic::NORMAL));
		const int32_t tangentBufferIndex = vertexFormat.getBufferIndexFromID(vertexFormat.getSemanticID(nvidia::RenderVertexSemantic::TANGENT));
		const int32_t binormalBufferIndex = vertexFormat.getBufferIndexFromID(vertexFormat.getSemanticID(nvidia::RenderVertexSemantic::BINORMAL));

		ExplicitRenderTriangle triangle;
		const bool haveNormal = vertexBuffer.getBufferData(&triangle.vertices[0].normal, nvidia::RenderDataFormat::FLOAT3, 0, (uint32_t)normalBufferIndex, hitData.vertexIndices[0], 1);
		const bool haveTangent = vertexBuffer.getBufferData(&triangle.vertices[0].tangent, nvidia::RenderDataFormat::FLOAT3, 0, (uint32_t)tangentBufferIndex, hitData.vertexIndices[0], 1);
		const bool haveBinormal = vertexBuffer.getBufferData(&triangle.vertices[0].binormal, nvidia::RenderDataFormat::FLOAT3, 0, (uint32_t)binormalBufferIndex, hitData.vertexIndices[0], 1);

		uint32_t fieldMask = 0;

		if (haveNormal)
		{
			vertexBuffer.getBufferData(&triangle.vertices[1].normal, nvidia::RenderDataFormat::FLOAT3, 0, (uint32_t)normalBufferIndex, hitData.vertexIndices[1], 1);
			vertexBuffer.getBufferData(&triangle.vertices[2].normal, nvidia::RenderDataFormat::FLOAT3, 0, (uint32_t)normalBufferIndex, hitData.vertexIndices[2], 1);
			fieldMask |= 1 << TriangleFrame::Normal_x | 1 << TriangleFrame::Normal_y | 1 << TriangleFrame::Normal_z;
		}
		else
		{
			hitData.normal = PxVec3(0.0f);
		}

		if (haveTangent)
		{
			vertexBuffer.getBufferData(&triangle.vertices[1].tangent, nvidia::RenderDataFormat::FLOAT3, 0, (uint32_t)tangentBufferIndex, hitData.vertexIndices[1], 1);
			vertexBuffer.getBufferData(&triangle.vertices[2].tangent, nvidia::RenderDataFormat::FLOAT3, 0, (uint32_t)tangentBufferIndex, hitData.vertexIndices[2], 1);
			fieldMask |= 1 << TriangleFrame::Tangent_x | 1 << TriangleFrame::Tangent_y | 1 << TriangleFrame::Tangent_z;
		}
		else
		{
			hitData.tangent = PxVec3(0.0f);
		}

		if (haveBinormal)
		{
			vertexBuffer.getBufferData(&triangle.vertices[1].binormal, nvidia::RenderDataFormat::FLOAT3, 0, (uint32_t)binormalBufferIndex, hitData.vertexIndices[1], 1);
			vertexBuffer.getBufferData(&triangle.vertices[2].binormal, nvidia::RenderDataFormat::FLOAT3, 0, (uint32_t)binormalBufferIndex, hitData.vertexIndices[2], 1);
			fieldMask |= 1 << TriangleFrame::Binormal_x | 1 << TriangleFrame::Binormal_y | 1 << TriangleFrame::Binormal_z;
		}
		else
		{
			hitData.binormal = PxVec3(0.0f);
		}

		if (fieldMask != 0)
		{
			// We know the positions are in the correct format from the check in the raycast
			const PxVec3* vertexPositions = (const PxVec3*)vertexBuffer.getBuffer((uint32_t)vertexFormat.getBufferIndexFromID(vertexFormat.getSemanticID(nvidia::RenderVertexSemantic::POSITION)));
			triangle.vertices[0].position = vertexPositions[hitData.vertexIndices[0]];
			triangle.vertices[1].position = vertexPositions[hitData.vertexIndices[1]];
			triangle.vertices[2].position = vertexPositions[hitData.vertexIndices[2]];
			TriangleFrame frame(triangle, fieldMask);

			// Find the local hit position
			const uint32_t partRank = mVisiblePartsForAPI.getRank(hitData.partIndex);
			const uint32_t cachedLocalIndex = mRenderWithoutSkinning ? 0 : partRank - rankStart;
			const PxMat44& tm = mTransforms[mRenderWithoutSkinning ?
0 : hitData.partIndex]; + + Vertex v; + v.position = localOrigs[cachedLocalIndex] + hitData.time * localDisps[cachedLocalIndex]; + frame.interpolateVertexData(v); + if (haveNormal) + { + hitData.normal = invTMs[cachedLocalIndex].getTranspose().rotate(v.normal); + hitData.normal.normalize(); + } + + if (haveTangent) + { + hitData.tangent = tm.rotate(v.tangent); + hitData.tangent.normalize(); + } + + if (haveBinormal) + { + hitData.binormal = tm.rotate(v.binormal); + hitData.binormal.normalize(); + } + else + { + if (haveNormal && haveTangent) + { + hitData.binormal = hitData.normal.cross(hitData.tangent); + hitData.binormal.normalize(); + } + } + } + + return true; + } +} + +void ApexRenderMeshActor::visualize(RenderDebugInterface& batcher, nvidia::apex::DebugRenderParams* debugParams, PxMat33* scaledRotations, PxVec3* translations, uint32_t stride, uint32_t numberOfTransforms) const +{ +#ifdef WITHOUT_DEBUG_VISUALIZE + PX_UNUSED(batcher); + PX_UNUSED(debugParams); + PX_UNUSED(scaledRotations); + PX_UNUSED(translations); + PX_UNUSED(stride); + PX_UNUSED(numberOfTransforms); +#else + PX_ASSERT(&batcher != NULL); + if ( !mEnableDebugVisualization ) return; + + // This implementation seems to work for destruction and clothing! + const float scale = debugParams->Scale; + visualizeTangentSpace(batcher, debugParams->RenderNormals * scale, debugParams->RenderTangents * scale, debugParams->RenderBitangents * scale, scaledRotations, translations, stride, numberOfTransforms); +#endif +} + +} // namespace apex +} // namespace nvidia diff --git a/APEX_1.4/framework/src/ApexRenderMeshAsset.cpp b/APEX_1.4/framework/src/ApexRenderMeshAsset.cpp new file mode 100644 index 00000000..6a199fa5 --- /dev/null +++ b/APEX_1.4/framework/src/ApexRenderMeshAsset.cpp @@ -0,0 +1,495 @@ +/* + * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved. 
+ * + * NVIDIA CORPORATION and its licensors retain all intellectual property + * and proprietary rights in and to this software, related documentation + * and any modifications thereto. Any use, reproduction, disclosure or + * distribution of this software and related documentation without an express + * license agreement from NVIDIA CORPORATION is strictly prohibited. + */ + + +#include "ApexRenderMeshAsset.h" +#include "ApexRenderMeshActor.h" +#include "ApexSharedUtils.h" + +#include "ApexSDKIntl.h" +#include "ResourceProviderIntl.h" + +namespace nvidia +{ +namespace apex +{ + + +// ApexRenderMeshAsset functions + +ApexRenderMeshAsset::ApexRenderMeshAsset(ResourceList& list, const char* name, AuthObjTypeID ownerModuleID) : + mOwnerModuleID(ownerModuleID), + mParams(NULL), + mOpaqueMesh(NULL), + mName(name) +{ + list.add(*this); +} + + + +ApexRenderMeshAsset::~ApexRenderMeshAsset() +{ + // this should have been cleared in releaseActor() + PX_ASSERT(mRuntimeSubmeshData.empty()); + + // Release named resources + ResourceProviderIntl* resourceProvider = GetInternalApexSDK()->getInternalResourceProvider(); + for (uint32_t i = 0 ; i < mMaterialIDs.size() ; i++) + { + resourceProvider->releaseResource(mMaterialIDs[i]); + } + + setSubmeshCount(0); +} + + + +void ApexRenderMeshAsset::destroy() +{ + for (uint32_t i = 0; i < mSubmeshes.size(); i++) + { + mSubmeshes[i]->setParams(NULL, NULL); + } + + if (mParams != NULL) + { + if (!mParams->isReferenced) + { + mParams->destroy(); + } + mParams = NULL; + } + + // this is necessary so that all the actors will be destroyed before the destructor runs + mActorList.clear(); + + delete this; +} + + + +bool ApexRenderMeshAsset::createFromParameters(RenderMeshAssetParameters* params) +{ + mParams = params; + + NvParameterized::Handle handle(*mParams); + uint32_t size; + + // submeshes + mParams->getParameterHandle("submeshes", handle); + mParams->getArraySize(handle, (int32_t&)size); + setSubmeshCount(size); + for (uint32_t i = 0; i 
< size; ++i) + { + NvParameterized::Handle elementHandle(*mParams); + handle.getChildHandle((int32_t)i, elementHandle); + NvParameterized::Interface* submeshParams = NULL; + mParams->getParamRef(elementHandle, submeshParams); + + mSubmeshes[i]->setParams(static_cast<SubmeshParameters*>(submeshParams), NULL); + } + + createLocalData(); + + return true; +} + +// Load all of our named resources (that consists of materials) if they are +// not registered in the NRP +uint32_t ApexRenderMeshAsset::forceLoadAssets() +{ + uint32_t assetLoadedCount = 0; + ResourceProviderIntl* nrp = GetInternalApexSDK()->getInternalResourceProvider(); + ResID materialNS = GetInternalApexSDK()->getMaterialNameSpace(); + + for (uint32_t i = 0; i < mMaterialIDs.size(); i++) + { + + if (!nrp->checkResource(materialNS, mParams->materialNames.buf[i])) + { + /* we know for SURE that createResource() has already been called, so just getResource() */ + nrp->getResource(mMaterialIDs[i]); + assetLoadedCount++; + } + } + + return assetLoadedCount; +} + + +RenderMeshActor* ApexRenderMeshAsset::createActor(const RenderMeshActorDesc& desc) +{ + return PX_NEW(ApexRenderMeshActor)(desc, *this, mActorList); +} + + + +void ApexRenderMeshAsset::releaseActor(RenderMeshActor& renderMeshActor) +{ + ApexRenderMeshActor* actor = DYNAMIC_CAST(ApexRenderMeshActor*)(&renderMeshActor); + actor->destroy(); + + // Last one out turns out the lights + if (!mActorList.getSize()) + { + UserRenderResourceManager* rrm = GetInternalApexSDK()->getUserRenderResourceManager(); + for (uint32_t i = 0 ; i < mRuntimeSubmeshData.size() ; i++) + { + if (mRuntimeSubmeshData[i].staticVertexBuffer != NULL) + { + rrm->releaseVertexBuffer(*mRuntimeSubmeshData[i].staticVertexBuffer); + mRuntimeSubmeshData[i].staticVertexBuffer = NULL; + } + if (mRuntimeSubmeshData[i].skinningVertexBuffer != NULL) + { + rrm->releaseVertexBuffer(*mRuntimeSubmeshData[i].skinningVertexBuffer); + mRuntimeSubmeshData[i].skinningVertexBuffer = NULL; + } + if 
(mRuntimeSubmeshData[i].dynamicVertexBuffer != NULL) + { + rrm->releaseVertexBuffer(*mRuntimeSubmeshData[i].dynamicVertexBuffer); + mRuntimeSubmeshData[i].dynamicVertexBuffer = NULL; + } + } + mRuntimeSubmeshData.clear(); + } +} + + + +void ApexRenderMeshAsset::permuteBoneIndices(const physx::Array<int32_t>& old2new) +{ + int32_t maxBoneIndex = -1; + for (uint32_t i = 0; i < mSubmeshes.size(); i++) + { + RenderDataFormat::Enum format; + const VertexBuffer& vb = mSubmeshes[i]->getVertexBuffer(); + const VertexFormat& vf = vb.getFormat(); + uint32_t bufferIndex = (uint32_t)vf.getBufferIndexFromID(vf.getSemanticID(nvidia::RenderVertexSemantic::BONE_INDEX)); + uint16_t* boneIndices = (uint16_t*)vb.getBufferAndFormat(format, bufferIndex); + if (boneIndices == NULL) + { + continue; + } + + uint32_t numBonesPerVertex = 0; + switch (format) + { + case RenderDataFormat::USHORT1: + numBonesPerVertex = 1; + break; + case RenderDataFormat::USHORT2: + numBonesPerVertex = 2; + break; + case RenderDataFormat::USHORT3: + numBonesPerVertex = 3; + break; + case RenderDataFormat::USHORT4: + numBonesPerVertex = 4; + break; + default: + continue; + } + + const uint32_t numVertices = vb.getVertexCount(); + for (uint32_t j = 0; j < numVertices; j++) + { + for (uint32_t k = 0; k < numBonesPerVertex; k++) + { + uint16_t& index = boneIndices[j * numBonesPerVertex + k]; + PX_ASSERT(old2new[index] >= 0); + PX_ASSERT(old2new[index] <= 0xffff); + index = (uint16_t)old2new[index]; + maxBoneIndex = PxMax(maxBoneIndex, (int32_t)index); + } + } + } + mParams->boneCount = (uint32_t)maxBoneIndex + 1; +} + + +void ApexRenderMeshAsset::reverseWinding() +{ + for (uint32_t submeshId = 0; submeshId < mSubmeshes.size(); submeshId++) + { + uint32_t numIndices = mSubmeshes[submeshId]->getTotalIndexCount(); + // assume that all of the parts are contiguous + uint32_t* indices = mSubmeshes[submeshId]->getIndexBufferWritable(0); + for (uint32_t i = 0; i < numIndices; i += 3) + { + 
nvidia::swap<uint32_t>(indices[i + 1], indices[i + 2]); + } + } + + updatePartBounds(); +} + +void ApexRenderMeshAsset::applyTransformation(const PxMat44& transformation, float scale) +{ + for (uint32_t submeshId = 0; submeshId < mSubmeshes.size(); submeshId++) + { + VertexBufferIntl& vb = mSubmeshes[submeshId]->getVertexBufferWritable(); + vb.applyScale(scale); + vb.applyTransformation(transformation); + } + + // if the transform will mirror the mesh, change the triangle winding in the ib + + const PxMat33 tm(transformation.column0.getXYZ(), + transformation.column1.getXYZ(), + transformation.column2.getXYZ()); + + if (tm.getDeterminant() * scale < 0.0f) + { + reverseWinding(); + } + else + { + updatePartBounds(); + } +} + + + +void ApexRenderMeshAsset::applyScale(float scale) +{ + for (uint32_t submeshId = 0; submeshId < mSubmeshes.size(); submeshId++) + { + VertexBufferIntl& vb = mSubmeshes[submeshId]->getVertexBufferWritable(); + vb.applyScale(scale); + } + + for (int partId = 0; partId < mParams->partBounds.arraySizes[0]; partId++) + { + PX_ASSERT(!mParams->partBounds.buf[partId].isEmpty()); + mParams->partBounds.buf[partId].minimum *= scale; + mParams->partBounds.buf[partId].maximum *= scale; + } + + if (scale < 0.0f) + { + for (int partId = 0; partId < mParams->partBounds.arraySizes[0]; partId++) + { + PX_ASSERT(!mParams->partBounds.buf[partId].isEmpty()); + nvidia::swap(mParams->partBounds.buf[partId].minimum, mParams->partBounds.buf[partId].maximum); + } + } +} + + + +bool ApexRenderMeshAsset::mergeBinormalsIntoTangents() +{ + bool changed = false; + for (uint32_t submeshId = 0; submeshId < mSubmeshes.size(); submeshId++) + { + VertexBufferIntl& vb = mSubmeshes[submeshId]->getVertexBufferWritable(); + changed |= vb.mergeBinormalsIntoTangents(); + } + return changed; +} + + + +TextureUVOrigin::Enum ApexRenderMeshAsset::getTextureUVOrigin() const +{ + PX_ASSERT(mParams->textureUVOrigin < 4); + return 
static_cast<TextureUVOrigin::Enum>(mParams->textureUVOrigin); +} + + + +void ApexRenderMeshAsset::createLocalData() +{ + mMaterialIDs.resize((uint32_t)mParams->materialNames.arraySizes[0]); + ResourceProviderIntl* resourceProvider = GetInternalApexSDK()->getInternalResourceProvider(); + ResID materialNS = GetInternalApexSDK()->getMaterialNameSpace(); + ResID customVBNS = GetInternalApexSDK()->getCustomVBNameSpace(); + + + // Resolve material names using the NRP... + for (uint32_t i = 0; i < (uint32_t)mParams->materialNames.arraySizes[0]; ++i) + { + if (resourceProvider) + { + mMaterialIDs[i] = resourceProvider->createResource(materialNS, mParams->materialNames.buf[i]); + } + else + { + mMaterialIDs[i] = INVALID_RESOURCE_ID; + } + } + + // Resolve custom vertex buffer semantics using the NRP... + mRuntimeCustomSubmeshData.resize(getSubmeshCount()); + //JPB memset(mRuntimeCustomSubmeshData.begin(), 0, sizeof(CustomSubmeshData) * mRuntimeCustomSubmeshData.size()); + + for (uint32_t i = 0; i < getSubmeshCount(); ++i) + { + const VertexFormat& fmt = getSubmesh(i).getVertexBuffer().getFormat(); + + mRuntimeCustomSubmeshData[i].customBufferFormats.resize(fmt.getCustomBufferCount()); + mRuntimeCustomSubmeshData[i].customBufferVoidPtrs.resize(fmt.getCustomBufferCount()); + + uint32_t customBufferIndex = 0; + for (uint32_t j = 0; j < fmt.getBufferCount(); ++j) + { + if (fmt.getBufferSemantic(j) != RenderVertexSemantic::CUSTOM) + { + continue; + } + RenderDataFormat::Enum f = fmt.getBufferFormat(j); + const char* name = fmt.getBufferName(j); + + mRuntimeCustomSubmeshData[i].customBufferFormats[customBufferIndex] = f; + mRuntimeCustomSubmeshData[i].customBufferVoidPtrs[customBufferIndex] = 0; + + if (resourceProvider) + { + ResID id = resourceProvider->createResource(customVBNS, name, true); + mRuntimeCustomSubmeshData[i].customBufferVoidPtrs[customBufferIndex] = GetInternalApexSDK()->getInternalResourceProvider()->getResource(id); + } + + ++customBufferIndex; + } + } + + // 
find the bone count + // LRR - required for new deserialize path + // PH - mBoneCount is now serialized + if (mParams->boneCount == 0) + { + for (uint32_t i = 0; i < getSubmeshCount(); i++) + { + + RenderDataFormat::Enum format; + const VertexBuffer& vb = mSubmeshes[i]->getVertexBuffer(); + const VertexFormat& vf = vb.getFormat(); + uint32_t bufferIndex = (uint32_t)vf.getBufferIndexFromID(vf.getSemanticID(nvidia::RenderVertexSemantic::BONE_INDEX)); + uint16_t* boneIndices = (uint16_t*)vb.getBufferAndFormat(format, bufferIndex); + + if (boneIndices == NULL) + { + continue; + } + + if (!vertexSemanticFormatValid(RenderVertexSemantic::BONE_INDEX, format)) + { + continue; + } + + const uint32_t bonesPerVert = vertexSemanticFormatElementCount(RenderVertexSemantic::BONE_INDEX, format); + + PX_ASSERT(format == RenderDataFormat::USHORT1 || format == RenderDataFormat::USHORT2 || format == RenderDataFormat::USHORT3 || format == RenderDataFormat::USHORT4); + + const uint32_t numVertices = vb.getVertexCount(); + for (uint32_t v = 0; v < numVertices; v++) + { + for (uint32_t b = 0; b < bonesPerVert; b++) + { + mParams->boneCount = PxMax(mParams->boneCount, (uint32_t)(boneIndices[v * bonesPerVert + b] + 1)); + } + } + } + } + + // PH - have one bone at all times, if it's just one, it is used as current pose (see ApexRenderMeshActor::dispatchRenderResources) + if (mParams->boneCount == 0) + { + mParams->boneCount = 1; + } +} + +void ApexRenderMeshAsset::getStats(RenderMeshAssetStats& stats) const +{ + stats.totalBytes = sizeof(ApexRenderMeshAsset); + + for (int i = 0; i < mParams->materialNames.arraySizes[0]; ++i) + { + stats.totalBytes += (uint32_t) strlen(mParams->materialNames.buf[i]) + 1; + } + + stats.totalBytes += mParams->partBounds.arraySizes[0] * sizeof(PxBounds3); + stats.totalBytes += mName.len() + 1; + + stats.submeshCount = mSubmeshes.size(); + stats.partCount = (uint32_t)mParams->partBounds.arraySizes[0]; + stats.vertexCount = 0; + stats.indexCount = 0; + 
stats.vertexBufferBytes = 0; + stats.indexBufferBytes = 0; + + for (uint32_t i = 0; i < mSubmeshes.size(); ++i) + { + const ApexRenderSubmesh& submesh = *mSubmeshes[i]; + + submesh.addStats(stats); + } +} + + +void ApexRenderMeshAsset::updatePartBounds() +{ + for (int i = 0; i < mParams->partBounds.arraySizes[0]; i++) + { + mParams->partBounds.buf[i].setEmpty(); + } + + for (uint32_t i = 0; i < mSubmeshes.size(); i++) + { + const uint32_t* part = mSubmeshes[i]->mParams->vertexPartition.buf; + + RenderDataFormat::Enum format; + const VertexBuffer& vb = mSubmeshes[i]->getVertexBuffer(); + const VertexFormat& vf = vb.getFormat(); + uint32_t bufferIndex = (uint32_t)vf.getBufferIndexFromID(vf.getSemanticID(nvidia::RenderVertexSemantic::POSITION)); + PxVec3* positions = (PxVec3*)vb.getBufferAndFormat(format, bufferIndex); + if (positions == NULL) + { + continue; + } + if (format != RenderDataFormat::FLOAT3) + { + continue; + } + + for (int p = 0; p < mParams->partBounds.arraySizes[0]; p++) + { + const uint32_t start = part[p]; + const uint32_t end = part[p + 1]; + for (uint32_t v = start; v < end; v++) + { + mParams->partBounds.buf[p].include(positions[v]); + } + } + } +} + +void ApexRenderMeshAsset::setSubmeshCount(uint32_t submeshCount) +{ + const uint32_t oldSize = mSubmeshes.size(); + + for (uint32_t i = oldSize; i-- > submeshCount;) + { + PX_DELETE(mSubmeshes[i]); + } + + mSubmeshes.resize(submeshCount); + + for (uint32_t i = oldSize; i < submeshCount; ++i) + { + mSubmeshes[i] = PX_NEW(ApexRenderSubmesh); + } +} + + +} +} // end namespace nvidia::apex diff --git a/APEX_1.4/framework/src/ApexRenderMeshAssetAuthoring.cpp b/APEX_1.4/framework/src/ApexRenderMeshAssetAuthoring.cpp new file mode 100644 index 00000000..1ccc32f0 --- /dev/null +++ b/APEX_1.4/framework/src/ApexRenderMeshAssetAuthoring.cpp @@ -0,0 +1,650 @@ +/* + * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved. 
+ * + * NVIDIA CORPORATION and its licensors retain all intellectual property + * and proprietary rights in and to this software, related documentation + * and any modifications thereto. Any use, reproduction, disclosure or + * distribution of this software and related documentation without an express + * license agreement from NVIDIA CORPORATION is strictly prohibited. + */ + +#include "PsArray.h" +#include "ApexRenderMeshAssetAuthoring.h" +#include "ApexRenderMeshActor.h" +#include "ApexSharedUtils.h" +#include "ApexCustomBufferIterator.h" +#include "ApexUsingNamespace.h" +#include "ApexSDKIntl.h" +#include "ResourceProviderIntl.h" + +#include "PsSort.h" + +#ifndef WITHOUT_APEX_AUTHORING + +namespace nvidia +{ +namespace apex +{ + + +PX_INLINE bool PxVec3equals(const PxVec3& a, const PxVec3& v, float epsilon) +{ + return + PxEquals(a.x, v.x, epsilon) && + PxEquals(a.y, v.y, epsilon) && + PxEquals(a.z, v.z, epsilon); +} + +ApexRenderMeshAssetAuthoring::ApexRenderMeshAssetAuthoring(ResourceList& list, RenderMeshAssetParameters* params, const char* name) +{ + list.add(*this); + + createFromParameters(params); + + mName = name; +} + +ApexRenderMeshAssetAuthoring::ApexRenderMeshAssetAuthoring(ResourceList& list) +{ + list.add(*this); +} + +ApexRenderMeshAssetAuthoring::~ApexRenderMeshAssetAuthoring() +{ +} + +// We will create our vertex map here. 
Remapping will be from sorting by part index + + +void ApexRenderMeshAssetAuthoring::createRenderMesh(const MeshDesc& meshDesc, bool createMappingInformation) +{ + if (!meshDesc.isValid()) + { + APEX_INVALID_OPERATION("MeshDesc is not valid!"); + return; + } + + if (mParams != NULL) + { + mParams->destroy(); + } + + NvParameterized::Traits* traits = GetInternalApexSDK()->getParameterizedTraits(); + mParams = (RenderMeshAssetParameters*)traits->createNvParameterized(RenderMeshAssetParameters::staticClassName()); + NvParameterized::Handle rootHandle(*mParams); + + // Submeshes + mParams->getParameterHandle("materialNames", rootHandle); + rootHandle.resizeArray((int32_t)meshDesc.m_numSubmeshes); + mParams->getParameterHandle("submeshes", rootHandle); + rootHandle.resizeArray((int32_t)meshDesc.m_numSubmeshes); + setSubmeshCount(0); + setSubmeshCount(meshDesc.m_numSubmeshes); + for (uint32_t submeshNum = 0; submeshNum < meshDesc.m_numSubmeshes; ++submeshNum) + { + ApexRenderSubmesh& submesh = *mSubmeshes[submeshNum]; + SubmeshParameters* submeshParams = (SubmeshParameters*)traits->createNvParameterized(SubmeshParameters::staticClassName()); + submesh.createFromParameters(submeshParams); + mParams->submeshes.buf[submeshNum] = submeshParams; + //NvParameterized::Handle submeshHandle( submeshParams ); + const SubmeshDesc& submeshDesc = meshDesc.m_submeshes[submeshNum]; + + // Material name + NvParameterized::Handle handle(*mParams); + mParams->getParameterHandle("materialNames", handle); + NvParameterized::Handle elementHandle(*mParams); + handle.getChildHandle((int32_t)submeshNum, elementHandle); + mParams->setParamString(elementHandle, submeshDesc.m_materialName); + + // Index buffer + + physx::Array<VertexPart> submeshMap; + submeshMap.resize(submeshDesc.m_numVertices); + const uint32_t invalidPart = PxMax(1u, submeshDesc.m_numParts); + for (uint32_t i = 0; i < submeshDesc.m_numVertices; ++i) + { + submeshMap[i].part = invalidPart; + submeshMap[i].vertexIndex = i; + } + 
bool success = false; + switch (submeshDesc.m_indexType) + { + case IndexType::UINT: + success = fillSubmeshMap<uint32_t>(submeshMap, submeshDesc.m_partIndices, submeshDesc.m_numParts, submeshDesc.m_vertexIndices, submeshDesc.m_numIndices, submeshDesc.m_numVertices); + break; + case IndexType::USHORT: + success = fillSubmeshMap<uint16_t>(submeshMap, submeshDesc.m_partIndices, submeshDesc.m_numParts, submeshDesc.m_vertexIndices, submeshDesc.m_numIndices, submeshDesc.m_numVertices); + break; + default: + PX_ALWAYS_ASSERT(); + } + + // error message? + if (!success) + { + return; + } + + if (submeshMap.size() > 1) + { + shdfnd::sort(submeshMap.begin(), submeshMap.size(), VertexPart()); + } + + uint32_t vertexCount = 0; + for (; vertexCount < submeshDesc.m_numVertices; ++vertexCount) + { + if (submeshMap[vertexCount].part == invalidPart) + { + break; + } + } + + // Create inverse map for our internal remapping + Array<int32_t> invMap; // maps old indices to new indices + invMap.resize(submeshDesc.m_numVertices); + for (uint32_t i = 0; i < submeshDesc.m_numVertices; ++i) + { + const uint32_t vIndex = submeshMap[i].vertexIndex; + if (i >= vertexCount) + { + invMap[vIndex] = -1; + } + else + { + invMap[vIndex] = (int32_t)i; + } + } + + // Copy index buffer (remapping) + NvParameterized::Handle ibHandle(submeshParams); + submeshParams->getParameterHandle("indexBuffer", ibHandle); + ibHandle.resizeArray((int32_t)submeshDesc.m_numIndices); + switch (submeshDesc.m_indexType) + { + case IndexType::UINT: + for (uint32_t i = 0; i < submeshDesc.m_numIndices; ++i) + { + const uint32_t index = submeshDesc.m_vertexIndices != NULL ? ((uint32_t*)submeshDesc.m_vertexIndices)[i] : i; + submeshParams->indexBuffer.buf[i] = (uint32_t)invMap[index]; + PX_ASSERT(submeshParams->indexBuffer.buf[i] != (uint32_t)-1); + } + break; + case IndexType::USHORT: + for (uint32_t i = 0; i < submeshDesc.m_numIndices; ++i) + { + const uint16_t index = submeshDesc.m_vertexIndices != NULL ? 
((uint16_t*)submeshDesc.m_vertexIndices)[i] : (uint16_t)i; + submeshParams->indexBuffer.buf[i] = (uint32_t)invMap[index]; + PX_ASSERT(submeshParams->indexBuffer.buf[i] != (uint32_t)-1); + } + break; + default: + PX_ALWAYS_ASSERT(); + } + + // Smoothing groups + int32_t smoothingGroupArraySize = 0; + if (submeshDesc.m_smoothingGroups != NULL) + { + switch (submeshDesc.m_primitive) + { + case Primitive::TRIANGLE_LIST: + smoothingGroupArraySize = (int32_t)submeshDesc.m_numIndices/3; + break; + default: + PX_ALWAYS_ASSERT(); // We only have one kind of primitive + } + } + if (smoothingGroupArraySize != 0) + { + NvParameterized::Handle sgHandle(submeshParams); + submeshParams->getParameterHandle("smoothingGroups", sgHandle); + sgHandle.resizeArray(smoothingGroupArraySize); + sgHandle.setParamU32Array(submeshDesc.m_smoothingGroups, smoothingGroupArraySize, 0); + } + + // Index partition + NvParameterized::Handle ipHandle(submeshParams); + submeshParams->getParameterHandle("indexPartition", ipHandle); + ipHandle.resizeArray(PxMax((int32_t)submeshDesc.m_numParts + 1, 2)); + + if (submeshDesc.m_numParts == 0) + { + submeshParams->indexPartition.buf[0] = 0; + submeshParams->indexPartition.buf[1] = submeshDesc.m_numIndices; + } + else + { + switch (submeshDesc.m_indexType) + { + case IndexType::UINT: + for (uint32_t i = 0; i < submeshDesc.m_numParts; ++i) + { + submeshParams->indexPartition.buf[i] = ((uint32_t*)submeshDesc.m_partIndices)[i]; + } + submeshParams->indexPartition.buf[submeshDesc.m_numParts] = submeshDesc.m_numIndices; + break; + case IndexType::USHORT: + for (uint32_t i = 0; i < submeshDesc.m_numParts; ++i) + { + submeshParams->indexPartition.buf[i] = (uint32_t)((uint16_t*)submeshDesc.m_partIndices)[i]; + } + submeshParams->indexPartition.buf[submeshDesc.m_numParts] = submeshDesc.m_numIndices; + break; + default: + PX_ALWAYS_ASSERT(); + } + } + + // Vertex partition + Array<uint32_t> lookup; + createIndexStartLookup(lookup, 0, submeshDesc.m_numParts, 
(int32_t*)submeshMap.begin(), vertexCount, sizeof(VertexPart)); + NvParameterized::Handle vpHandle(submeshParams); + submeshParams->getParameterHandle("vertexPartition", vpHandle); + vpHandle.resizeArray((int32_t)lookup.size()); + vpHandle.setParamU32Array(lookup.begin(), (int32_t)lookup.size()); + + // Vertex buffer + + // Create format description + ApexVertexFormat format; + + for (uint32_t i = 0; i < submeshDesc.m_numVertexBuffers; ++i) + { + const VertexBuffer& vb = submeshDesc.m_vertexBuffers[i]; + for (uint32_t semantic = 0; semantic < RenderVertexSemantic::NUM_SEMANTICS; ++semantic) + { + RenderVertexSemantic::Enum vertexSemantic = (RenderVertexSemantic::Enum)semantic; + RenderDataFormat::Enum vertexFormat = vb.getSemanticData(vertexSemantic).format; + + if (vertexSemanticFormatValid(vertexSemantic, vertexFormat)) + { + int32_t bufferIndex = format.addBuffer(format.getSemanticName(vertexSemantic)); + format.setBufferFormat((uint32_t)bufferIndex, vb.getSemanticData(vertexSemantic).format); + } + else if (vertexFormat != RenderDataFormat::UNSPECIFIED) + { + APEX_INVALID_PARAMETER("Format (%d) is not valid for Semantic (%s)", vertexFormat, format.getSemanticName(vertexSemantic)); + } + } + } + + format.setWinding(submeshDesc.m_cullMode); + + // Include custom buffers + for (uint32_t i = 0; i < submeshDesc.m_numVertexBuffers; ++i) + { + const VertexBuffer& vb = submeshDesc.m_vertexBuffers[i]; + for (uint32_t index = 0; index < vb.getNumCustomSemantics(); ++index) + { + const RenderSemanticData& data = vb.getCustomSemanticData(index); + // BRG - reusing data.ident as the custom channel name. What to do with the serialize parameter? + int32_t bufferIndex = format.addBuffer((char*)data.ident); + format.setBufferFormat((uint32_t)bufferIndex, data.format); + + // PH: custom buffers are never serialized this way, we might need to change this! 
+ format.setBufferSerialize((uint32_t)bufferIndex, data.serialize); + } + } + + if (createMappingInformation) + { + int32_t bufferIndex = format.addBuffer("VERTEX_ORIGINAL_INDEX"); + format.setBufferFormat((uint32_t)bufferIndex, RenderDataFormat::UINT1); + } + + // Create apex vertex buffer + submesh.buildVertexBuffer(format, vertexCount); + + // Now fill in... + for (uint32_t i = 0; i < submeshDesc.m_numVertexBuffers; ++i) + { + const VertexBuffer& vb = submeshDesc.m_vertexBuffers[i]; + const VertexFormat& vf = submesh.getVertexBuffer().getFormat(); + + RenderSemanticData boneWeightData; + RenderSemanticData boneIndexData; + RenderDataFormat::Enum checkFormatBoneWeight = RenderDataFormat::UNSPECIFIED; + RenderDataFormat::Enum checkFormatBoneIndex = RenderDataFormat::UNSPECIFIED; + RenderDataFormat::Enum dstFormatBoneWeight = RenderDataFormat::UNSPECIFIED; + RenderDataFormat::Enum dstFormatBoneIndex = RenderDataFormat::UNSPECIFIED; + void* dstDataWeight = NULL; + void* dstDataIndex = NULL; + uint32_t numBoneWeights = 0; + uint32_t numBoneIndices = 0; + + for (uint32_t semantic = 0; semantic < RenderVertexSemantic::NUM_SEMANTICS; ++semantic) + { + if (vertexSemanticFormatValid((RenderVertexSemantic::Enum)semantic, vb.getSemanticData((RenderVertexSemantic::Enum)semantic).format)) + { + RenderDataFormat::Enum dstFormat; + void* dst = submesh.getVertexBufferWritable().getBufferAndFormatWritable(dstFormat, (uint32_t)vf.getBufferIndexFromID(vf.getSemanticID((RenderVertexSemantic::Enum)semantic))); + const RenderSemanticData& data = vb.getSemanticData((RenderVertexSemantic::Enum)semantic); + + copyRenderVertexBuffer(dst, dstFormat, 0, 0, data.data, data.srcFormat, data.stride, 0, submeshDesc.m_numVertices, invMap.begin()); + + if (semantic == RenderVertexSemantic::BONE_WEIGHT) + { + boneWeightData = data; + // Verification code for bone weights. 
+ switch (data.srcFormat) + { + case RenderDataFormat::FLOAT1: + checkFormatBoneWeight = RenderDataFormat::FLOAT1; + numBoneWeights = 1; + break; + case RenderDataFormat::FLOAT2: + checkFormatBoneWeight = RenderDataFormat::FLOAT2; + numBoneWeights = 2; + break; + case RenderDataFormat::FLOAT3: + checkFormatBoneWeight = RenderDataFormat::FLOAT3; + numBoneWeights = 3; + break; + case RenderDataFormat::FLOAT4: + checkFormatBoneWeight = RenderDataFormat::FLOAT4; + numBoneWeights = 4; + break; + default: + break; + } + + dstDataWeight = dst; + dstFormatBoneWeight = dstFormat; + } + else if (semantic == RenderVertexSemantic::BONE_INDEX) + { + boneIndexData = data; + switch (data.srcFormat) + { + case RenderDataFormat::USHORT1: + checkFormatBoneIndex = RenderDataFormat::USHORT1; + numBoneIndices = 1; + break; + case RenderDataFormat::USHORT2: + checkFormatBoneIndex = RenderDataFormat::USHORT2; + numBoneIndices = 2; + break; + case RenderDataFormat::USHORT3: + checkFormatBoneIndex = RenderDataFormat::USHORT3; + numBoneIndices = 3; + break; + case RenderDataFormat::USHORT4: + checkFormatBoneIndex = RenderDataFormat::USHORT4; + numBoneIndices = 4; + break; + default: + break; + } + dstDataIndex = dst; + dstFormatBoneIndex = dstFormat; + } + } + } + + // some verification code + if (numBoneIndices > 1 && numBoneWeights == numBoneIndices) + { + float verifyWeights[4] = { 0.0f }; + uint16_t verifyIndices[4] = { 0 }; + for (uint32_t vi = 0; vi < submeshDesc.m_numVertices; vi++) + { + const int32_t dest = invMap[vi]; + if (dest >= 0) + { + + copyRenderVertexBuffer(verifyWeights, checkFormatBoneWeight, 0, 0, boneWeightData.data, boneWeightData.srcFormat, boneWeightData.stride, vi, 1); + copyRenderVertexBuffer(verifyIndices, checkFormatBoneIndex, 0, 0, boneIndexData.data, boneIndexData.srcFormat, boneIndexData.stride, vi, 1); + + float sum = 0.0f; + for (uint32_t j = 0; j < numBoneWeights; j++) + { + sum += verifyWeights[j]; + } + + if (PxAbs(1 - sum) > 0.001) + { + if (sum > 0.0f) 
+ { + for (uint32_t j = 0; j < numBoneWeights; j++) + { + verifyWeights[j] /= sum; + } + } + + APEX_INVALID_PARAMETER("Submesh %d Vertex %d has been normalized, bone weight was (%f)", i, vi, sum); + } + // PH: bubble sort, don't kill me for this + for (uint32_t j = 1; j < numBoneWeights; j++) + { + for (uint32_t k = 1; k < numBoneWeights; k++) + { + if (verifyWeights[k - 1] < verifyWeights[k]) + { + nvidia::swap(verifyWeights[k - 1], verifyWeights[k]); + nvidia::swap(verifyIndices[k - 1], verifyIndices[k]); + } + } + } + + for (uint32_t j = 0; j < numBoneWeights; j++) + { + if (verifyWeights[j] == 0.0f) + { + verifyIndices[j] = 0; + } + } + + copyRenderVertexBuffer(dstDataWeight, dstFormatBoneWeight, 0, (uint32_t)dest, verifyWeights, checkFormatBoneWeight, 0, 0, 1); + copyRenderVertexBuffer(dstDataIndex, dstFormatBoneIndex, 0, (uint32_t)dest, verifyIndices, checkFormatBoneIndex, 0, 0, 1); + } + } + } + + // Custom buffers + for (uint32_t index = 0; index < vb.getNumCustomSemantics(); ++index) + { + const RenderSemanticData& data = vb.getCustomSemanticData(index); + const int32_t bufferIndex = format.getBufferIndexFromID(format.getID((char*)data.ident)); + PX_ASSERT(bufferIndex >= 0); + void* dst = const_cast<void*>(submesh.getVertexBuffer().getBuffer((uint32_t)bufferIndex)); + RenderDataFormat::Enum srcFormat = data.srcFormat != RenderDataFormat::UNSPECIFIED ? 
data.srcFormat : data.format; + copyRenderVertexBuffer(dst, data.format, 0, 0, data.data, srcFormat, data.stride, 0, submeshDesc.m_numVertices, invMap.begin()); + } + } + + if (createMappingInformation) + { + const VertexFormat::BufferID bufferID = format.getID("VERTEX_ORIGINAL_INDEX"); + const int32_t bufferIndex = format.getBufferIndexFromID(bufferID); + RenderDataFormat::Enum bufferFormat = format.getBufferFormat((uint32_t)bufferIndex); + PX_ASSERT(bufferIndex >= 0); + const void* dst = submesh.getVertexBuffer().getBuffer((uint32_t)bufferIndex); + copyRenderVertexBuffer(const_cast<void*>(dst), bufferFormat, 0, 0, &submeshMap[0].vertexIndex , RenderDataFormat::UINT1 , sizeof(VertexPart), 0, vertexCount, NULL); + } + } + + // Part bounds + uint32_t partCount = 1; + for (uint32_t submeshNum = 0; submeshNum < meshDesc.m_numSubmeshes; ++submeshNum) + { + partCount = PxMax(partCount, meshDesc.m_submeshes[submeshNum].m_numParts); + } + mParams->getParameterHandle("partBounds", rootHandle); + mParams->resizeArray(rootHandle, (int32_t)partCount); + for (uint32_t partNum = 0; partNum < partCount; ++partNum) + { + mParams->partBounds.buf[partNum].setEmpty(); + // Add part vertices + for (uint32_t submeshNum = 0; submeshNum < meshDesc.m_numSubmeshes; ++submeshNum) + { + SubmeshParameters* submeshParams = DYNAMIC_CAST(SubmeshParameters*)(mParams->submeshes.buf[submeshNum]); + ApexRenderSubmesh& submesh = *mSubmeshes[submeshNum]; + RenderDataFormat::Enum positionFormat; + const VertexFormat& vf = submesh.getVertexBuffer().getFormat(); + const PxVec3* positions = (const PxVec3*)submesh.getVertexBuffer().getBufferAndFormat(positionFormat, + (uint32_t)vf.getBufferIndexFromID(vf.getSemanticID(RenderVertexSemantic::POSITION))); + if (positions && positionFormat == RenderDataFormat::FLOAT3) + { + for (uint32_t vertexIndex = submeshParams->vertexPartition.buf[partNum]; vertexIndex < submeshParams->vertexPartition.buf[partNum + 1]; ++vertexIndex) + { + 
					mParams->partBounds.buf[partNum].include(positions[vertexIndex]);
				}
			}
		}
	}

	mParams->textureUVOrigin = meshDesc.m_uvOrigin;

	createLocalData();
}

// Builds a vertex-welding map for mesh reduction.  On return, map[i] is the compacted
// index of the representative vertex for input vertex i: vertices whose positions,
// frames (normal/tangent/binormal) and UVs all lie within the given tolerances - and
// which share a smoothing group, when smoothingGroups is non-NULL - are merged onto a
// single representative, and the surviving indices are renumbered contiguously.
// Returns the reduced (post-weld) vertex count.
uint32_t ApexRenderMeshAssetAuthoring::createReductionMap(uint32_t* map, const Vertex* vertices, const uint32_t* smoothingGroups, uint32_t vertexCount,
        const PxVec3& positionTolerance, float normalTolerance, float UVTolerance)
{
	// Give every vertex an AABB of half the position tolerance in each direction;
	// two such boxes overlap iff the vertices are within positionTolerance of each other.
	physx::Array<BoundsRep> vertexNeighborhoods;
	vertexNeighborhoods.resize(vertexCount);
	const PxVec3 neighborhoodExtent = 0.5f * positionTolerance;
	for (uint32_t vertexNum = 0; vertexNum < vertexCount; ++vertexNum)
	{
		vertexNeighborhoods[vertexNum].aabb = PxBounds3(vertices[vertexNum].position - neighborhoodExtent, vertices[vertexNum].position + neighborhoodExtent);
	}

	// Broad-phase sweep: collect all candidate (i0, i1) pairs of nearby vertices.
	physx::Array<IntPair> vertexNeighbors;
	if (vertexNeighborhoods.size() > 0)
	{
		boundsCalculateOverlaps(vertexNeighbors, Bounds3XYZ, &vertexNeighborhoods[0], vertexNeighborhoods.size(), sizeof(vertexNeighborhoods[0]));
	}

	// Start with the identity mapping.
	for (uint32_t i = 0; i < vertexCount; ++i)
	{
		map[i] = i;
	}

	// Narrow phase: for each candidate pair, compare the pair's current representatives
	// and merge i1 onto i0's representative when every tolerance test passes.
	for (uint32_t pairNum = 0; pairNum < vertexNeighbors.size(); ++pairNum)
	{
		const IntPair& pair = vertexNeighbors[pairNum];
		const uint32_t map0 = map[pair.i0];
		const uint32_t map1 = map[pair.i1];
		if (smoothingGroups != NULL && smoothingGroups[map0] != smoothingGroups[map1])
		{
			continue;
		}
		const Vertex& vertex0 = vertices[map0];
		const Vertex& vertex1 = vertices[map1];
		if (PxAbs(vertex0.position.x - vertex1.position.x) > positionTolerance.x ||
		        PxAbs(vertex0.position.y - vertex1.position.y) > positionTolerance.y ||
		        PxAbs(vertex0.position.z - vertex1.position.z) > positionTolerance.z)
		{
			continue;
		}
		if (!PxVec3equals(vertex0.normal, vertex1.normal, normalTolerance) ||
		        !PxVec3equals(vertex0.tangent, vertex1.tangent, normalTolerance) ||
		        !PxVec3equals(vertex0.binormal, vertex1.binormal, normalTolerance))
		{
			continue;
		}
		// All UV sets must match within tolerance; an early break marks a mismatch.
		uint32_t uvNum = 0;
		for (; uvNum < VertexFormat::MAX_UV_COUNT; ++uvNum)
		{
			const VertexUV& uv0 = vertex0.uv[uvNum];
			const VertexUV& uv1 = vertex1.uv[uvNum];
			if (PxAbs(uv0[0] - uv1[0]) > UVTolerance || PxAbs(uv0[1] - uv1[1]) > UVTolerance)
			{
				break;
			}
		}
		if (uvNum < VertexFormat::MAX_UV_COUNT)
		{
			continue;
		}
		map[pair.i1] = map0;
	}

	// Compaction: slots still used as representatives get 0, merged-away slots keep -1;
	// a running prefix sum then yields the (negative) shift to apply to each surviving index.
	physx::Array<int32_t> offsets(vertexCount, -1);
	for (uint32_t i = 0; i < vertexCount; ++i)
	{
		offsets[map[i]] = 0;
	}
	int32_t delta = 0;
	for (uint32_t i = 0; i < vertexCount; ++i)
	{
		delta += offsets[i];
		offsets[i] = delta;
	}
	for (uint32_t i = 0; i < vertexCount; ++i)
	{
		map[i] += offsets[map[i]];
	}
	// delta is <= 0 (one -1 per removed vertex), so this is the surviving vertex count.
	return vertexCount + delta;
}



// Sets the material name for the given submesh; a NULL name is stored as "".
// Out-of-range indices are ignored (asserts in debug builds).
void ApexRenderMeshAssetAuthoring::setMaterialName(uint32_t submeshIndex, const char* name)
{
	size_t maxMaterials = (uint32_t)mParams->materialNames.arraySizes[0];
	PX_ASSERT(submeshIndex < maxMaterials);
	if (submeshIndex < maxMaterials)
	{
		NvParameterized::Handle handle(*mParams);
		mParams->getParameterHandle("materialNames", handle);
		NvParameterized::Handle elementHandle(*mParams);
		handle.getChildHandle((int32_t)submeshIndex, elementHandle);
		mParams->setParamString(elementHandle, name ? name : "");
	}
}

// Stores the triangle winding order on the submesh's vertex format.
// NOTE(review): no bounds check on submeshIndex here, unlike setMaterialName - confirm callers validate it.
void ApexRenderMeshAssetAuthoring::setWindingOrder(uint32_t submeshIndex, RenderCullMode::Enum winding)
{
	ApexRenderSubmesh& subMesh = *ApexRenderMeshAsset::mSubmeshes[submeshIndex];
	VertexBufferIntl& vb = subMesh.getVertexBufferWritable();
	vb.getFormatWritable().setWinding(winding);
}

// Reads the triangle winding order back from the submesh's vertex format.
RenderCullMode::Enum ApexRenderMeshAssetAuthoring::getWindingOrder(uint32_t submeshIndex) const
{
	const RenderSubmesh& subMesh = getSubmesh(submeshIndex);
	const nvidia::apex::VertexBuffer& vb = subMesh.getVertexBuffer();
	const VertexFormat& format = vb.getFormat();
	return format.getWinding();
}



// Fills submeshMap with the part index of each vertex.  PxU is the integer type of the
// supplied (void-typed) index buffers.  Returns false when a vertex index is out of
// range or a vertex is claimed by more than one part.
template <typename PxU>
bool ApexRenderMeshAssetAuthoring::fillSubmeshMap(physx::Array<VertexPart>& submeshMap, const void* const partIndicesVoid,
        uint32_t numParts, const void* const vertexIndicesVoid,
        uint32_t numSubmeshIndices, uint32_t numSubmeshVertices)
{
	PxU partIndexStart = 0;
	if (numParts == 0)
	{
		numParts = 1;
	}

	// With no part-index list, treat the mesh as a single part starting at index 0.
	const PxU* const partIndices = partIndicesVoid != NULL ? reinterpret_cast<const PxU * const>(partIndicesVoid) : &partIndexStart;
	const PxU* const vertexIndices = reinterpret_cast<const PxU * const>(vertexIndicesVoid);

	for (uint32_t i = 0; i < numParts; ++i)
	{
		// Each part i owns index range [partIndices[i], partIndices[i+1]); the last part runs to numSubmeshIndices.
		const uint32_t stop = i + 1 < numParts ? partIndices[i + 1] : numSubmeshIndices;
		for (uint32_t j = partIndices[i]; j < stop; ++j)
		{
			const uint32_t vertexIndex = vertexIndices != NULL ?
vertexIndices[j] : j; + if (vertexIndex >= numSubmeshVertices) + { + return false; // to do: issue error - index out of range + } + if (submeshMap[vertexIndex].part != numParts && submeshMap[vertexIndex].part != i) + { + return false; // to do: issue error - vertex in more than one part + } + submeshMap[vertexIndex].part = i; + } + } + return true; +} + +} +} // end namespace nvidia::apex +#endif diff --git a/APEX_1.4/framework/src/ApexRenderSubmesh.cpp b/APEX_1.4/framework/src/ApexRenderSubmesh.cpp new file mode 100644 index 00000000..150e2432 --- /dev/null +++ b/APEX_1.4/framework/src/ApexRenderSubmesh.cpp @@ -0,0 +1,141 @@ +/* + * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved. + * + * NVIDIA CORPORATION and its licensors retain all intellectual property + * and proprietary rights in and to this software, related documentation + * and any modifications thereto. Any use, reproduction, disclosure or + * distribution of this software and related documentation without an express + * license agreement from NVIDIA CORPORATION is strictly prohibited. 
 */


#include "ApexRenderSubmesh.h"

//#include "ApexStream.h"
//#include "ApexSharedSerialization.h"
#include "ApexSDKIntl.h"


namespace nvidia
{
namespace apex
{

// Finds the triangles edge-adjacent to triangleIndex, writing their indices into
// indexedNeighbors and returning how many were found.  A neighbor shares an edge with
// opposite winding: its "next" vertex along the shared edge equals this triangle's
// "previous" vertex.
// NOTE(review): indexedNeighbors holds 3 entries; a non-manifold mesh (more than one
// neighbor per edge) could overrun it - confirm inputs are manifold.
PX_INLINE uint32_t findIndexedNeighbors(uint32_t indexedNeighbors[3], uint32_t triangleIndex,
                                        const uint32_t* indexBuffer, const uint32_t* vertexTriangleRefs, const uint32_t* vertexToTriangleMap)
{
	uint32_t indexedNeighborCount = 0;
	const uint32_t* triangleVertexIndices = indexBuffer + 3 * triangleIndex;
	for (uint32_t v = 0; v < 3; ++v)
	{
		const uint32_t vertexIndex = triangleVertexIndices[v];
		// (3 >> v) ^ 1 maps v = 0,1,2 to 2,0,1, i.e. the previous vertex slot (v + 2) % 3.
		const uint32_t prevVertexIndex = triangleVertexIndices[(3 >> v) ^ 1];
		// Find all other triangles which have this vertex
		const uint32_t mapStart = vertexTriangleRefs[vertexIndex];
		const uint32_t mapStop = vertexTriangleRefs[vertexIndex + 1];
		for (uint32_t i = mapStart; i < mapStop; ++i)
		{
			const uint32_t neighborTriangleIndex = vertexToTriangleMap[i];
			// See if the previous vertex on the triangle matches the next vertex on the neighbor.  (This will
			// automatically exclude the triangle itself, so no check to exclude a self-check is made.)
			const uint32_t* neighborTriangleVertexIndices = indexBuffer + 3 * neighborTriangleIndex;
			// Bit mask of which neighbor slot holds vertexIndex: slot 0 -> 1, slot 1 -> 2, slot 2 -> 4.
			const uint8_t indexMatch = (uint8_t)((uint8_t)(neighborTriangleVertexIndices[0] == vertexIndex) |
			                                     (uint8_t)(neighborTriangleVertexIndices[1] == vertexIndex) << 1 |
			                                     (uint8_t)(neighborTriangleVertexIndices[2] == vertexIndex) << 2);
			// indexMatch & 3 maps the single-match masks 1,2,4 to slots 1,2,0 - the vertex after the matched slot.
			const uint32_t nextNeighborVertexIndex = neighborTriangleVertexIndices[indexMatch & 3];
			if (nextNeighborVertexIndex == prevVertexIndex)
			{
				// Found a neighbor
				indexedNeighbors[indexedNeighborCount++] = neighborTriangleIndex;
			}
		}
	}

	return indexedNeighborCount;
}



// Reorders vertex data via new2old and remaps every index-buffer entry through old2new.
void ApexRenderSubmesh::applyPermutation(const Array<uint32_t>& old2new, const Array<uint32_t>& new2old)
{
	// vertexPartition holds N+1 range boundaries for N parts (see part-bounds usage in
	// the authoring code), so size == 2 means a single vertex partition; the vertex
	// buffer is only physically permuted in that case - TODO confirm multi-part intent.
	if (mParams->vertexPartition.arraySizes[0] == 2)
	{
		mVertexBuffer.applyPermutation(new2old);
	}

	const uint32_t numIndices = (uint32_t)mParams->indexBuffer.arraySizes[0];
	for (uint32_t i = 0; i < numIndices; i++)
	{
		PX_ASSERT(mParams->indexBuffer.buf[i] < old2new.size());
		mParams->indexBuffer.buf[i] = old2new[mParams->indexBuffer.buf[i]];
	}
}



// Adopts the given parameterized submesh object, creating an empty vertex-buffer
// parameter object for it if none exists yet.  Always returns true.
bool ApexRenderSubmesh::createFromParameters(SubmeshParameters* params)
{
	mParams = params;

	if (mParams->vertexBuffer == NULL)
	{
		NvParameterized::Traits* traits = GetInternalApexSDK()->getParameterizedTraits();
		mParams->vertexBuffer = traits->createNvParameterized(VertexBufferParameters::staticClassName());
	}
	mVertexBuffer.setParams(static_cast<VertexBufferParameters*>(mParams->vertexBuffer));

	return true;
}



// Installs new parameterized objects, reconciling the submesh's vertexBuffer
// reference with the explicitly supplied vertex-buffer parameters.
void ApexRenderSubmesh::setParams(SubmeshParameters* submeshParams, VertexBufferParameters* vertexBufferParams)
{

	// If only the submesh params were supplied, take the vertex buffer from them.
	if (vertexBufferParams == NULL && submeshParams != NULL)
	{
		vertexBufferParams = static_cast<VertexBufferParameters*>(submeshParams->vertexBuffer);
		PX_ASSERT(vertexBufferParams != NULL);
	}
	// If the submesh params have no vertex buffer yet, adopt the supplied one.
	else if (submeshParams != NULL && submeshParams->vertexBuffer == NULL)
	{
		submeshParams->vertexBuffer = vertexBufferParams;
	}
	else if
(mParams == NULL) + { + // Only emit this warning if mParams is empty yet (not on destruction of the object) + APEX_INTERNAL_ERROR("Confliciting parameterized objects!"); + } + mParams = submeshParams; + + mVertexBuffer.setParams(vertexBufferParams); +} + + + +void ApexRenderSubmesh::addStats(RenderMeshAssetStats& stats) const +{ + stats.vertexCount += mVertexBuffer.getVertexCount(); + stats.indexCount += mParams->indexBuffer.arraySizes[0]; + + const uint32_t submeshVertexBytes = mVertexBuffer.getAllocationSize(); + stats.vertexBufferBytes += submeshVertexBytes; + stats.totalBytes += submeshVertexBytes; + + const uint32_t submeshIndexBytes = mParams->indexBuffer.arraySizes[0] * sizeof(uint32_t); + stats.indexBufferBytes += submeshIndexBytes; + stats.totalBytes += submeshIndexBytes; + + stats.totalBytes += mParams->smoothingGroups.arraySizes[0] * sizeof(uint32_t); +} + + + +void ApexRenderSubmesh::buildVertexBuffer(const VertexFormat& format, uint32_t vertexCount) +{ + mVertexBuffer.build(format, vertexCount); +} + + +} // namespace apex +} // namespace nvidia diff --git a/APEX_1.4/framework/src/ApexResourceProvider.cpp b/APEX_1.4/framework/src/ApexResourceProvider.cpp new file mode 100644 index 00000000..ee4e4105 --- /dev/null +++ b/APEX_1.4/framework/src/ApexResourceProvider.cpp @@ -0,0 +1,702 @@ +/* + * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved. + * + * NVIDIA CORPORATION and its licensors retain all intellectual property + * and proprietary rights in and to this software, related documentation + * and any modifications thereto. Any use, reproduction, disclosure or + * distribution of this software and related documentation without an express + * license agreement from NVIDIA CORPORATION is strictly prohibited. 
+ */ + + +#include "Apex.h" +#include "ResourceProviderIntl.h" +#include "ApexResourceProvider.h" +#include "PsUserAllocated.h" +#include "ApexSDKImpl.h" +#include "PsString.h" +#include <ctype.h> // for toupper() + +namespace nvidia +{ +namespace apex +{ + +#pragma warning(disable: 4355) + +ApexResourceProvider::ApexResourceProvider() +: mNSNames(this, 0, false, 0) +, mCaseSensitive(false) +{ +} + +ApexResourceProvider::~ApexResourceProvider() +{ +} + +/* == Public ResourceProvider interface == */ +void ApexResourceProvider::registerCallback(ResourceCallback* func) +{ + mUserCallback = func; +} + +void ApexResourceProvider::setResource(const char* nameSpace, const char* name, void* resource, bool incRefCount) +{ + setResource(nameSpace, name, resource, true, incRefCount); +} + +void ApexResourceProvider::setResource(const char* nameSpace, const char* name, void* resource, bool valueIsSet, bool incRefCount) +{ + PX_ASSERT(nameSpace); + PX_ASSERT(name); + uint32_t nsIndex = getNSIndex(createNameSpaceInternal(nameSpace, true)); + if (nsIndex < mNameSpaces.size()) + { + ResID id = mNameSpaces[nsIndex]->getOrCreateID(name, nameSpace); + PX_ASSERT(id < mResources.size()); + if (id < mResources.size()) + { + ApexResourceProvider::resource& res = mResources[id]; + res.ptr = resource; + res.valueIsSet = (uint8_t)valueIsSet; + if (incRefCount) + { + res.refCount++; + } + } + } +} + +void* ApexResourceProvider::getResource(const char* nameSpace, const char* name) +{ + PX_ASSERT(nameSpace); + PX_ASSERT(name); + uint32_t nsIndex = getNSIndex(createNameSpaceInternal(nameSpace, true)); + if (nsIndex < mNameSpaces.size()) + { + ResID id = mNameSpaces[nsIndex]->getOrCreateID(name, nameSpace); + PX_ASSERT(id < mResources.size()); + if (id < mResources.size()) + { + return getResource(id); + } + } + return NULL; +} + +/* == Internal ResourceProviderIntl interface == */ +ResID ApexResourceProvider::createNameSpaceInternal(const char* &nameSpace, bool releaseAtExit) +{ + /* create or 
get a name space */
	size_t nextID = mResources.size();
	ResID nsID = mNSNames.getOrCreateID(nameSpace, "NameSpace");
	if (nsID == (ResID) nextID)
	{
		// First time this namespace name is seen: build the NameSpace object and
		// record the mapping from its hashed NSID to its slot in mNameSpaces.
		NameSpace* ns = PX_NEW(NameSpace)(this, nsID, releaseAtExit, nameSpace);
		if (ns)
		{
			ResID id = getNSID(nameSpace);
			mNSID.insert(id, mNameSpaces.size());

			// The resource entry for a namespace name stores the hashed NSID in its ptr field.
			mResources[nsID].ptr = (void*)(size_t) id;
			mNameSpaces.pushBack(ns);
		}
		else
		{
			return INVALID_RESOURCE_ID;
		}
	}
	ResID ret = (ResID)(size_t) mResources[nsID].ptr;
	return ret;
}

// Creates (or finds) the resource 'name' inside the given namespace, optionally
// taking a reference on it.  Returns its ResID, or INVALID_RESOURCE_ID on failure.
ResID ApexResourceProvider::createResource(ResID nameSpace, const char* name, bool refCount)
{
	uint32_t nsIndex = getNSIndex(nameSpace);
	if (nsIndex < mNameSpaces.size())
	{
		NameSpace* ns = mNameSpaces[nsIndex];
		ResID id = ns->getOrCreateID(name, mResources[ns->getID()].name);
		PX_ASSERT(id < mResources.size());
		if (id < mResources.size() && refCount)
		{
			mResources[id].refCount++;
		}
		return id;
	}
	else
	{
		return INVALID_RESOURCE_ID;
	}
}

bool ApexResourceProvider::checkResource(ResID nameSpace, const char* name)
{
	/* Return true if the named resource has been given a value */
	uint32_t nsIndex = getNSIndex(nameSpace);
	if (nsIndex < mNameSpaces.size())
	{
		NameSpace* ns = mNameSpaces[nsIndex];
		ResID id = ns->getOrCreateID(name, mResources[ns->getID()].name);
		PX_ASSERT(id < mResources.size());
		return checkResource(id);
	}
	return false;
}

// Returns true when the resource exists and its value has been set.
bool ApexResourceProvider::checkResource(ResID id)
{
	if (mResources.size() <= id)
	{
		return false;
	}

	ApexResourceProvider::resource& res = mResources[id];

	if (!res.valueIsSet)
	{
		return false;
	}

	return true;
}

// Mutates 'name' into a unique name within the namespace by appending ".1", ".2", ...
// until an unused name is found (bounded to avoid an infinite loop).
void ApexResourceProvider::generateUniqueName(ResID nameSpace, ApexSimpleString& name)
{
	uint32_t nsIndex = getNSIndex(nameSpace);
	if (nsIndex < mNameSpaces.size())
	{
		ApexSimpleString test;
		uint32_t count = 1;
		char buf[64];
		*buf = '.';

		do
		{
			shdfnd::snprintf(buf + 1,60, "%d", count);
			test = name + ApexSimpleString(buf);
			if (!checkResource(nameSpace, test.c_str()))
			{
				break;
			}
		}
		while (++count < 0xFFFFFFF0);

		name = test;
	}
}

// Drops one reference on the resource; when the count reaches zero the value is
// cleared, and the user callback is notified only for callback-created resources.
void ApexResourceProvider::releaseResource(ResID id)
{
	if (mResources.size() <= id)
	{
		return;
	}

	ApexResourceProvider::resource& res = mResources[id];

	PX_ASSERT(res.refCount);
	if (res.refCount > 0)
	{
		res.refCount--;

		if (res.refCount == 0 && mUserCallback &&
		        res.valueIsSet && res.ptr != NULL)
		{
			if (mResources[id].usedGetResource) // Defect DE641 only callback to the user, if this resource was created by a GetResource request
			{
				mUserCallback->releaseResource(res.nameSpace, res.name, res.ptr);
			}
			res.ptr = (void*) UnknownValue;
			res.valueIsSet = false;
			res.usedGetResource = false;
		}
		// if the ptr is NULL and we're releasing it, we do want to call requestResource next time it is requested, so valueIsSet = false
		else if (res.refCount == 0 && res.valueIsSet && res.ptr == NULL)
		{
			res.ptr = (void*) UnknownValue;
			res.valueIsSet = false;
			res.usedGetResource = false;
		}
	}
}

// Returns the resource's pointer value, lazily requesting it from the user
// callback on first access.
void* ApexResourceProvider::getResource(ResID id)
{
	if (mResources.size() <= id)
	{
		return NULL;
	}
	else if (!mResources[id].valueIsSet)
	{
		// PH: WARNING: This MUST not be a reference, mResource can be altered during the requestResource() operation!!!!
+ ApexResourceProvider::resource res = mResources[id]; + if (mUserCallback) + { + // tmp ensures that the [] operator is called AFTER mResources is possibly + // resized by something in requestResources + void* tmp = mUserCallback->requestResource(res.nameSpace, res.name); + res.ptr = tmp; + res.valueIsSet = true; + res.usedGetResource = true; + } + else + { + res.ptr = NULL; + } + mResources[id] = res; + } + return mResources[id].ptr; +} + +const char* ApexResourceProvider::getResourceName(ResID id) +{ + if (mResources.size() <= id) + { + return NULL; + } + return mResources[id].name; +} +const char* ApexResourceProvider::getResourceNameSpace(ResID id) +{ + if (mResources.size() <= id) + { + return NULL; + } + return mResources[id].nameSpace; +} + + +bool ApexResourceProvider::getResourceIDs(const char* nameSpace, ResID* outResIDs, uint32_t& outCount, uint32_t inCount) +{ + outCount = 0; + + if (!outResIDs) + { + return false; + } + + for (uint32_t i = 0; i < mResources.size(); i++) + { + if (stringsMatch(mResources[i].nameSpace, nameSpace)) + { + if (outCount > inCount) + { + outCount = 0; + return false; + } + outResIDs[outCount++] = i; + } + } + + return true; +} + +void ApexResourceProvider::destroy() +{ + if (mUserCallback) + { + for (uint32_t i = 0 ; i < mResources.size() ; i++) + { + ApexResourceProvider::resource& res = mResources[i]; + if (res.refCount != 0 && res.valueIsSet && res.ptr != NULL) + { + ResID resIndex = mNSNames.getOrCreateID(res.nameSpace, "NameSpace"); + PX_ASSERT(mResources[resIndex].ptr); + uint32_t nsIndex = getNSIndex((ResID)(size_t)mResources[resIndex].ptr); + if (nsIndex < mNameSpaces.size() && + mNameSpaces[nsIndex]->releaseAtExit()) + { + if (res.usedGetResource) // this check added for PhysXLab DE4349 + { + mUserCallback->releaseResource(res.nameSpace, res.name, res.ptr); + } + else + { + APEX_DEBUG_WARNING("Unreleased resource found during teardown: Namespace <%s>, Name <%s>", res.nameSpace, res.name); + } + } + } + } + } + 
mResources.clear(); + for (uint32_t i = 0 ; i < mNameSpaces.size() ; i++) + { + PX_DELETE(mNameSpaces[i]); + } + mNameSpaces.clear(); + delete this; +} + +ApexResourceProvider::NameSpace::NameSpace(ApexResourceProvider* arp, ResID nsid, bool releaseAtExit, const char* nameSpace) : + mReleaseAtExit(releaseAtExit), + mArp(arp), + mId(nsid) +{ + memset(hash, 0, sizeof(hash)); + mNameSpace = 0; + if (nameSpace) + { + uint32_t len = (uint32_t) strlen(nameSpace); + mNameSpace = (char*)PX_ALLOC(len + 1, PX_DEBUG_EXP("ApexResourceProvider::NameSpace")); + memcpy(mNameSpace, nameSpace, len + 1); + } +} + +ApexResourceProvider::NameSpace::~NameSpace() +{ + // Free up all collision chains in the hash table + for (uint32_t i = 0 ; i < HashSize ; i++) + { + while (hash[i]) + { + const char* entry = hash[i]; + const entryHeader* hdr = (const entryHeader*) entry; + const char* next = hdr->nextEntry; + PX_FREE((void*) entry); + hash[i] = next; + } + } + PX_FREE(mNameSpace); +} + +ResID ApexResourceProvider::NameSpace::getOrCreateID(const char* &name, const char* NSName) +{ + /* Hash Table Entry: | nextEntry* | ResID | name | */ + uint16_t h = genHash(name); + const char* entry = hash[h]; + + while (entry) + { + entryHeader* hdr = (entryHeader*) entry; + const char* entryName = entry + sizeof(entryHeader); + + if (mArp->stringsMatch(name, entryName)) + { + name = entryName; + return hdr->id; + } + + entry = hdr->nextEntry; + } + + size_t len = strlen(name); + size_t bufsize = len + 1 + sizeof(entryHeader); + char* newEntry = (char*) PX_ALLOC(bufsize, PX_DEBUG_EXP("ApexResourceProvider::NameSpace::getOrCreateID")); + if (newEntry) + { +#if defined(WIN32) + strncpy_s(newEntry + sizeof(entryHeader), bufsize - sizeof(entryHeader), name, len); +#else + strcpy(newEntry + sizeof(entryHeader), name); +#endif + entryHeader* hdr = (entryHeader*) newEntry; + hdr->nextEntry = hash[h]; + hdr->id = mArp->mResources.size(); + + resource res; + res.ptr = (void*) UnknownValue; + res.valueIsSet = 
false;
		res.name = newEntry + sizeof(entryHeader);
		res.nameSpace = NSName;
		res.refCount = 0;
		res.usedGetResource = 0;
		mArp->mResources.pushBack(res);

		hash[h] = (const char*) newEntry;

		// Point the caller's name reference at the persistent copy stored in the hash entry.
		name = res.name;
		return hdr->id;
	}

	return INVALID_RESOURCE_ID;
}

// Hashes a namespace name to a ResID, asserting if two distinct registered
// namespace names ever collide on the same hash value.
ResID ApexResourceProvider::getNSID(const char* nsName)
{
	physx::Hash<const char*> h;
	ResID id = h(nsName);
	const HashMapNSID::Entry* nsid = mNSID.find(id);
	if (nsid)
	{
		PX_ASSERT(nsid->second < mNameSpaces.size() && mNameSpaces[nsid->second] != NULL);
		if (nsid->second < mNameSpaces.size() && mNameSpaces[nsid->second] != NULL && !h.equal(nsName, mNameSpaces[nsid->second]->getNameSpace()))
		{
			PX_ALWAYS_ASSERT_MESSAGE("Hash collision detected for namespaces in ApexResourceProvider. Try to adjust hash function.");
			return INVALID_RESOURCE_ID;
		}
	}
	return id;
}

// Translates a hashed namespace ID into its index within mNameSpaces,
// or INVALID_RESOURCE_ID when unknown.
uint32_t ApexResourceProvider::getNSIndex(ResID nameSpace)
{
	PX_ASSERT(nameSpace != INVALID_RESOURCE_ID);
	if (nameSpace == INVALID_RESOURCE_ID) return INVALID_RESOURCE_ID;
	const HashMapNSID::Entry* ns = mNSID.find(nameSpace);
	PX_ASSERT(ns);
	uint32_t nsIndex = ns ? ns->second : INVALID_RESOURCE_ID;
	PX_ASSERT(nsIndex < mNameSpaces.size());
	return nsIndex;
}

// Hashes a resource name into a bucket index for the per-namespace hash table.
uint16_t ApexResourceProvider::NameSpace::genHash(const char* name)
{
	PX_ASSERT(name != NULL);
	/* XOR the bytes of the name together (upper-cased unless case sensitive) */
	uint32_t h = 0;
	uint32_t* read32 = (uint32_t*)name;
	size_t len = strlen(name);

	/* Fold each byte into the hash one at a time */
	uint8_t* read8 = (uint8_t*) read32;
	while (len)
	{
		if (mArp->isCaseSensitive())
		{
			h ^= *read8;
		}
		else
		{
			h ^= toupper(*read8);
		}
		read8++;
		len -= sizeof(uint8_t);
	}

	/* XOR fold top 16 bits over bottom 16 bits */
	h ^= (h >> 16);

	return (uint16_t)(h & (HashSize - 1));
}

// Debug aid: logs every resource entry (namespace, name, refcount, pointer, set flag).
// NOTE(review): "0x%08x" truncates 64-bit pointers - consider %p.
void ApexResourceProvider::dumpResourceTable()
{
	APEX_DEBUG_INFO("ApexResourceProvider::dumpResourceTable");
	APEX_DEBUG_INFO("namespace name refcount pointer valueIsSet");

	for (uint32_t i = 0; i < mResources.size(); i++)
	{
		APEX_DEBUG_INFO("%s %s %d 0x%08x %d", mResources[i].nameSpace, mResources[i].name, mResources[i].refCount, mResources[i].ptr, mResources[i].valueIsSet);
	}
}


// Convenience wrapper: stores a 32-bit value as the resource's pointer payload.
void ApexResourceProvider::setResourceU32(const char* nameSpace, const char* name, uint32_t id, bool incRefCount)
{
	setResource(nameSpace, name, (void*)(size_t)id, true, incRefCount);
}

// Drops one reference on every set resource in the namespace, invoking the user
// release callback for those that reach zero.  Returns the number of resources touched.
uint32_t ApexResourceProvider::releaseAllResourcesInNamespace(const char* nameSpace)
{
	uint32_t ret = 0;

	for (uint32_t i = 0; i < mResources.size(); i++)
	{
		ApexResourceProvider::resource& res = mResources[i];
		if (stringsMatch(res.nameSpace, nameSpace) && res.valueIsSet)
		{
			ret++;
			PX_ASSERT(res.refCount);
			if (res.refCount > 0)
			{
				res.refCount--;
				if (res.refCount == 0 && mUserCallback &&
				        res.valueIsSet && res.ptr != NULL)
				{
					mUserCallback->releaseResource(res.nameSpace, res.name, res.ptr);
					res.ptr = (void*) UnknownValue;
					res.valueIsSet = false;
				}
			}
		}
	}

	return ret;
}

// Releases one reference on the named resource; returns the remaining reference count.
uint32_t ApexResourceProvider::releaseResource(const char* nameSpace, const char* name)
{
	uint32_t ret = 0;

PX_ASSERT(nameSpace); + PX_ASSERT(name); + uint32_t nsIndex = getNSIndex(createNameSpaceInternal(nameSpace, true)); + if (nsIndex < mNameSpaces.size()) + { + ResID id = mNameSpaces[nsIndex]->getOrCreateID(name, nameSpace); + PX_ASSERT(id < mResources.size()); + if (id < mResources.size()) + { + ApexResourceProvider::resource& res = mResources[id]; + if (res.valueIsSet) + { + ret = (uint32_t)res.refCount - 1; + releaseResource(id); + } + } + } + + + return ret; +} + +bool ApexResourceProvider::findRefCount(const char* nameSpace, const char* name, uint32_t& refCount) +{ + bool ret = false; + refCount = 0; + PX_ASSERT(nameSpace); + PX_ASSERT(name); + uint32_t nsIndex = getNSIndex(createNameSpaceInternal(nameSpace, true)); + if (nsIndex < mNameSpaces.size()) + { + ResID id = mNameSpaces[nsIndex]->getOrCreateID(name, nameSpace); + PX_ASSERT(id < mResources.size()); + if (id < mResources.size()) + { + if (mResources[id].valueIsSet) + { + ret = true; + refCount = mResources[id].refCount; + } + } + } + + return ret; +} + +void* ApexResourceProvider::findResource(const char* nameSpace, const char* name) +{ + void* ret = NULL; + PX_ASSERT(nameSpace); + PX_ASSERT(name); + uint32_t nsIndex = getNSIndex(createNameSpaceInternal(nameSpace, true)); + if (nsIndex < mNameSpaces.size()) + { + ResID id = mNameSpaces[nsIndex]->getOrCreateID(name, nameSpace); + PX_ASSERT(id < mResources.size()); + if (id < mResources.size()) + { + if (mResources[id].valueIsSet) + { + ret = mResources[id].ptr; + } + } + } + return ret; +} + +uint32_t ApexResourceProvider::findResourceU32(const char* nameSpace, const char* name) // find an existing resource. 
+{ + uint32_t ret = 0; + PX_ASSERT(nameSpace); + PX_ASSERT(name); + uint32_t nsIndex = getNSIndex(createNameSpaceInternal(nameSpace, true)); + if (nsIndex < mNameSpaces.size()) + { + ResID id = mNameSpaces[nsIndex]->getOrCreateID(name, nameSpace); + PX_ASSERT(id < mResources.size()); + if (id < mResources.size()) + { + if (mResources[id].valueIsSet) + { +#if PX_X64 + uint64_t ret64 = (uint64_t)mResources[id].ptr; + ret = (uint32_t)ret64; +#else + ret = (uint32_t)mResources[id].ptr; +#endif + } + } + } + return ret; + +} + +void** ApexResourceProvider::findAllResources(const char* nameSpace, uint32_t& count) // find all resources in this namespace +{ + void** ret = 0; + count = 0; + + mCharResults.clear(); + for (uint32_t i = 0; i < mResources.size(); i++) + { + if (stringsMatch(nameSpace, mResources[i].nameSpace)) + { + if (mResources[i].valueIsSet) + { + mCharResults.pushBack((const char*)mResources[i].ptr); + } + } + } + if (!mCharResults.empty()) + { + ret = (void**)&mCharResults[0]; + count = mCharResults.size(); + } + + return ret; +} + +const char** ApexResourceProvider::findAllResourceNames(const char* nameSpace, uint32_t& count) // find all resources in this namespace +{ + const char** ret = 0; + count = 0; + + mCharResults.clear(); + for (uint32_t i = 0; i < mResources.size(); i++) + { + if (stringsMatch(nameSpace, mResources[i].nameSpace) && mResources[i].valueIsSet) + { + mCharResults.pushBack(mResources[i].name); + } + } + if (!mCharResults.empty()) + { + ret = &mCharResults[0]; + count = mCharResults.size(); + } + + return ret; +} + +const char** ApexResourceProvider::findNameSpaces(uint32_t& count) +{ + const char** ret = 0; + count = 0; + + mCharResults.clear(); + for (physx::Array<NameSpace*>::Iterator i = mNameSpaces.begin(); i != mNameSpaces.end(); ++i) + { + const char* nameSpace = (*i)->getNameSpace(); + if (nameSpace) + { + mCharResults.pushBack(nameSpace); + } + } + + if (!mCharResults.empty()) + { + count = mCharResults.size(); + ret = 
&mCharResults[0]; + } + + return ret; +} + +bool ApexResourceProvider::stringsMatch(const char* str0, const char* str1) +{ + if (mCaseSensitive) + { + return !nvidia::strcmp(str0, str1); + } + else + { + return !nvidia::stricmp(str0, str1); + } + +} + + +} +} // end namespace nvidia::apex diff --git a/APEX_1.4/framework/src/ApexSDKImpl.cpp b/APEX_1.4/framework/src/ApexSDKImpl.cpp new file mode 100644 index 00000000..c6a04972 --- /dev/null +++ b/APEX_1.4/framework/src/ApexSDKImpl.cpp @@ -0,0 +1,2059 @@ +/* + * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved. + * + * NVIDIA CORPORATION and its licensors retain all intellectual property + * and proprietary rights in and to this software, related documentation + * and any modifications thereto. Any use, reproduction, disclosure or + * distribution of this software and related documentation without an express + * license agreement from NVIDIA CORPORATION is strictly prohibited. + */ + + +#include "Apex.h" +#include "ApexUsingNamespace.h" +#include "ApexScene.h" +#include "ApexSDKImpl.h" +#include "FrameworkPerfScope.h" +#include "ApexRenderMeshAsset.h" +#include "ApexRenderMeshActor.h" +#include "ApexRenderMeshAssetAuthoring.h" +#include "ApexResourceProvider.h" +#include "PsMemoryBuffer.h" +#include "ApexString.h" +#include "ApexDefaultStream.h" +#include "ApexRenderDebug.h" + +#include "nvparameterized/NvParameterized.h" +#include "nvparameterized/NvParamUtils.h" + +#include "PsMemoryBuffer.h" +#include "PsFoundation.h" +#include "NvDefaultTraits.h" + +#include "PsFileBuffer.h" +#include "NvSerializerInternal.h" +#include "UserOpaqueMesh.h" +#include "ApexShape.h" + +#include "ApexAssetPreviewScene.h" +#include "RenderResourceManagerWrapper.h" +#include "PsThread.h" + +#include "PxProfileEventNames.h" +#include "ApexPvdClient.h" + +#ifndef WITHOUT_PVD +#include "PvdNxParamSerializer.h" +#include "ApexStubPxProfileZone.h" +#endif + +#define MAX_MSG_SIZE 65536 +#define WITH_DEBUG_ASSET 0 + +#if 
PX_WINDOWS_FAMILY +#include <windows/PsWindowsInclude.h> +#include <cstdio> +#include "ModuleUpdateLoader.h" + +#include <PxPvdImpl.h> + +// We require at least Visual Studio 2010 w/ SP1 to compile +#if defined(_MSC_VER) +# if _MSC_VER >= 1900 + PX_COMPILE_TIME_ASSERT(_MSC_FULL_VER >= 190000000); +# elif _MSC_VER >= 1800 + PX_COMPILE_TIME_ASSERT(_MSC_FULL_VER >= 180000000); +# elif _MSC_VER >= 1700 + PX_COMPILE_TIME_ASSERT(_MSC_FULL_VER >= 170000000); +# elif _MSC_VER >= 1600 + PX_COMPILE_TIME_ASSERT(_MSC_FULL_VER >= 160040219); +# endif + +# if _MSC_VER > 1900 + #pragma message("Detected compiler newer than Visual Studio 2013, please update min version checking in ApexSDKImpl.cpp") + PX_COMPILE_TIME_ASSERT(_MSC_VER <= 1900); +# endif +#endif + +#endif //PX_WINDOWS_FAMILY + +#if PX_OSX +#include <mach-o/dyld.h> +#include <libproc.h> +#include <dlfcn.h> +#endif + +#if PX_X86 +#define PTR_TO_UINT64(x) ((uint64_t)(uint32_t)(x)) +#else +#define PTR_TO_UINT64(x) ((uint64_t)(x)) +#endif + +#if APEX_CUDA_SUPPORT +#include "windows/PhysXIndicator.h" +#endif + +#include "PxErrorCallback.h" +#include "PxCudaContextManager.h" +#include "PxCpuDispatcher.h" +#include "PxProfileZoneManager.h" +#ifdef PHYSX_PROFILE_SDK +#if PX_PHYSICS_VERSION_MAJOR == 3 +#include "PxPhysics.h" +#endif + +nvidia::profile::PxProfileZone *gProfileZone=NULL; + +#endif + +namespace nvidia +{ +namespace apex +{ + + +extern ApexSDKImpl* gApexSdk; + +#if defined(_USRDLL) || PX_OSX +typedef Module* (NxCreateModule_FUNC)(ApexSDKIntl*, ModuleIntl**, uint32_t, uint32_t, ApexCreateError*); +#else +/* When modules are statically linked, the user must instantiate modules manually before they can be + * created via the ApexSDK::createModule() method. Each module must supply an instantiation function. 
+ */ +#endif + +ApexSDKImpl::ApexSDKImpl(ApexCreateError* errorCode, uint32_t /*inAPEXsdkVersion*/) + : mAuthorableObjects(NULL) + , mBatchSeedSize(128) + , mErrorString(NULL) + , mNumTempMemoriesActive(0) +#if PX_PHYSICS_VERSION_MAJOR == 0 + , mApexThreadPool(0) +#endif + , renderResourceManager(NULL) + , renderResourceManagerWrapper(NULL) + , apexResourceProvider(NULL) + , cookingVersion(0) + , mURRdepthTLSslot(0xFFFFFFFF) + , mEnableApexStats(true) + , mEnableConcurrencyCheck(false) +{ + if (errorCode) + { + *errorCode = APEX_CE_NO_ERROR; + } + +#if PX_DEBUG || PX_CHECKED + mURRdepthTLSslot = shdfnd::TlsAlloc(); +#endif +} + +AuthObjTypeID ApexRenderMeshAsset::mObjTypeID; +#include "ModuleFrameworkRegistration.h" +#include "ModuleCommonRegistration.h" + +void ModuleFramework::init(NvParameterized::Traits* t) +{ + ModuleFrameworkRegistration::invokeRegistration(t); + ModuleCommonRegistration::invokeRegistration(t); +} + +void ModuleFramework::release(NvParameterized::Traits* t) +{ + ModuleFrameworkRegistration::invokeUnregistration(t); + ModuleCommonRegistration::invokeUnregistration(t); +} + +// Many things can't be initialized in the constructor since they depend on gApexSdk +// being present. 
+void ApexSDKImpl::init(const ApexSDKDesc& desc) +{ + + renderResourceManager = desc.renderResourceManager; +#if PX_DEBUG || PX_CHECKED + if (renderResourceManagerWrapper != NULL) + { + PX_DELETE(renderResourceManagerWrapper); + renderResourceManagerWrapper = NULL; + } + if (renderResourceManager != NULL) + { + renderResourceManagerWrapper = PX_NEW(RenderResourceManagerWrapper)(*renderResourceManager); + } +#endif + + foundation = desc.foundation; + +#if PX_PHYSICS_VERSION_MAJOR == 3 + cooking = desc.cooking; + physXSDK = desc.physXSDK; + physXsdkVersion = desc.physXSDKVersion; +#endif + + mDllLoadPath = desc.dllLoadPath; + mCustomDllNamePostfix = desc.dllNamePostfix; + mWireframeMaterial = desc.wireframeMaterial; + mSolidShadedMaterial = desc.solidShadedMaterial; +#if PX_WINDOWS_FAMILY + mAppGuid = desc.appGuid ? desc.appGuid : DEFAULT_APP_GUID; +#endif + mRMALoadMaterialsLazily = desc.renderMeshActorLoadMaterialsLazily; + + mEnableConcurrencyCheck = desc.enableConcurrencyCheck; + + Framework::initFrameworkProfiling(this); + + apexResourceProvider = PX_NEW(ApexResourceProvider)(); + PX_ASSERT(apexResourceProvider); + apexResourceProvider->setCaseSensitivity(desc.resourceProviderIsCaseSensitive); + apexResourceProvider->registerCallback(desc.resourceCallback); + + // The param traits depend on the resource provider, so do this now + mParameterizedTraits = new NvParameterized::DefaultTraits(NvParameterized::DefaultTraits::BehaviourFlags::DEFAULT_POLICY); + GetInternalApexSDK()->getInternalResourceProvider()->createNameSpace("NvParameterizedFactories", false); + PX_ASSERT(mParameterizedTraits); + + /* create global name space of authorable asset types */ + mObjTypeNS = apexResourceProvider->createNameSpace(APEX_AUTHORABLE_ASSETS_TYPES_NAME_SPACE, false); + + /* create global name space of NvParameterized authorable asset types */ + mNxParamObjTypeNS = apexResourceProvider->createNameSpace(APEX_NV_PARAM_AUTH_ASSETS_TYPES_NAME_SPACE, false); + + /* create namespace for 
user materials */ + mMaterialNS = apexResourceProvider->createNameSpace(APEX_MATERIALS_NAME_SPACE, true); + + /* create namespace for user opaque meshes */ + mOpaqueMeshNS = apexResourceProvider->createNameSpace(APEX_OPAQUE_MESH_NAME_SPACE, false); + + /* create namespace for custom vertex buffer semantics */ + mCustomVBNS = apexResourceProvider->createNameSpace(APEX_CUSTOM_VB_NAME_SPACE, true); + + /* create namespace for novodex collision groups */ + mCollGroupNS = apexResourceProvider->createNameSpace(APEX_COLLISION_GROUP_NAME_SPACE, false); + + /* create namespace for 128-bit GroupsMasks */ + mCollGroup128NS = apexResourceProvider->createNameSpace(APEX_COLLISION_GROUP_128_NAME_SPACE, false); + + /* create namespace for 64-bit GroupsMasks64 */ + mCollGroup64NS = apexResourceProvider->createNameSpace(APEX_COLLISION_GROUP_64_NAME_SPACE, false); + + /* create namespace for novodex collision groups masks */ + mCollGroupMaskNS = apexResourceProvider->createNameSpace(APEX_COLLISION_GROUP_MASK_NAME_SPACE, false); + + /* create namespace for novodex Material IDs (returned by raycasts) */ + mPhysMatNS = apexResourceProvider->createNameSpace(APEX_PHYSICS_MATERIAL_NAME_SPACE, false); + + /* create namespace for RenderMeshAssets */ + mAuthorableObjects = PX_NEW(ResourceList); + RenderMeshAuthorableObject* AO = PX_NEW(RenderMeshAuthorableObject)(&frameworkModule, *mAuthorableObjects, RenderMeshAssetParameters::staticClassName()); + ApexRenderMeshAsset::mObjTypeID = AO->getResID(); + + frameworkModule.init(mParameterizedTraits); + + /* Create mDebugColorParams */ + void* newPtr = mParameterizedTraits->alloc(sizeof(DebugColorParamsEx)); + mDebugColorParams = NV_PARAM_PLACEMENT_NEW(newPtr, DebugColorParamsEx)(mParameterizedTraits, this); + + for (uint32_t i = 0 ; i < DescHashSize ; i++) + { + mPhysXObjDescHash[i] = 0; + } + mDescFreeList = 0; + + mCachedData = PX_NEW(ApexSDKCachedDataImpl); + + mBatchSeedSize = desc.physXObjDescTableAllocationIncrement; + +#if 
defined(PHYSX_PROFILE_SDK) + mProfileZone = 0; // &physx::profile::PxProfileZone::createProfileZone(getAllocator(), "ApexSDK"); // TODO: create a profile zone here + gProfileZone = mProfileZone; + mApexPvdClient = pvdsdk::ApexPvdClient::create(desc.pvd); +#endif +} + + +ApexSDKImpl::~ApexSDKImpl() +{ +#if PX_DEBUG || PX_CHECKED + if (mURRdepthTLSslot != 0xFFFFFFFF) + { + TlsFree(mURRdepthTLSslot); + mURRdepthTLSslot = 0xFFFFFFFF; + } +#endif + + Framework::releaseFrameworkProfiling(); +#if PHYSX_PROFILE_SDK + if ( mProfileZone ) + { + mProfileZone->release(); + mProfileZone = NULL; + } + if (mApexPvdClient != NULL) + { + mApexPvdClient->release(); + } + mApexPvdClient = NULL; +#endif +} + +ApexActor* ApexSDKImpl::getApexActor(Actor* nxactor) const +{ + AuthObjTypeID type = nxactor->getOwner()->getObjTypeID(); + if (type == ApexRenderMeshAsset::mObjTypeID) + { + return (ApexRenderMeshActor*) nxactor; + } + + ApexActor* a = NULL; + for (uint32_t i = 0; i < imodules.size(); i++) + { + a = imodules[i]->getApexActor(nxactor, type); + if (a) + { + break; + } + } + + return a; +} + +Scene* ApexSDKImpl::createScene(const SceneDesc& sceneDesc) +{ + if (!sceneDesc.isValid()) + { + return 0; + } + + ApexScene* s = PX_NEW(ApexScene)(sceneDesc, this); + mScenes.pushBack(s); + + // Trigger ModuleSceneIntl creation for all loaded modules + for (uint32_t i = 0; i < imodules.size(); i++) + { + s->moduleCreated(*imodules[i]); + } + + return s; +} + +AssetPreviewScene* ApexSDKImpl::createAssetPreviewScene() +{ + ApexAssetPreviewScene* s = PX_NEW(ApexAssetPreviewScene)(this); + + return s; +} + +void ApexSDKImpl::releaseScene(Scene* nxScene) +{ + ApexScene* scene = DYNAMIC_CAST(ApexScene*)(nxScene); + mScenes.findAndReplaceWithLast(scene); + scene->destroy(); +} + +void ApexSDKImpl::releaseAssetPreviewScene(AssetPreviewScene* nxScene) +{ + ApexAssetPreviewScene* scene = DYNAMIC_CAST(ApexAssetPreviewScene*)(nxScene); + scene->destroy(); +} + +/** Map PhysX objects back to their APEX 
objects, hold flags and pointers **/ + +uint16_t ApexPhysXObjectDesc::makeHash(size_t hashable) +{ + return static_cast<uint16_t>(UINT16_MAX & (hashable >> 8)); +} + + +PhysXObjectDescIntl* ApexSDKImpl::getGenericPhysXObjectInfo(const void* obj) const +{ + nvidia::Mutex::ScopedLock scopeLock(mPhysXObjDescsLock); + + uint16_t h = (uint16_t)(ApexPhysXObjectDesc::makeHash(reinterpret_cast<size_t>(obj)) & (DescHashSize - 1)); + uint32_t index = mPhysXObjDescHash[h]; + + while (index) + { + ApexPhysXObjectDesc* desc = const_cast<ApexPhysXObjectDesc*>(&mPhysXObjDescs[index]); + if ((void*) desc->mPhysXObject == obj) + { + return desc; + } + else + { + index = desc->mNext; + } + } + return NULL; +} + +#if PX_PHYSICS_VERSION_MAJOR == 3 + +PhysXObjectDescIntl* ApexSDKImpl::createObjectDesc(const Actor* apexActor, const PxActor* actor) +{ + return createObjectDesc(apexActor, (const void*) actor); +} +PhysXObjectDescIntl* ApexSDKImpl::createObjectDesc(const Actor* apexActor, const PxShape* shape) +{ + return createObjectDesc(apexActor, (const void*) shape); +} +PhysXObjectDescIntl* ApexSDKImpl::createObjectDesc(const Actor* apexActor, const PxJoint* joint) +{ + return createObjectDesc(apexActor, (const void*) joint); +} +PhysXObjectDescIntl* ApexSDKImpl::createObjectDesc(const Actor* apexActor, const PxCloth* cloth) +{ + return createObjectDesc(apexActor, (const void*)cloth); +} +PhysXObjectDescIntl* ApexSDKImpl::createObjectDesc(const Actor* apexActor, const PxParticleSystem* particleSystem) +{ + return createObjectDesc(apexActor, (const void*) particleSystem); +} +PhysXObjectDescIntl* ApexSDKImpl::createObjectDesc(const Actor* apexActor, const PxParticleFluid* particleFluid) +{ + return createObjectDesc(apexActor, (const void*)particleFluid); +} +const PhysXObjectDesc* ApexSDKImpl::getPhysXObjectInfo(const PxActor* actor) const +{ + return getGenericPhysXObjectInfo((void*)actor); +} +const PhysXObjectDesc* ApexSDKImpl::getPhysXObjectInfo(const PxShape* shape) const +{ + 
return getGenericPhysXObjectInfo((void*)shape); +} +const PhysXObjectDesc* ApexSDKImpl::getPhysXObjectInfo(const PxJoint* joint) const +{ + return getGenericPhysXObjectInfo((void*)joint); +} +const PhysXObjectDesc* ApexSDKImpl::getPhysXObjectInfo(const PxCloth* cloth) const +{ + return getGenericPhysXObjectInfo((void*)cloth); +} +const PhysXObjectDesc* ApexSDKImpl::getPhysXObjectInfo(const PxParticleSystem* particleSystem) const +{ + return getGenericPhysXObjectInfo((void*)particleSystem); +} +const PhysXObjectDesc* ApexSDKImpl::getPhysXObjectInfo(const PxParticleFluid* particleFluid) const +{ + return getGenericPhysXObjectInfo((void*)particleFluid); +} +PxPhysics* ApexSDKImpl::getPhysXSDK() +{ + return physXSDK; +} +PxCooking* ApexSDKImpl::getCookingInterface() +{ + return cooking; +} + +#endif + +AuthObjTypeID ApexSDKImpl::registerAuthObjType(const char* authTypeName, ResID nsid) +{ + AuthObjTypeID aotid = apexResourceProvider->createResource(mObjTypeNS, authTypeName, false); + apexResourceProvider->setResource(APEX_AUTHORABLE_ASSETS_TYPES_NAME_SPACE, + authTypeName, + (void*)(size_t) nsid, false); + return aotid; +} + +AuthObjTypeID ApexSDKImpl::registerAuthObjType(const char* authTypeName, AuthorableObjectIntl* authObjPtr) +{ + AuthObjTypeID aotid = apexResourceProvider->createResource(mObjTypeNS, authTypeName, false); + apexResourceProvider->setResource(APEX_AUTHORABLE_ASSETS_TYPES_NAME_SPACE, + authTypeName, + (void*) authObjPtr, false); + return aotid; +} + +AuthObjTypeID ApexSDKImpl::registerNvParamAuthType(const char* authTypeName, AuthorableObjectIntl* authObjPtr) +{ + AuthObjTypeID aotid = apexResourceProvider->createResource(mNxParamObjTypeNS, authTypeName, false); + apexResourceProvider->setResource(APEX_NV_PARAM_AUTH_ASSETS_TYPES_NAME_SPACE, + authTypeName, + (void*) authObjPtr, false); + return aotid; +} + +void ApexSDKImpl::unregisterAuthObjType(const char* authTypeName) +{ + apexResourceProvider->setResource(APEX_AUTHORABLE_ASSETS_TYPES_NAME_SPACE, 
+ authTypeName, + (void*) NULL, false); +} + +void ApexSDKImpl::unregisterNvParamAuthType(const char* authTypeName) +{ + apexResourceProvider->setResource(APEX_NV_PARAM_AUTH_ASSETS_TYPES_NAME_SPACE, + authTypeName, + (void*) NULL, false); +} + +AuthorableObjectIntl* ApexSDKImpl::getAuthorableObject(const char* authTypeName) +{ + if (!apexResourceProvider->checkResource(mObjTypeNS, authTypeName)) + { + return NULL; + } + + void* ao = apexResourceProvider->getResource(APEX_AUTHORABLE_ASSETS_TYPES_NAME_SPACE, authTypeName); + + return static_cast<AuthorableObjectIntl*>(ao); +} + +AuthorableObjectIntl* ApexSDKImpl::getParamAuthObject(const char* paramName) +{ + if (!apexResourceProvider->checkResource(mNxParamObjTypeNS, paramName)) + { + return NULL; + } + + void* ao = apexResourceProvider->getResource(APEX_NV_PARAM_AUTH_ASSETS_TYPES_NAME_SPACE, paramName); + + return static_cast<AuthorableObjectIntl*>(ao); +} + +bool ApexSDKImpl::getAuthorableObjectNames(const char** authTypeNames, uint32_t& outCount, uint32_t inCount) +{ + ResID ids[128]; + + if (!apexResourceProvider->getResourceIDs(APEX_AUTHORABLE_ASSETS_TYPES_NAME_SPACE, ids, outCount, 128)) + { + return false; + } + + if (outCount > inCount) + { + return false; + } + + for (uint32_t i = 0; i < outCount; i++) + { + authTypeNames[i] = apexResourceProvider->getResourceName(ids[i]); + +#define JUST_A_TEST 0 +#if JUST_A_TEST +#include <stdio.h> + AuthorableObjectIntl* ao = (AuthorableObjectIntl*)getAuthorableObject(authTypeNames[i]); + + Asset* assetList[32]; + uint32_t retCount = 0; + ao->getAssetList(assetList, retCount, 32); + + if (retCount) + { + printf("%s count: %d\n", authTypeNames[i], retCount); + const NvParameterized::Interface* p = assetList[0]->getAssetNvParameterized(); + if (p) + { + printf(" NvParam class name: %s\n", p->className()); + } + } +#endif + } + + return true; +} + +ResID ApexSDKImpl::getApexMeshNameSpace() +{ + AuthorableObjectIntl* AO = getAuthorableObject(RENDER_MESH_AUTHORING_TYPE_NAME); 
+ if (AO) + { + return AO->getResID(); + } + else + { + return INVALID_RESOURCE_ID; + } +} + + + +PhysXObjectDescIntl* ApexSDKImpl::createObjectDesc(const Actor* apexActor, const void* nxPtr) +{ + nvidia::Mutex::ScopedLock scopeLock(mPhysXObjDescsLock); + + uint16_t h = (uint16_t)(ApexPhysXObjectDesc::makeHash(reinterpret_cast<size_t>(nxPtr)) & (DescHashSize - 1)); + uint32_t index = mPhysXObjDescHash[ h ]; + + while (index) + { + ApexPhysXObjectDesc* desc = &mPhysXObjDescs[ index ]; + if (desc->mPhysXObject == nxPtr) + { + APEX_DEBUG_WARNING("createObjectDesc: Object already registered"); + bool hasActor = false; + for (uint32_t i = desc->mApexActors.size(); i--;) + { + if (desc->mApexActors[i] == apexActor) + { + hasActor = true; + break; + } + } + if (hasActor) + { + APEX_DEBUG_WARNING("createObjectDesc: Object already registered with the given Actor"); + } + else + { + desc->mApexActors.pushBack(apexActor); + } + return desc; + } + else + { + index = desc->mNext; + } + } + + // Match not found, allocate new object descriptor + + if (!mDescFreeList) + { + // Free list is empty, seed it with new batch + uint32_t size = mPhysXObjDescs.size(); + if (size == 0) // special initial case, reserve entry 0 + { + size = 1; + mPhysXObjDescs.resize(size + mBatchSeedSize); + } + else + { + PX_PROFILE_ZONE("objDescsResize", GetInternalApexSDK()->getContextId()); + + // Instead of doing a straight resize of mPhysXObjDescs the array is resized by swapping. Doing so removes the potential + // copying/reallocating of the arrays held in ApexPhysXObjectDesc elements which is costly performance wise. 
+ physx::Array<ApexPhysXObjectDesc> swapArray; + swapArray.swap(mPhysXObjDescs); + + mPhysXObjDescs.resize(size + mBatchSeedSize); + ApexPhysXObjectDesc* src = swapArray.begin(); + ApexPhysXObjectDesc* dst = mPhysXObjDescs.begin(); + for (physx::PxU32 i = 0; i < size; i++) + { + src[i].swap(dst[i]); + } + } + + for (uint32_t i = size ; i < size + mBatchSeedSize ; i++) + { + mPhysXObjDescs[i].mNext = mDescFreeList; + mDescFreeList = i; + } + } + + index = mDescFreeList; + ApexPhysXObjectDesc* desc = &mPhysXObjDescs[ index ]; + mDescFreeList = desc->mNext; + + desc->mFlags = 0; + desc->userData = NULL; + desc->mApexActors.reset(); + desc->mApexActors.pushBack(apexActor); + desc->mNext = mPhysXObjDescHash[ h ]; + if (desc->mNext) + { + PX_ASSERT(mPhysXObjDescs[ desc->mNext ].mPrev == 0); + mPhysXObjDescs[ desc->mNext ].mPrev = index; + } + desc->mPrev = 0; + desc->mPhysXObject = nxPtr; + mPhysXObjDescHash[ h ] = index; + + /* Calling function can set mFlags and userData */ + + return desc; +} + +void ApexSDKImpl::releaseObjectDesc(void* physXObject) +{ + nvidia::Mutex::ScopedLock scopeLock(mPhysXObjDescsLock); + + uint16_t h = (uint16_t)(ApexPhysXObjectDesc::makeHash(reinterpret_cast<size_t>(physXObject)) & (DescHashSize - 1)); + uint32_t index = mPhysXObjDescHash[ h ]; + + while (index) + { + ApexPhysXObjectDesc* desc = &mPhysXObjDescs[ index ]; + + if (desc->mPhysXObject == physXObject) + { + if (desc->mPrev) + { + mPhysXObjDescs[ desc->mPrev ].mNext = desc->mNext; + } + else + { + mPhysXObjDescHash[ h ] = desc->mNext; + } + + if (desc->mNext) + { + mPhysXObjDescs[ desc->mNext ].mPrev = desc->mPrev; + } + + desc->mNext = mDescFreeList; + mDescFreeList = index; + + desc->mApexActors.reset(); + return; + } + else + { + index = desc->mNext; + } + } + + APEX_DEBUG_WARNING("releaseObjectDesc: Unable to release object descriptor"); +} + + +void ApexSDKImpl::releaseModule(Module* module) +{ + for (uint32_t i = 0; i < modules.size(); i++) + { + if (modules[i] != module) + { 
+ continue; + } + + // The module will remove its ModuleScenesIntl from each Scene + mCachedData->unregisterModuleDataCache(imodules[ i ]->getModuleDataCache()); + + ModuleIntl *im = imodules[i]; + imodules[i] = NULL; + modules[i] = NULL; + im->destroy(); + +// modules.replaceWithLast(i); +// imodules.replaceWithLast(i); + + break; + } +} + +void ApexSDKImpl::registerModule(Module* newModule, ModuleIntl* newIModule) +{ + + uint32_t newIndex = modules.size(); + for (uint32_t i=0; i<newIndex; i++) + { + if ( imodules[i] == NULL ) + { + newIndex = i; + break; + } + } + if ( newIndex == modules.size() ) + { + modules.pushBack(newModule); + imodules.pushBack(newIModule); + } + + // Trigger ModuleSceneIntl creation for all existing scenes + for (uint32_t i = 0 ; i < mScenes.size(); i++) + { + (DYNAMIC_CAST(ApexScene*)(mScenes[i]))->moduleCreated(*newIModule); + } + + mCachedData->registerModuleDataCache(newIModule->getModuleDataCache()); +} + +Module* ApexSDKImpl::createModule(const char* name, ApexCreateError* err) +{ + if (err) + { + *err = APEX_CE_NO_ERROR; + } + + // Return existing module if it's already loaded + for (uint32_t i = 0; i < modules.size(); i++) + { + if ( modules[i] && !nvidia::strcmp(modules[ i ]->getName(), name)) + { + ModuleIntl *imodule = imodules[i]; + if( imodule->isCreateOk() ) + { + return modules[ i ]; + } + else + { + APEX_DEBUG_WARNING("ApexSDKImpl::createModule(%s) Not allowed.", name ); + if (err) + { + *err = APEX_CE_CREATE_NO_ALLOWED; + } + return NULL; + } + } + } + + Module* newModule = NULL; + ModuleIntl* newIModule = NULL; + +#if defined(_USRDLL) || PX_OSX + /* Dynamically linked module libraries */ + +#if defined(WIN32) + ApexSimpleString dllName = mDllLoadPath + ApexSimpleString("APEX_") + ApexSimpleString(name); +#if _DEBUG + // Request DEBUG DLL unless the user has explicitly asked for it + const size_t nameLen = strlen(name); + if (nameLen <= 5 || nvidia::strcmp(name + nameLen - 5, "DEBUG")) + { + dllName += 
ApexSimpleString("DEBUG"); + } +#elif PX_CHECKED + dllName += ApexSimpleString("CHECKED"); +#elif defined(PHYSX_PROFILE_SDK) + dllName += ApexSimpleString("PROFILE"); +#endif + +#if PX_X86 + dllName += ApexSimpleString("_x86"); +#elif PX_X64 + dllName += ApexSimpleString("_x64"); +#endif + + dllName += mCustomDllNamePostfix; + + dllName += ApexSimpleString(".dll"); + + HMODULE library = NULL; + { + ModuleUpdateLoader moduleLoader(UPDATE_LOADER_DLL_NAME); + library = moduleLoader.loadModule(dllName.c_str(), getAppGuid()); + + if (NULL == library) + { + dllName = ApexSimpleString("APEX/") + dllName; + library = moduleLoader.loadModule(dllName.c_str(), getAppGuid()); + } + } + + if (library) + { + NxCreateModule_FUNC* createModuleFunc = (NxCreateModule_FUNC*) GetProcAddress(library, "createModule"); + if (createModuleFunc) + { + newModule = createModuleFunc((ApexSDKIntl*) this, + &newIModule, + APEX_SDK_VERSION, + PX_PHYSICS_VERSION, + err); + } + } +#elif PX_OSX + ApexSimpleString dylibName = ApexSimpleString("libAPEX_") + ApexSimpleString(name); + +#if _DEBUG + // Request DEBUG DLL unless the user has explicitly asked for it + const size_t nameLen = strlen(name); + if (nameLen <= 5 || nvidia::strcmp(name + nameLen - 5, "DEBUG")) + { + dylibName += ApexSimpleString("DEBUG"); + } +#elif PX_CHECKED + dylibName += ApexSimpleString("CHECKED"); +#elif defined(PHYSX_PROFILE_SDK) + dylibName += ApexSimpleString("PROFILE"); +#endif + + dylibName += mCustomDllNamePostfix; + + dylibName += ApexSimpleString(".dylib"); + + ApexSimpleString dylibPath = mDllLoadPath + dylibName; + + void* library = NULL; + { + // Check if dylib is already loaded + library = dlopen(dylibPath.c_str(), RTLD_NOLOAD | RTLD_LAZY | RTLD_LOCAL); + if (!library) + { + library = dlopen((ApexSimpleString("@rpath/") + dylibName).c_str(), RTLD_NOLOAD | RTLD_LAZY | RTLD_LOCAL); + } + if (!library) + { + // Not loaded yet, so try to open it + library = dlopen(dylibPath.c_str(), RTLD_LAZY | RTLD_LOCAL); + } + } + 
+ if (library) + { + NxCreateModule_FUNC* createModuleFunc = (NxCreateModule_FUNC*)dlsym(library, "createModule"); + if (createModuleFunc) + { + newModule = createModuleFunc((ApexSDKIntl*) this, + &newIModule, + APEX_SDK_VERSION, + PX_PHYSICS_VERSION, + err); + } + } +#else + /* TODO: other platform dynamic linking? */ +#endif + +#else + /* Statically linked module libraries */ + + /* Modules must supply an instantiation function which calls ApexSDKIntl::registerModule() + * The user must call this function after creating ApexSDKImpl and before createModule(). + */ +#endif + + // register new module and its parameters + if (newModule) + { + registerModule(newModule, newIModule); + } + else if (err) + { + *err = APEX_CE_NOT_FOUND; + } + + return newModule; +} + +ModuleIntl* ApexSDKImpl::getInternalModuleByName(const char* name) +{ + // Return existing module if it's already loaded + for (uint32_t i = 0; i < modules.size(); i++) + { + if (!nvidia::strcmp(modules[ i ]->getName(), name)) + { + return imodules[ i ]; + } + } + return NULL; +} + +PxFileBuf* ApexSDKImpl::createStream(const char* filename, PxFileBuf::OpenMode mode) +{ + return PX_NEW(PsFileBuffer)(filename, mode); +} + +// deprecated, use getErrorCallback instead +PxErrorCallback* ApexSDKImpl::getOutputStream() +{ + return getErrorCallback(); +} + +PxFoundation* ApexSDKImpl::getFoundation() const +{ + return foundation; +} + +PxErrorCallback* ApexSDKImpl::getErrorCallback() const +{ + PX_ASSERT(foundation); + return &foundation->getErrorCallback(); +} + +PxAllocatorCallback* ApexSDKImpl::getAllocator() const +{ + PX_ASSERT(foundation); + return &foundation->getAllocatorCallback(); +} + +ResourceProvider* ApexSDKImpl::getNamedResourceProvider() +{ + return apexResourceProvider; +} + +ResourceProviderIntl* ApexSDKImpl::getInternalResourceProvider() +{ + return apexResourceProvider; +} + +uint32_t ApexSDKImpl::getNbModules() +{ + uint32_t moduleCount = 0; + for (uint32_t i=0; i<modules.size(); i++) + { + if 
(modules[i] != NULL) + { + moduleCount++; + } + } + + return moduleCount; +} + +Module** ApexSDKImpl::getModules() +{ + if (modules.size() > 0) + { + moduleListForAPI.resize(0); + for (uint32_t i=0; i<modules.size(); i++) + { + if (modules[i] != NULL) + { + moduleListForAPI.pushBack(modules[i]); + } + } + + return &moduleListForAPI.front(); + } + else + { + return NULL; + } +} + + +ModuleIntl** ApexSDKImpl::getInternalModules() +{ + if (imodules.size() > 0) + { + return &imodules.front(); + } + else + { + return NULL; + } +} + +uint32_t ApexSDKImpl::forceLoadAssets() +{ + uint32_t loadedAssetCount = 0; + + // handle render meshes, since they don't live in a module + if (mAuthorableObjects != NULL) + { + for (uint32_t i = 0; i < mAuthorableObjects->getSize(); i++) + { + AuthorableObjectIntl* ao = static_cast<AuthorableObjectIntl*>(mAuthorableObjects->getResource(i)); + loadedAssetCount += ao->forceLoadAssets(); + } + } + + for (uint32_t i = 0; i < imodules.size(); i++) + { + loadedAssetCount += imodules[i]->forceLoadAssets(); + } + + return loadedAssetCount; +} + + +void ApexSDKImpl::debugAsset(Asset* asset, const char* name) +{ + PX_UNUSED(asset); + PX_UNUSED(name); +#if WITH_DEBUG_ASSET + if (asset) + { + const NvParameterized::Interface* pm = asset->getAssetNvParameterized(); + if (pm) + { + NvParameterized::Serializer* s1 = internalCreateSerializer(NvParameterized::Serializer::NST_XML, mParameterizedTraits); + NvParameterized::Serializer* s2 = internalCreateSerializer(NvParameterized::Serializer::NST_BINARY, mParameterizedTraits); + if (s1 && s2) + { + nvidia::PsMemoryBuffer mb1; + nvidia::PsMemoryBuffer mb2; + s1->serialize(mb1, &pm, 1); + s2->serialize(mb2, &pm, 1); + { + char scratch[512]; + nvidia::strlcpy(scratch, 512, name); + char* dot = NULL; + char* scan = scratch; + while (*scan) + { + if (*scan == '/') + { + *scan = '_'; + } + if (*scan == '\\') + { + *scan = '_'; + } + if (*scan == '.') + { + dot = scan; + } + scan++; + } + + if (dot) + { + *dot = 0; 
+ } + + nvidia::strlcat(scratch, 512, ".apx"); + FILE* fph = fopen(scratch, "wb"); + if (fph) + { + fwrite(mb1.getWriteBuffer(), mb1.getWriteBufferSize(), 1, fph); + fclose(fph); + } + if (dot) + { + *dot = 0; + } + + nvidia::strlcat(scratch, 512, ".apb"); + fph = fopen(scratch, "wb"); + if (fph) + { + fwrite(mb2.getWriteBuffer(), mb2.getWriteBufferSize(), 1, fph); + fclose(fph); + } + + } + s1->release(); + s2->release(); + } + } + } +#endif +} + +/** + * checkAssetName + * -If name is NULL, we'll autogenerate one that won't collide with other names and issue a warning + * -If name collides with another name, we'll issue a warning and return NULL, so as not to confuse the + * user by creating an asset authoring with a name that's different from the name specified. + */ +const char* ApexSDKImpl::checkAssetName(AuthorableObjectIntl& ao, const char* inName, ApexSimpleString& autoNameStorage) +{ + ResourceProviderIntl* iNRP = getInternalResourceProvider(); + + if (!inName) + { + autoNameStorage = ao.getName(); + iNRP->generateUniqueName(ao.getResID(), autoNameStorage); + + APEX_DEBUG_INFO("No name provided for asset, auto-naming <%s>.", autoNameStorage.c_str()); + return autoNameStorage.c_str(); + } + + if (iNRP->checkResource(ao.getResID(), inName)) + { + // name collides with another asset [author] + APEX_DEBUG_WARNING("Name provided collides with another asset in the %s namespace: <%s>, no asset created.", ao.getName().c_str(), inName); + + return NULL; + } + + return inName; +} + +/** + * createAsset + * This method will load *any* APEX asset. + * 1. Read the APEX serialization header + * 2. Determine the correct module + * 3. 
Pass the remainder of the stream to the module along with the asset type name and asset version + */ + +Asset* ApexSDKImpl::createAsset(AssetAuthoring& nxAssetAuthoring, const char* name) +{ + Asset* ret = NULL; + AuthorableObjectIntl* ao = getAuthorableObject(nxAssetAuthoring.getObjTypeName()); + if (ao) + { + ApexSimpleString autoName; + name = checkAssetName(*ao, name, autoName); + if (!name) + { + return NULL; + } + + ret = ao->createAsset(nxAssetAuthoring, name); + } + else + { + APEX_INTERNAL_ERROR("Unknown authorable type: %s, please load all required modules.", nxAssetAuthoring.getObjTypeName()); + } + debugAsset(ret, name); + return ret; +} + +Asset* ApexSDKImpl::createAsset(NvParameterized::Interface* params, const char* name) +{ + Asset* ret = NULL; + // params->className() will tell us the name of the parameterized struct + // there is a mapping of parameterized structs to + PX_ASSERT(params); + if (params) + { + AuthorableObjectIntl* ao = getParamAuthObject(params->className()); + if (ao) + { + ApexSimpleString autoName; + name = checkAssetName(*ao, name, autoName); + if (!name) + { + return NULL; + } + + ret = ao->createAsset(params, name); + } + else + { + APEX_INTERNAL_ERROR("Unknown authorable type: %s, please load all required modules.", params->className()); + } + } + debugAsset(ret, name); + return ret; +} + +AssetAuthoring* ApexSDKImpl::createAssetAuthoring(const char* aoTypeName) +{ + AuthorableObjectIntl* ao = getAuthorableObject(aoTypeName); + if (ao) + { + ApexSimpleString autoName; + const char* name = 0; + name = checkAssetName(*ao, name, autoName); + if (!name) + { + return NULL; + } + + + return ao->createAssetAuthoring(name); + } + else + { + APEX_INTERNAL_ERROR("Unknown authorable type: %s, please load all required modules.", aoTypeName); + } + + return NULL; +} + +AssetAuthoring* ApexSDKImpl::createAssetAuthoring(const char* aoTypeName, const char* name) +{ + AuthorableObjectIntl* ao = getAuthorableObject(aoTypeName); + if (ao) + { + 
ApexSimpleString autoName; + name = checkAssetName(*ao, name, autoName); + if (!name) + { + return NULL; + } + + return ao->createAssetAuthoring(name); + } + else + { + APEX_INTERNAL_ERROR("Unknown authorable type: %s, please load all required modules.", aoTypeName); + } + + return NULL; +} + +AssetAuthoring* ApexSDKImpl::createAssetAuthoring(NvParameterized::Interface* params, const char* name) +{ + PX_ASSERT(params); + if (!params) + { + APEX_DEBUG_WARNING("NULL NvParameterized Interface, no asset author created."); + return NULL; + } + + AuthorableObjectIntl* ao = getParamAuthObject(params->className()); + if (ao) + { + ApexSimpleString autoName; + name = checkAssetName(*ao, name, autoName); + if (!name) + { + return NULL; + } + + return ao->createAssetAuthoring(params, name); + } + else + { + APEX_INTERNAL_ERROR("Unknown authorable type: %s, please load all required modules.", params->className()); + } + + return NULL; +} + +/** + * releaseAsset + * + */ +void ApexSDKImpl::releaseAsset(Asset& nxasset) +{ + AuthorableObjectIntl* ao = getAuthorableObject(nxasset.getObjTypeName()); + if (ao) + { + return ao->releaseAsset(nxasset); + } + else + { + APEX_INTERNAL_ERROR("Unknown authorable type: %s, please load all required modules.", nxasset.getObjTypeName()); + } +} + +void ApexSDKImpl::releaseAssetAuthoring(AssetAuthoring& nxAssetAuthoring) +{ + AuthorableObjectIntl* ao = getAuthorableObject(nxAssetAuthoring.getObjTypeName()); + if (ao) + { + return ao->releaseAssetAuthoring(nxAssetAuthoring); + } + else + { + APEX_INTERNAL_ERROR("Unknown authorable type: %s, please load all required modules.", nxAssetAuthoring.getObjTypeName()); + } +} + +void ApexSDKImpl::reportError(PxErrorCode::Enum code, const char* file, int line, const char* functionName, const char* msgFormat, ...) 
+{ + mReportErrorLock.lock(); + + if (gApexSdk) + { + if (getErrorCallback()) + { + if (mErrorString == NULL && code != PxErrorCode::eOUT_OF_MEMORY) + { + mErrorString = (char*) PX_ALLOC(sizeof(char) * MAX_MSG_SIZE, PX_DEBUG_EXP("char")); + } + if (mErrorString != NULL) + { + va_list va; + va_start(va, msgFormat); + + size_t tempLength = 0; + if (functionName != NULL) + { + shdfnd::snprintf(mErrorString,MAX_MSG_SIZE, "%s: ", functionName); + tempLength = strlen(mErrorString); + } + + vsprintf(mErrorString + tempLength, msgFormat, va); + va_end(va); + getErrorCallback()->reportError(code, mErrorString, file, line); + } + else + { + // we can't allocate any memory anymore, let's hope the stack has still a bit of space + char buf[ 100 ]; + va_list va; + va_start(va, msgFormat); + vsprintf(buf, msgFormat, va); + va_end(va); + getErrorCallback()->reportError(code, buf, file, line); + } + } + } + mReportErrorLock.unlock(); +} + + + +void* ApexSDKImpl::getTempMemory(uint32_t size) +{ + mTempMemoryLock.lock(); + + if (size == 0 || mTempMemories.size() > 100) //later growing a size 0 allocation is not handled gracefully! 
+ { + // this is most likely a leak in temp memory consumption + mTempMemoryLock.unlock(); + return NULL; + } + + // now find the smallest one that is bigger than 'size' + int32_t found = -1; + uint32_t bestSize = 0; + for (uint32_t i = mNumTempMemoriesActive; i < mTempMemories.size(); i++) + { + if (mTempMemories[i].size >= size) + { + if (found == -1 || bestSize > mTempMemories[i].size) + { + found = (int32_t)i; + bestSize = mTempMemories[i].size; + } + } + } + + TempMemory result; + + if (found != -1) + { + // found + if ((uint32_t)found > mNumTempMemoriesActive) + { + // swap them + TempMemory temp = mTempMemories[mNumTempMemoriesActive]; + mTempMemories[mNumTempMemoriesActive] = mTempMemories[(uint32_t)found]; + mTempMemories[(uint32_t)found] = temp; + } + PX_ASSERT(mTempMemories[mNumTempMemoriesActive].used == 0); + mTempMemories[mNumTempMemoriesActive].used = size; + result = mTempMemories[mNumTempMemoriesActive]; + mNumTempMemoriesActive++; + } + else if (mNumTempMemoriesActive < mTempMemories.size()) + { + // not found, use last one + + // swap + TempMemory temp = mTempMemories.back(); + mTempMemories.back() = mTempMemories[mNumTempMemoriesActive]; + + void* nb = PX_ALLOC(size, PX_DEBUG_EXP("ApexSDKImpl::getTempMemory")); + if (nb) + { + memcpy(nb, temp.memory, PxMin(temp.size, size)); + } + PX_FREE(temp.memory); + temp.memory = nb; + temp.size = size; + PX_ASSERT(temp.used == 0); + temp.used = size; + + mTempMemories[mNumTempMemoriesActive] = temp; + result = temp; + mNumTempMemoriesActive++; + } + else + { + mNumTempMemoriesActive++; + TempMemory& newTemp = mTempMemories.insert(); + newTemp.memory = PX_ALLOC(size, PX_DEBUG_EXP("ApexSDKImpl::getTempMemory")); + newTemp.size = size; + newTemp.used = size; + result = newTemp; + } + mTempMemoryLock.unlock(); + +#ifdef _DEBUG + if (result.used < result.size) + { + memset((char*)result.memory + result.used, 0xfd, result.size - result.used); + } +#endif + + return result.memory; +} + + +void 
ApexSDKImpl::releaseTempMemory(void* data) +{ + if (data == NULL) //this is a valid consequence of permittion 0 sized allocations. + { + return; + } + + mTempMemoryLock.lock(); + uint32_t numReleased = 0; + + for (uint32_t i = 0; i < mNumTempMemoriesActive; i++) + { + if (mTempMemories[i].memory == data) + { + PX_ASSERT(mTempMemories[i].used > 0); +#ifdef _DEBUG + if (mTempMemories[i].used < mTempMemories[i].size) + { + for (uint32_t j = mTempMemories[i].used; j < mTempMemories[i].size; j++) + { + unsigned char cur = ((unsigned char*)mTempMemories[i].memory)[j]; + if (cur != 0xfd) + { + PX_ASSERT(cur == 0xfd); + break; // only hit this assert once per error + } + } + } + // you should not operate on data that has been released! + memset(mTempMemories[i].memory, 0xcd, mTempMemories[i].size); +#endif + mTempMemories[i].used = 0; + + // swap with last valid one + if (i < mNumTempMemoriesActive - 1) + { + TempMemory temp = mTempMemories[mNumTempMemoriesActive - 1]; + mTempMemories[mNumTempMemoriesActive - 1] = mTempMemories[i]; + mTempMemories[i] = temp; + } + mNumTempMemoriesActive--; + + numReleased++; + break; + } + } + + PX_ASSERT(numReleased == 1); + + mTempMemoryLock.unlock(); +} + + +void ApexSDKImpl::release() +{ + if (renderResourceManagerWrapper != NULL) + { + PX_DELETE(renderResourceManagerWrapper); + renderResourceManagerWrapper = NULL; + } + + for (uint32_t i = 0; i < mScenes.size(); i++) + { + (DYNAMIC_CAST(ApexScene*)(mScenes[ i ]))->destroy(); + } + mScenes.clear(); + + // Notify all modules that the ApexSDKImpl is getting destructed + for (uint32_t i = 0; i < modules.size(); i++) + { + if ( imodules[i] ) + { + imodules[ i ]->notifyReleaseSDK(); + } + } + + // Now we destroy each module; but we make sure to null out each array element before we call the + // actual destruction routine so that the array of avlie/registered modules contains no pointers to deleted objects + for (uint32_t i = 0; i < modules.size(); i++) + { + ModuleIntl *d = imodules[i]; + 
imodules[i] = NULL; + modules[i] = NULL; + if ( d ) + { + d->destroy(); + } + } + modules.clear(); + imodules.clear(); + + /* Free all render meshes created from the SDK, release named resources */ + if (mAuthorableObjects != NULL) + { + PX_DELETE(mAuthorableObjects); + mAuthorableObjects = NULL; + } + + if (mDebugColorParams) + { + mDebugColorParams->destroy(); + mDebugColorParams = NULL; + } + + frameworkModule.release(mParameterizedTraits); + + delete mParameterizedTraits; + mParameterizedTraits = 0; + + apexResourceProvider->destroy(); + apexResourceProvider = 0; + + + mPhysXObjDescs.clear(); + + for (uint32_t i = 0; i < mTempMemories.size(); i++) + { + if (mTempMemories[i].memory != NULL) + { + PX_FREE(mTempMemories[i].memory); + mTempMemories[i].memory = NULL; + mTempMemories[i].size = 0; + } + } + mTempMemories.clear(); + + PX_DELETE(mCachedData); + mCachedData = NULL; + + if (mErrorString) + { + PX_FREE_AND_RESET(mErrorString); + } + +#if PX_PHYSICS_VERSION_MAJOR == 0 + if (mApexThreadPool) + { + PX_DELETE(mApexThreadPool); + mApexThreadPool = NULL; + } + while (mUserAllocThreadPools.size()) + { + releaseCpuDispatcher(*mUserAllocThreadPools[0]); + } +#endif + + delete this; + // be very careful what goes below this line! 
+ + gApexSdk = NULL; + +} + +RenderDebugInterface* ApexSDKImpl::createApexRenderDebug(RENDER_DEBUG::RenderDebugInterface* interface, bool useRemoteDebugVisualization) +{ + PX_UNUSED(useRemoteDebugVisualization); + +#ifdef WITHOUT_DEBUG_VISUALIZE + return NULL; + +#else + + return nvidia::apex::createApexRenderDebug(this, interface, useRemoteDebugVisualization); +#endif +} + +void ApexSDKImpl::releaseApexRenderDebug(RenderDebugInterface& debug) +{ +#ifdef WITHOUT_DEBUG_VISUALIZE + PX_UNUSED(debug); +#else + debug.release(); +#endif +} + +SphereShape* ApexSDKImpl::createApexSphereShape() +{ + ApexSphereShape* m = PX_NEW(ApexSphereShape); + return static_cast< SphereShape*>(m); +} + +CapsuleShape* ApexSDKImpl::createApexCapsuleShape() +{ + ApexCapsuleShape* m = PX_NEW(ApexCapsuleShape); + return static_cast< CapsuleShape*>(m); +} + +BoxShape* ApexSDKImpl::createApexBoxShape() +{ + ApexBoxShape* m = PX_NEW(ApexBoxShape); + return static_cast< BoxShape*>(m); +} + +HalfSpaceShape* ApexSDKImpl::createApexHalfSpaceShape() +{ + ApexHalfSpaceShape* m = PX_NEW(ApexHalfSpaceShape); + return static_cast< HalfSpaceShape*>(m); +} + +void ApexSDKImpl::releaseApexShape(Shape& shape) +{ + shape.releaseApexShape(); +} + +const char* ApexSDKImpl::getWireframeMaterial() +{ + return mWireframeMaterial.c_str(); +} + +const char* ApexSDKImpl::getSolidShadedMaterial() +{ + return mSolidShadedMaterial.c_str(); +} + +pvdsdk::ApexPvdClient* ApexSDKImpl::getApexPvdClient() +{ +#if defined(PHYSX_PROFILE_SDK) + return mApexPvdClient; +#else + return NULL; +#endif +} + +profile::PxProfileZone * ApexSDKImpl::getProfileZone() +{ +#if defined(PHYSX_PROFILE_SDK) + return mProfileZone; +#else + return NULL; +#endif +} + +profile::PxProfileZoneManager * ApexSDKImpl::getProfileZoneManager() +{ +#if defined(PHYSX_PROFILE_SDK) + return mProfileZone ? 
mProfileZone->getProfileZoneManager() : NULL; +#else + return NULL; +#endif +} + +#if PX_WINDOWS_FAMILY +const char* ApexSDKImpl::getAppGuid() +{ + return mAppGuid.c_str(); +} +#endif + +#if APEX_CUDA_SUPPORT +PhysXGpuIndicator* ApexSDKImpl::registerPhysXIndicatorGpuClient() +{ + //allocate memory for the PhysXGpuIndicator + PhysXGpuIndicator* gpuIndicator = static_cast<PhysXGpuIndicator*>(PX_ALLOC(sizeof(PhysXGpuIndicator), PX_DEBUG_EXP("PhysXGpuIndicator"))); + PX_PLACEMENT_NEW(gpuIndicator, PhysXGpuIndicator); + + gpuIndicator->gpuOn(); + return gpuIndicator; +} + +void ApexSDKImpl::unregisterPhysXIndicatorGpuClient(PhysXGpuIndicator* gpuIndicator) +{ + if (gpuIndicator != NULL) + { + gpuIndicator->~PhysXGpuIndicator(); + PX_FREE(gpuIndicator); + } +} +#endif + +void ApexSDKImpl::updateDebugColorParams(const char* color, uint32_t val) +{ + for (uint32_t i = 0; i < mScenes.size(); i++) + { + DYNAMIC_CAST(ApexScene*)(mScenes[ i ])->updateDebugColorParams(color, val); + } +} + + +bool ApexSDKImpl::getRMALoadMaterialsLazily() +{ + return mRMALoadMaterialsLazily; +} + +/////////////////////////////////////////////////////////////////////////////// +// ApexRenderMeshAssetAuthoring +/////////////////////////////////////////////////////////////////////////////// + + +Asset* RenderMeshAuthorableObject::createAsset(AssetAuthoring& author, const char* name) +{ + NvParameterized::Interface* newObj = NULL; + author.getNvParameterized()->clone(newObj); + return createAsset(newObj, name); +} + +Asset* RenderMeshAuthorableObject::createAsset(NvParameterized::Interface* params, const char* name) +{ + ApexRenderMeshAsset* asset = PX_NEW(ApexRenderMeshAsset)(mAssets, name, 0); + if (asset) + { + asset->createFromParameters((RenderMeshAssetParameters*)params); + GetInternalApexSDK()->getNamedResourceProvider()->setResource(mAOTypeName.c_str(), name, asset); + } + return asset; +} + +void RenderMeshAuthorableObject::releaseAsset(Asset& nxasset) +{ + ApexRenderMeshAsset* aa = 
DYNAMIC_CAST(ApexRenderMeshAsset*)(&nxasset); + GetInternalApexSDK()->getInternalResourceProvider()->setResource(ApexRenderMeshAsset::getClassName(), nxasset.getName(), NULL, false, false); + aa->destroy(); +} + +// this should no longer be called now that we're auto-assigning names in createAssetAuthoring() +AssetAuthoring* RenderMeshAuthorableObject::createAssetAuthoring() +{ + return createAssetAuthoring(""); +} + +AssetAuthoring* RenderMeshAuthorableObject::createAssetAuthoring(NvParameterized::Interface* params, const char* name) +{ +#ifdef WITHOUT_APEX_AUTHORING + PX_UNUSED(params); + PX_UNUSED(name); + return NULL; +#else + ApexRenderMeshAssetAuthoring* assetAuthor = PX_NEW(ApexRenderMeshAssetAuthoring)(mAssetAuthors, (RenderMeshAssetParameters*)params, name); + if (assetAuthor) + { + GetInternalApexSDK()->getNamedResourceProvider()->setResource(mAOTypeName.c_str(), name, assetAuthor); + } + return assetAuthor; + +#endif +} + +AssetAuthoring* RenderMeshAuthorableObject::createAssetAuthoring(const char* name) +{ +#ifdef WITHOUT_APEX_AUTHORING + PX_UNUSED(name); + return NULL; +#else + NvParameterized::Traits* traits = GetInternalApexSDK()->getParameterizedTraits(); + RenderMeshAssetParameters* params = (RenderMeshAssetParameters*)traits->createNvParameterized(RenderMeshAssetParameters::staticClassName()); + ApexRenderMeshAssetAuthoring* assetAuthor = PX_NEW(ApexRenderMeshAssetAuthoring)(mAssetAuthors, params, name); + if (assetAuthor) + { + GetInternalApexSDK()->getNamedResourceProvider()->setResource(mAOTypeName.c_str(), name, assetAuthor); + } + return assetAuthor; + +#endif +} + +void RenderMeshAuthorableObject::releaseAssetAuthoring(AssetAuthoring& nxauthor) +{ +#ifdef WITHOUT_APEX_AUTHORING + PX_UNUSED(nxauthor); +#else + ApexRenderMeshAssetAuthoring* aa = DYNAMIC_CAST(ApexRenderMeshAssetAuthoring*)(&nxauthor); + GetInternalApexSDK()->getInternalResourceProvider()->setResource(mAOTypeName.c_str(), aa->getName(), NULL, false, false); + + aa->destroy(); 
+#endif +} + + +uint32_t RenderMeshAuthorableObject::forceLoadAssets() +{ + uint32_t loadedAssetCount = 0; + + for (uint32_t i = 0; i < mAssets.getSize(); i++) + { + ApexRenderMeshAsset* asset = DYNAMIC_CAST(ApexRenderMeshAsset*)(mAssets.getResource(i)); + loadedAssetCount += asset->forceLoadAssets(); + } + return loadedAssetCount; +} + + +// Resource methods +void RenderMeshAuthorableObject::release() +{ + // test this by releasing the module before the individual assets + + // remove all assets that we loaded (must do now else we cannot unregister) + mAssets.clear(); + mAssetAuthors.clear(); + + // remove this AO's name from the authorable namespace + GetInternalApexSDK()->unregisterAuthObjType(mAOTypeName.c_str()); + destroy(); +} + +void RenderMeshAuthorableObject::destroy() +{ + delete this; +} + + +/** +Returns a PxFileBuf which reads from a buffer in memory. +*/ +PxFileBuf* ApexSDKImpl::createMemoryReadStream(const void* mem, uint32_t len) +{ + PxFileBuf* ret = 0; + + nvidia::PsMemoryBuffer* rb = PX_NEW(nvidia::PsMemoryBuffer)((const uint8_t*)mem, len); + ret = static_cast< PxFileBuf*>(rb); + + return ret; +} + +/** +Returns a PxFileBuf which writes to memory. +*/ +PxFileBuf* ApexSDKImpl::createMemoryWriteStream(uint32_t alignment) +{ + PxFileBuf* ret = 0; + + nvidia::PsMemoryBuffer* mb = PX_NEW(nvidia::PsMemoryBuffer)(nvidia::BUFFER_SIZE_DEFAULT, alignment); + ret = static_cast< PxFileBuf*>(mb); + + return ret; +} + +/** +Returns the address and length of the contents of a memory write buffer stream. +*/ +const void* ApexSDKImpl::getMemoryWriteBuffer(PxFileBuf& stream, uint32_t& len) +{ + const void* ret = 0; + + nvidia::PsMemoryBuffer* wb = static_cast< nvidia::PsMemoryBuffer*>(&stream); + len = wb->getWriteBufferSize(); + ret = wb->getWriteBuffer(); + + return ret; +} + +/** +Releases a previously created PxFileBuf used as a write or read buffer. 
+*/ +void ApexSDKImpl::releaseMemoryReadStream(PxFileBuf& stream) +{ + nvidia::PsMemoryBuffer* rb = static_cast< nvidia::PsMemoryBuffer*>(&stream); + delete rb; +} + +void ApexSDKImpl::releaseMemoryWriteStream(PxFileBuf& stream) +{ + nvidia::PsMemoryBuffer* wb = static_cast< nvidia::PsMemoryBuffer*>(&stream); + delete wb; +} + + +NvParameterized::Serializer* ApexSDKImpl::createSerializer(NvParameterized::Serializer::SerializeType type) +{ + return NvParameterized::internalCreateSerializer(type, mParameterizedTraits); +} + +NvParameterized::Serializer* ApexSDKImpl::createSerializer(NvParameterized::Serializer::SerializeType type, NvParameterized::Traits* traits) +{ + return NvParameterized::internalCreateSerializer(type, traits); +} + +NvParameterized::Serializer::SerializeType ApexSDKImpl::getSerializeType(const void* data, uint32_t dlen) +{ + PsMemoryBuffer stream(data, dlen); + return NvParameterized::Serializer::peekSerializeType(stream); +} + +NvParameterized::Serializer::SerializeType ApexSDKImpl::getSerializeType(PxFileBuf& stream) +{ + return NvParameterized::Serializer::peekSerializeType(stream); +} + +NvParameterized::Serializer::ErrorType ApexSDKImpl::getSerializePlatform(const void* data, uint32_t dlen, NvParameterized::SerializePlatform& platform) +{ + if (dlen < 56) + { + APEX_INVALID_PARAMETER("At least 56 Bytes are needed to read the platform of a binary file"); + } + + PsMemoryBuffer stream(data, dlen); + return NvParameterized::Serializer::peekPlatform(stream, platform); +} + +NvParameterized::Serializer::ErrorType ApexSDKImpl::getSerializePlatform(PxFileBuf& stream, NvParameterized::SerializePlatform& platform) +{ + return NvParameterized::Serializer::peekPlatform(stream, platform); +} + +void ApexSDKImpl::getCurrentPlatform(NvParameterized::SerializePlatform& platform) const +{ + platform = NvParameterized::GetCurrentPlatform(); +} + +bool ApexSDKImpl::getPlatformFromString(const char* name, NvParameterized::SerializePlatform& platform) const +{ 
+ return NvParameterized::GetPlatform(name, platform); +} + +const char* ApexSDKImpl::getPlatformName(const NvParameterized::SerializePlatform& platform) const +{ + return NvParameterized::GetPlatformName(platform); +} + +Asset* ApexSDKImpl::createAsset(const char* opaqueMeshName, UserOpaqueMesh* om) +{ + Asset* ret = NULL; + NvParameterized::Interface* params = getParameterizedTraits()->createNvParameterized(RenderMeshAssetParameters::staticClassName()); + if (params) + { + ret = this->createAsset(params, opaqueMeshName); + if (ret) + { + NvParameterized::setParamBool(*params, "isReferenced", false); + ApexRenderMeshAsset* nrma = static_cast<ApexRenderMeshAsset*>(ret); + nrma->setOpaqueMesh(om); + } + else + { + params->destroy(); + } + } + return ret; +} + +ModuleIntl *ApexSDKImpl::getInternalModule(Module *module) +{ + ModuleIntl *ret = NULL; + + for (uint32_t i = 0; i < modules.size(); i++) + { + if ( modules[ i ] == module ) + { + ret = imodules[i]; + break; + } + } + + return ret; +} + +Module *ApexSDKImpl::getModule(ModuleIntl *module) +{ + Module *ret = NULL; + + for (uint32_t i = 0; i < imodules.size(); i++) + { + if ( imodules[ i ] == module ) + { + ret = modules[i]; + break; + } + } + + return ret; +} + + +void ApexSDKImpl::enterURR() +{ + if (mURRdepthTLSslot != 0xFFFFFFFF) + { + uint64_t currentDepth = PTR_TO_UINT64(TlsGet(mURRdepthTLSslot)); + ++currentDepth; + TlsSet(mURRdepthTLSslot, (void*)currentDepth); + } +} + + +void ApexSDKImpl::leaveURR() +{ + if (mURRdepthTLSslot != 0xFFFFFFFF) + { + uint64_t currentDepth = PTR_TO_UINT64(TlsGet(mURRdepthTLSslot)); + if (currentDepth > 0) + { + --currentDepth; + TlsSet(mURRdepthTLSslot, (void*)currentDepth); + } + else + { + // if this is hit, something is wrong with the + // URR_SCOPE implementation + PX_ALWAYS_ASSERT(); + } + } +} + + +void ApexSDKImpl::checkURR() +{ + if (mURRdepthTLSslot != 0xFFFFFFFF) + { + uint64_t currentDepth = PTR_TO_UINT64(TlsGet(mURRdepthTLSslot)); + if (currentDepth == 0) + { + // 
if this assert is hit it means that + // - either some render resources are created where it's not allowed + // (outside of updateRenderResources or prepareRenderResources) + // => change the place in code where the resource is created + // + // - or the updateRenderResources call is not marked with a URR_SCOPE. + // => add the macro + PX_ALWAYS_ASSERT(); + } + } +} + +} +} // end namespace nvidia::apex diff --git a/APEX_1.4/framework/src/ApexSDKVersionString.cpp b/APEX_1.4/framework/src/ApexSDKVersionString.cpp new file mode 100644 index 00000000..8fe7730d --- /dev/null +++ b/APEX_1.4/framework/src/ApexSDKVersionString.cpp @@ -0,0 +1,54 @@ +/* + * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved. + * + * NVIDIA CORPORATION and its licensors retain all intellectual property + * and proprietary rights in and to this software, related documentation + * and any modifications thereto. Any use, reproduction, disclosure or + * distribution of this software and related documentation without an express + * license agreement from NVIDIA CORPORATION is strictly prohibited. 
+ */ + + +#include "ApexSDKVersionString.h" +#include <PxPreprocessor.h> +#include "P4Info.h" + +namespace nvidia +{ +namespace apex +{ + +const char* GetApexSDKVersionString(ApexSDKVersionString versionString) +{ + const char* result = NULL; + switch(versionString) + { + case VERSION: + result = P4_APEX_VERSION_STRING; + break; + case CHANGELIST: + result = PX_STRINGIZE(P4_CHANGELIST); + break; + case TOOLS_CHANGELIST: + result = PX_STRINGIZE(P4_TOOLS_CHANGELIST); + break; + case BRANCH: + result = P4_APEX_BRANCH; + break; + case BUILD_TIME: + result = P4_BUILD_TIME; + break; + case AUTHOR: + result = AUTHOR_DISTRO; + break; + case REASON: + result = REASON_DISTRO; + break; + default: + break; + } + return result; +} + +} +} diff --git a/APEX_1.4/framework/src/ApexScene.cpp b/APEX_1.4/framework/src/ApexScene.cpp new file mode 100644 index 00000000..9652e133 --- /dev/null +++ b/APEX_1.4/framework/src/ApexScene.cpp @@ -0,0 +1,1799 @@ +/* + * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved. + * + * NVIDIA CORPORATION and its licensors retain all intellectual property + * and proprietary rights in and to this software, related documentation + * and any modifications thereto. Any use, reproduction, disclosure or + * distribution of this software and related documentation without an express + * license agreement from NVIDIA CORPORATION is strictly prohibited. 
+ */ + + +#include "Apex.h" +#include "ApexDefs.h" +#include "ApexScene.h" +#include "ApexSceneTasks.h" +#include "ApexSDKImpl.h" +#include "ApexActor.h" +#include "FrameworkPerfScope.h" +#include "ApexRenderDebug.h" +#include "ModuleIntl.h" +#include "ApexPvdClient.h" +#include "PsTime.h" + +#if PX_PHYSICS_VERSION_MAJOR == 3 +#include "ScopedPhysXLock.h" +#include "PxRigidActor.h" +#include "cloth/PxCloth.h" // for PxCloth::isCloth() +#endif + +#include "ApexUsingNamespace.h" +#include "PsSync.h" +#include "PxTask.h" +#include "PxTaskManager.h" +#include "PxGpuDispatcher.h" +#include "PxCudaContextManager.h" +#include "ApexString.h" + +#define USE_FILE_RENDER_DEBUG 0 +#define USE_PVD_RENDER_DEBUG 0 + +#if USE_FILE_RENDER_DEBUG +#include "PxFileRenderDebug.h" +#endif +#if USE_PVD_RENDER_DEBUG +#include "PxPVDRenderDebug.h" +#endif + +#if APEX_CUDA_SUPPORT && !defined(INSTALLER) +#include <cuda.h> +class IDirect3DDevice9; +class IDirect3DResource9; +class IDirect3DVertexBuffer9; +#include <cudad3d9.h> +class ID3D10Device; +class ID3D10Resource; +class IDXGIAdapter; +#include <cudad3d10.h> + +#include "ApexCudaTest.h" +#include "ApexCudaProfile.h" +#endif + +#include "Lock.h" + +#if PX_X86 +#define PTR_TO_UINT64(x) ((uint64_t)(uint32_t)(x)) +#else +#define PTR_TO_UINT64(x) ((uint64_t)(x)) +#endif + +namespace nvidia +{ +namespace apex +{ + +double ApexScene::mQPC2MilliSeconds = 0.0; + +/************ +* ApexScene * +************/ + +ApexScene::ApexScene(const SceneDesc& sceneDesc, ApexSDKImpl* sdk) + : mApexSDK(sdk) +#if PX_PHYSICS_VERSION_MAJOR == 3 + , mPhysXScene(NULL) + , mPhysX3Interface(sceneDesc.physX3Interface) +#endif + , mElapsedTime(0.0f) + , mSceneRenderDebug(NULL) + , mOrigSceneMaxIter(1) + , mOrigSceneSubstepSize(1.0f / 60.0f) + , mTaskManager(NULL) + , mSimulating(false) + , mUseDebugRenderable(sceneDesc.useDebugRenderable) + , mUsedResource(0.0f) + , mSumBenefit(0.0f) + , mPhysXSimulate(NULL) + , mBetweenstepTasks(NULL) +#if APEX_DURING_TICK_TIMING_FIX 
+ , mDuringTickComplete(NULL) +#endif + , mCheckResults(NULL) + , mFetchResults(NULL) + , mTotalElapsedMS(0) + , mTimeRemainder(0.0f) + , mPhysXRemainder(0.0f) + , mPhysXSimulateTime(0.0f) + , mPxLastElapsedTime(0.0f) + , mPxAccumElapsedTime(0.0f) + , mPxStepWasValid(false) + , mFinalStep(false) +#if APEX_CUDA_SUPPORT + , mUseCuda(sceneDesc.useCuda) +#else + , mUseCuda(false) +#endif + , mCudaKernelCheckEnabled(false) + , mGravity(0) +{ + mSimulationComplete.set(); + +#if APEX_CUDA_SUPPORT && !defined(INSTALLER) + mCudaTestManager = PX_NEW(ApexCudaTestManager)(); + mCudaProfileManager = PX_NEW(ApexCudaProfileManager)(); + mCudaTestManager->setInternalApexScene(this); + mCudaProfileManager->setInternalApexScene(this); +#else + mCudaTestManager = 0; + mCudaProfileManager = 0; +#endif + + // APEX was ignoring the numerator from CounterFrequencyToTensOfNanos, this is OK as long as + // the numerator is equal to Time::sNumTensOfNanoSecondsInASecond (100,000,000) + //float ret = (float)((double)(t1 - t0) / (double)Time::getBootCounterFrequency().mDenominator); + + // Let's see if we can use both the numerator and denominator correctly (mostly for linux) + const CounterFrequencyToTensOfNanos freq = Time::getBootCounterFrequency(); + const double freqMultiplier = (double)freq.mNumerator/(double)freq.mDenominator; + + mQPC2MilliSeconds = freqMultiplier * 0.00001; // from tens of nanos to milliseconds ( x / 100 / 1000) + +#if PX_WINDOWS_FAMILY + if (sceneDesc.debugVisualizeRemotely) + { +#if USE_FILE_RENDER_DEBUG + prd = createFileRenderDebug("SceneRenderDebug.bin", false, sceneDesc.debugVisualizeLocally); +#endif +#if USE_PVD_RENDER_DEBUG + prd = createPVDRenderDebug(sceneDesc.debugVisualizeLocally); +#endif + } +#endif + mSceneRenderDebug = sceneDesc.debugInterface; + + /* Create NvParameterized for mDebugRenderParams */ + NvParameterized::Traits* traits = mApexSDK->getParameterizedTraits(); + PX_ASSERT(traits); + mDebugRenderParams = 
(DebugRenderParams*)traits->createNvParameterized(DebugRenderParams::staticClassName()); + PX_ASSERT(mDebugRenderParams); + + /* Get mDebugColorParams from ApexSDKImpl */ + mDebugColorParams = (DebugColorParams*)mApexSDK->getDebugColorParams(); + initDebugColorParams(); + +#if PX_PHYSICS_VERSION_MAJOR == 0 + mTaskManager = PxTaskManager::createTaskManager(*mApexSDK->getErrorCallback(), sceneDesc.cpuDispatcher, sceneDesc.gpuDispatcher); + mTaskManager->setGpuDispatcher(*sceneDesc.gpuDispatcher); + if (sceneDesc.cpuDispatcher == NULL) + { + mTaskManager->setCpuDispatcher(*mApexSDK->getDefaultThreadPool()); + } +#elif PX_PHYSICS_VERSION_MAJOR == 3 + setPhysXScene(sceneDesc.scene); +#if APEX_CUDA_SUPPORT + if (sceneDesc.scene != NULL) + { + mTaskManager = sceneDesc.scene->getTaskManager(); + if (mTaskManager->getGpuDispatcher()) + { + PxCudaContextManager* ctx = mTaskManager->getGpuDispatcher()->getCudaContextManager(); + if (ctx && ctx->supportsArchSM30()) + { + ctx->setUsingConcurrentStreams(false); + } + } + } +#endif +#endif + + allocateTasks(); + + createApexStats(); +} + +ApexScene::~ApexScene() +{ + destroyApexStats(); + +#if APEX_CUDA_SUPPORT && !defined(INSTALLER) + PX_DELETE(mCudaTestManager); + PX_DELETE(mCudaProfileManager); +#endif + + while (mViewMatrices.size()) + { + PX_FREE(mViewMatrices.popBack()); + } + while (mProjMatrices.size()) + { + PX_FREE(mProjMatrices.popBack()); + } +} + +// This array is still created because it is handy to have it to initialize +// the individual scene arrays. The string data is reduced to pointers. This +// could be improved with macros that do both this and the enums at the same time. 
+static StatsInfo ApexStatsData[] = +{ + {"NumberOfActors", StatDataType::INT, {{0}} }, + {"NumberOfShapes", StatDataType::INT, {{0}} }, + {"NumberOfAwakeShapes", StatDataType::INT, {{0}} }, + {"NumberOfCpuShapePairs", StatDataType::INT, {{0}} }, + {"ApexBeforeTickTime", StatDataType::FLOAT, {{0}} }, + {"ApexDuringTickTime", StatDataType::FLOAT, {{0}} }, + {"ApexPostTickTime", StatDataType::FLOAT, {{0}} }, + {"PhysXSimulationTime", StatDataType::FLOAT, {{0}} }, + {"ClothingSimulationTime", StatDataType::FLOAT, {{0}} }, + {"ParticleSimulationTime", StatDataType::FLOAT, {{0}} }, + {"TurbulenceSimulationTime", StatDataType::FLOAT, {{0}} }, + {"PhysXFetchResultTime", StatDataType::FLOAT, {{0}} }, + {"UserDelayedFetchTime", StatDataType::FLOAT, {{0}} }, + {"RbThroughput(Mpair/sec)", StatDataType::FLOAT, {{0}} }, + {"IOFX: SimulatedSpriteParticlesCount", StatDataType::INT, {{0}} }, + {"IOFX: SimulatedMeshParticlesCount", StatDataType::INT, {{0}} }, + {"VisibleDestructibleChunkCount", StatDataType::INT, {{0}} }, + {"DynamicDestructibleChunkIslandCount", StatDataType::INT, {{0}} } +}; + +PX_COMPILE_TIME_ASSERT(sizeof(ApexStatsData) / sizeof(ApexStatsData[0]) == ApexScene::NumberOfApexStats); + +void ApexScene::createApexStats(void) +{ + mApexSceneStats.numApexStats = NumberOfApexStats; + mApexSceneStats.ApexStatsInfoPtr = (StatsInfo*)PX_ALLOC(sizeof(StatsInfo) * ApexScene::NumberOfApexStats, PX_DEBUG_EXP("StatsInfo")); + + for (uint32_t i = 0; i < ApexScene::NumberOfApexStats; i++) + { + mApexSceneStats.ApexStatsInfoPtr[i] = ApexStatsData[i]; + } +} + +void ApexScene::destroyApexStats(void) +{ + mApexSceneStats.numApexStats = 0; + if (mApexSceneStats.ApexStatsInfoPtr) + { + PX_FREE_AND_RESET(mApexSceneStats.ApexStatsInfoPtr); + } +} + +const SceneStats* ApexScene::getStats(void) const +{ + READ_ZONE(); + return(&mApexSceneStats); +} + +void ApexScene::setApexStatValue(int32_t index, StatValue dataVal) +{ + if (mApexSceneStats.ApexStatsInfoPtr) + { + 
mApexSceneStats.ApexStatsInfoPtr[index].StatCurrentValue = dataVal; + } +} + +NvParameterized::Interface* ApexScene::getDebugRenderParams() const +{ + READ_ZONE(); + return mDebugRenderParams; +} + +//Module names are case sensitive: +//BasicIos, Clothing, Destructible, Emitter, Iofx +NvParameterized::Interface* ApexScene::getModuleDebugRenderParams(const char* name) const +{ + READ_ZONE(); + NvParameterized::Handle handle(*mDebugRenderParams), memberHandle(*mDebugRenderParams); + NvParameterized::Interface* refPtr = NULL; + int size; + + if (mDebugRenderParams->getParameterHandle("moduleName", handle) == NvParameterized::ERROR_NONE) + { + handle.getArraySize(size, 0); + for (int i = 0; i < size; i++) + { + if (handle.getChildHandle(i, memberHandle) == NvParameterized::ERROR_NONE) + { + memberHandle.getParamRef(refPtr); + if (strstr(refPtr->className(), name) != 0) + { + return refPtr; + } + } + } + } + + return NULL; +} + +uint32_t ApexScene::allocViewMatrix(ViewMatrixType::Enum viewType) +{ + WRITE_ZONE(); + if (mViewMatrices.size() >= 1) + { + if (!mTotalElapsedMS) + { + APEX_INVALID_OPERATION("instantiating more than %d view matrices is not allowed!", mViewMatrices.size()); + } + } + else + { + ViewMatrixProperties* v; + + switch (viewType) + { + case ViewMatrixType::USER_CUSTOMIZED: + { + v = PX_NEW(ViewMatrixLookAt)(PxMat44(PxIdentity),false,true); + } + break; + case ViewMatrixType::LOOK_AT_RH: + { + v = PX_NEW(ViewMatrixLookAt)(PxMat44(PxIdentity), false, true); + } + break; + case ViewMatrixType::LOOK_AT_LH: + { + v = PX_NEW(ViewMatrixLookAt)(PxMat44(PxIdentity), false, false); + } + break; + default: + if (!mTotalElapsedMS) + { + APEX_INVALID_PARAMETER("Invalid ViewMatrixType!"); + } + v = NULL; + break; + } + if (v) + { + mViewMatrices.pushBack(v); + } + } + return mViewMatrices.size() - 1; +} + +uint32_t ApexScene::allocProjMatrix(ProjMatrixType::Enum projType) +{ + WRITE_ZONE(); + if (mProjMatrices.size() >= 1) + { + if (!mTotalElapsedMS) + { + 
APEX_INVALID_OPERATION("instantiating more than %d projection matrices is not allowed!", mProjMatrices.size()); + } + } + else + { + ProjMatrixProperties* p; + + switch (projType) + { + case ProjMatrixType::USER_CUSTOMIZED: + { + p = PX_NEW(ProjMatrixUserCustomized)(PxMat44(PxIdentity), true, false, 0.1f, 10000.0f, 45.0f, 1024, 640); + } + break; +#if 0 //lionel: work in progress + case ProjMatrixType::PERSPECTIVE_FOV_RH: + { + p = PX_NEW(ProjMatrixPerspectiveFOV)(PxMat44(PxIdentity), false, true, true); + } + break; + case ProjMatrixType::PERSPECTIVE_FOV_LH: + { + p = PX_NEW(ProjMatrixPerspectiveFOV)(PxMat44(PxIdentity), false, true, false); + } + break; +#endif + default: + if (!mTotalElapsedMS) + { + APEX_INVALID_PARAMETER("Invalid ProjMatrixType!"); + } + p = NULL; + break; + } + if (p) + { + mProjMatrices.pushBack(p); + } + } + return mProjMatrices.size() - 1; +} + +uint32_t ApexScene::getNumViewMatrices() const +{ + READ_ZONE(); + return mViewMatrices.size(); +} + +uint32_t ApexScene::getNumProjMatrices() const +{ + READ_ZONE(); + return mProjMatrices.size(); +} + +void ApexScene::setViewMatrix(const PxMat44& viewTransform, const uint32_t viewID) +{ + WRITE_ZONE(); + if (viewID >= getNumViewMatrices()) + { + if (!mTotalElapsedMS) + { + APEX_INVALID_PARAMETER("view matrix for viewID %d is not initialized! 
see allocViewMatrix()", viewID); + } + } + else + { + mViewMatrices[viewID]->viewMatrix = viewTransform; + + // create PVD cameras + pvdsdk::ApexPvdClient* client = mApexSDK->getApexPvdClient(); + if ((client != NULL) && client->isConnected()) + { + if (!mViewMatrices[viewID]->pvdCreated) + { + ApexSimpleString cameraNum; + ApexSimpleString::itoa(viewID, cameraNum); + mViewMatrices[viewID]->cameraName = "ApexCamera "; + mViewMatrices[viewID]->cameraName += cameraNum; + mViewMatrices[viewID]->pvdCreated = true; + } + + PxVec3 gravity = getGravity(); + gravity.normalize(); + PxVec3 position = getEyePosition(viewID); + PxVec3 target = position + getEyeDirection(viewID); + + //pvdBinding->getConnectionManager().setCamera(mViewMatrices[viewID]->cameraName.c_str(), position, -gravity, target); + } + } +} + +PxMat44 ApexScene::getViewMatrix(const uint32_t viewID) const +{ + READ_ZONE(); + if (viewID < getNumViewMatrices()) + { + return mViewMatrices[viewID]->viewMatrix; + } + else + { + if (!mTotalElapsedMS) + { + APEX_INVALID_PARAMETER("view matrix for viewID %d is not initialized! see allocViewMatrix()", viewID); + } + } + return PxMat44(PxIdentity); +} + +void ApexScene::setProjMatrix(const PxMat44& projTransform, const uint32_t projID) +{ + WRITE_ZONE(); + if (projID >= getNumProjMatrices()) + { + if (!mTotalElapsedMS) + { + APEX_INVALID_PARAMETER("projection matrix for projID %d is not initialized! see allocProjMatrix()", projID); + } + } + else + { + mProjMatrices[projID]->projMatrix = projTransform; + } +} + +PxMat44 ApexScene::getProjMatrix(const uint32_t projID) const +{ + READ_ZONE(); + if (projID < getNumProjMatrices()) + { + return mProjMatrices[projID]->projMatrix; + } + else + { + if (!mTotalElapsedMS) + { + APEX_INVALID_PARAMETER("projection matrix for projID %d is not initialized! 
see allocProjMatrix()", projID); + } + } + return PxMat44(PxIdentity); +} + +void ApexScene::setUseViewProjMatrix(const uint32_t viewID, const uint32_t projID) +{ + WRITE_ZONE(); + if (viewID >= getNumViewMatrices()) + { + if (!mTotalElapsedMS) + { + APEX_INVALID_PARAMETER("view matrix for viewID %d is not initialized! see allocViewMatrix()", viewID); + } + } + else + { + if (projID >= getNumProjMatrices()) + { + if (!mTotalElapsedMS) + { + APEX_INVALID_PARAMETER("projection matrix for projID %d is not initialized! see allocProjMatrix()", projID); + } + } + else + { +#if 0 //lionel: work in progress + getColMajColVecArray(mViewMatrices[viewID]->viewMatrix, mViewColMajColVecArray); + getColMajColVecArray(mProjMatrices[projID]->projMatrix, mProjColMajColVecArray); + multiplyColMajColVecArray(mViewColMajColVecArray, mProjColMajColVecArray, mViewProjColMajColVecArray); +#endif + +#ifndef WITHOUT_DEBUG_VISUALIZE + if (mSceneRenderDebug) + { + RENDER_DEBUG_IFACE(mSceneRenderDebug)->setViewMatrix(mViewMatrices[viewID]->viewMatrix.front()); + RENDER_DEBUG_IFACE(mSceneRenderDebug)->setProjectionMatrix(mProjMatrices[projID]->projMatrix.front()); + } +#endif + +#if 0 //lionel: work in progress + //getColVecMat44(mViewProjColMajColVecArray, mViewProjMatrix); //lionel: need to test + //mCurrentViewID = viewID; //lionel : initialize these. will need these when multiple view and prok matrices is supported + //mCurrentProjID = projID; +#endif + } + } +} +#if 0 //lionel: work in progress +const PxMat44& ApexScene::getViewProjMatrix() const +{ + static PxMat44 vp; + vp = vp.createIdentity(); + + if (mViewProjColMajColVecArray == NULL) + { + APEX_INVALID_OPERATION("view-projection matrix is not yet set! 
see setUseViewProjMatrix()"); + } + else + { + return mViewProjMatrix; + } + return vp; +} + +void ApexScene::getColMajColVecArray(const PxMat44& colVecMat44, float* const result) +{ + *(PxVec4*)(result + 0) = colVecMat44.column0; + *(PxVec4*)(result + 4) = colVecMat44.column1; + *(PxVec4*)(result + 8) = colVecMat44.column2; + *(PxVec4*)(result + 12) = colVecMat44.column3; +} + +void ApexScene::getColVecMat44(const float* const colMajColVecArray, PxMat44& result) +{ + result.column0 = PxVec4(colMajColVecArray + 0); + result.column1 = PxVec4(colMajColVecArray + 4); + result.column2 = PxVec4(colMajColVecArray + 8); + result.column3 = PxVec4(colMajColVecArray + 12); +} + +void ApexScene::multiplyColMajColVecArray(const float* const fromSpace, const float* const toSpace, float* const result) +{ + /**************************************** + col vector -> P * V * W * vertexVector + row vector -> vertexVector * W * V * P + toSpace * fromSpace + result = rows of 1stMat * cols of 2ndMat + ****************************************/ + uint32_t id = 0; + for (uint32_t r = 0; r < 4; ++r) + { + for (uint32_t c = 0; c < 4; ++c) + { + float dotProduct = 0; + for (uint32_t k = 0; k < 4; ++k) + { + dotProduct += toSpace[k * 4 + r] * fromSpace[k + c * 4]; + } + result[id++] = dotProduct; + } + } +} +#endif + +PxVec3 ApexScene::getEyePosition(const uint32_t viewID) const +{ + READ_ZONE(); + if (viewID < getNumViewMatrices()) + { + PxVec3 pos = (mViewMatrices[viewID]->viewMatrix.inverseRT()).column3.getXYZ(); + return pos; + } + else + { + if (!mTotalElapsedMS) + { + APEX_INVALID_PARAMETER("view matrix for viewID %d is not initialized! 
see allocViewMatrix()", viewID); + } + } + return PxVec3(0, 0, 0); +} + +PxVec3 ApexScene::getEyeDirection(const uint32_t viewID) const +{ + READ_ZONE(); + if (viewID < getNumViewMatrices()) + { + PxVec3 dir; + dir.x = mViewMatrices[viewID]->viewMatrix.column0.z; + dir.y = mViewMatrices[viewID]->viewMatrix.column1.z; + dir.z = mViewMatrices[viewID]->viewMatrix.column2.z; + ViewMatrixLookAt* v = static_cast<ViewMatrixLookAt*>(mViewMatrices[viewID]); + if (v->isRightHand) + { + dir = -1 * dir; + } + return dir; + } + else + { + APEX_INVALID_PARAMETER("invalid view matrix ID viewID %d! see allocViewMatrix()", viewID); + } + return PxVec3(0, 0, 1); +} + +//********************************** + +void getEyeTransform(PxMat44 &xform,const PxVec3 &eye,const PxVec3 &forward,const PxVec3 &up) +{ + PxVec3 right = forward.cross(up); + right.normalize(); + PxVec3 realUp = right.cross(forward); + realUp.normalize(); + xform = PxMat44(right, realUp, -forward, eye); + xform = xform.inverseRT(); +} + + +void ApexScene::setViewParams(const PxVec3& eyePosition, const PxVec3& eyeDirection, const PxVec3& worldUpDirection, const uint32_t viewID) +{ + WRITE_ZONE(); + if (viewID >= getNumViewMatrices()) + { + if (!mTotalElapsedMS) + { + APEX_INVALID_PARAMETER("view matrix for viewID %d is not initialized! see allocViewMatrix()", viewID); + } + } + else + { + ViewMatrixLookAt* v = static_cast<ViewMatrixLookAt*>(mViewMatrices[viewID]); + getEyeTransform(v->viewMatrix,eyePosition,eyeDirection,worldUpDirection); + } +} + +void ApexScene::setProjParams(float nearPlaneDistance, float farPlaneDistance, float fieldOfViewDegree, uint32_t viewportWidth, uint32_t viewportHeight, const uint32_t projID) +{ + WRITE_ZONE(); + if (projID >= getNumProjMatrices()) + { + if (!mTotalElapsedMS) + { + APEX_INVALID_PARAMETER("projection matrix for projID %d is not initialized! 
see allocProjMatrix()", projID); + } + } + else + { + if (!mProjMatrices[projID]->isUserCustomized) + { + if (!mTotalElapsedMS) + { + APEX_INVALID_PARAMETER("projection matrix for projID %d is not a user-customized type! see allocProjMatrix()", projID); + } + } + else + { + ProjMatrixUserCustomized* p = static_cast<ProjMatrixUserCustomized*>(mProjMatrices[projID]); + p->nearPlaneDistance = nearPlaneDistance; + p->farPlaneDistance = farPlaneDistance; + p->fieldOfViewDegree = fieldOfViewDegree; + p->viewportWidth = viewportWidth; + p->viewportHeight = viewportHeight; + } + } +} +#if 0 //lionel: work in progress +const PxMat44& ApexScene::buildViewMatrix(const uint32_t viewID) +{ + if (viewID >= getNumViewMatrices()) + { + APEX_INVALID_PARAMETER("view matrix for viewID %d is not initialized! see allocViewMatrix()", viewID); + } + else + { + if (!mViewMatrices[viewID]->isLookAt) + { + APEX_INVALID_PARAMETER("view matrix for viewID %d is not a LookAt type! see allocViewMatrix()", viewID); + } + else + { + ViewMatrixLookAt* v = DYNAMIC_CAST(ViewMatrixLookAt*)(mViewMatrices[viewID]); + if (v->isRightHand) + { + //lionel: todo + //ensure determinant == +ve + //set view matrix as well? + } + else + { + //lionel: todo + //ensure determinant == -ve + //set view matrix as well? + } + } + + } + //lionel: temp hack + static PxMat44 hack; + return hack; +} + +const PxMat44& ApexScene::buildProjMatrix(const uint32_t projID) +{ + if (projID >= getNumProjMatrices()) + { + APEX_INVALID_PARAMETER("projection matrix for projID %d is not initialized! see allocProjMatrix()", projID); + } + else + { + if (!mProjMatrices[projID]->isPerspectiveFOV) + { + APEX_INVALID_PARAMETER("projection matrix for projID %d is a not a perspective FOV type! see allocProjMatrix()", projID); + } + else + { + ProjMatrixPerspectiveFOV* p = DYNAMIC_CAST(ProjMatrixPerspectiveFOV*)(mProjMatrices[projID]); + if (p->isZinvert) + { + //lionel: todo + //set proj matrix as well? + //D3D projection or OGL projection? 
+ } + else + { + //lionel: todo + //set proj matrix as well? + } + } + } + //lionel: temp hack + static PxMat44 hack; + return hack; +} +#endif + +void ApexScene::initDebugColorParams() +{ + if (mSceneRenderDebug == NULL) + { + return; + } + using RENDER_DEBUG::DebugColors; +#ifndef WITHOUT_DEBUG_VISUALIZE +#define INIT_COLOR(_name) \ + RENDER_DEBUG_IFACE(mSceneRenderDebug)->setDebugColor(DebugColors::_name, mDebugColorParams->_name); \ + mColorMap.insert(#_name, DebugColors::_name); + + INIT_COLOR(Default); + INIT_COLOR(PoseArrows); + INIT_COLOR(MeshStatic); + INIT_COLOR(MeshDynamic); + INIT_COLOR(Shape); + INIT_COLOR(Text0); + INIT_COLOR(Text1); + INIT_COLOR(ForceArrowsLow); + INIT_COLOR(ForceArrowsNorm); + INIT_COLOR(ForceArrowsHigh); + INIT_COLOR(Color0); + INIT_COLOR(Color1); + INIT_COLOR(Color2); + INIT_COLOR(Color3); + INIT_COLOR(Color4); + INIT_COLOR(Color5); + INIT_COLOR(Red); + INIT_COLOR(Green); + INIT_COLOR(Blue); + INIT_COLOR(DarkRed); + INIT_COLOR(DarkGreen); + INIT_COLOR(DarkBlue); + INIT_COLOR(LightRed); + INIT_COLOR(LightGreen); + INIT_COLOR(LightBlue); + INIT_COLOR(Purple); + INIT_COLOR(DarkPurple); + INIT_COLOR(Yellow); + INIT_COLOR(Orange); + INIT_COLOR(Gold); + INIT_COLOR(Emerald); + INIT_COLOR(White); + INIT_COLOR(Black); + INIT_COLOR(Gray); + INIT_COLOR(LightGray); + INIT_COLOR(DarkGray); +#endif +} + +void ApexScene::updateDebugColorParams(const char* color, uint32_t val) +{ + WRITE_ZONE(); +#ifndef WITHOUT_DEBUG_VISUALIZE + RENDER_DEBUG_IFACE(mSceneRenderDebug)->setDebugColor(mColorMap[color], val); +#else + PX_UNUSED(color); + PX_UNUSED(val); +#endif +} + +// A module may call this SceneIntl interface if the module has been released. 
+void ApexScene::moduleReleased(ModuleSceneIntl& moduleScene) +{ + for (uint32_t i = 0 ; i < mModuleScenes.size() ; i++) + { + if (mModuleScenes[i] == &moduleScene) + { + mModuleScenes.replaceWithLast(i); + break; + } + } +} + +// ApexSDKImpl will call this for each module when ApexScene is first created, and +// again for all new modules loaded after the scene was created. +void ApexScene::moduleCreated(ModuleIntl& module) +{ + ModuleSceneIntl* ms = module.createInternalModuleScene(*this, mSceneRenderDebug); + if (ms) + { + mModuleScenes.pushBack(ms); +#if PX_PHYSICS_VERSION_MAJOR == 3 + ms->setModulePhysXScene(mPhysXScene); +#endif + } +} + + + +const PxRenderBuffer* ApexScene::getRenderBuffer() const +{ +#if PX_PHYSICS_VERSION_MAJOR == 3 + READ_ZONE(); + if (mSimulating) + { + APEX_INTERNAL_ERROR("simulation is still running"); + } + else + { +#ifndef WITHOUT_DEBUG_VISUALIZE + if (mUseDebugRenderable && mSceneRenderDebug) + { + mSceneRenderDebug->getRenderBuffer(mRenderBuffer); + } +#endif + } + return &mRenderBuffer; +#else + return 0; +#endif +} + +const PxRenderBuffer* ApexScene::getRenderBufferScreenSpace() const +{ +#if PX_PHYSICS_VERSION_MAJOR == 3 + READ_ZONE(); + if (mSimulating) + { + APEX_INTERNAL_ERROR("simulation is still running"); + } + else + { +#ifndef WITHOUT_DEBUG_VISUALIZE + if (mUseDebugRenderable && mSceneRenderDebug) + { + mSceneRenderDebug->getRenderBufferScreenSpace(mRenderBufferScreenSpace); + } +#endif + } + return &mRenderBufferScreenSpace; +#else + return 0; +#endif +} + + +#if PX_PHYSICS_VERSION_MAJOR == 3 +void ApexScene::setPhysXScene(PxScene* s) +{ + WRITE_ZONE(); + if (mPhysXScene != s) + { + /* Pass along to the module scenes */ + for (uint32_t i = 0 ; i < mModuleScenes.size() ; i++) + { + mModuleScenes[i]->setModulePhysXScene(s); + } + mPhysXScene = s; + + updateGravity(); + + + if (mPhysXScene) + { + mTaskManager = mPhysXScene->getTaskManager(); + } + else + { + mTaskManager = NULL; + } + } +} +#endif + + +uint32_t 
ApexScene::addActor(ApexActor& actor, ApexActor* actorPtr) +{ +#if PX_PHYSICS_VERSION_MAJOR == 3 + WRITE_ZONE(); + { + SCOPED_PHYSX_LOCK_WRITE(this); + actor.setPhysXScene(mPhysXScene); + } +#endif + return ApexContext::addActor(actor , actorPtr); +} + +void ApexScene::removeAllActors() +{ + WRITE_ZONE(); + if (mSimulating) + { + fetchResults(true, NULL); + } + ApexContext::removeAllActors(); +} + +void ApexScene::destroy() +{ + { + + if (mSimulating) + { + fetchResults(true, NULL); + } + + if (mSceneRenderDebug) + { + mSceneRenderDebug = NULL; + } + + if (mDebugRenderParams) + { + mDebugRenderParams->destroy(); + mDebugRenderParams = NULL; + } + + removeAllActors(); + } + +#if PX_PHYSICS_VERSION_MAJOR == 3 + + using namespace physx; + + PxScene* physXScene = getPhysXScene(); + + // Clean up PhysX objects data + if (physXScene) + { + SCOPED_PHYSX_LOCK_WRITE(physXScene); + + uint32_t zombieActorCount = 0; + uint32_t zombieShapeCount = 0; + uint32_t zombieDeformableCount = 0; + uint32_t zombieParticleSystemCount = 0; + uint32_t zombieParticleFluidCount = 0; + + uint32_t nbActors; + PxActor** actorArray; + + nbActors = physXScene->getNbActors(PxActorTypeFlag::eRIGID_STATIC | PxActorTypeFlag::eRIGID_DYNAMIC); + if (nbActors) + { + actorArray = (PxActor**)PX_ALLOC(sizeof(PxActor*) * nbActors, PX_DEBUG_EXP("PxActor*")); + physXScene->getActors(PxActorTypeFlag::eRIGID_STATIC | PxActorTypeFlag::eRIGID_DYNAMIC, actorArray, nbActors); + for (uint32_t actorIndex = 0; actorIndex < nbActors; ++actorIndex) + { + PxRigidActor* actor = actorArray[actorIndex]->is<physx::PxRigidActor>(); + + uint32_t nbShapes = actor->getNbShapes(); + PxShape** shapeArray = (PxShape**)PX_ALLOC(sizeof(PxShape*) * nbShapes, PX_DEBUG_EXP("PxShape*")); + actor->getShapes(shapeArray, nbShapes); + for (uint32_t shapeIndex = 0; shapeIndex < nbShapes; ++shapeIndex) + { + PxShape* shape = shapeArray[shapeIndex]; + if (mApexSDK->getPhysXObjectInfo(shape)) + { + mApexSDK->releaseObjectDesc(shape); + 
++zombieShapeCount; + } + } + if (mApexSDK->getPhysXObjectInfo(actor)) + { + mApexSDK->releaseObjectDesc(actor); + ++zombieActorCount; + } + PX_FREE(shapeArray); + } + PX_FREE(actorArray); + } + + + nbActors = physXScene->getNbActors(PxActorTypeFlag::eCLOTH); + if (nbActors) + { + actorArray = (PxActor**)PX_ALLOC(sizeof(PxActor*) * nbActors, PX_DEBUG_EXP("PxActor*")); + physXScene->getActors(PxActorTypeFlag::eCLOTH, actorArray, nbActors); + for (uint32_t actorIndex = 0; actorIndex < nbActors; ++actorIndex) + { + PxCloth* cloth = actorArray[actorIndex]->is<physx::PxCloth>(); + PX_ASSERT(cloth); + if (mApexSDK->getPhysXObjectInfo(cloth)) + { + mApexSDK->releaseObjectDesc(cloth); + ++zombieDeformableCount; + } + } + PX_FREE(actorArray); + } + + + nbActors = physXScene->getNbActors(PxActorTypeFlag::ePARTICLE_SYSTEM); + if (nbActors) + { + actorArray = (PxActor**)PX_ALLOC(sizeof(PxActor*) * nbActors, PX_DEBUG_EXP("PxActor*")); + physXScene->getActors(PxActorTypeFlag::ePARTICLE_SYSTEM, actorArray, nbActors); + for (uint32_t actorIndex = 0; actorIndex < nbActors; ++actorIndex) + { + PxParticleSystem* particleSystem = actorArray[actorIndex]->is<physx::PxParticleSystem>(); + PX_ASSERT(particleSystem); + if (mApexSDK->getPhysXObjectInfo(particleSystem)) + { + mApexSDK->releaseObjectDesc(particleSystem); + ++zombieParticleSystemCount; + } + } + PX_FREE(actorArray); + } + + nbActors = physXScene->getNbActors(PxActorTypeFlag::ePARTICLE_FLUID); + if (nbActors) + { + actorArray = (PxActor**)PX_ALLOC(sizeof(PxActor*) * nbActors, PX_DEBUG_EXP("PxActor*")); + physXScene->getActors(PxActorTypeFlag::ePARTICLE_FLUID, actorArray, nbActors); + for (uint32_t actorIndex = 0; actorIndex < nbActors; ++actorIndex) + { + PxParticleFluid* particleFluid = actorArray[actorIndex]->is<physx::PxParticleFluid>(); + PX_ASSERT(particleFluid); + if (mApexSDK->getPhysXObjectInfo(particleFluid)) + { + mApexSDK->releaseObjectDesc(particleFluid); + ++zombieParticleFluidCount; + } + } + PX_FREE(actorArray); 
+ } + + + if (zombieDeformableCount) + { + APEX_DEBUG_WARNING("Removed %d physX deformable actor descriptor(s) still remaining in destroyed ApexScene.", zombieDeformableCount); + } + if (zombieParticleSystemCount) + { + APEX_DEBUG_WARNING("Removed %d physX particle system actor descriptor(s) still remaining in destroyed ApexScene.", zombieParticleSystemCount); + } + if (zombieParticleFluidCount) + { + APEX_DEBUG_WARNING("Removed %d physX particle fluid actor descriptor(s) still remaining in destroyed ApexScene.", zombieParticleFluidCount); + } + if (zombieActorCount) + { + APEX_DEBUG_WARNING("Removed %d physX actor descriptor(s) still remaining in destroyed ApexScene.", zombieActorCount); + } + if (zombieShapeCount) + { + APEX_DEBUG_WARNING("Removed %d physX shape descriptor(s) still remaining in destroyed ApexScene.", zombieShapeCount); + } + + } +#endif + while (mModuleScenes.size()) + { + mModuleScenes.back()->release(); + } + + + + freeTasks(); + +#if PX_PHYSICS_VERSION_MAJOR == 0 + mTaskManager->release(); +#else + setPhysXScene(NULL); +#endif + + PX_DELETE(this); +} + + +void ApexScene::updateGravity() +{ +#if PX_PHYSICS_VERSION_MAJOR == 3 + WRITE_ZONE(); + if (mPhysXScene == NULL) + { + return; + } + SCOPED_PHYSX_LOCK_READ(mPhysXScene); + + mGravity = mPhysXScene->getGravity(); +#endif +} + + +void ApexScene::simulate(float elapsedTime, + bool finalStep, + PxBaseTask *completionTask, + void* scratchMemBlock, + uint32_t scratchMemBlockSize) +{ + PX_UNUSED(scratchMemBlock); + PX_UNUSED(scratchMemBlockSize); + + WRITE_ZONE(); + if (mApexSDK->getApexPvdClient()) + mApexSDK->getApexPvdClient()->beginFrame(this); + + PX_PROFILE_ZONE("ApexScene::simulate", GetInternalApexSDK()->getContextId()); + + // reset the APEX simulation time timer + APEX_CHECK_STAT_TIMER("--------- Set mApexSimulateTickCount"); + mApexSimulateTickCount = Time::getCurrentCounterValue(); + + mFinalStep = finalStep; + + if (mSimulating) + return; + +#if PX_PHYSICS_VERSION_MAJOR == 3 + if 
(!mPhysXScene) + return; +#endif + { + updateGravity(); + } + + uint32_t manualSubsteps = 0; + float substepSize = elapsedTime; + + // Wait for all post-fetchResults() tasks to complete before allowing the next + // simulation step to continue; + mSimulationComplete.wait(); + +#if PX_PHYSICS_VERSION_MAJOR == 3 + // make sure we use the apex user notify... if the application + // changes their custom one make sure we map to it. + mUserNotify.setBatchAppNotify(manualSubsteps > 1); +#if APEX_UE4 + // Why do we need this check if we'll return earlier in case when mPhysXScene == NULL ? Line: 1177 + if (getPhysXScene()) +#endif + { + SCOPED_PHYSX_LOCK_WRITE(this); + PxSimulationEventCallback* userNotify = getPhysXScene()->getSimulationEventCallback(); + if (userNotify != &mUserNotify) + { + mUserNotify.setApplicationNotifier(userNotify); + getPhysXScene()->setSimulationEventCallback(&mUserNotify); + } + PxContactModifyCallback* userContactModify = getPhysXScene()->getContactModifyCallback(); + if (userContactModify != &mUserContactModify) + { + mUserContactModify.setApplicationContactModify(userContactModify); + getPhysXScene()->setContactModifyCallback(&mUserContactModify); + } + } +#endif + + mElapsedTime = elapsedTime; + mPhysXSimulateTime = elapsedTime; + mFetchResultsReady.reset(); + mSimulationComplete.reset(); + mSimulating = true; + + for (uint32_t i = 0 ; i < mModuleScenes.size() ; i++) + mModuleScenes[i]->simulate(elapsedTime); + + // reset dependcies after mModuleScenes[i]->simulate, so they get a chance + // to wait for running tasks from last frame + mTaskManager->resetDependencies(); + + /* Submit APEX scene tasks */ + mTaskManager->submitNamedTask(mPhysXSimulate, mPhysXSimulate->getName()); +#if APEX_DURING_TICK_TIMING_FIX + mTaskManager->submitNamedTask(mDuringTickComplete, mDuringTickComplete->getName()); +#endif + mTaskManager->submitNamedTask(mCheckResults, mCheckResults->getName()); + mTaskManager->submitNamedTask(mFetchResults, 
mFetchResults->getName()); + + mPhysXSimulate->setElapsedTime(manualSubsteps > 0 ? substepSize : elapsedTime); + + for (uint32_t i = 0 ; i < mModuleScenes.size() ; i++) + mModuleScenes[i]->submitTasks(elapsedTime, substepSize, PxMax(manualSubsteps, 1u)); + + for (uint32_t i = 0 ; i < mModuleScenes.size() ; i++) + mModuleScenes[i]->setTaskDependencies(); + + /* Build scene dependency graph */ + mCheckResults->startAfter(mPhysXSimulate->getTaskID()); + +#if APEX_DURING_TICK_TIMING_FIX + /** + * Tasks that run during the PhysX tick (that start after mPhysXSimulate) should + * "finishBefore" mDuringTickComplete. + */ + mDuringTickComplete->startAfter(mPhysXSimulate->getTaskID()); + mFetchResults->startAfter(mDuringTickComplete->getTaskID()); +#endif + + mFetchResults->startAfter(mPhysXSimulate->getTaskID()); + mFetchResults->startAfter(mCheckResults->getTaskID()); + + if (manualSubsteps > 1) + { + PX_ASSERT(mBetweenstepTasks != NULL); + mBetweenstepTasks->setSubstepSize(substepSize, manualSubsteps); + + mBetweenstepTasks->setFollower(1, mCheckResults); + mCheckResults->addReference(); // make sure checkresults waits until the last immediate step + } + mPhysXSimulate->setFollowingTask(manualSubsteps > 1 ? 
mBetweenstepTasks : NULL); + +#if PX_PHYSICS_VERSION_MAJOR == 3 + mPhysXSimulate->setScratchBlock(scratchMemBlock, scratchMemBlockSize); +#endif + mFetchResults->setFollowingTask(completionTask); + + { + PX_PROFILE_ZONE("ApexScene::TaskManager::startSimulation", GetInternalApexSDK()->getContextId()); + mTaskManager->startSimulation(); + } +} + + +struct ApexPvdClientEndFrameSender +{ + pvdsdk::ApexPvdClient* mBinding; + void* mInstance; + ApexPvdClientEndFrameSender(pvdsdk::ApexPvdClient* inBinding, void* inInst) + : mBinding(inBinding) + , mInstance(inInst) + { + } + ~ApexPvdClientEndFrameSender() + { + if (mBinding) + { + mBinding->endFrame(mInstance); + } + } +}; + + +bool ApexScene::fetchResults(bool block, uint32_t* errorState) +{ + WRITE_ZONE(); +#if PX_PHYSICS_VERSION_MAJOR == 3 + if (!mPhysXScene) + { + return false; + } +#endif + + { + StatValue dataVal; + if (mFetchResultsReady.wait(0)) + { + dataVal.Float = 0.0f; // fetchResults was called before simulation was done + } + else + { + dataVal.Float = ApexScene::ticksToMilliseconds(mApexSimulateTickCount, Time::getCurrentCounterValue()); + } + setApexStatValue(UserDelayedFetchTime, dataVal); + } + + if (checkResults(block) == false || !mSimulating) + { + return false; + } + + //absolutely, at function exit, ensure we send the eof marker. + //PVD needs the EOF marker sent *after* the last fetch results in order to associate this fetch results + //with this frame. + //If you change the order of the next two statements it will confuse PVD and your frame will look tremendously + //long. 
+ ApexPvdClientEndFrameSender theEnsureEndFrameIsSent(mApexSDK->getApexPvdClient(), this); + PX_PROFILE_ZONE("ApexScene::fetchResults", GetInternalApexSDK()->getContextId()); + + // reset simulation timer to measure fetchResults time + APEX_CHECK_STAT_TIMER("--------- Set fetchTime"); + uint64_t fetchTime = Time::getCurrentCounterValue(); + + // reset simulation + mSimulating = false; + if (errorState) + { + *errorState = 0; + } + + // TODO: Post-FetchResults tasks must set this, if/when we support them. + mSimulationComplete.set(); + + for (uint32_t i = 0 ; i < mModuleScenes.size() ; i++) + { + mModuleScenes[i]->fetchResultsPreRenderLock(); + } + + for (uint32_t i = 0 ; i < mModuleScenes.size() ; i++) + { + mModuleScenes[i]->lockRenderResources(); + } +#if PX_PHYSICS_VERSION_MAJOR == 3 + if (mPhysXScene != NULL) + { + PX_PROFILE_ZONE("PhysXScene::fetchResults", GetInternalApexSDK()->getContextId()); + + SCOPED_PHYSX_LOCK_WRITE(this); + mPhysXScene->fetchResults(true); + // SJB TODO3.0 + mPxStepWasValid = true; + // Check if PhysX actually ran any substeps. (nbSubSteps is the amount of substeps ran during the last simulation) + /*float maxTimeStep; + uint32_t nbSubSteps, maxIter; + NxTimeStepMethod method; + mPhysXScene->getTiming(maxTimeStep, maxIter, method, &nbSubSteps); + + mPxStepWasValid = (nbSubSteps > 0);*/ + + mPxAccumElapsedTime += mElapsedTime; + + if (mPxStepWasValid) + { + mPxLastElapsedTime = mPxAccumElapsedTime; + mPxAccumElapsedTime = 0.0f; + + if (mTimeRemainder + mPxLastElapsedTime > 0.001f) + { + uint32_t elapsedMS = (uint32_t)((1000.0f) * (mTimeRemainder + mPxLastElapsedTime)); + mTotalElapsedMS += elapsedMS; + mTimeRemainder = (mTimeRemainder + mPxLastElapsedTime) - (float)elapsedMS * 0.001f; + } + } + + // restore the application user callbacks. 
+ mPhysXScene->setSimulationEventCallback(mUserNotify.getApplicationNotifier()); + mPhysXScene->setContactModifyCallback(mUserContactModify.getApplicationContactModify()); + + mUserNotify.playBatchedNotifications(); + } +#endif + + { + StatValue dataVal; + { + dataVal.Float = ApexScene::ticksToMilliseconds(fetchTime, Time::getCurrentCounterValue()); + APEX_CHECK_STAT_TIMER("--------- PhysXFetchResultTime (fetchTime)"); + } + setApexStatValue(PhysXFetchResultTime, dataVal); + } + + // reset simulation timer to measure fetchResults time + APEX_CHECK_STAT_TIMER("--------- Set mApexSimulateTickCount"); + mApexSimulateTickCount = Time::getCurrentCounterValue(); + + fetchPhysXStats(); + + for (uint32_t i = 0 ; i < mModuleScenes.size() ; i++) + { + mModuleScenes[i]->fetchResults(); // update render bounds, trigger callbacks, etc + } + + for (uint32_t i = 0 ; i < mModuleScenes.size() ; i++) + { + mModuleScenes[i]->unlockRenderResources(); + } + + for (uint32_t i = 0 ; i < mModuleScenes.size() ; i++) + { + mModuleScenes[i]->fetchResultsPostRenderUnlock(); + } + + mTaskManager->stopSimulation(); + + if (mApexSDK->isApexStatsEnabled()) + { + fetchApexStats(); + } + +#if APEX_CUDA_SUPPORT && !defined(INSTALLER) + mCudaTestManager->nextFrame(); + mCudaProfileManager->nextFrame(); +#endif + + return true; +} + + + +void ApexScene::fetchPhysXStats() +{ + WRITE_ZONE(); + PX_PROFILE_ZONE("ApexScene::fetchPhysXStats", GetInternalApexSDK()->getContextId()); + StatValue dataVal; + + // get the number of shapes and add it to the ApexStats + uint32_t nbShapes = 0; + uint32_t nbPairs = 0; + uint32_t nbAwakeShapes = 0; + +#if PX_PHYSICS_VERSION_MAJOR == 3 +# if USE_MANUAL_ACTOR_LOOP + uint32_t nbActors = 0; + static const PxActorTypeSelectionFlags flags = PxActorTypeSelectionFlag::eRIGID_STATIC + | PxActorTypeSelectionFlag::eRIGID_DYNAMIC; + + if (mPhysXScene) + { + nbActors = mPhysXScene->getNbActors(flags); + } + + if (nbActors) + { + PxActor** actorArray = 
(PxActor**)PxAlloca(sizeof(PxActor*) * nbActors); + mPhysXScene->getActors(flags, actorArray, nbActors); + + for (uint32_t actorIndex = 0; actorIndex < nbActors; ++actorIndex) + { + PxRigidActor* rigidActor = actorArray[actorIndex]->is<physx::PxRigidActor>(); + if (rigidActor) + { + nbShapes += rigidActor->getNbShapes(); + } + + PxRigidDynamic* dynamic = actorArray[actorIndex]->is<physx::PxRigidDynamic>(); + if (dynamic && !dynamic->isSleeping()) + { + nbAwakeShapes += dynamic->getNbShapes(); + } + } + } +# else + physx::PxSimulationStatistics sceneStats; + if (mPhysXScene) + { + SCOPED_PHYSX_LOCK_READ(mPhysXScene); + mPhysXScene->getSimulationStatistics(sceneStats); + nbShapes = sceneStats.nbDynamicBodies; + nbAwakeShapes = sceneStats.nbActiveDynamicBodies; + nbPairs = 0; + for (PxGeometryType::Enum i = PxGeometryType::eSPHERE; i < PxGeometryType::eGEOMETRY_COUNT; i = (PxGeometryType::Enum)(i + 1)) + { + nbPairs += sceneStats.getRbPairStats(physx::PxSimulationStatistics::eDISCRETE_CONTACT_PAIRS, PxGeometryType::eCONVEXMESH, i); + } + } +# endif +#endif + + dataVal.Int = (int32_t)nbShapes; + setApexStatValue(NumberOfShapes, dataVal); + + dataVal.Int = (int32_t)nbAwakeShapes; + setApexStatValue(NumberOfAwakeShapes, dataVal); + + dataVal.Int = (int32_t)nbPairs; + setApexStatValue(NumberOfCpuShapePairs, dataVal); + + dataVal.Int = 0; + setApexStatValue(RbThroughput, dataVal); +} + + + +void ApexScene::fetchApexStats() +{ + WRITE_ZONE(); + PX_PROFILE_ZONE("ApexScene::fetchApexStats", GetInternalApexSDK()->getContextId()); + StatValue dataVal; + + // get the number of actors and add it to the ApexStats + dataVal.Int = (int32_t)mActorArray.size(); + setApexStatValue(NumberOfActors, dataVal); + + + uint64_t qpc = Time::getCurrentCounterValue(); + dataVal.Float = ApexScene::ticksToMilliseconds(mApexSimulateTickCount, qpc); + APEX_CHECK_STAT_TIMER("--------- ApexPostTickTime (mApexSimulateTickCount)"); + + APEX_CHECK_STAT_TIMER("--------- Set mApexSimulateTickCount"); + 
mApexSimulateTickCount = qpc; + setApexStatValue(ApexPostTickTime, dataVal); + + //ModuleScenes can also generate stats. So let's collect and add those stats here. + + for (uint32_t i = 0; i < mModuleScenes.size(); i++) + { + SceneStats* moduleSceneStats; + moduleSceneStats = mModuleScenes[i]->getStats(); + + if (moduleSceneStats) + { + //O(n^2), rewrite to use a hash if num stats gets much larger + for (uint32_t j = 0; j < moduleSceneStats->numApexStats; j++) + { + StatsInfo& moduleSceneStat = moduleSceneStats->ApexStatsInfoPtr[j]; + + uint32_t k = 0; + while (k != mApexSceneStats.numApexStats && nvidia::strcmp(mApexSceneStats.ApexStatsInfoPtr[k].StatName, moduleSceneStats->ApexStatsInfoPtr[j].StatName) != 0) + { + k++; + } + bool found = (k != mApexSceneStats.numApexStats); + + if (found) + { + StatsInfo& sceneStat = mApexSceneStats.ApexStatsInfoPtr[k]; + + PX_ASSERT(sceneStat.StatType == moduleSceneStat.StatType); + + if (sceneStat.StatType == StatDataType::FLOAT) + { + sceneStat.StatCurrentValue.Float += moduleSceneStat.StatCurrentValue.Float; + } + else if (sceneStat.StatType == StatDataType::INT) + { + sceneStat.StatCurrentValue.Int += moduleSceneStat.StatCurrentValue.Int; + } + } + } + } + } +} + + + +bool ApexScene::checkResults(bool block) const +{ +// PX_PROFILE_ZONE("ApexScene::checkResults", GetInternalApexSDK()->getContextId()); + + uint32_t waitTime = block ? 
Sync::waitForever : 0; + if (!mSimulating) + { + return true; + } + else + { + return mFetchResultsReady.wait(waitTime); + } +} + +void ApexScene::lockRenderResources() +{ +#ifndef WITHOUT_DEBUG_VISUALIZE + if (mSceneRenderDebug) + { + mSceneRenderDebug->lockRenderResources(); + } +#endif + checkResults(true); +} + +void ApexScene::unlockRenderResources() +{ +#ifndef WITHOUT_DEBUG_VISUALIZE + if (mSceneRenderDebug) + { + mSceneRenderDebug->unlockRenderResources(); + } +#endif +} + +void ApexScene::updateRenderResources(bool rewriteBuffers, void* userRenderData) +{ + URR_SCOPE; + +#ifdef WITHOUT_DEBUG_VISUALIZE + PX_UNUSED(rewriteBuffers); + PX_UNUSED(userRenderData); +#else + visualize(); + + if (mSceneRenderDebug) + { + mSceneRenderDebug->updateRenderResources(rewriteBuffers, userRenderData); + } +#endif +} + +void ApexScene::dispatchRenderResources(UserRenderer& renderer) +{ +#ifdef WITHOUT_DEBUG_VISUALIZE + PX_UNUSED(renderer); +#else + if (mSceneRenderDebug) + { + mSceneRenderDebug->dispatchRenderResources(renderer); + } +#endif +} + +void ApexScene::visualize() +{ +#ifndef WITHOUT_DEBUG_VISUALIZE + if (mSceneRenderDebug && mDebugRenderParams->Enable && mDebugRenderParams->Scale!= 0.0f) + { + const physx::PxMat44& savedPose = *RENDER_DEBUG_IFACE(mSceneRenderDebug)->getPoseTyped(); + RENDER_DEBUG_IFACE(mSceneRenderDebug)->setIdentityPose(); + if (mDebugRenderParams->Bounds) + { + RENDER_DEBUG_IFACE(mSceneRenderDebug)->setCurrentColor(0xFFFFFF); + for (uint32_t i = 0; i < mActorArray.size(); ++i) + { + ApexActor* actor = mActorArray[i]; + RENDER_DEBUG_IFACE(mSceneRenderDebug)->debugBound(actor->getBounds()); + } + } + + for (ModuleSceneIntl** it = mModuleScenes.begin(); it != mModuleScenes.end(); ++it) + { + (*it)->visualize(); + } + RENDER_DEBUG_IFACE(mSceneRenderDebug)->setPose(savedPose); + } +#endif +} + +PxBounds3 ApexScene::getBounds() const +{ + READ_ZONE(); +#ifdef WITHOUT_DEBUG_VISUALIZE + PxBounds3 bound = PxBounds3::empty(); +#else + PxBounds3 bound = 
mSceneRenderDebug->getBounds(); +#endif + + return bound; +} + + +void ApexScene::allocateTasks() +{ + mCheckResults = PX_NEW(CheckResultsTask)(*this); + mPhysXSimulate = PX_NEW(PhysXSimulateTask)(*this, *mCheckResults); + mBetweenstepTasks = PX_NEW(PhysXBetweenStepsTask)(*this); +#if APEX_DURING_TICK_TIMING_FIX + mDuringTickComplete = PX_NEW(DuringTickCompleteTask)(*this); +#endif + mFetchResults = PX_NEW(FetchResultsTask)(*this); +} + +void ApexScene::freeTasks() +{ + if (mPhysXSimulate != NULL) + { + delete mPhysXSimulate; + mPhysXSimulate = NULL; + } + + if (mBetweenstepTasks != NULL) + { + delete mBetweenstepTasks; + mBetweenstepTasks = NULL; + } + +#if APEX_DURING_TICK_TIMING_FIX + if (mDuringTickComplete != NULL) + { + delete mDuringTickComplete; + mDuringTickComplete = NULL; + } +#endif + + if (mCheckResults != NULL) + { + delete mCheckResults; + mCheckResults = NULL; + } + + if (mFetchResults != NULL) + { + delete mFetchResults; + mFetchResults = NULL; + } +} + +void ApexScene::setUseDebugRenderable(bool state) +{ + WRITE_ZONE(); + mUseDebugRenderable = state; + if (mSceneRenderDebug) + { +#if !defined(WITHOUT_DEBUG_VISUALIZE) + mSceneRenderDebug->setUseDebugRenderable(state); +#endif + } +} + +uint32_t ApexScene::getSeed() +{ + return (uint32_t)(Time::getCurrentCounterValue() & 0xFFFFFFFF ); + //return IgnoredSeed != mSeed ? 
mSeed : (uint32_t)(1000 * getElapsedTime()); +} + +ModuleSceneIntl* ApexScene::getInternalModuleScene(const char* moduleName) +{ + ApexSimpleString str1(moduleName); + for (uint32_t i = 0; i < mModuleScenes.size(); i++) + { + ApexSimpleString str2(mModuleScenes[i]->getModule()->getName()); + if (str1 == str2) + { + return mModuleScenes[i]; + } + } + return NULL; +} + +#if PX_PHYSICS_VERSION_MAJOR == 3 + +void ApexScene::addActorPair(PxActor *actor0,PxActor *actor1) +{ + WRITE_ZONE(); + mPairFilter.addPair(PTR_TO_UINT64(actor0),PTR_TO_UINT64(actor1)); +} + +void ApexScene::removeActorPair(PxActor *actor0,PxActor *actor1) +{ + WRITE_ZONE(); + mPairFilter.removePair(PTR_TO_UINT64(actor0), PTR_TO_UINT64(actor1)); +} + +bool ApexScene::findActorPair(PxActor *actor0,PxActor *actor1) const +{ + READ_ZONE(); + return mPairFilter.findPair(PTR_TO_UINT64(actor0), PTR_TO_UINT64(actor1)); +} + +MirrorScene *ApexScene::createMirrorScene(nvidia::apex::Scene &mirrorScene, + MirrorScene::MirrorFilter &mirrorFilter, + float mirrorStaticDistance, + float mirrorDynamicDistance, + float mirrorDistanceThreshold) +{ + WRITE_ZONE(); + MirrorSceneImpl *ms = PX_NEW(MirrorSceneImpl)(*getPhysXScene(),*mirrorScene.getPhysXScene(),mirrorFilter,mirrorStaticDistance,mirrorDynamicDistance,mirrorDistanceThreshold); + return static_cast< MirrorScene *>(ms); +} + +#endif + +} +} // end namespace nvidia::apex diff --git a/APEX_1.4/framework/src/ApexSceneTasks.cpp b/APEX_1.4/framework/src/ApexSceneTasks.cpp new file mode 100644 index 00000000..854ddfc8 --- /dev/null +++ b/APEX_1.4/framework/src/ApexSceneTasks.cpp @@ -0,0 +1,313 @@ +/* + * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved. + * + * NVIDIA CORPORATION and its licensors retain all intellectual property + * and proprietary rights in and to this software, related documentation + * and any modifications thereto. 
 Any use, reproduction, disclosure or
 * distribution of this software and related documentation without an express
 * license agreement from NVIDIA CORPORATION is strictly prohibited.
 */


#include "ApexSceneTasks.h"
#include "FrameworkPerfScope.h"

#if PX_PHYSICS_VERSION_MAJOR == 3
#include "ScopedPhysXLock.h"
#endif

#include "PsTime.h"

namespace nvidia
{
namespace apex
{

// --------- PhysXSimulateTask

// Task that kicks off the PhysX simulation step; holds a reference to the
// CheckResultsTask that must run once simulation results are available.
PhysXSimulateTask::PhysXSimulateTask(ApexScene& scene, CheckResultsTask& checkResultsTask)
: mScene(&scene)
, mElapsedTime(0.0f)
, mFollowingTask(NULL)
, mCheckResultsTask(checkResultsTask)
#if PX_PHYSICS_VERSION_MAJOR == 3
, mScratchBlock(NULL)
, mScratchBlockSize(0)
#endif
{}

PhysXSimulateTask::~PhysXSimulateTask()
{
#if PX_PHYSICS_VERSION_MAJOR == 3
	// Scratch memory is owned by the caller that provided it; just drop the pointer.
	mScratchBlock = NULL;
	mScratchBlockSize = 0;
#endif
}

const char* PhysXSimulateTask::getName() const
{
	return AST_PHYSX_SIMULATE;
}


// Records APEX pre-tick timing stats, then launches PxScene::simulate() with
// mCheckResultsTask as the completion task. Under APEX_UE4, an empty scene
// skips simulate() and releases the completion task's reference directly so
// the task chain still advances.
void PhysXSimulateTask::run()
{
	// record the pretick APEX time
	StatValue dataVal;
	uint64_t qpc = Time::getCurrentCounterValue();
	dataVal.Float = ApexScene::ticksToMilliseconds(mScene->mApexSimulateTickCount, qpc);
	APEX_CHECK_STAT_TIMER("--------- ApexBeforeTickTime (mApexSimulateTickCount)");

	APEX_CHECK_STAT_TIMER("--------- Set mApexSimulateTickCount");
	mScene->mApexSimulateTickCount = qpc;

	mScene->setApexStatValue(ApexScene::ApexBeforeTickTime, dataVal);

	// start the PhysX simulation time timer
	APEX_CHECK_STAT_TIMER("--------- Set mPhysXSimulateTickCount");
	mScene->mPhysXSimulateTickCount = Time::getCurrentCounterValue();

#if PX_PHYSICS_VERSION_MAJOR == 3
	if (mScene->mPhysXScene)
	{
		PX_ASSERT(mElapsedTime >= 0.0f);
		SCOPED_PHYSX_LOCK_WRITE(mScene);
	#if APEX_UE4
		if (mScene->mPhysXScene->getNbActors(PxActorTypeFlags(0xff)))
			mScene->mPhysXScene->simulate(mElapsedTime, &mCheckResultsTask, mScratchBlock, mScratchBlockSize, false);
		else
			mCheckResultsTask.removeReference();
	#else
		mScene->mPhysXScene->simulate(mElapsedTime, &mCheckResultsTask, mScratchBlock, mScratchBlockSize, false);
	#endif
	}
#endif

#if PX_PHYSICS_VERSION_MAJOR == 0
	if (mFollowingTask != NULL)
	{
		mFollowingTask->removeReference();
	}
#endif
}



// Sets the timestep for the next simulate() call; must be non-negative.
void PhysXSimulateTask::setElapsedTime(float elapsedTime)
{
	PX_ASSERT(elapsedTime >= 0.0f);
	mElapsedTime = elapsedTime;
}



void PhysXSimulateTask::setFollowingTask(PxBaseTask* following)
{
	mFollowingTask = following;
}



// --------- CheckResultsTask

const char* CheckResultsTask::getName() const
{
	return AST_PHYSX_CHECK_RESULTS;
}


// Blocks on PxScene::checkResults(true) until the simulation step completes,
// then records PhysX simulation time in the APEX stats.
void CheckResultsTask::run()
{
#if !APEX_DURING_TICK_TIMING_FIX
	{
		// mark the end of the "during tick" simulation time
		StatValue dataVal;
		{
			uint64_t qpc = Time::getCurrentCounterValue();
			// NOTE(review): this path uses ticksToSeconds while the
			// APEX_DURING_TICK_TIMING_FIX path (DuringTickCompleteTask::run) records
			// the same ApexDuringTickTime stat via ticksToMilliseconds — confirm
			// which unit the stat consumers expect.
			dataVal.Float = ApexScene::ticksToSeconds(mScene->mApexSimulateTickCount, qpc);
			APEX_CHECK_STAT_TIMER("--------- ApexDuringTickTime (mApexSimulateTickCount)");

			APEX_CHECK_STAT_TIMER("--------- Set mApexSimulateTickCount");
			mScene->mApexSimulateTickCount = qpc;
		}
		mScene->setApexStatValue(ApexScene::ApexDuringTickTime, dataVal);
	}
#endif

#if PX_PHYSICS_VERSION_MAJOR == 3
	{
		SCOPED_PHYSX_LOCK_WRITE(mScene);
		if (mScene->mPhysXScene
#if APEX_UE4
			&& mScene->mPhysXScene->getNbActors(PxActorTypeFlags(0xff))
#endif
			)
			mScene->mPhysXScene->checkResults(true);
	}
#endif

	// get the PhysX simulation time and add it to the ApexStats
	{
		StatValue dataVal;
		{
			uint64_t qpc = Time::getCurrentCounterValue();
			dataVal.Float = ApexScene::ticksToMilliseconds(mScene->mPhysXSimulateTickCount, qpc);
			APEX_CHECK_STAT_TIMER("--------- PhysXSimulationTime (mPhysXSimulateTickCount)");
		}

		mScene->setApexStatValue(ApexScene::PhysXSimulationTime, dataVal);
	}
}



// --------- FetchResultsTask

const char* FetchResultsTask::getName() const
{
	return AST_PHYSX_FETCH_RESULTS;
}


// Intentionally empty: all work happens in release(), after the dispatcher has
// finished running this task (see release() below).
void FetchResultsTask::run()
{
}


// Stores the scene-completion task and takes a reference on it so it cannot
// complete before fetch-results has signalled readiness (released in release()).
void FetchResultsTask::setFollowingTask(PxBaseTask* following)
{
	mFollowingTask = following;
	if (mFollowingTask)
	{
		mFollowingTask->addReference();
	}
}



/*
* \brief Called by dispatcher after Task has been run.
*
* If you re-implement this method, you must call this base class
* version before returning.
*/
void FetchResultsTask::release()
{
	PxTask::release();

	// copy mFollowingTask into local variable, because it might be overwritten
	// as soon as mFetchResultsReady.set() is called (and before removeReference() is called on it)
	PxBaseTask* followingTask = mFollowingTask;
	mFollowingTask = NULL;

	// Allow ApexScene::fetchResults() to run (potentially unblocking game thread)
	mScene->mFetchResultsReady.set();

	// remove reference to the scene completion task submitted in Scene::simulate
	// this must be done after the scene's mFetchResultsReady event is set so that the
	// app's completion task can be assured that fetchResults is ready to run
	if (followingTask)
	{
		followingTask->removeReference();
	}
}



#if APEX_DURING_TICK_TIMING_FIX
// --------- DuringTickCompleteTask

const char* DuringTickCompleteTask::getName() const
{
	return AST_DURING_TICK_COMPLETE;
}



// Records the "during tick" APEX time stat (timing-fix variant of the block at
// the top of CheckResultsTask::run).
void DuringTickCompleteTask::run()
{
	// mark the end of the "during tick" simulation time
	StatValue dataVal;
	uint64_t qpc = Time::getCurrentCounterValue();
	dataVal.Float = ApexScene::ticksToMilliseconds(mScene->mApexSimulateTickCount, qpc);
	APEX_CHECK_STAT_TIMER("--------- ApexDuringTickTime (mApexSimulateTickCount)");

	APEX_CHECK_STAT_TIMER("--------- Set mApexSimulateTickCount");
	mScene->mApexSimulateTickCount = qpc;

	mScene->setApexStatValue(ApexScene::ApexDuringTickTime, dataVal);
}
#endif


// --------- PhysXBetweenStepsTask

const char* PhysXBetweenStepsTask::getName() const
{
	return AST_PHYSX_BETWEEN_STEPS;
}



// Drives manual sub-stepping: for each remaining substep, fetch the previous
// results, let every module scene run its interStep() hook, then kick the next
// simulate(). Finally drops the artificial reference on mLast.
void PhysXBetweenStepsTask::run()
{
	PX_ASSERT(mSubStepSize > 0.0f);
	PX_ASSERT(mNumSubSteps > 0);
#if PX_PHYSICS_VERSION_MAJOR == 3
	PxScene* scene = mScene.getPhysXScene();

	if (scene != NULL)
	{
		while (mSubStepNumber < mNumSubSteps)
		{
			PX_PROFILE_ZONE("ApexSceneManualSubstep", GetInternalApexSDK()->getContextId());
			// fetch the first substep
			uint32_t errorState = 0;
			{
				SCOPED_PHYSX_LOCK_WRITE(&mScene);
				scene->fetchResults(true, &errorState);
			}
			PX_ASSERT(errorState == 0);

			for (uint32_t i = 0; i < mScene.mModuleScenes.size(); i++)
			{
				PX_PROFILE_ZONE("ModuleSceneManualSubstep", GetInternalApexSDK()->getContextId());
				mScene.mModuleScenes[i]->interStep(mSubStepNumber, mNumSubSteps);
			}

			// run the next substep
			{
				SCOPED_PHYSX_LOCK_WRITE(&mScene);
				scene->simulate(mSubStepSize);
			}

			mSubStepNumber++;
		}
	}
#endif

	mLast->removeReference(); // decrement artificially high ref count that prevented checkresults from being executed
}



void PhysXBetweenStepsTask::setSubstepSize(float substepSize, uint32_t numSubSteps)
{
	mSubStepSize = substepSize;
	mNumSubSteps = numSubSteps;
}



// Records which substep to start at and chains this task before `last`
// (setContinuation also takes the reference released in run()).
void PhysXBetweenStepsTask::setFollower(uint32_t substepNumber, PxTask* last)
{
	mSubStepNumber = substepNumber;
	mLast = last;

	setContinuation(last);
}



} // namespace apex
} // namespace nvidia

// ---- original dump continues with a new file: APEX_1.4/framework/src/ApexSceneUserNotify.cpp ----
/*
 * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
 *
 * NVIDIA CORPORATION and its licensors retain all intellectual property
 * and proprietary rights in and to this software, related documentation
 * and any modifications thereto. Any use, reproduction, disclosure or
 * distribution of this software and related documentation without an express
 * license agreement from NVIDIA CORPORATION is strictly prohibited.
 */


#include "ApexDefs.h"

#if PX_PHYSICS_VERSION_MAJOR == 3

#pragma warning(push)
#pragma warning(disable: 4324)

#include <ApexSceneUserNotify.h>
#include "PxPreprocessor.h"
#include <PxJoint.h>
#include <PxScene.h>

namespace nvidia
{
namespace apex
{
	using namespace physx;

// Fan-out PxSimulationEventCallback: forwards each event to every registered
// module notifier and (immediately or batched) to the application's callback.
ApexSceneUserNotify::~ApexSceneUserNotify(void)
{
	// All callbacks should have been removed by now... something is wrong.
	PX_ASSERT(mModuleNotifiers.size() == 0);
}

// Registers a module's simulation-event callback for fan-out.
void ApexSceneUserNotify::addModuleNotifier(PxSimulationEventCallback& notify)
{
	mModuleNotifiers.pushBack(&notify);
}

// Unregisters a module's callback; order is not preserved (replaceWithLast).
// Asserts if the callback was never registered.
void ApexSceneUserNotify::removeModuleNotifier(PxSimulationEventCallback& notify)
{
	const uint32_t numNotifiers = mModuleNotifiers.size();
	uint32_t found = numNotifiers;
	for (uint32_t i = 0; i < numNotifiers; i++)
	{
		if (mModuleNotifiers[i] == &notify)
		{
			found = i;
			break;
		}
	}
	PX_ASSERT(found < numNotifiers);
	if (found < numNotifiers)
	{
		mModuleNotifiers.replaceWithLast(found);
	}
}

// Replays all batched events (break/contact/sleep/wake/trigger) to the app
// callback and clears the batch arrays. Entire body is compiled out unless
// TODO_HANDLE_NEW_CONTACT_STREAM is defined, so this is currently a no-op.
void ApexSceneUserNotify::playBatchedNotifications()
{
#if TODO_HANDLE_NEW_CONTACT_STREAM
	// onConstraintBreak
	{
		for (uint32_t i = 0; i < mBatchedBreakNotifications.size(); i++)
		{
			physx::PxConstraintInfo& constraintInfo = mBatchedBreakNotifications[i];
			PX_ASSERT(mAppNotify != NULL);
			mAppNotify->onConstraintBreak(&constraintInfo, 1);

			/*
			// apan, should we release joint? how?
			if (releaseJoint)
			{
				// scene isn't running anymore, guess we need to release the joint by hand.
				notify.breakingJoint->getScene().releaseJoint(*notify.breakingJoint);
			}
			*/
		}
		// release if the array is too big
		if (mBatchedBreakNotifications.size() * 4 < mBatchedBreakNotifications.capacity())
		{
			mBatchedBreakNotifications.shrink();
		}

		mBatchedBreakNotifications.clear();
	}


	// onContact
	{
		for (uint32_t i = 0; i < mBatchedContactNotifications.size(); i++)
		{
			BatchedContactNotification& contact = mBatchedContactNotifications[i];
			PX_ASSERT(contact.batchedStreamStart < mBatchedContactStreams.size());
			// Patch the stream pointer: it was stored as an offset because the
			// stream array may reallocate while batching.
			contact.batchedPair.stream = (PxConstContactStream)(mBatchedContactStreams.begin() + contact.batchedStreamStart);

			mAppNotify->onContact(contact.batchedPair, contact.batchedEvents);
		}
		mBatchedContactNotifications.clear();
		mBatchedContactStreams.clear();
	}

	// onSleep/onWake
	{
		for (uint32_t i = 0; i < mBatchedSleepWakeEventBorders.size(); i++)
		{
			const SleepWakeBorders border = mBatchedSleepWakeEventBorders[i];
			if (border.sleepEvents)
			{
				mAppNotify->onSleep(&mBatchedSleepEvents[border.start], border.count);
			}
			else
			{
				mAppNotify->onWake(&mBatchedWakeEvents[border.start], border.count);
			}
		}
		mBatchedSleepWakeEventBorders.clear();
		mBatchedSleepEvents.clear();
		mBatchedWakeEvents.clear();
	}

	// mBatchedTriggerReports
	{
		for (uint32_t i = 0; i < mBatchedTriggerReports.size(); i++)
		{
			PxTriggerPair& triggerPair = mBatchedTriggerReports[i];
			mAppNotify->onTrigger(&triggerPair, 1);
		}
		mBatchedTriggerReports.clear();
	}
#endif
}

// Forwards constraint-break events to module notifiers, then either batches
// them (copy per constraint) or forwards directly to the app callback.
void ApexSceneUserNotify::onConstraintBreak(physx::PxConstraintInfo* constraints, uint32_t count)
{
	for (Array<PxSimulationEventCallback*>::Iterator curr = mModuleNotifiers.begin(); curr != mModuleNotifiers.end(); ++curr)
	{
		(*curr)->onConstraintBreak(constraints, count);
	}

	if (mAppNotify != NULL)
	{
		if (mBatchAppNotify)
		{
			for (uint32_t i = 0 ; i < count; i++)
			{
				mBatchedBreakNotifications.pushBack(constraints[i]);
			}
		}
		else
		{
mAppNotify->onConstraintBreak(constraints, count); + } + } +} + +void ApexSceneUserNotify::onWake(PxActor** actors, uint32_t count) +{ + for (Array<PxSimulationEventCallback*>::Iterator curr = mModuleNotifiers.begin(); curr != mModuleNotifiers.end(); ++curr) + { + (*curr)->onWake(actors, count); + } + + if (mAppNotify != NULL) + { + if (mBatchAppNotify) + { + SleepWakeBorders border(mBatchedWakeEvents.size(), count, false); + mBatchedSleepWakeEventBorders.pushBack(border); + mBatchedWakeEvents.resize(mBatchedWakeEvents.size() + count); + for (uint32_t i = 0; i < count; i++) + { + mBatchedWakeEvents.pushBack(actors[i]); + } + } + else + { + mAppNotify->onWake(actors, count); + } + } +} + +void ApexSceneUserNotify::onSleep(PxActor** actors, uint32_t count) +{ + for (Array<PxSimulationEventCallback*>::Iterator curr = mModuleNotifiers.begin(); curr != mModuleNotifiers.end(); ++curr) + { + (*curr)->onSleep(actors, count); + } + if (mAppNotify) + { + if (mBatchAppNotify) + { + SleepWakeBorders border(mBatchedSleepEvents.size(), count, true); + mBatchedSleepWakeEventBorders.pushBack(border); + mBatchedSleepEvents.resize(mBatchedSleepEvents.size() + count); + for (uint32_t i = 0; i < count; i++) + { + mBatchedSleepEvents.pushBack(actors[i]); + } + } + else + { + mAppNotify->onSleep(actors, count); + } + } +} + + +void ApexSceneUserNotify::onContact(const physx::PxContactPairHeader& pairHeader, const PxContactPair* pairs, uint32_t nbPairs) +{ + for (Array<PxSimulationEventCallback*>::Iterator curr = mModuleNotifiers.begin(); curr != mModuleNotifiers.end(); ++curr) + { + (*curr)->onContact(pairHeader, pairs, nbPairs); + } + + if (mAppNotify) + { + if (mBatchAppNotify) + { +#if TODO_HANDLE_NEW_CONTACT_STREAM + mBatchedContactNotifications.pushBack(BatchedContactNotification(pairHeader, pairs, nbPairs)); + const uint32_t length = pair.contactCount; //getContactStreamLength(pair.stream); + for (uint32_t i = 0; i < length; i++) + { + 
mBatchedContactStreams.pushBack(pair.stream[i]); + } +#endif + } + else + { + mAppNotify->onContact(pairHeader, pairs, nbPairs); + } + } +} + + +#if TODO_HANDLE_NEW_CONTACT_STREAM +class ApexContactStreamIterator : public PxContactStreamIterator +{ +public: + ApexContactStreamIterator( PxConstContactStream streamIt) : PxContactStreamIterator(streamIt) { } + + PxConstContactStream getStreamIt() { return streamIt; } +}; +#endif + +void ApexSceneUserNotify::onTrigger(PxTriggerPair* pairs, uint32_t count) +{ + if (mAppNotify != NULL) + { + if (mBatchAppNotify) + { + for (uint32_t i = 0; i < count; i++) + { + mBatchedTriggerReports.pushBack(pairs[count]); + } + } + else + { + mAppNotify->onTrigger(pairs, count); + } + } +} + + +void ApexSceneUserNotify::onAdvance(const PxRigidBody*const* bodyBuffer, const PxTransform* poseBuffer, const PxU32 count) +{ + PX_UNUSED(bodyBuffer); + PX_UNUSED(poseBuffer); + PX_UNUSED(count); +} + + + + +ApexSceneUserContactModify::ApexSceneUserContactModify(void) +{ + mAppContactModify = 0; +} + +ApexSceneUserContactModify::~ApexSceneUserContactModify(void) +{ + // All callbacks should have been removed by now... something is wrong. 
	PX_ASSERT(mModuleContactModify.size() == 0);
}

// Registers a module's contact-modify callback for fan-out.
void ApexSceneUserContactModify::addModuleContactModify(PxContactModifyCallback& contactModify)
{
	mModuleContactModify.pushBack(&contactModify);
}

// Unregisters a module callback; order not preserved (replaceWithLast).
// Asserts if the callback was never registered.
void ApexSceneUserContactModify::removeModuleContactModify(PxContactModifyCallback& contactModify)
{
	const uint32_t numContactModifies = mModuleContactModify.size();
	uint32_t found = numContactModifies;
	for (uint32_t i = 0; i < numContactModifies; i++)
	{
		if (mModuleContactModify[i] == &contactModify)
		{
			found = i;
			break;
		}
	}
	PX_ASSERT(found < numContactModifies);
	if (found < numContactModifies)
	{
		mModuleContactModify.replaceWithLast(found);
	}
}

// Sets (or clears, with NULL) the application's contact-modify callback.
void ApexSceneUserContactModify::setApplicationContactModify(PxContactModifyCallback* contactModify)
{
	mAppContactModify = contactModify;
}

// Forwards contact-modify pairs to every module callback, then the app's.
void ApexSceneUserContactModify::onContactModify(PxContactModifyPair* const pairs, uint32_t count)
{
	for (Array<PxContactModifyCallback*>::Iterator curr = mModuleContactModify.begin(); curr != mModuleContactModify.end(); curr++)
	{
		(*curr)->onContactModify(pairs, count);
	}
	if (mAppContactModify)
	{
		mAppContactModify->onContactModify(pairs, count);
	}
}



}
} // namespace nvidia::apex
#pragma warning(pop)

#endif // PX_PHYSICS_VERSION_MAJOR == 3
// ---- original dump continues with a new file: APEX_1.4/framework/src/ApexVertexBuffer.cpp ----
/*
 * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
 *
 * NVIDIA CORPORATION and its licensors retain all intellectual property
 * and proprietary rights in and to this software, related documentation
 * and any modifications thereto. Any use, reproduction, disclosure or
 * distribution of this software and related documentation without an express
 * license agreement from NVIDIA CORPORATION is strictly prohibited.
 */


#include "ApexVertexBuffer.h"

#include "ApexSDKIntl.h"

#include "VertexFormatParameters.h"

#include <ParamArray.h>

#include "PsMemoryBuffer.h"
#include "Cof44.h"

#include "BufferU8x1.h"
#include "BufferU8x2.h"
#include "BufferU8x3.h"
#include "BufferU8x4.h"
#include "BufferU16x1.h"
#include "BufferU16x2.h"
#include "BufferU16x3.h"
#include "BufferU16x4.h"
#include "BufferU32x1.h"
#include "BufferU32x2.h"
#include "BufferU32x3.h"
#include "BufferU32x4.h"
#include "BufferF32x1.h"
#include "BufferF32x2.h"
#include "BufferF32x3.h"
#include "BufferF32x4.h"

#include "ApexPermute.h"

namespace nvidia
{
namespace apex
{

// In debug builds, assert that an NvParameterized call returned ERROR_NONE;
// in release builds, just evaluate the call.
#ifdef _DEBUG
#define VERIFY_PARAM(_A) PX_ASSERT(_A == NvParameterized::ERROR_NONE)
#else
#define VERIFY_PARAM(_A) _A
#endif

// Transform Vec3 by PxMat44Legacy
PX_INLINE void transform_FLOAT3_by_PxMat44(FLOAT3_TYPE& dst, const FLOAT3_TYPE& src, const PxMat44& m)
{
	(PxVec3&)dst = m.transform((const PxVec3&)src);
}

// Transform Vec3 by PxMat33
PX_INLINE void transform_FLOAT3_by_PxMat33(FLOAT3_TYPE& dst, const FLOAT3_TYPE& src, const PxMat33& m)
{
	(PxVec3&)dst = m * (const PxVec3&)src;
}

// Transform Vec4 (tangent) by PxMat33; w (handedness) is only sign-flipped by
// the matrix determinant, never rotated.
PX_INLINE void transform_FLOAT4_by_PxMat33(FLOAT4_TYPE& dst, const FLOAT4_TYPE& src, const PxMat33& m)
{
	const PxVec4 source = (const PxVec4&)src;
	(PxVec4&)dst = PxVec4(m * source.getXYZ(), PxSign(m.getDeterminant()) * source.w);
}

// Transform Quat by PxMat33
// NOTE(review): only the xyz components are multiplied by the matrix and w is
// left untouched — this is not a general quaternion rotation; confirm this is
// the intended treatment for quat-encoded buffers.
PX_INLINE void transform_FLOAT4_QUAT_by_PxMat33(FLOAT4_QUAT_TYPE& dst, const FLOAT4_QUAT_TYPE& src, const PxMat33& m)
{
	*((PxVec3*)&dst) = m * (*(const PxVec3*)&src.x);
}

// Multiply Vec3 by scalar
PX_INLINE void transform_FLOAT3_by_float(FLOAT3_TYPE& dst, const FLOAT3_TYPE& src, const float& s)
{
	(PxVec3&)dst = s * (const PxVec3&)src;
}

// Transform signed normalized byte 3-vector by PxMat44 (round-trips through float).
PX_INLINE void transform_BYTE_SNORM3_by_PxMat44(BYTE_SNORM3_TYPE& dst, const BYTE_SNORM3_TYPE& src, const PxMat44& m)
{
	PxVec3 v;
	convert_FLOAT3_from_BYTE_SNORM3((FLOAT3_TYPE&)v, src);
	transform_FLOAT3_by_PxMat44((FLOAT3_TYPE&)v, (const FLOAT3_TYPE&)v, m);
	convert_BYTE_SNORM3_from_FLOAT3(dst, (const FLOAT3_TYPE&)v);
}

// Transform signed normalized byte 3-vector by PxMat33
PX_INLINE void transform_BYTE_SNORM3_by_PxMat33(BYTE_SNORM3_TYPE& dst, const BYTE_SNORM3_TYPE& src, const PxMat33& m)
{
	PxVec3 v;
	convert_FLOAT3_from_BYTE_SNORM3((FLOAT3_TYPE&)v, src);
	transform_FLOAT3_by_PxMat33((FLOAT3_TYPE&)v, (const FLOAT3_TYPE&)v, m);
	convert_BYTE_SNORM3_from_FLOAT3(dst, (const FLOAT3_TYPE&)v);
}

// Transform signed normalized byte 4-vector by PxMat33
PX_INLINE void transform_BYTE_SNORM4_by_PxMat33(BYTE_SNORM4_TYPE& dst, const BYTE_SNORM4_TYPE& src, const PxMat33& m)
{
	physx::PxVec4 v;
	convert_FLOAT4_from_BYTE_SNORM4((FLOAT4_TYPE&)v, src);
	transform_FLOAT4_by_PxMat33((FLOAT4_TYPE&)v, (const FLOAT4_TYPE&)v, m);
	convert_BYTE_SNORM4_from_FLOAT4(dst, (const FLOAT4_TYPE&)v);
}

// Multiply signed normalized byte 3-vector by scalar
PX_INLINE void transform_BYTE_SNORM3_by_float(BYTE_SNORM3_TYPE& dst, const BYTE_SNORM3_TYPE& src, const float& s)
{
	PxVec3 v;
	convert_FLOAT3_from_BYTE_SNORM3((FLOAT3_TYPE&)v, src);
	transform_FLOAT3_by_float((FLOAT3_TYPE&)v, (const FLOAT3_TYPE&)v, s);
	convert_BYTE_SNORM3_from_FLOAT3(dst, (const FLOAT3_TYPE&)v);
}

// Transform signed normalized byte quat by PxMat33 (delegates; only xyz touched)
PX_INLINE void transform_BYTE_SNORM4_QUATXYZW_by_PxMat33(BYTE_SNORM4_QUATXYZW_TYPE& dst, const BYTE_SNORM4_QUATXYZW_TYPE& src, const PxMat33& m)
{
	transform_BYTE_SNORM3_by_PxMat33(*(BYTE_SNORM3_TYPE*)&dst, *(const BYTE_SNORM3_TYPE*)&src, m);
}

// Transform signed normalized short 3-vector by PxMat44
PX_INLINE void transform_SHORT_SNORM3_by_PxMat44(SHORT_SNORM3_TYPE& dst, const SHORT_SNORM3_TYPE& src, const PxMat44& m)
{
	PxVec3 v;
	convert_FLOAT3_from_SHORT_SNORM3((FLOAT3_TYPE&)v, src);
	transform_FLOAT3_by_PxMat44((FLOAT3_TYPE&)v, (const FLOAT3_TYPE&)v, m);
	convert_SHORT_SNORM3_from_FLOAT3(dst, (const FLOAT3_TYPE&)v);
}

// Transform signed normalized short 3-vector by PxMat33
PX_INLINE void transform_SHORT_SNORM3_by_PxMat33(SHORT_SNORM3_TYPE& dst, const SHORT_SNORM3_TYPE& src, const PxMat33& m)
{
	PxVec3 v;
	convert_FLOAT3_from_SHORT_SNORM3((FLOAT3_TYPE&)v, src);
	transform_FLOAT3_by_PxMat33((FLOAT3_TYPE&)v, (const FLOAT3_TYPE&)v, m);
	convert_SHORT_SNORM3_from_FLOAT3(dst, (const FLOAT3_TYPE&)v);
}

// Transform signed normalized short 4-vector by PxMat33
PX_INLINE void transform_SHORT_SNORM4_by_PxMat33(SHORT_SNORM4_TYPE& dst, const SHORT_SNORM4_TYPE& src, const PxMat33& m)
{
	physx::PxVec4 v;
	convert_FLOAT4_from_SHORT_SNORM4((FLOAT4_TYPE&)v, src);
	transform_FLOAT4_by_PxMat33((FLOAT4_TYPE&)v, (const FLOAT4_TYPE&)v, m);
	convert_SHORT_SNORM4_from_FLOAT4(dst, (const FLOAT4_TYPE&)v);
}

// Multiply signed normalized short 3-vector by scalar
PX_INLINE void transform_SHORT_SNORM3_by_float(SHORT_SNORM3_TYPE& dst, const SHORT_SNORM3_TYPE& src, const float& s)
{
	PxVec3 v;
	convert_FLOAT3_from_SHORT_SNORM3((FLOAT3_TYPE&)v, src);
	transform_FLOAT3_by_float((FLOAT3_TYPE&)v, (const FLOAT3_TYPE&)v, s);
	convert_SHORT_SNORM3_from_FLOAT3(dst, (const FLOAT3_TYPE&)v);
}

// Transform signed normalized short quat by PxMat33 (delegates; only xyz touched)
PX_INLINE void transform_SHORT_SNORM4_QUATXYZW_by_PxMat33(SHORT_SNORM4_QUATXYZW_TYPE& dst, const SHORT_SNORM4_QUATXYZW_TYPE& src, const PxMat33& m)
{
	transform_SHORT_SNORM3_by_PxMat33(*(SHORT_SNORM3_TYPE*)&dst, *(const SHORT_SNORM3_TYPE*)&src, m);
}

#define SAME(x) x

// Expands to a switch case that transforms numVertices elements in place
// (dst may alias src) using the matching transform_<DataType>_by_<OpType> helper.
#define HANDLE_TRANSFORM( _DataType, _OpType ) \
	case RenderDataFormat::SAME(_DataType): \
		while( numVertices-- ) \
		{ \
			transform_##_DataType##_by_##_OpType( *(_DataType##_TYPE*)dst, *(const _DataType##_TYPE*)src, op ); \
			((uint8_t*&)dst) += sizeof( _DataType##_TYPE ); \
			((const uint8_t*&)src) += sizeof( _DataType##_TYPE ); \
		} \
		return;

// Applies a full 4x4 transform (e.g. positions) to a typed vertex stream.
// Asserts on formats without a registered handler.
void transformRenderBuffer(void* dst, const void* src, RenderDataFormat::Enum format, uint32_t numVertices, const PxMat44& op)
{
	switch (format)
	{
		// Put transform handlers here
		HANDLE_TRANSFORM(FLOAT3, PxMat44)
		HANDLE_TRANSFORM(BYTE_SNORM3, PxMat44)
		HANDLE_TRANSFORM(SHORT_SNORM3, PxMat44)
	default:
		break;
	}

	PX_ALWAYS_ASSERT();	// Unhandled format
}

// Applies a 3x3 transform (normals/tangents/quats) to a typed vertex stream.
void transformRenderBuffer(void* dst, const void* src, RenderDataFormat::Enum format, uint32_t numVertices, const PxMat33& op)
{
	switch (format)
	{
		// Put transform handlers here
		HANDLE_TRANSFORM(FLOAT3, PxMat33)
		HANDLE_TRANSFORM(FLOAT4, PxMat33)
		HANDLE_TRANSFORM(FLOAT4_QUAT, PxMat33)
		HANDLE_TRANSFORM(BYTE_SNORM3, PxMat33)
		HANDLE_TRANSFORM(BYTE_SNORM4, PxMat33)
		HANDLE_TRANSFORM(BYTE_SNORM4_QUATXYZW, PxMat33)
		HANDLE_TRANSFORM(SHORT_SNORM3, PxMat33)
		HANDLE_TRANSFORM(SHORT_SNORM4, PxMat33)
		HANDLE_TRANSFORM(SHORT_SNORM4_QUATXYZW, PxMat33)
	default:
		break;
	}

	PX_ALWAYS_ASSERT();	// Unhandled format
}

// Applies a uniform scalar (e.g. scale) to a typed vertex stream.
void transformRenderBuffer(void* dst, const void* src, RenderDataFormat::Enum format, uint32_t numVertices, const float& op)
{
	switch (format)
	{
		// Put transform handlers here
		HANDLE_TRANSFORM(FLOAT3, float)
		HANDLE_TRANSFORM(BYTE_SNORM3, float)
	default:
		break;
	}

	PX_ALWAYS_ASSERT();	// Unhandled format
}


ApexVertexBuffer::ApexVertexBuffer() : mParams(NULL), mFormat(NULL)
{
}

ApexVertexBuffer::~ApexVertexBuffer()
{
	// mParams must have been released/handed off before destruction.
	PX_ASSERT(mParams == NULL);
}

// Adopts the given vertex format (copied into mFormat), sizes the per-semantic
// buffer array to match, and allocates vertexCount elements per buffer.
void ApexVertexBuffer::build(const VertexFormat& format, uint32_t vertexCount)
{
	const ApexVertexFormat* apexVertexFormat = DYNAMIC_CAST(const ApexVertexFormat*)(&format);
	if (apexVertexFormat)
	{
		mFormat.copy(*apexVertexFormat);
	}

	NvParameterized::Handle handle(*mParams);
	VERIFY_PARAM(mParams->getParameterHandle("buffers", handle));
	VERIFY_PARAM(mParams->resizeArray(handle, mFormat.mParams->bufferFormats.arraySizes[0]));

	resize(vertexCount);
}

// Transforms all geometric streams in place: positions by the full matrix,
// normals by its cofactor (inverse-transpose equivalent), tangents and
// binormals by the upper-left 3x3.
void ApexVertexBuffer::applyTransformation(const PxMat44& transformation)
{
	RenderDataFormat::Enum format;
	void* buf;
	uint32_t index;

	// Positions
	index = (uint32_t)getFormat().getBufferIndexFromID(getFormat().getSemanticID(RenderVertexSemantic::POSITION));
	buf = getBuffer(index);
	if (buf)
	{
		format = getFormat().getBufferFormat(index);
		transformRenderBuffer(buf, buf, format, getVertexCount(), transformation);
	}

	// Normals
	index = (uint32_t)getFormat().getBufferIndexFromID(getFormat().getSemanticID(RenderVertexSemantic::NORMAL));
	buf = getBuffer(index);
	if (buf)
	{
		// PH: the Cofactor matrix now also handles negative determinants, so it does the same as multiplying with the inverse transpose of transformation.M.
		const Cof44 cof(transformation);
		format = getFormat().getBufferFormat(index);
		transformRenderBuffer(buf, buf, format, getVertexCount(), cof.getBlock33());
	}

	// Tangents
	index = (uint32_t)getFormat().getBufferIndexFromID(getFormat().getSemanticID(RenderVertexSemantic::TANGENT));
	buf = getBuffer(index);
	if (buf)
	{
		format = getFormat().getBufferFormat(index);
		const PxMat33 tm(transformation.column0.getXYZ(),
						 transformation.column1.getXYZ(),
						 transformation.column2.getXYZ());
		transformRenderBuffer(buf, buf, format, getVertexCount(), tm);
	}

	// Binormals
	index = (uint32_t)getFormat().getBufferIndexFromID(getFormat().getSemanticID(RenderVertexSemantic::BINORMAL));
	buf = getBuffer(index);
	if (buf)
	{
		format = getFormat().getBufferFormat(index);
		const PxMat33 tm(transformation.column0.getXYZ(),
						 transformation.column1.getXYZ(),
						 transformation.column2.getXYZ());
		transformRenderBuffer(buf, buf, format, getVertexCount(), tm);
	}
}



// Uniformly scales the position stream in place.
void ApexVertexBuffer::applyScale(float scale)
{
	uint32_t index = (uint32_t)getFormat().getBufferIndexFromID(getFormat().getSemanticID(RenderVertexSemantic::POSITION));
	void* buf = getBuffer(index);
	RenderDataFormat::Enum format = getFormat().getBufferFormat(index);
	transformRenderBuffer(buf, buf, format, getVertexCount(), scale);
}



// Collapses separate FLOAT3 tangent + binormal streams into a single FLOAT4
// tangent stream whose w encodes binormal handedness, removing the binormal
// buffer from both the data and the format. Returns true on success, false if
// the required FLOAT3 normal/tangent/binormal streams are not all present.
// NOTE(review): if newTangentsBuffer is created but one of the other buffer
// pointers is NULL, the function returns false without destroying
// newTangentsBuffer — looks like a leak on that partial-failure path; confirm.
bool ApexVertexBuffer::mergeBinormalsIntoTangents()
{
	const uint32_t numBuffers = mFormat.getBufferCount();

	int32_t normalBufferIndex = -1;
	int32_t tangentBufferIndex = -1;
	int32_t binormalBufferIndex = -1;
	for (uint32_t i = 0; i < numBuffers; i++)
	{
		const RenderVertexSemantic::Enum semantic = mFormat.getBufferSemantic(i);
		const RenderDataFormat::Enum format = mFormat.getBufferFormat(i);
		if (semantic == RenderVertexSemantic::NORMAL && format == RenderDataFormat::FLOAT3)
		{
			normalBufferIndex = (int32_t)i;
		}
		else if (semantic == RenderVertexSemantic::TANGENT && format == RenderDataFormat::FLOAT3)
		{
			tangentBufferIndex = (int32_t)i;
		}
		else if (semantic == RenderVertexSemantic::BINORMAL && format == RenderDataFormat::FLOAT3)
		{
			binormalBufferIndex = (int32_t)i;
		}
	}

	if (normalBufferIndex != -1 && tangentBufferIndex != -1 && binormalBufferIndex != -1)
	{
		// PH: This gets dirty. modifying the parameterized object directly
		BufferF32x3* normalsBuffer = static_cast<BufferF32x3*>(mParams->buffers.buf[normalBufferIndex]);
		BufferF32x3* oldTangentsBuffer = static_cast<BufferF32x3*>(mParams->buffers.buf[tangentBufferIndex]);
		BufferF32x3* oldBinormalsBuffer = static_cast<BufferF32x3*>(mParams->buffers.buf[binormalBufferIndex]);
		BufferF32x4* newTangentsBuffer = static_cast<BufferF32x4*>(GetInternalApexSDK()->getParameterizedTraits()->createNvParameterized("BufferF32x4"));

		if (normalsBuffer != NULL && oldTangentsBuffer != NULL && oldBinormalsBuffer != NULL && newTangentsBuffer != NULL)
		{
			const uint32_t numElements = (uint32_t)oldTangentsBuffer->data.arraySizes[0];

			PX_ASSERT(oldTangentsBuffer->data.arraySizes[0] == oldBinormalsBuffer->data.arraySizes[0]);
			{
				// resize the array
				NvParameterized::Handle handle(*newTangentsBuffer, "data");
				PX_ASSERT(handle.isValid());
				handle.resizeArray((int32_t)numElements);
			}
			PX_ASSERT(oldTangentsBuffer->data.arraySizes[0] == newTangentsBuffer->data.arraySizes[0]);

			const PxVec3* normals = normalsBuffer->data.buf;
			const PxVec3* oldTangents = oldTangentsBuffer->data.buf;
			const PxVec3* oldBinormals = oldBinormalsBuffer->data.buf;
			PxVec4* newTangents = (PxVec4*)newTangentsBuffer->data.buf;

			for (uint32_t i = 0; i < numElements; i++)
			{
				// w = handedness: sign of ((normal x tangent) . binormal)
				const float binormal = PxSign(normals[i].cross(oldTangents[i]).dot(oldBinormals[i]));
				newTangents[i] = PxVec4(oldTangents[i], binormal);
			}

			// Ok, real dirty now
			mParams->buffers.buf[(uint32_t)tangentBufferIndex] = newTangentsBuffer;
			// Compact the buffer array over the removed binormal slot.
			for (uint32_t i = (uint32_t)binormalBufferIndex + 1; i < numBuffers; i++)
			{
				mParams->buffers.buf[i - 1] = mParams->buffers.buf[i];
			}
			mParams->buffers.buf[numBuffers - 1] = NULL;
			{
				NvParameterized::Handle handle(*mParams, "buffers");
				PX_ASSERT(handle.isValid());
				handle.resizeArray((int32_t)numBuffers - 1);
			}
			oldTangentsBuffer->destroy();
			oldBinormalsBuffer->destroy();

			// and make same change to the format too
			VertexFormatParameters* format = static_cast<VertexFormatParameters*>(mParams->vertexFormat);
			PX_ASSERT(format->bufferFormats.buf[tangentBufferIndex].semantic == RenderVertexSemantic::TANGENT);
			PX_ASSERT(format->bufferFormats.buf[tangentBufferIndex].format == RenderDataFormat::FLOAT3);
			format->bufferFormats.buf[tangentBufferIndex].format = RenderDataFormat::FLOAT4;

			VertexFormatParametersNS::BufferFormat_Type binormalBuffer = format->bufferFormats.buf[binormalBufferIndex];
			for (uint32_t i = (uint32_t)binormalBufferIndex + 1; i < numBuffers; i++)
			{
				format->bufferFormats.buf[i - 1] = format->bufferFormats.buf[i];
			}

			// swap it to the last such that it gets released properly
			format->bufferFormats.buf[numBuffers - 1] = binormalBuffer;
			{
				NvParameterized::Handle handle(*format, "bufferFormats");
				PX_ASSERT(handle.isValid());
				handle.resizeArray((int32_t)numBuffers - 1);
			}

			return true;
		}
	}
	return false;
}



// Copies one vertex (all semantics with a matching buffer ID in the source)
// from srcBufferPtr[srcIndex] into this buffer at dstIndex. A NULL
// srcBufferPtr means copy within this buffer. Asserts if the buffer-array
// sizes differ.
void ApexVertexBuffer::copy(uint32_t dstIndex, uint32_t srcIndex, ApexVertexBuffer* srcBufferPtr)
{
	ApexVertexBuffer& srcVB = srcBufferPtr != NULL ? *srcBufferPtr : *this;
	ApexVertexFormat& srcVF = srcVB.mFormat;

	if (mParams->buffers.arraySizes[0] != srcVB.mParams->buffers.arraySizes[0])
	{
		PX_ALWAYS_ASSERT();
		return;
	}

	for (uint32_t i = 0; i < (uint32_t)mParams->buffers.arraySizes[0]; i++)
	{
		RenderDataFormat::Enum dstFormat = mFormat.getBufferFormat(i);
		VertexFormat::BufferID id = mFormat.getBufferID(i);
		const int32_t srcBufferIndex = srcVF.getBufferIndexFromID(id);
		if (srcBufferIndex >= 0)
		{
			RenderDataFormat::Enum srcFormat = srcVF.getBufferFormat((uint32_t)srcBufferIndex);
			NvParameterized::Interface* dstInterface = mParams->buffers.buf[i];
			NvParameterized::Interface* srcInterface = srcVB.mParams->buffers.buf[(uint32_t)srcBufferIndex];
			// BRG: Using PH's reasoning: Technically all those CustomBuffer* classes should have the same struct, so I just use the first one
			BufferU8x1& srcBuffer = *static_cast<BufferU8x1*>(srcInterface);
			BufferU8x1& dstBuffer = *static_cast<BufferU8x1*>(dstInterface);
			PX_ASSERT(dstIndex < (uint32_t)dstBuffer.data.arraySizes[0]);
			PX_ASSERT(srcIndex < (uint32_t)srcBuffer.data.arraySizes[0]);
			copyRenderVertexData(dstBuffer.data.buf, dstFormat, dstIndex, srcBuffer.data.buf, srcFormat, srcIndex);
		}
	}
}

// Resizes every per-semantic buffer to vertexCount elements.
// NOTE(review): this definition runs past the end of the visible chunk — the
// remainder of the body lies outside this excerpt.
void ApexVertexBuffer::resize(uint32_t vertexCount)
{
	mParams->vertexCount = vertexCount;

	NvParameterized::Handle handle(*mParams);

	VERIFY_PARAM(mParams->getParameterHandle("buffers", handle));
	int32_t buffersSize = 0;
	VERIFY_PARAM(mParams->getArraySize(handle, buffersSize));

	for (int32_t i = 0; i < buffersSize; i++)
	{
		RenderDataFormat::Enum outFormat = mFormat.getBufferFormat((uint32_t)i);

		NvParameterized::Handle elementHandle(*mParams);
		VERIFY_PARAM(handle.getChildHandle(i, elementHandle));

		NvParameterized::Interface* currentReference = NULL;
		VERIFY_PARAM(mParams->getParamRef(elementHandle, currentReference));

		// BUFFER_FORMAT_ADD This is just a bookmark for places where to add buffer formats
		if
(currentReference == NULL && vertexCount > 0) + { + const char* className = NULL; + + switch (outFormat) + { + case RenderDataFormat::UBYTE1: + case RenderDataFormat::BYTE_UNORM1: + case RenderDataFormat::BYTE_SNORM1: + className = BufferU8x1::staticClassName(); + break; + case RenderDataFormat::UBYTE2: + case RenderDataFormat::BYTE_UNORM2: + case RenderDataFormat::BYTE_SNORM2: + className = BufferU8x2::staticClassName(); + break; + case RenderDataFormat::UBYTE3: + case RenderDataFormat::BYTE_UNORM3: + case RenderDataFormat::BYTE_SNORM3: + className = BufferU8x3::staticClassName(); + break; + case RenderDataFormat::UBYTE4: + case RenderDataFormat::BYTE_UNORM4: + case RenderDataFormat::BYTE_SNORM4: + case RenderDataFormat::R8G8B8A8: + case RenderDataFormat::B8G8R8A8: + className = BufferU8x4::staticClassName(); + break; + case RenderDataFormat::SHORT1: + case RenderDataFormat::USHORT1: + case RenderDataFormat::SHORT_UNORM1: + case RenderDataFormat::SHORT_SNORM1: + case RenderDataFormat::HALF1: + className = BufferU16x1::staticClassName(); + break; + case RenderDataFormat::SHORT2: + case RenderDataFormat::USHORT2: + case RenderDataFormat::SHORT_UNORM2: + case RenderDataFormat::SHORT_SNORM2: + case RenderDataFormat::HALF2: + className = BufferU16x2::staticClassName(); + break; + case RenderDataFormat::SHORT3: + case RenderDataFormat::USHORT3: + case RenderDataFormat::SHORT_UNORM3: + case RenderDataFormat::SHORT_SNORM3: + case RenderDataFormat::HALF3: + className = BufferU16x3::staticClassName(); + break; + case RenderDataFormat::SHORT4: + case RenderDataFormat::USHORT4: + case RenderDataFormat::SHORT_UNORM4: + case RenderDataFormat::SHORT_SNORM4: + case RenderDataFormat::HALF4: + className = BufferU16x4::staticClassName(); + break; + case RenderDataFormat::UINT1: + className = BufferU32x1::staticClassName(); + break; + case RenderDataFormat::UINT2: + className = BufferU32x2::staticClassName(); + break; + case RenderDataFormat::UINT3: + className = 
BufferU32x3::staticClassName(); + break; + case RenderDataFormat::UINT4: + className = BufferU32x4::staticClassName(); + break; + case RenderDataFormat::FLOAT1: + className = BufferF32x1::staticClassName(); + break; + case RenderDataFormat::FLOAT2: + className = BufferF32x2::staticClassName(); + break; + case RenderDataFormat::FLOAT3: + className = BufferF32x3::staticClassName(); + break; + case RenderDataFormat::FLOAT4: + case RenderDataFormat::R32G32B32A32_FLOAT: + case RenderDataFormat::B32G32R32A32_FLOAT: + className = BufferF32x4::staticClassName(); + break; + default: + PX_ALWAYS_ASSERT(); + break; + } + + if (className != NULL) + { + currentReference = GetInternalApexSDK()->getParameterizedTraits()->createNvParameterized(className); + } + + if (currentReference != NULL) + { + NvParameterized::Handle arrayHandle(*currentReference); + VERIFY_PARAM(currentReference->getParameterHandle("data", arrayHandle)); + PX_ASSERT(arrayHandle.isValid()); + VERIFY_PARAM(arrayHandle.resizeArray((int32_t)vertexCount)); + + mParams->setParamRef(elementHandle, currentReference); + } + } + else if (vertexCount > 0) + { + NvParameterized::Interface* oldReference = currentReference; + PX_ASSERT(oldReference != NULL); + currentReference = GetInternalApexSDK()->getParameterizedTraits()->createNvParameterized(oldReference->className()); + if (currentReference != NULL) + { + VERIFY_PARAM(currentReference->copy(*oldReference)); + + NvParameterized::Handle arrayHandle(*currentReference); + VERIFY_PARAM(currentReference->getParameterHandle("data", arrayHandle)); + VERIFY_PARAM(arrayHandle.resizeArray((int32_t)vertexCount)); + } + VERIFY_PARAM(mParams->setParamRef(elementHandle, currentReference)); + oldReference->destroy(); + } + else if (vertexCount == 0) + { + VERIFY_PARAM(mParams->setParamRef(elementHandle, NULL)); + + if (currentReference != NULL) + { + currentReference->destroy(); + } + } + } +} + + + +void ApexVertexBuffer::preSerialize(void*) +{ + 
PX_ASSERT((int32_t)mFormat.getBufferCount() == mParams->buffers.arraySizes[0]); + ParamArray<NvParameterized::Interface*> buffers(mParams, "buffers", reinterpret_cast<ParamDynamicArrayStruct*>(&mParams->buffers)); + for (uint32_t i = 0; i < mFormat.getBufferCount(); i++) + { + if (!mFormat.getBufferSerialize(i)) + { + // [i] no longer needs to be destroyed because the resize will handle it + buffers.replaceWithLast(i); + mFormat.bufferReplaceWithLast(i); + i--; + } + } + + PX_ASSERT((int32_t)mFormat.getBufferCount() == mParams->buffers.arraySizes[0]); +} + +bool ApexVertexBuffer::getBufferData(void* dstBuffer, nvidia::RenderDataFormat::Enum dstBufferFormat, uint32_t dstBufferStride, uint32_t bufferIndex, + uint32_t startIndex, uint32_t elementCount) const +{ + const void* data = getBuffer(bufferIndex); + if (data == NULL) + { + return false; + } + nvidia::RenderDataFormat::Enum srcFormat = getFormat().getBufferFormat(bufferIndex); + return copyRenderVertexBuffer(dstBuffer, dstBufferFormat, dstBufferStride, 0, data, srcFormat, RenderDataFormat::getFormatDataSize(srcFormat), startIndex, elementCount); +} + +void* ApexVertexBuffer::getBuffer(uint32_t bufferIndex) +{ + if (bufferIndex < (uint32_t)mParams->buffers.arraySizes[0]) + { + NvParameterized::Interface* buffer = mParams->buffers.buf[bufferIndex]; + if (buffer != NULL) + { + BufferU8x1* particularBuffer = DYNAMIC_CAST(BufferU8x1*)(buffer); + return particularBuffer->data.buf; + } + } + + return NULL; +} + +uint32_t ApexVertexBuffer::getAllocationSize() const +{ + uint32_t size = sizeof(ApexVertexBuffer); + + for (uint32_t index = 0; (int32_t)index < mParams->buffers.arraySizes[0]; ++index) + { + PX_ASSERT(index < getFormat().getBufferCount()); + if (index >= getFormat().getBufferCount()) + { + break; + } + const uint32_t dataSize = RenderDataFormat::getFormatDataSize(getFormat().getBufferFormat(index)); + NvParameterized::Interface* buffer = mParams->buffers.buf[index]; + if (buffer != NULL) + { + BufferU8x1* 
particularBuffer = DYNAMIC_CAST(BufferU8x1*)(buffer); + size += particularBuffer->data.arraySizes[0] * dataSize; + } + } + + return size; +} + +void ApexVertexBuffer::setParams(VertexBufferParameters* param) +{ + if (mParams != param) + { + if (mParams != NULL) + { + mParams->setSerializationCallback(NULL); + } + + mParams = param; + + if (mParams != NULL) + { + NvParameterized::Traits* traits = GetInternalApexSDK()->getParameterizedTraits(); + if (mParams->vertexFormat != NULL) + { + if (mFormat.mParams && mFormat.mParams != (VertexFormatParameters*)mParams->vertexFormat) + { + mFormat.mParams->destroy(); + } + } + else + { + mParams->vertexFormat = DYNAMIC_CAST(VertexFormatParameters*)(traits->createNvParameterized(VertexFormatParameters::staticClassName())); + } + } + + mFormat.mParams = mParams != NULL ? static_cast<VertexFormatParameters*>(mParams->vertexFormat) : NULL; + mFormat.mOwnsParams = false; + + if (mParams != NULL) + { + mParams->setSerializationCallback(this); + } + } +} + +namespace +{ + class PxMat34Legacy + { + float f[12]; + }; +} + +void ApexVertexBuffer::applyPermutation(const Array<uint32_t>& permutation) +{ + const uint32_t numVertices = mParams->vertexCount; + PX_ASSERT(numVertices == permutation.size()); + for (uint32_t i = 0; i < (uint32_t)mParams->buffers.arraySizes[0]; i++) + { + NvParameterized::Interface* bufferInterface = mParams->buffers.buf[i]; + RenderDataFormat::Enum format = getFormat().getBufferFormat(i); + switch(format) + { + // all 1 byte + case RenderDataFormat::UBYTE1: + case RenderDataFormat::BYTE_UNORM1: + case RenderDataFormat::BYTE_SNORM1: + { + BufferU8x1* byte1 = static_cast<BufferU8x1*>(bufferInterface); + PX_ASSERT(numVertices == (uint32_t)byte1->data.arraySizes[0]); + ApexPermute(byte1->data.buf, permutation.begin(), numVertices); + } + break; + + // all 2 byte + case RenderDataFormat::UBYTE2: + case RenderDataFormat::USHORT1: + case RenderDataFormat::SHORT1: + case RenderDataFormat::BYTE_UNORM2: + case 
RenderDataFormat::SHORT_UNORM1: + case RenderDataFormat::BYTE_SNORM2: + case RenderDataFormat::SHORT_SNORM1: + case RenderDataFormat::HALF1: + { + BufferU16x1* short1 = static_cast<BufferU16x1*>(bufferInterface); + PX_ASSERT(numVertices == (uint32_t)short1->data.arraySizes[0]); + ApexPermute(short1->data.buf, permutation.begin(), numVertices); + } + break; + + // all 3 byte + case RenderDataFormat::UBYTE3: + case RenderDataFormat::BYTE_UNORM3: + case RenderDataFormat::BYTE_SNORM3: + { + BufferU8x3* byte3 = static_cast<BufferU8x3*>(bufferInterface); + PX_ASSERT(numVertices == (uint32_t)byte3->data.arraySizes[0]); + ApexPermute(byte3->data.buf, permutation.begin(), numVertices); + } + break; + + // all 4 byte + case RenderDataFormat::UBYTE4: + case RenderDataFormat::USHORT2: + case RenderDataFormat::SHORT2: + case RenderDataFormat::UINT1: + case RenderDataFormat::R8G8B8A8: + case RenderDataFormat::B8G8R8A8: + case RenderDataFormat::BYTE_UNORM4: + case RenderDataFormat::SHORT_UNORM2: + case RenderDataFormat::BYTE_SNORM4: + case RenderDataFormat::SHORT_SNORM2: + case RenderDataFormat::HALF2: + case RenderDataFormat::FLOAT1: + case RenderDataFormat::BYTE_SNORM4_QUATXYZW: + case RenderDataFormat::SHORT_SNORM4_QUATXYZW: + { + BufferU32x1* int1 = static_cast<BufferU32x1*>(bufferInterface); + PX_ASSERT(numVertices == (uint32_t)int1->data.arraySizes[0]); + ApexPermute(int1->data.buf, permutation.begin(), numVertices); + } + break; + + // all 6 byte + case RenderDataFormat::USHORT3: + case RenderDataFormat::SHORT3: + case RenderDataFormat::SHORT_UNORM3: + case RenderDataFormat::SHORT_SNORM3: + case RenderDataFormat::HALF3: + { + BufferU16x3* short3 = static_cast<BufferU16x3*>(bufferInterface); + PX_ASSERT(numVertices == (uint32_t)short3->data.arraySizes[0]); + ApexPermute(short3->data.buf, permutation.begin(), numVertices); + } + break; + + // all 8 byte + case RenderDataFormat::USHORT4: + case RenderDataFormat::SHORT4: + case RenderDataFormat::SHORT_UNORM4: + case 
RenderDataFormat::SHORT_SNORM4: + case RenderDataFormat::UINT2: + case RenderDataFormat::HALF4: + case RenderDataFormat::FLOAT2: + { + BufferU32x2* int2 = static_cast<BufferU32x2*>(bufferInterface); + PX_ASSERT(numVertices == (uint32_t)int2->data.arraySizes[0]); + ApexPermute(int2->data.buf, permutation.begin(), numVertices); + } + break; + + // all 12 byte + case RenderDataFormat::UINT3: + case RenderDataFormat::FLOAT3: + { + BufferU32x3* int3 = static_cast<BufferU32x3*>(bufferInterface); + PX_ASSERT(numVertices == (uint32_t)int3->data.arraySizes[0]); + ApexPermute(int3->data.buf, permutation.begin(), numVertices); + } + break; + + // all 16 byte + case RenderDataFormat::UINT4: + case RenderDataFormat::R32G32B32A32_FLOAT: + case RenderDataFormat::B32G32R32A32_FLOAT: + case RenderDataFormat::FLOAT4: + case RenderDataFormat::FLOAT4_QUAT: + { + BufferU32x4* int4 = static_cast<BufferU32x4*>(bufferInterface); + PX_ASSERT(numVertices == (uint32_t)int4->data.arraySizes[0]); + ApexPermute(int4->data.buf, permutation.begin(), numVertices); + } + break; + + // all 36 byte + case RenderDataFormat::FLOAT3x3: + { + BufferF32x1* float1 = static_cast<BufferF32x1*>(bufferInterface); + PX_ASSERT(numVertices == (uint32_t)float1->data.arraySizes[0]); + ApexPermute((PxMat33*)float1->data.buf, permutation.begin(), numVertices); + } + break; + + // all 48 byte + case RenderDataFormat::FLOAT3x4: + { + BufferF32x1* float1 = static_cast<BufferF32x1*>(bufferInterface); + PX_ASSERT(numVertices == (uint32_t)float1->data.arraySizes[0]); + ApexPermute((PxMat34Legacy*)float1->data.buf, permutation.begin(), numVertices); + + } + break; + + // all 64 byte + case RenderDataFormat::FLOAT4x4: + { + BufferF32x1* float1 = static_cast<BufferF32x1*>(bufferInterface); + PX_ASSERT(numVertices == (uint32_t)float1->data.arraySizes[0]); + ApexPermute((PxMat44*)float1->data.buf, permutation.begin(), numVertices); + } + break; + + // fix gcc warnings + case RenderDataFormat::UNSPECIFIED: + case 
RenderDataFormat::NUM_FORMATS: + break; + } + } +} + + + +} +} // end namespace nvidia::apex diff --git a/APEX_1.4/framework/src/ApexVertexFormat.cpp b/APEX_1.4/framework/src/ApexVertexFormat.cpp new file mode 100644 index 00000000..17d0ef48 --- /dev/null +++ b/APEX_1.4/framework/src/ApexVertexFormat.cpp @@ -0,0 +1,404 @@ +/* + * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved. + * + * NVIDIA CORPORATION and its licensors retain all intellectual property + * and proprietary rights in and to this software, related documentation + * and any modifications thereto. Any use, reproduction, disclosure or + * distribution of this software and related documentation without an express + * license agreement from NVIDIA CORPORATION is strictly prohibited. + */ + + +#include "ApexVertexFormat.h" +#include "ApexSDKIntl.h" + +#include <ParamArray.h> + +namespace nvidia +{ +namespace apex +{ + +// Local functions and definitions + +PX_INLINE char* apex_strdup(const char* input) +{ + if (input == NULL) + { + return NULL; + } + + size_t len = strlen(input); + + char* result = (char*)PX_ALLOC(sizeof(char) * (len + 1), PX_DEBUG_EXP("apex_strdup")); +#ifdef WIN32 + strncpy_s(result, len + 1, input, len); +#else + strncpy(result, input, len); +#endif + + return result; +} + +PX_INLINE uint32_t hash(const char* string) +{ + // "DJB" string hash + uint32_t h = 5381; + char c; + while ((c = *string++) != '\0') + { + h = ((h << 5) + h) ^ c; + } + return h; +} + +struct SemanticNameAndID +{ + SemanticNameAndID(const char* name, VertexFormat::BufferID id) : m_name(name), m_id(id) + { + PX_ASSERT(m_id != 0 || nvidia::strcmp(m_name, "SEMANTIC_INVALID") == 0); + } + const char* m_name; + VertexFormat::BufferID m_id; +}; + +#define SEMANTIC_NAME_AND_ID( name ) SemanticNameAndID( name, (VertexFormat::BufferID)hash( name ) ) + +static const SemanticNameAndID sSemanticNamesAndIDs[] = +{ + SEMANTIC_NAME_AND_ID("SEMANTIC_POSITION"), + SEMANTIC_NAME_AND_ID("SEMANTIC_NORMAL"), + 
SEMANTIC_NAME_AND_ID("SEMANTIC_TANGENT"), + SEMANTIC_NAME_AND_ID("SEMANTIC_BINORMAL"), + SEMANTIC_NAME_AND_ID("SEMANTIC_COLOR"), + SEMANTIC_NAME_AND_ID("SEMANTIC_TEXCOORD0"), + SEMANTIC_NAME_AND_ID("SEMANTIC_TEXCOORD1"), + SEMANTIC_NAME_AND_ID("SEMANTIC_TEXCOORD2"), + SEMANTIC_NAME_AND_ID("SEMANTIC_TEXCOORD3"), + SEMANTIC_NAME_AND_ID("SEMANTIC_BONE_INDEX"), + SEMANTIC_NAME_AND_ID("SEMANTIC_BONE_WEIGHT"), + SEMANTIC_NAME_AND_ID("SEMANTIC_DISPLACEMENT_TEXCOORD"), + SEMANTIC_NAME_AND_ID("SEMANTIC_DISPLACEMENT_FLAGS"), + + SemanticNameAndID("SEMANTIC_INVALID", (VertexFormat::BufferID)0) +}; + + +// VertexFormat implementation +void ApexVertexFormat::reset() +{ + if (mParams != NULL) + { + mParams->winding = 0; + mParams->hasSeparateBoneBuffer = 0; + } + clearBuffers(); +} + +void ApexVertexFormat::setWinding(RenderCullMode::Enum winding) +{ + mParams->winding = winding; +} + +void ApexVertexFormat::setHasSeparateBoneBuffer(bool hasSeparateBoneBuffer) +{ + mParams->hasSeparateBoneBuffer = hasSeparateBoneBuffer; +} + +RenderCullMode::Enum ApexVertexFormat::getWinding() const +{ + return (RenderCullMode::Enum)mParams->winding; +} + +bool ApexVertexFormat::hasSeparateBoneBuffer() const +{ + return mParams->hasSeparateBoneBuffer; +} + +const char* ApexVertexFormat::getSemanticName(RenderVertexSemantic::Enum semantic) const +{ + PX_ASSERT((uint32_t)semantic < RenderVertexSemantic::NUM_SEMANTICS); + return (uint32_t)semantic < RenderVertexSemantic::NUM_SEMANTICS ? sSemanticNamesAndIDs[semantic].m_name : NULL; +} + +VertexFormat::BufferID ApexVertexFormat::getSemanticID(RenderVertexSemantic::Enum semantic) const +{ + PX_ASSERT((uint32_t)semantic < RenderVertexSemantic::NUM_SEMANTICS); + return (uint32_t)semantic < RenderVertexSemantic::NUM_SEMANTICS ? sSemanticNamesAndIDs[semantic].m_id : (BufferID)0; +} + +VertexFormat::BufferID ApexVertexFormat::getID(const char* name) const +{ + if (name == NULL) + { + return (BufferID)0; + } + const BufferID id = hash(name); + return id ? 
id : (BufferID)1; // We reserve 0 for an invalid ID +} + +int32_t ApexVertexFormat::addBuffer(const char* name) +{ + if (name == NULL) + { + return -1; + } + + const BufferID id = getID(name); + + int32_t index = getBufferIndexFromID(id); + if (index >= 0) + { + return index; + } + + int32_t semantic = 0; + for (; semantic < RenderVertexSemantic::NUM_SEMANTICS; ++semantic) + { + if (getSemanticID((RenderVertexSemantic::Enum)semantic) == id) + { + break; + } + } + if (semantic == RenderVertexSemantic::NUM_SEMANTICS) + { + semantic = RenderVertexSemantic::CUSTOM; + } + + NvParameterized::Handle handle(*mParams); + mParams->getParameterHandle("bufferFormats", handle); + + mParams->getArraySize(handle, index); + + mParams->resizeArray(handle, index + 1); + + NvParameterized::Handle elementHandle(*mParams); + handle.getChildHandle(index, elementHandle); + NvParameterized::Handle subElementHandle(*mParams); + elementHandle.getChildHandle(mParams, "name", subElementHandle); + mParams->setParamString(subElementHandle, name); + elementHandle.getChildHandle(mParams, "semantic", subElementHandle); + mParams->setParamI32(subElementHandle, semantic); + elementHandle.getChildHandle(mParams, "id", subElementHandle); + mParams->setParamU32(subElementHandle, (uint32_t)id); + elementHandle.getChildHandle(mParams, "format", subElementHandle); + mParams->setParamU32(subElementHandle, (uint32_t)RenderDataFormat::UNSPECIFIED); + elementHandle.getChildHandle(mParams, "access", subElementHandle); + mParams->setParamU32(subElementHandle, (uint32_t)RenderDataAccess::STATIC); + elementHandle.getChildHandle(mParams, "serialize", subElementHandle); + mParams->setParamBool(subElementHandle, true); + + return index; +} + +bool ApexVertexFormat::bufferReplaceWithLast(uint32_t index) +{ + PX_ASSERT((int32_t)index < mParams->bufferFormats.arraySizes[0]); + if ((int32_t)index < mParams->bufferFormats.arraySizes[0]) + { + ParamArray<VertexFormatParametersNS::BufferFormat_Type> bufferFormats(mParams, 
"bufferFormats", reinterpret_cast<ParamDynamicArrayStruct*>(&mParams->bufferFormats)); + bufferFormats.replaceWithLast(index); + return true; + } + + return false; +} + +bool ApexVertexFormat::setBufferFormat(uint32_t index, RenderDataFormat::Enum format) +{ + if (index < getBufferCount()) + { + mParams->bufferFormats.buf[index].format = format; + return true; + } + + return false; +} + +bool ApexVertexFormat::setBufferAccess(uint32_t index, RenderDataAccess::Enum access) +{ + if (index < getBufferCount()) + { + mParams->bufferFormats.buf[index].access = access; + return true; + } + + return false; +} + +bool ApexVertexFormat::setBufferSerialize(uint32_t index, bool serialize) +{ + if (index < getBufferCount()) + { + mParams->bufferFormats.buf[index].serialize = serialize; + return true; + } + + return false; +} + +const char* ApexVertexFormat::getBufferName(uint32_t index) const +{ + return index < getBufferCount() ? (const char*)mParams->bufferFormats.buf[index].name : NULL; +} + +RenderVertexSemantic::Enum ApexVertexFormat::getBufferSemantic(uint32_t index) const +{ + return index < getBufferCount() ? (RenderVertexSemantic::Enum)mParams->bufferFormats.buf[index].semantic : RenderVertexSemantic::NUM_SEMANTICS; +} + +VertexFormat::BufferID ApexVertexFormat::getBufferID(uint32_t index) const +{ + return index < getBufferCount() ? (BufferID)mParams->bufferFormats.buf[index].id : (BufferID)0; +} + +RenderDataFormat::Enum ApexVertexFormat::getBufferFormat(uint32_t index) const +{ + return index < getBufferCount() ? (RenderDataFormat::Enum)mParams->bufferFormats.buf[index].format : RenderDataFormat::UNSPECIFIED; +} + +RenderDataAccess::Enum ApexVertexFormat::getBufferAccess(uint32_t index) const +{ + return index < getBufferCount() ? (RenderDataAccess::Enum)mParams->bufferFormats.buf[index].access : RenderDataAccess::ACCESS_TYPE_COUNT; +} + +bool ApexVertexFormat::getBufferSerialize(uint32_t index) const +{ + return index < getBufferCount() ? 
mParams->bufferFormats.buf[index].serialize : false; +} + +uint32_t ApexVertexFormat::getBufferCount() const +{ + return (uint32_t)mParams->bufferFormats.arraySizes[0]; +} + +uint32_t ApexVertexFormat::getCustomBufferCount() const +{ + PX_ASSERT(mParams != NULL); + uint32_t customBufferCount = 0; + for (int32_t i = 0; i < mParams->bufferFormats.arraySizes[0]; ++i) + { + if (mParams->bufferFormats.buf[i].semantic == RenderVertexSemantic::CUSTOM) + { + ++customBufferCount; + } + } + return customBufferCount; +} + +int32_t ApexVertexFormat::getBufferIndexFromID(BufferID id) const +{ + for (int32_t i = 0; i < mParams->bufferFormats.arraySizes[0]; ++i) + { + if (mParams->bufferFormats.buf[i].id == (uint32_t)id) + { + return i; + } + } + + return -1; +} + + + +// ApexVertexFormat functions + +ApexVertexFormat::ApexVertexFormat() +{ + NvParameterized::Traits* traits = GetInternalApexSDK()->getParameterizedTraits(); + mParams = DYNAMIC_CAST(VertexFormatParameters*)(traits->createNvParameterized(VertexFormatParameters::staticClassName())); + mOwnsParams = mParams != NULL; +} + +ApexVertexFormat::ApexVertexFormat(VertexFormatParameters* params) : mParams(params), mOwnsParams(false) +{ +} + +ApexVertexFormat::ApexVertexFormat(const ApexVertexFormat& f) : VertexFormat(f) +{ + NvParameterized::Traits* traits = GetInternalApexSDK()->getParameterizedTraits(); + mParams = DYNAMIC_CAST(VertexFormatParameters*)(traits->createNvParameterized(VertexFormatParameters::staticClassName())); + mOwnsParams = mParams != NULL; + if (mParams) + { + copy(f); + } +} + +ApexVertexFormat::~ApexVertexFormat() +{ + if (mOwnsParams && mParams != NULL) + { + mParams->destroy(); + } +} + +bool ApexVertexFormat::operator == (const VertexFormat& format) const +{ + if (getWinding() != format.getWinding()) + { + return false; + } + + if (hasSeparateBoneBuffer() != format.hasSeparateBoneBuffer()) + { + return false; + } + + if (getBufferCount() != format.getBufferCount()) + { + return false; + } + + for 
(uint32_t thisIndex = 0; thisIndex < getBufferCount(); ++thisIndex) + { + BufferID id = getBufferID(thisIndex); + const int32_t thatIndex = format.getBufferIndexFromID(id); + if (thatIndex < 0) + { + return false; + } + if (getBufferFormat(thisIndex) != format.getBufferFormat((uint32_t)thatIndex)) + { + return false; + } + if (getBufferAccess(thisIndex) != format.getBufferAccess((uint32_t)thatIndex)) + { + return false; + } + } + + return true; +} + +void ApexVertexFormat::copy(const ApexVertexFormat& other) +{ + reset(); + + setWinding(other.getWinding()); + setHasSeparateBoneBuffer(other.hasSeparateBoneBuffer()); + + for (uint32_t i = 0; i < other.getBufferCount(); ++i) + { + const char* name = other.getBufferName(i); + const uint32_t index = (uint32_t)addBuffer(name); + setBufferFormat(index, other.getBufferFormat(i)); + setBufferAccess(index, other.getBufferAccess(i)); + setBufferSerialize(index, other.getBufferSerialize(i)); + } +} + +void ApexVertexFormat::clearBuffers() +{ + if (mParams) + { + NvParameterized::Handle handle(*mParams); + + mParams->getParameterHandle("bufferFormats", handle); + handle.resizeArray(0); + } +} + + +} +} // end namespace nvidia::apex diff --git a/APEX_1.4/framework/src/FrameworkProfile.cpp b/APEX_1.4/framework/src/FrameworkProfile.cpp new file mode 100644 index 00000000..73688a65 --- /dev/null +++ b/APEX_1.4/framework/src/FrameworkProfile.cpp @@ -0,0 +1,22 @@ +/* + * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved. + * + * NVIDIA CORPORATION and its licensors retain all intellectual property + * and proprietary rights in and to this software, related documentation + * and any modifications thereto. Any use, reproduction, disclosure or + * distribution of this software and related documentation without an express + * license agreement from NVIDIA CORPORATION is strictly prohibited. 
+ */ + + +#include "ApexDefs.h" +#include "ApexSDKImpl.h" +#include "FrameworkPerfScope.h" + +namespace Framework +{ + +void initFrameworkProfiling(nvidia::apex::ApexSDKImpl*) {} +void releaseFrameworkProfiling() {} + +} // end namespace Framework diff --git a/APEX_1.4/framework/src/MirrorSceneImpl.cpp b/APEX_1.4/framework/src/MirrorSceneImpl.cpp new file mode 100644 index 00000000..d3ac9db1 --- /dev/null +++ b/APEX_1.4/framework/src/MirrorSceneImpl.cpp @@ -0,0 +1,648 @@ +/* + * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved. + * + * NVIDIA CORPORATION and its licensors retain all intellectual property + * and proprietary rights in and to this software, related documentation + * and any modifications thereto. Any use, reproduction, disclosure or + * distribution of this software and related documentation without an express + * license agreement from NVIDIA CORPORATION is strictly prohibited. + */ + + +#include "MirrorSceneImpl.h" + +#if PX_PHYSICS_VERSION_MAJOR == 3 + +#include "PxScene.h" +#include "PxRigidDynamic.h" +#include "PxMaterial.h" +#include "PxSphereGeometry.h" +#include "PxRigidDynamic.h" +#include "PxRigidStatic.h" +#include "PxShape.h" +#include "ApexSDKIntl.h" +#include "PsInlineArray.h" +#include "PxPhysics.h" + +#pragma warning(disable:4100) + +namespace nvidia +{ + +namespace apex +{ + + using namespace physx; + +bool copyStaticProperties(PxRigidActor& to, const PxRigidActor& from,MirrorScene::MirrorFilter &mirrorFilter) +{ + shdfnd::InlineArray<PxShape*, 64> shapes; + shapes.resize(from.getNbShapes()); + + uint32_t shapeCount = from.getNbShapes(); + from.getShapes(shapes.begin(), shapeCount); + + shdfnd::InlineArray<PxMaterial*, 64> materials; + for(uint32_t i = 0; i < shapeCount; i++) + { + PxShape* s = shapes[i]; + + if ( mirrorFilter.shouldMirror(*s) ) + { + uint32_t materialCount = s->getNbMaterials(); + materials.resize(materialCount); + s->getMaterials(materials.begin(), materialCount); + PxShape* shape = 
to.createShape(s->getGeometry().any(), materials.begin(), static_cast<uint16_t>(materialCount)); + shape->setLocalPose( s->getLocalPose()); + shape->setContactOffset(s->getContactOffset()); + shape->setRestOffset(s->getRestOffset()); + shape->setFlags(s->getFlags()); + shape->setSimulationFilterData(s->getSimulationFilterData()); + shape->setQueryFilterData(s->getQueryFilterData()); + mirrorFilter.reviseMirrorShape(*shape); + } + } + + to.setActorFlags(from.getActorFlags()); + to.setOwnerClient(from.getOwnerClient()); + to.setDominanceGroup(from.getDominanceGroup()); + + if ( to.getNbShapes() ) + { + mirrorFilter.reviseMirrorActor(to); + } + + return to.getNbShapes() != 0; +} + +PxRigidStatic* CloneStatic(PxPhysics& physicsSDK, + const PxTransform& transform, + const PxRigidActor& from, + MirrorScene::MirrorFilter &mirrorFilter) +{ + PxRigidStatic* to = physicsSDK.createRigidStatic(transform); + if(!to) + return NULL; + + if ( !copyStaticProperties(*to, from,mirrorFilter) ) + { + to->release(); + to = NULL; + } + + return to; +} + +PxRigidDynamic* CloneDynamic(PxPhysics& physicsSDK, + const PxTransform& transform, + const PxRigidDynamic& from, + MirrorScene::MirrorFilter &mirrorFilter) +{ + PxRigidDynamic* to = physicsSDK.createRigidDynamic(transform); + if(!to) + return NULL; + + if ( !copyStaticProperties(*to, from, mirrorFilter) ) + { + to->release(); + to = NULL; + return NULL; + } + + to->setRigidBodyFlags(from.getRigidBodyFlags()); + + to->setMass(from.getMass()); + to->setMassSpaceInertiaTensor(from.getMassSpaceInertiaTensor()); + to->setCMassLocalPose(from.getCMassLocalPose()); + + if ( !(to->getRigidBodyFlags() & PxRigidBodyFlag::eKINEMATIC) ) + { + to->setLinearVelocity(from.getLinearVelocity()); + to->setAngularVelocity(from.getAngularVelocity()); + } + + to->setLinearDamping(from.getAngularDamping()); + to->setAngularDamping(from.getAngularDamping()); + + to->setMaxAngularVelocity(from.getMaxAngularVelocity()); + + uint32_t posIters, velIters; + 
from.getSolverIterationCounts(posIters, velIters); + to->setSolverIterationCounts(posIters, velIters); + + to->setSleepThreshold(from.getSleepThreshold()); + + to->setContactReportThreshold(from.getContactReportThreshold()); + + return to; +} + + +MirrorSceneImpl::MirrorSceneImpl(physx::PxScene &primaryScene, + physx::PxScene &mirrorScene, + MirrorScene::MirrorFilter &mirrorFilter, + float mirrorStaticDistance, + float mirrorDynamicDistance, + float mirrorDistanceThreshold) + : mPrimaryScene(primaryScene) + , mMirrorScene(mirrorScene) + , mMirrorFilter(mirrorFilter) + , mMirrorStaticDistance(mirrorStaticDistance) + , mMirrorDynamicDistance(mirrorDynamicDistance) + , mMirrorDistanceThreshold(mirrorDistanceThreshold*mirrorDistanceThreshold) + , mTriggerActor(NULL) + , mTriggerMaterial(NULL) + , mTriggerShapeStatic(NULL) + , mTriggerShapeDynamic(NULL) + , mSimulationEventCallback(NULL) +{ + mLastCameraLocation = PxVec3(1e9,1e9,1e9); + primaryScene.getPhysics().registerDeletionListener(*this,physx::PxDeletionEventFlag::eMEMORY_RELEASE | physx::PxDeletionEventFlag::eUSER_RELEASE); +} + +MirrorSceneImpl::~MirrorSceneImpl(void) +{ + if ( mTriggerActor ) + { + mPrimaryScene.lockWrite(__FILE__,__LINE__); + mTriggerActor->release(); + mPrimaryScene.unlockWrite(); + } + if ( mTriggerMaterial ) + { + mTriggerMaterial->release(); + } + mPrimaryScene.getPhysics().unregisterDeletionListener(*this); +} + +void MirrorSceneImpl::createTriggerActor(const PxVec3 &cameraPosition) +{ + PX_ASSERT( mTriggerActor == NULL ); + mTriggerActor = mPrimaryScene.getPhysics().createRigidDynamic( PxTransform(cameraPosition) ); + PX_ASSERT(mTriggerActor); + if ( mTriggerActor ) + { + mTriggerActor->setRigidBodyFlag(physx::PxRigidBodyFlag::eKINEMATIC,true); + physx::PxSphereGeometry staticSphere; + physx::PxSphereGeometry dynamicSphere; + staticSphere.radius = mMirrorStaticDistance; + dynamicSphere.radius = mMirrorDynamicDistance; + mTriggerMaterial = mPrimaryScene.getPhysics().createMaterial(1,1,1); 
+ PX_ASSERT(mTriggerMaterial); + if ( mTriggerMaterial ) + { + mTriggerShapeStatic = mTriggerActor->createShape(staticSphere,*mTriggerMaterial); + mTriggerShapeDynamic = mTriggerActor->createShape(dynamicSphere,*mTriggerMaterial); + PX_ASSERT(mTriggerShapeStatic); + PX_ASSERT(mTriggerShapeDynamic); + if ( mTriggerShapeStatic && mTriggerShapeDynamic ) + { + mPrimaryScene.lockWrite(__FILE__,__LINE__); + + mTriggerActor->setOwnerClient(0); + mTriggerShapeStatic->setFlag(physx::PxShapeFlag::eSCENE_QUERY_SHAPE,false); + mTriggerShapeStatic->setFlag(physx::PxShapeFlag::eSIMULATION_SHAPE,false); + mTriggerShapeStatic->setFlag(physx::PxShapeFlag::eTRIGGER_SHAPE,true); + + mTriggerShapeDynamic->setFlag(physx::PxShapeFlag::eSCENE_QUERY_SHAPE,false); + mTriggerShapeDynamic->setFlag(physx::PxShapeFlag::eSIMULATION_SHAPE,false); + mTriggerShapeDynamic->setFlag(physx::PxShapeFlag::eTRIGGER_SHAPE,true); + + mSimulationEventCallback = mPrimaryScene.getSimulationEventCallback(); // get a copy of the original callback + mPrimaryScene.setSimulationEventCallback(this,0); + mPrimaryScene.addActor(*mTriggerActor); + + mPrimaryScene.unlockWrite(); + } + } + } +} + +// Each frame, we do a shape query for static and dynamic objects +// If this is the first time the synchronize has been called, then we create +// a trigger actor with two spheres in the primary scene. This trigger +// actor is used to detect when objects move in and outside of the static and dynamic +// mirror range specified. 
+void MirrorSceneImpl::synchronizePrimaryScene(const PxVec3 &cameraPos) +{ + PxVec3 diff = cameraPos - mLastCameraLocation; + float dist = diff.magnitudeSquared(); + if ( dist > mMirrorDistanceThreshold ) + { + mLastCameraLocation = cameraPos; + if ( mTriggerActor == NULL ) + { + createTriggerActor(cameraPos); // Create the scene mirroring trigger actor + } + if ( mTriggerActor ) + { + mPrimaryScene.lockWrite(__FILE__,__LINE__); + mTriggerActor->setKinematicTarget( PxTransform(cameraPos) ); // Update the position of the trigger actor to be the current camera location + mPrimaryScene.unlockWrite(); + } + } + // Now, iterate on all of the current actors which are being mirrored + // Only the primary scene after modifies this hash, so it is safe to do this + // without any concerns of thread locking. + // The mirrored scene thread does access the contents of this hash (MirrorActor) + { + mPrimaryScene.lockRead(__FILE__,__LINE__); + for (ActorHash::Iterator i=mActors.getIterator(); !i.done(); ++i) + { + MirrorActor *ma = i->second; + ma->synchronizePose(); // check to see if the position of this object in the primary + // scene has changed. If it has, then we create a command for the mirror scene to update + // it's mirror actor to that new position. + } + mPrimaryScene.unlockRead(); + } +} + +// When the mirrored scene is synchronized, we grab the mirror command buffer +// And then despool all of the create/release/update commands that got posted previously by the +// primary scene thread. A mutex is used to safe brief access to the command buffer. +// A copy of the command buffer is made so that we only grab the mutex for the shorted period +// of time possible. 
+void MirrorSceneImpl::synchronizeMirrorScene(void) +{ + MirrorCommandArray temp; + mMirrorCommandMutex.lock(); + temp = mMirrorCommands; + mMirrorCommands.clear(); + mMirrorCommandMutex.unlock(); + if ( !temp.empty() ) + { + mMirrorScene.lockWrite(__FILE__,__LINE__); + for (uint32_t i=0; i<temp.size(); i++) + { + MirrorCommand &mc = temp[i]; + switch ( mc.mType ) + { + case MCT_CREATE_ACTOR: + { + mc.mMirrorActor->createActor(mMirrorScene); + } + break; + case MCT_RELEASE_ACTOR: + { + delete mc.mMirrorActor; + } + break; + case MCT_UPDATE_POSE: + { + mc.mMirrorActor->updatePose(mc.mPose); + } + break; + default: + break; + } + } + mMirrorScene.unlockWrite(); + } +} + +void MirrorSceneImpl::release(void) +{ + delete this; +} + + +/** +\brief This is called when a breakable constraint breaks. + +\note The user should not release the constraint shader inside this call! + +\param[in] constraints - The constraints which have been broken. +\param[in] count - The number of constraints + +@see PxConstraint PxConstraintDesc.linearBreakForce PxConstraintDesc.angularBreakForce +*/ +void MirrorSceneImpl::onConstraintBreak(PxConstraintInfo* constraints, uint32_t count) +{ + if ( mSimulationEventCallback ) + { + mSimulationEventCallback->onConstraintBreak(constraints,count); + } +} + +/** +\brief This is called during PxScene::fetchResults with the actors which have just been woken up. + +\note Only supported by rigid bodies yet. +\note Only called on actors for which the PxActorFlag eSEND_SLEEP_NOTIFIES has been set. + +\param[in] actors - The actors which just woke up. 
+\param[in] count - The number of actors + +@see PxScene.setSimulationEventCallback() PxSceneDesc.simulationEventCallback PxActorFlag PxActor.setActorFlag() +*/ +void MirrorSceneImpl::onWake(PxActor** actors, uint32_t count) +{ + if ( mSimulationEventCallback ) + { + mSimulationEventCallback->onWake(actors,count); + } +} + +/** +\brief This is called during PxScene::fetchResults with the actors which have just been put to sleep. + +\note Only supported by rigid bodies yet. +\note Only called on actors for which the PxActorFlag eSEND_SLEEP_NOTIFIES has been set. + +\param[in] actors - The actors which have just been put to sleep. +\param[in] count - The number of actors + +@see PxScene.setSimulationEventCallback() PxSceneDesc.simulationEventCallback PxActorFlag PxActor.setActorFlag() +*/ +void MirrorSceneImpl::onSleep(PxActor** actors, uint32_t count) +{ + if ( mSimulationEventCallback ) + { + mSimulationEventCallback->onSleep(actors,count); + } + +} + +/** +\brief The user needs to implement this interface class in order to be notified when +certain contact events occur. + +The method will be called for a pair of actors if one of the colliding shape pairs requested contact notification. +You request which events are reported using the filter shader/callback mechanism (see #PxSimulationFilterShader, +#PxSimulationFilterCallback, #PxPairFlag). + +Do not keep references to the passed objects, as they will be +invalid after this function returns. + +\param[in] pairHeader Information on the two actors whose shapes triggered a contact report. +\param[in] pairs The contact pairs of two actors for which contact reports have been requested. See #PxContactPair. +\param[in] nbPairs The number of provided contact pairs. 
+ +@see PxScene.setSimulationEventCallback() PxSceneDesc.simulationEventCallback PxContactPair PxPairFlag PxSimulationFilterShader PxSimulationFilterCallback +*/ +void MirrorSceneImpl::onContact(const PxContactPairHeader& pairHeader, const PxContactPair* pairs, uint32_t nbPairs) +{ + if ( mSimulationEventCallback ) + { + mSimulationEventCallback->onContact(pairHeader,pairs,nbPairs); + } + +} + +/* +\brief This is called during PxScene::fetchResults with the current trigger pair events. + +Shapes which have been marked as triggers using PxShapeFlag::eTRIGGER_SHAPE will send events +according to the pair flag specification in the filter shader (see #PxPairFlag, #PxSimulationFilterShader). + +\param[in] pairs - The trigger pairs which caused events. +\param[in] count - The number of trigger pairs. + +@see PxScene.setSimulationEventCallback() PxSceneDesc.simulationEventCallback PxPairFlag PxSimulationFilterShader PxShapeFlag PxShape.setFlag() +*/ +void MirrorSceneImpl::onTrigger(PxTriggerPair* pairs, uint32_t count) +{ + mTriggerPairs.clear(); + for (uint32_t i=0; i<count; i++) + { + PxTriggerPair &tp = pairs[i]; + + if ( ( tp.triggerShape == mTriggerShapeStatic ) || ( tp.triggerShape == mTriggerShapeDynamic ) ) + { + if ( tp.flags & PxTriggerPairFlag::eREMOVED_SHAPE_OTHER ) // actor was deleted! + { + // handle shape release.. 
+ mirrorShape(tp); + } + else + { + PxActor *actor = tp.otherActor; + if( mMirrorFilter.shouldMirror(*actor) ) // let the application telll us whether this is an actor we want to mirror or not + { + if ( tp.triggerShape == mTriggerShapeStatic ) + { + if ( actor->getType() == PxActorType::eRIGID_STATIC ) + { + mirrorShape(tp); + } + } + else if ( tp.triggerShape == mTriggerShapeDynamic ) + { + if ( actor->getType() == PxActorType::eRIGID_DYNAMIC ) + { + mirrorShape(tp); + } + } + } + } + } + else + { + mTriggerPairs.pushBack(tp); + } + } + if ( !mTriggerPairs.empty() ) // If some of the triggers were for the application; then we pass them on + { + mSimulationEventCallback->onTrigger(&mTriggerPairs[0],mTriggerPairs.size()); + } +} + +void MirrorSceneImpl::onAdvance(const PxRigidBody*const* bodyBuffer, const PxTransform* poseBuffer, const PxU32 count) +{ + PX_UNUSED(bodyBuffer); + PX_UNUSED(poseBuffer); + PX_UNUSED(count); +} + +void MirrorSceneImpl::mirrorShape(const PxTriggerPair &tp) +{ + size_t hash = (size_t)tp.otherShape; + const ShapeHash::Entry *found = mShapes.find(hash); + MirrorActor *ma = found ? 
found->second : NULL; + if ( tp.flags & PxTriggerPairFlag::eREMOVED_SHAPE_OTHER ) + { + if ( found ) + { + bool kill = ma->removeShape(); + mShapes.erase(hash); + if ( kill ) + { + ma->release(); + mActors.erase( ma->mActorHash ); + } + } + } + else if ( tp.status == PxPairFlag::eNOTIFY_TOUCH_FOUND ) + { + PX_ASSERT( found == NULL ); + size_t actorHash = (size_t) &tp.otherActor; + const ActorHash::Entry *foundActor = mActors.find(actorHash); + if ( foundActor == NULL ) + { + ma = PX_NEW(MirrorActor)(actorHash,*tp.otherActor,*this); + mActors[actorHash] = ma; + } + else + { + ma = foundActor->second; + } + ma->addShape(); + mShapes[hash] = ma; + } + else if ( tp.status == PxPairFlag::eNOTIFY_TOUCH_LOST ) + { + PX_ASSERT( found ); + if ( ma ) + { + bool kill = ma->removeShape(); + mShapes.erase(hash); + if ( kill ) + { + mActors.erase( ma->mActorHash ); + ma->release(); + } + } + } + +} + +void MirrorSceneImpl::postCommand(const MirrorCommand &mc) +{ + mMirrorCommandMutex.lock(); + mMirrorCommands.pushBack(mc); + mMirrorCommandMutex.unlock(); +} + +MirrorActor::MirrorActor(size_t actorHash, + physx::PxRigidActor &actor, + MirrorSceneImpl &mirrorScene) : mMirrorScene(mirrorScene), mPrimaryActor(&actor), mActorHash(actorHash) +{ + mReleasePosted = false; + mMirrorActor = NULL; + mShapeCount = 0; + PxScene *scene = actor.getScene(); + PX_ASSERT(scene); + if ( scene ) + { + scene->lockWrite(__FILE__,__LINE__); + mPrimaryGlobalPose = actor.getGlobalPose(); + PxPhysics *sdk = &scene->getPhysics(); + if ( actor.getType() == physx::PxActorType::eRIGID_STATIC ) + { + mMirrorActor = CloneStatic(*sdk,actor.getGlobalPose(),actor, mirrorScene.getMirrorFilter()); + } + else + { + physx::PxRigidDynamic *rd = static_cast< physx::PxRigidDynamic *>(&actor); + mMirrorActor = CloneDynamic(*sdk,actor.getGlobalPose(),*rd, mirrorScene.getMirrorFilter()); + if ( mMirrorActor ) + { + rd = static_cast< physx::PxRigidDynamic *>(mMirrorActor); + 
rd->setRigidBodyFlag(physx::PxRigidBodyFlag::eKINEMATIC,true); + } + } + scene->unlockWrite(); + if ( mMirrorActor ) + { + MirrorCommand mc(MCT_CREATE_ACTOR,this); + mMirrorScene.postCommand(mc); + } + } +} + +MirrorActor::~MirrorActor(void) +{ + if ( mMirrorActor ) + { + mMirrorActor->release(); + } +} + +void MirrorActor::release(void) +{ + PX_ASSERT( mReleasePosted == false ); + if ( !mReleasePosted ) + { + if ( mPrimaryActor ) + { + } + MirrorCommand mc(MCT_RELEASE_ACTOR,this); + mMirrorScene.postCommand(mc); + mReleasePosted = true; + } +} + +void MirrorActor::createActor(PxScene &scene) +{ + if ( mMirrorActor ) + { + scene.addActor(*mMirrorActor); + } +} + +static bool sameTransform(const PxTransform &a,const PxTransform &b) +{ + if ( a.p == b.p && + a.q.x == b.q.x && + a.q.y == b.q.y && + a.q.z == b.q.z && + a.q.w == b.q.w ) + { + return true; + } + return false; +} + +void MirrorActor::synchronizePose(void) +{ + if ( mPrimaryActor ) + { + PxTransform p = mPrimaryActor->getGlobalPose(); + if ( !sameTransform(p,mPrimaryGlobalPose) ) + { + mPrimaryGlobalPose = p; + MirrorCommand mc(MCT_UPDATE_POSE,this,p); + mMirrorScene.postCommand(mc); + } + } +} + +void MirrorActor::updatePose(const PxTransform &pose) +{ + if ( mMirrorActor ) + { + if ( mMirrorActor->getType() == PxActorType::eRIGID_STATIC ) + { + PxRigidStatic *p = static_cast< PxRigidStatic *>(mMirrorActor); + p->setGlobalPose(pose); + } + else + { + PxRigidDynamic *p = static_cast< PxRigidDynamic *>(mMirrorActor); + p->setKinematicTarget(pose); + } + } +} + +void MirrorSceneImpl::onRelease(const PxBase* observed, + void* /*userData*/, + PxDeletionEventFlag::Enum /*deletionEvent*/) +{ + const physx::PxRigidActor *a = observed->is<PxRigidActor>(); + if ( a ) + { + size_t actorHash = (size_t)a; + const ActorHash::Entry *foundActor = mActors.find(actorHash); + if ( foundActor != NULL ) + { + MirrorActor *ma = foundActor->second; + ma->mPrimaryActor = NULL; + } + } +} + +}; // end apex namespace +}; // end 
physx namespace + +#endif diff --git a/APEX_1.4/framework/src/ThreadPool.cpp b/APEX_1.4/framework/src/ThreadPool.cpp new file mode 100644 index 00000000..1e138231 --- /dev/null +++ b/APEX_1.4/framework/src/ThreadPool.cpp @@ -0,0 +1,295 @@ +/* + * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved. + * + * NVIDIA CORPORATION and its licensors retain all intellectual property + * and proprietary rights in and to this software, related documentation + * and any modifications thereto. Any use, reproduction, disclosure or + * distribution of this software and related documentation without an express + * license agreement from NVIDIA CORPORATION is strictly prohibited. + */ + + +#include "ApexDefs.h" +#if PX_PHYSICS_VERSION_MAJOR == 0 + +#include "ApexSDK.h" + +#include "PsThread.h" +#include "PsSList.h" +#include "PsSync.h" +#include "PsString.h" +#include "PsUserAllocated.h" +#include "PsAllocator.h" + +#include "ThreadPool.h" +#include "ProfilerCallback.h" + +#if PX_WINDOWS_FAMILY +#define PROFILE_TASKS 1 +#else +#define PROFILE_TASKS 1 +#endif + +namespace nvidia +{ +namespace apex +{ + +PxCpuDispatcher* createDefaultThreadPool(unsigned int numThreads) +{ + if (numThreads == 0) + { +#if PX_WINDOWS_FAMILY + numThreads = 4; +#elif PX_APPLE_FAMILY + numThreads = 2; +#endif + } + return PX_NEW(DefaultCpuDispatcher)(numThreads, 0); +} + +DefaultCpuDispatcher::DefaultCpuDispatcher(uint32_t numThreads, uint32_t* affinityMasks) + : mQueueEntryPool(TASK_QUEUE_ENTRY_POOL_SIZE), mNumThreads(numThreads), mShuttingDown(false) +{ + uint32_t defaultAffinityMask = 0; + + // initialize threads first, then start + + mWorkerThreads = reinterpret_cast<CpuWorkerThread*>(PX_ALLOC(numThreads * sizeof(CpuWorkerThread), PX_DEBUG_EXP("CpuWorkerThread"))); + if (mWorkerThreads) + { + for (uint32_t i = 0; i < numThreads; ++i) + { + PX_PLACEMENT_NEW(mWorkerThreads + i, CpuWorkerThread)(); + mWorkerThreads[i].initialize(this); + } + + for (uint32_t i = 0; i < numThreads; ++i) + { + 
mWorkerThreads[i].start(shdfnd::Thread::getDefaultStackSize()); + if (affinityMasks) + { + mWorkerThreads[i].setAffinityMask(affinityMasks[i]); + } + else + { + mWorkerThreads[i].setAffinityMask(defaultAffinityMask); + } + + char threadName[32]; + shdfnd::snprintf(threadName, 32, "PxWorker%02d", i); + mWorkerThreads[i].setName(threadName); + } + } + else + { + mNumThreads = 0; + } +} + + +DefaultCpuDispatcher::~DefaultCpuDispatcher() +{ + for (uint32_t i = 0; i < mNumThreads; ++i) + { + mWorkerThreads[i].signalQuit(); + } + + mShuttingDown = true; + mWorkReady.set(); + for (uint32_t i = 0; i < mNumThreads; ++i) + { + mWorkerThreads[i].waitForQuit(); + } + + for (uint32_t i = 0; i < mNumThreads; ++i) + { + mWorkerThreads[i].~CpuWorkerThread(); + } + + PX_FREE(mWorkerThreads); +} + + +void DefaultCpuDispatcher::submitTask(PxBaseTask& task) +{ + shdfnd::Thread::Id currentThread = shdfnd::Thread::getId(); + + // TODO: Could use TLS to make this more efficient + for (uint32_t i = 0; i < mNumThreads; ++i) + if (mWorkerThreads[i].tryAcceptJobToLocalQueue(task, currentThread)) + { + return mWorkReady.set(); + } + + SharedQueueEntry* entry = mQueueEntryPool.getEntry(&task); + if (entry) + { + mJobList.push(*entry); + mWorkReady.set(); + } +} + +void DefaultCpuDispatcher::flush( PxBaseTask& task, int32_t targetRef) +{ + // TODO: implement + PX_ALWAYS_ASSERT(); + PX_UNUSED(task); + PX_UNUSED(targetRef); +} + +uint32_t DefaultCpuDispatcher::getWorkerCount() const +{ + return mNumThreads; +} + +void DefaultCpuDispatcher::release() +{ + GetApexSDK()->releaseCpuDispatcher(*this); +} + + +PxBaseTask* DefaultCpuDispatcher::getJob(void) +{ + return TaskQueueHelper::fetchTask(mJobList, mQueueEntryPool); +} + + +PxBaseTask* DefaultCpuDispatcher::stealJob() +{ + PxBaseTask* ret = NULL; + + for (uint32_t i = 0; i < mNumThreads; ++i) + { + ret = mWorkerThreads[i].giveUpJob(); + + if (ret != NULL) + { + break; + } + } + + return ret; +} + + +void DefaultCpuDispatcher::resetWakeSignal() +{ 
+ mWorkReady.reset(); + + // The code below is necessary to avoid deadlocks on shut down. + // A thread usually loops as follows: + // while quit is not signaled + // 1) reset wake signal + // 2) fetch work + // 3) if work -> process + // 4) else -> wait for wake signal + // + // If a thread reaches 1) after the thread pool signaled wake up, + // the wake up sync gets reset and all other threads which have not + // passed 4) already will wait forever. + // The code below makes sure that on shutdown, the wake up signal gets + // sent again after it was reset + // + if (mShuttingDown) + { + mWorkReady.set(); + } +} + + +CpuWorkerThread::CpuWorkerThread() + : mQueueEntryPool(TASK_QUEUE_ENTRY_POOL_SIZE) + , mThreadId(0) +{ +} + + +CpuWorkerThread::~CpuWorkerThread() +{ +} + + +void CpuWorkerThread::initialize(DefaultCpuDispatcher* ownerDispatcher) +{ + mOwner = ownerDispatcher; +} + + +bool CpuWorkerThread::tryAcceptJobToLocalQueue(PxBaseTask& task, shdfnd::Thread::Id taskSubmitionThread) +{ + if (taskSubmitionThread == mThreadId) + { + SharedQueueEntry* entry = mQueueEntryPool.getEntry(&task); + if (entry) + { + mLocalJobList.push(*entry); + return true; + } + else + { + return false; + } + } + + return false; +} + + +PxBaseTask* CpuWorkerThread::giveUpJob() +{ + return TaskQueueHelper::fetchTask(mLocalJobList, mQueueEntryPool); +} + + +void CpuWorkerThread::execute() +{ + mThreadId = getId(); + + while (!quitIsSignalled()) + { + mOwner->resetWakeSignal(); + + PxBaseTask* task = TaskQueueHelper::fetchTask(mLocalJobList, mQueueEntryPool); + + if (!task) + { + task = mOwner->getJob(); + } + + if (!task) + { + task = mOwner->stealJob(); + } + + if (task) + { +#if PHYSX_PROFILE_SDK + if (mApexPvdClient!=NULL) + { + task->runProfiled(); + } + else + { + task->run(); + } +#else + task->run(); +#endif + task->release(); + } + else + { + mOwner->waitForWork(); + } + } + + quit(); +}; + + + +} // end pxtask namespace +} // end physx namespace + +#endif // 
PX_PHYSICS_VERSION_MAJOR == 0 diff --git a/APEX_1.4/framework/src/autogen/BufferF32x1.cpp b/APEX_1.4/framework/src/autogen/BufferF32x1.cpp new file mode 100644 index 00000000..4f4b2f36 --- /dev/null +++ b/APEX_1.4/framework/src/autogen/BufferF32x1.cpp @@ -0,0 +1,346 @@ +// This code contains NVIDIA Confidential Information and is disclosed to you +// under a form of NVIDIA software license agreement provided separately to you. +// +// Notice +// NVIDIA Corporation and its licensors retain all intellectual property and +// proprietary rights in and to this software and related documentation and +// any modifications thereto. Any use, reproduction, disclosure, or +// distribution of this software and related documentation without an express +// license agreement from NVIDIA Corporation is strictly prohibited. +// +// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES +// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO +// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT, +// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE. +// +// Information and code furnished is believed to be accurate and reliable. +// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such +// information or for any infringement of patents or other rights of third parties that may +// result from its use. No license is granted by implication or otherwise under any patent +// or patent rights of NVIDIA Corporation. Details are subject to change without notice. +// This code supersedes and replaces all information previously supplied. +// NVIDIA Corporation products are not authorized for use as critical +// components in life support devices or systems without express written approval of +// NVIDIA Corporation. +// +// Copyright (c) 2008-2015 NVIDIA Corporation. All rights reserved. 
+ +// This file was generated by NvParameterized/scripts/GenParameterized.pl + + +#include "BufferF32x1.h" +#include <string.h> +#include <stdlib.h> + +using namespace NvParameterized; + +namespace nvidia +{ +namespace apex +{ + +using namespace BufferF32x1NS; + +const char* const BufferF32x1Factory::vptr = + NvParameterized::getVptr<BufferF32x1, BufferF32x1::ClassAlignment>(); + +const uint32_t NumParamDefs = 3; +static NvParameterized::DefinitionImpl* ParamDefTable; // now allocated in buildTree [NumParamDefs]; + + +static const size_t ParamLookupChildrenTable[] = +{ + 1, 2, +}; + +#define TENUM(type) nvidia::##type +#define CHILDREN(index) &ParamLookupChildrenTable[index] +static const NvParameterized::ParamLookupNode ParamLookupTable[NumParamDefs] = +{ + { TYPE_STRUCT, false, 0, CHILDREN(0), 1 }, + { TYPE_ARRAY, true, (size_t)(&((ParametersStruct*)0)->data), CHILDREN(1), 1 }, // data + { TYPE_F32, false, 1 * sizeof(float), NULL, 0 }, // data[] +}; + + +bool BufferF32x1::mBuiltFlag = false; +NvParameterized::MutexType BufferF32x1::mBuiltFlagMutex; + +BufferF32x1::BufferF32x1(NvParameterized::Traits* traits, void* buf, int32_t* refCount) : + NvParameters(traits, buf, refCount) +{ + //mParameterizedTraits->registerFactory(className(), &BufferF32x1FactoryInst); + + if (!buf) //Do not init data if it is inplace-deserialized + { + initDynamicArrays(); + initStrings(); + initReferences(); + initDefaults(); + } +} + +BufferF32x1::~BufferF32x1() +{ + freeStrings(); + freeReferences(); + freeDynamicArrays(); +} + +void BufferF32x1::destroy() +{ + // We cache these fields here to avoid overwrite in destructor + bool doDeallocateSelf = mDoDeallocateSelf; + NvParameterized::Traits* traits = mParameterizedTraits; + int32_t* refCount = mRefCount; + void* buf = mBuffer; + + this->~BufferF32x1(); + + NvParameters::destroy(this, traits, doDeallocateSelf, refCount, buf); +} + +const NvParameterized::DefinitionImpl* BufferF32x1::getParameterDefinitionTree(void) +{ + if 
(!mBuiltFlag) // Double-checked lock + { + NvParameterized::MutexType::ScopedLock lock(mBuiltFlagMutex); + if (!mBuiltFlag) + { + buildTree(); + } + } + + return(&ParamDefTable[0]); +} + +const NvParameterized::DefinitionImpl* BufferF32x1::getParameterDefinitionTree(void) const +{ + BufferF32x1* tmpParam = const_cast<BufferF32x1*>(this); + + if (!mBuiltFlag) // Double-checked lock + { + NvParameterized::MutexType::ScopedLock lock(mBuiltFlagMutex); + if (!mBuiltFlag) + { + tmpParam->buildTree(); + } + } + + return(&ParamDefTable[0]); +} + +NvParameterized::ErrorType BufferF32x1::getParameterHandle(const char* long_name, Handle& handle) const +{ + ErrorType Ret = NvParameters::getParameterHandle(long_name, handle); + if (Ret != ERROR_NONE) + { + return(Ret); + } + + size_t offset; + void* ptr; + + getVarPtr(handle, ptr, offset); + + if (ptr == NULL) + { + return(ERROR_INDEX_OUT_OF_RANGE); + } + + return(ERROR_NONE); +} + +NvParameterized::ErrorType BufferF32x1::getParameterHandle(const char* long_name, Handle& handle) +{ + ErrorType Ret = NvParameters::getParameterHandle(long_name, handle); + if (Ret != ERROR_NONE) + { + return(Ret); + } + + size_t offset; + void* ptr; + + getVarPtr(handle, ptr, offset); + + if (ptr == NULL) + { + return(ERROR_INDEX_OUT_OF_RANGE); + } + + return(ERROR_NONE); +} + +void BufferF32x1::getVarPtr(const Handle& handle, void*& ptr, size_t& offset) const +{ + ptr = getVarPtrHelper(&ParamLookupTable[0], const_cast<BufferF32x1::ParametersStruct*>(¶meters()), handle, offset); +} + + +/* Dynamic Handle Indices */ + +void BufferF32x1::freeParameterDefinitionTable(NvParameterized::Traits* traits) +{ + if (!traits) + { + return; + } + + if (!mBuiltFlag) // Double-checked lock + { + return; + } + + NvParameterized::MutexType::ScopedLock lock(mBuiltFlagMutex); + + if (!mBuiltFlag) + { + return; + } + + for (uint32_t i = 0; i < NumParamDefs; ++i) + { + ParamDefTable[i].~DefinitionImpl(); + } + + traits->free(ParamDefTable); + + mBuiltFlag = false; +} 
+ +#define PDEF_PTR(index) (&ParamDefTable[index]) + +void BufferF32x1::buildTree(void) +{ + + uint32_t allocSize = sizeof(NvParameterized::DefinitionImpl) * NumParamDefs; + ParamDefTable = (NvParameterized::DefinitionImpl*)(mParameterizedTraits->alloc(allocSize)); + memset(ParamDefTable, 0, allocSize); + + for (uint32_t i = 0; i < NumParamDefs; ++i) + { + NV_PARAM_PLACEMENT_NEW(ParamDefTable + i, NvParameterized::DefinitionImpl)(*mParameterizedTraits); + } + + // Initialize DefinitionImpl node: nodeIndex=0, longName="" + { + NvParameterized::DefinitionImpl* ParamDef = &ParamDefTable[0]; + ParamDef->init("", TYPE_STRUCT, "STRUCT", true); + + + + + + + } + + // Initialize DefinitionImpl node: nodeIndex=1, longName="data" + { + NvParameterized::DefinitionImpl* ParamDef = &ParamDefTable[1]; + ParamDef->init("data", TYPE_ARRAY, NULL, true); + +#ifdef NV_PARAMETERIZED_HIDE_DESCRIPTIONS + +#else + + static HintImpl HintTable[1]; + static Hint* HintPtrTable[1] = { &HintTable[0], }; + HintTable[0].init("shortDescription", "Container for FLOAT1 formats", true); + ParamDefTable[1].setHints((const NvParameterized::Hint**)HintPtrTable, 1); + +#endif /* NV_PARAMETERIZED_HIDE_DESCRIPTIONS */ + + + + + ParamDef->setArraySize(-1); + } + + // Initialize DefinitionImpl node: nodeIndex=2, longName="data[]" + { + NvParameterized::DefinitionImpl* ParamDef = &ParamDefTable[2]; + ParamDef->init("data", TYPE_F32, NULL, true); + +#ifdef NV_PARAMETERIZED_HIDE_DESCRIPTIONS + +#else + + static HintImpl HintTable[1]; + static Hint* HintPtrTable[1] = { &HintTable[0], }; + HintTable[0].init("shortDescription", "Container for FLOAT1 formats", true); + ParamDefTable[2].setHints((const NvParameterized::Hint**)HintPtrTable, 1); + +#endif /* NV_PARAMETERIZED_HIDE_DESCRIPTIONS */ + + + + + + } + + // SetChildren for: nodeIndex=0, longName="" + { + static Definition* Children[1]; + Children[0] = PDEF_PTR(1); + + ParamDefTable[0].setChildren(Children, 1); + } + + // SetChildren for: nodeIndex=1, 
longName="data" + { + static Definition* Children[1]; + Children[0] = PDEF_PTR(2); + + ParamDefTable[1].setChildren(Children, 1); + } + + mBuiltFlag = true; + +} +void BufferF32x1::initStrings(void) +{ +} + +void BufferF32x1::initDynamicArrays(void) +{ + data.buf = NULL; + data.isAllocated = true; + data.elementSize = sizeof(float); + data.arraySizes[0] = 0; +} + +void BufferF32x1::initDefaults(void) +{ + + freeStrings(); + freeReferences(); + freeDynamicArrays(); + + initDynamicArrays(); + initStrings(); + initReferences(); +} + +void BufferF32x1::initReferences(void) +{ +} + +void BufferF32x1::freeDynamicArrays(void) +{ + if (data.isAllocated && data.buf) + { + mParameterizedTraits->free(data.buf); + } +} + +void BufferF32x1::freeStrings(void) +{ +} + +void BufferF32x1::freeReferences(void) +{ +} + +} // namespace apex +} // namespace nvidia diff --git a/APEX_1.4/framework/src/autogen/BufferF32x2.cpp b/APEX_1.4/framework/src/autogen/BufferF32x2.cpp new file mode 100644 index 00000000..16afe051 --- /dev/null +++ b/APEX_1.4/framework/src/autogen/BufferF32x2.cpp @@ -0,0 +1,401 @@ +// This code contains NVIDIA Confidential Information and is disclosed to you +// under a form of NVIDIA software license agreement provided separately to you. +// +// Notice +// NVIDIA Corporation and its licensors retain all intellectual property and +// proprietary rights in and to this software and related documentation and +// any modifications thereto. Any use, reproduction, disclosure, or +// distribution of this software and related documentation without an express +// license agreement from NVIDIA Corporation is strictly prohibited. +// +// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES +// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO +// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT, +// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE. 
+// +// Information and code furnished is believed to be accurate and reliable. +// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such +// information or for any infringement of patents or other rights of third parties that may +// result from its use. No license is granted by implication or otherwise under any patent +// or patent rights of NVIDIA Corporation. Details are subject to change without notice. +// This code supersedes and replaces all information previously supplied. +// NVIDIA Corporation products are not authorized for use as critical +// components in life support devices or systems without express written approval of +// NVIDIA Corporation. +// +// Copyright (c) 2008-2015 NVIDIA Corporation. All rights reserved. + +// This file was generated by NvParameterized/scripts/GenParameterized.pl + + +#include "BufferF32x2.h" +#include <string.h> +#include <stdlib.h> + +using namespace NvParameterized; + +namespace nvidia +{ +namespace apex +{ + +using namespace BufferF32x2NS; + +const char* const BufferF32x2Factory::vptr = + NvParameterized::getVptr<BufferF32x2, BufferF32x2::ClassAlignment>(); + +const uint32_t NumParamDefs = 5; +static NvParameterized::DefinitionImpl* ParamDefTable; // now allocated in buildTree [NumParamDefs]; + + +static const size_t ParamLookupChildrenTable[] = +{ + 1, 2, 3, 4, +}; + +#define TENUM(type) nvidia::##type +#define CHILDREN(index) &ParamLookupChildrenTable[index] +static const NvParameterized::ParamLookupNode ParamLookupTable[NumParamDefs] = +{ + { TYPE_STRUCT, false, 0, CHILDREN(0), 1 }, + { TYPE_ARRAY, true, (size_t)(&((ParametersStruct*)0)->data), CHILDREN(1), 1 }, // data + { TYPE_STRUCT, false, 1 * sizeof(F32x2_Type), CHILDREN(2), 2 }, // data[] + { TYPE_F32, false, (size_t)(&((F32x2_Type*)0)->x), NULL, 0 }, // data[].x + { TYPE_F32, false, (size_t)(&((F32x2_Type*)0)->y), NULL, 0 }, // data[].y +}; + + +bool BufferF32x2::mBuiltFlag = false; +NvParameterized::MutexType 
BufferF32x2::mBuiltFlagMutex; + +BufferF32x2::BufferF32x2(NvParameterized::Traits* traits, void* buf, int32_t* refCount) : + NvParameters(traits, buf, refCount) +{ + //mParameterizedTraits->registerFactory(className(), &BufferF32x2FactoryInst); + + if (!buf) //Do not init data if it is inplace-deserialized + { + initDynamicArrays(); + initStrings(); + initReferences(); + initDefaults(); + } +} + +BufferF32x2::~BufferF32x2() +{ + freeStrings(); + freeReferences(); + freeDynamicArrays(); +} + +void BufferF32x2::destroy() +{ + // We cache these fields here to avoid overwrite in destructor + bool doDeallocateSelf = mDoDeallocateSelf; + NvParameterized::Traits* traits = mParameterizedTraits; + int32_t* refCount = mRefCount; + void* buf = mBuffer; + + this->~BufferF32x2(); + + NvParameters::destroy(this, traits, doDeallocateSelf, refCount, buf); +} + +const NvParameterized::DefinitionImpl* BufferF32x2::getParameterDefinitionTree(void) +{ + if (!mBuiltFlag) // Double-checked lock + { + NvParameterized::MutexType::ScopedLock lock(mBuiltFlagMutex); + if (!mBuiltFlag) + { + buildTree(); + } + } + + return(&ParamDefTable[0]); +} + +const NvParameterized::DefinitionImpl* BufferF32x2::getParameterDefinitionTree(void) const +{ + BufferF32x2* tmpParam = const_cast<BufferF32x2*>(this); + + if (!mBuiltFlag) // Double-checked lock + { + NvParameterized::MutexType::ScopedLock lock(mBuiltFlagMutex); + if (!mBuiltFlag) + { + tmpParam->buildTree(); + } + } + + return(&ParamDefTable[0]); +} + +NvParameterized::ErrorType BufferF32x2::getParameterHandle(const char* long_name, Handle& handle) const +{ + ErrorType Ret = NvParameters::getParameterHandle(long_name, handle); + if (Ret != ERROR_NONE) + { + return(Ret); + } + + size_t offset; + void* ptr; + + getVarPtr(handle, ptr, offset); + + if (ptr == NULL) + { + return(ERROR_INDEX_OUT_OF_RANGE); + } + + return(ERROR_NONE); +} + +NvParameterized::ErrorType BufferF32x2::getParameterHandle(const char* long_name, Handle& handle) +{ + ErrorType 
Ret = NvParameters::getParameterHandle(long_name, handle); + if (Ret != ERROR_NONE) + { + return(Ret); + } + + size_t offset; + void* ptr; + + getVarPtr(handle, ptr, offset); + + if (ptr == NULL) + { + return(ERROR_INDEX_OUT_OF_RANGE); + } + + return(ERROR_NONE); +} + +void BufferF32x2::getVarPtr(const Handle& handle, void*& ptr, size_t& offset) const +{ + ptr = getVarPtrHelper(&ParamLookupTable[0], const_cast<BufferF32x2::ParametersStruct*>(¶meters()), handle, offset); +} + + +/* Dynamic Handle Indices */ + +void BufferF32x2::freeParameterDefinitionTable(NvParameterized::Traits* traits) +{ + if (!traits) + { + return; + } + + if (!mBuiltFlag) // Double-checked lock + { + return; + } + + NvParameterized::MutexType::ScopedLock lock(mBuiltFlagMutex); + + if (!mBuiltFlag) + { + return; + } + + for (uint32_t i = 0; i < NumParamDefs; ++i) + { + ParamDefTable[i].~DefinitionImpl(); + } + + traits->free(ParamDefTable); + + mBuiltFlag = false; +} + +#define PDEF_PTR(index) (&ParamDefTable[index]) + +void BufferF32x2::buildTree(void) +{ + + uint32_t allocSize = sizeof(NvParameterized::DefinitionImpl) * NumParamDefs; + ParamDefTable = (NvParameterized::DefinitionImpl*)(mParameterizedTraits->alloc(allocSize)); + memset(ParamDefTable, 0, allocSize); + + for (uint32_t i = 0; i < NumParamDefs; ++i) + { + NV_PARAM_PLACEMENT_NEW(ParamDefTable + i, NvParameterized::DefinitionImpl)(*mParameterizedTraits); + } + + // Initialize DefinitionImpl node: nodeIndex=0, longName="" + { + NvParameterized::DefinitionImpl* ParamDef = &ParamDefTable[0]; + ParamDef->init("", TYPE_STRUCT, "STRUCT", true); + + + + + + + } + + // Initialize DefinitionImpl node: nodeIndex=1, longName="data" + { + NvParameterized::DefinitionImpl* ParamDef = &ParamDefTable[1]; + ParamDef->init("data", TYPE_ARRAY, NULL, true); + +#ifdef NV_PARAMETERIZED_HIDE_DESCRIPTIONS + +#else + + static HintImpl HintTable[1]; + static Hint* HintPtrTable[1] = { &HintTable[0], }; + HintTable[0].init("shortDescription", "Container for 
FLOAT2 formats", true); + ParamDefTable[1].setHints((const NvParameterized::Hint**)HintPtrTable, 1); + +#endif /* NV_PARAMETERIZED_HIDE_DESCRIPTIONS */ + + + + + ParamDef->setArraySize(-1); + } + + // Initialize DefinitionImpl node: nodeIndex=2, longName="data[]" + { + NvParameterized::DefinitionImpl* ParamDef = &ParamDefTable[2]; + ParamDef->init("data", TYPE_STRUCT, "F32x2", true); + +#ifdef NV_PARAMETERIZED_HIDE_DESCRIPTIONS + +#else + + static HintImpl HintTable[1]; + static Hint* HintPtrTable[1] = { &HintTable[0], }; + HintTable[0].init("shortDescription", "Container for FLOAT2 formats", true); + ParamDefTable[2].setHints((const NvParameterized::Hint**)HintPtrTable, 1); + +#endif /* NV_PARAMETERIZED_HIDE_DESCRIPTIONS */ + + + + + + } + + // Initialize DefinitionImpl node: nodeIndex=3, longName="data[].x" + { + NvParameterized::DefinitionImpl* ParamDef = &ParamDefTable[3]; + ParamDef->init("x", TYPE_F32, NULL, true); + +#ifdef NV_PARAMETERIZED_HIDE_DESCRIPTIONS + +#else + + static HintImpl HintTable[1]; + static Hint* HintPtrTable[1] = { &HintTable[0], }; + HintTable[0].init("shortDescription", "X", true); + ParamDefTable[3].setHints((const NvParameterized::Hint**)HintPtrTable, 1); + +#endif /* NV_PARAMETERIZED_HIDE_DESCRIPTIONS */ + + + + + + } + + // Initialize DefinitionImpl node: nodeIndex=4, longName="data[].y" + { + NvParameterized::DefinitionImpl* ParamDef = &ParamDefTable[4]; + ParamDef->init("y", TYPE_F32, NULL, true); + +#ifdef NV_PARAMETERIZED_HIDE_DESCRIPTIONS + +#else + + static HintImpl HintTable[1]; + static Hint* HintPtrTable[1] = { &HintTable[0], }; + HintTable[0].init("shortDescription", "Y", true); + ParamDefTable[4].setHints((const NvParameterized::Hint**)HintPtrTable, 1); + +#endif /* NV_PARAMETERIZED_HIDE_DESCRIPTIONS */ + + + + + + } + + // SetChildren for: nodeIndex=0, longName="" + { + static Definition* Children[1]; + Children[0] = PDEF_PTR(1); + + ParamDefTable[0].setChildren(Children, 1); + } + + // SetChildren for: nodeIndex=1, 
longName="data" + { + static Definition* Children[1]; + Children[0] = PDEF_PTR(2); + + ParamDefTable[1].setChildren(Children, 1); + } + + // SetChildren for: nodeIndex=2, longName="data[]" + { + static Definition* Children[2]; + Children[0] = PDEF_PTR(3); + Children[1] = PDEF_PTR(4); + + ParamDefTable[2].setChildren(Children, 2); + } + + mBuiltFlag = true; + +} +void BufferF32x2::initStrings(void) +{ +} + +void BufferF32x2::initDynamicArrays(void) +{ + data.buf = NULL; + data.isAllocated = true; + data.elementSize = sizeof(F32x2_Type); + data.arraySizes[0] = 0; +} + +void BufferF32x2::initDefaults(void) +{ + + freeStrings(); + freeReferences(); + freeDynamicArrays(); + + initDynamicArrays(); + initStrings(); + initReferences(); +} + +void BufferF32x2::initReferences(void) +{ +} + +void BufferF32x2::freeDynamicArrays(void) +{ + if (data.isAllocated && data.buf) + { + mParameterizedTraits->free(data.buf); + } +} + +void BufferF32x2::freeStrings(void) +{ +} + +void BufferF32x2::freeReferences(void) +{ +} + +} // namespace apex +} // namespace nvidia diff --git a/APEX_1.4/framework/src/autogen/BufferF32x3.cpp b/APEX_1.4/framework/src/autogen/BufferF32x3.cpp new file mode 100644 index 00000000..d6866511 --- /dev/null +++ b/APEX_1.4/framework/src/autogen/BufferF32x3.cpp @@ -0,0 +1,346 @@ +// This code contains NVIDIA Confidential Information and is disclosed to you +// under a form of NVIDIA software license agreement provided separately to you. +// +// Notice +// NVIDIA Corporation and its licensors retain all intellectual property and +// proprietary rights in and to this software and related documentation and +// any modifications thereto. Any use, reproduction, disclosure, or +// distribution of this software and related documentation without an express +// license agreement from NVIDIA Corporation is strictly prohibited. +// +// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". 
NVIDIA MAKES +// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO +// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT, +// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE. +// +// Information and code furnished is believed to be accurate and reliable. +// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such +// information or for any infringement of patents or other rights of third parties that may +// result from its use. No license is granted by implication or otherwise under any patent +// or patent rights of NVIDIA Corporation. Details are subject to change without notice. +// This code supersedes and replaces all information previously supplied. +// NVIDIA Corporation products are not authorized for use as critical +// components in life support devices or systems without express written approval of +// NVIDIA Corporation. +// +// Copyright (c) 2008-2015 NVIDIA Corporation. All rights reserved. 
+ +// This file was generated by NvParameterized/scripts/GenParameterized.pl + + +#include "BufferF32x3.h" +#include <string.h> +#include <stdlib.h> + +using namespace NvParameterized; + +namespace nvidia +{ +namespace apex +{ + +using namespace BufferF32x3NS; + +const char* const BufferF32x3Factory::vptr = + NvParameterized::getVptr<BufferF32x3, BufferF32x3::ClassAlignment>(); + +const uint32_t NumParamDefs = 3; +static NvParameterized::DefinitionImpl* ParamDefTable; // now allocated in buildTree [NumParamDefs]; + + +static const size_t ParamLookupChildrenTable[] = +{ + 1, 2, +}; + +#define TENUM(type) nvidia::##type +#define CHILDREN(index) &ParamLookupChildrenTable[index] +static const NvParameterized::ParamLookupNode ParamLookupTable[NumParamDefs] = +{ + { TYPE_STRUCT, false, 0, CHILDREN(0), 1 }, + { TYPE_ARRAY, true, (size_t)(&((ParametersStruct*)0)->data), CHILDREN(1), 1 }, // data + { TYPE_VEC3, false, 1 * sizeof(physx::PxVec3), NULL, 0 }, // data[] +}; + + +bool BufferF32x3::mBuiltFlag = false; +NvParameterized::MutexType BufferF32x3::mBuiltFlagMutex; + +BufferF32x3::BufferF32x3(NvParameterized::Traits* traits, void* buf, int32_t* refCount) : + NvParameters(traits, buf, refCount) +{ + //mParameterizedTraits->registerFactory(className(), &BufferF32x3FactoryInst); + + if (!buf) //Do not init data if it is inplace-deserialized + { + initDynamicArrays(); + initStrings(); + initReferences(); + initDefaults(); + } +} + +BufferF32x3::~BufferF32x3() +{ + freeStrings(); + freeReferences(); + freeDynamicArrays(); +} + +void BufferF32x3::destroy() +{ + // We cache these fields here to avoid overwrite in destructor + bool doDeallocateSelf = mDoDeallocateSelf; + NvParameterized::Traits* traits = mParameterizedTraits; + int32_t* refCount = mRefCount; + void* buf = mBuffer; + + this->~BufferF32x3(); + + NvParameters::destroy(this, traits, doDeallocateSelf, refCount, buf); +} + +const NvParameterized::DefinitionImpl* BufferF32x3::getParameterDefinitionTree(void) +{ + if 
(!mBuiltFlag) // Double-checked lock + { + NvParameterized::MutexType::ScopedLock lock(mBuiltFlagMutex); + if (!mBuiltFlag) + { + buildTree(); + } + } + + return(&ParamDefTable[0]); +} + +const NvParameterized::DefinitionImpl* BufferF32x3::getParameterDefinitionTree(void) const +{ + BufferF32x3* tmpParam = const_cast<BufferF32x3*>(this); + + if (!mBuiltFlag) // Double-checked lock + { + NvParameterized::MutexType::ScopedLock lock(mBuiltFlagMutex); + if (!mBuiltFlag) + { + tmpParam->buildTree(); + } + } + + return(&ParamDefTable[0]); +} + +NvParameterized::ErrorType BufferF32x3::getParameterHandle(const char* long_name, Handle& handle) const +{ + ErrorType Ret = NvParameters::getParameterHandle(long_name, handle); + if (Ret != ERROR_NONE) + { + return(Ret); + } + + size_t offset; + void* ptr; + + getVarPtr(handle, ptr, offset); + + if (ptr == NULL) + { + return(ERROR_INDEX_OUT_OF_RANGE); + } + + return(ERROR_NONE); +} + +NvParameterized::ErrorType BufferF32x3::getParameterHandle(const char* long_name, Handle& handle) +{ + ErrorType Ret = NvParameters::getParameterHandle(long_name, handle); + if (Ret != ERROR_NONE) + { + return(Ret); + } + + size_t offset; + void* ptr; + + getVarPtr(handle, ptr, offset); + + if (ptr == NULL) + { + return(ERROR_INDEX_OUT_OF_RANGE); + } + + return(ERROR_NONE); +} + +void BufferF32x3::getVarPtr(const Handle& handle, void*& ptr, size_t& offset) const +{ + ptr = getVarPtrHelper(&ParamLookupTable[0], const_cast<BufferF32x3::ParametersStruct*>(¶meters()), handle, offset); +} + + +/* Dynamic Handle Indices */ + +void BufferF32x3::freeParameterDefinitionTable(NvParameterized::Traits* traits) +{ + if (!traits) + { + return; + } + + if (!mBuiltFlag) // Double-checked lock + { + return; + } + + NvParameterized::MutexType::ScopedLock lock(mBuiltFlagMutex); + + if (!mBuiltFlag) + { + return; + } + + for (uint32_t i = 0; i < NumParamDefs; ++i) + { + ParamDefTable[i].~DefinitionImpl(); + } + + traits->free(ParamDefTable); + + mBuiltFlag = false; +} 
+ +#define PDEF_PTR(index) (&ParamDefTable[index]) + +void BufferF32x3::buildTree(void) +{ + + uint32_t allocSize = sizeof(NvParameterized::DefinitionImpl) * NumParamDefs; + ParamDefTable = (NvParameterized::DefinitionImpl*)(mParameterizedTraits->alloc(allocSize)); + memset(ParamDefTable, 0, allocSize); + + for (uint32_t i = 0; i < NumParamDefs; ++i) + { + NV_PARAM_PLACEMENT_NEW(ParamDefTable + i, NvParameterized::DefinitionImpl)(*mParameterizedTraits); + } + + // Initialize DefinitionImpl node: nodeIndex=0, longName="" + { + NvParameterized::DefinitionImpl* ParamDef = &ParamDefTable[0]; + ParamDef->init("", TYPE_STRUCT, "STRUCT", true); + + + + + + + } + + // Initialize DefinitionImpl node: nodeIndex=1, longName="data" + { + NvParameterized::DefinitionImpl* ParamDef = &ParamDefTable[1]; + ParamDef->init("data", TYPE_ARRAY, NULL, true); + +#ifdef NV_PARAMETERIZED_HIDE_DESCRIPTIONS + +#else + + static HintImpl HintTable[1]; + static Hint* HintPtrTable[1] = { &HintTable[0], }; + HintTable[0].init("shortDescription", "Container for FLOAT3 formats", true); + ParamDefTable[1].setHints((const NvParameterized::Hint**)HintPtrTable, 1); + +#endif /* NV_PARAMETERIZED_HIDE_DESCRIPTIONS */ + + + + + ParamDef->setArraySize(-1); + } + + // Initialize DefinitionImpl node: nodeIndex=2, longName="data[]" + { + NvParameterized::DefinitionImpl* ParamDef = &ParamDefTable[2]; + ParamDef->init("data", TYPE_VEC3, NULL, true); + +#ifdef NV_PARAMETERIZED_HIDE_DESCRIPTIONS + +#else + + static HintImpl HintTable[1]; + static Hint* HintPtrTable[1] = { &HintTable[0], }; + HintTable[0].init("shortDescription", "Container for FLOAT3 formats", true); + ParamDefTable[2].setHints((const NvParameterized::Hint**)HintPtrTable, 1); + +#endif /* NV_PARAMETERIZED_HIDE_DESCRIPTIONS */ + + + + + + } + + // SetChildren for: nodeIndex=0, longName="" + { + static Definition* Children[1]; + Children[0] = PDEF_PTR(1); + + ParamDefTable[0].setChildren(Children, 1); + } + + // SetChildren for: nodeIndex=1, 
longName="data" + { + static Definition* Children[1]; + Children[0] = PDEF_PTR(2); + + ParamDefTable[1].setChildren(Children, 1); + } + + mBuiltFlag = true; + +} +void BufferF32x3::initStrings(void) +{ +} + +void BufferF32x3::initDynamicArrays(void) +{ + data.buf = NULL; + data.isAllocated = true; + data.elementSize = sizeof(physx::PxVec3); + data.arraySizes[0] = 0; +} + +void BufferF32x3::initDefaults(void) +{ + + freeStrings(); + freeReferences(); + freeDynamicArrays(); + + initDynamicArrays(); + initStrings(); + initReferences(); +} + +void BufferF32x3::initReferences(void) +{ +} + +void BufferF32x3::freeDynamicArrays(void) +{ + if (data.isAllocated && data.buf) + { + mParameterizedTraits->free(data.buf); + } +} + +void BufferF32x3::freeStrings(void) +{ +} + +void BufferF32x3::freeReferences(void) +{ +} + +} // namespace apex +} // namespace nvidia diff --git a/APEX_1.4/framework/src/autogen/BufferF32x4.cpp b/APEX_1.4/framework/src/autogen/BufferF32x4.cpp new file mode 100644 index 00000000..7aeb3f3b --- /dev/null +++ b/APEX_1.4/framework/src/autogen/BufferF32x4.cpp @@ -0,0 +1,453 @@ +// This code contains NVIDIA Confidential Information and is disclosed to you +// under a form of NVIDIA software license agreement provided separately to you. +// +// Notice +// NVIDIA Corporation and its licensors retain all intellectual property and +// proprietary rights in and to this software and related documentation and +// any modifications thereto. Any use, reproduction, disclosure, or +// distribution of this software and related documentation without an express +// license agreement from NVIDIA Corporation is strictly prohibited. +// +// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES +// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO +// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT, +// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE. 
+// +// Information and code furnished is believed to be accurate and reliable. +// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such +// information or for any infringement of patents or other rights of third parties that may +// result from its use. No license is granted by implication or otherwise under any patent +// or patent rights of NVIDIA Corporation. Details are subject to change without notice. +// This code supersedes and replaces all information previously supplied. +// NVIDIA Corporation products are not authorized for use as critical +// components in life support devices or systems without express written approval of +// NVIDIA Corporation. +// +// Copyright (c) 2008-2015 NVIDIA Corporation. All rights reserved. + +// This file was generated by NvParameterized/scripts/GenParameterized.pl + + +#include "BufferF32x4.h" +#include <string.h> +#include <stdlib.h> + +using namespace NvParameterized; + +namespace nvidia +{ +namespace apex +{ + +using namespace BufferF32x4NS; + +const char* const BufferF32x4Factory::vptr = + NvParameterized::getVptr<BufferF32x4, BufferF32x4::ClassAlignment>(); + +const uint32_t NumParamDefs = 7; +static NvParameterized::DefinitionImpl* ParamDefTable; // now allocated in buildTree [NumParamDefs]; + + +static const size_t ParamLookupChildrenTable[] = +{ + 1, 2, 3, 4, 5, 6, +}; + +#define TENUM(type) nvidia::##type +#define CHILDREN(index) &ParamLookupChildrenTable[index] +static const NvParameterized::ParamLookupNode ParamLookupTable[NumParamDefs] = +{ + { TYPE_STRUCT, false, 0, CHILDREN(0), 1 }, + { TYPE_ARRAY, true, (size_t)(&((ParametersStruct*)0)->data), CHILDREN(1), 1 }, // data + { TYPE_STRUCT, false, 1 * sizeof(F32x4_Type), CHILDREN(2), 4 }, // data[] + { TYPE_F32, false, (size_t)(&((F32x4_Type*)0)->x), NULL, 0 }, // data[].x + { TYPE_F32, false, (size_t)(&((F32x4_Type*)0)->y), NULL, 0 }, // data[].y + { TYPE_F32, false, (size_t)(&((F32x4_Type*)0)->z), NULL, 0 }, // data[].z + { 
TYPE_F32, false, (size_t)(&((F32x4_Type*)0)->w), NULL, 0 }, // data[].w +}; + + +bool BufferF32x4::mBuiltFlag = false; +NvParameterized::MutexType BufferF32x4::mBuiltFlagMutex; + +BufferF32x4::BufferF32x4(NvParameterized::Traits* traits, void* buf, int32_t* refCount) : + NvParameters(traits, buf, refCount) +{ + //mParameterizedTraits->registerFactory(className(), &BufferF32x4FactoryInst); + + if (!buf) //Do not init data if it is inplace-deserialized + { + initDynamicArrays(); + initStrings(); + initReferences(); + initDefaults(); + } +} + +BufferF32x4::~BufferF32x4() +{ + freeStrings(); + freeReferences(); + freeDynamicArrays(); +} + +void BufferF32x4::destroy() +{ + // We cache these fields here to avoid overwrite in destructor + bool doDeallocateSelf = mDoDeallocateSelf; + NvParameterized::Traits* traits = mParameterizedTraits; + int32_t* refCount = mRefCount; + void* buf = mBuffer; + + this->~BufferF32x4(); + + NvParameters::destroy(this, traits, doDeallocateSelf, refCount, buf); +} + +const NvParameterized::DefinitionImpl* BufferF32x4::getParameterDefinitionTree(void) +{ + if (!mBuiltFlag) // Double-checked lock + { + NvParameterized::MutexType::ScopedLock lock(mBuiltFlagMutex); + if (!mBuiltFlag) + { + buildTree(); + } + } + + return(&ParamDefTable[0]); +} + +const NvParameterized::DefinitionImpl* BufferF32x4::getParameterDefinitionTree(void) const +{ + BufferF32x4* tmpParam = const_cast<BufferF32x4*>(this); + + if (!mBuiltFlag) // Double-checked lock + { + NvParameterized::MutexType::ScopedLock lock(mBuiltFlagMutex); + if (!mBuiltFlag) + { + tmpParam->buildTree(); + } + } + + return(&ParamDefTable[0]); +} + +NvParameterized::ErrorType BufferF32x4::getParameterHandle(const char* long_name, Handle& handle) const +{ + ErrorType Ret = NvParameters::getParameterHandle(long_name, handle); + if (Ret != ERROR_NONE) + { + return(Ret); + } + + size_t offset; + void* ptr; + + getVarPtr(handle, ptr, offset); + + if (ptr == NULL) + { + return(ERROR_INDEX_OUT_OF_RANGE); + 
} + + return(ERROR_NONE); +} + +NvParameterized::ErrorType BufferF32x4::getParameterHandle(const char* long_name, Handle& handle) +{ + ErrorType Ret = NvParameters::getParameterHandle(long_name, handle); + if (Ret != ERROR_NONE) + { + return(Ret); + } + + size_t offset; + void* ptr; + + getVarPtr(handle, ptr, offset); + + if (ptr == NULL) + { + return(ERROR_INDEX_OUT_OF_RANGE); + } + + return(ERROR_NONE); +} + +void BufferF32x4::getVarPtr(const Handle& handle, void*& ptr, size_t& offset) const +{ + ptr = getVarPtrHelper(&ParamLookupTable[0], const_cast<BufferF32x4::ParametersStruct*>(¶meters()), handle, offset); +} + + +/* Dynamic Handle Indices */ + +void BufferF32x4::freeParameterDefinitionTable(NvParameterized::Traits* traits) +{ + if (!traits) + { + return; + } + + if (!mBuiltFlag) // Double-checked lock + { + return; + } + + NvParameterized::MutexType::ScopedLock lock(mBuiltFlagMutex); + + if (!mBuiltFlag) + { + return; + } + + for (uint32_t i = 0; i < NumParamDefs; ++i) + { + ParamDefTable[i].~DefinitionImpl(); + } + + traits->free(ParamDefTable); + + mBuiltFlag = false; +} + +#define PDEF_PTR(index) (&ParamDefTable[index]) + +void BufferF32x4::buildTree(void) +{ + + uint32_t allocSize = sizeof(NvParameterized::DefinitionImpl) * NumParamDefs; + ParamDefTable = (NvParameterized::DefinitionImpl*)(mParameterizedTraits->alloc(allocSize)); + memset(ParamDefTable, 0, allocSize); + + for (uint32_t i = 0; i < NumParamDefs; ++i) + { + NV_PARAM_PLACEMENT_NEW(ParamDefTable + i, NvParameterized::DefinitionImpl)(*mParameterizedTraits); + } + + // Initialize DefinitionImpl node: nodeIndex=0, longName="" + { + NvParameterized::DefinitionImpl* ParamDef = &ParamDefTable[0]; + ParamDef->init("", TYPE_STRUCT, "STRUCT", true); + + + + + + + } + + // Initialize DefinitionImpl node: nodeIndex=1, longName="data" + { + NvParameterized::DefinitionImpl* ParamDef = &ParamDefTable[1]; + ParamDef->init("data", TYPE_ARRAY, NULL, true); + +#ifdef NV_PARAMETERIZED_HIDE_DESCRIPTIONS + +#else 
+ + static HintImpl HintTable[1]; + static Hint* HintPtrTable[1] = { &HintTable[0], }; + HintTable[0].init("shortDescription", "Container for FLOAT4 formats", true); + ParamDefTable[1].setHints((const NvParameterized::Hint**)HintPtrTable, 1); + +#endif /* NV_PARAMETERIZED_HIDE_DESCRIPTIONS */ + + + + ParamDefTable[1].setAlignment(16); + + ParamDef->setArraySize(-1); + } + + // Initialize DefinitionImpl node: nodeIndex=2, longName="data[]" + { + NvParameterized::DefinitionImpl* ParamDef = &ParamDefTable[2]; + ParamDef->init("data", TYPE_STRUCT, "F32x4", true); + +#ifdef NV_PARAMETERIZED_HIDE_DESCRIPTIONS + +#else + + static HintImpl HintTable[1]; + static Hint* HintPtrTable[1] = { &HintTable[0], }; + HintTable[0].init("shortDescription", "Container for FLOAT4 formats", true); + ParamDefTable[2].setHints((const NvParameterized::Hint**)HintPtrTable, 1); + +#endif /* NV_PARAMETERIZED_HIDE_DESCRIPTIONS */ + + + + ParamDefTable[2].setAlignment(16); + ParamDefTable[2].setPadding(16); + + + } + + // Initialize DefinitionImpl node: nodeIndex=3, longName="data[].x" + { + NvParameterized::DefinitionImpl* ParamDef = &ParamDefTable[3]; + ParamDef->init("x", TYPE_F32, NULL, true); + +#ifdef NV_PARAMETERIZED_HIDE_DESCRIPTIONS + +#else + + static HintImpl HintTable[1]; + static Hint* HintPtrTable[1] = { &HintTable[0], }; + HintTable[0].init("shortDescription", "X", true); + ParamDefTable[3].setHints((const NvParameterized::Hint**)HintPtrTable, 1); + +#endif /* NV_PARAMETERIZED_HIDE_DESCRIPTIONS */ + + + + ParamDefTable[3].setAlignment(16); + + + } + + // Initialize DefinitionImpl node: nodeIndex=4, longName="data[].y" + { + NvParameterized::DefinitionImpl* ParamDef = &ParamDefTable[4]; + ParamDef->init("y", TYPE_F32, NULL, true); + +#ifdef NV_PARAMETERIZED_HIDE_DESCRIPTIONS + +#else + + static HintImpl HintTable[1]; + static Hint* HintPtrTable[1] = { &HintTable[0], }; + HintTable[0].init("shortDescription", "Y", true); + ParamDefTable[4].setHints((const 
NvParameterized::Hint**)HintPtrTable, 1); + +#endif /* NV_PARAMETERIZED_HIDE_DESCRIPTIONS */ + + + + + + } + + // Initialize DefinitionImpl node: nodeIndex=5, longName="data[].z" + { + NvParameterized::DefinitionImpl* ParamDef = &ParamDefTable[5]; + ParamDef->init("z", TYPE_F32, NULL, true); + +#ifdef NV_PARAMETERIZED_HIDE_DESCRIPTIONS + +#else + + static HintImpl HintTable[1]; + static Hint* HintPtrTable[1] = { &HintTable[0], }; + HintTable[0].init("shortDescription", "Z", true); + ParamDefTable[5].setHints((const NvParameterized::Hint**)HintPtrTable, 1); + +#endif /* NV_PARAMETERIZED_HIDE_DESCRIPTIONS */ + + + + + + } + + // Initialize DefinitionImpl node: nodeIndex=6, longName="data[].w" + { + NvParameterized::DefinitionImpl* ParamDef = &ParamDefTable[6]; + ParamDef->init("w", TYPE_F32, NULL, true); + +#ifdef NV_PARAMETERIZED_HIDE_DESCRIPTIONS + +#else + + static HintImpl HintTable[1]; + static Hint* HintPtrTable[1] = { &HintTable[0], }; + HintTable[0].init("shortDescription", "W", true); + ParamDefTable[6].setHints((const NvParameterized::Hint**)HintPtrTable, 1); + +#endif /* NV_PARAMETERIZED_HIDE_DESCRIPTIONS */ + + + + + + } + + // SetChildren for: nodeIndex=0, longName="" + { + static Definition* Children[1]; + Children[0] = PDEF_PTR(1); + + ParamDefTable[0].setChildren(Children, 1); + } + + // SetChildren for: nodeIndex=1, longName="data" + { + static Definition* Children[1]; + Children[0] = PDEF_PTR(2); + + ParamDefTable[1].setChildren(Children, 1); + } + + // SetChildren for: nodeIndex=2, longName="data[]" + { + static Definition* Children[4]; + Children[0] = PDEF_PTR(3); + Children[1] = PDEF_PTR(4); + Children[2] = PDEF_PTR(5); + Children[3] = PDEF_PTR(6); + + ParamDefTable[2].setChildren(Children, 4); + } + + mBuiltFlag = true; + +} +void BufferF32x4::initStrings(void) +{ +} + +void BufferF32x4::initDynamicArrays(void) +{ + data.buf = NULL; + data.isAllocated = true; + data.elementSize = sizeof(F32x4_Type); + data.arraySizes[0] = 0; +} + +void 
BufferF32x4::initDefaults(void) +{ + + freeStrings(); + freeReferences(); + freeDynamicArrays(); + + initDynamicArrays(); + initStrings(); + initReferences(); +} + +void BufferF32x4::initReferences(void) +{ +} + +void BufferF32x4::freeDynamicArrays(void) +{ + if (data.isAllocated && data.buf) + { + mParameterizedTraits->free(data.buf); + } +} + +void BufferF32x4::freeStrings(void) +{ +} + +void BufferF32x4::freeReferences(void) +{ +} + +} // namespace apex +} // namespace nvidia diff --git a/APEX_1.4/framework/src/autogen/BufferU16x1.cpp b/APEX_1.4/framework/src/autogen/BufferU16x1.cpp new file mode 100644 index 00000000..04d3b5ce --- /dev/null +++ b/APEX_1.4/framework/src/autogen/BufferU16x1.cpp @@ -0,0 +1,346 @@ +// This code contains NVIDIA Confidential Information and is disclosed to you +// under a form of NVIDIA software license agreement provided separately to you. +// +// Notice +// NVIDIA Corporation and its licensors retain all intellectual property and +// proprietary rights in and to this software and related documentation and +// any modifications thereto. Any use, reproduction, disclosure, or +// distribution of this software and related documentation without an express +// license agreement from NVIDIA Corporation is strictly prohibited. +// +// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES +// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO +// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT, +// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE. +// +// Information and code furnished is believed to be accurate and reliable. +// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such +// information or for any infringement of patents or other rights of third parties that may +// result from its use. No license is granted by implication or otherwise under any patent +// or patent rights of NVIDIA Corporation. 
Details are subject to change without notice. +// This code supersedes and replaces all information previously supplied. +// NVIDIA Corporation products are not authorized for use as critical +// components in life support devices or systems without express written approval of +// NVIDIA Corporation. +// +// Copyright (c) 2008-2015 NVIDIA Corporation. All rights reserved. + +// This file was generated by NvParameterized/scripts/GenParameterized.pl + + +#include "BufferU16x1.h" +#include <string.h> +#include <stdlib.h> + +using namespace NvParameterized; + +namespace nvidia +{ +namespace apex +{ + +using namespace BufferU16x1NS; + +const char* const BufferU16x1Factory::vptr = + NvParameterized::getVptr<BufferU16x1, BufferU16x1::ClassAlignment>(); + +const uint32_t NumParamDefs = 3; +static NvParameterized::DefinitionImpl* ParamDefTable; // now allocated in buildTree [NumParamDefs]; + + +static const size_t ParamLookupChildrenTable[] = +{ + 1, 2, +}; + +#define TENUM(type) nvidia::##type +#define CHILDREN(index) &ParamLookupChildrenTable[index] +static const NvParameterized::ParamLookupNode ParamLookupTable[NumParamDefs] = +{ + { TYPE_STRUCT, false, 0, CHILDREN(0), 1 }, + { TYPE_ARRAY, true, (size_t)(&((ParametersStruct*)0)->data), CHILDREN(1), 1 }, // data + { TYPE_U16, false, 1 * sizeof(uint16_t), NULL, 0 }, // data[] +}; + + +bool BufferU16x1::mBuiltFlag = false; +NvParameterized::MutexType BufferU16x1::mBuiltFlagMutex; + +BufferU16x1::BufferU16x1(NvParameterized::Traits* traits, void* buf, int32_t* refCount) : + NvParameters(traits, buf, refCount) +{ + //mParameterizedTraits->registerFactory(className(), &BufferU16x1FactoryInst); + + if (!buf) //Do not init data if it is inplace-deserialized + { + initDynamicArrays(); + initStrings(); + initReferences(); + initDefaults(); + } +} + +BufferU16x1::~BufferU16x1() +{ + freeStrings(); + freeReferences(); + freeDynamicArrays(); +} + +void BufferU16x1::destroy() +{ + // We cache these fields here to avoid overwrite in 
destructor + bool doDeallocateSelf = mDoDeallocateSelf; + NvParameterized::Traits* traits = mParameterizedTraits; + int32_t* refCount = mRefCount; + void* buf = mBuffer; + + this->~BufferU16x1(); + + NvParameters::destroy(this, traits, doDeallocateSelf, refCount, buf); +} + +const NvParameterized::DefinitionImpl* BufferU16x1::getParameterDefinitionTree(void) +{ + if (!mBuiltFlag) // Double-checked lock + { + NvParameterized::MutexType::ScopedLock lock(mBuiltFlagMutex); + if (!mBuiltFlag) + { + buildTree(); + } + } + + return(&ParamDefTable[0]); +} + +const NvParameterized::DefinitionImpl* BufferU16x1::getParameterDefinitionTree(void) const +{ + BufferU16x1* tmpParam = const_cast<BufferU16x1*>(this); + + if (!mBuiltFlag) // Double-checked lock + { + NvParameterized::MutexType::ScopedLock lock(mBuiltFlagMutex); + if (!mBuiltFlag) + { + tmpParam->buildTree(); + } + } + + return(&ParamDefTable[0]); +} + +NvParameterized::ErrorType BufferU16x1::getParameterHandle(const char* long_name, Handle& handle) const +{ + ErrorType Ret = NvParameters::getParameterHandle(long_name, handle); + if (Ret != ERROR_NONE) + { + return(Ret); + } + + size_t offset; + void* ptr; + + getVarPtr(handle, ptr, offset); + + if (ptr == NULL) + { + return(ERROR_INDEX_OUT_OF_RANGE); + } + + return(ERROR_NONE); +} + +NvParameterized::ErrorType BufferU16x1::getParameterHandle(const char* long_name, Handle& handle) +{ + ErrorType Ret = NvParameters::getParameterHandle(long_name, handle); + if (Ret != ERROR_NONE) + { + return(Ret); + } + + size_t offset; + void* ptr; + + getVarPtr(handle, ptr, offset); + + if (ptr == NULL) + { + return(ERROR_INDEX_OUT_OF_RANGE); + } + + return(ERROR_NONE); +} + +void BufferU16x1::getVarPtr(const Handle& handle, void*& ptr, size_t& offset) const +{ + ptr = getVarPtrHelper(&ParamLookupTable[0], const_cast<BufferU16x1::ParametersStruct*>(¶meters()), handle, offset); +} + + +/* Dynamic Handle Indices */ + +void 
BufferU16x1::freeParameterDefinitionTable(NvParameterized::Traits* traits) +{ + if (!traits) + { + return; + } + + if (!mBuiltFlag) // Double-checked lock + { + return; + } + + NvParameterized::MutexType::ScopedLock lock(mBuiltFlagMutex); + + if (!mBuiltFlag) + { + return; + } + + for (uint32_t i = 0; i < NumParamDefs; ++i) + { + ParamDefTable[i].~DefinitionImpl(); + } + + traits->free(ParamDefTable); + + mBuiltFlag = false; +} + +#define PDEF_PTR(index) (&ParamDefTable[index]) + +void BufferU16x1::buildTree(void) +{ + + uint32_t allocSize = sizeof(NvParameterized::DefinitionImpl) * NumParamDefs; + ParamDefTable = (NvParameterized::DefinitionImpl*)(mParameterizedTraits->alloc(allocSize)); + memset(ParamDefTable, 0, allocSize); + + for (uint32_t i = 0; i < NumParamDefs; ++i) + { + NV_PARAM_PLACEMENT_NEW(ParamDefTable + i, NvParameterized::DefinitionImpl)(*mParameterizedTraits); + } + + // Initialize DefinitionImpl node: nodeIndex=0, longName="" + { + NvParameterized::DefinitionImpl* ParamDef = &ParamDefTable[0]; + ParamDef->init("", TYPE_STRUCT, "STRUCT", true); + + + + + + + } + + // Initialize DefinitionImpl node: nodeIndex=1, longName="data" + { + NvParameterized::DefinitionImpl* ParamDef = &ParamDefTable[1]; + ParamDef->init("data", TYPE_ARRAY, NULL, true); + +#ifdef NV_PARAMETERIZED_HIDE_DESCRIPTIONS + +#else + + static HintImpl HintTable[1]; + static Hint* HintPtrTable[1] = { &HintTable[0], }; + HintTable[0].init("shortDescription", "Container for SHORT1 formats", true); + ParamDefTable[1].setHints((const NvParameterized::Hint**)HintPtrTable, 1); + +#endif /* NV_PARAMETERIZED_HIDE_DESCRIPTIONS */ + + + + + ParamDef->setArraySize(-1); + } + + // Initialize DefinitionImpl node: nodeIndex=2, longName="data[]" + { + NvParameterized::DefinitionImpl* ParamDef = &ParamDefTable[2]; + ParamDef->init("data", TYPE_U16, NULL, true); + +#ifdef NV_PARAMETERIZED_HIDE_DESCRIPTIONS + +#else + + static HintImpl HintTable[1]; + static Hint* HintPtrTable[1] = { &HintTable[0], }; 
+ HintTable[0].init("shortDescription", "Container for SHORT1 formats", true); + ParamDefTable[2].setHints((const NvParameterized::Hint**)HintPtrTable, 1); + +#endif /* NV_PARAMETERIZED_HIDE_DESCRIPTIONS */ + + + + + + } + + // SetChildren for: nodeIndex=0, longName="" + { + static Definition* Children[1]; + Children[0] = PDEF_PTR(1); + + ParamDefTable[0].setChildren(Children, 1); + } + + // SetChildren for: nodeIndex=1, longName="data" + { + static Definition* Children[1]; + Children[0] = PDEF_PTR(2); + + ParamDefTable[1].setChildren(Children, 1); + } + + mBuiltFlag = true; + +} +void BufferU16x1::initStrings(void) +{ +} + +void BufferU16x1::initDynamicArrays(void) +{ + data.buf = NULL; + data.isAllocated = true; + data.elementSize = sizeof(uint16_t); + data.arraySizes[0] = 0; +} + +void BufferU16x1::initDefaults(void) +{ + + freeStrings(); + freeReferences(); + freeDynamicArrays(); + + initDynamicArrays(); + initStrings(); + initReferences(); +} + +void BufferU16x1::initReferences(void) +{ +} + +void BufferU16x1::freeDynamicArrays(void) +{ + if (data.isAllocated && data.buf) + { + mParameterizedTraits->free(data.buf); + } +} + +void BufferU16x1::freeStrings(void) +{ +} + +void BufferU16x1::freeReferences(void) +{ +} + +} // namespace apex +} // namespace nvidia diff --git a/APEX_1.4/framework/src/autogen/BufferU16x2.cpp b/APEX_1.4/framework/src/autogen/BufferU16x2.cpp new file mode 100644 index 00000000..cb680641 --- /dev/null +++ b/APEX_1.4/framework/src/autogen/BufferU16x2.cpp @@ -0,0 +1,401 @@ +// This code contains NVIDIA Confidential Information and is disclosed to you +// under a form of NVIDIA software license agreement provided separately to you. +// +// Notice +// NVIDIA Corporation and its licensors retain all intellectual property and +// proprietary rights in and to this software and related documentation and +// any modifications thereto. 
Any use, reproduction, disclosure, or +// distribution of this software and related documentation without an express +// license agreement from NVIDIA Corporation is strictly prohibited. +// +// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES +// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO +// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT, +// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE. +// +// Information and code furnished is believed to be accurate and reliable. +// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such +// information or for any infringement of patents or other rights of third parties that may +// result from its use. No license is granted by implication or otherwise under any patent +// or patent rights of NVIDIA Corporation. Details are subject to change without notice. +// This code supersedes and replaces all information previously supplied. +// NVIDIA Corporation products are not authorized for use as critical +// components in life support devices or systems without express written approval of +// NVIDIA Corporation. +// +// Copyright (c) 2008-2015 NVIDIA Corporation. All rights reserved. 
+ +// This file was generated by NvParameterized/scripts/GenParameterized.pl + + +#include "BufferU16x2.h" +#include <string.h> +#include <stdlib.h> + +using namespace NvParameterized; + +namespace nvidia +{ +namespace apex +{ + +using namespace BufferU16x2NS; + +const char* const BufferU16x2Factory::vptr = + NvParameterized::getVptr<BufferU16x2, BufferU16x2::ClassAlignment>(); + +const uint32_t NumParamDefs = 5; +static NvParameterized::DefinitionImpl* ParamDefTable; // now allocated in buildTree [NumParamDefs]; + + +static const size_t ParamLookupChildrenTable[] = +{ + 1, 2, 3, 4, +}; + +#define TENUM(type) nvidia::##type +#define CHILDREN(index) &ParamLookupChildrenTable[index] +static const NvParameterized::ParamLookupNode ParamLookupTable[NumParamDefs] = +{ + { TYPE_STRUCT, false, 0, CHILDREN(0), 1 }, + { TYPE_ARRAY, true, (size_t)(&((ParametersStruct*)0)->data), CHILDREN(1), 1 }, // data + { TYPE_STRUCT, false, 1 * sizeof(U16x2_Type), CHILDREN(2), 2 }, // data[] + { TYPE_U16, false, (size_t)(&((U16x2_Type*)0)->x), NULL, 0 }, // data[].x + { TYPE_U16, false, (size_t)(&((U16x2_Type*)0)->y), NULL, 0 }, // data[].y +}; + + +bool BufferU16x2::mBuiltFlag = false; +NvParameterized::MutexType BufferU16x2::mBuiltFlagMutex; + +BufferU16x2::BufferU16x2(NvParameterized::Traits* traits, void* buf, int32_t* refCount) : + NvParameters(traits, buf, refCount) +{ + //mParameterizedTraits->registerFactory(className(), &BufferU16x2FactoryInst); + + if (!buf) //Do not init data if it is inplace-deserialized + { + initDynamicArrays(); + initStrings(); + initReferences(); + initDefaults(); + } +} + +BufferU16x2::~BufferU16x2() +{ + freeStrings(); + freeReferences(); + freeDynamicArrays(); +} + +void BufferU16x2::destroy() +{ + // We cache these fields here to avoid overwrite in destructor + bool doDeallocateSelf = mDoDeallocateSelf; + NvParameterized::Traits* traits = mParameterizedTraits; + int32_t* refCount = mRefCount; + void* buf = mBuffer; + + this->~BufferU16x2(); + + 
NvParameters::destroy(this, traits, doDeallocateSelf, refCount, buf); +} + +const NvParameterized::DefinitionImpl* BufferU16x2::getParameterDefinitionTree(void) +{ + if (!mBuiltFlag) // Double-checked lock + { + NvParameterized::MutexType::ScopedLock lock(mBuiltFlagMutex); + if (!mBuiltFlag) + { + buildTree(); + } + } + + return(&ParamDefTable[0]); +} + +const NvParameterized::DefinitionImpl* BufferU16x2::getParameterDefinitionTree(void) const +{ + BufferU16x2* tmpParam = const_cast<BufferU16x2*>(this); + + if (!mBuiltFlag) // Double-checked lock + { + NvParameterized::MutexType::ScopedLock lock(mBuiltFlagMutex); + if (!mBuiltFlag) + { + tmpParam->buildTree(); + } + } + + return(&ParamDefTable[0]); +} + +NvParameterized::ErrorType BufferU16x2::getParameterHandle(const char* long_name, Handle& handle) const +{ + ErrorType Ret = NvParameters::getParameterHandle(long_name, handle); + if (Ret != ERROR_NONE) + { + return(Ret); + } + + size_t offset; + void* ptr; + + getVarPtr(handle, ptr, offset); + + if (ptr == NULL) + { + return(ERROR_INDEX_OUT_OF_RANGE); + } + + return(ERROR_NONE); +} + +NvParameterized::ErrorType BufferU16x2::getParameterHandle(const char* long_name, Handle& handle) +{ + ErrorType Ret = NvParameters::getParameterHandle(long_name, handle); + if (Ret != ERROR_NONE) + { + return(Ret); + } + + size_t offset; + void* ptr; + + getVarPtr(handle, ptr, offset); + + if (ptr == NULL) + { + return(ERROR_INDEX_OUT_OF_RANGE); + } + + return(ERROR_NONE); +} + +void BufferU16x2::getVarPtr(const Handle& handle, void*& ptr, size_t& offset) const +{ + ptr = getVarPtrHelper(&ParamLookupTable[0], const_cast<BufferU16x2::ParametersStruct*>(¶meters()), handle, offset); +} + + +/* Dynamic Handle Indices */ + +void BufferU16x2::freeParameterDefinitionTable(NvParameterized::Traits* traits) +{ + if (!traits) + { + return; + } + + if (!mBuiltFlag) // Double-checked lock + { + return; + } + + NvParameterized::MutexType::ScopedLock lock(mBuiltFlagMutex); + + if (!mBuiltFlag) + { 
+ return; + } + + for (uint32_t i = 0; i < NumParamDefs; ++i) + { + ParamDefTable[i].~DefinitionImpl(); + } + + traits->free(ParamDefTable); + + mBuiltFlag = false; +} + +#define PDEF_PTR(index) (&ParamDefTable[index]) + +void BufferU16x2::buildTree(void) +{ + + uint32_t allocSize = sizeof(NvParameterized::DefinitionImpl) * NumParamDefs; + ParamDefTable = (NvParameterized::DefinitionImpl*)(mParameterizedTraits->alloc(allocSize)); + memset(ParamDefTable, 0, allocSize); + + for (uint32_t i = 0; i < NumParamDefs; ++i) + { + NV_PARAM_PLACEMENT_NEW(ParamDefTable + i, NvParameterized::DefinitionImpl)(*mParameterizedTraits); + } + + // Initialize DefinitionImpl node: nodeIndex=0, longName="" + { + NvParameterized::DefinitionImpl* ParamDef = &ParamDefTable[0]; + ParamDef->init("", TYPE_STRUCT, "STRUCT", true); + + + + + + + } + + // Initialize DefinitionImpl node: nodeIndex=1, longName="data" + { + NvParameterized::DefinitionImpl* ParamDef = &ParamDefTable[1]; + ParamDef->init("data", TYPE_ARRAY, NULL, true); + +#ifdef NV_PARAMETERIZED_HIDE_DESCRIPTIONS + +#else + + static HintImpl HintTable[1]; + static Hint* HintPtrTable[1] = { &HintTable[0], }; + HintTable[0].init("shortDescription", "Container for SHORT2 formats", true); + ParamDefTable[1].setHints((const NvParameterized::Hint**)HintPtrTable, 1); + +#endif /* NV_PARAMETERIZED_HIDE_DESCRIPTIONS */ + + + + + ParamDef->setArraySize(-1); + } + + // Initialize DefinitionImpl node: nodeIndex=2, longName="data[]" + { + NvParameterized::DefinitionImpl* ParamDef = &ParamDefTable[2]; + ParamDef->init("data", TYPE_STRUCT, "U16x2", true); + +#ifdef NV_PARAMETERIZED_HIDE_DESCRIPTIONS + +#else + + static HintImpl HintTable[1]; + static Hint* HintPtrTable[1] = { &HintTable[0], }; + HintTable[0].init("shortDescription", "Container for SHORT2 formats", true); + ParamDefTable[2].setHints((const NvParameterized::Hint**)HintPtrTable, 1); + +#endif /* NV_PARAMETERIZED_HIDE_DESCRIPTIONS */ + + + + + + } + + // Initialize DefinitionImpl 
node: nodeIndex=3, longName="data[].x" + { + NvParameterized::DefinitionImpl* ParamDef = &ParamDefTable[3]; + ParamDef->init("x", TYPE_U16, NULL, true); + +#ifdef NV_PARAMETERIZED_HIDE_DESCRIPTIONS + +#else + + static HintImpl HintTable[1]; + static Hint* HintPtrTable[1] = { &HintTable[0], }; + HintTable[0].init("shortDescription", "X", true); + ParamDefTable[3].setHints((const NvParameterized::Hint**)HintPtrTable, 1); + +#endif /* NV_PARAMETERIZED_HIDE_DESCRIPTIONS */ + + + + + + } + + // Initialize DefinitionImpl node: nodeIndex=4, longName="data[].y" + { + NvParameterized::DefinitionImpl* ParamDef = &ParamDefTable[4]; + ParamDef->init("y", TYPE_U16, NULL, true); + +#ifdef NV_PARAMETERIZED_HIDE_DESCRIPTIONS + +#else + + static HintImpl HintTable[1]; + static Hint* HintPtrTable[1] = { &HintTable[0], }; + HintTable[0].init("shortDescription", "Y", true); + ParamDefTable[4].setHints((const NvParameterized::Hint**)HintPtrTable, 1); + +#endif /* NV_PARAMETERIZED_HIDE_DESCRIPTIONS */ + + + + + + } + + // SetChildren for: nodeIndex=0, longName="" + { + static Definition* Children[1]; + Children[0] = PDEF_PTR(1); + + ParamDefTable[0].setChildren(Children, 1); + } + + // SetChildren for: nodeIndex=1, longName="data" + { + static Definition* Children[1]; + Children[0] = PDEF_PTR(2); + + ParamDefTable[1].setChildren(Children, 1); + } + + // SetChildren for: nodeIndex=2, longName="data[]" + { + static Definition* Children[2]; + Children[0] = PDEF_PTR(3); + Children[1] = PDEF_PTR(4); + + ParamDefTable[2].setChildren(Children, 2); + } + + mBuiltFlag = true; + +} +void BufferU16x2::initStrings(void) +{ +} + +void BufferU16x2::initDynamicArrays(void) +{ + data.buf = NULL; + data.isAllocated = true; + data.elementSize = sizeof(U16x2_Type); + data.arraySizes[0] = 0; +} + +void BufferU16x2::initDefaults(void) +{ + + freeStrings(); + freeReferences(); + freeDynamicArrays(); + + initDynamicArrays(); + initStrings(); + initReferences(); +} + +void BufferU16x2::initReferences(void) +{ 
+} + +void BufferU16x2::freeDynamicArrays(void) +{ + if (data.isAllocated && data.buf) + { + mParameterizedTraits->free(data.buf); + } +} + +void BufferU16x2::freeStrings(void) +{ +} + +void BufferU16x2::freeReferences(void) +{ +} + +} // namespace apex +} // namespace nvidia diff --git a/APEX_1.4/framework/src/autogen/BufferU16x3.cpp b/APEX_1.4/framework/src/autogen/BufferU16x3.cpp new file mode 100644 index 00000000..b96bedda --- /dev/null +++ b/APEX_1.4/framework/src/autogen/BufferU16x3.cpp @@ -0,0 +1,425 @@ +// This code contains NVIDIA Confidential Information and is disclosed to you +// under a form of NVIDIA software license agreement provided separately to you. +// +// Notice +// NVIDIA Corporation and its licensors retain all intellectual property and +// proprietary rights in and to this software and related documentation and +// any modifications thereto. Any use, reproduction, disclosure, or +// distribution of this software and related documentation without an express +// license agreement from NVIDIA Corporation is strictly prohibited. +// +// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES +// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO +// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT, +// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE. +// +// Information and code furnished is believed to be accurate and reliable. +// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such +// information or for any infringement of patents or other rights of third parties that may +// result from its use. No license is granted by implication or otherwise under any patent +// or patent rights of NVIDIA Corporation. Details are subject to change without notice. +// This code supersedes and replaces all information previously supplied. 
+// NVIDIA Corporation products are not authorized for use as critical +// components in life support devices or systems without express written approval of +// NVIDIA Corporation. +// +// Copyright (c) 2008-2015 NVIDIA Corporation. All rights reserved. + +// This file was generated by NvParameterized/scripts/GenParameterized.pl + + +#include "BufferU16x3.h" +#include <string.h> +#include <stdlib.h> + +using namespace NvParameterized; + +namespace nvidia +{ +namespace apex +{ + +using namespace BufferU16x3NS; + +const char* const BufferU16x3Factory::vptr = + NvParameterized::getVptr<BufferU16x3, BufferU16x3::ClassAlignment>(); + +const uint32_t NumParamDefs = 6; +static NvParameterized::DefinitionImpl* ParamDefTable; // now allocated in buildTree [NumParamDefs]; + + +static const size_t ParamLookupChildrenTable[] = +{ + 1, 2, 3, 4, 5, +}; + +#define TENUM(type) nvidia::##type +#define CHILDREN(index) &ParamLookupChildrenTable[index] +static const NvParameterized::ParamLookupNode ParamLookupTable[NumParamDefs] = +{ + { TYPE_STRUCT, false, 0, CHILDREN(0), 1 }, + { TYPE_ARRAY, true, (size_t)(&((ParametersStruct*)0)->data), CHILDREN(1), 1 }, // data + { TYPE_STRUCT, false, 1 * sizeof(U16x3_Type), CHILDREN(2), 3 }, // data[] + { TYPE_U16, false, (size_t)(&((U16x3_Type*)0)->x), NULL, 0 }, // data[].x + { TYPE_U16, false, (size_t)(&((U16x3_Type*)0)->y), NULL, 0 }, // data[].y + { TYPE_U16, false, (size_t)(&((U16x3_Type*)0)->z), NULL, 0 }, // data[].z +}; + + +bool BufferU16x3::mBuiltFlag = false; +NvParameterized::MutexType BufferU16x3::mBuiltFlagMutex; + +BufferU16x3::BufferU16x3(NvParameterized::Traits* traits, void* buf, int32_t* refCount) : + NvParameters(traits, buf, refCount) +{ + //mParameterizedTraits->registerFactory(className(), &BufferU16x3FactoryInst); + + if (!buf) //Do not init data if it is inplace-deserialized + { + initDynamicArrays(); + initStrings(); + initReferences(); + initDefaults(); + } +} + +BufferU16x3::~BufferU16x3() +{ + freeStrings(); + 
freeReferences(); + freeDynamicArrays(); +} + +void BufferU16x3::destroy() +{ + // We cache these fields here to avoid overwrite in destructor + bool doDeallocateSelf = mDoDeallocateSelf; + NvParameterized::Traits* traits = mParameterizedTraits; + int32_t* refCount = mRefCount; + void* buf = mBuffer; + + this->~BufferU16x3(); + + NvParameters::destroy(this, traits, doDeallocateSelf, refCount, buf); +} + +const NvParameterized::DefinitionImpl* BufferU16x3::getParameterDefinitionTree(void) +{ + if (!mBuiltFlag) // Double-checked lock + { + NvParameterized::MutexType::ScopedLock lock(mBuiltFlagMutex); + if (!mBuiltFlag) + { + buildTree(); + } + } + + return(&ParamDefTable[0]); +} + +const NvParameterized::DefinitionImpl* BufferU16x3::getParameterDefinitionTree(void) const +{ + BufferU16x3* tmpParam = const_cast<BufferU16x3*>(this); + + if (!mBuiltFlag) // Double-checked lock + { + NvParameterized::MutexType::ScopedLock lock(mBuiltFlagMutex); + if (!mBuiltFlag) + { + tmpParam->buildTree(); + } + } + + return(&ParamDefTable[0]); +} + +NvParameterized::ErrorType BufferU16x3::getParameterHandle(const char* long_name, Handle& handle) const +{ + ErrorType Ret = NvParameters::getParameterHandle(long_name, handle); + if (Ret != ERROR_NONE) + { + return(Ret); + } + + size_t offset; + void* ptr; + + getVarPtr(handle, ptr, offset); + + if (ptr == NULL) + { + return(ERROR_INDEX_OUT_OF_RANGE); + } + + return(ERROR_NONE); +} + +NvParameterized::ErrorType BufferU16x3::getParameterHandle(const char* long_name, Handle& handle) +{ + ErrorType Ret = NvParameters::getParameterHandle(long_name, handle); + if (Ret != ERROR_NONE) + { + return(Ret); + } + + size_t offset; + void* ptr; + + getVarPtr(handle, ptr, offset); + + if (ptr == NULL) + { + return(ERROR_INDEX_OUT_OF_RANGE); + } + + return(ERROR_NONE); +} + +void BufferU16x3::getVarPtr(const Handle& handle, void*& ptr, size_t& offset) const +{ + ptr = getVarPtrHelper(&ParamLookupTable[0], 
const_cast<BufferU16x3::ParametersStruct*>(¶meters()), handle, offset); +} + + +/* Dynamic Handle Indices */ + +void BufferU16x3::freeParameterDefinitionTable(NvParameterized::Traits* traits) +{ + if (!traits) + { + return; + } + + if (!mBuiltFlag) // Double-checked lock + { + return; + } + + NvParameterized::MutexType::ScopedLock lock(mBuiltFlagMutex); + + if (!mBuiltFlag) + { + return; + } + + for (uint32_t i = 0; i < NumParamDefs; ++i) + { + ParamDefTable[i].~DefinitionImpl(); + } + + traits->free(ParamDefTable); + + mBuiltFlag = false; +} + +#define PDEF_PTR(index) (&ParamDefTable[index]) + +void BufferU16x3::buildTree(void) +{ + + uint32_t allocSize = sizeof(NvParameterized::DefinitionImpl) * NumParamDefs; + ParamDefTable = (NvParameterized::DefinitionImpl*)(mParameterizedTraits->alloc(allocSize)); + memset(ParamDefTable, 0, allocSize); + + for (uint32_t i = 0; i < NumParamDefs; ++i) + { + NV_PARAM_PLACEMENT_NEW(ParamDefTable + i, NvParameterized::DefinitionImpl)(*mParameterizedTraits); + } + + // Initialize DefinitionImpl node: nodeIndex=0, longName="" + { + NvParameterized::DefinitionImpl* ParamDef = &ParamDefTable[0]; + ParamDef->init("", TYPE_STRUCT, "STRUCT", true); + + + + + + + } + + // Initialize DefinitionImpl node: nodeIndex=1, longName="data" + { + NvParameterized::DefinitionImpl* ParamDef = &ParamDefTable[1]; + ParamDef->init("data", TYPE_ARRAY, NULL, true); + +#ifdef NV_PARAMETERIZED_HIDE_DESCRIPTIONS + +#else + + static HintImpl HintTable[1]; + static Hint* HintPtrTable[1] = { &HintTable[0], }; + HintTable[0].init("shortDescription", "Container for SHORT3 formats", true); + ParamDefTable[1].setHints((const NvParameterized::Hint**)HintPtrTable, 1); + +#endif /* NV_PARAMETERIZED_HIDE_DESCRIPTIONS */ + + + + + ParamDef->setArraySize(-1); + } + + // Initialize DefinitionImpl node: nodeIndex=2, longName="data[]" + { + NvParameterized::DefinitionImpl* ParamDef = &ParamDefTable[2]; + ParamDef->init("data", TYPE_STRUCT, "U16x3", true); + +#ifdef 
NV_PARAMETERIZED_HIDE_DESCRIPTIONS + +#else + + static HintImpl HintTable[1]; + static Hint* HintPtrTable[1] = { &HintTable[0], }; + HintTable[0].init("shortDescription", "Container for SHORT3 formats", true); + ParamDefTable[2].setHints((const NvParameterized::Hint**)HintPtrTable, 1); + +#endif /* NV_PARAMETERIZED_HIDE_DESCRIPTIONS */ + + + + + + } + + // Initialize DefinitionImpl node: nodeIndex=3, longName="data[].x" + { + NvParameterized::DefinitionImpl* ParamDef = &ParamDefTable[3]; + ParamDef->init("x", TYPE_U16, NULL, true); + +#ifdef NV_PARAMETERIZED_HIDE_DESCRIPTIONS + +#else + + static HintImpl HintTable[1]; + static Hint* HintPtrTable[1] = { &HintTable[0], }; + HintTable[0].init("shortDescription", "X", true); + ParamDefTable[3].setHints((const NvParameterized::Hint**)HintPtrTable, 1); + +#endif /* NV_PARAMETERIZED_HIDE_DESCRIPTIONS */ + + + + + + } + + // Initialize DefinitionImpl node: nodeIndex=4, longName="data[].y" + { + NvParameterized::DefinitionImpl* ParamDef = &ParamDefTable[4]; + ParamDef->init("y", TYPE_U16, NULL, true); + +#ifdef NV_PARAMETERIZED_HIDE_DESCRIPTIONS + +#else + + static HintImpl HintTable[1]; + static Hint* HintPtrTable[1] = { &HintTable[0], }; + HintTable[0].init("shortDescription", "Y", true); + ParamDefTable[4].setHints((const NvParameterized::Hint**)HintPtrTable, 1); + +#endif /* NV_PARAMETERIZED_HIDE_DESCRIPTIONS */ + + + + + + } + + // Initialize DefinitionImpl node: nodeIndex=5, longName="data[].z" + { + NvParameterized::DefinitionImpl* ParamDef = &ParamDefTable[5]; + ParamDef->init("z", TYPE_U16, NULL, true); + +#ifdef NV_PARAMETERIZED_HIDE_DESCRIPTIONS + +#else + + static HintImpl HintTable[1]; + static Hint* HintPtrTable[1] = { &HintTable[0], }; + HintTable[0].init("shortDescription", "Z", true); + ParamDefTable[5].setHints((const NvParameterized::Hint**)HintPtrTable, 1); + +#endif /* NV_PARAMETERIZED_HIDE_DESCRIPTIONS */ + + + + + + } + + // SetChildren for: nodeIndex=0, longName="" + { + static Definition* 
Children[1]; + Children[0] = PDEF_PTR(1); + + ParamDefTable[0].setChildren(Children, 1); + } + + // SetChildren for: nodeIndex=1, longName="data" + { + static Definition* Children[1]; + Children[0] = PDEF_PTR(2); + + ParamDefTable[1].setChildren(Children, 1); + } + + // SetChildren for: nodeIndex=2, longName="data[]" + { + static Definition* Children[3]; + Children[0] = PDEF_PTR(3); + Children[1] = PDEF_PTR(4); + Children[2] = PDEF_PTR(5); + + ParamDefTable[2].setChildren(Children, 3); + } + + mBuiltFlag = true; + +} +void BufferU16x3::initStrings(void) +{ +} + +void BufferU16x3::initDynamicArrays(void) +{ + data.buf = NULL; + data.isAllocated = true; + data.elementSize = sizeof(U16x3_Type); + data.arraySizes[0] = 0; +} + +void BufferU16x3::initDefaults(void) +{ + + freeStrings(); + freeReferences(); + freeDynamicArrays(); + + initDynamicArrays(); + initStrings(); + initReferences(); +} + +void BufferU16x3::initReferences(void) +{ +} + +void BufferU16x3::freeDynamicArrays(void) +{ + if (data.isAllocated && data.buf) + { + mParameterizedTraits->free(data.buf); + } +} + +void BufferU16x3::freeStrings(void) +{ +} + +void BufferU16x3::freeReferences(void) +{ +} + +} // namespace apex +} // namespace nvidia diff --git a/APEX_1.4/framework/src/autogen/BufferU16x4.cpp b/APEX_1.4/framework/src/autogen/BufferU16x4.cpp new file mode 100644 index 00000000..ef45641b --- /dev/null +++ b/APEX_1.4/framework/src/autogen/BufferU16x4.cpp @@ -0,0 +1,449 @@ +// This code contains NVIDIA Confidential Information and is disclosed to you +// under a form of NVIDIA software license agreement provided separately to you. +// +// Notice +// NVIDIA Corporation and its licensors retain all intellectual property and +// proprietary rights in and to this software and related documentation and +// any modifications thereto. 
Any use, reproduction, disclosure, or +// distribution of this software and related documentation without an express +// license agreement from NVIDIA Corporation is strictly prohibited. +// +// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES +// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO +// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT, +// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE. +// +// Information and code furnished is believed to be accurate and reliable. +// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such +// information or for any infringement of patents or other rights of third parties that may +// result from its use. No license is granted by implication or otherwise under any patent +// or patent rights of NVIDIA Corporation. Details are subject to change without notice. +// This code supersedes and replaces all information previously supplied. +// NVIDIA Corporation products are not authorized for use as critical +// components in life support devices or systems without express written approval of +// NVIDIA Corporation. +// +// Copyright (c) 2008-2015 NVIDIA Corporation. All rights reserved. 
+ +// This file was generated by NvParameterized/scripts/GenParameterized.pl + + +#include "BufferU16x4.h" +#include <string.h> +#include <stdlib.h> + +using namespace NvParameterized; + +namespace nvidia +{ +namespace apex +{ + +using namespace BufferU16x4NS; + +const char* const BufferU16x4Factory::vptr = + NvParameterized::getVptr<BufferU16x4, BufferU16x4::ClassAlignment>(); + +const uint32_t NumParamDefs = 7; +static NvParameterized::DefinitionImpl* ParamDefTable; // now allocated in buildTree [NumParamDefs]; + + +static const size_t ParamLookupChildrenTable[] = +{ + 1, 2, 3, 4, 5, 6, +}; + +#define TENUM(type) nvidia::##type +#define CHILDREN(index) &ParamLookupChildrenTable[index] +static const NvParameterized::ParamLookupNode ParamLookupTable[NumParamDefs] = +{ + { TYPE_STRUCT, false, 0, CHILDREN(0), 1 }, + { TYPE_ARRAY, true, (size_t)(&((ParametersStruct*)0)->data), CHILDREN(1), 1 }, // data + { TYPE_STRUCT, false, 1 * sizeof(U16x4_Type), CHILDREN(2), 4 }, // data[] + { TYPE_U16, false, (size_t)(&((U16x4_Type*)0)->x), NULL, 0 }, // data[].x + { TYPE_U16, false, (size_t)(&((U16x4_Type*)0)->y), NULL, 0 }, // data[].y + { TYPE_U16, false, (size_t)(&((U16x4_Type*)0)->z), NULL, 0 }, // data[].z + { TYPE_U16, false, (size_t)(&((U16x4_Type*)0)->w), NULL, 0 }, // data[].w +}; + + +bool BufferU16x4::mBuiltFlag = false; +NvParameterized::MutexType BufferU16x4::mBuiltFlagMutex; + +BufferU16x4::BufferU16x4(NvParameterized::Traits* traits, void* buf, int32_t* refCount) : + NvParameters(traits, buf, refCount) +{ + //mParameterizedTraits->registerFactory(className(), &BufferU16x4FactoryInst); + + if (!buf) //Do not init data if it is inplace-deserialized + { + initDynamicArrays(); + initStrings(); + initReferences(); + initDefaults(); + } +} + +BufferU16x4::~BufferU16x4() +{ + freeStrings(); + freeReferences(); + freeDynamicArrays(); +} + +void BufferU16x4::destroy() +{ + // We cache these fields here to avoid overwrite in destructor + bool doDeallocateSelf = 
mDoDeallocateSelf; + NvParameterized::Traits* traits = mParameterizedTraits; + int32_t* refCount = mRefCount; + void* buf = mBuffer; + + this->~BufferU16x4(); + + NvParameters::destroy(this, traits, doDeallocateSelf, refCount, buf); +} + +const NvParameterized::DefinitionImpl* BufferU16x4::getParameterDefinitionTree(void) +{ + if (!mBuiltFlag) // Double-checked lock + { + NvParameterized::MutexType::ScopedLock lock(mBuiltFlagMutex); + if (!mBuiltFlag) + { + buildTree(); + } + } + + return(&ParamDefTable[0]); +} + +const NvParameterized::DefinitionImpl* BufferU16x4::getParameterDefinitionTree(void) const +{ + BufferU16x4* tmpParam = const_cast<BufferU16x4*>(this); + + if (!mBuiltFlag) // Double-checked lock + { + NvParameterized::MutexType::ScopedLock lock(mBuiltFlagMutex); + if (!mBuiltFlag) + { + tmpParam->buildTree(); + } + } + + return(&ParamDefTable[0]); +} + +NvParameterized::ErrorType BufferU16x4::getParameterHandle(const char* long_name, Handle& handle) const +{ + ErrorType Ret = NvParameters::getParameterHandle(long_name, handle); + if (Ret != ERROR_NONE) + { + return(Ret); + } + + size_t offset; + void* ptr; + + getVarPtr(handle, ptr, offset); + + if (ptr == NULL) + { + return(ERROR_INDEX_OUT_OF_RANGE); + } + + return(ERROR_NONE); +} + +NvParameterized::ErrorType BufferU16x4::getParameterHandle(const char* long_name, Handle& handle) +{ + ErrorType Ret = NvParameters::getParameterHandle(long_name, handle); + if (Ret != ERROR_NONE) + { + return(Ret); + } + + size_t offset; + void* ptr; + + getVarPtr(handle, ptr, offset); + + if (ptr == NULL) + { + return(ERROR_INDEX_OUT_OF_RANGE); + } + + return(ERROR_NONE); +} + +void BufferU16x4::getVarPtr(const Handle& handle, void*& ptr, size_t& offset) const +{ + ptr = getVarPtrHelper(&ParamLookupTable[0], const_cast<BufferU16x4::ParametersStruct*>(¶meters()), handle, offset); +} + + +/* Dynamic Handle Indices */ + +void BufferU16x4::freeParameterDefinitionTable(NvParameterized::Traits* traits) +{ + if (!traits) + { + 
return; + } + + if (!mBuiltFlag) // Double-checked lock + { + return; + } + + NvParameterized::MutexType::ScopedLock lock(mBuiltFlagMutex); + + if (!mBuiltFlag) + { + return; + } + + for (uint32_t i = 0; i < NumParamDefs; ++i) + { + ParamDefTable[i].~DefinitionImpl(); + } + + traits->free(ParamDefTable); + + mBuiltFlag = false; +} + +#define PDEF_PTR(index) (&ParamDefTable[index]) + +void BufferU16x4::buildTree(void) +{ + + uint32_t allocSize = sizeof(NvParameterized::DefinitionImpl) * NumParamDefs; + ParamDefTable = (NvParameterized::DefinitionImpl*)(mParameterizedTraits->alloc(allocSize)); + memset(ParamDefTable, 0, allocSize); + + for (uint32_t i = 0; i < NumParamDefs; ++i) + { + NV_PARAM_PLACEMENT_NEW(ParamDefTable + i, NvParameterized::DefinitionImpl)(*mParameterizedTraits); + } + + // Initialize DefinitionImpl node: nodeIndex=0, longName="" + { + NvParameterized::DefinitionImpl* ParamDef = &ParamDefTable[0]; + ParamDef->init("", TYPE_STRUCT, "STRUCT", true); + + + + + + + } + + // Initialize DefinitionImpl node: nodeIndex=1, longName="data" + { + NvParameterized::DefinitionImpl* ParamDef = &ParamDefTable[1]; + ParamDef->init("data", TYPE_ARRAY, NULL, true); + +#ifdef NV_PARAMETERIZED_HIDE_DESCRIPTIONS + +#else + + static HintImpl HintTable[1]; + static Hint* HintPtrTable[1] = { &HintTable[0], }; + HintTable[0].init("shortDescription", "Container for SHORT4 formats", true); + ParamDefTable[1].setHints((const NvParameterized::Hint**)HintPtrTable, 1); + +#endif /* NV_PARAMETERIZED_HIDE_DESCRIPTIONS */ + + + + + ParamDef->setArraySize(-1); + } + + // Initialize DefinitionImpl node: nodeIndex=2, longName="data[]" + { + NvParameterized::DefinitionImpl* ParamDef = &ParamDefTable[2]; + ParamDef->init("data", TYPE_STRUCT, "U16x4", true); + +#ifdef NV_PARAMETERIZED_HIDE_DESCRIPTIONS + +#else + + static HintImpl HintTable[1]; + static Hint* HintPtrTable[1] = { &HintTable[0], }; + HintTable[0].init("shortDescription", "Container for SHORT4 formats", true); + 
ParamDefTable[2].setHints((const NvParameterized::Hint**)HintPtrTable, 1); + +#endif /* NV_PARAMETERIZED_HIDE_DESCRIPTIONS */ + + + + + + } + + // Initialize DefinitionImpl node: nodeIndex=3, longName="data[].x" + { + NvParameterized::DefinitionImpl* ParamDef = &ParamDefTable[3]; + ParamDef->init("x", TYPE_U16, NULL, true); + +#ifdef NV_PARAMETERIZED_HIDE_DESCRIPTIONS + +#else + + static HintImpl HintTable[1]; + static Hint* HintPtrTable[1] = { &HintTable[0], }; + HintTable[0].init("shortDescription", "X", true); + ParamDefTable[3].setHints((const NvParameterized::Hint**)HintPtrTable, 1); + +#endif /* NV_PARAMETERIZED_HIDE_DESCRIPTIONS */ + + + + + + } + + // Initialize DefinitionImpl node: nodeIndex=4, longName="data[].y" + { + NvParameterized::DefinitionImpl* ParamDef = &ParamDefTable[4]; + ParamDef->init("y", TYPE_U16, NULL, true); + +#ifdef NV_PARAMETERIZED_HIDE_DESCRIPTIONS + +#else + + static HintImpl HintTable[1]; + static Hint* HintPtrTable[1] = { &HintTable[0], }; + HintTable[0].init("shortDescription", "Y", true); + ParamDefTable[4].setHints((const NvParameterized::Hint**)HintPtrTable, 1); + +#endif /* NV_PARAMETERIZED_HIDE_DESCRIPTIONS */ + + + + + + } + + // Initialize DefinitionImpl node: nodeIndex=5, longName="data[].z" + { + NvParameterized::DefinitionImpl* ParamDef = &ParamDefTable[5]; + ParamDef->init("z", TYPE_U16, NULL, true); + +#ifdef NV_PARAMETERIZED_HIDE_DESCRIPTIONS + +#else + + static HintImpl HintTable[1]; + static Hint* HintPtrTable[1] = { &HintTable[0], }; + HintTable[0].init("shortDescription", "Z", true); + ParamDefTable[5].setHints((const NvParameterized::Hint**)HintPtrTable, 1); + +#endif /* NV_PARAMETERIZED_HIDE_DESCRIPTIONS */ + + + + + + } + + // Initialize DefinitionImpl node: nodeIndex=6, longName="data[].w" + { + NvParameterized::DefinitionImpl* ParamDef = &ParamDefTable[6]; + ParamDef->init("w", TYPE_U16, NULL, true); + +#ifdef NV_PARAMETERIZED_HIDE_DESCRIPTIONS + +#else + + static HintImpl HintTable[1]; + static Hint* 
HintPtrTable[1] = { &HintTable[0], }; + HintTable[0].init("shortDescription", "W", true); + ParamDefTable[6].setHints((const NvParameterized::Hint**)HintPtrTable, 1); + +#endif /* NV_PARAMETERIZED_HIDE_DESCRIPTIONS */ + + + + + + } + + // SetChildren for: nodeIndex=0, longName="" + { + static Definition* Children[1]; + Children[0] = PDEF_PTR(1); + + ParamDefTable[0].setChildren(Children, 1); + } + + // SetChildren for: nodeIndex=1, longName="data" + { + static Definition* Children[1]; + Children[0] = PDEF_PTR(2); + + ParamDefTable[1].setChildren(Children, 1); + } + + // SetChildren for: nodeIndex=2, longName="data[]" + { + static Definition* Children[4]; + Children[0] = PDEF_PTR(3); + Children[1] = PDEF_PTR(4); + Children[2] = PDEF_PTR(5); + Children[3] = PDEF_PTR(6); + + ParamDefTable[2].setChildren(Children, 4); + } + + mBuiltFlag = true; + +} +void BufferU16x4::initStrings(void) +{ +} + +void BufferU16x4::initDynamicArrays(void) +{ + data.buf = NULL; + data.isAllocated = true; + data.elementSize = sizeof(U16x4_Type); + data.arraySizes[0] = 0; +} + +void BufferU16x4::initDefaults(void) +{ + + freeStrings(); + freeReferences(); + freeDynamicArrays(); + + initDynamicArrays(); + initStrings(); + initReferences(); +} + +void BufferU16x4::initReferences(void) +{ +} + +void BufferU16x4::freeDynamicArrays(void) +{ + if (data.isAllocated && data.buf) + { + mParameterizedTraits->free(data.buf); + } +} + +void BufferU16x4::freeStrings(void) +{ +} + +void BufferU16x4::freeReferences(void) +{ +} + +} // namespace apex +} // namespace nvidia diff --git a/APEX_1.4/framework/src/autogen/BufferU32x1.cpp b/APEX_1.4/framework/src/autogen/BufferU32x1.cpp new file mode 100644 index 00000000..347d460b --- /dev/null +++ b/APEX_1.4/framework/src/autogen/BufferU32x1.cpp @@ -0,0 +1,346 @@ +// This code contains NVIDIA Confidential Information and is disclosed to you +// under a form of NVIDIA software license agreement provided separately to you. 
+// +// Notice +// NVIDIA Corporation and its licensors retain all intellectual property and +// proprietary rights in and to this software and related documentation and +// any modifications thereto. Any use, reproduction, disclosure, or +// distribution of this software and related documentation without an express +// license agreement from NVIDIA Corporation is strictly prohibited. +// +// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES +// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO +// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT, +// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE. +// +// Information and code furnished is believed to be accurate and reliable. +// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such +// information or for any infringement of patents or other rights of third parties that may +// result from its use. No license is granted by implication or otherwise under any patent +// or patent rights of NVIDIA Corporation. Details are subject to change without notice. +// This code supersedes and replaces all information previously supplied. +// NVIDIA Corporation products are not authorized for use as critical +// components in life support devices or systems without express written approval of +// NVIDIA Corporation. +// +// Copyright (c) 2008-2015 NVIDIA Corporation. All rights reserved. 
+ +// This file was generated by NvParameterized/scripts/GenParameterized.pl + + +#include "BufferU32x1.h" +#include <string.h> +#include <stdlib.h> + +using namespace NvParameterized; + +namespace nvidia +{ +namespace apex +{ + +using namespace BufferU32x1NS; + +const char* const BufferU32x1Factory::vptr = + NvParameterized::getVptr<BufferU32x1, BufferU32x1::ClassAlignment>(); + +const uint32_t NumParamDefs = 3; +static NvParameterized::DefinitionImpl* ParamDefTable; // now allocated in buildTree [NumParamDefs]; + + +static const size_t ParamLookupChildrenTable[] = +{ + 1, 2, +}; + +#define TENUM(type) nvidia::##type +#define CHILDREN(index) &ParamLookupChildrenTable[index] +static const NvParameterized::ParamLookupNode ParamLookupTable[NumParamDefs] = +{ + { TYPE_STRUCT, false, 0, CHILDREN(0), 1 }, + { TYPE_ARRAY, true, (size_t)(&((ParametersStruct*)0)->data), CHILDREN(1), 1 }, // data + { TYPE_U32, false, 1 * sizeof(uint32_t), NULL, 0 }, // data[] +}; + + +bool BufferU32x1::mBuiltFlag = false; +NvParameterized::MutexType BufferU32x1::mBuiltFlagMutex; + +BufferU32x1::BufferU32x1(NvParameterized::Traits* traits, void* buf, int32_t* refCount) : + NvParameters(traits, buf, refCount) +{ + //mParameterizedTraits->registerFactory(className(), &BufferU32x1FactoryInst); + + if (!buf) //Do not init data if it is inplace-deserialized + { + initDynamicArrays(); + initStrings(); + initReferences(); + initDefaults(); + } +} + +BufferU32x1::~BufferU32x1() +{ + freeStrings(); + freeReferences(); + freeDynamicArrays(); +} + +void BufferU32x1::destroy() +{ + // We cache these fields here to avoid overwrite in destructor + bool doDeallocateSelf = mDoDeallocateSelf; + NvParameterized::Traits* traits = mParameterizedTraits; + int32_t* refCount = mRefCount; + void* buf = mBuffer; + + this->~BufferU32x1(); + + NvParameters::destroy(this, traits, doDeallocateSelf, refCount, buf); +} + +const NvParameterized::DefinitionImpl* BufferU32x1::getParameterDefinitionTree(void) +{ + if 
(!mBuiltFlag) // Double-checked lock + { + NvParameterized::MutexType::ScopedLock lock(mBuiltFlagMutex); + if (!mBuiltFlag) + { + buildTree(); + } + } + + return(&ParamDefTable[0]); +} + +const NvParameterized::DefinitionImpl* BufferU32x1::getParameterDefinitionTree(void) const +{ + BufferU32x1* tmpParam = const_cast<BufferU32x1*>(this); + + if (!mBuiltFlag) // Double-checked lock + { + NvParameterized::MutexType::ScopedLock lock(mBuiltFlagMutex); + if (!mBuiltFlag) + { + tmpParam->buildTree(); + } + } + + return(&ParamDefTable[0]); +} + +NvParameterized::ErrorType BufferU32x1::getParameterHandle(const char* long_name, Handle& handle) const +{ + ErrorType Ret = NvParameters::getParameterHandle(long_name, handle); + if (Ret != ERROR_NONE) + { + return(Ret); + } + + size_t offset; + void* ptr; + + getVarPtr(handle, ptr, offset); + + if (ptr == NULL) + { + return(ERROR_INDEX_OUT_OF_RANGE); + } + + return(ERROR_NONE); +} + +NvParameterized::ErrorType BufferU32x1::getParameterHandle(const char* long_name, Handle& handle) +{ + ErrorType Ret = NvParameters::getParameterHandle(long_name, handle); + if (Ret != ERROR_NONE) + { + return(Ret); + } + + size_t offset; + void* ptr; + + getVarPtr(handle, ptr, offset); + + if (ptr == NULL) + { + return(ERROR_INDEX_OUT_OF_RANGE); + } + + return(ERROR_NONE); +} + +void BufferU32x1::getVarPtr(const Handle& handle, void*& ptr, size_t& offset) const +{ + ptr = getVarPtrHelper(&ParamLookupTable[0], const_cast<BufferU32x1::ParametersStruct*>(¶meters()), handle, offset); +} + + +/* Dynamic Handle Indices */ + +void BufferU32x1::freeParameterDefinitionTable(NvParameterized::Traits* traits) +{ + if (!traits) + { + return; + } + + if (!mBuiltFlag) // Double-checked lock + { + return; + } + + NvParameterized::MutexType::ScopedLock lock(mBuiltFlagMutex); + + if (!mBuiltFlag) + { + return; + } + + for (uint32_t i = 0; i < NumParamDefs; ++i) + { + ParamDefTable[i].~DefinitionImpl(); + } + + traits->free(ParamDefTable); + + mBuiltFlag = false; +} 
+ +#define PDEF_PTR(index) (&ParamDefTable[index]) + +void BufferU32x1::buildTree(void) +{ + + uint32_t allocSize = sizeof(NvParameterized::DefinitionImpl) * NumParamDefs; + ParamDefTable = (NvParameterized::DefinitionImpl*)(mParameterizedTraits->alloc(allocSize)); + memset(ParamDefTable, 0, allocSize); + + for (uint32_t i = 0; i < NumParamDefs; ++i) + { + NV_PARAM_PLACEMENT_NEW(ParamDefTable + i, NvParameterized::DefinitionImpl)(*mParameterizedTraits); + } + + // Initialize DefinitionImpl node: nodeIndex=0, longName="" + { + NvParameterized::DefinitionImpl* ParamDef = &ParamDefTable[0]; + ParamDef->init("", TYPE_STRUCT, "STRUCT", true); + + + + + + + } + + // Initialize DefinitionImpl node: nodeIndex=1, longName="data" + { + NvParameterized::DefinitionImpl* ParamDef = &ParamDefTable[1]; + ParamDef->init("data", TYPE_ARRAY, NULL, true); + +#ifdef NV_PARAMETERIZED_HIDE_DESCRIPTIONS + +#else + + static HintImpl HintTable[1]; + static Hint* HintPtrTable[1] = { &HintTable[0], }; + HintTable[0].init("shortDescription", "Container for INT1 formats", true); + ParamDefTable[1].setHints((const NvParameterized::Hint**)HintPtrTable, 1); + +#endif /* NV_PARAMETERIZED_HIDE_DESCRIPTIONS */ + + + + + ParamDef->setArraySize(-1); + } + + // Initialize DefinitionImpl node: nodeIndex=2, longName="data[]" + { + NvParameterized::DefinitionImpl* ParamDef = &ParamDefTable[2]; + ParamDef->init("data", TYPE_U32, NULL, true); + +#ifdef NV_PARAMETERIZED_HIDE_DESCRIPTIONS + +#else + + static HintImpl HintTable[1]; + static Hint* HintPtrTable[1] = { &HintTable[0], }; + HintTable[0].init("shortDescription", "Container for INT1 formats", true); + ParamDefTable[2].setHints((const NvParameterized::Hint**)HintPtrTable, 1); + +#endif /* NV_PARAMETERIZED_HIDE_DESCRIPTIONS */ + + + + + + } + + // SetChildren for: nodeIndex=0, longName="" + { + static Definition* Children[1]; + Children[0] = PDEF_PTR(1); + + ParamDefTable[0].setChildren(Children, 1); + } + + // SetChildren for: nodeIndex=1, 
longName="data" + { + static Definition* Children[1]; + Children[0] = PDEF_PTR(2); + + ParamDefTable[1].setChildren(Children, 1); + } + + mBuiltFlag = true; + +} +void BufferU32x1::initStrings(void) +{ +} + +void BufferU32x1::initDynamicArrays(void) +{ + data.buf = NULL; + data.isAllocated = true; + data.elementSize = sizeof(uint32_t); + data.arraySizes[0] = 0; +} + +void BufferU32x1::initDefaults(void) +{ + + freeStrings(); + freeReferences(); + freeDynamicArrays(); + + initDynamicArrays(); + initStrings(); + initReferences(); +} + +void BufferU32x1::initReferences(void) +{ +} + +void BufferU32x1::freeDynamicArrays(void) +{ + if (data.isAllocated && data.buf) + { + mParameterizedTraits->free(data.buf); + } +} + +void BufferU32x1::freeStrings(void) +{ +} + +void BufferU32x1::freeReferences(void) +{ +} + +} // namespace apex +} // namespace nvidia diff --git a/APEX_1.4/framework/src/autogen/BufferU32x2.cpp b/APEX_1.4/framework/src/autogen/BufferU32x2.cpp new file mode 100644 index 00000000..6a62dd05 --- /dev/null +++ b/APEX_1.4/framework/src/autogen/BufferU32x2.cpp @@ -0,0 +1,401 @@ +// This code contains NVIDIA Confidential Information and is disclosed to you +// under a form of NVIDIA software license agreement provided separately to you. +// +// Notice +// NVIDIA Corporation and its licensors retain all intellectual property and +// proprietary rights in and to this software and related documentation and +// any modifications thereto. Any use, reproduction, disclosure, or +// distribution of this software and related documentation without an express +// license agreement from NVIDIA Corporation is strictly prohibited. +// +// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES +// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO +// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT, +// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE. 
+// +// Information and code furnished is believed to be accurate and reliable. +// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such +// information or for any infringement of patents or other rights of third parties that may +// result from its use. No license is granted by implication or otherwise under any patent +// or patent rights of NVIDIA Corporation. Details are subject to change without notice. +// This code supersedes and replaces all information previously supplied. +// NVIDIA Corporation products are not authorized for use as critical +// components in life support devices or systems without express written approval of +// NVIDIA Corporation. +// +// Copyright (c) 2008-2015 NVIDIA Corporation. All rights reserved. + +// This file was generated by NvParameterized/scripts/GenParameterized.pl + + +#include "BufferU32x2.h" +#include <string.h> +#include <stdlib.h> + +using namespace NvParameterized; + +namespace nvidia +{ +namespace apex +{ + +using namespace BufferU32x2NS; + +const char* const BufferU32x2Factory::vptr = + NvParameterized::getVptr<BufferU32x2, BufferU32x2::ClassAlignment>(); + +const uint32_t NumParamDefs = 5; +static NvParameterized::DefinitionImpl* ParamDefTable; // now allocated in buildTree [NumParamDefs]; + + +static const size_t ParamLookupChildrenTable[] = +{ + 1, 2, 3, 4, +}; + +#define TENUM(type) nvidia::##type +#define CHILDREN(index) &ParamLookupChildrenTable[index] +static const NvParameterized::ParamLookupNode ParamLookupTable[NumParamDefs] = +{ + { TYPE_STRUCT, false, 0, CHILDREN(0), 1 }, + { TYPE_ARRAY, true, (size_t)(&((ParametersStruct*)0)->data), CHILDREN(1), 1 }, // data + { TYPE_STRUCT, false, 1 * sizeof(U32x2_Type), CHILDREN(2), 2 }, // data[] + { TYPE_U32, false, (size_t)(&((U32x2_Type*)0)->x), NULL, 0 }, // data[].x + { TYPE_U32, false, (size_t)(&((U32x2_Type*)0)->y), NULL, 0 }, // data[].y +}; + + +bool BufferU32x2::mBuiltFlag = false; +NvParameterized::MutexType 
BufferU32x2::mBuiltFlagMutex; + +BufferU32x2::BufferU32x2(NvParameterized::Traits* traits, void* buf, int32_t* refCount) : + NvParameters(traits, buf, refCount) +{ + //mParameterizedTraits->registerFactory(className(), &BufferU32x2FactoryInst); + + if (!buf) //Do not init data if it is inplace-deserialized + { + initDynamicArrays(); + initStrings(); + initReferences(); + initDefaults(); + } +} + +BufferU32x2::~BufferU32x2() +{ + freeStrings(); + freeReferences(); + freeDynamicArrays(); +} + +void BufferU32x2::destroy() +{ + // We cache these fields here to avoid overwrite in destructor + bool doDeallocateSelf = mDoDeallocateSelf; + NvParameterized::Traits* traits = mParameterizedTraits; + int32_t* refCount = mRefCount; + void* buf = mBuffer; + + this->~BufferU32x2(); + + NvParameters::destroy(this, traits, doDeallocateSelf, refCount, buf); +} + +const NvParameterized::DefinitionImpl* BufferU32x2::getParameterDefinitionTree(void) +{ + if (!mBuiltFlag) // Double-checked lock + { + NvParameterized::MutexType::ScopedLock lock(mBuiltFlagMutex); + if (!mBuiltFlag) + { + buildTree(); + } + } + + return(&ParamDefTable[0]); +} + +const NvParameterized::DefinitionImpl* BufferU32x2::getParameterDefinitionTree(void) const +{ + BufferU32x2* tmpParam = const_cast<BufferU32x2*>(this); + + if (!mBuiltFlag) // Double-checked lock + { + NvParameterized::MutexType::ScopedLock lock(mBuiltFlagMutex); + if (!mBuiltFlag) + { + tmpParam->buildTree(); + } + } + + return(&ParamDefTable[0]); +} + +NvParameterized::ErrorType BufferU32x2::getParameterHandle(const char* long_name, Handle& handle) const +{ + ErrorType Ret = NvParameters::getParameterHandle(long_name, handle); + if (Ret != ERROR_NONE) + { + return(Ret); + } + + size_t offset; + void* ptr; + + getVarPtr(handle, ptr, offset); + + if (ptr == NULL) + { + return(ERROR_INDEX_OUT_OF_RANGE); + } + + return(ERROR_NONE); +} + +NvParameterized::ErrorType BufferU32x2::getParameterHandle(const char* long_name, Handle& handle) +{ + ErrorType 
Ret = NvParameters::getParameterHandle(long_name, handle); + if (Ret != ERROR_NONE) + { + return(Ret); + } + + size_t offset; + void* ptr; + + getVarPtr(handle, ptr, offset); + + if (ptr == NULL) + { + return(ERROR_INDEX_OUT_OF_RANGE); + } + + return(ERROR_NONE); +} + +void BufferU32x2::getVarPtr(const Handle& handle, void*& ptr, size_t& offset) const +{ + ptr = getVarPtrHelper(&ParamLookupTable[0], const_cast<BufferU32x2::ParametersStruct*>(¶meters()), handle, offset); +} + + +/* Dynamic Handle Indices */ + +void BufferU32x2::freeParameterDefinitionTable(NvParameterized::Traits* traits) +{ + if (!traits) + { + return; + } + + if (!mBuiltFlag) // Double-checked lock + { + return; + } + + NvParameterized::MutexType::ScopedLock lock(mBuiltFlagMutex); + + if (!mBuiltFlag) + { + return; + } + + for (uint32_t i = 0; i < NumParamDefs; ++i) + { + ParamDefTable[i].~DefinitionImpl(); + } + + traits->free(ParamDefTable); + + mBuiltFlag = false; +} + +#define PDEF_PTR(index) (&ParamDefTable[index]) + +void BufferU32x2::buildTree(void) +{ + + uint32_t allocSize = sizeof(NvParameterized::DefinitionImpl) * NumParamDefs; + ParamDefTable = (NvParameterized::DefinitionImpl*)(mParameterizedTraits->alloc(allocSize)); + memset(ParamDefTable, 0, allocSize); + + for (uint32_t i = 0; i < NumParamDefs; ++i) + { + NV_PARAM_PLACEMENT_NEW(ParamDefTable + i, NvParameterized::DefinitionImpl)(*mParameterizedTraits); + } + + // Initialize DefinitionImpl node: nodeIndex=0, longName="" + { + NvParameterized::DefinitionImpl* ParamDef = &ParamDefTable[0]; + ParamDef->init("", TYPE_STRUCT, "STRUCT", true); + + + + + + + } + + // Initialize DefinitionImpl node: nodeIndex=1, longName="data" + { + NvParameterized::DefinitionImpl* ParamDef = &ParamDefTable[1]; + ParamDef->init("data", TYPE_ARRAY, NULL, true); + +#ifdef NV_PARAMETERIZED_HIDE_DESCRIPTIONS + +#else + + static HintImpl HintTable[1]; + static Hint* HintPtrTable[1] = { &HintTable[0], }; + HintTable[0].init("shortDescription", "Container for 
INT2 formats", true); + ParamDefTable[1].setHints((const NvParameterized::Hint**)HintPtrTable, 1); + +#endif /* NV_PARAMETERIZED_HIDE_DESCRIPTIONS */ + + + + + ParamDef->setArraySize(-1); + } + + // Initialize DefinitionImpl node: nodeIndex=2, longName="data[]" + { + NvParameterized::DefinitionImpl* ParamDef = &ParamDefTable[2]; + ParamDef->init("data", TYPE_STRUCT, "U32x2", true); + +#ifdef NV_PARAMETERIZED_HIDE_DESCRIPTIONS + +#else + + static HintImpl HintTable[1]; + static Hint* HintPtrTable[1] = { &HintTable[0], }; + HintTable[0].init("shortDescription", "Container for INT2 formats", true); + ParamDefTable[2].setHints((const NvParameterized::Hint**)HintPtrTable, 1); + +#endif /* NV_PARAMETERIZED_HIDE_DESCRIPTIONS */ + + + + + + } + + // Initialize DefinitionImpl node: nodeIndex=3, longName="data[].x" + { + NvParameterized::DefinitionImpl* ParamDef = &ParamDefTable[3]; + ParamDef->init("x", TYPE_U32, NULL, true); + +#ifdef NV_PARAMETERIZED_HIDE_DESCRIPTIONS + +#else + + static HintImpl HintTable[1]; + static Hint* HintPtrTable[1] = { &HintTable[0], }; + HintTable[0].init("shortDescription", "X", true); + ParamDefTable[3].setHints((const NvParameterized::Hint**)HintPtrTable, 1); + +#endif /* NV_PARAMETERIZED_HIDE_DESCRIPTIONS */ + + + + + + } + + // Initialize DefinitionImpl node: nodeIndex=4, longName="data[].y" + { + NvParameterized::DefinitionImpl* ParamDef = &ParamDefTable[4]; + ParamDef->init("y", TYPE_U32, NULL, true); + +#ifdef NV_PARAMETERIZED_HIDE_DESCRIPTIONS + +#else + + static HintImpl HintTable[1]; + static Hint* HintPtrTable[1] = { &HintTable[0], }; + HintTable[0].init("shortDescription", "Y", true); + ParamDefTable[4].setHints((const NvParameterized::Hint**)HintPtrTable, 1); + +#endif /* NV_PARAMETERIZED_HIDE_DESCRIPTIONS */ + + + + + + } + + // SetChildren for: nodeIndex=0, longName="" + { + static Definition* Children[1]; + Children[0] = PDEF_PTR(1); + + ParamDefTable[0].setChildren(Children, 1); + } + + // SetChildren for: nodeIndex=1, 
longName="data" + { + static Definition* Children[1]; + Children[0] = PDEF_PTR(2); + + ParamDefTable[1].setChildren(Children, 1); + } + + // SetChildren for: nodeIndex=2, longName="data[]" + { + static Definition* Children[2]; + Children[0] = PDEF_PTR(3); + Children[1] = PDEF_PTR(4); + + ParamDefTable[2].setChildren(Children, 2); + } + + mBuiltFlag = true; + +} +void BufferU32x2::initStrings(void) +{ +} + +void BufferU32x2::initDynamicArrays(void) +{ + data.buf = NULL; + data.isAllocated = true; + data.elementSize = sizeof(U32x2_Type); + data.arraySizes[0] = 0; +} + +void BufferU32x2::initDefaults(void) +{ + + freeStrings(); + freeReferences(); + freeDynamicArrays(); + + initDynamicArrays(); + initStrings(); + initReferences(); +} + +void BufferU32x2::initReferences(void) +{ +} + +void BufferU32x2::freeDynamicArrays(void) +{ + if (data.isAllocated && data.buf) + { + mParameterizedTraits->free(data.buf); + } +} + +void BufferU32x2::freeStrings(void) +{ +} + +void BufferU32x2::freeReferences(void) +{ +} + +} // namespace apex +} // namespace nvidia diff --git a/APEX_1.4/framework/src/autogen/BufferU32x3.cpp b/APEX_1.4/framework/src/autogen/BufferU32x3.cpp new file mode 100644 index 00000000..ccec68f0 --- /dev/null +++ b/APEX_1.4/framework/src/autogen/BufferU32x3.cpp @@ -0,0 +1,425 @@ +// This code contains NVIDIA Confidential Information and is disclosed to you +// under a form of NVIDIA software license agreement provided separately to you. +// +// Notice +// NVIDIA Corporation and its licensors retain all intellectual property and +// proprietary rights in and to this software and related documentation and +// any modifications thereto. Any use, reproduction, disclosure, or +// distribution of this software and related documentation without an express +// license agreement from NVIDIA Corporation is strictly prohibited. +// +// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". 
NVIDIA MAKES +// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO +// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT, +// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE. +// +// Information and code furnished is believed to be accurate and reliable. +// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such +// information or for any infringement of patents or other rights of third parties that may +// result from its use. No license is granted by implication or otherwise under any patent +// or patent rights of NVIDIA Corporation. Details are subject to change without notice. +// This code supersedes and replaces all information previously supplied. +// NVIDIA Corporation products are not authorized for use as critical +// components in life support devices or systems without express written approval of +// NVIDIA Corporation. +// +// Copyright (c) 2008-2015 NVIDIA Corporation. All rights reserved. 
+ +// This file was generated by NvParameterized/scripts/GenParameterized.pl + + +#include "BufferU32x3.h" +#include <string.h> +#include <stdlib.h> + +using namespace NvParameterized; + +namespace nvidia +{ +namespace apex +{ + +using namespace BufferU32x3NS; + +const char* const BufferU32x3Factory::vptr = + NvParameterized::getVptr<BufferU32x3, BufferU32x3::ClassAlignment>(); + +const uint32_t NumParamDefs = 6; +static NvParameterized::DefinitionImpl* ParamDefTable; // now allocated in buildTree [NumParamDefs]; + + +static const size_t ParamLookupChildrenTable[] = +{ + 1, 2, 3, 4, 5, +}; + +#define TENUM(type) nvidia::##type +#define CHILDREN(index) &ParamLookupChildrenTable[index] +static const NvParameterized::ParamLookupNode ParamLookupTable[NumParamDefs] = +{ + { TYPE_STRUCT, false, 0, CHILDREN(0), 1 }, + { TYPE_ARRAY, true, (size_t)(&((ParametersStruct*)0)->data), CHILDREN(1), 1 }, // data + { TYPE_STRUCT, false, 1 * sizeof(U32x3_Type), CHILDREN(2), 3 }, // data[] + { TYPE_U32, false, (size_t)(&((U32x3_Type*)0)->x), NULL, 0 }, // data[].x + { TYPE_U32, false, (size_t)(&((U32x3_Type*)0)->y), NULL, 0 }, // data[].y + { TYPE_U32, false, (size_t)(&((U32x3_Type*)0)->z), NULL, 0 }, // data[].z +}; + + +bool BufferU32x3::mBuiltFlag = false; +NvParameterized::MutexType BufferU32x3::mBuiltFlagMutex; + +BufferU32x3::BufferU32x3(NvParameterized::Traits* traits, void* buf, int32_t* refCount) : + NvParameters(traits, buf, refCount) +{ + //mParameterizedTraits->registerFactory(className(), &BufferU32x3FactoryInst); + + if (!buf) //Do not init data if it is inplace-deserialized + { + initDynamicArrays(); + initStrings(); + initReferences(); + initDefaults(); + } +} + +BufferU32x3::~BufferU32x3() +{ + freeStrings(); + freeReferences(); + freeDynamicArrays(); +} + +void BufferU32x3::destroy() +{ + // We cache these fields here to avoid overwrite in destructor + bool doDeallocateSelf = mDoDeallocateSelf; + NvParameterized::Traits* traits = mParameterizedTraits; + int32_t* 
refCount = mRefCount; + void* buf = mBuffer; + + this->~BufferU32x3(); + + NvParameters::destroy(this, traits, doDeallocateSelf, refCount, buf); +} + +const NvParameterized::DefinitionImpl* BufferU32x3::getParameterDefinitionTree(void) +{ + if (!mBuiltFlag) // Double-checked lock + { + NvParameterized::MutexType::ScopedLock lock(mBuiltFlagMutex); + if (!mBuiltFlag) + { + buildTree(); + } + } + + return(&ParamDefTable[0]); +} + +const NvParameterized::DefinitionImpl* BufferU32x3::getParameterDefinitionTree(void) const +{ + BufferU32x3* tmpParam = const_cast<BufferU32x3*>(this); + + if (!mBuiltFlag) // Double-checked lock + { + NvParameterized::MutexType::ScopedLock lock(mBuiltFlagMutex); + if (!mBuiltFlag) + { + tmpParam->buildTree(); + } + } + + return(&ParamDefTable[0]); +} + +NvParameterized::ErrorType BufferU32x3::getParameterHandle(const char* long_name, Handle& handle) const +{ + ErrorType Ret = NvParameters::getParameterHandle(long_name, handle); + if (Ret != ERROR_NONE) + { + return(Ret); + } + + size_t offset; + void* ptr; + + getVarPtr(handle, ptr, offset); + + if (ptr == NULL) + { + return(ERROR_INDEX_OUT_OF_RANGE); + } + + return(ERROR_NONE); +} + +NvParameterized::ErrorType BufferU32x3::getParameterHandle(const char* long_name, Handle& handle) +{ + ErrorType Ret = NvParameters::getParameterHandle(long_name, handle); + if (Ret != ERROR_NONE) + { + return(Ret); + } + + size_t offset; + void* ptr; + + getVarPtr(handle, ptr, offset); + + if (ptr == NULL) + { + return(ERROR_INDEX_OUT_OF_RANGE); + } + + return(ERROR_NONE); +} + +void BufferU32x3::getVarPtr(const Handle& handle, void*& ptr, size_t& offset) const +{ + ptr = getVarPtrHelper(&ParamLookupTable[0], const_cast<BufferU32x3::ParametersStruct*>(¶meters()), handle, offset); +} + + +/* Dynamic Handle Indices */ + +void BufferU32x3::freeParameterDefinitionTable(NvParameterized::Traits* traits) +{ + if (!traits) + { + return; + } + + if (!mBuiltFlag) // Double-checked lock + { + return; + } + + 
NvParameterized::MutexType::ScopedLock lock(mBuiltFlagMutex); + + if (!mBuiltFlag) + { + return; + } + + for (uint32_t i = 0; i < NumParamDefs; ++i) + { + ParamDefTable[i].~DefinitionImpl(); + } + + traits->free(ParamDefTable); + + mBuiltFlag = false; +} + +#define PDEF_PTR(index) (&ParamDefTable[index]) + +void BufferU32x3::buildTree(void) +{ + + uint32_t allocSize = sizeof(NvParameterized::DefinitionImpl) * NumParamDefs; + ParamDefTable = (NvParameterized::DefinitionImpl*)(mParameterizedTraits->alloc(allocSize)); + memset(ParamDefTable, 0, allocSize); + + for (uint32_t i = 0; i < NumParamDefs; ++i) + { + NV_PARAM_PLACEMENT_NEW(ParamDefTable + i, NvParameterized::DefinitionImpl)(*mParameterizedTraits); + } + + // Initialize DefinitionImpl node: nodeIndex=0, longName="" + { + NvParameterized::DefinitionImpl* ParamDef = &ParamDefTable[0]; + ParamDef->init("", TYPE_STRUCT, "STRUCT", true); + + + + + + + } + + // Initialize DefinitionImpl node: nodeIndex=1, longName="data" + { + NvParameterized::DefinitionImpl* ParamDef = &ParamDefTable[1]; + ParamDef->init("data", TYPE_ARRAY, NULL, true); + +#ifdef NV_PARAMETERIZED_HIDE_DESCRIPTIONS + +#else + + static HintImpl HintTable[1]; + static Hint* HintPtrTable[1] = { &HintTable[0], }; + HintTable[0].init("shortDescription", "Container for INT3 formats", true); + ParamDefTable[1].setHints((const NvParameterized::Hint**)HintPtrTable, 1); + +#endif /* NV_PARAMETERIZED_HIDE_DESCRIPTIONS */ + + + + + ParamDef->setArraySize(-1); + } + + // Initialize DefinitionImpl node: nodeIndex=2, longName="data[]" + { + NvParameterized::DefinitionImpl* ParamDef = &ParamDefTable[2]; + ParamDef->init("data", TYPE_STRUCT, "U32x3", true); + +#ifdef NV_PARAMETERIZED_HIDE_DESCRIPTIONS + +#else + + static HintImpl HintTable[1]; + static Hint* HintPtrTable[1] = { &HintTable[0], }; + HintTable[0].init("shortDescription", "Container for INT3 formats", true); + ParamDefTable[2].setHints((const NvParameterized::Hint**)HintPtrTable, 1); + +#endif /* 
NV_PARAMETERIZED_HIDE_DESCRIPTIONS */ + + + + + + } + + // Initialize DefinitionImpl node: nodeIndex=3, longName="data[].x" + { + NvParameterized::DefinitionImpl* ParamDef = &ParamDefTable[3]; + ParamDef->init("x", TYPE_U32, NULL, true); + +#ifdef NV_PARAMETERIZED_HIDE_DESCRIPTIONS + +#else + + static HintImpl HintTable[1]; + static Hint* HintPtrTable[1] = { &HintTable[0], }; + HintTable[0].init("shortDescription", "X", true); + ParamDefTable[3].setHints((const NvParameterized::Hint**)HintPtrTable, 1); + +#endif /* NV_PARAMETERIZED_HIDE_DESCRIPTIONS */ + + + + + + } + + // Initialize DefinitionImpl node: nodeIndex=4, longName="data[].y" + { + NvParameterized::DefinitionImpl* ParamDef = &ParamDefTable[4]; + ParamDef->init("y", TYPE_U32, NULL, true); + +#ifdef NV_PARAMETERIZED_HIDE_DESCRIPTIONS + +#else + + static HintImpl HintTable[1]; + static Hint* HintPtrTable[1] = { &HintTable[0], }; + HintTable[0].init("shortDescription", "Y", true); + ParamDefTable[4].setHints((const NvParameterized::Hint**)HintPtrTable, 1); + +#endif /* NV_PARAMETERIZED_HIDE_DESCRIPTIONS */ + + + + + + } + + // Initialize DefinitionImpl node: nodeIndex=5, longName="data[].z" + { + NvParameterized::DefinitionImpl* ParamDef = &ParamDefTable[5]; + ParamDef->init("z", TYPE_U32, NULL, true); + +#ifdef NV_PARAMETERIZED_HIDE_DESCRIPTIONS + +#else + + static HintImpl HintTable[1]; + static Hint* HintPtrTable[1] = { &HintTable[0], }; + HintTable[0].init("shortDescription", "Z", true); + ParamDefTable[5].setHints((const NvParameterized::Hint**)HintPtrTable, 1); + +#endif /* NV_PARAMETERIZED_HIDE_DESCRIPTIONS */ + + + + + + } + + // SetChildren for: nodeIndex=0, longName="" + { + static Definition* Children[1]; + Children[0] = PDEF_PTR(1); + + ParamDefTable[0].setChildren(Children, 1); + } + + // SetChildren for: nodeIndex=1, longName="data" + { + static Definition* Children[1]; + Children[0] = PDEF_PTR(2); + + ParamDefTable[1].setChildren(Children, 1); + } + + // SetChildren for: nodeIndex=2, 
longName="data[]" + { + static Definition* Children[3]; + Children[0] = PDEF_PTR(3); + Children[1] = PDEF_PTR(4); + Children[2] = PDEF_PTR(5); + + ParamDefTable[2].setChildren(Children, 3); + } + + mBuiltFlag = true; + +} +void BufferU32x3::initStrings(void) +{ +} + +void BufferU32x3::initDynamicArrays(void) +{ + data.buf = NULL; + data.isAllocated = true; + data.elementSize = sizeof(U32x3_Type); + data.arraySizes[0] = 0; +} + +void BufferU32x3::initDefaults(void) +{ + + freeStrings(); + freeReferences(); + freeDynamicArrays(); + + initDynamicArrays(); + initStrings(); + initReferences(); +} + +void BufferU32x3::initReferences(void) +{ +} + +void BufferU32x3::freeDynamicArrays(void) +{ + if (data.isAllocated && data.buf) + { + mParameterizedTraits->free(data.buf); + } +} + +void BufferU32x3::freeStrings(void) +{ +} + +void BufferU32x3::freeReferences(void) +{ +} + +} // namespace apex +} // namespace nvidia diff --git a/APEX_1.4/framework/src/autogen/BufferU32x4.cpp b/APEX_1.4/framework/src/autogen/BufferU32x4.cpp new file mode 100644 index 00000000..26d43552 --- /dev/null +++ b/APEX_1.4/framework/src/autogen/BufferU32x4.cpp @@ -0,0 +1,449 @@ +// This code contains NVIDIA Confidential Information and is disclosed to you +// under a form of NVIDIA software license agreement provided separately to you. +// +// Notice +// NVIDIA Corporation and its licensors retain all intellectual property and +// proprietary rights in and to this software and related documentation and +// any modifications thereto. Any use, reproduction, disclosure, or +// distribution of this software and related documentation without an express +// license agreement from NVIDIA Corporation is strictly prohibited. +// +// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". 
NVIDIA MAKES +// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO +// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT, +// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE. +// +// Information and code furnished is believed to be accurate and reliable. +// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such +// information or for any infringement of patents or other rights of third parties that may +// result from its use. No license is granted by implication or otherwise under any patent +// or patent rights of NVIDIA Corporation. Details are subject to change without notice. +// This code supersedes and replaces all information previously supplied. +// NVIDIA Corporation products are not authorized for use as critical +// components in life support devices or systems without express written approval of +// NVIDIA Corporation. +// +// Copyright (c) 2008-2015 NVIDIA Corporation. All rights reserved. 
+ +// This file was generated by NvParameterized/scripts/GenParameterized.pl + + +#include "BufferU32x4.h" +#include <string.h> +#include <stdlib.h> + +using namespace NvParameterized; + +namespace nvidia +{ +namespace apex +{ + +using namespace BufferU32x4NS; + +const char* const BufferU32x4Factory::vptr = + NvParameterized::getVptr<BufferU32x4, BufferU32x4::ClassAlignment>(); + +const uint32_t NumParamDefs = 7; +static NvParameterized::DefinitionImpl* ParamDefTable; // now allocated in buildTree [NumParamDefs]; + + +static const size_t ParamLookupChildrenTable[] = +{ + 1, 2, 3, 4, 5, 6, +}; + +#define TENUM(type) nvidia::##type +#define CHILDREN(index) &ParamLookupChildrenTable[index] +static const NvParameterized::ParamLookupNode ParamLookupTable[NumParamDefs] = +{ + { TYPE_STRUCT, false, 0, CHILDREN(0), 1 }, + { TYPE_ARRAY, true, (size_t)(&((ParametersStruct*)0)->data), CHILDREN(1), 1 }, // data + { TYPE_STRUCT, false, 1 * sizeof(U32x4_Type), CHILDREN(2), 4 }, // data[] + { TYPE_U32, false, (size_t)(&((U32x4_Type*)0)->x), NULL, 0 }, // data[].x + { TYPE_U32, false, (size_t)(&((U32x4_Type*)0)->y), NULL, 0 }, // data[].y + { TYPE_U32, false, (size_t)(&((U32x4_Type*)0)->z), NULL, 0 }, // data[].z + { TYPE_U32, false, (size_t)(&((U32x4_Type*)0)->w), NULL, 0 }, // data[].w +}; + + +bool BufferU32x4::mBuiltFlag = false; +NvParameterized::MutexType BufferU32x4::mBuiltFlagMutex; + +BufferU32x4::BufferU32x4(NvParameterized::Traits* traits, void* buf, int32_t* refCount) : + NvParameters(traits, buf, refCount) +{ + //mParameterizedTraits->registerFactory(className(), &BufferU32x4FactoryInst); + + if (!buf) //Do not init data if it is inplace-deserialized + { + initDynamicArrays(); + initStrings(); + initReferences(); + initDefaults(); + } +} + +BufferU32x4::~BufferU32x4() +{ + freeStrings(); + freeReferences(); + freeDynamicArrays(); +} + +void BufferU32x4::destroy() +{ + // We cache these fields here to avoid overwrite in destructor + bool doDeallocateSelf = 
mDoDeallocateSelf; + NvParameterized::Traits* traits = mParameterizedTraits; + int32_t* refCount = mRefCount; + void* buf = mBuffer; + + this->~BufferU32x4(); + + NvParameters::destroy(this, traits, doDeallocateSelf, refCount, buf); +} + +const NvParameterized::DefinitionImpl* BufferU32x4::getParameterDefinitionTree(void) +{ + if (!mBuiltFlag) // Double-checked lock + { + NvParameterized::MutexType::ScopedLock lock(mBuiltFlagMutex); + if (!mBuiltFlag) + { + buildTree(); + } + } + + return(&ParamDefTable[0]); +} + +const NvParameterized::DefinitionImpl* BufferU32x4::getParameterDefinitionTree(void) const +{ + BufferU32x4* tmpParam = const_cast<BufferU32x4*>(this); + + if (!mBuiltFlag) // Double-checked lock + { + NvParameterized::MutexType::ScopedLock lock(mBuiltFlagMutex); + if (!mBuiltFlag) + { + tmpParam->buildTree(); + } + } + + return(&ParamDefTable[0]); +} + +NvParameterized::ErrorType BufferU32x4::getParameterHandle(const char* long_name, Handle& handle) const +{ + ErrorType Ret = NvParameters::getParameterHandle(long_name, handle); + if (Ret != ERROR_NONE) + { + return(Ret); + } + + size_t offset; + void* ptr; + + getVarPtr(handle, ptr, offset); + + if (ptr == NULL) + { + return(ERROR_INDEX_OUT_OF_RANGE); + } + + return(ERROR_NONE); +} + +NvParameterized::ErrorType BufferU32x4::getParameterHandle(const char* long_name, Handle& handle) +{ + ErrorType Ret = NvParameters::getParameterHandle(long_name, handle); + if (Ret != ERROR_NONE) + { + return(Ret); + } + + size_t offset; + void* ptr; + + getVarPtr(handle, ptr, offset); + + if (ptr == NULL) + { + return(ERROR_INDEX_OUT_OF_RANGE); + } + + return(ERROR_NONE); +} + +void BufferU32x4::getVarPtr(const Handle& handle, void*& ptr, size_t& offset) const +{ + ptr = getVarPtrHelper(&ParamLookupTable[0], const_cast<BufferU32x4::ParametersStruct*>(¶meters()), handle, offset); +} + + +/* Dynamic Handle Indices */ + +void BufferU32x4::freeParameterDefinitionTable(NvParameterized::Traits* traits) +{ + if (!traits) + { + 
return; + } + + if (!mBuiltFlag) // Double-checked lock + { + return; + } + + NvParameterized::MutexType::ScopedLock lock(mBuiltFlagMutex); + + if (!mBuiltFlag) + { + return; + } + + for (uint32_t i = 0; i < NumParamDefs; ++i) + { + ParamDefTable[i].~DefinitionImpl(); + } + + traits->free(ParamDefTable); + + mBuiltFlag = false; +} + +#define PDEF_PTR(index) (&ParamDefTable[index]) + +void BufferU32x4::buildTree(void) +{ + + uint32_t allocSize = sizeof(NvParameterized::DefinitionImpl) * NumParamDefs; + ParamDefTable = (NvParameterized::DefinitionImpl*)(mParameterizedTraits->alloc(allocSize)); + memset(ParamDefTable, 0, allocSize); + + for (uint32_t i = 0; i < NumParamDefs; ++i) + { + NV_PARAM_PLACEMENT_NEW(ParamDefTable + i, NvParameterized::DefinitionImpl)(*mParameterizedTraits); + } + + // Initialize DefinitionImpl node: nodeIndex=0, longName="" + { + NvParameterized::DefinitionImpl* ParamDef = &ParamDefTable[0]; + ParamDef->init("", TYPE_STRUCT, "STRUCT", true); + + + + + + + } + + // Initialize DefinitionImpl node: nodeIndex=1, longName="data" + { + NvParameterized::DefinitionImpl* ParamDef = &ParamDefTable[1]; + ParamDef->init("data", TYPE_ARRAY, NULL, true); + +#ifdef NV_PARAMETERIZED_HIDE_DESCRIPTIONS + +#else + + static HintImpl HintTable[1]; + static Hint* HintPtrTable[1] = { &HintTable[0], }; + HintTable[0].init("shortDescription", "Container for INT4 formats", true); + ParamDefTable[1].setHints((const NvParameterized::Hint**)HintPtrTable, 1); + +#endif /* NV_PARAMETERIZED_HIDE_DESCRIPTIONS */ + + + + + ParamDef->setArraySize(-1); + } + + // Initialize DefinitionImpl node: nodeIndex=2, longName="data[]" + { + NvParameterized::DefinitionImpl* ParamDef = &ParamDefTable[2]; + ParamDef->init("data", TYPE_STRUCT, "U32x4", true); + +#ifdef NV_PARAMETERIZED_HIDE_DESCRIPTIONS + +#else + + static HintImpl HintTable[1]; + static Hint* HintPtrTable[1] = { &HintTable[0], }; + HintTable[0].init("shortDescription", "Container for INT4 formats", true); + 
ParamDefTable[2].setHints((const NvParameterized::Hint**)HintPtrTable, 1); + +#endif /* NV_PARAMETERIZED_HIDE_DESCRIPTIONS */ + + + + + + } + + // Initialize DefinitionImpl node: nodeIndex=3, longName="data[].x" + { + NvParameterized::DefinitionImpl* ParamDef = &ParamDefTable[3]; + ParamDef->init("x", TYPE_U32, NULL, true); + +#ifdef NV_PARAMETERIZED_HIDE_DESCRIPTIONS + +#else + + static HintImpl HintTable[1]; + static Hint* HintPtrTable[1] = { &HintTable[0], }; + HintTable[0].init("shortDescription", "X", true); + ParamDefTable[3].setHints((const NvParameterized::Hint**)HintPtrTable, 1); + +#endif /* NV_PARAMETERIZED_HIDE_DESCRIPTIONS */ + + + + + + } + + // Initialize DefinitionImpl node: nodeIndex=4, longName="data[].y" + { + NvParameterized::DefinitionImpl* ParamDef = &ParamDefTable[4]; + ParamDef->init("y", TYPE_U32, NULL, true); + +#ifdef NV_PARAMETERIZED_HIDE_DESCRIPTIONS + +#else + + static HintImpl HintTable[1]; + static Hint* HintPtrTable[1] = { &HintTable[0], }; + HintTable[0].init("shortDescription", "Y", true); + ParamDefTable[4].setHints((const NvParameterized::Hint**)HintPtrTable, 1); + +#endif /* NV_PARAMETERIZED_HIDE_DESCRIPTIONS */ + + + + + + } + + // Initialize DefinitionImpl node: nodeIndex=5, longName="data[].z" + { + NvParameterized::DefinitionImpl* ParamDef = &ParamDefTable[5]; + ParamDef->init("z", TYPE_U32, NULL, true); + +#ifdef NV_PARAMETERIZED_HIDE_DESCRIPTIONS + +#else + + static HintImpl HintTable[1]; + static Hint* HintPtrTable[1] = { &HintTable[0], }; + HintTable[0].init("shortDescription", "Z", true); + ParamDefTable[5].setHints((const NvParameterized::Hint**)HintPtrTable, 1); + +#endif /* NV_PARAMETERIZED_HIDE_DESCRIPTIONS */ + + + + + + } + + // Initialize DefinitionImpl node: nodeIndex=6, longName="data[].w" + { + NvParameterized::DefinitionImpl* ParamDef = &ParamDefTable[6]; + ParamDef->init("w", TYPE_U32, NULL, true); + +#ifdef NV_PARAMETERIZED_HIDE_DESCRIPTIONS + +#else + + static HintImpl HintTable[1]; + static Hint* 
HintPtrTable[1] = { &HintTable[0], }; + HintTable[0].init("shortDescription", "W", true); + ParamDefTable[6].setHints((const NvParameterized::Hint**)HintPtrTable, 1); + +#endif /* NV_PARAMETERIZED_HIDE_DESCRIPTIONS */ + + + + + + } + + // SetChildren for: nodeIndex=0, longName="" + { + static Definition* Children[1]; + Children[0] = PDEF_PTR(1); + + ParamDefTable[0].setChildren(Children, 1); + } + + // SetChildren for: nodeIndex=1, longName="data" + { + static Definition* Children[1]; + Children[0] = PDEF_PTR(2); + + ParamDefTable[1].setChildren(Children, 1); + } + + // SetChildren for: nodeIndex=2, longName="data[]" + { + static Definition* Children[4]; + Children[0] = PDEF_PTR(3); + Children[1] = PDEF_PTR(4); + Children[2] = PDEF_PTR(5); + Children[3] = PDEF_PTR(6); + + ParamDefTable[2].setChildren(Children, 4); + } + + mBuiltFlag = true; + +} +void BufferU32x4::initStrings(void) +{ +} + +void BufferU32x4::initDynamicArrays(void) +{ + data.buf = NULL; + data.isAllocated = true; + data.elementSize = sizeof(U32x4_Type); + data.arraySizes[0] = 0; +} + +void BufferU32x4::initDefaults(void) +{ + + freeStrings(); + freeReferences(); + freeDynamicArrays(); + + initDynamicArrays(); + initStrings(); + initReferences(); +} + +void BufferU32x4::initReferences(void) +{ +} + +void BufferU32x4::freeDynamicArrays(void) +{ + if (data.isAllocated && data.buf) + { + mParameterizedTraits->free(data.buf); + } +} + +void BufferU32x4::freeStrings(void) +{ +} + +void BufferU32x4::freeReferences(void) +{ +} + +} // namespace apex +} // namespace nvidia diff --git a/APEX_1.4/framework/src/autogen/BufferU8x1.cpp b/APEX_1.4/framework/src/autogen/BufferU8x1.cpp new file mode 100644 index 00000000..c187331a --- /dev/null +++ b/APEX_1.4/framework/src/autogen/BufferU8x1.cpp @@ -0,0 +1,346 @@ +// This code contains NVIDIA Confidential Information and is disclosed to you +// under a form of NVIDIA software license agreement provided separately to you. 
+// +// Notice +// NVIDIA Corporation and its licensors retain all intellectual property and +// proprietary rights in and to this software and related documentation and +// any modifications thereto. Any use, reproduction, disclosure, or +// distribution of this software and related documentation without an express +// license agreement from NVIDIA Corporation is strictly prohibited. +// +// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES +// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO +// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT, +// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE. +// +// Information and code furnished is believed to be accurate and reliable. +// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such +// information or for any infringement of patents or other rights of third parties that may +// result from its use. No license is granted by implication or otherwise under any patent +// or patent rights of NVIDIA Corporation. Details are subject to change without notice. +// This code supersedes and replaces all information previously supplied. +// NVIDIA Corporation products are not authorized for use as critical +// components in life support devices or systems without express written approval of +// NVIDIA Corporation. +// +// Copyright (c) 2008-2015 NVIDIA Corporation. All rights reserved. 
+ +// This file was generated by NvParameterized/scripts/GenParameterized.pl + + +#include "BufferU8x1.h" +#include <string.h> +#include <stdlib.h> + +using namespace NvParameterized; + +namespace nvidia +{ +namespace apex +{ + +using namespace BufferU8x1NS; + +const char* const BufferU8x1Factory::vptr = + NvParameterized::getVptr<BufferU8x1, BufferU8x1::ClassAlignment>(); + +const uint32_t NumParamDefs = 3; +static NvParameterized::DefinitionImpl* ParamDefTable; // now allocated in buildTree [NumParamDefs]; + + +static const size_t ParamLookupChildrenTable[] = +{ + 1, 2, +}; + +#define TENUM(type) nvidia::##type +#define CHILDREN(index) &ParamLookupChildrenTable[index] +static const NvParameterized::ParamLookupNode ParamLookupTable[NumParamDefs] = +{ + { TYPE_STRUCT, false, 0, CHILDREN(0), 1 }, + { TYPE_ARRAY, true, (size_t)(&((ParametersStruct*)0)->data), CHILDREN(1), 1 }, // data + { TYPE_U8, false, 1 * sizeof(uint8_t), NULL, 0 }, // data[] +}; + + +bool BufferU8x1::mBuiltFlag = false; +NvParameterized::MutexType BufferU8x1::mBuiltFlagMutex; + +BufferU8x1::BufferU8x1(NvParameterized::Traits* traits, void* buf, int32_t* refCount) : + NvParameters(traits, buf, refCount) +{ + //mParameterizedTraits->registerFactory(className(), &BufferU8x1FactoryInst); + + if (!buf) //Do not init data if it is inplace-deserialized + { + initDynamicArrays(); + initStrings(); + initReferences(); + initDefaults(); + } +} + +BufferU8x1::~BufferU8x1() +{ + freeStrings(); + freeReferences(); + freeDynamicArrays(); +} + +void BufferU8x1::destroy() +{ + // We cache these fields here to avoid overwrite in destructor + bool doDeallocateSelf = mDoDeallocateSelf; + NvParameterized::Traits* traits = mParameterizedTraits; + int32_t* refCount = mRefCount; + void* buf = mBuffer; + + this->~BufferU8x1(); + + NvParameters::destroy(this, traits, doDeallocateSelf, refCount, buf); +} + +const NvParameterized::DefinitionImpl* BufferU8x1::getParameterDefinitionTree(void) +{ + if (!mBuiltFlag) // 
Double-checked lock + { + NvParameterized::MutexType::ScopedLock lock(mBuiltFlagMutex); + if (!mBuiltFlag) + { + buildTree(); + } + } + + return(&ParamDefTable[0]); +} + +const NvParameterized::DefinitionImpl* BufferU8x1::getParameterDefinitionTree(void) const +{ + BufferU8x1* tmpParam = const_cast<BufferU8x1*>(this); + + if (!mBuiltFlag) // Double-checked lock + { + NvParameterized::MutexType::ScopedLock lock(mBuiltFlagMutex); + if (!mBuiltFlag) + { + tmpParam->buildTree(); + } + } + + return(&ParamDefTable[0]); +} + +NvParameterized::ErrorType BufferU8x1::getParameterHandle(const char* long_name, Handle& handle) const +{ + ErrorType Ret = NvParameters::getParameterHandle(long_name, handle); + if (Ret != ERROR_NONE) + { + return(Ret); + } + + size_t offset; + void* ptr; + + getVarPtr(handle, ptr, offset); + + if (ptr == NULL) + { + return(ERROR_INDEX_OUT_OF_RANGE); + } + + return(ERROR_NONE); +} + +NvParameterized::ErrorType BufferU8x1::getParameterHandle(const char* long_name, Handle& handle) +{ + ErrorType Ret = NvParameters::getParameterHandle(long_name, handle); + if (Ret != ERROR_NONE) + { + return(Ret); + } + + size_t offset; + void* ptr; + + getVarPtr(handle, ptr, offset); + + if (ptr == NULL) + { + return(ERROR_INDEX_OUT_OF_RANGE); + } + + return(ERROR_NONE); +} + +void BufferU8x1::getVarPtr(const Handle& handle, void*& ptr, size_t& offset) const +{ + ptr = getVarPtrHelper(&ParamLookupTable[0], const_cast<BufferU8x1::ParametersStruct*>(¶meters()), handle, offset); +} + + +/* Dynamic Handle Indices */ + +void BufferU8x1::freeParameterDefinitionTable(NvParameterized::Traits* traits) +{ + if (!traits) + { + return; + } + + if (!mBuiltFlag) // Double-checked lock + { + return; + } + + NvParameterized::MutexType::ScopedLock lock(mBuiltFlagMutex); + + if (!mBuiltFlag) + { + return; + } + + for (uint32_t i = 0; i < NumParamDefs; ++i) + { + ParamDefTable[i].~DefinitionImpl(); + } + + traits->free(ParamDefTable); + + mBuiltFlag = false; +} + +#define 
PDEF_PTR(index) (&ParamDefTable[index]) + +void BufferU8x1::buildTree(void) +{ + + uint32_t allocSize = sizeof(NvParameterized::DefinitionImpl) * NumParamDefs; + ParamDefTable = (NvParameterized::DefinitionImpl*)(mParameterizedTraits->alloc(allocSize)); + memset(ParamDefTable, 0, allocSize); + + for (uint32_t i = 0; i < NumParamDefs; ++i) + { + NV_PARAM_PLACEMENT_NEW(ParamDefTable + i, NvParameterized::DefinitionImpl)(*mParameterizedTraits); + } + + // Initialize DefinitionImpl node: nodeIndex=0, longName="" + { + NvParameterized::DefinitionImpl* ParamDef = &ParamDefTable[0]; + ParamDef->init("", TYPE_STRUCT, "STRUCT", true); + + + + + + + } + + // Initialize DefinitionImpl node: nodeIndex=1, longName="data" + { + NvParameterized::DefinitionImpl* ParamDef = &ParamDefTable[1]; + ParamDef->init("data", TYPE_ARRAY, NULL, true); + +#ifdef NV_PARAMETERIZED_HIDE_DESCRIPTIONS + +#else + + static HintImpl HintTable[1]; + static Hint* HintPtrTable[1] = { &HintTable[0], }; + HintTable[0].init("shortDescription", "Container for BYTE1 formats", true); + ParamDefTable[1].setHints((const NvParameterized::Hint**)HintPtrTable, 1); + +#endif /* NV_PARAMETERIZED_HIDE_DESCRIPTIONS */ + + + + + ParamDef->setArraySize(-1); + } + + // Initialize DefinitionImpl node: nodeIndex=2, longName="data[]" + { + NvParameterized::DefinitionImpl* ParamDef = &ParamDefTable[2]; + ParamDef->init("data", TYPE_U8, NULL, true); + +#ifdef NV_PARAMETERIZED_HIDE_DESCRIPTIONS + +#else + + static HintImpl HintTable[1]; + static Hint* HintPtrTable[1] = { &HintTable[0], }; + HintTable[0].init("shortDescription", "Container for BYTE1 formats", true); + ParamDefTable[2].setHints((const NvParameterized::Hint**)HintPtrTable, 1); + +#endif /* NV_PARAMETERIZED_HIDE_DESCRIPTIONS */ + + + + + + } + + // SetChildren for: nodeIndex=0, longName="" + { + static Definition* Children[1]; + Children[0] = PDEF_PTR(1); + + ParamDefTable[0].setChildren(Children, 1); + } + + // SetChildren for: nodeIndex=1, longName="data" + { + 
static Definition* Children[1]; + Children[0] = PDEF_PTR(2); + + ParamDefTable[1].setChildren(Children, 1); + } + + mBuiltFlag = true; + +} +void BufferU8x1::initStrings(void) +{ +} + +void BufferU8x1::initDynamicArrays(void) +{ + data.buf = NULL; + data.isAllocated = true; + data.elementSize = sizeof(uint8_t); + data.arraySizes[0] = 0; +} + +void BufferU8x1::initDefaults(void) +{ + + freeStrings(); + freeReferences(); + freeDynamicArrays(); + + initDynamicArrays(); + initStrings(); + initReferences(); +} + +void BufferU8x1::initReferences(void) +{ +} + +void BufferU8x1::freeDynamicArrays(void) +{ + if (data.isAllocated && data.buf) + { + mParameterizedTraits->free(data.buf); + } +} + +void BufferU8x1::freeStrings(void) +{ +} + +void BufferU8x1::freeReferences(void) +{ +} + +} // namespace apex +} // namespace nvidia diff --git a/APEX_1.4/framework/src/autogen/BufferU8x2.cpp b/APEX_1.4/framework/src/autogen/BufferU8x2.cpp new file mode 100644 index 00000000..627ff879 --- /dev/null +++ b/APEX_1.4/framework/src/autogen/BufferU8x2.cpp @@ -0,0 +1,401 @@ +// This code contains NVIDIA Confidential Information and is disclosed to you +// under a form of NVIDIA software license agreement provided separately to you. +// +// Notice +// NVIDIA Corporation and its licensors retain all intellectual property and +// proprietary rights in and to this software and related documentation and +// any modifications thereto. Any use, reproduction, disclosure, or +// distribution of this software and related documentation without an express +// license agreement from NVIDIA Corporation is strictly prohibited. +// +// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES +// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO +// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT, +// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE. 
+// +// Information and code furnished is believed to be accurate and reliable. +// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such +// information or for any infringement of patents or other rights of third parties that may +// result from its use. No license is granted by implication or otherwise under any patent +// or patent rights of NVIDIA Corporation. Details are subject to change without notice. +// This code supersedes and replaces all information previously supplied. +// NVIDIA Corporation products are not authorized for use as critical +// components in life support devices or systems without express written approval of +// NVIDIA Corporation. +// +// Copyright (c) 2008-2015 NVIDIA Corporation. All rights reserved. + +// This file was generated by NvParameterized/scripts/GenParameterized.pl + + +#include "BufferU8x2.h" +#include <string.h> +#include <stdlib.h> + +using namespace NvParameterized; + +namespace nvidia +{ +namespace apex +{ + +using namespace BufferU8x2NS; + +const char* const BufferU8x2Factory::vptr = + NvParameterized::getVptr<BufferU8x2, BufferU8x2::ClassAlignment>(); + +const uint32_t NumParamDefs = 5; +static NvParameterized::DefinitionImpl* ParamDefTable; // now allocated in buildTree [NumParamDefs]; + + +static const size_t ParamLookupChildrenTable[] = +{ + 1, 2, 3, 4, +}; + +#define TENUM(type) nvidia::##type +#define CHILDREN(index) &ParamLookupChildrenTable[index] +static const NvParameterized::ParamLookupNode ParamLookupTable[NumParamDefs] = +{ + { TYPE_STRUCT, false, 0, CHILDREN(0), 1 }, + { TYPE_ARRAY, true, (size_t)(&((ParametersStruct*)0)->data), CHILDREN(1), 1 }, // data + { TYPE_STRUCT, false, 1 * sizeof(U8x2_Type), CHILDREN(2), 2 }, // data[] + { TYPE_U8, false, (size_t)(&((U8x2_Type*)0)->x), NULL, 0 }, // data[].x + { TYPE_U8, false, (size_t)(&((U8x2_Type*)0)->y), NULL, 0 }, // data[].y +}; + + +bool BufferU8x2::mBuiltFlag = false; +NvParameterized::MutexType 
BufferU8x2::mBuiltFlagMutex; + +BufferU8x2::BufferU8x2(NvParameterized::Traits* traits, void* buf, int32_t* refCount) : + NvParameters(traits, buf, refCount) +{ + //mParameterizedTraits->registerFactory(className(), &BufferU8x2FactoryInst); + + if (!buf) //Do not init data if it is inplace-deserialized + { + initDynamicArrays(); + initStrings(); + initReferences(); + initDefaults(); + } +} + +BufferU8x2::~BufferU8x2() +{ + freeStrings(); + freeReferences(); + freeDynamicArrays(); +} + +void BufferU8x2::destroy() +{ + // We cache these fields here to avoid overwrite in destructor + bool doDeallocateSelf = mDoDeallocateSelf; + NvParameterized::Traits* traits = mParameterizedTraits; + int32_t* refCount = mRefCount; + void* buf = mBuffer; + + this->~BufferU8x2(); + + NvParameters::destroy(this, traits, doDeallocateSelf, refCount, buf); +} + +const NvParameterized::DefinitionImpl* BufferU8x2::getParameterDefinitionTree(void) +{ + if (!mBuiltFlag) // Double-checked lock + { + NvParameterized::MutexType::ScopedLock lock(mBuiltFlagMutex); + if (!mBuiltFlag) + { + buildTree(); + } + } + + return(&ParamDefTable[0]); +} + +const NvParameterized::DefinitionImpl* BufferU8x2::getParameterDefinitionTree(void) const +{ + BufferU8x2* tmpParam = const_cast<BufferU8x2*>(this); + + if (!mBuiltFlag) // Double-checked lock + { + NvParameterized::MutexType::ScopedLock lock(mBuiltFlagMutex); + if (!mBuiltFlag) + { + tmpParam->buildTree(); + } + } + + return(&ParamDefTable[0]); +} + +NvParameterized::ErrorType BufferU8x2::getParameterHandle(const char* long_name, Handle& handle) const +{ + ErrorType Ret = NvParameters::getParameterHandle(long_name, handle); + if (Ret != ERROR_NONE) + { + return(Ret); + } + + size_t offset; + void* ptr; + + getVarPtr(handle, ptr, offset); + + if (ptr == NULL) + { + return(ERROR_INDEX_OUT_OF_RANGE); + } + + return(ERROR_NONE); +} + +NvParameterized::ErrorType BufferU8x2::getParameterHandle(const char* long_name, Handle& handle) +{ + ErrorType Ret = 
NvParameters::getParameterHandle(long_name, handle); + if (Ret != ERROR_NONE) + { + return(Ret); + } + + size_t offset; + void* ptr; + + getVarPtr(handle, ptr, offset); + + if (ptr == NULL) + { + return(ERROR_INDEX_OUT_OF_RANGE); + } + + return(ERROR_NONE); +} + +void BufferU8x2::getVarPtr(const Handle& handle, void*& ptr, size_t& offset) const +{ + ptr = getVarPtrHelper(&ParamLookupTable[0], const_cast<BufferU8x2::ParametersStruct*>(¶meters()), handle, offset); +} + + +/* Dynamic Handle Indices */ + +void BufferU8x2::freeParameterDefinitionTable(NvParameterized::Traits* traits) +{ + if (!traits) + { + return; + } + + if (!mBuiltFlag) // Double-checked lock + { + return; + } + + NvParameterized::MutexType::ScopedLock lock(mBuiltFlagMutex); + + if (!mBuiltFlag) + { + return; + } + + for (uint32_t i = 0; i < NumParamDefs; ++i) + { + ParamDefTable[i].~DefinitionImpl(); + } + + traits->free(ParamDefTable); + + mBuiltFlag = false; +} + +#define PDEF_PTR(index) (&ParamDefTable[index]) + +void BufferU8x2::buildTree(void) +{ + + uint32_t allocSize = sizeof(NvParameterized::DefinitionImpl) * NumParamDefs; + ParamDefTable = (NvParameterized::DefinitionImpl*)(mParameterizedTraits->alloc(allocSize)); + memset(ParamDefTable, 0, allocSize); + + for (uint32_t i = 0; i < NumParamDefs; ++i) + { + NV_PARAM_PLACEMENT_NEW(ParamDefTable + i, NvParameterized::DefinitionImpl)(*mParameterizedTraits); + } + + // Initialize DefinitionImpl node: nodeIndex=0, longName="" + { + NvParameterized::DefinitionImpl* ParamDef = &ParamDefTable[0]; + ParamDef->init("", TYPE_STRUCT, "STRUCT", true); + + + + + + + } + + // Initialize DefinitionImpl node: nodeIndex=1, longName="data" + { + NvParameterized::DefinitionImpl* ParamDef = &ParamDefTable[1]; + ParamDef->init("data", TYPE_ARRAY, NULL, true); + +#ifdef NV_PARAMETERIZED_HIDE_DESCRIPTIONS + +#else + + static HintImpl HintTable[1]; + static Hint* HintPtrTable[1] = { &HintTable[0], }; + HintTable[0].init("shortDescription", "Container for BYTE2 
formats", true); + ParamDefTable[1].setHints((const NvParameterized::Hint**)HintPtrTable, 1); + +#endif /* NV_PARAMETERIZED_HIDE_DESCRIPTIONS */ + + + + + ParamDef->setArraySize(-1); + } + + // Initialize DefinitionImpl node: nodeIndex=2, longName="data[]" + { + NvParameterized::DefinitionImpl* ParamDef = &ParamDefTable[2]; + ParamDef->init("data", TYPE_STRUCT, "U8x2", true); + +#ifdef NV_PARAMETERIZED_HIDE_DESCRIPTIONS + +#else + + static HintImpl HintTable[1]; + static Hint* HintPtrTable[1] = { &HintTable[0], }; + HintTable[0].init("shortDescription", "Container for BYTE2 formats", true); + ParamDefTable[2].setHints((const NvParameterized::Hint**)HintPtrTable, 1); + +#endif /* NV_PARAMETERIZED_HIDE_DESCRIPTIONS */ + + + + + + } + + // Initialize DefinitionImpl node: nodeIndex=3, longName="data[].x" + { + NvParameterized::DefinitionImpl* ParamDef = &ParamDefTable[3]; + ParamDef->init("x", TYPE_U8, NULL, true); + +#ifdef NV_PARAMETERIZED_HIDE_DESCRIPTIONS + +#else + + static HintImpl HintTable[1]; + static Hint* HintPtrTable[1] = { &HintTable[0], }; + HintTable[0].init("shortDescription", "X", true); + ParamDefTable[3].setHints((const NvParameterized::Hint**)HintPtrTable, 1); + +#endif /* NV_PARAMETERIZED_HIDE_DESCRIPTIONS */ + + + + + + } + + // Initialize DefinitionImpl node: nodeIndex=4, longName="data[].y" + { + NvParameterized::DefinitionImpl* ParamDef = &ParamDefTable[4]; + ParamDef->init("y", TYPE_U8, NULL, true); + +#ifdef NV_PARAMETERIZED_HIDE_DESCRIPTIONS + +#else + + static HintImpl HintTable[1]; + static Hint* HintPtrTable[1] = { &HintTable[0], }; + HintTable[0].init("shortDescription", "Y", true); + ParamDefTable[4].setHints((const NvParameterized::Hint**)HintPtrTable, 1); + +#endif /* NV_PARAMETERIZED_HIDE_DESCRIPTIONS */ + + + + + + } + + // SetChildren for: nodeIndex=0, longName="" + { + static Definition* Children[1]; + Children[0] = PDEF_PTR(1); + + ParamDefTable[0].setChildren(Children, 1); + } + + // SetChildren for: nodeIndex=1, longName="data" 
+ { + static Definition* Children[1]; + Children[0] = PDEF_PTR(2); + + ParamDefTable[1].setChildren(Children, 1); + } + + // SetChildren for: nodeIndex=2, longName="data[]" + { + static Definition* Children[2]; + Children[0] = PDEF_PTR(3); + Children[1] = PDEF_PTR(4); + + ParamDefTable[2].setChildren(Children, 2); + } + + mBuiltFlag = true; + +} +void BufferU8x2::initStrings(void) +{ +} + +void BufferU8x2::initDynamicArrays(void) +{ + data.buf = NULL; + data.isAllocated = true; + data.elementSize = sizeof(U8x2_Type); + data.arraySizes[0] = 0; +} + +void BufferU8x2::initDefaults(void) +{ + + freeStrings(); + freeReferences(); + freeDynamicArrays(); + + initDynamicArrays(); + initStrings(); + initReferences(); +} + +void BufferU8x2::initReferences(void) +{ +} + +void BufferU8x2::freeDynamicArrays(void) +{ + if (data.isAllocated && data.buf) + { + mParameterizedTraits->free(data.buf); + } +} + +void BufferU8x2::freeStrings(void) +{ +} + +void BufferU8x2::freeReferences(void) +{ +} + +} // namespace apex +} // namespace nvidia diff --git a/APEX_1.4/framework/src/autogen/BufferU8x3.cpp b/APEX_1.4/framework/src/autogen/BufferU8x3.cpp new file mode 100644 index 00000000..274b14cf --- /dev/null +++ b/APEX_1.4/framework/src/autogen/BufferU8x3.cpp @@ -0,0 +1,425 @@ +// This code contains NVIDIA Confidential Information and is disclosed to you +// under a form of NVIDIA software license agreement provided separately to you. +// +// Notice +// NVIDIA Corporation and its licensors retain all intellectual property and +// proprietary rights in and to this software and related documentation and +// any modifications thereto. Any use, reproduction, disclosure, or +// distribution of this software and related documentation without an express +// license agreement from NVIDIA Corporation is strictly prohibited. +// +// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". 
NVIDIA MAKES +// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO +// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT, +// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE. +// +// Information and code furnished is believed to be accurate and reliable. +// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such +// information or for any infringement of patents or other rights of third parties that may +// result from its use. No license is granted by implication or otherwise under any patent +// or patent rights of NVIDIA Corporation. Details are subject to change without notice. +// This code supersedes and replaces all information previously supplied. +// NVIDIA Corporation products are not authorized for use as critical +// components in life support devices or systems without express written approval of +// NVIDIA Corporation. +// +// Copyright (c) 2008-2015 NVIDIA Corporation. All rights reserved. 
+ +// This file was generated by NvParameterized/scripts/GenParameterized.pl + + +#include "BufferU8x3.h" +#include <string.h> +#include <stdlib.h> + +using namespace NvParameterized; + +namespace nvidia +{ +namespace apex +{ + +using namespace BufferU8x3NS; + +const char* const BufferU8x3Factory::vptr = + NvParameterized::getVptr<BufferU8x3, BufferU8x3::ClassAlignment>(); + +const uint32_t NumParamDefs = 6; +static NvParameterized::DefinitionImpl* ParamDefTable; // now allocated in buildTree [NumParamDefs]; + + +static const size_t ParamLookupChildrenTable[] = +{ + 1, 2, 3, 4, 5, +}; + +#define TENUM(type) nvidia::##type +#define CHILDREN(index) &ParamLookupChildrenTable[index] +static const NvParameterized::ParamLookupNode ParamLookupTable[NumParamDefs] = +{ + { TYPE_STRUCT, false, 0, CHILDREN(0), 1 }, + { TYPE_ARRAY, true, (size_t)(&((ParametersStruct*)0)->data), CHILDREN(1), 1 }, // data + { TYPE_STRUCT, false, 1 * sizeof(U8x3_Type), CHILDREN(2), 3 }, // data[] + { TYPE_U8, false, (size_t)(&((U8x3_Type*)0)->x), NULL, 0 }, // data[].x + { TYPE_U8, false, (size_t)(&((U8x3_Type*)0)->y), NULL, 0 }, // data[].y + { TYPE_U8, false, (size_t)(&((U8x3_Type*)0)->z), NULL, 0 }, // data[].z +}; + + +bool BufferU8x3::mBuiltFlag = false; +NvParameterized::MutexType BufferU8x3::mBuiltFlagMutex; + +BufferU8x3::BufferU8x3(NvParameterized::Traits* traits, void* buf, int32_t* refCount) : + NvParameters(traits, buf, refCount) +{ + //mParameterizedTraits->registerFactory(className(), &BufferU8x3FactoryInst); + + if (!buf) //Do not init data if it is inplace-deserialized + { + initDynamicArrays(); + initStrings(); + initReferences(); + initDefaults(); + } +} + +BufferU8x3::~BufferU8x3() +{ + freeStrings(); + freeReferences(); + freeDynamicArrays(); +} + +void BufferU8x3::destroy() +{ + // We cache these fields here to avoid overwrite in destructor + bool doDeallocateSelf = mDoDeallocateSelf; + NvParameterized::Traits* traits = mParameterizedTraits; + int32_t* refCount = mRefCount; + 
void* buf = mBuffer; + + this->~BufferU8x3(); + + NvParameters::destroy(this, traits, doDeallocateSelf, refCount, buf); +} + +const NvParameterized::DefinitionImpl* BufferU8x3::getParameterDefinitionTree(void) +{ + if (!mBuiltFlag) // Double-checked lock + { + NvParameterized::MutexType::ScopedLock lock(mBuiltFlagMutex); + if (!mBuiltFlag) + { + buildTree(); + } + } + + return(&ParamDefTable[0]); +} + +const NvParameterized::DefinitionImpl* BufferU8x3::getParameterDefinitionTree(void) const +{ + BufferU8x3* tmpParam = const_cast<BufferU8x3*>(this); + + if (!mBuiltFlag) // Double-checked lock + { + NvParameterized::MutexType::ScopedLock lock(mBuiltFlagMutex); + if (!mBuiltFlag) + { + tmpParam->buildTree(); + } + } + + return(&ParamDefTable[0]); +} + +NvParameterized::ErrorType BufferU8x3::getParameterHandle(const char* long_name, Handle& handle) const +{ + ErrorType Ret = NvParameters::getParameterHandle(long_name, handle); + if (Ret != ERROR_NONE) + { + return(Ret); + } + + size_t offset; + void* ptr; + + getVarPtr(handle, ptr, offset); + + if (ptr == NULL) + { + return(ERROR_INDEX_OUT_OF_RANGE); + } + + return(ERROR_NONE); +} + +NvParameterized::ErrorType BufferU8x3::getParameterHandle(const char* long_name, Handle& handle) +{ + ErrorType Ret = NvParameters::getParameterHandle(long_name, handle); + if (Ret != ERROR_NONE) + { + return(Ret); + } + + size_t offset; + void* ptr; + + getVarPtr(handle, ptr, offset); + + if (ptr == NULL) + { + return(ERROR_INDEX_OUT_OF_RANGE); + } + + return(ERROR_NONE); +} + +void BufferU8x3::getVarPtr(const Handle& handle, void*& ptr, size_t& offset) const +{ + ptr = getVarPtrHelper(&ParamLookupTable[0], const_cast<BufferU8x3::ParametersStruct*>(¶meters()), handle, offset); +} + + +/* Dynamic Handle Indices */ + +void BufferU8x3::freeParameterDefinitionTable(NvParameterized::Traits* traits) +{ + if (!traits) + { + return; + } + + if (!mBuiltFlag) // Double-checked lock + { + return; + } + + NvParameterized::MutexType::ScopedLock 
lock(mBuiltFlagMutex); + + if (!mBuiltFlag) + { + return; + } + + for (uint32_t i = 0; i < NumParamDefs; ++i) + { + ParamDefTable[i].~DefinitionImpl(); + } + + traits->free(ParamDefTable); + + mBuiltFlag = false; +} + +#define PDEF_PTR(index) (&ParamDefTable[index]) + +void BufferU8x3::buildTree(void) +{ + + uint32_t allocSize = sizeof(NvParameterized::DefinitionImpl) * NumParamDefs; + ParamDefTable = (NvParameterized::DefinitionImpl*)(mParameterizedTraits->alloc(allocSize)); + memset(ParamDefTable, 0, allocSize); + + for (uint32_t i = 0; i < NumParamDefs; ++i) + { + NV_PARAM_PLACEMENT_NEW(ParamDefTable + i, NvParameterized::DefinitionImpl)(*mParameterizedTraits); + } + + // Initialize DefinitionImpl node: nodeIndex=0, longName="" + { + NvParameterized::DefinitionImpl* ParamDef = &ParamDefTable[0]; + ParamDef->init("", TYPE_STRUCT, "STRUCT", true); + + + + + + + } + + // Initialize DefinitionImpl node: nodeIndex=1, longName="data" + { + NvParameterized::DefinitionImpl* ParamDef = &ParamDefTable[1]; + ParamDef->init("data", TYPE_ARRAY, NULL, true); + +#ifdef NV_PARAMETERIZED_HIDE_DESCRIPTIONS + +#else + + static HintImpl HintTable[1]; + static Hint* HintPtrTable[1] = { &HintTable[0], }; + HintTable[0].init("shortDescription", "Container for BYTE3 formats", true); + ParamDefTable[1].setHints((const NvParameterized::Hint**)HintPtrTable, 1); + +#endif /* NV_PARAMETERIZED_HIDE_DESCRIPTIONS */ + + + + + ParamDef->setArraySize(-1); + } + + // Initialize DefinitionImpl node: nodeIndex=2, longName="data[]" + { + NvParameterized::DefinitionImpl* ParamDef = &ParamDefTable[2]; + ParamDef->init("data", TYPE_STRUCT, "U8x3", true); + +#ifdef NV_PARAMETERIZED_HIDE_DESCRIPTIONS + +#else + + static HintImpl HintTable[1]; + static Hint* HintPtrTable[1] = { &HintTable[0], }; + HintTable[0].init("shortDescription", "Container for BYTE3 formats", true); + ParamDefTable[2].setHints((const NvParameterized::Hint**)HintPtrTable, 1); + +#endif /* NV_PARAMETERIZED_HIDE_DESCRIPTIONS */ + + + + 
+ + } + + // Initialize DefinitionImpl node: nodeIndex=3, longName="data[].x" + { + NvParameterized::DefinitionImpl* ParamDef = &ParamDefTable[3]; + ParamDef->init("x", TYPE_U8, NULL, true); + +#ifdef NV_PARAMETERIZED_HIDE_DESCRIPTIONS + +#else + + static HintImpl HintTable[1]; + static Hint* HintPtrTable[1] = { &HintTable[0], }; + HintTable[0].init("shortDescription", "X", true); + ParamDefTable[3].setHints((const NvParameterized::Hint**)HintPtrTable, 1); + +#endif /* NV_PARAMETERIZED_HIDE_DESCRIPTIONS */ + + + + + + } + + // Initialize DefinitionImpl node: nodeIndex=4, longName="data[].y" + { + NvParameterized::DefinitionImpl* ParamDef = &ParamDefTable[4]; + ParamDef->init("y", TYPE_U8, NULL, true); + +#ifdef NV_PARAMETERIZED_HIDE_DESCRIPTIONS + +#else + + static HintImpl HintTable[1]; + static Hint* HintPtrTable[1] = { &HintTable[0], }; + HintTable[0].init("shortDescription", "Y", true); + ParamDefTable[4].setHints((const NvParameterized::Hint**)HintPtrTable, 1); + +#endif /* NV_PARAMETERIZED_HIDE_DESCRIPTIONS */ + + + + + + } + + // Initialize DefinitionImpl node: nodeIndex=5, longName="data[].z" + { + NvParameterized::DefinitionImpl* ParamDef = &ParamDefTable[5]; + ParamDef->init("z", TYPE_U8, NULL, true); + +#ifdef NV_PARAMETERIZED_HIDE_DESCRIPTIONS + +#else + + static HintImpl HintTable[1]; + static Hint* HintPtrTable[1] = { &HintTable[0], }; + HintTable[0].init("shortDescription", "Z", true); + ParamDefTable[5].setHints((const NvParameterized::Hint**)HintPtrTable, 1); + +#endif /* NV_PARAMETERIZED_HIDE_DESCRIPTIONS */ + + + + + + } + + // SetChildren for: nodeIndex=0, longName="" + { + static Definition* Children[1]; + Children[0] = PDEF_PTR(1); + + ParamDefTable[0].setChildren(Children, 1); + } + + // SetChildren for: nodeIndex=1, longName="data" + { + static Definition* Children[1]; + Children[0] = PDEF_PTR(2); + + ParamDefTable[1].setChildren(Children, 1); + } + + // SetChildren for: nodeIndex=2, longName="data[]" + { + static Definition* Children[3]; + 
Children[0] = PDEF_PTR(3); + Children[1] = PDEF_PTR(4); + Children[2] = PDEF_PTR(5); + + ParamDefTable[2].setChildren(Children, 3); + } + + mBuiltFlag = true; + +} +void BufferU8x3::initStrings(void) +{ +} + +void BufferU8x3::initDynamicArrays(void) +{ + data.buf = NULL; + data.isAllocated = true; + data.elementSize = sizeof(U8x3_Type); + data.arraySizes[0] = 0; +} + +void BufferU8x3::initDefaults(void) +{ + + freeStrings(); + freeReferences(); + freeDynamicArrays(); + + initDynamicArrays(); + initStrings(); + initReferences(); +} + +void BufferU8x3::initReferences(void) +{ +} + +void BufferU8x3::freeDynamicArrays(void) +{ + if (data.isAllocated && data.buf) + { + mParameterizedTraits->free(data.buf); + } +} + +void BufferU8x3::freeStrings(void) +{ +} + +void BufferU8x3::freeReferences(void) +{ +} + +} // namespace apex +} // namespace nvidia diff --git a/APEX_1.4/framework/src/autogen/BufferU8x4.cpp b/APEX_1.4/framework/src/autogen/BufferU8x4.cpp new file mode 100644 index 00000000..5881b86c --- /dev/null +++ b/APEX_1.4/framework/src/autogen/BufferU8x4.cpp @@ -0,0 +1,449 @@ +// This code contains NVIDIA Confidential Information and is disclosed to you +// under a form of NVIDIA software license agreement provided separately to you. +// +// Notice +// NVIDIA Corporation and its licensors retain all intellectual property and +// proprietary rights in and to this software and related documentation and +// any modifications thereto. Any use, reproduction, disclosure, or +// distribution of this software and related documentation without an express +// license agreement from NVIDIA Corporation is strictly prohibited. +// +// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES +// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO +// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT, +// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE. 
+// +// Information and code furnished is believed to be accurate and reliable. +// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such +// information or for any infringement of patents or other rights of third parties that may +// result from its use. No license is granted by implication or otherwise under any patent +// or patent rights of NVIDIA Corporation. Details are subject to change without notice. +// This code supersedes and replaces all information previously supplied. +// NVIDIA Corporation products are not authorized for use as critical +// components in life support devices or systems without express written approval of +// NVIDIA Corporation. +// +// Copyright (c) 2008-2015 NVIDIA Corporation. All rights reserved. + +// This file was generated by NvParameterized/scripts/GenParameterized.pl + + +#include "BufferU8x4.h" +#include <string.h> +#include <stdlib.h> + +using namespace NvParameterized; + +namespace nvidia +{ +namespace apex +{ + +using namespace BufferU8x4NS; + +const char* const BufferU8x4Factory::vptr = + NvParameterized::getVptr<BufferU8x4, BufferU8x4::ClassAlignment>(); + +const uint32_t NumParamDefs = 7; +static NvParameterized::DefinitionImpl* ParamDefTable; // now allocated in buildTree [NumParamDefs]; + + +static const size_t ParamLookupChildrenTable[] = +{ + 1, 2, 3, 4, 5, 6, +}; + +#define TENUM(type) nvidia::##type +#define CHILDREN(index) &ParamLookupChildrenTable[index] +static const NvParameterized::ParamLookupNode ParamLookupTable[NumParamDefs] = +{ + { TYPE_STRUCT, false, 0, CHILDREN(0), 1 }, + { TYPE_ARRAY, true, (size_t)(&((ParametersStruct*)0)->data), CHILDREN(1), 1 }, // data + { TYPE_STRUCT, false, 1 * sizeof(U8x4_Type), CHILDREN(2), 4 }, // data[] + { TYPE_U8, false, (size_t)(&((U8x4_Type*)0)->x), NULL, 0 }, // data[].x + { TYPE_U8, false, (size_t)(&((U8x4_Type*)0)->y), NULL, 0 }, // data[].y + { TYPE_U8, false, (size_t)(&((U8x4_Type*)0)->z), NULL, 0 }, // data[].z + { TYPE_U8, 
false, (size_t)(&((U8x4_Type*)0)->w), NULL, 0 }, // data[].w +}; + + +bool BufferU8x4::mBuiltFlag = false; +NvParameterized::MutexType BufferU8x4::mBuiltFlagMutex; + +BufferU8x4::BufferU8x4(NvParameterized::Traits* traits, void* buf, int32_t* refCount) : + NvParameters(traits, buf, refCount) +{ + //mParameterizedTraits->registerFactory(className(), &BufferU8x4FactoryInst); + + if (!buf) //Do not init data if it is inplace-deserialized + { + initDynamicArrays(); + initStrings(); + initReferences(); + initDefaults(); + } +} + +BufferU8x4::~BufferU8x4() +{ + freeStrings(); + freeReferences(); + freeDynamicArrays(); +} + +void BufferU8x4::destroy() +{ + // We cache these fields here to avoid overwrite in destructor + bool doDeallocateSelf = mDoDeallocateSelf; + NvParameterized::Traits* traits = mParameterizedTraits; + int32_t* refCount = mRefCount; + void* buf = mBuffer; + + this->~BufferU8x4(); + + NvParameters::destroy(this, traits, doDeallocateSelf, refCount, buf); +} + +const NvParameterized::DefinitionImpl* BufferU8x4::getParameterDefinitionTree(void) +{ + if (!mBuiltFlag) // Double-checked lock + { + NvParameterized::MutexType::ScopedLock lock(mBuiltFlagMutex); + if (!mBuiltFlag) + { + buildTree(); + } + } + + return(&ParamDefTable[0]); +} + +const NvParameterized::DefinitionImpl* BufferU8x4::getParameterDefinitionTree(void) const +{ + BufferU8x4* tmpParam = const_cast<BufferU8x4*>(this); + + if (!mBuiltFlag) // Double-checked lock + { + NvParameterized::MutexType::ScopedLock lock(mBuiltFlagMutex); + if (!mBuiltFlag) + { + tmpParam->buildTree(); + } + } + + return(&ParamDefTable[0]); +} + +NvParameterized::ErrorType BufferU8x4::getParameterHandle(const char* long_name, Handle& handle) const +{ + ErrorType Ret = NvParameters::getParameterHandle(long_name, handle); + if (Ret != ERROR_NONE) + { + return(Ret); + } + + size_t offset; + void* ptr; + + getVarPtr(handle, ptr, offset); + + if (ptr == NULL) + { + return(ERROR_INDEX_OUT_OF_RANGE); + } + + 
return(ERROR_NONE); +} + +NvParameterized::ErrorType BufferU8x4::getParameterHandle(const char* long_name, Handle& handle) +{ + ErrorType Ret = NvParameters::getParameterHandle(long_name, handle); + if (Ret != ERROR_NONE) + { + return(Ret); + } + + size_t offset; + void* ptr; + + getVarPtr(handle, ptr, offset); + + if (ptr == NULL) + { + return(ERROR_INDEX_OUT_OF_RANGE); + } + + return(ERROR_NONE); +} + +void BufferU8x4::getVarPtr(const Handle& handle, void*& ptr, size_t& offset) const +{ + ptr = getVarPtrHelper(&ParamLookupTable[0], const_cast<BufferU8x4::ParametersStruct*>(¶meters()), handle, offset); +} + + +/* Dynamic Handle Indices */ + +void BufferU8x4::freeParameterDefinitionTable(NvParameterized::Traits* traits) +{ + if (!traits) + { + return; + } + + if (!mBuiltFlag) // Double-checked lock + { + return; + } + + NvParameterized::MutexType::ScopedLock lock(mBuiltFlagMutex); + + if (!mBuiltFlag) + { + return; + } + + for (uint32_t i = 0; i < NumParamDefs; ++i) + { + ParamDefTable[i].~DefinitionImpl(); + } + + traits->free(ParamDefTable); + + mBuiltFlag = false; +} + +#define PDEF_PTR(index) (&ParamDefTable[index]) + +void BufferU8x4::buildTree(void) +{ + + uint32_t allocSize = sizeof(NvParameterized::DefinitionImpl) * NumParamDefs; + ParamDefTable = (NvParameterized::DefinitionImpl*)(mParameterizedTraits->alloc(allocSize)); + memset(ParamDefTable, 0, allocSize); + + for (uint32_t i = 0; i < NumParamDefs; ++i) + { + NV_PARAM_PLACEMENT_NEW(ParamDefTable + i, NvParameterized::DefinitionImpl)(*mParameterizedTraits); + } + + // Initialize DefinitionImpl node: nodeIndex=0, longName="" + { + NvParameterized::DefinitionImpl* ParamDef = &ParamDefTable[0]; + ParamDef->init("", TYPE_STRUCT, "STRUCT", true); + + + + + + + } + + // Initialize DefinitionImpl node: nodeIndex=1, longName="data" + { + NvParameterized::DefinitionImpl* ParamDef = &ParamDefTable[1]; + ParamDef->init("data", TYPE_ARRAY, NULL, true); + +#ifdef NV_PARAMETERIZED_HIDE_DESCRIPTIONS + +#else + + static 
HintImpl HintTable[1]; + static Hint* HintPtrTable[1] = { &HintTable[0], }; + HintTable[0].init("shortDescription", "Container for BYTE4 formats", true); + ParamDefTable[1].setHints((const NvParameterized::Hint**)HintPtrTable, 1); + +#endif /* NV_PARAMETERIZED_HIDE_DESCRIPTIONS */ + + + + + ParamDef->setArraySize(-1); + } + + // Initialize DefinitionImpl node: nodeIndex=2, longName="data[]" + { + NvParameterized::DefinitionImpl* ParamDef = &ParamDefTable[2]; + ParamDef->init("data", TYPE_STRUCT, "U8x4", true); + +#ifdef NV_PARAMETERIZED_HIDE_DESCRIPTIONS + +#else + + static HintImpl HintTable[1]; + static Hint* HintPtrTable[1] = { &HintTable[0], }; + HintTable[0].init("shortDescription", "Container for BYTE4 formats", true); + ParamDefTable[2].setHints((const NvParameterized::Hint**)HintPtrTable, 1); + +#endif /* NV_PARAMETERIZED_HIDE_DESCRIPTIONS */ + + + + + + } + + // Initialize DefinitionImpl node: nodeIndex=3, longName="data[].x" + { + NvParameterized::DefinitionImpl* ParamDef = &ParamDefTable[3]; + ParamDef->init("x", TYPE_U8, NULL, true); + +#ifdef NV_PARAMETERIZED_HIDE_DESCRIPTIONS + +#else + + static HintImpl HintTable[1]; + static Hint* HintPtrTable[1] = { &HintTable[0], }; + HintTable[0].init("shortDescription", "X", true); + ParamDefTable[3].setHints((const NvParameterized::Hint**)HintPtrTable, 1); + +#endif /* NV_PARAMETERIZED_HIDE_DESCRIPTIONS */ + + + + + + } + + // Initialize DefinitionImpl node: nodeIndex=4, longName="data[].y" + { + NvParameterized::DefinitionImpl* ParamDef = &ParamDefTable[4]; + ParamDef->init("y", TYPE_U8, NULL, true); + +#ifdef NV_PARAMETERIZED_HIDE_DESCRIPTIONS + +#else + + static HintImpl HintTable[1]; + static Hint* HintPtrTable[1] = { &HintTable[0], }; + HintTable[0].init("shortDescription", "Y", true); + ParamDefTable[4].setHints((const NvParameterized::Hint**)HintPtrTable, 1); + +#endif /* NV_PARAMETERIZED_HIDE_DESCRIPTIONS */ + + + + + + } + + // Initialize DefinitionImpl node: nodeIndex=5, longName="data[].z" + { + 
NvParameterized::DefinitionImpl* ParamDef = &ParamDefTable[5]; + ParamDef->init("z", TYPE_U8, NULL, true); + +#ifdef NV_PARAMETERIZED_HIDE_DESCRIPTIONS + +#else + + static HintImpl HintTable[1]; + static Hint* HintPtrTable[1] = { &HintTable[0], }; + HintTable[0].init("shortDescription", "Z", true); + ParamDefTable[5].setHints((const NvParameterized::Hint**)HintPtrTable, 1); + +#endif /* NV_PARAMETERIZED_HIDE_DESCRIPTIONS */ + + + + + + } + + // Initialize DefinitionImpl node: nodeIndex=6, longName="data[].w" + { + NvParameterized::DefinitionImpl* ParamDef = &ParamDefTable[6]; + ParamDef->init("w", TYPE_U8, NULL, true); + +#ifdef NV_PARAMETERIZED_HIDE_DESCRIPTIONS + +#else + + static HintImpl HintTable[1]; + static Hint* HintPtrTable[1] = { &HintTable[0], }; + HintTable[0].init("shortDescription", "W", true); + ParamDefTable[6].setHints((const NvParameterized::Hint**)HintPtrTable, 1); + +#endif /* NV_PARAMETERIZED_HIDE_DESCRIPTIONS */ + + + + + + } + + // SetChildren for: nodeIndex=0, longName="" + { + static Definition* Children[1]; + Children[0] = PDEF_PTR(1); + + ParamDefTable[0].setChildren(Children, 1); + } + + // SetChildren for: nodeIndex=1, longName="data" + { + static Definition* Children[1]; + Children[0] = PDEF_PTR(2); + + ParamDefTable[1].setChildren(Children, 1); + } + + // SetChildren for: nodeIndex=2, longName="data[]" + { + static Definition* Children[4]; + Children[0] = PDEF_PTR(3); + Children[1] = PDEF_PTR(4); + Children[2] = PDEF_PTR(5); + Children[3] = PDEF_PTR(6); + + ParamDefTable[2].setChildren(Children, 4); + } + + mBuiltFlag = true; + +} +void BufferU8x4::initStrings(void) +{ +} + +void BufferU8x4::initDynamicArrays(void) +{ + data.buf = NULL; + data.isAllocated = true; + data.elementSize = sizeof(U8x4_Type); + data.arraySizes[0] = 0; +} + +void BufferU8x4::initDefaults(void) +{ + + freeStrings(); + freeReferences(); + freeDynamicArrays(); + + initDynamicArrays(); + initStrings(); + initReferences(); +} + +void 
BufferU8x4::initReferences(void) +{ +} + +void BufferU8x4::freeDynamicArrays(void) +{ + if (data.isAllocated && data.buf) + { + mParameterizedTraits->free(data.buf); + } +} + +void BufferU8x4::freeStrings(void) +{ +} + +void BufferU8x4::freeReferences(void) +{ +} + +} // namespace apex +} // namespace nvidia diff --git a/APEX_1.4/framework/src/autogen/RenderMeshAssetParameters.cpp b/APEX_1.4/framework/src/autogen/RenderMeshAssetParameters.cpp new file mode 100644 index 00000000..17e27f95 --- /dev/null +++ b/APEX_1.4/framework/src/autogen/RenderMeshAssetParameters.cpp @@ -0,0 +1,621 @@ +// This code contains NVIDIA Confidential Information and is disclosed to you +// under a form of NVIDIA software license agreement provided separately to you. +// +// Notice +// NVIDIA Corporation and its licensors retain all intellectual property and +// proprietary rights in and to this software and related documentation and +// any modifications thereto. Any use, reproduction, disclosure, or +// distribution of this software and related documentation without an express +// license agreement from NVIDIA Corporation is strictly prohibited. +// +// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES +// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO +// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT, +// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE. +// +// Information and code furnished is believed to be accurate and reliable. +// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such +// information or for any infringement of patents or other rights of third parties that may +// result from its use. No license is granted by implication or otherwise under any patent +// or patent rights of NVIDIA Corporation. Details are subject to change without notice. +// This code supersedes and replaces all information previously supplied. 
+// NVIDIA Corporation products are not authorized for use as critical +// components in life support devices or systems without express written approval of +// NVIDIA Corporation. +// +// Copyright (c) 2008-2015 NVIDIA Corporation. All rights reserved. + +// This file was generated by NvParameterized/scripts/GenParameterized.pl + + +#include "RenderMeshAssetParameters.h" +#include <string.h> +#include <stdlib.h> + +using namespace NvParameterized; + +namespace nvidia +{ +namespace apex +{ + +using namespace RenderMeshAssetParametersNS; + +const char* const RenderMeshAssetParametersFactory::vptr = + NvParameterized::getVptr<RenderMeshAssetParameters, RenderMeshAssetParameters::ClassAlignment>(); + +const uint32_t NumParamDefs = 11; +static NvParameterized::DefinitionImpl* ParamDefTable; // now allocated in buildTree [NumParamDefs]; + + +static const size_t ParamLookupChildrenTable[] = +{ + 1, 3, 5, 7, 8, 9, 10, 2, 4, 6, +}; + +#define TENUM(type) nvidia::##type +#define CHILDREN(index) &ParamLookupChildrenTable[index] +static const NvParameterized::ParamLookupNode ParamLookupTable[NumParamDefs] = +{ + { TYPE_STRUCT, false, 0, CHILDREN(0), 7 }, + { TYPE_ARRAY, true, (size_t)(&((ParametersStruct*)0)->submeshes), CHILDREN(7), 1 }, // submeshes + { TYPE_REF, false, 1 * sizeof(NvParameterized::Interface*), NULL, 0 }, // submeshes[] + { TYPE_ARRAY, true, (size_t)(&((ParametersStruct*)0)->materialNames), CHILDREN(8), 1 }, // materialNames + { TYPE_STRING, false, 1 * sizeof(NvParameterized::DummyStringStruct), NULL, 0 }, // materialNames[] + { TYPE_ARRAY, true, (size_t)(&((ParametersStruct*)0)->partBounds), CHILDREN(9), 1 }, // partBounds + { TYPE_BOUNDS3, false, 1 * sizeof(physx::PxBounds3), NULL, 0 }, // partBounds[] + { TYPE_U32, false, (size_t)(&((ParametersStruct*)0)->textureUVOrigin), NULL, 0 }, // textureUVOrigin + { TYPE_U32, false, (size_t)(&((ParametersStruct*)0)->boneCount), NULL, 0 }, // boneCount + { TYPE_BOOL, false, 
(size_t)(&((ParametersStruct*)0)->deleteStaticBuffersAfterUse), NULL, 0 }, // deleteStaticBuffersAfterUse + { TYPE_BOOL, false, (size_t)(&((ParametersStruct*)0)->isReferenced), NULL, 0 }, // isReferenced +}; + + +bool RenderMeshAssetParameters::mBuiltFlag = false; +NvParameterized::MutexType RenderMeshAssetParameters::mBuiltFlagMutex; + +RenderMeshAssetParameters::RenderMeshAssetParameters(NvParameterized::Traits* traits, void* buf, int32_t* refCount) : + NvParameters(traits, buf, refCount) +{ + //mParameterizedTraits->registerFactory(className(), &RenderMeshAssetParametersFactoryInst); + + if (!buf) //Do not init data if it is inplace-deserialized + { + initDynamicArrays(); + initStrings(); + initReferences(); + initDefaults(); + } +} + +RenderMeshAssetParameters::~RenderMeshAssetParameters() +{ + freeStrings(); + freeReferences(); + freeDynamicArrays(); +} + +void RenderMeshAssetParameters::destroy() +{ + // We cache these fields here to avoid overwrite in destructor + bool doDeallocateSelf = mDoDeallocateSelf; + NvParameterized::Traits* traits = mParameterizedTraits; + int32_t* refCount = mRefCount; + void* buf = mBuffer; + + this->~RenderMeshAssetParameters(); + + NvParameters::destroy(this, traits, doDeallocateSelf, refCount, buf); +} + +const NvParameterized::DefinitionImpl* RenderMeshAssetParameters::getParameterDefinitionTree(void) +{ + if (!mBuiltFlag) // Double-checked lock + { + NvParameterized::MutexType::ScopedLock lock(mBuiltFlagMutex); + if (!mBuiltFlag) + { + buildTree(); + } + } + + return(&ParamDefTable[0]); +} + +const NvParameterized::DefinitionImpl* RenderMeshAssetParameters::getParameterDefinitionTree(void) const +{ + RenderMeshAssetParameters* tmpParam = const_cast<RenderMeshAssetParameters*>(this); + + if (!mBuiltFlag) // Double-checked lock + { + NvParameterized::MutexType::ScopedLock lock(mBuiltFlagMutex); + if (!mBuiltFlag) + { + tmpParam->buildTree(); + } + } + + return(&ParamDefTable[0]); +} + +NvParameterized::ErrorType 
RenderMeshAssetParameters::getParameterHandle(const char* long_name, Handle& handle) const +{ + ErrorType Ret = NvParameters::getParameterHandle(long_name, handle); + if (Ret != ERROR_NONE) + { + return(Ret); + } + + size_t offset; + void* ptr; + + getVarPtr(handle, ptr, offset); + + if (ptr == NULL) + { + return(ERROR_INDEX_OUT_OF_RANGE); + } + + return(ERROR_NONE); +} + +NvParameterized::ErrorType RenderMeshAssetParameters::getParameterHandle(const char* long_name, Handle& handle) +{ + ErrorType Ret = NvParameters::getParameterHandle(long_name, handle); + if (Ret != ERROR_NONE) + { + return(Ret); + } + + size_t offset; + void* ptr; + + getVarPtr(handle, ptr, offset); + + if (ptr == NULL) + { + return(ERROR_INDEX_OUT_OF_RANGE); + } + + return(ERROR_NONE); +} + +void RenderMeshAssetParameters::getVarPtr(const Handle& handle, void*& ptr, size_t& offset) const +{ + ptr = getVarPtrHelper(&ParamLookupTable[0], const_cast<RenderMeshAssetParameters::ParametersStruct*>(¶meters()), handle, offset); +} + + +/* Dynamic Handle Indices */ +/* [0] - submeshes (not an array of structs) */ +/* [0] - materialNames (not an array of structs) */ + +void RenderMeshAssetParameters::freeParameterDefinitionTable(NvParameterized::Traits* traits) +{ + if (!traits) + { + return; + } + + if (!mBuiltFlag) // Double-checked lock + { + return; + } + + NvParameterized::MutexType::ScopedLock lock(mBuiltFlagMutex); + + if (!mBuiltFlag) + { + return; + } + + for (uint32_t i = 0; i < NumParamDefs; ++i) + { + ParamDefTable[i].~DefinitionImpl(); + } + + traits->free(ParamDefTable); + + mBuiltFlag = false; +} + +#define PDEF_PTR(index) (&ParamDefTable[index]) + +void RenderMeshAssetParameters::buildTree(void) +{ + + uint32_t allocSize = sizeof(NvParameterized::DefinitionImpl) * NumParamDefs; + ParamDefTable = (NvParameterized::DefinitionImpl*)(mParameterizedTraits->alloc(allocSize)); + memset(ParamDefTable, 0, allocSize); + + for (uint32_t i = 0; i < NumParamDefs; ++i) + { + 
NV_PARAM_PLACEMENT_NEW(ParamDefTable + i, NvParameterized::DefinitionImpl)(*mParameterizedTraits); + } + + // Initialize DefinitionImpl node: nodeIndex=0, longName="" + { + NvParameterized::DefinitionImpl* ParamDef = &ParamDefTable[0]; + ParamDef->init("", TYPE_STRUCT, "STRUCT", true); + + + + + + + } + + // Initialize DefinitionImpl node: nodeIndex=1, longName="submeshes" + { + NvParameterized::DefinitionImpl* ParamDef = &ParamDefTable[1]; + ParamDef->init("submeshes", TYPE_ARRAY, NULL, true); + +#ifdef NV_PARAMETERIZED_HIDE_DESCRIPTIONS + + static HintImpl HintTable[1]; + static Hint* HintPtrTable[1] = { &HintTable[0], }; + HintTable[0].init("INCLUDED", uint64_t(1), true); + ParamDefTable[1].setHints((const NvParameterized::Hint**)HintPtrTable, 1); + +#else + + static HintImpl HintTable[3]; + static Hint* HintPtrTable[3] = { &HintTable[0], &HintTable[1], &HintTable[2], }; + HintTable[0].init("INCLUDED", uint64_t(1), true); + HintTable[1].init("longDescription", "This is the array of submeshes which comprise the mesh. Triangles are grouped\ninto submeshes, which correspond to a unique material name. 
The distinction\nneed not be just material; this grouping may distinguish any render state which\nrequires a separate draw call.\n", true); + HintTable[2].init("shortDescription", "Array of submeshes", true); + ParamDefTable[1].setHints((const NvParameterized::Hint**)HintPtrTable, 3); + +#endif /* NV_PARAMETERIZED_HIDE_DESCRIPTIONS */ + + + static const char* const RefVariantVals[] = { "SubmeshParameters" }; + ParamDefTable[1].setRefVariantVals((const char**)RefVariantVals, 1); + + + ParamDef->setArraySize(-1); + static const uint8_t dynHandleIndices[1] = { 0, }; + ParamDef->setDynamicHandleIndicesMap(dynHandleIndices, 1); + + } + + // Initialize DefinitionImpl node: nodeIndex=2, longName="submeshes[]" + { + NvParameterized::DefinitionImpl* ParamDef = &ParamDefTable[2]; + ParamDef->init("submeshes", TYPE_REF, NULL, true); + +#ifdef NV_PARAMETERIZED_HIDE_DESCRIPTIONS + + static HintImpl HintTable[1]; + static Hint* HintPtrTable[1] = { &HintTable[0], }; + HintTable[0].init("INCLUDED", uint64_t(1), true); + ParamDefTable[2].setHints((const NvParameterized::Hint**)HintPtrTable, 1); + +#else + + static HintImpl HintTable[3]; + static Hint* HintPtrTable[3] = { &HintTable[0], &HintTable[1], &HintTable[2], }; + HintTable[0].init("INCLUDED", uint64_t(1), true); + HintTable[1].init("longDescription", "This is the array of submeshes which comprise the mesh. Triangles are grouped\ninto submeshes, which correspond to a unique material name. 
The distinction\nneed not be just material; this grouping may distinguish any render state which\nrequires a separate draw call.\n", true); + HintTable[2].init("shortDescription", "Array of submeshes", true); + ParamDefTable[2].setHints((const NvParameterized::Hint**)HintPtrTable, 3); + +#endif /* NV_PARAMETERIZED_HIDE_DESCRIPTIONS */ + + + static const char* const RefVariantVals[] = { "SubmeshParameters" }; + ParamDefTable[2].setRefVariantVals((const char**)RefVariantVals, 1); + + + + } + + // Initialize DefinitionImpl node: nodeIndex=3, longName="materialNames" + { + NvParameterized::DefinitionImpl* ParamDef = &ParamDefTable[3]; + ParamDef->init("materialNames", TYPE_ARRAY, NULL, true); + +#ifdef NV_PARAMETERIZED_HIDE_DESCRIPTIONS + +#else + + static HintImpl HintTable[2]; + static Hint* HintPtrTable[2] = { &HintTable[0], &HintTable[1], }; + HintTable[0].init("longDescription", "The material names which distinguish the submeshes (see submeshes).", true); + HintTable[1].init("shortDescription", "Array of material names", true); + ParamDefTable[3].setHints((const NvParameterized::Hint**)HintPtrTable, 2); + +#endif /* NV_PARAMETERIZED_HIDE_DESCRIPTIONS */ + + + + + ParamDef->setArraySize(-1); + static const uint8_t dynHandleIndices[1] = { 0, }; + ParamDef->setDynamicHandleIndicesMap(dynHandleIndices, 1); + + } + + // Initialize DefinitionImpl node: nodeIndex=4, longName="materialNames[]" + { + NvParameterized::DefinitionImpl* ParamDef = &ParamDefTable[4]; + ParamDef->init("materialNames", TYPE_STRING, NULL, true); + +#ifdef NV_PARAMETERIZED_HIDE_DESCRIPTIONS + +#else + + static HintImpl HintTable[2]; + static Hint* HintPtrTable[2] = { &HintTable[0], &HintTable[1], }; + HintTable[0].init("longDescription", "The material names which distinguish the submeshes (see submeshes).", true); + HintTable[1].init("shortDescription", "Array of material names", true); + ParamDefTable[4].setHints((const NvParameterized::Hint**)HintPtrTable, 2); + +#endif /* 
NV_PARAMETERIZED_HIDE_DESCRIPTIONS */ + + + + + + } + + // Initialize DefinitionImpl node: nodeIndex=5, longName="partBounds" + { + NvParameterized::DefinitionImpl* ParamDef = &ParamDefTable[5]; + ParamDef->init("partBounds", TYPE_ARRAY, NULL, true); + +#ifdef NV_PARAMETERIZED_HIDE_DESCRIPTIONS + +#else + + static HintImpl HintTable[2]; + static Hint* HintPtrTable[2] = { &HintTable[0], &HintTable[1], }; + HintTable[0].init("longDescription", "Array of axis-aligned bounding boxes for each part. The bounds for part i are in partBounds[i].", true); + HintTable[1].init("shortDescription", "The AABBs of each mesh part", true); + ParamDefTable[5].setHints((const NvParameterized::Hint**)HintPtrTable, 2); + +#endif /* NV_PARAMETERIZED_HIDE_DESCRIPTIONS */ + + + + + ParamDef->setArraySize(-1); + } + + // Initialize DefinitionImpl node: nodeIndex=6, longName="partBounds[]" + { + NvParameterized::DefinitionImpl* ParamDef = &ParamDefTable[6]; + ParamDef->init("partBounds", TYPE_BOUNDS3, NULL, true); + +#ifdef NV_PARAMETERIZED_HIDE_DESCRIPTIONS + +#else + + static HintImpl HintTable[2]; + static Hint* HintPtrTable[2] = { &HintTable[0], &HintTable[1], }; + HintTable[0].init("longDescription", "Array of axis-aligned bounding boxes for each part. The bounds for part i are in partBounds[i].", true); + HintTable[1].init("shortDescription", "The AABBs of each mesh part", true); + ParamDefTable[6].setHints((const NvParameterized::Hint**)HintPtrTable, 2); + +#endif /* NV_PARAMETERIZED_HIDE_DESCRIPTIONS */ + + + + + + } + + // Initialize DefinitionImpl node: nodeIndex=7, longName="textureUVOrigin" + { + NvParameterized::DefinitionImpl* ParamDef = &ParamDefTable[7]; + ParamDef->init("textureUVOrigin", TYPE_U32, NULL, true); + +#ifdef NV_PARAMETERIZED_HIDE_DESCRIPTIONS + +#else + + static HintImpl HintTable[2]; + static Hint* HintPtrTable[2] = { &HintTable[0], &HintTable[1], }; + HintTable[0].init("longDescription", "The texture origin convention to use for this mesh. 
See TextureUVOrigin.", true); + HintTable[1].init("shortDescription", "Texture origin convention", true); + ParamDefTable[7].setHints((const NvParameterized::Hint**)HintPtrTable, 2); + +#endif /* NV_PARAMETERIZED_HIDE_DESCRIPTIONS */ + + + + + + } + + // Initialize DefinitionImpl node: nodeIndex=8, longName="boneCount" + { + NvParameterized::DefinitionImpl* ParamDef = &ParamDefTable[8]; + ParamDef->init("boneCount", TYPE_U32, NULL, true); + +#ifdef NV_PARAMETERIZED_HIDE_DESCRIPTIONS + +#else + + static HintImpl HintTable[2]; + static Hint* HintPtrTable[2] = { &HintTable[0], &HintTable[1], }; + HintTable[0].init("longDescription", "The number of mesh-skinning bones. For destructible assets, this is the same as the number of parts.", true); + HintTable[1].init("shortDescription", "The number of mesh-skinning bones", true); + ParamDefTable[8].setHints((const NvParameterized::Hint**)HintPtrTable, 2); + +#endif /* NV_PARAMETERIZED_HIDE_DESCRIPTIONS */ + + + + + + } + + // Initialize DefinitionImpl node: nodeIndex=9, longName="deleteStaticBuffersAfterUse" + { + NvParameterized::DefinitionImpl* ParamDef = &ParamDefTable[9]; + ParamDef->init("deleteStaticBuffersAfterUse", TYPE_BOOL, NULL, true); + +#ifdef NV_PARAMETERIZED_HIDE_DESCRIPTIONS + +#else + + static HintImpl HintTable[2]; + static Hint* HintPtrTable[2] = { &HintTable[0], &HintTable[1], }; + HintTable[0].init("longDescription", "If set, static data buffers will be deleted after they are used in createRenderResources.", true); + HintTable[1].init("shortDescription", "If set, static data buffers will be deleted after they are used in createRenderResources.", true); + ParamDefTable[9].setHints((const NvParameterized::Hint**)HintPtrTable, 2); + +#endif /* NV_PARAMETERIZED_HIDE_DESCRIPTIONS */ + + + + + + } + + // Initialize DefinitionImpl node: nodeIndex=10, longName="isReferenced" + { + NvParameterized::DefinitionImpl* ParamDef = &ParamDefTable[10]; + ParamDef->init("isReferenced", TYPE_BOOL, NULL, true); + +#ifdef 
NV_PARAMETERIZED_HIDE_DESCRIPTIONS + +#else + + static HintImpl HintTable[1]; + static Hint* HintPtrTable[1] = { &HintTable[0], }; + HintTable[0].init("shortDescription", "Is the render mesh asset referenced in other assets", true); + ParamDefTable[10].setHints((const NvParameterized::Hint**)HintPtrTable, 1); + +#endif /* NV_PARAMETERIZED_HIDE_DESCRIPTIONS */ + + + + + + } + + // SetChildren for: nodeIndex=0, longName="" + { + static Definition* Children[7]; + Children[0] = PDEF_PTR(1); + Children[1] = PDEF_PTR(3); + Children[2] = PDEF_PTR(5); + Children[3] = PDEF_PTR(7); + Children[4] = PDEF_PTR(8); + Children[5] = PDEF_PTR(9); + Children[6] = PDEF_PTR(10); + + ParamDefTable[0].setChildren(Children, 7); + } + + // SetChildren for: nodeIndex=1, longName="submeshes" + { + static Definition* Children[1]; + Children[0] = PDEF_PTR(2); + + ParamDefTable[1].setChildren(Children, 1); + } + + // SetChildren for: nodeIndex=3, longName="materialNames" + { + static Definition* Children[1]; + Children[0] = PDEF_PTR(4); + + ParamDefTable[3].setChildren(Children, 1); + } + + // SetChildren for: nodeIndex=5, longName="partBounds" + { + static Definition* Children[1]; + Children[0] = PDEF_PTR(6); + + ParamDefTable[5].setChildren(Children, 1); + } + + mBuiltFlag = true; + +} +void RenderMeshAssetParameters::initStrings(void) +{ +} + +void RenderMeshAssetParameters::initDynamicArrays(void) +{ + submeshes.buf = NULL; + submeshes.isAllocated = true; + submeshes.elementSize = sizeof(NvParameterized::Interface*); + submeshes.arraySizes[0] = 0; + materialNames.buf = NULL; + materialNames.isAllocated = true; + materialNames.elementSize = sizeof(NvParameterized::DummyStringStruct); + materialNames.arraySizes[0] = 0; + partBounds.buf = NULL; + partBounds.isAllocated = true; + partBounds.elementSize = sizeof(physx::PxBounds3); + partBounds.arraySizes[0] = 0; +} + +void RenderMeshAssetParameters::initDefaults(void) +{ + + freeStrings(); + freeReferences(); + freeDynamicArrays(); + 
textureUVOrigin = uint32_t(0); + boneCount = uint32_t(0); + deleteStaticBuffersAfterUse = bool(false); + isReferenced = bool(false); + + initDynamicArrays(); + initStrings(); + initReferences(); +} + +void RenderMeshAssetParameters::initReferences(void) +{ +} + +void RenderMeshAssetParameters::freeDynamicArrays(void) +{ + if (submeshes.isAllocated && submeshes.buf) + { + mParameterizedTraits->free(submeshes.buf); + } + if (materialNames.isAllocated && materialNames.buf) + { + mParameterizedTraits->free(materialNames.buf); + } + if (partBounds.isAllocated && partBounds.buf) + { + mParameterizedTraits->free(partBounds.buf); + } +} + +void RenderMeshAssetParameters::freeStrings(void) +{ + + for (int i = 0; i < materialNames.arraySizes[0]; ++i) + { + if (materialNames.buf[i].isAllocated && materialNames.buf[i].buf) + { + mParameterizedTraits->strfree((char*)materialNames.buf[i].buf); + } + } +} + +void RenderMeshAssetParameters::freeReferences(void) +{ + + for (int i = 0; i < submeshes.arraySizes[0]; ++i) + { + if (submeshes.buf[i]) + { + submeshes.buf[i]->destroy(); + } + } +} + +} // namespace apex +} // namespace nvidia diff --git a/APEX_1.4/framework/src/autogen/SubmeshParameters.cpp b/APEX_1.4/framework/src/autogen/SubmeshParameters.cpp new file mode 100644 index 00000000..dd404888 --- /dev/null +++ b/APEX_1.4/framework/src/autogen/SubmeshParameters.cpp @@ -0,0 +1,595 @@ +// This code contains NVIDIA Confidential Information and is disclosed to you +// under a form of NVIDIA software license agreement provided separately to you. +// +// Notice +// NVIDIA Corporation and its licensors retain all intellectual property and +// proprietary rights in and to this software and related documentation and +// any modifications thereto. Any use, reproduction, disclosure, or +// distribution of this software and related documentation without an express +// license agreement from NVIDIA Corporation is strictly prohibited. 
+// +// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES +// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO +// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT, +// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE. +// +// Information and code furnished is believed to be accurate and reliable. +// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such +// information or for any infringement of patents or other rights of third parties that may +// result from its use. No license is granted by implication or otherwise under any patent +// or patent rights of NVIDIA Corporation. Details are subject to change without notice. +// This code supersedes and replaces all information previously supplied. +// NVIDIA Corporation products are not authorized for use as critical +// components in life support devices or systems without express written approval of +// NVIDIA Corporation. +// +// Copyright (c) 2008-2015 NVIDIA Corporation. All rights reserved. 
+ +// This file was generated by NvParameterized/scripts/GenParameterized.pl + + +#include "SubmeshParameters.h" +#include <string.h> +#include <stdlib.h> + +using namespace NvParameterized; + +namespace nvidia +{ +namespace apex +{ + +using namespace SubmeshParametersNS; + +const char* const SubmeshParametersFactory::vptr = + NvParameterized::getVptr<SubmeshParameters, SubmeshParameters::ClassAlignment>(); + +const uint32_t NumParamDefs = 10; +static NvParameterized::DefinitionImpl* ParamDefTable; // now allocated in buildTree [NumParamDefs]; + + +static const size_t ParamLookupChildrenTable[] = +{ + 1, 2, 4, 6, 8, 3, 5, 7, 9, +}; + +#define TENUM(type) nvidia::##type +#define CHILDREN(index) &ParamLookupChildrenTable[index] +static const NvParameterized::ParamLookupNode ParamLookupTable[NumParamDefs] = +{ + { TYPE_STRUCT, false, 0, CHILDREN(0), 5 }, + { TYPE_REF, false, (size_t)(&((ParametersStruct*)0)->vertexBuffer), NULL, 0 }, // vertexBuffer + { TYPE_ARRAY, true, (size_t)(&((ParametersStruct*)0)->indexBuffer), CHILDREN(5), 1 }, // indexBuffer + { TYPE_U32, false, 1 * sizeof(uint32_t), NULL, 0 }, // indexBuffer[] + { TYPE_ARRAY, true, (size_t)(&((ParametersStruct*)0)->vertexPartition), CHILDREN(6), 1 }, // vertexPartition + { TYPE_U32, false, 1 * sizeof(uint32_t), NULL, 0 }, // vertexPartition[] + { TYPE_ARRAY, true, (size_t)(&((ParametersStruct*)0)->indexPartition), CHILDREN(7), 1 }, // indexPartition + { TYPE_U32, false, 1 * sizeof(uint32_t), NULL, 0 }, // indexPartition[] + { TYPE_ARRAY, true, (size_t)(&((ParametersStruct*)0)->smoothingGroups), CHILDREN(8), 1 }, // smoothingGroups + { TYPE_U32, false, 1 * sizeof(uint32_t), NULL, 0 }, // smoothingGroups[] +}; + + +bool SubmeshParameters::mBuiltFlag = false; +NvParameterized::MutexType SubmeshParameters::mBuiltFlagMutex; + +SubmeshParameters::SubmeshParameters(NvParameterized::Traits* traits, void* buf, int32_t* refCount) : + NvParameters(traits, buf, refCount) +{ + 
//mParameterizedTraits->registerFactory(className(), &SubmeshParametersFactoryInst); + + if (!buf) //Do not init data if it is inplace-deserialized + { + initDynamicArrays(); + initStrings(); + initReferences(); + initDefaults(); + } +} + +SubmeshParameters::~SubmeshParameters() +{ + freeStrings(); + freeReferences(); + freeDynamicArrays(); +} + +void SubmeshParameters::destroy() +{ + // We cache these fields here to avoid overwrite in destructor + bool doDeallocateSelf = mDoDeallocateSelf; + NvParameterized::Traits* traits = mParameterizedTraits; + int32_t* refCount = mRefCount; + void* buf = mBuffer; + + this->~SubmeshParameters(); + + NvParameters::destroy(this, traits, doDeallocateSelf, refCount, buf); +} + +const NvParameterized::DefinitionImpl* SubmeshParameters::getParameterDefinitionTree(void) +{ + if (!mBuiltFlag) // Double-checked lock + { + NvParameterized::MutexType::ScopedLock lock(mBuiltFlagMutex); + if (!mBuiltFlag) + { + buildTree(); + } + } + + return(&ParamDefTable[0]); +} + +const NvParameterized::DefinitionImpl* SubmeshParameters::getParameterDefinitionTree(void) const +{ + SubmeshParameters* tmpParam = const_cast<SubmeshParameters*>(this); + + if (!mBuiltFlag) // Double-checked lock + { + NvParameterized::MutexType::ScopedLock lock(mBuiltFlagMutex); + if (!mBuiltFlag) + { + tmpParam->buildTree(); + } + } + + return(&ParamDefTable[0]); +} + +NvParameterized::ErrorType SubmeshParameters::getParameterHandle(const char* long_name, Handle& handle) const +{ + ErrorType Ret = NvParameters::getParameterHandle(long_name, handle); + if (Ret != ERROR_NONE) + { + return(Ret); + } + + size_t offset; + void* ptr; + + getVarPtr(handle, ptr, offset); + + if (ptr == NULL) + { + return(ERROR_INDEX_OUT_OF_RANGE); + } + + return(ERROR_NONE); +} + +NvParameterized::ErrorType SubmeshParameters::getParameterHandle(const char* long_name, Handle& handle) +{ + ErrorType Ret = NvParameters::getParameterHandle(long_name, handle); + if (Ret != ERROR_NONE) + { + 
return(Ret); + } + + size_t offset; + void* ptr; + + getVarPtr(handle, ptr, offset); + + if (ptr == NULL) + { + return(ERROR_INDEX_OUT_OF_RANGE); + } + + return(ERROR_NONE); +} + +void SubmeshParameters::getVarPtr(const Handle& handle, void*& ptr, size_t& offset) const +{ + ptr = getVarPtrHelper(&ParamLookupTable[0], const_cast<SubmeshParameters::ParametersStruct*>(¶meters()), handle, offset); +} + + +/* Dynamic Handle Indices */ + +void SubmeshParameters::freeParameterDefinitionTable(NvParameterized::Traits* traits) +{ + if (!traits) + { + return; + } + + if (!mBuiltFlag) // Double-checked lock + { + return; + } + + NvParameterized::MutexType::ScopedLock lock(mBuiltFlagMutex); + + if (!mBuiltFlag) + { + return; + } + + for (uint32_t i = 0; i < NumParamDefs; ++i) + { + ParamDefTable[i].~DefinitionImpl(); + } + + traits->free(ParamDefTable); + + mBuiltFlag = false; +} + +#define PDEF_PTR(index) (&ParamDefTable[index]) + +void SubmeshParameters::buildTree(void) +{ + + uint32_t allocSize = sizeof(NvParameterized::DefinitionImpl) * NumParamDefs; + ParamDefTable = (NvParameterized::DefinitionImpl*)(mParameterizedTraits->alloc(allocSize)); + memset(ParamDefTable, 0, allocSize); + + for (uint32_t i = 0; i < NumParamDefs; ++i) + { + NV_PARAM_PLACEMENT_NEW(ParamDefTable + i, NvParameterized::DefinitionImpl)(*mParameterizedTraits); + } + + // Initialize DefinitionImpl node: nodeIndex=0, longName="" + { + NvParameterized::DefinitionImpl* ParamDef = &ParamDefTable[0]; + ParamDef->init("", TYPE_STRUCT, "STRUCT", true); + + + + + + + } + + // Initialize DefinitionImpl node: nodeIndex=1, longName="vertexBuffer" + { + NvParameterized::DefinitionImpl* ParamDef = &ParamDefTable[1]; + ParamDef->init("vertexBuffer", TYPE_REF, NULL, true); + +#ifdef NV_PARAMETERIZED_HIDE_DESCRIPTIONS + + static HintImpl HintTable[1]; + static Hint* HintPtrTable[1] = { &HintTable[0], }; + HintTable[0].init("INCLUDED", uint64_t(1), true); + ParamDefTable[1].setHints((const 
NvParameterized::Hint**)HintPtrTable, 1); + +#else + + static HintImpl HintTable[3]; + static Hint* HintPtrTable[3] = { &HintTable[0], &HintTable[1], &HintTable[2], }; + HintTable[0].init("INCLUDED", uint64_t(1), true); + HintTable[1].init("longDescription", "This is the vertex buffer included with this submesh. The submesh is defined\nby a vertex buffer and an index buffer (see indexBuffer). The vertices for\ndifferent mesh parts are stored in contiguous subsets of the whole vertex buffer.\nThe vertexPartition array holds the offsets into the vertexBuffer for each part.\n", true); + HintTable[2].init("shortDescription", "The vertex buffer for this submesh", true); + ParamDefTable[1].setHints((const NvParameterized::Hint**)HintPtrTable, 3); + +#endif /* NV_PARAMETERIZED_HIDE_DESCRIPTIONS */ + + + static const char* const RefVariantVals[] = { "VertexBufferParameters" }; + ParamDefTable[1].setRefVariantVals((const char**)RefVariantVals, 1); + + + + } + + // Initialize DefinitionImpl node: nodeIndex=2, longName="indexBuffer" + { + NvParameterized::DefinitionImpl* ParamDef = &ParamDefTable[2]; + ParamDef->init("indexBuffer", TYPE_ARRAY, NULL, true); + +#ifdef NV_PARAMETERIZED_HIDE_DESCRIPTIONS + + static HintImpl HintTable[1]; + static Hint* HintPtrTable[1] = { &HintTable[0], }; + HintTable[0].init("NOPVD", uint64_t(1), true); + ParamDefTable[2].setHints((const NvParameterized::Hint**)HintPtrTable, 1); + +#else + + static HintImpl HintTable[3]; + static Hint* HintPtrTable[3] = { &HintTable[0], &HintTable[1], &HintTable[2], }; + HintTable[0].init("NOPVD", uint64_t(1), true); + HintTable[1].init("longDescription", "This is the vertex buffer included with this submesh. The submesh is defined\nby a index buffer and an vertex buffer (see vertexBuffer). 
The indices for\ndifferent mesh parts are stored in contiguous subsets of the whole index buffer.\nThe indexPartition array holds the offsets into the indexBuffer for each part.\n", true); + HintTable[2].init("shortDescription", "The index buffer for this submesh", true); + ParamDefTable[2].setHints((const NvParameterized::Hint**)HintPtrTable, 3); + +#endif /* NV_PARAMETERIZED_HIDE_DESCRIPTIONS */ + + + + + ParamDef->setArraySize(-1); + } + + // Initialize DefinitionImpl node: nodeIndex=3, longName="indexBuffer[]" + { + NvParameterized::DefinitionImpl* ParamDef = &ParamDefTable[3]; + ParamDef->init("indexBuffer", TYPE_U32, NULL, true); + +#ifdef NV_PARAMETERIZED_HIDE_DESCRIPTIONS + + static HintImpl HintTable[1]; + static Hint* HintPtrTable[1] = { &HintTable[0], }; + HintTable[0].init("NOPVD", uint64_t(1), true); + ParamDefTable[3].setHints((const NvParameterized::Hint**)HintPtrTable, 1); + +#else + + static HintImpl HintTable[3]; + static Hint* HintPtrTable[3] = { &HintTable[0], &HintTable[1], &HintTable[2], }; + HintTable[0].init("NOPVD", uint64_t(1), true); + HintTable[1].init("longDescription", "This is the vertex buffer included with this submesh. The submesh is defined\nby a index buffer and an vertex buffer (see vertexBuffer). 
The indices for\ndifferent mesh parts are stored in contiguous subsets of the whole index buffer.\nThe indexPartition array holds the offsets into the indexBuffer for each part.\n", true); + HintTable[2].init("shortDescription", "The index buffer for this submesh", true); + ParamDefTable[3].setHints((const NvParameterized::Hint**)HintPtrTable, 3); + +#endif /* NV_PARAMETERIZED_HIDE_DESCRIPTIONS */ + + + + + + } + + // Initialize DefinitionImpl node: nodeIndex=4, longName="vertexPartition" + { + NvParameterized::DefinitionImpl* ParamDef = &ParamDefTable[4]; + ParamDef->init("vertexPartition", TYPE_ARRAY, NULL, true); + +#ifdef NV_PARAMETERIZED_HIDE_DESCRIPTIONS + +#else + + static HintImpl HintTable[2]; + static Hint* HintPtrTable[2] = { &HintTable[0], &HintTable[1], }; + HintTable[0].init("longDescription", "Index offset into vertexBuffer for each part. The first vertex index for part\ni is vertexPartition[i]. The vertexPartition array size is N+1, where N = the\nnumber of mesh parts, and vertexPartition[N] = vertexBuffer.vertexCount (the\nsize of the vertex buffer). This way, the number of vertices for part i can be\nalways be obtained with vertexPartition[i+1]-vertexPartition[i].\n", true); + HintTable[1].init("shortDescription", "Part lookup into vertexBuffer", true); + ParamDefTable[4].setHints((const NvParameterized::Hint**)HintPtrTable, 2); + +#endif /* NV_PARAMETERIZED_HIDE_DESCRIPTIONS */ + + + + + ParamDef->setArraySize(-1); + } + + // Initialize DefinitionImpl node: nodeIndex=5, longName="vertexPartition[]" + { + NvParameterized::DefinitionImpl* ParamDef = &ParamDefTable[5]; + ParamDef->init("vertexPartition", TYPE_U32, NULL, true); + +#ifdef NV_PARAMETERIZED_HIDE_DESCRIPTIONS + +#else + + static HintImpl HintTable[2]; + static Hint* HintPtrTable[2] = { &HintTable[0], &HintTable[1], }; + HintTable[0].init("longDescription", "Index offset into vertexBuffer for each part. The first vertex index for part\ni is vertexPartition[i]. 
The vertexPartition array size is N+1, where N = the\nnumber of mesh parts, and vertexPartition[N] = vertexBuffer.vertexCount (the\nsize of the vertex buffer). This way, the number of vertices for part i can be\nalways be obtained with vertexPartition[i+1]-vertexPartition[i].\n", true); + HintTable[1].init("shortDescription", "Part lookup into vertexBuffer", true); + ParamDefTable[5].setHints((const NvParameterized::Hint**)HintPtrTable, 2); + +#endif /* NV_PARAMETERIZED_HIDE_DESCRIPTIONS */ + + + + + + } + + // Initialize DefinitionImpl node: nodeIndex=6, longName="indexPartition" + { + NvParameterized::DefinitionImpl* ParamDef = &ParamDefTable[6]; + ParamDef->init("indexPartition", TYPE_ARRAY, NULL, true); + +#ifdef NV_PARAMETERIZED_HIDE_DESCRIPTIONS + +#else + + static HintImpl HintTable[2]; + static Hint* HintPtrTable[2] = { &HintTable[0], &HintTable[1], }; + HintTable[0].init("longDescription", "Index offset into indexBuffer for each part. The first index location in\nindexPartition for part i is indexPartition[i]. The indexPartition array\nsize is N+1, where N = the number of mesh parts, and indexPartition[N] =\nthe size of the indexBuffer. This way, the number of indices for part i\ncan be always be obtained with indexPartition[i+1]-indexPartition[i].\n", true); + HintTable[1].init("shortDescription", "Part lookup into indexBuffer", true); + ParamDefTable[6].setHints((const NvParameterized::Hint**)HintPtrTable, 2); + +#endif /* NV_PARAMETERIZED_HIDE_DESCRIPTIONS */ + + + + + ParamDef->setArraySize(-1); + } + + // Initialize DefinitionImpl node: nodeIndex=7, longName="indexPartition[]" + { + NvParameterized::DefinitionImpl* ParamDef = &ParamDefTable[7]; + ParamDef->init("indexPartition", TYPE_U32, NULL, true); + +#ifdef NV_PARAMETERIZED_HIDE_DESCRIPTIONS + +#else + + static HintImpl HintTable[2]; + static Hint* HintPtrTable[2] = { &HintTable[0], &HintTable[1], }; + HintTable[0].init("longDescription", "Index offset into indexBuffer for each part. 
The first index location in\nindexPartition for part i is indexPartition[i]. The indexPartition array\nsize is N+1, where N = the number of mesh parts, and indexPartition[N] =\nthe size of the indexBuffer. This way, the number of indices for part i\ncan be always be obtained with indexPartition[i+1]-indexPartition[i].\n", true); + HintTable[1].init("shortDescription", "Part lookup into indexBuffer", true); + ParamDefTable[7].setHints((const NvParameterized::Hint**)HintPtrTable, 2); + +#endif /* NV_PARAMETERIZED_HIDE_DESCRIPTIONS */ + + + + + + } + + // Initialize DefinitionImpl node: nodeIndex=8, longName="smoothingGroups" + { + NvParameterized::DefinitionImpl* ParamDef = &ParamDefTable[8]; + ParamDef->init("smoothingGroups", TYPE_ARRAY, NULL, true); + +#ifdef NV_PARAMETERIZED_HIDE_DESCRIPTIONS + +#else + + static HintImpl HintTable[2]; + static Hint* HintPtrTable[2] = { &HintTable[0], &HintTable[1], }; + HintTable[0].init("longDescription", "Per-triangle smoothing group masks", true); + HintTable[1].init("shortDescription", "Per-triangle smoothing group masks", true); + ParamDefTable[8].setHints((const NvParameterized::Hint**)HintPtrTable, 2); + +#endif /* NV_PARAMETERIZED_HIDE_DESCRIPTIONS */ + + + + + ParamDef->setArraySize(-1); + } + + // Initialize DefinitionImpl node: nodeIndex=9, longName="smoothingGroups[]" + { + NvParameterized::DefinitionImpl* ParamDef = &ParamDefTable[9]; + ParamDef->init("smoothingGroups", TYPE_U32, NULL, true); + +#ifdef NV_PARAMETERIZED_HIDE_DESCRIPTIONS + +#else + + static HintImpl HintTable[2]; + static Hint* HintPtrTable[2] = { &HintTable[0], &HintTable[1], }; + HintTable[0].init("longDescription", "Per-triangle smoothing group masks", true); + HintTable[1].init("shortDescription", "Per-triangle smoothing group masks", true); + ParamDefTable[9].setHints((const NvParameterized::Hint**)HintPtrTable, 2); + +#endif /* NV_PARAMETERIZED_HIDE_DESCRIPTIONS */ + + + + + + } + + // SetChildren for: nodeIndex=0, longName="" + { + static 
Definition* Children[5]; + Children[0] = PDEF_PTR(1); + Children[1] = PDEF_PTR(2); + Children[2] = PDEF_PTR(4); + Children[3] = PDEF_PTR(6); + Children[4] = PDEF_PTR(8); + + ParamDefTable[0].setChildren(Children, 5); + } + + // SetChildren for: nodeIndex=2, longName="indexBuffer" + { + static Definition* Children[1]; + Children[0] = PDEF_PTR(3); + + ParamDefTable[2].setChildren(Children, 1); + } + + // SetChildren for: nodeIndex=4, longName="vertexPartition" + { + static Definition* Children[1]; + Children[0] = PDEF_PTR(5); + + ParamDefTable[4].setChildren(Children, 1); + } + + // SetChildren for: nodeIndex=6, longName="indexPartition" + { + static Definition* Children[1]; + Children[0] = PDEF_PTR(7); + + ParamDefTable[6].setChildren(Children, 1); + } + + // SetChildren for: nodeIndex=8, longName="smoothingGroups" + { + static Definition* Children[1]; + Children[0] = PDEF_PTR(9); + + ParamDefTable[8].setChildren(Children, 1); + } + + mBuiltFlag = true; + +} +void SubmeshParameters::initStrings(void) +{ +} + +void SubmeshParameters::initDynamicArrays(void) +{ + indexBuffer.buf = NULL; + indexBuffer.isAllocated = true; + indexBuffer.elementSize = sizeof(uint32_t); + indexBuffer.arraySizes[0] = 0; + vertexPartition.buf = NULL; + vertexPartition.isAllocated = true; + vertexPartition.elementSize = sizeof(uint32_t); + vertexPartition.arraySizes[0] = 0; + indexPartition.buf = NULL; + indexPartition.isAllocated = true; + indexPartition.elementSize = sizeof(uint32_t); + indexPartition.arraySizes[0] = 0; + smoothingGroups.buf = NULL; + smoothingGroups.isAllocated = true; + smoothingGroups.elementSize = sizeof(uint32_t); + smoothingGroups.arraySizes[0] = 0; +} + +void SubmeshParameters::initDefaults(void) +{ + + freeStrings(); + freeReferences(); + freeDynamicArrays(); + + initDynamicArrays(); + initStrings(); + initReferences(); +} + +void SubmeshParameters::initReferences(void) +{ + vertexBuffer = NULL; + +} + +void SubmeshParameters::freeDynamicArrays(void) +{ + if 
(indexBuffer.isAllocated && indexBuffer.buf) + { + mParameterizedTraits->free(indexBuffer.buf); + } + if (vertexPartition.isAllocated && vertexPartition.buf) + { + mParameterizedTraits->free(vertexPartition.buf); + } + if (indexPartition.isAllocated && indexPartition.buf) + { + mParameterizedTraits->free(indexPartition.buf); + } + if (smoothingGroups.isAllocated && smoothingGroups.buf) + { + mParameterizedTraits->free(smoothingGroups.buf); + } +} + +void SubmeshParameters::freeStrings(void) +{ +} + +void SubmeshParameters::freeReferences(void) +{ + if (vertexBuffer) + { + vertexBuffer->destroy(); + } + +} + +} // namespace apex +} // namespace nvidia diff --git a/APEX_1.4/framework/src/autogen/SurfaceBufferParameters.cpp b/APEX_1.4/framework/src/autogen/SurfaceBufferParameters.cpp new file mode 100644 index 00000000..e50ac12d --- /dev/null +++ b/APEX_1.4/framework/src/autogen/SurfaceBufferParameters.cpp @@ -0,0 +1,404 @@ +// This code contains NVIDIA Confidential Information and is disclosed to you +// under a form of NVIDIA software license agreement provided separately to you. +// +// Notice +// NVIDIA Corporation and its licensors retain all intellectual property and +// proprietary rights in and to this software and related documentation and +// any modifications thereto. Any use, reproduction, disclosure, or +// distribution of this software and related documentation without an express +// license agreement from NVIDIA Corporation is strictly prohibited. +// +// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES +// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO +// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT, +// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE. +// +// Information and code furnished is believed to be accurate and reliable. 
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such +// information or for any infringement of patents or other rights of third parties that may +// result from its use. No license is granted by implication or otherwise under any patent +// or patent rights of NVIDIA Corporation. Details are subject to change without notice. +// This code supersedes and replaces all information previously supplied. +// NVIDIA Corporation products are not authorized for use as critical +// components in life support devices or systems without express written approval of +// NVIDIA Corporation. +// +// Copyright (c) 2008-2015 NVIDIA Corporation. All rights reserved. + +// This file was generated by NvParameterized/scripts/GenParameterized.pl + + +#include "SurfaceBufferParameters.h" +#include <string.h> +#include <stdlib.h> + +using namespace NvParameterized; + +namespace nvidia +{ +namespace apex +{ + +using namespace SurfaceBufferParametersNS; + +const char* const SurfaceBufferParametersFactory::vptr = + NvParameterized::getVptr<SurfaceBufferParameters, SurfaceBufferParameters::ClassAlignment>(); + +const uint32_t NumParamDefs = 5; +static NvParameterized::DefinitionImpl* ParamDefTable; // now allocated in buildTree [NumParamDefs]; + + +static const size_t ParamLookupChildrenTable[] = +{ + 1, 2, 3, 4, +}; + +#define TENUM(type) nvidia::##type +#define CHILDREN(index) &ParamLookupChildrenTable[index] +static const NvParameterized::ParamLookupNode ParamLookupTable[NumParamDefs] = +{ + { TYPE_STRUCT, false, 0, CHILDREN(0), 4 }, + { TYPE_U32, false, (size_t)(&((ParametersStruct*)0)->width), NULL, 0 }, // width + { TYPE_U32, false, (size_t)(&((ParametersStruct*)0)->height), NULL, 0 }, // height + { TYPE_U32, false, (size_t)(&((ParametersStruct*)0)->surfaceFormat), NULL, 0 }, // surfaceFormat + { TYPE_REF, false, (size_t)(&((ParametersStruct*)0)->buffer), NULL, 0 }, // buffer +}; + + +bool SurfaceBufferParameters::mBuiltFlag = false; 
+NvParameterized::MutexType SurfaceBufferParameters::mBuiltFlagMutex; + +SurfaceBufferParameters::SurfaceBufferParameters(NvParameterized::Traits* traits, void* buf, int32_t* refCount) : + NvParameters(traits, buf, refCount) +{ + //mParameterizedTraits->registerFactory(className(), &SurfaceBufferParametersFactoryInst); + + if (!buf) //Do not init data if it is inplace-deserialized + { + initDynamicArrays(); + initStrings(); + initReferences(); + initDefaults(); + } +} + +SurfaceBufferParameters::~SurfaceBufferParameters() +{ + freeStrings(); + freeReferences(); + freeDynamicArrays(); +} + +void SurfaceBufferParameters::destroy() +{ + // We cache these fields here to avoid overwrite in destructor + bool doDeallocateSelf = mDoDeallocateSelf; + NvParameterized::Traits* traits = mParameterizedTraits; + int32_t* refCount = mRefCount; + void* buf = mBuffer; + + this->~SurfaceBufferParameters(); + + NvParameters::destroy(this, traits, doDeallocateSelf, refCount, buf); +} + +const NvParameterized::DefinitionImpl* SurfaceBufferParameters::getParameterDefinitionTree(void) +{ + if (!mBuiltFlag) // Double-checked lock + { + NvParameterized::MutexType::ScopedLock lock(mBuiltFlagMutex); + if (!mBuiltFlag) + { + buildTree(); + } + } + + return(&ParamDefTable[0]); +} + +const NvParameterized::DefinitionImpl* SurfaceBufferParameters::getParameterDefinitionTree(void) const +{ + SurfaceBufferParameters* tmpParam = const_cast<SurfaceBufferParameters*>(this); + + if (!mBuiltFlag) // Double-checked lock + { + NvParameterized::MutexType::ScopedLock lock(mBuiltFlagMutex); + if (!mBuiltFlag) + { + tmpParam->buildTree(); + } + } + + return(&ParamDefTable[0]); +} + +NvParameterized::ErrorType SurfaceBufferParameters::getParameterHandle(const char* long_name, Handle& handle) const +{ + ErrorType Ret = NvParameters::getParameterHandle(long_name, handle); + if (Ret != ERROR_NONE) + { + return(Ret); + } + + size_t offset; + void* ptr; + + getVarPtr(handle, ptr, offset); + + if (ptr == NULL) + { 
+ return(ERROR_INDEX_OUT_OF_RANGE); + } + + return(ERROR_NONE); +} + +NvParameterized::ErrorType SurfaceBufferParameters::getParameterHandle(const char* long_name, Handle& handle) +{ + ErrorType Ret = NvParameters::getParameterHandle(long_name, handle); + if (Ret != ERROR_NONE) + { + return(Ret); + } + + size_t offset; + void* ptr; + + getVarPtr(handle, ptr, offset); + + if (ptr == NULL) + { + return(ERROR_INDEX_OUT_OF_RANGE); + } + + return(ERROR_NONE); +} + +void SurfaceBufferParameters::getVarPtr(const Handle& handle, void*& ptr, size_t& offset) const +{ + ptr = getVarPtrHelper(&ParamLookupTable[0], const_cast<SurfaceBufferParameters::ParametersStruct*>(¶meters()), handle, offset); +} + + +/* Dynamic Handle Indices */ + +void SurfaceBufferParameters::freeParameterDefinitionTable(NvParameterized::Traits* traits) +{ + if (!traits) + { + return; + } + + if (!mBuiltFlag) // Double-checked lock + { + return; + } + + NvParameterized::MutexType::ScopedLock lock(mBuiltFlagMutex); + + if (!mBuiltFlag) + { + return; + } + + for (uint32_t i = 0; i < NumParamDefs; ++i) + { + ParamDefTable[i].~DefinitionImpl(); + } + + traits->free(ParamDefTable); + + mBuiltFlag = false; +} + +#define PDEF_PTR(index) (&ParamDefTable[index]) + +void SurfaceBufferParameters::buildTree(void) +{ + + uint32_t allocSize = sizeof(NvParameterized::DefinitionImpl) * NumParamDefs; + ParamDefTable = (NvParameterized::DefinitionImpl*)(mParameterizedTraits->alloc(allocSize)); + memset(ParamDefTable, 0, allocSize); + + for (uint32_t i = 0; i < NumParamDefs; ++i) + { + NV_PARAM_PLACEMENT_NEW(ParamDefTable + i, NvParameterized::DefinitionImpl)(*mParameterizedTraits); + } + + // Initialize DefinitionImpl node: nodeIndex=0, longName="" + { + NvParameterized::DefinitionImpl* ParamDef = &ParamDefTable[0]; + ParamDef->init("", TYPE_STRUCT, "STRUCT", true); + + + + + + + } + + // Initialize DefinitionImpl node: nodeIndex=1, longName="width" + { + NvParameterized::DefinitionImpl* ParamDef = &ParamDefTable[1]; + 
ParamDef->init("width", TYPE_U32, NULL, true); + +#ifdef NV_PARAMETERIZED_HIDE_DESCRIPTIONS + +#else + + static HintImpl HintTable[1]; + static Hint* HintPtrTable[1] = { &HintTable[0], }; + HintTable[0].init("shortDescription", "Number of point in U-dimension", true); + ParamDefTable[1].setHints((const NvParameterized::Hint**)HintPtrTable, 1); + +#endif /* NV_PARAMETERIZED_HIDE_DESCRIPTIONS */ + + + + + + } + + // Initialize DefinitionImpl node: nodeIndex=2, longName="height" + { + NvParameterized::DefinitionImpl* ParamDef = &ParamDefTable[2]; + ParamDef->init("height", TYPE_U32, NULL, true); + +#ifdef NV_PARAMETERIZED_HIDE_DESCRIPTIONS + +#else + + static HintImpl HintTable[1]; + static Hint* HintPtrTable[1] = { &HintTable[0], }; + HintTable[0].init("shortDescription", "Number of point in V-dimension", true); + ParamDefTable[2].setHints((const NvParameterized::Hint**)HintPtrTable, 1); + +#endif /* NV_PARAMETERIZED_HIDE_DESCRIPTIONS */ + + + + + + } + + // Initialize DefinitionImpl node: nodeIndex=3, longName="surfaceFormat" + { + NvParameterized::DefinitionImpl* ParamDef = &ParamDefTable[3]; + ParamDef->init("surfaceFormat", TYPE_U32, NULL, true); + +#ifdef NV_PARAMETERIZED_HIDE_DESCRIPTIONS + + static HintImpl HintTable[1]; + static Hint* HintPtrTable[1] = { &HintTable[0], }; + HintTable[0].init("INCLUDED", uint64_t(1), true); + ParamDefTable[3].setHints((const NvParameterized::Hint**)HintPtrTable, 1); + +#else + + static HintImpl HintTable[2]; + static Hint* HintPtrTable[2] = { &HintTable[0], &HintTable[1], }; + HintTable[0].init("INCLUDED", uint64_t(1), true); + HintTable[1].init("shortDescription", "Attributes of the vertex buffer", true); + ParamDefTable[3].setHints((const NvParameterized::Hint**)HintPtrTable, 2); + +#endif /* NV_PARAMETERIZED_HIDE_DESCRIPTIONS */ + + + + + + } + + // Initialize DefinitionImpl node: nodeIndex=4, longName="buffer" + { + NvParameterized::DefinitionImpl* ParamDef = &ParamDefTable[4]; + ParamDef->init("buffer", TYPE_REF, NULL, 
true); + +#ifdef NV_PARAMETERIZED_HIDE_DESCRIPTIONS + + static HintImpl HintTable[1]; + static Hint* HintPtrTable[1] = { &HintTable[0], }; + HintTable[0].init("INCLUDED", uint64_t(1), true); + ParamDefTable[4].setHints((const NvParameterized::Hint**)HintPtrTable, 1); + +#else + + static HintImpl HintTable[3]; + static Hint* HintPtrTable[3] = { &HintTable[0], &HintTable[1], &HintTable[2], }; + HintTable[0].init("INCLUDED", uint64_t(1), true); + HintTable[1].init("longDescription", "This is the buffer data according to the description in the SurfaceFormat", true); + HintTable[2].init("shortDescription", "The data buffer", true); + ParamDefTable[4].setHints((const NvParameterized::Hint**)HintPtrTable, 3); + +#endif /* NV_PARAMETERIZED_HIDE_DESCRIPTIONS */ + + + static const char* const RefVariantVals[] = { "BufferU8x1", "BufferU8x2", "BufferU8x3", "BufferU8x4", "BufferU16x1", "BufferU16x2", "BufferU16x3", "BufferU16x4", "BufferU32x1", "BufferU32x2", "BufferU32x3", "BufferU32x4", "BufferF32x1", "BufferF32x2", "BufferF32x3", "BufferF32x4" }; + ParamDefTable[4].setRefVariantVals((const char**)RefVariantVals, 16); + + + + } + + // SetChildren for: nodeIndex=0, longName="" + { + static Definition* Children[4]; + Children[0] = PDEF_PTR(1); + Children[1] = PDEF_PTR(2); + Children[2] = PDEF_PTR(3); + Children[3] = PDEF_PTR(4); + + ParamDefTable[0].setChildren(Children, 4); + } + + mBuiltFlag = true; + +} +void SurfaceBufferParameters::initStrings(void) +{ +} + +void SurfaceBufferParameters::initDynamicArrays(void) +{ +} + +void SurfaceBufferParameters::initDefaults(void) +{ + + freeStrings(); + freeReferences(); + freeDynamicArrays(); + width = uint32_t(0); + height = uint32_t(0); + surfaceFormat = uint32_t(0); + + initDynamicArrays(); + initStrings(); + initReferences(); +} + +void SurfaceBufferParameters::initReferences(void) +{ + buffer = NULL; + +} + +void SurfaceBufferParameters::freeDynamicArrays(void) +{ +} + +void SurfaceBufferParameters::freeStrings(void) +{ +} + 
+void SurfaceBufferParameters::freeReferences(void) +{ + if (buffer) + { + buffer->destroy(); + } + +} + +} // namespace apex +} // namespace nvidia diff --git a/APEX_1.4/framework/src/autogen/VertexBufferParameters.cpp b/APEX_1.4/framework/src/autogen/VertexBufferParameters.cpp new file mode 100644 index 00000000..b13f3e34 --- /dev/null +++ b/APEX_1.4/framework/src/autogen/VertexBufferParameters.cpp @@ -0,0 +1,446 @@ +// This code contains NVIDIA Confidential Information and is disclosed to you +// under a form of NVIDIA software license agreement provided separately to you. +// +// Notice +// NVIDIA Corporation and its licensors retain all intellectual property and +// proprietary rights in and to this software and related documentation and +// any modifications thereto. Any use, reproduction, disclosure, or +// distribution of this software and related documentation without an express +// license agreement from NVIDIA Corporation is strictly prohibited. +// +// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES +// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO +// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT, +// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE. +// +// Information and code furnished is believed to be accurate and reliable. +// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such +// information or for any infringement of patents or other rights of third parties that may +// result from its use. No license is granted by implication or otherwise under any patent +// or patent rights of NVIDIA Corporation. Details are subject to change without notice. +// This code supersedes and replaces all information previously supplied. +// NVIDIA Corporation products are not authorized for use as critical +// components in life support devices or systems without express written approval of +// NVIDIA Corporation. 
+// +// Copyright (c) 2008-2015 NVIDIA Corporation. All rights reserved. + +// This file was generated by NvParameterized/scripts/GenParameterized.pl + + +#include "VertexBufferParameters.h" +#include <string.h> +#include <stdlib.h> + +using namespace NvParameterized; + +namespace nvidia +{ +namespace apex +{ + +using namespace VertexBufferParametersNS; + +const char* const VertexBufferParametersFactory::vptr = + NvParameterized::getVptr<VertexBufferParameters, VertexBufferParameters::ClassAlignment>(); + +const uint32_t NumParamDefs = 5; +static NvParameterized::DefinitionImpl* ParamDefTable; // now allocated in buildTree [NumParamDefs]; + + +static const size_t ParamLookupChildrenTable[] = +{ + 1, 2, 3, 4, +}; + +#define TENUM(type) nvidia::##type +#define CHILDREN(index) &ParamLookupChildrenTable[index] +static const NvParameterized::ParamLookupNode ParamLookupTable[NumParamDefs] = +{ + { TYPE_STRUCT, false, 0, CHILDREN(0), 3 }, + { TYPE_U32, false, (size_t)(&((ParametersStruct*)0)->vertexCount), NULL, 0 }, // vertexCount + { TYPE_REF, false, (size_t)(&((ParametersStruct*)0)->vertexFormat), NULL, 0 }, // vertexFormat + { TYPE_ARRAY, true, (size_t)(&((ParametersStruct*)0)->buffers), CHILDREN(3), 1 }, // buffers + { TYPE_REF, false, 1 * sizeof(NvParameterized::Interface*), NULL, 0 }, // buffers[] +}; + + +bool VertexBufferParameters::mBuiltFlag = false; +NvParameterized::MutexType VertexBufferParameters::mBuiltFlagMutex; + +VertexBufferParameters::VertexBufferParameters(NvParameterized::Traits* traits, void* buf, int32_t* refCount) : + NvParameters(traits, buf, refCount) +{ + //mParameterizedTraits->registerFactory(className(), &VertexBufferParametersFactoryInst); + + if (!buf) //Do not init data if it is inplace-deserialized + { + initDynamicArrays(); + initStrings(); + initReferences(); + initDefaults(); + } +} + +VertexBufferParameters::~VertexBufferParameters() +{ + freeStrings(); + freeReferences(); + freeDynamicArrays(); +} + +void 
VertexBufferParameters::destroy() +{ + // We cache these fields here to avoid overwrite in destructor + bool doDeallocateSelf = mDoDeallocateSelf; + NvParameterized::Traits* traits = mParameterizedTraits; + int32_t* refCount = mRefCount; + void* buf = mBuffer; + + this->~VertexBufferParameters(); + + NvParameters::destroy(this, traits, doDeallocateSelf, refCount, buf); +} + +const NvParameterized::DefinitionImpl* VertexBufferParameters::getParameterDefinitionTree(void) +{ + if (!mBuiltFlag) // Double-checked lock + { + NvParameterized::MutexType::ScopedLock lock(mBuiltFlagMutex); + if (!mBuiltFlag) + { + buildTree(); + } + } + + return(&ParamDefTable[0]); +} + +const NvParameterized::DefinitionImpl* VertexBufferParameters::getParameterDefinitionTree(void) const +{ + VertexBufferParameters* tmpParam = const_cast<VertexBufferParameters*>(this); + + if (!mBuiltFlag) // Double-checked lock + { + NvParameterized::MutexType::ScopedLock lock(mBuiltFlagMutex); + if (!mBuiltFlag) + { + tmpParam->buildTree(); + } + } + + return(&ParamDefTable[0]); +} + +NvParameterized::ErrorType VertexBufferParameters::getParameterHandle(const char* long_name, Handle& handle) const +{ + ErrorType Ret = NvParameters::getParameterHandle(long_name, handle); + if (Ret != ERROR_NONE) + { + return(Ret); + } + + size_t offset; + void* ptr; + + getVarPtr(handle, ptr, offset); + + if (ptr == NULL) + { + return(ERROR_INDEX_OUT_OF_RANGE); + } + + return(ERROR_NONE); +} + +NvParameterized::ErrorType VertexBufferParameters::getParameterHandle(const char* long_name, Handle& handle) +{ + ErrorType Ret = NvParameters::getParameterHandle(long_name, handle); + if (Ret != ERROR_NONE) + { + return(Ret); + } + + size_t offset; + void* ptr; + + getVarPtr(handle, ptr, offset); + + if (ptr == NULL) + { + return(ERROR_INDEX_OUT_OF_RANGE); + } + + return(ERROR_NONE); +} + +void VertexBufferParameters::getVarPtr(const Handle& handle, void*& ptr, size_t& offset) const +{ + ptr = getVarPtrHelper(&ParamLookupTable[0], 
const_cast<VertexBufferParameters::ParametersStruct*>(¶meters()), handle, offset); +} + + +/* Dynamic Handle Indices */ +/* [0] - buffers (not an array of structs) */ + +void VertexBufferParameters::freeParameterDefinitionTable(NvParameterized::Traits* traits) +{ + if (!traits) + { + return; + } + + if (!mBuiltFlag) // Double-checked lock + { + return; + } + + NvParameterized::MutexType::ScopedLock lock(mBuiltFlagMutex); + + if (!mBuiltFlag) + { + return; + } + + for (uint32_t i = 0; i < NumParamDefs; ++i) + { + ParamDefTable[i].~DefinitionImpl(); + } + + traits->free(ParamDefTable); + + mBuiltFlag = false; +} + +#define PDEF_PTR(index) (&ParamDefTable[index]) + +void VertexBufferParameters::buildTree(void) +{ + + uint32_t allocSize = sizeof(NvParameterized::DefinitionImpl) * NumParamDefs; + ParamDefTable = (NvParameterized::DefinitionImpl*)(mParameterizedTraits->alloc(allocSize)); + memset(ParamDefTable, 0, allocSize); + + for (uint32_t i = 0; i < NumParamDefs; ++i) + { + NV_PARAM_PLACEMENT_NEW(ParamDefTable + i, NvParameterized::DefinitionImpl)(*mParameterizedTraits); + } + + // Initialize DefinitionImpl node: nodeIndex=0, longName="" + { + NvParameterized::DefinitionImpl* ParamDef = &ParamDefTable[0]; + ParamDef->init("", TYPE_STRUCT, "STRUCT", true); + + + + + + + } + + // Initialize DefinitionImpl node: nodeIndex=1, longName="vertexCount" + { + NvParameterized::DefinitionImpl* ParamDef = &ParamDefTable[1]; + ParamDef->init("vertexCount", TYPE_U32, NULL, true); + +#ifdef NV_PARAMETERIZED_HIDE_DESCRIPTIONS + +#else + + static HintImpl HintTable[2]; + static Hint* HintPtrTable[2] = { &HintTable[0], &HintTable[1], }; + HintTable[0].init("longDescription", "The number of vertices in the verrtex buffer. All vertex data channels (positions, normals, etc.) 
will contain this many items.", true); + HintTable[1].init("shortDescription", "Number of vertices", true); + ParamDefTable[1].setHints((const NvParameterized::Hint**)HintPtrTable, 2); + +#endif /* NV_PARAMETERIZED_HIDE_DESCRIPTIONS */ + + + + + + } + + // Initialize DefinitionImpl node: nodeIndex=2, longName="vertexFormat" + { + NvParameterized::DefinitionImpl* ParamDef = &ParamDefTable[2]; + ParamDef->init("vertexFormat", TYPE_REF, NULL, true); + +#ifdef NV_PARAMETERIZED_HIDE_DESCRIPTIONS + + static HintImpl HintTable[1]; + static Hint* HintPtrTable[1] = { &HintTable[0], }; + HintTable[0].init("INCLUDED", uint64_t(1), true); + ParamDefTable[2].setHints((const NvParameterized::Hint**)HintPtrTable, 1); + +#else + + static HintImpl HintTable[3]; + static Hint* HintPtrTable[3] = { &HintTable[0], &HintTable[1], &HintTable[2], }; + HintTable[0].init("INCLUDED", uint64_t(1), true); + HintTable[1].init("longDescription", "This describes the data channels which exist per vertex, the vertex winding order, custom buffers, etc. 
See VertexFormatParameters.", true); + HintTable[2].init("shortDescription", "Attributes of the vertex buffer", true); + ParamDefTable[2].setHints((const NvParameterized::Hint**)HintPtrTable, 3); + +#endif /* NV_PARAMETERIZED_HIDE_DESCRIPTIONS */ + + + static const char* const RefVariantVals[] = { "VertexFormatParameters" }; + ParamDefTable[2].setRefVariantVals((const char**)RefVariantVals, 1); + + + + } + + // Initialize DefinitionImpl node: nodeIndex=3, longName="buffers" + { + NvParameterized::DefinitionImpl* ParamDef = &ParamDefTable[3]; + ParamDef->init("buffers", TYPE_ARRAY, NULL, true); + +#ifdef NV_PARAMETERIZED_HIDE_DESCRIPTIONS + + static HintImpl HintTable[2]; + static Hint* HintPtrTable[2] = { &HintTable[0], &HintTable[1], }; + HintTable[0].init("INCLUDED", uint64_t(1), true); + HintTable[1].init("NOPVD", uint64_t(1), true); + ParamDefTable[3].setHints((const NvParameterized::Hint**)HintPtrTable, 2); + +#else + + static HintImpl HintTable[4]; + static Hint* HintPtrTable[4] = { &HintTable[0], &HintTable[1], &HintTable[2], &HintTable[3], }; + HintTable[0].init("INCLUDED", uint64_t(1), true); + HintTable[1].init("NOPVD", uint64_t(1), true); + HintTable[2].init("longDescription", "This is the buffer data according to the description in the VertexFormat", true); + HintTable[3].init("shortDescription", "The data buffers for standard and custom semantics", true); + ParamDefTable[3].setHints((const NvParameterized::Hint**)HintPtrTable, 4); + +#endif /* NV_PARAMETERIZED_HIDE_DESCRIPTIONS */ + + + static const char* const RefVariantVals[] = { "BufferU8x1", "BufferU8x2", "BufferU8x3", "BufferU8x4", "BufferU16x1", "BufferU16x2", "BufferU16x3", "BufferU16x4", "BufferU32x1", "BufferU32x2", "BufferU32x3", "BufferU32x4", "BufferF32x1", "BufferF32x2", "BufferF32x3", "BufferF32x4" }; + ParamDefTable[3].setRefVariantVals((const char**)RefVariantVals, 16); + + + ParamDef->setArraySize(-1); + static const uint8_t dynHandleIndices[1] = { 0, }; + 
ParamDef->setDynamicHandleIndicesMap(dynHandleIndices, 1); + + } + + // Initialize DefinitionImpl node: nodeIndex=4, longName="buffers[]" + { + NvParameterized::DefinitionImpl* ParamDef = &ParamDefTable[4]; + ParamDef->init("buffers", TYPE_REF, NULL, true); + +#ifdef NV_PARAMETERIZED_HIDE_DESCRIPTIONS + + static HintImpl HintTable[2]; + static Hint* HintPtrTable[2] = { &HintTable[0], &HintTable[1], }; + HintTable[0].init("INCLUDED", uint64_t(1), true); + HintTable[1].init("NOPVD", uint64_t(1), true); + ParamDefTable[4].setHints((const NvParameterized::Hint**)HintPtrTable, 2); + +#else + + static HintImpl HintTable[4]; + static Hint* HintPtrTable[4] = { &HintTable[0], &HintTable[1], &HintTable[2], &HintTable[3], }; + HintTable[0].init("INCLUDED", uint64_t(1), true); + HintTable[1].init("NOPVD", uint64_t(1), true); + HintTable[2].init("longDescription", "This is the buffer data according to the description in the VertexFormat", true); + HintTable[3].init("shortDescription", "The data buffers for standard and custom semantics", true); + ParamDefTable[4].setHints((const NvParameterized::Hint**)HintPtrTable, 4); + +#endif /* NV_PARAMETERIZED_HIDE_DESCRIPTIONS */ + + + static const char* const RefVariantVals[] = { "BufferU8x1", "BufferU8x2", "BufferU8x3", "BufferU8x4", "BufferU16x1", "BufferU16x2", "BufferU16x3", "BufferU16x4", "BufferU32x1", "BufferU32x2", "BufferU32x3", "BufferU32x4", "BufferF32x1", "BufferF32x2", "BufferF32x3", "BufferF32x4" }; + ParamDefTable[4].setRefVariantVals((const char**)RefVariantVals, 16); + + + + } + + // SetChildren for: nodeIndex=0, longName="" + { + static Definition* Children[3]; + Children[0] = PDEF_PTR(1); + Children[1] = PDEF_PTR(2); + Children[2] = PDEF_PTR(3); + + ParamDefTable[0].setChildren(Children, 3); + } + + // SetChildren for: nodeIndex=3, longName="buffers" + { + static Definition* Children[1]; + Children[0] = PDEF_PTR(4); + + ParamDefTable[3].setChildren(Children, 1); + } + + mBuiltFlag = true; + +} +void 
VertexBufferParameters::initStrings(void) +{ +} + +void VertexBufferParameters::initDynamicArrays(void) +{ + buffers.buf = NULL; + buffers.isAllocated = true; + buffers.elementSize = sizeof(NvParameterized::Interface*); + buffers.arraySizes[0] = 0; +} + +void VertexBufferParameters::initDefaults(void) +{ + + freeStrings(); + freeReferences(); + freeDynamicArrays(); + vertexCount = uint32_t(0); + + initDynamicArrays(); + initStrings(); + initReferences(); +} + +void VertexBufferParameters::initReferences(void) +{ + vertexFormat = NULL; + +} + +void VertexBufferParameters::freeDynamicArrays(void) +{ + if (buffers.isAllocated && buffers.buf) + { + mParameterizedTraits->free(buffers.buf); + } +} + +void VertexBufferParameters::freeStrings(void) +{ +} + +void VertexBufferParameters::freeReferences(void) +{ + if (vertexFormat) + { + vertexFormat->destroy(); + } + + + for (int i = 0; i < buffers.arraySizes[0]; ++i) + { + if (buffers.buf[i]) + { + buffers.buf[i]->destroy(); + } + } +} + +} // namespace apex +} // namespace nvidia diff --git a/APEX_1.4/framework/src/autogen/VertexFormatParameters.cpp b/APEX_1.4/framework/src/autogen/VertexFormatParameters.cpp new file mode 100644 index 00000000..2d90d3de --- /dev/null +++ b/APEX_1.4/framework/src/autogen/VertexFormatParameters.cpp @@ -0,0 +1,569 @@ +// This code contains NVIDIA Confidential Information and is disclosed to you +// under a form of NVIDIA software license agreement provided separately to you. +// +// Notice +// NVIDIA Corporation and its licensors retain all intellectual property and +// proprietary rights in and to this software and related documentation and +// any modifications thereto. Any use, reproduction, disclosure, or +// distribution of this software and related documentation without an express +// license agreement from NVIDIA Corporation is strictly prohibited. +// +// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". 
NVIDIA MAKES +// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO +// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT, +// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE. +// +// Information and code furnished is believed to be accurate and reliable. +// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such +// information or for any infringement of patents or other rights of third parties that may +// result from its use. No license is granted by implication or otherwise under any patent +// or patent rights of NVIDIA Corporation. Details are subject to change without notice. +// This code supersedes and replaces all information previously supplied. +// NVIDIA Corporation products are not authorized for use as critical +// components in life support devices or systems without express written approval of +// NVIDIA Corporation. +// +// Copyright (c) 2008-2015 NVIDIA Corporation. All rights reserved. 
+ +// This file was generated by NvParameterized/scripts/GenParameterized.pl + + +#include "VertexFormatParameters.h" +#include <string.h> +#include <stdlib.h> + +using namespace NvParameterized; + +namespace nvidia +{ +namespace apex +{ + +using namespace VertexFormatParametersNS; + +const char* const VertexFormatParametersFactory::vptr = + NvParameterized::getVptr<VertexFormatParameters, VertexFormatParameters::ClassAlignment>(); + +const uint32_t NumParamDefs = 11; +static NvParameterized::DefinitionImpl* ParamDefTable; // now allocated in buildTree [NumParamDefs]; + + +static const size_t ParamLookupChildrenTable[] = +{ + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, +}; + +#define TENUM(type) nvidia::##type +#define CHILDREN(index) &ParamLookupChildrenTable[index] +static const NvParameterized::ParamLookupNode ParamLookupTable[NumParamDefs] = +{ + { TYPE_STRUCT, false, 0, CHILDREN(0), 3 }, + { TYPE_U32, false, (size_t)(&((ParametersStruct*)0)->winding), NULL, 0 }, // winding + { TYPE_BOOL, false, (size_t)(&((ParametersStruct*)0)->hasSeparateBoneBuffer), NULL, 0 }, // hasSeparateBoneBuffer + { TYPE_ARRAY, true, (size_t)(&((ParametersStruct*)0)->bufferFormats), CHILDREN(3), 1 }, // bufferFormats + { TYPE_STRUCT, false, 1 * sizeof(BufferFormat_Type), CHILDREN(4), 6 }, // bufferFormats[] + { TYPE_STRING, false, (size_t)(&((BufferFormat_Type*)0)->name), NULL, 0 }, // bufferFormats[].name + { TYPE_I32, false, (size_t)(&((BufferFormat_Type*)0)->semantic), NULL, 0 }, // bufferFormats[].semantic + { TYPE_U32, false, (size_t)(&((BufferFormat_Type*)0)->id), NULL, 0 }, // bufferFormats[].id + { TYPE_U32, false, (size_t)(&((BufferFormat_Type*)0)->format), NULL, 0 }, // bufferFormats[].format + { TYPE_U32, false, (size_t)(&((BufferFormat_Type*)0)->access), NULL, 0 }, // bufferFormats[].access + { TYPE_BOOL, false, (size_t)(&((BufferFormat_Type*)0)->serialize), NULL, 0 }, // bufferFormats[].serialize +}; + + +bool VertexFormatParameters::mBuiltFlag = false; +NvParameterized::MutexType 
VertexFormatParameters::mBuiltFlagMutex; + +VertexFormatParameters::VertexFormatParameters(NvParameterized::Traits* traits, void* buf, int32_t* refCount) : + NvParameters(traits, buf, refCount) +{ + //mParameterizedTraits->registerFactory(className(), &VertexFormatParametersFactoryInst); + + if (!buf) //Do not init data if it is inplace-deserialized + { + initDynamicArrays(); + initStrings(); + initReferences(); + initDefaults(); + } +} + +VertexFormatParameters::~VertexFormatParameters() +{ + freeStrings(); + freeReferences(); + freeDynamicArrays(); +} + +void VertexFormatParameters::destroy() +{ + // We cache these fields here to avoid overwrite in destructor + bool doDeallocateSelf = mDoDeallocateSelf; + NvParameterized::Traits* traits = mParameterizedTraits; + int32_t* refCount = mRefCount; + void* buf = mBuffer; + + this->~VertexFormatParameters(); + + NvParameters::destroy(this, traits, doDeallocateSelf, refCount, buf); +} + +const NvParameterized::DefinitionImpl* VertexFormatParameters::getParameterDefinitionTree(void) +{ + if (!mBuiltFlag) // Double-checked lock + { + NvParameterized::MutexType::ScopedLock lock(mBuiltFlagMutex); + if (!mBuiltFlag) + { + buildTree(); + } + } + + return(&ParamDefTable[0]); +} + +const NvParameterized::DefinitionImpl* VertexFormatParameters::getParameterDefinitionTree(void) const +{ + VertexFormatParameters* tmpParam = const_cast<VertexFormatParameters*>(this); + + if (!mBuiltFlag) // Double-checked lock + { + NvParameterized::MutexType::ScopedLock lock(mBuiltFlagMutex); + if (!mBuiltFlag) + { + tmpParam->buildTree(); + } + } + + return(&ParamDefTable[0]); +} + +NvParameterized::ErrorType VertexFormatParameters::getParameterHandle(const char* long_name, Handle& handle) const +{ + ErrorType Ret = NvParameters::getParameterHandle(long_name, handle); + if (Ret != ERROR_NONE) + { + return(Ret); + } + + size_t offset; + void* ptr; + + getVarPtr(handle, ptr, offset); + + if (ptr == NULL) + { + return(ERROR_INDEX_OUT_OF_RANGE); + } + 
+ return(ERROR_NONE); +} + +NvParameterized::ErrorType VertexFormatParameters::getParameterHandle(const char* long_name, Handle& handle) +{ + ErrorType Ret = NvParameters::getParameterHandle(long_name, handle); + if (Ret != ERROR_NONE) + { + return(Ret); + } + + size_t offset; + void* ptr; + + getVarPtr(handle, ptr, offset); + + if (ptr == NULL) + { + return(ERROR_INDEX_OUT_OF_RANGE); + } + + return(ERROR_NONE); +} + +void VertexFormatParameters::getVarPtr(const Handle& handle, void*& ptr, size_t& offset) const +{ + ptr = getVarPtrHelper(&ParamLookupTable[0], const_cast<VertexFormatParameters::ParametersStruct*>(¶meters()), handle, offset); +} + + +/* Dynamic Handle Indices */ +/* [1,0] - bufferFormats.name */ + +void VertexFormatParameters::freeParameterDefinitionTable(NvParameterized::Traits* traits) +{ + if (!traits) + { + return; + } + + if (!mBuiltFlag) // Double-checked lock + { + return; + } + + NvParameterized::MutexType::ScopedLock lock(mBuiltFlagMutex); + + if (!mBuiltFlag) + { + return; + } + + for (uint32_t i = 0; i < NumParamDefs; ++i) + { + ParamDefTable[i].~DefinitionImpl(); + } + + traits->free(ParamDefTable); + + mBuiltFlag = false; +} + +#define PDEF_PTR(index) (&ParamDefTable[index]) + +void VertexFormatParameters::buildTree(void) +{ + + uint32_t allocSize = sizeof(NvParameterized::DefinitionImpl) * NumParamDefs; + ParamDefTable = (NvParameterized::DefinitionImpl*)(mParameterizedTraits->alloc(allocSize)); + memset(ParamDefTable, 0, allocSize); + + for (uint32_t i = 0; i < NumParamDefs; ++i) + { + NV_PARAM_PLACEMENT_NEW(ParamDefTable + i, NvParameterized::DefinitionImpl)(*mParameterizedTraits); + } + + // Initialize DefinitionImpl node: nodeIndex=0, longName="" + { + NvParameterized::DefinitionImpl* ParamDef = &ParamDefTable[0]; + ParamDef->init("", TYPE_STRUCT, "STRUCT", true); + + + + + + + } + + // Initialize DefinitionImpl node: nodeIndex=1, longName="winding" + { + NvParameterized::DefinitionImpl* ParamDef = &ParamDefTable[1]; + 
ParamDef->init("winding", TYPE_U32, NULL, true); + +#ifdef NV_PARAMETERIZED_HIDE_DESCRIPTIONS + +#else + + static HintImpl HintTable[2]; + static Hint* HintPtrTable[2] = { &HintTable[0], &HintTable[1], }; + HintTable[0].init("longDescription", "This value defines which vertex winding orders will be rendered. See RenderCullMode.", true); + HintTable[1].init("shortDescription", "Vertex orders to be rendered", true); + ParamDefTable[1].setHints((const NvParameterized::Hint**)HintPtrTable, 2); + +#endif /* NV_PARAMETERIZED_HIDE_DESCRIPTIONS */ + + + + + + } + + // Initialize DefinitionImpl node: nodeIndex=2, longName="hasSeparateBoneBuffer" + { + NvParameterized::DefinitionImpl* ParamDef = &ParamDefTable[2]; + ParamDef->init("hasSeparateBoneBuffer", TYPE_BOOL, NULL, true); + +#ifdef NV_PARAMETERIZED_HIDE_DESCRIPTIONS + +#else + + static HintImpl HintTable[2]; + static Hint* HintPtrTable[2] = { &HintTable[0], &HintTable[1], }; + HintTable[0].init("longDescription", "Whether or not there exists a separate bone buffer.", true); + HintTable[1].init("shortDescription", "Whether or not there exists a separate bone buffer", true); + ParamDefTable[2].setHints((const NvParameterized::Hint**)HintPtrTable, 2); + +#endif /* NV_PARAMETERIZED_HIDE_DESCRIPTIONS */ + + + + + + } + + // Initialize DefinitionImpl node: nodeIndex=3, longName="bufferFormats" + { + NvParameterized::DefinitionImpl* ParamDef = &ParamDefTable[3]; + ParamDef->init("bufferFormats", TYPE_ARRAY, NULL, true); + +#ifdef NV_PARAMETERIZED_HIDE_DESCRIPTIONS + +#else + + static HintImpl HintTable[2]; + static Hint* HintPtrTable[2] = { &HintTable[0], &HintTable[1], }; + HintTable[0].init("longDescription", "This includes Semantics/Names and Vertex Format for each buffer.", true); + HintTable[1].init("shortDescription", "Format of all buffers.", true); + ParamDefTable[3].setHints((const NvParameterized::Hint**)HintPtrTable, 2); + +#endif /* NV_PARAMETERIZED_HIDE_DESCRIPTIONS */ + + + + + ParamDef->setArraySize(-1); + 
static const uint8_t dynHandleIndices[2] = { 1, 0, }; + ParamDef->setDynamicHandleIndicesMap(dynHandleIndices, 2); + + } + + // Initialize DefinitionImpl node: nodeIndex=4, longName="bufferFormats[]" + { + NvParameterized::DefinitionImpl* ParamDef = &ParamDefTable[4]; + ParamDef->init("bufferFormats", TYPE_STRUCT, "BufferFormat", true); + +#ifdef NV_PARAMETERIZED_HIDE_DESCRIPTIONS + +#else + + static HintImpl HintTable[2]; + static Hint* HintPtrTable[2] = { &HintTable[0], &HintTable[1], }; + HintTable[0].init("longDescription", "This includes Semantics/Names and Vertex Format for each buffer.", true); + HintTable[1].init("shortDescription", "Format of all buffers.", true); + ParamDefTable[4].setHints((const NvParameterized::Hint**)HintPtrTable, 2); + +#endif /* NV_PARAMETERIZED_HIDE_DESCRIPTIONS */ + + + + + + } + + // Initialize DefinitionImpl node: nodeIndex=5, longName="bufferFormats[].name" + { + NvParameterized::DefinitionImpl* ParamDef = &ParamDefTable[5]; + ParamDef->init("name", TYPE_STRING, NULL, true); + +#ifdef NV_PARAMETERIZED_HIDE_DESCRIPTIONS + +#else + + static HintImpl HintTable[2]; + static Hint* HintPtrTable[2] = { &HintTable[0], &HintTable[1], }; + HintTable[0].init("longDescription", "Name by which this buffer is referenced, for custom semantics. 
For standard semantics, VertexFormat::getSemanticName() is used", true); + HintTable[1].init("shortDescription", "Name by which this buffer is referenced", true); + ParamDefTable[5].setHints((const NvParameterized::Hint**)HintPtrTable, 2); + +#endif /* NV_PARAMETERIZED_HIDE_DESCRIPTIONS */ + + + + + + } + + // Initialize DefinitionImpl node: nodeIndex=6, longName="bufferFormats[].semantic" + { + NvParameterized::DefinitionImpl* ParamDef = &ParamDefTable[6]; + ParamDef->init("semantic", TYPE_I32, NULL, true); + +#ifdef NV_PARAMETERIZED_HIDE_DESCRIPTIONS + +#else + + static HintImpl HintTable[2]; + static Hint* HintPtrTable[2] = { &HintTable[0], &HintTable[1], }; + HintTable[0].init("longDescription", "This buffer's semantic. If it's a custom buffer, it will be RenderVertexSemantic::CUSTOM.", true); + HintTable[1].init("shortDescription", "This buffer's semantic.", true); + ParamDefTable[6].setHints((const NvParameterized::Hint**)HintPtrTable, 2); + +#endif /* NV_PARAMETERIZED_HIDE_DESCRIPTIONS */ + + + + + + } + + // Initialize DefinitionImpl node: nodeIndex=7, longName="bufferFormats[].id" + { + NvParameterized::DefinitionImpl* ParamDef = &ParamDefTable[7]; + ParamDef->init("id", TYPE_U32, NULL, true); + +#ifdef NV_PARAMETERIZED_HIDE_DESCRIPTIONS + +#else + + static HintImpl HintTable[2]; + static Hint* HintPtrTable[2] = { &HintTable[0], &HintTable[1], }; + HintTable[0].init("longDescription", "This buffer's id. 
See VertexFormat::getSemanticID and VertexFormat::getID.", true); + HintTable[1].init("shortDescription", "This buffer's id.", true); + ParamDefTable[7].setHints((const NvParameterized::Hint**)HintPtrTable, 2); + +#endif /* NV_PARAMETERIZED_HIDE_DESCRIPTIONS */ + + + + + + } + + // Initialize DefinitionImpl node: nodeIndex=8, longName="bufferFormats[].format" + { + NvParameterized::DefinitionImpl* ParamDef = &ParamDefTable[8]; + ParamDef->init("format", TYPE_U32, NULL, true); + +#ifdef NV_PARAMETERIZED_HIDE_DESCRIPTIONS + +#else + + static HintImpl HintTable[2]; + static Hint* HintPtrTable[2] = { &HintTable[0], &HintTable[1], }; + HintTable[0].init("longDescription", "The format of this buffer. Must be one of the RenderDataFormat::Enum values.", true); + HintTable[1].init("shortDescription", "The format of this buffer", true); + ParamDefTable[8].setHints((const NvParameterized::Hint**)HintPtrTable, 2); + +#endif /* NV_PARAMETERIZED_HIDE_DESCRIPTIONS */ + + + + + + } + + // Initialize DefinitionImpl node: nodeIndex=9, longName="bufferFormats[].access" + { + NvParameterized::DefinitionImpl* ParamDef = &ParamDefTable[9]; + ParamDef->init("access", TYPE_U32, NULL, true); + +#ifdef NV_PARAMETERIZED_HIDE_DESCRIPTIONS + +#else + + static HintImpl HintTable[2]; + static Hint* HintPtrTable[2] = { &HintTable[0], &HintTable[1], }; + HintTable[0].init("longDescription", "The access type for this buffer. 
Must be one of the VertexDataAccess::Enum values.", true); + HintTable[1].init("shortDescription", "The access type for this buffer", true); + ParamDefTable[9].setHints((const NvParameterized::Hint**)HintPtrTable, 2); + +#endif /* NV_PARAMETERIZED_HIDE_DESCRIPTIONS */ + + + + + + } + + // Initialize DefinitionImpl node: nodeIndex=10, longName="bufferFormats[].serialize" + { + NvParameterized::DefinitionImpl* ParamDef = &ParamDefTable[10]; + ParamDef->init("serialize", TYPE_BOOL, NULL, true); + +#ifdef NV_PARAMETERIZED_HIDE_DESCRIPTIONS + +#else + + static HintImpl HintTable[2]; + static Hint* HintPtrTable[2] = { &HintTable[0], &HintTable[1], }; + HintTable[0].init("longDescription", "This is only needed if this buffer needs to be rendered, clothing custom buffer MAX_DISTANCE for example doesn't need it.", true); + HintTable[1].init("shortDescription", "Serialize this buffer", true); + ParamDefTable[10].setHints((const NvParameterized::Hint**)HintPtrTable, 2); + +#endif /* NV_PARAMETERIZED_HIDE_DESCRIPTIONS */ + + + + + + } + + // SetChildren for: nodeIndex=0, longName="" + { + static Definition* Children[3]; + Children[0] = PDEF_PTR(1); + Children[1] = PDEF_PTR(2); + Children[2] = PDEF_PTR(3); + + ParamDefTable[0].setChildren(Children, 3); + } + + // SetChildren for: nodeIndex=3, longName="bufferFormats" + { + static Definition* Children[1]; + Children[0] = PDEF_PTR(4); + + ParamDefTable[3].setChildren(Children, 1); + } + + // SetChildren for: nodeIndex=4, longName="bufferFormats[]" + { + static Definition* Children[6]; + Children[0] = PDEF_PTR(5); + Children[1] = PDEF_PTR(6); + Children[2] = PDEF_PTR(7); + Children[3] = PDEF_PTR(8); + Children[4] = PDEF_PTR(9); + Children[5] = PDEF_PTR(10); + + ParamDefTable[4].setChildren(Children, 6); + } + + mBuiltFlag = true; + +} +void VertexFormatParameters::initStrings(void) +{ +} + +void VertexFormatParameters::initDynamicArrays(void) +{ + bufferFormats.buf = NULL; + bufferFormats.isAllocated = true; + 
bufferFormats.elementSize = sizeof(BufferFormat_Type); + bufferFormats.arraySizes[0] = 0; +} + +void VertexFormatParameters::initDefaults(void) +{ + + freeStrings(); + freeReferences(); + freeDynamicArrays(); + winding = uint32_t(0); + hasSeparateBoneBuffer = bool(false); + + initDynamicArrays(); + initStrings(); + initReferences(); +} + +void VertexFormatParameters::initReferences(void) +{ +} + +void VertexFormatParameters::freeDynamicArrays(void) +{ + if (bufferFormats.isAllocated && bufferFormats.buf) + { + mParameterizedTraits->free(bufferFormats.buf); + } +} + +void VertexFormatParameters::freeStrings(void) +{ + + for (int i = 0; i < bufferFormats.arraySizes[0]; ++i) + { + if (bufferFormats.buf[i].name.isAllocated && bufferFormats.buf[i].name.buf) + { + mParameterizedTraits->strfree((char*)bufferFormats.buf[i].name.buf); + } + } +} + +void VertexFormatParameters::freeReferences(void) +{ +} + +} // namespace apex +} // namespace nvidia diff --git a/APEX_1.4/framework/src/windows/AgMMFile.cpp b/APEX_1.4/framework/src/windows/AgMMFile.cpp new file mode 100644 index 00000000..2b637772 --- /dev/null +++ b/APEX_1.4/framework/src/windows/AgMMFile.cpp @@ -0,0 +1,80 @@ +/* + * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved. + * + * NVIDIA CORPORATION and its licensors retain all intellectual property + * and proprietary rights in and to this software, related documentation + * and any modifications thereto. Any use, reproduction, disclosure or + * distribution of this software and related documentation without an express + * license agreement from NVIDIA CORPORATION is strictly prohibited. + */ + +// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. +// Copyright (c) 2001-2004 NovodeX AG. All rights reserved. 
+ +#include "AgMMFile.h" + +using namespace nvidia; + +AgMMFile::AgMMFile(): +mAddr(0), mSize(0), mFileH(0) +{} + +AgMMFile::AgMMFile(char *name, unsigned int size, bool &alreadyExists) +{ + this->create(name, size, alreadyExists); +} + +void AgMMFile::create(char *name, unsigned int size, bool &alreadyExists) +{ + alreadyExists = false; + mSize = size; + + mFileH = CreateFileMapping(INVALID_HANDLE_VALUE, // use paging file + NULL, // default security + PAGE_READWRITE, // read/write access + 0, // buffer size (upper 32bits) + mSize, // buffer size (lower 32bits) + name); // name of mapping object + if (mFileH == NULL || mFileH == INVALID_HANDLE_VALUE) + { + mSize=0; + mAddr=0; + return; + } + + if (ERROR_ALREADY_EXISTS == GetLastError()) + { + alreadyExists = true; + } + + mAddr = MapViewOfFile(mFileH, // handle to map object + FILE_MAP_READ|FILE_MAP_WRITE, // read/write permission + 0, + 0, + mSize); + + if (mFileH == NULL || mAddr == NULL) + { + mSize=0; + mAddr=0; + return; + } +} + +void AgMMFile::destroy() +{ + if (!mAddr || !mFileH || !mSize) + return; + + UnmapViewOfFile(mAddr); + CloseHandle(mFileH); + + mAddr = 0; + mFileH = 0; + mSize = 0; +} + +AgMMFile::~AgMMFile() +{ + destroy(); +} diff --git a/APEX_1.4/framework/src/windows/AgMMFile.h b/APEX_1.4/framework/src/windows/AgMMFile.h new file mode 100644 index 00000000..ca2ddd73 --- /dev/null +++ b/APEX_1.4/framework/src/windows/AgMMFile.h @@ -0,0 +1,39 @@ +/* + * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved. + * + * NVIDIA CORPORATION and its licensors retain all intellectual property + * and proprietary rights in and to this software, related documentation + * and any modifications thereto. Any use, reproduction, disclosure or + * distribution of this software and related documentation without an express + * license agreement from NVIDIA CORPORATION is strictly prohibited. + */ + +// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. 
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved. + +#ifndef AG_MMFILE_H +#define AG_MMFILE_H + +#include "windows/PsWindowsInclude.h" + +namespace nvidia +{ + class AgMMFile + { + public: + AgMMFile(); + AgMMFile(char *name, unsigned int size, bool &alreadyExists); + void create(char *name, unsigned int size, bool &alreadyExists); + unsigned int getSize() {return mSize;}; + void * getAddr() {return mAddr;}; + void destroy(); + ~AgMMFile(); + + private: + void *mAddr; + unsigned int mSize; + HANDLE mFileH; + }; +} + +#endif // __AG_MM_FILE__ diff --git a/APEX_1.4/framework/src/windows/PhysXIndicator.cpp b/APEX_1.4/framework/src/windows/PhysXIndicator.cpp new file mode 100644 index 00000000..0ea99185 --- /dev/null +++ b/APEX_1.4/framework/src/windows/PhysXIndicator.cpp @@ -0,0 +1,166 @@ +/* + * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved. + * + * NVIDIA CORPORATION and its licensors retain all intellectual property + * and proprietary rights in and to this software, related documentation + * and any modifications thereto. Any use, reproduction, disclosure or + * distribution of this software and related documentation without an express + * license agreement from NVIDIA CORPORATION is strictly prohibited. + */ + +// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. +// Copyright (c) 2001-2004 NovodeX AG. All rights reserved. 
+ +#include "PhysXIndicator.h" +#include "AgMMFile.h" + +#include <windows.h> +#if _MSC_VER >= 1800 +#include <VersionHelpers.h> // for IsWindowsVistaOrGreater +#endif +#include <stdio.h> + +using namespace nvidia; +using namespace physx; + +// Scope-based to indicate to NV driver that CPU PhysX is happening +PhysXCpuIndicator::PhysXCpuIndicator() : + mPhysXDataPtr(NULL) +{ + bool alreadyExists = false; + + mPhysXDataPtr = (physx::NvPhysXToDrv_Data_V1*)PhysXCpuIndicator::createIndicatorBlock(mSharedMemConfig, alreadyExists); + + if (!mPhysXDataPtr) + { + return; + } + + if (!alreadyExists) + { + mPhysXDataPtr->bGpuPhysicsPresent = 0; + mPhysXDataPtr->bCpuPhysicsPresent = 1; + } + else + { + mPhysXDataPtr->bCpuPhysicsPresent++; + } + + // init header last to prevent race conditions + // this must be done because the driver may have already created the shared memory block, + // thus alreadyExists may be true, even if PhysX hasn't been initialized + NvPhysXToDrv_Header_Init(mPhysXDataPtr->header); +} + +PhysXCpuIndicator::~PhysXCpuIndicator() +{ + if (!mPhysXDataPtr) + { + return; + } + + mPhysXDataPtr->bCpuPhysicsPresent--; + + mPhysXDataPtr = NULL; + mSharedMemConfig.destroy(); +} + +void* PhysXCpuIndicator::createIndicatorBlock(AgMMFile &mmfile, bool &alreadyExists) +{ + char configName[128]; + + // Get the windows version (we can only create Global\\ namespace objects in XP) +#if _MSC_VER >= 1800 + // Windows 8.1 SDK, which comes with VS2013, deprecated the GetVersionEx function + // Windows 8.1 SDK added the IsWindowsVistaOrGreater helper function which we use instead + BOOL bIsVistaOrGreater = IsWindowsVistaOrGreater(); +#else + OSVERSIONINFOEX windowsVersionInfo; + + /** + Operating system Version number + ---------------- -------------- + Windows 7 6.1 + Windows Server 2008 R2 6.1 + Windows Server 2008 6.0 + Windows Vista 6.0 + Windows Server 2003 R2 5.2 + Windows Server 2003 5.2 + Windows XP 5.1 + Windows 2000 5.0 + **/ + 
windowsVersionInfo.dwOSVersionInfoSize = sizeof(windowsVersionInfo); + GetVersionEx((LPOSVERSIONINFO)&windowsVersionInfo); + + bool bIsVistaOrGreater = (windowsVersionInfo.dwMajorVersion >= 6); +#endif + + if (bIsVistaOrGreater) + { + NvPhysXToDrv_Build_SectionName((uint32_t)GetCurrentProcessId(), configName); + } + else + { + NvPhysXToDrv_Build_SectionNameXP((uint32_t)GetCurrentProcessId(), configName); + } + + mmfile.create(configName, sizeof(physx::NvPhysXToDrv_Data_V1), alreadyExists); + + return mmfile.getAddr(); +} + +//----------------------------------------------------------------------------------------------------------- + +PhysXGpuIndicator::PhysXGpuIndicator() : + mPhysXDataPtr(NULL), + mAlreadyExists(false), + mGpuTrigger(false) +{ + mPhysXDataPtr = (physx::NvPhysXToDrv_Data_V1*)PhysXCpuIndicator::createIndicatorBlock(mSharedMemConfig, mAlreadyExists); + + // init header last to prevent race conditions + // this must be done because the driver may have already created the shared memory block, + // thus alreadyExists may be true, even if PhysX hasn't been initialized + NvPhysXToDrv_Header_Init(mPhysXDataPtr->header); +} + +PhysXGpuIndicator::~PhysXGpuIndicator() +{ + gpuOff(); + + mPhysXDataPtr = NULL; + mSharedMemConfig.destroy(); +} + +// Explicit set functions to indicate to NV driver that GPU PhysX is happening +void PhysXGpuIndicator::gpuOn() +{ + if (!mPhysXDataPtr || mGpuTrigger) + { + return; + } + + if (!mAlreadyExists) + { + mPhysXDataPtr->bGpuPhysicsPresent = 1; + mPhysXDataPtr->bCpuPhysicsPresent = 0; + } + else + { + mPhysXDataPtr->bGpuPhysicsPresent++; + } + + mGpuTrigger = true; +} + +void PhysXGpuIndicator::gpuOff() +{ + if (!mPhysXDataPtr || !mGpuTrigger) + { + return; + } + + mPhysXDataPtr->bGpuPhysicsPresent--; + + mGpuTrigger = false; +} diff --git a/APEX_1.4/framework/src/windows/PhysXIndicator.h b/APEX_1.4/framework/src/windows/PhysXIndicator.h new file mode 100644 index 00000000..155dd4a8 --- /dev/null +++ 
b/APEX_1.4/framework/src/windows/PhysXIndicator.h @@ -0,0 +1,53 @@ +/* + * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved. + * + * NVIDIA CORPORATION and its licensors retain all intellectual property + * and proprietary rights in and to this software, related documentation + * and any modifications thereto. Any use, reproduction, disclosure or + * distribution of this software and related documentation without an express + * license agreement from NVIDIA CORPORATION is strictly prohibited. + */ + +// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. +// Copyright (c) 2001-2004 NovodeX AG. All rights reserved. + +#ifndef PHYS_XINDICATOR_H +#define PHYS_XINDICATOR_H + +#include "AgMMFile.h" +#include "nvPhysXtoDrv.h" + +namespace nvidia +{ + class PhysXCpuIndicator + { + public: + PhysXCpuIndicator(); + ~PhysXCpuIndicator(); + + static void* createIndicatorBlock(AgMMFile &mmfile, bool &alreadyExists); + + private: + AgMMFile mSharedMemConfig; + physx::NvPhysXToDrv_Data_V1* mPhysXDataPtr; + }; + + class PhysXGpuIndicator + { + public: + PhysXGpuIndicator(); + ~PhysXGpuIndicator(); + + void gpuOn(); + void gpuOff(); + + private: + AgMMFile mSharedMemConfig; + physx::NvPhysXToDrv_Data_V1* mPhysXDataPtr; + bool mAlreadyExists; + bool mGpuTrigger; + + }; +} + +#endif // __PHYSXINDICATOR_H__ diff --git a/APEX_1.4/framework/src/windows/nvPhysXtoDrv.h b/APEX_1.4/framework/src/windows/nvPhysXtoDrv.h new file mode 100644 index 00000000..047763a1 --- /dev/null +++ b/APEX_1.4/framework/src/windows/nvPhysXtoDrv.h @@ -0,0 +1,77 @@ +/* + * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved. + * + * NVIDIA CORPORATION and its licensors retain all intellectual property + * and proprietary rights in and to this software, related documentation + * and any modifications thereto. 
Any use, reproduction, disclosure or + * distribution of this software and related documentation without an express + * license agreement from NVIDIA CORPORATION is strictly prohibited. + */ + +// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. +// Copyright (c) 2001-2004 NovodeX AG. All rights reserved. + +#ifndef NV_PHYS_XTO_DRV_H +#define NV_PHYS_XTO_DRV_H + +// The puprose of this interface is to provide graphics drivers with information +// about PhysX state to draw PhysX visual indicator + +// We share information between modules using a memory section object. PhysX creates +// such object, graphics drivers try to open it. The name of the object has +// fixed part (NvPhysXToDrv_SectionName) followed by the process id. This allows +// each process to have its own communication channel. + +namespace physx +{ + +#define NvPhysXToDrv_SectionName "PH71828182845_" + +// Vista apps cannot create stuff in Global\\ namespace when NOT elevated, so use local scope +#define NvPhysXToDrv_Build_SectionName(PID, buf) sprintf(buf, NvPhysXToDrv_SectionName "%x", PID) +#define NvPhysXToDrv_Build_SectionNameXP(PID, buf) sprintf(buf, "Global\\" NvPhysXToDrv_SectionName "%x", PID) + +typedef struct NvPhysXToDrv_Header_ +{ + int signature; // header interface signature + int version; // version of the interface + int size; // size of the structure + int reserved; // reserved, must be zero +} NvPhysXToDrv_Header; + +// this structure describes layout of data in the shared memory section +typedef struct NvPhysXToDrv_Data_V1_ +{ + NvPhysXToDrv_Header header; // keep this member first in all versions of the interface. 
+ + int bCpuPhysicsPresent; // nonzero if cpu physics is initialized + int bGpuPhysicsPresent; // nonzero if gpu physics is initialized + +} NvPhysXToDrv_Data_V1; + +// some random magic number as our interface signature +#define NvPhysXToDrv_Header_Signature 0xA7AB + +// use the macro to setup the header to the latest version of the interface +// update the macro when a new verson of the interface is added +#define NvPhysXToDrv_Header_Init(header) \ +{ \ + header.signature = NvPhysXToDrv_Header_Signature; \ + header.version = 1; \ + header.size = sizeof(NvPhysXToDrv_Data_V1); \ + header.reserved = 0; \ +} + +// validate the header against all known interface versions +// add validation checks when new interfaces are added +#define NvPhysXToDrv_Header_Validate(header, curVersion) \ + ( \ + (header.signature == NvPhysXToDrv_Header_Signature) && \ + (header.version == curVersion) && \ + (curVersion == 1) && \ + (header.size == sizeof(NvPhysXToDrv_Data_V1)) \ + ) + +} + +#endif // __NVPHYSXTODRV_H__ |