aboutsummaryrefslogtreecommitdiff
path: root/NvBlast/sdk
diff options
context:
space:
mode:
authorBryan Galdrikian <[email protected]>2017-02-21 12:07:59 -0800
committerBryan Galdrikian <[email protected]>2017-02-21 12:07:59 -0800
commit446ce137c6823ba9eff273bdafdaf266287c7c98 (patch)
treed20aab3e2ed08d7b3ca71c2f40db6a93ea00c459 /NvBlast/sdk
downloadblast-1.0.0-beta.tar.xz
blast-1.0.0-beta.zip
first commitv1.0.0-beta
Diffstat (limited to 'NvBlast/sdk')
-rw-r--r--NvBlast/sdk/CMakeLists.txt90
-rw-r--r--NvBlast/sdk/common/NvBlastAssert.cpp50
-rw-r--r--NvBlast/sdk/common/NvBlastAssert.h63
-rw-r--r--NvBlast/sdk/common/NvBlastAtomic.cpp73
-rw-r--r--NvBlast/sdk/common/NvBlastAtomic.h32
-rw-r--r--NvBlast/sdk/common/NvBlastDLink.h285
-rw-r--r--NvBlast/sdk/common/NvBlastFixedArray.h128
-rw-r--r--NvBlast/sdk/common/NvBlastFixedBitmap.h118
-rw-r--r--NvBlast/sdk/common/NvBlastFixedBoolArray.h106
-rw-r--r--NvBlast/sdk/common/NvBlastFixedPriorityQueue.h192
-rw-r--r--NvBlast/sdk/common/NvBlastGeometry.h122
-rw-r--r--NvBlast/sdk/common/NvBlastIncludeWindows.h90
-rw-r--r--NvBlast/sdk/common/NvBlastIndexFns.h127
-rw-r--r--NvBlast/sdk/common/NvBlastIteratorBase.h135
-rw-r--r--NvBlast/sdk/common/NvBlastMath.h79
-rw-r--r--NvBlast/sdk/common/NvBlastMemory.h123
-rw-r--r--NvBlast/sdk/common/NvBlastPreprocessorInternal.h36
-rw-r--r--NvBlast/sdk/common/NvBlastTime.cpp23
-rw-r--r--NvBlast/sdk/common/NvBlastTime.h108
-rw-r--r--NvBlast/sdk/common/NvBlastTimers.cpp29
-rw-r--r--NvBlast/sdk/compiler/cmake/NvBlast.cmake89
-rw-r--r--NvBlast/sdk/compiler/cmake/NvBlastExtAuthoring.cmake106
-rw-r--r--NvBlast/sdk/compiler/cmake/NvBlastExtConverterLL.cmake77
-rw-r--r--NvBlast/sdk/compiler/cmake/NvBlastExtImport.cmake95
-rw-r--r--NvBlast/sdk/compiler/cmake/NvBlastExtPhysX.cmake113
-rw-r--r--NvBlast/sdk/compiler/cmake/NvBlastExtSerialization.cmake200
-rw-r--r--NvBlast/sdk/compiler/cmake/NvBlastExtSerializationLL.cmake151
-rw-r--r--NvBlast/sdk/compiler/cmake/NvBlastExtShaders.cmake74
-rw-r--r--NvBlast/sdk/compiler/cmake/NvBlastTk.cmake126
-rw-r--r--NvBlast/sdk/compiler/cmake/modules/CapnProtoGenerate.cmake108
-rw-r--r--NvBlast/sdk/compiler/cmake/modules/FindApexSDK.cmake399
-rw-r--r--NvBlast/sdk/compiler/cmake/modules/FindCapnProtoSDK.cmake111
-rw-r--r--NvBlast/sdk/compiler/cmake/modules/FindFBXSDK.cmake71
-rw-r--r--NvBlast/sdk/compiler/cmake/modules/FindGoogleTestNV.cmake132
-rw-r--r--NvBlast/sdk/compiler/cmake/modules/FindPhysXSDK.cmake566
-rw-r--r--NvBlast/sdk/compiler/cmake/modules/FindPxSharedSDK.cmake293
-rw-r--r--NvBlast/sdk/compiler/cmake/modules/FindXDK.cmake257
-rw-r--r--NvBlast/sdk/compiler/cmake/windows/CMakeLists.txt67
-rw-r--r--NvBlast/sdk/compiler/cmake/windows/NvBlast.cmake22
-rw-r--r--NvBlast/sdk/compiler/cmake/windows/NvBlastExtAuthoring.cmake24
-rw-r--r--NvBlast/sdk/compiler/cmake/windows/NvBlastExtConverterLL.cmake24
-rw-r--r--NvBlast/sdk/compiler/cmake/windows/NvBlastExtImport.cmake24
-rw-r--r--NvBlast/sdk/compiler/cmake/windows/NvBlastExtPhysX.cmake24
-rw-r--r--NvBlast/sdk/compiler/cmake/windows/NvBlastExtSerialization.cmake26
-rw-r--r--NvBlast/sdk/compiler/cmake/windows/NvBlastExtSerializationLL.cmake26
-rw-r--r--NvBlast/sdk/compiler/cmake/windows/NvBlastExtShaders.cmake24
-rw-r--r--NvBlast/sdk/compiler/cmake/windows/NvBlastTk.cmake27
-rw-r--r--NvBlast/sdk/extensions/authoring/include/NvBlastExtAuthoringBondGenerator.h136
-rw-r--r--NvBlast/sdk/extensions/authoring/include/NvBlastExtAuthoringCollisionBuilder.h123
-rw-r--r--NvBlast/sdk/extensions/authoring/include/NvBlastExtAuthoringFractureTool.h435
-rw-r--r--NvBlast/sdk/extensions/authoring/include/NvBlastExtAuthoringMesh.h174
-rw-r--r--NvBlast/sdk/extensions/authoring/include/NvBlastExtAuthoringTypes.h128
-rw-r--r--NvBlast/sdk/extensions/authoring/source/NvBlastExtApexSharedParts.cpp1004
-rw-r--r--NvBlast/sdk/extensions/authoring/source/NvBlastExtApexSharedParts.h51
-rw-r--r--NvBlast/sdk/extensions/authoring/source/NvBlastExtAuthoringAccelerator.cpp629
-rw-r--r--NvBlast/sdk/extensions/authoring/source/NvBlastExtAuthoringAccelerator.h147
-rw-r--r--NvBlast/sdk/extensions/authoring/source/NvBlastExtAuthoringBondGenerator.cpp991
-rw-r--r--NvBlast/sdk/extensions/authoring/source/NvBlastExtAuthoringBooleanTool.cpp1351
-rw-r--r--NvBlast/sdk/extensions/authoring/source/NvBlastExtAuthoringBooleanTool.h197
-rw-r--r--NvBlast/sdk/extensions/authoring/source/NvBlastExtAuthoringCollisionBuilder.cpp279
-rw-r--r--NvBlast/sdk/extensions/authoring/source/NvBlastExtAuthoringFractureTool.cpp1510
-rw-r--r--NvBlast/sdk/extensions/authoring/source/NvBlastExtAuthoringInternalCommon.h193
-rw-r--r--NvBlast/sdk/extensions/authoring/source/NvBlastExtAuthoringMesh.cpp558
-rw-r--r--NvBlast/sdk/extensions/authoring/source/NvBlastExtAuthoringPerlinNoise.h373
-rw-r--r--NvBlast/sdk/extensions/authoring/source/NvBlastExtAuthoringTriangulator.cpp1439
-rw-r--r--NvBlast/sdk/extensions/authoring/source/NvBlastExtAuthoringTriangulator.h261
-rw-r--r--NvBlast/sdk/extensions/authoring/source/NvBlastExtAuthoringVSA.h312
-rw-r--r--NvBlast/sdk/extensions/authoring/source/NvBlastExtTriangleProcessor.cpp355
-rw-r--r--NvBlast/sdk/extensions/authoring/source/NvBlastExtTriangleProcessor.h158
-rw-r--r--NvBlast/sdk/extensions/common/source/NvBlastExtAllocator.h127
-rw-r--r--NvBlast/sdk/extensions/common/source/NvBlastExtArray.h41
-rw-r--r--NvBlast/sdk/extensions/common/source/NvBlastExtDefs.h64
-rw-r--r--NvBlast/sdk/extensions/common/source/NvBlastExtHashMap.h34
-rw-r--r--NvBlast/sdk/extensions/common/source/NvBlastExtHashSet.h33
-rw-r--r--NvBlast/sdk/extensions/converter/include/NvBlastExtDataConverter.h40
-rw-r--r--NvBlast/sdk/extensions/converter/source/conversion/NvBlastExtAssetBlockVersionConverter_v0_v1.h88
-rw-r--r--NvBlast/sdk/extensions/converter/source/conversion/NvBlastExtBinaryBlockConverter.cpp152
-rw-r--r--NvBlast/sdk/extensions/converter/source/conversion/NvBlastExtBinaryBlockConverter.h57
-rw-r--r--NvBlast/sdk/extensions/converter/source/conversion/NvBlastExtDataConverter.cpp103
-rw-r--r--NvBlast/sdk/extensions/import/include/NvBlastExtApexImportTool.h199
-rw-r--r--NvBlast/sdk/extensions/import/source/NvBlastExtApexDestruction.cpp220
-rw-r--r--NvBlast/sdk/extensions/import/source/NvBlastExtApexDestruction.h108
-rw-r--r--NvBlast/sdk/extensions/import/source/NvBlastExtApexImportTool.cpp490
-rw-r--r--NvBlast/sdk/extensions/import/source/NvBlastExtScopedResource.cpp50
-rw-r--r--NvBlast/sdk/extensions/import/source/NvBlastExtScopedResource.h160
-rw-r--r--NvBlast/sdk/extensions/physx/include/NvBlastExtImpactDamageManager.h142
-rw-r--r--NvBlast/sdk/extensions/physx/include/NvBlastExtPx.h29
-rw-r--r--NvBlast/sdk/extensions/physx/include/NvBlastExtPxActor.h83
-rw-r--r--NvBlast/sdk/extensions/physx/include/NvBlastExtPxAsset.h201
-rw-r--r--NvBlast/sdk/extensions/physx/include/NvBlastExtPxFamily.h223
-rw-r--r--NvBlast/sdk/extensions/physx/include/NvBlastExtPxListener.h55
-rw-r--r--NvBlast/sdk/extensions/physx/include/NvBlastExtPxManager.h245
-rw-r--r--NvBlast/sdk/extensions/physx/include/NvBlastExtStressSolver.h209
-rw-r--r--NvBlast/sdk/extensions/physx/include/NvBlastExtSync.h213
-rw-r--r--NvBlast/sdk/extensions/physx/source/physics/NvBlastExtImpactDamageManager.cpp448
-rw-r--r--NvBlast/sdk/extensions/physx/source/physics/NvBlastExtImpulseStressSolver.cpp1312
-rw-r--r--NvBlast/sdk/extensions/physx/source/physics/NvBlastExtImpulseStressSolver.h164
-rw-r--r--NvBlast/sdk/extensions/physx/source/physics/NvBlastExtPxActorImpl.cpp180
-rw-r--r--NvBlast/sdk/extensions/physx/source/physics/NvBlastExtPxActorImpl.h94
-rw-r--r--NvBlast/sdk/extensions/physx/source/physics/NvBlastExtPxAssetImpl.cpp315
-rw-r--r--NvBlast/sdk/extensions/physx/source/physics/NvBlastExtPxAssetImpl.h126
-rw-r--r--NvBlast/sdk/extensions/physx/source/physics/NvBlastExtPxFamilyImpl.cpp294
-rw-r--r--NvBlast/sdk/extensions/physx/source/physics/NvBlastExtPxFamilyImpl.h168
-rw-r--r--NvBlast/sdk/extensions/physx/source/physics/NvBlastExtPxManagerImpl.cpp127
-rw-r--r--NvBlast/sdk/extensions/physx/source/physics/NvBlastExtPxManagerImpl.h202
-rw-r--r--NvBlast/sdk/extensions/physx/source/sync/NvBlastExtSync.cpp235
-rw-r--r--NvBlast/sdk/extensions/serialization/include/NvBlastExtSerializationInterface.h38
-rw-r--r--NvBlast/sdk/extensions/serialization/include/NvBlastExtSerializationLLInterface.h43
-rw-r--r--NvBlast/sdk/extensions/serialization/source/BlastSerialization.capn162
-rw-r--r--NvBlast/sdk/extensions/serialization/source/DTO/AssetDTO.cpp187
-rw-r--r--NvBlast/sdk/extensions/serialization/source/DTO/AssetDTO.h16
-rw-r--r--NvBlast/sdk/extensions/serialization/source/DTO/DTOMacros.h44
-rw-r--r--NvBlast/sdk/extensions/serialization/source/DTO/ExtPxAssetDTO.cpp78
-rw-r--r--NvBlast/sdk/extensions/serialization/source/DTO/ExtPxAssetDTO.h16
-rw-r--r--NvBlast/sdk/extensions/serialization/source/DTO/ExtPxChunkDTO.cpp43
-rw-r--r--NvBlast/sdk/extensions/serialization/source/DTO/ExtPxChunkDTO.h16
-rw-r--r--NvBlast/sdk/extensions/serialization/source/DTO/ExtPxSubchunkDTO.cpp43
-rw-r--r--NvBlast/sdk/extensions/serialization/source/DTO/ExtPxSubchunkDTO.h16
-rw-r--r--NvBlast/sdk/extensions/serialization/source/DTO/NvBlastBondDTO.cpp63
-rw-r--r--NvBlast/sdk/extensions/serialization/source/DTO/NvBlastBondDTO.h16
-rw-r--r--NvBlast/sdk/extensions/serialization/source/DTO/NvBlastChunkDTO.cpp60
-rw-r--r--NvBlast/sdk/extensions/serialization/source/DTO/NvBlastChunkDTO.h18
-rw-r--r--NvBlast/sdk/extensions/serialization/source/DTO/NvBlastIDDTO.cpp48
-rw-r--r--NvBlast/sdk/extensions/serialization/source/DTO/NvBlastIDDTO.h16
-rw-r--r--NvBlast/sdk/extensions/serialization/source/DTO/PxConvexMeshGeometryDTO.cpp127
-rw-r--r--NvBlast/sdk/extensions/serialization/source/DTO/PxConvexMeshGeometryDTO.h17
-rw-r--r--NvBlast/sdk/extensions/serialization/source/DTO/PxMeshScaleDTO.cpp42
-rw-r--r--NvBlast/sdk/extensions/serialization/source/DTO/PxMeshScaleDTO.h17
-rw-r--r--NvBlast/sdk/extensions/serialization/source/DTO/PxQuatDTO.cpp45
-rw-r--r--NvBlast/sdk/extensions/serialization/source/DTO/PxQuatDTO.h17
-rw-r--r--NvBlast/sdk/extensions/serialization/source/DTO/PxTransformDTO.cpp42
-rw-r--r--NvBlast/sdk/extensions/serialization/source/DTO/PxTransformDTO.h17
-rw-r--r--NvBlast/sdk/extensions/serialization/source/DTO/PxVec3DTO.cpp45
-rw-r--r--NvBlast/sdk/extensions/serialization/source/DTO/PxVec3DTO.h17
-rw-r--r--NvBlast/sdk/extensions/serialization/source/DTO/TkAssetDTO.cpp67
-rw-r--r--NvBlast/sdk/extensions/serialization/source/DTO/TkAssetDTO.h17
-rw-r--r--NvBlast/sdk/extensions/serialization/source/DTO/TkAssetJointDescDTO.cpp53
-rw-r--r--NvBlast/sdk/extensions/serialization/source/DTO/TkAssetJointDescDTO.h17
-rw-r--r--NvBlast/sdk/extensions/serialization/source/NvBlastExtGlobals.h10
-rw-r--r--NvBlast/sdk/extensions/serialization/source/NvBlastExtInputStream.cpp38
-rw-r--r--NvBlast/sdk/extensions/serialization/source/NvBlastExtInputStream.h32
-rw-r--r--NvBlast/sdk/extensions/serialization/source/NvBlastExtKJPxInputStream.cpp32
-rw-r--r--NvBlast/sdk/extensions/serialization/source/NvBlastExtKJPxInputStream.h41
-rw-r--r--NvBlast/sdk/extensions/serialization/source/NvBlastExtKJPxOutputStream.cpp35
-rw-r--r--NvBlast/sdk/extensions/serialization/source/NvBlastExtKJPxOutputStream.h40
-rw-r--r--NvBlast/sdk/extensions/serialization/source/NvBlastExtOutputStream.cpp24
-rw-r--r--NvBlast/sdk/extensions/serialization/source/NvBlastExtOutputStream.h31
-rw-r--r--NvBlast/sdk/extensions/serialization/source/NvBlastExtSerialization.capn95
-rw-r--r--NvBlast/sdk/extensions/serialization/source/NvBlastExtSerialization.cpp146
-rw-r--r--NvBlast/sdk/extensions/serialization/source/NvBlastExtSerialization.h172
-rw-r--r--NvBlast/sdk/extensions/serialization/source/NvBlastExtSerializationImpl.h75
-rw-r--r--NvBlast/sdk/extensions/serialization/source/NvBlastExtSerializationInterface.cpp133
-rw-r--r--NvBlast/sdk/extensions/serialization/source/NvBlastExtSerializationLL.capn89
-rw-r--r--NvBlast/sdk/extensions/serialization/source/NvBlastExtSerializationLLImpl.h48
-rw-r--r--NvBlast/sdk/extensions/serialization/source/NvBlastExtSerializationLLInterface.cpp101
-rw-r--r--NvBlast/sdk/extensions/shaders/include/NvBlastExtDamageShaders.h111
-rw-r--r--NvBlast/sdk/extensions/shaders/source/NvBlastExtRadialShaders.cpp205
-rw-r--r--NvBlast/sdk/extensions/shaders/source/NvBlastExtShearShaders.cpp149
-rw-r--r--NvBlast/sdk/lowlevel/include/NvBlast.h807
-rw-r--r--NvBlast/sdk/lowlevel/include/NvBlastPreprocessor.h31
-rw-r--r--NvBlast/sdk/lowlevel/include/NvBlastProfiler.h52
-rw-r--r--NvBlast/sdk/lowlevel/include/NvBlastTypes.h632
-rw-r--r--NvBlast/sdk/lowlevel/include/NvPreprocessor.h540
-rw-r--r--NvBlast/sdk/lowlevel/source/NvBlastActor.cpp1316
-rw-r--r--NvBlast/sdk/lowlevel/source/NvBlastActor.h732
-rw-r--r--NvBlast/sdk/lowlevel/source/NvBlastActorSerializationBlock.cpp575
-rw-r--r--NvBlast/sdk/lowlevel/source/NvBlastActorSerializationBlock.h151
-rw-r--r--NvBlast/sdk/lowlevel/source/NvBlastAsset.cpp931
-rw-r--r--NvBlast/sdk/lowlevel/source/NvBlastAsset.h294
-rw-r--r--NvBlast/sdk/lowlevel/source/NvBlastAssetHelper.cpp183
-rw-r--r--NvBlast/sdk/lowlevel/source/NvBlastChunkHierarchy.h232
-rw-r--r--NvBlast/sdk/lowlevel/source/NvBlastFamily.cpp295
-rw-r--r--NvBlast/sdk/lowlevel/source/NvBlastFamily.h238
-rw-r--r--NvBlast/sdk/lowlevel/source/NvBlastFamilyGraph.cpp629
-rw-r--r--NvBlast/sdk/lowlevel/source/NvBlastFamilyGraph.h280
-rw-r--r--NvBlast/sdk/lowlevel/source/NvBlastSupportGraph.h134
-rw-r--r--NvBlast/sdk/profiler/NvBlastProfiler.cpp91
-rw-r--r--NvBlast/sdk/profiler/NvBlastProfilerInternal.h58
-rw-r--r--NvBlast/sdk/toolkit/include/NvBlastTk.h31
-rw-r--r--NvBlast/sdk/toolkit/include/NvBlastTkActor.h239
-rw-r--r--NvBlast/sdk/toolkit/include/NvBlastTkAsset.h136
-rw-r--r--NvBlast/sdk/toolkit/include/NvBlastTkEvent.h166
-rw-r--r--NvBlast/sdk/toolkit/include/NvBlastTkFamily.h124
-rw-r--r--NvBlast/sdk/toolkit/include/NvBlastTkFramework.h365
-rw-r--r--NvBlast/sdk/toolkit/include/NvBlastTkGroup.h133
-rw-r--r--NvBlast/sdk/toolkit/include/NvBlastTkIdentifiable.h61
-rw-r--r--NvBlast/sdk/toolkit/include/NvBlastTkJoint.h60
-rw-r--r--NvBlast/sdk/toolkit/include/NvBlastTkObject.h57
-rw-r--r--NvBlast/sdk/toolkit/include/NvBlastTkSerializable.h60
-rw-r--r--NvBlast/sdk/toolkit/include/NvBlastTkType.h65
-rw-r--r--NvBlast/sdk/toolkit/source/NvBlastTkActorImpl.cpp434
-rw-r--r--NvBlast/sdk/toolkit/source/NvBlastTkActorImpl.h375
-rw-r--r--NvBlast/sdk/toolkit/source/NvBlastTkAllocator.cpp22
-rw-r--r--NvBlast/sdk/toolkit/source/NvBlastTkAllocator.h49
-rw-r--r--NvBlast/sdk/toolkit/source/NvBlastTkArray.h41
-rw-r--r--NvBlast/sdk/toolkit/source/NvBlastTkAssetImpl.cpp337
-rw-r--r--NvBlast/sdk/toolkit/source/NvBlastTkAssetImpl.h162
-rw-r--r--NvBlast/sdk/toolkit/source/NvBlastTkCommon.h110
-rw-r--r--NvBlast/sdk/toolkit/source/NvBlastTkEventQueue.h231
-rw-r--r--NvBlast/sdk/toolkit/source/NvBlastTkFamilyImpl.cpp815
-rw-r--r--NvBlast/sdk/toolkit/source/NvBlastTkFamilyImpl.h245
-rw-r--r--NvBlast/sdk/toolkit/source/NvBlastTkFrameworkImpl.cpp613
-rw-r--r--NvBlast/sdk/toolkit/source/NvBlastTkFrameworkImpl.h225
-rw-r--r--NvBlast/sdk/toolkit/source/NvBlastTkGUID.h135
-rw-r--r--NvBlast/sdk/toolkit/source/NvBlastTkGroupImpl.cpp592
-rw-r--r--NvBlast/sdk/toolkit/source/NvBlastTkGroupImpl.h174
-rw-r--r--NvBlast/sdk/toolkit/source/NvBlastTkHashMap.h34
-rw-r--r--NvBlast/sdk/toolkit/source/NvBlastTkHashSet.h34
-rw-r--r--NvBlast/sdk/toolkit/source/NvBlastTkJointImpl.cpp183
-rw-r--r--NvBlast/sdk/toolkit/source/NvBlastTkJointImpl.h146
-rw-r--r--NvBlast/sdk/toolkit/source/NvBlastTkTaskImpl.cpp263
-rw-r--r--NvBlast/sdk/toolkit/source/NvBlastTkTaskImpl.h444
-rw-r--r--NvBlast/sdk/toolkit/source/NvBlastTkTypeImpl.h174
213 files changed, 41706 insertions, 0 deletions
diff --git a/NvBlast/sdk/CMakeLists.txt b/NvBlast/sdk/CMakeLists.txt
new file mode 100644
index 0000000..6c816a2
--- /dev/null
+++ b/NvBlast/sdk/CMakeLists.txt
@@ -0,0 +1,90 @@
# Top-level CMake entry point for the Blast SDK build.  Expects the generation
# scripts (GenerateProjects.bat) to provide BLAST_ROOT_DIR and PM_PACKAGES_ROOT
# in the environment, then defers to the platform-specific CMakeLists under
# compiler/cmake/${TARGET_BUILD_PLATFORM}.
cmake_minimum_required(VERSION 3.3)

project(BlastSDK CXX)

CMAKE_POLICY(SET CMP0057 NEW) # Enable IN_LIST

IF(NOT DEFINED BLAST_ROOT_DIR)

	# Normalize Windows-style backslashes so the path is usable in CMake expressions.
	STRING(REPLACE "\\" "/" BRD_TEMP $ENV{BLAST_ROOT_DIR})

	# This env variable is set by GenerateProjects.bat, and is no longer available when CMake rebuilds, so this stores it in the cache
	SET(BLAST_ROOT_DIR ${BRD_TEMP} CACHE INTERNAL "Root of the Blast source tree")

ENDIF()

IF(NOT EXISTS ${BLAST_ROOT_DIR})
	MESSAGE(FATAL_ERROR "BLAST_ROOT_DIR environment variable wasn't set or was invalid.")
ENDIF()

# External dependencies root (from the PM_PACKAGES_ROOT environment variable).
SET(GW_DEPS_ROOT $ENV{PM_PACKAGES_ROOT})

# Add the project specific CMake modules to the module path
LIST(APPEND CMAKE_MODULE_PATH ${CMAKE_CURRENT_LIST_DIR}/compiler/cmake/modules/)

IF(NOT DEFINED CMAKEMODULES_VERSION)
	SET(CMAKEMODULES_VERSION $ENV{PM_CMakeModules_VERSION} CACHE INTERNAL "CMakeModules version from generation batch")
ENDIF()

#TODO: More elegance
IF(NOT EXISTS ${GW_DEPS_ROOT}/CMakeModules/${CMAKEMODULES_VERSION})
	MESSAGE(FATAL_ERROR "Could not find CMakeModules at ${GW_DEPS_ROOT}")
ENDIF()

SET(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${GW_DEPS_ROOT}/CMakeModules/${CMAKEMODULES_VERSION}")

MESSAGE("Module path:" ${CMAKE_MODULE_PATH})

# Replace the default configuration set with the four configurations this SDK
# builds: debug / profile / checked / release.
IF(CMAKE_CONFIGURATION_TYPES)
	SET(CMAKE_CONFIGURATION_TYPES debug profile checked release)
	SET(CMAKE_CONFIGURATION_TYPES "${CMAKE_CONFIGURATION_TYPES}" CACHE STRING
		"Reset config to what we need"
		FORCE)

	SET(CMAKE_SHARED_LINKER_FLAGS_CHECKED "")
	SET(CMAKE_SHARED_LINKER_FLAGS_PROFILE "")

ENDIF()

# Default to appending "DEBUG", "PROFILE", etc to produced artifacts
IF(NOT DEFINED APPEND_CONFIG_NAME)
	SET(APPEND_CONFIG_NAME ON)
ENDIF()

IF (APPEND_CONFIG_NAME)
	MESSAGE("Appending config to output names")

	SET(CMAKE_DEBUG_POSTFIX "DEBUG")
	SET(CMAKE_PROFILE_POSTFIX "PROFILE")
	SET(CMAKE_CHECKED_POSTFIX "CHECKED")
	SET(CMAKE_RELEASE_POSTFIX "")
ENDIF()

INCLUDE(SetOutputPaths)

# Either have to define a single output path, or each one.

IF(NOT DEFINED BL_OUTPUT_DIR)
	IF (NOT DEFINED BL_LIB_OUTPUT_DIR)
		MESSAGE(FATAL_ERROR "BL_LIB_OUTPUT_DIR not defined - Define either BL_OUTPUT_DIR or BL_LIB_OUTPUT_DIR and BL_DLL_OUTPUT_DIR and BL_EXE_OUTPUT_DIR")
	ENDIF()

	IF (NOT DEFINED BL_DLL_OUTPUT_DIR)
		MESSAGE(FATAL_ERROR "BL_DLL_OUTPUT_DIR not defined - Define either BL_OUTPUT_DIR or BL_LIB_OUTPUT_DIR and BL_DLL_OUTPUT_DIR and BL_EXE_OUTPUT_DIR")
	ENDIF()

	IF (NOT DEFINED BL_EXE_OUTPUT_DIR)
		MESSAGE(FATAL_ERROR "BL_EXE_OUTPUT_DIR not defined - Define either BL_OUTPUT_DIR or BL_LIB_OUTPUT_DIR and BL_DLL_OUTPUT_DIR and BL_EXE_OUTPUT_DIR")
	ENDIF()

	SetLibOutputPath(${BL_LIB_OUTPUT_DIR})
	SetDllOutputPath(${BL_DLL_OUTPUT_DIR})
	SetExeOutputPath(${BL_EXE_OUTPUT_DIR})
ELSE()
	SetSingleOutputPath(${BL_OUTPUT_DIR})
ENDIF()

SET(PROJECT_CMAKE_FILES_DIR compiler/cmake/)

# Include the platform specific configuration
INCLUDE(${PROJECT_CMAKE_FILES_DIR}/${TARGET_BUILD_PLATFORM}/CMakeLists.txt)
diff --git a/NvBlast/sdk/common/NvBlastAssert.cpp b/NvBlast/sdk/common/NvBlastAssert.cpp
new file mode 100644
index 0000000..7731a53
--- /dev/null
+++ b/NvBlast/sdk/common/NvBlastAssert.cpp
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+
+#include "NvBlastAssert.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#if NV_WINDOWS_FAMILY
+#include <crtdbg.h>
+#endif
+
+extern "C"
+{
+
+void NvBlastAssertHandler(const char* expr, const char* file, int line, bool& ignore)
+{
+ NV_UNUSED(ignore); // is used only in debug windows config
+ char buffer[1024];
+#if NV_WINDOWS_FAMILY
+ sprintf_s(buffer, 1024, "%s(%d) : Assertion failed: %s\n", file, line, expr);
+#else
+ sprintf(buffer, "%s(%d) : Assertion failed: %s\n", file, line, expr);
+#endif
+ puts(buffer);
+#if NV_WINDOWS_FAMILY && NV_DEBUG
+ // _CrtDbgReport returns -1 on error, 1 on 'retry', 0 otherwise including 'ignore'.
+ // Hitting 'abort' will terminate the process immediately.
+ int result = _CrtDbgReport(_CRT_ASSERT, file, line, NULL, "%s", buffer);
+ int mode = _CrtSetReportMode(_CRT_ASSERT, _CRTDBG_REPORT_MODE);
+ ignore = _CRTDBG_MODE_WNDW == mode && result == 0;
+ if (ignore)
+ return;
+ __debugbreak();
+#elif (NV_WINDOWS_FAMILY && NV_CHECKED) || NV_CLANG
+ __debugbreak();
+#else
+ abort();
+#endif
+}
+
+} // extern "C"
diff --git a/NvBlast/sdk/common/NvBlastAssert.h b/NvBlast/sdk/common/NvBlastAssert.h
new file mode 100644
index 0000000..b1b7ca5
--- /dev/null
+++ b/NvBlast/sdk/common/NvBlastAssert.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
#ifndef NVBLASTASSERT_H
#define NVBLASTASSERT_H


#include "NvBlastPreprocessor.h"


#if !NV_ENABLE_ASSERTS
// Asserts disabled: every assert macro compiles away to a no-op expression.
#define NVBLAST_ASSERT(exp) ((void)0)
#define NVBLAST_ALWAYS_ASSERT_MESSAGE(exp) ((void)0)
#define NVBLAST_ASSERT_WITH_MESSAGE(condition, message) ((void)0)
#else
#if NV_VC
#define NVBLAST_CODE_ANALYSIS_ASSUME(exp) \
	__analysis_assume(!!(exp)) // This macro will be used to get rid of analysis warning messages if a NVBLAST_ASSERT is used
// to "guard" illegal mem access, for example.
#else
#define NVBLAST_CODE_ANALYSIS_ASSUME(exp)
#endif
// Evaluates exp; on failure calls NvBlastAssertHandler with the stringized
// expression.  The static _ignore is per-callsite, so a handler that sets it
// suppresses all later reports from the same assertion site.
#define NVBLAST_ASSERT(exp) \
{ \
	static bool _ignore = false; \
	if (!(exp) && !_ignore) NvBlastAssertHandler(#exp, __FILE__, __LINE__, _ignore); \
	NVBLAST_CODE_ANALYSIS_ASSUME(exp); \
} ((void)0)
// Unconditionally reports the given text (here 'exp' is the message string,
// not a condition) unless this callsite was previously set to ignore.
#define NVBLAST_ALWAYS_ASSERT_MESSAGE(exp) \
{ \
	static bool _ignore = false; \
	if(!_ignore) \
	{ \
		NvBlastAssertHandler(exp, __FILE__, __LINE__, _ignore); \
	} \
} ((void)0)
// Like NVBLAST_ASSERT, but reports the supplied message instead of #exp.
#define NVBLAST_ASSERT_WITH_MESSAGE(exp, message) \
{ \
	static bool _ignore = false; \
	if (!(exp) && !_ignore) NvBlastAssertHandler(message, __FILE__, __LINE__, _ignore); \
	NVBLAST_CODE_ANALYSIS_ASSUME(exp); \
} ((void)0)
#endif

// Always-failing assert (asserts on the constant 0) when asserts are enabled.
#define NVBLAST_ALWAYS_ASSERT() NVBLAST_ASSERT(0)


extern "C"
{

// Reports a failed assertion.  Sets 'ignore' when further reports from the
// same callsite should be suppressed (debug Windows builds); on other
// configurations it breaks into the debugger or aborts (see NvBlastAssert.cpp).
NVBLAST_API void NvBlastAssertHandler(const char* expr, const char* file, int line, bool& ignore);

} // extern "C"


#endif // #ifndef NVBLASTASSERT_H
diff --git a/NvBlast/sdk/common/NvBlastAtomic.cpp b/NvBlast/sdk/common/NvBlastAtomic.cpp
new file mode 100644
index 0000000..6b9d94b
--- /dev/null
+++ b/NvBlast/sdk/common/NvBlastAtomic.cpp
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+
+#include "NvBlastAtomic.h"
+
+#include <string.h>
+#include <stdlib.h>
+
+
+namespace Nv
+{
+namespace Blast
+{
+
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+// Windows/XBOXONE Implementation
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+#if NV_WINDOWS_FAMILY || NV_XBOXONE
+
+#include "NvBlastIncludeWindows.h"
+
+int32_t atomicIncrement(volatile int32_t* val)
+{
+ return (int32_t)InterlockedIncrement((volatile LONG*)val);
+}
+
+int32_t atomicDecrement(volatile int32_t* val)
+{
+ return (int32_t)InterlockedDecrement((volatile LONG*)val);
+}
+
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+// Unix/PS4 Implementation
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+#elif(NV_UNIX_FAMILY || NV_PS4)
+
+int32_t atomicIncrement(volatile int32_t* val)
+{
+ return __sync_add_and_fetch(val, 1);
+}
+
+int32_t atomicDecrement(volatile int32_t* val)
+{
+ return __sync_sub_and_fetch(val, 1);
+}
+
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+// Unsupported Platforms
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+#else
+
+#error "Platform not supported!"
+
+#endif
+
+
+} // namespace Blast
+} // namespace Nv
+
diff --git a/NvBlast/sdk/common/NvBlastAtomic.h b/NvBlast/sdk/common/NvBlastAtomic.h
new file mode 100644
index 0000000..a3e6755
--- /dev/null
+++ b/NvBlast/sdk/common/NvBlastAtomic.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
#ifndef NVBLASTATOMIC_H
#define NVBLASTATOMIC_H

#include "NvBlastTypes.h"


namespace Nv
{
namespace Blast
{

/* Atomically increment the specified location. Return the incremented value. */
int32_t atomicIncrement(volatile int32_t* val);


/* Atomically decrement the specified location. Return the decremented value. */
int32_t atomicDecrement(volatile int32_t* val);

} // namespace Blast
} // namespace Nv

#endif // #ifndef NVBLASTATOMIC_H
diff --git a/NvBlast/sdk/common/NvBlastDLink.h b/NvBlast/sdk/common/NvBlastDLink.h
new file mode 100644
index 0000000..bfcee24
--- /dev/null
+++ b/NvBlast/sdk/common/NvBlastDLink.h
@@ -0,0 +1,285 @@
+/*
+ * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+#ifndef NVBLASTDLINK_H
+#define NVBLASTDLINK_H
+
+
+#include "NvBlastAssert.h"
+#include "NvBlastIndexFns.h"
+
+
+namespace Nv
+{
+namespace Blast
+{
+
// Doubly-linked list node addressed by array index instead of pointer.
// m_adj[0] is the index of the previous link and m_adj[1] the next link
// (see IndexDList::initLinksChain); invalidIndex<IndexType>() terminates.
template<typename IndexType>
struct IndexDLink
{
	IndexType m_adj[2];	// [0] = previous link index, [1] = next link index
};
+
+
// Operations on arrays of IndexDLink: doubly-linked lists whose "pointers"
// are indices into the caller's link array.  invalidIndex<IndexType>() plays
// the role of nullptr throughout; the list head is an index held by the caller.
template<typename IndexType>
class IndexDList
{
public:
	// Initialize every link as unconnected (both neighbor indices invalid).
	void initLinksSolitary(IndexDLink<IndexType>* links, IndexType linkCount)
	{
		for (IndexType i = 0; i < linkCount; ++i)
		{
			links[i].m_adj[0] = invalidIndex<IndexType>();
			links[i].m_adj[1] = invalidIndex<IndexType>();
		}
	}

	// Initialize the links as a single chain 0 <-> 1 <-> ... <-> linkCount-1,
	// terminated with the invalid index at both ends.
	void initLinksChain(IndexDLink<IndexType>* links, IndexType linkCount)
	{
		if (linkCount > 0)
		{
			links[0].m_adj[0] = invalidIndex<IndexType>();
			for (IndexType i = 1; i < linkCount; ++i)
			{
				links[i - 1].m_adj[1] = i;
				links[i].m_adj[0] = i - 1;
			}
			links[linkCount - 1].m_adj[1] = invalidIndex<IndexType>();
		}
	}

	// Neighbor accessor: which = 0 returns the previous link's index,
	// which = 1 the next link's (only the low bit of 'which' is used).
	IndexType getAdj(IndexDLink<IndexType>* links, IndexType linkIndex, int which)
	{
		return links[linkIndex].m_adj[which & 1];
	}

	// Unlink links[linkIndex] from its neighbors and leave it solitary.
	void remove(IndexDLink<IndexType>* links, IndexType linkIndex)
	{
		IndexDLink<IndexType>& link = links[linkIndex];
		const IndexType adj0 = link.m_adj[0];
		const IndexType adj1 = link.m_adj[1];
		if (!isInvalidIndex(adj1))
		{
			links[adj1].m_adj[0] = adj0;
			link.m_adj[1] = invalidIndex<IndexType>();
		}
		if (!isInvalidIndex(adj0))
		{
			links[adj0].m_adj[1] = adj1;
			link.m_adj[0] = invalidIndex<IndexType>();
		}
	}

	// True iff the link has no neighbor on either side.  Note that the head
	// of a one-element list also satisfies this test.
	bool isSolitary(IndexDLink<IndexType>* links, IndexType linkIndex)
	{
		const IndexDLink<IndexType>& link = links[linkIndex];
		return isInvalidIndex(link.m_adj[0]) && isInvalidIndex(link.m_adj[1]);
	}

	// Push linkIndex onto the front of the list rooted at listHead; listHead
	// is updated to linkIndex.  linkIndex must be valid.
	void insertListHead(IndexType& listHead, IndexDLink<IndexType>* links, IndexType linkIndex)
	{
		NVBLAST_ASSERT(!isInvalidIndex(linkIndex));
		if (!isInvalidIndex(listHead))
		{
			links[listHead].m_adj[0] = linkIndex;
		}
		links[linkIndex].m_adj[1] = listHead;
		listHead = linkIndex;
	}

	// Pop and return the front of the list, or the invalid index if the list
	// is empty; listHead advances to the following link.
	IndexType removeListHead(IndexType& listHead, IndexDLink<IndexType>* links)
	{
		const IndexType linkIndex = listHead;
		if (!isInvalidIndex(linkIndex))
		{
			listHead = links[linkIndex].m_adj[1];
			if (!isInvalidIndex(listHead))
			{
				links[listHead].m_adj[0] = invalidIndex<IndexType>();
			}
			links[linkIndex].m_adj[1] = invalidIndex<IndexType>();
		}
		return linkIndex;
	}

	// Remove linkIndex from the list rooted at listHead, updating listHead
	// when the head itself is removed.  linkIndex must be valid.
	void removeFromList(IndexType& listHead, IndexDLink<IndexType>* links, IndexType linkIndex)
	{
		NVBLAST_ASSERT(!isInvalidIndex(linkIndex));
		if (listHead == linkIndex)
		{
			listHead = links[linkIndex].m_adj[1];
		}
		remove(links, linkIndex);
	}
};
+
+
// Pointer-based doubly-linked list node.  A default-constructed link is
// unconnected; only DList (a friend) may modify the prev/next pointers.
struct DLink
{
	DLink() : m_prev(nullptr), m_next(nullptr) {}

	// Previous link in the containing list, or nullptr at the head.
	DLink* getPrev() const
	{
		return m_prev;
	}

	// Next link in the containing list, or nullptr at the tail.
	DLink* getNext() const
	{
		return m_next;
	}

private:
	DLink* m_prev;
	DLink* m_next;

	friend class DList;	// DList performs all linking and unlinking
};
+
+
+class DList
+{
+public:
+ DList() : m_head(nullptr), m_tail(nullptr) {}
+
+ bool isEmpty() const
+ {
+ NVBLAST_ASSERT((m_head == nullptr) == (m_tail == nullptr));
+ return m_head == nullptr;
+ }
+
+ bool isSolitary(const DLink& link) const
+ {
+ return link.m_prev == nullptr && link.m_next == nullptr && m_head != &link;
+ }
+
+ DLink* getHead() const
+ {
+ return m_head;
+ }
+
+ DLink* getTail() const
+ {
+ return m_tail;
+ }
+
+ bool insertHead(DLink& link)
+ {
+ NVBLAST_ASSERT(isSolitary(link));
+ if (!isSolitary(link))
+ {
+ return false;
+ }
+
+ link.m_next = m_head;
+ if (m_head != nullptr)
+ {
+ m_head->m_prev = &link;
+ }
+ m_head = &link;
+ if (m_tail == nullptr)
+ {
+ m_tail = &link;
+ }
+
+ return true;
+ }
+
+ bool insertTail(DLink& link)
+ {
+ NVBLAST_ASSERT(isSolitary(link));
+ if (!isSolitary(link))
+ {
+ return false;
+ }
+
+ link.m_prev = m_tail;
+ if (m_tail != nullptr)
+ {
+ m_tail->m_next = &link;
+ }
+ m_tail = &link;
+ if (m_head == nullptr)
+ {
+ m_head = &link;
+ }
+
+ return true;
+ }
+
+ void remove(DLink& link)
+ {
+ if (link.m_prev != nullptr)
+ {
+ link.m_prev->m_next = link.m_next;
+ }
+ else
+ if (m_head == &link)
+ {
+ m_head = link.m_next;
+ }
+
+ if (link.m_next != nullptr)
+ {
+ link.m_next->m_prev = link.m_prev;
+ }
+ else
+ if (m_tail == &link)
+ {
+ m_tail = link.m_prev;
+ }
+
+ link.m_next = link.m_prev = nullptr;
+ }
+
+ class It
+ {
+ public:
+ enum Direction { Reverse, Forward };
+
+ It(const DList& list, Direction dir = Forward) : m_curr(dir == Forward ? list.getHead() : list.getTail()) {}
+
+ /** Validity of current value. */
+ operator bool() const
+ {
+ return m_curr != nullptr;
+ }
+
+ /** Current value. */
+ operator const DLink*() const
+ {
+ return m_curr;
+ }
+
+ /** Pre-increment. */
+ const DLink* operator ++ ()
+ {
+ return m_curr = m_curr->getNext();
+ }
+
+ /** Pre-deccrement. */
+ const DLink* operator -- ()
+ {
+ return m_curr = m_curr->getPrev();
+ }
+
+ private:
+ const DLink* m_curr;
+ };
+
+private:
+ DLink* m_head;
+ DLink* m_tail;
+};
+
+} // end namespace Blast
+} // end namespace Nv
+
+
+#endif // #ifndef NVBLASTDLINK_H
diff --git a/NvBlast/sdk/common/NvBlastFixedArray.h b/NvBlast/sdk/common/NvBlastFixedArray.h
new file mode 100644
index 0000000..654158b
--- /dev/null
+++ b/NvBlast/sdk/common/NvBlastFixedArray.h
@@ -0,0 +1,128 @@
+/*
+* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#ifndef NVBLASTFIXEDARRAY_H
+#define NVBLASTFIXEDARRAY_H
+
+#include "NvBlastAssert.h"
+#include "NvBlastMemory.h"
+
+namespace Nv
+{
+namespace Blast
+{
+
+/*!
FixedArray is a sequential container which is intended to be used with placement new on a chunk of memory.
It lays out its data in memory as follows:

// some memory
char* buf = new char[64 * 1024];

// placement new on this memory
FixedArray<SomeClass>* arr = new (buf) FixedArray<SomeClass>();

// requiredMemorySize(capacity) returns the total bytes needed for an array of 'capacity'
// elements; advance the buffer pointer past it to use the remaining memory
buf = buf + FixedArray<SomeClass>::requiredMemorySize(capacity);
+
+buf:
+
++------------------------------------------------------------+
+| uint32_t | T[0] | T[1] | T[2] | ... |
++------------------------------------------------------------+
+
+
+!!!TODO:
+- check ctor/dtor of elements calls
+*/
+template <class T>
+class FixedArray
+{
+public:
+ explicit FixedArray() : m_size(0)
+ {
+ }
+
+ static size_t requiredMemorySize(uint32_t capacity)
+ {
+ return align16(sizeof(FixedArray<T>)) + align16(capacity * sizeof(T));
+ }
+
+ NV_FORCE_INLINE T& pushBack(T& t)
+ {
+ new (data() + m_size) T(t);
+ return data()[m_size++];
+ }
+
+ T popBack()
+ {
+ NVBLAST_ASSERT(m_size);
+ T t = data()[m_size - 1];
+ data()[--m_size].~T();
+ return t;
+ }
+
+ void clear()
+ {
+ for(T* first = data(); first < data() + m_size; ++first)
+ first->~T();
+ m_size = 0;
+ }
+
+ NV_FORCE_INLINE void forceSize_Unsafe(uint32_t s)
+ {
+ m_size = s;
+ }
+
+ NV_FORCE_INLINE T& operator[](uint32_t idx)
+ {
+ NVBLAST_ASSERT(idx < m_size);
+ return data()[idx];
+ }
+
+ NV_FORCE_INLINE const T& operator[](uint32_t idx) const
+ {
+ NVBLAST_ASSERT(idx < m_size);
+ return data()[idx];
+ }
+
+ NV_FORCE_INLINE T& at(uint32_t idx)
+ {
+ NVBLAST_ASSERT(idx < m_size);
+ return data()[idx];
+ }
+
+ NV_FORCE_INLINE const T& at(uint32_t idx) const
+ {
+ NVBLAST_ASSERT(idx < m_size);
+ return data()[idx];
+ }
+
+ NV_FORCE_INLINE uint32_t size() const
+ {
+ return m_size;
+ }
+
+private:
+ uint32_t m_size;
+
+ NV_FORCE_INLINE T* data()
+ {
+ return (T*)((char*)this + sizeof(FixedArray<T>));
+ }
+
+private:
+ FixedArray(const FixedArray& that);
+};
+
+} // namespace Blast
+} // namespace Nv
+
+#endif // ifndef NVBLASTFIXEDARRAY_H
diff --git a/NvBlast/sdk/common/NvBlastFixedBitmap.h b/NvBlast/sdk/common/NvBlastFixedBitmap.h
new file mode 100644
index 0000000..af835cf
--- /dev/null
+++ b/NvBlast/sdk/common/NvBlastFixedBitmap.h
@@ -0,0 +1,118 @@
+/*
+* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#ifndef NVBLASTFIXEDBITMAP_H
+#define NVBLASTFIXEDBITMAP_H
+
+#include "NvBlastAssert.h"
+#include "NvBlastMemory.h"
+#include <cstring>
+
+namespace Nv
+{
+namespace Blast
+{
+
+/*!
FixedBitmap is a bitset (bitmap) of fixed size; it's intended to be used with placement new on a chunk of memory.
It lays out its data in memory as follows:

// some memory
char* buf = new char[64 * 1024];

const uint32_t bitsCount = 100;

// placement new on this memory
FixedBitmap* arr = new (buf) FixedBitmap(bitsCount);

// requiredMemorySize(bitsCount) returns the total bytes needed by the bitmap;
// advance the buffer pointer past it to use the remaining memory
buf = buf + FixedBitmap::requiredMemorySize(bitsCount);
+
+buf:
+
++------------------------------------------------------------+
+| uint32_t | word0 | word1 | word2 | ... |
++------------------------------------------------------------+
+
+*/
+class FixedBitmap
+{
+public:
+ explicit FixedBitmap(uint32_t bitsCount)
+ {
+ m_bitsCount = bitsCount;
+ }
+
+ static uint32_t getWordsCount(uint32_t bitsCount)
+ {
+ return (bitsCount + 31) >> 5;
+ }
+
+ static size_t requiredMemorySize(uint32_t bitsCount)
+ {
+ return align16(sizeof(FixedBitmap)) + align16(getWordsCount(bitsCount) * sizeof(uint32_t));
+ }
+
+ void clear()
+ {
+ memset(data(), 0, getWordsCount(m_bitsCount) * sizeof(uint32_t));
+ }
+
+ void fill()
+ {
+ const uint32_t wordCount = getWordsCount(m_bitsCount);
+ uint32_t* mem = data();
+ memset(mem, 0xFF, wordCount * sizeof(uint32_t));
+ const uint32_t bitsRemainder = m_bitsCount & 31;
+ if (bitsRemainder > 0)
+ {
+ mem[wordCount - 1] &= ~(0xFFFFFFFF << bitsRemainder);
+ }
+ }
+
+ int test(uint32_t index) const
+ {
+ NVBLAST_ASSERT(index < m_bitsCount);
+ return data()[index >> 5] & (1 << (index & 31));
+ }
+
+ void set(uint32_t index)
+ {
+ NVBLAST_ASSERT(index < m_bitsCount);
+ data()[index >> 5] |= 1 << (index & 31);
+ }
+
+ void reset(uint32_t index)
+ {
+ NVBLAST_ASSERT(index < m_bitsCount);
+ data()[index >> 5] &= ~(1 << (index & 31));
+ }
+
+private:
+ uint32_t m_bitsCount;
+
+ NV_FORCE_INLINE uint32_t* data()
+ {
+ return (uint32_t*)((char*)this + sizeof(FixedBitmap));
+ }
+
+ NV_FORCE_INLINE const uint32_t* data() const
+ {
+ return (uint32_t*)((char*)this + sizeof(FixedBitmap));
+ }
+
+private:
+ FixedBitmap(const FixedBitmap& that);
+};
+
+} // namespace Blast
+} // namespace Nv
+
+#endif // ifndef NVBLASTFIXEDBITMAP_H
diff --git a/NvBlast/sdk/common/NvBlastFixedBoolArray.h b/NvBlast/sdk/common/NvBlastFixedBoolArray.h
new file mode 100644
index 0000000..253bed6
--- /dev/null
+++ b/NvBlast/sdk/common/NvBlastFixedBoolArray.h
@@ -0,0 +1,106 @@
+/*
+* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#ifndef NVBLASTFIXEDBOOLARRAY_H
+#define NVBLASTFIXEDBOOLARRAY_H
+
+#include "NvBlastAssert.h"
+#include "NvBlastMemory.h"
+#include <cstring>
+
+namespace Nv
+{
+namespace Blast
+{
+
+/*!
FixedBoolArray is an array of bools of fixed size; it's intended to be used with placement new on a chunk of memory.
It lays out its data in memory as follows:

// some memory
char* buf = new char[64 * 1024];

const uint32_t size = 100;

// placement new on this memory
FixedBoolArray* arr = new (buf) FixedBoolArray(size);

// requiredMemorySize(size) returns the total bytes needed by the array;
// advance the buffer pointer past it to use the remaining memory
buf = buf + FixedBoolArray::requiredMemorySize(size);
+
+buf:
+
++------------------------------------------------------------+
+| uint32_t | bool0 | bool1 | bool2 | ... |
++------------------------------------------------------------+
+
+*/
+class FixedBoolArray
+{
+public:
+ explicit FixedBoolArray(uint32_t size)
+ {
+ m_size = size;
+ }
+
+ static size_t requiredMemorySize(uint32_t size)
+ {
+ return align16(sizeof(FixedBoolArray)) + align16(size);
+ }
+
+ void clear()
+ {
+ memset(data(), 0, m_size);
+ }
+
+ void fill()
+ {
+ memset(data(), 1, m_size);
+ }
+
+ int test(uint32_t index) const
+ {
+ NVBLAST_ASSERT(index < m_size);
+ return data()[index];
+ }
+
+ void set(uint32_t index)
+ {
+ NVBLAST_ASSERT(index < m_size);
+ data()[index] = 1;
+ }
+
+ void reset(uint32_t index)
+ {
+ NVBLAST_ASSERT(index < m_size);
+ data()[index] = 0;
+ }
+
+private:
+ uint32_t m_size;
+
+ NV_FORCE_INLINE char* data()
+ {
+ return ((char*)this + sizeof(FixedBoolArray));
+ }
+
+ NV_FORCE_INLINE const char* data() const
+ {
+ return ((char*)this + sizeof(FixedBoolArray));
+ }
+
+private:
+ FixedBoolArray(const FixedBoolArray& that);
+};
+
+} // namespace Blast
+} // namespace Nv
+
+#endif // ifndef NVBLASTFIXEDBOOLARRAY_H
diff --git a/NvBlast/sdk/common/NvBlastFixedPriorityQueue.h b/NvBlast/sdk/common/NvBlastFixedPriorityQueue.h
new file mode 100644
index 0000000..5079edb
--- /dev/null
+++ b/NvBlast/sdk/common/NvBlastFixedPriorityQueue.h
@@ -0,0 +1,192 @@
+/*
+* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#ifndef NVBLASTFIXEDPRIORITYQUEUE_H
+#define NVBLASTFIXEDPRIORITYQUEUE_H
+
+#include "NvBlastAssert.h"
+#include "NvBlastMemory.h"
+
+namespace Nv
+{
+
+namespace Blast
+{
+
+/*!
FixedPriorityQueue is a priority queue container which is intended to be used with placement new on a chunk of memory.
It lays out its data in memory as follows:

// some memory
char* buf = new char[64 * 1024];

// placement new on this memory
FixedPriorityQueue<SomeClass>* arr = new (buf) FixedPriorityQueue<SomeClass>();

// requiredMemorySize(capacity) returns the total bytes needed for a queue of 'capacity'
// elements; advance the buffer pointer past it to use the remaining memory
buf = buf + FixedPriorityQueue<SomeClass>::requiredMemorySize(capacity);
+
+buf:
+
++------------------------------------------------------------+
+| uint32_t | T[0] | T[1] | T[2] | ... |
++------------------------------------------------------------+
+
+*/
+
/** Default comparator for FixedPriorityQueue: orders by operator<, yielding a min-heap. */
template <typename A>
struct Less
{
    bool operator()(const A& a, const A& b) const
    {
        return a < b;
    }
};
+
+
template<class Element, class Comparator = Less<Element> >
class FixedPriorityQueue : protected Comparator // inherit so that stateless comparators take no space
{
public:
    /** The comparator defines priority: compare(a, b) == true means a outranks b (top of the queue). */
    FixedPriorityQueue(const Comparator& less = Comparator()) : Comparator(less), mHeapSize(0)
    {
    }

    ~FixedPriorityQueue()
    {
    }

    /** Total 16-byte-aligned buffer size needed for a queue holding 'capacity' elements. */
    static size_t requiredMemorySize(uint32_t capacity)
    {
        return align16(sizeof(FixedPriorityQueue<Element, Comparator>)) + align16(capacity * sizeof(Element));
    }

    //! Get the element with the highest priority (returns a copy; queue must be non-empty)
    const Element top() const
    {
        return data()[0];
    }

    //! Get the element with the highest priority (returns a copy; queue must be non-empty)
    Element top()
    {
        return data()[0];
    }

    //! Check to whether the priority queue is empty
    bool empty() const
    {
        return (mHeapSize == 0);
    }

    //! Empty the priority queue (no element destructors are run)
    void clear()
    {
        mHeapSize = 0;
    }

    //! Insert a new element into the priority queue. Only valid when size() is less than Capacity
    void push(const Element& value)
    {
        uint32_t newIndex;
        uint32_t parentIndex = parent(mHeapSize);

        // Sift up: walk ancestors from the insertion point, shifting down each element
        // that 'value' outranks, until the heap property is restored.
        for (newIndex = mHeapSize; newIndex > 0 && compare(value, data()[parentIndex]); newIndex = parentIndex, parentIndex= parent(newIndex))
        {
            data()[ newIndex ] = data()[parentIndex];
        }
        data()[newIndex] = value;
        mHeapSize++;
        NVBLAST_ASSERT(valid());
    }

    //! Delete the highest priority element. Only valid when non-empty.
    Element pop()
    {
        NVBLAST_ASSERT(mHeapSize > 0);
        uint32_t i, child;
        //try to avoid LHS
        uint32_t tempHs = mHeapSize-1;
        mHeapSize = tempHs;
        Element min = data()[0];
        Element last = data()[tempHs];

        // Sift down: move the former last element toward the leaves, promoting the
        // higher-priority child at each level, until it fits.
        for (i = 0; (child = left(i)) < tempHs; i = child)
        {
            /* Find highest priority child */
            const uint32_t rightChild = child + 1;

            child += ((rightChild < tempHs) & compare((data()[rightChild]), (data()[child]))) ? 1 : 0;

            if(compare(last, data()[child]))
                break;

            data()[i] = data()[child];
        }
        data()[ i ] = last;

        NVBLAST_ASSERT(valid());
        return min;
    }

    //! Make sure the priority queue sort all elements correctly
    //! (checks every element against the root only — a necessary, not sufficient, heap condition)
    bool valid() const
    {
        const Element& min = data()[0];
        for(uint32_t i=1; i<mHeapSize; ++i)
        {
            if(compare(data()[i], min))
                return false;
        }

        return true;
    }

    //! Return number of elements in the priority queue
    uint32_t size() const
    {
        return mHeapSize;
    }

private:
    uint32_t mHeapSize;    // current element count; element storage follows the object

    NV_FORCE_INLINE Element* data()
    {
        return (Element*)((char*)this + sizeof(FixedPriorityQueue<Element, Comparator>));
    }

    NV_FORCE_INLINE Element* data() const
    {
        return (Element*)((char*)this + sizeof(FixedPriorityQueue<Element, Comparator>));
    }

    // Forwards to the inherited comparator.
    bool compare(const Element& a, const Element& b) const
    {
        return Comparator::operator()(a,b);
    }

    // Left child index in the implicit binary heap.
    static uint32_t left(uint32_t nodeIndex)
    {
        return (nodeIndex << 1) + 1;
    }

    // Parent index in the implicit binary heap.
    static uint32_t parent(uint32_t nodeIndex)
    {
        return (nodeIndex - 1) >> 1;
    }

    // Assignment disabled.
    FixedPriorityQueue<Element, Comparator>& operator = (const FixedPriorityQueue<Element, Comparator>);
};
+
+} // namespace Blast
+} // namespace Nv
+
+#endif // ifndef NVBLASTFIXEDPRIORITYQUEUE_H
diff --git a/NvBlast/sdk/common/NvBlastGeometry.h b/NvBlast/sdk/common/NvBlastGeometry.h
new file mode 100644
index 0000000..e83ff95
--- /dev/null
+++ b/NvBlast/sdk/common/NvBlastGeometry.h
@@ -0,0 +1,122 @@
+/*
+* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+
+#ifndef NVBLASTGEOMETRY_H
+#define NVBLASTGEOMETRY_H
+
+#include "NvBlastTypes.h"
+#include "NvBlastMath.h"
+
+#include<limits>
+
+
+namespace Nv {
+namespace Blast{
+
/**
Find the graph node nearest to a given point, traversing a linked list of graph nodes.

For every intact bond (bondHealths[bondIndex] > 0.0f) between a listed node and a
higher-indexed neighbor, the squared distance from 'point' to the bond centroid is
computed; for the closest such bond, the sign of the point's offset projected on the
bond normal selects which of the bond's two nodes is returned.
NOTE(review): this assumes the bond normal points from the lower-indexed node toward
the higher-indexed neighbor — confirm against NvBlastBond's documentation.

\param[in] point                       position to search for (only components 0-2 are read)
\param[in] firstGraphNodeIndex         first node in the linked list to visit
\param[in] familyGraphNodeIndexLinks   per-node "next" indices; an invalid index ends traversal
\param[in] adjacencyPartition          per-node [start, stop) ranges into the adjacency arrays
\param[in] adjacentNodeIndices         neighbor node index for each adjacency entry
\param[in] adjacentBondIndices         bond index for each adjacency entry
\param[in] bonds                       bond array (centroid and normal are read)
\param[in] bondHealths                 bond health values; > 0 means intact

\return the closest node index found; firstGraphNodeIndex if no intact bond was seen.
*/
NV_FORCE_INLINE uint32_t findNodeByPositionLinked(const float point[4],
    const uint32_t firstGraphNodeIndex, const uint32_t* familyGraphNodeIndexLinks,
    const uint32_t* adjacencyPartition, const uint32_t* adjacentNodeIndices, const uint32_t* adjacentBondIndices,
    const NvBlastBond* bonds, const float* bondHealths)
{
    uint32_t nodeIndex = firstGraphNodeIndex;
    uint32_t closestNode = nodeIndex;
    float minDist = std::numeric_limits<float>().max();

    while (!Nv::Blast::isInvalidIndex(nodeIndex))
    {
        const uint32_t startIndex = adjacencyPartition[nodeIndex];
        const uint32_t stopIndex = adjacencyPartition[nodeIndex + 1];

        for (uint32_t adjacentIndex = startIndex; adjacentIndex < stopIndex; adjacentIndex++)
        {
            const uint32_t neighbourIndex = adjacentNodeIndices[adjacentIndex];
            if (nodeIndex < neighbourIndex)    // consider each node pair only once
            {
                const uint32_t bondIndex = adjacentBondIndices[adjacentIndex];
                if (bondHealths[bondIndex] > 0.0f)    // only intact bonds
                {
                    const NvBlastBond& bond = bonds[bondIndex];

                    const float* centroid = bond.centroid;
                    float d[3]; VecMath::sub(point, centroid, d);
                    float dist = VecMath::dot(d, d);    // squared distance to bond centroid

                    if (dist < minDist)
                    {
                        minDist = dist;
                        // Which side of the bond plane the point lies on picks the endpoint.
                        float s = VecMath::dot(d, bond.normal);
                        closestNode = s < 0 ? nodeIndex : neighbourIndex;
                    }
                }
            }
        }
        nodeIndex = familyGraphNodeIndexLinks[nodeIndex];
    }

    return closestNode;
}
+
+
/**
Find the graph node nearest to a given point, over an explicit array of graph node
indices.  Uses the same bond-based heuristic as findNodeByPositionLinked above.

NOTE(review): when graphNodesCount > 2 the fallback result is invalidIndex (returned if
no intact bond exists); for one or two nodes the fallback is graphNodeIndices[0].  The
threshold of 2 (rather than 1) looks suspicious — confirm it is intended.

\param[in] point                position to search for (only components 0-2 are read)
\param[in] graphNodesCount      number of entries in graphNodeIndices
\param[in] graphNodeIndices     node indices to examine
\param[in] adjacencyPartition   per-node [start, stop) ranges into the adjacency arrays
\param[in] adjacentNodeIndices  neighbor node index for each adjacency entry
\param[in] adjacentBondIndices  bond index for each adjacency entry
\param[in] bonds                bond array (centroid and normal are read)
\param[in] bondHealths          bond health values; > 0 means intact

\return the closest node index found (see note above for the fallback behavior).
*/
NV_FORCE_INLINE uint32_t findNodeByPosition(const float point[4],
    const uint32_t graphNodesCount, const uint32_t* graphNodeIndices,
    const uint32_t* adjacencyPartition, const uint32_t* adjacentNodeIndices, const uint32_t* adjacentBondIndices,
    const NvBlastBond* bonds, const float* bondHealths)
{
    uint32_t closestNode = graphNodesCount > 2 ? invalidIndex<uint32_t>() : graphNodeIndices[0];
    float minDist = std::numeric_limits<float>().max();

    for (uint32_t i = 0; i < graphNodesCount; i++)
    {
        const uint32_t nodeIndex = graphNodeIndices[i];
        const uint32_t startIndex = adjacencyPartition[nodeIndex];
        const uint32_t stopIndex = adjacencyPartition[nodeIndex + 1];

        for (uint32_t adjacentIndex = startIndex; adjacentIndex < stopIndex; adjacentIndex++)
        {
            const uint32_t bondIndex = adjacentBondIndices[adjacentIndex];
            if (bondHealths[bondIndex] > 0.0f)    // only intact bonds
            {
                const uint32_t neighbourIndex = adjacentNodeIndices[adjacentIndex];
                if (nodeIndex < neighbourIndex)    // consider each node pair only once
                {
                    const NvBlastBond& bond = bonds[bondIndex];

                    const float* centroid = bond.centroid;
                    float d[3]; VecMath::sub(point, centroid, d);
                    float dist = VecMath::dot(d, d);    // squared distance to bond centroid

                    if (dist < minDist)
                    {
                        minDist = dist;
                        // Which side of the bond plane the point lies on picks the endpoint.
                        float s = VecMath::dot(d, bond.normal);
                        closestNode = s < 0 ? nodeIndex : neighbourIndex;
                    }
                }
            }
        }
    }
    return closestNode;
}
+
+
/**
Convenience overload: same as findNodeByPosition above, taking the adjacency arrays
from an NvBlastSupportGraph.
*/
NV_FORCE_INLINE uint32_t findNodeByPosition(const float point[4],
    const uint32_t graphNodesCount, const uint32_t* graphNodeIndices,
    const NvBlastSupportGraph& graph,
    const NvBlastBond* bonds, const float* bondHealths)
{
    return findNodeByPosition(point, graphNodesCount, graphNodeIndices, graph.adjacencyPartition, graph.adjacentNodeIndices, graph.adjacentBondIndices, bonds, bondHealths);
}
+
+} // namespace Blast
+} // namespace Nv
+
+
+#endif // NVBLASTGEOMETRY_H
diff --git a/NvBlast/sdk/common/NvBlastIncludeWindows.h b/NvBlast/sdk/common/NvBlastIncludeWindows.h
new file mode 100644
index 0000000..9115fd4
--- /dev/null
+++ b/NvBlast/sdk/common/NvBlastIncludeWindows.h
@@ -0,0 +1,90 @@
// This code contains NVIDIA Confidential Information and is disclosed to you
// under a form of NVIDIA software license agreement provided separately to you.
+/*
+* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#ifndef NVBLASTINCLUDEWINDOWS_H
+#define NVBLASTINCLUDEWINDOWS_H
+
+#ifndef _WINDOWS_ // windows already included if this is defined
+
+#include "NvBlastPreprocessor.h"
+
+#ifndef _WIN32
+#error "This file should only be included by Windows builds!!"
+#endif
+
+// We only support >= Windows XP, and we need this for critical section and
+#if !NV_WINRT
+#define _WIN32_WINNT 0x0501
+#else
+#define _WIN32_WINNT 0x0602
+#endif
+
+// turn off as much as we can for windows. All we really need is the thread functions(critical sections/Interlocked*
+// etc)
+#define NOGDICAPMASKS
+#define NOVIRTUALKEYCODES
+#define NOWINMESSAGES
+#define NOWINSTYLES
+#define NOSYSMETRICS
+#define NOMENUS
+#define NOICONS
+#define NOKEYSTATES
+#define NOSYSCOMMANDS
+#define NORASTEROPS
+#define NOSHOWWINDOW
+#define NOATOM
+#define NOCLIPBOARD
+#define NOCOLOR
+#define NOCTLMGR
+#define NODRAWTEXT
+#define NOGDI
+#define NOMB
+#define NOMEMMGR
+#define NOMETAFILE
+#define NOMINMAX
+#define NOOPENFILE
+#define NOSCROLL
+#define NOSERVICE
+#define NOSOUND
+#define NOTEXTMETRIC
+#define NOWH
+#define NOWINOFFSETS
+#define NOCOMM
+#define NOKANJI
+#define NOHELP
+#define NOPROFILER
+#define NODEFERWINDOWPOS
+#define NOMCX
+#ifndef WIN32_LEAN_AND_MEAN
+#define WIN32_LEAN_AND_MEAN
+#endif
+#if !NV_WINRT
+#define NOUSER
+#define NONLS
+#define NOMSG
+#endif
+
+#pragma warning(push)
+#pragma warning(disable : 4668) //'symbol' is not defined as a preprocessor macro, replacing with '0' for 'directives'
+#include <windows.h>
+#pragma warning(pop)
+
+#if NV_SSE2
+#include <xmmintrin.h>
+#endif
+
+#endif // #ifndef _WINDOWS_
+
+#endif // #ifndef NVBLASTINCLUDEWINDOWS_H
diff --git a/NvBlast/sdk/common/NvBlastIndexFns.h b/NvBlast/sdk/common/NvBlastIndexFns.h
new file mode 100644
index 0000000..a800a73
--- /dev/null
+++ b/NvBlast/sdk/common/NvBlastIndexFns.h
@@ -0,0 +1,127 @@
+/*
+ * Copyright (c) 2008-2017, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+#ifndef NVBLASTINDEXFNS_H
+#define NVBLASTINDEXFNS_H
+
+
+#include "NvBlastTypes.h"
+
+#include <cstring>
+
+
+namespace Nv
+{
+namespace Blast
+{
+
+/**
+Set to invalid index.
+*/
+template<typename T>
+NV_INLINE T invalidIndex()
+{
+ return ~(T)0;
+}
+
+
+/**
+Test for invalid index (max representable integer).
+*/
+template<typename T>
+NV_INLINE bool isInvalidIndex(T index)
+{
+ return index == invalidIndex<T>();
+}
+
+
+/**
+ Create a lookup table for data sorted by a templated index type.
+
+ Note: when using this function with unsigned integer index types invalidIndex<T>() is treated as a value less than zero.
+
+ On input:
+
+ The indices must lie in the interval [indexBase, indexBase+indexRange].
+
+ indexSource must point to the first index in the data.
+
+ indexCount must be set to the number of indices in the data.
+
+ indexByteStride must be set to the distance, in bytes, between subequent indices.
+
+ lookup must point to a T array of size indexRange+2.
+
+ On return:
+
+ lookup will be filled such that:
+
+ lookup[i] = the position of first data element with index (i + indexBase)
+
+ lookup[indexRange+1] = indexCount
+
+ The last (indexRange+1) element is used so that one may always determine the number of data elements with the given index using:
+
+ count = lookup[i+1] - lookup[i]
+
+ Note, if an index (i + indexBase) is not present in the data then, lookup[i+1] = lookup[i], so the count (above) will correctly be zero.
+ In this case, the actual value of lookup[i] is irrelevant.
+*/
template<typename T>
void createIndexStartLookup(T* lookup, T indexBase, T indexRange, T* indexSource, T indexCount, T indexByteStride)
{
    // Shift the base by one so that invalidIndex<T>() (all bits set) orders below
    // every valid index once 1 is added to each source index below.
    ++indexBase;
    T pos = 0;
    const T* src = indexSource;    // cursor persists across lookup slots (data is sorted)
    for (T i = 0; i <= indexRange; ++i)
    {
        // Advance past all entries whose (shifted) index is below slot i.
        while (pos < indexCount)
        {
            if (*src + 1 >= i + indexBase)    // +1 to order invalidIndex<T>() as lowest value
            {
                break;
            }
            ++pos;
            src = (const T*)((uintptr_t)src + indexByteStride);
        }
        lookup[i] = pos;
    }
    lookup[indexRange + 1] = indexCount;
}
+
+
+/**
+Creates the inverse of a map, such that inverseMap[map[i]] = i.
+Unmapped indices are set to invalidIndex<T>.
+
+\param[out] inverseMap inverse map space of given size
+\param[in] map original map of given size, unmapped entries must contain invalidIndex<T>
+\param[in] size size of the involved maps
+*/
+template<typename T>
+void invertMap(T* inverseMap, const T* map, const T size)
+{
+ memset(inverseMap, invalidIndex<T>(), size*sizeof(T));
+
+ for (T i = 0; i < size; i++)
+ {
+ if (!isInvalidIndex(map[i]))
+ {
+ inverseMap[map[i]] = i;
+ }
+ }
+}
+
+} // end namespace Blast
+} // end namespace Nv
+
+
+#endif // #ifndef NVBLASTINDEXFNS_H
diff --git a/NvBlast/sdk/common/NvBlastIteratorBase.h b/NvBlast/sdk/common/NvBlastIteratorBase.h
new file mode 100644
index 0000000..9053f4b
--- /dev/null
+++ b/NvBlast/sdk/common/NvBlastIteratorBase.h
@@ -0,0 +1,135 @@
+/*
+ * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+#ifndef NVBLASTITERATORBASE_H
+#define NVBLASTITERATORBASE_H
+
+
+#include "NvBlastIndexFns.h"
+
+namespace Nv
+{
+namespace Blast
+{
+
+/**
+Common functionality and implementation for iterators over an index, using invalidIndex<T>() to indicate termination.
+Derived class needs to implement increment operators.
+*/
template<typename T>
class IteratorBase
{
public:
    /** Constructor sets m_curr value */
    IteratorBase(T curr);

    /** Validity of current value. */
    operator bool() const;

    /** Current value. */
    operator T() const;

protected:
    T m_curr;    // current index; invalidIndex<T>() once iteration is exhausted
};


//////// IteratorBase<T> inline methods ////////

template<typename T>
NV_INLINE IteratorBase<T>::IteratorBase(T curr) : m_curr(curr)
{
}


// Valid while the current index is not the invalid-index sentinel.
template<typename T>
NV_INLINE IteratorBase<T>::operator bool() const
{
    return !isInvalidIndex<T>(m_curr);
}


// Implicit conversion to the current index value.
template<typename T>
NV_INLINE IteratorBase<T>::operator T() const
{
    return m_curr;
}
+
+
+/**
+Common functionality and implementation for an indexed linked list iterator
+*/
template<typename IndexType>
class LListIt : public IteratorBase<IndexType>
{
public:
    /** Starts at 'curr'; 'links' holds the per-index "next" values. */
    LListIt(IndexType curr, IndexType* links);

    /** Pre-increment. Only use if valid() == true. */
    uint32_t operator ++ ();

protected:
    IndexType* m_links;    // "next" index per element; not owned by the iterator
};


//////// LListIt<IndexType> inline methods ////////

template<typename IndexType>
NV_INLINE LListIt<IndexType>::LListIt(IndexType curr, IndexType* links) : IteratorBase<IndexType>(curr), m_links(links)
{
}


// Follows the link from the current index; the result is invalid when the list ends.
template<typename IndexType>
NV_INLINE uint32_t LListIt<IndexType>::operator ++ ()
{
    NVBLAST_ASSERT((bool)(*this));
    return (this->m_curr = m_links[this->m_curr]);
}
+
+
+/**
+Common functionality and implementation for an IndexDList<IndexType> iterator
+*/
template<typename IndexType>
class DListIt : public IteratorBase<IndexType>
{
public:
    /** Starts at 'curr'; 'links' is the doubly-linked index array. */
    DListIt(IndexType curr, IndexDLink<IndexType>* links);

    /** Pre-increment. Only use if valid() == true. */
    uint32_t operator ++ ();

protected:
    IndexDLink<IndexType>* m_links;    // doubly-linked adjacency per element; not owned
};


//////// DListIt<IndexType> inline methods ////////

template<typename IndexType>
NV_INLINE DListIt<IndexType>::DListIt(IndexType curr, IndexDLink<IndexType>* links) : IteratorBase<IndexType>(curr), m_links(links)
{
}


// Advances along m_adj[1] — presumably the "next" direction of IndexDLink
// (declared in NvBlastDLink.h); confirm against that header.
template<typename IndexType>
NV_INLINE uint32_t DListIt<IndexType>::operator ++ ()
{
    NVBLAST_ASSERT((bool)(*this));
    return (this->m_curr = m_links[this->m_curr].m_adj[1]);
}
+
+} // end namespace Blast
+} // end namespace Nv
+
+
+#endif // #ifndef NVBLASTITERATORBASE_H
diff --git a/NvBlast/sdk/common/NvBlastMath.h b/NvBlast/sdk/common/NvBlastMath.h
new file mode 100644
index 0000000..0a29f14
--- /dev/null
+++ b/NvBlast/sdk/common/NvBlastMath.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+#ifndef NVBLASTMATH_H
+#define NVBLASTMATH_H
+
+#include <math.h>
+
+namespace Nv
+{
+namespace Blast
+{
+
+namespace VecMath
+{
+
+
+NV_INLINE void div(float a[3], float divisor)
+{
+ for (int i = 0; i < 3; i++)
+ a[i] /= divisor;
+}
+
+NV_INLINE void mul(float a[3], float multiplier)
+{
+ for (int i = 0; i < 3; i++)
+ a[i] *= multiplier;
+}
+
+NV_INLINE void add(const float a[3], float b[3])
+{
+ for (int i = 0; i < 3; i++)
+ b[i] = a[i] + b[i];
+}
+
+NV_INLINE void add(const float a[3], const float b[3], float r[3])
+{
+ for (int i = 0; i < 3; i++)
+ r[i] = a[i] + b[i];
+}
+
+NV_INLINE void sub(const float a[3], const float b[3], float r[3])
+{
+ for (int i = 0; i < 3; i++)
+ r[i] = a[i] - b[i];
+}
+
+NV_INLINE float dot(const float a[3], const float b[3])
+{
+ float r = 0;
+ for (int i = 0; i < 3; i++)
+ r += a[i] * b[i];
+ return r;
+}
+
+NV_INLINE float normal(const float a[3], float r[3])
+{
+ float length = sqrtf(dot(a, a));
+ for (int i = 0; i < 3; i++)
+ r[i] = a[i] / length;
+
+ return length;
+}
+
+
+} // namespace VecMath
+
+} // namespace Blast
+} // namespace Nv
+
+
+#endif // #ifndef NVBLASTMATH_H
diff --git a/NvBlast/sdk/common/NvBlastMemory.h b/NvBlast/sdk/common/NvBlastMemory.h
new file mode 100644
index 0000000..0fb6a06
--- /dev/null
+++ b/NvBlast/sdk/common/NvBlastMemory.h
@@ -0,0 +1,123 @@
+/*
+ * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+#ifndef NVBLASTMEMORY_H
+#define NVBLASTMEMORY_H
+
+#include <math.h>
+
+namespace Nv
+{
+namespace Blast
+{
+
+
+/**
+Utility function to align the given value to the next 16-byte boundary.
+
+Returns the aligned value.
+*/
+template<typename T>
+NV_INLINE T align16(T value)
+{
+ return (value + 0xF)&~(T)0xF;
+}
+
+
/** Offset void* pointer by 'offset' bytes helper-functions */

/** Returns p advanced by 'offset' bytes, cast to pointer type T. */
template <typename T>
NV_INLINE T pointerOffset(void* p, ptrdiff_t offset)
{
    return reinterpret_cast<T>(reinterpret_cast<char*>(p)+offset);
}

/** Const-pointer version of the above. */
template <typename T>
NV_INLINE T pointerOffset(const void* p, ptrdiff_t offset)
{
    return reinterpret_cast<T>(reinterpret_cast<const char*>(p)+offset);
}

/** Convenience overload returning const void*. */
NV_INLINE const void* pointerOffset(const void* p, ptrdiff_t offset)
{
    return pointerOffset<const void*>(p, offset);
}

/** Convenience overload returning void*. */
NV_INLINE void* pointerOffset(void* p, ptrdiff_t offset)
{
    return pointerOffset<void*>(p, offset);
}
+
+} // namespace Blast
+} // namespace Nv
+
+
+/** Block data offset and accessor macro. */
+#define NvBlastBlockData(_dataType, _name, _accessor) \
+_dataType* _accessor() const \
+{ \
+ return (_dataType*)((uintptr_t)this + _name); \
+} \
+uint32_t _name
+
+
+/** Block data offset and accessor macro for an array (includes an _accessor##ArraySize() function which returns the last expression). */
+#define NvBlastBlockArrayData(_dataType, _name, _accessor, _sizeExpr) \
+_dataType* _accessor() const \
+{ \
+ return (_dataType*)((uintptr_t)this + _name); \
+} \
+uint32_t _accessor##ArraySize() const \
+{ \
+ return _sizeExpr; \
+} \
+uint32_t _name
+
+
+/** Block data offset generation macros. */
+
+/** Start offset generation with this. */
+#define NvBlastCreateOffsetStart(_baseOffset) \
+size_t _lastOffset = _baseOffset; \
+size_t _lastSize = 0
+
+/** Create the next offset generation with this. The value will be aligned to a 16-byte boundary. */
+#define NvBlastCreateOffsetAlign16(_name, _size) \
+_name = align16(_lastOffset + _lastSize); \
+_lastOffset = _name; \
+_lastSize = _size
+
+/** End offset generation with this. It evaluates to the (16-byte aligned) total size of the data block. */
+#define NvBlastCreateOffsetEndAlign16() \
+align16(_lastOffset + _lastSize)
+
+
/** Stack allocation */
#if NV_WINDOWS_FAMILY
#include <malloc.h>
#define NvBlastAlloca(x) _alloca(x)
#elif NV_LINUX || NV_ANDROID
#include <malloc.h>
#define NvBlastAlloca(x) alloca(x)
#elif NV_APPLE_FAMILY
#include <alloca.h>
#define NvBlastAlloca(x) alloca(x)
#elif NV_PS4
#include <memory.h>
#define NvBlastAlloca(x) alloca(x)
#elif NV_XBOXONE
#include <malloc.h>
#define NvBlastAlloca(x) alloca(x)
#endif

/** 16-byte-aligned stack allocation.  Fixed: previously expanded to PxAlloca, a PhysX
    macro that is not defined in this SDK; it now uses NvBlastAlloca defined above. */
#define NvBlastAllocaAligned16(x) (void*)(((uintptr_t)NvBlastAlloca(x + 0xF) + 0xF) & ~(uintptr_t)0xF)
+
+
+#endif // #ifndef NVBLASTMEMORY_H
diff --git a/NvBlast/sdk/common/NvBlastPreprocessorInternal.h b/NvBlast/sdk/common/NvBlastPreprocessorInternal.h
new file mode 100644
index 0000000..2d3e185
--- /dev/null
+++ b/NvBlast/sdk/common/NvBlastPreprocessorInternal.h
@@ -0,0 +1,36 @@
+/*
+* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#ifndef NVBLASTPREPROCESSORINTERNAL_H
+#define NVBLASTPREPROCESSORINTERNAL_H
+
+
+#include "NvPreprocessor.h"
+
+
/** Blast will check function parameters for debug and checked builds. */
#define NVBLAST_CHECK_PARAMS (NV_DEBUG || NV_CHECKED)


/**
Parameter check.  In checked builds: if _expr is false, report _msg through
_logFn (when _logFn is non-null) and then execute _onFail (typically a return
statement).  In non-checked builds _expr is NOT evaluated and only _logFn is
referenced (to silence unused-parameter warnings).

The checked expansion is wrapped in do { } while(0) so the macro behaves as a
single statement; the original bare-brace form broke "if (c) NVBLAST_CHECK(...);
else ..." in checked builds while compiling in release, making behavior differ
by build configuration.
*/
#if NVBLAST_CHECK_PARAMS
#define NVBLAST_CHECK(_expr, _logFn, _msg, _onFail) \
    do \
    { \
        if (!(_expr)) \
        { \
            if (_logFn) { _logFn(NvBlastMessage::Error, _msg, __FILE__, __LINE__); } \
            { _onFail; } \
        } \
    } while (0)
#else
#define NVBLAST_CHECK(_expr, _logFn, _msg, _onFail) NV_UNUSED(_logFn)
#endif
+
+
+#endif // ifndef NVBLASTPREPROCESSORINTERNAL_H
diff --git a/NvBlast/sdk/common/NvBlastTime.cpp b/NvBlast/sdk/common/NvBlastTime.cpp
new file mode 100644
index 0000000..b16e573
--- /dev/null
+++ b/NvBlast/sdk/common/NvBlastTime.cpp
@@ -0,0 +1,23 @@
+/*
+* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#include "NvBlastTime.h"
+#include "NvBlast.h"
+#include <cstring>
+
+namespace Nv
+{
+namespace Blast
+{
+
+const double Time::s_secondsPerTick = Time::getTickDuration();
+
+} // namespace Blast
+} // namespace Nv
diff --git a/NvBlast/sdk/common/NvBlastTime.h b/NvBlast/sdk/common/NvBlastTime.h
new file mode 100644
index 0000000..47af36b
--- /dev/null
+++ b/NvBlast/sdk/common/NvBlastTime.h
@@ -0,0 +1,108 @@
// This code contains NVIDIA Confidential Information and is disclosed to you
// under a form of NVIDIA software license agreement provided separately to you.
//
// Notice
// NVIDIA Corporation and its licensors retain all intellectual property rights in this software.
+/*
+* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#ifndef NVBLASTTIME_H
+#define NVBLASTTIME_H
+
+#include "NvBlastTypes.h"
+
+
namespace Nv
{
namespace Blast
{

/**
Interval timer built on a platform tick counter.  The tick source and tick
duration are supplied by the platform-specific getTimeTicks()/getTickDuration()
implementations found below in this header.
*/
class Time
{
public:
    /** Construction starts the first interval. */
    Time() : m_lastTickCount(getTimeTicks()) {}

    /** Returns the ticks elapsed since construction (or the previous call) and begins a new interval. */
    int64_t getElapsedTicks()
    {
        const int64_t now = getTimeTicks();
        const int64_t elapsed = now - m_lastTickCount;
        m_lastTickCount = now;
        return elapsed;
    }

    /** Returns the ticks elapsed in the current interval without restarting it. */
    int64_t peekElapsedTicks() const
    {
        return getTimeTicks() - m_lastTickCount;
    }

    /** Returns the raw tick count captured at the start of the current interval. */
    int64_t getLastTickCount() const
    {
        return m_lastTickCount;
    }

    /** Converts a tick count into seconds using the platform tick duration. */
    static double seconds(int64_t ticks)
    {
        return ticks * s_secondsPerTick;
    }

private:
    int64_t getTimeTicks() const;           // platform-specific tick source (defined below)
    static double getTickDuration();        // platform-specific seconds-per-tick (defined below)

    int64_t m_lastTickCount;                // tick count at the start of the current interval
    static const double s_secondsPerTick;   // initialized in NvBlastTime.cpp
};

} // namespace Blast
} // namespace Nv
+
+
//////// Time inline functions for various platforms ////////

#if NV_MICROSOFT_FAMILY

#include "NvBlastIncludeWindows.h"

/** Tick source: QueryPerformanceCounter. */
NV_INLINE int64_t Nv::Blast::Time::getTimeTicks() const
{
    LARGE_INTEGER a;
    QueryPerformanceCounter(&a);
    return a.QuadPart;
}

/** Tick duration: reciprocal of the QPC frequency (constant for the life of the process). */
NV_INLINE double Nv::Blast::Time::getTickDuration()
{
    LARGE_INTEGER a;
    QueryPerformanceFrequency(&a);
    return 1.0 / (double)a.QuadPart;
}

#elif NV_UNIX_FAMILY

#include <time.h>

NV_INLINE int64_t Nv::Blast::Time::getTimeTicks() const
{
    // Use CLOCK_MONOTONIC rather than the original CLOCK_REALTIME: Time only
    // measures intervals, and the realtime clock can jump backwards/forwards
    // (NTP slew, manual clock changes), which would corrupt elapsed-time
    // results.  The monotonic clock is the POSIX-prescribed source for
    // interval timing.
    struct timespec mCurrTimeInt;
    clock_gettime(CLOCK_MONOTONIC, &mCurrTimeInt);
    return (static_cast<int64_t>(mCurrTimeInt.tv_sec) * 1000000000) + (static_cast<int64_t>(mCurrTimeInt.tv_nsec));
}

NV_INLINE double Nv::Blast::Time::getTickDuration()
{
    return 1.e-9;   // ticks are nanoseconds on this path
}

#elif NV_PS4

#include "ps4/NvBlastTimePS4.h"

#endif
+
+#endif // #ifndef NVBLASTTIME_H
diff --git a/NvBlast/sdk/common/NvBlastTimers.cpp b/NvBlast/sdk/common/NvBlastTimers.cpp
new file mode 100644
index 0000000..ec93134
--- /dev/null
+++ b/NvBlast/sdk/common/NvBlastTimers.cpp
@@ -0,0 +1,29 @@
+/*
+* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#include "NvBlast.h"
+#include "NvBlastTime.h"
+#include <cstring>
+
+
+extern "C"
+{
+
+void NvBlastTimersReset(NvBlastTimers* timers)
+{
+ memset(timers, 0, sizeof(NvBlastTimers));
+}
+
+double NvBlastTicksToSeconds(int64_t ticks)
+{
+ return Nv::Blast::Time::seconds(ticks);
+}
+
+} // extern "C"
diff --git a/NvBlast/sdk/compiler/cmake/NvBlast.cmake b/NvBlast/sdk/compiler/cmake/NvBlast.cmake
new file mode 100644
index 0000000..5ac9ce9
--- /dev/null
+++ b/NvBlast/sdk/compiler/cmake/NvBlast.cmake
@@ -0,0 +1,89 @@
#
# Build NvBlast — the core low-level library (common utilities + solver + public headers)
#

SET(COMMON_SOURCE_DIR ${PROJECT_SOURCE_DIR}/common)
SET(SOLVER_SOURCE_DIR ${PROJECT_SOURCE_DIR}/lowlevel/source)
SET(PUBLIC_INCLUDE_DIR ${PROJECT_SOURCE_DIR}/lowlevel/include)

# Include here after the directories are defined so that the platform specific file can use the variables.
include(${PROJECT_CMAKE_FILES_DIR}/${TARGET_BUILD_PLATFORM}/NvBlast.cmake)

# Shared utility sources (also compiled into several extension libraries)
SET(COMMON_FILES
    ${BLAST_PLATFORM_COMMON_FILES}

    ${COMMON_SOURCE_DIR}/NvBlastAssert.cpp
    ${COMMON_SOURCE_DIR}/NvBlastAssert.h
    ${COMMON_SOURCE_DIR}/NvBlastAtomic.cpp
    ${COMMON_SOURCE_DIR}/NvBlastAtomic.h
    ${COMMON_SOURCE_DIR}/NvBlastDLink.h
    ${COMMON_SOURCE_DIR}/NvBlastFixedArray.h
    ${COMMON_SOURCE_DIR}/NvBlastFixedBitmap.h
    ${COMMON_SOURCE_DIR}/NvBlastFixedBoolArray.h
    ${COMMON_SOURCE_DIR}/NvBlastFixedPriorityQueue.h
    ${COMMON_SOURCE_DIR}/NvBlastGeometry.h
# ${COMMON_SOURCE_DIR}/NvBlastIndexFns.cpp
    ${COMMON_SOURCE_DIR}/NvBlastIndexFns.h
    ${COMMON_SOURCE_DIR}/NvBlastIteratorBase.h
    ${COMMON_SOURCE_DIR}/NvBlastMath.h
    ${COMMON_SOURCE_DIR}/NvBlastMemory.h
    ${COMMON_SOURCE_DIR}/NvBlastPreprocessorInternal.h
    ${COMMON_SOURCE_DIR}/NvBlastTime.cpp
    ${COMMON_SOURCE_DIR}/NvBlastTime.h
    ${COMMON_SOURCE_DIR}/NvBlastTimers.cpp
)

# Public (installed) headers
SET(PUBLIC_FILES
    ${PUBLIC_INCLUDE_DIR}/NvBlast.h
    ${PUBLIC_INCLUDE_DIR}/NvBlastPreprocessor.h
    ${PUBLIC_INCLUDE_DIR}/NvBlastTypes.h
    ${PUBLIC_INCLUDE_DIR}/NvPreprocessor.h
)

# Low-level solver sources
SET(SOLVER_FILES
    ${SOLVER_SOURCE_DIR}/NvBlastActor.cpp
    ${SOLVER_SOURCE_DIR}/NvBlastActor.h
    ${SOLVER_SOURCE_DIR}/NvBlastFamilyGraph.cpp
    ${SOLVER_SOURCE_DIR}/NvBlastFamilyGraph.h
    ${SOLVER_SOURCE_DIR}/NvBlastActorSerializationBlock.cpp
    ${SOLVER_SOURCE_DIR}/NvBlastActorSerializationBlock.h
    ${SOLVER_SOURCE_DIR}/NvBlastAsset.cpp
    ${SOLVER_SOURCE_DIR}/NvBlastAssetHelper.cpp
    ${SOLVER_SOURCE_DIR}/NvBlastAsset.h
    ${SOLVER_SOURCE_DIR}/NvBlastSupportGraph.h
    ${SOLVER_SOURCE_DIR}/NvBlastChunkHierarchy.h
    ${SOLVER_SOURCE_DIR}/NvBlastFamily.cpp
    ${SOLVER_SOURCE_DIR}/NvBlastFamily.h
)

# BLAST_LIB_TYPE (STATIC/SHARED) is set by the platform cmake file included above
ADD_LIBRARY(NvBlast ${BLAST_LIB_TYPE}
    ${COMMON_FILES}
    ${PUBLIC_FILES}
    ${SOLVER_FILES}
)

SOURCE_GROUP("common" FILES ${COMMON_FILES})
SOURCE_GROUP("public" FILES ${PUBLIC_FILES})
SOURCE_GROUP("solver" FILES ${SOLVER_FILES})

# Target specific compile options


TARGET_INCLUDE_DIRECTORIES(NvBlast
    PRIVATE ${BLAST_PLATFORM_INCLUDES}

    PUBLIC ${PUBLIC_INCLUDE_DIR}
    PRIVATE ${COMMON_SOURCE_DIR}
)

TARGET_COMPILE_DEFINITIONS(NvBlast
    PRIVATE ${BLAST_COMPILE_DEFS}
)

# Match per-configuration PDB names to the per-configuration library names
SET_TARGET_PROPERTIES(NvBlast PROPERTIES
    PDB_NAME_DEBUG "NvBlast${CMAKE_DEBUG_POSTFIX}"
    PDB_NAME_CHECKED "NvBlast${CMAKE_CHECKED_POSTFIX}"
    PDB_NAME_PROFILE "NvBlast${CMAKE_PROFILE_POSTFIX}"
    PDB_NAME_RELEASE "NvBlast${CMAKE_RELEASE_POSTFIX}"
)

diff --git a/NvBlast/sdk/compiler/cmake/NvBlastExtAuthoring.cmake b/NvBlast/sdk/compiler/cmake/NvBlastExtAuthoring.cmake
new file mode 100644
index 0000000..dda8056
--- /dev/null
+++ b/NvBlast/sdk/compiler/cmake/NvBlastExtAuthoring.cmake
@@ -0,0 +1,106 @@
#
# Build NvBlastExtAuthoring (fracturing/authoring extension; links PhysX)
#


SET(COMMON_SOURCE_DIR ${PROJECT_SOURCE_DIR}/common)

SET(AUTHORING_EXT_SOURCE_DIR ${PROJECT_SOURCE_DIR}/extensions/authoring/source)
SET(COMMON_EXT_SOURCE_DIR ${PROJECT_SOURCE_DIR}/extensions/common/source)
SET(AUTHORING_EXT_INCLUDE_DIR ${PROJECT_SOURCE_DIR}/extensions/authoring/include)

FIND_PACKAGE(PxSharedSDK $ENV{PM_PxShared_VERSION} REQUIRED)
FIND_PACKAGE(PhysXSDK $ENV{PM_PhysX_VERSION} REQUIRED)

# Include here after the directories are defined so that the platform specific file can use the variables.
include(${PROJECT_CMAKE_FILES_DIR}/${TARGET_BUILD_PLATFORM}/NvBlastExtAuthoring.cmake)

SET(COMMON_FILES
    ${BLASTEXT_PLATFORM_COMMON_FILES}

    #${COMMON_SOURCE_DIR}/NvBlastAssert.cpp
    #${COMMON_SOURCE_DIR}/NvBlastAssert.h
)

SET(PUBLIC_FILES
    ${AUTHORING_EXT_INCLUDE_DIR}/NvBlastExtAuthoringBondGenerator.h
    ${AUTHORING_EXT_INCLUDE_DIR}/NvBlastExtAuthoringCollisionBuilder.h
    ${AUTHORING_EXT_INCLUDE_DIR}/NvBlastExtAuthoringFractureTool.h
    ${AUTHORING_EXT_INCLUDE_DIR}/NvBlastExtAuthoringMesh.h
    ${AUTHORING_EXT_INCLUDE_DIR}/NvBlastExtAuthoringTypes.h
)

SET(EXT_AUTHORING_FILES
    ${AUTHORING_EXT_SOURCE_DIR}/NvBlastExtAuthoringAccelerator.cpp
    ${AUTHORING_EXT_SOURCE_DIR}/NvBlastExtAuthoringAccelerator.h
    ${AUTHORING_EXT_SOURCE_DIR}/NvBlastExtAuthoringBondGenerator.cpp
    ${AUTHORING_EXT_SOURCE_DIR}/NvBlastExtAuthoringBooleanTool.cpp
    ${AUTHORING_EXT_SOURCE_DIR}/NvBlastExtAuthoringBooleanTool.h
    ${AUTHORING_EXT_SOURCE_DIR}/NvBlastExtAuthoringCollisionBuilder.cpp
    ${AUTHORING_EXT_SOURCE_DIR}/NvBlastExtAuthoringMesh.cpp
    ${AUTHORING_EXT_SOURCE_DIR}/NvBlastExtAuthoringPerlinNoise.h
    ${AUTHORING_EXT_SOURCE_DIR}/NvBlastExtAuthoringTriangulator.cpp
    ${AUTHORING_EXT_SOURCE_DIR}/NvBlastExtAuthoringTriangulator.h
    ${AUTHORING_EXT_SOURCE_DIR}/NvBlastExtAuthoringVSA.h
    ${AUTHORING_EXT_SOURCE_DIR}/NvBlastExtAuthoringFractureTool.cpp
    ${AUTHORING_EXT_SOURCE_DIR}/NvBlastExtTriangleProcessor.cpp
    ${AUTHORING_EXT_SOURCE_DIR}/NvBlastExtTriangleProcessor.h
    ${AUTHORING_EXT_SOURCE_DIR}/NvBlastExtApexSharedParts.cpp
    ${AUTHORING_EXT_SOURCE_DIR}/NvBlastExtApexSharedParts.h
    ${AUTHORING_EXT_SOURCE_DIR}/NvBlastExtAuthoringInternalCommon.h
)

ADD_LIBRARY(NvBlastExtAuthoring STATIC
    ${COMMON_FILES}
    ${PUBLIC_FILES}

    ${EXT_AUTHORING_FILES}
)

SOURCE_GROUP("common" FILES ${COMMON_FILES})
SOURCE_GROUP("public" FILES ${PUBLIC_FILES})
SOURCE_GROUP("src" FILES ${EXT_AUTHORING_FILES})


# Target specific compile options

TARGET_INCLUDE_DIRECTORIES(NvBlastExtAuthoring
    PRIVATE ${BLASTEXT_PLATFORM_INCLUDES}

    PUBLIC ${PROJECT_SOURCE_DIR}/lowlevel/include
    PUBLIC ${AUTHORING_EXT_INCLUDE_DIR}

    PRIVATE ${PROJECT_SOURCE_DIR}/common
    PRIVATE ${COMMON_EXT_SOURCE_DIR}

    PRIVATE ${AUTHORING_EXT_SOURCE_DIR}

    PRIVATE ${PHYSXSDK_INCLUDE_DIRS}
    PRIVATE ${PXSHAREDSDK_INCLUDE_DIRS}
)

TARGET_COMPILE_DEFINITIONS(NvBlastExtAuthoring
    PRIVATE ${BLASTEXT_COMPILE_DEFS}
)

# Platform-specific compile options (set by the platform cmake file included above)
TARGET_COMPILE_OPTIONS(NvBlastExtAuthoring
    PRIVATE ${BLASTEXT_PLATFORM_COMPILE_OPTIONS}
)

SET_TARGET_PROPERTIES(NvBlastExtAuthoring PROPERTIES
    PDB_NAME_DEBUG "NvBlastExtAuthoring${CMAKE_DEBUG_POSTFIX}"
    PDB_NAME_CHECKED "NvBlastExtAuthoring${CMAKE_CHECKED_POSTFIX}"
    PDB_NAME_PROFILE "NvBlastExtAuthoring${CMAKE_PROFILE_POSTFIX}"
    PDB_NAME_RELEASE "NvBlastExtAuthoring${CMAKE_RELEASE_POSTFIX}"
)

# Do final direct sets after the target has been defined
TARGET_LINK_LIBRARIES(NvBlastExtAuthoring
    PRIVATE NvBlast
    PUBLIC ${BLASTEXT_PLATFORM_LINKED_LIBS}
    PUBLIC $<$<CONFIG:debug>:${PHYSX3_LIB_DEBUG}> $<$<CONFIG:debug>:${PHYSX3EXTENSIONS_LIB_DEBUG}> $<$<CONFIG:debug>:${PXFOUNDATION_LIB_DEBUG}>
    PUBLIC $<$<CONFIG:checked>:${PHYSX3_LIB_CHECKED}> $<$<CONFIG:checked>:${PHYSX3EXTENSIONS_LIB_CHECKED}> $<$<CONFIG:checked>:${PXFOUNDATION_LIB_CHECKED}>
    PUBLIC $<$<CONFIG:profile>:${PHYSX3_LIB_PROFILE}> $<$<CONFIG:profile>:${PHYSX3EXTENSIONS_LIB_PROFILE}> $<$<CONFIG:profile>:${PXFOUNDATION_LIB_PROFILE}>
    PUBLIC $<$<CONFIG:release>:${PHYSX3_LIB}> $<$<CONFIG:release>:${PHYSX3EXTENSIONS_LIB}> $<$<CONFIG:release>:${PXFOUNDATION_LIB}>
)
diff --git a/NvBlast/sdk/compiler/cmake/NvBlastExtConverterLL.cmake b/NvBlast/sdk/compiler/cmake/NvBlastExtConverterLL.cmake
new file mode 100644
index 0000000..c84b60d
--- /dev/null
+++ b/NvBlast/sdk/compiler/cmake/NvBlastExtConverterLL.cmake
@@ -0,0 +1,77 @@
#
# Build NvBlastExtConverterLL (low-level asset data-format converter extension)
#


SET(COMMON_SOURCE_DIR ${PROJECT_SOURCE_DIR}/common)
SET(CONVERTERLL_EXT_SOURCE_DIR ${PROJECT_SOURCE_DIR}/extensions/converter/source)
SET(CONVERTERLL_EXT_INCLUDE_DIR ${PROJECT_SOURCE_DIR}/extensions/converter/include)


# Include here after the directories are defined so that the platform specific file can use the variables.
include(${PROJECT_CMAKE_FILES_DIR}/${TARGET_BUILD_PLATFORM}/NvBlastExtConverterLL.cmake)

SET(COMMON_FILES
    ${BLASTEXT_PLATFORM_COMMON_FILES}

    ${COMMON_SOURCE_DIR}/NvBlastAssert.cpp
    ${COMMON_SOURCE_DIR}/NvBlastAssert.h
)

SET(PUBLIC_FILES
    ${CONVERTERLL_EXT_INCLUDE_DIR}/NvBlastExtDataConverter.h
)

SET(EXT_CONVERSION_FILES
    ${CONVERTERLL_EXT_SOURCE_DIR}/conversion/NvBlastExtAssetBlockVersionConverter_v0_v1.h
    ${CONVERTERLL_EXT_SOURCE_DIR}/conversion/NvBlastExtBinaryBlockConverter.cpp
    ${CONVERTERLL_EXT_SOURCE_DIR}/conversion/NvBlastExtBinaryBlockConverter.h
    ${CONVERTERLL_EXT_SOURCE_DIR}/conversion/NvBlastExtDataConverter.cpp
)

ADD_LIBRARY(NvBlastExtConverterLL STATIC
    ${COMMON_FILES}
    ${PUBLIC_FILES}

    ${EXT_CONVERSION_FILES}
)

SOURCE_GROUP("common" FILES ${COMMON_FILES})
SOURCE_GROUP("public" FILES ${PUBLIC_FILES})
SOURCE_GROUP("src\\conversion" FILES ${EXT_CONVERSION_FILES})


# Target specific compile options

TARGET_INCLUDE_DIRECTORIES(NvBlastExtConverterLL
    PRIVATE ${BLASTEXT_PLATFORM_INCLUDES}

    PUBLIC ${PROJECT_SOURCE_DIR}/converter/include
    PUBLIC ${CONVERTERLL_EXT_INCLUDE_DIR}

    PRIVATE ${CONVERTERLL_EXT_SOURCE_DIR}/conversion
    PRIVATE ${PROJECT_SOURCE_DIR}/common
)

TARGET_COMPILE_DEFINITIONS(NvBlastExtConverterLL
    PRIVATE ${BLASTEXT_COMPILE_DEFS}
)

# Platform-specific compile options (set by the platform cmake file included above)
TARGET_COMPILE_OPTIONS(NvBlastExtConverterLL
    PRIVATE ${BLASTEXT_PLATFORM_COMPILE_OPTIONS}
)

SET_TARGET_PROPERTIES(NvBlastExtConverterLL PROPERTIES
    PDB_NAME_DEBUG "NvBlastExtConverterLL${CMAKE_DEBUG_POSTFIX}"
    PDB_NAME_CHECKED "NvBlastExtConverterLL${CMAKE_CHECKED_POSTFIX}"
    PDB_NAME_PROFILE "NvBlastExtConverterLL${CMAKE_PROFILE_POSTFIX}"
    PDB_NAME_RELEASE "NvBlastExtConverterLL${CMAKE_RELEASE_POSTFIX}"
)

# Do final direct sets after the target has been defined
#TARGET_LINK_LIBRARIES(NvBlastExtConverterLL NvBlast ${PHYSXSDK_LIBRARIES} ${APEXSDK_LIBRARIES} ${PXSHAREDSDK_LIBRARIES})
TARGET_LINK_LIBRARIES(NvBlastExtConverterLL
    PUBLIC NvBlast
    PUBLIC ${BLASTEXT_PLATFORM_LINKED_LIBS}
)
diff --git a/NvBlast/sdk/compiler/cmake/NvBlastExtImport.cmake b/NvBlast/sdk/compiler/cmake/NvBlastExtImport.cmake
new file mode 100644
index 0000000..3d7b27a
--- /dev/null
+++ b/NvBlast/sdk/compiler/cmake/NvBlastExtImport.cmake
@@ -0,0 +1,95 @@
#
# Build NvBlastExtImport (APEX Destructible import extension; links APEX/PhysX)
#


SET(COMMON_SOURCE_DIR ${PROJECT_SOURCE_DIR}/common)

SET(COMMON_EXT_SOURCE_DIR ${PROJECT_SOURCE_DIR}/extensions/common/source)
SET(IMPORT_EXT_SOURCE_DIR ${PROJECT_SOURCE_DIR}/extensions/import/source)
SET(IMPORT_EXT_INCLUDE_DIR ${PROJECT_SOURCE_DIR}/extensions/import/include)
SET(PHYSX_EXT_INCLUDE_DIR ${PROJECT_SOURCE_DIR}/extensions/physx/include)

FIND_PACKAGE(PhysXSDK $ENV{PM_PhysX_VERSION} REQUIRED)
FIND_PACKAGE(ApexSDK $ENV{PM_Apex_VERSION} REQUIRED)
FIND_PACKAGE(PxSharedSDK $ENV{PM_PxShared_VERSION} REQUIRED)

# Include here after the directories are defined so that the platform specific file can use the variables.
include(${PROJECT_CMAKE_FILES_DIR}/${TARGET_BUILD_PLATFORM}/NvBlastExtImport.cmake)

SET(COMMON_FILES
    ${BLASTEXT_PLATFORM_COMMON_FILES}

    ${COMMON_SOURCE_DIR}/NvBlastAssert.cpp
    ${COMMON_SOURCE_DIR}/NvBlastAssert.h
)

SET(PUBLIC_FILES
    ${IMPORT_EXT_INCLUDE_DIR}/NvBlastExtApexImportTool.h
)

SET(EXT_IMPORT_FILES
    ${IMPORT_EXT_SOURCE_DIR}/NvBlastExtApexDestruction.cpp
    ${IMPORT_EXT_SOURCE_DIR}/NvBlastExtApexDestruction.h
    ${IMPORT_EXT_SOURCE_DIR}/NvBlastExtApexImportTool.cpp
    ${IMPORT_EXT_SOURCE_DIR}/NvBlastExtScopedResource.cpp
    ${IMPORT_EXT_SOURCE_DIR}/NvBlastExtScopedResource.h
)

ADD_LIBRARY(NvBlastExtImport STATIC
    ${COMMON_FILES}
    ${PUBLIC_FILES}

    ${EXT_IMPORT_FILES}
)

SOURCE_GROUP("common" FILES ${COMMON_FILES})
SOURCE_GROUP("public" FILES ${PUBLIC_FILES})
SOURCE_GROUP("src" FILES ${EXT_IMPORT_FILES})


# Target specific compile options

TARGET_INCLUDE_DIRECTORIES(NvBlastExtImport
    PRIVATE ${BLASTEXT_PLATFORM_INCLUDES}

    PUBLIC ${PROJECT_SOURCE_DIR}/lowlevel/include
    PUBLIC ${PROJECT_SOURCE_DIR}/toolkit/include
    PUBLIC ${PHYSX_EXT_INCLUDE_DIR}
    PUBLIC ${IMPORT_EXT_INCLUDE_DIR}

    PRIVATE ${PROJECT_SOURCE_DIR}/common
    PRIVATE ${COMMON_EXT_SOURCE_DIR}

    PRIVATE ${IMPORT_EXT_SOURCE_DIR}

    PRIVATE ${PHYSXSDK_INCLUDE_DIRS}
    PRIVATE ${APEXSDK_INCLUDE_DIRS}
    PRIVATE ${PXSHAREDSDK_INCLUDE_DIRS}
)

TARGET_COMPILE_DEFINITIONS(NvBlastExtImport
    PRIVATE ${BLASTEXT_COMPILE_DEFS}
)

# Platform-specific compile options (set by the platform cmake file included above)
TARGET_COMPILE_OPTIONS(NvBlastExtImport
    PRIVATE ${BLASTEXT_PLATFORM_COMPILE_OPTIONS}
)

SET_TARGET_PROPERTIES(NvBlastExtImport PROPERTIES
    PDB_NAME_DEBUG "NvBlastExtImport${CMAKE_DEBUG_POSTFIX}"
    PDB_NAME_CHECKED "NvBlastExtImport${CMAKE_CHECKED_POSTFIX}"
    PDB_NAME_PROFILE "NvBlastExtImport${CMAKE_PROFILE_POSTFIX}"
    PDB_NAME_RELEASE "NvBlastExtImport${CMAKE_RELEASE_POSTFIX}"
)

# Do final direct sets after the target has been defined
TARGET_LINK_LIBRARIES(NvBlastExtImport
    PRIVATE NvBlast NvBlastTk NvBlastExtAuthoring
    PUBLIC ${BLASTEXT_PLATFORM_LINKED_LIBS}
    PUBLIC $<$<CONFIG:debug>:${APEXFRAMEWORK_LIB_DEBUG}> $<$<CONFIG:debug>:${PSFASTXML_LIB_DEBUG}>
    PUBLIC $<$<CONFIG:checked>:${APEXFRAMEWORK_LIB_CHECKED}> $<$<CONFIG:checked>:${PSFASTXML_LIB_CHECKED}>
    PUBLIC $<$<CONFIG:profile>:${APEXFRAMEWORK_LIB_PROFILE}> $<$<CONFIG:profile>:${PSFASTXML_LIB_PROFILE}>
    PUBLIC $<$<CONFIG:release>:${APEXFRAMEWORK_LIB}> $<$<CONFIG:release>:${PSFASTXML_LIB}>
)
diff --git a/NvBlast/sdk/compiler/cmake/NvBlastExtPhysX.cmake b/NvBlast/sdk/compiler/cmake/NvBlastExtPhysX.cmake
new file mode 100644
index 0000000..d2d0f0a
--- /dev/null
+++ b/NvBlast/sdk/compiler/cmake/NvBlastExtPhysX.cmake
@@ -0,0 +1,113 @@
#
# Build NvBlastExtPhysX (PhysX runtime binding extension: physics actors/families/sync)
#


SET(COMMON_SOURCE_DIR ${PROJECT_SOURCE_DIR}/common)

SET(COMMON_EXT_SOURCE_DIR ${PROJECT_SOURCE_DIR}/extensions/common/source)
SET(PHYSX_EXT_SOURCE_DIR ${PROJECT_SOURCE_DIR}/extensions/physx/source)
SET(COMMON_EXT_INCLUDE_DIR ${PROJECT_SOURCE_DIR}/extensions/common/include)
SET(PHYSX_EXT_INCLUDE_DIR ${PROJECT_SOURCE_DIR}/extensions/physx/include)

FIND_PACKAGE(PhysXSDK $ENV{PM_PhysX_VERSION} REQUIRED)
FIND_PACKAGE(PxSharedSDK $ENV{PM_PxShared_VERSION} REQUIRED)

# Include here after the directories are defined so that the platform specific file can use the variables.
include(${PROJECT_CMAKE_FILES_DIR}/${TARGET_BUILD_PLATFORM}/NvBlastExtPhysX.cmake)

SET(COMMON_FILES
    ${BLASTEXT_PLATFORM_COMMON_FILES}

    ${COMMON_SOURCE_DIR}/NvBlastAssert.cpp
    ${COMMON_SOURCE_DIR}/NvBlastAssert.h
)

SET(PUBLIC_FILES
    ${PHYSX_EXT_INCLUDE_DIR}/NvBlastExtImpactDamageManager.h
    ${PHYSX_EXT_INCLUDE_DIR}/NvBlastExtPx.h
    ${PHYSX_EXT_INCLUDE_DIR}/NvBlastExtPxActor.h
    ${PHYSX_EXT_INCLUDE_DIR}/NvBlastExtPxAsset.h
    ${PHYSX_EXT_INCLUDE_DIR}/NvBlastExtPxFamily.h
    ${PHYSX_EXT_INCLUDE_DIR}/NvBlastExtPxListener.h
    ${PHYSX_EXT_INCLUDE_DIR}/NvBlastExtPxManager.h
    ${PHYSX_EXT_INCLUDE_DIR}/NvBlastExtStressSolver.h
    ${PHYSX_EXT_INCLUDE_DIR}/NvBlastExtSync.h
)

SET(EXT_PHYSICS_FILES
    ${PHYSX_EXT_SOURCE_DIR}/physics/NvBlastExtImpulseStressSolver.h
    ${PHYSX_EXT_SOURCE_DIR}/physics/NvBlastExtImpactDamageManager.cpp
    ${PHYSX_EXT_SOURCE_DIR}/physics/NvBlastExtImpulseStressSolver.cpp
    ${PHYSX_EXT_SOURCE_DIR}/physics/NvBlastExtPxActorImpl.h
    ${PHYSX_EXT_SOURCE_DIR}/physics/NvBlastExtPxActorImpl.cpp
    ${PHYSX_EXT_SOURCE_DIR}/physics/NvBlastExtPxAssetImpl.h
    ${PHYSX_EXT_SOURCE_DIR}/physics/NvBlastExtPxAssetImpl.cpp
    ${PHYSX_EXT_SOURCE_DIR}/physics/NvBlastExtPxFamilyImpl.h
    ${PHYSX_EXT_SOURCE_DIR}/physics/NvBlastExtPxFamilyImpl.cpp
    ${PHYSX_EXT_SOURCE_DIR}/physics/NvBlastExtPxManagerImpl.h
    ${PHYSX_EXT_SOURCE_DIR}/physics/NvBlastExtPxManagerImpl.cpp
)

SET(EXT_SYNC_FILES
    ${PHYSX_EXT_SOURCE_DIR}/sync/NvBlastExtSync.cpp
)

# BLASTEXT_PHYSX_LIBTYPE (STATIC/SHARED) is set by the platform cmake file included above
ADD_LIBRARY(NvBlastExtPhysX ${BLASTEXT_PHYSX_LIBTYPE}
    ${COMMON_FILES}
    ${PUBLIC_FILES}

    ${EXT_PHYSICS_FILES}
    ${EXT_SYNC_FILES}
)

SOURCE_GROUP("common" FILES ${COMMON_FILES})
SOURCE_GROUP("public" FILES ${PUBLIC_FILES})
SOURCE_GROUP("src\\physics" FILES ${EXT_PHYSICS_FILES})
SOURCE_GROUP("src\\sync" FILES ${EXT_SYNC_FILES})


# Target specific compile options

TARGET_INCLUDE_DIRECTORIES(NvBlastExtPhysX
    PRIVATE ${BLASTEXT_PLATFORM_INCLUDES}

    PUBLIC ${PROJECT_SOURCE_DIR}/lowlevel/include
    PUBLIC ${PHYSX_EXT_INCLUDE_DIR}

    PRIVATE ${PROJECT_SOURCE_DIR}/common
    PRIVATE ${COMMON_EXT_SOURCE_DIR}

    PRIVATE ${PHYSX_EXT_SOURCE_DIR}/physics
    PRIVATE ${PHYSX_EXT_SOURCE_DIR}/sync

    PUBLIC ${PHYSXSDK_INCLUDE_DIRS}
    PRIVATE ${PXSHAREDSDK_INCLUDE_DIRS}
)

TARGET_COMPILE_DEFINITIONS(NvBlastExtPhysX
    PRIVATE ${BLASTEXT_COMPILE_DEFS}
)

# Platform-specific compile options (set by the platform cmake file included above)
TARGET_COMPILE_OPTIONS(NvBlastExtPhysX
    PRIVATE ${BLASTEXT_PLATFORM_COMPILE_OPTIONS}
)

SET_TARGET_PROPERTIES(NvBlastExtPhysX PROPERTIES
    PDB_NAME_DEBUG "NvBlastExtPhysX${CMAKE_DEBUG_POSTFIX}"
    PDB_NAME_CHECKED "NvBlastExtPhysX${CMAKE_CHECKED_POSTFIX}"
    PDB_NAME_PROFILE "NvBlastExtPhysX${CMAKE_PROFILE_POSTFIX}"
    PDB_NAME_RELEASE "NvBlastExtPhysX${CMAKE_RELEASE_POSTFIX}"
)

# Do final direct sets after the target has been defined
TARGET_LINK_LIBRARIES(NvBlastExtPhysX
    PUBLIC NvBlast NvBlastExtShaders NvBlastTk
    PUBLIC $<$<CONFIG:debug>:${PHYSX3_LIB_DEBUG}> $<$<CONFIG:debug>:${PHYSX3COOKING_LIB_DEBUG}> $<$<CONFIG:debug>:${PHYSX3EXTENSIONS_LIB_DEBUG}> $<$<CONFIG:debug>:${PXFOUNDATION_LIB_DEBUG}>
    PUBLIC $<$<CONFIG:checked>:${PHYSX3_LIB_CHECKED}> $<$<CONFIG:checked>:${PHYSX3COOKING_LIB_CHECKED}> $<$<CONFIG:checked>:${PHYSX3EXTENSIONS_LIB_CHECKED}> $<$<CONFIG:checked>:${PXFOUNDATION_LIB_CHECKED}>
    PUBLIC $<$<CONFIG:profile>:${PHYSX3_LIB_PROFILE}> $<$<CONFIG:profile>:${PHYSX3COOKING_LIB_PROFILE}> $<$<CONFIG:profile>:${PHYSX3EXTENSIONS_LIB_PROFILE}> $<$<CONFIG:profile>:${PXFOUNDATION_LIB_PROFILE}>
    PUBLIC $<$<CONFIG:release>:${PHYSX3_LIB}> $<$<CONFIG:release>:${PHYSX3COOKING_LIB}> $<$<CONFIG:release>:${PHYSX3EXTENSIONS_LIB}> $<$<CONFIG:release>:${PXFOUNDATION_LIB}>

    PUBLIC ${BLASTEXT_PLATFORM_LINKED_LIBS}
)
diff --git a/NvBlast/sdk/compiler/cmake/NvBlastExtSerialization.cmake b/NvBlast/sdk/compiler/cmake/NvBlastExtSerialization.cmake
new file mode 100644
index 0000000..440ad43
--- /dev/null
+++ b/NvBlast/sdk/compiler/cmake/NvBlastExtSerialization.cmake
@@ -0,0 +1,200 @@
#
# Build NvBlastExtSerialization (Cap'n Proto based serialization extension)
#

SET(COMMON_SOURCE_DIR ${PROJECT_SOURCE_DIR}/common)

SET(SERIAL_EXT_SOURCE_DIR ${PROJECT_SOURCE_DIR}/extensions/serialization/source)
SET(SERIAL_EXT_INCLUDE_DIR ${PROJECT_SOURCE_DIR}/extensions/serialization/include)
SET(TK_INCLUDE_DIR ${BLAST_ROOT_DIR}/sdk/toolkit/include)
SET(PHYSX_EXT_INCLUDE_DIR ${PROJECT_SOURCE_DIR}/extensions/physx/include)
SET(PHYSX_EXT_SOURCE_DIR ${PROJECT_SOURCE_DIR}/extensions/physx/source)
SET(EXT_COMMON_SOURCE_DIR ${PROJECT_SOURCE_DIR}/extensions/common/source)
SET(EXT_COMMON_INCLUDE_DIR ${PROJECT_SOURCE_DIR}/extensions/common/include)

SET(DTO_SOURCE_DIR ${SERIAL_EXT_SOURCE_DIR}/DTO)

SET(SOLVER_SOURCE_DIR ${PROJECT_SOURCE_DIR}/lowlevel/source)

SET(SERIAL_GENERATED_SOURCE_DIR ${SERIAL_EXT_SOURCE_DIR}/generated)

FIND_PACKAGE(PhysXSDK $ENV{PM_PhysX_VERSION} REQUIRED)
FIND_PACKAGE(PxSharedSDK $ENV{PM_PxShared_VERSION} REQUIRED)
FIND_PACKAGE(CapnProtoSDK $ENV{PM_CapnProto_VERSION} REQUIRED)

# Include here after the directories are defined so that the platform specific file can use the variables.
include(${PROJECT_CMAKE_FILES_DIR}/${TARGET_BUILD_PLATFORM}/NvBlastExtSerialization.cmake)

# Generate C++ sources from the .capn schemas (outputs go to SERIAL_GENERATED_SOURCE_DIR)

INCLUDE(CapnProtoGenerate)

SET(CAPNPC_OUTPUT_DIR ${SERIAL_GENERATED_SOURCE_DIR})
SET(CAPNPC_SRC_PREFIX ${SERIAL_EXT_SOURCE_DIR})
CAPNP_GENERATE_CPP(CAPNP_SRCS CAPNP_HDRS ${SERIAL_EXT_SOURCE_DIR}/NvBlastExtSerializationLL.capn ${SERIAL_EXT_SOURCE_DIR}/NvBlastExtSerialization.capn)

SET(COMMON_FILES
    ${BLASTEXTSERIALIZATION_PLATFORM_COMMON_FILES}

    ${COMMON_SOURCE_DIR}/NvBlastAssert.cpp
    ${COMMON_SOURCE_DIR}/NvBlastAssert.h
    ${COMMON_SOURCE_DIR}/NvBlastAtomic.cpp
    ${COMMON_SOURCE_DIR}/NvBlastAtomic.h
    ${COMMON_SOURCE_DIR}/NvBlastDLink.h
    ${COMMON_SOURCE_DIR}/NvBlastFixedArray.h
    ${COMMON_SOURCE_DIR}/NvBlastFixedBitmap.h
    ${COMMON_SOURCE_DIR}/NvBlastFixedBoolArray.h
    ${COMMON_SOURCE_DIR}/NvBlastFixedPriorityQueue.h
    ${COMMON_SOURCE_DIR}/NvBlastGeometry.h
# ${COMMON_SOURCE_DIR}/NvBlastIndexFns.cpp
    ${COMMON_SOURCE_DIR}/NvBlastIndexFns.h
    ${COMMON_SOURCE_DIR}/NvBlastIteratorBase.h
    ${COMMON_SOURCE_DIR}/NvBlastMath.h
    ${COMMON_SOURCE_DIR}/NvBlastMemory.h
    ${COMMON_SOURCE_DIR}/NvBlastPreprocessorInternal.h
    ${COMMON_SOURCE_DIR}/NvBlastTime.cpp
    ${COMMON_SOURCE_DIR}/NvBlastTime.h
    ${COMMON_SOURCE_DIR}/NvBlastTimers.cpp
)


SET(EXT_SERIALIZATION_FILES
    ${SERIAL_EXT_SOURCE_DIR}/NvBlastExtSerializationLL.capn
    ${SERIAL_EXT_SOURCE_DIR}/NvBlastExtSerialization.capn

    ${SERIAL_EXT_SOURCE_DIR}/NvBlastExtSerialization.h
    ${SERIAL_EXT_SOURCE_DIR}/NvBlastExtSerializationImpl.h
    ${SERIAL_EXT_SOURCE_DIR}/NvBlastExtSerializationLLImpl.h

    ${SERIAL_EXT_SOURCE_DIR}/NvBlastExtSerializationLLInterface.cpp
    ${SERIAL_EXT_SOURCE_DIR}/NvBlastExtSerializationInterface.cpp

    ${SERIAL_EXT_SOURCE_DIR}/NvBlastExtOutputStream.h
    ${SERIAL_EXT_SOURCE_DIR}/NvBlastExtOutputStream.cpp
    ${SERIAL_EXT_SOURCE_DIR}/NvBlastExtInputStream.h
    ${SERIAL_EXT_SOURCE_DIR}/NvBlastExtInputStream.cpp

    ${SERIAL_EXT_SOURCE_DIR}/NvBlastExtKJPxInputStream.h
    ${SERIAL_EXT_SOURCE_DIR}/NvBlastExtKJPxInputStream.cpp

    ${SERIAL_EXT_SOURCE_DIR}/NvBlastExtKJPxOutputStream.h
    ${SERIAL_EXT_SOURCE_DIR}/NvBlastExtKJPxOutputStream.cpp
)

# Data-transfer objects bridging Blast/PhysX types to the Cap'n Proto schema
SET(DTO_SOURCE_FILES
    ${DTO_SOURCE_DIR}/DTOMacros.h
    ${DTO_SOURCE_DIR}/AssetDTO.h
    ${DTO_SOURCE_DIR}/AssetDTO.cpp
    ${DTO_SOURCE_DIR}/TkAssetDTO.h
    ${DTO_SOURCE_DIR}/TkAssetDTO.cpp
    ${DTO_SOURCE_DIR}/ExtPxAssetDTO.h
    ${DTO_SOURCE_DIR}/ExtPxAssetDTO.cpp
    ${DTO_SOURCE_DIR}/PxVec3DTO.h
    ${DTO_SOURCE_DIR}/PxVec3DTO.cpp
    ${DTO_SOURCE_DIR}/NvBlastChunkDTO.h
    ${DTO_SOURCE_DIR}/NvBlastChunkDTO.cpp
    ${DTO_SOURCE_DIR}/NvBlastBondDTO.h
    ${DTO_SOURCE_DIR}/NvBlastBondDTO.cpp
    ${DTO_SOURCE_DIR}/NvBlastIDDTO.h
    ${DTO_SOURCE_DIR}/NvBlastIDDTO.cpp
    ${DTO_SOURCE_DIR}/TkAssetJointDescDTO.h
    ${DTO_SOURCE_DIR}/TkAssetJointDescDTO.cpp
    ${DTO_SOURCE_DIR}/ExtPxChunkDTO.h
    ${DTO_SOURCE_DIR}/ExtPxChunkDTO.cpp
    ${DTO_SOURCE_DIR}/ExtPxSubchunkDTO.h
    ${DTO_SOURCE_DIR}/ExtPxSubchunkDTO.cpp
    ${DTO_SOURCE_DIR}/PxQuatDTO.h
    ${DTO_SOURCE_DIR}/PxQuatDTO.cpp
    ${DTO_SOURCE_DIR}/PxTransformDTO.h
    ${DTO_SOURCE_DIR}/PxTransformDTO.cpp
    ${DTO_SOURCE_DIR}/PxMeshScaleDTO.h
    ${DTO_SOURCE_DIR}/PxMeshScaleDTO.cpp
    ${DTO_SOURCE_DIR}/PxConvexMeshGeometryDTO.h
    ${DTO_SOURCE_DIR}/PxConvexMeshGeometryDTO.cpp



)

SET(EXT_SERIALIZATION_INCLUDES
    ${SERIAL_EXT_INCLUDE_DIR}/NvBlastExtSerializationLLInterface.h
    ${SERIAL_EXT_INCLUDE_DIR}/NvBlastExtSerializationInterface.h
)

ADD_LIBRARY(NvBlastExtSerialization ${BLASTEXTSERIALIZATION_LIB_TYPE}
    ${COMMON_FILES}

    ${DTO_SOURCE_FILES}

    ${EXT_SERIALIZATION_INCLUDES}
    ${EXT_SERIALIZATION_FILES}

    ${CAPNP_SRCS}
    ${CAPNP_HDRS}

    ${MD5_FILES}
)

SOURCE_GROUP("common" FILES ${COMMON_FILES})

SOURCE_GROUP("include" FILES ${EXT_SERIALIZATION_INCLUDES})
SOURCE_GROUP("src\\serialization" FILES ${EXT_SERIALIZATION_FILES})
SOURCE_GROUP("src\\serialization\\DTO" FILES ${DTO_SOURCE_FILES})
SOURCE_GROUP("src\\serialization\\generated" FILES ${CAPNP_SRCS} ${CAPNP_HDRS})


# Target specific compile options

TARGET_INCLUDE_DIRECTORIES(NvBlastExtSerialization
    PRIVATE ${BLASTEXTSERIALIZATION_PLATFORM_INCLUDES}

    PRIVATE ${PROJECT_SOURCE_DIR}/common
    PRIVATE ${PROJECT_SOURCE_DIR}/lowlevel/include
    PRIVATE ${PROJECT_SOURCE_DIR}/lowlevel/source

    PRIVATE ${TK_INCLUDE_DIR}

    PUBLIC ${SERIAL_EXT_INCLUDE_DIR}
    PUBLIC ${SERIAL_EXT_SOURCE_DIR}
    PUBLIC ${DTO_SOURCE_DIR}

    PRIVATE ${PHYSX_EXT_INCLUDE_DIR}
    PRIVATE ${PHYSX_EXT_SOURCE_DIR}

    PRIVATE ${EXT_COMMON_SOURCE_DIR}
    PRIVATE ${EXT_COMMON_INCLUDE_DIR}

    PUBLIC ${CAPNPROTOSDK_INCLUDE_DIRS}

    PRIVATE ${COMMON_SOURCE_DIR}

    PUBLIC ${PHYSXSDK_INCLUDE_DIRS}
    PRIVATE ${PXSHAREDSDK_INCLUDE_DIRS}
)

# CAPNP_LITE must be PUBLIC: consumers include the generated headers
TARGET_COMPILE_DEFINITIONS(NvBlastExtSerialization
    PUBLIC CAPNP_LITE=1
    PRIVATE ${BLASTEXTSERIALIZATION_COMPILE_DEFS}
)

# Warning disables for Capn Proto generated code
TARGET_COMPILE_OPTIONS(NvBlastExtSerialization
    PRIVATE ${BLASTEXTSERIALIZATION_COMPILE_OPTIONS}
)

SET_TARGET_PROPERTIES(NvBlastExtSerialization PROPERTIES
    PDB_NAME_DEBUG "NvBlastExtSerialization${CMAKE_DEBUG_POSTFIX}"
    PDB_NAME_CHECKED "NvBlastExtSerialization${CMAKE_CHECKED_POSTFIX}"
    PDB_NAME_PROFILE "NvBlastExtSerialization${CMAKE_PROFILE_POSTFIX}"
    PDB_NAME_RELEASE "NvBlastExtSerialization${CMAKE_RELEASE_POSTFIX}"
)

# Do final direct sets after the target has been defined
TARGET_LINK_LIBRARIES(NvBlastExtSerialization
    PRIVATE NvBlast NvBlastExtPhysX NvBlastTk ${CAPNPROTOSDK_LIBRARIES}
    PUBLIC $<$<CONFIG:debug>:${PHYSX3_LIB_DEBUG}> $<$<CONFIG:debug>:${PHYSX3COOKING_LIB_DEBUG}>
    PUBLIC $<$<CONFIG:checked>:${PHYSX3_LIB_CHECKED}> $<$<CONFIG:checked>:${PHYSX3COOKING_LIB_CHECKED}>
    PUBLIC $<$<CONFIG:profile>:${PHYSX3_LIB_PROFILE}> $<$<CONFIG:profile>:${PHYSX3COOKING_LIB_PROFILE}>
    PUBLIC $<$<CONFIG:release>:${PHYSX3_LIB}> $<$<CONFIG:release>:${PHYSX3COOKING_LIB}>

)
diff --git a/NvBlast/sdk/compiler/cmake/NvBlastExtSerializationLL.cmake b/NvBlast/sdk/compiler/cmake/NvBlastExtSerializationLL.cmake
new file mode 100644
index 0000000..da1a9d2
--- /dev/null
+++ b/NvBlast/sdk/compiler/cmake/NvBlastExtSerializationLL.cmake
@@ -0,0 +1,151 @@
+#
+# Build NvBlastExtSerializationLL Common
+#
+
+SET(COMMON_SOURCE_DIR ${PROJECT_SOURCE_DIR}/common)
+
+SET(SERIAL_EXT_SOURCE_DIR ${PROJECT_SOURCE_DIR}/extensions/serialization/source)
+SET(SERIAL_EXT_INCLUDE_DIR ${PROJECT_SOURCE_DIR}/extensions/serialization/include)
+
+SET(DTO_SOURCE_DIR ${SERIAL_EXT_SOURCE_DIR}/DTO)
+
+SET(SOLVER_SOURCE_DIR ${PROJECT_SOURCE_DIR}/lowlevel/source)
+
+SET(SERIAL_GENERATED_SOURCE_DIR ${SERIAL_EXT_SOURCE_DIR}/generated)
+
+FIND_PACKAGE(CapnProtoSDK $ENV{PM_CapnProto_VERSION} REQUIRED)
+
+# Include here after the directories are defined so that the platform specific file can use the variables.
+include(${PROJECT_CMAKE_FILES_DIR}/${TARGET_BUILD_PLATFORM}/NvBlastExtSerializationLL.cmake)
+
+# Compile the generated files for serialization
+
+INCLUDE(CapnProtoGenerate)
+
+SET(CAPNPC_OUTPUT_DIR ${SERIAL_GENERATED_SOURCE_DIR})
+SET(CAPNPC_SRC_PREFIX ${SERIAL_EXT_SOURCE_DIR})
+CAPNP_GENERATE_CPP(CAPNP_SRCS CAPNP_HDRS ${SERIAL_EXT_SOURCE_DIR}/NvBlastExtSerializationLL.capn)
+
+SET(COMMON_FILES
+ ${BLASTEXTSERIALIZATION_PLATFORM_COMMON_FILES}
+
+ ${COMMON_SOURCE_DIR}/NvBlastAssert.cpp
+ ${COMMON_SOURCE_DIR}/NvBlastAssert.h
+ ${COMMON_SOURCE_DIR}/NvBlastAtomic.cpp
+ ${COMMON_SOURCE_DIR}/NvBlastAtomic.h
+ ${COMMON_SOURCE_DIR}/NvBlastDLink.h
+ ${COMMON_SOURCE_DIR}/NvBlastFixedArray.h
+ ${COMMON_SOURCE_DIR}/NvBlastFixedBitmap.h
+ ${COMMON_SOURCE_DIR}/NvBlastFixedBoolArray.h
+ ${COMMON_SOURCE_DIR}/NvBlastFixedPriorityQueue.h
+ ${COMMON_SOURCE_DIR}/NvBlastGeometry.h
+ ${COMMON_SOURCE_DIR}/NvBlastIndexFns.h
+ ${COMMON_SOURCE_DIR}/NvBlastIteratorBase.h
+ ${COMMON_SOURCE_DIR}/NvBlastMath.h
+ ${COMMON_SOURCE_DIR}/NvBlastMemory.h
+ ${COMMON_SOURCE_DIR}/NvBlastPreprocessorInternal.h
+ ${COMMON_SOURCE_DIR}/NvBlastTime.cpp
+ ${COMMON_SOURCE_DIR}/NvBlastTime.h
+ ${COMMON_SOURCE_DIR}/NvBlastTimers.cpp
+)
+
+
+SET(EXT_SERIALIZATION_FILES
+ ${SERIAL_EXT_SOURCE_DIR}/NvBlastExtSerializationLL.capn
+
+ ${SERIAL_EXT_SOURCE_DIR}/NvBlastExtSerialization.h
+ ${SERIAL_EXT_SOURCE_DIR}/NvBlastExtSerializationLLImpl.h
+
+ ${SERIAL_EXT_SOURCE_DIR}/NvBlastExtSerializationLLInterface.cpp
+
+ ${SERIAL_EXT_SOURCE_DIR}/NvBlastExtOutputStream.h
+ ${SERIAL_EXT_SOURCE_DIR}/NvBlastExtOutputStream.cpp
+ ${SERIAL_EXT_SOURCE_DIR}/NvBlastExtInputStream.h
+ ${SERIAL_EXT_SOURCE_DIR}/NvBlastExtInputStream.cpp
+
+# ${SERIAL_EXT_SOURCE_DIR}/NvBlastExtKJPxInputStream.h
+# ${SERIAL_EXT_SOURCE_DIR}/NvBlastExtKJPxInputStream.cpp
+
+# ${SERIAL_EXT_SOURCE_DIR}/NvBlastExtKJPxOutputStream.h
+# ${SERIAL_EXT_SOURCE_DIR}/NvBlastExtKJPxOutputStream.cpp
+)
+
+SET(DTO_SOURCE_FILES
+ ${DTO_SOURCE_DIR}/DTOMacros.h
+ ${DTO_SOURCE_DIR}/AssetDTO.h
+ ${DTO_SOURCE_DIR}/AssetDTO.cpp
+ ${DTO_SOURCE_DIR}/NvBlastChunkDTO.h
+ ${DTO_SOURCE_DIR}/NvBlastChunkDTO.cpp
+ ${DTO_SOURCE_DIR}/NvBlastBondDTO.h
+ ${DTO_SOURCE_DIR}/NvBlastBondDTO.cpp
+ ${DTO_SOURCE_DIR}/NvBlastIDDTO.h
+ ${DTO_SOURCE_DIR}/NvBlastIDDTO.cpp
+)
+
+SET(EXT_SERIALIZATION_INCLUDES
+ ${SERIAL_EXT_INCLUDE_DIR}/NvBlastExtSerializationLLInterface.h
+)
+
+ADD_LIBRARY(NvBlastExtSerializationLL ${BLASTEXTSERIALIZATION_LIB_TYPE}
+ ${COMMON_FILES}
+
+ ${DTO_SOURCE_FILES}
+
+ ${EXT_SERIALIZATION_INCLUDES}
+ ${EXT_SERIALIZATION_FILES}
+
+ ${CAPNP_SRCS}
+ ${CAPNP_HDRS}
+
+ ${MD5_FILES}
+)
+
+SOURCE_GROUP("common" FILES ${COMMON_FILES})
+
+SOURCE_GROUP("include" FILES ${EXT_SERIALIZATION_INCLUDES})
+SOURCE_GROUP("src\\serialization" FILES ${EXT_SERIALIZATION_FILES})
+SOURCE_GROUP("src\\serialization\\DTO" FILES ${DTO_SOURCE_FILES})
+SOURCE_GROUP("src\\serialization\\generated" FILES ${CAPNP_SRCS} ${CAPNP_HDRS})
+
+
+# Target specific compile options
+
+TARGET_INCLUDE_DIRECTORIES(NvBlastExtSerializationLL
+ PRIVATE ${BLASTEXTSERIALIZATION_PLATFORM_INCLUDES}
+
+ PRIVATE ${PROJECT_SOURCE_DIR}/common
+ PRIVATE ${PROJECT_SOURCE_DIR}/lowlevel/include
+ PRIVATE ${PROJECT_SOURCE_DIR}/lowlevel/source
+
+ PRIVATE ${TK_INCLUDE_DIR}
+
+ PUBLIC ${SERIAL_EXT_INCLUDE_DIR}
+ PUBLIC ${SERIAL_EXT_SOURCE_DIR}
+ PUBLIC ${DTO_SOURCE_DIR}
+
+ PUBLIC ${CAPNPROTOSDK_INCLUDE_DIRS}
+
+ PRIVATE ${COMMON_SOURCE_DIR}
+)
+
+TARGET_COMPILE_DEFINITIONS(NvBlastExtSerializationLL
+ PUBLIC CAPNP_LITE=1;BLAST_LL_ALLOC=1
+ PRIVATE ${BLASTEXTSERIALIZATION_COMPILE_DEFS}
+)
+
+# Warning disables for Capn Proto
+TARGET_COMPILE_OPTIONS(NvBlastExtSerializationLL
+ PRIVATE ${BLASTEXTSERIALIZATION_COMPILE_OPTIONS}
+)
+
+SET_TARGET_PROPERTIES(NvBlastExtSerializationLL PROPERTIES
+ PDB_NAME_DEBUG "NvBlastExtSerializationLL${CMAKE_DEBUG_POSTFIX}"
+ PDB_NAME_CHECKED "NvBlastExtSerializationLL${CMAKE_CHECKED_POSTFIX}"
+ PDB_NAME_PROFILE "NvBlastExtSerializationLL${CMAKE_PROFILE_POSTFIX}"
+ PDB_NAME_RELEASE "NvBlastExtSerializationLL${CMAKE_RELEASE_POSTFIX}"
+)
+
+# Do final direct sets after the target has been defined
+TARGET_LINK_LIBRARIES(NvBlastExtSerializationLL
+ PRIVATE NvBlast ${CAPNPROTOSDK_LIBRARIES}
+)
diff --git a/NvBlast/sdk/compiler/cmake/NvBlastExtShaders.cmake b/NvBlast/sdk/compiler/cmake/NvBlastExtShaders.cmake
new file mode 100644
index 0000000..df4a3ad
--- /dev/null
+++ b/NvBlast/sdk/compiler/cmake/NvBlastExtShaders.cmake
@@ -0,0 +1,74 @@
+#
+# Build NvBlastExtShaders Common
+#
+
+
+SET(COMMON_SOURCE_DIR ${PROJECT_SOURCE_DIR}/common)
+SET(SHADERS_EXT_SOURCE_DIR ${PROJECT_SOURCE_DIR}/extensions/shaders/source)
+SET(SHADERS_EXT_INCLUDE_DIR ${PROJECT_SOURCE_DIR}/extensions/shaders/include)
+
+# Include here after the directories are defined so that the platform specific file can use the variables.
+include(${PROJECT_CMAKE_FILES_DIR}/${TARGET_BUILD_PLATFORM}/NvBlastExtShaders.cmake)
+
+SET(COMMON_FILES
+ ${BLASTEXT_PLATFORM_COMMON_FILES}
+
+ ${COMMON_SOURCE_DIR}/NvBlastAssert.cpp
+ ${COMMON_SOURCE_DIR}/NvBlastAssert.h
+)
+
+SET(PUBLIC_FILES
+ ${SHADERS_EXT_INCLUDE_DIR}/NvBlastExtDamageShaders.h
+)
+
+SET(EXT_SOURCE_FILES
+ ${SHADERS_EXT_SOURCE_DIR}/NvBlastExtRadialShaders.cpp
+ ${SHADERS_EXT_SOURCE_DIR}/NvBlastExtShearShaders.cpp
+)
+
+ADD_LIBRARY(NvBlastExtShaders ${BLAST_EXT_SHARED_LIB_TYPE}
+ ${COMMON_FILES}
+ ${PUBLIC_FILES}
+
+ ${EXT_SOURCE_FILES}
+)
+
+SOURCE_GROUP("common" FILES ${COMMON_FILES})
+SOURCE_GROUP("public" FILES ${PUBLIC_FILES})
+SOURCE_GROUP("src" FILES ${EXT_SOURCE_FILES})
+
+
+# Target specific compile options
+
+TARGET_INCLUDE_DIRECTORIES(NvBlastExtShaders
+ PRIVATE ${BLASTEXT_PLATFORM_INCLUDES}
+
+ PUBLIC ${PROJECT_SOURCE_DIR}/lowlevel/include
+ PUBLIC ${SHADERS_EXT_INCLUDE_DIR}
+
+ PRIVATE ${SHADERS_EXT_SOURCE_DIR}
+ PRIVATE ${PROJECT_SOURCE_DIR}/common
+)
+
+TARGET_COMPILE_DEFINITIONS(NvBlastExtShaders
+ PRIVATE ${BLASTEXT_COMPILE_DEFS}
+)
+
+# Platform-specific compile options
+TARGET_COMPILE_OPTIONS(NvBlastExtShaders
+ PRIVATE ${BLASTEXT_PLATFORM_COMPILE_OPTIONS}
+)
+
+SET_TARGET_PROPERTIES(NvBlastExtShaders PROPERTIES
+ PDB_NAME_DEBUG "NvBlastExtShaders${CMAKE_DEBUG_POSTFIX}"
+ PDB_NAME_CHECKED "NvBlastExtShaders${CMAKE_CHECKED_POSTFIX}"
+ PDB_NAME_PROFILE "NvBlastExtShaders${CMAKE_PROFILE_POSTFIX}"
+ PDB_NAME_RELEASE "NvBlastExtShaders${CMAKE_RELEASE_POSTFIX}"
+)
+
+# Do final direct sets after the target has been defined
+#TARGET_LINK_LIBRARIES(NvBlastExtShaders NvBlast ${PHYSXSDK_LIBRARIES} ${APEXSDK_LIBRARIES} ${PXSHAREDSDK_LIBRARIES})
+TARGET_LINK_LIBRARIES(NvBlastExtShaders
+ PUBLIC NvBlast
+ PUBLIC ${BLASTEXT_PLATFORM_LINKED_LIBS}
+)
diff --git a/NvBlast/sdk/compiler/cmake/NvBlastTk.cmake b/NvBlast/sdk/compiler/cmake/NvBlastTk.cmake
new file mode 100644
index 0000000..be9e04a
--- /dev/null
+++ b/NvBlast/sdk/compiler/cmake/NvBlastTk.cmake
@@ -0,0 +1,126 @@
+#
+# Build NvBlastTk common
+#
+
+SET(TOOLKIT_DIR ${PROJECT_SOURCE_DIR}/toolkit)
+SET(PROFILER_SOURCE_DIR ${PROJECT_SOURCE_DIR}/profiler)
+SET(COMMON_SOURCE_DIR ${PROJECT_SOURCE_DIR}/common)
+
+FIND_PACKAGE(PxSharedSDK $ENV{PM_PxShared_VERSION} REQUIRED)
+
+# Include here after the directories are defined so that the platform specific file can use the variables.
+include(${PROJECT_CMAKE_FILES_DIR}/${TARGET_BUILD_PLATFORM}/NvBlastTk.cmake)
+
+SET(COMMON_FILES
+ ${BLASTTK_PLATFORM_COMMON_FILES}
+
+ ${COMMON_SOURCE_DIR}/NvBlastAssert.cpp
+ ${COMMON_SOURCE_DIR}/NvBlastAssert.h
+ ${COMMON_SOURCE_DIR}/NvBlastAtomic.cpp
+ ${COMMON_SOURCE_DIR}/NvBlastAtomic.h
+ ${COMMON_SOURCE_DIR}/NvBlastDLink.h
+ ${COMMON_SOURCE_DIR}/NvBlastFixedArray.h
+ ${COMMON_SOURCE_DIR}/NvBlastFixedBitmap.h
+ ${COMMON_SOURCE_DIR}/NvBlastFixedBoolArray.h
+ ${COMMON_SOURCE_DIR}/NvBlastFixedPriorityQueue.h
+ ${COMMON_SOURCE_DIR}/NvBlastGeometry.h
+# ${COMMON_SOURCE_DIR}/NvBlastIndexFns.cpp
+ ${COMMON_SOURCE_DIR}/NvBlastIndexFns.h
+ ${COMMON_SOURCE_DIR}/NvBlastIteratorBase.h
+ ${COMMON_SOURCE_DIR}/NvBlastMath.h
+ ${COMMON_SOURCE_DIR}/NvBlastMemory.h
+ ${COMMON_SOURCE_DIR}/NvBlastPreprocessorInternal.h
+ ${COMMON_SOURCE_DIR}/NvBlastTime.cpp
+ ${COMMON_SOURCE_DIR}/NvBlastTime.h
+ ${COMMON_SOURCE_DIR}/NvBlastTimers.cpp
+)
+
+SET(PROFILER_FILES
+ ${PROFILER_SOURCE_DIR}/NvBlastProfiler.cpp
+ ${PROFILER_SOURCE_DIR}/NvBlastProfilerInternal.h
+)
+
+SET(PUBLIC_FILES
+ ${TOOLKIT_DIR}/include/NvBlastTk.h
+ ${TOOLKIT_DIR}/include/NvBlastTkActor.h
+ ${TOOLKIT_DIR}/include/NvBlastTkAsset.h
+ ${TOOLKIT_DIR}/include/NvBlastTkEvent.h
+ ${TOOLKIT_DIR}/include/NvBlastTkFamily.h
+ ${TOOLKIT_DIR}/include/NvBlastTkFramework.h
+ ${TOOLKIT_DIR}/include/NvBlastTkGroup.h
+ ${TOOLKIT_DIR}/include/NvBlastTkIdentifiable.h
+ ${TOOLKIT_DIR}/include/NvBlastTkJoint.h
+ ${TOOLKIT_DIR}/include/NvBlastTkObject.h
+ ${TOOLKIT_DIR}/include/NvBlastTkSerializable.h
+ ${TOOLKIT_DIR}/include/NvBlastTkType.h
+)
+
+SET(TOOLKIT_FILES
+ ${TOOLKIT_DIR}/source/NvBlastTkActorImpl.cpp
+ ${TOOLKIT_DIR}/source/NvBlastTkActorImpl.h
+ ${TOOLKIT_DIR}/source/NvBlastTkAllocator.cpp
+ ${TOOLKIT_DIR}/source/NvBlastTkAllocator.h
+ ${TOOLKIT_DIR}/source/NvBlastTkArray.h
+ ${TOOLKIT_DIR}/source/NvBlastTkAssetImpl.cpp
+ ${TOOLKIT_DIR}/source/NvBlastTkAssetImpl.h
+ ${TOOLKIT_DIR}/source/NvBlastTkCommon.h
+ ${TOOLKIT_DIR}/source/NvBlastTkEventQueue.h
+ ${TOOLKIT_DIR}/source/NvBlastTkFamilyImpl.cpp
+ ${TOOLKIT_DIR}/source/NvBlastTkFamilyImpl.h
+ ${TOOLKIT_DIR}/source/NvBlastTkFrameworkImpl.cpp
+ ${TOOLKIT_DIR}/source/NvBlastTkFrameworkImpl.h
+ ${TOOLKIT_DIR}/source/NvBlastTkGroupImpl.cpp
+ ${TOOLKIT_DIR}/source/NvBlastTkGroupImpl.h
+ ${TOOLKIT_DIR}/source/NvBlastTkGUID.h
+ ${TOOLKIT_DIR}/source/NvBlastTkHashMap.h
+ ${TOOLKIT_DIR}/source/NvBlastTkHashSet.h
+ ${TOOLKIT_DIR}/source/NvBlastTkJointImpl.cpp
+ ${TOOLKIT_DIR}/source/NvBlastTkJointImpl.h
+ ${TOOLKIT_DIR}/source/NvBlastTkTaskImpl.cpp
+ ${TOOLKIT_DIR}/source/NvBlastTkTaskImpl.h
+ ${TOOLKIT_DIR}/source/NvBlastTkTypeImpl.h
+)
+
+ADD_LIBRARY(NvBlastTk ${BLASTTK_LIBTYPE}
+ ${COMMON_FILES}
+ ${PROFILER_FILES}
+ ${PUBLIC_FILES}
+ ${TOOLKIT_FILES}
+)
+
+SOURCE_GROUP("common" FILES ${COMMON_FILES})
+SOURCE_GROUP("profiler" FILES ${PROFILER_FILES})
+SOURCE_GROUP("public" FILES ${PUBLIC_FILES})
+SOURCE_GROUP("toolkit" FILES ${TOOLKIT_FILES})
+
+# Target specific compile options
+
+TARGET_INCLUDE_DIRECTORIES(NvBlastTk
+ PRIVATE ${BLASTTK_PLATFORM_INCLUDES}
+
+ PRIVATE ${PROJECT_SOURCE_DIR}/common
+ PRIVATE ${PROJECT_SOURCE_DIR}/profiler
+ PUBLIC ${PROJECT_SOURCE_DIR}/lowlevel/include
+ PUBLIC ${PROJECT_SOURCE_DIR}/toolkit/include
+
+ PRIVATE ${PXSHAREDSDK_INCLUDE_DIRS}
+)
+
+TARGET_COMPILE_DEFINITIONS(NvBlastTk
+ PRIVATE ${BLASTTK_COMPILE_DEFS}
+)
+
+SET_TARGET_PROPERTIES(NvBlastTk PROPERTIES
+ PDB_NAME_DEBUG "NvBlastTk${CMAKE_DEBUG_POSTFIX}"
+ PDB_NAME_CHECKED "NvBlastTk${CMAKE_CHECKED_POSTFIX}"
+ PDB_NAME_PROFILE "NvBlastTk${CMAKE_PROFILE_POSTFIX}"
+ PDB_NAME_RELEASE "NvBlastTk${CMAKE_RELEASE_POSTFIX}"
+)
+
+# Do final direct sets after the target has been defined
+TARGET_LINK_LIBRARIES(NvBlastTk
+ PRIVATE NvBlast
+
+ PUBLIC ${BLASTTK_PLATFORM_LINKED_LIBS}
+)
+
diff --git a/NvBlast/sdk/compiler/cmake/modules/CapnProtoGenerate.cmake b/NvBlast/sdk/compiler/cmake/modules/CapnProtoGenerate.cmake
new file mode 100644
index 0000000..3579784
--- /dev/null
+++ b/NvBlast/sdk/compiler/cmake/modules/CapnProtoGenerate.cmake
@@ -0,0 +1,108 @@
+# Configuration variables (optional):
+# CAPNPC_OUTPUT_DIR
+# Directory to place compiled schema sources (default: the same directory as the schema file).
+# CAPNPC_IMPORT_DIRS
+# List of additional include directories for the schema compiler.
+# (CMAKE_CURRENT_SOURCE_DIR and CAPNP_INCLUDE_DIRS are always included.)
+# CAPNPC_SRC_PREFIX
+# Schema file source prefix (default: CMAKE_CURRENT_SOURCE_DIR).
+# CAPNPC_FLAGS
+# Additional flags to pass to the schema compiler.
+# Example usage:
+#
+# capnp_generate_cpp(CAPNP_SRCS CAPNP_HDRS schema.capnp)
+# add_executable(a a.cc ${CAPNP_SRCS} ${CAPNP_HDRS})
+# target_link_library(a ${CAPNP_LIBRARIES})
+#
+# For out-of-source builds:
+#
+# set(CAPNPC_OUTPUT_DIR ${CMAKE_CURRENT_BINARY_DIR})
+# include_directories(${CAPNPC_OUTPUT_DIR})
+# capnp_generate_cpp(...)
+
+function(CAPNP_GENERATE_CPP SOURCES HEADERS)
+ if(NOT ARGN)
+ message(SEND_ERROR "CAPNP_GENERATE_CPP() called without any source files.")
+ endif()
+ if(NOT CAPNP_EXECUTABLE)
+ message(SEND_ERROR "Could not locate capnp executable (CAPNP_EXECUTABLE).")
+ endif()
+ if(NOT CAPNPC_CXX_EXECUTABLE)
+ message(SEND_ERROR "Could not locate capnpc-c++ executable (CAPNPC_CXX_EXECUTABLE).")
+ endif()
+ if(NOT CAPNP_INCLUDE_DIRS)
+ message(SEND_ERROR "Could not locate capnp header files (CAPNP_INCLUDE_DIRS).")
+ endif()
+
+ # Default compiler includes
+ set(include_path -I ${CMAKE_CURRENT_SOURCE_DIR} -I ${CAPNP_INCLUDE_DIRS})
+
+ if(DEFINED CAPNPC_IMPORT_DIRS)
+ # Append each directory as a series of '-I' flags in ${include_path}
+ foreach(directory ${CAPNPC_IMPORT_DIRS})
+ get_filename_component(absolute_path "${directory}" ABSOLUTE)
+ list(APPEND include_path -I ${absolute_path})
+ endforeach()
+ endif()
+
+ if(DEFINED CAPNPC_OUTPUT_DIR)
+ # Prepend a ':' to get the format for the '-o' flag right
+ set(output_dir ":${CAPNPC_OUTPUT_DIR}")
+ else()
+ set(output_dir ":.")
+ endif()
+
+ if(NOT DEFINED CAPNPC_SRC_PREFIX)
+ set(CAPNPC_SRC_PREFIX "${CMAKE_CURRENT_SOURCE_DIR}")
+ endif()
+ get_filename_component(CAPNPC_SRC_PREFIX "${CAPNPC_SRC_PREFIX}" ABSOLUTE)
+
+ set(${SOURCES})
+ set(${HEADERS})
+ foreach(schema_file ${ARGN})
+ get_filename_component(file_path "${schema_file}" ABSOLUTE)
+ get_filename_component(file_dir "${file_path}" PATH)
+
+ # Figure out where the output files will go
+ if (NOT DEFINED CAPNPC_OUTPUT_DIR)
+ set(output_base "${file_path}")
+ else()
+ # Output files are placed in CAPNPC_OUTPUT_DIR, at a location as if they were
+ # relative to CAPNPC_SRC_PREFIX.
+ string(LENGTH "${CAPNPC_SRC_PREFIX}" prefix_len)
+ string(SUBSTRING "${file_path}" 0 ${prefix_len} output_prefix)
+ if(NOT "${CAPNPC_SRC_PREFIX}" STREQUAL "${output_prefix}")
+ message(SEND_ERROR "Could not determine output path for '${schema_file}' ('${file_path}') with source prefix '${CAPNPC_SRC_PREFIX}' into '${CAPNPC_OUTPUT_DIR}'.")
+ endif()
+
+ string(SUBSTRING "${file_path}" ${prefix_len} -1 output_path)
+ set(output_base "${CAPNPC_OUTPUT_DIR}${output_path}")
+ endif()
+
+	# MESSAGE("Output base: " ${output_base})
+	# MESSAGE("Output path: " ${output_path})
+	# MESSAGE("Output dir: " ${output_dir})
+	# MESSAGE("Src prefix: " ${CAPNPC_SRC_PREFIX})
+
+ add_custom_command(
+ OUTPUT "${output_base}.c++" "${output_base}.h"
+ COMMAND "${CAPNP_EXECUTABLE}"
+ ARGS compile
+ -o ${CAPNPC_CXX_EXECUTABLE}${output_dir}
+ --verbose
+ --src-prefix ${CAPNPC_SRC_PREFIX}
+ ${include_path}
+ ${CAPNPC_FLAGS}
+ ${file_path}
+ DEPENDS "${schema_file}"
+ COMMENT "Compiling Cap'n Proto schema ${schema_file}"
+ VERBATIM
+ )
+ list(APPEND ${SOURCES} "${output_base}.c++")
+ list(APPEND ${HEADERS} "${output_base}.h")
+ endforeach()
+
+ set_source_files_properties(${${SOURCES}} ${${HEADERS}} PROPERTIES GENERATED TRUE)
+ set(${SOURCES} ${${SOURCES}} PARENT_SCOPE)
+ set(${HEADERS} ${${HEADERS}} PARENT_SCOPE)
+endfunction() \ No newline at end of file
diff --git a/NvBlast/sdk/compiler/cmake/modules/FindApexSDK.cmake b/NvBlast/sdk/compiler/cmake/modules/FindApexSDK.cmake
new file mode 100644
index 0000000..9910236
--- /dev/null
+++ b/NvBlast/sdk/compiler/cmake/modules/FindApexSDK.cmake
@@ -0,0 +1,399 @@
+# - Try to find Apex binary SDK
+# - Sets APEXSDK_LIBS_DEBUG and APEXSDK_LIBS_RELEASE - lists of the libraries found
+# - Sets APEXSDK_INCLUDE_DIRS
+# - Sets APEXSDK_DLLS - List of the DLLs to copy to the bin directory of projects that depend on this
+
+include(FindPackageHandleStandardArgs)
+
+# Find the includes
+
+# TODO: Do the version stuff properly!
+find_path(APEXSDK_PATH include/Apex.h
+ PATHS
+ ${GW_DEPS_ROOT}/$ENV{PM_Apex_NAME}/${ApexSDK_FIND_VERSION}
+ ${GW_DEPS_ROOT}/Apex/${ApexSDK_FIND_VERSION}
+)
+
+if (TARGET_BUILD_PLATFORM STREQUAL "Windows")
+ # If the project pulling in this dependency needs the static crt, then append that to the path.
+	if (STATIC_WINCRT)
+		SET(APEX_CRT_SUFFIX "-staticcrt")
+	else()
+		SET(APEX_CRT_SUFFIX "")
+	endif()
+
+ if (CMAKE_CL_64)
+ SET(APEX_ARCH_FOLDER "win64")
+ SET(APEX_ARCH_FILE "_x64")
+ else()
+ SET(APEX_ARCH_FOLDER "win32")
+ SET(APEX_ARCH_FILE "_x86")
+ endif()
+
+ # What compiler version do we want?
+
+ if (CMAKE_CXX_COMPILER_VERSION VERSION_GREATER 18.0.0.0 AND CMAKE_CXX_COMPILER_VERSION VERSION_LESS 19.0.0.0)
+ SET(VS_STR "vc12")
+ elseif(CMAKE_CXX_COMPILER_VERSION VERSION_GREATER 19.0.0.0)
+ SET(VS_STR "vc14")
+ else()
+		MESSAGE(FATAL_ERROR "Failed to find compatible ApexSDK - Only supporting VS2013 and VS2015")
+ endif()
+
+	SET(LIB_PATH ${APEXSDK_PATH}/lib/${VS_STR}${APEX_ARCH_FOLDER}-cmake${APEX_CRT_SUFFIX})
+ SET(CMAKE_FIND_LIBRARY_SUFFIXES ".lib" ".dll")
+
+elseif(TARGET_BUILD_PLATFORM STREQUAL "PS4")
+ SET(LIB_PATH ${APEXSDK_PATH}/lib/vc14ps4-cmake)
+ SET(CMAKE_FIND_LIBRARY_SUFFIXES ".a")
+ SET(CMAKE_FIND_LIBRARY_PREFIXES "lib")
+elseif(TARGET_BUILD_PLATFORM STREQUAL "XboxOne")
+ SET(LIB_PATH ${APEXSDK_PATH}/lib/vc14xboxone-cmake)
+ SET(CMAKE_FIND_LIBRARY_SUFFIXES ".a")
+ SET(CMAKE_FIND_LIBRARY_PREFIXES "lib")
+elseif(TARGET_BUILD_PLATFORM STREQUAL "linux")
+ SET(LIB_PATH ${APEXSDK_PATH}/lib/linux64-cmake)
+ SET(CMAKE_FIND_LIBRARY_SUFFIXES ".a")
+ SET(CMAKE_FIND_LIBRARY_PREFIXES "lib")
+endif()
+
+find_library(APEXCLOTHING_LIB
+ NAMES APEX_Clothing${APEX_ARCH_FILE}
+ PATHS ${LIB_PATH}
+)
+find_library(APEXDESTRUCTIBLE_LIB
+ NAMES APEX_Destructible${APEX_ARCH_FILE}
+ PATHS ${LIB_PATH}
+)
+find_library(APEXLEGACY_LIB
+ NAMES APEX_Legacy${APEX_ARCH_FILE}
+ PATHS ${LIB_PATH}
+)
+find_library(APEXLOADER_LIB
+ NAMES APEX_Loader${APEX_ARCH_FILE}
+ PATHS ${LIB_PATH}
+)
+find_library(APEXCOMMON_LIB
+ NAMES APEXCommon${APEX_ARCH_FILE}
+ PATHS ${LIB_PATH}
+)
+find_library(APEXFRAMEWORK_LIB
+ NAMES APEXFramework${APEX_ARCH_FILE}
+ PATHS ${LIB_PATH}
+)
+find_library(APEXSHARED_LIB
+ NAMES APEXShared${APEX_ARCH_FILE}
+ PATHS ${LIB_PATH}
+)
+
+find_library(APEXCLOTHING_LIB_DEBUG
+ NAMES APEX_ClothingDEBUG${APEX_ARCH_FILE}
+ PATHS ${LIB_PATH}
+)
+find_library(APEXDESTRUCTIBLE_LIB_DEBUG
+ NAMES APEX_DestructibleDEBUG${APEX_ARCH_FILE}
+ PATHS ${LIB_PATH}
+)
+find_library(APEXLEGACY_LIB_DEBUG
+ NAMES APEX_LegacyDEBUG${APEX_ARCH_FILE}
+ PATHS ${LIB_PATH}
+)
+find_library(APEXLOADER_LIB_DEBUG
+ NAMES APEX_LoaderDEBUG${APEX_ARCH_FILE}
+ PATHS ${LIB_PATH}
+)
+find_library(APEXCOMMON_LIB_DEBUG
+ NAMES APEXCommonDEBUG${APEX_ARCH_FILE}
+ PATHS ${LIB_PATH}
+)
+find_library(APEXFRAMEWORK_LIB_DEBUG
+ NAMES APEXFrameworkDEBUG${APEX_ARCH_FILE}
+ PATHS ${LIB_PATH}
+)
+find_library(APEXSHARED_LIB_DEBUG
+ NAMES APEXSharedDEBUG${APEX_ARCH_FILE}
+ PATHS ${LIB_PATH}
+)
+
+find_library(APEXCLOTHING_LIB_CHECKED
+ NAMES APEX_ClothingCHECKED${APEX_ARCH_FILE}
+ PATHS ${LIB_PATH}
+)
+find_library(APEXDESTRUCTIBLE_LIB_CHECKED
+ NAMES APEX_DestructibleCHECKED${APEX_ARCH_FILE}
+ PATHS ${LIB_PATH}
+)
+find_library(APEXLEGACY_LIB_CHECKED
+ NAMES APEX_LegacyCHECKED${APEX_ARCH_FILE}
+ PATHS ${LIB_PATH}
+)
+find_library(APEXLOADER_LIB_CHECKED
+ NAMES APEX_LoaderCHECKED${APEX_ARCH_FILE}
+ PATHS ${LIB_PATH}
+)
+find_library(APEXCOMMON_LIB_CHECKED
+ NAMES APEXCommonCHECKED${APEX_ARCH_FILE}
+ PATHS ${LIB_PATH}
+)
+find_library(APEXFRAMEWORK_LIB_CHECKED
+ NAMES APEXFrameworkCHECKED${APEX_ARCH_FILE}
+ PATHS ${LIB_PATH}
+)
+find_library(APEXSHARED_LIB_CHECKED
+ NAMES APEXSharedCHECKED${APEX_ARCH_FILE}
+ PATHS ${LIB_PATH}
+)
+
+find_library(APEXCLOTHING_LIB_PROFILE
+ NAMES APEX_ClothingPROFILE${APEX_ARCH_FILE}
+ PATHS ${LIB_PATH}
+)
+find_library(APEXDESTRUCTIBLE_LIB_PROFILE
+ NAMES APEX_DestructiblePROFILE${APEX_ARCH_FILE}
+ PATHS ${LIB_PATH}
+)
+find_library(APEXLEGACY_LIB_PROFILE
+ NAMES APEX_LegacyPROFILE${APEX_ARCH_FILE}
+ PATHS ${LIB_PATH}
+)
+find_library(APEXLOADER_LIB_PROFILE
+ NAMES APEX_LoaderPROFILE${APEX_ARCH_FILE}
+ PATHS ${LIB_PATH}
+)
+find_library(APEXCOMMON_LIB_PROFILE
+ NAMES APEXCommonPROFILE${APEX_ARCH_FILE}
+ PATHS ${LIB_PATH}
+)
+find_library(APEXFRAMEWORK_LIB_PROFILE
+ NAMES APEXFrameworkPROFILE${APEX_ARCH_FILE}
+ PATHS ${LIB_PATH}
+)
+find_library(APEXSHARED_LIB_PROFILE
+ NAMES APEXSharedPROFILE${APEX_ARCH_FILE}
+ PATHS ${LIB_PATH}
+)
+
+
+if (TARGET_BUILD_PLATFORM STREQUAL "Windows")
+
+ SET(DLL_PATH ${APEXSDK_PATH}/bin/${VS_STR}${APEX_ARCH_FOLDER}-cmake${APEX_CRT_SUFFIX})
+
+ find_library(APEXCLOTHING_DLL
+ NAMES APEX_Clothing${APEX_ARCH_FILE}
+ PATHS ${DLL_PATH}
+ )
+ find_library(APEXDESTRUCTIBLE_DLL
+ NAMES APEX_Destructible${APEX_ARCH_FILE}
+ PATHS ${DLL_PATH}
+ )
+ find_library(APEXLEGACY_DLL
+ NAMES APEX_Legacy${APEX_ARCH_FILE}
+ PATHS ${DLL_PATH}
+ )
+ find_library(APEXLOADER_DLL
+ NAMES APEX_Loader${APEX_ARCH_FILE}
+ PATHS ${DLL_PATH}
+ )
+ find_library(APEXFRAMEWORK_DLL
+ NAMES APEXFramework${APEX_ARCH_FILE}
+ PATHS ${DLL_PATH}
+ )
+
+ find_library(APEXCLOTHING_DLL_DEBUG
+ NAMES APEX_ClothingDEBUG${APEX_ARCH_FILE}
+ PATHS ${DLL_PATH}
+ )
+ find_library(APEXDESTRUCTIBLE_DLL_DEBUG
+ NAMES APEX_DestructibleDEBUG${APEX_ARCH_FILE}
+ PATHS ${DLL_PATH}
+ )
+ find_library(APEXLEGACY_DLL_DEBUG
+ NAMES APEX_LegacyDEBUG${APEX_ARCH_FILE}
+ PATHS ${DLL_PATH}
+ )
+ find_library(APEXLOADER_DLL_DEBUG
+ NAMES APEX_LoaderDEBUG${APEX_ARCH_FILE}
+ PATHS ${DLL_PATH}
+ )
+ find_library(APEXFRAMEWORK_DLL_DEBUG
+ NAMES APEXFrameworkDEBUG${APEX_ARCH_FILE}
+ PATHS ${DLL_PATH}
+ )
+
+ find_library(APEXCLOTHING_DLL_CHECKED
+ NAMES APEX_ClothingCHECKED${APEX_ARCH_FILE}
+ PATHS ${DLL_PATH}
+ )
+ find_library(APEXDESTRUCTIBLE_DLL_CHECKED
+ NAMES APEX_DestructibleCHECKED${APEX_ARCH_FILE}
+ PATHS ${DLL_PATH}
+ )
+ find_library(APEXLEGACY_DLL_CHECKED
+ NAMES APEX_LegacyCHECKED${APEX_ARCH_FILE}
+ PATHS ${DLL_PATH}
+ )
+ find_library(APEXLOADER_DLL_CHECKED
+ NAMES APEX_LoaderCHECKED${APEX_ARCH_FILE}
+ PATHS ${DLL_PATH}
+ )
+ find_library(APEXFRAMEWORK_DLL_CHECKED
+ NAMES APEXFrameworkCHECKED${APEX_ARCH_FILE}
+ PATHS ${DLL_PATH}
+ )
+
+ find_library(APEXCLOTHING_DLL_PROFILE
+ NAMES APEX_ClothingPROFILE${APEX_ARCH_FILE}
+ PATHS ${DLL_PATH}
+ )
+ find_library(APEXDESTRUCTIBLE_DLL_PROFILE
+ NAMES APEX_DestructiblePROFILE${APEX_ARCH_FILE}
+ PATHS ${DLL_PATH}
+ )
+ find_library(APEXLEGACY_DLL_PROFILE
+ NAMES APEX_LegacyPROFILE${APEX_ARCH_FILE}
+ PATHS ${DLL_PATH}
+ )
+ find_library(APEXLOADER_DLL_PROFILE
+ NAMES APEX_LoaderPROFILE${APEX_ARCH_FILE}
+ PATHS ${DLL_PATH}
+ )
+ find_library(APEXFRAMEWORK_DLL_PROFILE
+ NAMES APEXFrameworkPROFILE${APEX_ARCH_FILE}
+ PATHS ${DLL_PATH}
+ )
+
+
+ SET(DLL_VAR_LIST
+ APEXCLOTHING_DLL
+ APEXDESTRUCTIBLE_DLL
+ APEXLEGACY_DLL
+ APEXLOADER_DLL
+ APEXFRAMEWORK_DLL
+
+ APEXCLOTHING_DLL_DEBUG
+ APEXDESTRUCTIBLE_DLL_DEBUG
+ APEXLEGACY_DLL_DEBUG
+ APEXLOADER_DLL_DEBUG
+ APEXFRAMEWORK_DLL_DEBUG
+
+ APEXCLOTHING_DLL_CHECKED
+ APEXDESTRUCTIBLE_DLL_CHECKED
+ APEXLEGACY_DLL_CHECKED
+ APEXLOADER_DLL_CHECKED
+ APEXFRAMEWORK_DLL_CHECKED
+
+ APEXCLOTHING_DLL_PROFILE
+ APEXDESTRUCTIBLE_DLL_PROFILE
+ APEXLEGACY_DLL_PROFILE
+ APEXLOADER_DLL_PROFILE
+ APEXFRAMEWORK_DLL_PROFILE
+
+ )
+endif()
+
+FIND_PACKAGE_HANDLE_STANDARD_ARGS(APEXSDK
+ DEFAULT_MSG
+ APEXSDK_PATH
+
+ APEXCLOTHING_LIB
+ APEXDESTRUCTIBLE_LIB
+ APEXLEGACY_LIB
+ APEXLOADER_LIB
+ APEXCOMMON_LIB
+ APEXFRAMEWORK_LIB
+ APEXSHARED_LIB
+
+ APEXCLOTHING_LIB_DEBUG
+ APEXDESTRUCTIBLE_LIB_DEBUG
+ APEXLEGACY_LIB_DEBUG
+ APEXLOADER_LIB_DEBUG
+ APEXCOMMON_LIB_DEBUG
+ APEXFRAMEWORK_LIB_DEBUG
+ APEXSHARED_LIB_DEBUG
+
+ APEXCLOTHING_LIB_CHECKED
+ APEXDESTRUCTIBLE_LIB_CHECKED
+ APEXLEGACY_LIB_CHECKED
+ APEXLOADER_LIB_CHECKED
+ APEXCOMMON_LIB_CHECKED
+ APEXFRAMEWORK_LIB_CHECKED
+ APEXSHARED_LIB_CHECKED
+
+ APEXCLOTHING_LIB_PROFILE
+ APEXDESTRUCTIBLE_LIB_PROFILE
+ APEXLEGACY_LIB_PROFILE
+ APEXLOADER_LIB_PROFILE
+ APEXCOMMON_LIB_PROFILE
+ APEXFRAMEWORK_LIB_PROFILE
+ APEXSHARED_LIB_PROFILE
+
+ ${DLL_VAR_LIST}
+)
+
+if (APEXSDK_FOUND)
+ # NOTE: This include list is way too long and reaches into too many internals.
+ # Also may not be good enough for all users.
+ SET(APEXSDK_INCLUDE_DIRS
+ ${APEXSDK_PATH}/public
+ ${APEXSDK_PATH}/include
+ ${APEXSDK_PATH}/common/include
+ ${APEXSDK_PATH}/common/include/autogen
+ ${APEXSDK_PATH}/NvParameterized/include
+ ${APEXSDK_PATH}/include/destructible
+ ${APEXSDK_PATH}/include/PhysX3
+ ${APEXSDK_PATH}/shared/external/include
+ ${APEXSDK_PATH}/shared/internal/include
+ ${APEXSDK_PATH}/shared/general/shared
+ ${APEXSDK_PATH}/shared/general/RenderDebug/public
+ )
+
+ SET(APEXSDK_LIBS_RELEASE ${APEXCLOTHING_LIB} ${APEXDESTRUCTIBLE_LIB} ${APEXLEGACY_LIB} ${APEXLOADER_LIB} ${APEXCOMMON_LIB} ${APEXFRAMEWORK_LIB} ${APEXSHARED_LIB}
+ CACHE STRING ""
+ )
+ SET(APEXSDK_LIBS_DEBUG ${APEXCLOTHING_LIB_DEBUG} ${APEXDESTRUCTIBLE_LIB_DEBUG} ${APEXLEGACY_LIB_DEBUG} ${APEXLOADER_LIB_DEBUG} ${APEXCOMMON_LIB_DEBUG} ${APEXFRAMEWORK_LIB_DEBUG} ${APEXSHARED_LIB_DEBUG}
+ CACHE STRING ""
+ )
+ SET(APEXSDK_LIBS_CHECKED ${APEXCLOTHING_LIB_CHECKED} ${APEXDESTRUCTIBLE_LIB_CHECKED} ${APEXLEGACY_LIB_CHECKED} ${APEXLOADER_LIB_CHECKED} ${APEXCOMMON_LIB_CHECKED} ${APEXFRAMEWORK_LIB_CHECKED} ${APEXSHARED_LIB_CHECKED}
+ CACHE STRING ""
+ )
+ SET(APEXSDK_LIBS_PROFILE ${APEXCLOTHING_LIB_PROFILE} ${APEXDESTRUCTIBLE_LIB_PROFILE} ${APEXLEGACY_LIB_PROFILE} ${APEXLOADER_LIB_PROFILE} ${APEXCOMMON_LIB_PROFILE} ${APEXFRAMEWORK_LIB_PROFILE} ${APEXSHARED_LIB_PROFILE}
+ CACHE STRING ""
+ )
+
+
+ SET(APEXSDK_DLLS
+ ${APEXCLOTHING_DLL}
+ ${APEXDESTRUCTIBLE_DLL}
+ ${APEXLEGACY_DLL}
+ ${APEXLOADER_DLL}
+ ${APEXFRAMEWORK_DLL}
+
+ ${APEXCLOTHING_DLL_DEBUG}
+ ${APEXDESTRUCTIBLE_DLL_DEBUG}
+ ${APEXLEGACY_DLL_DEBUG}
+ ${APEXLOADER_DLL_DEBUG}
+ ${APEXFRAMEWORK_DLL_DEBUG}
+
+ ${APEXCLOTHING_DLL_CHECKED}
+ ${APEXDESTRUCTIBLE_DLL_CHECKED}
+ ${APEXLEGACY_DLL_CHECKED}
+ ${APEXLOADER_DLL_CHECKED}
+ ${APEXFRAMEWORK_DLL_CHECKED}
+
+ ${APEXCLOTHING_DLL_PROFILE}
+ ${APEXDESTRUCTIBLE_DLL_PROFILE}
+ ${APEXLEGACY_DLL_PROFILE}
+ ${APEXLOADER_DLL_PROFILE}
+ ${APEXFRAMEWORK_DLL_PROFILE}
+ )
+
+ SET(APEXSDK_LIBRARIES "" CACHE STRING "")
+
+ foreach(x ${APEXSDK_LIBS_RELEASE})
+ list(APPEND APEXSDK_LIBRARIES optimized ${x})
+ endforeach()
+
+ foreach(x ${APEXSDK_LIBS_DEBUG})
+ list(APPEND APEXSDK_LIBRARIES debug ${x})
+ endforeach()
+endif()
diff --git a/NvBlast/sdk/compiler/cmake/modules/FindCapnProtoSDK.cmake b/NvBlast/sdk/compiler/cmake/modules/FindCapnProtoSDK.cmake
new file mode 100644
index 0000000..f3200ec
--- /dev/null
+++ b/NvBlast/sdk/compiler/cmake/modules/FindCapnProtoSDK.cmake
@@ -0,0 +1,111 @@
+# - Try to find CapnProto SDK
+# - Sets CAPNPROTOSDK_LIBRARIES - list of the libraries found
+# - Sets CAPNPROTOSDK_INCLUDE_DIRS
+
+include(FindPackageHandleStandardArgs)
+
+# Find the includes
+
+# TODO: Do the version stuff properly!
+find_path(CAPNPROTOSDK_PATH src/capnp/message.h
+ PATHS ${GW_DEPS_ROOT}/CapnProto/${CapnProtoSDK_FIND_VERSION}
+)
+
+if (TARGET_BUILD_PLATFORM STREQUAL "Windows")
+
+ if (STATIC_WINCRT)
+ SET(CAPNPROTOSDK_CRT_SUFFIX "-mt")
+ else()
+ SET(CAPNPROTOSDK_CRT_SUFFIX "-md")
+ endif()
+
+
+ # If the project pulling in this dependency needs the static crt, then append that to the path.
+ if (CMAKE_CL_64)
+ SET(CAPNPROTOSDK_ARCH_FOLDER "win64")
+ else()
+ SET(CAPNPROTOSDK_ARCH_FOLDER "win32")
+ endif()
+
+ SET(LIB_PATH ${CAPNPROTOSDK_PATH}/bin/${CAPNPROTOSDK_ARCH_FOLDER})
+ SET(EXE_PATH ${CAPNPROTOSDK_PATH}/tools/win32)
+
+
+elseif(TARGET_BUILD_PLATFORM STREQUAL "PS4")
+ SET(LIB_PATH ${CAPNPROTOSDK_PATH}/bin/ps4)
+ SET(EXE_PATH ${CAPNPROTOSDK_PATH}/tools/win32)
+ SET(CMAKE_FIND_LIBRARY_SUFFIXES ".a")
+ SET(CMAKE_FIND_LIBRARY_PREFIXES "lib")
+elseif(TARGET_BUILD_PLATFORM STREQUAL "XboxOne")
+ SET(LIB_PATH ${CAPNPROTOSDK_PATH}/bin/xboxone)
+ SET(EXE_PATH ${CAPNPROTOSDK_PATH}/tools/win32)
+ SET(CMAKE_FIND_LIBRARY_SUFFIXES ".a")
+ SET(CMAKE_FIND_LIBRARY_PREFIXES "lib")
+elseif(TARGET_BUILD_PLATFORM STREQUAL "linux")
+ SET(LIB_PATH ${CAPNPROTOSDK_PATH}/bin/ubuntu64)
+ SET(EXE_PATH ${CAPNPROTOSDK_PATH}/tools/ubuntu64)
+ SET(CMAKE_FIND_LIBRARY_SUFFIXES ".a")
+ SET(CMAKE_FIND_LIBRARY_PREFIXES "lib")
+endif()
+
+find_library(CAPNPROTO_LIB
+ NAMES capnp${CAPNPROTOSDK_CRT_SUFFIX}
+ PATHS ${LIB_PATH}/Release
+)
+find_library(CAPNPROTO_LIB_DEBUG
+ NAMES capnp${CAPNPROTOSDK_CRT_SUFFIX}
+ PATHS ${LIB_PATH}/Debug
+)
+
+find_library(KJ_LIB
+ NAMES kj${CAPNPROTOSDK_CRT_SUFFIX}
+ PATHS ${LIB_PATH}/Release
+)
+find_library(KJ_LIB_DEBUG
+ NAMES kj${CAPNPROTOSDK_CRT_SUFFIX}
+ PATHS ${LIB_PATH}/Debug
+)
+
+find_program(CAPNP_EXECUTABLE
+ NAMES capnp
+ DOC "Cap'n Proto Command-line Tool"
+ PATHS ${EXE_PATH}
+)
+
+find_program(CAPNPC_CXX_EXECUTABLE
+ NAMES capnpc-c++
+	DOC "Cap'n Proto C++ Compiler"
+ PATHS ${EXE_PATH}
+)
+
+FIND_PACKAGE_HANDLE_STANDARD_ARGS(CAPNPROTOSDK
+ DEFAULT_MSG
+ CAPNPROTOSDK_PATH
+
+ CAPNP_EXECUTABLE
+ CAPNPC_CXX_EXECUTABLE
+
+ CAPNPROTO_LIB
+ KJ_LIB
+
+ CAPNPROTO_LIB_DEBUG
+ KJ_LIB_DEBUG
+)
+
+if (CAPNPROTOSDK_FOUND)
+
+ SET(CAPNPROTOSDK_INCLUDE_DIRS
+ ${CAPNPROTOSDK_PATH}/src/
+ )
+
+ SET(CAPNP_INCLUDE_DIRS
+ ${CAPNPROTOSDK_INCLUDE_DIRS}
+ )
+
+ SET(CAPNPROTOSDK_LIBRARIES "" CACHE STRING "")
+
+ LIST(APPEND CAPNPROTOSDK_LIBRARIES
+ optimized ${CAPNPROTO_LIB} debug ${CAPNPROTO_LIB_DEBUG}
+ optimized ${KJ_LIB} debug ${KJ_LIB_DEBUG}
+ )
+endif()
diff --git a/NvBlast/sdk/compiler/cmake/modules/FindFBXSDK.cmake b/NvBlast/sdk/compiler/cmake/modules/FindFBXSDK.cmake
new file mode 100644
index 0000000..7e1bfb4
--- /dev/null
+++ b/NvBlast/sdk/compiler/cmake/modules/FindFBXSDK.cmake
@@ -0,0 +1,71 @@
+# - Try to find FBX SDK
+# - Sets FBXSDK_LIBRARIES - list of the libraries found
+# - Sets FBXSDK_INCLUDE_DIRS
+# - Sets FBXSDK_DLLS - List of the DLLs to copy to the bin directory of projects that depend on this
+
+
+include(FindPackageHandleStandardArgs)
+
+# Find the includes
+
+# TODO: Do the version stuff properly!
+find_path(FBXSDK_PATH include/fbxsdk.h
+ PATHS ${GW_DEPS_ROOT}/FBXSDK/${FBXSDK_FIND_VERSION}
+)
+
+if (STATIC_WINCRT)
+ SET(FBXSDK_CRT_SUFFIX "mt")
+else()
+ SET(FBXSDK_CRT_SUFFIX "md")
+endif()
+
+
+# If the project pulling in this dependency needs the static crt, then append that to the path.
+if (CMAKE_CL_64)
+ SET(FBXSDK_ARCH_FOLDER "x64")
+else()
+ SET(FBXSDK_ARCH_FOLDER "x86")
+endif()
+
+# What compiler version do we want?
+
+if (CMAKE_CXX_COMPILER_VERSION VERSION_GREATER 18.0.0.0 AND CMAKE_CXX_COMPILER_VERSION VERSION_LESS 19.0.0.0)
+ SET(VS_STR "vs2013")
+elseif(CMAKE_CXX_COMPILER_VERSION VERSION_GREATER 19.0.0.0)
+ SET(VS_STR "vs2015")
+else()
+ MESSAGE(FATAL_ERROR "Failed to find compatible FBXSDK - Only supporting VS2013 and VS2015")
+endif()
+
+# Now find all of the PhysX libs in the lib directory
+
+SET(LIB_PATH ${FBXSDK_PATH}/lib/${VS_STR}/${FBXSDK_ARCH_FOLDER})
+
+find_library(FBX_LIB
+ NAMES libfbxsdk-${FBXSDK_CRT_SUFFIX}
+ PATHS ${LIB_PATH}/release
+)
+find_library(FBX_LIB_DEBUG
+ NAMES libfbxsdk-${FBXSDK_CRT_SUFFIX}
+ PATHS ${LIB_PATH}/debug
+)
+
+FIND_PACKAGE_HANDLE_STANDARD_ARGS(FBXSDK
+ DEFAULT_MSG
+ FBXSDK_PATH
+ FBX_LIB
+
+ FBX_LIB_DEBUG
+)
+
+if (FBXSDK_FOUND)
+
+ SET(FBXSDK_INCLUDE_DIRS
+ ${FBXSDK_PATH}/include
+ ${FBXSDK_PATH}/include/fbxsdk
+ )
+
+ SET(FBXSDK_LIBRARIES "" CACHE STRING "")
+
+ LIST(APPEND FBXSDK_LIBRARIES optimized ${FBX_LIB} debug ${FBX_LIB_DEBUG})
+endif()
diff --git a/NvBlast/sdk/compiler/cmake/modules/FindGoogleTestNV.cmake b/NvBlast/sdk/compiler/cmake/modules/FindGoogleTestNV.cmake
new file mode 100644
index 0000000..aebc849
--- /dev/null
+++ b/NvBlast/sdk/compiler/cmake/modules/FindGoogleTestNV.cmake
@@ -0,0 +1,132 @@
+# - Try to find GoogleTest SDK
+# - Sets GOOGLETEST_LIBRARIES - list of the libraries found
+# - Sets GOOGLETEST_INCLUDE_DIRS
+
+include(FindPackageHandleStandardArgs)
+
+# Find the includes
+
+# TODO: Do the version stuff properly!
+find_path(GOOGLETEST_PATH include/gtest/gtest.h
+	PATHS ${GW_DEPS_ROOT}/googletest-linux-x86_64/${GoogleTestNV_FIND_VERSION}
+	${GW_DEPS_ROOT}/googletest-nv/${GoogleTestNV_FIND_VERSION}
+)
+
+if (TARGET_BUILD_PLATFORM STREQUAL "Windows")
+	# If the project pulling in this dependency needs the static crt, then append that to the path.
+
+	if (STATIC_WINCRT)
+		SET(GOOGLETEST_CRT_SUFFIX "-staticcrt")
+	else()
+		SET(GOOGLETEST_CRT_SUFFIX "")
+	endif()
+
+	if (CMAKE_CL_64)
+		SET(GOOGLETEST_ARCH_FOLDER "win64")
+	else()
+		SET(GOOGLETEST_ARCH_FOLDER "win32")
+	endif()
+
+	if (CMAKE_CXX_COMPILER_VERSION VERSION_GREATER 18.0.0.0 AND CMAKE_CXX_COMPILER_VERSION VERSION_LESS 19.0.0.0)
+		SET(VS_STR "vc12")
+	elseif(CMAKE_CXX_COMPILER_VERSION VERSION_GREATER 19.0.0.0)
+		SET(VS_STR "vc14")
+	else()
+		MESSAGE(FATAL_ERROR "Failed to find compatible GoogleTest - Only supporting VS2013 and VS2015")
+	endif()
+
+
+	# Now find all of the GoogleTest libs in the lib directory
+
+	SET(LIB_PATH ${GOOGLETEST_PATH}/lib/${VS_STR}${GOOGLETEST_ARCH_FOLDER}-cmake${GOOGLETEST_CRT_SUFFIX})
+
+elseif(TARGET_BUILD_PLATFORM STREQUAL "PS4")
+	SET(LIB_PATH ${GOOGLETEST_PATH}/lib/PS4)
+	SET(CMAKE_FIND_LIBRARY_SUFFIXES ".a")
+	SET(CMAKE_FIND_LIBRARY_PREFIXES "lib")
+elseif(TARGET_BUILD_PLATFORM STREQUAL "XboxOne")
+	SET(LIB_PATH ${GOOGLETEST_PATH}/lib/xboxone-${XDK_VERSION})
+	SET(CMAKE_FIND_LIBRARY_SUFFIXES ".a")
+	SET(CMAKE_FIND_LIBRARY_PREFIXES "lib")
+elseif(TARGET_BUILD_PLATFORM STREQUAL "linux")
+	if(CMAKE_CXX_COMPILER_VERSION VERSION_GREATER 5.0)
+		SET(LIB_PATH ${GOOGLETEST_PATH}/lib/gcc-5.4)
+	else()
+		SET(LIB_PATH ${GOOGLETEST_PATH}/lib/gcc-4.8)
+	endif()
+	SET(CMAKE_FIND_LIBRARY_SUFFIXES ".a")
+	SET(CMAKE_FIND_LIBRARY_PREFIXES "lib")
+endif()
+
+MESSAGE("GoogleTest lib path: " ${LIB_PATH})
+
+if(TARGET_BUILD_PLATFORM STREQUAL "linux")
+	find_library(GTEST_LIB
+		NAMES gtest
+		PATHS ${LIB_PATH}
+	)
+
+	find_library(GTEST_MAIN_LIB
+		NAMES gtest_main
+		PATHS ${LIB_PATH}
+	)
+
+	SET(GTEST_LIB_DEBUG ${GTEST_LIB})
+	SET(GTEST_MAIN_LIB_DEBUG ${GTEST_MAIN_LIB})
+else()
+	find_library(GTEST_LIB
+		NAMES gtest
+		PATHS ${LIB_PATH}/Release
+	)
+
+	find_library(GTEST_LIB_DEBUG
+		NAMES gtest
+		PATHS ${LIB_PATH}/Debug
+	)
+
+	find_library(GTEST_MAIN_LIB
+		NAMES gtest_main
+		PATHS ${LIB_PATH}/Release
+	)
+
+	find_library(GTEST_MAIN_LIB_DEBUG
+		NAMES gtest_main
+		PATHS ${LIB_PATH}/Debug
+	)
+endif()
+
+FIND_PACKAGE_HANDLE_STANDARD_ARGS(GOOGLETEST
+	DEFAULT_MSG
+	GOOGLETEST_PATH
+
+	GTEST_LIB
+	GTEST_MAIN_LIB
+	GTEST_LIB_DEBUG
+	GTEST_MAIN_LIB_DEBUG
+)
+
+if (GOOGLETEST_FOUND)
+	# NOTE: This include list is way too long and reaches into too many internals.
+	# Also may not be good enough for all users.
+	SET(GOOGLETEST_INCLUDE_DIRS
+		${GOOGLETEST_PATH}/include
+		${GOOGLETEST_PATH}/include/gtest
+	)
+
+	SET(GOOGLETEST_LIBS_RELEASE ${GTEST_LIB} ${GTEST_MAIN_LIB}
+		CACHE STRING ""
+	)
+	SET(GOOGLETEST_LIBS_DEBUG ${GTEST_LIB_DEBUG} ${GTEST_MAIN_LIB_DEBUG}
+		CACHE STRING ""
+	)
+
+	SET(GOOGLETEST_LIBRARIES "" CACHE STRING "")
+
+	foreach(x ${GOOGLETEST_LIBS_RELEASE})
+		list(APPEND GOOGLETEST_LIBRARIES optimized ${x})
+	endforeach()
+
+	foreach(x ${GOOGLETEST_LIBS_DEBUG})
+		list(APPEND GOOGLETEST_LIBRARIES debug ${x})
+	endforeach()
+endif()
diff --git a/NvBlast/sdk/compiler/cmake/modules/FindPhysXSDK.cmake b/NvBlast/sdk/compiler/cmake/modules/FindPhysXSDK.cmake
new file mode 100644
index 0000000..830e6c5
--- /dev/null
+++ b/NvBlast/sdk/compiler/cmake/modules/FindPhysXSDK.cmake
@@ -0,0 +1,566 @@
+# - Try to find PhysX binary SDK
+# - Sets PHYSXSDK_LIBS_DEBUG, PHYSXSDK_LIBS_RELEASE, PHYSXSDK_LIBS_CHECKED, PHYSXSDK_LIBS_PROFILE - lists of the libraries found
+# - Sets PHYSXSDK_INCLUDE_DIRS
+# - Sets PHYSXSDK_DLLS - List of the DLLs to copy to the bin directory of projects that depend on this
+
+include(FindPackageHandleStandardArgs)
+
+MESSAGE("Looking for PhysXSDK ${PhysXSDK_FIND_VERSION} Cached path: ${PHYSXSDK_PATH}")
+# TODO: Do the version stuff properly!
+find_path(PHYSXSDK_PATH Include/PxActor.h
+ PATHS
+ ${GW_DEPS_ROOT}/$ENV{PM_PhysX_NAME}/${PhysXSDK_FIND_VERSION}
+ ${GW_DEPS_ROOT}/PhysX/${PhysXSDK_FIND_VERSION}
+)
+
+if (TARGET_BUILD_PLATFORM STREQUAL "Windows")
+	# If the project pulling in this dependency needs the static crt, then append that to the path.
+	if (STATIC_WINCRT)
+		SET(PHYSX_CRT_SUFFIX "-staticcrt")
+	else()
+		SET(PHYSX_CRT_SUFFIX "")
+	endif()
+
+	if (CMAKE_CL_64)
+		SET(PHYSX_ARCH_FOLDER "win64")
+		SET(PHYSX_ARCH_FILE "_x64")
+	else()
+		SET(PHYSX_ARCH_FOLDER "win32")
+		SET(PHYSX_ARCH_FILE "_x86")
+	endif()
+
+	# What compiler version do we want?
+
+	if (CMAKE_CXX_COMPILER_VERSION VERSION_GREATER 18.0.0.0 AND CMAKE_CXX_COMPILER_VERSION VERSION_LESS 19.0.0.0)
+		SET(VS_STR "vc12")
+	elseif(CMAKE_CXX_COMPILER_VERSION VERSION_GREATER 19.0.0.0)
+		SET(VS_STR "vc14")
+	else()
+		MESSAGE(FATAL_ERROR "Failed to find compatible PhysXSDK - Only supporting VS2013 and VS2015")
+	endif()
+
+	SET(LIB_PATH ${PHYSXSDK_PATH}/lib/${VS_STR}${PHYSX_ARCH_FOLDER}-cmake${PHYSX_CRT_SUFFIX})
+	SET(CMAKE_FIND_LIBRARY_SUFFIXES ".lib" ".dll")
+
+elseif(TARGET_BUILD_PLATFORM STREQUAL "PS4")
+	SET(LIB_PATH ${PHYSXSDK_PATH}/lib/vc14ps4-cmake)
+	SET(CMAKE_FIND_LIBRARY_SUFFIXES ".a")
+	SET(CMAKE_FIND_LIBRARY_PREFIXES "lib")
+elseif(TARGET_BUILD_PLATFORM STREQUAL "XboxOne")
+	SET(LIB_PATH ${PHYSXSDK_PATH}/lib/vc14xboxone-cmake)
+	SET(CMAKE_FIND_LIBRARY_SUFFIXES ".a")
+	SET(CMAKE_FIND_LIBRARY_PREFIXES "lib")
+elseif(TARGET_BUILD_PLATFORM STREQUAL "linux")
+	SET(LIB_PATH ${PHYSXSDK_PATH}/lib/linux64-cmake)
+	SET(CMAKE_FIND_LIBRARY_SUFFIXES ".a")
+	SET(CMAKE_FIND_LIBRARY_PREFIXES "lib")
+	SET(PHYSX_ARCH_FILE "_x64")
+endif()
+
+# Now find all of the PhysX libs in the lib directory
+
+find_library(PHYSX3_LIB
+ NAMES PhysX3${PHYSX_ARCH_FILE}
+ PATHS ${LIB_PATH}
+)
+find_library(PHYSX3EXTENSIONS_LIB
+ NAMES PhysX3Extensions${PHYSX_ARCH_FILE}
+ PATHS ${LIB_PATH}
+)
+find_library(PHYSX3CHARACTERKINEMATIC_LIB
+ NAMES PhysX3CharacterKinematic${PHYSX_ARCH_FILE}
+ PATHS ${LIB_PATH}
+)
+find_library(PHYSX3COMMON_LIB
+ NAMES PhysX3Common${PHYSX_ARCH_FILE}
+ PATHS ${LIB_PATH}
+)
+find_library(PHYSX3COOKING_LIB
+ NAMES PhysX3Cooking${PHYSX_ARCH_FILE}
+ PATHS ${LIB_PATH}
+)
+find_library(LOWLEVEL_LIB
+ NAMES LowLevel${PHYSX_ARCH_FILE}
+ PATHS ${LIB_PATH}
+)
+find_library(LOWLEVELAABB_LIB
+ NAMES LowLevelAABB${PHYSX_ARCH_FILE}
+ PATHS ${LIB_PATH}
+)
+find_library(LOWLEVELCLOTH_LIB
+ NAMES LowLevelCloth${PHYSX_ARCH_FILE}
+ PATHS ${LIB_PATH}
+)
+find_library(LOWLEVELDYNAMICS_LIB
+ NAMES LowLevelDynamics${PHYSX_ARCH_FILE}
+ PATHS ${LIB_PATH}
+)
+find_library(LOWLEVELPARTICLES_LIB
+ NAMES LowLevelParticles${PHYSX_ARCH_FILE}
+ PATHS ${LIB_PATH}
+)
+find_library(SCENEQUERY_LIB
+ NAMES SceneQuery${PHYSX_ARCH_FILE}
+ PATHS ${LIB_PATH}
+)
+find_library(SIMULATIONCONTROLLER_LIB
+ NAMES SimulationController${PHYSX_ARCH_FILE}
+ PATHS ${LIB_PATH}
+)
+
+
+find_library(PHYSX3_LIB_DEBUG
+ NAMES PhysX3DEBUG${PHYSX_ARCH_FILE}
+ PATHS ${LIB_PATH}
+)
+find_library(PHYSX3EXTENSIONS_LIB_DEBUG
+ NAMES PhysX3ExtensionsDEBUG${PHYSX_ARCH_FILE}
+ PATHS ${LIB_PATH}
+)
+find_library(PHYSX3CHARACTERKINEMATIC_LIB_DEBUG
+ NAMES PhysX3CharacterKinematicDEBUG${PHYSX_ARCH_FILE}
+ PATHS ${LIB_PATH}
+)
+find_library(PHYSX3COMMON_LIB_DEBUG
+ NAMES PhysX3CommonDEBUG${PHYSX_ARCH_FILE}
+ PATHS ${LIB_PATH}
+)
+find_library(PHYSX3COOKING_LIB_DEBUG
+ NAMES PhysX3CookingDEBUG${PHYSX_ARCH_FILE}
+ PATHS ${LIB_PATH}
+)
+find_library(LOWLEVEL_LIB_DEBUG
+ NAMES LowLevelDEBUG${PHYSX_ARCH_FILE}
+ PATHS ${LIB_PATH}
+)
+find_library(LOWLEVELAABB_LIB_DEBUG
+ NAMES LowLevelAABBDEBUG${PHYSX_ARCH_FILE}
+ PATHS ${LIB_PATH}
+)
+find_library(LOWLEVELCLOTH_LIB_DEBUG
+ NAMES LowLevelClothDEBUG${PHYSX_ARCH_FILE}
+ PATHS ${LIB_PATH}
+)
+find_library(LOWLEVELDYNAMICS_LIB_DEBUG
+ NAMES LowLevelDynamicsDEBUG${PHYSX_ARCH_FILE}
+ PATHS ${LIB_PATH}
+)
+find_library(LOWLEVELPARTICLES_LIB_DEBUG
+ NAMES LowLevelParticlesDEBUG${PHYSX_ARCH_FILE}
+ PATHS ${LIB_PATH}
+)
+find_library(SCENEQUERY_LIB_DEBUG
+ NAMES SceneQueryDEBUG${PHYSX_ARCH_FILE}
+ PATHS ${LIB_PATH}
+)
+find_library(SIMULATIONCONTROLLER_LIB_DEBUG
+ NAMES SimulationControllerDEBUG${PHYSX_ARCH_FILE}
+ PATHS ${LIB_PATH}
+)
+
+find_library(PHYSX3_LIB_CHECKED
+ NAMES PhysX3CHECKED${PHYSX_ARCH_FILE}
+ PATHS ${LIB_PATH}
+)
+find_library(PHYSX3EXTENSIONS_LIB_CHECKED
+ NAMES PhysX3ExtensionsCHECKED${PHYSX_ARCH_FILE}
+ PATHS ${LIB_PATH}
+)
+find_library(PHYSX3CHARACTERKINEMATIC_LIB_CHECKED
+ NAMES PhysX3CharacterKinematicCHECKED${PHYSX_ARCH_FILE}
+ PATHS ${LIB_PATH}
+)
+find_library(PHYSX3COMMON_LIB_CHECKED
+ NAMES PhysX3CommonCHECKED${PHYSX_ARCH_FILE}
+ PATHS ${LIB_PATH}
+)
+find_library(PHYSX3COOKING_LIB_CHECKED
+ NAMES PhysX3CookingCHECKED${PHYSX_ARCH_FILE}
+ PATHS ${LIB_PATH}
+)
+find_library(LOWLEVEL_LIB_CHECKED
+ NAMES LowLevelCHECKED${PHYSX_ARCH_FILE}
+ PATHS ${LIB_PATH}
+)
+find_library(LOWLEVELAABB_LIB_CHECKED
+ NAMES LowLevelAABBCHECKED${PHYSX_ARCH_FILE}
+ PATHS ${LIB_PATH}
+)
+find_library(LOWLEVELCLOTH_LIB_CHECKED
+ NAMES LowLevelClothCHECKED${PHYSX_ARCH_FILE}
+ PATHS ${LIB_PATH}
+)
+find_library(LOWLEVELDYNAMICS_LIB_CHECKED
+ NAMES LowLevelDynamicsCHECKED${PHYSX_ARCH_FILE}
+ PATHS ${LIB_PATH}
+)
+find_library(LOWLEVELPARTICLES_LIB_CHECKED
+ NAMES LowLevelParticlesCHECKED${PHYSX_ARCH_FILE}
+ PATHS ${LIB_PATH}
+)
+find_library(SCENEQUERY_LIB_CHECKED
+ NAMES SceneQueryCHECKED${PHYSX_ARCH_FILE}
+ PATHS ${LIB_PATH}
+)
+find_library(SIMULATIONCONTROLLER_LIB_CHECKED
+ NAMES SimulationControllerCHECKED${PHYSX_ARCH_FILE}
+ PATHS ${LIB_PATH}
+)
+
+find_library(PHYSX3_LIB_PROFILE
+ NAMES PhysX3PROFILE${PHYSX_ARCH_FILE}
+ PATHS ${LIB_PATH}
+)
+find_library(PHYSX3EXTENSIONS_LIB_PROFILE
+ NAMES PhysX3ExtensionsPROFILE${PHYSX_ARCH_FILE}
+ PATHS ${LIB_PATH}
+)
+find_library(PHYSX3CHARACTERKINEMATIC_LIB_PROFILE
+ NAMES PhysX3CharacterKinematicPROFILE${PHYSX_ARCH_FILE}
+ PATHS ${LIB_PATH}
+)
+find_library(PHYSX3COMMON_LIB_PROFILE
+ NAMES PhysX3CommonPROFILE${PHYSX_ARCH_FILE}
+ PATHS ${LIB_PATH}
+)
+find_library(PHYSX3COOKING_LIB_PROFILE
+ NAMES PhysX3CookingPROFILE${PHYSX_ARCH_FILE}
+ PATHS ${LIB_PATH}
+)
+find_library(LOWLEVEL_LIB_PROFILE
+ NAMES LowLevelPROFILE${PHYSX_ARCH_FILE}
+ PATHS ${LIB_PATH}
+)
+find_library(LOWLEVELAABB_LIB_PROFILE
+ NAMES LowLevelAABBPROFILE${PHYSX_ARCH_FILE}
+ PATHS ${LIB_PATH}
+)
+find_library(LOWLEVELCLOTH_LIB_PROFILE
+ NAMES LowLevelClothPROFILE${PHYSX_ARCH_FILE}
+ PATHS ${LIB_PATH}
+)
+find_library(LOWLEVELDYNAMICS_LIB_PROFILE
+ NAMES LowLevelDynamicsPROFILE${PHYSX_ARCH_FILE}
+ PATHS ${LIB_PATH}
+)
+find_library(LOWLEVELPARTICLES_LIB_PROFILE
+ NAMES LowLevelParticlesPROFILE${PHYSX_ARCH_FILE}
+ PATHS ${LIB_PATH}
+)
+find_library(SCENEQUERY_LIB_PROFILE
+ NAMES SceneQueryPROFILE${PHYSX_ARCH_FILE}
+ PATHS ${LIB_PATH}
+)
+find_library(SIMULATIONCONTROLLER_LIB_PROFILE
+ NAMES SimulationControllerPROFILE${PHYSX_ARCH_FILE}
+ PATHS ${LIB_PATH}
+)
+
+if (TARGET_BUILD_PLATFORM STREQUAL "Windows")
+
+ find_library(PHYSX3GPU_LIB
+ NAMES PhysX3Gpu${PHYSX_ARCH_FILE}
+ PATHS ${LIB_PATH}
+ )
+ find_library(PHYSX3GPU_LIB_DEBUG
+ NAMES PhysX3GpuDEBUG${PHYSX_ARCH_FILE}
+ PATHS ${LIB_PATH}
+ )
+
+
+ SET(DLL_PATH ${PHYSXSDK_PATH}/bin/${VS_STR}${PHYSX_ARCH_FOLDER}-cmake${PHYSX_CRT_SUFFIX})
+
+ find_library(PHYSX3_DLL
+ NAMES PhysX3${PHYSX_ARCH_FILE}
+ PATHS ${DLL_PATH}
+ )
+ find_library(PHYSX3CHARACTERKINEMATIC_DLL
+ NAMES PhysX3CharacterKinematic${PHYSX_ARCH_FILE}
+ PATHS ${DLL_PATH}
+ )
+ find_library(PHYSX3COMMON_DLL
+ NAMES PhysX3Common${PHYSX_ARCH_FILE}
+ PATHS ${DLL_PATH}
+ )
+ find_library(PHYSX3COOKING_DLL
+ NAMES PhysX3Cooking${PHYSX_ARCH_FILE}
+ PATHS ${DLL_PATH}
+ )
+ find_library(PHYSX3GPU_DLL
+ NAMES PhysX3Gpu${PHYSX_ARCH_FILE}
+ PATHS ${DLL_PATH}
+ )
+
+
+ find_library(PHYSX3_DLL_DEBUG
+ NAMES PhysX3DEBUG${PHYSX_ARCH_FILE}
+ PATHS ${DLL_PATH}
+ )
+ find_library(PHYSX3CHARACTERKINEMATIC_DLL_DEBUG
+ NAMES PhysX3CharacterKinematicDEBUG${PHYSX_ARCH_FILE}
+ PATHS ${DLL_PATH}
+ )
+ find_library(PHYSX3COMMON_DLL_DEBUG
+ NAMES PhysX3CommonDEBUG${PHYSX_ARCH_FILE}
+ PATHS ${DLL_PATH}
+ )
+ find_library(PHYSX3COOKING_DLL_DEBUG
+ NAMES PhysX3CookingDEBUG${PHYSX_ARCH_FILE}
+ PATHS ${DLL_PATH}
+ )
+ # NOTE - GPU dlls not included in required dlls or libs as they're optional.
+ find_library(PHYSX3GPU_DLL_DEBUG
+ NAMES PhysX3GpuDEBUG${PHYSX_ARCH_FILE}
+ PATHS ${DLL_PATH}
+ )
+
+ find_library(PHYSX3_DLL_PROFILE
+ NAMES PhysX3PROFILE${PHYSX_ARCH_FILE}
+ PATHS ${DLL_PATH}
+ )
+ find_library(PHYSX3CHARACTERKINEMATIC_DLL_PROFILE
+ NAMES PhysX3CharacterKinematicPROFILE${PHYSX_ARCH_FILE}
+ PATHS ${DLL_PATH}
+ )
+ find_library(PHYSX3COMMON_DLL_PROFILE
+ NAMES PhysX3CommonPROFILE${PHYSX_ARCH_FILE}
+ PATHS ${DLL_PATH}
+ )
+ find_library(PHYSX3COOKING_DLL_PROFILE
+ NAMES PhysX3CookingPROFILE${PHYSX_ARCH_FILE}
+ PATHS ${DLL_PATH}
+ )
+ # NOTE - GPU dlls not included in required dlls or libs as they're optional.
+ find_library(PHYSX3GPU_DLL_PROFILE
+ NAMES PhysX3GpuPROFILE${PHYSX_ARCH_FILE}
+ PATHS ${DLL_PATH}
+ )
+
+ find_library(PHYSX3_DLL_CHECKED
+ NAMES PhysX3CHECKED${PHYSX_ARCH_FILE}
+ PATHS ${DLL_PATH}
+ )
+ find_library(PHYSX3CHARACTERKINEMATIC_DLL_CHECKED
+ NAMES PhysX3CharacterKinematicCHECKED${PHYSX_ARCH_FILE}
+ PATHS ${DLL_PATH}
+ )
+ find_library(PHYSX3COMMON_DLL_CHECKED
+ NAMES PhysX3CommonCHECKED${PHYSX_ARCH_FILE}
+ PATHS ${DLL_PATH}
+ )
+ find_library(PHYSX3COOKING_DLL_CHECKED
+ NAMES PhysX3CookingCHECKED${PHYSX_ARCH_FILE}
+ PATHS ${DLL_PATH}
+ )
+ # NOTE - GPU dlls not included in required dlls or libs as they're optional.
+ find_library(PHYSX3GPU_DLL_CHECKED
+ NAMES PhysX3GpuCHECKED${PHYSX_ARCH_FILE}
+ PATHS ${DLL_PATH}
+ )
+
+ # Create this list to check for found dlls below
+ SET(DLL_VAR_LIST
+ PHYSX3_DLL
+ PHYSX3CHARACTERKINEMATIC_DLL
+ PHYSX3COMMON_DLL
+ PHYSX3COOKING_DLL
+
+ PHYSX3_DLL_DEBUG
+ PHYSX3CHARACTERKINEMATIC_DLL_DEBUG
+ PHYSX3COMMON_DLL_DEBUG
+ PHYSX3COOKING_DLL_DEBUG
+
+ PHYSX3_DLL_PROFILE
+ PHYSX3CHARACTERKINEMATIC_DLL_PROFILE
+ PHYSX3COMMON_DLL_PROFILE
+ PHYSX3COOKING_DLL_PROFILE
+
+ PHYSX3_DLL_CHECKED
+ PHYSX3CHARACTERKINEMATIC_DLL_CHECKED
+ PHYSX3COMMON_DLL_CHECKED
+ PHYSX3COOKING_DLL_CHECKED
+
+ )
+endif()
+
+if (TARGET_BUILD_PLATFORM STREQUAL "linux")
+ SET(BIN_PATH ${PHYSXSDK_PATH}/bin/linux64-cmake)
+ SET(CMAKE_FIND_LIBRARY_SUFFIXES ".so")
+
+ find_library(PHYSX3_LIB
+ NAMES PhysX3${PHYSX_ARCH_FILE}
+ PATHS ${BIN_PATH}
+ )
+ find_library(PHYSX3COOKING_LIB
+ NAMES PhysX3Cooking${PHYSX_ARCH_FILE}
+ PATHS ${BIN_PATH}
+ )
+ find_library(PHYSX3CHARACTERKINEMATIC_LIB
+ NAMES PhysX3CharacterKinematic${PHYSX_ARCH_FILE}
+ PATHS ${BIN_PATH}
+ )
+ find_library(PHYSX3COMMON_LIB
+ NAMES PhysX3Common${PHYSX_ARCH_FILE}
+ PATHS ${BIN_PATH}
+ )
+
+ find_library(PHYSX3_LIB_DEBUG
+ NAMES PhysX3DEBUG${PHYSX_ARCH_FILE}
+ PATHS ${BIN_PATH}
+ )
+ find_library(PHYSX3COOKING_LIB_DEBUG
+ NAMES PhysX3CookingDEBUG${PHYSX_ARCH_FILE}
+ PATHS ${BIN_PATH}
+ )
+ find_library(PHYSX3CHARACTERKINEMATIC_LIB_DEBUG
+ NAMES PhysX3CharacterKinematicDEBUG${PHYSX_ARCH_FILE}
+ PATHS ${BIN_PATH}
+ )
+ find_library(PHYSX3COMMON_LIB_DEBUG
+ NAMES PhysX3CommonDEBUG${PHYSX_ARCH_FILE}
+ PATHS ${BIN_PATH}
+ )
+
+ find_library(PHYSX3_LIB_CHECKED
+ NAMES PhysX3CHECKED${PHYSX_ARCH_FILE}
+ PATHS ${BIN_PATH}
+ )
+ find_library(PHYSX3COOKING_LIB_CHECKED
+ NAMES PhysX3CookingCHECKED${PHYSX_ARCH_FILE}
+ PATHS ${BIN_PATH}
+ )
+ find_library(PHYSX3CHARACTERKINEMATIC_LIB_CHECKED
+ NAMES PhysX3CharacterKinematicCHECKED${PHYSX_ARCH_FILE}
+ PATHS ${BIN_PATH}
+ )
+ find_library(PHYSX3COMMON_LIB_CHECKED
+ NAMES PhysX3CommonCHECKED${PHYSX_ARCH_FILE}
+ PATHS ${BIN_PATH}
+ )
+
+ find_library(PHYSX3_LIB_PROFILE
+ NAMES PhysX3PROFILE${PHYSX_ARCH_FILE}
+ PATHS ${BIN_PATH}
+ )
+ find_library(PHYSX3COOKING_LIB_PROFILE
+ NAMES PhysX3CookingPROFILE${PHYSX_ARCH_FILE}
+ PATHS ${BIN_PATH}
+ )
+ find_library(PHYSX3CHARACTERKINEMATIC_LIB_PROFILE
+ NAMES PhysX3CharacterKinematicPROFILE${PHYSX_ARCH_FILE}
+ PATHS ${BIN_PATH}
+ )
+ find_library(PHYSX3COMMON_LIB_PROFILE
+ NAMES PhysX3CommonPROFILE${PHYSX_ARCH_FILE}
+ PATHS ${BIN_PATH}
+ )
+
+endif()
+
+
+FIND_PACKAGE_HANDLE_STANDARD_ARGS(PHYSXSDK
+ DEFAULT_MSG
+ PHYSXSDK_PATH
+
+ PHYSX3_LIB
+ PHYSX3EXTENSIONS_LIB
+ PHYSX3CHARACTERKINEMATIC_LIB
+ PHYSX3COMMON_LIB
+ PHYSX3COOKING_LIB
+ LOWLEVEL_LIB
+ LOWLEVELAABB_LIB
+ LOWLEVELCLOTH_LIB
+ LOWLEVELDYNAMICS_LIB
+ LOWLEVELPARTICLES_LIB
+ SCENEQUERY_LIB
+ SIMULATIONCONTROLLER_LIB
+
+ PHYSX3_LIB_DEBUG
+ PHYSX3EXTENSIONS_LIB_DEBUG
+ PHYSX3CHARACTERKINEMATIC_LIB_DEBUG
+ PHYSX3COMMON_LIB_DEBUG
+ PHYSX3COOKING_LIB_DEBUG
+ LOWLEVEL_LIB_DEBUG
+ LOWLEVELAABB_LIB_DEBUG
+ LOWLEVELCLOTH_LIB_DEBUG
+ LOWLEVELDYNAMICS_LIB_DEBUG
+ LOWLEVELPARTICLES_LIB_DEBUG
+ SCENEQUERY_LIB_DEBUG
+ SIMULATIONCONTROLLER_LIB_DEBUG
+
+# PHYSX3_LIB_CHECKED
+# PHYSX3EXTENSIONS_LIB_CHECKED
+# PHYSX3CHARACTERKINEMATIC_LIB_CHECKED
+# PHYSX3COMMON_LIB_CHECKED
+# PHYSX3COOKING_LIB_CHECKED
+# LOWLEVEL_LIB_CHECKED
+# LOWLEVELAABB_LIB_CHECKED
+# LOWLEVELCLOTH_LIB_CHECKED
+# LOWLEVELDYNAMICS_LIB_CHECKED
+# LOWLEVELPARTICLES_LIB_CHECKED
+# SCENEQUERY_LIB_CHECKED
+# SIMULATIONCONTROLLER_LIB_CHECKED
+#
+# PHYSX3_LIB_PROFILE
+# PHYSX3EXTENSIONS_LIB_PROFILE
+# PHYSX3CHARACTERKINEMATIC_LIB_PROFILE
+# PHYSX3COMMON_LIB_PROFILE
+# PHYSX3COOKING_LIB_PROFILE
+# LOWLEVEL_LIB_PROFILE
+# LOWLEVELAABB_LIB_PROFILE
+# LOWLEVELCLOTH_LIB_PROFILE
+# LOWLEVELDYNAMICS_LIB_PROFILE
+# LOWLEVELPARTICLES_LIB_PROFILE
+# SCENEQUERY_LIB_PROFILE
+# SIMULATIONCONTROLLER_LIB_PROFILE
+
+ ${DLL_VAR_LIST}
+)
+
+if (PHYSXSDK_FOUND)
+
+ SET(PHYSXSDK_INCLUDE_DIRS
+ ${PHYSXSDK_PATH}/Include
+ ${PHYSXSDK_PATH}/Include/common
+ ${PHYSXSDK_PATH}/Include/cooking
+ ${PHYSXSDK_PATH}/Include/extensions
+ ${PHYSXSDK_PATH}/Include/geometry
+ ${PHYSXSDK_PATH}/Include/gpu
+ ${PHYSXSDK_PATH}/Include/deformable
+ ${PHYSXSDK_PATH}/Include/particles
+ ${PHYSXSDK_PATH}/Include/characterkinematic
+ ${PHYSXSDK_PATH}/Include/characterdynamic
+ ${PHYSXSDK_PATH}/Include/vehicle
+ )
+
+ SET(PHYSXSDK_LIBS_RELEASE ${PHYSX3_LIB} ${PHYSX3EXTENSIONS_LIB} ${PHYSX3CHARACTERKINEMATIC_LIB} ${PHYSX3COMMON_LIB} ${PHYSX3COOKING_LIB} ${PHYSX3GPU_LIB} ${LOWLEVEL_LIB} ${LOWLEVELAABB_LIB} ${LOWLEVELCLOTH_LIB} ${LOWLEVELDYNAMICS_LIB} ${LOWLEVELPARTICLES_LIB} ${SCENEQUERY_LIB} ${SIMULATIONCONTROLLER_LIB}
+ CACHE STRING ""
+ )
+ SET(PHYSXSDK_LIBS_DEBUG ${PHYSX3_LIB_DEBUG} ${PHYSX3EXTENSIONS_LIB_DEBUG} ${PHYSX3CHARACTERKINEMATIC_LIB_DEBUG} ${PHYSX3COMMON_LIB_DEBUG} ${PHYSX3COOKING_LIB_DEBUG} ${PHYSX3GPU_LIB_DEBUG} ${LOWLEVEL_LIB_DEBUG} ${LOWLEVELAABB_LIB_DEBUG} ${LOWLEVELCLOTH_LIB_DEBUG} ${LOWLEVELDYNAMICS_LIB_DEBUG} ${LOWLEVELPARTICLES_LIB_DEBUG} ${SCENEQUERY_LIB_DEBUG} ${SIMULATIONCONTROLLER_LIB_DEBUG}
+ CACHE STRING ""
+ )
+ SET(PHYSXSDK_LIBS_CHECKED ${PHYSX3_LIB_CHECKED} ${PHYSX3EXTENSIONS_LIB_CHECKED} ${PHYSX3CHARACTERKINEMATIC_LIB_CHECKED} ${PHYSX3COMMON_LIB_CHECKED} ${PHYSX3COOKING_LIB_CHECKED} ${PHYSX3GPU_LIB_CHECKED} ${LOWLEVEL_LIB_CHECKED} ${LOWLEVELAABB_LIB_CHECKED} ${LOWLEVELCLOTH_LIB_CHECKED} ${LOWLEVELDYNAMICS_LIB_CHECKED} ${LOWLEVELPARTICLES_LIB_CHECKED} ${SCENEQUERY_LIB_CHECKED} ${SIMULATIONCONTROLLER_LIB_CHECKED}
+ CACHE STRING ""
+ )
+ SET(PHYSXSDK_LIBS_PROFILE ${PHYSX3_LIB_PROFILE} ${PHYSX3EXTENSIONS_LIB_PROFILE} ${PHYSX3CHARACTERKINEMATIC_LIB_PROFILE} ${PHYSX3COMMON_LIB_PROFILE} ${PHYSX3COOKING_LIB_PROFILE} ${PHYSX3GPU_LIB_PROFILE} ${LOWLEVEL_LIB_PROFILE} ${LOWLEVELAABB_LIB_PROFILE} ${LOWLEVELCLOTH_LIB_PROFILE} ${LOWLEVELDYNAMICS_LIB_PROFILE} ${LOWLEVELPARTICLES_LIB_PROFILE} ${SCENEQUERY_LIB_PROFILE} ${SIMULATIONCONTROLLER_LIB_PROFILE}
+ CACHE STRING ""
+ )
+
+ #NOTE: This is all dll configs, might need to be split.
+ SET(PHYSXSDK_DLLS
+ ${PHYSX3_DLL} ${PHYSX3CHARACTERKINEMATIC_DLL} ${PHYSX3COMMON_DLL} ${PHYSX3COOKING_DLL} ${PHYSX3GPU_DLL}
+ ${PHYSX3_DLL_DEBUG} ${PHYSX3CHARACTERKINEMATIC_DLL_DEBUG} ${PHYSX3COMMON_DLL_DEBUG} ${PHYSX3COOKING_DLL_DEBUG} ${PHYSX3GPU_DLL_DEBUG}
+ ${PHYSX3_DLL_PROFILE} ${PHYSX3CHARACTERKINEMATIC_DLL_PROFILE} ${PHYSX3COMMON_DLL_PROFILE} ${PHYSX3COOKING_DLL_PROFILE} ${PHYSX3GPU_DLL_PROFILE}
+ ${PHYSX3_DLL_CHECKED} ${PHYSX3CHARACTERKINEMATIC_DLL_CHECKED} ${PHYSX3COMMON_DLL_CHECKED} ${PHYSX3COOKING_DLL_CHECKED} ${PHYSX3GPU_DLL_CHECKED}
+ )
+
+ SET(PHYSXSDK_LIBRARIES "" CACHE STRING "")
+
+ foreach(x ${PHYSXSDK_LIBS_RELEASE})
+ list(APPEND PHYSXSDK_LIBRARIES optimized ${x})
+ endforeach()
+
+ foreach(x ${PHYSXSDK_LIBS_DEBUG})
+ list(APPEND PHYSXSDK_LIBRARIES debug ${x})
+ endforeach()
+endif()
diff --git a/NvBlast/sdk/compiler/cmake/modules/FindPxSharedSDK.cmake b/NvBlast/sdk/compiler/cmake/modules/FindPxSharedSDK.cmake
new file mode 100644
index 0000000..0f67f5c
--- /dev/null
+++ b/NvBlast/sdk/compiler/cmake/modules/FindPxSharedSDK.cmake
@@ -0,0 +1,293 @@
+# - Try to find PxShared binary SDK
+# - Sets PXSHAREDSDK_LIBS_DEBUG and PXSHAREDSDK_LIBS_RELEASE - lists of the libraries found
+# - Sets PXSHAREDSDK_INCLUDE_DIRS
+# - Sets PXSHAREDSDK_DLLS - List of the DLLs to copy to the bin directory of projects that depend on this
+
+include(FindPackageHandleStandardArgs)
+
+# Find the includes
+
+# TODO: Do the version stuff properly!
+find_path(PXSHAREDSDK_PATH include/foundation/Px.h
+ PATHS
+ ${GW_DEPS_ROOT}/$ENV{PM_PxShared_NAME}/${PxSharedSDK_FIND_VERSION}
+ ${GW_DEPS_ROOT}/PxShared/${PxSharedSDK_FIND_VERSION}
+)
+
+if (TARGET_BUILD_PLATFORM STREQUAL "Windows")
+ # If the project pulling in this dependency needs the static crt, then append that to the path.
+ if (STATIC_WINCRT)
+ SET(PXSHARED_CRT_SUFFIX "-staticcrt")
+ else()
+ SET(PXSHARED_CRT_SUFFIX "")
+ endif()
+
+ if (CMAKE_CL_64)
+ SET(PXSHARED_ARCH_FOLDER "win64")
+ SET(PXSHARED_ARCH_FILE "_x64")
+ else()
+ SET(PXSHARED_ARCH_FOLDER "win32")
+ SET(PXSHARED_ARCH_FILE "_x86")
+ endif()
+
+ # What compiler version do we want?
+
+ if (CMAKE_CXX_COMPILER_VERSION VERSION_GREATER 18.0.0.0 AND CMAKE_CXX_COMPILER_VERSION VERSION_LESS 19.0.0.0)
+ SET(VS_STR "vc12")
+ elseif(CMAKE_CXX_COMPILER_VERSION VERSION_GREATER 19.0.0.0)
+ SET(VS_STR "vc14")
+ else()
+ MESSAGE(FATAL_ERROR "Failed to find compatible PxSharedSDK - Only supporting VS2013 and VS2015")
+ endif()
+
+ SET(LIB_PATH ${PXSHAREDSDK_PATH}/lib/${VS_STR}${PXSHARED_ARCH_FOLDER}-cmake${PXSHARED_CRT_SUFFIX})
+ SET(CMAKE_FIND_LIBRARY_SUFFIXES ".lib" ".dll")
+
+elseif(TARGET_BUILD_PLATFORM STREQUAL "PS4")
+ SET(LIB_PATH ${PXSHAREDSDK_PATH}/lib/vc14ps4-cmake)
+ SET(CMAKE_FIND_LIBRARY_SUFFIXES ".a")
+ SET(CMAKE_FIND_LIBRARY_PREFIXES "lib")
+elseif(TARGET_BUILD_PLATFORM STREQUAL "XboxOne")
+ SET(LIB_PATH ${PXSHAREDSDK_PATH}/lib/vc14xboxone-cmake)
+ SET(CMAKE_FIND_LIBRARY_SUFFIXES ".a")
+ SET(CMAKE_FIND_LIBRARY_PREFIXES "lib")
+elseif(TARGET_BUILD_PLATFORM STREQUAL "linux")
+ SET(LIB_PATH ${PXSHAREDSDK_PATH}/lib/linux64-cmake)
+ SET(CMAKE_FIND_LIBRARY_SUFFIXES ".a")
+ SET(CMAKE_FIND_LIBRARY_PREFIXES "lib")
+ SET(PXSHARED_ARCH_FILE "_x64")
+endif()
+
+
+# Now find all of the PxShared libs in the lib directory
+
+
+find_library(PSFASTXML_LIB
+ NAMES PsFastXml${PXSHARED_ARCH_FILE}
+ PATHS ${LIB_PATH}
+)
+find_library(PXFOUNDATION_LIB
+ NAMES PxFoundation${PXSHARED_ARCH_FILE}
+ PATHS ${LIB_PATH}
+)
+find_library(PXPVDSDK_LIB
+ NAMES PxPvdSDK${PXSHARED_ARCH_FILE}
+ PATHS ${LIB_PATH}
+)
+find_library(PXTASK_LIB
+ NAMES PxTask${PXSHARED_ARCH_FILE}
+ PATHS ${LIB_PATH}
+)
+
+find_library(PSFASTXML_LIB_DEBUG
+ NAMES PsFastXmlDEBUG${PXSHARED_ARCH_FILE}
+ PATHS ${LIB_PATH}
+)
+find_library(PXFOUNDATION_LIB_DEBUG
+ NAMES PxFoundationDEBUG${PXSHARED_ARCH_FILE}
+ PATHS ${LIB_PATH}
+)
+find_library(PXPVDSDK_LIB_DEBUG
+ NAMES PxPvdSDKDEBUG${PXSHARED_ARCH_FILE}
+ PATHS ${LIB_PATH}
+)
+find_library(PXTASK_LIB_DEBUG
+ NAMES PxTaskDEBUG${PXSHARED_ARCH_FILE}
+ PATHS ${LIB_PATH}
+)
+
+find_library(PSFASTXML_LIB_CHECKED
+ NAMES PsFastXmlCHECKED${PXSHARED_ARCH_FILE}
+ PATHS ${LIB_PATH}
+)
+find_library(PXFOUNDATION_LIB_CHECKED
+ NAMES PxFoundationCHECKED${PXSHARED_ARCH_FILE}
+ PATHS ${LIB_PATH}
+)
+find_library(PXPVDSDK_LIB_CHECKED
+ NAMES PxPvdSDKCHECKED${PXSHARED_ARCH_FILE}
+ PATHS ${LIB_PATH}
+)
+find_library(PXTASK_LIB_CHECKED
+ NAMES PxTaskCHECKED${PXSHARED_ARCH_FILE}
+ PATHS ${LIB_PATH}
+)
+
+find_library(PSFASTXML_LIB_PROFILE
+ NAMES PsFastXmlPROFILE${PXSHARED_ARCH_FILE}
+ PATHS ${LIB_PATH}
+)
+find_library(PXFOUNDATION_LIB_PROFILE
+ NAMES PxFoundationPROFILE${PXSHARED_ARCH_FILE}
+ PATHS ${LIB_PATH}
+)
+find_library(PXPVDSDK_LIB_PROFILE
+ NAMES PxPvdSDKPROFILE${PXSHARED_ARCH_FILE}
+ PATHS ${LIB_PATH}
+)
+find_library(PXTASK_LIB_PROFILE
+ NAMES PxTaskPROFILE${PXSHARED_ARCH_FILE}
+ PATHS ${LIB_PATH}
+)
+
+if (TARGET_BUILD_PLATFORM STREQUAL "Windows")
+ SET(DLL_PATH ${PXSHAREDSDK_PATH}/bin/${VS_STR}${PXSHARED_ARCH_FOLDER}-cmake${PXSHARED_CRT_SUFFIX})
+
+ find_library(PXFOUNDATION_DLL
+ NAMES PxFoundation${PXSHARED_ARCH_FILE}
+ PATHS ${DLL_PATH}
+ )
+ find_library(PXPVDSDK_DLL
+ NAMES PxPvdSDK${PXSHARED_ARCH_FILE}
+ PATHS ${DLL_PATH}
+ )
+
+ find_library(PXFOUNDATION_DLL_DEBUG
+ NAMES PxFoundationDEBUG${PXSHARED_ARCH_FILE}
+ PATHS ${DLL_PATH}
+ )
+ find_library(PXPVDSDK_DLL_DEBUG
+ NAMES PxPvdSDKDEBUG${PXSHARED_ARCH_FILE}
+ PATHS ${DLL_PATH}
+ )
+
+ find_library(PXFOUNDATION_DLL_CHECKED
+ NAMES PxFoundationCHECKED${PXSHARED_ARCH_FILE}
+ PATHS ${DLL_PATH}
+ )
+ find_library(PXPVDSDK_DLL_CHECKED
+ NAMES PxPvdSDKCHECKED${PXSHARED_ARCH_FILE}
+ PATHS ${DLL_PATH}
+ )
+
+ find_library(PXFOUNDATION_DLL_PROFILE
+ NAMES PxFoundationPROFILE${PXSHARED_ARCH_FILE}
+ PATHS ${DLL_PATH}
+ )
+ find_library(PXPVDSDK_DLL_PROFILE
+ NAMES PxPvdSDKPROFILE${PXSHARED_ARCH_FILE}
+ PATHS ${DLL_PATH}
+ )
+
+ #Put the found dll variables in a list if we actually looked for them (windows)
+ SET(DLL_VAR_LIST
+ PXFOUNDATION_DLL
+ PXPVDSDK_DLL
+
+ PXFOUNDATION_DLL_DEBUG
+ PXPVDSDK_DLL_DEBUG
+
+ PXFOUNDATION_DLL_CHECKED
+ PXPVDSDK_DLL_CHECKED
+
+ PXFOUNDATION_DLL_PROFILE
+ PXPVDSDK_DLL_PROFILE
+
+ )
+endif()
+
+if (TARGET_BUILD_PLATFORM STREQUAL "linux")
+ SET(BIN_PATH ${PXSHAREDSDK_PATH}/bin/linux64-cmake)
+ SET(CMAKE_FIND_LIBRARY_SUFFIXES ".so")
+
+ find_library(PXFOUNDATION_LIB
+ NAMES PxFoundation${PXSHARED_ARCH_FILE}
+ PATHS ${BIN_PATH}
+ )
+ find_library(PXPVDSDK_LIB
+ NAMES PxPvdSDK${PXSHARED_ARCH_FILE}
+ PATHS ${BIN_PATH}
+ )
+
+ find_library(PXFOUNDATION_LIB_DEBUG
+ NAMES PxFoundationDEBUG${PXSHARED_ARCH_FILE}
+ PATHS ${BIN_PATH}
+ )
+ find_library(PXPVDSDK_LIB_DEBUG
+ NAMES PxPvdSDKDEBUG${PXSHARED_ARCH_FILE}
+ PATHS ${BIN_PATH}
+ )
+endif()
+
+FIND_PACKAGE_HANDLE_STANDARD_ARGS(PXSHAREDSDK
+ DEFAULT_MSG
+ PXSHAREDSDK_PATH
+
+ PSFASTXML_LIB
+ PXFOUNDATION_LIB
+ PXPVDSDK_LIB
+ PXTASK_LIB
+
+ PSFASTXML_LIB_DEBUG
+ PXFOUNDATION_LIB_DEBUG
+ PXPVDSDK_LIB_DEBUG
+ PXTASK_LIB_DEBUG
+
+# PSFASTXML_LIB_CHECKED
+# PXFOUNDATION_LIB_CHECKED
+# PXPVDSDK_LIB_CHECKED
+# PXTASK_LIB_CHECKED
+#
+# PSFASTXML_LIB_PROFILE
+# PXFOUNDATION_LIB_PROFILE
+# PXPVDSDK_LIB_PROFILE
+# PXTASK_LIB_PROFILE
+
+
+ ${DLL_VAR_LIST}
+)
+
+if (PXSHAREDSDK_FOUND)
+
+ # NOTE: This include list is way too long and reaches into too many internals.
+ # Also may not be good enough for all users.
+ SET(PXSHAREDSDK_INCLUDE_DIRS
+ ${PXSHAREDSDK_PATH}/include
+ ${PXSHAREDSDK_PATH}/include/pvd
+ ${PXSHAREDSDK_PATH}/include/task
+ ${PXSHAREDSDK_PATH}/include/filebuf
+ ${PXSHAREDSDK_PATH}/include/foundation
+ ${PXSHAREDSDK_PATH}/src/foundation/include
+ ${PXSHAREDSDK_PATH}/src/filebuf/include
+ ${PXSHAREDSDK_PATH}/src/fastxml/include
+ )
+
+ SET(PXSHAREDSDK_LIBS_RELEASE ${PSFASTXML_LIB} ${PXFOUNDATION_LIB} ${PXPVDSDK_LIB} ${PXTASK_LIB}
+ CACHE STRING ""
+ )
+ SET(PXSHAREDSDK_LIBS_DEBUG ${PSFASTXML_LIB_DEBUG} ${PXFOUNDATION_LIB_DEBUG} ${PXPVDSDK_LIB_DEBUG} ${PXTASK_LIB_DEBUG}
+ CACHE STRING ""
+ )
+
+ SET(PXSHAREDSDK_LIBS_CHECKED ${PSFASTXML_LIB_CHECKED} ${PXFOUNDATION_LIB_CHECKED} ${PXPVDSDK_LIB_CHECKED} ${PXTASK_LIB_CHECKED}
+ CACHE STRING ""
+ )
+
+ SET(PXSHAREDSDK_LIBS_PROFILE ${PSFASTXML_LIB_PROFILE} ${PXFOUNDATION_LIB_PROFILE} ${PXPVDSDK_LIB_PROFILE} ${PXTASK_LIB_PROFILE}
+ CACHE STRING ""
+ )
+
+ SET(PXSHAREDSDK_DLLS
+ ${PXFOUNDATION_DLL}
+ ${PXPVDSDK_DLL}
+
+ ${PXFOUNDATION_DLL_DEBUG}
+ ${PXPVDSDK_DLL_DEBUG}
+
+ ${PXFOUNDATION_DLL_CHECKED}
+ ${PXPVDSDK_DLL_CHECKED}
+
+ ${PXFOUNDATION_DLL_PROFILE}
+ ${PXPVDSDK_DLL_PROFILE}
+ )
+
+ SET(PXSHAREDSDK_LIBRARIES "" CACHE STRING "")
+
+ foreach(x ${PXSHAREDSDK_LIBS_RELEASE})
+ list(APPEND PXSHAREDSDK_LIBRARIES optimized ${x})
+ endforeach()
+
+ foreach(x ${PXSHAREDSDK_LIBS_DEBUG})
+ list(APPEND PXSHAREDSDK_LIBRARIES debug ${x})
+ endforeach()
+endif()
diff --git a/NvBlast/sdk/compiler/cmake/modules/FindXDK.cmake b/NvBlast/sdk/compiler/cmake/modules/FindXDK.cmake
new file mode 100644
index 0000000..601dd8f
--- /dev/null
+++ b/NvBlast/sdk/compiler/cmake/modules/FindXDK.cmake
@@ -0,0 +1,257 @@
+# - Try to find the Xbox One XDK
+# - Sets XDK_LIBRARIES - list of the libraries found
+# - Sets XDK_INCLUDE_DIRS
+
+include(FindPackageHandleStandardArgs)
+
+# Find the includes
+
+# TODO: Do the version stuff properly!
+# TODO: Handle a binary dep version of the XDK
+find_path(XDK_PATH include/winrt/Windows.Xbox.UI.h
+ PATHS
+ $ENV{XboxOneXDKLatest}/xdk
+)
+
+SET(LIB_PATH ${XDK_PATH}/Lib/amd64/)
+SET(CMAKE_FIND_LIBRARY_PREFIXES "")
+SET(CMAKE_FIND_LIBRARY_SUFFIXES ".lib")
+
+MESSAGE("XDK LIB PATH: ${LIB_PATH}")
+
+find_library(ACPHAL_LIB
+ NAMES acphal
+ PATHS ${LIB_PATH}
+)
+
+find_library(APPMODEL_LIB
+ NAMES appmodel
+ PATHS ${LIB_PATH}
+)
+
+find_library(BCRYPT_LIB
+ NAMES bcrypt
+ PATHS ${LIB_PATH}
+)
+
+find_library(COMBASE_LIB
+ NAMES combase
+ PATHS ${LIB_PATH}
+)
+
+find_library(CONSOLEID_LIB
+ NAMES consoleid
+ PATHS ${LIB_PATH}
+)
+
+find_library(D3D11_X_LIB
+ NAMES d3d11_x
+ PATHS ${LIB_PATH}
+)
+
+find_library(D3D12_X_LIB
+ NAMES d3d12_x
+ PATHS ${LIB_PATH}
+)
+
+find_library(D3DCOMPILER_LIB
+ NAMES d3dcompiler
+ PATHS ${LIB_PATH}
+)
+
+find_library(DXGUID_LIB
+ NAMES dxguid
+ PATHS ${LIB_PATH}
+)
+
+find_library(ETWPLUS_LIB
+ NAMES etwplus
+ PATHS ${LIB_PATH}
+)
+
+find_library(IXMLHTTPREQUEST2_LIB
+ NAMES ixmlhttprequest2
+ PATHS ${LIB_PATH}
+)
+
+find_library(KERNELX_LIB
+ NAMES kernelx
+ PATHS ${LIB_PATH}
+)
+
+find_library(KSTUDIOCLIENT_LIB
+ NAMES kstudioclient
+ PATHS ${LIB_PATH}
+)
+
+find_library(MFPLAT_LIB
+ NAMES mfplat
+ PATHS ${LIB_PATH}
+)
+
+find_library(MFREADWRITE_LIB
+ NAMES mfreadwrite
+ PATHS ${LIB_PATH}
+)
+
+find_library(MFUUID_LIB
+ NAMES mfuuid
+ PATHS ${LIB_PATH}
+)
+
+find_library(MMDEVAPI_LIB
+ NAMES mmdevapi
+ PATHS ${LIB_PATH}
+)
+
+find_library(PIXEVT_LIB
+ NAMES PIXEvt
+ PATHS ${LIB_PATH}
+)
+
+find_library(RPCRT4_LIB
+ NAMES rpcrt4
+ PATHS ${LIB_PATH}
+)
+
+find_library(SMARTGLASSINTEROP_LIB
+ NAMES smartglassinterop
+ PATHS ${LIB_PATH}
+)
+
+find_library(STRMIIDS_LIB
+ NAMES strmiids
+ PATHS ${LIB_PATH}
+)
+
+find_library(TOOLHELPX_LIB
+ NAMES toolhelpx
+ PATHS ${LIB_PATH}
+)
+
+find_library(UUID_LIB
+ NAMES uuid
+ PATHS ${LIB_PATH}
+)
+
+find_library(WINDOWSCODECS_LIB
+ NAMES windowscodecs
+ PATHS ${LIB_PATH}
+)
+
+find_library(WMCODECDSPUUID_LIB
+ NAMES wmcodecdspuuid
+ PATHS ${LIB_PATH}
+)
+
+find_library(WS2_32_LIB
+ NAMES ws2_32
+ PATHS ${LIB_PATH}
+)
+
+find_library(XAPOBASE_LIB
+ NAMES xapobase
+ PATHS ${LIB_PATH}
+)
+
+find_library(XAUDIO2_LIB
+ NAMES xaudio2
+ PATHS ${LIB_PATH}
+)
+
+find_library(XG_X_LIB
+ NAMES xg_x
+ PATHS ${LIB_PATH}
+)
+
+find_library(XI_LIB
+ NAMES xi
+ PATHS ${LIB_PATH}
+)
+
+find_library(XSTUDIOCLIENT_LIB
+ NAMES xstudioclient
+ PATHS ${LIB_PATH}
+)
+
+
+
+FIND_PACKAGE_HANDLE_STANDARD_ARGS(XDK
+ DEFAULT_MSG
+ XDK_PATH
+
+ ACPHAL_LIB
+ APPMODEL_LIB
+ BCRYPT_LIB
+ COMBASE_LIB
+ CONSOLEID_LIB
+ D3D11_X_LIB
+ D3D12_X_LIB
+ D3DCOMPILER_LIB
+ DXGUID_LIB
+ ETWPLUS_LIB
+ IXMLHTTPREQUEST2_LIB
+ KERNELX_LIB
+ KSTUDIOCLIENT_LIB
+ MFPLAT_LIB
+ MFREADWRITE_LIB
+ MFUUID_LIB
+ MMDEVAPI_LIB
+ PIXEVT_LIB
+ RPCRT4_LIB
+ SMARTGLASSINTEROP_LIB
+ STRMIIDS_LIB
+ TOOLHELPX_LIB
+ UUID_LIB
+ WINDOWSCODECS_LIB
+ WMCODECDSPUUID_LIB
+ WS2_32_LIB
+ XAPOBASE_LIB
+ XAUDIO2_LIB
+ XG_X_LIB
+ XI_LIB
+ XSTUDIOCLIENT_LIB
+)
+
+if (XDK_FOUND)
+ SET(XDK_INCLUDE_DIRS
+ ${XDK_PATH}/Include
+ ${XDK_PATH}/Include/shared
+ ${XDK_PATH}/Include/um
+ ${XDK_PATH}/Include/winrt
+ )
+
+ SET(XDK_LIBRARIES
+ ${ACPHAL_LIB}
+ ${APPMODEL_LIB}
+ ${BCRYPT_LIB}
+ ${COMBASE_LIB}
+ ${CONSOLEID_LIB}
+ ${D3D11_X_LIB}
+ ${D3D12_X_LIB}
+ ${D3DCOMPILER_LIB}
+ ${DXGUID_LIB}
+ ${ETWPLUS_LIB}
+ ${IXMLHTTPREQUEST2_LIB}
+ ${KERNELX_LIB}
+ ${KSTUDIOCLIENT_LIB}
+ ${MFPLAT_LIB}
+ ${MFREADWRITE_LIB}
+ ${MFUUID_LIB}
+ ${MMDEVAPI_LIB}
+ ${PIXEVT_LIB}
+ ${RPCRT4_LIB}
+ ${SMARTGLASSINTEROP_LIB}
+ ${STRMIIDS_LIB}
+ ${TOOLHELPX_LIB}
+ ${UUID_LIB}
+ ${WINDOWSCODECS_LIB}
+ ${WMCODECDSPUUID_LIB}
+ ${WS2_32_LIB}
+ ${XAPOBASE_LIB}
+ ${XAUDIO2_LIB}
+ ${XG_X_LIB}
+ ${XI_LIB}
+ ${XSTUDIOCLIENT_LIB}
+ )
+endif()
diff --git a/NvBlast/sdk/compiler/cmake/windows/CMakeLists.txt b/NvBlast/sdk/compiler/cmake/windows/CMakeLists.txt
new file mode 100644
index 0000000..ac8b186
--- /dev/null
+++ b/NvBlast/sdk/compiler/cmake/windows/CMakeLists.txt
@@ -0,0 +1,67 @@
+#Platform specific compile flags and project includes
+
+SET(CMAKE_CXX_FLAGS "/GR- /GF /MP /Gy /d2Zi+ /errorReport:prompt /fp:fast /Gd /Gm- /GS- /nologo /W4 /WX /Zc:forScope /Zc:inline /Zc:wchar_t /Zi")
+
+OPTION(STATIC_WINCRT "Use static WinCRT" OFF)
+
+# Are we using the static or dynamic RT library? Whatever we use, it needs to be the same in any dependencies
+# we pull in or we're potentially having mismatch issues.
+IF(STATIC_WINCRT)
+ SET(WINCRT_NDEBUG "/MT")
+ SET(WINCRT_DEBUG "/MTd")
+ELSE()
+ SET(WINCRT_NDEBUG "/MD")
+ SET(WINCRT_DEBUG "/MDd")
+ENDIF()
+
+SET(CMAKE_CXX_FLAGS_DEBUG "/Od /RTCsu ${WINCRT_DEBUG}")
+SET(CMAKE_CXX_FLAGS_CHECKED "/Ox ${WINCRT_NDEBUG}")
+SET(CMAKE_CXX_FLAGS_PROFILE "/Ox ${WINCRT_NDEBUG}")
+SET(CMAKE_CXX_FLAGS_RELEASE "/Ox ${WINCRT_NDEBUG}")
+
+MESSAGE("STATIC OR DYN: ${WINCRT_DEBUG}")
+
+# Build PDBs for all configurations
+SET(CMAKE_SHARED_LINKER_FLAGS "/DEBUG")
+
+IF(CMAKE_CL_64)
+ ADD_DEFINITIONS(-DWIN64)
+ENDIF(CMAKE_CL_64)
+
+SET(BLAST_SLN_COMPILE_DEFS _HAS_EXCEPTIONS=0;WIN32;WIN64;_CRT_SECURE_NO_DEPRECATE;_CRT_NONSTDC_NO_DEPRECATE;)
+#NvBlastExt doesn't have the _CONSOLE flag
+
+SET(BLAST_SLN_DEBUG_COMPILE_DEFS _DEBUG;NV_DEBUG=1;)
+SET(BLAST_SLN_CHECKED_COMPILE_DEFS NDEBUG;NV_CHECKED=1;)
+SET(BLAST_SLN_PROFILE_COMPILE_DEFS NDEBUG;NV_PROFILE=1;)
+SET(BLAST_SLN_RELEASE_COMPILE_DEFS NDEBUG;)
+
+IF(CMAKE_CL_64)
+ SET(LIBPATH_SUFFIX "x64")
+ELSE(CMAKE_CL_64)
+ SET(LIBPATH_SUFFIX "x86")
+ENDIF(CMAKE_CL_64)
+
+SET(CMAKE_DEBUG_POSTFIX "${CMAKE_DEBUG_POSTFIX}_${LIBPATH_SUFFIX}")
+SET(CMAKE_PROFILE_POSTFIX "${CMAKE_PROFILE_POSTFIX}_${LIBPATH_SUFFIX}")
+SET(CMAKE_CHECKED_POSTFIX "${CMAKE_CHECKED_POSTFIX}_${LIBPATH_SUFFIX}")
+SET(CMAKE_RELEASE_POSTFIX "${CMAKE_RELEASE_POSTFIX}_${LIBPATH_SUFFIX}")
+
+
+# Include all of the projects
+INCLUDE(${PROJECT_CMAKE_FILES_DIR}/NvBlast.cmake)
+INCLUDE(${PROJECT_CMAKE_FILES_DIR}/NvBlastTk.cmake)
+INCLUDE(${PROJECT_CMAKE_FILES_DIR}/NvBlastExtConverterLL.cmake)
+INCLUDE(${PROJECT_CMAKE_FILES_DIR}/NvBlastExtShaders.cmake)
+INCLUDE(${PROJECT_CMAKE_FILES_DIR}/NvBlastExtPhysX.cmake)
+INCLUDE(${PROJECT_CMAKE_FILES_DIR}/NvBlastExtAuthoring.cmake)
+INCLUDE(${PROJECT_CMAKE_FILES_DIR}/NvBlastExtImport.cmake)
+
+#Sadly, CapnProto won't compile on anything less than VS2015.
+if(CMAKE_CXX_COMPILER_VERSION VERSION_GREATER 19.0.0.0)
+ SET(SERIALIZATION_INCLUDED 1 PARENT_SCOPE)
+ INCLUDE(${PROJECT_CMAKE_FILES_DIR}/NvBlastExtSerialization.cmake)
+ INCLUDE(${PROJECT_CMAKE_FILES_DIR}/NvBlastExtSerializationLL.cmake)
+else()
+ SET(SERIALIZATION_INCLUDED 0)
+endif()
diff --git a/NvBlast/sdk/compiler/cmake/windows/NvBlast.cmake b/NvBlast/sdk/compiler/cmake/windows/NvBlast.cmake
new file mode 100644
index 0000000..64b6b02
--- /dev/null
+++ b/NvBlast/sdk/compiler/cmake/windows/NvBlast.cmake
@@ -0,0 +1,22 @@
+#
+# Build NvBlast Windows
+#
+
+SET(BLAST_PLATFORM_COMMON_FILES
+ ${COMMON_SOURCE_DIR}/NvBlastIncludeWindows.h
+)
+
+SET(BLAST_PLATFORM_INCLUDES
+)
+
+SET(BLAST_COMPILE_DEFS
+ # Common to all configurations
+ ${BLAST_SLN_COMPILE_DEFS};_CONSOLE
+
+ $<$<CONFIG:debug>:${BLAST_SLN_DEBUG_COMPILE_DEFS}>
+ $<$<CONFIG:checked>:${BLAST_SLN_CHECKED_COMPILE_DEFS}>
+ $<$<CONFIG:profile>:${BLAST_SLN_PROFILE_COMPILE_DEFS}>
+ $<$<CONFIG:release>:${BLAST_SLN_RELEASE_COMPILE_DEFS}>
+)
+
+SET(BLAST_LIB_TYPE SHARED)
diff --git a/NvBlast/sdk/compiler/cmake/windows/NvBlastExtAuthoring.cmake b/NvBlast/sdk/compiler/cmake/windows/NvBlastExtAuthoring.cmake
new file mode 100644
index 0000000..7406d74
--- /dev/null
+++ b/NvBlast/sdk/compiler/cmake/windows/NvBlastExtAuthoring.cmake
@@ -0,0 +1,24 @@
+#
+# Build NvBlastExt Windows
+#
+
+SET(BLASTEXT_PLATFORM_COMMON_FILES
+ ${COMMON_SOURCE_DIR}/NvBlastIncludeWindows.h
+)
+
+SET(BLASTEXT_PLATFORM_INCLUDES
+)
+
+SET(BLASTEXT_COMPILE_DEFS
+ # Common to all configurations
+ ${BLAST_SLN_COMPILE_DEFS};_CONSOLE;
+
+ $<$<CONFIG:debug>:${BLAST_SLN_DEBUG_COMPILE_DEFS}>
+ $<$<CONFIG:checked>:${BLAST_SLN_CHECKED_COMPILE_DEFS}>
+ $<$<CONFIG:profile>:${BLAST_SLN_PROFILE_COMPILE_DEFS}>
+ $<$<CONFIG:release>:${BLAST_SLN_RELEASE_COMPILE_DEFS}>
+)
+
+SET(BLAST_EXT_SHARED_LIB_TYPE SHARED)
+
+SET(BLASTEXT_PLATFORM_COMPILE_OPTIONS "/wd4100;/wd4239;/wd4244;/wd4245;/wd4267;/EHsc")
diff --git a/NvBlast/sdk/compiler/cmake/windows/NvBlastExtConverterLL.cmake b/NvBlast/sdk/compiler/cmake/windows/NvBlastExtConverterLL.cmake
new file mode 100644
index 0000000..7406d74
--- /dev/null
+++ b/NvBlast/sdk/compiler/cmake/windows/NvBlastExtConverterLL.cmake
@@ -0,0 +1,24 @@
+#
+# Build NvBlastExt Windows
+#
+
+SET(BLASTEXT_PLATFORM_COMMON_FILES
+ ${COMMON_SOURCE_DIR}/NvBlastIncludeWindows.h
+)
+
+SET(BLASTEXT_PLATFORM_INCLUDES
+)
+
+SET(BLASTEXT_COMPILE_DEFS
+ # Common to all configurations
+ ${BLAST_SLN_COMPILE_DEFS};_CONSOLE;
+
+ $<$<CONFIG:debug>:${BLAST_SLN_DEBUG_COMPILE_DEFS}>
+ $<$<CONFIG:checked>:${BLAST_SLN_CHECKED_COMPILE_DEFS}>
+ $<$<CONFIG:profile>:${BLAST_SLN_PROFILE_COMPILE_DEFS}>
+ $<$<CONFIG:release>:${BLAST_SLN_RELEASE_COMPILE_DEFS}>
+)
+
+SET(BLAST_EXT_SHARED_LIB_TYPE SHARED)
+
+SET(BLASTEXT_PLATFORM_COMPILE_OPTIONS "/wd4100;/wd4239;/wd4244;/wd4245;/wd4267;/EHsc")
diff --git a/NvBlast/sdk/compiler/cmake/windows/NvBlastExtImport.cmake b/NvBlast/sdk/compiler/cmake/windows/NvBlastExtImport.cmake
new file mode 100644
index 0000000..7406d74
--- /dev/null
+++ b/NvBlast/sdk/compiler/cmake/windows/NvBlastExtImport.cmake
@@ -0,0 +1,24 @@
+#
+# Build NvBlastExt Windows
+#
+
+SET(BLASTEXT_PLATFORM_COMMON_FILES
+ ${COMMON_SOURCE_DIR}/NvBlastIncludeWindows.h
+)
+
+SET(BLASTEXT_PLATFORM_INCLUDES
+)
+
+SET(BLASTEXT_COMPILE_DEFS
+ # Common to all configurations
+ ${BLAST_SLN_COMPILE_DEFS};_CONSOLE;
+
+ $<$<CONFIG:debug>:${BLAST_SLN_DEBUG_COMPILE_DEFS}>
+ $<$<CONFIG:checked>:${BLAST_SLN_CHECKED_COMPILE_DEFS}>
+ $<$<CONFIG:profile>:${BLAST_SLN_PROFILE_COMPILE_DEFS}>
+ $<$<CONFIG:release>:${BLAST_SLN_RELEASE_COMPILE_DEFS}>
+)
+
+SET(BLAST_EXT_SHARED_LIB_TYPE SHARED)
+
+SET(BLASTEXT_PLATFORM_COMPILE_OPTIONS "/wd4100;/wd4239;/wd4244;/wd4245;/wd4267;/EHsc")
diff --git a/NvBlast/sdk/compiler/cmake/windows/NvBlastExtPhysX.cmake b/NvBlast/sdk/compiler/cmake/windows/NvBlastExtPhysX.cmake
new file mode 100644
index 0000000..9bf7614
--- /dev/null
+++ b/NvBlast/sdk/compiler/cmake/windows/NvBlastExtPhysX.cmake
@@ -0,0 +1,24 @@
+#
+# Build NvBlastExt Windows
+#
+
+SET(BLASTEXT_PLATFORM_COMMON_FILES
+ ${COMMON_SOURCE_DIR}/NvBlastIncludeWindows.h
+)
+
+SET(BLASTEXT_PLATFORM_INCLUDES
+)
+
+SET(BLASTEXT_COMPILE_DEFS
+ # Common to all configurations
+ ${BLAST_SLN_COMPILE_DEFS};_CONSOLE;
+
+ $<$<CONFIG:debug>:${BLAST_SLN_DEBUG_COMPILE_DEFS}>
+ $<$<CONFIG:checked>:${BLAST_SLN_CHECKED_COMPILE_DEFS}>
+ $<$<CONFIG:profile>:${BLAST_SLN_PROFILE_COMPILE_DEFS}>
+ $<$<CONFIG:release>:${BLAST_SLN_RELEASE_COMPILE_DEFS}>
+)
+
+SET(BLASTEXT_PHYSX_LIBTYPE SHARED)
+
+SET(BLASTEXT_PLATFORM_COMPILE_OPTIONS "/wd4100;/wd4239;/wd4244;/wd4245;/wd4267;/EHsc")
diff --git a/NvBlast/sdk/compiler/cmake/windows/NvBlastExtSerialization.cmake b/NvBlast/sdk/compiler/cmake/windows/NvBlastExtSerialization.cmake
new file mode 100644
index 0000000..20910be
--- /dev/null
+++ b/NvBlast/sdk/compiler/cmake/windows/NvBlastExtSerialization.cmake
@@ -0,0 +1,26 @@
+#
+# Build NvBlastExtSerialization Windows
+#
+
+SET(BLASTEXTSERIALIZATION_PLATFORM_COMMON_FILES
+ ${COMMON_SOURCE_DIR}/NvBlastIncludeWindows.h
+)
+
+SET(BLASTEXTSERIALIZATION_PLATFORM_INCLUDES
+)
+
+SET(BLASTEXTSERIALIZATION_COMPILE_DEFS
+ # Common to all configurations
+ ${BLAST_SLN_COMPILE_DEFS};_CONSOLE;
+
+ $<$<CONFIG:debug>:${BLAST_SLN_DEBUG_COMPILE_DEFS}>
+ $<$<CONFIG:checked>:${BLAST_SLN_CHECKED_COMPILE_DEFS}>
+ $<$<CONFIG:profile>:${BLAST_SLN_PROFILE_COMPILE_DEFS}>
+ $<$<CONFIG:release>:${BLAST_SLN_RELEASE_COMPILE_DEFS}>
+)
+
+SET(BLASTEXTSERIALIZATION_LIB_TYPE SHARED)
+
+
+SET(BLASTEXTSERIALIZATION_COMPILE_OPTIONS "/wd4100;/wd4239;/wd4244;/wd4245;/wd4267;/EHsc")
+
diff --git a/NvBlast/sdk/compiler/cmake/windows/NvBlastExtSerializationLL.cmake b/NvBlast/sdk/compiler/cmake/windows/NvBlastExtSerializationLL.cmake
new file mode 100644
index 0000000..20910be
--- /dev/null
+++ b/NvBlast/sdk/compiler/cmake/windows/NvBlastExtSerializationLL.cmake
@@ -0,0 +1,26 @@
+#
+# Build NvBlastExtSerialization Windows
+#
+
+SET(BLASTEXTSERIALIZATION_PLATFORM_COMMON_FILES
+ ${COMMON_SOURCE_DIR}/NvBlastIncludeWindows.h
+)
+
+SET(BLASTEXTSERIALIZATION_PLATFORM_INCLUDES
+)
+
+SET(BLASTEXTSERIALIZATION_COMPILE_DEFS
+ # Common to all configurations
+ ${BLAST_SLN_COMPILE_DEFS};_CONSOLE;
+
+ $<$<CONFIG:debug>:${BLAST_SLN_DEBUG_COMPILE_DEFS}>
+ $<$<CONFIG:checked>:${BLAST_SLN_CHECKED_COMPILE_DEFS}>
+ $<$<CONFIG:profile>:${BLAST_SLN_PROFILE_COMPILE_DEFS}>
+ $<$<CONFIG:release>:${BLAST_SLN_RELEASE_COMPILE_DEFS}>
+)
+
+SET(BLASTEXTSERIALIZATION_LIB_TYPE SHARED)
+
+
+SET(BLASTEXTSERIALIZATION_COMPILE_OPTIONS "/wd4100;/wd4239;/wd4244;/wd4245;/wd4267;/EHsc")
+
diff --git a/NvBlast/sdk/compiler/cmake/windows/NvBlastExtShaders.cmake b/NvBlast/sdk/compiler/cmake/windows/NvBlastExtShaders.cmake
new file mode 100644
index 0000000..7406d74
--- /dev/null
+++ b/NvBlast/sdk/compiler/cmake/windows/NvBlastExtShaders.cmake
@@ -0,0 +1,24 @@
+#
+# Build NvBlastExt Windows
+#
+
+SET(BLASTEXT_PLATFORM_COMMON_FILES
+ ${COMMON_SOURCE_DIR}/NvBlastIncludeWindows.h
+)
+
+SET(BLASTEXT_PLATFORM_INCLUDES
+)
+
+SET(BLASTEXT_COMPILE_DEFS
+ # Common to all configurations
+ ${BLAST_SLN_COMPILE_DEFS};_CONSOLE;
+
+ $<$<CONFIG:debug>:${BLAST_SLN_DEBUG_COMPILE_DEFS}>
+ $<$<CONFIG:checked>:${BLAST_SLN_CHECKED_COMPILE_DEFS}>
+ $<$<CONFIG:profile>:${BLAST_SLN_PROFILE_COMPILE_DEFS}>
+ $<$<CONFIG:release>:${BLAST_SLN_RELEASE_COMPILE_DEFS}>
+)
+
+SET(BLAST_EXT_SHARED_LIB_TYPE SHARED)
+
+SET(BLASTEXT_PLATFORM_COMPILE_OPTIONS "/wd4100;/wd4239;/wd4244;/wd4245;/wd4267;/EHsc")
diff --git a/NvBlast/sdk/compiler/cmake/windows/NvBlastTk.cmake b/NvBlast/sdk/compiler/cmake/windows/NvBlastTk.cmake
new file mode 100644
index 0000000..d12f78f
--- /dev/null
+++ b/NvBlast/sdk/compiler/cmake/windows/NvBlastTk.cmake
@@ -0,0 +1,27 @@
+#
+# Build NvBlastTk Windows
+#
+
+FIND_PACKAGE(nvToolsExt $ENV{PM_nvToolsExt_VERSION} REQUIRED)
+
+SET(BLASTTK_PLATFORM_COMMON_FILES
+ ${COMMON_SOURCE_DIR}/NvBlastIncludeWindows.h
+)
+
+SET(BLASTTK_PLATFORM_INCLUDES
+ PRIVATE $<$<OR:$<CONFIG:debug>,$<CONFIG:checked>,$<CONFIG:profile>>:${NVTOOLSEXT_INCLUDE_DIRS}>
+)
+
+SET(BLASTTK_COMPILE_DEFS
+ # Common to all configurations
+ ${BLAST_SLN_COMPILE_DEFS};_CONSOLE
+
+ $<$<CONFIG:debug>:${BLAST_SLN_DEBUG_COMPILE_DEFS}>
+ $<$<CONFIG:checked>:${BLAST_SLN_CHECKED_COMPILE_DEFS}>
+ $<$<CONFIG:profile>:${BLAST_SLN_PROFILE_COMPILE_DEFS}>
+ $<$<CONFIG:release>:${BLAST_SLN_RELEASE_COMPILE_DEFS}>
+)
+
+SET(BLASTTK_LIBTYPE "SHARED")
+
+SET(BLASTTK_PLATFORM_LINKED_LIBS Rpcrt4 $<$<OR:$<CONFIG:debug>,$<CONFIG:checked>,$<CONFIG:profile>>:${NVTOOLSEXT_LIB}>)
diff --git a/NvBlast/sdk/extensions/authoring/include/NvBlastExtAuthoringBondGenerator.h b/NvBlast/sdk/extensions/authoring/include/NvBlastExtAuthoringBondGenerator.h
new file mode 100644
index 0000000..68767eb
--- /dev/null
+++ b/NvBlast/sdk/extensions/authoring/include/NvBlastExtAuthoringBondGenerator.h
@@ -0,0 +1,136 @@
+/*
+* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#ifndef NVBLASTEXTAUTHORINGBONDGENERATOR_H
+#define NVBLASTEXTAUTHORINGBONDGENERATOR_H
+
+#include "NvBlastExtAuthoringTypes.h"
+#include "NvBlastExtAuthoringFractureTool.h"
+#include "NvBlastTypes.h"
+#include "../cooking/PxCooking.h"
+#include <PxPlane.h>
+#include <NvBlastExtAuthoringCollisionBuilder.h>
+struct NvBlastBondDesc;
+struct NvBlastChunkDesc;
+struct NvBlastBond;
+
+using namespace physx;
+
+
+namespace Nv
+{
+namespace Blast
+{
+
+// Forward declarations
+class TriangleProcessor;
+struct PlaneChunkIndexer;
+
+/**
+ Bond interface generation configuration
+ EXACT - common surface will be searched
+ AVERAGE - Inerface is approximated by projections or intersecitons with midplane
+*/
+struct BondGenerationConfig
+{
+ enum BondGenMode { EXACT, AVERAGE };
+ BondGenMode bondMode;
+};
+
+
+struct PlaneChunkIndexer
+{
+ PlaneChunkIndexer(int32_t chunkId, int32_t trId, physx::PxPlane pl) : chunkId(chunkId), trId(trId), plane(pl) {}
+ int32_t chunkId;
+ int32_t trId;
+ physx::PxPlane plane;
+};
+
+
+/**
+ Tool for gathering bond information from provided mesh geometry
+*/
+
+class BlastBondGenerator
+{
+public:
+
+ BlastBondGenerator(physx::PxCooking* cooking, physx::PxPhysicsInsertionCallback* insertionCallback) : mPxCooking(cooking), mPxInsertionCallback(insertionCallback){};
+
+ /**
+ This method based on marking triangles during fracture process, so can be used only with internally fractured meshes.
+ \param[in] tool FractureTool which contains chunks representation, tool->finalizeFracturing() should be called before.
+ \param[in] chunkIsSupport Array of flags, if true - chunk is support. Array size should be equal to chunk count in tool.
+ \param[out] resultBondDescs Array of created bond descriptors.
+ \param[out] resultChunkDescriptors Array of created chunk descriptors.
+ \return 0 if success
+ */
+ int32_t buildDescFromInternalFracture(FractureTool* tool, const std::vector<bool>& chunkIsSupport, std::vector<NvBlastBondDesc>& resultBondDescs, std::vector<NvBlastChunkDesc>& resultChunkDescriptors);
+
+
+ /**
+ Creates bond description between two meshes
+ \param[in] meshA Array of triangles of mesh A.
+ \param[in] meshB Array of triangles of mesh B.
+ \param[out] resultBond Result bond description.
+ \param[in] conf Bond creation mode.
+ \return 0 if success
+ */
+ int32_t createBondBetweenMeshes(const std::vector<Triangle>& meshA, const std::vector<Triangle>& meshB, NvBlastBond& resultBond, BondGenerationConfig conf = BondGenerationConfig());
+
+ /**
+ Creates bond description between number of meshes
+ \param[in] geometry Array of arrays of triangles for each chunk.
+ \param[out] resultBond Array of result bonds.
+ \param[in] overlaps Array of pairs - indexes of chunks, for which bond should be created.
+ \param[in] cfg Bond creation mode.
+ \return 0 if success
+ */
+ int32_t createBondBetweenMeshes(const std::vector<std::vector<Triangle> >& geometry, std::vector<NvBlastBondDesc>& resultBond, const std::vector<std::pair<uint32_t, uint32_t> >& overlaps, BondGenerationConfig cfg);
+
+
+ /**
+ Creates bond description for prefractured meshes, when there is no info about which chunks should be connected with bond.
+ \param[in] geometry Array of arrays of triangles for each chunk.
+ \param[in] chunkIsSupport Array of flags, if true - chunk is support. Array size should be equal to chunk count in tool.
+ \param[out] resultBondDescs Array of result bonds.
+ \param[in] conf Bond creation mode.
+ \return 0 if success
+ */
+ int32_t bondsFromPrefractured(const std::vector<std::vector<Triangle>>& geometry, const std::vector<bool>& chunkIsSupport, std::vector<NvBlastBondDesc>& resultBondDescs, BondGenerationConfig conf = BondGenerationConfig());
+
+private:
+ float processWithMidplanes(TriangleProcessor* trProcessor, const std::vector<physx::PxVec3>& chunk1Points, const std::vector<physx::PxVec3>& chunk2Points,
+ const std::vector<physx::PxVec3>& hull1p,const std::vector<physx::PxVec3>& hull2p, physx::PxVec3& normal, physx::PxVec3& centroid);
+
+ int32_t createFullBondListAveraged(const std::vector<std::vector<Triangle>>& chunksGeometry, const std::vector<bool>& supportFlags, std::vector<NvBlastBondDesc>& mResultBondDescs, BondGenerationConfig conf);
+ int32_t createFullBondListExact(const std::vector<std::vector<Triangle>>& chunksGeometry, const std::vector<bool>& supportFlags, std::vector<NvBlastBondDesc>& mResultBondDescs, BondGenerationConfig conf);
+ int32_t createFullBondListExactInternal(const std::vector<std::vector<Triangle>>& chunksGeometry, std::vector < PlaneChunkIndexer >& planeTriangleMapping , std::vector<NvBlastBondDesc>& mResultBondDescs);
+ int32_t createBondForcedInternal(const std::vector<PxVec3>& hull0, const std::vector<PxVec3>& hull1,const CollisionHull& cHull0, const CollisionHull& cHull1,PxBounds3 bound0, PxBounds3 bound1, NvBlastBond& resultBond, float overlapping);
+
+ void buildGeometryCache(const std::vector<std::vector<Triangle> >& geometry);
+ void resetGeometryCache();
+
+ physx::PxCooking* mPxCooking;
+ physx::PxPhysicsInsertionCallback* mPxInsertionCallback;
+
+
+ std::vector<std::vector<Triangle> > mGeometryCache;
+
+ std::vector<PlaneChunkIndexer> mPlaneCache;
+ std::vector<CollisionHull> mCHullCache;
+ std::vector<std::vector<physx::PxVec3> > mHullsPointsCache;
+ std::vector<physx::PxBounds3 > mBoundsCache;
+};
+
+} // namespace Blast
+} // namespace Nv
+
+#endif // NVBLASTEXTAUTHORINGBONDGENERATOR_H \ No newline at end of file
diff --git a/NvBlast/sdk/extensions/authoring/include/NvBlastExtAuthoringCollisionBuilder.h b/NvBlast/sdk/extensions/authoring/include/NvBlastExtAuthoringCollisionBuilder.h
new file mode 100644
index 0000000..b3e143a
--- /dev/null
+++ b/NvBlast/sdk/extensions/authoring/include/NvBlastExtAuthoringCollisionBuilder.h
@@ -0,0 +1,123 @@
+/*
+* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#ifndef NVBLASTEXTAUTHORINGCOLLISIONBUILDER_H
+#define NVBLASTEXTAUTHORINGCOLLISIONBUILDER_H
+
+#include "NvBlastTypes.h"
+#include <vector>
+#include <PxVec3.h>
+
+namespace physx
+{
+ class PxCooking;
+ class PxPhysicsInsertionCallback;
+ class PxVec3;
+ class PxConvexMesh;
+}
+
+
+namespace Nv
+{
+namespace Blast
+{
+
+/**
+ Collision hull geometry format.
+*/
+struct CollisionHull
+{
+ /**
+ Collision hull polygon format.
+ */
+ struct HullPolygon
+ {
+ // Polygon base plane
+ float mPlane[4];
+ // Number vertices in polygon
+ uint16_t mNbVerts;
+ // First index in CollisionHull.indices array for this polygon
+ uint16_t mIndexBase;
+ };
+ ///**
+
+ CollisionHull(){};
+
+ std::vector<physx::PxVec3> points;
+ std::vector<uint32_t> indices;
+ std::vector<HullPolygon> polygonData;
+};
+
+
+/**
+ ConvexMeshBuilder provides routine to build collision hulls from array of vertices.
+ Collision hull is built as convex hull of provided point set.
+ If due to some reason building of convex hull is failed, collision hull is built as bounding box of vertex set.
+*/
+class ConvexMeshBuilder
+{
+public:
+
+ /**
+ Constructor should be provided with PxCoocking and PxPhysicsInsertionCallback objects.
+ */
+ ConvexMeshBuilder(physx::PxCooking* cooking, physx::PxPhysicsInsertionCallback* insertionCallback) : mInsertionCallback(insertionCallback), mCooking(cooking) {}
+
+ /**
+ Method creates CollisionHull from provided array of vertices.
+ \param[in] vertexData Vertex array of some object, for which collision geometry should be built
+ \param[out] output Reference on CollisionHull object in which generated geometry should be saved
+ */
+ void buildCollisionGeometry(const std::vector<physx::PxVec3>& vertexData, CollisionHull& output);
+
+ /**
+ Method creates PxConvexMesh from provided array of vertices.
+ \param[in] vertexData Vertex array of some object, for which collision geometry should be built
+
+ \return pointer to the PxConvexMesh object if it was built successfully, 'nullptr' otherwise.
+ */
+ physx::PxConvexMesh* buildConvexMesh(std::vector<physx::PxVec3>& vertexData);
+
+
+ /**
+ Method creates PxConvexMesh from provided ConvexHull geometry
+ \param[in] hull ConvexHull geometry
+
+ \return pointer to the PxConvexMesh object if it was built successfully, 'nullptr' otherwise.
+ */
+ physx::PxConvexMesh* buildConvexMesh(CollisionHull& hull);
+
+
+ /**
+ Convex geometry trimming.
+ Using slicing with noised slicing surface can result in intersecting collision geometry.
+ It leads to unstable behaviour of rigid body simulation.
+ This method trims all intersecting parts of collision geometry.
+ As a drawback, trimming collision geometry can lead to penetrating render meshes during simulation.
+
+
+ \param[in] in ConvexHull geometry which should be clipped.
+ \param[in] chunkDepth Array of depth levels of convex hulls corresponding chunks.
+
+ */
+
+ void trimCollisionGeometry(std::vector<CollisionHull>& in, const std::vector<uint32_t>& chunkDepth);
+
+
+private:
+ physx::PxPhysicsInsertionCallback* mInsertionCallback;
+ physx::PxCooking* mCooking;
+};
+
+} // namespace Blast
+} // namespace Nv
+
+
+#endif // ifndef NVBLASTEXTAUTHORINGCOLLISIONBUILDER_H
diff --git a/NvBlast/sdk/extensions/authoring/include/NvBlastExtAuthoringFractureTool.h b/NvBlast/sdk/extensions/authoring/include/NvBlastExtAuthoringFractureTool.h
new file mode 100644
index 0000000..528ffbc
--- /dev/null
+++ b/NvBlast/sdk/extensions/authoring/include/NvBlastExtAuthoringFractureTool.h
@@ -0,0 +1,435 @@
+/*
+* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#ifndef NVBLASTAUTHORINGFRACTURETOOL_H
+#define NVBLASTAUTHORINGFRACTURETOOL_H
+
+#include "NvBlastExtAuthoringMesh.h"
+#include "NvBlastTypes.h"
+
+
+namespace Nv
+{
+namespace Blast
+{
+
+class SpatialAccelerator;
+class ChunkPostProcessor;
+
+
+/*
+ Chunk data, chunk with chunkId == 0 is always source mesh.
+*/
+struct ChunkInfo
+{
+ Mesh* meshData;
+ int32_t parent;
+ int32_t chunkId;
+ bool isLeaf;
+};
+
+
+/*
+ Slicing fracturing configuration
+
+
+ default:
+ x_slices = 1;
+ y_slices = 1;
+ z_slices = 1;
+
+ offset_variations = 0.f;
+ angle_variations = 0.f;
+ noiseAmplitude = 0.f;
+ noiseFrequency = 1.f;
+ noiseOctaveNumber = 1;
+ surfaceResolution = 1;
+*/
+struct SlicingConfiguration
+{
+ /**
+ Number of slices in each direction
+ */
+ int32_t x_slices, y_slices, z_slices;
+
+ /**
+ Offset variation, value in [0, 1]
+ */
+ float offset_variations;
+ /**
+ Angle variation, value in [0, 1]
+ */
+ float angle_variations;
+
+
+ /**
+ Noisy slicing configutaion:
+
+ Amplitude of cutting surface noise. If it is 0 - noise is disabled.
+ */
+ float noiseAmplitude;
+ /**
+ Frequencey of cutting surface noise.
+ */
+ float noiseFrequency;
+ /**
+ Octave number in slicing surface noise.
+ */
+ uint32_t noiseOctaveNumber;
+ /**
+ Cutting surface resolution.
+ */
+ int32_t surfaceResolution;
+
+
+ SlicingConfiguration()
+ {
+ reset();
+ }
+ /**
+ Set default params.
+ */
+ void reset()
+ {
+ x_slices = 1;
+ y_slices = 1;
+ z_slices = 1;
+
+ offset_variations = 0.f;
+ angle_variations = 0.f;
+ noiseAmplitude = 0.f;
+ noiseFrequency = 1.f;
+ noiseOctaveNumber = 1;
+ surfaceResolution = 1;
+ }
+
+};
+
+
+
+/**
+ Class for voronoi sites generation inside supplied mesh.
+*/
+class VoronoiSitesGenerator
+{
+public:
+
+ /**
+ Voronoi sites should not be generated outside of the fractured mesh, so VoronoiSitesGenerator
+ should be supplied with fracture mesh.
+ \param[in] mesh Fracture mesh
+ \param[in] rnd User supplied random value generator.
+ \return
+ */
+ VoronoiSitesGenerator(Mesh* mesh, RandomGeneratorBase* rnd);
+ ~VoronoiSitesGenerator();
+
+ /**
+ Set base fracture mesh
+ */
+ void setBaseMesh(Mesh* m);
+
+ /**
+ Returns reference on vector of generated voronoi sites.
+ */
+ std::vector<physx::PxVec3>& getVoronoiSites();
+
+ /**
+ Add site in particular point
+ \param[in] site Site coordinates
+ */
+ void addSite(const physx::PxVec3& site);
+ /**
+ Uniformly generate sites inside the mesh
+ \param[in] numberOfSites Number of generated sites
+ */
+ void uniformlyGenerateSitesInMesh(const uint32_t numberOfSites);
+
+ /**
+ Generate sites in clustered fashion
+ \param[in] numberOfClusters Number of generated clusters
+ \param[in] sitesPerCluster Number of sites in each cluster
+ \param[in] clusterRadius Voronoi cells cluster radius
+ */
+ void clusteredSitesGeneration(const uint32_t numberOfClusters, const uint32_t sitesPerCluster, float clusterRadius);
+
+ /**
+ Radial pattern of sites generation
+ \param[in] center Center of generated pattern
+ \param[in] normal Normal to plane in which sites are generated
+ \param[in] radius Pattern radius
+ \param[in] angularSteps Number of angular steps
+ \param[in] radialSteps Number of radial steps
+ \param[in] angleOffset Angle offset at each radial step
+ \param[in] variability Randomness of sites distribution
+ */
+ void radialPattern(const physx::PxVec3& center, const physx::PxVec3& normal, float radius, int32_t angularSteps, int32_t radialSteps, float angleOffset = 0.0f, float variability = 0.0f);
+
+ /**
+ Generate sites inside sphere
+ \param[in] count Count of generated sites
+ \param[in] radius Radius of sphere
+ \param[in] center Center of sphere
+ */
+ void generateInSphere(const uint32_t count, const float radius, const physx::PxVec3& center);
+ /**
+ Set stencil mesh. With stencil mesh sites are generated only inside both of fracture and stencil meshes.
+ \param[in] stencil Stencil mesh.
+ */
+ void setStencil(Mesh* stencil);
+ /**
+ Removes stencil mesh
+ */
+ void clearStencil();
+
+ /**
+ Deletes sites inside supplied sphere
+ \param[in] radius Radius of sphere
+ \param[in] center Center of sphere
+ \param[in] eraserProbability Probability of removing some particular site
+ */
+ void deleteInSphere(const float radius, const physx::PxVec3& center, const float eraserProbability = 1);
+
+private:
+ std::vector<physx::PxVec3> mGeneratedSites;
+ Mesh* mMesh;
+ Mesh* mStencil;
+ RandomGeneratorBase* mRnd;
+ SpatialAccelerator* mAccelerator;
+};
+
+
+
+/**
+ FractureTool class provides methods to fracture provided mesh and generate Blast asset data
+*/
+class FractureTool
+{
+
+public:
+
+ /**
+ FractureTool can log asset creation info if logCallback is provided.
+ */
+ FractureTool(NvBlastLog logCallback = nullptr)
+ {
+ mPlaneIndexerOffset = 1;
+ mChunkIdCounter = 0;
+ mRemoveIslands = false;
+ mLoggingCallback = logCallback;
+ }
+
+ ~FractureTool()
+ {
+ reset();
+ }
+
+ /**
+ Reset FractureTool state.
+ */
+ void reset();
+
+
+ /**
+ Set input mesh wich will be fractured, FractureTool will be reseted.
+ */
+ void setSourceMesh(Mesh* mesh);
+
+ /**
+ Get chunk mesh in polygonal representation
+ */
+ Mesh getChunkMesh(int32_t chunkId);
+
+ /**
+ Input mesh is scaled and transformed internally to fit unit cube centered in origin.
+ Method provides offset vector and scale parameter;
+ */
+ void getTransformation(physx::PxVec3& offset, float& scale);
+
+
+ /**
+ Fractures specified chunk with voronoi method.
+ \param[in] chunkId Chunk to fracture
+ \param[in] cellPoints Array of voronoi sites
+ \param[in] replaceChunk if 'true', newly generated chunks will replace source chunk, if 'false', newly generated chunks will be at next depth level, source chunk will be parent for them.
+ Case replaceChunk == true && chunkId == 0 considered as wrong input parameters
+ \return If 0, fracturing is successful.
+ */
+ int32_t voronoiFracturing(uint32_t chunkId, const std::vector<physx::PxVec3>& cellPoints, bool replaceChunk);
+
+ /**
+ Fractures specified chunk with voronoi method. Cells can be scaled along x,y,z axes.
+ \param[in] chunkId Chunk to fracture
+ \param[in] cellPoints Array of voronoi sites
+ \param[in] cellPoints Array of voronoi sites
+ \param[in] scale Voronoi cells scaling factor
+ \param[in] replaceChunk if 'true', newly generated chunks will replace source chunk, if 'false', newly generated chunks will be at next depth level, source chunk will be parent for them.
+ Case replaceChunk == true && chunkId == 0 considered as wrong input parameters
+ \return If 0, fracturing is successful.
+ */
+ int32_t voronoiFracturing(uint32_t chunkId, const std::vector<physx::PxVec3>& cellPoints, const physx::PxVec3& scale, bool replaceChunk);
+
+
+ /**
+ Fractures specified chunk with slicing method.
+ \param[in] chunkId Chunk to fracture
+ \param[in] conf Slicing parameters, see SlicingConfiguration.
+ \param[in] replaceChunk if 'true', newly generated chunks will replace source chunk, if 'false', newly generated chunks will be at next depth level, source chunk will be parent for them.
+ Case replaceChunk == true && chunkId == 0 considered as wrong input parameters
+ \param[in] rnd User supplied random number generator
+
+ \return If 0, fracturing is successful.
+ */
+ int32_t slicing(uint32_t chunkId, SlicingConfiguration conf, bool replaceChunk, RandomGeneratorBase* rnd);
+
+
+ /**
+ Creates resulting fractured mesh geometry from intermediate format
+ */
+ void finalizeFracturing();
+
+ /**
+ Get chunk information
+ */
+ const std::vector<ChunkInfo>& getChunkList();
+
+
+ /**
+ Tesselate interior surfaces
+ \param[in] averageEdgeLength - Average length of edge on internal surface.
+ */
+ void tesselate(float averageEdgeLength);
+
+ /**
+ Apply noise to interior surfaces. Must be called only after tesselation!
+ \param[in] amplitude Amplitude of noise
+ \param[in] frequency Frequency of noise
+ \param[in] octaves Number of noise octaves
+ \param[in] falloff - damping of noise around of external surface
+ \param[in] relaxIterations - number of smoothing iterations before applying noise
+ \param[in] relaxFactor - amount of smoothing before applying noise.
+ \param[in] seed Random seed value
+ */
+ void applyNoise(float amplitude, float frequency, int32_t octaves, float falloff, int32_t relaxIterations, float relaxFactor, int32_t seed = 0);
+
+ /**
+ Get percentage of mesh overlap.
+ percentage computed as volume(intersection(meshA , meshB)) / volume (meshA)
+ \param[in] meshA Mesh A
+ \param[in] meshB Mesh B
+ \return mesh overlap percentage
+ */
+ static float getMeshOverlap(Mesh& meshA, Mesh& meshB);
+
+ /**
+ Get chunk base mesh
+ \param[in] chunkIndex Chunk index
+ \param[out] output Array of triangles to be filled
+ */
+ void getBaseMesh(int32_t chunkIndex, std::vector<Triangle>& output);
+
+ /**
+ Get chunk mesh with noise
+ \param[in] chunkIndex Chunk index
+ \param[out] output Array of triangles to be filled
+ */
+ void getNoisedMesh(int32_t chunkIndex, std::vector<Triangle>& output);
+
+
+ /**
+ Return index of chunk with specified chunkId
+ \param[in] chunkId Chunk ID
+ \return Chunk index in internal buffer, if not exist -1 is returned.
+ */
+ int32_t getChunkIndex(int32_t chunkId);
+
+ /**
+ Return id of chunk with specified index.
+ \param[in] chunkIndex Chunk index
+ \return Chunk id or -1 if there is no such chunk.
+ */
+ int32_t getChunkId(int32_t chunkIndex);
+
+ /**
+ Return depth level of the given chunk
+ \param[in] chunkId Chunk ID
+ \return Chunk depth or -1 if there is no such chunk.
+ */
+ int32_t getChunkDepth(int32_t chunkId);
+
+ /**
+ Return array of chunks IDs with given depth.
+ \param[in] depth Chunk depth
+ \return Array of chunk IDs
+ */
+ std::vector<int32_t> getChunksIdAtDepth(uint32_t depth);
+
+
+ /**
+ Get result geometry without noise as vertex and index buffers, where index buffers contain series of triplets
+ which represent triangles.
+ \param[out] vertexBuffer Array of vertices to be filled
+ \param[out] indexBuffer Array of arrays of indices to be filled
+ */
+ void getBufferedBaseMeshes(std::vector<Vertex>& vertexBuffer, std::vector<std::vector<uint32_t> >& indexBuffer);
+
+ /**
+ Get result geometry after tesselation and application of noise as vertex and index buffers, where index buffers contain series of triplets
+ which represent triangles.
+ \param[out] vertexBuffer Array of vertices to be filled
+ \param[out] indexBuffer Array of arrays of indices to be filled
+ */
+ void getBufferedNoiseMeshes(std::vector<Vertex>& vertexBuffer, std::vector<std::vector<uint32_t> >& indexBuffer);
+
+ /**
+ Set automatic islands removing. May cause instabilities.
+ \param[in] isRemoveIslands Flag whether remove or not islands.
+ */
+ void setRemoveIslands(bool isRemoveIslands);
+
+ /**
+ Try find islands and remove them on some specifical chunk. If chunk has childs, island removing can lead to wrong results! Apply it before further chunk splitting.
+ \param[in] chunkId Chunk ID which should be checked for islands
+ \return Number of found islands is returned
+ */
+ int32_t islandDetectionAndRemoving(int32_t chunkId);
+
+private:
+ void eraseChunk(int32_t chunkId);
+ bool isAncestorForChunk(int32_t ancestorId, int32_t chunkId);
+ void deleteAllChildsOfChunk(int32_t chunkId);
+ int32_t slicingNoisy(uint32_t chunkId, SlicingConfiguration conf, bool replaceChunk, RandomGeneratorBase* rnd);
+
+protected:
+ /**
+ Mesh scaled to unite-cube and translated to the origin
+ */
+ float mScaleFactor;
+ physx::PxVec3 mOffset;
+
+ /* Chunk mesh wrappers */
+ std::vector<ChunkPostProcessor*> mChunkPostprocessors;
+
+
+
+ int32_t mPlaneIndexerOffset;
+ int32_t mChunkIdCounter;
+ std::vector<ChunkInfo> mChunkData;
+
+ bool mRemoveIslands;
+
+ NvBlastLog mLoggingCallback;
+};
+
+} // namespace Blast
+} // namespace Nv
+
+
+#endif // ifndef NVBLASTAUTHORINGFRACTURETOOL_H
diff --git a/NvBlast/sdk/extensions/authoring/include/NvBlastExtAuthoringMesh.h b/NvBlast/sdk/extensions/authoring/include/NvBlastExtAuthoringMesh.h
new file mode 100644
index 0000000..2b1806a
--- /dev/null
+++ b/NvBlast/sdk/extensions/authoring/include/NvBlastExtAuthoringMesh.h
@@ -0,0 +1,174 @@
+/*
+* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#ifndef NVBLASTAUTHORINGMESH_H
+#define NVBLASTAUTHORINGMESH_H
+
+#include "NvBlastExtAuthoringTypes.h"
+#include <vector>
+
+
+namespace Nv
+{
+namespace Blast
+{
+
+/**
+ Class for internal mesh representation
+*/
+class Mesh
+{
+public:
+
+ /**
+ Constructs mesh object from array of triangles.
+ \param[in] position Array of vertex positions
+ \param[in] normals Array of vertex normals
+ \param[in] uv Array of vertex uv coordinates
+ \param[in] verticesCount Vertices count
+ \param[in] indices Array of vertex indices. Indices contain vertex index triplets which form a mesh triangle.
+ \param[in] indicesCount Indices count (should be equal to numberOfTriangles * 3)
+ */
+ Mesh(physx::PxVec3* position, physx::PxVec3* normals, physx::PxVec2* uv, uint32_t verticesCount, uint32_t* indices, uint32_t indicesCount);
+
+ /**
+ Constructs mesh object from array of facets.
+ \param[in] vertices Array of vertices
+ \param[in] edges Array of edges
+ \param[in] facets Array of facets
+ \param[in] posCount Vertices count
+ \param[in] edgesCount Edges count
+ \param[in] facetsCount Facets count
+ */
+ Mesh(Vertex* vertices, Edge* edges, Facet* facets, uint32_t posCount, uint32_t edgesCount, uint32_t facetsCount);
+
+ ~Mesh();
+
+ /**
+ Return true if mesh is valid
+ */
+ bool isValid();
+
+ /**
+ Return pointer on vertices array
+ */
+ Vertex* getVertices();
+
+ /**
+ Return pointer on edges array
+ */
+ Edge* getEdges();
+
+ /**
+ Return pointer on facets array
+ */
+ Facet* getFacetsBuffer();
+
+ /**
+ Return pointer on specified facet
+ */
+ Facet* getFacet(int32_t facet);
+
+ /**
+ Return edges count
+ */
+ uint32_t getEdgesCount();
+
+ /**
+ Return vertices count
+ */
+ uint32_t getVerticesCount();
+
+ /**
+ Return facet count
+ */
+ uint32_t getFacetCount();
+
+ /**
+ Return reference on mesh bounding box.
+ */
+ physx::PxBounds3& getBoundingBox();
+
+ /**
+ Recalculate bounding box
+ */
+ void recalculateBoundingBox();
+
+ /**
+ Compute mesh volume. Can be used only for triangulated meshes.
+ Return mesh volume. If mesh is not triangulated return 0.
+ */
+ float getMeshVolume();
+
+private:
+ std::vector<Vertex> mVertices;
+ std::vector<Edge> mEdges;
+ std::vector<Facet> mFacets;
+ physx::PxBounds3 mBounds;
+};
+
+
+/**
+ Helper functions
+*/
+
+/**
+ Set cutting box at some particular position.
+ \param[in] point Cutting face center
+ \param[in] normal Cutting face normal
+ \param[in] mesh Cutting box mesh
+ \param[in] size Cutting box size
+ \param[in] id Cutting box ID
+*/
+void setCuttingBox(const physx::PxVec3& point, const physx::PxVec3& normal, Mesh* mesh, float size, int32_t id);
+/**
+ Create cutting box at some particular position.
+ \param[in] point Cutting face center
+ \param[in] normal Cutting face normal
+ \param[in] size Cutting box size
+ \param[in] id Cutting box ID
+*/
+Mesh* getCuttingBox(const physx::PxVec3& point, const physx::PxVec3& normal, float size, int32_t id);
+
+/**
+ Create box at some particular position.
+ \param[in] point Cutting face center
+ \param[in] size Cutting box size
+*/
+Mesh* getBigBox(const physx::PxVec3& point, float size);
+
+/**
+ Create slicing box with noisy cutting surface.
+ \param[in] point Cutting face center
+ \param[in] normal Cutting face normal
+ \param[in] size Cutting box size
+ \param[in] jaggedPlaneSize Noisy surface size
+ \param[in] resolution Noisy surface resolution
+ \param[in] id Cutting box ID
+ \param[in] amplitude Noise amplitude
+ \param[in] frequency Noise frequency
+ \param[in] octaves Noise octaves
+ \param[in] seed Random generator seed, used for noise generation.
+*/
+Mesh* getNoisyCuttingBoxPair(const physx::PxVec3& point, const physx::PxVec3& normal, float size, float jaggedPlaneSize, uint32_t resolution, int32_t id, float amplitude, float frequency, int32_t octaves, int32_t seed);
+
+
+/**
+ Inverses normals of cutting box and sets indices.
+ \param[in] mesh Cutting box mesh
+ \param[in] id Cutting box ID
+*/
+void inverseNormalAndSetIndices(Mesh* mesh, int32_t id);
+
+} // namespace Blast
+} // namespace Nv
+
+
+#endif // ifndef NVBLASTAUTHORINGMESH_H
diff --git a/NvBlast/sdk/extensions/authoring/include/NvBlastExtAuthoringTypes.h b/NvBlast/sdk/extensions/authoring/include/NvBlastExtAuthoringTypes.h
new file mode 100644
index 0000000..de28866
--- /dev/null
+++ b/NvBlast/sdk/extensions/authoring/include/NvBlastExtAuthoringTypes.h
@@ -0,0 +1,128 @@
+/*
+* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#ifndef NVBLASTAUTHORINGTYPES_H
+#define NVBLASTAUTHORINGTYPES_H
+
+#include <PxVec3.h>
+#include <PxVec2.h>
+#include <PxBounds3.h>
+#include <algorithm>
+#include "NvBlastTypes.h"
+
+#define NOT_VALID_VERTEX INT32_MAX
+
+namespace Nv
+{
+namespace Blast
+{
+
+/**
+ Edge representation
+*/
+struct Edge
+{
+ uint32_t s, e;
+ Edge() : s(NOT_VALID_VERTEX), e(NOT_VALID_VERTEX){}
+ Edge(int s, int e) : s(s), e(e) {}
+ bool operator<(const Edge& b) const
+ {
+ if (s == b.s)
+ return e < b.e;
+ else
+ return s < b.s;
+ }
+};
+
+/**
+ Mesh vertex representation
+*/
+struct Vertex
+{
+ physx::PxVec3 p; // Position
+ physx::PxVec3 n; // Normal
+ physx::PxVec2 uv[1]; // UV-coordinates array, currently supported only one UV coordinate.
+};
+
+/**
+ Mesh triangle representation
+*/
+struct Triangle
+{
+ Triangle() {};
+ Triangle(Vertex a, Vertex b, Vertex c) : a(a), b(b), c(c) {};
+ Vertex a, b, c;
+ int32_t userInfo;
+ physx::PxVec3 getNormal()
+ {
+ return ((b.p - a.p).cross(c.p - a.p));
+ }
+};
+
+
+/**
+ Index based triangle
+*/
+struct TriangleIndexed
+{
+ TriangleIndexed() {};
+ TriangleIndexed(uint32_t a, uint32_t b, uint32_t c) : ea(a), eb(b), ec(c) {};
+
+ uint32_t getOpposite(uint32_t a, uint32_t b)
+ {
+ if (ea != a && ea != b)
+ return ea;
+ if (eb != a && eb != b)
+ return eb;
+ if (ec != a && ec != b)
+ return ec;
+ return NOT_VALID_VERTEX;
+ }
+
+ bool isContainEdge(uint32_t a, uint32_t b)
+ {
+ return (a == ea || a == eb || a == ec) && (b == ea || b == eb || b == ec);
+ }
+
+ uint32_t ea, eb, ec;
+ int32_t userInfo;
+};
+
+/**
+ Mesh facet representation
+*/
+struct Facet
+{
+ int32_t firstEdgeNumber;
+ uint32_t edgesCount;
+ int32_t userData;
+ Facet(int32_t fEdge = 0, uint32_t eCount = 0, int32_t userData = 0) : firstEdgeNumber(fEdge), edgesCount(eCount), userData(userData) {}
+};
+
+/**
+Abstract base class for user-defined random value generator.
+*/
+class RandomGeneratorBase
+{
+public:
+ // Generates uniformly distributed value in [0, 1] range.
+ virtual float getRandomValue() = 0;
+ // Seeds random value generator
+ virtual void seed(int32_t seed) = 0;
+ virtual ~RandomGeneratorBase() {};
+};
+
+
+
+} // namespace Blast
+} // namespace Nv
+
+
+#endif // ifndef NVBLASTAUTHORINGTYPES_H
diff --git a/NvBlast/sdk/extensions/authoring/source/NvBlastExtApexSharedParts.cpp b/NvBlast/sdk/extensions/authoring/source/NvBlastExtApexSharedParts.cpp
new file mode 100644
index 0000000..73c59b8
--- /dev/null
+++ b/NvBlast/sdk/extensions/authoring/source/NvBlastExtApexSharedParts.cpp
@@ -0,0 +1,1004 @@
+/*
+* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#include "NvBlastExtApexSharedParts.h"
+
+#include "PxMat44.h"
+#include "PxBounds3.h"
+#include "PxFoundation.h"
+#include "PxPhysics.h"
+#include "PsVecMath.h"
+#include <vector>
+
+using namespace physx;
+using namespace physx::shdfnd::aos;
+
+
+namespace Nv
+{
+namespace Blast
+{
+
+PX_NOALIAS PX_FORCE_INLINE BoolV PointOutsideOfPlane4(const Vec3VArg _a, const Vec3VArg _b, const Vec3VArg _c, const Vec3VArg _d)
+{
+ // this is not 0 because of the following scenario:
+ // All the points lie on the same plane and the plane goes through the origin (0,0,0).
+ // On the Wii U, the math below has the problem that when point A gets projected on the
+ // plane cumputed by A, B, C, the distance to the plane might not be 0 for the mentioned
+ // scenario but a small positive or negative value. This can lead to the wrong boolean
+ // results. Using a small negative value as threshold is more conservative but safer.
+ const Vec4V zero = V4Load(-1e-6);
+
+ const Vec3V ab = V3Sub(_b, _a);
+ const Vec3V ac = V3Sub(_c, _a);
+ const Vec3V ad = V3Sub(_d, _a);
+ const Vec3V bd = V3Sub(_d, _b);
+ const Vec3V bc = V3Sub(_c, _b);
+
+ const Vec3V v0 = V3Cross(ab, ac);
+ const Vec3V v1 = V3Cross(ac, ad);
+ const Vec3V v2 = V3Cross(ad, ab);
+ const Vec3V v3 = V3Cross(bd, bc);
+
+ const FloatV signa0 = V3Dot(v0, _a);
+ const FloatV signa1 = V3Dot(v1, _a);
+ const FloatV signa2 = V3Dot(v2, _a);
+ const FloatV signd3 = V3Dot(v3, _a);
+
+ const FloatV signd0 = V3Dot(v0, _d);
+ const FloatV signd1 = V3Dot(v1, _b);
+ const FloatV signd2 = V3Dot(v2, _c);
+ const FloatV signa3 = V3Dot(v3, _b);
+
+ const Vec4V signa = V4Merge(signa0, signa1, signa2, signa3);
+ const Vec4V signd = V4Merge(signd0, signd1, signd2, signd3);
+ return V4IsGrtrOrEq(V4Mul(signa, signd), zero);//same side, outside of the plane
+}
+
+PX_NOALIAS PX_FORCE_INLINE Vec3V closestPtPointSegment(const Vec3VArg a, const Vec3VArg b)
+{
+ const FloatV zero = FZero();
+ const FloatV one = FOne();
+
+ //Test degenerated case
+ const Vec3V ab = V3Sub(b, a);
+ const FloatV denom = V3Dot(ab, ab);
+ const Vec3V ap = V3Neg(a);//V3Sub(origin, a);
+ const FloatV nom = V3Dot(ap, ab);
+ const BoolV con = FIsEq(denom, zero);
+ const FloatV tValue = FClamp(FDiv(nom, denom), zero, one);
+ const FloatV t = FSel(con, zero, tValue);
+
+ return V3Sel(con, a, V3ScaleAdd(ab, t, a));
+}
+
+PX_NOALIAS PX_FORCE_INLINE Vec3V closestPtPointSegment(const Vec3VArg Q0, const Vec3VArg Q1, const Vec3VArg A0, const Vec3VArg A1,
+ const Vec3VArg B0, const Vec3VArg B1, PxU32& size, Vec3V& closestA, Vec3V& closestB)
+{
+ const Vec3V a = Q0;
+ const Vec3V b = Q1;
+
+ const BoolV bTrue = BTTTT();
+ const FloatV zero = FZero();
+ const FloatV one = FOne();
+
+ //Test degenerated case
+ const Vec3V ab = V3Sub(b, a);
+ const FloatV denom = V3Dot(ab, ab);
+ const Vec3V ap = V3Neg(a);//V3Sub(origin, a);
+ const FloatV nom = V3Dot(ap, ab);
+ const BoolV con = FIsEq(denom, zero);
+
+ if (BAllEq(con, bTrue))
+ {
+ size = 1;
+ closestA = A0;
+ closestB = B0;
+ return Q0;
+ }
+
+ const Vec3V v = V3Sub(A1, A0);
+ const Vec3V w = V3Sub(B1, B0);
+ const FloatV tValue = FClamp(FDiv(nom, denom), zero, one);
+ const FloatV t = FSel(con, zero, tValue);
+
+ const Vec3V tempClosestA = V3ScaleAdd(v, t, A0);
+ const Vec3V tempClosestB = V3ScaleAdd(w, t, B0);
+ closestA = tempClosestA;
+ closestB = tempClosestB;
+ return V3Sub(tempClosestA, tempClosestB);
+}
+
+PX_NOALIAS Vec3V closestPtPointSegmentTesselation(const Vec3VArg Q0, const Vec3VArg Q1, const Vec3VArg A0, const Vec3VArg A1,
+ const Vec3VArg B0, const Vec3VArg B1, PxU32& size, Vec3V& closestA, Vec3V& closestB)
+{
+ const FloatV half = FHalf();
+
+ const FloatV targetSegmentLengthSq = FLoad(10000.f);//100 unit
+
+ Vec3V q0 = Q0;
+ Vec3V q1 = Q1;
+ Vec3V a0 = A0;
+ Vec3V a1 = A1;
+ Vec3V b0 = B0;
+ Vec3V b1 = B1;
+
+ for (;;)
+ {
+ const Vec3V midPoint = V3Scale(V3Add(q0, q1), half);
+ const Vec3V midA = V3Scale(V3Add(a0, a1), half);
+ const Vec3V midB = V3Scale(V3Add(b0, b1), half);
+
+ const Vec3V v = V3Sub(midPoint, q0);
+ const FloatV sqV = V3Dot(v, v);
+ if (FAllGrtr(targetSegmentLengthSq, sqV))
+ break;
+ //split the segment into half
+ const Vec3V tClos0 = closestPtPointSegment(q0, midPoint);
+ const FloatV sqDist0 = V3Dot(tClos0, tClos0);
+
+ const Vec3V tClos1 = closestPtPointSegment(q1, midPoint);
+ const FloatV sqDist1 = V3Dot(tClos1, tClos1);
+ //const BoolV con = FIsGrtr(sqDist0, sqDist1);
+ if (FAllGrtr(sqDist0, sqDist1))
+ {
+ //segment [m, q1]
+ q0 = midPoint;
+ a0 = midA;
+ b0 = midB;
+ }
+ else
+ {
+ //segment [q0, m]
+ q1 = midPoint;
+ a1 = midA;
+ b1 = midB;
+ }
+
+ }
+
+ return closestPtPointSegment(q0, q1, a0, a1, b0, b1, size, closestA, closestB);
+}
+
+PX_NOALIAS Vec3V closestPtPointTriangleTesselation(const Vec3V* PX_RESTRICT Q, const Vec3V* PX_RESTRICT A, const Vec3V* PX_RESTRICT B, const PxU32* PX_RESTRICT indices, PxU32& size, Vec3V& closestA, Vec3V& closestB)
+{
+ size = 3;
+ const FloatV zero = FZero();
+ const FloatV eps = FEps();
+ const FloatV half = FHalf();
+ const BoolV bTrue = BTTTT();
+ const FloatV four = FLoad(4.f);
+ const FloatV sixty = FLoad(100.f);
+
+ const PxU32 ind0 = indices[0];
+ const PxU32 ind1 = indices[1];
+ const PxU32 ind2 = indices[2];
+
+ const Vec3V a = Q[ind0];
+ const Vec3V b = Q[ind1];
+ const Vec3V c = Q[ind2];
+
+ Vec3V ab_ = V3Sub(b, a);
+ Vec3V ac_ = V3Sub(c, a);
+ Vec3V bc_ = V3Sub(b, c);
+
+ const FloatV dac_ = V3Dot(ac_, ac_);
+ const FloatV dbc_ = V3Dot(bc_, bc_);
+ if (FAllGrtrOrEq(eps, FMin(dac_, dbc_)))
+ {
+ //degenerate
+ size = 2;
+ return closestPtPointSegment(Q[ind0], Q[ind1], A[ind0], A[ind1], B[ind0], B[ind1], size, closestA, closestB);
+ }
+
+ Vec3V ap = V3Neg(a);
+ Vec3V bp = V3Neg(b);
+ Vec3V cp = V3Neg(c);
+
+ FloatV d1 = V3Dot(ab_, ap); // snom
+ FloatV d2 = V3Dot(ac_, ap); // tnom
+ FloatV d3 = V3Dot(ab_, bp); // -sdenom
+ FloatV d4 = V3Dot(ac_, bp); // unom = d4 - d3
+ FloatV d5 = V3Dot(ab_, cp); // udenom = d5 - d6
+ FloatV d6 = V3Dot(ac_, cp); // -tdenom
+ /* FloatV unom = FSub(d4, d3);
+ FloatV udenom = FSub(d5, d6);*/
+
+ FloatV va = FNegScaleSub(d5, d4, FMul(d3, d6));//edge region of BC
+ FloatV vb = FNegScaleSub(d1, d6, FMul(d5, d2));//edge region of AC
+ FloatV vc = FNegScaleSub(d3, d2, FMul(d1, d4));//edge region of AB
+
+ //check if p in vertex region outside a
+ const BoolV con00 = FIsGrtrOrEq(zero, d1); // snom <= 0
+ const BoolV con01 = FIsGrtrOrEq(zero, d2); // tnom <= 0
+ const BoolV con0 = BAnd(con00, con01); // vertex region a
+ if (BAllEq(con0, bTrue))
+ {
+ //size = 1;
+ closestA = A[ind0];
+ closestB = B[ind0];
+ return Q[ind0];
+ }
+
+ //check if p in vertex region outside b
+ const BoolV con10 = FIsGrtrOrEq(d3, zero);
+ const BoolV con11 = FIsGrtrOrEq(d3, d4);
+ const BoolV con1 = BAnd(con10, con11); // vertex region b
+ if (BAllEq(con1, bTrue))
+ {
+ /*size = 1;
+ indices[0] = ind1;*/
+ closestA = A[ind1];
+ closestB = B[ind1];
+ return Q[ind1];
+ }
+
+
+ //check if p in vertex region outside of c
+ const BoolV con20 = FIsGrtrOrEq(d6, zero);
+ const BoolV con21 = FIsGrtrOrEq(d6, d5);
+ const BoolV con2 = BAnd(con20, con21); // vertex region c
+ if (BAllEq(con2, bTrue))
+ {
+ closestA = A[ind2];
+ closestB = B[ind2];
+ return Q[ind2];
+ }
+
+ //check if p in edge region of AB
+ const BoolV con30 = FIsGrtrOrEq(zero, vc);
+ const BoolV con31 = FIsGrtrOrEq(d1, zero);
+ const BoolV con32 = FIsGrtrOrEq(zero, d3);
+ const BoolV con3 = BAnd(con30, BAnd(con31, con32));
+
+ if (BAllEq(con3, bTrue))
+ {
+ //size = 2;
+ //p in edge region of AB, split AB
+ return closestPtPointSegmentTesselation(Q[ind0], Q[ind1], A[ind0], A[ind1], B[ind0], B[ind1], size, closestA, closestB);
+ }
+
+ //check if p in edge region of BC
+ const BoolV con40 = FIsGrtrOrEq(zero, va);
+ const BoolV con41 = FIsGrtrOrEq(d4, d3);
+ const BoolV con42 = FIsGrtrOrEq(d5, d6);
+ const BoolV con4 = BAnd(con40, BAnd(con41, con42));
+
+ if (BAllEq(con4, bTrue))
+ {
+ //p in edge region of BC, split BC
+ return closestPtPointSegmentTesselation(Q[ind1], Q[ind2], A[ind1], A[ind2], B[ind1], B[ind2], size, closestA, closestB);
+ }
+
+ //check if p in edge region of AC
+ const BoolV con50 = FIsGrtrOrEq(zero, vb);
+ const BoolV con51 = FIsGrtrOrEq(d2, zero);
+ const BoolV con52 = FIsGrtrOrEq(zero, d6);
+ const BoolV con5 = BAnd(con50, BAnd(con51, con52));
+
+ if (BAllEq(con5, bTrue))
+ {
+ //p in edge region of AC, split AC
+ return closestPtPointSegmentTesselation(Q[ind0], Q[ind2], A[ind0], A[ind2], B[ind0], B[ind2], size, closestA, closestB);
+ }
+
+ size = 3;
+
+ Vec3V q0 = Q[ind0];
+ Vec3V q1 = Q[ind1];
+ Vec3V q2 = Q[ind2];
+ Vec3V a0 = A[ind0];
+ Vec3V a1 = A[ind1];
+ Vec3V a2 = A[ind2];
+ Vec3V b0 = B[ind0];
+ Vec3V b1 = B[ind1];
+ Vec3V b2 = B[ind2];
+
+ for (;;)
+ {
+
+ const Vec3V ab = V3Sub(q1, q0);
+ const Vec3V ac = V3Sub(q2, q0);
+ const Vec3V bc = V3Sub(q2, q1);
+
+ const FloatV dab = V3Dot(ab, ab);
+ const FloatV dac = V3Dot(ac, ac);
+ const FloatV dbc = V3Dot(bc, bc);
+
+ const FloatV fMax = FMax(dab, FMax(dac, dbc));
+ const FloatV fMin = FMin(dab, FMin(dac, dbc));
+
+ const Vec3V w = V3Cross(ab, ac);
+
+ const FloatV area = V3Length(w);
+ const FloatV ratio = FDiv(FSqrt(fMax), FSqrt(fMin));
+ if (FAllGrtr(four, ratio) && FAllGrtr(sixty, area))
+ break;
+
+ //calculate the triangle normal
+ const Vec3V triNormal = V3Normalize(w);
+
+ PX_ASSERT(V3AllEq(triNormal, V3Zero()) == 0);
+
+
+ //split the longest edge
+ if (FAllGrtrOrEq(dab, dac) && FAllGrtrOrEq(dab, dbc))
+ {
+ //split edge q0q1
+ const Vec3V midPoint = V3Scale(V3Add(q0, q1), half);
+ const Vec3V midA = V3Scale(V3Add(a0, a1), half);
+ const Vec3V midB = V3Scale(V3Add(b0, b1), half);
+
+ const Vec3V v = V3Sub(midPoint, q2);
+ const Vec3V n = V3Normalize(V3Cross(v, triNormal));
+
+ const FloatV d = FNeg(V3Dot(n, midPoint));
+ const FloatV dp = FAdd(V3Dot(n, q0), d);
+ const FloatV sum = FMul(d, dp);
+
+ if (FAllGrtr(sum, zero))
+ {
+ //q0 and origin at the same side, split triangle[q0, m, q2]
+ q1 = midPoint;
+ a1 = midA;
+ b1 = midB;
+ }
+ else
+ {
+ //q1 and origin at the same side, split triangle[m, q1, q2]
+ q0 = midPoint;
+ a0 = midA;
+ b0 = midB;
+ }
+
+ }
+ else if (FAllGrtrOrEq(dac, dbc))
+ {
+ //split edge q0q2
+ const Vec3V midPoint = V3Scale(V3Add(q0, q2), half);
+ const Vec3V midA = V3Scale(V3Add(a0, a2), half);
+ const Vec3V midB = V3Scale(V3Add(b0, b2), half);
+
+ const Vec3V v = V3Sub(midPoint, q1);
+ const Vec3V n = V3Normalize(V3Cross(v, triNormal));
+
+ const FloatV d = FNeg(V3Dot(n, midPoint));
+ const FloatV dp = FAdd(V3Dot(n, q0), d);
+ const FloatV sum = FMul(d, dp);
+
+ if (FAllGrtr(sum, zero))
+ {
+ //q0 and origin at the same side, split triangle[q0, q1, m]
+ q2 = midPoint;
+ a2 = midA;
+ b2 = midB;
+ }
+ else
+ {
+ //q2 and origin at the same side, split triangle[m, q1, q2]
+ q0 = midPoint;
+ a0 = midA;
+ b0 = midB;
+ }
+ }
+ else
+ {
+ //split edge q1q2
+ const Vec3V midPoint = V3Scale(V3Add(q1, q2), half);
+ const Vec3V midA = V3Scale(V3Add(a1, a2), half);
+ const Vec3V midB = V3Scale(V3Add(b1, b2), half);
+
+ const Vec3V v = V3Sub(midPoint, q0);
+ const Vec3V n = V3Normalize(V3Cross(v, triNormal));
+
+ const FloatV d = FNeg(V3Dot(n, midPoint));
+ const FloatV dp = FAdd(V3Dot(n, q1), d);
+ const FloatV sum = FMul(d, dp);
+
+ if (FAllGrtr(sum, zero))
+ {
+ //q1 and origin at the same side, split triangle[q0, q1, m]
+ q2 = midPoint;
+ a2 = midA;
+ b2 = midB;
+ }
+ else
+ {
+ //q2 and origin at the same side, split triangle[q0, m, q2]
+ q1 = midPoint;
+ a1 = midA;
+ b1 = midB;
+ }
+
+
+ }
+ }
+
+ //P must project inside face region. Compute Q using Barycentric coordinates
+ ab_ = V3Sub(q1, q0);
+ ac_ = V3Sub(q2, q0);
+ ap = V3Neg(q0);
+ bp = V3Neg(q1);
+ cp = V3Neg(q2);
+
+ d1 = V3Dot(ab_, ap); // snom
+ d2 = V3Dot(ac_, ap); // tnom
+ d3 = V3Dot(ab_, bp); // -sdenom
+ d4 = V3Dot(ac_, bp); // unom = d4 - d3
+ d5 = V3Dot(ab_, cp); // udenom = d5 - d6
+ d6 = V3Dot(ac_, cp); // -tdenom
+
+ va = FNegScaleSub(d5, d4, FMul(d3, d6));//edge region of BC
+ vb = FNegScaleSub(d1, d6, FMul(d5, d2));//edge region of AC
+ vc = FNegScaleSub(d3, d2, FMul(d1, d4));//edge region of AB
+
+ const FloatV toRecipD = FAdd(va, FAdd(vb, vc));
+ const FloatV denom = FRecip(toRecipD);//V4GetW(recipTmp);
+ const Vec3V v0 = V3Sub(a1, a0);
+ const Vec3V v1 = V3Sub(a2, a0);
+ const Vec3V w0 = V3Sub(b1, b0);
+ const Vec3V w1 = V3Sub(b2, b0);
+
+ const FloatV t = FMul(vb, denom);
+ const FloatV w = FMul(vc, denom);
+ const Vec3V vA1 = V3Scale(v1, w);
+ const Vec3V vB1 = V3Scale(w1, w);
+ const Vec3V tempClosestA = V3Add(a0, V3ScaleAdd(v0, t, vA1));
+ const Vec3V tempClosestB = V3Add(b0, V3ScaleAdd(w0, t, vB1));
+ closestA = tempClosestA;
+ closestB = tempClosestB;
+ return V3Sub(tempClosestA, tempClosestB);
+}
+
+PX_NOALIAS Vec3V closestPtPointTetrahedronTesselation(Vec3V* PX_RESTRICT Q, Vec3V* PX_RESTRICT A, Vec3V* PX_RESTRICT B, PxU32& size, Vec3V& closestA, Vec3V& closestB)
+{
+ const FloatV eps = FEps();
+ const Vec3V zeroV = V3Zero();
+ PxU32 tempSize = size;
+
+ FloatV bestSqDist = FLoad(PX_MAX_REAL);
+ const Vec3V a = Q[0];
+ const Vec3V b = Q[1];
+ const Vec3V c = Q[2];
+ const Vec3V d = Q[3];
+ const BoolV bTrue = BTTTT();
+ const BoolV bFalse = BFFFF();
+
+ //degenerated
+ const Vec3V ad = V3Sub(d, a);
+ const Vec3V bd = V3Sub(d, b);
+ const Vec3V cd = V3Sub(d, c);
+ const FloatV dad = V3Dot(ad, ad);
+ const FloatV dbd = V3Dot(bd, bd);
+ const FloatV dcd = V3Dot(cd, cd);
+ const FloatV fMin = FMin(dad, FMin(dbd, dcd));
+ if (FAllGrtr(eps, fMin))
+ {
+ size = 3;
+ PxU32 tempIndices[] = { 0, 1, 2 };
+ return closestPtPointTriangleTesselation(Q, A, B, tempIndices, size, closestA, closestB);
+ }
+
+ Vec3V _Q[] = { Q[0], Q[1], Q[2], Q[3] };
+ Vec3V _A[] = { A[0], A[1], A[2], A[3] };
+ Vec3V _B[] = { B[0], B[1], B[2], B[3] };
+
+ PxU32 indices[3] = { 0, 1, 2 };
+
+ const BoolV bIsOutside4 = PointOutsideOfPlane4(a, b, c, d);
+
+ if (BAllEq(bIsOutside4, bFalse))
+ {
+ //origin is inside the tetrahedron, we are done
+ return zeroV;
+ }
+
+ Vec3V result = zeroV;
+ Vec3V tempClosestA, tempClosestB;
+
+ if (BAllEq(BGetX(bIsOutside4), bTrue))
+ {
+
+ PxU32 tempIndices[] = { 0, 1, 2 };
+ PxU32 _size = 3;
+
+ result = closestPtPointTriangleTesselation(_Q, _A, _B, tempIndices, _size, tempClosestA, tempClosestB);
+
+ const FloatV sqDist = V3Dot(result, result);
+ bestSqDist = sqDist;
+
+ indices[0] = tempIndices[0];
+ indices[1] = tempIndices[1];
+ indices[2] = tempIndices[2];
+
+ tempSize = _size;
+ closestA = tempClosestA;
+ closestB = tempClosestB;
+ }
+
+ if (BAllEq(BGetY(bIsOutside4), bTrue))
+ {
+
+ PxU32 tempIndices[] = { 0, 2, 3 };
+
+ PxU32 _size = 3;
+
+ const Vec3V q = closestPtPointTriangleTesselation(_Q, _A, _B, tempIndices, _size, tempClosestA, tempClosestB);
+
+ const FloatV sqDist = V3Dot(q, q);
+ const BoolV con = FIsGrtr(bestSqDist, sqDist);
+ if (BAllEq(con, bTrue))
+ {
+ result = q;
+ bestSqDist = sqDist;
+ indices[0] = tempIndices[0];
+ indices[1] = tempIndices[1];
+ indices[2] = tempIndices[2];
+
+ tempSize = _size;
+ closestA = tempClosestA;
+ closestB = tempClosestB;
+ }
+ }
+
+ if (BAllEq(BGetZ(bIsOutside4), bTrue))
+ {
+
+ PxU32 tempIndices[] = { 0, 3, 1 };
+ PxU32 _size = 3;
+
+ const Vec3V q = closestPtPointTriangleTesselation(_Q, _A, _B, tempIndices, _size, tempClosestA, tempClosestB);
+
+ const FloatV sqDist = V3Dot(q, q);
+ const BoolV con = FIsGrtr(bestSqDist, sqDist);
+ if (BAllEq(con, bTrue))
+ {
+ result = q;
+ bestSqDist = sqDist;
+ indices[0] = tempIndices[0];
+ indices[1] = tempIndices[1];
+ indices[2] = tempIndices[2];
+ tempSize = _size;
+ closestA = tempClosestA;
+ closestB = tempClosestB;
+ }
+
+ }
+
+ if (BAllEq(BGetW(bIsOutside4), bTrue))
+ {
+
+ PxU32 tempIndices[] = { 1, 3, 2 };
+ PxU32 _size = 3;
+
+ const Vec3V q = closestPtPointTriangleTesselation(_Q, _A, _B, tempIndices, _size, tempClosestA, tempClosestB);
+
+ const FloatV sqDist = V3Dot(q, q);
+ const BoolV con = FIsGrtr(bestSqDist, sqDist);
+
+ if (BAllEq(con, bTrue))
+ {
+ result = q;
+ bestSqDist = sqDist;
+
+ indices[0] = tempIndices[0];
+ indices[1] = tempIndices[1];
+ indices[2] = tempIndices[2];
+
+ tempSize = _size;
+ closestA = tempClosestA;
+ closestB = tempClosestB;
+ }
+ }
+
+ A[0] = _A[indices[0]]; A[1] = _A[indices[1]]; A[2] = _A[indices[2]];
+ B[0] = _B[indices[0]]; B[1] = _B[indices[1]]; B[2] = _B[indices[2]];
+ Q[0] = _Q[indices[0]]; Q[1] = _Q[indices[1]]; Q[2] = _Q[indices[2]];
+
+
+ size = tempSize;
+ return result;
+}
+
+PX_NOALIAS PX_FORCE_INLINE Vec3V doTesselation(Vec3V* PX_RESTRICT Q, Vec3V* PX_RESTRICT A, Vec3V* PX_RESTRICT B,
+ const Vec3VArg support, const Vec3VArg supportA, const Vec3VArg supportB, PxU32& size, Vec3V& closestA, Vec3V& closestB)
+{
+ switch (size)
+ {
+ case 1:
+ {
+ closestA = supportA;
+ closestB = supportB;
+ return support;
+ }
+ case 2:
+ {
+ return closestPtPointSegmentTesselation(Q[0], support, A[0], supportA, B[0], supportB, size, closestA, closestB);
+ }
+ case 3:
+ {
+
+ PxU32 tempIndices[3] = { 0, 1, 2 };
+ return closestPtPointTriangleTesselation(Q, A, B, tempIndices, size, closestA, closestB);
+ }
+ case 4:
+ {
+ return closestPtPointTetrahedronTesselation(Q, A, B, size, closestA, closestB);
+ }
+ default:
+ PX_ASSERT(0);
+ }
+ return support;
+}
+
+
+
+
+enum Status
+{
+ STATUS_NON_INTERSECT,
+ STATUS_CONTACT,
+ STATUS_DEGENERATE,
+};
+
+struct Output
+{
+ /// Get the normal to push apart in direction from A to B
+ PX_FORCE_INLINE Vec3V getNormal() const { return V3Normalize(V3Sub(mClosestB, mClosestA)); }
+ Vec3V mClosestA; ///< Closest point on A
+ Vec3V mClosestB; ///< Closest point on B
+ FloatV mDistSq;
+};
+
+struct ConvexV
+{
+ void calcExtent(const Vec3V& dir, PxF32& minOut, PxF32& maxOut) const
+ {
+ // Expand
+ const Vec4V x = Vec4V_From_FloatV(V3GetX(dir));
+ const Vec4V y = Vec4V_From_FloatV(V3GetY(dir));
+ const Vec4V z = Vec4V_From_FloatV(V3GetZ(dir));
+
+ const Vec4V* src = mAovVertices;
+ const Vec4V* end = src + mNumAovVertices * 3;
+
+ // Do first step
+ Vec4V max = V4MulAdd(x, src[0], V4MulAdd(y, src[1], V4Mul(z, src[2])));
+ Vec4V min = max;
+ src += 3;
+ // Do the rest
+ for (; src < end; src += 3)
+ {
+ const Vec4V dot = V4MulAdd(x, src[0], V4MulAdd(y, src[1], V4Mul(z, src[2])));
+ max = V4Max(dot, max);
+ min = V4Min(dot, min);
+ }
+ FStore(V4ExtractMax(max), &maxOut);
+ FStore(V4ExtractMin(min), &minOut);
+ }
+ Vec3V calcSupport(const Vec3V& dir) const
+ {
+ // Expand
+ const Vec4V x = Vec4V_From_FloatV(V3GetX(dir));
+ const Vec4V y = Vec4V_From_FloatV(V3GetY(dir));
+ const Vec4V z = Vec4V_From_FloatV(V3GetZ(dir));
+
+ PX_ALIGN(16, static const PxF32 index4const[]) = { 0.0f, 1.0f, 2.0f, 3.0f };
+ Vec4V index4 = *(const Vec4V*)index4const;
+ PX_ALIGN(16, static const PxF32 delta4const[]) = { 4.0f, 4.0f, 4.0f, 4.0f };
+ const Vec4V delta4 = *(const Vec4V*)delta4const;
+
+ const Vec4V* src = mAovVertices;
+ const Vec4V* end = src + mNumAovVertices * 3;
+
+ // Do first step
+ Vec4V max = V4MulAdd(x, src[0], V4MulAdd(y, src[1], V4Mul(z, src[2])));
+ Vec4V maxIndex = index4;
+ index4 = V4Add(index4, delta4);
+ src += 3;
+ // Do the rest
+ for (; src < end; src += 3)
+ {
+ const Vec4V dot = V4MulAdd(x, src[0], V4MulAdd(y, src[1], V4Mul(z, src[2])));
+ const BoolV cmp = V4IsGrtr(dot, max);
+ max = V4Max(dot, max);
+ maxIndex = V4Sel(cmp, index4, maxIndex);
+ index4 = V4Add(index4, delta4);
+ }
+ Vec4V horiMax = Vec4V_From_FloatV(V4ExtractMax(max));
+ PxU32 mask = BGetBitMask(V4IsEq(horiMax, max));
+ const PxU32 simdIndex = (0x12131210 >> (mask + mask)) & PxU32(3);
+
+ /// NOTE! Could be load hit store
+ /// Would be better to have all simd.
+ PX_ALIGN(16, PxF32 f[4]);
+ V4StoreA(maxIndex, f);
+ PxU32 index = PxU32(PxI32(f[simdIndex]));
+
+ const Vec4V* aovIndex = (mAovVertices + (index >> 2) * 3);
+ const PxF32* aovOffset = ((const PxF32*)aovIndex) + (index & 3);
+
+ return Vec3V_From_Vec4V(V4LoadXYZW(aovOffset[0], aovOffset[4], aovOffset[8], 1.0f));
+ }
+
+ const Vec4V* mAovVertices; ///< Vertices storex x,x,x,x, y,y,y,y, z,z,z,z
+ PxU32 mNumAovVertices; ///< Number of groups of 4 of vertices
+};
+
+Status Collide(const Vec3V& initialDir, const ConvexV& convexA, const Mat34V& bToA, const ConvexV& convexB, Output& out)
+{
+ Vec3V Q[4];
+ Vec3V A[4];
+ Vec3V B[4];
+
+ Mat33V aToB = M34Trnsps33(bToA);
+
+ PxU32 size = 0;
+
+ const Vec3V zeroV = V3Zero();
+ const BoolV bTrue = BTTTT();
+
+ //Vec3V v = V3UnitX();
+ Vec3V v = V3Sel(FIsGrtr(V3Dot(initialDir, initialDir), FZero()), initialDir, V3UnitX());
+
+ //const FloatV minMargin = zero;
+ //const FloatV eps2 = FMul(minMargin, FLoad(0.01f));
+ //FloatV eps2 = zero;
+ FloatV eps2 = FLoad(1e-6f);
+ const FloatV epsRel = FLoad(0.000225f);
+
+ Vec3V closA(zeroV), closB(zeroV);
+ FloatV sDist = FMax();
+ FloatV minDist = sDist;
+ Vec3V closAA = zeroV;
+ Vec3V closBB = zeroV;
+
+ BoolV bNotTerminated = bTrue;
+ BoolV bCon = bTrue;
+
+ do
+ {
+ minDist = sDist;
+ closAA = closA;
+ closBB = closB;
+
+ PxU32 index = size++;
+ PX_ASSERT(index < 4);
+
+ const Vec3V supportA = convexA.calcSupport(V3Neg(v));
+ const Vec3V supportB = M34MulV3(bToA, convexB.calcSupport(M33MulV3(aToB, v)));
+ const Vec3V support = Vec3V_From_Vec4V(Vec4V_From_Vec3V(V3Sub(supportA, supportB)));
+
+ A[index] = supportA;
+ B[index] = supportB;
+ Q[index] = support;
+
+ const FloatV signDist = V3Dot(v, support);
+ const FloatV tmp0 = FSub(sDist, signDist);
+ if (FAllGrtr(FMul(epsRel, sDist), tmp0))
+ {
+ out.mClosestA = closA;
+ out.mClosestB = closB;
+ out.mDistSq = sDist;
+ return STATUS_NON_INTERSECT;
+ }
+
+ //calculate the closest point between two convex hull
+ v = doTesselation(Q, A, B, support, supportA, supportB, size, closA, closB);
+ sDist = V3Dot(v, v);
+ bCon = FIsGrtr(minDist, sDist);
+
+ bNotTerminated = BAnd(FIsGrtr(sDist, eps2), bCon);
+ } while (BAllEq(bNotTerminated, bTrue));
+
+ out.mClosestA = V3Sel(bCon, closA, closAA);
+ out.mClosestB = V3Sel(bCon, closB, closBB);
+ out.mDistSq = FSel(bCon, sDist, minDist);
+ return Status(BAllEq(bCon, bTrue) == 1 ? STATUS_CONTACT : STATUS_DEGENERATE);
+}
+
+static void _calcSeparation(const ConvexV& convexA, const physx::PxTransform& aToWorldIn, const Mat34V& bToA, ConvexV& convexB, Output& out, Separation& sep)
+{
+
+ Mat33V aToB = M34Trnsps33(bToA);
+ Vec3V normalA = out.getNormal();
+
+ convexA.calcExtent(normalA, sep.min0, sep.max0);
+ Vec3V normalB = M33MulV3(aToB, normalA);
+ convexB.calcExtent(normalB, sep.min1, sep.max1);
+
+ {
+ // Offset the min max taking into account transform
+ // Distance of origin from B's space in As space in direction of the normal in As space should fix it...
+ PxF32 fix;
+ FStore(V3Dot(bToA.col3, normalA), &fix);
+ sep.min1 += fix;
+ sep.max1 += fix;
+ }
+
+ // Looks like it's the plane at the midpoint
+ Vec3V center = V3Scale(V3Add(out.mClosestA, out.mClosestB), FLoad(0.5f));
+ // Transform to world space
+ Mat34V aToWorld;
+ *(PxMat44*)&aToWorld = aToWorldIn;
+ // Put the normal in world space
+ Vec3V worldCenter = M34MulV3(aToWorld, center);
+ Vec3V worldNormal = M34Mul33V3(aToWorld, normalA);
+
+ FloatV dist = V3Dot(worldNormal, worldCenter);
+ V3StoreU(worldNormal, sep.plane.n);
+ FStore(dist, &sep.plane.d);
+ sep.plane.d = -sep.plane.d;
+}
+
+// Repacks an array of PxVec3 into a transposed (SoA) SIMD layout: every 4
+// input vectors become 3 Vec4V lanes (all x's, all y's, all z's) — hence dst
+// advances by 3 while src advances by 4. A partial tail is padded by
+// replicating the last real vertex so downstream SIMD code sees full lanes.
+static void _arrayVec3ToVec4(const PxVec3* src, Vec4V* dst, PxU32 num)
+{
+	const PxU32 num4 = num >> 2;
+	for (PxU32 i = 0; i < num4; i++, dst += 3, src += 4)
+	{
+		Vec3V v0 = V3LoadU(&src[0].x);
+		Vec3V v1 = V3LoadU(&src[1].x);
+		Vec3V v2 = V3LoadU(&src[2].x);
+		Vec3V v3 = V3LoadU(&src[3].x);
+		// Transpose
+		V4Transpose(v0, v1, v2, v3);
+		// Save
+		dst[0] = v0;
+		dst[1] = v1;
+		dst[2] = v2;
+	}
+	const PxU32 remain = num & 3;
+	if (remain)
+	{
+		Vec3V work[4];
+		PxU32 i = 0;
+		for (; i < remain; i++) work[i] = V3LoadU(&src[i].x);
+		for (; i < 4; i++) work[i] = work[remain - 1];	// pad with last vertex
+		V4Transpose(work[0], work[1], work[2], work[3]);
+		dst[0] = work[0];
+		dst[1] = work[1];
+		dst[2] = work[2];
+	}
+}
+
+
+// Scaled variant of _arrayVec3ToVec4: multiplies each vertex component-wise by
+// 'scale' while repacking into the transposed SIMD layout. Falls back to the
+// unscaled version when scale is exactly (1,1,1).
+static void _arrayVec3ToVec4(const PxVec3* src, const Vec3V& scale, Vec4V* dst, PxU32 num)
+{
+	// If no scale - use the faster version
+	if (V3AllEq(scale, V3One()))
+	{
+		return _arrayVec3ToVec4(src, dst, num);
+	}
+
+	const PxU32 num4 = num >> 2;
+	for (PxU32 i = 0; i < num4; i++, dst += 3, src += 4)
+	{
+		Vec3V v0 = V3Mul(scale, V3LoadU(&src[0].x));
+		Vec3V v1 = V3Mul(scale, V3LoadU(&src[1].x));
+		Vec3V v2 = V3Mul(scale, V3LoadU(&src[2].x));
+		Vec3V v3 = V3Mul(scale, V3LoadU(&src[3].x));
+		// Transpose
+		V4Transpose(v0, v1, v2, v3);
+		// Save
+		dst[0] = v0;
+		dst[1] = v1;
+		dst[2] = v2;
+	}
+	const PxU32 remain = num & 3;
+	if (remain)
+	{
+		Vec3V work[4];
+		PxU32 i = 0;
+		for (; i < remain; i++) work[i] = V3Mul(scale, V3LoadU(&src[i].x));
+		for (; i < 4; i++) work[i] = work[remain - 1];	// pad with last vertex
+		V4Transpose(work[0], work[1], work[2], work[3]);
+		dst[0] = work[0];
+		dst[1] = work[1];
+		dst[2] = work[2];
+	}
+}
+
+
+// Tests whether two convex hulls (point clouds plus bounds) are in contact or
+// within 'maxDistance' of each other. Vertices are scaled and repacked into a
+// transposed SIMD layout, then a GJK-style Collide() query runs in hull0's
+// local frame. Returns true on contact/proximity; if 'separation' is non-null
+// it is filled with the separating midplane and per-hull projected intervals.
+bool importerHullsInProximityApexFree(const std::vector<PxVec3>& hull0, PxBounds3& hull0Bounds, const physx::PxTransform& localToWorldRT0In, const physx::PxVec3& scale0In,
+	const std::vector<PxVec3>& hull1, PxBounds3& hull1Bounds, const physx::PxTransform& localToWorldRT1In, const physx::PxVec3& scale1In,
+	physx::PxF32 maxDistance, Separation* separation)
+{
+	const PxU32 numVerts0 = static_cast<PxU32>(hull0.size());
+	const PxU32 numVerts1 = static_cast<PxU32>(hull1.size());
+	// Round vertex counts up to multiples of 4 for the SoA layout (3 Vec4V per 4 verts).
+	const PxU32 numAov0 = (numVerts0 + 3) >> 2;
+	const PxU32 numAov1 = (numVerts1 + 3) >> 2;
+	Vec4V* verts0 = (Vec4V*)alloca((numAov0 + numAov1) * sizeof(Vec4V) * 3);
+
+	// Make sure it's aligned
+	PX_ASSERT((size_t(verts0) & 0xf) == 0);
+
+	Vec4V* verts1 = verts0 + (numAov0 * 3);
+
+	const Vec3V scale0 = V3LoadU(&scale0In.x);
+	const Vec3V scale1 = V3LoadU(&scale1In.x);
+
+	// Feed the hull points directly — they are already contiguous PxVec3s.
+	// (Previously they were copied into temporary vectors first, which was
+	// pure overhead and invoked &v[0] on a potentially empty vector.)
+	_arrayVec3ToVec4(hull0.data(), scale0, verts0, numVerts0);
+	_arrayVec3ToVec4(hull1.data(), scale1, verts1, numVerts1);
+
+	// Express hull1's transform in hull0's local frame.
+	const PxTransform trans1To0 = localToWorldRT0In.transformInv(localToWorldRT1In);
+
+	// Load into simd mat
+	Mat34V bToA;
+	*(PxMat44*)&bToA = trans1To0;
+	(*(PxMat44*)&bToA).column3.w = 0.0f; // AOS wants the 4th component of Vec3V to be 0 to work properly
+
+	ConvexV convexA;
+	ConvexV convexB;
+
+	convexA.mNumAovVertices = numAov0;
+	convexA.mAovVertices = verts0;
+
+	convexB.mNumAovVertices = numAov1;
+	convexB.mAovVertices = verts1;
+
+	// Take the origin of B in As space as the inital direction as it is 'the difference in transform origins B-A in A's space'
+	// Should be a good first guess
+	const Vec3V initialDir = bToA.col3;
+	Output output;
+	Status status = Collide(initialDir, convexA, bToA, convexB, output);
+
+	if (status == STATUS_DEGENERATE)
+	{
+		// GJK could not decide — resolve with a distance tolerance derived
+		// from 1% of the combined smallest (scaled) extents of both hulls.
+		const PxVec3 extents0 = hull0Bounds.getExtents();
+		const PxVec3 extents1 = hull1Bounds.getExtents();
+
+		const FloatV tolerance0 = V3ExtractMin(V3Mul(V3LoadU(&extents0.x), scale0));
+		const FloatV tolerance1 = V3ExtractMin(V3Mul(V3LoadU(&extents1.x), scale1));
+
+		const FloatV tolerance = FMul(FAdd(tolerance0, tolerance1), FLoad(0.01f));
+		const FloatV sqTolerance = FMul(tolerance, tolerance);
+
+		status = FAllGrtr(sqTolerance, output.mDistSq) ? STATUS_CONTACT : STATUS_NON_INTERSECT;
+	}
+
+	switch (status)
+	{
+	case STATUS_CONTACT:
+	{
+		if (separation)
+		{
+			_calcSeparation(convexA, localToWorldRT0In, bToA, convexB, output, *separation);
+		}
+		return true;
+	}
+	default:
+	case STATUS_NON_INTERSECT:
+	{
+		if (separation)
+		{
+			_calcSeparation(convexA, localToWorldRT0In, bToA, convexB, output, *separation);
+		}
+		// Not touching: report proximity within maxDistance.
+		PxF32 val;
+		FStore(output.mDistSq, &val);
+		return val < (maxDistance * maxDistance);
+	}
+	}
+}
+
+} // namespace Blast
+} // namespace Nv
diff --git a/NvBlast/sdk/extensions/authoring/source/NvBlastExtApexSharedParts.h b/NvBlast/sdk/extensions/authoring/source/NvBlastExtApexSharedParts.h
new file mode 100644
index 0000000..68e0412
--- /dev/null
+++ b/NvBlast/sdk/extensions/authoring/source/NvBlastExtApexSharedParts.h
@@ -0,0 +1,51 @@
+/*
+* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#ifndef NVBLASTEXTAPEXSHAREDPARTS_H
+#define NVBLASTEXTAPEXSHAREDPARTS_H
+
+#include "NvBlast.h"
+#include <vector>
+#include <PxPlane.h>
+namespace physx
+{
+ class PxVec3;
+ class PxTransform;
+ class PxBounds3;
+}
+
+namespace Nv
+{
+namespace Blast
+{
+
+/**
+	Result of a hull-proximity query: the separating (mid)plane in world space
+	and each hull's projected interval along the plane normal
+	([min0, max0] for hull 0, [min1, max1] for hull 1).
+*/
+struct Separation
+{
+	physx::PxPlane plane;
+	float min0, max0, min1, max1;
+
+	/**
+		Gap between the two projected intervals; negative when they overlap.
+		Made const so it can be called through const references.
+	*/
+	float getDistance() const
+	{
+		return physx::PxMax(min0 - max1, min1 - max0);
+	}
+};
+
+/**
+ Function to compute midplane between two convex hulls. Is copied from APEX.
+*/
+bool importerHullsInProximityApexFree(const std::vector<physx::PxVec3>& hull0, physx::PxBounds3& hull0Bounds, const physx::PxTransform& localToWorldRT0In, const physx::PxVec3& scale0In,
+ const std::vector<physx::PxVec3>& hull1, physx::PxBounds3& hull1Bounds, const physx::PxTransform& localToWorldRT1In, const physx::PxVec3& scale1In,
+ physx::PxF32 maxDistance, Separation* separation);
+
+} // namespace Blast
+} // namespace Nv
+
+
+#endif // NVBLASTEXTAPEXSHAREDPARTS_H
diff --git a/NvBlast/sdk/extensions/authoring/source/NvBlastExtAuthoringAccelerator.cpp b/NvBlast/sdk/extensions/authoring/source/NvBlastExtAuthoringAccelerator.cpp
new file mode 100644
index 0000000..075bce9
--- /dev/null
+++ b/NvBlast/sdk/extensions/authoring/source/NvBlastExtAuthoringAccelerator.cpp
@@ -0,0 +1,629 @@
+/*
+* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#include "NvBlastExtAuthoringAccelerator.h"
+#include "NvBlastExtAuthoringMesh.h"
+#include "NvBlastExtAuthoringInternalCommon.h"
+
+
+using namespace physx;
+
+
+namespace Nv
+{
+namespace Blast
+{
+
+// Trivial accelerator: simply iterates over all 'count' facets in order.
+DummyAccelerator::DummyAccelerator(int32_t count)
+	: count(count)
+	, current(0)
+{
+}
+// Resets iteration; the dummy accelerator ignores the query facet and will
+// simply enumerate every facet again.
+void DummyAccelerator::setState(Vertex* pos, Edge* ed, Facet& fc)
+{
+	current = 0;
+	(void)pos;
+	(void)ed;
+	(void)fc;
+}
+// Resets iteration; the query point is ignored (all facets are candidates).
+void DummyAccelerator::setState(const physx::PxVec3& point) {
+	current = 0;
+	(void)point;
+}
+// Hands out facet indices 0..count-1 in order; -1 signals exhaustion.
+int32_t DummyAccelerator::getNextFacet()
+{
+	if (current >= count)
+	{
+		return -1;
+	}
+	return current++;
+}
+
+
+
+// Builds a resolution^3 uniform grid over the mesh bounds. Cells are laid out
+// with x fastest, then y, then z (cell = z*res^2 + y*res + x), and each cell
+// gets the list of facets whose AABB overlaps it (see buildAccelStructure).
+BBoxBasedAccelerator::BBoxBasedAccelerator(Mesh* mesh, int32_t resolution) : mResolution(resolution), alreadyGotValue(1)
+{
+	mBounds = mesh->getBoundingBox();
+	mSpatialMap.resize(resolution * resolution * resolution);
+	mCells.resize(resolution * resolution * resolution);
+	int32_t currentCell = 0;
+	// Size of one grid cell along each axis.
+	PxVec3 incr = (mBounds.maximum - mBounds.minimum) * (1.0f / mResolution);
+	for (int32_t z = 0; z < resolution; ++z)
+	{
+		for (int32_t y = 0; y < resolution; ++y)
+		{
+			for (int32_t x = 0; x < resolution; ++x)
+			{
+				mCells[currentCell].minimum.x = mBounds.minimum.x + x * incr.x;
+				mCells[currentCell].minimum.y = mBounds.minimum.y + y * incr.y;
+				mCells[currentCell].minimum.z = mBounds.minimum.z + z * incr.z;
+
+				mCells[currentCell].maximum.x = mBounds.minimum.x + (x + 1) * incr.x;
+				mCells[currentCell].maximum.y = mBounds.minimum.y + (y + 1) * incr.y;
+				mCells[currentCell].maximum.z = mBounds.minimum.z + (z + 1) * incr.z;
+
+				++currentCell;
+			}
+		}
+	}
+
+	buildAccelStructure(mesh->getVertices(), mesh->getEdges(), mesh->getFacetsBuffer(), mesh->getFacetCount());
+}
+
+
+// Containers free themselves; the explicit clears/resets are belt-and-braces.
+BBoxBasedAccelerator::~BBoxBasedAccelerator()
+{
+	mResolution = 0;
+	mBounds.setEmpty();
+	mSpatialMap.clear();
+	mCells.clear();
+}
+
+// Returns the next not-yet-reported candidate facet for the current query
+// state, or -1 when exhausted. A facet may be registered in several cells;
+// alreadyGotFlag entries stamped with the query's alreadyGotValue
+// de-duplicate it across cells.
+int32_t BBoxBasedAccelerator::getNextFacet()
+{
+	int32_t facetId = -1;
+
+	while (mIteratorCell != -1)
+	{
+		// Current cell exhausted — pop the next candidate cell, if any.
+		if (mIteratorFacet >= (int32_t)mSpatialMap[mIteratorCell].size())
+		{
+			if (!cellList.empty())
+			{
+				mIteratorCell = cellList.back();
+				cellList.pop_back();
+				mIteratorFacet = 0;
+			}
+			else
+			{
+				mIteratorCell = -1;
+				break;
+			}
+		}
+		if (alreadyGotFlag[mSpatialMap[mIteratorCell][mIteratorFacet]] != alreadyGotValue)
+		{
+			facetId = mSpatialMap[mIteratorCell][mIteratorFacet];
+			mIteratorFacet++;
+			break;
+		}
+		else
+		{
+			mIteratorFacet++;
+		}
+	}
+	if (facetId != -1)
+	{
+		// Stamp so this facet is not returned again for the same query.
+		alreadyGotFlag[facetId] = alreadyGotValue;
+	}
+	return facetId;
+}
+// Starts a facet query: computes the query facet's AABB and collects every
+// non-empty grid cell that overlaps it; getNextFacet() then walks those cells.
+void BBoxBasedAccelerator::setState(Vertex* pos, Edge* ed, Facet& fc)
+{
+	alreadyGotValue++;	// new stamp invalidates all previous "already returned" marks
+	mIteratorCell = -1;
+	mIteratorFacet = -1;
+	cellList.clear();
+	facetBox.setEmpty();
+	Edge* edge = ed + fc.firstEdgeNumber;
+	uint32_t count = fc.edgesCount;
+	for (uint32_t ec = 0; ec < count; ++ec)
+	{
+		facetBox.include(pos[edge->s].p);
+		facetBox.include(pos[edge->e].p);
+		edge++;
+	}
+	for (uint32_t i = 0; i < mCells.size(); ++i)
+	{
+		if (testCellPolygonIntersection(i, facetBox))
+		{
+			if (!mSpatialMap[i].empty())
+				cellList.push_back(i);
+		}
+	}
+	if (!cellList.empty())
+	{
+		// Prime the iterator with the first candidate cell.
+		mIteratorFacet = 0;
+		mIteratorCell = cellList.back();
+		cellList.pop_back();
+	}
+}
+
+
+// Starts a point query: finds the cell containing p, then collects the whole
+// z-column of cells above/below it (cells sharing its x/y index). Facets in
+// that column are the candidates for a point-in-mesh ray test along z.
+void BBoxBasedAccelerator::setState(const PxVec3& p)
+{
+	alreadyGotValue++;
+	mIteratorCell = -1;
+	mIteratorFacet = -1;
+	cellList.clear();
+	int32_t perSlice = mResolution * mResolution;
+	for (uint32_t i = 0; i < mCells.size(); ++i)
+	{
+		if (mCells[i].contains(p))
+		{
+			// i % perSlice keeps the (x, y) part of the index; vary z.
+			int32_t xyCellId = i % perSlice;
+			for (int32_t zCell = 0; zCell < mResolution; ++zCell)
+			{
+				int32_t cell = zCell * perSlice + xyCellId;
+				if (!mSpatialMap[cell].empty())
+					cellList.push_back(cell);
+			}
+		}
+	}
+	if (!cellList.empty())
+	{
+		mIteratorFacet = 0;
+		mIteratorCell = cellList.back();
+		cellList.pop_back();
+	}
+}
+
+
+// Conservative AABB-vs-AABB overlap test between a grid cell and a facet's
+// bounding box; a positive result only means the facet *may* touch the cell.
+bool BBoxBasedAccelerator::testCellPolygonIntersection(int32_t cellId, PxBounds3& facetBB)
+{
+	// Return the predicate directly instead of the redundant if/else.
+	return weakBoundingBoxIntersection(mCells[cellId], facetBB);
+}
+
+// Registers every facet with each grid cell its AABB overlaps, and sizes the
+// per-query bookkeeping containers.
+void BBoxBasedAccelerator::buildAccelStructure(Vertex* pos, Edge* edges, Facet* fc, int32_t facetCount)
+{
+	for (int32_t facet = 0; facet < facetCount; ++facet)
+	{
+		// AABB of this facet from its edge endpoints.
+		PxBounds3 bBox;
+		bBox.setEmpty();
+		Edge* edge = &edges[0] + fc->firstEdgeNumber;
+		int32_t count = fc->edgesCount;
+		for (int32_t ec = 0; ec < count; ++ec)
+		{
+			bBox.include(pos[edge->s].p);
+			bBox.include(pos[edge->e].p);
+			edge++;
+		}
+
+		for (uint32_t i = 0; i < mCells.size(); ++i)
+		{
+			if (testCellPolygonIntersection(i, bBox))
+			{
+				mSpatialMap[i].push_back(facet);
+			}
+		}
+		fc++;
+	}
+	alreadyGotFlag.resize(facetCount, 0);
+	// cellList is a stack that setState() clears and refills per query;
+	// reserve capacity instead of resize() (resize filled it with bogus
+	// zero entries that were never valid query state).
+	cellList.reserve(mCells.size());
+}
+
+// Segment-vs-unit-cube overlap test (cube centered at origin, half-extent
+// 0.5), using slab rejection followed by cross-product (separating line)
+// tests. Returns 1 on intersection, 0 otherwise.
+int32_t testEdgeAgainstCube(PxVec3& p1, PxVec3& p2)
+{
+	PxVec3 vec = p2 - p1;
+	PxVec3 vecSigns;
+	for (int32_t i = 0; i < 3; ++i)
+	{
+		vecSigns[i] = (vec[i] < 0) ? -1 : 1;
+	}
+	// Trivial rejection: the whole segment lies beyond one cube face.
+	for (int32_t i = 0; i < 3; ++i)
+	{
+		if (p1[i] * vecSigns[i] > 0.5f) return 0;
+		if (p2[i] * vecSigns[i] < -0.5f) return 0;
+	}
+
+	// Cross-product tests of the segment direction against each axis.
+	for (int32_t i = 0; i < 3; ++i)
+	{
+		int32_t ip1 = (i + 1) % 3;
+		int32_t ip2 = (i + 2) % 3;
+
+		float vl1 = vec[ip2] * p1[ip1] - vec[ip1] * p1[ip2];
+		float vl2 = 0.5f * (vec[ip2] * vecSigns[ip1] + vec[ip1] * vecSigns[ip2]);
+		if (vl1 * vl1 > vl2 * vl2)
+		{
+			return 0;
+		}
+	}
+	return 1;
+}
+
+// Signed crossing indicator: +1 if c is in [a, b), -1 if c is in [b, a),
+// 0 if c is on the same side of both. Equivalent to (b >= c) - (a >= c).
+NV_INLINE int32_t isInSegm(float a, float b, float c)
+{
+	const int32_t bAtOrAbove = (b >= c) ? 1 : 0;
+	const int32_t aAtOrAbove = (a >= c) ? 1 : 0;
+	return bAtOrAbove - aAtOrAbove;
+}
+
+// For a ray cast from p in +y: returns +1/-1 if the edge (p1, p2) crosses the
+// vertical line through p above the point (sign encodes crossing direction),
+// 0 otherwise. Summing over a polygon's edges gives a winding-number test.
+NV_INLINE int32_t edgeIsAbovePoint(PxVec2& p1, PxVec2& p2, PxVec2& p)
+{
+	int32_t direction = isInSegm(p1.x, p2.x, p.x);
+	if (direction != 0)
+	{
+		if (isInSegm(p1.y, p2.y, p.y))
+		{
+			// p is within the edge's y-span: compare against the edge equation.
+			if (direction * (p.x - p1.x) * (p2.y - p1.y) >= direction * (p.y - p1.y) * (p2.x - p1.x))
+			{
+				return direction;
+			}
+		}
+		else
+		{
+			// Edge is entirely above or below p in y; count it if above.
+			if (p1.y > p.y)
+				return direction;
+		}
+	}
+	return 0;
+}
+
+// Point-in-polygon test in the plane of the facet: vertices come as edge
+// endpoint pairs (2 per edge). Both polygon and point are projected onto the
+// dominant plane of 'normal', then a winding-number count decides containment.
+// Returns 1 if diagPoint is inside, 0 otherwise.
+int32_t pointInPolygon(PxVec3* vertices, PxVec3& diagPoint, int32_t edgeCount, PxVec3& normal)
+{
+	std::vector<PxVec2> projectedVertices(edgeCount * 2);
+	ProjectionDirections pDir = getProjectionDirection(normal);
+	PxVec2 projectedDiagPoint = getProjectedPoint(diagPoint, pDir);
+	PxVec2* saveVert = projectedVertices.data();
+	PxVec3* p = vertices;
+	for (int32_t i = 0; i < edgeCount * 2; ++i)
+	{
+		*saveVert = getProjectedPoint(*p, pDir);
+		++saveVert;
+		++p;
+	}
+	// Accumulate signed crossings over each edge (pairs of projected points).
+	int32_t counter = 0;
+	PxVec2* v = projectedVertices.data();
+	for (int32_t i = 0; i < edgeCount; ++i)
+	{
+		PxVec2& p1 = *v;
+		PxVec2& p2 = *(v + 1);
+		counter += edgeIsAbovePoint(p1, p2, projectedDiagPoint);
+		v += 2;
+	}
+	return counter != 0;	// nonzero winding => inside
+}
+
+
+
+// Facet vs. unit cube (origin-centered, half-extent 0.5) intersection.
+// Vertices come as edge endpoint pairs. Intersects if (a) any edge crosses
+// the cube, or (b) the cube's main diagonal (the one most aligned with the
+// facet normal) pierces the facet's plane inside the polygon.
+int32_t testFacetUnitCubeIntersectionInternal(PxVec3* vertices,PxVec3& facetNormal, int32_t edgeCount)
+{
+	PxVec3* pnt_p = vertices;
+	for (int32_t i = 0; i < edgeCount; ++i)
+	{
+		if (testEdgeAgainstCube(*pnt_p, *(pnt_p + 1)) == 1)
+		{
+			return 1;
+		}
+		pnt_p += 2;
+	}
+
+	// Diagonal of the cube pointing along the facet normal's octant.
+	PxVec3 cubeDiag(0, 0, 0);
+	for (int32_t i = 0; i < 3; ++i)
+		cubeDiag[i] = (facetNormal[i] < 0) ? -1 : 1;
+	// Parameter of the plane/diagonal intersection; outside [-0.5, 0.5] the
+	// plane misses the cube entirely.
+	float t = vertices->dot(facetNormal) / (cubeDiag.dot(facetNormal));
+	if (t > 0.5 || t < -0.5)
+		return 0;
+
+	PxVec3 intersPoint = cubeDiag * t;
+	int trs = pointInPolygon(vertices, intersPoint, edgeCount, facetNormal);
+	return trs;
+}
+
+// AND-masks for trivial separating-plane rejection. Flags start at ALL_ONE
+// (all six bits set); observing a vertex on the relevant side of a face
+// clears that face's bit (via &=). Any bit still set after all vertices
+// means every vertex lies beyond one face plane => no intersection possible.
+enum TrivialFlags
+{
+	HAS_POINT_BELOW_HIGH_X = ~(1 << 0),
+	HAS_POINT_ABOVE_LOW_X = ~(1 << 1),
+
+	HAS_POINT_BELOW_HIGH_Y = ~(1 << 2),
+	HAS_POINT_ABOVE_LOW_Y = ~(1 << 3),
+
+	HAS_POINT_BELOW_HIGH_Z = ~(1 << 4),
+	HAS_POINT_ABOVE_LOW_Z = ~(1 << 5),
+
+
+
+	ALL_ONE = (1 << 6) - 1
+};
+
+
+
+
+
+// Facet vs. (fattened) cube intersection for arbitrary cubes. Fast paths:
+// any vertex inside the cube => 1; all vertices beyond one face plane => 0
+// (TrivialFlags). Otherwise the facet is rescaled into the unit cube's frame
+// and handed to testFacetUnitCubeIntersectionInternal.
+// NOTE(review): the +/-0.5 trivial-reject band is relative to getCenter()
+// regardless of the cube's actual extents — for sub-unit cubes this is wider
+// than the cube, i.e. conservative (fewer rejections), so still correct.
+int32_t testFacetUnitCubeIntersection(Vertex* vertices, Edge* edges, Facet& fc, PxBounds3 cube, float fattening)
+{
+	Edge* ed = edges + fc.firstEdgeNumber;
+	int32_t trivialFlags = ALL_ONE;
+	cube.fattenFast(fattening);
+	for (uint32_t i = 0; i < fc.edgesCount; ++i)
+	{
+		// Test both endpoints of the edge (the two scopes are identical).
+		{
+			PxVec3& p = vertices[ed->s].p;
+			if (cube.contains(p))
+				return 1;
+			if (p.x < cube.getCenter().x + 0.5)
+				trivialFlags &= HAS_POINT_BELOW_HIGH_X;
+			if (p.x > cube.getCenter().x - 0.5)
+				trivialFlags &= HAS_POINT_ABOVE_LOW_X;
+
+			if (p.y < cube.getCenter().y + 0.5)
+				trivialFlags &= HAS_POINT_BELOW_HIGH_Y;
+			if (p.y > cube.getCenter().y - 0.5)
+				trivialFlags &= HAS_POINT_ABOVE_LOW_Y;
+
+			if (p.z < cube.getCenter().z + 0.5)
+				trivialFlags &= HAS_POINT_BELOW_HIGH_Z;
+			if (p.z > cube.getCenter().z - 0.5)
+				trivialFlags &= HAS_POINT_ABOVE_LOW_Z;
+		}
+		{
+			PxVec3& p = vertices[ed->e].p;
+			if (cube.contains(p))
+				return 1;
+			if (p.x < cube.getCenter().x + 0.5)
+				trivialFlags &= HAS_POINT_BELOW_HIGH_X;
+			if (p.x > cube.getCenter().x - 0.5)
+				trivialFlags &= HAS_POINT_ABOVE_LOW_X;
+
+			if (p.y < cube.getCenter().y + 0.5)
+				trivialFlags &= HAS_POINT_BELOW_HIGH_Y;
+			if (p.y > cube.getCenter().y - 0.5)
+				trivialFlags &= HAS_POINT_ABOVE_LOW_Y;
+
+			if (p.z < cube.getCenter().z + 0.5)
+				trivialFlags &= HAS_POINT_BELOW_HIGH_Z;
+			if (p.z > cube.getCenter().z - 0.5)
+				trivialFlags &= HAS_POINT_ABOVE_LOW_Z;
+		}
+
+		++ed;
+	}
+	// Any surviving bit => all vertices beyond one face plane => separated.
+	if (trivialFlags != 0)
+	{
+		return 0;
+	}
+	std::vector<PxVec3> verticesRescaled(fc.edgesCount * 2);
+
+	int32_t vrt = 0;
+	ed = edges + fc.firstEdgeNumber;
+	PxVec3 offset = cube.getCenter();
+	PxVec3 normal(1, 1, 1);
+
+	/**
+		Compute normal
+	*/
+	// Find two more vertices distinct from v1 to span the facet's plane.
+	PxVec3& v1 = vertices[ed->s].p;
+	PxVec3* v2 = nullptr;
+	PxVec3* v3 = nullptr;
+
+	for (uint32_t i = 0; i < fc.edgesCount; ++i)
+	{
+		if (v1 != vertices[ed->s].p)
+		{
+			v2 = &vertices[ed->s].p;
+			break;
+		}
+		if (v1 != vertices[ed->e].p)
+		{
+			v2 = &vertices[ed->e].p;
+			break;
+		}
+		ed++;
+	}
+	ed = edges + fc.firstEdgeNumber;
+	for (uint32_t i = 0; i < fc.edgesCount; ++i)
+	{
+		if (v1 != vertices[ed->s].p && *v2 != vertices[ed->s].p)
+		{
+			v3 = &vertices[ed->s].p;
+			break;
+		}
+		if (v1 != vertices[ed->e].p && *v2 != vertices[ed->e].p)
+		{
+			v3 = &vertices[ed->e].p;
+			break;
+		}
+		ed++;
+	}
+	ed = edges + fc.firstEdgeNumber;
+	if (v2 != nullptr && v3 != nullptr)
+	{
+		normal = (*v2 - v1).cross(*v3 - v1);
+	}
+	else
+	{
+		return true; // If cant find normal, assume it intersects box.
+	}
+
+
+	normal.normalize();
+
+	// Map the cube to the origin-centered unit cube and the facet with it.
+	PxVec3 rescale(.5f / (cube.getExtents().x), .5f / (cube.getExtents().y), 0.5f / (cube.getExtents().z));
+	for (uint32_t i = 0; i < fc.edgesCount; ++i)
+	{
+		verticesRescaled[vrt] = vertices[ed->s].p - offset;
+		verticesRescaled[vrt].x *= rescale.x;
+		verticesRescaled[vrt].y *= rescale.y;
+		verticesRescaled[vrt].z *= rescale.z;
+		++vrt;
+		verticesRescaled[vrt] = vertices[ed->e].p - offset;
+		verticesRescaled[vrt].x *= rescale.x;
+		verticesRescaled[vrt].y *= rescale.y;
+		verticesRescaled[vrt].z *= rescale.z;
+		++ed;
+		++vrt;
+	}
+	return testFacetUnitCubeIntersectionInternal(verticesRescaled.data(), normal, fc.edgesCount);
+}
+
+
+// Partitions the origin-centered unit cube into resolution^3 sub-cubes laid
+// out x-major (cube = x*res^2 + y*res + z, z fastest), then registers each
+// facet with every sub-cube it actually intersects (exact test, unlike the
+// AABB-only BBoxBasedAccelerator). Assumes the mesh fits the unit cube.
+IntersectionTestingAccelerator::IntersectionTestingAccelerator(Mesh* in, int32_t resolution)
+{
+
+
+	alreadyGotFlag.resize(in->getFacetCount(), 0);
+	alreadyGotValue = 0;
+	mResolution = resolution;
+
+	float cubeSize = 1.0f / resolution;
+	PxVec3 cubeMinimal(-0.5, -0.5, -0.5);
+	PxVec3 extents(cubeSize, cubeSize, cubeSize);
+	mCubes.resize(mResolution * mResolution * mResolution);
+	mSpatialMap.resize(mCubes.size());
+	int32_t cubeId = 0;
+
+	// Build unit cube partition
+	for (int32_t i = 0; i < mResolution; ++i)
+	{
+		cubeMinimal.y = -0.5;
+		cubeMinimal.z = -0.5;
+		for (int32_t j = 0; j < mResolution; ++j)
+		{
+			cubeMinimal.z = -0.5;
+			for (int32_t k = 0; k < mResolution; ++k)
+			{
+				mCubes[cubeId].minimum = cubeMinimal;
+				mCubes[cubeId].maximum = cubeMinimal + extents;
+				cubeMinimal.z += cubeSize;
+				++cubeId;
+			}
+			cubeMinimal.y += cubeSize;
+		}
+		cubeMinimal.x += cubeSize;
+	}
+
+
+	// Exact facet/sub-cube registration (slightly fattened for robustness).
+	for (uint32_t i = 0; i < in->getFacetCount(); ++i)
+	{
+		for (uint32_t c = 0; c < mCubes.size(); ++c)
+		{
+			if (testFacetUnitCubeIntersection(in->getVertices(), in->getEdges(), *in->getFacet(i), mCubes[c], 0.001))
+			{
+				mSpatialMap[c].push_back(i);
+			}
+		}
+	}
+}
+
+
+// Returns the next not-yet-reported candidate facet, or -1 when exhausted.
+// NOTE(review): byte-for-byte duplicate of BBoxBasedAccelerator::getNextFacet;
+// consider hoisting into a shared helper.
+int32_t IntersectionTestingAccelerator::getNextFacet()
+{
+	int32_t facetId = -1;
+
+	while (mIteratorCell != -1)
+	{
+		// Current cell exhausted — pop the next candidate cell, if any.
+		if (mIteratorFacet >= (int32_t)mSpatialMap[mIteratorCell].size())
+		{
+			if (!cellList.empty())
+			{
+				mIteratorCell = cellList.back();
+				cellList.pop_back();
+				mIteratorFacet = 0;
+			}
+			else
+			{
+				mIteratorCell = -1;
+				break;
+			}
+		}
+		if (alreadyGotFlag[mSpatialMap[mIteratorCell][mIteratorFacet]] != alreadyGotValue)
+		{
+			facetId = mSpatialMap[mIteratorCell][mIteratorFacet];
+			mIteratorFacet++;
+			break;
+		}
+		else
+		{
+			mIteratorFacet++;
+		}
+	}
+	if (facetId != -1)
+	{
+		// Stamp so this facet is not returned again for the same query.
+		alreadyGotFlag[facetId] = alreadyGotValue;
+	}
+	return facetId;
+}
+
+// Starts a facet query: early-out if the facet misses the whole unit cube,
+// otherwise collect every non-empty sub-cube it exactly intersects.
+void IntersectionTestingAccelerator::setState(Vertex* pos, Edge* ed, Facet& fc)
+{
+	alreadyGotValue++;	// new stamp invalidates previous "already returned" marks
+	mIteratorCell = -1;
+	mIteratorFacet = -1;
+	cellList.clear();
+	PxBounds3 bigBox(PxVec3(-0.5, -0.5, -0.5), PxVec3(0.5, 0.5, 0.5));
+	if (!testFacetUnitCubeIntersection(pos, ed, fc, bigBox, 0.001f))
+	{
+		return;
+	}
+	for (uint32_t i = 0; i < mCubes.size(); ++i)
+	{
+		if (testFacetUnitCubeIntersection(pos, ed, fc, mCubes[i], 0.001f))
+		{
+			if (!mSpatialMap[i].empty())
+				cellList.push_back(i);
+		}
+	}
+	if (!cellList.empty())
+	{
+		mIteratorFacet = 0;
+		mIteratorCell = cellList.back();
+		cellList.pop_back();
+	}
+}
+
+// Starts a point query: finds the (fattened) sub-cube containing p and
+// collects the non-empty cubes of its innermost-axis column as candidates.
+// NOTE(review): xyCellId = floor(i / res) * res keeps the two outer index
+// components and iterates the innermost (z) one — verify this matches the
+// x-major layout built in the constructor for all resolutions.
+void IntersectionTestingAccelerator::setState(const PxVec3& p)
+{
+	alreadyGotValue++;
+	mIteratorCell = -1;
+	mIteratorFacet = -1;
+	cellList.clear();
+
+
+	for (uint32_t i = 0; i < mCubes.size(); ++i)
+	{
+		PxBounds3 tmp = mCubes[i];
+		tmp.fattenFast(0.001);
+		if (tmp.contains(p))
+		{
+			int32_t xyCellId = (((int)((float)i / mResolution)) * mResolution);
+			for (int32_t zCell = 0; zCell < mResolution; ++zCell)
+			{
+				int32_t cell = zCell + xyCellId;
+				if (!mSpatialMap[cell].empty())
+				{
+					cellList.push_back(cell);
+				}
+
+			}
+		}
+	}
+	if (!cellList.empty())
+	{
+		mIteratorFacet = 0;
+		mIteratorCell = cellList.back();
+		cellList.pop_back();
+	}
+}
+
+
+} // namespace Blast
+} // namespace Nv
diff --git a/NvBlast/sdk/extensions/authoring/source/NvBlastExtAuthoringAccelerator.h b/NvBlast/sdk/extensions/authoring/source/NvBlastExtAuthoringAccelerator.h
new file mode 100644
index 0000000..8284cd7
--- /dev/null
+++ b/NvBlast/sdk/extensions/authoring/source/NvBlastExtAuthoringAccelerator.h
@@ -0,0 +1,147 @@
+/*
+* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#ifndef NVBLASTEXTAUTHORINGACCELERATOR_H
+#define NVBLASTEXTAUTHORINGACCELERATOR_H
+
+#include <set>
+#include <vector>
+#include "NvBlastExtAuthoringTypes.h"
+
+
+namespace Nv
+{
+namespace Blast
+{
+
+class Mesh;
+
+
+/**
+ Acceleration structure interface.
+*/
+class SpatialAccelerator
+{
+public:
+	/**
+	Set state of accelerator to return all facets which possibly can intersect given facet.
+	\param[in] pos Vertex buffer
+	\param[in] ed Edge buffer
+	\param[in] fc Facet which should be tested.
+	*/
+	virtual void setState(Vertex* pos, Edge* ed, Facet& fc) = 0;
+	/**
+	Set state of accelerator to return all facets which possibly can cover given point. Needed for testing whether point is inside mesh.
+	\param[in] point Point which should be tested.
+	*/
+	virtual void setState(const physx::PxVec3& point) = 0;
+	/**
+	Receive next facet for the state set by setState().
+	\return Next facet index, or -1 if no facets left.
+	*/
+	virtual int32_t getNextFacet() = 0;
+
+	virtual ~SpatialAccelerator() {};
+};
+
+
+/**
+ Dummy accelerator iterates through all facets of mesh.
+*/
+class DummyAccelerator : public SpatialAccelerator
+{
+public:
+	/**
+	\param[in] count Mesh facets count for which accelerator should be built.
+	*/
+	DummyAccelerator(int32_t count);
+	// 'override' (C++11, already used by this codebase via nullptr) lets the
+	// compiler verify these actually override SpatialAccelerator's virtuals.
+	virtual void setState(Vertex* pos, Edge* ed, Facet& fc) override;
+	virtual void setState(const physx::PxVec3& point) override;
+	virtual int32_t getNextFacet() override;
+
+private:
+	int32_t count;		// total number of facets
+	int32_t current;	// next facet index to hand out
+};
+
+/**
+ Accelerator which builds map from 3d grid to initial mesh facets.
+ To find all facets which possibly intersect given one, it return all facets which are pointed by grid cells, which intersects with bounding box of given facet.
+ To find all facets which possibly cover given point, all facets which are pointed by cells in column which contains given point are returned.
+*/
+class BBoxBasedAccelerator : public SpatialAccelerator
+{
+public:
+	/**
+	\param[in] mesh Mesh for which acceleration structure should be built.
+	\param[in] resolution Resolution on 3d grid.
+	*/
+	BBoxBasedAccelerator(Mesh* mesh, int32_t resolution);
+	virtual ~BBoxBasedAccelerator();
+	int32_t getNextFacet() override;
+	void setState(Vertex* pos, Edge* ed, Facet& fc) override;
+	void setState(const physx::PxVec3& p) override;
+private:
+
+	bool testCellPolygonIntersection(int32_t cellId, physx::PxBounds3& facetBB);
+	void buildAccelStructure(Vertex* pos, Edge* edges, Facet* fc, int32_t facetCount);
+
+	int32_t mResolution;
+	physx::PxBounds3 mBounds;
+	physx::PxBounds3 facetBox;
+	std::vector< std::vector<int32_t> > mSpatialMap;
+	std::vector<physx::PxBounds3> mCells;
+
+
+	// Iterator data
+	std::vector<uint32_t> alreadyGotFlag;
+	uint32_t alreadyGotValue;
+	std::vector<int32_t> cellList;
+	// Default to the "exhausted" state: the constructor never sets these, so
+	// a getNextFacet() call before setState() previously read garbage.
+	int32_t mIteratorCell = -1;
+	int32_t mIteratorFacet = -1;
+};
+
+
+
+/**
+ Accelerator which builds map from 3d grid to initial mesh facets.
+ To find all facets which possibly intersect given one, it return all facets which are pointed by grid cells, which are intersected by given facet.
+ To find all facets which possibly cover given point, all facets which are pointed by cells in column which contains given point are returned.
+
+ In difference with BBoxBasedAccelerator this accelerator computes actual intersection of cube with polygon. It is more precise and omits much more intersections but slower.
+*/
+
+class IntersectionTestingAccelerator : public SpatialAccelerator
+{
+public:
+	IntersectionTestingAccelerator(Mesh* mesh, int32_t resolution);
+	int32_t getNextFacet() override;
+	void setState(Vertex* pos, Edge* ed, Facet& fc) override;
+	void setState(const physx::PxVec3& p) override;
+
+
+private:
+	std::vector< std::vector<int32_t> > mSpatialMap;
+	std::vector<physx::PxBounds3> mCubes;
+	int32_t mResolution;
+
+	// Iterator data
+	std::vector<uint32_t> alreadyGotFlag;
+	uint32_t alreadyGotValue;
+	std::vector<int32_t> cellList;
+	// Default to "exhausted": the constructor never initializes these, so a
+	// getNextFacet() call before setState() previously read garbage.
+	int32_t mIteratorCell = -1;
+	int32_t mIteratorFacet = -1;
+};
+
+} // namespace Blast
+} // namespace Nv
+
+
+#endif // ifndef NVBLASTEXTAUTHORINGACCELERATOR_H
diff --git a/NvBlast/sdk/extensions/authoring/source/NvBlastExtAuthoringBondGenerator.cpp b/NvBlast/sdk/extensions/authoring/source/NvBlastExtAuthoringBondGenerator.cpp
new file mode 100644
index 0000000..b2c3883
--- /dev/null
+++ b/NvBlast/sdk/extensions/authoring/source/NvBlastExtAuthoringBondGenerator.cpp
@@ -0,0 +1,991 @@
+/*
+* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+// This warning arises when using some stl containers with older versions of VC
+// c:\program files (x86)\microsoft visual studio 12.0\vc\include\xtree(1826): warning C4702: unreachable code
+#include "NvPreprocessor.h"
+#if NV_VC && NV_VC < 14
+#pragma warning(disable : 4702)
+#endif
+
+#include <NvBlastExtAuthoringBondGenerator.h>
+#include <NvBlastTypes.h>
+#include <NvBlast.h>
+#include "NvBlastExtTriangleProcessor.h"
+#include "NvBlastExtApexSharedParts.h"
+#include "NvBlastExtAuthoringCollisionBuilder.h"
+#include "NvBlastExtAuthoringInternalCommon.h"
+#include <vector>
+#include <map>
+#include <PxPlane.h>
+#include <algorithm>
+#include <cmath>
+
+using physx::PxVec3;
+using physx::PxBounds3;
+
+//#define DEBUG_OUTPUT
+#ifdef DEBUG_OUTPUT
+
+// Debug helper (DEBUG_OUTPUT only): dumps a triangle soup — 3 consecutive
+// points per triangle — to a Wavefront .obj file with per-face normals.
+void saveGeometryToObj(std::vector<PxVec3>& triangles, const char* filepath)
+{
+
+	FILE* outStream = fopen(filepath, "w");
+	if (outStream == nullptr)
+	{
+		// Previously fprintf/fclose ran on a null stream; fail silently
+		// instead (debug-only path).
+		return;
+	}
+
+	// Each loop iteration writes one triangle (i advances twice in the body,
+	// once in the header), so i steps by 3 overall.
+	for (uint32_t i = 0; i < triangles.size(); ++i)
+	{
+		fprintf(outStream, "v %lf %lf %lf\n", triangles[i].x, triangles[i].y, triangles[i].z);
+		++i;
+		fprintf(outStream, "v %lf %lf %lf\n", triangles[i].x, triangles[i].y, triangles[i].z);
+		++i;
+		fprintf(outStream, "v %lf %lf %lf\n", triangles[i].x, triangles[i].y, triangles[i].z);
+	}
+	for (uint32_t i = 0; i < triangles.size() / 3; ++i)
+	{
+		PxVec3 normal = (triangles[3 * i + 2] - triangles[3 * i]).cross((triangles[3 * i + 1] - triangles[3 * i])).getNormalized();
+		fprintf(outStream, "vn %lf %lf %lf\n", normal.x, normal.y, normal.z);
+		fprintf(outStream, "vn %lf %lf %lf\n", normal.x, normal.y, normal.z);
+		fprintf(outStream, "vn %lf %lf %lf\n", normal.x, normal.y, normal.z);
+	}
+	// Faces: .obj indices are 1-based.
+	int indx = 1;
+	for (uint32_t i = 0; i < triangles.size() / 3; ++i)
+	{
+		fprintf(outStream, "f %d//%d ", indx, indx);
+		indx++;
+		fprintf(outStream, "%d//%d ", indx, indx);
+		indx++;
+		fprintf(outStream, "%d//%d \n", indx, indx);
+		indx++;
+	}
+
+	fclose(outStream);
+
+}
+
+
+std::vector<PxVec3> intersectionBuffer;
+std::vector<PxVec3> meshBuffer;
+#endif
+
+namespace Nv
+{
+ namespace Blast
+ {
+
+ #define EPS_PLANE 0.0001f
+
+	// Epsilon-tolerant lexicographic ordering of planes by (d, n.x, n.y, n.z),
+	// used to group near-identical cut planes.
+	// NOTE(review): epsilon comparators are not a strict weak ordering
+	// (transitivity can fail for chains of nearly-equal planes) — confirm the
+	// sort/group usage tolerates this.
+	bool planeComparer(const PlaneChunkIndexer& as, const PlaneChunkIndexer& bs)
+	{
+		const PxPlane& a = as.plane;
+		const PxPlane& b = bs.plane;
+
+		if (a.d + EPS_PLANE < b.d) return true;
+		if (a.d - EPS_PLANE > b.d) return false;
+		if (a.n.x + EPS_PLANE < b.n.x) return true;
+		if (a.n.x - EPS_PLANE > b.n.x) return false;
+		if (a.n.y + EPS_PLANE < b.n.y) return true;
+		if (a.n.y - EPS_PLANE > b.n.y) return false;
+		return a.n.z + EPS_PLANE < b.n.z;
+	}
+
+
+	// Candidate bond record: a chunk/cut-plane/triangle triple. Ordered
+	// primarily by |plane index| (sign presumably encodes plane orientation —
+	// TODO confirm), secondarily by chunk id, so bonds on the same physical
+	// plane sort adjacently.
+	struct Bond
+	{
+		int32_t m_chunkId;
+		int32_t m_planeIndex;
+		int32_t triangleIndex;
+
+		bool operator<(const Bond& inp) const
+		{
+			if (abs(m_planeIndex) == abs(inp.m_planeIndex))
+			{
+				return m_chunkId < inp.m_chunkId;
+			}
+			else
+			{
+				return abs(m_planeIndex) < abs(inp.m_planeIndex);
+			}
+		}
+	};
+
+
+	// Aggregated geometric data for one prospective bond interface.
+	struct BondInfo
+	{
+		float area;					// interface area
+		physx::PxBounds3 m_bb;		// bounds of the interface geometry
+		physx::PxVec3 centroid;		// interface centroid
+		physx::PxVec3 normal;		// interface normal
+		int32_t m_chunkId;			// chunk this interface belongs to
+	};
+
+
+	// Estimates the bond interface between two chunks using the midplane
+	// between their convex hulls: points of each chunk that fall on the far
+	// side of the midplane (relative to their own centroid) are gathered, a
+	// 2D convex hull is built from them on the midplane, and its area and
+	// centroid are returned. 'normal' is oriented from chunk 1 toward chunk 2.
+	// Returns 0 if the chunks are degenerate, not in proximity, or the
+	// interface collapses.
+	float BlastBondGenerator::processWithMidplanes(TriangleProcessor* trProcessor, const std::vector<PxVec3>& chunk1Points, const std::vector<PxVec3>& chunk2Points,
+		const std::vector<PxVec3>& hull1p, const std::vector<PxVec3>& hull2p, PxVec3& normal, PxVec3& centroid)
+	{
+		PxBounds3 bounds;
+		PxBounds3 aBounds;
+		PxBounds3 bBounds;
+		bounds.setEmpty();
+		aBounds.setEmpty();
+		bBounds.setEmpty();
+
+		PxVec3 chunk1Centroid(0, 0, 0);
+		PxVec3 chunk2Centroid(0, 0, 0);
+
+		///////////////////////////////////////////////////////////////////////////////////
+		// Fewer than 4 points cannot form a 3D hull — nothing to bond.
+		if (chunk1Points.size() < 4 || chunk2Points.size() < 4)
+		{
+			return 0.0;
+		}
+
+		for (uint32_t i = 0; i < chunk1Points.size(); ++i)
+		{
+			chunk1Centroid += chunk1Points[i];
+			bounds.include(chunk1Points[i]);
+			aBounds.include(chunk1Points[i]);
+		}
+		for (uint32_t i = 0; i < chunk2Points.size(); ++i)
+		{
+			chunk2Centroid += chunk2Points[i];
+			bounds.include(chunk2Points[i]);
+			bBounds.include(chunk2Points[i]);
+		}
+
+
+		chunk1Centroid *= (1.0f / chunk1Points.size());
+		chunk2Centroid *= (1.0f / chunk2Points.size());
+
+		// Proximity test also yields the separating midplane.
+		Separation separation;
+		if (!importerHullsInProximityApexFree(hull1p, aBounds, PxTransform(PxIdentity), PxVec3(1, 1, 1), hull2p, bBounds, PxTransform(PxIdentity), PxVec3(1, 1, 1), 0.000, &separation))
+		{
+			return 0.0;
+		}
+
+		// Build first plane interface
+		PxPlane midplane = separation.plane;
+		if (!midplane.n.isFinite())
+		{
+			return 0.0;
+		}
+		std::vector<PxVec3> interfacePoints;
+
+		float firstCentroidSide = midplane.distance(chunk1Centroid);
+		float secondCentroidSide = midplane.distance(chunk2Centroid);
+
+		// Collect points of each chunk that cross the midplane (opposite side
+		// from their own centroid) — these outline the contact interface.
+		for (uint32_t i = 0; i < chunk1Points.size(); ++i)
+		{
+			float dst = midplane.distance(chunk1Points[i]);
+			if (dst * firstCentroidSide < 0)
+			{
+				interfacePoints.push_back(chunk1Points[i]);
+			}
+		}
+
+		for (uint32_t i = 0; i < chunk2Points.size(); ++i)
+		{
+			float dst = midplane.distance(chunk2Points[i]);
+			if (dst * secondCentroidSide < 0)
+			{
+				interfacePoints.push_back(chunk2Points[i]);
+			}
+		}
+		std::vector<PxVec3> convexHull;
+		trProcessor->buildConvexHull(interfacePoints, convexHull, midplane.n);
+		float area = 0;
+		PxVec3 centroidLocal(0, 0, 0);
+		if (convexHull.size() < 3)
+		{
+			return 0.0;
+		}
+		// Fan triangulation from convexHull[0]: accumulate centroid and
+		// twice-the-area of each triangle.
+		for (uint32_t i = 0; i < convexHull.size() - 1; ++i)
+		{
+			centroidLocal += convexHull[i];
+			area += (convexHull[i] - convexHull[0]).cross((convexHull[i + 1] - convexHull[0])).magnitude();
+		}
+		centroidLocal += convexHull.back();
+		centroidLocal *= (1.0f / convexHull.size());
+		// BUGFIX: assign the normal BEFORE the orientation flip. Previously
+		// 'normal' was negated while still holding its incoming value and then
+		// unconditionally overwritten with midplane.n, so the flip was lost.
+		normal = midplane.n;
+		// Orient the normal from chunk 1 toward chunk 2.
+		float direction = midplane.n.dot(chunk2Centroid - chunk1Centroid);
+		if (direction < 0)
+		{
+			normal = -1.0f * normal;
+		}
+		centroid = centroidLocal;
+		return area * 0.5f;	// accumulated cross products are twice the area
+	}
+
+
+	// Dispatches bond generation over pre-fractured chunk geometry to the
+	// averaged (midplane-based) or exact implementation per the config.
+	// Returns the implementation's result code.
+	int32_t BlastBondGenerator::bondsFromPrefractured(const std::vector<std::vector<Triangle>>& geometry, const std::vector<bool>& chunkIsSupport, std::vector<NvBlastBondDesc>& resultBondDescs, BondGenerationConfig conf)
+	{
+		int32_t ret_val = 0;
+		switch (conf.bondMode)
+		{
+		case BondGenerationConfig::AVERAGE:
+			ret_val = createFullBondListAveraged(geometry, chunkIsSupport, resultBondDescs, conf);
+			break;
+		case BondGenerationConfig::EXACT:
+			ret_val = createFullBondListExact(geometry, chunkIsSupport, resultBondDescs, conf);
+			break;
+		}
+		return ret_val;
+	}
+
+ // Builds bonds between every pair of support chunks by approximating each
+ // chunk with its convex hull and intersecting each pair with a midplane
+ // (processWithMidplanes). A bond is emitted for a pair only when the
+ // computed interface area is positive. Always returns 0.
+ //
+ // FIX: the inner loop originally tested supportFlags[i] instead of
+ // supportFlags[j], so non-support partner chunks were still bonded.
+ int32_t BlastBondGenerator::createFullBondListAveraged(const std::vector<std::vector<Triangle>>& chunksGeometry, const std::vector<bool>& supportFlags, std::vector<NvBlastBondDesc>& mResultBondDescs, BondGenerationConfig conf)
+ {
+     NV_UNUSED(conf);
+
+     // Gather the triangle vertices of every support chunk into a point cloud.
+     std::vector<std::vector<PxVec3> > chunksPoints(chunksGeometry.size());
+     for (uint32_t i = 0; i < chunksGeometry.size(); ++i)
+     {
+         if (!supportFlags[i])
+         {
+             continue;
+         }
+         for (uint32_t j = 0; j < chunksGeometry[i].size(); ++j)
+         {
+             chunksPoints[i].push_back(chunksGeometry[i][j].a.p);
+             chunksPoints[i].push_back(chunksGeometry[i][j].b.p);
+             chunksPoints[i].push_back(chunksGeometry[i][j].c.p);
+         }
+     }
+
+     // Cook one convex hull per support chunk.
+     Nv::Blast::ConvexMeshBuilder builder(mPxCooking, mPxInsertionCallback);
+     std::vector<CollisionHull> cHulls(chunksGeometry.size());
+     for (uint32_t i = 0; i < chunksGeometry.size(); ++i)
+     {
+         if (!supportFlags[i])
+         {
+             continue;
+         }
+         builder.buildCollisionGeometry(chunksPoints[i], cHulls[i]);
+     }
+
+     // Copy hull vertices into PxVec3 arrays for the midplane computation.
+     std::vector<std::vector<PxVec3> > hullPoints(cHulls.size());
+     for (uint32_t chunk = 0; chunk < cHulls.size(); ++chunk)
+     {
+         if (!supportFlags[chunk])
+         {
+             continue;
+         }
+         hullPoints[chunk].resize(cHulls[chunk].points.size());
+         for (uint32_t i = 0; i < cHulls[chunk].points.size(); ++i)
+         {
+             hullPoints[chunk][i].x = cHulls[chunk].points[i].x;
+             hullPoints[chunk][i].y = cHulls[chunk].points[i].y;
+             hullPoints[chunk][i].z = cHulls[chunk].points[i].z;
+         }
+     }
+
+     TriangleProcessor trProcessor;
+
+     // Test every unordered pair (i, j) of support chunks.
+     for (uint32_t i = 0; i < chunksGeometry.size(); ++i)
+     {
+         if (!supportFlags[i])
+         {
+             continue;
+         }
+         for (uint32_t j = i + 1; j < chunksGeometry.size(); ++j)
+         {
+             if (!supportFlags[j])  // was supportFlags[i]: skip non-support partners
+             {
+                 continue;
+             }
+             PxVec3 normal;
+             PxVec3 centroid;
+
+             float area = processWithMidplanes(&trProcessor, chunksPoints[i], chunksPoints[j], hullPoints[i], hullPoints[j], normal, centroid);
+
+             if (area > 0)
+             {
+                 NvBlastBondDesc bDesc;
+                 bDesc.chunkIndices[0] = i;
+                 bDesc.chunkIndices[1] = j;
+                 bDesc.bond.area = area;
+                 bDesc.bond.centroid[0] = centroid.x;
+                 bDesc.bond.centroid[1] = centroid.y;
+                 bDesc.bond.centroid[2] = centroid.z;
+
+                 bDesc.bond.normal[0] = normal.x;
+                 bDesc.bond.normal[1] = normal.y;
+                 bDesc.bond.normal[2] = normal.z;
+
+                 mResultBondDescs.push_back(bDesc);
+             }
+         }
+     }
+
+     return 0;
+ }
+
+ // Component-wise plane comparison with EPS_PLANE tolerance: returns 1 when
+ // the planes' d and normal components all match within tolerance, else 0.
+ // The negated-greater-than form preserves the original's behavior for NaN
+ // components (a NaN difference never triggers a mismatch).
+ uint32_t isSamePlane(PxPlane& a, PxPlane& b)
+ {
+     const bool same = !(PxAbs(a.d - b.d) > EPS_PLANE)
+                    && !(PxAbs(a.n.x - b.n.x) > EPS_PLANE)
+                    && !(PxAbs(a.n.y - b.n.y) > EPS_PLANE)
+                    && !(PxAbs(a.n.z - b.n.z) > EPS_PLANE);
+     return same ? 1 : 0;
+ }
+
+ // Exact bond generation entry point: indexes every triangle of every
+ // support chunk by its supporting plane, sorts the index so coplanar
+ // triangles become adjacent, then runs the pairwise-intersection pass.
+ int32_t BlastBondGenerator::createFullBondListExact(const std::vector<std::vector<Triangle>>& chunksGeometry, const std::vector<bool>& supportFlags, std::vector<NvBlastBondDesc>& mResultBondDescs, BondGenerationConfig conf)
+ {
+ std::vector < PlaneChunkIndexer > planeTriangleMapping;
+ NV_UNUSED(conf);
+ for (uint32_t i = 0; i < chunksGeometry.size(); ++i)
+ {
+ if (!supportFlags[i])
+ {
+ // Only support chunks participate in bond generation.
+ continue;
+ }
+ for (uint32_t j = 0; j < chunksGeometry[i].size(); ++j)
+ {
+#ifdef DEBUG_OUTPUT
+ meshBuffer.push_back(chunksGeometry[i][j].a.p );
+ meshBuffer.push_back(chunksGeometry[i][j].b.p);
+ meshBuffer.push_back(chunksGeometry[i][j].c.p );
+#endif
+
+ // Plane through the triangle's three vertices.
+ PxPlane nPlane = PxPlane(chunksGeometry[i][j].a.p, chunksGeometry[i][j].b.p, chunksGeometry[i][j].c.p);
+ planeTriangleMapping.push_back(PlaneChunkIndexer(i, j, nPlane));
+ }
+ }
+
+ // Sort by plane so coplanar triangles can be located with binary search.
+ std::sort(planeTriangleMapping.begin(), planeTriangleMapping.end(), planeComparer);
+ return createFullBondListExactInternal(chunksGeometry, planeTriangleMapping, mResultBondDescs);
+ }
+
+ // Caches per-chunk data used by subsequent bond queries: a copy of the
+ // geometry, the plane of every triangle, and for each chunk its cooked
+ // convex hull, the hull's points, and the hull's bounding box.
+ void BlastBondGenerator::buildGeometryCache(const std::vector<std::vector<Triangle> >& geometry)
+ {
+     mGeometryCache = geometry;
+     const size_t chunkCount = mGeometryCache.size();
+     mHullsPointsCache.resize(chunkCount);
+     mBoundsCache.resize(chunkCount);
+     mCHullCache.resize(chunkCount);
+
+     // Record the supporting plane of every triangle of every chunk.
+     for (uint32_t chunk = 0; chunk < mGeometryCache.size(); ++chunk)
+     {
+         for (uint32_t tri = 0; tri < mGeometryCache[chunk].size(); ++tri)
+         {
+             const Triangle& t = mGeometryCache[chunk][tri];
+             mPlaneCache.push_back(PlaneChunkIndexer(chunk, tri, PxPlane(t.a.p, t.b.p, t.c.p)));
+         }
+     }
+
+     for (uint32_t chunk = 0; chunk < mGeometryCache.size(); ++chunk)
+     {
+         // Flatten the chunk's triangle vertices into a point cloud.
+         std::vector<PxVec3> pointCloud;
+         pointCloud.reserve(mGeometryCache[chunk].size() * 3);
+         for (uint32_t tri = 0; tri < mGeometryCache[chunk].size(); ++tri)
+         {
+             pointCloud.push_back(mGeometryCache[chunk][tri].a.p);
+             pointCloud.push_back(mGeometryCache[chunk][tri].b.p);
+             pointCloud.push_back(mGeometryCache[chunk][tri].c.p);
+         }
+
+         Nv::Blast::ConvexMeshBuilder builder(mPxCooking, mPxInsertionCallback);
+
+         CollisionHull& hull = mCHullCache[chunk];
+         builder.buildCollisionGeometry(pointCloud, hull);
+
+         // Mirror hull vertices into PxVec3 form and grow the chunk bounds.
+         mHullsPointsCache[chunk].resize(hull.points.size());
+         mBoundsCache[chunk].setEmpty();
+         for (uint32_t i = 0; i < hull.points.size(); ++i)
+         {
+             PxVec3& dst = mHullsPointsCache[chunk][i];
+             dst.x = hull.points[i].x;
+             dst.y = hull.points[i].y;
+             dst.z = hull.points[i].z;
+             mBoundsCache[chunk].include(dst);
+         }
+     }
+ }
+
+ // Clears every per-chunk cache populated by buildGeometryCache(): source
+ // geometry, triangle planes, hull points, cooked hulls, and bounds.
+ void BlastBondGenerator::resetGeometryCache()
+ {
+ mGeometryCache.clear();
+ mPlaneCache.clear();
+ mHullsPointsCache.clear();
+ mCHullCache.clear();
+ mBoundsCache.clear();
+ }
+
+ // Exact bond construction pass. For each plane-indexed triangle, finds
+ // triangles of other chunks lying in the mirrored (opposite-facing) plane
+ // via binary search, intersects each such pair in the plane, and
+ // accumulates intersection area and centroid per chunk pair. The sums are
+ // then averaged into one NvBlastBondDesc per pair. Always returns 0.
+ int32_t BlastBondGenerator::createFullBondListExactInternal(const std::vector<std::vector<Triangle>>& chunksGeometry, std::vector < PlaneChunkIndexer >& planeTriangleMapping, std::vector<NvBlastBondDesc>& mResultBondDescs)
+ {
+ // Keyed by (chunkA, chunkB); the int32_t counts centroid samples so the
+ // accumulated centroid can be averaged at the end.
+ std::map<std::pair<int32_t, int32_t>, std::pair<NvBlastBondDesc, int32_t> > bonds;
+
+ TriangleProcessor trPrc;
+ std::vector<PxVec3> intersectionBufferLocal;
+
+ NvBlastBondDesc cleanBond;
+ memset(&cleanBond, 0, sizeof(NvBlastBondDesc));
+ for (uint32_t tIndex = 0; tIndex < planeTriangleMapping.size(); ++tIndex)
+ {
+
+ // 'opp' is this triangle's plane flipped, i.e. the plane a coplanar
+ // triangle of a neighboring chunk would have.
+ PlaneChunkIndexer opp = planeTriangleMapping[tIndex];
+
+ opp.plane.d *= -1;
+ opp.plane.n *= -1;
+
+ // Range of entries whose plane sorts equal to the flipped plane.
+ uint32_t startIndex = (uint32_t)(std::lower_bound(planeTriangleMapping.begin(), planeTriangleMapping.end(), opp, planeComparer) - planeTriangleMapping.begin());
+ uint32_t endIndex = (uint32_t)(std::upper_bound(planeTriangleMapping.begin(), planeTriangleMapping.end(), opp, planeComparer) - planeTriangleMapping.begin());
+ // uint32_t startIndex = 0;
+ // uint32_t endIndex = (uint32_t)planeTriangleMapping.size();
+
+ PlaneChunkIndexer& mappedTr = planeTriangleMapping[tIndex];
+ const Triangle& trl = chunksGeometry[mappedTr.chunkId][mappedTr.trId];
+ PxPlane pln = mappedTr.plane;
+ // Project the triangle into its plane, centered on its centroid, for
+ // the 2D intersection test.
+ TrPrcTriangle trp(trl.a.p, trl.b.p, trl.c.p);
+ PxVec3 trCentroid = (trl.a.p + trl.b.p + trl.c.p) * (1.0f / 3.0f);
+ trp.points[0] -= trCentroid;
+ trp.points[1] -= trCentroid;
+ trp.points[2] -= trCentroid;
+ ProjectionDirections pDir = getProjectionDirection(pln.n);
+ TrPrcTriangle2d trp2d;
+ trp2d.points[0] = getProjectedPointWithWinding(trp.points[0], pDir);
+ trp2d.points[1] = getProjectedPointWithWinding(trp.points[1], pDir);
+ trp2d.points[2] = getProjectedPointWithWinding(trp.points[2], pDir);
+
+ // NOTE(review): upper_bound already yields one-past-the-end, so
+ // '<= endIndex' inspects one extra entry (guarded by the size check) --
+ // confirm whether the inclusive bound is intentional.
+ for (uint32_t i = startIndex; i <= endIndex && i < planeTriangleMapping.size(); ++i)
+ {
+ PlaneChunkIndexer& mappedTr2 = planeTriangleMapping[i];
+ // NOTE(review): this compares a triangle id against a chunk id;
+ // it looks like it was meant to compare chunk ids -- confirm.
+ if (mappedTr2.trId == opp.chunkId)
+ {
+ continue;
+ }
+
+ if (!isSamePlane(opp.plane, mappedTr2.plane))
+ {
+ continue;
+ }
+
+ // Triangles of the same chunk never form a bond.
+ if (mappedTr.chunkId == mappedTr2.chunkId)
+ {
+ continue;
+ }
+ std::pair<int32_t, int32_t> bondEndPoints = std::make_pair(mappedTr.chunkId, mappedTr2.chunkId);
+ if (bondEndPoints.second < bondEndPoints.first) continue;
+ std::pair<int32_t, int32_t> bondEndPointsSwapped = std::make_pair(mappedTr2.chunkId, mappedTr.chunkId);
+ if (bonds.find(bondEndPoints) == bonds.end() && bonds.find(bondEndPointsSwapped) != bonds.end())
+ {
+ continue; // We do not need to account for the interface surface twice
+ }
+ if (bonds.find(bondEndPoints) == bonds.end())
+ {
+ // First contribution for this pair: initialize a zeroed bond.
+ bonds[bondEndPoints].second = 0;
+ bonds[bondEndPoints].first = cleanBond;
+ bonds[bondEndPoints].first.chunkIndices[0] = bondEndPoints.first;
+ bonds[bondEndPoints].first.chunkIndices[1] = bondEndPoints.second;
+ bonds[bondEndPoints].first.bond.normal[0] = pln.n[0];
+ bonds[bondEndPoints].first.bond.normal[1] = pln.n[1];
+ bonds[bondEndPoints].first.bond.normal[2] = pln.n[2];
+ }
+
+ const Triangle& trl2 = chunksGeometry[mappedTr2.chunkId][mappedTr2.trId];
+
+ TrPrcTriangle trp2(trl2.a.p, trl2.b.p, trl2.c.p);
+
+ intersectionBufferLocal.clear();
+ intersectionBufferLocal.reserve(32);
+ trPrc.getTriangleIntersection(trp, trp2d, trp2, trCentroid, intersectionBufferLocal, pln.n);
+ PxVec3 centroidPoint(0, 0, 0);
+ int32_t collectedVerticesCount = 0;
+ float area = 0;
+ // Accumulate the intersection polygon's vertices and its area via a
+ // triangle fan anchored at vertex 0 (doubled area; halved below).
+ if (intersectionBufferLocal.size() >= 3)
+ {
+#ifdef DEBUG_OUTPUT
+ for (uint32_t p = 1; p < intersectionBufferLocal.size() - 1; ++p)
+ {
+ intersectionBuffer.push_back(intersectionBufferLocal[0]);
+ intersectionBuffer.push_back(intersectionBufferLocal[p]);
+ intersectionBuffer.push_back(intersectionBufferLocal[p + 1]);
+ }
+#endif
+ centroidPoint = intersectionBufferLocal[0] + intersectionBufferLocal.back();
+ collectedVerticesCount = 2;
+
+ for (uint32_t j = 1; j < intersectionBufferLocal.size() - 1; ++j)
+ {
+ ++collectedVerticesCount;
+ centroidPoint += intersectionBufferLocal[j];
+ area += (intersectionBufferLocal[j + 1] - intersectionBufferLocal[0]).cross(intersectionBufferLocal[j] - intersectionBufferLocal[0]).magnitude();
+ }
+ }
+ if (area > 0.00001f)
+ {
+ bonds[bondEndPoints].second += collectedVerticesCount;
+
+ bonds[bondEndPoints].first.bond.area += area * 0.5f;
+ bonds[bondEndPoints].first.bond.centroid[0] += (centroidPoint.x);
+ bonds[bondEndPoints].first.bond.centroid[1] += (centroidPoint.y);
+ bonds[bondEndPoints].first.bond.centroid[2] += (centroidPoint.z);
+ }
+ }
+ }
+
+ // Average each accumulated centroid by its sample count and emit the
+ // bond. 'it' is deliberately a copy: the division mutates the copy that
+ // is then pushed, leaving the map untouched.
+ for (auto it : bonds)
+ {
+ if (it.second.first.bond.area > 0)
+ {
+ float mlt = 1.0f / (it.second.second);
+ it.second.first.bond.centroid[0] *= mlt;
+ it.second.first.bond.centroid[1] *= mlt;
+ it.second.first.bond.centroid[2] *= mlt;
+
+ mResultBondDescs.push_back(it.second.first);
+ }
+
+ }
+#ifdef DEBUG_OUTPUT
+ saveGeometryToObj(meshBuffer, "Mesh.obj");
+ saveGeometryToObj(intersectionBuffer, "inter.obj");
+#endif
+ return 0;
+ }
+
+ // Forces a bond between two convex hulls that may not share a face.
+ // Finds a separating/contact plane between the hulls, offsets it into each
+ // hull by 'overlapping' (fraction between each hull's nearest and farthest
+ // signed plane distance), slices each hull by its offset plane, and uses
+ // the two cross-section polygons to compute the bond's area, centroid and
+ // normal. Returns 0 on success, 1 when no valid plane could be found.
+ int32_t BlastBondGenerator::createBondForcedInternal(const std::vector<PxVec3>& hull0, const std::vector<PxVec3>& hull1,
+ const CollisionHull& cHull0,const CollisionHull& cHull1,
+ PxBounds3 bound0, PxBounds3 bound1, NvBlastBond& resultBond, float overlapping)
+ {
+
+ TriangleProcessor trProcessor;
+ Separation separation;
+ importerHullsInProximityApexFree(hull0, bound0, PxTransform(PxIdentity), PxVec3(1, 1, 1), hull1, bound1, PxTransform(PxIdentity), PxVec3(1, 1, 1), 0.000, &separation);
+
+ // A NaN plane means the separation computation failed (e.g. coincident
+ // hulls); retry with one hull nudged by a tiny offset before giving up.
+ if (std::isnan(separation.plane.d))
+ {
+ importerHullsInProximityApexFree(hull0, bound0, PxTransform(PxVec3(0.000001f, 0.000001f, 0.000001f)), PxVec3(1, 1, 1), hull1, bound1, PxTransform(PxIdentity), PxVec3(1, 1, 1), 0.000, &separation);
+ if (std::isnan(separation.plane.d))
+ {
+ return 1;
+ }
+ }
+
+ PxPlane pl = separation.plane;
+ std::vector<PxVec3> ifsPoints[2];
+
+ // dst[h][0] = hull h's signed plane distance of largest magnitude,
+ // dst[h][1] = the one of smallest magnitude.
+ float dst[2][2];
+
+ dst[0][0] = 0;
+ dst[0][1] = MAXIMUM_EXTENT;
+ for (uint32_t p = 0; p < cHull0.points.size(); ++p)
+ {
+ float d = pl.distance(PxVec3(cHull0.points[p].x, cHull0.points[p].y, cHull0.points[p].z));
+ if (PxAbs(d) > PxAbs(dst[0][0]))
+ {
+ dst[0][0] = d;
+ }
+ if (PxAbs(d) < PxAbs(dst[0][1]))
+ {
+ dst[0][1] = d;
+ }
+ }
+
+ dst[1][0] = 0;
+ dst[1][1] = MAXIMUM_EXTENT;
+ for (uint32_t p = 0; p < cHull1.points.size(); ++p)
+ {
+ float d = pl.distance(PxVec3(cHull1.points[p].x, cHull1.points[p].y, cHull1.points[p].z));
+ if (PxAbs(d) > PxAbs(dst[1][0]))
+ {
+ dst[1][0] = d;
+ }
+ if (PxAbs(d) < PxAbs(dst[1][1]))
+ {
+ dst[1][1] = d;
+ }
+ }
+
+
+ // Per-hull plane offset: 'overlapping' interpolates between the nearest
+ // and farthest hull vertex distances.
+ float cvOffset[2] = { dst[0][1] + (dst[0][0] - dst[0][1]) * overlapping, dst[1][1] + (dst[1][0] - dst[1][1]) * overlapping };
+
+ // Intersect every polygon edge of hull0 (shifted by its offset) with the
+ // plane to collect the cross-section points.
+ for (uint32_t i = 0; i < cHull0.polygonData.size(); ++i)
+ {
+ uint32_t offset = cHull0.polygonData[i].mIndexBase;
+ PxVec3 result;
+ for (uint32_t j = 0; j < cHull0.polygonData[i].mNbVerts; ++j)
+ {
+ uint32_t nxj = (j + 1) % cHull0.polygonData[i].mNbVerts;
+ const uint32_t* ind = &cHull0.indices[0];
+ PxVec3 a = hull0[ind[j + offset]] - pl.n * cvOffset[0];
+ PxVec3 b = hull0[ind[nxj + offset]] - pl.n * cvOffset[0];
+
+ if (getPlaneSegmentIntersection(pl, a, b, result))
+ {
+ ifsPoints[0].push_back(result);
+ }
+ }
+ }
+
+ // Same cross-section collection for hull1.
+ for (uint32_t i = 0; i < cHull1.polygonData.size(); ++i)
+ {
+ uint32_t offset = cHull1.polygonData[i].mIndexBase;
+ PxVec3 result;
+ for (uint32_t j = 0; j < cHull1.polygonData[i].mNbVerts; ++j)
+ {
+ uint32_t nxj = (j + 1) % cHull1.polygonData[i].mNbVerts;
+ const uint32_t* ind = &cHull1.indices[0];
+ PxVec3 a = hull1[ind[j + offset]] - pl.n * cvOffset[1];
+ PxVec3 b = hull1[ind[nxj + offset]] - pl.n * cvOffset[1];
+
+ if (getPlaneSegmentIntersection(pl, a, b, result))
+ {
+ ifsPoints[1].push_back(result);
+ }
+ }
+ }
+
+
+ std::vector<PxVec3> convexes[2];
+
+ trProcessor.buildConvexHull(ifsPoints[0], convexes[0], pl.n);
+ trProcessor.buildConvexHull(ifsPoints[1], convexes[1], pl.n);
+
+ float areas[2] = { 0, 0 };
+ PxVec3 centroids[2] = { PxVec3(0, 0, 0), PxVec3(0, 0, 0) };
+
+ // Per cross-section polygon: centroid as the vertex average, area via a
+ // triangle fan anchored at vertex 0 (doubled; halved in resultBond.area).
+ for (uint32_t cv = 0; cv < 2; ++cv)
+ {
+ if (convexes[cv].size() == 0)
+ {
+ continue;
+ }
+ centroids[cv] = convexes[cv][0] + convexes[cv].back();
+ for (uint32_t i = 1; i < convexes[cv].size() - 1; ++i)
+ {
+ centroids[cv] += convexes[cv][i];
+ areas[cv] += (convexes[cv][i + 1] - convexes[cv][0]).cross(convexes[cv][i] - convexes[cv][0]).magnitude();
+#ifdef DEBUG_OUTPUT
+ intersectionBuffer.push_back(convexes[cv][0]);
+ intersectionBuffer.push_back(convexes[cv][i]);
+ intersectionBuffer.push_back(convexes[cv][i + 1]);
+#endif
+
+ }
+ centroids[cv] *= (1.0f / convexes[cv].size());
+ areas[cv] = PxAbs(areas[cv]);
+ }
+
+ // Final bond: average of the two sections' areas and centroids, plane
+ // normal as bond normal.
+ resultBond.area = (areas[0] + areas[1]) * 0.5f;
+ resultBond.centroid[0] = (centroids[0][0] + centroids[1][0]) * 0.5f;
+ resultBond.centroid[1] = (centroids[0][1] + centroids[1][1]) * 0.5f;
+ resultBond.centroid[2] = (centroids[0][2] + centroids[1][2]) * 0.5f;
+ resultBond.normal[0] = pl.n[0];
+ resultBond.normal[1] = pl.n[1];
+ resultBond.normal[2] = pl.n[2];
+
+#ifdef DEBUG_OUTPUT
+ saveGeometryToObj(meshBuffer, "ArbitMeshes.obj");
+ saveGeometryToObj(intersectionBuffer, "inter.obj");
+#endif
+
+
+ return 0;
+ }
+
+
+ // Builds NvBlast chunk and bond descriptors from a FractureTool's internal
+ // fracture result. Chunk centroids are averaged from their base-mesh
+ // vertices. Bonds are recovered from triangles tagged with a cut-plane id
+ // (Triangle::userInfo): triangles on opposite sides of the same plane
+ // (positive vs. negative plane index) from different chunks are paired.
+ // Returns 0 on success, 1 when the tool holds no chunks/geometry.
+ int32_t BlastBondGenerator::buildDescFromInternalFracture(FractureTool* tool, const std::vector<bool>& chunkIsSupport, std::vector<NvBlastBondDesc>& mResultBondDescs, std::vector<NvBlastChunkDesc>& mResultChunkDescriptors)
+ {
+ const std::vector<ChunkInfo>& chunkData = tool->getChunkList();
+ std::vector<std::vector<Triangle> > trianglesBuffer(chunkData.size());
+
+ for (uint32_t i = 0; i < trianglesBuffer.size(); ++i)
+ {
+ tool->getBaseMesh(i, trianglesBuffer[i]);
+ }
+
+ if (chunkData.empty() || trianglesBuffer.empty())
+ {
+ return 1;
+ }
+ mResultChunkDescriptors.resize(trianglesBuffer.size());
+ std::vector<Bond> bondDescriptors;
+ // Chunk 0 is the root: no parent, userData 0.
+ mResultChunkDescriptors[0].parentChunkIndex = UINT32_MAX;
+ mResultChunkDescriptors[0].userData = 0;
+
+ {
+ // Root centroid: average of all base-mesh triangle vertices.
+ // NOTE(review): divides by the triangle count -- an empty root mesh
+ // would divide by zero; confirm the tool guarantees non-empty meshes.
+ PxVec3 chunkCentroid(0, 0, 0);
+ for (uint32_t tr = 0; tr < trianglesBuffer[0].size(); ++tr)
+ {
+ chunkCentroid += trianglesBuffer[0][tr].a.p;
+ chunkCentroid += trianglesBuffer[0][tr].b.p;
+ chunkCentroid += trianglesBuffer[0][tr].c.p;
+ }
+ chunkCentroid *= (1.0f / (3 * trianglesBuffer[0].size()));
+ mResultChunkDescriptors[0].centroid[0] = chunkCentroid[0];
+ mResultChunkDescriptors[0].centroid[1] = chunkCentroid[1];
+ mResultChunkDescriptors[0].centroid[2] = chunkCentroid[2];
+ }
+
+ // Fill descriptors for the remaining chunks and collect one Bond record
+ // per cut-plane-tagged triangle (userInfo != 0).
+ for (uint32_t i = 1; i < chunkData.size(); ++i)
+ {
+
+ mResultChunkDescriptors[i].userData = i;
+ mResultChunkDescriptors[i].parentChunkIndex = tool->getChunkIndex(chunkData[i].parent);
+ if (chunkIsSupport[i])
+ mResultChunkDescriptors[i].flags = NvBlastChunkDesc::SupportFlag;
+ PxVec3 chunkCentroid(0, 0, 0);
+ for (uint32_t tr = 0; tr < trianglesBuffer[i].size(); ++tr)
+ {
+ chunkCentroid += trianglesBuffer[i][tr].a.p;
+ chunkCentroid += trianglesBuffer[i][tr].b.p;
+ chunkCentroid += trianglesBuffer[i][tr].c.p;
+
+ Triangle& trRef = trianglesBuffer[i][tr];
+ int32_t id = trRef.userInfo;
+ if (id == 0)
+ continue;
+ bondDescriptors.push_back(Bond());
+ Bond& bond = bondDescriptors.back();
+ bond.m_chunkId = i;
+ bond.m_planeIndex = id;
+ bond.triangleIndex = tr;
+ }
+ chunkCentroid *= (1.0f / (3 * trianglesBuffer[i].size()));
+ mResultChunkDescriptors[i].centroid[0] = chunkCentroid[0];
+ mResultChunkDescriptors[i].centroid[1] = chunkCentroid[1];
+ mResultChunkDescriptors[i].centroid[2] = chunkCentroid[2];
+ }
+ // Sort so records group by plane, then by chunk (Bond's operator<).
+ std::sort(bondDescriptors.begin(), bondDescriptors.end());
+ if (bondDescriptors.empty())
+ {
+ return 0;
+ }
+ int32_t chunkId, planeId;
+ chunkId = bondDescriptors[0].m_chunkId;
+ planeId = bondDescriptors[0].m_planeIndex;
+ // Chunks touching the current plane from its positive side (forward)
+ // and negative side (backward); bonded pairwise when the plane changes.
+ std::vector<BondInfo> forwardChunks;
+ std::vector<BondInfo> backwardChunks;
+
+ // Running accumulators for the current (chunk, plane) triangle group.
+ float area = 0;
+ PxVec3 normal(0, 0, 0);
+ PxVec3 centroid(0, 0, 0);
+ int32_t collected = 0;
+ PxBounds3 bb = PxBounds3::empty();
+
+ chunkId = -1;
+ planeId = bondDescriptors[0].m_planeIndex;
+ // One extra iteration (i == size()) flushes the final group.
+ for (uint32_t i = 0; i <= bondDescriptors.size(); ++i)
+ {
+ if (i == bondDescriptors.size() || (chunkId != bondDescriptors[i].m_chunkId || abs(planeId) != abs(bondDescriptors[i].m_planeIndex)))
+ {
+ // Group boundary: emit the accumulated BondInfo for the previous
+ // (chunk, plane) group on the matching side of the plane.
+ if (chunkId != -1)
+ {
+ if (bondDescriptors[i - 1].m_planeIndex > 0) {
+ forwardChunks.push_back(BondInfo());
+ forwardChunks.back().area = area;
+ forwardChunks.back().normal = normal;
+ forwardChunks.back().centroid = centroid * (1.0f / 3.0f / collected);
+ forwardChunks.back().m_chunkId = chunkId;
+ forwardChunks.back().m_bb = bb;
+
+ }
+ else
+ {
+ backwardChunks.push_back(BondInfo());
+ backwardChunks.back().area = area;
+ backwardChunks.back().normal = normal;
+ backwardChunks.back().centroid = centroid * (1.0f / 3.0f / collected);
+ backwardChunks.back().m_chunkId = chunkId;
+ backwardChunks.back().m_bb = bb;
+ }
+ }
+ bb.setEmpty();
+ collected = 0;
+ area = 0;
+ normal = PxVec3(0, 0, 0);
+ centroid = PxVec3(0, 0, 0);
+ if (i != bondDescriptors.size())
+ chunkId = bondDescriptors[i].m_chunkId;
+ }
+ if (i == bondDescriptors.size() || abs(planeId) != abs(bondDescriptors[i].m_planeIndex))
+ {
+ // Plane change: bond every forward-side chunk with every
+ // backward-side chunk whose bounds overlap and which are support.
+ for (uint32_t fchunk = 0; fchunk < forwardChunks.size(); ++fchunk)
+ {
+ for (uint32_t bchunk = 0; bchunk < backwardChunks.size(); ++bchunk)
+ {
+ if (weakBoundingBoxIntersection(forwardChunks[fchunk].m_bb, backwardChunks[bchunk].m_bb) == 0)
+ {
+ continue;
+ }
+ if (chunkIsSupport[forwardChunks[fchunk].m_chunkId] == false || chunkIsSupport[backwardChunks[bchunk].m_chunkId] == false)
+ {
+ continue;
+ }
+ mResultBondDescs.push_back(NvBlastBondDesc());
+ mResultBondDescs.back().bond.area = std::min(forwardChunks[fchunk].area, backwardChunks[bchunk].area);
+ mResultBondDescs.back().bond.normal[0] = forwardChunks[fchunk].normal.x;
+ mResultBondDescs.back().bond.normal[1] = forwardChunks[fchunk].normal.y;
+ mResultBondDescs.back().bond.normal[2] = forwardChunks[fchunk].normal.z;
+
+ mResultBondDescs.back().bond.centroid[0] = (forwardChunks[fchunk].centroid.x + backwardChunks[bchunk].centroid.x ) * 0.5;
+ mResultBondDescs.back().bond.centroid[1] = (forwardChunks[fchunk].centroid.y + backwardChunks[bchunk].centroid.y) * 0.5;
+ mResultBondDescs.back().bond.centroid[2] = (forwardChunks[fchunk].centroid.z + backwardChunks[bchunk].centroid.z) * 0.5;
+
+
+ mResultBondDescs.back().chunkIndices[0] = forwardChunks[fchunk].m_chunkId;
+ mResultBondDescs.back().chunkIndices[1] = backwardChunks[bchunk].m_chunkId;
+ }
+ }
+ forwardChunks.clear();
+ backwardChunks.clear();
+ if (i != bondDescriptors.size())
+ {
+ planeId = bondDescriptors[i].m_planeIndex;
+ }
+ else
+ {
+ break;
+ }
+ }
+
+ // Accumulate this triangle into the current group. The cross-product
+ // normal's magnitude is twice the triangle area (summed here as-is).
+ // NOTE(review): 'normal' is overwritten each iteration rather than
+ // accumulated -- the group's last triangle wins; confirm intent.
+ collected++;
+ int32_t tr = bondDescriptors[i].triangleIndex;
+ PxVec3 n = trianglesBuffer[chunkId][tr].getNormal();
+ area += n.magnitude();
+ normal = n.getNormalized();
+ centroid += trianglesBuffer[chunkId][tr].a.p;
+ centroid += trianglesBuffer[chunkId][tr].b.p;
+ centroid += trianglesBuffer[chunkId][tr].c.p;
+
+ bb.include(trianglesBuffer[chunkId][tr].a.p);
+ bb.include(trianglesBuffer[chunkId][tr].b.p);
+ bb.include(trianglesBuffer[chunkId][tr].c.p);
+ }
+
+ return 0;
+ }
+
+ // Creates one bond per overlapping chunk pair. EXACT mode intersects the
+ // meshes directly; any other mode (AVERAGE rebuilds the geometry cache
+ // first) forces a bond from the cached convex hulls with a fixed 0.3
+ // overlap fraction. Always returns 0.
+ int32_t BlastBondGenerator::createBondBetweenMeshes(const std::vector<std::vector<Triangle> >& geometry, std::vector<NvBlastBondDesc>& resultBond,const std::vector<std::pair<uint32_t, uint32_t> >& overlaps, BondGenerationConfig cfg)
+ {
+     if (cfg.bondMode == BondGenerationConfig::AVERAGE)
+     {
+         resetGeometryCache();
+         buildGeometryCache(geometry);
+     }
+     resultBond.clear();
+     resultBond.resize(overlaps.size());
+
+     const bool exactMode = (cfg.bondMode == BondGenerationConfig::EXACT);
+     for (uint32_t k = 0; k < overlaps.size(); ++k)
+     {
+         const uint32_t c0 = overlaps[k].first;
+         const uint32_t c1 = overlaps[k].second;
+         resultBond[k].chunkIndices[0] = c0;
+         resultBond[k].chunkIndices[1] = c1;
+         if (exactMode)
+         {
+             createBondBetweenMeshes(geometry[c0], geometry[c1], resultBond[k].bond, cfg);
+         }
+         else
+         {
+             createBondForcedInternal(mHullsPointsCache[c0], mHullsPointsCache[c1], mCHullCache[c0], mCHullCache[c1],
+                 mBoundsCache[c0], mBoundsCache[c1], resultBond[k].bond, 0.3f);
+         }
+     }
+
+     return 0;
+ }
+
+
+ // Creates a single bond between two triangle meshes. EXACT mode reuses the
+ // full exact pipeline on a two-chunk set and takes the last produced bond;
+ // otherwise both meshes are cooked into convex hulls and a bond is forced
+ // with a 0.3 overlap fraction. Returns 0 on success, 1 when EXACT mode
+ // produced no bond.
+ int32_t BlastBondGenerator::createBondBetweenMeshes(const std::vector<Triangle>& meshA, const std::vector<Triangle>& meshB, NvBlastBond& resultBond, BondGenerationConfig conf)
+ {
+ float overlapping = 0.3;
+ if (conf.bondMode == BondGenerationConfig::EXACT)
+ {
+ // Run the exact pipeline with the two meshes as support chunks.
+ std::vector<std::vector<Triangle> > chunks;
+ chunks.push_back(meshA);
+ chunks.push_back(meshB);
+ std::vector<bool> isSupport(2, true);
+ std::vector<NvBlastBondDesc> desc;
+ createFullBondListExact(chunks, isSupport, desc, conf);
+ if (desc.size() > 0)
+ {
+ resultBond = desc.back().bond;
+ }
+ else
+ {
+ return 1;
+ }
+ return 0;
+ }
+
+ // Non-exact path: flatten both meshes into point clouds.
+ std::vector<PxVec3> chunksPoints1(meshA.size() * 3);
+ std::vector<PxVec3> chunksPoints2(meshB.size() * 3);
+
+ int32_t sp = 0;
+ for (uint32_t i = 0; i < meshA.size(); ++i)
+ {
+ chunksPoints1[sp++] = meshA[i].a.p;
+ chunksPoints1[sp++] = meshA[i].b.p;
+ chunksPoints1[sp++] = meshA[i].c.p;
+#ifdef DEBUG_OUTPUT
+ meshBuffer.push_back(meshA[i].a.p);
+ meshBuffer.push_back(meshA[i].b.p);
+ meshBuffer.push_back(meshA[i].c.p);
+#endif
+
+
+ }
+ sp = 0;
+ for (uint32_t i = 0; i < meshB.size(); ++i)
+ {
+ chunksPoints2[sp++] = meshB[i].a.p;
+ chunksPoints2[sp++] = meshB[i].b.p;
+ chunksPoints2[sp++] = meshB[i].c.p;
+#ifdef DEBUG_OUTPUT
+ meshBuffer.push_back(meshB[i].a.p);
+ meshBuffer.push_back(meshB[i].b.p);
+ meshBuffer.push_back(meshB[i].c.p);
+#endif
+ }
+
+
+ // Cook convex hulls for both point clouds.
+ Nv::Blast::ConvexMeshBuilder builder(mPxCooking, mPxInsertionCallback);
+
+ CollisionHull cHull[2];
+
+ builder.buildCollisionGeometry(chunksPoints1, cHull[0]);
+ builder.buildCollisionGeometry(chunksPoints2, cHull[1]);
+
+ std::vector<PxVec3> hullPoints[2];
+ hullPoints[0].resize(cHull[0].points.size());
+ hullPoints[1].resize(cHull[1].points.size());
+
+
+ // Hull points and bounds feed the forced-bond computation.
+ PxBounds3 bb[2];
+ bb[0].setEmpty();
+ bb[1].setEmpty();
+
+ for (uint32_t cv = 0; cv < 2; ++cv)
+ {
+ for (uint32_t i = 0; i < cHull[cv].points.size(); ++i)
+ {
+ hullPoints[cv][i].x = cHull[cv].points[i].x;
+ hullPoints[cv][i].y = cHull[cv].points[i].y;
+ hullPoints[cv][i].z = cHull[cv].points[i].z;
+ bb[cv].include(hullPoints[cv][i]);
+ }
+ }
+ return createBondForcedInternal(hullPoints[0], hullPoints[1], cHull[0], cHull[1], bb[0], bb[1], resultBond, overlapping);
+ }
+
+
+
+ }
+}
diff --git a/NvBlast/sdk/extensions/authoring/source/NvBlastExtAuthoringBooleanTool.cpp b/NvBlast/sdk/extensions/authoring/source/NvBlastExtAuthoringBooleanTool.cpp
new file mode 100644
index 0000000..b5030d7
--- /dev/null
+++ b/NvBlast/sdk/extensions/authoring/source/NvBlastExtAuthoringBooleanTool.cpp
@@ -0,0 +1,1351 @@
+/*
+* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#include "NvBlastExtAuthoringBooleanTool.h"
+#include "NvBlastExtAuthoringMesh.h"
+#include "NvBlastExtAuthoringAccelerator.h"
+
+#include <math.h>
+#include <set>
+#include <algorithm>
+
+using physx::PxVec3;
+using physx::PxVec2;
+using physx::PxBounds3;
+
+
+namespace Nv
+{
+namespace Blast
+{
+
+/* Linear interpolation of vectors */
+
+// Componentwise linear interpolation: out = a + (b - a) * t.
+NV_FORCE_INLINE void vec3Lerp(const PxVec3& a, const PxVec3& b, PxVec3& out, float t)
+{
+    out = (b - a) * t + a;
+}
+
+// Componentwise linear interpolation: out = a + (b - a) * t.
+NV_FORCE_INLINE void vec2Lerp(const PxVec2& a, const PxVec2& b, PxVec2& out, float t)
+{
+    out = (b - a) * t + a;
+}
+
+
+// Appends vertex p to the aggregate and returns its index. Despite the
+// name, this implementation performs no duplicate check.
+NV_FORCE_INLINE int32_t BooleanEvaluator::addIfNotExist(Vertex& p)
+{
+    const int32_t newIndex = static_cast<int32_t>(mVerticesAggregate.size());
+    mVerticesAggregate.push_back(p);
+    return newIndex;
+}
+
+// Appends edge ed to the aggregate. Despite the name, this implementation
+// performs no validity check.
+NV_FORCE_INLINE void BooleanEvaluator::addEdgeIfValid(EdgeWithParent& ed)
+{
+ mEdgeAggregate.push_back(ed);
+}
+
+/**
+Vertex level shadowing functions
+*/
+// Returns 1 when b is not to the left of a along x (b "shadows" a), else 0.
+NV_FORCE_INLINE int32_t vertexShadowing(const PxVec3& a, const PxVec3& b)
+{
+    if (b.x >= a.x)
+    {
+        return 1;
+    }
+    return 0;
+}
+/**
+Vertex-edge status functions
+*/
+// Signed 0->1 vertex-edge status: +1 when only the edge end shadows p,
+// -1 when only the edge start does, 0 when both or neither do.
+NV_FORCE_INLINE int32_t veStatus01(const PxVec3& sEdge, const PxVec3& eEdge, const PxVec3& p)
+{
+ return vertexShadowing(p, eEdge) - vertexShadowing(p, sEdge);
+}
+
+// Signed 1->0 vertex-edge status: mirror of veStatus01 with the shadowing
+// direction reversed (p shadowing the edge endpoints).
+NV_FORCE_INLINE int32_t veStatus10(const PxVec3& sEdge, const PxVec3& eEdge, const PxVec3& p)
+{
+ return -vertexShadowing(eEdge, p) + vertexShadowing(sEdge, p);
+}
+
+/**
+Vertex-edge shadowing functions
+*/
+// Vertex-edge shadowing in the 0->1 direction. When the edge's status for p
+// is non-zero, reconstructs the point of the edge on the vertical line
+// through p (clamped to the endpoints, with interpolated position/normal/uv)
+// and reports it via onEdgePoint / hasOnEdge. Returns the winding
+// contribution when that point is not below p in y, otherwise 0.
+int32_t shadowing01(const Vertex& sEdge, const Vertex& eEdge, const PxVec3& p, Vertex& onEdgePoint, bool& hasOnEdge)
+{
+    const int32_t winding = veStatus01(sEdge.p, eEdge.p, p);
+    if (winding == 0)
+    {
+        hasOnEdge = false;
+        return 0;
+    }
+    const float t = (p.x - sEdge.p.x) / (eEdge.p.x - sEdge.p.x);
+    if (t >= 1)
+    {
+        onEdgePoint = eEdge;
+    }
+    else if (t <= 0)
+    {
+        onEdgePoint = sEdge;
+    }
+    else
+    {
+        // Interpolate all vertex attributes to the crossing point.
+        vec3Lerp(sEdge.p, eEdge.p, onEdgePoint.p, t);
+        vec3Lerp(sEdge.n, eEdge.n, onEdgePoint.n, t);
+        vec2Lerp(sEdge.uv[0], eEdge.uv[0], onEdgePoint.uv[0], t);
+    }
+    hasOnEdge = true;
+    return (onEdgePoint.p.y >= p.y) ? winding : 0;
+}
+// Vertex-edge shadowing in the 1->0 direction: mirror of shadowing01, but
+// uses veStatus10 and accepts the crossing only when the on-edge point lies
+// strictly below p in y.
+int32_t shadowing10(const Vertex& sEdge, const Vertex& eEdge, const PxVec3& p, Vertex& onEdgePoint, bool& hasOnEdge)
+{
+    const int32_t winding = veStatus10(sEdge.p, eEdge.p, p);
+    if (winding == 0)
+    {
+        hasOnEdge = false;
+        return 0;
+    }
+    const float t = (p.x - sEdge.p.x) / (eEdge.p.x - sEdge.p.x);
+    if (t >= 1)
+    {
+        onEdgePoint = eEdge;
+    }
+    else if (t <= 0)
+    {
+        onEdgePoint = sEdge;
+    }
+    else
+    {
+        // Interpolate all vertex attributes to the crossing point.
+        vec3Lerp(sEdge.p, eEdge.p, onEdgePoint.p, t);
+        vec3Lerp(sEdge.n, eEdge.n, onEdgePoint.n, t);
+        vec2Lerp(sEdge.uv[0], eEdge.uv[0], onEdgePoint.uv[0], t);
+    }
+    hasOnEdge = true;
+    return (onEdgePoint.p.y < p.y) ? winding : 0;
+}
+
+// Position-only 0->1 shadowing: winding contribution of edge (sEdge, eEdge)
+// for p, kept only when the clamped on-edge point is not below p in y.
+int32_t shadowing01(const PxVec3& sEdge, const PxVec3& eEdge, const PxVec3& p)
+{
+    const int32_t winding = veStatus01(sEdge, eEdge, p);
+    if (winding == 0)
+    {
+        return 0;
+    }
+    const float t = (p.x - sEdge.x) / (eEdge.x - sEdge.x);
+    PxVec3 onEdgePoint;
+    if (t >= 1)
+    {
+        onEdgePoint = eEdge;
+    }
+    else if (t <= 0)
+    {
+        onEdgePoint = sEdge;
+    }
+    else
+    {
+        vec3Lerp(sEdge, eEdge, onEdgePoint, t);
+    }
+    return (onEdgePoint.y >= p.y) ? winding : 0;
+}
+
+// Position-only 1->0 shadowing: winding contribution of edge (sEdge, eEdge)
+// for p, kept only when the clamped on-edge point is strictly below p in y.
+int32_t shadowing10(const PxVec3& sEdge, const PxVec3& eEdge, const PxVec3& p)
+{
+    const int32_t winding = veStatus10(sEdge, eEdge, p);
+    if (winding == 0)
+    {
+        return 0;
+    }
+    const float t = (p.x - sEdge.x) / (eEdge.x - sEdge.x);
+    PxVec3 onEdgePoint;
+    if (t >= 1)
+    {
+        onEdgePoint = eEdge;
+    }
+    else if (t <= 0)
+    {
+        onEdgePoint = sEdge;
+    }
+    else
+    {
+        vec3Lerp(sEdge, eEdge, onEdgePoint, t);
+    }
+    return (onEdgePoint.y < p.y) ? winding : 0;
+}
+
+/**
+Vertex-facet shadowing functions
+*/
+
+// Vertex-facet status in the 0->2 direction: sums the negated 0->1 edge
+// shadowing over the facet boundary. Each time an edge yields an on-edge
+// crossing, the last two crossing points are kept in out1 (most recent) and
+// out2 (previous) for later interpolation.
+int32_t vfStatus02(const PxVec3& p, const Vertex* points, const Edge* edges, int32_t edgesCount, Vertex& out1, Vertex& out2)
+{
+    int32_t status = 0;
+    Vertex crossing;
+    bool hasCrossing = false;
+    for (int32_t i = 0; i < edgesCount; ++i, ++edges)
+    {
+        status -= shadowing01(points[edges->s], points[edges->e], p, crossing, hasCrossing);
+        if (hasCrossing)
+        {
+            out2 = out1;
+            out1 = crossing;
+        }
+    }
+    return status;
+}
+
+
+// Vertex-facet shadowing in the 0->2 direction. When the facet status for p
+// is non-zero, reconstructs the facet point vertically aligned with p by
+// interpolating between the two boundary crossings (dominant-axis parameter,
+// clamped to [0, 1]) and reports it via onFacetPoint / hasOnFacetPoint.
+// Returns the status when that point is at or above p in z, otherwise 0.
+// FIX: uses fabsf instead of abs -- plain abs can bind to the integer
+// overload for these float operands, truncating the comparison.
+int32_t shadowing02(const PxVec3& p, const Vertex* points, const Edge* edges, int edgesCount, bool& hasOnFacetPoint, Vertex& onFacetPoint)
+{
+    Vertex p1, p2;
+    int32_t stat = vfStatus02(p, points, edges, edgesCount, p1, p2);
+    float z = 0;
+    hasOnFacetPoint = false;
+    if (stat != 0)
+    {
+        PxVec3 vc = p2.p - p1.p;
+        float t = 0;
+        // Parameterize along the dominant axis of the crossing segment.
+        t = (fabsf(vc.x) > fabsf(vc.y)) ? (p.x - p1.p.x) / vc.x : (p.y - p1.p.y) / vc.y;
+        t = (t < 0) ? 0 : t;
+        t = (t > 1) ? 1 : t;
+        z = t * vc.z + p1.p.z;
+
+        hasOnFacetPoint = true;
+        onFacetPoint.p.x = p.x;
+        onFacetPoint.p.y = p.y;
+        onFacetPoint.p.z = z;
+
+        vec2Lerp(p1.uv[0], p2.uv[0], onFacetPoint.uv[0], t);
+        vec3Lerp(p1.n, p2.n, onFacetPoint.n, t);
+
+        if (z >= p.z)
+        {
+            return stat;
+        }
+    }
+    return 0;
+}
+
+// Vertex-facet status in the 2->0 direction: sums the 1->0 edge shadowing
+// over the facet boundary, keeping the last two on-edge crossing points in
+// out1 (most recent) and out2 (previous).
+int32_t vfStatus20(const PxVec3& p, const Vertex* points, const Edge* edges, int32_t edgesCount, Vertex& out1, Vertex& out2)
+{
+    int32_t status = 0;
+    Vertex crossing;
+    bool hasCrossing = false;
+    for (int32_t i = 0; i < edgesCount; ++i, ++edges)
+    {
+        status += shadowing10(points[edges->s], points[edges->e], p, crossing, hasCrossing);
+        if (hasCrossing)
+        {
+            out2 = out1;
+            out1 = crossing;
+        }
+    }
+    return status;
+}
+
+// Vertex-facet shadowing in the 2->0 direction: mirror of shadowing02 using
+// vfStatus20, accepting the facet point only when it lies strictly below p
+// in z.
+// FIX: uses fabsf instead of abs -- plain abs can bind to the integer
+// overload for these float operands, truncating the comparison.
+int32_t shadowing20(const PxVec3& p, const Vertex* points, const Edge* edges, int edgesCount, bool& hasOnFacetPoint, Vertex& onFacetPoint)
+{
+    Vertex p1, p2;
+    int32_t stat = vfStatus20(p, points, edges, edgesCount, p1, p2);
+    hasOnFacetPoint = false;
+    if (stat != 0)
+    {
+        PxVec3 vc = p2.p - p1.p;
+        float t = 0;
+        // Parameterize along the dominant axis of the crossing segment.
+        t = (fabsf(vc.x) > fabsf(vc.y)) ? (p.x - p1.p.x) / vc.x : (p.y - p1.p.y) / vc.y;
+        t = (t < 0) ? 0 : t;
+        t = (t > 1) ? 1 : t;
+
+        hasOnFacetPoint = true;
+        onFacetPoint.p.x = p.x;
+        onFacetPoint.p.y = p.y;
+
+        onFacetPoint.p.z = t * vc.z + p1.p.z;
+
+        vec2Lerp(p1.uv[0], p2.uv[0], onFacetPoint.uv[0], t);
+        vec3Lerp(p1.n, p2.n, onFacetPoint.n, t);
+
+        if (onFacetPoint.p.z < p.z)
+        {
+            return stat;
+        }
+    }
+    return 0;
+}
+
+
+// Net crossing status of edge A against edge B: combines the 0->1 shadowing
+// of A's endpoints against B with the 1->0 shadowing of B's endpoints
+// against A; a non-zero result means the projected edges cross.
+NV_FORCE_INLINE int32_t edgesCrossCheck(const PxVec3& eAs, const PxVec3& eAe, const PxVec3& eBs, const PxVec3& eBe)
+{
+ return shadowing01(eBs, eBe, eAe) - shadowing01(eBs, eBe, eAs) + shadowing10(eAs, eAe, eBe) - shadowing10(eAs, eAe, eBs);
+}
+
+// Computes the intersection of edges A and B in projection. When the edges
+// cross (edgesCrossCheck != 0), collects one shadowing point pair per edge
+// endpoint, classifies each as "a-shadowing" or "b-shadowing", then
+// interpolates between the two pairs to produce intersectionA (attributes
+// from edge A's side) and intersectionB (attributes from edge B's side).
+// Returns the crossing status; hasPoints reports whether the intersection
+// points were produced.
+int32_t edgesIntersection(const Vertex& eAs, const Vertex& eAe, const Vertex& eBs, const Vertex& eBe, Vertex& intersectionA, Vertex& intersectionB, bool& hasPoints)
+{
+ int32_t status = edgesCrossCheck(eAs.p, eAe.p, eBs.p, eBe.p);
+ hasPoints = false;
+ if (status == 0)
+ return 0;
+ Vertex tempPoint;
+
+ // NOTE(review): if none of the four tests below fires, these pairs stay
+ // default-initialized and the interpolation reads indeterminate data --
+ // confirm the crossing status guarantees at least one of each.
+ Vertex bShadowingPair[2];
+ Vertex aShadowingPair[2];
+ bool hasOnEdge = false;
+ // Test 1: B's start against edge A.
+ int32_t shadowingType = shadowing10(eAs, eAe, eBs.p, tempPoint, hasOnEdge);
+
+ bool aShadowing = false;
+ bool bShadowing = false;
+
+
+ if (shadowingType == 0 && hasOnEdge)
+ {
+ aShadowing = true;
+ aShadowingPair[0] = eBs;
+ aShadowingPair[1] = tempPoint;
+ }
+ else
+ {
+ if (shadowingType == 1 || shadowingType == -1)
+ {
+ bShadowing = true;
+ bShadowingPair[0] = eBs;
+ bShadowingPair[1] = tempPoint;
+ }
+ }
+
+ // Test 2: B's end against edge A.
+ shadowingType = shadowing10(eAs, eAe, eBe.p, tempPoint, hasOnEdge);
+
+ if (shadowingType == 0 && !aShadowing && hasOnEdge)
+ {
+ aShadowing = true;
+ aShadowingPair[0] = eBe;
+ aShadowingPair[1] = tempPoint;
+ }
+ else
+ {
+ if ((shadowingType == 1 || shadowingType == -1) && !bShadowing)
+ {
+ bShadowing = true;
+ bShadowingPair[0] = eBe;
+ bShadowingPair[1] = tempPoint;
+ }
+ }
+ // Test 3: A's end against edge B (note the pair slots are swapped:
+ // index 1 holds the A-side vertex, index 0 the on-edge point).
+ shadowingType = shadowing01(eBs, eBe, eAe.p, tempPoint, hasOnEdge);
+
+ if (shadowingType == 0 && !aShadowing && hasOnEdge)
+ {
+ aShadowing = true;
+ aShadowingPair[1] = eAe;
+ aShadowingPair[0] = tempPoint;
+ }
+ else
+ {
+ if ((shadowingType == 1 || shadowingType == -1) && !bShadowing)
+ {
+ bShadowing = true;
+ bShadowingPair[1] = eAe;
+ bShadowingPair[0] = tempPoint;
+ }
+ }
+
+ // Test 4: A's start against edge B.
+ shadowingType = shadowing01(eBs, eBe, eAs.p, tempPoint, hasOnEdge);
+
+ if (shadowingType == 0 && !aShadowing && hasOnEdge)
+ {
+ aShadowing = true;
+ aShadowingPair[1] = eAs;
+ aShadowingPair[0] = tempPoint;
+ }
+ else
+ {
+ if ((shadowingType == 1 || shadowingType == -1) && !bShadowing)
+ {
+ bShadowing = true;
+ bShadowingPair[1] = eAs;
+ bShadowingPair[0] = tempPoint;
+ }
+ }
+ // Interpolation parameter from the y-gaps of the two pairs; falls back
+ // to 0 when the b-pair gap is not positive.
+ float deltaPlus = bShadowingPair[0].p.y - bShadowingPair[1].p.y;
+ float deltaMinus = aShadowingPair[0].p.y - aShadowingPair[1].p.y;
+ float div = 0;
+ if (deltaPlus > 0)
+ div = deltaPlus / (deltaPlus - deltaMinus);
+ else
+ div = 0;
+
+ // intersectionA: blended from the pairs' index-1 entries (A side);
+ // intersectionB: same xy, with z/normal/uv blended from index 0 (B side).
+ intersectionA.p = bShadowingPair[1].p - div * (bShadowingPair[1].p - aShadowingPair[1].p);
+ intersectionA.n = bShadowingPair[1].n - div * (bShadowingPair[1].n - aShadowingPair[1].n);
+ intersectionA.uv[0] = bShadowingPair[1].uv[0] - (bShadowingPair[1].uv[0] - aShadowingPair[1].uv[0]) * div;
+ intersectionB.p = intersectionA.p;
+ intersectionB.p.z = bShadowingPair[0].p.z - div * (bShadowingPair[0].p.z - aShadowingPair[0].p.z);
+ intersectionB.n = bShadowingPair[0].n - div * (bShadowingPair[0].n - aShadowingPair[0].n);
+ intersectionB.uv[0] = bShadowingPair[0].uv[0] - (bShadowingPair[0].uv[0] - aShadowingPair[0].uv[0]) * div;
+
+ hasPoints = true;
+ return status;
+}
+
+/**
+    Intersects two edges and accepts the result only when the point on edge B is not
+    below the point on edge A (z comparison). Returns the crossing status or 0.
+*/
+NV_FORCE_INLINE int32_t edgeEdgeShadowing(const Vertex& eAs, const Vertex& eAe, const Vertex& eBs, const Vertex& eBe, Vertex& intersectionA, Vertex& intersectionB, bool& hasPoints)
+{
+	int32_t status = edgesIntersection(eAs, eAe, eBs, eBe, intersectionA, intersectionB, hasPoints);
+	if (!hasPoints)
+	{
+		// intersectionA/B were not written by edgesIntersection (status is 0 in this
+		// case); comparing their z below would read uninitialized memory.
+		return 0;
+	}
+	if (intersectionB.p.z >= intersectionA.p.z)
+	{
+		return status;
+	}
+	return 0;
+}
+
+/**
+    Intersects the edge (edSt -> edEnd) of mesh A with one facet of mesh B, given by the
+    facet's vertex array, edge array and edge count. On a non-zero return, intersectionA
+    receives the interpolated point on the edge and intersectionB the matching point on
+    the facet. The accumulated status is the signed shadowing balance of the edge's
+    endpoints and its crossings with the facet boundary.
+    NOTE(review): callers pass edSt/edEnd in canonical order (see shouldSwap) and negate
+    the result when swapped - confirm when adding new call sites.
+*/
+int32_t edgeFacetIntersection12(const Vertex& edSt, const Vertex& edEnd, const Vertex* points, const Edge* edges, int edgesCount, Vertex& intersectionA, Vertex& intersectionB)
+{
+	int32_t status = 0;
+	Vertex p1, p2;
+	Vertex bShadowingPair[2];
+	Vertex aShadowingPair[2];
+	bool hasPoint = false;
+	// Classify both edge endpoints against the facet (vertex/facet shadowing).
+	int32_t shadowingType = shadowing02(edEnd.p, points, edges, edgesCount, hasPoint, p1);
+	status -= shadowingType;
+	bool aShadowing = false;
+	bool bShadowing = false;
+
+	if (shadowingType == 0 && hasPoint)
+	{
+		aShadowing = true;
+		aShadowingPair[0] = p1;
+		aShadowingPair[1] = edEnd;
+	}
+	else
+	{
+		if (shadowingType == 1 || shadowingType == -1)
+		{
+			bShadowing = true;
+			bShadowingPair[0] = p1;
+			bShadowingPair[1] = edEnd;
+		}
+	}
+
+	shadowingType = shadowing02(edSt.p, points, edges, edgesCount, hasPoint, p1);
+	status += shadowingType;
+	if (shadowingType == 0 && !aShadowing && hasPoint)
+	{
+		aShadowing = true;
+		aShadowingPair[0] = p1;
+		aShadowingPair[1] = edSt;
+	}
+	else
+	{
+		if ((shadowingType == 1 || shadowingType == -1) && !bShadowing)
+		{
+			bShadowing = true;
+			bShadowingPair[0] = p1;
+			bShadowingPair[1] = edSt;
+		}
+	}
+
+	// Classify the edge against every boundary edge of the facet (edge/edge shadowing).
+	for (int32_t ed = 0; ed < edgesCount; ++ed)
+	{
+		shadowingType = edgeEdgeShadowing(edSt, edEnd, points[edges[ed].s], points[edges[ed].e], p1, p2, hasPoint);
+		status -= shadowingType;
+		if (shadowingType == 0 && !aShadowing && hasPoint)
+		{
+			aShadowing = true;
+			aShadowingPair[0] = p2;
+			aShadowingPair[1] = p1;
+		}
+		else
+		{
+			if ((shadowingType == 1 || shadowingType == -1) && !bShadowing)
+			{
+				bShadowing = true;
+				bShadowingPair[0] = p2;
+				bShadowingPair[1] = p1;
+			}
+		}
+	}
+	if (status == 0)
+	{
+		return 0;
+	}
+	// Both a "shadowing" and a "non-shadowing" pair are required to interpolate.
+	if (!bShadowing || !aShadowing)
+	{
+		return 0;
+	}
+	// Interpolate the intersection point between the two shadowing pairs (along z).
+	float deltaPlus = bShadowingPair[0].p.z - bShadowingPair[1].p.z;
+	float div = 0;
+	if (deltaPlus != 0)
+	{
+		float deltaMinus = aShadowingPair[0].p.z - aShadowingPair[1].p.z;
+		div = deltaPlus / (deltaPlus - deltaMinus);
+	}
+	intersectionA.p = bShadowingPair[1].p - div * (bShadowingPair[1].p - aShadowingPair[1].p);
+	intersectionA.n = bShadowingPair[1].n - div * (bShadowingPair[1].n - aShadowingPair[1].n);
+	intersectionA.uv[0] = bShadowingPair[1].uv[0] - (bShadowingPair[1].uv[0] - aShadowingPair[1].uv[0]) * div;
+
+	intersectionB.p = intersectionA.p;
+	intersectionB.n = bShadowingPair[0].n - div * (bShadowingPair[0].n - aShadowingPair[0].n);
+	intersectionB.uv[0] = bShadowingPair[0].uv[0] - (bShadowingPair[0].uv[0] - aShadowingPair[0].uv[0]) * div;
+
+
+	return status;
+}
+
+
+/**
+    Mirror of edgeFacetIntersection12: intersects the edge (edSt -> edEnd) of mesh B with
+    one facet of mesh A. The shadowing predicates are applied in the opposite direction
+    (shadowing20, and the facet edges are passed as the first edge to edgeEdgeShadowing),
+    and the endpoint status contributions carry the opposite signs.
+*/
+int32_t edgeFacetIntersection21(const Vertex& edSt, const Vertex& edEnd, const Vertex* points, const Edge* edges, int edgesCount, Vertex& intersectionA, Vertex& intersectionB)
+{
+	int32_t status = 0;
+	Vertex p1, p2;
+
+	Vertex bShadowingPair[2];
+	Vertex aShadowingPair[2];
+	bool hasPoint = false;
+	// Classify both edge endpoints against the facet (facet/vertex shadowing).
+	int32_t shadowingType = shadowing20(edEnd.p, points, edges, edgesCount, hasPoint, p1);
+	status = shadowingType;
+	bool aShadowing = false;
+	bool bShadowing = false;
+	if (shadowingType == 0 && hasPoint)
+	{
+		aShadowing = true;
+		aShadowingPair[0] = edEnd;
+		aShadowingPair[1] = p1;
+	}
+	else
+	{
+		if (shadowingType == 1 || shadowingType == -1)
+		{
+			bShadowing = true;
+			bShadowingPair[0] = edEnd;
+			bShadowingPair[1] = p1;
+		}
+	}
+
+	shadowingType = shadowing20(edSt.p, points, edges, edgesCount, hasPoint, p1);
+	status -= shadowingType;
+	if (shadowingType == 0 && !aShadowing && hasPoint)
+	{
+		aShadowing = true;
+		aShadowingPair[0] = edSt;
+		aShadowingPair[1] = p1;
+	}
+	else
+	{
+		if ((shadowingType == 1 || shadowingType == -1) && !bShadowing)
+		{
+			bShadowing = true;
+			bShadowingPair[0] = edSt;
+			bShadowingPair[1] = p1;
+		}
+	}
+
+	// Classify the edge against every boundary edge of the facet (edge/edge shadowing).
+	for (int32_t ed = 0; ed < edgesCount; ++ed)
+	{
+		shadowingType = edgeEdgeShadowing(points[edges[ed].s], points[edges[ed].e], edSt, edEnd, p1, p2, hasPoint);
+		status -= shadowingType;
+		if (shadowingType == 0)
+		{
+			if (!aShadowing && hasPoint)
+			{
+				aShadowing = true;
+				aShadowingPair[0] = p2;
+				aShadowingPair[1] = p1;
+			}
+		}
+		else
+		{
+			if ((shadowingType == 1 || shadowingType == -1) && !bShadowing)
+			{
+				bShadowing = true;
+				bShadowingPair[0] = p2;
+				bShadowingPair[1] = p1;
+			}
+		}
+	}
+	if (status == 0)
+	{
+		return 0;
+	}
+	// Both a "shadowing" and a "non-shadowing" pair are required to interpolate.
+	if (!bShadowing || !aShadowing)
+	{
+		return 0;
+	}
+	// Interpolate the intersection point between the two shadowing pairs (along z).
+	float deltaPlus = bShadowingPair[0].p.z - bShadowingPair[1].p.z;
+	float div = 0;
+	if (deltaPlus != 0)
+	{
+		float deltaMinus = aShadowingPair[0].p.z - aShadowingPair[1].p.z;
+		div = deltaPlus / (deltaPlus - deltaMinus);
+	}
+	intersectionA.p = bShadowingPair[1].p - div * (bShadowingPair[1].p - aShadowingPair[1].p);
+	intersectionA.n = bShadowingPair[1].n - div * (bShadowingPair[1].n - aShadowingPair[1].n);
+	intersectionA.uv[0] = bShadowingPair[1].uv[0] - (bShadowingPair[1].uv[0] - aShadowingPair[1].uv[0]) * div;
+
+	intersectionB.p = intersectionA.p;
+	intersectionB.n = bShadowingPair[0].n - div * (bShadowingPair[0].n - aShadowingPair[0].n);
+	intersectionB.uv[0] = bShadowingPair[0].uv[0] - (bShadowingPair[0].uv[0] - aShadowingPair[0].uv[0]) * div;
+
+	return status;
+}
+
+/**
+    Accumulates the shadowing status (shadowing02) of point p against every facet of
+    mesh that the mesh-B accelerator reports for this point. A non-zero total marks
+    the point as shadowed by the mesh.
+*/
+int32_t BooleanEvaluator::vertexMeshStatus03(const PxVec3& p, Mesh* mesh)
+{
+	int32_t status = 0;
+	Vertex pnt;
+	bool hasPoint = false;
+	mAcceleratorB->setState(p);
+	for (int32_t facet = mAcceleratorB->getNextFacet(); facet != -1; facet = mAcceleratorB->getNextFacet())
+	{
+		Edge* facetEdges = mesh->getEdges() + mesh->getFacet(facet)->firstEdgeNumber;
+		status += shadowing02(p, mesh->getVertices(), facetEdges, mesh->getFacet(facet)->edgesCount, hasPoint, pnt);
+	}
+	return status;
+}
+
+/**
+    Accumulates the (negated) shadowing status (shadowing20) of point p against every
+    facet of mesh that the mesh-A accelerator reports for this point. A non-zero total
+    marks the point as shadowed by the mesh.
+*/
+int32_t BooleanEvaluator::vertexMeshStatus30(const PxVec3& p, Mesh* mesh)
+{
+	int32_t status = 0;
+	bool hasPoints = false;
+	Vertex point;
+	mAcceleratorA->setState(p);
+	for (int32_t facet = mAcceleratorA->getNextFacet(); facet != -1; facet = mAcceleratorA->getNextFacet())
+	{
+		Edge* facetEdges = mesh->getEdges() + mesh->getFacet(facet)->firstEdgeNumber;
+		status -= shadowing20(p, mesh->getVertices(), facetEdges, mesh->getFacet(facet)->edgesCount, hasPoints, point);
+	}
+	return status;
+}
+
+/**
+    Inclusion value for a mesh-A vertex: the constant mesh-A weight plus the
+    intersection weight scaled by the vertex's shadowing status.
+*/
+NV_FORCE_INLINE int32_t inclusionValue03(BooleanConf& conf, int32_t xValue)
+{
+	const int32_t intersectionTerm = conf.ci * xValue;
+	return conf.ca + intersectionTerm;
+}
+
+/**
+    Inclusion value for an edge/facet intersection point: only the intersection
+    weight applies (no per-mesh constant term).
+*/
+NV_FORCE_INLINE int32_t inclusionValueEdgeFace(BooleanConf& conf, int32_t xValue)
+{
+	return xValue * conf.ci;
+}
+
+/**
+    Inclusion value for a mesh-B vertex: the constant mesh-B weight plus the
+    intersection weight scaled by the vertex's shadowing status.
+*/
+NV_FORCE_INLINE int32_t inclusionValue30(BooleanConf& conf, int32_t xValue)
+{
+	const int32_t intersectionTerm = conf.ci * xValue;
+	return conf.cb + intersectionTerm;
+}
+
+/**
+    Orders vertices by ascending projection onto basePoint: a precedes b when
+    a.p.dot(basePoint) < b.p.dot(basePoint). Used to pair retained start/end
+    vertices along a common direction.
+*/
+struct VertexComparator
+{
+	VertexComparator(PxVec3 base = PxVec3()) : basePoint(base) {}
+	PxVec3 basePoint;
+	// const-qualified so the comparator can be used through const references
+	// and matches standard-algorithm comparator conventions.
+	bool operator()(const Vertex& a, const Vertex& b) const
+	{
+		return (b.p - a.p).dot(basePoint) > 0.0;
+	}
+};
+
+/**
+    Orders (vertexA, vertexB) pairs by ascending projection of the first vertex
+    onto basePoint. Used to pair retained start/end vertex pairs along a common
+    direction in the face/face intersection passes.
+*/
+struct VertexPairComparator
+{
+	VertexPairComparator(PxVec3 base = PxVec3()) : basePoint(base) {}
+	PxVec3 basePoint;
+	// const-qualified so the comparator can be used through const references
+	// and matches standard-algorithm comparator conventions.
+	bool operator()(const std::pair<Vertex, Vertex>& a, const std::pair<Vertex, Vertex>& b) const
+	{
+		return (b.first.p - a.first.p).dot(basePoint) > 0.0;
+	}
+};
+
+/**
+    Tests whether point is contained in msh using a temporary brute-force accelerator.
+    Returns a non-zero status when the point is inside, 0 otherwise (or when msh is null).
+*/
+int32_t BooleanEvaluator::isPointContainedInMesh(Mesh* msh, const PxVec3& point)
+{
+	if (msh == nullptr)
+	{
+		return 0;
+	}
+	DummyAccelerator dmAccel(msh->getFacetCount());
+	mAcceleratorA = &dmAccel;
+	int32_t result = vertexMeshStatus30(point, msh);
+	mAcceleratorA = nullptr;	// dmAccel is stack-allocated; don't leave a dangling pointer behind
+	return result;
+}
+
+/**
+    Tests whether point is contained in msh using the caller-provided accelerator.
+    Returns a non-zero status when the point is inside, 0 otherwise (or when msh is null).
+*/
+int32_t BooleanEvaluator::isPointContainedInMesh(Mesh* msh, SpatialAccelerator* spAccel, const PxVec3& point)
+{
+	if (msh == nullptr)
+	{
+		return 0;
+	}
+	mAcceleratorA = spAccel;
+	int32_t result = vertexMeshStatus30(point, msh);
+	mAcceleratorA = nullptr;	// do not retain the caller-owned accelerator past this call
+	return result;
+}
+
+
+/**
+    Lexicographic (x, y, z) ordering test: returns true when a should come after b.
+    Used to process edge endpoints in a canonical order regardless of edge direction.
+*/
+bool shouldSwap(const PxVec3& a, const PxVec3& b)
+{
+	if (a.x < b.x) { return false; }
+	if (a.x > b.x) { return true; }
+	if (a.y < b.y) { return false; }
+	if (a.y > b.y) { return true; }
+	if (a.z < b.z) { return false; }
+	return a.z > b.z;
+}
+
+/**
+    Builds the intersection segments between facets of mesh A and mesh B. For every
+    facet pair reported by the acceleration structure, each edge of the A facet is
+    intersected with the B facet and vice versa; retained start/end points are paired
+    up along a common direction and emitted as new edges for both parent facets.
+    Per-edge intersection records are also stored in mEdgeFacetIntersectionData12/21
+    for the later retained-parts passes.
+*/
+void BooleanEvaluator::buildFaceFaceIntersections(BooleanConf mode)
+{
+	int32_t statusValue = 0;
+	int32_t inclusionValue = 0;
+
+	std::vector<std::pair<Vertex, Vertex> > retainedStarts;
+	std::vector<std::pair<Vertex, Vertex>> retainedEnds;
+	VertexPairComparator comp;
+
+	Vertex newPointA;
+	Vertex newPointB;
+
+	Vertex* meshAPoints = mMeshA->getVertices();
+	Vertex* meshBPoints = mMeshB->getVertices();
+	EdgeWithParent newEdge;
+	mEdgeFacetIntersectionData12.clear();
+	mEdgeFacetIntersectionData21.clear();
+
+	mEdgeFacetIntersectionData12.resize(mMeshA->getFacetCount());
+	mEdgeFacetIntersectionData21.resize(mMeshB->getFacetCount());
+
+	for (uint32_t facetB = 0; facetB < mMeshB->getFacetCount(); ++facetB)
+	{
+		mAcceleratorA->setState(mMeshB->getVertices(), mMeshB->getEdges(), *mMeshB->getFacet(facetB));
+		int32_t facetA = mAcceleratorA->getNextFacet();
+		while (facetA != -1)
+		{
+			Edge* facetBEdges = mMeshB->getEdges() + mMeshB->getFacet(facetB)->firstEdgeNumber;
+			Edge* facetAEdges = mMeshA->getEdges() + mMeshA->getFacet(facetA)->firstEdgeNumber;
+			Edge* fbe = facetBEdges;
+			Edge* fae = facetAEdges;
+			retainedStarts.clear();
+			retainedEnds.clear();
+			PxVec3 compositeEndPoint(0, 0, 0);
+			PxVec3 compositeStartPoint(0, 0, 0);
+			uint32_t facetAEdgeCount = mMeshA->getFacet(facetA)->edgesCount;
+			uint32_t facetBEdgeCount = mMeshB->getFacet(facetB)->edgesCount;
+			int32_t ic = 0;
+			// Intersect every edge of facet A with facet B. Edges are canonicalized
+			// with shouldSwap; the status sign is flipped when endpoints are swapped.
+			for (uint32_t i = 0; i < facetAEdgeCount; ++i)
+			{
+				if (shouldSwap(meshAPoints[fae->e].p, meshAPoints[fae->s].p))
+				{
+					statusValue = -edgeFacetIntersection12(meshAPoints[fae->e], meshAPoints[fae->s], mMeshB->getVertices(), facetBEdges, facetBEdgeCount, newPointA, newPointB);
+				}
+				else
+				{
+					statusValue = edgeFacetIntersection12(meshAPoints[fae->s], meshAPoints[fae->e], mMeshB->getVertices(), facetBEdges, facetBEdgeCount, newPointA, newPointB);
+				}
+				inclusionValue = -inclusionValueEdgeFace(mode, statusValue);
+				if (inclusionValue > 0)
+				{
+					for (ic = 0; ic < inclusionValue; ++ic)
+					{
+						retainedEnds.push_back(std::make_pair(newPointA, newPointB));
+						compositeEndPoint += newPointA.p;
+					}
+					mEdgeFacetIntersectionData12[facetA].push_back(EdgeFacetIntersectionData(i, statusValue, newPointA));
+				}
+				if (inclusionValue < 0)
+				{
+					for (ic = 0; ic < -inclusionValue; ++ic)
+					{
+						retainedStarts.push_back(std::make_pair(newPointA, newPointB));
+						compositeStartPoint += newPointA.p;
+					}
+					mEdgeFacetIntersectionData12[facetA].push_back(EdgeFacetIntersectionData(i, statusValue, newPointA));
+				}
+				fae++;
+			}
+			// Intersect every edge of facet B with facet A (mirror pass).
+			for (uint32_t i = 0; i < facetBEdgeCount; ++i)
+			{
+				if (shouldSwap(meshBPoints[fbe->e].p, meshBPoints[fbe->s].p))
+				{
+					statusValue = -edgeFacetIntersection21(meshBPoints[(fbe)->e], meshBPoints[(fbe)->s], mMeshA->getVertices(), facetAEdges, facetAEdgeCount, newPointA, newPointB);
+				}
+				else
+				{
+					statusValue = edgeFacetIntersection21(meshBPoints[(fbe)->s], meshBPoints[(fbe)->e], mMeshA->getVertices(), facetAEdges, facetAEdgeCount, newPointA, newPointB);
+				}
+				inclusionValue = inclusionValueEdgeFace(mode, statusValue);
+				if (inclusionValue > 0)
+				{
+					for (ic = 0; ic < inclusionValue; ++ic)
+					{
+						retainedEnds.push_back(std::make_pair(newPointA, newPointB));
+						compositeEndPoint += newPointB.p;
+					}
+					mEdgeFacetIntersectionData21[facetB].push_back(EdgeFacetIntersectionData( i, statusValue, newPointB));
+				}
+				if (inclusionValue < 0)
+				{
+					for (ic = 0; ic < -inclusionValue; ++ic)
+					{
+						retainedStarts.push_back(std::make_pair(newPointA, newPointB));
+						compositeStartPoint += newPointB.p;
+					}
+					mEdgeFacetIntersectionData21[facetB].push_back(EdgeFacetIntersectionData(i, statusValue, newPointB));
+				}
+				fbe++;
+			}
+			if (retainedStarts.size() != retainedEnds.size())
+			{
+				NVBLAST_LOG_ERROR(mLoggingCallback, "Not equal number of starting and ending vertices! Probably input mesh has open edges.");
+				return;
+			}
+			// Pair starts with ends by sorting both along the composite direction.
+			if (retainedStarts.size() > 1)
+			{
+				comp.basePoint = compositeEndPoint - compositeStartPoint;
+				std::sort(retainedStarts.begin(), retainedStarts.end(), comp);
+				std::sort(retainedEnds.begin(), retainedEnds.end(), comp);
+			}
+			// Emit one edge per pair for the A parent facet and one (reversed) for the
+			// B parent facet; B facet indices are offset by A's facet count.
+			for (uint32_t rv = 0; rv < retainedStarts.size(); ++rv)
+			{
+				newEdge.s = addIfNotExist(retainedStarts[rv].first);
+				newEdge.e = addIfNotExist(retainedEnds[rv].first);
+				newEdge.parent = facetA;
+				addEdgeIfValid(newEdge);
+				newEdge.parent = facetB + mMeshA->getFacetCount();
+				newEdge.e = addIfNotExist(retainedStarts[rv].second);
+				newEdge.s = addIfNotExist(retainedEnds[rv].second);
+				addEdgeIfValid(newEdge);
+			}
+			facetA = mAcceleratorA->getNextFacet();
+		} // while (*iter != -1)
+
+	} // for (uint32_t facetB = 0; facetB < mMeshB->getFacetCount(); ++facetB)
+
+
+
+}
+
+
+/**
+    Fast-path variant of buildFaceFaceIntersections used by performFastCutting: only
+    facet 0 of mesh B is tested against every facet of mesh A (valid when mesh B is a
+    cutting box), and the mirror B-against-A pass is skipped entirely.
+*/
+void BooleanEvaluator::buildFastFaceFaceIntersection(BooleanConf mode)
+{
+	int32_t statusValue = 0;
+	int32_t inclusionValue = 0;
+
+	std::vector<std::pair<Vertex, Vertex> > retainedStarts;
+	std::vector<std::pair<Vertex, Vertex>> retainedEnds;
+	VertexPairComparator comp;
+
+	Vertex newPointA;
+	Vertex newPointB;
+
+	Vertex* meshAPoints = mMeshA->getVertices();
+	EdgeWithParent newEdge;
+
+	mEdgeFacetIntersectionData12.clear();
+	mEdgeFacetIntersectionData21.clear();
+
+	mEdgeFacetIntersectionData12.resize(mMeshA->getFacetCount());
+	mEdgeFacetIntersectionData21.resize(mMeshB->getFacetCount());
+
+	for (uint32_t facetA = 0; facetA < mMeshA->getFacetCount(); ++facetA)
+	{
+		Edge* facetAEdges = mMeshA->getEdges() + mMeshA->getFacet(facetA)->firstEdgeNumber;
+		// Fast path: only facet 0 of mesh B (the cutting plane) is considered.
+		int32_t facetB = 0;
+		Edge* facetBEdges = mMeshB->getEdges() + mMeshB->getFacet(facetB)->firstEdgeNumber;
+		Edge* fae = facetAEdges;
+		retainedStarts.clear();
+		retainedEnds.clear();
+		PxVec3 compositeEndPoint(0, 0, 0);
+		PxVec3 compositeStartPoint(0, 0, 0);
+		uint32_t facetAEdgeCount = mMeshA->getFacet(facetA)->edgesCount;
+		uint32_t facetBEdgeCount = mMeshB->getFacet(facetB)->edgesCount;
+		int32_t ic = 0;
+		for (uint32_t i = 0; i < facetAEdgeCount; ++i)
+		{
+			if (shouldSwap(meshAPoints[fae->e].p, meshAPoints[fae->s].p))
+			{
+				statusValue = -edgeFacetIntersection12(meshAPoints[fae->e], meshAPoints[fae->s], mMeshB->getVertices(), facetBEdges, facetBEdgeCount, newPointA, newPointB);
+			}
+			else
+			{
+				statusValue = edgeFacetIntersection12(meshAPoints[fae->s], meshAPoints[fae->e], mMeshB->getVertices(), facetBEdges, facetBEdgeCount, newPointA, newPointB);
+			}
+			inclusionValue = -inclusionValueEdgeFace(mode, statusValue);
+			if (inclusionValue > 0)
+			{
+				for (ic = 0; ic < inclusionValue; ++ic)
+				{
+					retainedEnds.push_back(std::make_pair(newPointA, newPointB));
+					compositeEndPoint += newPointA.p;
+				}
+				mEdgeFacetIntersectionData12[facetA].push_back(EdgeFacetIntersectionData(i, statusValue, newPointA));
+			}
+			if (inclusionValue < 0)
+			{
+				for (ic = 0; ic < -inclusionValue; ++ic)
+				{
+					retainedStarts.push_back(std::make_pair(newPointA, newPointB));
+					compositeStartPoint += newPointA.p;
+				}
+				mEdgeFacetIntersectionData12[facetA].push_back(EdgeFacetIntersectionData(i, statusValue, newPointA));
+			}
+			fae++;
+		}
+		if (retainedStarts.size() != retainedEnds.size())
+		{
+			NVBLAST_LOG_ERROR(mLoggingCallback, "Not equal number of starting and ending vertices! Probably input mesh has open edges.");
+			return;
+		}
+		// Pair starts with ends by sorting both along the composite direction.
+		if (retainedStarts.size() > 1)
+		{
+			comp.basePoint = compositeEndPoint - compositeStartPoint;
+			std::sort(retainedStarts.begin(), retainedStarts.end(), comp);
+			std::sort(retainedEnds.begin(), retainedEnds.end(), comp);
+		}
+		for (uint32_t rv = 0; rv < retainedStarts.size(); ++rv)
+		{
+			newEdge.s = addIfNotExist(retainedStarts[rv].first);
+			newEdge.e = addIfNotExist(retainedEnds[rv].first);
+			newEdge.parent = facetA;
+			addEdgeIfValid(newEdge);
+			newEdge.parent = facetB + mMeshA->getFacetCount();
+			newEdge.e = addIfNotExist(retainedStarts[rv].second);
+			newEdge.s = addIfNotExist(retainedEnds[rv].second);
+			addEdgeIfValid(newEdge);
+		}
+	}
+
+}
+
+
+
+/**
+    Collects the retained parts of mesh A's facets: classifies every edge endpoint
+    against mesh B (skipping the containment test when the point is outside B's
+    bounding box), merges in the edge/facet intersection points recorded by the
+    face/face pass, pairs up start/end vertices along a common direction, and emits
+    the retained edges with their A facet as parent.
+*/
+void BooleanEvaluator::collectRetainedPartsFromA(BooleanConf mode)
+{
+
+	int32_t statusValue = 0;
+	int32_t inclusionValue = 0;
+	Vertex* vertices = mMeshA->getVertices();
+	Vertex newPoint;
+	VertexComparator comp;
+	PxBounds3& bMeshBoudning = mMeshB->getBoundingBox();
+	Edge* facetEdges = mMeshA->getEdges();
+	std::vector<Vertex> retainedStartVertices;
+	std::vector<Vertex> retainedEndVertices;
+	retainedStartVertices.reserve(255);
+	retainedEndVertices.reserve(255);
+	int32_t ic = 0;
+	for (uint32_t facetId = 0; facetId < mMeshA->getFacetCount(); ++facetId)
+	{
+		retainedStartVertices.clear();
+		retainedEndVertices.clear();
+		for (uint32_t i = 0; i < mMeshA->getFacet(facetId)->edgesCount; ++i)
+		{
+			PxVec3 compositeEndPoint(0, 0, 0);
+			PxVec3 compositeStartPoint(0, 0, 0);
+
+			int32_t lastPos = static_cast<int32_t>(retainedEndVertices.size());
+			/* Test start and end point of edge against mesh */
+			if (bMeshBoudning.contains(vertices[facetEdges->s].p))
+			{
+				statusValue = vertexMeshStatus03(vertices[facetEdges->s].p, mMeshB);
+			}
+			else
+			{
+				statusValue = 0;
+			}
+			// Start point contributes with negated inclusion value, end point with
+			// positive - an edge fully inside contributes one start and one end.
+			inclusionValue = -inclusionValue03(mode, statusValue);
+
+			if (inclusionValue > 0)
+			{
+				for (ic = 0; ic < inclusionValue; ++ic)
+				{
+					retainedEndVertices.push_back(vertices[facetEdges->s]);
+					compositeEndPoint += vertices[facetEdges->s].p;
+				}
+			}
+			else
+			{
+				if (inclusionValue < 0)
+				{
+					for (ic = 0; ic < -inclusionValue; ++ic)
+					{
+						retainedStartVertices.push_back(vertices[facetEdges->s]);
+						compositeStartPoint += vertices[facetEdges->s].p;
+					}
+				}
+			}
+
+			if (bMeshBoudning.contains(vertices[facetEdges->e].p))
+			{
+				statusValue = vertexMeshStatus03(vertices[facetEdges->e].p, mMeshB);
+			}
+			else
+			{
+				statusValue = 0;
+			}
+			inclusionValue = inclusionValue03(mode, statusValue);
+			if (inclusionValue > 0)
+			{
+				for (ic = 0; ic < inclusionValue; ++ic)
+				{
+					retainedEndVertices.push_back(vertices[facetEdges->e]);
+					compositeEndPoint += vertices[facetEdges->e].p;
+				}
+			}
+			else
+			{
+				if (inclusionValue < 0)
+				{
+					for (ic = 0; ic < -inclusionValue; ++ic)
+					{
+						retainedStartVertices.push_back(vertices[facetEdges->e]);
+						compositeStartPoint += vertices[facetEdges->e].p;
+					}
+				}
+			}
+			/* Test edge intersection with mesh*/
+			for (uint32_t intrs = 0; intrs < mEdgeFacetIntersectionData12[facetId].size(); ++intrs)
+			{
+				EdgeFacetIntersectionData& intr = mEdgeFacetIntersectionData12[facetId][intrs];
+				if (intr.edId != (int32_t)i)
+					continue;
+				newPoint = intr.intersectionPoint;
+				inclusionValue = inclusionValueEdgeFace(mode, intr.intersectionType);
+
+				if (inclusionValue > 0)
+				{
+					for (ic = 0; ic < inclusionValue; ++ic)
+					{
+						retainedEndVertices.push_back(newPoint);
+						compositeEndPoint += newPoint.p;
+					}
+				}
+				else
+				{
+					if (inclusionValue < 0)
+					{
+						for (ic = 0; ic < -inclusionValue; ++ic)
+						{
+							retainedStartVertices.push_back(newPoint);
+							compositeStartPoint += newPoint.p;
+						}
+					}
+				}
+			}
+			facetEdges++;
+			if (retainedStartVertices.size() != retainedEndVertices.size())
+			{
+				NVBLAST_LOG_ERROR(mLoggingCallback, "Not equal number of starting and ending vertices! Probably input mesh has open edges.");
+				return;
+			}
+			// NOTE(review): collectRetainedPartsFromB tests (size() - lastPos > 1) here.
+			// This looser condition still only sorts from lastPos onward, so sorting a
+			// 0/1-element tail is a harmless no-op - confirm the two were meant to match.
+			if (retainedEndVertices.size() > 1)
+			{
+				comp.basePoint = compositeEndPoint - compositeStartPoint;
+				std::sort(retainedStartVertices.begin() + lastPos, retainedStartVertices.end(), comp);
+				std::sort(retainedEndVertices.begin() + lastPos, retainedEndVertices.end(), comp);
+			}
+		}
+
+
+		EdgeWithParent newEdge;
+		for (uint32_t rv = 0; rv < retainedStartVertices.size(); ++rv)
+		{
+			newEdge.s = addIfNotExist(retainedStartVertices[rv]);
+			newEdge.e = addIfNotExist(retainedEndVertices[rv]);
+			newEdge.parent = facetId;
+			addEdgeIfValid(newEdge);
+		}
+	}
+
+	return;
+}
+
+/**
+    Mirror of collectRetainedPartsFromA for mesh B's facets: endpoints are classified
+    against mesh A (vertexMeshStatus30 / inclusionValue30), intersection points come
+    from mEdgeFacetIntersectionData21, and emitted edges use the B facet index offset
+    by mesh A's facet count as parent.
+*/
+void BooleanEvaluator::collectRetainedPartsFromB(BooleanConf mode)
+{
+	int32_t statusValue = 0;
+	int32_t inclusionValue = 0;
+	Vertex* vertices = mMeshB->getVertices();
+	Vertex newPoint;
+	VertexComparator comp;
+	PxBounds3& aMeshBoudning = mMeshA->getBoundingBox();
+	Edge* facetEdges = mMeshB->getEdges();
+	std::vector<Vertex> retainedStartVertices;
+	std::vector<Vertex> retainedEndVertices;
+	retainedStartVertices.reserve(255);
+	retainedEndVertices.reserve(255);
+	int32_t ic = 0;
+	for (uint32_t facetId = 0; facetId < mMeshB->getFacetCount(); ++facetId)
+	{
+		retainedStartVertices.clear();
+		retainedEndVertices.clear();
+		for (uint32_t i = 0; i < mMeshB->getFacet(facetId)->edgesCount; ++i)
+		{
+			PxVec3 compositeEndPoint(0, 0, 0);
+			PxVec3 compositeStartPoint(0, 0, 0);
+			int32_t lastPos = static_cast<int32_t>(retainedEndVertices.size());
+			/* Test start and end point of edge against mesh A */
+			if (aMeshBoudning.contains(vertices[facetEdges->s].p))
+			{
+				statusValue = vertexMeshStatus30(vertices[facetEdges->s].p, mMeshA);
+			}
+			else
+			{
+				statusValue = 0;
+			}
+			inclusionValue = -inclusionValue30(mode, statusValue);
+
+			if (inclusionValue > 0)
+			{
+				for (ic = 0; ic < inclusionValue; ++ic)
+				{
+					retainedEndVertices.push_back(vertices[facetEdges->s]);
+					compositeEndPoint += vertices[facetEdges->s].p;
+				}
+
+			}
+			else
+			{
+				if (inclusionValue < 0)
+				{
+					for (ic = 0; ic < -inclusionValue; ++ic)
+					{
+						retainedStartVertices.push_back(vertices[facetEdges->s]);
+						compositeStartPoint += vertices[facetEdges->s].p;
+					}
+
+				}
+			}
+
+			if (aMeshBoudning.contains(vertices[facetEdges->e].p))
+			{
+				statusValue = vertexMeshStatus30(vertices[facetEdges->e].p, mMeshA);
+			}
+			else
+			{
+				statusValue = 0;
+			}
+			inclusionValue = inclusionValue30(mode, statusValue);
+			if (inclusionValue > 0)
+			{
+				for (ic = 0; ic < inclusionValue; ++ic)
+				{
+					retainedEndVertices.push_back(vertices[facetEdges->e]);
+					compositeEndPoint += vertices[facetEdges->e].p;
+				}
+
+			}
+			else
+			{
+				if (inclusionValue < 0)
+				{
+					for (ic = 0; ic < -inclusionValue; ++ic)
+					{
+						retainedStartVertices.push_back(vertices[facetEdges->e]);
+						compositeStartPoint += vertices[facetEdges->e].p;
+					}
+
+				}
+			}
+			/* Merge in the edge/facet intersection points recorded by the face/face pass */
+			for (uint32_t intrs = 0; intrs < mEdgeFacetIntersectionData21[facetId].size(); ++intrs)
+			{
+				EdgeFacetIntersectionData& intr = mEdgeFacetIntersectionData21[facetId][intrs];
+				if (intr.edId != (int32_t)i)
+					continue;
+				newPoint = intr.intersectionPoint;
+				inclusionValue = inclusionValueEdgeFace(mode, intr.intersectionType);
+
+				if (inclusionValue > 0)
+				{
+					for (ic = 0; ic < inclusionValue; ++ic)
+					{
+						retainedEndVertices.push_back(newPoint);
+						compositeEndPoint += newPoint.p;
+					}
+				}
+				else
+				{
+					if (inclusionValue < 0)
+					{
+						for (ic = 0; ic < -inclusionValue; ++ic)
+						{
+							retainedStartVertices.push_back(newPoint);
+							compositeStartPoint += newPoint.p;
+						}
+					}
+				}
+			}
+			facetEdges++;
+			if (retainedStartVertices.size() != retainedEndVertices.size())
+			{
+				NVBLAST_LOG_ERROR(mLoggingCallback, "Not equal number of starting and ending vertices! Probably input mesh has open edges.");
+				return;
+			}
+			// Only the tail added for this edge (from lastPos) is sorted. size() is
+			// unsigned, but size() >= lastPos always holds here so the subtraction is safe.
+			if (retainedEndVertices.size() - lastPos > 1)
+			{
+				comp.basePoint = compositeEndPoint - compositeStartPoint;
+				std::sort(retainedStartVertices.begin() + lastPos, retainedStartVertices.end(), comp);
+				std::sort(retainedEndVertices.begin() + lastPos, retainedEndVertices.end(), comp);
+			}
+		}
+		EdgeWithParent newEdge;
+		for (uint32_t rv = 0; rv < retainedStartVertices.size(); ++rv)
+		{
+			newEdge.s = addIfNotExist(retainedStartVertices[rv]);
+			newEdge.e = addIfNotExist(retainedEndVertices[rv]);
+			newEdge.parent = facetId + mMeshA->getFacetCount();
+			addEdgeIfValid(newEdge);
+		}
+	}
+	return;
+}
+
+// Orders boolean-result edges by their parent facet index so that edges belonging
+// to the same output facet become contiguous (consumed by createNewMesh()).
+bool EdgeWithParentSortComp(const EdgeWithParent& lhs, const EdgeWithParent& rhs)
+{
+	return lhs.parent < rhs.parent;
+}
+
+
+/**
+    Performs the boolean operation using caller-provided acceleration structures.
+    Pipeline order matters: face/face intersections must run before the retained-parts
+    passes, which consume mEdgeFacetIntersectionData12/21. Accelerator pointers are not
+    owned and are cleared before returning.
+*/
+void BooleanEvaluator::performBoolean(Mesh* meshA, Mesh* meshB, SpatialAccelerator* spAccelA, SpatialAccelerator* spAccelB, BooleanConf mode)
+{
+	reset();
+	mMeshA = meshA;
+	mMeshB = meshB;
+	mAcceleratorA = spAccelA;
+	mAcceleratorB = spAccelB;
+	buildFaceFaceIntersections(mode);
+	collectRetainedPartsFromA(mode);
+	collectRetainedPartsFromB(mode);
+	mAcceleratorA = nullptr;
+	mAcceleratorB = nullptr;
+}
+
+/**
+    Convenience overload: performs the boolean operation with brute-force
+    (dummy) acceleration structures built for both meshes.
+*/
+void BooleanEvaluator::performBoolean(Mesh* meshA, Mesh* meshB, BooleanConf mode)
+{
+	reset();
+	mMeshA = meshA;
+	mMeshB = meshB;
+	DummyAccelerator ac(mMeshA->getFacetCount());
+	DummyAccelerator bc(mMeshB->getFacetCount());
+	performBoolean(meshA, meshB, &ac, &bc, mode);
+}
+
+
+/**
+    Fast cutting pipeline: like performBoolean but uses the single-facet fast
+    intersection pass and only collects retained parts from mesh A (mesh B is
+    expected to be a cutting box, whose own retained parts are not needed).
+    Accelerator pointers are not owned and are cleared before returning.
+*/
+void BooleanEvaluator::performFastCutting(Mesh* meshA, Mesh* meshB, SpatialAccelerator* spAccelA, SpatialAccelerator* spAccelB, BooleanConf mode)
+{
+	reset();
+	mMeshA = meshA;
+	mMeshB = meshB;
+	mAcceleratorA = spAccelA;
+	mAcceleratorB = spAccelB;
+	buildFastFaceFaceIntersection(mode);
+	collectRetainedPartsFromA(mode);
+	mAcceleratorA = nullptr;
+	mAcceleratorB = nullptr;
+}
+
+/**
+    Convenience overload: performs fast cutting with brute-force (dummy)
+    acceleration structures built for both meshes.
+*/
+void BooleanEvaluator::performFastCutting(Mesh* meshA, Mesh* meshB, BooleanConf mode)
+{
+	reset();
+	mMeshA = meshA;
+	mMeshB = meshB;
+	DummyAccelerator ac(mMeshA->getFacetCount());
+	DummyAccelerator bc(mMeshB->getFacetCount());
+	performFastCutting(meshA, meshB, &ac, &bc, mode);
+}
+
+
+
+
+/**
+    Constructs an evaluator with no meshes or accelerators attached; they are
+    supplied per operation. The logging callback may be nullptr.
+*/
+BooleanEvaluator::BooleanEvaluator(NvBlastLog loggingCallback)
+	: mMeshA(nullptr)
+	, mMeshB(nullptr)
+	, mAcceleratorA(nullptr)
+	, mAcceleratorB(nullptr)
+	, mLoggingCallback(loggingCallback)
+{
+}
+// Clears aggregated data; mesh and accelerator pointers are not owned and are
+// merely nulled by reset().
+BooleanEvaluator::~BooleanEvaluator()
+{
+	reset();
+}
+
+
+
+/**
+    Assembles a new Mesh from the aggregated edges and vertices produced by the boolean
+    passes. Edges are sorted and grouped by parent facet (A facets first, then B facets
+    offset by A's facet count); each group becomes one output facet carrying the source
+    facet's userData. Returns nullptr when no edges were produced. The caller owns the
+    returned mesh.
+*/
+Mesh* BooleanEvaluator::createNewMesh()
+{
+	if (mEdgeAggregate.size() == 0)
+	{
+		return nullptr;
+	}
+	std::sort(mEdgeAggregate.begin(), mEdgeAggregate.end(), EdgeWithParentSortComp);
+	std::vector<Facet> newFacets;
+	std::vector<Edge> newEdges(mEdgeAggregate.size());
+	int32_t lastPos = 0;
+	int32_t lastParent = mEdgeAggregate[0].parent;
+	uint32_t collected = 0;
+	int32_t userData = 0;
+	for (uint32_t i = 0; i < mEdgeAggregate.size(); ++i)
+	{
+		// A parent change closes the current facet group and starts a new one.
+		if (mEdgeAggregate[i].parent != lastParent)
+		{
+			if (lastParent < (int32_t)mMeshA->getFacetCount())
+			{
+				userData = mMeshA->getFacet(lastParent)->userData;
+			}
+			else
+			{
+				userData = mMeshB->getFacet(lastParent - mMeshA->getFacetCount())->userData;
+			}
+			newFacets.push_back(Facet(lastPos, collected, userData));
+			lastPos = i;
+			lastParent = mEdgeAggregate[i].parent;
+			collected = 0;
+		}
+		collected++;
+		newEdges[i].s = mEdgeAggregate[i].s;
+		newEdges[i].e = mEdgeAggregate[i].e;
+	}
+	// Emit the final (still open) facet group. NOTE(review): pr mixes int32_t and
+	// uint32_t and is only meaningful when lastParent refers to a mesh B facet;
+	// it is guarded by the branch below.
+	int32_t pr = lastParent - mMeshA->getFacetCount();
+	if (lastParent < (int32_t)mMeshA->getFacetCount())
+	{
+		userData = mMeshA->getFacet(lastParent)->userData;
+	}
+	else
+	{
+		userData = mMeshB->getFacet(pr)->userData;
+	}
+	newFacets.push_back(Facet(lastPos, collected, userData));
+	// NOTE(review): assumes mVerticesAggregate is non-empty whenever edges exist -
+	// &mVerticesAggregate[0] would be invalid on an empty vector.
+	return new Mesh(&mVerticesAggregate[0], &newEdges[0], &newFacets[0], static_cast<uint32_t>(mVerticesAggregate.size()), static_cast<uint32_t>(mEdgeAggregate.size()), static_cast<uint32_t>(newFacets.size()));
+}
+
+/**
+    Clears all per-operation state. Mesh and accelerator pointers are not owned,
+    so they are simply nulled; the aggregates and intersection records are emptied.
+*/
+void BooleanEvaluator::reset()
+{
+	mMeshA = nullptr;
+	mMeshB = nullptr;
+	mAcceleratorA = nullptr;
+	mAcceleratorB = nullptr;
+	mEdgeAggregate.clear();
+	mVerticesAggregate.clear();
+	mEdgeFacetIntersectionData12.clear();
+	mEdgeFacetIntersectionData21.clear();
+}
+
+} // namespace Blast
+} // namespace Nv
diff --git a/NvBlast/sdk/extensions/authoring/source/NvBlastExtAuthoringBooleanTool.h b/NvBlast/sdk/extensions/authoring/source/NvBlastExtAuthoringBooleanTool.h
new file mode 100644
index 0000000..0b0b73a
--- /dev/null
+++ b/NvBlast/sdk/extensions/authoring/source/NvBlastExtAuthoringBooleanTool.h
@@ -0,0 +1,197 @@
+/*
+* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#ifndef NVBLASTEXTAUTHORINGBOOLEANTOOL_H
+#define NVBLASTEXTAUTHORINGBOOLEANTOOL_H
+
+#include "NvBlastExtAuthoringTypes.h"
+#include "NvBlastExtAuthoringInternalCommon.h"
+#include <vector>
+#include <map>
+#include "NvBlastTypes.h"
+
+
+namespace Nv
+{
+namespace Blast
+{
+
+class Mesh;
+
+/**
+	Boolean tool config, used to perform different operations: UNION, INTERSECTION, DIFFERENCE.
+	The weights are combined as ca + ci * status for mesh A geometry and
+	cb + ci * status for mesh B geometry (see the inclusionValue* helpers).
+*/
+struct BooleanConf
+{
+	int32_t ca, cb, ci;	// ca: mesh A constant term, cb: mesh B constant term, ci: intersection weight
+	BooleanConf(int32_t a, int32_t b, int32_t c) : ca(a), cb(b), ci(c)
+	{
+	}
+};
+
+
+namespace BooleanConfigurations
+{
+	/**
+	Creates boolean tool configuration to perform intersection of meshes A and B.
+	NOTE: the missing 'T' in "INTERSECION" is preserved deliberately - the name is public API.
+	*/
+inline BooleanConf BOOLEAN_INTERSECION()
+{
+	return BooleanConf(0, 0, 1);
+}
+
+/**
+	Creates boolean tool configuration to perform union of meshes A and B.
+*/
+inline BooleanConf BOOLEAN_UNION()
+{
+	return BooleanConf(1, 1, -1);
+}
+/**
+	Creates boolean tool configuration to perform difference of meshes (A - B).
+*/
+inline BooleanConf BOOLEAN_DIFFERENCE()
+{
+	return BooleanConf(1, 0, -1);
+}
+}
+
+/**
+	Structure which holds information about intersection of a facet with an edge.
+*/
+struct EdgeFacetIntersectionData
+{
+	int32_t edId;				// index of the edge within its facet
+	int32_t intersectionType;	// signed status returned by edgeFacetIntersection12/21
+	Vertex intersectionPoint;	// interpolated vertex at the intersection
+	EdgeFacetIntersectionData(int32_t edId, int32_t intersType, const Vertex& inters) : edId(edId), intersectionType(intersType), intersectionPoint(inters)
+	{ }
+	// Lookup-only constructor: intersectionType is zero-initialized instead of being
+	// left indeterminate (intersectionPoint remains default-constructed).
+	EdgeFacetIntersectionData(int32_t edId) : edId(edId), intersectionType(0)
+	{ }
+	bool operator<(const EdgeFacetIntersectionData& b) const
+	{
+		return edId < b.edId;
+	}
+};
+
+
+class SpatialAccelerator;
+
+/**
+ Tool for performing boolean operations on polygonal meshes.
+ Tool supports only closed meshes. Performing boolean on meshes with holes can lead to unexpected behavior, e.g. holes in result geometry.
+*/
+class BooleanEvaluator
+{
+
+public:
+	BooleanEvaluator(NvBlastLog logCallback = nullptr);
+	~BooleanEvaluator();
+
+	/**
+		Perform boolean operation on two polygonal meshes (A and B).
+		\param[in] meshA	Mesh A
+		\param[in] meshB	Mesh B
+		\param[in] spAccelA	Acceleration structure for mesh A
+		\param[in] spAccelB	Acceleration structure for mesh B
+		\param[in] mode		Boolean operation type
+	*/
+	void	performBoolean(Mesh* meshA, Mesh* meshB, SpatialAccelerator* spAccelA, SpatialAccelerator* spAccelB, BooleanConf mode);
+
+	/**
+		Perform boolean operation on two polygonal meshes (A and B).
+		\param[in] meshA	Mesh A
+		\param[in] meshB	Mesh B
+		\param[in] mode		Boolean operation type
+	*/
+	void	performBoolean(Mesh* meshA, Mesh* meshB, BooleanConf mode);
+
+	/**
+		Perform cutting of mesh with some large box, which represents cutting plane. This method skips part of intersection computations, so
+		should be used ONLY with cutting box, received from getBigBox(...) method from NvBlastExtAuthoringMesh.h. For cutting use only BOOLEAN_INTERSECTION or BOOLEAN_DIFFERENCE mode.
+		\param[in] meshA	Mesh A
+		\param[in] meshB	Cutting box
+		\param[in] spAccelA	Acceleration structure for mesh A
+		\param[in] spAccelB	Acceleration structure for cutting box
+		\param[in] mode		Boolean operation type
+	*/
+	void	performFastCutting(Mesh* meshA, Mesh* meshB, SpatialAccelerator* spAccelA, SpatialAccelerator* spAccelB, BooleanConf mode);
+
+	/**
+		Perform cutting of mesh with some large box, which represents cutting plane. This method skips part of intersection computations, so
+		should be used ONLY with cutting box, received from getBigBox(...) method from NvBlastExtAuthoringMesh.h. For cutting use only BOOLEAN_INTERSECTION or BOOLEAN_DIFFERENCE mode.
+		\param[in] meshA	Mesh A
+		\param[in] meshB	Cutting box
+		\param[in] mode		Boolean operation type
+	*/
+	void	performFastCutting(Mesh* meshA, Mesh* meshB, BooleanConf mode);
+
+	/**
+		Test whether point contained in mesh.
+		\param[in] mesh		Mesh geometry
+		\param[in] point	Point which should be tested
+		\return not 0 if point is inside of mesh
+	*/
+	int32_t	isPointContainedInMesh(Mesh* mesh, const physx::PxVec3& point);
+	/**
+		Test whether point contained in mesh.
+		\param[in] mesh		Mesh geometry
+		\param[in] spAccel	Acceleration structure for mesh
+		\param[in] point	Point which should be tested
+		\return not 0 if point is inside of mesh
+	*/
+	int32_t	isPointContainedInMesh(Mesh* mesh, SpatialAccelerator* spAccel, const physx::PxVec3& point);
+
+
+	/**
+		Generates result polygon mesh after performing boolean operation.
+		\return If not nullptr - result mesh geometry.
+	*/
+	Mesh*	createNewMesh();
+
+	/**
+		Reset tool state.
+	*/
+	void	reset();
+
+private:
+
+	// Intersection-building passes; must run before the collectRetainedPartsFrom* passes.
+	void	buildFaceFaceIntersections(BooleanConf);
+	void	buildFastFaceFaceIntersection(BooleanConf);
+	void	collectRetainedPartsFromA(BooleanConf mode);
+	void	collectRetainedPartsFromB(BooleanConf mode);
+
+	// Vertex/edge aggregation helpers for building the result mesh.
+	int32_t	addIfNotExist(Vertex& p);
+	void	addEdgeIfValid(EdgeWithParent& ed);
+private:
+
+	// Point-in-mesh status accumulators (A against B and B against A respectively).
+	int32_t	vertexMeshStatus03(const physx::PxVec3& p, Mesh* mesh);
+	int32_t	vertexMeshStatus30(const physx::PxVec3& p, Mesh* mesh);
+
+	// Input meshes for the current operation; not owned.
+	Mesh*	mMeshA;
+	Mesh*	mMeshB;
+
+	// Acceleration structures for the current operation; not owned, cleared after use.
+	SpatialAccelerator*	mAcceleratorA;
+	SpatialAccelerator*	mAcceleratorB;
+
+	// Aggregated output edges/vertices consumed by createNewMesh().
+	std::vector<EdgeWithParent>	mEdgeAggregate;
+	std::vector<Vertex>			mVerticesAggregate;
+
+	// Per-facet edge/facet intersection records (A edges vs B facets, and vice versa).
+	std::vector<std::vector<EdgeFacetIntersectionData> >	mEdgeFacetIntersectionData12;
+	std::vector<std::vector<EdgeFacetIntersectionData> >	mEdgeFacetIntersectionData21;
+
+	NvBlastLog mLoggingCallback;
+};
+
+} // namespace Blast
+} // namespace Nv
+
+
+#endif // ifndef NVBLASTEXTAUTHORINGBOOLEANTOOL_H
diff --git a/NvBlast/sdk/extensions/authoring/source/NvBlastExtAuthoringCollisionBuilder.cpp b/NvBlast/sdk/extensions/authoring/source/NvBlastExtAuthoringCollisionBuilder.cpp
new file mode 100644
index 0000000..becdce9
--- /dev/null
+++ b/NvBlast/sdk/extensions/authoring/source/NvBlastExtAuthoringCollisionBuilder.cpp
@@ -0,0 +1,279 @@
+/*
+* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#include "NvBlastExtAuthoringCollisionBuilder.h"
+#include <PxConvexMesh.h>
+#include <PxVec3.h>
+#include <PxBounds3.h>
+#include "PxPhysics.h"
+#include "cooking/PxCooking.h"
+#include <NvBlastExtApexSharedParts.h>
+#include <NvBlastExtAuthoringInternalCommon.h>
+
+#include <NvBlastExtAuthoringBooleanTool.h>
+#include <NvBlastExtAuthoringMesh.h>
+
+using namespace physx;
+
+namespace Nv
+{
+namespace Blast
+{
+
+void ConvexMeshBuilder::buildCollisionGeometry(const std::vector<PxVec3>& vData, CollisionHull& output)
+{
+ std::vector<physx::PxVec3> vertexData = vData;
+
+ PxConvexMeshDesc convexMeshDescr;
+ PxConvexMesh* resultConvexMesh;
+ PxBounds3 bounds;
+ // Scale chunk to unit cube size, to avoid numerical errors
+ bounds.setEmpty();
+ for (uint32_t i = 0; i < vertexData.size(); ++i)
+ {
+ bounds.include(vertexData[i]);
+ }
+ PxVec3 bbCenter = bounds.getCenter();
+ float scale = PxMax(PxAbs(bounds.getExtents(0)), PxMax(PxAbs(bounds.getExtents(1)), PxAbs(bounds.getExtents(2))));
+ for (uint32_t i = 0; i < vertexData.size(); ++i)
+ {
+ vertexData[i] = vertexData[i] - bbCenter;
+ vertexData[i] *= (1.0f / scale);
+ }
+ bounds.setEmpty();
+ for (uint32_t i = 0; i < vertexData.size(); ++i)
+ {
+ bounds.include(vertexData[i]);
+ }
+ convexMeshDescr.points.data = vertexData.data();
+ convexMeshDescr.points.stride = sizeof(PxVec3);
+ convexMeshDescr.points.count = (uint32_t)vertexData.size();
+ convexMeshDescr.flags = PxConvexFlag::eCOMPUTE_CONVEX;
+ resultConvexMesh = mCooking->createConvexMesh(convexMeshDescr, *mInsertionCallback);
+ if (!resultConvexMesh)
+ {
+ vertexData.clear();
+ vertexData.push_back(bounds.minimum);
+ vertexData.push_back(PxVec3(bounds.minimum.x, bounds.maximum.y, bounds.minimum.z));
+ vertexData.push_back(PxVec3(bounds.maximum.x, bounds.maximum.y, bounds.minimum.z));
+ vertexData.push_back(PxVec3(bounds.maximum.x, bounds.minimum.y, bounds.minimum.z));
+ vertexData.push_back(PxVec3(bounds.minimum.x, bounds.minimum.y, bounds.maximum.z));
+ vertexData.push_back(PxVec3(bounds.minimum.x, bounds.maximum.y, bounds.maximum.z));
+ vertexData.push_back(PxVec3(bounds.maximum.x, bounds.maximum.y, bounds.maximum.z));
+ vertexData.push_back(PxVec3(bounds.maximum.x, bounds.minimum.y, bounds.maximum.z));
+ convexMeshDescr.points.data = vertexData.data();
+ convexMeshDescr.points.count = (uint32_t)vertexData.size();
+ resultConvexMesh = mCooking->createConvexMesh(convexMeshDescr, *mInsertionCallback);
+ }
+ output.polygonData.resize(resultConvexMesh->getNbPolygons());
+ output.points.resize(resultConvexMesh->getNbVertices());
+ int32_t indicesCount = 0;
+ PxHullPolygon hPoly;
+ for (uint32_t i = 0; i < resultConvexMesh->getNbPolygons(); ++i)
+ {
+ CollisionHull::HullPolygon& pd = output.polygonData[i];
+ resultConvexMesh->getPolygonData(i, hPoly);
+ pd.mIndexBase = hPoly.mIndexBase;
+ pd.mNbVerts = hPoly.mNbVerts;
+ pd.mPlane[0] = hPoly.mPlane[0];
+ pd.mPlane[1] = hPoly.mPlane[1];
+ pd.mPlane[2] = hPoly.mPlane[2];
+ pd.mPlane[3] = hPoly.mPlane[3];
+
+ pd.mPlane[0] /= scale;
+ pd.mPlane[1] /= scale;
+ pd.mPlane[2] /= scale;
+ pd.mPlane[3] -= (pd.mPlane[0] * bbCenter.x + pd.mPlane[1] * bbCenter.y + pd.mPlane[2] * bbCenter.z);
+ float length = sqrt(pd.mPlane[0] * pd.mPlane[0] + pd.mPlane[1] * pd.mPlane[1] + pd.mPlane[2] * pd.mPlane[2]);
+ pd.mPlane[0] /= length;
+ pd.mPlane[1] /= length;
+ pd.mPlane[2] /= length;
+ pd.mPlane[3] /= length;
+ indicesCount = PxMax(indicesCount, pd.mIndexBase + pd.mNbVerts);
+ }
+ output.indices.resize(indicesCount);
+ for (uint32_t i = 0; i < resultConvexMesh->getNbVertices(); ++i)
+ {
+ PxVec3 p = resultConvexMesh->getVertices()[i] * scale + bbCenter;
+ output.points[i] = p;
+ }
+ for (int32_t i = 0; i < indicesCount; ++i)
+ {
+ output.indices[i] = resultConvexMesh->getIndexBuffer()[i];
+ }
+ resultConvexMesh->release();
+}
+
+void ConvexMeshBuilder::trimCollisionGeometry(std::vector<CollisionHull>& in, const std::vector<uint32_t>& chunkDepth)
+{
+ std::vector<std::vector<PxPlane> > chunkMidplanes(in.size());
+ std::vector<PxVec3> centers(in.size());
+ std::vector<PxBounds3> hullsBounds(in.size());
+ for (uint32_t i = 0; i < in.size(); ++i)
+ {
+ hullsBounds[i].setEmpty();
+ centers[i] = PxVec3(0, 0, 0);
+ for (uint32_t p = 0; p < in[i].points.size(); ++p)
+ {
+ centers[i] += in[i].points[p];
+ hullsBounds[i].include(in[i].points[p]);
+ }
+ centers[i] = hullsBounds[i].getCenter();
+ }
+
+ Separation params;
+ for (uint32_t hull = 0; hull < in.size(); ++hull)
+ {
+ for (uint32_t hull2 = hull + 1; hull2 < in.size(); ++hull2)
+ {
+ if (chunkDepth[hull] != chunkDepth[hull2])
+ {
+ continue;
+ }
+ if (importerHullsInProximityApexFree(in[hull].points, hullsBounds[hull], PxTransform(PxIdentity), PxVec3(1, 1, 1),
+ in[hull2].points, hullsBounds[hull2], PxTransform(PxIdentity), PxVec3(1, 1, 1), 0.0, &params) == false)
+ {
+ continue;
+ }
+ PxVec3 c1 = centers[hull];
+ PxVec3 c2 = centers[hull2];
+ float d = FLT_MAX;
+ PxVec3 n1;
+ PxVec3 n2;
+ for (uint32_t p = 0; p < in[hull].points.size(); ++p)
+ {
+ float ld = (in[hull].points[p] - c2).magnitude();
+ if (ld < d)
+ {
+ n1 = in[hull].points[p];
+ d = ld;
+ }
+ }
+ d = FLT_MAX;
+ for (uint32_t p = 0; p < in[hull2].points.size(); ++p)
+ {
+ float ld = (in[hull2].points[p] - c1).magnitude();
+ if (ld < d)
+ {
+ n2 = in[hull2].points[p];
+ d = ld;
+ }
+ }
+
+ PxVec3 dir = c2 - c1;
+
+ PxPlane pl = PxPlane((n1 + n2) * 0.5, dir.getNormalized());
+ chunkMidplanes[hull].push_back(pl);
+ PxPlane pl2 = PxPlane((n1 + n2) * 0.5, -dir.getNormalized());
+ chunkMidplanes[hull2].push_back(pl2);
+ }
+ }
+ std::vector<PxVec3> hPoints;
+ for (uint32_t i = 0; i < in.size(); ++i)
+ {
+ std::vector<Facet> facets;
+ std::vector<Vertex> vertices;
+ std::vector<Edge> edges;
+ for (uint32_t fc = 0; fc < in[i].polygonData.size(); ++fc)
+ {
+ Facet nFc;
+ nFc.firstEdgeNumber = edges.size();
+ uint32_t n = in[i].polygonData[fc].mNbVerts;
+ for (uint32_t ed = 0; ed < n; ++ed)
+ {
+ uint32_t vr1 = in[i].indices[(ed) + in[i].polygonData[fc].mIndexBase];
+ uint32_t vr2 = in[i].indices[(ed + 1) % n + in[i].polygonData[fc].mIndexBase];
+ edges.push_back(Edge(vr1, vr2));
+ }
+ nFc.edgesCount = n;
+ facets.push_back(nFc);
+ }
+ vertices.resize(in[i].points.size());
+ for (uint32_t vr = 0; vr < in[i].points.size(); ++vr)
+ {
+ vertices[vr].p = in[i].points[vr];
+ }
+ Mesh* hullMesh = new Mesh(vertices.data(), edges.data(), facets.data(), vertices.size(), edges.size(), facets.size());
+ BooleanEvaluator evl;
+ Mesh* cuttingMesh = getCuttingBox(PxVec3(0, 0, 0), PxVec3(0, 0, 1), 40, 0);
+ for (uint32_t p = 0; p < chunkMidplanes[i].size(); ++p)
+ {
+ PxPlane& pl = chunkMidplanes[i][p];
+ setCuttingBox(pl.pointInPlane(), pl.n.getNormalized(), cuttingMesh, 60, 0);
+ evl.performFastCutting(hullMesh, cuttingMesh, BooleanConfigurations::BOOLEAN_DIFFERENCE());
+ Mesh* result = evl.createNewMesh();
+ if (result == nullptr)
+ {
+ break;
+ }
+ delete hullMesh;
+ hullMesh = result;
+ }
+ delete cuttingMesh;
+ if (hullMesh == nullptr)
+ {
+ continue;
+ }
+ hPoints.clear();
+ hPoints.resize(hullMesh->getVerticesCount());
+ for (uint32_t v = 0; v < hullMesh->getVerticesCount(); ++v)
+ {
+ hPoints[v] = hullMesh->getVertices()[v].p;
+ }
+ delete hullMesh;
+ buildCollisionGeometry(hPoints, in[i]);
+ }
+}
+
+
+PxConvexMesh* ConvexMeshBuilder::buildConvexMesh(std::vector<PxVec3>& vertexData)
+{
+ CollisionHull hull;
+ buildCollisionGeometry(vertexData, hull);
+
+ PxConvexMeshDesc convexMeshDescr;
+ convexMeshDescr.indices.data = hull.indices.data();
+ convexMeshDescr.indices.count = (uint32_t)hull.indices.size();
+ convexMeshDescr.indices.stride = sizeof(uint32_t);
+
+ convexMeshDescr.points.data = hull.points.data();
+ convexMeshDescr.points.count = (uint32_t)hull.points.size();
+ convexMeshDescr.points.stride = sizeof(PxVec3);
+
+ convexMeshDescr.polygons.data = hull.polygonData.data();
+ convexMeshDescr.polygons.count = (uint32_t)hull.polygonData.size();
+ convexMeshDescr.polygons.stride = sizeof(PxHullPolygon);
+
+ PxConvexMesh* convexMesh = mCooking->createConvexMesh(convexMeshDescr, *mInsertionCallback);
+ return convexMesh;
+}
+
+PxConvexMesh* ConvexMeshBuilder::buildConvexMesh(CollisionHull& hull)
+{
+ PxConvexMeshDesc convexMeshDescr;
+ convexMeshDescr.indices.data = hull.indices.data();
+ convexMeshDescr.indices.count = (uint32_t)hull.indices.size();
+ convexMeshDescr.indices.stride = sizeof(uint32_t);
+
+ convexMeshDescr.points.data = hull.points.data();
+ convexMeshDescr.points.count = (uint32_t)hull.points.size();
+ convexMeshDescr.points.stride = sizeof(PxVec3);
+
+ convexMeshDescr.polygons.data = hull.polygonData.data();
+ convexMeshDescr.polygons.count = (uint32_t)hull.polygonData.size();
+ convexMeshDescr.polygons.stride = sizeof(PxHullPolygon);
+
+ PxConvexMesh* convexMesh = mCooking->createConvexMesh(convexMeshDescr, *mInsertionCallback);
+ return convexMesh;
+}
+
+
+} // namespace Blast
+} // namespace Nv
diff --git a/NvBlast/sdk/extensions/authoring/source/NvBlastExtAuthoringFractureTool.cpp b/NvBlast/sdk/extensions/authoring/source/NvBlastExtAuthoringFractureTool.cpp
new file mode 100644
index 0000000..48830fe
--- /dev/null
+++ b/NvBlast/sdk/extensions/authoring/source/NvBlastExtAuthoringFractureTool.cpp
@@ -0,0 +1,1510 @@
+/*
+* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#include "NvBlastExtAuthoringFractureTool.h"
+// This warning arises when using some stl containers with older versions of VC
+// c:\program files (x86)\microsoft visual studio 12.0\vc\include\xtree(1826): warning C4702: unreachable code
+#if NV_VC && NV_VC < 14
+#pragma warning(disable : 4702)
+#endif
+#include <queue>
+#include <vector>
+#include "NvBlastExtAuthoringVSA.h"
+#include <float.h>
+#include "NvBlastExtAuthoringTriangulator.h"
+#include "NvBlastExtAuthoringBooleanTool.h"
+#include "NvBlastExtAuthoringAccelerator.h"
+#include "NvBlast.h"
+#include "NvBlastExtAuthoringPerlinNoise.h"
+#include <NvBlastAssert.h>
+using namespace physx;
+
+#define DEFAULT_BB_ACCELARATOR_RES 10
+
+namespace Nv
+{
+namespace Blast
+{
+
+struct Halfspace_partitioning : public VSA::VS3D_Halfspace_Set
+{
+ std::vector<physx::PxPlane> planes;
+ VSA::real farthest_halfspace(VSA::real plane[4], const VSA::real point[4])
+ {
+ float biggest_d = -FLT_MAX;
+ for (uint32_t i = 0; i < planes.size(); ++i)
+ {
+ float d = planes[i].n.x * point[0] + planes[i].n.y * point[1] + planes[i].n.z * point[2] + planes[i].d * point[3];
+ if (d > biggest_d)
+ {
+ biggest_d = d;
+ plane[0] = planes[i].n.x;
+ plane[1] = planes[i].n.y;
+ plane[2] = planes[i].n.z;
+ plane[3] = planes[i].d;
+ }
+ }
+ return biggest_d;
+ };
+};
+
+
+void findCellBasePlanes(const std::vector<PxVec3>& sites, std::vector<std::vector<int32_t> >& neighboors)
+{
+ Halfspace_partitioning prt;
+ std::vector<physx::PxPlane>& planes = prt.planes;
+ neighboors.resize(sites.size());
+ for (uint32_t cellId = 0; cellId + 1 < sites.size(); ++cellId)
+ {
+ planes.clear();
+ planes.resize(sites.size() - 1 - cellId);
+ std::vector<PxVec3> midpoints(sites.size() - 1);
+ int32_t collected = 0;
+
+ for (uint32_t i = cellId + 1; i < sites.size(); ++i)
+ {
+ PxVec3 midpoint = 0.5 * (sites[i] + sites[cellId]);
+ PxVec3 direction = (sites[i] - sites[cellId]).getNormalized();
+ planes[collected].n = direction;
+ planes[collected].d = -direction.dot(midpoint);
+ midpoints[collected] = midpoint;
+ ++collected;
+ }
+ for (uint32_t i = 0; i < planes.size(); ++i)
+ {
+ planes[i].n = -planes[i].n;
+ planes[i].d = -planes[i].d;
+
+ if (VSA::vs3d_test(prt))
+ {
+ neighboors[cellId].push_back(i + cellId + 1);
+ neighboors[i + cellId + 1].push_back(cellId);
+ };
+ planes[i].n = -planes[i].n;
+ planes[i].d = -planes[i].d;
+ }
+ }
+}
+
+
+#define SITE_BOX_SIZE 4
+#define CUTTING_BOX_SIZE 40
+
+Mesh* getCellMesh(BooleanEvaluator& eval, int32_t planeIndexerOffset, int32_t cellId, const std::vector<PxVec3>& sites, std::vector < std::vector<int32_t> >& neighboors)
+{
+ Mesh* cell = getBigBox(sites[cellId], SITE_BOX_SIZE);
+ Mesh* cuttingMesh = getCuttingBox(PxVec3(0, 0, 0), PxVec3(1, 1, 1), CUTTING_BOX_SIZE, 0);
+
+ for (uint32_t i = 0; i < neighboors[cellId].size(); ++i)
+ {
+ int32_t nCell = neighboors[cellId][i];
+ PxVec3 midpoint = 0.5 * (sites[nCell] + sites[cellId]);
+ PxVec3 direction = (sites[nCell] - sites[cellId]).getNormalized();
+ int32_t planeIndex = static_cast<int32_t>(sites.size()) * std::min(cellId, nCell) + std::max(cellId, nCell) + planeIndexerOffset;
+ if (nCell < cellId)
+ planeIndex = -planeIndex;
+ setCuttingBox(midpoint, -direction, cuttingMesh, CUTTING_BOX_SIZE, planeIndex);
+ eval.performFastCutting(cell, cuttingMesh, BooleanConfigurations::BOOLEAN_INTERSECION());
+ Mesh* newCell = eval.createNewMesh();
+ delete cell;
+ cell = newCell;
+ if (cell == nullptr)
+ break;
+ }
+ return cell;
+}
+
+
+bool blastBondComparator(const NvBlastBondDesc& a, const NvBlastBondDesc& b)
+{
+ if (a.chunkIndices[0] == b.chunkIndices[0])
+ return a.chunkIndices[1] < b.chunkIndices[1];
+ else
+ return a.chunkIndices[0] < b.chunkIndices[0];
+}
+
+
+#define MAX_VORONOI_ATTEMPT_NUMBER 450
+
+VoronoiSitesGenerator::VoronoiSitesGenerator(Mesh* mesh, RandomGeneratorBase* rnd)
+{
+ mMesh = mesh;
+ mRnd = rnd;
+ mAccelerator = new BBoxBasedAccelerator(mMesh, DEFAULT_BB_ACCELARATOR_RES);
+ mStencil = nullptr;
+}
+
+void VoronoiSitesGenerator::setBaseMesh(Mesh* m)
+{
+ mGeneratedSites.clear();
+ delete mAccelerator;
+ mMesh = m;
+ mAccelerator = new BBoxBasedAccelerator(mMesh, DEFAULT_BB_ACCELARATOR_RES);
+}
+
+VoronoiSitesGenerator::~VoronoiSitesGenerator()
+{
+ delete mAccelerator;
+ mAccelerator = nullptr;
+}
+
+
+void VoronoiSitesGenerator::setStencil(Mesh* stencil)
+{
+ mStencil = stencil;
+}
+
+
+void VoronoiSitesGenerator::clearStencil()
+{
+ mStencil = nullptr;
+}
+
+
+void VoronoiSitesGenerator::uniformlyGenerateSitesInMesh(const uint32_t sitesCount)
+{
+ BooleanEvaluator voronoiMeshEval(nullptr);
+ PxVec3 mn = mMesh->getBoundingBox().minimum;
+ PxVec3 mx = mMesh->getBoundingBox().maximum;
+ PxVec3 vc = mx - mn;
+ uint32_t attemptNumber = 0;
+ uint32_t generatedSites = 0;
+ while (generatedSites < sitesCount && attemptNumber < MAX_VORONOI_ATTEMPT_NUMBER)
+ {
+ float rn1 = mRnd->getRandomValue() * vc.x;
+ float rn2 = mRnd->getRandomValue() * vc.y;
+ float rn3 = mRnd->getRandomValue() * vc.z;
+ if (voronoiMeshEval.isPointContainedInMesh(mMesh, PxVec3(rn1, rn2, rn3) + mn) && (mStencil == nullptr
+ || voronoiMeshEval.isPointContainedInMesh(mStencil, PxVec3(rn1, rn2, rn3) + mn)))
+ {
+ generatedSites++;
+ mGeneratedSites.push_back(PxVec3(rn1, rn2, rn3) + mn);
+ attemptNumber = 0;
+ }
+ else
+ {
+ attemptNumber++;
+ if (attemptNumber > MAX_VORONOI_ATTEMPT_NUMBER)
+ break;
+ }
+ }
+}
+
+
+void VoronoiSitesGenerator::clusteredSitesGeneration(const uint32_t numberOfClusters, const uint32_t sitesPerCluster, float clusterRadius)
+{
+ BooleanEvaluator voronoiMeshEval(nullptr);
+ PxVec3 mn = mMesh->getBoundingBox().minimum;
+ PxVec3 mx = mMesh->getBoundingBox().maximum;
+ PxVec3 middle = (mx + mn) * 0.5;
+ PxVec3 vc = (mx - mn) * 0.5;
+ uint32_t attemptNumber = 0;
+ uint32_t generatedSites = 0;
+ std::vector<PxVec3> tempPoints;
+ while (generatedSites < numberOfClusters)
+ {
+ float rn1 = mRnd->getRandomValue() * 2 - 1;
+ float rn2 = mRnd->getRandomValue() * 2 - 1;
+ float rn3 = mRnd->getRandomValue() * 2 - 1;
+ PxVec3 p = PxVec3(middle.x + rn1 * vc.x, middle.y + rn2 * vc.y, middle.z + rn3 * vc.z);
+
+ if (voronoiMeshEval.isPointContainedInMesh(mMesh, p) && (mStencil == nullptr
+ || voronoiMeshEval.isPointContainedInMesh(mStencil, p)))
+ {
+ generatedSites++;
+ tempPoints.push_back(p);
+ attemptNumber = 0;
+ }
+ else
+ {
+ attemptNumber++;
+ if (attemptNumber > MAX_VORONOI_ATTEMPT_NUMBER)
+ break;
+ }
+ }
+ int32_t totalCount = 0;
+ for (; tempPoints.size() > 0; tempPoints.pop_back())
+ {
+ uint32_t unif = sitesPerCluster;
+ generatedSites = 0;
+ while (generatedSites < unif)
+ {
+ PxVec3 p = tempPoints.back() + PxVec3(mRnd->getRandomValue() * 2 - 1, mRnd->getRandomValue() * 2 - 1, mRnd->getRandomValue() * 2 - 1).getNormalized() * (mRnd->getRandomValue() + 0.001f) * clusterRadius;
+ if (voronoiMeshEval.isPointContainedInMesh(mMesh, p) && (mStencil == nullptr
+ || voronoiMeshEval.isPointContainedInMesh(mStencil, p)))
+ {
+ totalCount++;
+ generatedSites++;
+ mGeneratedSites.push_back(p);
+ attemptNumber = 0;
+ }
+ else
+ {
+ attemptNumber++;
+ if (attemptNumber > MAX_VORONOI_ATTEMPT_NUMBER)
+ break;
+ }
+ }
+
+ }
+
+}
+
+
+#define IN_SPHERE_ATTEMPT_NUMBER 20
+
+void VoronoiSitesGenerator::addSite(const physx::PxVec3& site)
+{
+ mGeneratedSites.push_back(site);
+}
+
+
+void VoronoiSitesGenerator::generateInSphere(const uint32_t count, const float radius, const physx::PxVec3& center)
+{
+ BooleanEvaluator voronoiMeshEval(nullptr);
+ uint32_t attemptNumber = 0;
+ uint32_t generatedSites = 0;
+ std::vector<PxVec3> tempPoints;
+
+ while (generatedSites < count && attemptNumber < MAX_VORONOI_ATTEMPT_NUMBER)
+ {
+ float rn1 = mRnd->getRandomValue() * radius;
+ float rn2 = mRnd->getRandomValue() * radius;
+ float rn3 = mRnd->getRandomValue() * radius;
+ if (voronoiMeshEval.isPointContainedInMesh(mMesh, PxVec3(rn1, rn2, rn3) + center) && (mStencil == nullptr
+ || voronoiMeshEval.isPointContainedInMesh(mStencil, PxVec3(rn1, rn2, rn3) + center)))
+ {
+ generatedSites++;
+ mGeneratedSites.push_back(PxVec3(rn1, rn2, rn3) + center);
+ attemptNumber = 0;
+ }
+ else
+ {
+ attemptNumber++;
+ if (attemptNumber > MAX_VORONOI_ATTEMPT_NUMBER)
+ break;
+ }
+ }
+}
+
+
+void VoronoiSitesGenerator::deleteInSphere(const float radius, const physx::PxVec3& center, float deleteProbability)
+{
+ float r2 = radius * radius;
+ for (uint32_t i = 0; i < mGeneratedSites.size(); ++i)
+ {
+ if ((mGeneratedSites[i] - center).magnitudeSquared() < r2 && mRnd->getRandomValue() <= deleteProbability)
+ {
+ std::swap(mGeneratedSites[i], mGeneratedSites.back());
+ mGeneratedSites.pop_back();
+ --i;
+ }
+ }
+}
+
+
+void VoronoiSitesGenerator::radialPattern(const physx::PxVec3& center, const physx::PxVec3& normal, float radius, int32_t angularSteps, int32_t radialSteps, float angleOffset, float variability)
+{
+// mGeneratedSites.push_back(center);
+ physx::PxVec3 t1, t2;
+ if (abs(normal.z) < 0.9)
+ {
+ t1 = normal.cross(PxVec3(0, 0, 1));
+ }
+ else
+ {
+ t1 = normal.cross(PxVec3(1, 0, 0));
+ }
+ t2 = t1.cross(normal);
+ t1.normalize();
+ t2.normalize();
+
+ float radStep = radius / radialSteps;
+ int32_t cCr = 0;
+
+ float angleStep = PxPi * 2 / angularSteps;
+ for (float cRadius = radStep; cRadius < radius; cRadius += radStep)
+ {
+ float cAngle = angleOffset * cCr;
+ for (int32_t i = 0; i < angularSteps; ++i)
+ {
+ float angVars = mRnd->getRandomValue() * variability + (1.0f - 0.5f * variability);
+ float radVars = mRnd->getRandomValue() * variability + (1.0f - 0.5f * variability);
+
+ PxVec3 nPos = (PxCos(cAngle * angVars) * t1 + PxSin(cAngle * angVars) * t2) * cRadius * radVars + center;
+ mGeneratedSites.push_back(nPos);
+ cAngle += angleStep;
+ }
+ ++cCr;
+ }
+}
+
+
+std::vector<PxVec3>& VoronoiSitesGenerator::getVoronoiSites()
+{
+ return mGeneratedSites;
+}
+
+
+int32_t FractureTool::voronoiFracturing(uint32_t chunkId, const std::vector<physx::PxVec3>& cellPointsIn, bool replaceChunk)
+{
+ if (chunkId == 0 && replaceChunk)
+ {
+ return 1;
+ }
+
+ int32_t chunkIndex = getChunkIndex(chunkId);
+ if (chunkIndex == -1 || cellPointsIn.size() < 2)
+ {
+ return 1;
+ }
+ if (!mChunkData[chunkIndex].isLeaf)
+ {
+ deleteAllChildsOfChunk(chunkId);
+ }
+ chunkIndex = getChunkIndex(chunkId);
+
+ Mesh* mesh = mChunkData[chunkIndex].meshData;
+
+ std::vector<PxVec3> cellPoints(cellPointsIn.size());
+ for (uint32_t i = 0; i < cellPointsIn.size(); ++i)
+ {
+ cellPoints[i] = (cellPointsIn[i] - mOffset) * (1.0f / mScaleFactor);
+ }
+
+ /**
+ Prebuild accelerator structure
+ */
+ BooleanEvaluator eval(mLoggingCallback);
+ BooleanEvaluator voronoiMeshEval(mLoggingCallback);
+
+ BBoxBasedAccelerator spAccel = BBoxBasedAccelerator(mesh, DEFAULT_BB_ACCELARATOR_RES);
+
+ std::vector<std::vector<int32_t> > neighboors;
+ findCellBasePlanes(cellPoints, neighboors);
+
+ /**
+ Fracture
+ */
+ int32_t parentChunk = replaceChunk ? mChunkData[chunkIndex].parent : chunkId;
+ std::vector<uint32_t> newlyCreatedChunksIds;
+ for (uint32_t i = 0; i < cellPoints.size(); ++i)
+ {
+ Mesh* cell = getCellMesh(eval, mPlaneIndexerOffset, i, cellPoints, neighboors);
+
+ if (cell == nullptr)
+ {
+ continue;
+ }
+ DummyAccelerator dmAccel(cell->getFacetCount());
+ voronoiMeshEval.performBoolean(mesh, cell, &spAccel, &dmAccel, BooleanConfigurations::BOOLEAN_INTERSECION());
+ Mesh* resultMesh = voronoiMeshEval.createNewMesh();
+ if (resultMesh)
+ {
+ mChunkData.push_back(ChunkInfo());
+ mChunkData.back().isLeaf = true;
+ mChunkData.back().meshData = resultMesh;
+ mChunkData.back().parent = parentChunk;
+ mChunkData.back().chunkId = mChunkIdCounter++;
+ newlyCreatedChunksIds.push_back(mChunkData.back().chunkId);
+ }
+ eval.reset();
+ delete cell;
+ }
+ mChunkData[chunkIndex].isLeaf = false;
+ if (replaceChunk)
+ {
+ eraseChunk(chunkId);
+ }
+ mPlaneIndexerOffset += static_cast<int32_t>(cellPoints.size() * cellPoints.size());
+
+
+ if (mRemoveIslands)
+ {
+ for (auto chunkToCheck : newlyCreatedChunksIds)
+ {
+ islandDetectionAndRemoving(chunkToCheck);
+ }
+ }
+ return 0;
+}
+
+Mesh FractureTool::getChunkMesh(int32_t chunkId)
+{
+ Mesh temp = *mChunkData[getChunkIndex(chunkId)].meshData;
+ for (uint32_t i = 0; i < temp.getVerticesCount(); ++i)
+ {
+ temp.getVertices()[i].p = temp.getVertices()[i].p * mScaleFactor + mOffset;
+ }
+ temp.recalculateBoundingBox();
+
+ return temp;
+}
+
+
+int32_t FractureTool::voronoiFracturing(uint32_t chunkId, const std::vector<physx::PxVec3>& cellPointsIn, const physx::PxVec3& scale, bool replaceChunk)
+{
+ if (chunkId == 0 && replaceChunk)
+ {
+ return 1;
+ }
+
+ int32_t chunkIndex = getChunkIndex(chunkId);
+ if (chunkIndex == -1 || cellPointsIn.size() < 2)
+ {
+ return 1;
+ }
+ if (!mChunkData[chunkIndex].isLeaf)
+ {
+ deleteAllChildsOfChunk(chunkId);
+ }
+ chunkIndex = getChunkIndex(chunkId);
+
+ Mesh* mesh = mChunkData[chunkIndex].meshData;
+
+ std::vector<PxVec3> cellPoints(cellPointsIn.size());
+ for (uint32_t i = 0; i < cellPointsIn.size(); ++i)
+ {
+ cellPoints[i] = (cellPointsIn[i] - mOffset) * (1.0f / mScaleFactor);
+
+ cellPoints[i].x *= (1.0f / scale.x);
+ cellPoints[i].y *= (1.0f / scale.y);
+ cellPoints[i].z *= (1.0f / scale.z);
+ }
+
+ /**
+ Prebuild accelerator structure
+ */
+ BooleanEvaluator eval(mLoggingCallback);
+ BooleanEvaluator voronoiMeshEval(mLoggingCallback);
+
+ BBoxBasedAccelerator spAccel = BBoxBasedAccelerator(mesh, DEFAULT_BB_ACCELARATOR_RES);
+
+ std::vector<std::vector<int32_t> > neighboors;
+ findCellBasePlanes(cellPoints, neighboors);
+
+ /**
+ Fracture
+ */
+ int32_t parentChunk = replaceChunk ? mChunkData[chunkIndex].parent : chunkId;
+ std::vector<uint32_t> newlyCreatedChunksIds;
+
+ for (uint32_t i = 0; i < cellPoints.size(); ++i)
+ {
+ Mesh* cell = getCellMesh(eval, mPlaneIndexerOffset, i, cellPoints, neighboors);
+
+ if (cell == nullptr)
+ {
+ continue;
+ }
+
+ for (uint32_t v = 0; v < cell->getVerticesCount(); ++v)
+ {
+ cell->getVertices()[v].p.x *= scale.x;
+ cell->getVertices()[v].p.y *= scale.y;
+ cell->getVertices()[v].p.z *= scale.z;
+ }
+ cell->recalculateBoundingBox();
+ DummyAccelerator dmAccel(cell->getFacetCount());
+ voronoiMeshEval.performBoolean(mesh, cell, &spAccel, &dmAccel, BooleanConfigurations::BOOLEAN_INTERSECION());
+ Mesh* resultMesh = voronoiMeshEval.createNewMesh();
+ if (resultMesh)
+ {
+ mChunkData.push_back(ChunkInfo());
+ mChunkData.back().isLeaf = true;
+ mChunkData.back().meshData = resultMesh;
+ mChunkData.back().parent = parentChunk;
+ mChunkData.back().chunkId = mChunkIdCounter++;
+ newlyCreatedChunksIds.push_back(mChunkData.back().chunkId);
+ }
+ eval.reset();
+ delete cell;
+ }
+ mChunkData[chunkIndex].isLeaf = false;
+ if (replaceChunk)
+ {
+ eraseChunk(chunkId);
+ }
+ mPlaneIndexerOffset += static_cast<int32_t>(cellPoints.size() * cellPoints.size());
+
+ if (mRemoveIslands)
+ {
+ for (auto chunkToCheck : newlyCreatedChunksIds)
+ {
+ islandDetectionAndRemoving(chunkToCheck);
+ }
+ }
+
+ return 0;
+}
+
+
+int32_t FractureTool::slicing(uint32_t chunkId, SlicingConfiguration conf, bool replaceChunk, RandomGeneratorBase* rnd)
+{
+ if (conf.noiseAmplitude != 0)
+ {
+ return slicingNoisy(chunkId, conf, replaceChunk, rnd);
+ }
+
+ if (replaceChunk && chunkId == 0)
+ {
+ return 1;
+ }
+
+ int32_t chunkIndex = getChunkIndex(chunkId);
+ if (chunkIndex == -1)
+ {
+ return 1;
+ }
+ if (!mChunkData[chunkIndex].isLeaf)
+ {
+ deleteAllChildsOfChunk(chunkId);
+ }
+ chunkIndex = getChunkIndex(chunkId);
+
+
+ Mesh* mesh = new Mesh(*mChunkData[chunkIndex].meshData);
+
+ BooleanEvaluator bTool(mLoggingCallback);
+
+ int32_t x_slices = conf.x_slices;
+ int32_t y_slices = conf.y_slices;
+ int32_t z_slices = conf.z_slices;
+
+ const PxBounds3 sourceBBox = mesh->getBoundingBox();
+
+ PxVec3 center = PxVec3(mesh->getBoundingBox().minimum.x, 0, 0);
+
+
+ float x_offset = (sourceBBox.maximum.x - sourceBBox.minimum.x) * (1.0f / (x_slices + 1));
+ float y_offset = (sourceBBox.maximum.y - sourceBBox.minimum.y) * (1.0f / (y_slices + 1));
+ float z_offset = (sourceBBox.maximum.z - sourceBBox.minimum.z) * (1.0f / (z_slices + 1));
+
+ center.x += x_offset;
+
+ PxVec3 dir(1, 0, 0);
+
+ Mesh* slBox = getCuttingBox(center, dir, 20, 0);
+
+ ChunkInfo ch;
+ ch.isLeaf = true;
+ ch.parent = replaceChunk ? mChunkData[chunkIndex].parent : chunkId;
+ std::vector<ChunkInfo> xSlicedChunks;
+ std::vector<ChunkInfo> ySlicedChunks;
+ std::vector<uint32_t> newlyCreatedChunksIds;
+ /**
+ Slice along x direction
+ */
+ for (int32_t slice = 0; slice < x_slices; ++slice)
+ {
+ PxVec3 randVect = PxVec3(2 * rnd->getRandomValue() - 1, 2 * rnd->getRandomValue() - 1, 2 * rnd->getRandomValue() - 1);
+ PxVec3 lDir = dir + randVect * conf.angle_variations;
+
+ setCuttingBox(center, lDir, slBox, 20, mPlaneIndexerOffset);
+ bTool.performFastCutting(mesh, slBox, BooleanConfigurations::BOOLEAN_DIFFERENCE());
+ ch.meshData = bTool.createNewMesh();
+
+ if (ch.meshData != 0)
+ {
+ xSlicedChunks.push_back(ch);
+ }
+ inverseNormalAndSetIndices(slBox, -mPlaneIndexerOffset);
+ ++mPlaneIndexerOffset;
+ bTool.performFastCutting(mesh, slBox, BooleanConfigurations::BOOLEAN_INTERSECION());
+ Mesh* result = bTool.createNewMesh();
+ delete mesh;
+ mesh = result;
+ if (mesh == nullptr)
+ {
+ break;
+ }
+ center.x += x_offset + (rnd->getRandomValue()) * conf.offset_variations * x_offset;
+ }
+ if (mesh != 0)
+ {
+ ch.meshData = mesh;
+ xSlicedChunks.push_back(ch);
+ }
+
+
+ for (uint32_t chunk = 0; chunk < xSlicedChunks.size(); ++chunk)
+ {
+ center = PxVec3(0, sourceBBox.minimum.y, 0);
+ center.y += y_offset;
+ dir = PxVec3(0, 1, 0);
+ mesh = xSlicedChunks[chunk].meshData;
+
+ for (int32_t slice = 0; slice < y_slices; ++slice)
+ {
+ PxVec3 randVect = PxVec3(2 * rnd->getRandomValue() - 1, 2 * rnd->getRandomValue() - 1, 2 * rnd->getRandomValue() - 1);
+ PxVec3 lDir = dir + randVect * conf.angle_variations;
+
+
+ setCuttingBox(center, lDir, slBox, 20, mPlaneIndexerOffset);
+ bTool.performFastCutting(mesh, slBox, BooleanConfigurations::BOOLEAN_DIFFERENCE());
+ ch.meshData = bTool.createNewMesh();
+ if (ch.meshData != 0)
+ {
+ ySlicedChunks.push_back(ch);
+ }
+ inverseNormalAndSetIndices(slBox, -mPlaneIndexerOffset);
+ ++mPlaneIndexerOffset;
+ bTool.performFastCutting(mesh, slBox, BooleanConfigurations::BOOLEAN_INTERSECION());
+ Mesh* result = bTool.createNewMesh();
+ delete mesh;
+ mesh = result;
+ if (mesh == nullptr)
+ {
+ break;
+ }
+ center.y += y_offset + (rnd->getRandomValue()) * conf.offset_variations * y_offset;
+ }
+ if (mesh != 0)
+ {
+ ch.meshData = mesh;
+ ySlicedChunks.push_back(ch);
+ }
+ }
+
+
+ for (uint32_t chunk = 0; chunk < ySlicedChunks.size(); ++chunk)
+ {
+ center = PxVec3(0, 0, sourceBBox.minimum.z);
+ center.z += z_offset;
+ dir = PxVec3(0, 0, 1);
+ mesh = ySlicedChunks[chunk].meshData;
+
+ for (int32_t slice = 0; slice < z_slices; ++slice)
+ {
+ PxVec3 randVect = PxVec3(2 * rnd->getRandomValue() - 1, 2 * rnd->getRandomValue() - 1, 2 * rnd->getRandomValue() - 1);
+ PxVec3 lDir = dir + randVect * conf.angle_variations;
+ setCuttingBox(center, lDir, slBox, 20, mPlaneIndexerOffset);
+ bTool.performFastCutting(mesh, slBox, BooleanConfigurations::BOOLEAN_DIFFERENCE());
+ ch.meshData = bTool.createNewMesh();
+ if (ch.meshData != 0)
+ {
+ ch.chunkId = mChunkIdCounter++;
+ newlyCreatedChunksIds.push_back(ch.chunkId);
+ mChunkData.push_back(ch);
+ }
+ inverseNormalAndSetIndices(slBox, -mPlaneIndexerOffset);
+ ++mPlaneIndexerOffset;
+ bTool.performFastCutting(mesh, slBox, BooleanConfigurations::BOOLEAN_INTERSECION());
+ Mesh* result = bTool.createNewMesh();
+ delete mesh;
+ mesh = result;
+ if (mesh == nullptr)
+ {
+ break;
+ }
+ center.z += z_offset + (rnd->getRandomValue()) * conf.offset_variations * z_offset;
+ }
+ if (mesh != 0)
+ {
+ ch.chunkId = mChunkIdCounter++;
+ ch.meshData = mesh;
+ mChunkData.push_back(ch);
+ newlyCreatedChunksIds.push_back(ch.chunkId);
+ }
+ }
+
+
+ delete slBox;
+
+ mChunkData[chunkIndex].isLeaf = false;
+ if (replaceChunk)
+ {
+ eraseChunk(chunkId);
+ }
+
+ if (mRemoveIslands)
+ {
+ for (auto chunkToCheck : newlyCreatedChunksIds)
+ {
+ islandDetectionAndRemoving(chunkToCheck);
+ }
+ }
+
+ return 0;
+}
+
+int32_t FractureTool::slicingNoisy(uint32_t chunkId, SlicingConfiguration conf, bool replaceChunk, RandomGeneratorBase* rnd)
+{
+ if (replaceChunk && chunkId == 0)
+ {
+ return 1;
+ }
+
+ int32_t chunkIndex = getChunkIndex(chunkId);
+ if (chunkIndex == -1)
+ {
+ return 1;
+ }
+ if (!mChunkData[chunkIndex].isLeaf)
+ {
+ deleteAllChildsOfChunk(chunkId);
+ }
+ chunkIndex = getChunkIndex(chunkId);
+
+
+ Mesh* mesh = new Mesh(*mChunkData[chunkIndex].meshData);
+
+ BooleanEvaluator bTool(mLoggingCallback);
+
+ int32_t x_slices = conf.x_slices;
+ int32_t y_slices = conf.y_slices;
+ int32_t z_slices = conf.z_slices;
+
+ const PxBounds3 sourceBBox = mesh->getBoundingBox();
+
+ PxVec3 center = PxVec3(mesh->getBoundingBox().minimum.x, 0, 0);
+
+
+ float x_offset = (sourceBBox.maximum.x - sourceBBox.minimum.x) * (1.0f / (x_slices + 1));
+ float y_offset = (sourceBBox.maximum.y - sourceBBox.minimum.y) * (1.0f / (y_slices + 1));
+ float z_offset = (sourceBBox.maximum.z - sourceBBox.minimum.z) * (1.0f / (z_slices + 1));
+
+ center.x += x_offset;
+
+ PxVec3 dir(1, 0, 0);
+
+ Mesh* slBox = nullptr;
+
+ ChunkInfo ch;
+ ch.isLeaf = true;
+ ch.parent = replaceChunk ? mChunkData[chunkIndex].parent : chunkId;
+ std::vector<ChunkInfo> xSlicedChunks;
+ std::vector<ChunkInfo> ySlicedChunks;
+ std::vector<uint32_t> newlyCreatedChunksIds;
+ float noisyPartSize = 1.8f;
+ int32_t acceleratorRes = 5;
+ /**
+ Slice along x direction
+ */
+ for (int32_t slice = 0; slice < x_slices; ++slice)
+ {
+ PxVec3 randVect = PxVec3(2 * rnd->getRandomValue() - 1, 2 * rnd->getRandomValue() - 1, 2 * rnd->getRandomValue() - 1);
+ PxVec3 lDir = dir + randVect * conf.angle_variations;
+ slBox = getNoisyCuttingBoxPair(center, lDir, 40, noisyPartSize, conf.surfaceResolution, mPlaneIndexerOffset, conf.noiseAmplitude, conf.noiseFrequency, conf.noiseOctaveNumber, rnd->getRandomValue());
+ // DummyAccelerator accel(mesh->getFacetCount());
+ IntersectionTestingAccelerator accel(mesh, acceleratorRes);
+ DummyAccelerator dummy(slBox->getFacetCount());
+ bTool.performBoolean(mesh, slBox, &accel, &dummy, BooleanConfigurations::BOOLEAN_DIFFERENCE());
+ ch.meshData = bTool.createNewMesh();
+ if (ch.meshData != 0)
+ {
+ xSlicedChunks.push_back(ch);
+ }
+ inverseNormalAndSetIndices(slBox, -mPlaneIndexerOffset);
+ ++mPlaneIndexerOffset;
+ bTool.performBoolean(mesh, slBox, &accel, &dummy, BooleanConfigurations::BOOLEAN_INTERSECION());
+ Mesh* result = bTool.createNewMesh();
+ delete slBox;
+ delete mesh;
+ mesh = result;
+ if (mesh == nullptr)
+ {
+ break;
+ }
+ center.x += x_offset + (rnd->getRandomValue()) * conf.offset_variations * x_offset;
+ }
+ if (mesh != 0)
+ {
+ ch.meshData = mesh;
+ xSlicedChunks.push_back(ch);
+ }
+ slBox = getCuttingBox(center, dir, 20, 0);
+ uint32_t slicedChunkSize = xSlicedChunks.size();
+ for (uint32_t chunk = 0; chunk < slicedChunkSize; ++chunk)
+ {
+ center = PxVec3(0, sourceBBox.minimum.y, 0);
+ center.y += y_offset;
+ dir = PxVec3(0, 1, 0);
+ mesh = xSlicedChunks[chunk].meshData;
+
+ for (int32_t slice = 0; slice < y_slices; ++slice)
+ {
+ PxVec3 randVect = PxVec3(2 * rnd->getRandomValue() - 1, 2 * rnd->getRandomValue() - 1, 2 * rnd->getRandomValue() - 1);
+ PxVec3 lDir = dir + randVect * conf.angle_variations;
+
+ slBox = getNoisyCuttingBoxPair(center, lDir, 40, noisyPartSize, conf.surfaceResolution, mPlaneIndexerOffset, conf.noiseAmplitude, conf.noiseFrequency, conf.noiseOctaveNumber, rnd->getRandomValue());
+ // DummyAccelerator accel(mesh->getFacetCount());
+ IntersectionTestingAccelerator accel(mesh, acceleratorRes);
+ DummyAccelerator dummy(slBox->getFacetCount());
+ bTool.performBoolean(mesh, slBox, &accel, &dummy, BooleanConfigurations::BOOLEAN_DIFFERENCE());
+ ch.meshData = bTool.createNewMesh();
+ if (ch.meshData != 0)
+ {
+ ySlicedChunks.push_back(ch);
+ }
+ inverseNormalAndSetIndices(slBox, -mPlaneIndexerOffset);
+ ++mPlaneIndexerOffset;
+ bTool.performBoolean(mesh, slBox, &accel, &dummy, BooleanConfigurations::BOOLEAN_INTERSECION());
+ Mesh* result = bTool.createNewMesh();
+ delete slBox;
+ delete mesh;
+ mesh = result;
+ if (mesh == nullptr)
+ {
+ break;
+ }
+ center.y += y_offset + (rnd->getRandomValue()) * conf.offset_variations * y_offset;
+ }
+ if (mesh != 0)
+ {
+ ch.meshData = mesh;
+ ySlicedChunks.push_back(ch);
+ }
+ }
+
+ for (uint32_t chunk = 0; chunk < ySlicedChunks.size(); ++chunk)
+ {
+ center = PxVec3(0, 0, sourceBBox.minimum.z);
+ center.z += z_offset;
+ dir = PxVec3(0, 0, 1);
+ mesh = ySlicedChunks[chunk].meshData;
+
+ for (int32_t slice = 0; slice < z_slices; ++slice)
+ {
+ PxVec3 randVect = PxVec3(2 * rnd->getRandomValue() - 1, 2 * rnd->getRandomValue() - 1, 2 * rnd->getRandomValue() - 1);
+ PxVec3 lDir = dir + randVect * conf.angle_variations;
+ slBox = getNoisyCuttingBoxPair(center, lDir, 40, noisyPartSize, conf.surfaceResolution, mPlaneIndexerOffset, conf.noiseAmplitude, conf.noiseFrequency, conf.noiseOctaveNumber, rnd->getRandomValue());
+ // DummyAccelerator accel(mesh->getFacetCount());
+ IntersectionTestingAccelerator accel(mesh, acceleratorRes);
+ DummyAccelerator dummy(slBox->getFacetCount());
+ bTool.performBoolean(mesh, slBox, &accel, &dummy, BooleanConfigurations::BOOLEAN_DIFFERENCE());
+ ch.meshData = bTool.createNewMesh();
+ if (ch.meshData != 0)
+ {
+ ch.chunkId = mChunkIdCounter++;
+ mChunkData.push_back(ch);
+ newlyCreatedChunksIds.push_back(ch.chunkId);
+ }
+ inverseNormalAndSetIndices(slBox, -mPlaneIndexerOffset);
+ ++mPlaneIndexerOffset;
+ bTool.performBoolean(mesh, slBox, &accel, &dummy, BooleanConfigurations::BOOLEAN_INTERSECION());
+ Mesh* result = bTool.createNewMesh();
+ delete mesh;
+ delete slBox;
+ mesh = result;
+ if (mesh == nullptr)
+ {
+ break;
+ }
+ center.z += z_offset + (rnd->getRandomValue()) * conf.offset_variations * z_offset;
+ }
+ if (mesh != 0)
+ {
+ ch.chunkId = mChunkIdCounter++;
+ ch.meshData = mesh;
+ mChunkData.push_back(ch);
+ newlyCreatedChunksIds.push_back(ch.chunkId);
+ }
+ }
+
+// delete slBox;
+
+ mChunkData[chunkIndex].isLeaf = false;
+ if (replaceChunk)
+ {
+ eraseChunk(chunkId);
+ }
+
+ if (mRemoveIslands)
+ {
+ for (auto chunkToCheck : newlyCreatedChunksIds)
+ {
+ islandDetectionAndRemoving(chunkToCheck);
+ }
+ }
+
+ return 0;
+}
+
+
+
+int32_t FractureTool::getChunkIndex(int32_t chunkId)
+{
+ for (uint32_t i = 0; i < mChunkData.size(); ++i)
+ {
+ if (mChunkData[i].chunkId == chunkId)
+ {
+ return i;
+ }
+ }
+ return -1;
+}
+
+int32_t FractureTool::getChunkDepth(int32_t chunkId)
+{
+ int32_t chunkIndex = getChunkIndex(chunkId);
+ if (chunkIndex == -1)
+ {
+ return -1;
+ }
+
+ int32_t depth = 0;
+
+ while (mChunkData[chunkIndex].parent != -1)
+ {
+ ++depth;
+ chunkIndex = getChunkIndex(mChunkData[chunkIndex].parent);
+ }
+ return depth;
+}
+
+std::vector<int32_t> FractureTool::getChunksIdAtDepth(uint32_t depth)
+{
+ std::vector<int32_t> chunkIds;
+
+ for (uint32_t i = 0; i < mChunkData.size(); ++i)
+ {
+ if (getChunkDepth(mChunkData[i].chunkId) == (int32_t)depth)
+ {
+ chunkIds.push_back(mChunkData[i].chunkId);
+ }
+ }
+ return chunkIds;
+}
+
+
// Returns the normalization transform applied by setSourceMesh():
// original position = normalized position * scale + offset.
void FractureTool::getTransformation(PxVec3& offset, float& scale)
{
	offset = mOffset;
	scale = mScaleFactor;
}
+
+void FractureTool::setSourceMesh(Mesh* mesh)
+{
+ if (mesh == nullptr)
+ {
+ return;
+ }
+ reset();
+
+ mChunkData.resize(1);
+ mChunkData[0].meshData = new Mesh(*mesh);
+ mChunkData[0].parent = -1;
+ mChunkData[0].isLeaf = true;
+ mChunkData[0].chunkId = mChunkIdCounter++;
+ mesh = mChunkData[0].meshData;
+
+ /**
+ Move to origin and scale to unit cube
+ */
+
+ mOffset = (mesh->getBoundingBox().maximum + mesh->getBoundingBox().minimum) * 0.5f;
+ PxVec3 bbSizes = (mesh->getBoundingBox().maximum - mesh->getBoundingBox().minimum);
+
+ mScaleFactor = std::max(bbSizes.x, std::max(bbSizes.y, bbSizes.z));
+
+ Vertex* verticesBuffer = mesh->getVertices();
+ for (uint32_t i = 0; i < mesh->getVerticesCount(); ++i)
+ {
+ verticesBuffer[i].p = (verticesBuffer[i].p - mOffset) * (1.0f / mScaleFactor);
+ }
+
+ mesh->getBoundingBox().minimum = (mesh->getBoundingBox().minimum - mOffset) * (1.0f / mScaleFactor);
+ mesh->getBoundingBox().maximum = (mesh->getBoundingBox().maximum - mOffset) * (1.0f / mScaleFactor);
+
+
+ for (uint32_t i = 0; i < mesh->getFacetCount(); ++i)
+ {
+ mesh->getFacet(i)->userData = 0; // Mark facet as initial boundary facet
+ }
+}
+
+
+void FractureTool::reset()
+{
+ mChunkPostprocessors.clear();
+ for (uint32_t i = 0; i < mChunkData.size(); ++i)
+ {
+ delete mChunkData[i].meshData;
+ }
+ mChunkData.clear();
+ mPlaneIndexerOffset = 1;
+ mChunkIdCounter = 0;
+}
+
+
+
+
+bool FractureTool::isAncestorForChunk(int32_t ancestorId, int32_t chunkId)
+{
+ if (ancestorId == chunkId)
+ {
+ return false;
+ }
+ while (chunkId != -1)
+ {
+ if (ancestorId == chunkId)
+ {
+ return true;
+ }
+ chunkId = getChunkIndex(chunkId);
+ if (chunkId == -1)
+ {
+ return false;
+ }
+ chunkId = mChunkData[chunkId].parent;
+ }
+ return false;
+}
+
+void FractureTool::eraseChunk(int32_t chunkId)
+{
+ deleteAllChildsOfChunk(chunkId);
+ int32_t index = getChunkIndex(chunkId);
+ if (index != -1)
+ {
+ delete mChunkData[index].meshData;
+ std::swap(mChunkData.back(), mChunkData[index]);
+ mChunkData.pop_back();
+ }
+}
+
+
+void FractureTool::deleteAllChildsOfChunk(int32_t chunkId)
+{
+ std::vector<int32_t> chunkToDelete;
+ for (uint32_t i = 0; i < mChunkData.size(); ++i)
+ {
+ if (isAncestorForChunk(chunkId, mChunkData[i].chunkId))
+ {
+ chunkToDelete.push_back(i);
+ }
+ }
+ for (int32_t i = (int32_t)chunkToDelete.size() - 1; i >= 0; --i)
+ {
+ int32_t m = chunkToDelete[i];
+ delete mChunkData[m].meshData;
+ std::swap(mChunkData.back(), mChunkData[m]);
+ mChunkData.pop_back();
+ }
+}
+
/**
Triangulate every chunk and drop chunks whose triangulation came out empty.
Must be called before any of the mesh-accessor methods (getBaseMesh(),
getNoisedMesh(), tesselate(), ...).
*/
void FractureTool::finalizeFracturing()
{
	// Discard postprocessors from any previous finalize pass.
	for (uint32_t i = 0; i < mChunkPostprocessors.size(); ++i)
	{
		delete mChunkPostprocessors[i];
	}
	mChunkPostprocessors.resize(mChunkData.size());
	for (uint32_t i = 0; i < mChunkPostprocessors.size(); ++i)
	{
		mChunkPostprocessors[i] = new ChunkPostProcessor();
	}

	// One postprocessor per chunk, triangulating that chunk's mesh.
	for (uint32_t i = 0; i < mChunkPostprocessors.size(); ++i)
	{
		mChunkPostprocessors[i]->triangulate(mChunkData[i].meshData);
	}
	// Chunks whose triangulation produced no triangles are removed below.
	std::vector<int32_t> badOnes;
	for (uint32_t i = 0; i < mChunkPostprocessors.size(); ++i)
	{
		if (mChunkPostprocessors[i]->getBaseMesh().empty())
		{
			badOnes.push_back(i);
		}
	}
	// Walk the bad indices from the highest down so swap-and-pop does not
	// invalidate the indices still to be processed.
	for (int32_t i = (int32_t)badOnes.size() - 1; i >= 0; --i)
	{
		int32_t chunkId = mChunkData[badOnes[i]].chunkId;
		// Reparent the removed chunk's children to its own parent.
		for (uint32_t j = 0; j < mChunkData.size(); ++j)
		{
			if (mChunkData[j].parent == chunkId)
				mChunkData[j].parent = mChunkData[badOnes[i]].parent;
		}
		std::swap(mChunkPostprocessors[badOnes[i]], mChunkPostprocessors.back());
		mChunkPostprocessors.pop_back();
		std::swap(mChunkData[badOnes[i]], mChunkData.back());
		mChunkData.pop_back();
	}

}
+
// Read-only access to all chunk records currently held by the tool.
const std::vector<ChunkInfo>& FractureTool::getChunkList()
{
	return mChunkData;
}
+
+void FractureTool::getBaseMesh(int32_t chunkIndex, std::vector<Triangle>& output)
+{
+ NVBLAST_ASSERT(mChunkPostprocessors.size() > 0);
+ if (mChunkPostprocessors.size() == 0)
+ {
+ return; // finalizeFracturing() should be called before getting mesh!
+ }
+ output = mChunkPostprocessors[chunkIndex]->getBaseMesh();
+
+ /* Scale mesh back */
+
+ for (uint32_t i = 0; i < output.size(); ++i)
+ {
+ output[i].a.p = output[i].a.p * mScaleFactor + mOffset;
+ output[i].b.p = output[i].b.p * mScaleFactor + mOffset;
+ output[i].c.p = output[i].c.p * mScaleFactor + mOffset;
+ }
+}
+
+void FractureTool::getNoisedMesh(int32_t chunkIndex, std::vector<Triangle>& output)
+{
+ NVBLAST_ASSERT(mChunkPostprocessors.size() > 0);
+ if (mChunkPostprocessors.size() == 0)
+ {
+ return; // finalizeFracturing() should be called before getting mesh!
+ }
+
+ if (mChunkData[chunkIndex].chunkId == 0)
+ {
+ output = mChunkPostprocessors[chunkIndex]->getBaseMesh();
+ }
+ else
+ {
+ output = mChunkPostprocessors[chunkIndex]->getNoisyMesh();
+ }
+
+ for (uint32_t i = 0; i < output.size(); ++i)
+ {
+ output[i].a.p = output[i].a.p * mScaleFactor + mOffset;
+ output[i].b.p = output[i].b.p * mScaleFactor + mOffset;
+ output[i].c.p = output[i].c.p * mScaleFactor + mOffset;
+ }
+}
+
+void FractureTool::tesselate(float averateEdgeLength)
+{
+ NVBLAST_ASSERT(mChunkPostprocessors.size() > 0);
+ if (mChunkPostprocessors.size() == 0)
+ {
+ return; // finalizeFracturing() should be called before tesselation!
+ }
+
+ for (uint32_t i = 0; i < mChunkPostprocessors.size(); ++i)
+ {
+ if (mChunkData[i].chunkId == 0) // skip source mesh
+ {
+ continue;
+ }
+ mChunkPostprocessors[i]->tesselateInternalSurface(averateEdgeLength / mScaleFactor);
+ }
+}
+
+void FractureTool::applyNoise(float amplitude, float frequency, int32_t octaves, float falloff, int32_t relaxIterations, float relaxFactor, int32_t seed)
+{
+ octaves = octaves <= 0 ? 1 : octaves;
+ if (mChunkPostprocessors.empty())
+ {
+ return;
+ }
+ SimplexNoise noise(amplitude / mScaleFactor, frequency * mScaleFactor, octaves, seed);
+ for (uint32_t i = 0; i < mChunkPostprocessors.size(); ++i)
+ {
+ if (mChunkData[i].chunkId == 0) // skip source mesh
+ {
+ continue;
+ }
+ mChunkPostprocessors[i]->applyNoise(noise, falloff, relaxIterations, relaxFactor);
+ }
+}
+
+float getVolume(std::vector<Triangle>& triangles)
+{
+ float volume = 0.0f;
+
+ for (uint32_t i = 0; i < triangles.size(); ++i)
+ {
+ PxVec3& a = triangles[i].a.p;
+ PxVec3& b = triangles[i].b.p;
+ PxVec3& c = triangles[i].c.p;
+ volume += (a.x * b.y * c.z - a.x * b.z * c.y - a.y * b.x * c.z + a.y * b.z * c.x + a.z * b.x * c.y - a.z * b.y * c.x);
+ }
+ return (1.0f / 6.0f) * PxAbs(volume);
+}
+
+float FractureTool::getMeshOverlap(Mesh& meshA, Mesh& meshB)
+{
+ BooleanEvaluator bTool;
+ bTool.performBoolean(&meshA, &meshB, BooleanConfigurations::BOOLEAN_INTERSECION());
+ Mesh* result = bTool.createNewMesh();
+ if (result == nullptr)
+ {
+ return 0.0f;
+ }
+
+ ChunkPostProcessor postProcessor;
+ postProcessor.triangulate(&meshA);
+
+ float baseVolume = getVolume(postProcessor.getBaseMesh());
+ if (baseVolume == 0)
+ {
+ return 0.0f;
+ }
+ postProcessor.triangulate(result);
+ float intrsVolume = getVolume(postProcessor.getBaseMesh());
+
+ delete result;
+
+ return intrsVolume / baseVolume;
+}
+
+void weldVertices(std::map<Vertex, uint32_t, VrtComp>& vertexMapping, std::vector<Vertex>& vertexBuffer, std::vector<uint32_t>& indexBuffer, std::vector<Triangle>& trb)
+{
+ for (uint32_t i = 0; i < trb.size(); ++i)
+ {
+ auto it = vertexMapping.find(trb[i].a);
+ if (it == vertexMapping.end())
+ {
+ indexBuffer.push_back(static_cast<uint32_t>(vertexBuffer.size()));
+ vertexMapping[trb[i].a] = static_cast<uint32_t>(vertexBuffer.size());
+ vertexBuffer.push_back(trb[i].a);
+ }
+ else
+ {
+ indexBuffer.push_back(it->second);
+ }
+ it = vertexMapping.find(trb[i].b);
+ if (it == vertexMapping.end())
+ {
+ indexBuffer.push_back(static_cast<uint32_t>(vertexBuffer.size()));
+ vertexMapping[trb[i].b] = static_cast<uint32_t>(vertexBuffer.size());
+ vertexBuffer.push_back(trb[i].b);
+ }
+ else
+ {
+ indexBuffer.push_back(it->second);
+ }
+ it = vertexMapping.find(trb[i].c);
+ if (it == vertexMapping.end())
+ {
+ indexBuffer.push_back(static_cast<uint32_t>(vertexBuffer.size()));
+ vertexMapping[trb[i].c] = static_cast<uint32_t>(vertexBuffer.size());
+ vertexBuffer.push_back(trb[i].c);
+ }
+ else
+ {
+ indexBuffer.push_back(it->second);
+ }
+ }
+
+}
+
// Enable/disable automatic island detection (splitting of disconnected
// pieces into separate chunks) after each fracture operation.
void FractureTool::setRemoveIslands(bool isRemoveIslands)
{
	mRemoveIslands = isRemoveIslands;
}
+
/**
Detect disconnected pieces ("islands") inside a chunk's mesh and split them
into separate chunks. The first component replaces the original chunk's mesh;
every further component becomes a new chunk with the same parent/leaf data.
\param chunkId  Chunk to analyze; the source chunk (id 0) is never split.
\return the number of components if a split happened (> 1), otherwise 0.
*/
int32_t FractureTool::islandDetectionAndRemoving(int32_t chunkId)
{
	if (chunkId == 0)
	{
		return 0;
	}
	int32_t chunkIndex = getChunkIndex(chunkId);
	ChunkPostProcessor prc;
	prc.triangulate(mChunkData[chunkIndex].meshData);

	Mesh* chunk = mChunkData[chunkIndex].meshData;

	// mapping: original vertex index -> welded vertex index.
	std::vector<uint32_t>& mapping = prc.getBaseMapping();
	std::vector<TriangleIndexed>& trs = prc.getBaseMeshIndexed();

	// Adjacency list over welded vertices.
	std::vector<std::vector<uint32_t> > graph(prc.getWeldedVerticesCount());
	// pm: welded vertex index -> position-unique representative index.
	std::vector<int32_t>& pm = prc.getPositionedMapping();
	if (pm.size() == 0)
	{
		return 0;
	}

	/**
	Chunk graph
	*/
	// Connect the three corners of every triangle (both directions each).
	for (uint32_t i = 0; i < trs.size(); ++i)
	{
		graph[pm[trs[i].ea]].push_back(pm[trs[i].eb]);
		graph[pm[trs[i].ea]].push_back(pm[trs[i].ec]);

		graph[pm[trs[i].ec]].push_back(pm[trs[i].eb]);
		graph[pm[trs[i].ec]].push_back(pm[trs[i].ea]);

		graph[pm[trs[i].eb]].push_back(pm[trs[i].ea]);
		graph[pm[trs[i].eb]].push_back(pm[trs[i].ec]);
	}
	// Also connect the endpoints of every original mesh edge.
	for (uint32_t i = 0; i < chunk->getEdgesCount(); ++i)
	{
		int v1 = chunk->getEdges()[i].s;
		int v2 = chunk->getEdges()[i].e;

		v1 = pm[mapping[v1]];
		v2 = pm[mapping[v2]];

		graph[v1].push_back(v2);
		graph[v2].push_back(v1);

	}


	/**
	Walk graph, mark components
	*/

	// comps[v] = id of the connected component containing welded vertex v.
	std::vector<int32_t> comps(prc.getWeldedVerticesCount(), -1);
	std::queue<uint32_t> que;
	int32_t cComp = 0;

	// BFS flood fill from every not-yet-labeled representative vertex.
	for (uint32_t i = 0; i < prc.getWeldedVerticesCount(); ++i)
	{
		int32_t to = pm[i];
		if (comps[to] != -1) continue;
		que.push(to);
		comps[to] = cComp;

		while (!que.empty())
		{
			int32_t c = que.front();
			que.pop();

			for (uint32_t j = 0; j < graph[c].size(); ++j)
			{
				if (comps[graph[c][j]] == -1)
				{
					que.push(graph[c][j]);
					comps[graph[c][j]] = cComp;
				}
			}
		}
		cComp++;
	}
	// Propagate the label from each representative to all welded vertices.
	for (uint32_t i = 0; i < prc.getWeldedVerticesCount(); ++i)
	{
		int32_t to = pm[i];
		comps[i] = comps[to];
	}
	// NOTE(review): longComps is computed but never used afterwards.
	std::vector<uint32_t> longComps(chunk->getVerticesCount());
	for (uint32_t i = 0; i < chunk->getVerticesCount(); ++i)
	{
		int32_t to = mapping[i];
		longComps[i] = comps[to];
	}

	if (cComp > 1)
	{
		// Per-component geometry buffers.
		std::vector<std::vector<Vertex> > compVertices(cComp);
		std::vector<std::vector<Facet> > compFacets(cComp);
		std::vector<std::vector<Edge> > compEdges(cComp);


		// compVertexMapping: original vertex index -> index inside its component.
		std::vector<uint32_t> compVertexMapping(chunk->getVerticesCount(), 0);
		Vertex* vrts = chunk->getVertices();
		for (uint32_t v = 0; v < chunk->getVerticesCount(); ++v)
		{
			int32_t vComp = comps[mapping[v]];
			compVertexMapping[v] = static_cast<uint32_t>(compVertices[vComp].size());
			compVertices[vComp].push_back(vrts[v]);
		}

		Facet* fcb = chunk->getFacetsBuffer();
		Edge* edb = chunk->getEdges();

		// Distribute every facet's edges to the component(s) its vertices belong to,
		// creating one facet record per component that received edges.
		for (uint32_t fc = 0; fc < chunk->getFacetCount(); ++fc)
		{
			std::vector<uint32_t> edgesPerComp(cComp, 0);
			for (uint32_t ep = fcb[fc].firstEdgeNumber; ep < fcb[fc].firstEdgeNumber + fcb[fc].edgesCount; ++ep)
			{
				int32_t vComp = comps[mapping[edb[ep].s]];
				edgesPerComp[vComp]++;
				compEdges[vComp].push_back(Edge(compVertexMapping[edb[ep].s], compVertexMapping[edb[ep].e]));
			}
			for (int32_t c = 0; c < cComp; ++c)
			{
				if (edgesPerComp[c] == 0)
				{
					continue;
				}
				compFacets[c].push_back(*chunk->getFacet(fc));
				compFacets[c].back().edgesCount = edgesPerComp[c];
				compFacets[c].back().firstEdgeNumber = static_cast<int32_t>(compEdges[c].size()) - edgesPerComp[c];
			}
		}

		// Component 0 replaces the original chunk's mesh in place.
		delete mChunkData[chunkIndex].meshData;
		mChunkData[chunkIndex].meshData = new Mesh(compVertices[0].data(), compEdges[0].data(), compFacets[0].data(), static_cast<uint32_t>(compVertices[0].size()),
			static_cast<uint32_t>(compEdges[0].size()), static_cast<uint32_t>(compFacets[0].size()));;
		// Remaining components become new chunks cloned from the original record.
		for (int32_t i = 1; i < cComp; ++i)
		{
			mChunkData.push_back(ChunkInfo(mChunkData[chunkIndex]));
			mChunkData.back().chunkId = mChunkIdCounter++;
			mChunkData.back().meshData = new Mesh(compVertices[i].data(), compEdges[i].data(), compFacets[i].data(), static_cast<uint32_t>(compVertices[i].size()),
				static_cast<uint32_t>(compEdges[i].size()), static_cast<uint32_t>(compFacets[i].size()));
		}

		return cComp;
	}
	return 0;
}
+
+void FractureTool::getBufferedBaseMeshes(std::vector<Vertex>& vertexBuffer, std::vector<std::vector<uint32_t> >& indexBuffer)
+{
+ std::map<Vertex, uint32_t, VrtComp> vertexMapping;
+ vertexBuffer.clear();
+ indexBuffer.clear();
+ indexBuffer.resize(mChunkPostprocessors.size());
+
+ for (uint32_t ch = 0; ch < mChunkPostprocessors.size(); ++ch)
+ {
+ std::vector<Triangle>& trb = mChunkPostprocessors[ch]->getBaseMesh();
+ weldVertices(vertexMapping, vertexBuffer, indexBuffer[ch], trb);
+ }
+ for (uint32_t i = 0; i < vertexBuffer.size(); ++i)
+ {
+ vertexBuffer[i].p = vertexBuffer[i].p * mScaleFactor + mOffset;
+ }
+}
+
+int32_t FractureTool::getChunkId(int32_t chunkIndex)
+{
+ if (chunkIndex < 0 || static_cast<uint32_t>(chunkIndex) >= mChunkData.size())
+ {
+ return -1;
+ }
+ return mChunkData[chunkIndex].chunkId;
+}
+
+void FractureTool::getBufferedNoiseMeshes(std::vector<Vertex>& vertexBuffer, std::vector<std::vector<uint32_t> >& indexBuffer)
+{
+ std::map<Vertex, uint32_t, VrtComp> vertexMapping;
+ vertexBuffer.clear();
+ indexBuffer.clear();
+ indexBuffer.resize(mChunkPostprocessors.size());
+
+ for (uint32_t ch = 0; ch < mChunkPostprocessors.size(); ++ch)
+ {
+ if (ch == 0)
+ {
+ std::vector<Triangle>& trb = mChunkPostprocessors[ch]->getBaseMesh();
+ weldVertices(vertexMapping, vertexBuffer, indexBuffer[ch], trb);
+ }
+ else
+ {
+ std::vector<Triangle>& trb = mChunkPostprocessors[ch]->getNoisyMesh();
+ weldVertices(vertexMapping, vertexBuffer, indexBuffer[ch], trb);
+ }
+ }
+ for (uint32_t i = 0; i < vertexBuffer.size(); ++i)
+ {
+ vertexBuffer[i].p = vertexBuffer[i].p * mScaleFactor + mOffset;
+ }
+}
+
+
+} // namespace Blast
+} // namespace Nv
+
diff --git a/NvBlast/sdk/extensions/authoring/source/NvBlastExtAuthoringInternalCommon.h b/NvBlast/sdk/extensions/authoring/source/NvBlastExtAuthoringInternalCommon.h
new file mode 100644
index 0000000..b8fb20e
--- /dev/null
+++ b/NvBlast/sdk/extensions/authoring/source/NvBlastExtAuthoringInternalCommon.h
@@ -0,0 +1,193 @@
+/*
+* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#ifndef NVBLASTINTERNALCOMMON_H
+#define NVBLASTINTERNALCOMMON_H
+#include "NvBlastExtAuthoringTypes.h"
+
+using namespace physx;
+
+namespace Nv
+{
+namespace Blast
+{
+
/**
Edge representation carrying the index of the facet it belongs to.
*/
struct EdgeWithParent
{
	int32_t s, e;    // start / end vertex indices
	int32_t parent;  // index of the parent facet
	EdgeWithParent() : s(0), e(0), parent(0) {}
	EdgeWithParent(int32_t s, int32_t e, int32_t p) : s(s), e(e), parent(p) {}
};


/**
Strict weak ordering of edges: by parent facet first, then by start vertex,
then by end vertex.
*/
struct EdgeComparator
{
	bool operator()(const EdgeWithParent& a, const EdgeWithParent& b) const
	{
		if (a.parent != b.parent)
		{
			return a.parent < b.parent;
		}
		if (a.s != b.s)
		{
			return a.s < b.s;
		}
		return a.e < b.e;
	}
};
+
+
/**
Vertex projection direction flag.
*/
enum ProjectionDirections
{
	YZ_PLANE = 1 << 1,  // drop x: project onto the YZ plane
	XY_PLANE = 1 << 2,  // drop z: project onto the XY plane
	ZX_PLANE = 1 << 3,  // drop y: project onto the ZX plane

	OPPOSITE_WINDING = 1 << 4  // set when the projection flips the polygon winding
};
+
+/**
+Computes best direction to project points.
+*/
+NV_FORCE_INLINE ProjectionDirections getProjectionDirection(const physx::PxVec3& normal)
+{
+ float maxv = std::max(abs(normal.x), std::max(abs(normal.y), abs(normal.z)));
+ ProjectionDirections retVal;
+ if (maxv == abs(normal.x))
+ {
+ retVal = YZ_PLANE;
+ if (normal.x < 0) retVal = (ProjectionDirections)((int)retVal | (int)OPPOSITE_WINDING);
+ return retVal;
+ }
+ if (maxv == abs(normal.y))
+ {
+ retVal = ZX_PLANE;
+ if (normal.y > 0) retVal = (ProjectionDirections)((int)retVal | (int)OPPOSITE_WINDING);
+ return retVal;
+ }
+ retVal = XY_PLANE;
+ if (normal.z < 0) retVal = (ProjectionDirections)((int)retVal | (int)OPPOSITE_WINDING);
+ return retVal;
+}
+
+
+/**
+Computes point projected on given axis aligned plane.
+*/
+NV_FORCE_INLINE physx::PxVec2 getProjectedPoint(const physx::PxVec3& point, ProjectionDirections dir)
+{
+ if (dir & YZ_PLANE)
+ {
+ return physx::PxVec2(point.y, point.z);
+ }
+ if (dir & ZX_PLANE)
+ {
+ return physx::PxVec2(point.x, point.z);
+ }
+ return physx::PxVec2(point.x, point.y);
+}
+
+/**
+Computes point projected on given axis aligned plane, this method is polygon-winding aware.
+*/
+NV_FORCE_INLINE physx::PxVec2 getProjectedPointWithWinding(const physx::PxVec3& point, ProjectionDirections dir)
+{
+ if (dir & YZ_PLANE)
+ {
+ if (dir & OPPOSITE_WINDING)
+ {
+ return physx::PxVec2(point.z, point.y);
+ }
+ else
+ return physx::PxVec2(point.y, point.z);
+ }
+ if (dir & ZX_PLANE)
+ {
+ if (dir & OPPOSITE_WINDING)
+ {
+ return physx::PxVec2(point.z, point.x);
+ }
+ return physx::PxVec2(point.x, point.z);
+ }
+ if (dir & OPPOSITE_WINDING)
+ {
+ return physx::PxVec2(point.y, point.x);
+ }
+ return physx::PxVec2(point.x, point.y);
+}
+
+
+
// Parenthesized so the macro expands safely inside larger expressions —
// previously `x / MAXIMUM_EXTENT` parsed as `(x / 1000) * 1000 * 1000`.
#define MAXIMUM_EXTENT (1000 * 1000 * 1000)
#define BBOX_TEST_EPS 1e-5f
+
+/**
+Test fattened bounding box intersetion.
+*/
+NV_INLINE bool weakBoundingBoxIntersection(const physx::PxBounds3& aBox, const physx::PxBounds3& bBox)
+{
+ if (std::max(aBox.minimum.x, bBox.minimum.x) > std::min(aBox.maximum.x, bBox.maximum.x) + BBOX_TEST_EPS)
+ return false;
+ if (std::max(aBox.minimum.y, bBox.minimum.y) > std::min(aBox.maximum.y, bBox.maximum.y) + BBOX_TEST_EPS)
+ return false;
+ if (std::max(aBox.minimum.z, bBox.minimum.z) > std::min(aBox.maximum.z, bBox.maximum.z) + BBOX_TEST_EPS)
+ return false;
+ return true;
+}
+
+
+
+/**
+Test segment vs plane intersection. If segment intersects the plane true is returned. Point of intersection is written into 'result'.
+*/
+NV_INLINE bool getPlaneSegmentIntersection(const PxPlane& pl, const PxVec3& a, const PxVec3& b, PxVec3& result)
+{
+ float div = (b - a).dot(pl.n);
+ if (PxAbs(div) < 0.0001f)
+ {
+ if (pl.contains(a))
+ {
+ result = a;
+ return true;
+ }
+ else
+ {
+ return false;
+ }
+ }
+ float t = (-a.dot(pl.n) - pl.d) / div;
+ if (t < 0.0f || t > 1.0f)
+ {
+ return false;
+ }
+ result = (b - a) * t + a;
+ return true;
+}
+
+} // namespace Blast
+} // namespace Nv
+
+#endif \ No newline at end of file
diff --git a/NvBlast/sdk/extensions/authoring/source/NvBlastExtAuthoringMesh.cpp b/NvBlast/sdk/extensions/authoring/source/NvBlastExtAuthoringMesh.cpp
new file mode 100644
index 0000000..a25d2fe
--- /dev/null
+++ b/NvBlast/sdk/extensions/authoring/source/NvBlastExtAuthoringMesh.cpp
@@ -0,0 +1,558 @@
+/*
+* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#define _CRT_SECURE_NO_WARNINGS
+
+#include "NvBlastExtAuthoringMesh.h"
+#include "NvBlastExtAuthoringTypes.h"
+#include <string.h>
+#include "NvBlastExtAuthoringPerlinNoise.h"
+
+using physx::PxVec2;
+using physx::PxVec3;
+using physx::PxBounds3;
+
+namespace Nv
+{
+namespace Blast
+{
+
/**
Build a mesh from an indexed triangle list.
One facet is created per triangle, with its three edges stored consecutively
in the edge array. The bounding box is computed from the positions.
\param position       vertex positions (verticesCount entries, required)
\param normals        per-vertex normals; may be null (zero normals are used)
\param uv             per-vertex UVs (channel 0); may be null (zero UVs are used)
\param verticesCount  number of vertices in the arrays above
\param indices        triangle corner indices, three per triangle
\param indicesCount   number of indices — assumed to be a multiple of 3
*/
Mesh::Mesh(PxVec3* position, PxVec3* normals, PxVec2* uv, uint32_t verticesCount, uint32_t* indices, uint32_t indicesCount)
{

	mVertices.resize(verticesCount);
	for (uint32_t i = 0; i < mVertices.size(); ++i)
	{
		mVertices[i].p = position[i];
	}
	if (normals != 0)
	{
		for (uint32_t i = 0; i < mVertices.size(); ++i)
		{
			mVertices[i].n = normals[i];
		}

	}
	else
	{
		// No normals supplied: zero them out.
		for (uint32_t i = 0; i < mVertices.size(); ++i)
		{
			mVertices[i].n = PxVec3(0, 0, 0);
		}
	}
	if (uv != 0)
	{
		for (uint32_t i = 0; i < mVertices.size(); ++i)
		{
			mVertices[i].uv[0] = uv[i];
		}
	}
	else
	{
		// No UVs supplied: zero them out.
		for (uint32_t i = 0; i < mVertices.size(); ++i)
		{
			mVertices[i].uv[0] = PxVec2(0, 0);
		}
	}
	mEdges.resize(indicesCount);
	mFacets.resize(indicesCount / 3);
	mBounds.setEmpty();
	for (uint32_t i = 0; i < verticesCount; ++i)
	{
		mBounds.include(mVertices[i].p);
	}
	// Convert each index triple into one triangular facet with 3 closed-loop edges.
	int32_t facetId = 0;
	for (uint32_t i = 0; i < indicesCount; i += 3)
	{
		mEdges[i].s = indices[i];
		mEdges[i].e = indices[i + 1];

		mEdges[i + 1].s = indices[i + 1];
		mEdges[i + 1].e = indices[i + 2];

		mEdges[i + 2].s = indices[i + 2];
		mEdges[i + 2].e = indices[i];
		mFacets[facetId].firstEdgeNumber = i;
		mFacets[facetId].edgesCount = 3;
		facetId++;
	}
}
+
/**
Build a mesh by copying pre-built vertex/edge/facet arrays and recomputing
the bounding box. The arrays are copied with memcpy, so Vertex/Edge/Facet
must remain trivially copyable.
*/
Mesh::Mesh(Vertex* vertices, Edge* edges, Facet* facets, uint32_t posCount, uint32_t edgesCount, uint32_t facetsCount)
{
	mVertices.resize(posCount);
	mEdges.resize(edgesCount);
	mFacets.resize(facetsCount);

	memcpy(mVertices.data(), vertices, sizeof(Vertex) * posCount);
	memcpy(mEdges.data(), edges, sizeof(Edge) * edgesCount);
	memcpy(mFacets.data(), facets, sizeof(Facet) * facetsCount);
	mBounds.setEmpty();
	for (uint32_t i = 0; i < posCount; ++i)
	{
		mBounds.include(mVertices[i].p);
	}
}
+
+float Mesh::getMeshVolume()
+{
+ /**
+ Check if mesh boundary consist only of triangles
+ */
+ for (uint32_t i = 0; i < mFacets.size(); ++i)
+ {
+ if (mFacets[i].edgesCount != 3)
+ {
+ return 0.0f;
+ }
+ }
+
+ float volume = 0;
+ for (uint32_t i = 0; i < mFacets.size(); ++i)
+ {
+ int32_t offset = mFacets[i].firstEdgeNumber;
+ PxVec3& a = mVertices[mEdges[offset].s].p;
+ PxVec3& b = mVertices[mEdges[offset + 1].s].p;
+ PxVec3& c = mVertices[mEdges[offset + 2].s].p;
+
+ volume += (a.x * b.y * c.z - a.x * b.z * c.y - a.y * b.x * c.z + a.y * b.z * c.x + a.z * b.x * c.y - a.z * b.y * c.x);
+ }
+ return (1.0f / 6.0f) * abs(volume);
+}
+
+
// Number of facets (polygons) in the mesh.
uint32_t Mesh::getFacetCount()
{
	return static_cast<uint32_t>(mFacets.size());
}

// Pointer to the internal (mutable) vertex array.
Vertex* Mesh::getVertices()
{
	return mVertices.data();
}

// Pointer to the internal (mutable) edge array.
Edge* Mesh::getEdges()
{
	return mEdges.data();
}

// Number of edges across all facets.
uint32_t Mesh::getEdgesCount()
{
	return static_cast<uint32_t>(mEdges.size());
}
// Number of vertices.
uint32_t Mesh::getVerticesCount()
{
	return static_cast<uint32_t>(mVertices.size());
}
// Pointer to the internal facet array.
Facet* Mesh::getFacetsBuffer()
{
	return mFacets.data();
}
// Pointer to a single facet by index (no bounds checking).
Facet* Mesh::getFacet(int32_t facet)
{
	return &mFacets[facet];
}


Mesh::~Mesh()
{
}

// Mutable reference to the cached bounding box; callers that move vertices
// should call recalculateBoundingBox() afterwards.
PxBounds3& Mesh::getBoundingBox()
{
	return mBounds;
}
+
+void Mesh::recalculateBoundingBox()
+{
+ mBounds.setEmpty();
+ for (uint32_t i = 0; i < mVertices.size(); ++i)
+ {
+ mBounds.include(mVertices[i].p);
+ }
+}
+
+
+
+void getTangents(PxVec3& normal, PxVec3& t1, PxVec3& t2)
+{
+
+ if (abs(normal.z) < 0.9)
+ {
+ t1 = normal.cross(PxVec3(0, 0, 1));
+ }
+ else
+ {
+ t1 = normal.cross(PxVec3(1, 0, 0));
+ }
+ t2 = t1.cross(normal);
+}
+
/**
Create a cutting-box mesh: 8 vertices forming a box that sits on the plane
through 'point' with the given normal and extends 'size' along the normal.
All vertex normals are set to -normal (facing the material being cut),
UVs are tiled over [0, 10], and every facet's userData is stamped with 'id'.
The caller owns the returned mesh.
*/
Mesh* getCuttingBox(const PxVec3& point, const PxVec3& normal, float size, int32_t id)
{
	PxVec3 lNormal = normal.getNormalized();
	PxVec3 t1, t2;
	getTangents(lNormal, t1, t2);

	// Bottom face (0-3) lies in the cutting plane; top face (4-7) is offset by lNormal * size.
	std::vector<Vertex> positions(8);
	positions[0].p = point + (t1 + t2) * size;
	positions[1].p = point + (t2 - t1) * size;

	positions[2].p = point + (-t1 - t2) * size;
	positions[3].p = point + (t1 - t2) * size;


	positions[4].p = point + (t1 + t2 + lNormal) * size;
	positions[5].p = point + (t2 - t1 + lNormal) * size;

	positions[6].p = point + (-t1 - t2 + lNormal) * size;
	positions[7].p = point + (t1 - t2 + lNormal) * size;

	// All normals point against the cut direction.
	positions[0].n = -lNormal;
	positions[1].n = -lNormal;

	positions[2].n = -lNormal;
	positions[3].n = -lNormal;


	positions[4].n = -lNormal;
	positions[5].n = -lNormal;

	positions[6].n = -lNormal;
	positions[7].n = -lNormal;

	positions[0].uv[0] = PxVec2(0, 0);
	positions[1].uv[0] = PxVec2(10, 0);

	positions[2].uv[0] = PxVec2(10, 10);
	positions[3].uv[0] = PxVec2(0, 10);


	positions[4].uv[0] = PxVec2(0, 0);
	positions[5].uv[0] = PxVec2(10, 0);

	positions[6].uv[0] = PxVec2(10, 10);
	positions[7].uv[0] = PxVec2(0, 10);


	// Six quad facets; each Facet(firstEdge, edgeCount, userData) references
	// four consecutive edges pushed just before it.
	std::vector<Edge> edges;
	std::vector<Facet> facets;

	edges.push_back(Edge(0, 1));
	edges.push_back(Edge(1, 2));
	edges.push_back(Edge(2, 3));
	edges.push_back(Edge(3, 0));
	facets.push_back(Facet(0, 4, id));


	edges.push_back(Edge(0, 3));
	edges.push_back(Edge(3, 7));
	edges.push_back(Edge(7, 4));
	edges.push_back(Edge(4, 0));
	facets.push_back(Facet(4, 4, id));

	edges.push_back(Edge(3, 2));
	edges.push_back(Edge(2, 6));
	edges.push_back(Edge(6, 7));
	edges.push_back(Edge(7, 3));
	facets.push_back(Facet(8, 4, id));

	edges.push_back(Edge(5, 6));
	edges.push_back(Edge(6, 2));
	edges.push_back(Edge(2, 1));
	edges.push_back(Edge(1, 5));
	facets.push_back(Facet(12, 4, id));

	edges.push_back(Edge(4, 5));
	edges.push_back(Edge(5, 1));
	edges.push_back(Edge(1, 0));
	edges.push_back(Edge(0, 4));
	facets.push_back(Facet(16, 4, id));

	edges.push_back(Edge(4, 7));
	edges.push_back(Edge(7, 6));
	edges.push_back(Edge(6, 5));
	edges.push_back(Edge(5, 4));
	facets.push_back(Facet(20, 4, id));
	return new Mesh(positions.data(), edges.data(), facets.data(), static_cast<uint32_t>(positions.size()), static_cast<uint32_t>(edges.size()), static_cast<uint32_t>(facets.size()));
}
+
+void inverseNormalAndSetIndices(Mesh* mesh, int32_t id)
+{
+ for (uint32_t i = 0; i < mesh->getVerticesCount(); ++i)
+ {
+ mesh->getVertices()[i].n *= -1.0f;
+ }
+ for (uint32_t i = 0; i < mesh->getFacetCount(); ++i)
+ {
+ mesh->getFacet(i)->userData = id;
+ }
+
+}
+
/**
Reposition an existing cutting-box mesh in place: rewrite its 8 vertex
positions and normals for the plane through 'point' with the given normal,
restamp every facet's userData with 'id', and refresh the bounding box.
NOTE(review): writes positions[0..7] unconditionally — assumes 'mesh' was
produced by getCuttingBox() (i.e. has at least 8 vertices); confirm at call sites.
*/
void setCuttingBox(const PxVec3& point, const PxVec3& normal, Mesh* mesh, float size, int32_t id)
{
	PxVec3 t1, t2;
	PxVec3 lNormal = normal.getNormalized();
	getTangents(lNormal, t1, t2);

	// Bottom face (0-3) in the cutting plane, top face (4-7) offset by lNormal * size.
	Vertex* positions = mesh->getVertices();
	positions[0].p = point + (t1 + t2) * size;
	positions[1].p = point + (t2 - t1) * size;

	positions[2].p = point + (-t1 - t2) * size;
	positions[3].p = point + (t1 - t2) * size;


	positions[4].p = point + (t1 + t2 + lNormal) * size;
	positions[5].p = point + (t2 - t1 + lNormal) * size;

	positions[6].p = point + (-t1 - t2 + lNormal) * size;
	positions[7].p = point + (t1 - t2 + lNormal) * size;

	// All normals point against the cut direction.
	positions[0].n = -lNormal;
	positions[1].n = -lNormal;

	positions[2].n = -lNormal;
	positions[3].n = -lNormal;


	positions[4].n = -lNormal;
	positions[5].n = -lNormal;

	positions[6].n = -lNormal;
	positions[7].n = -lNormal;

	for (uint32_t i = 0; i < mesh->getFacetCount(); ++i)
	{
		mesh->getFacet(i)->userData = id;
	}
	mesh->recalculateBoundingBox();
}
+
+// A mesh is usable only when it carries at least one vertex, edge and facet.
+bool Mesh::isValid()
+{
+    return !(mVertices.empty() || mEdges.empty() || mFacets.empty());
+}
+
+// Builds a cutting-box mesh whose bottom face is a (resolution x resolution)
+// grid displaced along the cut normal by simplex noise, surrounded by a flat
+// ring out to half-extent 'size' and closed by four walls and a lid. All
+// facets are tagged with 'id'. Noise parameters feed SimplexNoise directly.
+// Fix vs. original: added the static_casts for size_t -> uint32_t/int32_t
+// conversions (consistent with the other Mesh constructions in this file).
+Mesh* getNoisyCuttingBoxPair(const physx::PxVec3& point, const physx::PxVec3& normal, float size, float jaggedPlaneSize, uint32_t resolution, int32_t id, float amplitude, float frequency, int32_t octaves, int32_t seed)
+{
+    SimplexNoise nEval(amplitude, frequency, octaves, seed);
+    PxVec3 t1, t2;
+    PxVec3 lNormal = normal.getNormalized();
+    getTangents(lNormal, t1, t2);
+
+    // Grid vertices plus 12 extras: 8 outer-box corners and the 4 corners of
+    // the jagged (noisy) plane.
+    std::vector<Vertex> vertices ((resolution + 1) * (resolution + 1) + 12);
+    PxVec3 cPosit = point + (t1 + t2) * jaggedPlaneSize;
+    PxVec3 t1d = -t1 * 2.0f * jaggedPlaneSize / resolution;
+    PxVec3 t2d = -t2 * 2.0f * jaggedPlaneSize / resolution;
+
+    // Lay out the regular grid in the cutting plane.
+    int32_t vrtId = 0;
+    for (uint32_t i = 0; i < resolution + 1; ++i)
+    {
+        PxVec3 lcPosit = cPosit;
+        for (uint32_t j = 0; j < resolution + 1; ++j)
+        {
+            vertices[vrtId].p = lcPosit;
+            lcPosit += t1d;
+            vrtId++;
+        }
+        cPosit += t2d;
+    }
+
+    // Displace interior grid vertices along the normal by noise; the border
+    // row/column stays flat so the grid still meets the surrounding ring.
+    for (uint32_t i = 1; i < resolution; ++i)
+    {
+        for (uint32_t j = 1; j < resolution; ++j)
+        {
+            PxVec3& pnt = vertices[i * (resolution + 1) + j].p;
+            pnt += lNormal * nEval.sample(pnt);
+        }
+    }
+
+    // One quad facet per grid cell.
+    std::vector<Edge> edges;
+    std::vector<Facet> facets;
+    for (uint32_t i = 0; i < resolution; ++i)
+    {
+        for (uint32_t j = 0; j < resolution; ++j)
+        {
+            const uint32_t start = static_cast<uint32_t>(edges.size());
+            edges.push_back(Edge(i * (resolution + 1) + j, i * (resolution + 1) + j + 1));
+            edges.push_back(Edge(i * (resolution + 1) + j + 1, (i + 1) * (resolution + 1) + j + 1));
+            edges.push_back(Edge((i + 1) * (resolution + 1) + j + 1, (i + 1) * (resolution + 1) + j));
+            edges.push_back(Edge((i + 1) * (resolution + 1) + j, i * (resolution + 1) + j));
+            facets.push_back(Facet(start, 4, id));
+        }
+    }
+    uint32_t offset = (resolution + 1) * (resolution + 1);
+
+    // Extra vertices: 0-3 outer-box bottom corners, 4-7 outer-box top corners
+    // (shifted by the normal), 8-11 jagged-plane corners (inner ring bound).
+    vertices[0 + offset].p = point + (t1 + t2) * size;
+    vertices[1 + offset].p = point + (t2 - t1) * size;
+
+    vertices[2 + offset].p = point + (-t1 - t2) * size;
+    vertices[3 + offset].p = point + (t1 - t2) * size;
+
+    vertices[8 + offset].p = point + (t1 + t2) * jaggedPlaneSize;
+    vertices[9 + offset].p = point + (t2 - t1) * jaggedPlaneSize;
+
+    vertices[10 + offset].p = point + (-t1 - t2) * jaggedPlaneSize;
+    vertices[11 + offset].p = point + (t1 - t2) * jaggedPlaneSize;
+
+    vertices[4 + offset].p = point + (t1 + t2 + lNormal) * size;
+    vertices[5 + offset].p = point + (t2 - t1 + lNormal) * size;
+
+    vertices[6 + offset].p = point + (-t1 - t2 + lNormal) * size;
+    vertices[7 + offset].p = point + (t1 - t2 + lNormal) * size;
+
+    // Smooth normals for displaced interior grid vertices from the four
+    // neighboring difference vectors (border vertices keep default normals).
+    for (uint32_t i = 1; i < resolution; ++i)
+    {
+        for (uint32_t j = 1; j < resolution; ++j)
+        {
+            PxVec3 v1 = vertices[(resolution + 1) * (i + 1) + j].p - vertices[(resolution + 1) * i + j].p;
+            PxVec3 v2 = vertices[(resolution + 1) * (i) + j + 1].p - vertices[(resolution + 1) * i + j].p;
+            PxVec3 v3 = vertices[(resolution + 1) * (i - 1) + j].p - vertices[(resolution + 1) * i + j].p;
+            PxVec3 v4 = vertices[(resolution + 1) * (i) + j - 1].p - vertices[(resolution + 1) * i + j].p;
+
+            vertices[(resolution + 1) * i + j].n = v1.cross(v2) + v2.cross(v3) + v3.cross(v4) + v4.cross(v1);
+            vertices[(resolution + 1) * i + j].n.normalize();
+        }
+    }
+
+    const int32_t edgeOffset = static_cast<int32_t>(edges.size());
+
+    // Flat ring between the outer box rim (0-3) and the jagged plane rim
+    // (8-11, reversed winding): one 8-edge facet with a hole.
+    edges.push_back(Edge(0 + offset, 1 + offset));
+    edges.push_back(Edge(1 + offset, 2 + offset));
+    edges.push_back(Edge(2 + offset, 3 + offset));
+    edges.push_back(Edge(3 + offset, 0 + offset));
+
+    edges.push_back(Edge(11 + offset, 10 + offset));
+    edges.push_back(Edge(10 + offset, 9 + offset));
+    edges.push_back(Edge(9 + offset, 8 + offset));
+    edges.push_back(Edge(8 + offset, 11 + offset));
+
+    facets.push_back(Facet(edgeOffset, 8, id));
+
+    // Four side walls of the box.
+    edges.push_back(Edge(0 + offset, 3 + offset));
+    edges.push_back(Edge(3 + offset, 7 + offset));
+    edges.push_back(Edge(7 + offset, 4 + offset));
+    edges.push_back(Edge(4 + offset, 0 + offset));
+    facets.push_back(Facet(8 + edgeOffset, 4, id));
+
+    edges.push_back(Edge(3 + offset, 2 + offset));
+    edges.push_back(Edge(2 + offset, 6 + offset));
+    edges.push_back(Edge(6 + offset, 7 + offset));
+    edges.push_back(Edge(7 + offset, 3 + offset));
+    facets.push_back(Facet(12 + edgeOffset, 4, id));
+
+    edges.push_back(Edge(5 + offset, 6 + offset));
+    edges.push_back(Edge(6 + offset, 2 + offset));
+    edges.push_back(Edge(2 + offset, 1 + offset));
+    edges.push_back(Edge(1 + offset, 5 + offset));
+    facets.push_back(Facet(16 + edgeOffset, 4, id));
+
+    edges.push_back(Edge(4 + offset, 5 + offset));
+    edges.push_back(Edge(5 + offset, 1 + offset));
+    edges.push_back(Edge(1 + offset, 0 + offset));
+    edges.push_back(Edge(0 + offset, 4 + offset));
+    facets.push_back(Facet(20 + edgeOffset, 4, id));
+
+    // Lid (top face).
+    edges.push_back(Edge(4 + offset, 7 + offset));
+    edges.push_back(Edge(7 + offset, 6 + offset));
+    edges.push_back(Edge(6 + offset, 5 + offset));
+    edges.push_back(Edge(5 + offset, 4 + offset));
+    facets.push_back(Facet(24 + edgeOffset, 4, id));
+
+    return new Mesh(vertices.data(), edges.data(), facets.data(), static_cast<uint32_t>(vertices.size()), static_cast<uint32_t>(edges.size()), static_cast<uint32_t>(facets.size()));
+}
+
+// Builds an axis-aligned cube of half-extent 'size' centered at 'point',
+// with tiled planar UVs on every corner and zeroed normals (left for later
+// computation). Facets carry no user id here.
+Mesh* getBigBox(const PxVec3& point, float size)
+{
+    PxVec3 normal(0, 0, 1);
+    normal.normalize();
+    PxVec3 t1, t2;
+    getTangents(normal, t1, t2);
+
+    // Quad corner offsets in the plane; vertices 0-3 sit at -normal, 4-7 at
+    // +normal. UVs repeat per quad.
+    const PxVec3 corner[4] = { t1 + t2, t2 - t1, -t1 - t2, t1 - t2 };
+    const PxVec2 cornerUv[4] = { PxVec2(0, 0), PxVec2(10, 0), PxVec2(10, 10), PxVec2(0, 10) };
+
+    std::vector<Vertex> positions(8);
+    for (uint32_t i = 0; i < 8; ++i)
+    {
+        const PxVec3 shift = (i < 4) ? -normal : normal;
+        positions[i].p = point + (corner[i & 3] + shift) * size;
+        positions[i].uv[0] = cornerUv[i & 3];
+        positions[i].n = PxVec3(0, 0, 0);
+    }
+
+    // Six quad faces, four edges each, same winding as the other box
+    // builders in this file.
+    static const uint32_t face[6][4] = {
+        { 0, 1, 2, 3 },
+        { 0, 3, 7, 4 },
+        { 3, 2, 6, 7 },
+        { 5, 6, 2, 1 },
+        { 4, 5, 1, 0 },
+        { 4, 7, 6, 5 }
+    };
+
+    std::vector<Edge> edges;
+    std::vector<Facet> facets;
+    for (uint32_t f = 0; f < 6; ++f)
+    {
+        for (uint32_t e = 0; e < 4; ++e)
+        {
+            edges.push_back(Edge(face[f][e], face[f][(e + 1) & 3]));
+        }
+        facets.push_back(Facet(f * 4, 4));
+    }
+    return new Mesh(positions.data(), edges.data(), facets.data(), static_cast<uint32_t>(positions.size()), static_cast<uint32_t>(edges.size()), static_cast<uint32_t>(facets.size()));
+}
+
+} // namespace Blast
+} // namespace Nv
diff --git a/NvBlast/sdk/extensions/authoring/source/NvBlastExtAuthoringPerlinNoise.h b/NvBlast/sdk/extensions/authoring/source/NvBlastExtAuthoringPerlinNoise.h
new file mode 100644
index 0000000..95308c2
--- /dev/null
+++ b/NvBlast/sdk/extensions/authoring/source/NvBlastExtAuthoringPerlinNoise.h
@@ -0,0 +1,373 @@
+/*
+* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#ifndef NVBLASTEXTAUTHORINGPERLINNOISE_H
+#define NVBLASTEXTAUTHORINGPERLINNOISE_H
+
+
+#include <NvBlastExtAuthoringTypes.h>
+
+#include <PxVec4.h>
+#include <PxVec3.h>
+
+#define PERLIN_NOISE_SAMPLE_TABLE 512
+using physx::PxVec3;
+namespace Nv
+{
+namespace Blast
+{
+
+/***********
+ Noise generation routines, copied from Apex.
+*/
+
+
+// Dot product of gradient q with the displacement (rx, ry, rz).
+NV_INLINE float at3(const float& rx, const float& ry, const float& rz, const PxVec3 q)
+{
+    return q[0] * rx + q[1] * ry + q[2] * rz;
+}
+
+// Perlin's quintic smoothstep 6t^5 - 15t^4 + 10t^3 (C2-continuous at 0 and 1).
+NV_INLINE float fade(float t)
+{
+    const float t3 = t * t * t;
+    return t3 * (t * (t * 6.0f - 15.0f) + 10.0f);
+}
+
+// Linear interpolation from a to b by factor t.
+NV_INLINE float lerp(float t, float a, float b)
+{
+    const float delta = b - a;
+    return a + t * delta;
+}
+
+// For coordinate axis i of 'point', computes the two lattice cell indices
+// (b0, b1) into the permutation table and the fractional offsets (r0, r1) of
+// the point relative to those cells.
+// NOTE(review): the +0x1000 bias keeps t positive before truncation, which
+// assumes input coordinates stay above -4096 -- verify against callers.
+NV_INLINE void setup(int i, PxVec3 point, float& t, int& b0, int& b1, float& r0, float& r1)
+{
+    t = point[i] + (0x1000);
+    b0 = ((int)t) & (PERLIN_NOISE_SAMPLE_TABLE - 1);   // wrap into table (size is a power of two)
+    b1 = (b0 + 1) & (PERLIN_NOISE_SAMPLE_TABLE - 1);   // next cell, wrapped
+    r0 = t - (int)t;                                   // fractional part in [0, 1)
+    r1 = r0 - 1.0f;                                    // offset from the far cell
+}
+
+
+// Classic Perlin gradient noise at 'point', using permutation table 'p' and
+// gradient table 'g' (both mirrored to 2*TABLE+2 entries by the caller's
+// init). Trilinearly interpolates the eight corner gradient contributions
+// with the quintic fade curve.
+NV_INLINE float noiseSample(PxVec3 point, int* p, PxVec3* g)
+{
+    int bx0, bx1, by0, by1, bz0, bz1, b00, b10, b01, b11;
+    float rx0, rx1, ry0, ry1, rz0, rz1, sy, sz, a, b, c, d, t, u, v;
+    PxVec3 q;
+    int i, j;
+
+    // Cell indices and fractional offsets per axis.
+    setup(0, point, t, bx0, bx1, rx0, rx1);
+    setup(1, point, t, by0, by1, ry0, ry1);
+    setup(2, point, t, bz0, bz1, rz0, rz1);
+
+    i = p[bx0];
+    j = p[bx1];
+
+    b00 = p[i + by0];
+    b10 = p[j + by0];
+    b01 = p[i + by1];
+    b11 = p[j + by1];
+
+    // Fade weights; note 't' is reused here as the x-axis weight.
+    t = fade(rx0);
+    sy = fade(ry0);
+    sz = fade(rz0);
+
+    // Interpolate the corner gradient dot products along x, then y, then z.
+    q = g[b00 + bz0]; u = at3(rx0, ry0, rz0, q);
+    q = g[b10 + bz0]; v = at3(rx1, ry0, rz0, q);
+    a = lerp(t, u, v);
+
+    q = g[b01 + bz0]; u = at3(rx0, ry1, rz0, q);
+    q = g[b11 + bz0]; v = at3(rx1, ry1, rz0, q);
+    b = lerp(t, u, v);
+
+    c = lerp(sy, a, b);
+
+    q = g[b00 + bz1]; u = at3(rx0, ry0, rz1, q);
+    q = g[b10 + bz1]; v = at3(rx1, ry0, rz1, q);
+    a = lerp(t, u, v);
+
+    q = g[b01 + bz1]; u = at3(rx0, ry1, rz1, q);
+    q = g[b11 + bz1]; v = at3(rx1, ry1, rz1, q);
+    b = lerp(t, u, v);
+
+    d = lerp(sy, a, b);
+
+    return lerp(sz, c, d);
+}
+
+/**
+    Perlin Noise generation tool
+
+    Sums 'octaves' layers of gradient noise, each octave at double the
+    frequency and half the amplitude of the previous one. Permutation and
+    gradient tables are filled lazily from the supplied random generator.
+*/
+class PerlinNoise
+{
+public:
+    /**
+    \param[in] rnd Random value generator
+    \param[in] octaves Number of noise octaves
+    \param[in] frequency Frequency of noise
+    \param[in] amplitude Amplitude of noise
+    */
+    PerlinNoise(Nv::Blast::RandomGeneratorBase* rnd, int octaves = 1, float frequency = 1., float amplitude = 1.)
+        : mRnd(rnd),
+        mOctaves(octaves),
+        mFrequency(frequency),
+        mAmplitude(amplitude),
+        mbInit(false)
+    {
+
+    }
+
+    /*
+    Reset state of noise generator (also regenerates the tables immediately)
+    \param[in] octaves Number of noise octaves
+    \param[in] frequency Frequency of noise
+    \param[in] amplitude Amplitude of noise
+    */
+    void reset(int octaves = 1, float frequency = 1.f, float amplitude = 1.f)
+    {
+        mOctaves = octaves;
+        mFrequency = frequency;
+        mAmplitude = amplitude;
+        init();
+    }
+
+    /**
+    Get Perlin Noise value at given point
+    */
+    float sample(const physx::PxVec3& point)
+    {
+        return perlinNoise(point);
+    }
+
+private:
+    PerlinNoise& operator=(const PerlinNoise&);  // non-assignable (holds a generator pointer)
+
+    // Sums mOctaves layers of noiseSample; per octave the input is doubled
+    // (frequency x2) and the contribution halved (amplitude x0.5).
+    float perlinNoise(physx::PxVec3 point)
+    {
+        if (!mbInit)
+            init();
+
+        const int octaves = mOctaves;
+        const float frequency = mFrequency;
+        float amplitude = mAmplitude;
+        float result = 0.0f;
+
+        point *= frequency;
+
+        for (int i = 0; i < octaves; ++i)
+        {
+            PxVec3 lpnt;
+            lpnt[0] = point.x;
+            lpnt[1] = point.y;
+            lpnt[2] = point.z;
+            result += (noiseSample(lpnt, p, g)) * amplitude;
+            point *= 2.0f;     // next octave: double frequency
+            amplitude *= 0.5f; // ...and half amplitude
+        }
+        return result;
+    }
+
+    // Fills the permutation table with a shuffle of [0, TABLE) and the
+    // gradient table with normalized random vectors, then mirrors both
+    // (plus two guard entries) so lookups like p[p[i] + j] need no wrapping.
+    void init(void)
+    {
+        mbInit = true;
+
+        unsigned i, j;
+        int k;
+
+        for (i = 0; i < (unsigned)PERLIN_NOISE_SAMPLE_TABLE; i++)
+        {
+            p[i] = (int)i;
+            for (j = 0; j < 3; ++j)
+                g[i][j] = mRnd->getRandomValue();
+            g[i].normalize();
+        }
+
+        // Shuffle the permutation table.
+        // NOTE(review): assumes getRandomValue() returns values in [0, 1);
+        // a value of exactly 1.0 would index one past the shuffled range --
+        // verify the generator's contract.
+        while (--i)
+        {
+            k = p[i];
+            j = static_cast<uint32_t>(mRnd->getRandomValue() * PERLIN_NOISE_SAMPLE_TABLE);
+            p[i] = p[j];
+            p[j] = k;
+        }
+
+        // Mirror tables for overflow-free double indexing.
+        for (i = 0; i < PERLIN_NOISE_SAMPLE_TABLE + 2; ++i)
+        {
+            p[(unsigned)PERLIN_NOISE_SAMPLE_TABLE + i] = p[i];
+            for (j = 0; j < 3; ++j)
+                g[(unsigned)PERLIN_NOISE_SAMPLE_TABLE + i][j] = g[i][j];
+        }
+
+    }
+
+    Nv::Blast::RandomGeneratorBase* mRnd;  // source of randomness for init()
+    int mOctaves;
+    float mFrequency;
+    float mAmplitude;
+
+    // Permutation vector
+    int p[(unsigned)(PERLIN_NOISE_SAMPLE_TABLE + PERLIN_NOISE_SAMPLE_TABLE + 2)];
+    // Gradient vector
+    PxVec3 g[(unsigned)(PERLIN_NOISE_SAMPLE_TABLE + PERLIN_NOISE_SAMPLE_TABLE + 2)];
+
+    bool mbInit;  // lazy-init flag; sample() fills the tables on first use
+};
+
+
+/**
+    Simplex noise generation tool
+
+    Stateless 4D simplex noise: eval4D returns the analytic gradient in
+    (x, y, z) and the noise value in w; sample() sums octaves of the w
+    component for a 3D input point.
+*/
+class SimplexNoise
+{
+
+    int32_t mOctaves;    // number of octaves summed by sample()
+    float mAmplitude;    // overall output scale
+    float mFrequency;    // input coordinate scale
+    int32_t mSeed;       // seed mixed into the gradient hash
+
+    // Large primes hashing lattice coordinates into gradient indices.
+    static const int X_NOISE_GEN = 1619;
+    static const int Y_NOISE_GEN = 31337;
+    static const int Z_NOISE_GEN = 6971;
+    static const int W_NOISE_GEN = 1999;
+    static const int SEED_NOISE_GEN = 1013;
+    static const int SHIFT_NOISE_GEN = 8;
+
+    // floor() that is correct for negative inputs without calling libm.
+    NV_INLINE int fastfloor(float x)
+    {
+        return (x >= 0) ? (int)x : (int)(x - 1);
+    }
+
+    SimplexNoise& operator=(const SimplexNoise&)  // non-assignable
+    {
+        return *this;
+    }
+
+public:
+    /**
+    \param[in] ampl Amplitude of noise
+    \param[in] freq Frequency of noise
+    \param[in] octaves Number of noise octaves
+    \param[in] seed Random seed value
+    */
+    SimplexNoise(float ampl, float freq, int32_t octaves, int32_t seed) : mOctaves(octaves), mAmplitude(ampl), mFrequency(freq), mSeed(seed) {};
+    // 4D simplex noise
+    // returns: (x,y,z) = noise grad, w = noise value
+
+    /**
+    Evaluate noise at given 4d-point
+    \param[in] x x coordinate of point
+    \param[in] y y coordinate of point
+    \param[in] z z coordinate of point
+    \param[in] w w coordinate of point
+    \param[in] seed Random seed value
+    \return Noise valued vector (x,y,z) and scalar (w)
+    */
+    physx::PxVec4 eval4D(float x, float y, float z, float w, int seed)
+    {
+        // The skewing and unskewing factors are hairy again for the 4D case
+        const float F4 = (physx::PxSqrt(5.0f) - 1.0f) / 4.0f;
+        const float G4 = (5.0f - physx::PxSqrt(5.0f)) / 20.0f;
+        // Skew the (x,y,z,w) space to determine which cell of 24 simplices we're in
+        float s = (x + y + z + w) * F4; // Factor for 4D skewing
+        int ix = fastfloor(x + s);
+        int iy = fastfloor(y + s);
+        int iz = fastfloor(z + s);
+        int iw = fastfloor(w + s);
+        float tu = (ix + iy + iz + iw) * G4; // Factor for 4D unskewing
+        // Unskew the cell origin back to (x,y,z,w) space
+        float x0 = x - (ix - tu); // The x,y,z,w distances from the cell origin
+        float y0 = y - (iy - tu);
+        float z0 = z - (iz - tu);
+        float w0 = w - (iw - tu);
+
+        // Rank the coordinates pairwise; 'c' packs four 2-bit counters that
+        // determine the corner traversal order through the simplex.
+        int c = (x0 > y0) ? (1 << 0) : (1 << 2);
+        c += (x0 > z0) ? (1 << 0) : (1 << 4);
+        c += (x0 > w0) ? (1 << 0) : (1 << 6);
+        c += (y0 > z0) ? (1 << 2) : (1 << 4);
+        c += (y0 > w0) ? (1 << 2) : (1 << 6);
+        c += (z0 > w0) ? (1 << 4) : (1 << 6);
+
+        physx::PxVec4 res;
+        res.setZero();
+
+        // Calculate the contribution from the five corners
+        for (int p = 4; p >= 0; --p)
+        {
+            // Corner offsets: a coordinate participates once its rank >= p.
+            int ixp = ((c >> 0) & 3) >= p ? 1 : 0;
+            int iyp = ((c >> 2) & 3) >= p ? 1 : 0;
+            int izp = ((c >> 4) & 3) >= p ? 1 : 0;
+            int iwp = ((c >> 6) & 3) >= p ? 1 : 0;
+
+            float xp = x0 - ixp + (4 - p) * G4;
+            float yp = y0 - iyp + (4 - p) * G4;
+            float zp = z0 - izp + (4 - p) * G4;
+            float wp = w0 - iwp + (4 - p) * G4;
+
+            // Radial falloff; corners farther than the kernel radius add 0.
+            float t = 0.6f - xp * xp - yp * yp - zp * zp - wp * wp;
+            if (t > 0)
+            {
+                //get index
+                int gradIndex = int((
+                    X_NOISE_GEN * (ix + ixp)
+                    + Y_NOISE_GEN * (iy + iyp)
+                    + Z_NOISE_GEN * (iz + izp)
+                    + W_NOISE_GEN * (iw + iwp)
+                    + SEED_NOISE_GEN * seed)
+                    & 0xffffffff);
+                gradIndex ^= (gradIndex >> SHIFT_NOISE_GEN);
+                gradIndex &= 31;
+
+                // Decode one of 32 lattice gradients from the hashed index.
+                physx::PxVec4 g;
+                {
+                    const int h = gradIndex;
+                    const int hs = 2 - (h >> 4);
+                    const int h1 = (h >> 3);
+                    g.x = (h1 == 0) ? 0.0f : ((h & 4) ? -1.0f : 1.0f);
+                    g.y = (h1 == 1) ? 0.0f : ((h & (hs << 1)) ? -1.0f : 1.0f);
+                    g.z = (h1 == 2) ? 0.0f : ((h & hs) ? -1.0f : 1.0f);
+                    g.w = (h1 == 3) ? 0.0f : ((h & 1) ? -1.0f : 1.0f);
+                }
+                float gdot = (g.x * xp + g.y * yp + g.z * zp + g.w * wp);
+
+                // Value: t^4 * (g . d); gradient via product rule.
+                float t2 = t * t;
+                float t3 = t2 * t;
+                float t4 = t3 * t;
+
+                float dt4gdot = 8 * t3 * gdot;
+
+                res.x += t4 * g.x - dt4gdot * xp;
+                res.y += t4 * g.y - dt4gdot * yp;
+                res.z += t4 * g.z - dt4gdot * zp;
+                res.w += t4 * gdot;
+            }
+        }
+        // scale the result to cover the range [-1,1]
+        res *= 27;
+        return res;
+    }
+
+    /**
+    Evaluate noise at given 3d-point
+    \param[in] p Point in which noise will be evaluated
+    \return Noise value at given point
+    */
+    float sample(physx::PxVec3 p)
+    {
+        p *= mFrequency;
+        float result = 0.0f;
+        float alpha = 1;
+        // Octave frequency scales linearly with i (not the usual power of
+        // two); amplitude decays by 0.45 per octave, and the w offset
+        // decorrelates the layers.
+        for (int32_t i = 1; i <= mOctaves; ++i)
+        {
+            result += eval4D(p.x * i, p.y * i, p.z * i, i * 5.0f, mSeed).w * alpha;
+            alpha *= 0.45;
+        }
+        return result * mAmplitude;
+    }
+
+};
+
+
+ } // Blast namespace
+} // Nv namespace
+
+
+
+#endif \ No newline at end of file
diff --git a/NvBlast/sdk/extensions/authoring/source/NvBlastExtAuthoringTriangulator.cpp b/NvBlast/sdk/extensions/authoring/source/NvBlastExtAuthoringTriangulator.cpp
new file mode 100644
index 0000000..0b7187f
--- /dev/null
+++ b/NvBlast/sdk/extensions/authoring/source/NvBlastExtAuthoringTriangulator.cpp
@@ -0,0 +1,1439 @@
+/*
+* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+// This warning arises when using some stl containers with older versions of VC
+// c:\program files (x86)\microsoft visual studio 12.0\vc\include\xtree(1826): warning C4702: unreachable code
+#include "NvPreprocessor.h"
+#if NV_VC && NV_VC < 14
+#pragma warning(disable : 4702)
+#endif
+#include "NvBlastExtAuthoringTriangulator.h"
+#include "NvBlastExtAuthoringMesh.h"
+#include "NvBlastExtAuthoringTypes.h"
+#include <math.h>
+#include "NvPreprocessor.h"
+#include <algorithm>
+#include <vector>
+#include <set>
+#include "NvBlastExtAuthoringBooleanTool.h"
+#include <queue>
+#include "NvBlastExtAuthoringPerlinNoise.h"
+#include <NvBlastAssert.h>
+
+using physx::PxVec3;
+using physx::PxVec2;
+
+#define VEC_COMPARISON_OFFSET 1e-5f
+#define TWO_VERTICES_THRESHOLD 1e-7
+
+namespace Nv
+{
+namespace Blast
+{
+
+// Epsilon-tolerant lexicographic ordering over position, then normal, then
+// the first UV channel; used as a map comparator to deduplicate vertices
+// that coincide in all three attributes.
+// NOTE(review): a tolerance-based "less" is not a strict weak ordering (the
+// induced equivalence is not transitive), which std::map formally requires;
+// it behaves well only when distinct vertices are separated by more than
+// the tolerance.
+bool VrtComp::operator()(const Vertex& a, const Vertex& b) const
+{
+    // Positions first, tolerance VEC_COMPARISON_OFFSET (1e-5).
+    if (a.p.x + VEC_COMPARISON_OFFSET < b.p.x) return true;
+    if (a.p.x - VEC_COMPARISON_OFFSET > b.p.x) return false;
+    if (a.p.y + VEC_COMPARISON_OFFSET < b.p.y) return true;
+    if (a.p.y - VEC_COMPARISON_OFFSET > b.p.y) return false;
+    if (a.p.z + VEC_COMPARISON_OFFSET < b.p.z) return true;
+    if (a.p.z - VEC_COMPARISON_OFFSET > b.p.z) return false;
+
+    // Normals, looser tolerance (1e-3).
+    if (a.n.x + 1e-3 < b.n.x) return true;
+    if (a.n.x - 1e-3 > b.n.x) return false;
+    if (a.n.y + 1e-3 < b.n.y) return true;
+    if (a.n.y - 1e-3 > b.n.y) return false;
+    if (a.n.z + 1e-3 < b.n.z) return true;
+    if (a.n.z - 1e-3 > b.n.z) return false;
+
+    // First UV set; the chain deliberately ends after uv[0].y's "less" test.
+    if (a.uv[0].x + 1e-3 < b.uv[0].x) return true;
+    if (a.uv[0].x - 1e-3 > b.uv[0].x) return false;
+    if (a.uv[0].y + 1e-3 < b.uv[0].y) return true;
+    return false;
+}
+
+// Epsilon-tolerant lexicographic (x, y, z) ordering used to deduplicate
+// positions in ordered containers.
+// NOTE(review): as with VrtComp, a tolerance comparator is not a strict
+// weak ordering in the formal sense; acceptable only for well-separated
+// points.
+bool VrtPositionComparator::operator()(const PxVec3& a, const PxVec3& b) const
+{
+    if (a.x + VEC_COMPARISON_OFFSET < b.x) return true;
+    if (a.x - VEC_COMPARISON_OFFSET > b.x) return false;
+    if (a.y + VEC_COMPARISON_OFFSET < b.y) return true;
+    if (a.y - VEC_COMPARISON_OFFSET > b.y) return false;
+    if (a.z + VEC_COMPARISON_OFFSET < b.z) return true;
+    if (a.z - VEC_COMPARISON_OFFSET > b.z) return false;
+    return false;
+}
+
+// True when a and b coincide within TWO_VERTICES_THRESHOLD on every axis.
+NV_FORCE_INLINE bool compareTwoVertices(const PxVec3& a, const PxVec3& b)
+{
+    const PxVec3 d = b - a;
+    return std::abs(d.x) < TWO_VERTICES_THRESHOLD && std::abs(d.y) < TWO_VERTICES_THRESHOLD && std::abs(d.z) < TWO_VERTICES_THRESHOLD;
+}
+// 2D overload: true when a and b coincide within TWO_VERTICES_THRESHOLD.
+NV_FORCE_INLINE bool compareTwoVertices(const PxVec2& a, const PxVec2& b)
+{
+    const PxVec2 d = b - a;
+    return std::abs(d.x) < TWO_VERTICES_THRESHOLD && std::abs(d.y) < TWO_VERTICES_THRESHOLD;
+}
+
+// Z-component of the 2D cross product a x b; its sign gives the winding
+// direction of the turn from a to b.
+NV_FORCE_INLINE float getRotation(const PxVec2& a, const PxVec2& b)
+{
+    return a.x * b.y - b.x * a.y;
+}
+
+// Returns true when pnt lies inside triangle (a, b, c); points coinciding
+// with a triangle corner (within threshold) are treated as outside.
+inline bool pointInside(PxVec2 a, PxVec2 b, PxVec2 c, PxVec2 pnt)
+{
+    if (compareTwoVertices(a, pnt) || compareTwoVertices(b, pnt) || compareTwoVertices(c, pnt))
+    {
+        return false;
+    }
+    // Inside when pnt is on the same side of all three edges, i.e. the three
+    // edge cross products share a sign (all >= 0 or all <= 0).
+    const float s1 = getRotation((b - a), (pnt - a));
+    const float s2 = getRotation((c - b), (pnt - b));
+    const float s3 = getRotation((a - c), (pnt - c));
+    const bool allNonNegative = s1 >= 0.0f && s2 >= 0.0f && s3 >= 0.0f;
+    const bool allNonPositive = s1 <= 0.0f && s2 <= 0.0f && s3 <= 0.0f;
+    return allNonNegative || allNonPositive;
+}
+// Triangulates a (possibly concave) simple polygon, given as an index loop
+// into 'vert', with the classic ear-clipping method. Each clipped ear is
+// appended to mBaseMeshTriangles and its edges registered via addEdgeTr.
+// After each successful clip the scan restarts from the beginning
+// (curr = -1), giving O(n^3) worst case but simple, robust logic.
+// Fixes vs. original: removed dead commented-out code; renamed the swapped
+// locals (the old 'nV' actually held the previous vertex and 'pV' the next
+// one); projects points directly instead of copying whole Vertex structs.
+void ChunkPostProcessor::triangulatePolygonWithEarClipping(std::vector<uint32_t>& inputPolygon, Vertex* vert, ProjectionDirections dir)
+{
+    int32_t vCount = static_cast<int32_t>(inputPolygon.size());
+    if (vCount < 3)
+    {
+        return;
+    }
+    for (int32_t curr = 0; curr < vCount && vCount > 2; ++curr)
+    {
+        int32_t prev = (curr == 0) ? vCount - 1 : curr - 1;
+        int32_t next = (curr == vCount - 1) ? 0 : curr + 1;
+
+        // 2D projections of the previous, current and next loop vertices.
+        const PxVec2 currProj = getProjectedPoint(vert[inputPolygon[curr]].p, dir);
+        const PxVec2 prevProj = getProjectedPoint(vert[inputPolygon[prev]].p, dir);
+        const PxVec2 nextProj = getProjectedPoint(vert[inputPolygon[next]].p, dir);
+
+        // Check whether 'curr' is an ear tip: the corner must be convex...
+        float rot = getRotation((nextProj - prevProj).getNormalized(), (currProj - prevProj).getNormalized());
+        if (!(dir & OPPOSITE_WINDING)) rot = -rot;
+        if (rot > 0.0001)
+        {
+            // ...and no other polygon vertex may lie inside the candidate ear.
+            bool isEar = true;
+            for (int32_t vrt = 0; vrt < vCount; ++vrt)
+            {
+                if (vrt == curr || vrt == prev || vrt == next) continue;
+                if (pointInside(currProj, prevProj, nextProj, getProjectedPoint(vert[inputPolygon[vrt]].p, dir)))
+                {
+                    isEar = false;
+                    break;
+                }
+            }
+            if (isEar)
+            {
+                addEdgeTr(Edge(inputPolygon[curr], inputPolygon[prev]));
+                addEdgeTr(Edge(inputPolygon[next], inputPolygon[prev]));
+                addEdgeTr(Edge(inputPolygon[curr], inputPolygon[next]));
+
+                mBaseMeshTriangles.push_back(TriangleIndexed(inputPolygon[curr], inputPolygon[prev], inputPolygon[next]));
+                vCount--;
+                inputPolygon.erase(inputPolygon.begin() + curr);
+                curr = -1;  // restart the scan over the shrunken polygon
+            }
+        }
+    }
+}
+
+
+
+// Bookkeeping for one boundary loop of a facet: its area-weighted normal,
+// signed area, index into the serialized loop list, and whether it has
+// already been merged into an outer loop as a hole. Sorting orders loops by
+// ascending (signed) area.
+// Fix vs. original: all members are now initialized (normal/area/index were
+// left indeterminate by the default constructor).
+struct LoopInfo
+{
+    LoopInfo() : normal(0.0f), area(0.0f), index(0), used(false) {}
+    PxVec3 normal;
+    float area;
+    int32_t index;
+    bool used;
+    bool operator<(const LoopInfo& b) const
+    {
+        return area < b.area;
+    }
+};
+
+// Merges a hole loop ('internalLoop') into its surrounding outer loop
+// ('externalLoop') by inserting a zero-width bridge between the hole's
+// rightmost vertex and a mutually visible vertex of the outer loop,
+// producing one simple polygon that ear clipping can handle.
+// Returns 0 on success, 1 when no bridge could be established.
+int32_t unitePolygons(std::vector<uint32_t>& externalLoop, std::vector<uint32_t>& internalLoop, Vertex* vrx, ProjectionDirections dir)
+{
+    if (externalLoop.size() < 3 || internalLoop.size() < 3)
+        return 1;
+    /**
+    Find point with maximum x-coordinate
+    */
+    float x_max = -MAXIMUM_EXTENT;
+    int32_t mIndex = -1;
+    for (uint32_t i = 0; i < internalLoop.size(); ++i)
+    {
+        float nx = getProjectedPoint(vrx[internalLoop[i]].p, dir).x;
+        if (nx > x_max)
+        {
+            mIndex = i;
+            x_max = nx;
+        }
+    }
+    if (mIndex == -1)
+    {
+        return 1;
+    }
+
+    /**
+    Search for base point on external loop
+    */
+    float minX = MAXIMUM_EXTENT;
+    int32_t vrtIndex = -1;
+    bool isFromBuffer = 0;
+    PxVec2 holePoint = getProjectedPoint(vrx[internalLoop[mIndex]].p, dir);
+    PxVec2 computedPoint;
+    // Cast a ray from the hole's rightmost point in +x and find the nearest
+    // intersection with the outer loop.
+    for (uint32_t i = 0; i < externalLoop.size(); ++i)
+    {
+        int32_t nx = (i + 1) % externalLoop.size();
+        PxVec2 pnt1 = getProjectedPoint(vrx[externalLoop[i]].p, dir);
+        PxVec2 pnt2 = getProjectedPoint(vrx[externalLoop[nx]].p, dir);
+        // Only segments reaching past x_max can intersect the ray.
+        if (pnt1.x < x_max && pnt2.x < x_max)
+        {
+            continue;
+        }
+        PxVec2 vc = pnt2 - pnt1;
+        if (vc.y == 0 && pnt1.y == holePoint.y)
+        {
+            // Horizontal segment collinear with the ray: take its nearer
+            // endpoint to the right of the hole point as the base vertex.
+            if (pnt1.x < minX && pnt1.x < pnt2.x && pnt1.x > x_max)
+            {
+                minX = pnt1.x;
+                vrtIndex = i;
+                isFromBuffer = true;
+            }
+            if (pnt2.x < minX && pnt2.x < pnt1.x && pnt2.x > x_max)
+            {
+                minX = pnt2.x;
+                vrtIndex = nx;
+                isFromBuffer = true;
+            }
+        }
+        else
+        {
+            // General segment: intersect with the horizontal line through
+            // holePoint.
+            float t = (holePoint.y - pnt1.y) / vc.y;
+            if (t <= 1 && t >= 0)
+            {
+                PxVec2 tempPoint = vc * t + pnt1;
+                if (tempPoint.x < minX && tempPoint.x > x_max)
+                {
+                    minX = tempPoint.x;
+                    vrtIndex = i;
+                    isFromBuffer = false;
+                    computedPoint = tempPoint;
+                }
+            }
+        }
+    }
+    if (vrtIndex == -1)
+    {
+        // std::cout << "Triangulation: base vertex for inner loop is not found..." << std::endl;
+        return 1;
+    }
+    int32_t bridgePoint = -1;
+    float bestAngle = 100;
+    if (!isFromBuffer)
+    {
+        // The ray hit the interior of a segment; anchor the candidate
+        // triangle at the segment endpoint with the smaller x.
+        PxVec2 ex1 = getProjectedPoint(vrx[externalLoop[vrtIndex]].p, dir);
+        PxVec2 ex2 = getProjectedPoint(vrx[externalLoop[(vrtIndex + 1) % externalLoop.size()]].p, dir);
+
+        if (ex1.x > ex2.x)
+        {
+            vrtIndex = (vrtIndex + 1) % externalLoop.size();
+            ex1 = ex2;
+        }
+        /* Check if some point is inside triangle */
+        bool notFound = true;
+        for (int32_t i = 0; i < (int32_t)externalLoop.size(); ++i)
+        {
+            PxVec2 tempPoint = getProjectedPoint(vrx[externalLoop[i]].p, dir);
+            if (pointInside(holePoint, ex1, computedPoint, tempPoint))
+            {
+                // A reflex outer vertex inside the triangle may occlude the
+                // bridge; pick the occluder whose direction is closest to +x.
+                notFound = false;
+                PxVec2 cVp = getProjectedPoint(vrx[externalLoop[i]].p, dir);
+                PxVec2 pVp = getProjectedPoint(vrx[externalLoop[(i - 1 + externalLoop.size()) % externalLoop.size()]].p, dir);
+                PxVec2 nVp = getProjectedPoint(vrx[externalLoop[(i + 1) % externalLoop.size()]].p, dir);
+                float rt = getRotation((cVp - pVp).getNormalized(), (nVp - pVp).getNormalized());
+                if ((dir & OPPOSITE_WINDING)) rt = -rt;
+                if (rt < 0.000001)
+                    continue;
+                float tempAngle = PxVec2(1, 0).dot((tempPoint - holePoint).getNormalized());
+                // NOTE(review): bestAngle starts at 100 while tempAngle (a
+                // dot product of unit vectors) is at most 1, so this branch
+                // can never be taken and bridgePoint is never set here.
+                // The comparison looks inverted (or the initializer should
+                // be a large negative value). Verify the intended behavior
+                // before changing -- the notFound fallback below currently
+                // masks this path.
+                if (bestAngle < tempAngle)
+                {
+                    bestAngle = tempAngle;
+                    bridgePoint = i;
+                }
+            }
+        }
+        if (notFound)
+        {
+            bridgePoint = vrtIndex;
+        }
+        if (bridgePoint == -1)
+        {
+            // std::cout << "Triangulation: bridge vertex for inner loop is not found..." << std::endl;
+            return 1;
+        }
+    }
+    else
+    {
+        bridgePoint = vrtIndex;
+    }
+    // Stitch: external[0..bridgePoint], the whole internal loop starting and
+    // ending at its rightmost vertex, then external[bridgePoint..end]. The
+    // doubled bridge vertices form the zero-width cut.
+    std::vector<uint32_t> temporal;
+
+    for (int32_t i = 0; i <= bridgePoint; ++i)
+    {
+        temporal.push_back(externalLoop[i]);
+    }
+    temporal.push_back(internalLoop[mIndex]);
+    for (int32_t i = (mIndex + 1) % internalLoop.size(); i != mIndex; i = (i + 1) % internalLoop.size())
+    {
+        temporal.push_back(internalLoop[i]);
+    }
+    temporal.push_back(internalLoop[mIndex]);
+    for (uint32_t i = bridgePoint; i < externalLoop.size(); ++i)
+    {
+        temporal.push_back(externalLoop[i]);
+    }
+    externalLoop = temporal;
+    return 0;
+}
+
+// Stitches a facet's unordered edge soup into closed vertex loops,
+// classifies loops as outer boundaries or holes by signed area, bridges
+// each hole into its containing outer loop (unitePolygons), ear-clips the
+// results, and tags the produced triangles with 'userData'.
+void ChunkPostProcessor::buildPolygonAndTriangulate(std::vector<Edge>& edges, Vertex* vertices, int32_t userData)
+{
+    std::vector<std::vector<uint32_t> > serializedLoops;
+
+    std::set<int> visitedVertices;
+    std::vector<int> used(edges.size(), 0);
+    uint32_t collected = 0;
+
+    std::vector<int> edgesIds;
+    /**
+    Add first edge to polygon
+    */
+    edgesIds.push_back(0);
+    visitedVertices.insert(edges[0].s);
+    visitedVertices.insert(edges[0].e);
+    used[0] = true;
+    collected = 1;
+    uint32_t lastEdge = 0;
+    bool successfullPass = false;
+    // Walk edges end-to-start; whenever the walk revisits a vertex a closed
+    // loop has formed -- peel it off into serializedLoops and continue with
+    // whatever remains on the edge stack (or pick a fresh unused edge).
+    for (; collected < edges.size();)
+    {
+        successfullPass = false;
+        for (uint32_t p = 0; p < edges.size(); ++p)
+        {
+            if (used[p] == 0 && edges[p].s == edges[lastEdge].e)
+            {
+                successfullPass = true;
+                collected++;
+                used[p] = true;
+                edgesIds.push_back(p);
+                lastEdge = p;
+                if (visitedVertices.find(edges[p].e) != visitedVertices.end()) // if we formed loop, detach it and triangulate
+                {
+                    serializedLoops.push_back(std::vector<uint32_t>());
+                    std::vector<uint32_t>& serializedPositions = serializedLoops.back();
+                    // Pop the loop's edges off the stack, recording their
+                    // start vertices, until the loop's entry vertex is hit.
+                    while (edgesIds.size() > 0)
+                    {
+                        serializedPositions.push_back(edges[edgesIds.back()].s);
+                        visitedVertices.erase(edges[edgesIds.back()].s);
+                        if (edges[edgesIds.back()].s == edges[p].e)
+                        {
+                            edgesIds.pop_back();
+                            break;
+                        }
+                        edgesIds.pop_back();
+                    }
+                    if (edgesIds.size() > 0)
+                    {
+                        lastEdge = edgesIds.back();
+                    }
+                    else
+                    {
+                        // Stack exhausted: seed the walk with any unused edge.
+                        for (uint32_t t = 0; t < edges.size(); ++t)
+                        {
+                            if (used[t] == 0)
+                            {
+                                edgesIds.push_back(t);
+                                visitedVertices.insert(edges[t].s);
+                                visitedVertices.insert(edges[t].e);
+                                used[t] = true;
+                                collected++;
+                                lastEdge = t;
+                                break;
+                            }
+                        }
+                    }
+                }
+                else
+                {
+                    visitedVertices.insert(edges[p].e);
+                }
+            }
+        }
+        if (!successfullPass)
+        {
+            break;  // no edge continues the walk; remaining edges are dangling
+        }
+    }
+
+    std::vector<LoopInfo> loopsInfo(serializedLoops.size());
+    // Compute normal to whole polygon, and areas of loops
+    PxVec3 wholeFacetNormal(0, 0, 0);
+    for (uint32_t loop = 0; loop < serializedLoops.size(); ++loop)
+    {
+        PxVec3 loopNormal(0, 0, 0);
+        std::vector<uint32_t>& pos = serializedLoops[loop];
+        for (uint32_t vrt = 1; vrt + 1 < serializedLoops[loop].size(); ++vrt)
+        {
+            loopNormal += (vertices[pos[vrt]].p - vertices[pos[0]].p).cross(vertices[pos[vrt + 1]].p - vertices[pos[0]].p);
+        }
+        loopsInfo[loop].area = loopNormal.magnitude();
+        loopsInfo[loop].normal = loopNormal;
+        loopsInfo[loop].index = loop;
+        wholeFacetNormal += loopNormal;
+    }
+
+    // Change areas signs according to winding direction
+    for (uint32_t loop = 0; loop < serializedLoops.size(); ++loop)
+    {
+        if (wholeFacetNormal.dot(loopsInfo[loop].normal) < 0)
+        {
+            loopsInfo[loop].area = -loopsInfo[loop].area;
+        }
+    }
+    ProjectionDirections dir = getProjectionDirection(wholeFacetNormal);
+    // Ascending-area order: holes (negative area) come before the outer
+    // loops that can absorb them.
+    std::sort(loopsInfo.begin(), loopsInfo.end());
+
+    // NOTE(review): tempPositions is never used -- dead local.
+    std::vector<PxVec3> tempPositions;
+    int32_t oldSize = static_cast<int32_t>(mBaseMeshTriangles.size());
+    for (uint32_t extPoly = 0; extPoly < loopsInfo.size(); ++extPoly)
+    {
+        if (loopsInfo[extPoly].area < 0)
+        {
+            continue; // Polygon with negative area is hole
+        }
+        int32_t baseLoop = loopsInfo[extPoly].index;
+        // Bridge every not-yet-used hole that is small enough to fit inside
+        // this outer loop.
+        // NOTE(review): 'abs' here receives a float; depending on headers it
+        // may resolve to the integer overload -- confirm std::abs/fabsf was
+        // intended.
+        for (uint32_t intPoly = 0; intPoly < loopsInfo.size(); ++intPoly)
+        {
+            if (loopsInfo[intPoly].area > 0 || loopsInfo[intPoly].used || abs(loopsInfo[intPoly].area) > loopsInfo[extPoly].area)
+            {
+                continue;
+            }
+            int32_t holeLoop = loopsInfo[intPoly].index;
+
+            if (!unitePolygons(serializedLoops[baseLoop], serializedLoops[holeLoop], vertices, dir))
+            {
+                loopsInfo[intPoly].used = true;
+            };
+        }
+        triangulatePolygonWithEarClipping(serializedLoops[baseLoop],vertices, dir);
+    }
+    // Tag only the triangles produced by this call.
+    for (uint32_t i = oldSize; i < mBaseMeshTriangles.size(); ++i)
+    {
+        mBaseMeshTriangles[i].userInfo = userData;
+    }
+}
+
+// Returns the deduplicated index for vertex 'p', appending it to mVertices
+// on first sight. Uses a single map insertion instead of the original
+// find-then-operator[] double lookup (same behavior, one tree walk).
+NV_FORCE_INLINE int32_t ChunkPostProcessor::addVerticeIfNotExist(Vertex& p)
+{
+    const int32_t candidateIndex = static_cast<int32_t>(mVertices.size());
+    auto inserted = mVertMap.insert(std::make_pair(p, candidateIndex));
+    if (inserted.second)
+    {
+        // First occurrence: store the vertex and hand back its new index.
+        mVertices.push_back(p);
+        return candidateIndex;
+    }
+    // Already known (within comparator tolerance): reuse the stored index.
+    return inserted.first->second;
+}
+
+// Registers edge 'ed' for its parent facet unless the opposite half-edge
+// (e->s, same parent) was already registered -- in that case the pair
+// cancels and the stored edge is invalidated by setting its start vertex to
+// NOT_VALID_VERTEX. Degenerate edges (s == e) are dropped.
+// NOTE(review): when the stored edge is already invalid it is briefly
+// restored from 'ed' and then immediately re-invalidated, so the restore's
+// only net effect is updating the stored 'e'. This looks like an unfinished
+// toggle -- confirm the intent before changing.
+NV_FORCE_INLINE void ChunkPostProcessor::addEdgeIfValid(EdgeWithParent& ed)
+{
+    if (ed.s == ed.e)
+        return;
+    EdgeWithParent opposite(ed.e, ed.s, ed.parent);
+    auto it = mEdgeMap.find(opposite);
+    if (it == mEdgeMap.end())
+    {
+        mEdgeMap[ed] = static_cast<int32_t>(mBaseMeshEdges.size());
+        mBaseMeshEdges.push_back(ed);
+    }
+    else
+    {
+        if (mBaseMeshEdges[it->second].s == NOT_VALID_VERTEX)
+        {
+            mBaseMeshEdges[it->second].s = ed.s;
+            mBaseMeshEdges[it->second].e = ed.e;
+        }
+        mBaseMeshEdges[it->second].s = NOT_VALID_VERTEX;
+    }
+}
+
+
+
+// Deduplicates the source mesh's vertices (via addVerticeIfNotExist) and
+// collects every facet edge as an EdgeWithParent keyed to its facet index;
+// coincident opposite edges cancel each other inside addEdgeIfValid.
+// mBaseMapping translates original vertex indices to deduplicated ones.
+// Fix vs. original: removed a local vector that compacted mBaseMeshEdges
+// (skipping NOT_VALID_VERTEX entries) and was then discarded unused --
+// apparently an unfinished compaction step; mBaseMeshEdges itself is left
+// exactly as before.
+void ChunkPostProcessor::prepare(Mesh* mesh)
+{
+    Edge* ed = mesh->getEdges();
+    Vertex* vr = mesh->getVertices();
+    mBaseMapping.resize(mesh->getVerticesCount());
+    for (uint32_t i = 0; i < mesh->getFacetCount(); ++i)
+    {
+        Facet* fc = mesh->getFacet(i);
+        for (uint32_t j = fc->firstEdgeNumber; j < fc->firstEdgeNumber + fc->edgesCount; ++j)
+        {
+            int32_t a = addVerticeIfNotExist(vr[ed[j].s]);
+            int32_t b = addVerticeIfNotExist(vr[ed[j].e]);
+            mBaseMapping[ed[j].s] = a;
+            mBaseMapping[ed[j].e] = b;
+            EdgeWithParent e(a, b, i);
+            addEdgeIfValid(e);
+        }
+    }
+}
+
+// Clears all intermediate and result state so the processor can be reused
+// for another mesh.
+void ChunkPostProcessor::reset()
+{
+    isTesselated = false;
+    mVertices.clear();
+    mBaseMeshEdges.clear();
+    mVertMap.clear();
+    mEdgeMap.clear();
+    mTrMeshEdgeMap.clear();
+    mTrMeshEdges.clear();
+    mTrMeshEdToTr.clear();
+    mBaseMeshTriangles.clear();
+    mEdgeFlag.clear();
+    mVertexValence.clear();
+    mRestrictionFlag.clear();
+    mVerticesDistances.clear();
+    mVerticesNormalsSmoothed.clear();
+
+    // Result buffers.
+    mBaseMeshResultTriangles.clear();
+    mTesselatedMeshResultTriangles.clear();
+    mTesselatedMeshTriangles.clear();
+}
+
/**
    Triangulate 'mesh': weld vertices, group the collected edges by parent
    facet, ear-clip each facet's edge loops, and resolve the resulting
    indexed triangles into mBaseMeshResultTriangles.
    \param[in] mesh  Mesh for triangulation; ignored if null or invalid.
*/
void ChunkPostProcessor::triangulate(Mesh* mesh)
{
    reset();
    if (mesh == nullptr || !mesh->isValid())
    {
        return;
    }
    prepare(mesh);
    if (mBaseMeshEdges.empty())
    {
        return;
    }
    // Edges are ordered by parent facet; gather each facet's run of edges
    // into 'temp' and triangulate it when the parent index changes.
    std::vector<Edge> temp;
    int32_t fP = mBaseMeshEdges[0].parent;
    for (uint32_t i = 0; i < mBaseMeshEdges.size(); ++i)
    {
        if (fP != mBaseMeshEdges[i].parent)
        {
            if (temp.empty() == false)
            {
                buildPolygonAndTriangulate(temp, &mVertices[0], mesh->getFacet(fP)->userData);
            }
            temp.clear();
            fP = mBaseMeshEdges[i].parent;
        }
        temp.push_back(Edge(mBaseMeshEdges[i].s, mBaseMeshEdges[i].e));
    }
    // Flush the final facet (the loop only emits on a parent change).
    buildPolygonAndTriangulate(temp, &mVertices[0], mesh->getFacet(fP)->userData);

    /* Build final triangles */

    mBaseMeshResultTriangles.clear();
    for (uint32_t i = 0; i < mBaseMeshTriangles.size(); ++i)
    {
        // Skip triangles invalidated during triangulation.
        if (mBaseMeshTriangles[i].ea == NOT_VALID_VERTEX)
        {
            continue;
        }
        mBaseMeshResultTriangles.push_back(Triangle(mVertices[mBaseMeshTriangles[i].ea], mVertices[mBaseMeshTriangles[i].eb], mVertices[mBaseMeshTriangles[i].ec]));
        mBaseMeshResultTriangles.back().userInfo = mBaseMeshTriangles[i].userInfo;
    }

    // Build the position-only vertex mapping used by later stages.
    computePositionedMapping();
}
+
+void ChunkPostProcessor::prebuildTesselatedTriangles()
+{
+ mTesselatedMeshResultTriangles.clear();
+ for (uint32_t i = 0; i < mTesselatedMeshTriangles.size(); ++i)
+ {
+ if (mTesselatedMeshTriangles[i].ea == NOT_VALID_VERTEX)
+ {
+ continue;
+ }
+ mTesselatedMeshResultTriangles.push_back(Triangle(mVertices[mTesselatedMeshTriangles[i].ea], mVertices[mTesselatedMeshTriangles[i].eb], mVertices[mTesselatedMeshTriangles[i].ec]));
+ mTesselatedMeshResultTriangles.back().userInfo = mTesselatedMeshTriangles[i].userInfo;
+ }
+
+}
+
+
+int32_t ChunkPostProcessor::addEdgeTr(const Edge& e)
+{
+ Edge ed = e;
+ if (ed.e < ed.s) std::swap(ed.s, ed.e);
+ auto it = mTrMeshEdgeMap.find(ed);
+ if (it == mTrMeshEdgeMap.end())
+ {
+ mTrMeshEdToTr.push_back(EdgeToTriangles());
+ mTrMeshEdgeMap[ed] = (int)mTrMeshEdToTr.size() - 1;
+ mTrMeshEdges.push_back(ed);
+ mEdgeFlag.push_back(INTERNAL_EDGE);
+ return (int32_t)mTrMeshEdToTr.size() - 1;
+ }
+ else
+ {
+ return it->second;
+ }
+}
+
+int32_t ChunkPostProcessor::findEdge(const Edge& e)
+{
+ Edge ed = e;
+ if (ed.e < ed.s) std::swap(ed.s, ed.e);
+ auto it = mTrMeshEdgeMap.find(ed);
+ if (it == mTrMeshEdgeMap.end())
+ {
+ return -1;
+ }
+ return it->second;
+}
+
+void ChunkPostProcessor::updateEdgeTriangleInfo()
+{
+ mTrMeshEdToTr.clear();
+ mTrMeshEdToTr.resize(mTrMeshEdges.size());
+ for (uint32_t i = 0; i < mTesselatedMeshTriangles.size(); ++i)
+ {
+ TriangleIndexed& tr = mTesselatedMeshTriangles[i];
+ if (tr.ea == NOT_VALID_VERTEX)
+ continue;
+ int32_t ed = addEdgeTr(Edge(tr.ea, tr.eb));
+ mTrMeshEdToTr[ed].add(i);
+ ed = addEdgeTr(Edge(tr.ea, tr.ec));
+ mTrMeshEdToTr[ed].add(i);
+ ed = addEdgeTr(Edge(tr.ec, tr.eb));
+ mTrMeshEdToTr[ed].add(i);
+ }
+}
+
+void ChunkPostProcessor::updateVertEdgeInfo()
+{
+ mVertexToTriangleMap.clear();
+ mVertexToTriangleMap.resize(mVertices.size());
+ for (uint32_t i = 0; i < mTesselatedMeshTriangles.size(); ++i)
+ {
+ TriangleIndexed& tr = mTesselatedMeshTriangles[i];
+ if (tr.ea == NOT_VALID_VERTEX) continue;
+ mVertexToTriangleMap[tr.ea].push_back(i);
+ mVertexToTriangleMap[tr.eb].push_back(i);
+ mVertexToTriangleMap[tr.ec].push_back(i);
+ }
+ mVertexValence.clear();
+ mVertexValence.resize(mVertices.size(), 0);
+
+ for (uint32_t i = 0; i < mTrMeshEdges.size(); ++i)
+ {
+ if (mTrMeshEdToTr[i].c != 0)
+ {
+ mVertexValence[mTrMeshEdges[i].s]++;
+ mVertexValence[mTrMeshEdges[i].e]++;
+ }
+ }
+}
+
+
/**
    Attempt to collapse tesselation edge 'id' by merging endpoint 'from' into
    endpoint 'to'. The collapse is rejected when it would break topology or
    invert geometry: both endpoints restricted, a non-manifold edge, a shared
    neighbor vertex outside the edge's triangles (pinch), or any surviving
    incident triangle that would degenerate or flip its normal.
*/
void ChunkPostProcessor::collapseEdge(int32_t id)
{
    Edge cEdge = mTrMeshEdges[id];
    uint32_t from = cEdge.s;
    uint32_t to = cEdge.e;


    // Both endpoints pinned (border/external vertices): nothing may move.
    if (mRestrictionFlag[from] && mRestrictionFlag[to])
    {
        return;
    }

    // Prefer removing the lower-valence endpoint...
    if (mVertexValence[from] > mVertexValence[to])
    {
        std::swap(from, to);
    }

    // ...but a restricted vertex must survive, so collapse toward it.
    if (mRestrictionFlag[from])
    {
        std::swap(from, to);
    }

    std::set<int32_t> connectedToBegin;
    std::set<int32_t> connectedToEnd;
    std::set<int32_t> neighboorTriangles;

    // Collect all valid triangles incident to either endpoint, and find the
    // (expected: one or two) triangles that actually contain the edge.
    // NOTE(review): 'cntr' is incremented without bound here; a non-manifold
    // edge with >2 incident triangles writes past trWithEdge[1] before the
    // 'cntr > 2' check below — confirm inputs are manifold.
    int32_t trWithEdge[2] = {-1, -1};
    int32_t cntr = 0;
    for (uint32_t i = 0; i < mVertexToTriangleMap[from].size(); ++i)
    {
        if (mTesselatedMeshTriangles[mVertexToTriangleMap[from][i]].ea == NOT_VALID_VERTEX)
            continue;
        // insert(...).second is true only the first time a triangle is seen.
        if (neighboorTriangles.insert(mVertexToTriangleMap[from][i]).second && mTesselatedMeshTriangles[mVertexToTriangleMap[from][i]].isContainEdge(from, to))
        {
            trWithEdge[cntr] = mVertexToTriangleMap[from][i];
            cntr++;
        }
    }
    for (uint32_t i = 0; i < mVertexToTriangleMap[to].size(); ++i)
    {
        if (mTesselatedMeshTriangles[mVertexToTriangleMap[to][i]].ea == NOT_VALID_VERTEX)
            continue;
        if (neighboorTriangles.insert(mVertexToTriangleMap[to][i]).second && mTesselatedMeshTriangles[mVertexToTriangleMap[to][i]].isContainEdge(from, to))
        {
            trWithEdge[cntr] = mVertexToTriangleMap[to][i];
            cntr++;
        }
    }

    // Edge no longer borders any triangle: nothing to collapse.
    if (cntr == 0)
    {
        return;
    }
    // Non-manifold edge: unsafe to collapse.
    if (cntr > 2)
    {
        return;
    }

    // Gather the vertices adjacent to each endpoint (excluding the endpoints
    // themselves) across all neighbor triangles.
    for (uint32_t i: neighboorTriangles)
    {
        if (mTesselatedMeshTriangles[i].ea == from || mTesselatedMeshTriangles[i].eb == from || mTesselatedMeshTriangles[i].ec == from)
        {
            if (mTesselatedMeshTriangles[i].ea != to && mTesselatedMeshTriangles[i].ea != from)
                connectedToBegin.insert(mTesselatedMeshTriangles[i].ea);
            if (mTesselatedMeshTriangles[i].eb != to && mTesselatedMeshTriangles[i].eb != from)
                connectedToBegin.insert(mTesselatedMeshTriangles[i].eb);
            if (mTesselatedMeshTriangles[i].ec != to && mTesselatedMeshTriangles[i].ec != from)
                connectedToBegin.insert(mTesselatedMeshTriangles[i].ec);
        }

        if (mTesselatedMeshTriangles[i].ea == to || mTesselatedMeshTriangles[i].eb == to || mTesselatedMeshTriangles[i].ec == to)
        {
            if (mTesselatedMeshTriangles[i].ea != to && mTesselatedMeshTriangles[i].ea != from)
                connectedToEnd.insert(mTesselatedMeshTriangles[i].ea);
            if (mTesselatedMeshTriangles[i].eb != to && mTesselatedMeshTriangles[i].eb != from)
                connectedToEnd.insert(mTesselatedMeshTriangles[i].eb);
            if (mTesselatedMeshTriangles[i].ec != to && mTesselatedMeshTriangles[i].ec != from)
                connectedToEnd.insert(mTesselatedMeshTriangles[i].ec);
        }
    }
    // Link condition: every vertex adjacent to BOTH endpoints must lie in a
    // triangle that contains the collapsing edge, otherwise the collapse
    // would pinch the surface into a non-manifold configuration.
    bool canBeCollapsed = true;
    for (auto it = connectedToBegin.begin(); it != connectedToBegin.end(); ++it)
    {
        uint32_t currV = *it;
        if (connectedToEnd.find(currV) == connectedToEnd.end())
            continue;
        bool found = false;
        for (int32_t tr : neighboorTriangles)
        {
            if ((mTesselatedMeshTriangles[tr].ea == from || mTesselatedMeshTriangles[tr].eb == from || mTesselatedMeshTriangles[tr].ec == from) &&
                (mTesselatedMeshTriangles[tr].ea == to || mTesselatedMeshTriangles[tr].eb == to || mTesselatedMeshTriangles[tr].ec == to) &&
                (mTesselatedMeshTriangles[tr].ea == currV || mTesselatedMeshTriangles[tr].eb == currV || mTesselatedMeshTriangles[tr].ec == currV))
            {
                found = true;
                break;
            }
        }
        if (!found)
        {
            canBeCollapsed = false;
            break;
        }
    }

    // Simulate re-pointing 'from' to 'to' in each surviving triangle; reject
    // the collapse if any triangle degenerates (near-zero area) or inverts
    // its normal relative to the original orientation.
    if (canBeCollapsed)
    {
        for (int32_t i : neighboorTriangles)
        {
            if (trWithEdge[0] == i) continue;
            if (cntr == 2 && trWithEdge[1] == i) continue;
            TriangleIndexed tr = mTesselatedMeshTriangles[i];
            PxVec3 oldNormal = (mVertices[tr.eb].p - mVertices[tr.ea].p).cross(mVertices[tr.ec].p - mVertices[tr.ea].p);

            if (tr.ea == from)
            {
                tr.ea = to;
            }
            else
            if (tr.eb == from)
            {
                tr.eb = to;
            }
            else
            if (tr.ec == from)
            {
                tr.ec = to;
            }
            PxVec3 newNormal = (mVertices[tr.eb].p - mVertices[tr.ea].p).cross(mVertices[tr.ec].p - mVertices[tr.ea].p);
            if (newNormal.magnitude() < 1e-8f)
            {
                canBeCollapsed = false;
                break;
            }
            if (oldNormal.dot(newNormal) < 0)
            {
                canBeCollapsed = false;
                break;
            }
        }
    }

    // Commit: invalidate the triangle(s) containing the edge, then re-point
    // the remaining incident triangles from 'from' to 'to' and update the
    // vertex->triangle map accordingly.
    if (canBeCollapsed)
    {
        mTesselatedMeshTriangles[trWithEdge[0]].ea = NOT_VALID_VERTEX;
        if (cntr == 2)mTesselatedMeshTriangles[trWithEdge[1]].ea = NOT_VALID_VERTEX;

        for (int32_t i : neighboorTriangles)
        {
            if (mTesselatedMeshTriangles[i].ea == NOT_VALID_VERTEX)
                continue;
            if (mTesselatedMeshTriangles[i].ea == from)
            {
                mTesselatedMeshTriangles[i].ea = to;
                mVertexToTriangleMap[from].clear();
                mVertexToTriangleMap[to].push_back(i);
            }
            else
            if (mTesselatedMeshTriangles[i].eb == from)
            {
                mTesselatedMeshTriangles[i].eb = to;
                mVertexToTriangleMap[from].clear();
                mVertexToTriangleMap[to].push_back(i);
            }
            else
            if (mTesselatedMeshTriangles[i].ec == from)
            {
                mTesselatedMeshTriangles[i].ec = to;
                mVertexToTriangleMap[from].clear();
                mVertexToTriangleMap[to].push_back(i);
            }
        }
    }
}
+
+
/**
    Split tesselation edge 'id' at its midpoint. Each valid triangle bordering
    the edge is divided in two; the new vertex inherits interpolated position,
    normal and uv[0], and inherits the edge's restriction from its flag.
*/
void ChunkPostProcessor::divideEdge(int32_t id)
{

    // Edge no longer borders any triangle: nothing to split.
    if (mTrMeshEdToTr[id].c == 0 )
    {
        return;
    }

    Edge cEdge = mTrMeshEdges[id];
    EdgeFlag snapRestriction = mEdgeFlag[id];
    Vertex middle;
    // The midpoint vertex is shared by both bordering triangles; created lazily.
    uint32_t nv = NOT_VALID_VERTEX;
    for (int32_t t = 0; t < mTrMeshEdToTr[id].c; ++t)
    {
        int32_t oldTriangleIndex = mTrMeshEdToTr[id].tr[t];
        // Copy, not reference: mTesselatedMeshTriangles is appended to below.
        TriangleIndexed tr = mTesselatedMeshTriangles[mTrMeshEdToTr[id].tr[t]];

        if (tr.ea == NOT_VALID_VERTEX)
        {
            continue;
        }

        uint32_t pbf[3];
        pbf[0] = tr.ea;
        pbf[1] = tr.eb;
        pbf[2] = tr.ec;
        // Find which side of the triangle is the edge being split; 'opp' is
        // the vertex opposite that side.
        for (int32_t p = 0; p < 3; ++p)
        {
            int32_t pnx = (p + 1) % 3;
            int32_t opp = (p + 2) % 3;

            if ((pbf[p] == cEdge.s && pbf[pnx] == cEdge.e) || (pbf[p] == cEdge.e && pbf[pnx] == cEdge.s))
            {
                if (nv == NOT_VALID_VERTEX)
                {
                    // Interpolate the midpoint attributes.
                    // NOTE(review): only uv[0] is interpolated — confirm no
                    // other uv channels are in use at this stage.
                    middle.p = (mVertices[pbf[p]].p + mVertices[pbf[pnx]].p) * 0.5f;
                    middle.n = (mVertices[pbf[p]].n + mVertices[pbf[pnx]].n) * 0.5f;
                    middle.uv[0] = (mVertices[pbf[p]].uv[0] + mVertices[pbf[pnx]].uv[0]) * 0.5f;

                    nv = (uint32_t)mVertices.size();
                    mVertices.push_back(middle);
                }
                // New vertex is restricted iff the split edge was a border edge.
                if (nv < mRestrictionFlag.size())
                {
                    mRestrictionFlag[nv] = ((snapRestriction == EXTERNAL_BORDER_EDGE) || (snapRestriction == INTERNAL_BORDER_EDGE));
                }
                else
                {
                    mRestrictionFlag.push_back((snapRestriction == EXTERNAL_BORDER_EDGE) || (snapRestriction == INTERNAL_BORDER_EDGE));
                }

                // Three new edges: the two halves of the split edge, and the
                // internal edge from the midpoint to the opposite vertex.
                uint32_t ind1 = addEdgeTr(Edge(pbf[p], nv));
                uint32_t ind2 = addEdgeTr(Edge(nv, pbf[pnx]));
                uint32_t ind3 = addEdgeTr(Edge(nv, pbf[opp]));


                // Halves inherit the original edge's flag; the bisector is internal.
                mEdgeFlag[ind1] = snapRestriction;
                mEdgeFlag[ind2] = snapRestriction;
                mEdgeFlag[ind3] = INTERNAL_EDGE;

                // Old triangle slot is reused for (p, nv, opp); the second
                // half (nv, pnx, opp) is appended at the back.
                mTrMeshEdToTr[ind1].add(mTrMeshEdToTr[id].tr[t]);
                int32_t userInfo = mTesselatedMeshTriangles[mTrMeshEdToTr[id].tr[t]].userInfo;
                mTesselatedMeshTriangles[mTrMeshEdToTr[id].tr[t]] = TriangleIndexed(pbf[p], nv, pbf[opp]);
                mTesselatedMeshTriangles[mTrMeshEdToTr[id].tr[t]].userInfo = userInfo;
                mTrMeshEdToTr[ind2].add((int32_t)mTesselatedMeshTriangles.size());
                mTrMeshEdToTr[ind3].add((int32_t)mTrMeshEdToTr[id].tr[t]);
                mTrMeshEdToTr[ind3].add((int32_t)mTesselatedMeshTriangles.size());
                mTesselatedMeshTriangles.push_back(TriangleIndexed(nv,pbf[pnx], pbf[opp]));
                mTesselatedMeshTriangles.back().userInfo = userInfo;
                // The far side (pnx, opp) now borders the new triangle
                // instead of the old one — fix up its mapping.
                int32_t ed1 = findEdge(Edge(pbf[pnx], pbf[opp]));
                mTrMeshEdToTr[ed1].replace(oldTriangleIndex, (int32_t)mTesselatedMeshTriangles.size() - 1);
                break;
            }
        }
    }
}
+
+
+NV_FORCE_INLINE void markEdge(int32_t ui, int32_t ed, std::vector<ChunkPostProcessor::EdgeFlag>& shortMarkup, std::vector<int32_t>& lastOwner)
+{
+ if (shortMarkup[ed] == ChunkPostProcessor::NONE)
+ {
+ if (ui == 0)
+ {
+ shortMarkup[ed] = ChunkPostProcessor::EXTERNAL_EDGE;
+ }
+ else
+ {
+ shortMarkup[ed] = ChunkPostProcessor::INTERNAL_EDGE;
+ }
+ lastOwner[ed] = ui;
+ }
+ else
+ {
+ if (ui != 0)
+ {
+ if (shortMarkup[ed] == ChunkPostProcessor::EXTERNAL_EDGE)
+ {
+ shortMarkup[ed] = ChunkPostProcessor::EXTERNAL_BORDER_EDGE;
+ }
+ if ((shortMarkup[ed] == ChunkPostProcessor::INTERNAL_EDGE) && ui != lastOwner[ed])
+ {
+ shortMarkup[ed] = ChunkPostProcessor::INTERNAL_BORDER_EDGE;
+ }
+ }
+ else
+ {
+ if (shortMarkup[ed] != ChunkPostProcessor::EXTERNAL_EDGE)
+ {
+ shortMarkup[ed] = ChunkPostProcessor::EXTERNAL_BORDER_EDGE;
+ }
+ }
+ }
+}
+
/**
    Quadratic falloff: maps distance x to [0..1], reaching 1 at x >= mx.
    The epsilon keeps the division safe when mx == 0.
*/
float falloffFunction(float x, float mx)
{
    const float ratio = std::min(1.0f, x / (mx + 1e-6f));
    return ratio * ratio;
}
+
/**
    Compute per-vertex displacement directions for noise by accumulating
    (signed) vertex normals into the position-mapped representative of each
    vertex, then copying the normalized result back to every alias.
*/
void ChunkPostProcessor::recalcNoiseDirs()
{
    /**
        Compute normals direction to apply noise
    */
    // NOTE(review): resize() does not reset already-present entries, so a
    // second applyNoise() on the same tesselation accumulates on top of the
    // previous sums — confirm applyNoise is meant to be called once.
    mVerticesNormalsSmoothed.resize(mVertices.size(), PxVec3(0, 0, 0));
    for (uint32_t i = 0; i < mTesselatedMeshTriangles.size(); ++i)
    {
        if (mTesselatedMeshTriangles[i].ea == NOT_VALID_VERTEX)
        {
            continue;
        }
        TriangleIndexed& tr = mTesselatedMeshTriangles[i];
        // userInfo == 0 marks external-surface triangles; they get no noise.
        if (tr.userInfo == 0) continue;

        // Accumulate into the position-mapped representative so coincident
        // vertices share one direction; the sign of userInfo picks whether
        // the normal is added or subtracted (presumably which side of the
        // cut the triangle belongs to — confirm against the cutter).
        if (tr.userInfo < 0)
            mVerticesNormalsSmoothed[mPositionMappedVrt[tr.ea]] += mVertices[tr.ea].n.getNormalized();
        else
            mVerticesNormalsSmoothed[mPositionMappedVrt[tr.ea]] -= mVertices[tr.ea].n.getNormalized();

        if (tr.userInfo < 0)
            mVerticesNormalsSmoothed[mPositionMappedVrt[tr.eb]] += mVertices[tr.eb].n.getNormalized();
        else
            mVerticesNormalsSmoothed[mPositionMappedVrt[tr.eb]] -= mVertices[tr.eb].n.getNormalized();

        if (tr.userInfo < 0)
            mVerticesNormalsSmoothed[mPositionMappedVrt[tr.ec]] += mVertices[tr.ec].n.getNormalized();
        else
            mVerticesNormalsSmoothed[mPositionMappedVrt[tr.ec]] -= mVertices[tr.ec].n.getNormalized();

    }
    // Propagate each representative's sum to its aliases and normalize.
    for (uint32_t i = 0; i < mVerticesNormalsSmoothed.size(); ++i)
    {

        mVerticesNormalsSmoothed[i] = mVerticesNormalsSmoothed[mPositionMappedVrt[i]];
        mVerticesNormalsSmoothed[i].normalize();
    }
}
+
+
+
/**
    Displace non-restricted tesselated vertices along their smoothed normals
    by simplex-noise samples (attenuated near the external surface), rebuild
    smoothed normals from the displaced geometry, and emit the final noisy
    triangle list into mTesselatedMeshResultTriangles.
    Must be called only after tesselateInternalSurface().
    \param[in] noise            noise generator
    \param[in] falloff          damping distance of noise around the external surface
    \param[in] relaxIterations  number of smoothing iterations before applying noise
    \param[in] relaxFactor      amount of smoothing before applying noise
*/
void ChunkPostProcessor::applyNoise(SimplexNoise& noise, float falloff, int32_t relaxIterations, float relaxFactor)
{
    NVBLAST_ASSERT(isTesselated);
    if (isTesselated == false)
    {
        return;
    }
    // Pin every vertex lying on an external or external-border edge so the
    // chunk's outer silhouette is preserved.
    mRestrictionFlag.clear();
    mRestrictionFlag.resize(mVertices.size(), false);

    for (uint32_t i = 0; i < mTrMeshEdges.size(); ++i)
    {
        if (mTrMeshEdToTr[i].c != 0)
        {
            if (mEdgeFlag[i] == EXTERNAL_EDGE || mEdgeFlag[i] == EXTERNAL_BORDER_EDGE)
            {
                mRestrictionFlag[mTrMeshEdges[i].e] = true;
                mRestrictionFlag[mTrMeshEdges[i].s] = true;
            }
        }
    }
    // Work on a copy so the un-noised vertex buffer stays intact.
    std::vector<Vertex> localVertices = mVertices;

    recalcNoiseDirs();

    relax(relaxIterations, relaxFactor, localVertices);


    /**
        Apply noise
    */
    for (uint32_t i = 0; i < localVertices.size(); ++i)
    {

        if (!mRestrictionFlag[i])
        {
            // Displacement = noise sample scaled by the distance-based
            // falloff, along the smoothed noise direction.
            float d = noise.sample(localVertices[i].p);
            localVertices[i].p += (falloffFunction(mVerticesDistances[i], falloff)) * mVerticesNormalsSmoothed[i] * d;
        }
    }


    /* Recalculate smoothed normals*/
    // Average geometric face normals of non-external triangles into the
    // position-mapped representatives.
    mVerticesNormalsSmoothed.assign(mVerticesNormalsSmoothed.size(), PxVec3(0, 0, 0));
    for (uint32_t i = 0; i < mTesselatedMeshTriangles.size(); ++i)
    {
        if (mTesselatedMeshTriangles[i].ea == NOT_VALID_VERTEX)
        {
            continue;
        }
        TriangleIndexed& tr = mTesselatedMeshTriangles[i];
        if (tr.userInfo == 0) continue;

        Triangle pTr(localVertices[tr.ea], localVertices[tr.eb], localVertices[tr.ec]);
        PxVec3 nrm = pTr.getNormal().getNormalized();

        mVerticesNormalsSmoothed[mPositionMappedVrt[tr.ea]] += nrm;
        mVerticesNormalsSmoothed[mPositionMappedVrt[tr.eb]] += nrm;
        mVerticesNormalsSmoothed[mPositionMappedVrt[tr.ec]] += nrm;
    }
    // Propagate representatives to aliases and normalize.
    for (uint32_t i = 0; i < mVerticesNormalsSmoothed.size(); ++i)
    {
        mVerticesNormalsSmoothed[i] = mVerticesNormalsSmoothed[mPositionMappedVrt[i]];
        mVerticesNormalsSmoothed[i].normalize();
    }
    // Write the recomputed normals back into the displaced vertices of all
    // non-external triangles.
    for (uint32_t i = 0; i < mTesselatedMeshTriangles.size(); ++i)
    {
        if (mTesselatedMeshTriangles[i].ea == NOT_VALID_VERTEX)
        {
            continue;
        }
        TriangleIndexed& tr = mTesselatedMeshTriangles[i];
        if (tr.userInfo == 0) continue;

        localVertices[tr.ea].n = mVerticesNormalsSmoothed[mPositionMappedVrt[tr.ea]];
        localVertices[tr.eb].n = mVerticesNormalsSmoothed[mPositionMappedVrt[tr.eb]];
        localVertices[tr.ec].n = mVerticesNormalsSmoothed[mPositionMappedVrt[tr.ec]];
    }

    // Resolve indexed triangles into the final noisy triangle list.
    mTesselatedMeshResultTriangles.clear();
    for (uint32_t i = 0; i < mTesselatedMeshTriangles.size(); ++i)
    {
        if (mTesselatedMeshTriangles[i].ea == NOT_VALID_VERTEX)
        {
            continue;
        }
        mTesselatedMeshResultTriangles.push_back(Triangle(localVertices[mTesselatedMeshTriangles[i].ea], localVertices[mTesselatedMeshTriangles[i].eb], localVertices[mTesselatedMeshTriangles[i].ec]));
        mTesselatedMeshResultTriangles.back().userInfo = mTesselatedMeshTriangles[i].userInfo;
    }


}
+
+
+void ChunkPostProcessor::computePositionedMapping()
+{
+ std::map<PxVec3, int32_t, VrtPositionComparator> mPosMap;
+ mPositionMappedVrt.clear();
+ mPositionMappedVrt.resize(mVertices.size());
+
+ for (uint32_t i = 0; i < mVertices.size(); ++i)
+ {
+ auto it = mPosMap.find(mVertices[i].p);
+
+ if (it == mPosMap.end())
+ {
+ mPosMap[mVertices[i].p] = i;
+ mPositionMappedVrt[i] = i;
+ }
+ else
+ {
+ mPositionMappedVrt[i] = it->second;
+ }
+ }
+}
+
/**
    Build the position-based adjacency graph of the tesselated mesh and
    compute, via BFS from all external/external-border edges, a per-vertex
    "distance from the external surface" used as the noise falloff input.
*/
void ChunkPostProcessor::computeFalloffAndNormals()
{
    // Map newly created vertices according to positions

    computePositionedMapping();

    // Adjacency lists over position-mapped vertex indices; duplicates are
    // filtered with a linear find (lists are expected to be short).
    mGeometryGraph.resize(mVertices.size());
    for (uint32_t i = 0; i < mTrMeshEdges.size(); ++i)
    {
        // Skip edges that no longer border any triangle.
        if (mTrMeshEdToTr[i].c == 0)
        {
            continue;
        }
        int32_t v1 = mPositionMappedVrt[mTrMeshEdges[i].s];
        int32_t v2 = mPositionMappedVrt[mTrMeshEdges[i].e];

        if (std::find(mGeometryGraph[v1].begin(), mGeometryGraph[v1].end(), v2) == mGeometryGraph[v1].end())
            mGeometryGraph[v1].push_back(v2);
        if (std::find(mGeometryGraph[v2].begin(), mGeometryGraph[v2].end(), v1) == mGeometryGraph[v2].end())
            mGeometryGraph[v2].push_back(v1);
    }
    // Distances start at a large sentinel; external-surface vertices seed 0.
    mVerticesDistances.clear();
    mVerticesDistances.resize(mVertices.size(), 10000.0f);

    std::queue<int32_t> que;

    for (uint32_t i = 0; i < mTrMeshEdges.size(); ++i)
    {
        if (mTrMeshEdToTr[i].c != 0 && (mEdgeFlag[i] == EXTERNAL_EDGE || mEdgeFlag[i] == EXTERNAL_BORDER_EDGE))
        {
            int32_t v1 = mPositionMappedVrt[mTrMeshEdges[i].s];
            int32_t v2 = mPositionMappedVrt[mTrMeshEdges[i].e];
            mVerticesDistances[v1] = 0.0f;
            mVerticesDistances[v2] = 0.0f;
            que.push(v1);
            que.push(v2);
        }
    }
    // BFS relaxation with a fixed hop cost of 0.1 (the commented-out
    // expression shows an earlier geometric-distance variant).
    while (!que.empty())
    {
        int32_t curr = que.front();
        que.pop();

        for (uint32_t i = 0; i < mGeometryGraph[curr].size(); ++i)
        {
            int32_t to = mGeometryGraph[curr][i];
            float d = mVerticesDistances[curr] + 0.1f;// (mVertices[to].p - mVertices[curr].p).magnitudeSquared();
            if (d < mVerticesDistances[to])
            {
                mVerticesDistances[to] = d;
                que.push(to);
            }
        }
    }

    // Propagate representative distances to position-aliased vertices.
    for (uint32_t i = 0; i < mVerticesDistances.size(); ++i)
    {
        int32_t from = mPositionMappedVrt[i];
        mVerticesDistances[i] = mVerticesDistances[from];
    }
}
+
+bool edgeOverlapTest(PxVec3& as, PxVec3& ae, PxVec3& bs, PxVec3& be)
+{
+ //return false;
+ if (std::max(std::min(as.x, ae.x), std::min(bs.x, be.x)) > std::min(std::max(as.x, ae.x), std::max(bs.x, be.x))) return false;
+ if (std::max(std::min(as.y, ae.y), std::min(bs.y, be.y)) > std::min(std::max(as.y, ae.y), std::max(bs.y, be.y))) return false;
+ if (std::max(std::min(as.z, ae.z), std::min(bs.z, be.z)) > std::min(std::max(as.z, ae.z), std::max(bs.z, be.z))) return false;
+
+ return ((bs - as).cross(ae - as)).magnitudeSquared() < 1e-12f && ((be - as).cross(ae - as)).magnitudeSquared() < 1e-12f;
+}
+
+void ChunkPostProcessor::relax(int32_t iteration, float factor, std::vector<Vertex>& vertices)
+{
+ std::vector<PxVec3> verticesTemp(vertices.size());
+ std::vector<PxVec3> normalsTemp(vertices.size());
+ for (int32_t iter = 0; iter < iteration; ++iter)
+ {
+ for (uint32_t i = 0; i < vertices.size(); ++i)
+ {
+ if (mRestrictionFlag[i])
+ {
+ continue;
+ }
+ PxVec3 cps = vertices[i].p;
+ PxVec3 cns = mVerticesNormalsSmoothed[i];
+ PxVec3 averaged(0, 0, 0);
+ PxVec3 averagedNormal(0, 0, 0);
+
+ for (uint32_t p = 0; p < mGeometryGraph[mPositionMappedVrt[i]].size(); ++p)
+ {
+ int32_t to = mGeometryGraph[mPositionMappedVrt[i]][p];
+ averaged += vertices[to].p;
+ averagedNormal += mVerticesNormalsSmoothed[to];
+
+ }
+ averaged *= (1.0f / mGeometryGraph[mPositionMappedVrt[i]].size());
+ averagedNormal *= (1.0f / mGeometryGraph[mPositionMappedVrt[i]].size());
+ verticesTemp[i] = cps + (averaged - cps) * factor;
+ normalsTemp[i] = cns * (1.0f - factor) + averagedNormal * factor;
+ }
+ for (uint32_t i = 0; i < vertices.size(); ++i)
+ {
+ if (mRestrictionFlag[i])
+ {
+ continue;
+ }
+ vertices[i].p = verticesTemp[i];
+ mVerticesNormalsSmoothed[i] = normalsTemp[i].getNormalized();
+
+ }
+ }
+
+}
+
/**
    Classify every tesselation edge as internal, external, or a border
    between differently-owned surfaces. Edges are first deduplicated by
    vertex position, geometrically-overlapping edges are grouped, and each
    triangle then votes on its three edges via markEdge().
*/
void ChunkPostProcessor::prebuildEdgeFlagArray()
{
    mRestrictionFlag.clear();
    mRestrictionFlag.resize(mVertices.size());
    mEdgeFlag.clear();
    mEdgeFlag.resize(mTrMeshEdges.size(), NONE);

    // Position-only vertex mapping (same construction as
    // computePositionedMapping(); 'mPosMap' is a local despite the m-prefix).
    std::map<PxVec3, int32_t, VrtPositionComparator> mPosMap;
    mPositionMappedVrt.clear();
    mPositionMappedVrt.resize(mVertices.size(), 0);

    for (uint32_t i = 0; i < mVertices.size(); ++i)
    {
        auto it = mPosMap.find(mVertices[i].p);

        if (it == mPosMap.end())
        {
            mPosMap[mVertices[i].p] = i;
            mPositionMappedVrt[i] = i;
        }
        else
        {
            mPositionMappedVrt[i] = it->second;
        }
    }

    // Deduplicate edges by position-mapped endpoints: mPositionBasedEdges[i]
    // is the representative edge index for edge i.
    std::map<Edge, int32_t> mPositionEdgeMap;
    std::vector<int32_t> mPositionBasedEdges(mTrMeshEdges.size());


    for (uint32_t i = 0; i < mTrMeshEdges.size(); ++i)
    {
        Edge tmp = Edge(mPositionMappedVrt[mTrMeshEdges[i].s], mPositionMappedVrt[mTrMeshEdges[i].e]);
        if (tmp.e < tmp.s) std::swap(tmp.e, tmp.s);
        auto it = mPositionEdgeMap.find(tmp);
        if (it == mPositionEdgeMap.end())
        {
            mPositionEdgeMap[tmp] = i;
            mPositionBasedEdges[i] = i;
        }
        else
        {
            mPositionBasedEdges[i] = it->second;
        }
    }

    std::vector<EdgeFlag> shortMarkup(mTrMeshEdges.size(), NONE);
    std::vector<int32_t> lastOwner(mTrMeshEdges.size(), 0);

    // Group representative edges that geometrically overlap (collinear and
    // intersecting), so a mark on one propagates to all of them.
    // NOTE(review): this is O(E^2) over unique edges — acceptable for chunk
    // meshes, but worth revisiting for very dense tesselations.
    std::vector<std::vector<int32_t> > edgeOverlap(mTrMeshEdges.size());
    for (auto it1 = mPositionEdgeMap.begin(); it1 != mPositionEdgeMap.end(); ++it1)
    {
        auto it2 = it1;
        it2++;
        for (; it2 != mPositionEdgeMap.end(); ++it2)
        {
            Edge& ed1 = mTrMeshEdges[it1->second];
            Edge& ed2 = mTrMeshEdges[it2->second];

            if (edgeOverlapTest(mVertices[ed1.s].p, mVertices[ed1.e].p, mVertices[ed2.s].p, mVertices[ed2.e].p))
            {
                edgeOverlap[it1->second].push_back(it2->second);
            }
        }
    }

    // Each triangle marks its three (representative) edges with its
    // userInfo-based ownership, plus all overlapping edges.
    for (uint32_t i = 0; i < mTesselatedMeshTriangles.size(); ++i)
    {
        int32_t ui = mTesselatedMeshTriangles[i].userInfo;
        int32_t ed = mPositionBasedEdges[findEdge(Edge(mTesselatedMeshTriangles[i].ea, mTesselatedMeshTriangles[i].eb))];


        markEdge(ui, ed, shortMarkup, lastOwner);
        for (uint32_t ov = 0; ov < edgeOverlap[ed].size(); ++ov)
        {
            markEdge(ui, edgeOverlap[ed][ov], shortMarkup, lastOwner);
        }

        ed = mPositionBasedEdges[findEdge(Edge(mTesselatedMeshTriangles[i].ea, mTesselatedMeshTriangles[i].ec))];
        markEdge(ui, ed, shortMarkup, lastOwner);
        for (uint32_t ov = 0; ov < edgeOverlap[ed].size(); ++ov)
        {
            markEdge(ui, edgeOverlap[ed][ov], shortMarkup, lastOwner);
        }

        ed = mPositionBasedEdges[findEdge(Edge(mTesselatedMeshTriangles[i].eb, mTesselatedMeshTriangles[i].ec))];
        markEdge(ui, ed, shortMarkup, lastOwner);
        for (uint32_t ov = 0; ov < edgeOverlap[ed].size(); ++ov)
        {
            markEdge(ui, edgeOverlap[ed][ov], shortMarkup, lastOwner);
        }

    }

    // Copy each edge's mark from its representative.
    for (uint32_t i = 0; i < mTrMeshEdges.size(); ++i)
    {
        mEdgeFlag[i] = shortMarkup[mPositionBasedEdges[i]];
    }

    // Edges of external-surface triangles (userInfo == 0) are forced external.
    for (uint32_t i = 0; i < mTesselatedMeshTriangles.size(); ++i)
    {
        if (mTesselatedMeshTriangles[i].userInfo != 0) continue;

        int32_t ed = findEdge(Edge(mTesselatedMeshTriangles[i].ea, mTesselatedMeshTriangles[i].eb));
        mEdgeFlag[ed] = EXTERNAL_EDGE;
        ed = findEdge(Edge(mTesselatedMeshTriangles[i].ec, mTesselatedMeshTriangles[i].eb));
        mEdgeFlag[ed] = EXTERNAL_EDGE;
        ed = findEdge(Edge(mTesselatedMeshTriangles[i].ea, mTesselatedMeshTriangles[i].ec));
        mEdgeFlag[ed] = EXTERNAL_EDGE;
    }
}
+
+
+
/**
    Tesselate the internal (fracture) surface: starting from the base mesh
    triangles, iteratively collapse edges shorter than half the current
    target length and split edges longer than it, tightening the target each
    round until it reaches maxLenIn. External edges are never modified.
    \param[in] maxLenIn  Desired maximal internal-surface edge length.
*/
void ChunkPostProcessor::tesselateInternalSurface(float maxLenIn)
{
    mTesselatedMeshTriangles = mBaseMeshTriangles;
    if (mTesselatedMeshTriangles.empty())
    {
        return;
    }

    updateEdgeTriangleInfo();
    prebuildEdgeFlagArray();
    // Pin vertices on external and border edges so the silhouette and the
    // seams between internal regions are preserved.
    mRestrictionFlag.resize(mVertices.size(), 0);
    for (uint32_t i = 0; i < mTrMeshEdges.size(); ++i)
    {
        if (mEdgeFlag[i] == EXTERNAL_EDGE || mEdgeFlag[i] == EXTERNAL_BORDER_EDGE || mEdgeFlag[i] == INTERNAL_BORDER_EDGE)
        {
            mRestrictionFlag[mTrMeshEdges[i].s] = 1;
            mRestrictionFlag[mTrMeshEdges[i].e] = 1;
        }
    }


    // NOTE(review): with maxLen starting at max(0.1, maxLenIn), the loop
    // body only ever runs when maxLenIn < 0.1; for larger targets no
    // refinement happens at all. Confirm the intended starting coarseness.
    float maxLen = std::max(0.1f, maxLenIn);
    while (maxLen > maxLenIn)
    {
        float mlSq = maxLen * maxLen;
        // Collapse threshold: half the target length (squared for comparison).
        float minD = maxLen * 0.5f;
        minD = minD * minD;

        for (int32_t iter = 0; iter < 15; ++iter)
        {
            // Pass 1: collapse short collapsible edges.
            updateVertEdgeInfo();
            uint32_t oldSize = (uint32_t)mTrMeshEdges.size();
            for (uint32_t i = 0; i < oldSize; ++i)
            {
                if (mEdgeFlag[i] == EXTERNAL_EDGE || mEdgeFlag[i] == INTERNAL_BORDER_EDGE)
                {
                    continue;
                }
                if ((mVertices[mTrMeshEdges[i].s].p - mVertices[mTrMeshEdges[i].e].p).magnitudeSquared() < minD)
                {
                    collapseEdge(i);
                }
            }

            // Pass 2: split long edges (new edges appended during the pass
            // are picked up on the next iteration via oldSize).
            oldSize = (uint32_t)mTrMeshEdges.size();
            updateEdgeTriangleInfo();
            for (uint32_t i = 0; i < oldSize; ++i)
            {
                if (mEdgeFlag[i] == EXTERNAL_EDGE)
                {
                    continue;
                }
                if ((mVertices[mTrMeshEdges[i].s].p - mVertices[mTrMeshEdges[i].e].p).magnitudeSquared() > mlSq)
                {
                    divideEdge(i);
                }
            }
        }
        // Tighten the target length for the next round, clamped to the goal.
        maxLen *= 0.3;
        maxLen = std::max(maxLen, maxLenIn);
    }
    computeFalloffAndNormals();
    prebuildTesselatedTriangles();
    isTesselated = true;
}
+
+} // namespace Blast
+} // namespace Nv \ No newline at end of file
diff --git a/NvBlast/sdk/extensions/authoring/source/NvBlastExtAuthoringTriangulator.h b/NvBlast/sdk/extensions/authoring/source/NvBlastExtAuthoringTriangulator.h
new file mode 100644
index 0000000..83942f4
--- /dev/null
+++ b/NvBlast/sdk/extensions/authoring/source/NvBlastExtAuthoringTriangulator.h
@@ -0,0 +1,261 @@
+/*
+* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#ifndef NVBLASTEXTAUTHORINGTRIANGULATOR_H
+#define NVBLASTEXTAUTHORINGTRIANGULATOR_H
+
+
+#include <vector>
+#include <map>
+#include "NvBlastExtAuthoringTypes.h"
+#include "NvBlastExtAuthoringMesh.h"
+#include "NvBlastExtAuthoringInternalCommon.h"
+
+namespace Nv
+{
+namespace Blast
+{
+class SimplexNoise;
+
/**
    Vertex comparator for vertex welding. Orders vertices by their full data
    (contrast VrtPositionComparator below, which ignores normals and UVs);
    the ordering is defined in the corresponding .cpp.
*/
struct VrtComp
{
    bool operator()(const Vertex& a, const Vertex& b) const;
};
+
/**
    Vertex comparator for vertex welding that compares positions only (it
    does not account for the normal and uv parameters of the vertex).
*/
struct VrtPositionComparator
{
    bool operator()(const physx::PxVec3& a, const physx::PxVec3& b) const;
};
+
/**
    Structure used on the tesselation stage. Maps an edge to its (up to two)
    neighboring triangles.
*/
struct EdgeToTriangles
{
    int32_t tr[2];  // Adjacent triangle indices; only tr[0..c-1] are valid.
    int32_t c;      // Number of valid entries in tr (0, 1 or 2).
    EdgeToTriangles()
    {
        c = 0;
    }
    /**
        Add a triangle to the edge. An edge maps at most two triangles;
        further additions are ignored (previously this wrote out of bounds
        when called a third time, e.g. on non-manifold input).
    */
    void add(int32_t t)
    {
        if (c < 2)
        {
            tr[c] = t;
            ++c;
        }
    }
    /**
        Replaces the mapping from one triangle to another.
    */
    void replace(int32_t from, int32_t to)
    {
        if (tr[0] == from)
        {
            tr[0] = to;
        }
        else
        {
            if (c == 2 && tr[1] == from)
            {
                tr[1] = to;
            }
        }
    }
    /**
        Get the triangle mapped by this edge whose index differs from the
        provided one, or -1 if there is none.
    */
    int32_t getNot(int32_t id)
    {
        if (tr[0] != id)
        {
            return tr[0];
        }
        if (c == 2 && tr[1] != id)
        {
            return tr[1];
        }
        return -1;
    }

};
+
+
/**
    Tool for the post-processing steps of authoring: triangulation of a cut
    mesh, tesselation of its internal surface, and noise displacement.
    Typical call order: triangulate() -> tesselateInternalSurface() ->
    applyNoise(); reset() clears all accumulated state.
*/
class ChunkPostProcessor
{
public:
    /**
        Edge classification produced by prebuildEdgeFlagArray():
        internal/external surface edges and the borders between them.
    */
    enum EdgeFlag{ INTERNAL_EDGE, EXTERNAL_BORDER_EDGE, INTERNAL_BORDER_EDGE, EXTERNAL_EDGE, NONE };

    /**
        Triangulates the provided mesh and saves the result internally.
        Uses the ear-clipping algorithm.
        \param[in] mesh Mesh for triangulation; ignored if null or invalid.
    */
    void triangulate(Mesh* mesh);

    /**
        \return Array of triangles of the base (triangulated, un-noised) mesh.
    */
    std::vector<Triangle>& getBaseMesh()
    {
        return mBaseMeshResultTriangles;
    }

    /**
        \return Array of TriangleIndexed of the base mesh. Each TriangleIndexed
        holds indices of its corners in the internal (welded) vertex buffer.
    */
    std::vector<TriangleIndexed>& getBaseMeshIndexed()
    {
        return mBaseMeshTriangles;
    }


    /**
        \return Mapping from vertices of the input Mesh to the internal vertex
        buffer. Used for island detection.
    */
    std::vector<uint32_t>& getBaseMapping()
    {
        return mBaseMapping;
    };
    /**
        \return Mapping from vertices of the input Mesh to the internal vertex
        buffer where only positions are accounted. Used for island detection.
    */
    std::vector<int32_t>& getPositionedMapping()
    {
        return mPositionMappedVrt;
    };

    /**
        \return Internal vertex buffer size. Vertices are welded internally
        with some threshold.
    */
    uint32_t getWeldedVerticesCount()
    {
        return static_cast<uint32_t>(mVertices.size());
    }
    /**
        Tesselate the internal surface.
        \param[in] maxLen - maximal length of an edge on the internal surface.
    */
    void tesselateInternalSurface(float maxLen);

    /**
        Apply noise to the internal surface. Must be called only after
        tesselation!
        \param[in] noise - noise generator
        \param[in] falloff - damping of noise near the external surface
        \param[in] relaxIterations - number of smoothing iterations before applying noise
        \param[in] relaxFactor - amount of smoothing before applying noise.
    */
    void applyNoise(SimplexNoise& noise, float falloff, int32_t relaxIterations, float relaxFactor);

    /**
        \return Array of noised mesh triangles (valid after applyNoise()).
    */
    std::vector<Triangle>& getNoisyMesh()
    {
        return mTesselatedMeshResultTriangles;
    };

    /**
        Removes all information about mesh triangulation, tesselation, etc.
    */
    void reset();

private:


    // --- Tesselation primitives ---
    void collapseEdge(int32_t id);
    void divideEdge(int32_t id);
    void updateVertEdgeInfo();
    void updateEdgeTriangleInfo();

    // Vertex welding / edge registration used during prepare().
    int32_t addVerticeIfNotExist(Vertex& p);
    void addEdgeIfValid(EdgeWithParent& ed);

    /* Data used before triangulation to build polygon loops*/

    std::vector<Vertex> mVertices;                          // Welded vertex buffer.
    std::vector<EdgeWithParent> mBaseMeshEdges;             // Facet edges (welded indices).
    std::map<Vertex, int32_t, VrtComp> mVertMap;            // Vertex -> welded index.
    std::map<EdgeWithParent, int32_t, EdgeComparator> mEdgeMap; // Edge -> index in mBaseMeshEdges.
    std::vector<uint32_t> mBaseMapping;                     // Input vertex -> welded index.



    /**
        Unite all almost-similar vertices, and update edges according to
        these changes.
    */
    void prepare(Mesh* mesh);

    /* ------------------------------------------------------------ */

    /* Triangulation and tesselation stage data */
    bool isTesselated;                                      // Set by tesselateInternalSurface().

    std::map<Edge, int32_t> mTrMeshEdgeMap;                 // Canonical edge -> edge index.
    std::vector<Edge> mTrMeshEdges;                         // Tesselation edges.
    std::vector<EdgeToTriangles> mTrMeshEdToTr;             // Edge -> bordering triangles.
    std::vector<int32_t> mVertexValence;                    // Per-vertex live-edge count.
    std::vector<std::vector<int32_t> > mVertexToTriangleMap;// Vertex -> incident triangles.

    std::vector<bool> mRestrictionFlag;                     // Pinned vertices (no move/noise).
    std::vector<EdgeFlag> mEdgeFlag;                        // Per-edge classification.

    std::vector<float> mVerticesDistances;                  // BFS distance from external surface.
    std::vector<physx::PxVec3> mVerticesNormalsSmoothed;    // Noise displacement directions.
    std::vector<int32_t> mPositionMappedVrt;                // Vertex -> position representative.
    std::vector<std::vector<int32_t> > mGeometryGraph;      // Position-based adjacency lists.


    int32_t addEdgeTr(const Edge& ed);
    int32_t findEdge(const Edge& e);

    void prebuildEdgeFlagArray();
    void computePositionedMapping();

    void computeFalloffAndNormals();



    void triangulatePolygonWithEarClipping(std::vector<uint32_t>& inputPolygon, Vertex* vert, ProjectionDirections dir);
    void buildPolygonAndTriangulate(std::vector<Edge>& edges, Vertex* vertices, int32_t userData);

    void relax(int32_t iterations, float factor, std::vector<Vertex>& vertices);
    void recalcNoiseDirs();

    std::vector<TriangleIndexed> mBaseMeshTriangles;        // Indexed base-mesh triangles.
    std::vector<TriangleIndexed> mTesselatedMeshTriangles;  // Indexed tesselated triangles.

    /**
        Resolve final (tesselated) triangles into mTesselatedMeshResultTriangles.
    */
    void prebuildTesselatedTriangles();

    std::vector<Triangle> mBaseMeshResultTriangles;         // Output of triangulate().
    std::vector<Triangle> mTesselatedMeshResultTriangles;   // Output of tesselation/noise.
};
+
+} // namespace Blast
+} // namespace Nv
+
+
+#endif // ifndef NVBLASTEXTAUTHORINGTRIANGULATOR_H
diff --git a/NvBlast/sdk/extensions/authoring/source/NvBlastExtAuthoringVSA.h b/NvBlast/sdk/extensions/authoring/source/NvBlastExtAuthoringVSA.h
new file mode 100644
index 0000000..fd0c9c9
--- /dev/null
+++ b/NvBlast/sdk/extensions/authoring/source/NvBlastExtAuthoringVSA.h
@@ -0,0 +1,312 @@
+/*
+* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#ifndef NVBLASTEXTAUTHORINGVSA_H
+#define NVBLASTEXTAUTHORINGVSA_H
+
+namespace Nv
+{
+namespace Blast
+{
+
+/*
+ This code copied from APEX GSA
+*/
+
+namespace VSA
+{
+typedef float real;
+
+/**
+	Abstract description of a convex region as an intersection of half-spaces.
+
+	farthest_halfspace writes into plane[4] the half-space (homogeneous form: normal
+	in plane[0..2], displacement in plane[3]) whose boundary is farthest from the
+	given homogeneous point, and returns the corresponding (signed) distance value.
+	NOTE(review): whether the returned plane must be normalized depends on
+	VS3D_UNNORMALIZED_PLANE_HANDLING (see vs3d_test) -- confirm with implementors.
+*/
+struct VS3D_Halfspace_Set
+{
+	virtual real farthest_halfspace(real plane[4], const real point[4]) = 0;
+};
+
+
+// Simple types and operations for internal calculations
+// NOTE: operator | (dot) and operator ^ (cross) have low C++ precedence;
+// all uses below are parenthesized, e.g. (a | b) + c.
+struct Vec3 { real x, y, z; }; // 3-vector
+inline Vec3 vec3(real x, real y, real z) { Vec3 r; r.x = x; r.y = y; r.z = z; return r; } // vector builder
+inline Vec3 operator + (const Vec3& a, const Vec3& b) { return vec3(a.x + b.x, a.y + b.y, a.z + b.z); } // vector addition
+inline Vec3 operator * (real s, const Vec3& v) { return vec3(s*v.x, s*v.y, s*v.z); } // scalar multiplication
+inline real operator | (const Vec3& a, const Vec3& b) { return a.x*b.x + a.y*b.y + a.z*b.z; } // dot product
+inline Vec3 operator ^ (const Vec3& a, const Vec3& b) { return vec3(a.y*b.z - b.y*a.z, a.z*b.x - b.z*a.x, a.x*b.y - b.x*a.y); } // cross product
+
+struct Vec4 { Vec3 v; real w; }; // 4-vector split into 3-vector and scalar parts
+inline Vec4 vec4(const Vec3& v, real w) { Vec4 r; r.v = v; r.w = w; return r; } // vector builder
+inline real operator | (const Vec4& a, const Vec4& b) { return (a.v | b.v) + a.w*b.w; } // dot product
+
+// More accurate perpendicular
+// Returns a vector perpendicular to both a and b.  The refinement term (a first-order
+// correction driving (a|c) and (b|c) toward zero) is only compiled in when
+// VS3D_HIGH_ACCURACY or REAL_DOUBLE is defined; otherwise this is a plain cross product.
+inline Vec3 perp(const Vec3& a, const Vec3& b)
+{
+	Vec3 c = a^b;	// Cross-product gives perpendicular
+#if VS3D_HIGH_ACCURACY || REAL_DOUBLE
+	const real c2 = c | c;
+	if (c2 != 0) c = c + (1 / c2)*((a | c)*(c^b) + (b | c)*(a^c));	// Improvement to (a b)^T(c) = (0)
+#endif
+	return c;
+}
+
+// Square
+inline real sq(real x) { return x*x; }
+
+// Returns index of the extremal element in a three-element set {e0, e1, e2} based upon comparisons c_ij. The extremal index m is such that c_mn is true, or e_m == e_n, for all n.
+// (Branchless: encodes the winner of the three pairwise comparisons into bits 0 and 1.)
+inline int ext_index(int c_10, int c_21, int c_20) { return c_10 << c_21 | (c_21&c_20) << 1; }
+
+// Returns index (0, 1, or 2) of minimum argument
+inline int index_of_min(real x0, real x1, real x2) { return ext_index((int)(x1 < x0), (int)(x2 < x1), (int)(x2 < x0)); }
+
+// Compare fractions with positive denominators. Returns a_num*sqrt(a_rden2) > b_num*sqrt(b_rden2)
+// Avoids square roots by comparing signs first, then squared magnitudes.
+inline bool frac_gt(real a_num, real a_rden2, real b_num, real b_rden2)
+{
+	const bool a_num_neg = a_num < 0;
+	const bool b_num_neg = b_num < 0;
+	return a_num_neg != b_num_neg ? b_num_neg : ((a_num*a_num*a_rden2 > b_num*b_num*b_rden2) != a_num_neg);
+}
+
+// Returns index (0, 1, or 2) of maximum fraction with positive denominators
+inline int index_of_max_frac(real x0_num, real x0_rden2, real x1_num, real x1_rden2, real x2_num, real x2_rden2)
+{
+	return ext_index((int)frac_gt(x1_num, x1_rden2, x0_num, x0_rden2), (int)frac_gt(x2_num, x2_rden2, x1_num, x1_rden2), (int)frac_gt(x2_num, x2_rden2, x0_num, x0_rden2));
+}
+
+// Compare values given their signs and squares. Returns a > b. a2 and b2 may have any constant offset applied to them.
+inline bool sgn_sq_gt(real sgn_a, real a2, real sgn_b, real b2) { return sgn_a*sgn_b < 0 ? (sgn_b < 0) : ((a2 > b2) != (sgn_a < 0)); }
+
+// Returns index (0, 1, or 2) of maximum value given their signs and squares. sq_x0, sq_x1, and sq_x2 may have any constant offset applied to them.
+inline int index_of_max_sgn_sq(real sgn_x0, real sq_x0, real sgn_x1, real sq_x1, real sgn_x2, real sq_x2)
+{
+	return ext_index((int)sgn_sq_gt(sgn_x1, sq_x1, sgn_x0, sq_x0), (int)sgn_sq_gt(sgn_x2, sq_x2, sgn_x1, sq_x1), (int)sgn_sq_gt(sgn_x2, sq_x2, sgn_x0, sq_x0));
+}
+
+// Project 2D (homogeneous) vector onto 2D half-space boundary
+// r is a homogeneous 2D point/direction with weight r.z; delta is the precomputed
+// signed distance (r | plane), and recip_n2 = 1/|plane normal|^2.  If the projected
+// r collapses below eps2 (r was essentially a direction), it is replaced with the
+// boundary point of the plane nearest the origin, with weight 1.
+inline void project2D(Vec3& r, const Vec3& plane, real delta, real recip_n2, real eps2)
+{
+	r = r + (-delta*recip_n2)*vec3(plane.x, plane.y, 0);
+	r = r + (-(r | plane)*recip_n2)*vec3(plane.x, plane.y, 0);	// Second projection for increased accuracy
+	if ((r | r) > eps2) return;
+	r = (-plane.z*recip_n2)*vec3(plane.x, plane.y, 0);
+	r.z = 1;
+}
+
+
+// Update function for vs3d_test
+//
+// Incorporates the most recently queried half-space h = S[plane_count - 1] into the
+// current simplex: computes a new candidate point p (in homogeneous coordinates)
+// within the h plane, and prunes planes of S that have become redundant, rewriting
+// S and plane_count in place.  Returns false when a void (empty intersection) is
+// detected in 1D, 2D, or 3D; true otherwise.
+// NOTE(review): heavy use of labels/goto and in-place array packing -- statement
+// order is load-bearing throughout; do not reorder.
+static bool vs3d_update(Vec4& p, Vec4 S[4], int& plane_count, const Vec4& q, real eps2)
+{
+	// h plane is the last plane
+	const Vec4& h = S[plane_count - 1];
+
+	// Handle plane_count == 1 specially (optimization; this could be commented out)
+	if (plane_count == 1)
+	{
+		// Solution is objective projected onto h plane
+		p = q;
+		p.v = p.v + -(p | h)*h.v;
+		if ((p | p) <= eps2) p = vec4(-h.w*h.v, 1);	// If p == 0 then q is a direction vector, any point in h is a support point
+		return true;
+	}
+
+	// Create basis in the h plane
+	const int min_i = index_of_min(h.v.x*h.v.x, h.v.y*h.v.y, h.v.z*h.v.z);
+	const Vec3 y = h.v^vec3((real)(min_i == 0), (real)(min_i == 1), (real)(min_i == 2));
+	const Vec3 x = y^h.v;
+
+	// Use reduced vector r instead of p
+	Vec3 r = { x | q.v, y | q.v, q.w*(y | y) };	// (x|x) = (y|y) = square of plane basis scale
+
+	// If r == 0 (within epsilon), then it is a direction vector, and we have a bounded solution
+	if ((r | r) <= eps2) r.z = 1;
+
+	// Create plane equations in the h plane. These will not be normalized in general.
+	int N = 0;	// Plane count in h subspace
+	Vec3 R[3];	// Planes in h subspace
+	real recip_n2[3];	// Plane normal vector reciprocal lengths squared
+	real delta[3];	// Signed distance of objective to the planes
+	int index[3];	// Keep track of original plane indices
+	for (int i = 0; i < plane_count - 1; ++i)
+	{
+		const Vec3& vi = S[i].v;
+		const real cos_theta = h.v | vi;
+		R[N] = vec3(x | vi, y | vi, S[i].w - h.w*cos_theta);
+		index[N] = i;
+		const real n2 = R[N].x*R[N].x + R[N].y*R[N].y;
+		if (n2 >= eps2)
+		{
+			const real lin_norm = (real)1.5 - (real)0.5*n2;	// 1st-order approximation to 1/sqrt(n2) expanded about n2 = 1
+			R[N] = lin_norm*R[N];	// We don't need normalized plane equations, but rescaling (even with an approximate normalization) gives better numerical behavior
+			recip_n2[N] = 1 / (R[N].x*R[N].x + R[N].y*R[N].y);
+			delta[N] = r | R[N];
+			++N;	// Keep this plane
+		}
+		else if (cos_theta < 0) return false;	// Parallel cases are redundant and rejected, anti-parallel cases are 1D voids
+	}
+
+	// Now work with the N-sized R array of half-spaces in the h plane
+	switch (N)
+	{
+	case 1: one_plane :
+		if (delta[0] < 0) N = 0;	// S[0] is redundant, eliminate it
+		else project2D(r, R[0], delta[0], recip_n2[0], eps2);
+		break;
+	case 2: two_planes :
+		if (delta[0] < 0 && delta[1] < 0) N = 0;	// S[0] and S[1] are redundant, eliminate them
+		else
+		{
+			const int max_d_index = (int)frac_gt(delta[1], recip_n2[1], delta[0], recip_n2[0]);
+			project2D(r, R[max_d_index], delta[max_d_index], recip_n2[max_d_index], eps2);
+			const int min_d_index = max_d_index ^ 1;
+			const real new_delta_min = r | R[min_d_index];
+			if (new_delta_min < 0)
+			{
+				index[0] = index[max_d_index];
+				N = 1;	// S[min_d_index] is redundant, eliminate it
+			}
+			else
+			{
+				// Set r to the intersection of R[0] and R[1] and keep both
+				r = perp(R[0], R[1]);
+				if (r.z*r.z*recip_n2[0] * recip_n2[1] < eps2)
+				{
+					if (R[0].x*R[1].x + R[0].y*R[1].y < 0) return false;	// 2D void found
+					goto one_plane;
+				}
+				r = (1 / r.z)*r;	// We could just as well multiply r by sgn(r.z); we just need to ensure r.z > 0
+			}
+		}
+		break;
+	case 3:
+		if (delta[0] < 0 && delta[1] < 0 && delta[2] < 0) N = 0;	// S[0], S[1], and S[2] are redundant, eliminate them
+		else
+		{
+			const Vec3 row_x = { R[0].x, R[1].x, R[2].x };
+			const Vec3 row_y = { R[0].y, R[1].y, R[2].y };
+			const Vec3 row_w = { R[0].z, R[1].z, R[2].z };
+			const Vec3 cof_w = perp(row_x, row_y);
+			const bool detR_pos = (row_w | cof_w) > 0;
+			const int nrw_sgn0 = cof_w.x*cof_w.x*recip_n2[1] * recip_n2[2] < eps2 ? 0 : (((int)((cof_w.x > 0) == detR_pos) << 1) - 1);
+			const int nrw_sgn1 = cof_w.y*cof_w.y*recip_n2[2] * recip_n2[0] < eps2 ? 0 : (((int)((cof_w.y > 0) == detR_pos) << 1) - 1);
+			const int nrw_sgn2 = cof_w.z*cof_w.z*recip_n2[0] * recip_n2[1] < eps2 ? 0 : (((int)((cof_w.z > 0) == detR_pos) << 1) - 1);
+
+			if ((nrw_sgn0 | nrw_sgn1 | nrw_sgn2) >= 0) return false;	// 3D void found
+
+			const int positive_width_count = ((nrw_sgn0 >> 1) & 1) + ((nrw_sgn1 >> 1) & 1) + ((nrw_sgn2 >> 1) & 1);
+			if (positive_width_count == 1)
+			{
+				// A single positive width results from a redundant plane. Eliminate it and perform N = 2 calculation.
+				const int pos_width_index = ((nrw_sgn1 >> 1) & 1) | (nrw_sgn2 & 2);	// Calculates which index corresponds to the positive-width side
+				R[pos_width_index] = R[2];
+				recip_n2[pos_width_index] = recip_n2[2];
+				delta[pos_width_index] = delta[2];
+				index[pos_width_index] = index[2];
+				N = 2;
+				goto two_planes;
+			}
+
+			// Find the max dot product of r and R[i]/|R_normal[i]|. For numerical accuracy when the angle between r and the i^{th} plane normal is small, we take some care below:
+			const int max_d_index = r.z != 0
+				? index_of_max_frac(delta[0], recip_n2[0], delta[1], recip_n2[1], delta[2], recip_n2[2])	// displacement term resolves small-angle ambiguity, just use dot product
+				: index_of_max_sgn_sq(delta[0], -sq(r.x*R[0].y - r.y*R[0].x)*recip_n2[0], delta[1], -sq(r.x*R[1].y - r.y*R[1].x)*recip_n2[1], delta[2], -sq(r.x*R[2].y - r.y*R[2].x)*recip_n2[2]);	// No displacement term. Use wedge product to find the sine of the angle.
+
+			// Project r onto max-d plane
+			project2D(r, R[max_d_index], delta[max_d_index], recip_n2[max_d_index], eps2);
+			N = 1;	// Unless we use a vertex in the loop below
+			const int index_max = index[max_d_index];
+
+			// The number of finite widths should be >= 2. If not, it should be 0, but in any case it implies three parallel lines in the plane, which we should not have here.
+			// If we do have three parallel lines (# of finite widths < 2), we've picked the line corresponding to the half-plane farthest from r, which is correct.
+			const int finite_width_count = (nrw_sgn0 & 1) + (nrw_sgn1 & 1) + (nrw_sgn2 & 1);
+			if (finite_width_count >= 2)
+			{
+				const int i_remaining[2] = { (1 << max_d_index) & 3, (3 >> max_d_index) ^ 1 };	// = {(max_d_index+1)%3, (max_d_index+2)%3}
+				const int i_select = (int)frac_gt(delta[i_remaining[1]], recip_n2[i_remaining[1]], delta[i_remaining[0]], recip_n2[i_remaining[0]]);	// Select the greater of the remaining dot products
+				for (int i = 0; i < 2; ++i)
+				{
+					const int j = i_remaining[i_select^i];	// i = 0 => the next-greatest, i = 1 => the least
+					if ((r | R[j]) >= 0)
+					{
+						r = perp(R[max_d_index], R[j]);
+						r = (1 / r.z)*r;	// We could just as well multiply r by sgn(r.z); we just need to ensure r.z > 0
+						index[1] = index[j];
+						N = 2;
+						break;
+					}
+				}
+			}
+
+			index[0] = index_max;
+		}
+		break;
+	}
+
+	// Transform r back to 3D space
+	p = vec4(r.x*x + r.y*y + (-r.z*h.w)*h.v, r.z);
+
+	// Pack S array with kept planes
+	if (N < 2 || index[1] != 0) { for (int i = 0; i < N; ++i) S[i] = S[index[i]]; }	// Safe to copy columns in order
+	else { const Vec4 temp = S[0]; S[0] = S[index[0]]; S[1] = temp; }	// Otherwise use temp storage to avoid overwrite
+	S[N] = h;
+	plane_count = N + 1;
+
+	return true;
+}
+
+
+// Performs the VS algorithm for D = 3
+//
+// Tests whether the half-space intersection described by halfspace_set is non-empty,
+// optionally near the homogeneous objective point q (q[0..2] position, q[3] weight).
+// Return values: 1 = intersection found, 0 = void simplex found (empty intersection),
+// -1 = iteration limit exceeded (error).
+// If q is non-null it is overwritten with the solution point on return.
+// NOTE(review): vs3d_recip_sqrt used under VS3D_UNNORMALIZED_PLANE_HANDLING is not
+// defined in this header -- confirm availability before enabling that path.
+inline int vs3d_test(VS3D_Halfspace_Set& halfspace_set, real* q = nullptr)
+{
+	// Objective = q if it is not NULL, otherwise it is the origin represented in homogeneous coordinates
+	const Vec4 objective = q ? (q[3] != 0 ? vec4((1 / q[3])*vec3(q[0], q[1], q[2]), 1) : *(Vec4*)q) : vec4(vec3(0, 0, 0), 1);
+
+	// Tolerance for 3D void simplex algorithm
+	const real eps_f = (real)1 / (sizeof(real) == 4 ? (1L << 23) : (1LL << 52));	// Floating-point epsilon
+#if VS3D_HIGH_ACCURACY || REAL_DOUBLE
+	const real eps = 8 * eps_f;
+#else
+	const real eps = 80 * eps_f;
+#endif
+	const real eps2 = eps*eps;	// Using epsilon squared
+
+	// Maximum allowed iterations of main loop. If exceeded, error code is returned
+	const int max_iteration_count = 50;
+
+	// State
+	Vec4 S[4];	// Up to 4 planes
+	int plane_count = 0;	// Number of valid planes
+	Vec4 p = objective;	// Test point, initialized to objective
+
+	// Default result, changed to valid result if found in loop below
+	int result = -1;
+
+	// Iterate until a stopping condition is met or the maximum number of iterations is reached
+	for (int i = 0; result < 0 && i < max_iteration_count; ++i)
+	{
+		Vec4& plane = S[plane_count++];
+		real delta = halfspace_set.farthest_halfspace(&plane.v.x, &p.v.x);
+#if VS3D_UNNORMALIZED_PLANE_HANDLING != 0
+		const real recip_norm = vs3d_recip_sqrt(plane.v | plane.v);
+		plane = vec4(recip_norm*plane.v, recip_norm*plane.w);
+		delta *= recip_norm;
+#endif
+		if (delta <= 0 || delta*delta <= eps2*(p | p)) result = 1;	// Intersection found
+		else if (!vs3d_update(p, S, plane_count, objective, eps2)) result = 0;	// Void simplex found
+	}
+
+	// If q is given, fill it with the solution (normalize p.w if it is not zero)
+	if (q) *(Vec4*)q = (p.w != 0) ? vec4((1 / p.w)*p.v, 1) : p;
+
+	return result;
+}
+
+} // namespace VSA
+
+} // namespace Blast
+} // namespace Nv
+
+
+#endif // ifndef NVBLASTEXTAUTHORINGVSA_H
diff --git a/NvBlast/sdk/extensions/authoring/source/NvBlastExtTriangleProcessor.cpp b/NvBlast/sdk/extensions/authoring/source/NvBlastExtTriangleProcessor.cpp
new file mode 100644
index 0000000..3c3e540
--- /dev/null
+++ b/NvBlast/sdk/extensions/authoring/source/NvBlastExtTriangleProcessor.cpp
@@ -0,0 +1,355 @@
+/*
+* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#include "NvBlastExtTriangleProcessor.h"
+#include "NvBlastExtAuthoringInternalCommon.h"
+#define COLLIN_EPS 1e-4f
+#define V_COMP_EPS 1e-5f
+
+using namespace physx;
+
+namespace Nv
+{
+namespace Blast
+{
+/**
+	Segments bounding box intersection test: do the axis-aligned bounding
+	rectangles of segments (s1,e1) and (s2,e2) overlap?
+*/
+bool boundingRectangleIntersection(const PxVec2& s1, const PxVec2& e1, const PxVec2& s2, const PxVec2& e2)
+{
+	// Per-axis interval overlap: the greater of the interval minimums must not
+	// exceed the lesser of the interval maximums.
+	const PxF32 loX1 = PxMin(s1.x, e1.x);
+	const PxF32 hiX1 = PxMax(s1.x, e1.x);
+	const PxF32 loX2 = PxMin(s2.x, e2.x);
+	const PxF32 hiX2 = PxMax(s2.x, e2.x);
+	if (PxMax(loX1, loX2) > PxMin(hiX1, hiX2))
+		return false;
+
+	const PxF32 loY1 = PxMin(s1.y, e1.y);
+	const PxF32 hiY1 = PxMax(s1.y, e1.y);
+	const PxF32 loY2 = PxMin(s2.y, e2.y);
+	const PxF32 hiY2 = PxMax(s2.y, e2.y);
+	return PxMax(loY1, loY2) <= PxMin(hiY1, hiY2);
+}
+
+// 2D cross product (z component of the 3D cross): sign encodes turn direction of b relative to a.
+inline PxF32 getRotation(PxVec2 a, PxVec2 b)
+{
+	return a.x * b.y - a.y * b.x;
+}
+
+// Normalized position of 'point' along segment a->b, as ratio of distances.
+// NOTE(review): assumes point lies on the segment (ratio of magnitudes, not a projection) -- confirm callers.
+inline PxF32 getParameter(const PxVec2& a, const PxVec2& b, const PxVec2& point)
+{
+	return (point - a).magnitude() / (b - a).magnitude();
+}
+// Linear interpolation between a and b at parameter t (t = 0 gives a, t = 1 gives b).
+inline PxVec3 lerp3D(const PxVec3& a, const PxVec3& b, const PxF32 t)
+{
+	return (b - a) * t + a;
+}
+
+
+
+/**
+	2D line in implicit form normal.dot(p) + c == 0, built from a direction
+	vector along the line and a point on the line.
+*/
+struct Line2D
+{
+	PxVec2 normal;	// Perpendicular to the supplied direction vector
+	PxF32 c;	// Offset such that points on the line satisfy normal.dot(p) + c == 0
+	Line2D(PxVec2 vec, PxVec2 point) : normal(vec.y, -vec.x)
+	{
+		c = -normal.dot(point);
+	}
+};
+
+
+// Intersect segments (s1,e1) and (s2,e2).  Returns 1 and writes the intersection
+// parameter along segment 1 into t1 on a hit; returns 0 (t1 untouched) otherwise.
+// Near-parallel segments (|det| < COLLIN_EPS) are treated as non-intersecting.
+uint32_t TriangleProcessor::getSegmentIntersection(const PxVec2& s1, const PxVec2& e1, const PxVec2& s2, const PxVec2& e2, PxF32& t1)
+{
+	// Cheap rejection: bounding rectangles must overlap.
+	if (!boundingRectangleIntersection(s1, e1, s2, e2))
+		return 0;
+
+	PxVec2 vec1 = e1 - s1;
+	PxVec2 vec2 = e2 - s2;
+	PxF32 det1 = getRotation(vec1, vec2);
+	if (PxAbs(det1) < COLLIN_EPS)
+	{
+		return 0;
+	}
+	// Solve the two implicit line equations (normal.dot(p) + c == 0) with Cramer's rule;
+	// det1 equals the determinant of the 2x2 normal matrix.
+	Line2D lineA(vec1, s1);
+	Line2D lineB(vec2, s2);
+	PxVec2 fInt;
+
+	PxF32 detX = lineA.normal.y * lineB.c - lineA.c * lineB.normal.y;
+	PxF32 detY = lineA.c * lineB.normal.x - lineB.c * lineA.normal.x;
+	PxF32 x = detX / det1;
+	PxF32 y = detY / det1;
+
+	// Accept only if the line-line intersection lies within both segments' ranges
+	// (with V_COMP_EPS tolerance on each bound).
+	if (x + V_COMP_EPS >= PxMax(PxMin(s1.x, e1.x), PxMin(s2.x, e2.x)) &&
+		x - V_COMP_EPS <= PxMin(PxMax(s1.x, e1.x), PxMax(s2.x, e2.x)) &&
+		y + V_COMP_EPS >= PxMax(PxMin(s1.y, e1.y), PxMin(s2.y, e2.y)) &&
+		y - V_COMP_EPS <= PxMin(PxMax(s1.y, e1.y), PxMax(s2.y, e2.y)))
+	{
+		fInt.x = x;
+		fInt.y = y;
+		t1 = getParameter(s1, e1, fInt);
+		return 1;
+	}
+
+	return 0;
+}
+
+/**
+	Ordering functor for std::sort: orders coplanar points angularly around
+	basePoint, using the orientation of (a - base) x (b - base) against the
+	supplied normal.
+*/
+struct cwComparer
+{
+	PxVec3 basePoint;
+	PxVec3 normal;
+	cwComparer(PxVec3 basePointIn, PxVec3 norm) : basePoint(basePointIn), normal(norm)
+	{
+	};
+	bool operator()(const PxVec3& a, const PxVec3& b)
+	{
+		return normal.dot((a - basePoint).cross(b - basePoint)) > 0;
+	}
+};
+
+// Lexicographic (x, then y, then z) less-than with V_COMP_EPS tolerance per component.
+// NOTE(review): epsilon-tolerant comparators do not in general satisfy the strict
+// weak ordering std::sort requires; appears adequate for the point sets used here,
+// but verify if sorting larger/denser sets.
+bool vec3Comparer(const PxVec3& a, const PxVec3& b)
+{
+	if (a.x + V_COMP_EPS < b.x) return true;
+	if (a.x - V_COMP_EPS > b.x) return false;
+	if (a.y + V_COMP_EPS < b.y) return true;
+	if (a.y - V_COMP_EPS > b.y) return false;
+	if (a.z + V_COMP_EPS < b.z) return true;
+	return false;
+}
+
+/**
+	Sort vertices of polygon in CCW-order: lexicographically sort the points,
+	compact near-duplicates (within V_COMP_EPS per component), then order the
+	remaining points angularly around the first one using the supplied normal.
+*/
+void TriangleProcessor::sortToCCW(std::vector<PxVec3>& points, PxVec3& normal)
+{
+	std::sort(points.begin(), points.end(), vec3Comparer);
+
+	// In-place compaction of near-duplicate points.
+	uint32_t uniqueCount = 1;
+	for (uint32_t i = 1; i < points.size(); ++i)
+	{
+		const PxVec3 delta = points[i] - points[uniqueCount - 1];
+		if (delta.x > V_COMP_EPS || delta.y > V_COMP_EPS || delta.z > V_COMP_EPS)
+		{
+			points[uniqueCount++] = points[i];
+		}
+	}
+	points.resize(uniqueCount);
+
+	// With three or more points, angularly order all but the first around it.
+	if (points.size() > 2)
+	{
+		std::sort(points.begin() + 1, points.end(), cwComparer(points[0], normal));
+	}
+}
+
+
+
+// Build a convex polygon from a set of coplanar points:
+// 1) lexicographic sort + near-duplicate compaction (same pass as sortToCCW),
+// 2) angular sort around points[0] using the supplied normal,
+// 3) a Graham-scan-like sweep that drops points making a non-convex turn.
+// Leaves convexHull empty if fewer than 3 unique points remain.
+void TriangleProcessor::buildConvexHull(std::vector<PxVec3>& points, std::vector<PxVec3>& convexHull,const PxVec3& normal)
+{
+
+	std::sort(points.begin(), points.end(), vec3Comparer);
+	int lastUnique = 0;
+	for (uint32_t i = 1; i < points.size(); ++i)
+	{
+		PxVec3 df = points[i] - points[lastUnique];
+		if (df.x > V_COMP_EPS || df.y > V_COMP_EPS || df.z > V_COMP_EPS)
+		{
+			points[++lastUnique] = points[i];
+		}
+	}
+	points.resize(lastUnique + 1);
+	if (points.size() > 2)
+	{
+		cwComparer compr(points[0], normal);
+		std::sort(points.begin() + 1, points.end(), compr);
+	}
+	if (points.size() < 3)
+		return;
+	convexHull.push_back(points[0]);
+	convexHull.push_back(points[1]);
+	// Sweep in 2D projection chosen from the normal's dominant axis.
+	ProjectionDirections projectionDirection = getProjectionDirection(normal);
+	for (uint32_t i = 2; i < points.size(); ++i)
+	{
+		PxVec2 pnt = getProjectedPointWithWinding(points[i], projectionDirection);
+		PxVec2 vec = pnt - getProjectedPointWithWinding(convexHull.back(), projectionDirection);
+		// NOTE(review): skip condition uses signed components without PxAbs -- presumably
+		// intended to drop near-duplicate projected points; confirm intent.
+		if (vec.x < V_COMP_EPS && vec.y < V_COMP_EPS)
+		{
+			continue;
+		}
+		if (getRotation(vec, getProjectedPointWithWinding(convexHull.back(), projectionDirection) - getProjectedPointWithWinding(convexHull[convexHull.size() - 2], projectionDirection)) < 0)
+		{
+			convexHull.push_back(points[i]);
+		}
+		else
+		{
+			// Pop points that would make the hull boundary concave, then accept points[i].
+			while (convexHull.size() > 1 && getRotation(vec, getProjectedPointWithWinding(convexHull.back(), projectionDirection) - getProjectedPointWithWinding(convexHull[convexHull.size() - 2], projectionDirection)) > 0)
+			{
+				convexHull.pop_back();
+				vec = pnt - getProjectedPointWithWinding(convexHull.back(), projectionDirection);
+			}
+			convexHull.push_back(points[i]);
+		}
+	}
+}
+
+
+// Build the intersection polygon of coplanar triangles A and B into intersectionBuffer.
+// Collects: B's vertices inside A, A's vertices inside B, and all edge-edge
+// intersection points; then orders them into a convex polygon and translates the
+// result back by +centroid.  Returns 1 if any candidate point was found, else 0.
+// NOTE(review): b is modified in place (centroid subtracted, never restored), and
+// a's points appear to be expected already centroid-relative -- confirm with callers.
+uint32_t TriangleProcessor::getTriangleIntersection(TrPrcTriangle& a, TrPrcTriangle2d& aProjected, TrPrcTriangle &b, PxVec3& centroid, std::vector<PxVec3>& intersectionBuffer, PxVec3 normal)
+{
+
+	b.points[0] -= centroid;
+	b.points[1] -= centroid;
+	b.points[2] -= centroid;
+
+	ProjectionDirections prjDir = getProjectionDirection(normal);
+
+	TrPrcTriangle2d bProjected;
+	bProjected.points[0] = getProjectedPointWithWinding(b.points[0], prjDir);
+	bProjected.points[1] = getProjectedPointWithWinding(b.points[1], prjDir);
+	bProjected.points[2] = getProjectedPointWithWinding(b.points[2], prjDir);
+
+
+	if (!triangleBoundingBoxIntersection(aProjected, bProjected)) return 0;
+
+	//* Check triangle A against points of B *//
+	for (int i = 0; i < 3; ++i)
+	{
+		if (isPointInside(bProjected.points[i], aProjected))
+		{
+			intersectionBuffer.push_back(b.points[i]);
+		}
+	}
+	//* Check triangle B against points of A *//
+	for (int i = 0; i < 3; ++i)
+	{
+		if (isPointInside(aProjected.points[i], bProjected))
+		{
+			intersectionBuffer.push_back(a.points[i]);
+		}
+	}
+
+	//* Check edges intersection *//
+	float param = 0;
+	for (int i = 0; i < 3; ++i)
+	{
+		for (int j = 0; j < 3; ++j)
+		{
+			if (getSegmentIntersection(aProjected.points[i], aProjected.points[(i + 1) % 3], bProjected.points[j], bProjected.points[(j + 1) % 3], param))
+			{
+				// Intersection parameter is along A's edge; lerp in 3D to recover the point.
+				intersectionBuffer.push_back(lerp3D(a.points[i], a.points[(i + 1) % 3], param));
+			}
+		}
+	}
+
+	if (intersectionBuffer.size() == 0)
+		return 0;
+
+	// Intersection between two triangles is convex, but points should be reordered to construct right polygon //
+	// NOTE(review): buildConvexHull leaves intrs empty when fewer than 3 unique points
+	// remain, yet 1 is still returned -- callers should tolerate an empty result polygon.
+	std::vector<PxVec3> intrs;
+	buildConvexHull(intersectionBuffer, intrs, normal);
+	intersectionBuffer = intrs;
+
+	// Return all points back from origin //
+	for (uint32_t i = 0; i < intersectionBuffer.size(); ++i)
+	{
+		intersectionBuffer[i] += centroid;
+	}
+	return 1;
+}
+
+
+
+/**
+	Test whether the 2D axis-aligned bounding boxes of two triangles overlap.
+	\param[in] a First triangle (A), in 2D
+	\param[in] b Second triangle (B), in 2D
+	\return true if the bounding boxes overlap (with V_COMP_EPS tolerance, so touching boxes count)
+*/
+bool TriangleProcessor::triangleBoundingBoxIntersection(TrPrcTriangle2d& a, TrPrcTriangle2d& b)
+{
+	// x-axis extents of each triangle
+	float fb = std::min(a.points[0].x, std::min(a.points[1].x, a.points[2].x));
+	float fe = std::max(a.points[0].x, std::max(a.points[1].x, a.points[2].x));
+
+	float sb = std::min(b.points[0].x, std::min(b.points[1].x, b.points[2].x));
+	float se = std::max(b.points[0].x, std::max(b.points[1].x, b.points[2].x));
+
+	// Separated along x?
+	if (std::min(fe, se) + V_COMP_EPS < std::max(fb, sb))
+		return false;
+
+	// y-axis extents of each triangle
+	fb = std::min(a.points[0].y, std::min(a.points[1].y, a.points[2].y));
+	fe = std::max(a.points[0].y, std::max(a.points[1].y, a.points[2].y));
+
+	sb = std::min(b.points[0].y, std::min(b.points[1].y, b.points[2].y));
+	se = std::max(b.points[0].y, std::max(b.points[1].y, b.points[2].y));
+
+	// Separated along y?
+	if (std::min(fe, se) + V_COMP_EPS < std::max(fb, sb))
+		return false;
+
+	return true;
+}
+
+
+/**
+	Classify a 2D point against a triangle via edge-orientation signs.
+	\return 1 if inside, 2 if on an edge (orientation within COLLIN_EPS of zero), 0 otherwise.
+*/
+uint32_t TriangleProcessor::isPointInside(const PxVec2& point, const TrPrcTriangle2d& triangle)
+{
+	// Orientation of the point with respect to each directed triangle edge,
+	// snapped to zero when within COLLIN_EPS (treated as "on the edge").
+	PxF32 side[3];
+	for (int32_t k = 0; k < 3; ++k)
+	{
+		side[k] = getRotation(point - triangle.points[k], triangle.points[(k + 1) % 3] - triangle.points[k]);
+		if (PxAbs(side[k]) < COLLIN_EPS)
+			side[k] = 0;
+	}
+
+	// Inside when all orientations agree in sign (either winding direction).
+	const bool allNonNegative = side[0] >= 0 && side[1] >= 0 && side[2] >= 0;
+	const bool allNonPositive = side[0] <= 0 && side[1] <= 0 && side[2] <= 0;
+	if (allNonNegative || allNonPositive)
+	{
+		return (side[0] == 0 || side[1] == 0 || side[2] == 0) ? 2 : 1;
+	}
+	return 0;
+}
+
+} // namespace Blast
+} // namespace Nv
diff --git a/NvBlast/sdk/extensions/authoring/source/NvBlastExtTriangleProcessor.h b/NvBlast/sdk/extensions/authoring/source/NvBlastExtTriangleProcessor.h
new file mode 100644
index 0000000..db9f682
--- /dev/null
+++ b/NvBlast/sdk/extensions/authoring/source/NvBlastExtTriangleProcessor.h
@@ -0,0 +1,158 @@
+/*
+* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#ifndef NVBLASTEXTTRIANGLEPROCESSOR_H
+#define NVBLASTEXTTRIANGLEPROCESSOR_H
+
+#include <PxPhysicsAPI.h>
+#include <vector>
+#include <algorithm>
+
+using namespace physx;
+
+
+namespace Nv
+{
+namespace Blast
+{
+
+/**
+ Triangle processor internal triangle representation. Contains only vertex positions.
+*/
+struct TrPrcTriangle
+{
+	PxVec3 points[3];	// Vertex positions
+
+	/**
+		Construct from three vertex positions (all default to the zero vector).
+	*/
+	TrPrcTriangle(PxVec3 a = PxVec3(0.0f), PxVec3 b = PxVec3(0.0f), PxVec3 c = PxVec3(0.0f))
+	{
+		points[0] = a;
+		points[1] = b;
+		points[2] = c;
+	}
+
+	// Rule of Zero: the previously hand-written copy constructor and copy assignment
+	// were identical to the compiler-generated memberwise versions, so they are omitted.
+
+	/**
+		\return the (non-normalized) triangle normal (points[1]-points[0]) x (points[2]-points[0]).
+	*/
+	PxVec3 getNormal() const
+	{
+		return (points[1] - points[0]).cross(points[2] - points[0]);
+	}
+};
+
+/**
+ Triangle processor internal 2D triangle representation. Contains only vertex positions.
+*/
+struct TrPrcTriangle2d
+{
+	PxVec2 points[3];	// Vertex positions, projected to 2D
+
+	/**
+		Construct from three 2D vertex positions (all default to the zero vector).
+	*/
+	TrPrcTriangle2d(PxVec2 a = PxVec2(0.0f), PxVec2 b = PxVec2(0.0f), PxVec2 c = PxVec2(0.0f))
+	{
+		points[0] = a;
+		points[1] = b;
+		points[2] = c;
+	}
+
+	// Rule of Zero: rely on the compiler-generated copy operations.  The previous
+	// hand-written copy assignment returned by value (an extra copy per call, and
+	// inconsistent with TrPrcTriangle); the implicit operator= is memberwise and
+	// correctly returns a reference.
+};
+
+/**
+	Helper for computing intersections between coplanar triangles and for building
+	convex polygons from the resulting point sets (see NvBlastExtTriangleProcessor.cpp).
+*/
+class TriangleProcessor
+{
+public:
+
+
+	TriangleProcessor()
+	{};
+	~TriangleProcessor()
+	{
+	}
+
+
+	/**
+		Build intersection between two triangles
+		\param[in] a First triangle (A)
+		\param[in] aProjected Projected triangle A
+		\param[in] b Second triangle (B); modified: centroid is subtracted from its points
+		\param[in] centroid Centroid of first triangle (A)
+		\param[out] intersectionBuffer Result intersection polygon
+		\param[in] normal Normal vector to triangle (Common for both A and B).
+		\return 1 - if intersection is found.
+	*/
+	uint32_t getTriangleIntersection(TrPrcTriangle& a, TrPrcTriangle2d& aProjected, TrPrcTriangle &b, PxVec3& centroid, std::vector<PxVec3>& intersectionBuffer, PxVec3 normal);
+
+	/**
+		Test whether BB of triangles intersect.
+		\param[in] a First triangle (A)
+		\param[in] b Second triangle (B)
+		\return true - if intersect
+	*/
+	bool triangleBoundingBoxIntersection(TrPrcTriangle2d& a, TrPrcTriangle2d& b);
+
+
+	/**
+		Test whether point is inside of triangle.
+		\param[in] point Point coordinates in 2d space.
+		\param[in] triangle Triangle in 2d space.
+		\return 1 - if inside, 2 if on edge, 0 if neither inside nor edge.
+	*/
+	uint32_t isPointInside(const PxVec2& point, const TrPrcTriangle2d& triangle);
+
+	/**
+		Segment intersection point
+		\param[in] s1 Segment-1 start point
+		\param[in] e1 Segment-1 end point
+		\param[in] s2 Segment-2 start point
+		\param[in] e2 Segment-2 end point
+		\param[out] t1 Intersection point parameter relatively to Segment-1, lies in [0.0, 1.0] range. Written only when 1 is returned.
+		\return 0 if there is no intersections, 1 - if intersection is found.
+	*/
+	uint32_t getSegmentIntersection(const PxVec2& s1, const PxVec2& e1, const PxVec2& s2, const PxVec2& e2, PxF32& t1);
+
+	/**
+		Sort vertices of polygon in CCW-order
+	*/
+	void sortToCCW(std::vector<PxVec3>& points, PxVec3& normal);
+
+	/**
+		Builds convex polygon for given set of points. Points should be coplanar.
+		\param[in] points Input array of points
+		\param[out] convexHull Output polygon (left empty if fewer than 3 unique points)
+		\param[in] normal Normal vector to polygon.
+	*/
+	void buildConvexHull(std::vector<PxVec3>& points, std::vector<PxVec3>& convexHull, const PxVec3& normal);
+};
+
+} // namespace Blast
+} // namespace Nv
+
+
+#endif // NVBLASTEXTTRIANGLEPROCESSOR_H
diff --git a/NvBlast/sdk/extensions/common/source/NvBlastExtAllocator.h b/NvBlast/sdk/extensions/common/source/NvBlastExtAllocator.h
new file mode 100644
index 0000000..d917cbf
--- /dev/null
+++ b/NvBlast/sdk/extensions/common/source/NvBlastExtAllocator.h
@@ -0,0 +1,127 @@
+/*
+* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#ifndef NVBLASTEXTALLOCATOR_H
+#define NVBLASTEXTALLOCATOR_H
+
+#include "NvBlastTkFramework.h"
+#include "PxAllocatorCallback.h"
+
+
+namespace Nv
+{
+namespace Blast
+{
+
+/**
+ExtAllocator uses TkFramework allocator
+*/
+class ExtAllocator
+{
+public:
+	// The (ignored) name parameter matches the PhysX allocator-template convention.
+	ExtAllocator(const char* = 0)
+	{
+	}
+
+	// Allocate size bytes through the TkFramework allocator callback.
+	void* allocate(size_t size, const char* filename, int line)
+	{
+		return NvBlastTkFrameworkGet()->getAllocatorCallback().allocate(size, nullptr, filename, line);
+	}
+
+	// Free memory obtained from allocate().
+	void deallocate(void* ptr)
+	{
+		NvBlastTkFrameworkGet()->getAllocatorCallback().deallocate(ptr);
+	}
+
+
+	/**
+	Aligned allocation.
+
+	Example using 16-byte alignment:
+
+	// b will lie on a 16-byte boundary and point to 50 bytes of usable memory
+	void* b = alignedAlloc<16>(50);
+	*/
+	template<int A>
+	static void* alignedAlloc(size_t size, const char* filename, int line)
+	{
+		NV_COMPILE_TIME_ASSERT(A > 0 && A <= 256);
+		// Over-allocate by A bytes so that some A-aligned address strictly above mem
+		// exists within the block.  offset is in [0, A-1]: (mem + offset + 1) is that
+		// aligned address, and the byte immediately preceding the returned pointer
+		// stores the offset so alignedFree can recover the original allocation.
+		unsigned char* mem = (unsigned char*)ExtAllocator().allocate(size + A, filename, line);
+		const unsigned char offset = (unsigned char)((uintptr_t)A - (uintptr_t)mem % A - 1);
+		mem += offset;
+		*mem++ = offset;
+		return mem;
+	}
+
+	template<int A>
+	static void* alignedAlloc(size_t size)
+	{
+		return alignedAlloc<A>(size, __FILE__, __LINE__);
+	}
+
+
+	/**
+	Version of alignedAlloc specialized 16-byte aligned allocation.
+	*/
+	static void* alignedAlloc16(size_t size)
+	{
+		return alignedAlloc<16>(size);
+	}
+
+
+	/**
+	Aligned deallocation.
+
+	Memory freed using this function MUST have been allocated using alignedAlloc.
+
+	Example using free:
+
+	// Using the memory pointer b from the example above (for alignedAlloc)
+	alignedFree(b);
+	*/
+	static void alignedFree(void* block)
+	{
+		if (block != nullptr)
+		{
+			// Read the stored offset from the pad byte just before the block, then
+			// step back to the original allocation start.
+			unsigned char* mem = (unsigned char*)block;
+			const unsigned char offset = *--mem;
+			ExtAllocator().deallocate(mem - offset);
+		}
+	};
+};
+
+
+/**
+ExtAlignedAllocator uses ExtAllocator
+*/
+template<int A>
+class ExtAlignedAllocator
+{
+public:
+	// The (ignored) name parameter matches the PhysX allocator-template convention.
+	ExtAlignedAllocator(const char* = 0)
+	{
+	}
+
+	// Allocate size bytes on an A-byte boundary (see ExtAllocator::alignedAlloc).
+	void* allocate(size_t size, const char* filename, int line)
+	{
+		return ExtAllocator::alignedAlloc<A>(size, filename, line);
+	}
+
+	// Free memory obtained from allocate().  alignedFree returns void; the 'return'
+	// here merely forwards that void expression.
+	void deallocate(void* ptr)
+	{
+		return ExtAllocator::alignedFree(ptr);
+	}
+};
+
+} // namespace Blast
+} // namespace Nv
+
+
+#endif // #ifndef NVBLASTEXTALLOCATOR_H
diff --git a/NvBlast/sdk/extensions/common/source/NvBlastExtArray.h b/NvBlast/sdk/extensions/common/source/NvBlastExtArray.h
new file mode 100644
index 0000000..9ea4777
--- /dev/null
+++ b/NvBlast/sdk/extensions/common/source/NvBlastExtArray.h
@@ -0,0 +1,41 @@
+/*
+* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#ifndef NVBLASTEXTARRAY_H
+#define NVBLASTEXTARRAY_H
+
+
+#include "NvBlastExtAllocator.h"
+#include "PsInlineArray.h"
+
+
+namespace Nv
+{
+namespace Blast
+{
+
+// Trait-style alias: ExtArray<T>::type is a PhysX shared-foundation Array using ExtAllocator.
+template <class T>
+struct ExtArray
+{
+	typedef physx::shdfnd::Array<T, ExtAllocator> type;
+};
+
+
+// ExtInlineArray<T, N>::type is the InlineArray variant with ExtAllocator.
+// NOTE(review): per shdfnd::InlineArray semantics, N elements are presumably stored
+// inline before falling back to the allocator -- confirm against PsInlineArray.h.
+template <class T, uint32_t N>
+struct ExtInlineArray
+{
+	typedef physx::shdfnd::InlineArray<T, N, ExtAllocator> type;
+};
+
+} // namespace Blast
+} // namespace Nv
+
+
+#endif // #ifndef NVBLASTEXTARRAY_H
diff --git a/NvBlast/sdk/extensions/common/source/NvBlastExtDefs.h b/NvBlast/sdk/extensions/common/source/NvBlastExtDefs.h
new file mode 100644
index 0000000..72b6c1d
--- /dev/null
+++ b/NvBlast/sdk/extensions/common/source/NvBlastExtDefs.h
@@ -0,0 +1,64 @@
+/*
+* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#ifndef NVBLASTEXTDEFS_H
+#define NVBLASTEXTDEFS_H
+
+#include "NvBlastTkFramework.h"
+#include "PxAllocatorCallback.h"
+#include <new>
+
+
+//////// Log macros that use the ExtContext log function ////////
+
+#define NVBLASTEXT_LOG_ERROR(_msg) NVBLAST_LOG_ERROR(NvBlastTkFrameworkGet()->getLogFn(), _msg)
+#define NVBLASTEXT_LOG_WARNING(_msg) NVBLAST_LOG_WARNING(NvBlastTkFrameworkGet()->getLogFn(), _msg)
+#define NVBLASTEXT_LOG_INFO(_msg) NVBLAST_LOG_INFO(NvBlastTkFrameworkGet()->getLogFn(), _msg)
+#define NVBLASTEXT_LOG_DEBUG(_msg) NVBLAST_LOG_DEBUG(NvBlastTkFrameworkGet()->getLogFn(), _msg)
+
+#define NVBLASTEXT_CHECK(_expr, _messageType, _msg, _onFail) \
+ { \
+ if(!(_expr)) \
+ { \
+ (*NvBlastTkFrameworkGet()->getLogFn())(_messageType, _msg, __FILE__, __LINE__); \
+ { _onFail; }; \
+ } \
+ }
+
+#define NVBLASTEXT_CHECK_ERROR(_expr, _msg, _onFail) NVBLASTEXT_CHECK(_expr, NvBlastMessage::Error, _msg, _onFail)
+#define NVBLASTEXT_CHECK_WARNING(_expr, _msg, _onFail) NVBLASTEXT_CHECK(_expr, NvBlastMessage::Warning, _msg, _onFail)
+#define NVBLASTEXT_CHECK_INFO(_expr, _msg, _onFail) NVBLASTEXT_CHECK(_expr, NvBlastMessage::Info, _msg, _onFail)
+#define NVBLASTEXT_CHECK_DEBUG(_expr, _msg, _onFail) NVBLASTEXT_CHECK(_expr, NvBlastMessage::Debug, _msg, _onFail)
+
+
+//////// Allocator macros ////////
+
+/**
+Placement new with ExtContext allocation.
+Example: Foo* foo = NVBLASTEXT_NEW(Foo) (params);
+*/
+#define NVBLASTEXT_NEW(T) new (NvBlastTkFrameworkGet()->getAllocatorCallback().allocate(sizeof(T), #T, __FILE__, __LINE__)) T
+
+/**
+Respective delete to NVBLASTEXT_NEW
+Example: NVBLASTEXT_DELETE(foo, Foo);
+*/
+#define NVBLASTEXT_DELETE(obj, T) \
+ (obj)->~T(); \
+ NvBlastTkFrameworkGet()->getAllocatorCallback().deallocate(obj)
+
+
+//////// Util macros ////////
+
+// Macro to load a uint32_t (or larger) with four characters
+#define NVBLASTEXT_FOURCC(_a, _b, _c, _d) ( (uint32_t)(_a) | (uint32_t)(_b)<<8 | (uint32_t)(_c)<<16 | (uint32_t)(_d)<<24 )
+
+
+#endif // #ifndef NVBLASTEXTDEFS_H
diff --git a/NvBlast/sdk/extensions/common/source/NvBlastExtHashMap.h b/NvBlast/sdk/extensions/common/source/NvBlastExtHashMap.h
new file mode 100644
index 0000000..0fa094e
--- /dev/null
+++ b/NvBlast/sdk/extensions/common/source/NvBlastExtHashMap.h
@@ -0,0 +1,34 @@
+/*
+* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#ifndef NVBLASTEXTHASHMAP_H
+#define NVBLASTEXTHASHMAP_H
+
+
+#include "NvBlastExtAllocator.h"
+#include "PsHashMap.h"
+
+
+namespace Nv
+{
+namespace Blast
+{
+
+template <class Key, class Value, class HashFn = physx::shdfnd::Hash<Key>>
+struct ExtHashMap
+{
+ typedef physx::shdfnd::HashMap<Key, Value, HashFn, ExtAllocator> type;
+};
+
+} // namespace Blast
+} // namespace Nv
+
+
+#endif // #ifndef NVBLASTEXTHASHMAP_H
diff --git a/NvBlast/sdk/extensions/common/source/NvBlastExtHashSet.h b/NvBlast/sdk/extensions/common/source/NvBlastExtHashSet.h
new file mode 100644
index 0000000..97fc0a9
--- /dev/null
+++ b/NvBlast/sdk/extensions/common/source/NvBlastExtHashSet.h
@@ -0,0 +1,33 @@
+/*
+* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#ifndef NVBLASTEXTHASHSET_H
+#define NVBLASTEXTHASHSET_H
+
+
+#include "NvBlastExtAllocator.h"
+#include "PsHashSet.h"
+
+namespace Nv
+{
+namespace Blast
+{
+
+template <class Key, class HashFn = physx::shdfnd::Hash<Key>>
+struct ExtHashSet
+{
+ typedef physx::shdfnd::HashSet<Key, HashFn, ExtAllocator> type;
+};
+
+} // namespace Blast
+} // namespace Nv
+
+
+#endif // #ifndef NVBLASTEXTHASHSET_H
diff --git a/NvBlast/sdk/extensions/converter/include/NvBlastExtDataConverter.h b/NvBlast/sdk/extensions/converter/include/NvBlastExtDataConverter.h
new file mode 100644
index 0000000..b251518
--- /dev/null
+++ b/NvBlast/sdk/extensions/converter/include/NvBlastExtDataConverter.h
@@ -0,0 +1,40 @@
+/*
+* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#ifndef NVBLASTEXTDATACONVERTER_H
+#define NVBLASTEXTDATACONVERTER_H
+
+
+#include "NvBlast.h"
+#include <vector>
+
+namespace Nv
+{
+namespace Blast
+{
+ /**
+ Generic version conversion function for Blast data blocks.
+
+ Automatically determines block type (one of NvBlastDataBlock::Type) and uses appropriate converter.
+
+ \param[out] outBlock User-supplied memory block to fill with new data.
+ \param[in] inBlock Data block to convert.
+ \param[in] outBlockVersion Version to convert to; pass 'nullptr' to convert to the latest version.
+
+ \return true iff conversion was successful.
+ */
+ NVBLAST_API bool convertDataBlock(std::vector<char>& outBlock, const std::vector<char>& inBlock, uint32_t* outBlockVersion = nullptr);
+
+
+} // namespace Blast
+} // namespace Nv
+
+
+#endif // ifndef NVBLASTEXTDATACONVERTER_H
diff --git a/NvBlast/sdk/extensions/converter/source/conversion/NvBlastExtAssetBlockVersionConverter_v0_v1.h b/NvBlast/sdk/extensions/converter/source/conversion/NvBlastExtAssetBlockVersionConverter_v0_v1.h
new file mode 100644
index 0000000..44a0b54
--- /dev/null
+++ b/NvBlast/sdk/extensions/converter/source/conversion/NvBlastExtAssetBlockVersionConverter_v0_v1.h
@@ -0,0 +1,88 @@
+/*
+* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#ifndef NVBLASTEXTASSETBLOCKVERSIONCONVERTER_V0_V1_H
+#define NVBLASTEXTASSETBLOCKVERSIONCONVERTER_V0_V1_H
+
+
+#include "NvBlastExtBinaryBlockConverter.h"
+
+
+namespace Nv
+{
+namespace Blast
+{
+
+/*
+ WARNING: THIS CLASS IS AN EXAMPLE.
+ REPLACE WITH ACTUAL CONVERSION CODE ONCE NEEDED.
+*/
+class NvBlastAssetBlockVersionConverter_v0_v1 : public BinaryBlockConverter::VersionConverter
+{
+public:
+ virtual uint32_t getVersionFrom() const { return NvBlastAssetDataFormat::Initial; }
+
+ virtual uint32_t getVersionTo() const { return 1/*NvBlastAssetDataFormat::BondCountSwap*/; }
+
+ // remains the same
+ struct SupportGraph
+ {
+ uint32_t m_nodeCount;
+ uint32_t m_chunkIndicesOffset;
+ uint32_t m_adjacencyPartitionOffset;
+ uint32_t m_adjacentNodeIndicesOffset;
+ uint32_t m_adjacentBondIndicesOffset;
+ };
+
+ // prev version
+ struct AssetDataHeaderPrev
+ {
+ uint32_t m_formatVersion;
+ uint32_t m_size;
+ NvBlastID m_ID;
+ uint32_t m_totalChunkCount;
+ SupportGraph m_graph;
+ uint32_t m_leafChunkCount;
+ uint32_t m_firstSubsupportChunkIndex; // 0
+ uint32_t m_bondCount; // 1
+ };
+
+ // new version
+ struct AssetDataHeaderNew
+ {
+ uint32_t m_formatVersion;
+ uint32_t m_size;
+ NvBlastID m_ID;
+ uint32_t m_totalChunkCount;
+ SupportGraph m_graph;
+ uint32_t m_leafChunkCount;
+ uint32_t m_bondCount; // 1
+ uint32_t m_firstSubsupportChunkIndex; // 0
+ };
+
+ bool convert(const std::vector<char>& from, std::vector<char>& to) const
+ {
+ to = from;
+
+ const AssetDataHeaderPrev* headerPrev = reinterpret_cast<const AssetDataHeaderPrev*>(from.data());
+ AssetDataHeaderNew* headerNew = reinterpret_cast<AssetDataHeaderNew*>(to.data());
+ headerNew->m_bondCount = headerPrev->m_bondCount;
+ headerNew->m_firstSubsupportChunkIndex = headerPrev->m_firstSubsupportChunkIndex;
+ headerNew->m_formatVersion = (uint32_t)getVersionTo();
+
+ return true;
+ }
+};
+
+} // namespace Blast
+} // namespace Nv
+
+
+#endif // ifndef NVBLASTEXTASSETBLOCKVERSIONCONVERTER_V0_V1_H
diff --git a/NvBlast/sdk/extensions/converter/source/conversion/NvBlastExtBinaryBlockConverter.cpp b/NvBlast/sdk/extensions/converter/source/conversion/NvBlastExtBinaryBlockConverter.cpp
new file mode 100644
index 0000000..a606b70
--- /dev/null
+++ b/NvBlast/sdk/extensions/converter/source/conversion/NvBlastExtBinaryBlockConverter.cpp
@@ -0,0 +1,152 @@
+/*
+* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#include "NvBlastExtBinaryBlockConverter.h"
+#include <iostream>
+#include <algorithm>
+#include <queue>
+#include <limits>
+
+
+namespace Nv
+{
+namespace Blast
+{
+
+bool BinaryBlockConverter::convertBinaryBlock(std::vector<char>& outBlock, const std::vector<VersionConverterPtr>& converters, const std::vector<char>& inBlock, uint32_t outBlockVersion, uint32_t inBlockVersion)
+{
+ if (inBlock.empty())
+ {
+ std::cerr << "Conversion failed: empty input block." << std::endl;
+ return false;
+ }
+
+ // starting version
+ uint32_t version;
+ version = inBlockVersion;
+ std::cout << "Input block version: " << version << std::endl;
+
+ // target version
+ const uint32_t targetVersion = outBlockVersion;
+ std::cout << "Target version: " << targetVersion << std::endl;
+
+ // search conversion path
+ std::vector<VersionConverterPtr> conversionPath;
+ if (!findShortestPath(conversionPath, converters, version, targetVersion))
+ {
+ std::cerr << "Conversion failed: can't find conversion path." << std::endl;
+ return false;
+ }
+
+ // starting conversion loop
+ std::vector<char> blockFrom(inBlock.begin(), inBlock.end());
+ std::vector<char> blockTo(inBlock.size());
+ for (const VersionConverterPtr converter : conversionPath)
+ {
+ // actual conversion call
+ std::cout << "Converting from version: " << converter->getVersionFrom() << " -> " << converter->getVersionTo() << " Result: ";
+ if (!converter->convert(blockFrom, blockTo))
+ {
+ std::cout << "Fail." << std::endl;
+ std::cerr << "Conversion failed: inside converter for version: " << version << std::endl;
+ return false;
+ }
+ else
+ {
+ std::cout << "Success." << std::endl;
+ blockFrom.swap(blockTo);
+ version = converter->getVersionTo();
+ }
+ }
+
+ // copy result
+ outBlock = blockFrom;
+
+ return true;
+}
+
+
+/**
+Finds the shortest path from versionFrom to versionTo using breadth-first search with dynamic programming
+*/
+bool BinaryBlockConverter::findShortestPath(std::vector<VersionConverterPtr>& conversionPath, const std::vector<VersionConverterPtr>& converters, uint32_t versionFrom, uint32_t versionTo)
+{
+ // find max version
+ uint32_t versionMax = 0;
+ for (VersionConverterPtr c : converters)
+ {
+ versionMax = std::max(versionMax, c->getVersionFrom());
+ versionMax = std::max(versionMax, c->getVersionTo());
+ }
+
+ // dynamic programming data
+ struct Node
+ {
+ uint32_t distance;
+ VersionConverterPtr converter;
+
+ Node() : distance(std::numeric_limits<uint32_t>::max()), converter(nullptr) {}
+ };
+ std::vector<Node> nodes(versionMax + 1);
+
+ // initial state (start from versionTo)
+ std::queue<uint32_t> q;
+ q.emplace(versionTo);
+ nodes[versionTo].distance = 0;
+ nodes[versionTo].converter = nullptr;
+
+ // breadth-first search
+ bool found = false;
+ while (!q.empty() && !found)
+ {
+ uint32_t v0 = q.front();
+ q.pop();
+
+ for (const VersionConverterPtr c : converters)
+ {
+ if (c->getVersionTo() == v0)
+ {
+ uint32_t v1 = c->getVersionFrom();
+ if (nodes[v1].distance > nodes[v0].distance + 1)
+ {
+ nodes[v1].distance = nodes[v0].distance + 1;
+ nodes[v1].converter = c;
+ q.emplace(v1);
+ }
+
+ if (c->getVersionFrom() == versionFrom)
+ {
+ found = true;
+ break;
+ }
+ }
+ }
+ }
+
+ if (found)
+ {
+ // unfold found path to result conversionPath
+ uint32_t v = versionFrom;
+ conversionPath.clear();
+ while (nodes[v].converter.get() != nullptr)
+ {
+ conversionPath.push_back(nodes[v].converter);
+ v = nodes[v].converter->getVersionTo();
+ }
+ return true;
+ }
+ else
+ {
+ return false;
+ }
+}
+
+} // namespace Blast
+} // namespace Nv
diff --git a/NvBlast/sdk/extensions/converter/source/conversion/NvBlastExtBinaryBlockConverter.h b/NvBlast/sdk/extensions/converter/source/conversion/NvBlastExtBinaryBlockConverter.h
new file mode 100644
index 0000000..83eb6b3
--- /dev/null
+++ b/NvBlast/sdk/extensions/converter/source/conversion/NvBlastExtBinaryBlockConverter.h
@@ -0,0 +1,57 @@
+/*
+* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#ifndef NVBLASTEXTBINARYBLOCKCONVERTER_H
+#define NVBLASTEXTBINARYBLOCKCONVERTER_H
+
+
+#include "NvBlast.h"
+#include <vector>
+#include <memory>
+
+
+namespace Nv
+{
+namespace Blast
+{
+
+/**
+Generic binary block converter class.
+
+BinaryBlockConverter is an abstract class, as is its member class VersionConverter. In order to implement your own
+binary converter, implement a BinaryBlockConverter::VersionConverter for every version conversion. Then implement BinaryBlockConverter
+so that getVersionConverters() returns all of your implemented BinaryBlockConverter::VersionConverter instances.
+
+*/
+class BinaryBlockConverter
+{
+public:
+ class VersionConverter
+ {
+ public:
+ virtual uint32_t getVersionFrom() const = 0;
+ virtual uint32_t getVersionTo() const = 0;
+ virtual bool convert(const std::vector<char>& from, std::vector<char>& to) const = 0;
+ };
+
+ typedef std::shared_ptr<VersionConverter> VersionConverterPtr;
+
+ static bool convertBinaryBlock(std::vector<char>& outBlock, const std::vector<VersionConverterPtr>& converters, const std::vector<char>& inBlock, uint32_t outBlockVersion, uint32_t inBlockVersion);
+protected:
+
+private:
+ static bool findShortestPath(std::vector<VersionConverterPtr>& conversionPath, const std::vector<VersionConverterPtr>& converters, uint32_t versionFrom, uint32_t versionTo);
+};
+
+} // namespace Blast
+} // namespace Nv
+
+
+#endif // ifndef NVBLASTEXTBINARYBLOCKCONVERTER_H
diff --git a/NvBlast/sdk/extensions/converter/source/conversion/NvBlastExtDataConverter.cpp b/NvBlast/sdk/extensions/converter/source/conversion/NvBlastExtDataConverter.cpp
new file mode 100644
index 0000000..fe8c745
--- /dev/null
+++ b/NvBlast/sdk/extensions/converter/source/conversion/NvBlastExtDataConverter.cpp
@@ -0,0 +1,103 @@
+/*
+* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#include "NvBlastExtDataConverter.h"
+#include "NvBlastExtBinaryBlockConverter.h"
+
+#include <iostream>
+
+// asset converters
+#include "NvBlastExtAssetBlockVersionConverter_v0_v1.h"
+
+
+namespace Nv
+{
+namespace Blast
+{
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+// Asset Block Converter
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+NV_INLINE std::vector<BinaryBlockConverter::VersionConverterPtr> getAssetConverters()
+{
+ /**
+ +==========================================+
+ | HINT: ADD NEW VERSION CONVERTERS THERE |
+ +==========================================+
+ */
+ BinaryBlockConverter::VersionConverterPtr converters[] =
+ {
+ std::make_shared<NvBlastAssetBlockVersionConverter_v0_v1>()
+ };
+
+ return std::vector<BinaryBlockConverter::VersionConverterPtr>(converters, converters + sizeof(converters) / sizeof(converters[0]));
+}
+
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+// Family Converter
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+NV_INLINE std::vector<BinaryBlockConverter::VersionConverterPtr> getFamilyConverters()
+{
+ /**
+ +==========================================+
+ | HINT: ADD NEW VERSION CONVERTERS THERE |
+ +==========================================+
+ */
+ BinaryBlockConverter::VersionConverterPtr converters[] =
+ {
+ nullptr //std::make_shared<NvBlastFamilyVersionConverter_v0_v1>()
+ };
+
+ return std::vector<BinaryBlockConverter::VersionConverterPtr>(converters, converters + sizeof(converters) / sizeof(converters[0]));
+}
+
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+// Generic Block Converter
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+bool convertDataBlock(std::vector<char>& outBlock, const std::vector<char>& inBlock, uint32_t* outBlockVersion)
+{
+ // Pick header to determine dataType and version
+ if (inBlock.size() < sizeof(NvBlastDataBlock))
+ {
+ std::cerr << "Conversion failed: invalid block, passed block is too small." << std::endl;
+ return false;
+ }
+ const NvBlastDataBlock* dataBlock = reinterpret_cast<const NvBlastDataBlock*>(inBlock.data());
+
+ // Select appropriate converters and version based on dataType
+ std::vector<BinaryBlockConverter::VersionConverterPtr> converters;
+ uint32_t version;
+ switch (dataBlock->dataType)
+ {
+ case NvBlastDataBlock::AssetDataBlock:
+ std::cout << "Input block dataType: NvBlastDataBlock::Asset" << std::endl;
+ converters = getAssetConverters();
+ version = (outBlockVersion == nullptr ? static_cast<uint32_t>(NvBlastAssetDataFormat::Current) : *outBlockVersion);
+ break;
+ case NvBlastDataBlock::FamilyDataBlock:
+ std::cout << "Input block dataType: NvBlastDataBlock::Family" << std::endl;
+ converters = getFamilyConverters();
+ version = (outBlockVersion == nullptr ? static_cast<uint32_t>(NvBlastFamilyDataFormat::Current) : *outBlockVersion);
+ break;
+ default:
+ std::cerr << "Conversion failed: unsupported dataType: " << dataBlock->dataType << std::endl;
+ return false;
+ }
+
+ return BinaryBlockConverter::convertBinaryBlock(outBlock, converters, inBlock, version, dataBlock->formatVersion);
+}
+
+} // namespace Blast
+} // namespace Nv
diff --git a/NvBlast/sdk/extensions/import/include/NvBlastExtApexImportTool.h b/NvBlast/sdk/extensions/import/include/NvBlastExtApexImportTool.h
new file mode 100644
index 0000000..bbfcfce
--- /dev/null
+++ b/NvBlast/sdk/extensions/import/include/NvBlastExtApexImportTool.h
@@ -0,0 +1,199 @@
+/*
+* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#ifndef NVBLASTEXTAPEXIMPORTTOOL_H
+#define NVBLASTEXTAPEXIMPORTTOOL_H
+
+#include "NvBlast.h"
+#include <vector>
+#include <string>
+#include "NvBlastExtPxAsset.h"
+
+namespace physx
+{
+class PxErrorCallback;
+class PxAllocatorCallback;
+namespace general_PxIOStream2
+{
+class PxFileBuf;
+}
+}
+
+namespace nvidia
+{
+namespace apex
+{
+class ApexSDK;
+class ModuleDestructible;
+class DestructibleAsset;
+}
+using namespace physx::general_PxIOStream2;
+}
+
+
+namespace Nv
+{
+namespace Blast
+{
+
+struct CollisionHull;
+class TkFramework;
+
+namespace ApexImporter
+{
+
+struct ApexImporterConfig
+{
+ /**
+ Interface search mode:
+
+ EXACT - Importer tries to find triangles from two chunks which lie in a common surface.
+ If such triangles are found, their intersections are considered as the interface.
+
+ FORCED - Bond creation is forced no matter how far the chunks are from each other.
+
+ */
+ enum InterfaceSearchMode { EXACT, FORCED, MODE_COUNT };
+
+ ApexImporterConfig()
+ {
+ setDefaults();
+ }
+
+ void setDefaults()
+ {
+ infSearchMode = EXACT;
+ }
+ InterfaceSearchMode infSearchMode;
+};
+
+
+class ApexDestruction;
+
+
+/**
+ ApexImportTool provides routines to create NvBlastAssets from APEX assets.
+*/
+class ApexImportTool
+{
+public:
+
+ /**
+ The constructor may be provided with a user-defined message function:
+ \param[in] logFn User - supplied message function(see NvBlastLog definition).May be NULL.
+ */
+ ApexImportTool(NvBlastLog logFn = NULL);
+ ~ApexImportTool();
+
+ //////////////////////////////////////////////////////////////////////////////
+
+ /**
+ ApexImportTool should be initialized before use. ApexSDK and ModuleDestructible are initialized internally.
+ \return If true, ApexImportTool initialized successfully.
+ */
+ bool initialize();
+
+ /**
+ ApexImportTool should be initialized before use. The user can provide existing ApexSDK and ModuleDestructible objects.
+ \param[in] apexSdk Pointer on ApexSDK object
+ \param[in] moduleDestructible Pointer on ModuleDestructible object
+ \return If true, ApexImportTool initialized successfully.
+ */
+ bool initialize(nvidia::apex::ApexSDK* apexSdk, nvidia::apex::ModuleDestructible* moduleDestructible);
+
+ /**
+ Checks whether ApexImportTool is initialized and can be used.
+ \return If true, ApexImportTool initialized successfully.
+ */
+ bool isValid();
+
+
+ /**
+ Method loads APEX Destruction asset from file
+ \param[in] stream Pointer on PxFileBuf stream with Apex Destruction asset
+ \return If not 0, pointer on DestructibleAsset object is returned.
+ */
+ nvidia::apex::DestructibleAsset* loadAssetFromFile(nvidia::PxFileBuf* stream);
+
+
+ /**
+ Method builds an NvBlastAsset from the provided DestructibleAsset. The DestructibleAsset must contain a support graph!
+ \param[out] chunkReorderInvMap Chunk map from blast chunk to apex chunk to be filled.
+ \param[in] apexAsset Pointer on DestructibleAsset object which should be converted to NvBlastAsset
+ \param[out] chunkDescriptors Reference on chunk descriptors array to be filled.
+ \param[out] bondDescriptors Reference on bond descriptors array to be filled.
+ \param[out] flags Reference on chunk flags to be filled.
+
+ \return If true, output arrays are filled.
+ */
+ bool importApexAsset(std::vector<uint32_t>& chunkReorderInvMap, const nvidia::apex::DestructibleAsset* apexAsset,
+ std::vector<NvBlastChunkDesc>& chunkDescriptors, std::vector<NvBlastBondDesc>& bondDescriptors, std::vector<uint32_t>& flags);
+
+ /**
+ Method builds an NvBlastAsset from the provided DestructibleAsset. The DestructibleAsset must contain a support graph!
+ Parameters of the conversion can be provided with ApexImporterConfig.
+ \param[out] chunkReorderInvMap Chunk map from blast chunk to apex chunk to be filled.
+ \param[in] apexAsset Pointer on DestructibleAsset object which should be converted to NvBlastAsset
+ \param[out] chunkDescriptors Reference on chunk descriptors array to be filled.
+ \param[out] bondDescriptors Reference on bond descriptors array to be filled.
+ \param[out] flags Reference on chunk flags to be filled.
+ \param[in] config ApexImporterConfig object with conversion parameters, see above.
+ \return If true, output arrays are filled.
+ */
+ bool importApexAsset(std::vector<uint32_t>& chunkReorderInvMap, const nvidia::apex::DestructibleAsset* apexAsset,
+ std::vector<NvBlastChunkDesc>& chunkDescriptors, std::vector<NvBlastBondDesc>& bondDescriptors, std::vector<uint32_t>& flags,
+ const ApexImporterConfig& config);
+
+
+ /**
+ Method serializes user-supplied NvBlastAsset object to user-supplied PxFileBuf stream.
+ \param[in] asset Pointer on NvBlastAsset object which should be serialized
+ \param[in] stream Pointer on PxFileBuf object in which NvBlastAsset should be serialized.
+ \return If true, NvBlastAsset object serialized successfully.
+ */
+ bool saveAsset(const NvBlastAsset* asset, nvidia::PxFileBuf* stream);
+
+ /**
+ Method creates collision geometry from user-supplied APEX Destructible asset.
+ \param[in] apexAsset Pointer on DestructibleAsset object for which collision geometry should be created.
+ \param[in] chunkCount Blast asset chunk count, should be equal to number of blast chunk descriptors which are gathered at ApexImportTool::importApexAsset(...)
+ \param[in] chunkReorderInvMap Chunk map from blast chunk to apex chunk filled in ApexImportTool::importApexAsset(...)
+ \param[in] apexChunkFlags Chunk flags array
+ \param[out] physicsChunks Chunk physics info output array
+ \param[out] physicsSubchunks Chunk collision geometry and transformation data output array
+ \return If true - success, output arrays are filled.
+ */
+ bool getCollisionGeometry(const nvidia::apex::DestructibleAsset* apexAsset, uint32_t chunkCount, std::vector<uint32_t>& chunkReorderInvMap,
+ const std::vector<uint32_t>& apexChunkFlags, std::vector<ExtPxAssetDesc::ChunkDesc>& physicsChunks,
+ std::vector<ExtPxAssetDesc::SubchunkDesc>& physicsSubchunks);
+
+ ApexDestruction* m_apexDestruction;
+ //////////////////////////////////////////////////////////////////////////////
+
+private:
+ bool importApexAssetInternal(std::vector<uint32_t>& chunkReorderInvMap, const nvidia::apex::DestructibleAsset* apexAsset,
+ std::vector<NvBlastChunkDesc>& chunkDescriptors, std::vector<NvBlastBondDesc>& bondDesc, std::vector<uint32_t>& flags,
+ const ApexImporterConfig& configDesc);
+
+
+protected:
+ NvBlastLog m_log;
+
+protected:
+ ApexImportTool(const ApexImportTool&);
+ ApexImportTool& operator=(const ApexImportTool&);
+};
+
+} // namespace ApexImporter
+
+} // namespace Blast
+} // namespace Nv
+
+#endif // NVBLASTEXTAPEXIMPORTTOOL_H
diff --git a/NvBlast/sdk/extensions/import/source/NvBlastExtApexDestruction.cpp b/NvBlast/sdk/extensions/import/source/NvBlastExtApexDestruction.cpp
new file mode 100644
index 0000000..889a8c8
--- /dev/null
+++ b/NvBlast/sdk/extensions/import/source/NvBlastExtApexDestruction.cpp
@@ -0,0 +1,220 @@
+/*
+* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#include "NvBlastExtApexDestruction.h"
+
+
+#include "PxPhysicsAPI.h"
+#include "Apex.h"
+#include <ModuleDestructible.h>
+#include <DestructibleAsset.h>
+#include "NullRenderer.h"
+#include "PsString.h"
+
+using namespace nvidia;
+using namespace apex;
+
+//////////////////////////////////////////////////////////////////////////////
+
+PxDefaultAllocator gPxAllocator;
+PxDefaultErrorCallback gErrorCallback;
+NullRenderResourceManager gNullRenderer;
+
+/////////////////////////////////////////////////////////////////////////////
+
+namespace Nv
+{
+namespace Blast
+{
+using namespace ApexImporter;
+
+ApexDestruction::ApexDestruction(NvBlastLog log)
+{
+ m_log = log;
+ initialize();
+}
+
+ApexDestruction::ApexDestruction(ApexSDK* apexSdk, ModuleDestructible* moduleDestructible, NvBlastLog log)
+{
+ m_log = log;
+
+ m_Foundation.reset(&apexSdk->getPhysXSDK()->getFoundation(), false);
+ m_PhysxSDK.reset(apexSdk->getPhysXSDK(), false);
+ m_Cooking.reset(apexSdk->getCookingInterface(), false);
+ m_ApexSDK.reset(apexSdk, false);
+ for (uint32_t i = 0; i < apexSdk->getNbModules(); ++i)
+ {
+ if (!physx::shdfnd::strcmp(apexSdk->getModules()[i]->getName(), "Legacy"))
+ {
+ hasLegacyModule = true;
+ }
+ }
+ m_DestructibleModule.reset(moduleDestructible, false);
+}
+
+
+bool ApexDestruction::isValid()
+{
+ return m_PhysxSDK && m_Cooking && m_ApexSDK && m_DestructibleModule && hasLegacyModule;
+}
+
+bool ApexDestruction::initialize()
+{
+ if (isValid())
+ {
+ return true;
+ }
+
+ //////////////////////////////////////////////////////////////////////////////
+
+ m_Foundation.reset(PxCreateFoundation(PX_FOUNDATION_VERSION, gPxAllocator, gErrorCallback));
+ if (!m_Foundation)
+ {
+ if (m_log)
+ {
+ m_log(NvBlastMessage::Error, "Error: failed to create Foundation\n", __FILE__, __LINE__);
+ }
+ return false;
+ }
+ physx::PxTolerancesScale scale;
+ m_PhysxSDK.reset(PxCreatePhysics(PX_PHYSICS_VERSION, *m_Foundation, scale, true));
+ if (!m_PhysxSDK)
+ {
+ if (m_log)
+ {
+ m_log(NvBlastMessage::Error, "Error: failed to create PhysX\n", __FILE__, __LINE__);
+ }
+ return false;
+ }
+
+#if 0
+ if (!PxInitExtensions(*mPhysxSDK, 0))
+ {
+ if (m_log)
+ {
+ m_log(Error, "Error: failed to init PhysX extensions\n", __FILE__, __LINE__);
+ }
+ return false;
+ }
+#endif
+
+ physx::PxCookingParams cookingParams(scale);
+ cookingParams.buildGPUData = true;
+ m_Cooking.reset(PxCreateCooking(PX_PHYSICS_VERSION, m_PhysxSDK->getFoundation(), cookingParams));
+ if (!m_Cooking)
+ {
+ m_log(NvBlastMessage::Error, "Error: failed to create PhysX Cooking\n", __FILE__, __LINE__);
+ return false;
+ }
+
+ //////////////////////////////////////////////////////////////////////////////
+
+ ApexSDKDesc apexSDKDesc;
+ apexSDKDesc.physXSDK = m_PhysxSDK.get();
+ apexSDKDesc.cooking = m_Cooking.get();
+ apexSDKDesc.renderResourceManager = &gNullRenderer;
+ apexSDKDesc.resourceCallback = nullptr;
+ apexSDKDesc.foundation = &m_PhysxSDK->getFoundation();
+
+ m_ApexSDK.reset(CreateApexSDK(apexSDKDesc, nullptr, APEX_SDK_VERSION));
+ if (!m_ApexSDK)
+ {
+ if (m_log)
+ {
+ m_log(NvBlastMessage::Error, "Error: failed to create APEX\n", __FILE__, __LINE__);
+ }
+ return false;
+ }
+
+ //////////////////////////////////////////////////////////////////////////////
+
+ m_DestructibleModule.reset(static_cast<nvidia::apex::ModuleDestructible*>(m_ApexSDK->createModule("Destructible")), ApexReleaser(*m_ApexSDK));
+ if (!m_DestructibleModule)
+ {
+ if (m_log)
+ {
+ m_log(NvBlastMessage::Error, "Error: failed to create ModuleDestructible\n", __FILE__, __LINE__);
+ }
+ return false;
+ }
+
+ if (!m_ApexSDK->createModule("Legacy"))
+ {
+ if (m_log)
+ {
+ m_log(NvBlastMessage::Error, "Error: failed to create Legacy module\n", __FILE__, __LINE__);
+ }
+ return false;
+ };
+
+
+ float massScaleExponenent = 1.f;
+ float massScale = 1.f;
+
+ NvParameterized::Interface* params = m_DestructibleModule->getDefaultModuleDesc();
+ NvParameterized::Handle paramsHandle(params);
+ paramsHandle.getParameter("scaledMassExponent");
+ paramsHandle.setParamF32(massScaleExponenent);
+ paramsHandle.getParameter("massScale");
+ paramsHandle.setParamF32(massScale);
+ m_DestructibleModule->init(*params);
+
+ return true;
+}
+
+DestructibleAsset* ApexDestruction::loadAsset(physx::PxFileBuf* stream)
+{
+ DestructibleAsset* asset = nullptr;
+
+ if (stream && stream->isOpen())
+ {
+ NvParameterized::Serializer::SerializeType serType = apexSDK()->getSerializeType(*stream);
+ NvParameterized::Serializer::ErrorType serError;
+ NvParameterized::Serializer* ser = apexSDK()->createSerializer(serType);
+ PX_ASSERT(ser);
+
+ NvParameterized::Serializer::DeserializedData data;
+ serError = ser->deserialize(*stream, data);
+
+ if (serError == NvParameterized::Serializer::ERROR_NONE && data.size() == 1)
+ {
+ NvParameterized::Interface* params = data[0];
+ if (!physx::shdfnd::strcmp(params->className(), "DestructibleAssetParameters"))
+ {
+ asset = static_cast<DestructibleAsset*>(apexSDK()->createAsset(params, ""));
+ }
+ else
+ {
+ m_log(NvBlastMessage::Error, "Error: deserialized data is not an APEX Destructible\n", __FILE__, __LINE__);
+ }
+ }
+ else
+ {
+ m_log(NvBlastMessage::Error, "Error: failed to deserialize\n", __FILE__, __LINE__);
+ }
+ ser->release();
+ }
+
+ if (!asset)
+ {
+ char message[255] = { 0 };
+ sprintf(message, "Error: failed to load asset...\n");
+ m_log(NvBlastMessage::Error, message, __FILE__, __LINE__);
+ }
+
+ return asset;
+}
+
+ApexDestruction::~ApexDestruction()
+{
+}
+
+} // namespace Blast
+} // namespace Nv
diff --git a/NvBlast/sdk/extensions/import/source/NvBlastExtApexDestruction.h b/NvBlast/sdk/extensions/import/source/NvBlastExtApexDestruction.h
new file mode 100644
index 0000000..6560aed
--- /dev/null
+++ b/NvBlast/sdk/extensions/import/source/NvBlastExtApexDestruction.h
@@ -0,0 +1,108 @@
+/*
+* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#ifndef NVBLASTEXTAPEXDESTRUCTION_H
+#define NVBLASTEXTAPEXDESTRUCTION_H
+
+#include "ApexUsingNamespace.h"
+#include "NvBlastExtScopedResource.h"
+#include "PsUtilities.h"
+#include <string>
+#include <NvBlastTypes.h>
+#include <PxFileBuf.h>
+
+namespace physx
+{
+ class PxFoundation;
+}
+
+
+namespace Nv
+{
+namespace Blast
+{
+
+namespace ApexImporter
+{
+ /**
+ Class for working with APEX Destruction assets.
+ */
+class ApexDestruction
+{
+ PX_NOCOPY(ApexDestruction)
+
+public:
+ ApexDestruction(NvBlastLog log = NULL);
+
+ ApexDestruction(nvidia::apex::ApexSDK* apexSdk, nvidia::apex::ModuleDestructible* moduleDestructible, NvBlastLog log = NULL);
+ ~ApexDestruction();
+
+ //////////////////////////////////////////////////////////////////////////////
+ /**
+	ApexDestruction initialization. If the APEX SDK and ModuleDestructible were provided to the constructor, they will be used.
+ Otherwise, PhysXSDK and APEX SDK will be initialized.
+ */
+ bool initialize();
+
+ /**
+	\return Return true if the tool initialized successfully.
+ */
+ bool isValid();
+
+ /**
+ Load Apex Destructible asset from PxFileBuf stream
+ \param[in] stream Apex asset stream
+	\return Return DestructibleAsset* on success, otherwise nullptr is returned.
+ */
+ nvidia::apex::DestructibleAsset* loadAsset(physx::PxFileBuf* stream);
+
+ /**
+	\return Return PxFoundation.
+ */
+ physx::PxFoundation* foundation() { return m_Foundation.get(); }
+ /**
+	\return Return PxPhysics.
+ */
+ physx::PxPhysics* physxSDK() { return m_PhysxSDK.get(); }
+ /**
+	\return Return PxCooking.
+ */
+ physx::PxCooking* cooking() { return m_Cooking.get(); }
+ /**
+	\return Return ApexSDK.
+ */
+ nvidia::apex::ApexSDK* apexSDK() { return m_ApexSDK.get(); }
+
+ /**
+	\return Return ModuleDestructible.
+ */
+ nvidia::apex::ModuleDestructible* destructibleModule() { return m_DestructibleModule.get(); }
+
+private:
+ bool hasLegacyModule;
+ NvBlastLog m_log;
+ //////////////////////////////////////////////////////////////////////////////
+
+protected:
+ ScopedResource<physx::PxFoundation> m_Foundation;
+ ScopedResource<physx::PxPhysics> m_PhysxSDK;
+ ScopedResource<physx::PxCooking> m_Cooking;
+ ScopedResource<nvidia::apex::ApexSDK> m_ApexSDK;
+ ScopedResource<nvidia::apex::ModuleDestructible, ApexReleaser> m_DestructibleModule;
+
+};
+
+} // namespace ApexImporter
+
+} // namespace Blast
+} // namespace Nv
+
+
+#endif // NVBLASTEXTAPEXDESTRUCTION_H
diff --git a/NvBlast/sdk/extensions/import/source/NvBlastExtApexImportTool.cpp b/NvBlast/sdk/extensions/import/source/NvBlastExtApexImportTool.cpp
new file mode 100644
index 0000000..d2def6f
--- /dev/null
+++ b/NvBlast/sdk/extensions/import/source/NvBlastExtApexImportTool.cpp
@@ -0,0 +1,490 @@
+/*
+* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#include "NvBlastExtApexImportTool.h"
+
+#if NV_VC
+#pragma warning(push)
+#pragma warning(disable: 4996) // 'fopen' unsafe warning, from NxFileBuffer.h
+#endif
+
+#include "PxFoundation.h"
+#include "PxErrorCallback.h"
+#include "PxAllocatorCallback.h"
+
+#include "NvBlastIndexFns.h"
+#include "DestructibleAsset.h"
+#include "NvBlastExtApexDestruction.h"
+#include <PxConvexMesh.h>
+#include "PxPhysics.h"
+#include "NvBlastExtAuthoringCollisionBuilder.h"
+#include "NvBlastExtPxAsset.h"
+#include "NvBlastExtAuthoringTypes.h"
+#include "NvBlastExtAuthoringBondGenerator.h"
+
+using namespace nvidia;
+using namespace apex;
+
+namespace Nv
+{
+namespace Blast
+{
+
+namespace ApexImporter
+{
+ /**
+ Should be consistent with IntPair in APEX
+ */
+ struct IntPair
+ {
+ void set(int32_t _i0, int32_t _i1)
+ {
+ i0 = _i0;
+ i1 = _i1;
+ }
+
+ int32_t i0, i1;
+
+ static int compare(const void* a, const void* b)
+ {
+ const int32_t diff0 = ((IntPair*)a)->i0 - ((IntPair*)b)->i0;
+ return diff0 ? diff0 : (((IntPair*)a)->i1 - ((IntPair*)b)->i1);
+ }
+ };
+
+ApexImportTool::ApexImportTool(NvBlastLog log)
+ : m_apexDestruction(NULL), m_log(log)
+{
+}
+
+
+bool ApexImportTool::isValid()
+{
+ return m_apexDestruction && m_apexDestruction->isValid();
+}
+
+
+bool ApexImportTool::initialize()
+{
+ if (isValid())
+ {
+ return true;
+ }
+ m_log(NvBlastMessage::Info, "APEX initialization \n", __FILE__, __LINE__);
+ m_apexDestruction = new ApexDestruction(m_log);
+ return isValid();
+}
+
+
+bool ApexImportTool::initialize(nvidia::apex::ApexSDK* apexSdk, nvidia::apex::ModuleDestructible* moduleDestructible)
+{
+ if (isValid())
+ {
+ return true;
+ }
+ m_log(NvBlastMessage::Info, "APEX initialization \n", __FILE__, __LINE__);
+ m_apexDestruction = new ApexDestruction(apexSdk, moduleDestructible, m_log);
+ return isValid();
+}
+
+DestructibleAsset* ApexImportTool::loadAssetFromFile(physx::PxFileBuf* stream)
+{
+ return m_apexDestruction->loadAsset(stream);
+}
+
+
+bool ApexImportTool::getCollisionGeometry(const nvidia::apex::DestructibleAsset* apexAsset, uint32_t chunkCount, std::vector<uint32_t>& chunkReorderInvMap,
+ const std::vector<uint32_t>& apexChunkFlags, std::vector<ExtPxAssetDesc::ChunkDesc>& physicsChunks,
+ std::vector<ExtPxAssetDesc::SubchunkDesc>& physicsSubchunks)
+{
+ physicsChunks.clear();
+ physicsChunks.resize(chunkCount);
+ // prepare physics asset desc (convexes, transforms)
+ ConvexMeshBuilder collisionBuilder(m_apexDestruction->cooking(), &m_apexDestruction->apexSDK()->getPhysXSDK()->getPhysicsInsertionCallback());
+ int32_t apexHullCount = 0;
+ const uint32_t apexChunkCount = apexAsset->getChunkCount();
+ for (uint32_t chunkIndex = 0; chunkIndex < chunkCount; ++chunkIndex)
+ {
+ uint32_t apexChunkIndex = chunkReorderInvMap[chunkIndex];
+ if (apexChunkIndex < apexChunkCount)
+ {
+ uint32_t partIndex = apexAsset->getPartIndex(apexChunkIndex);
+ apexHullCount += apexAsset->getPartConvexHullCount(partIndex);
+ }
+ }
+ physicsSubchunks.reserve(chunkCount);
+ {
+ for (uint32_t chunkIndex = 0; chunkIndex < chunkCount; ++chunkIndex)
+ {
+ uint32_t apexChunkIndex = chunkReorderInvMap[chunkIndex];
+ if (apexChunkIndex < apexChunkCount)
+ {
+ uint32_t partIndex = apexAsset->getPartIndex(apexChunkIndex);
+ uint32_t partConvexHullCount = apexAsset->getPartConvexHullCount(partIndex);
+ NvParameterized::Interface** cxInterfaceArray = apexAsset->getPartConvexHullArray(partIndex);
+ physicsChunks[chunkIndex].subchunkCount = partConvexHullCount;
+ for (uint32_t hull = 0; hull < partConvexHullCount; ++hull)
+ {
+ NvParameterized::Handle paramHandle(cxInterfaceArray[hull]);
+ int32_t verticesCount = 0;
+ paramHandle.getParameter("vertices");
+ paramHandle.getArraySize(verticesCount);
+ std::vector<PxVec3> vertexData(verticesCount);
+ paramHandle.getParamVec3Array(vertexData.data(), verticesCount);
+
+ PxConvexMesh* convexMesh = collisionBuilder.buildConvexMesh(vertexData);
+
+ const ExtPxAssetDesc::SubchunkDesc subchunk =
+ {
+ PxTransform(PxIdentity),
+ PxConvexMeshGeometry(convexMesh)
+ };
+ physicsSubchunks.push_back(subchunk);
+ }
+ physicsChunks[chunkIndex].subchunks = partConvexHullCount ? (&physicsSubchunks.back() + 1 - partConvexHullCount) : nullptr;
+
+ // static flag set
+ physicsChunks[chunkIndex].isStatic = (apexChunkFlags[apexChunkIndex] & (1 << 1)) != 0;
+ }
+ else
+ {
+ // this is earth chunk
+ physicsChunks[chunkIndex].isStatic = true;
+ }
+ }
+ }
+
+ // check that vector didn't grow
+ if (static_cast<int32_t>(physicsSubchunks.size()) > apexHullCount)
+ {
+ m_log(NvBlastMessage::Error, "Error: sub chunk count seems to be wrong. \n", __FILE__, __LINE__);
+ return false;
+ }
+ return true;
+}
+
+void gatherChunkHullPoints(const DestructibleAsset* apexAsset, std::vector<std::vector<PxVec3> >& hullPoints)
+{
+ hullPoints.resize(apexAsset->getChunkCount());
+ for (uint32_t chunk = 0; chunk < apexAsset->getChunkCount(); ++chunk)
+ {
+ int32_t part = apexAsset->getPartIndex(chunk);
+ NvParameterized::Interface** cxInterfaceArray = apexAsset->getPartConvexHullArray(part);
+ for (uint32_t hull = 0; hull < apexAsset->getPartConvexHullCount(part); ++hull)
+ {
+ NvParameterized::Handle paramHandle(cxInterfaceArray[hull]);
+ int32_t verticesCount = 0;
+ paramHandle.getParameter("vertices");
+ paramHandle.getArraySize(verticesCount);
+ uint32_t oldSize = (uint32_t)hullPoints[chunk].size();
+ hullPoints[chunk].resize(hullPoints[chunk].size() + verticesCount);
+ paramHandle.getParamVec3Array(hullPoints[chunk].data() + oldSize, verticesCount);
+ }
+ }
+}
+PxBounds3 gatherChunkTriangles(const DestructibleAsset* apexAsset, std::vector<std::vector<Nv::Blast::Triangle> >& chunkTriangles, int32_t posBufferIndex, float scale, PxVec3 offset )
+{
+
+ PxBounds3 bnd;
+ bnd.setEmpty();
+ chunkTriangles.clear();
+ chunkTriangles.resize(apexAsset->getChunkCount());
+ for (uint32_t chunkIndex = 0; chunkIndex < apexAsset->getChunkCount(); ++chunkIndex)
+ {
+ uint32_t part = apexAsset->getPartIndex(chunkIndex);
+ const RenderMeshAsset* rAsset = apexAsset->getRenderMeshAsset();
+ uint32_t submeshCount = rAsset->getSubmeshCount();
+ for (uint32_t submeshIndex = 0; submeshIndex < submeshCount; ++submeshIndex)
+ {
+ const RenderSubmesh& currentSubmesh = rAsset->getSubmesh(submeshIndex);
+ const uint32_t* indexArray = currentSubmesh.getIndexBuffer(part);
+ uint32_t indexCount = currentSubmesh.getIndexCount(part);
+ const PxVec3* positions = reinterpret_cast<const PxVec3*>(currentSubmesh.getVertexBuffer().getBuffer(posBufferIndex));
+
+ for (uint32_t i = 0; i < indexCount; i += 3)
+ {
+ Vertex a;
+ Vertex b;
+ Vertex c;
+ bnd.include(positions[indexArray[i]]);
+ bnd.include(positions[indexArray[i + 1]]);
+ bnd.include(positions[indexArray[i + 2]]);
+
+ a.p = positions[indexArray[i]] - offset;
+ b.p = positions[indexArray[i + 1]] - offset;
+ c.p = positions[indexArray[i + 2]] - offset;
+ a.p *= scale;
+ b.p *= scale;
+ c.p *= scale;
+ chunkTriangles[chunkIndex].push_back(Nv::Blast::Triangle(a, b, c));
+ }
+ }
+ }
+ return bnd;
+}
+
+bool ApexImportTool::importApexAsset(std::vector<uint32_t>& chunkReorderInvMap, const nvidia::apex::DestructibleAsset* apexAsset,
+ std::vector<NvBlastChunkDesc>& chunkDescriptors, std::vector<NvBlastBondDesc>& bondDescriptors, std::vector<uint32_t>& apexChunkFlags)
+{
+ ApexImporterConfig configDesc;
+ configDesc.setDefaults();
+ return importApexAsset(chunkReorderInvMap, apexAsset, chunkDescriptors, bondDescriptors, apexChunkFlags, configDesc);
+}
+
+
+bool ApexImportTool::importApexAsset(std::vector<uint32_t>& chunkReorderInvMap, const nvidia::apex::DestructibleAsset* apexAsset,
+ std::vector<NvBlastChunkDesc>& chunkDescriptors, std::vector<NvBlastBondDesc>& bondDescriptors, std::vector<uint32_t>& apexChunkFlags, const ApexImporterConfig& configDesc)
+{
+ return importApexAssetInternal(chunkReorderInvMap, apexAsset, chunkDescriptors, bondDescriptors, apexChunkFlags, configDesc);
+}
+
+
+bool ApexImportTool::importApexAssetInternal(std::vector<uint32_t>& chunkReorderInvMap, const nvidia::apex::DestructibleAsset* apexAsset,
+ std::vector<NvBlastChunkDesc>& chunkDescriptors, std::vector<NvBlastBondDesc>& bondsDescriptors, std::vector<uint32_t>& apexChunkFlags, const ApexImporterConfig& configDesc)
+{
+
+ if (!apexAsset)
+ {
+ if (m_log != NULL)
+ {
+ m_log(NvBlastMessage::Error, "Error: attempting to import NULL Apex asset.\n", __FILE__, __LINE__);
+ }
+ return false;
+ }
+
+ // Build chunk descriptors for asset //
+ const uint32_t apexChunkCount = apexAsset->getChunkCount();
+ chunkDescriptors.clear();
+ chunkDescriptors.resize(apexChunkCount);
+ uint32_t rootChunkIndex = 0;
+
+ for (uint32_t i = 0; i < apexChunkCount; ++i)
+ {
+ // Use bounds center for centroid
+ const PxBounds3 bounds = apexAsset->getChunkActorLocalBounds(i);
+ const PxVec3 center = bounds.getCenter();
+ memcpy(chunkDescriptors[i].centroid, &center.x, 3 * sizeof(float));
+
+ // Find chunk volume
+ uint32_t partIndex = apexAsset->getPartIndex(i);
+ uint32_t partConvexHullCount = apexAsset->getPartConvexHullCount(partIndex);
+ NvParameterized::Interface** cxInterfaceArray = apexAsset->getPartConvexHullArray(partIndex);
+ chunkDescriptors[i].volume = 0.0f;
+ for (uint32_t hull = 0; hull < partConvexHullCount; ++hull)
+ {
+ NvParameterized::Handle paramHandle(cxInterfaceArray[hull]);
+ float hullVolume;
+ paramHandle.getParameter("volume");
+ paramHandle.getParamF32(hullVolume);
+ chunkDescriptors[i].volume += hullVolume;
+ }
+
+ int32_t parent = apexAsset->getChunkParentIndex(i);
+ if (parent == -1)
+ {
+ rootChunkIndex = i;
+ chunkDescriptors[i].parentChunkIndex = UINT32_MAX;
+ }
+ else
+ {
+ chunkDescriptors[i].parentChunkIndex = parent;
+ }
+
+ chunkDescriptors[i].flags = 0;
+ chunkDescriptors[i].userData = i;
+ }
+ // Get support graph data from Apex asset //
+
+ const NvParameterized::Interface* assetParameterized = apexAsset->getAssetNvParameterized();
+ uint32_t maximumSupportDepth = 0;
+
+ NvParameterized::Handle parameterHandle(*assetParameterized);
+ parameterHandle.getParameter("supportDepth");
+ parameterHandle.getParamU32(maximumSupportDepth);
+ std::vector<std::pair<uint32_t, uint32_t> > overlapsBuffer;
+ uint32_t overlapsCount = apexAsset->getCachedOverlapCountAtDepth(maximumSupportDepth);
+ if (overlapsCount != 0)
+ {
+ const IntPair* overlap = reinterpret_cast<const IntPair*>(apexAsset->getCachedOverlapsAtDepth(maximumSupportDepth));
+ for (uint32_t i = 0; i < overlapsCount; ++i)
+ {
+ chunkDescriptors[overlap[i].i0].flags = NvBlastChunkDesc::SupportFlag;
+ chunkDescriptors[overlap[i].i1].flags = NvBlastChunkDesc::SupportFlag;
+ overlapsBuffer.push_back(std::make_pair(overlap[i].i0, overlap[i].i1));
+ }
+ }
+
+ // Format all connections as (chunk with lower index) -> (chunk with higher index) //
+
+ for (uint32_t i = 0; i < overlapsBuffer.size(); ++i)
+ {
+ if (overlapsBuffer[i].first > overlapsBuffer[i].second)
+ {
+ std::swap(overlapsBuffer[i].first, overlapsBuffer[i].second);
+ }
+ }
+
+ // Unique all connections //
+ std::sort(overlapsBuffer.begin(), overlapsBuffer.end());
+ overlapsBuffer.resize(std::unique(overlapsBuffer.begin(), overlapsBuffer.end()) - overlapsBuffer.begin());
+
+ // Build bond descriptors (acquire area, normal, centroid)
+ bondsDescriptors.clear();
+ bondsDescriptors.resize(overlapsBuffer.size());
+
+ Nv::Blast::BlastBondGenerator bondGenTool(GetApexSDK()->getCookingInterface(), &GetApexSDK()->getPhysXSDK()->getPhysicsInsertionCallback());
+ std::vector<std::vector<Nv::Blast::Triangle> > chunkTriangles;
+
+ PxBounds3 bnds = apexAsset->getRenderMeshAsset()->getBounds();
+ PxVec3 offset = bnds.getCenter();
+ float scale = 1.0f / PxMax(PxAbs(bnds.getExtents(0)), PxMax(PxAbs(bnds.getExtents(1)), PxAbs(bnds.getExtents(2))));
+
+ bnds = gatherChunkTriangles(apexAsset, chunkTriangles, 0, scale, offset);
+
+
+ BondGenerationConfig cf;
+ cf.bondMode = BondGenerationConfig::AVERAGE;
+ if (configDesc.infSearchMode == configDesc.EXACT)
+ {
+ cf.bondMode = BondGenerationConfig::EXACT;
+ }
+
+ bondGenTool.createBondBetweenMeshes(chunkTriangles, bondsDescriptors, overlapsBuffer, cf);
+
+
+
+ float inverScale = 1.0f / scale;
+
+ for (uint32_t i = 0; i < bondsDescriptors.size(); ++i)
+ {
+ bondsDescriptors[i].bond.area *= inverScale * inverScale;
+ bondsDescriptors[i].bond.centroid[0] *= inverScale;
+ bondsDescriptors[i].bond.centroid[1] *= inverScale;
+ bondsDescriptors[i].bond.centroid[2] *= inverScale;
+
+ bondsDescriptors[i].bond.centroid[0] += offset.x;
+ bondsDescriptors[i].bond.centroid[1] += offset.y;
+ bondsDescriptors[i].bond.centroid[2] += offset.z;
+
+ }
+
+ /// Delete all bonds with zero area ///
+ for (uint32_t i = 0; i < bondsDescriptors.size(); ++i)
+ {
+ if (bondsDescriptors[i].bond.area == 0)
+ {
+ bondsDescriptors[i].chunkIndices[0] = bondsDescriptors.back().chunkIndices[0];
+ bondsDescriptors[i].chunkIndices[1] = bondsDescriptors.back().chunkIndices[1];
+ bondsDescriptors[i].bond = bondsDescriptors.back().bond;
+ bondsDescriptors.pop_back();
+ --i;
+ }
+ }
+
+
+
+ apexChunkFlags.clear();
+ apexChunkFlags.resize(chunkDescriptors.size());
+ // special 'earth chunk'
+ {
+ uint32_t earthChunkIndex = (uint32_t)chunkDescriptors.size();
+ NvBlastChunkDesc earthChunk;
+ memset(earthChunk.centroid, 0, 3 * sizeof(float));
+ earthChunk.volume = 0.0f;
+ earthChunk.parentChunkIndex = rootChunkIndex;
+ earthChunk.flags = NvBlastChunkDesc::SupportFlag;
+ earthChunk.userData = earthChunkIndex;
+ uint32_t chunksConnectedToEarth = 0;
+ for (uint32_t i = 0; i < chunkDescriptors.size(); i++)
+ {
+ uint32_t chunkID = i;
+ const NvParameterized::Interface* assetInterface = apexAsset->getAssetNvParameterized();
+ NvParameterized::Handle chunksHandle(*assetInterface, "chunks");
+ chunksHandle.set(chunkID);
+ NvParameterized::Handle flagsHandle(*assetInterface);
+ chunksHandle.getChildHandle(assetInterface, "flags", flagsHandle);
+ uint32_t flags;
+ flagsHandle.getParamU32(flags);
+
+ apexChunkFlags[chunkID] = flags;
+
+ // world support flag
+ if (flags & (1 << 0))
+ {
+ NvBlastBondDesc bond;
+ bond.chunkIndices[0] = i;
+ bond.chunkIndices[1] = earthChunkIndex;
+ bond.bond.area = 0.1f; // ???
+ PxVec3 center = apexAsset->getChunkActorLocalBounds(chunkID).getCenter();
+ memcpy(&bond.bond.centroid, &center.x, sizeof(PxVec3));
+ PxVec3 normal = PxVec3(0, 0, 1);
+ memcpy(&bond.bond.normal, &normal.x, sizeof(PxVec3));
+ bondsDescriptors.push_back(bond);
+ chunksConnectedToEarth++;
+ }
+ }
+ if (chunksConnectedToEarth > 0)
+ {
+ chunkDescriptors.push_back(earthChunk);
+ }
+ }
+
+ const uint32_t chunkCount = static_cast<uint32_t>(chunkDescriptors.size());
+ const uint32_t bondCount = static_cast<uint32_t>(bondsDescriptors.size());
+ std::vector<uint32_t> chunkReorderMap(chunkCount);
+ std::vector<NvBlastChunkDesc> scratch(chunkCount);
+ NvBlastEnsureAssetExactSupportCoverage(chunkDescriptors.data(), chunkCount, scratch.data(), m_log);
+ NvBlastBuildAssetDescChunkReorderMap(chunkReorderMap.data(), chunkDescriptors.data(), chunkCount, scratch.data(), m_log);
+ NvBlastApplyAssetDescChunkReorderMapInplace(chunkDescriptors.data(), chunkCount, bondsDescriptors.data(), bondCount, chunkReorderMap.data(), scratch.data(), m_log);
+ chunkReorderInvMap.resize(chunkReorderMap.size());
+ Nv::Blast::invertMap(chunkReorderInvMap.data(), chunkReorderMap.data(), static_cast<uint32_t>(chunkReorderMap.size()));
+ return true;
+}
+
+
+bool ApexImportTool::saveAsset(const NvBlastAsset* asset, PxFileBuf* stream)
+{
+ if (!asset)
+ {
+ if (m_log != NULL)
+ {
+ m_log(NvBlastMessage::Error, "Error: attempting to serialize NULL asset.\n", __FILE__, __LINE__);
+ }
+ return false;
+ }
+ if (!stream)
+ {
+ if (m_log != NULL)
+ {
+ m_log(NvBlastMessage::Error, "Error: bad output stream.\n", __FILE__, __LINE__);
+ }
+ return false;
+ }
+ const void* assetData = asset;
+ uint32_t assetDataSize = NvBlastAssetGetSize(asset, m_log);
+ stream->write(assetData, assetDataSize);
+ stream->close();
+ if (m_log != NULL)
+ {
+ m_log(NvBlastMessage::Info, "Saving finished... \n", __FILE__, __LINE__);
+ }
+ return true;
+}
+
+
+ApexImportTool::~ApexImportTool()
+{
+ delete m_apexDestruction;
+}
+
+} // namespace ApexImporter
+
+} // namespace Blast
+} // namespace Nv
diff --git a/NvBlast/sdk/extensions/import/source/NvBlastExtScopedResource.cpp b/NvBlast/sdk/extensions/import/source/NvBlastExtScopedResource.cpp
new file mode 100644
index 0000000..7cf7492
--- /dev/null
+++ b/NvBlast/sdk/extensions/import/source/NvBlastExtScopedResource.cpp
@@ -0,0 +1,50 @@
+/*
+* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#include "NvBlastExtScopedResource.h"
+
+#include <ApexSDK.h>
+#include <RenderMeshAsset.h>
+#include <DestructibleAsset.h>
+#include <ModuleDestructible.h>
+
+
+namespace Nv
+{
+namespace Blast
+{
+
+namespace ApexImporter
+{
+
+void ApexReleaser::release(nvidia::apex::RenderMeshAssetAuthoring& a)
+{
+ if (mApex)
+ mApex->releaseAssetAuthoring(a);
+}
+
+
+void ApexReleaser::release(nvidia::apex::DestructibleAssetAuthoring& a)
+{
+ if (mApex)
+ mApex->releaseAssetAuthoring(a);
+}
+
+
+void ApexReleaser::release(nvidia::apex::ModuleDestructible& a)
+{
+ if (mApex)
+ mApex->releaseModule(&a);
+}
+
+} // namespace ApexImporter
+
+} // namespace Blast
+} // namespace Nv
diff --git a/NvBlast/sdk/extensions/import/source/NvBlastExtScopedResource.h b/NvBlast/sdk/extensions/import/source/NvBlastExtScopedResource.h
new file mode 100644
index 0000000..e0d35b7
--- /dev/null
+++ b/NvBlast/sdk/extensions/import/source/NvBlastExtScopedResource.h
@@ -0,0 +1,160 @@
+/*
+* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#ifndef NVBLASTEXTSCOPEDRESOURCE_H
+#define NVBLASTEXTSCOPEDRESOURCE_H
+
+#include <PxAssert.h>
+
+
+#pragma warning(push)
+#pragma warning(disable:4512)
+
+namespace nvidia
+{
+namespace apex
+{
+class ApexSDK;
+class Asset;
+class DestructibleAsset;
+class AssetAuthoring;
+class ModuleDestructible;
+class RenderMeshAssetAuthoring;
+class DestructibleAssetAuthoring;
+}
+}
+
+namespace Nv
+{
+namespace Blast
+{
+
+namespace ApexImporter
+{
+
+template <class T>
+class DefaultReleaser
+{
+public:
+ DefaultReleaser() { }
+ PX_INLINE void release(T& t) { t.release(); }
+};
+
+class ApexReleaser
+{
+public:
+ ApexReleaser() : mApex(nullptr) { }
+ ApexReleaser(nvidia::apex::ApexSDK& apex) : mApex(&apex) { }
+
+ void release(nvidia::apex::RenderMeshAssetAuthoring&);
+ void release(nvidia::apex::DestructibleAssetAuthoring&);
+ void release(nvidia::apex::ModuleDestructible&);
+
+protected:
+ bool mbValid;
+ nvidia::apex::ApexSDK* mApex;
+};
+
+template< class Releasable, class Releaser = DefaultReleaser<Releasable> >
+class ScopedResource
+{
+public:
+ ScopedResource()
+ : mpReleasable(nullptr), mIsReleasable(true){}
+
+ ScopedResource(Releasable* pReleasable, const Releaser& releaser)
+ : mpReleasable(pReleasable),
+ mReleaser(releaser), mIsReleasable(true) { }
+
+ ScopedResource(Releasable* pReleasable, bool isReleasable = true)
+ : mpReleasable(pReleasable), mIsReleasable(isReleasable) { }
+
+ ~ScopedResource()
+ {
+ destroy();
+ }
+
+ PX_INLINE operator bool() const
+ {
+ return (nullptr != mpReleasable);
+ }
+
+ PX_INLINE Releasable* get() const
+ {
+ return mpReleasable;
+ }
+
+ PX_INLINE Releasable* release()
+ {
+ Releasable* pReleasable = mpReleasable;
+ mpReleasable = nullptr;
+ return pReleasable;
+ }
+
+ PX_INLINE Releasable& operator* () const
+ {
+ PX_ASSERT(*this);
+ return *mpReleasable;
+ }
+
+ PX_INLINE Releasable* operator-> () const
+ {
+ return mpReleasable;
+ }
+
+ PX_INLINE bool operator==(const ScopedResource& b) const
+ {
+ return mpReleasable == b.mpReleasable;
+ }
+
+ PX_INLINE bool operator!=(const ScopedResource& b) const
+ {
+ return !(*this == b);
+ }
+
+ PX_INLINE void reset(Releasable* pReleasable, bool isReleasable = true)
+ {
+ if (mpReleasable == pReleasable) return;
+ destroy();
+ mpReleasable = pReleasable;
+ mIsReleasable = isReleasable;
+ }
+
+ PX_INLINE void reset(Releasable* pReleasable, const Releaser& releaser, bool isReleasable = true)
+ {
+ reset(pReleasable);
+ mReleaser = releaser;
+ mIsReleasable = isReleasable;
+ }
+
+private:
+
+ void destroy()
+ {
+ if (*this && mIsReleasable)
+ mReleaser.release(*mpReleasable);
+ }
+
+ ScopedResource(const ScopedResource&);
+ ScopedResource& operator=(const ScopedResource&);
+
+ Releasable* mpReleasable;
+ Releaser mReleaser;
+ bool mIsReleasable;
+};
+
+} // namespace ApexImporter
+
+} // namespace Blast
+} // namespace Nv
+
+#pragma warning(pop)
+
+#endif // NVBLASTEXTSCOPEDRESOURCE_H
diff --git a/NvBlast/sdk/extensions/physx/include/NvBlastExtImpactDamageManager.h b/NvBlast/sdk/extensions/physx/include/NvBlastExtImpactDamageManager.h
new file mode 100644
index 0000000..ac3576d
--- /dev/null
+++ b/NvBlast/sdk/extensions/physx/include/NvBlastExtImpactDamageManager.h
@@ -0,0 +1,142 @@
+/*
+* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#ifndef NVBLASTEXTIMPACTDAMAGEMANAGER_H
+#define NVBLASTEXTIMPACTDAMAGEMANAGER_H
+
+#include "PxFiltering.h"
+#include "NvPreprocessor.h"
+
+// Forward declarations
+namespace physx
+{
+struct PxContactPair;
+struct PxContactPairHeader;
+}
+
+
+namespace Nv
+{
+namespace Blast
+{
+
+// Forward declarations
+class ExtPxActor;
+class ExtPxManager;
+
+
+/**
+Custom Damage Function
+*/
+typedef bool(*ExtImpactDamageFunction)(void* data, ExtPxActor* actor, physx::PxShape* shape, physx::PxVec3 worldPos, physx::PxVec3 worldForce);
+
+
+/**
+Impact Damage Manager Settings.
+*/
+struct ExtImpactSettings
+{
+ bool isSelfCollissionEnabled; //!< family's self collision enabled
+ float fragility; //!< global fragility factor
+ ExtImpactDamageFunction damageFunction; //!< custom damage function, can be nullptr, default internal one will be used in that case.
+ void* damageFunctionData; //!< data to be passed in custom damage function
+
+
+ ExtImpactSettings() :
+ isSelfCollissionEnabled(false),
+ fragility(1.0f),
+ damageFunction(nullptr)
+ {}
+};
+
+
+/**
+Impact Damage Manager.
+
+Requires ExtPxManager.
+Call onContact from PxSimulationEventCallback onContact to accumulate damage.
+Call applyDamage to apply accumulated damage.
+*/
+class NV_DLL_EXPORT ExtImpactDamageManager
+{
+public:
+ //////// manager creation ////////
+
+ /**
+ Create a new ExtImpactDamageManager.
+
+ \param[in] pxManager The ExtPxManager instance to be used by impact damage manager.
+ \param[in] settings The settings to be set on ExtImpactDamageManager.
+
+ \return the new ExtImpactDamageManager if successful, NULL otherwise.
+ */
+ static ExtImpactDamageManager* create(ExtPxManager* pxManager, ExtImpactSettings settings = ExtImpactSettings());
+
+ /**
+ Release this manager.
+ */
+ virtual void release() = 0;
+
+
+ //////// interface ////////
+
+ /**
+ Set ExtImpactDamageManager settings.
+
+ \param[in] settings The settings to be set on ExtImpactDamageManager.
+ */
+ virtual void setSettings(const ExtImpactSettings& settings) = 0;
+
+ /**
+ This method is equal to PxSimulationEventCallback::onContact.
+
+	User should implement their own PxSimulationEventCallback::onContact and call this method in order for ExtImpactDamageManager to work correctly.
+
+ Contacts will be processed and impact damage will be accumulated.
+
+ \param[in] pairHeader Information on the two actors whose shapes triggered a contact report.
+ \param[in] pairs The contact pairs of two actors for which contact reports have been requested. @see PxContactPair.
+ \param[in] nbPairs The number of provided contact pairs.
+
+ @see PxSimulationEventCallback
+ */
+ virtual void onContact(const physx::PxContactPairHeader& pairHeader, const physx::PxContactPair* pairs, uint32_t nbPairs) = 0;
+
+
+ /**
+ Apply accumulated impact damage.
+ */
+ virtual void applyDamage() = 0;
+
+
+ //////// filter shader ////////
+
+ /**
+ Custom implementation of PxSimulationFilterShader, enables necessary information to be passed in onContact().
+	Set it in your PxScene's PxSceneDesc in order for impact damage to work correctly, or implement your own.
+
+ @see PxSimulationFilterShader
+ */
+ static physx::PxFilterFlags FilterShader(
+ physx::PxFilterObjectAttributes attributes0,
+ physx::PxFilterData filterData0,
+ physx::PxFilterObjectAttributes attributes1,
+ physx::PxFilterData filterData1,
+ physx::PxPairFlags& pairFlags,
+ const void* constantBlock,
+ uint32_t constantBlockSize);
+
+};
+
+} // namespace Blast
+} // namespace Nv
+
+
+#endif // ifndef NVBLASTEXTIMPACTDAMAGEMANAGER_H
diff --git a/NvBlast/sdk/extensions/physx/include/NvBlastExtPx.h b/NvBlast/sdk/extensions/physx/include/NvBlastExtPx.h
new file mode 100644
index 0000000..b2d938b
--- /dev/null
+++ b/NvBlast/sdk/extensions/physx/include/NvBlastExtPx.h
@@ -0,0 +1,29 @@
+/*
+* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#ifndef NVBLASTEXTPX_H
+#define NVBLASTEXTPX_H
+
+
+/**
+This is the main include header for the BlastExt Physics, for users who
+want to use a single #include file.
+
+Alternatively, one can instead directly #include a subset of the below files.
+*/
+
+#include "NvBlastExtPxActor.h"
+#include "NvBlastExtPxAsset.h"
+#include "NvBlastExtPxFamily.h"
+#include "NvBlastExtPxListener.h"
+#include "NvBlastExtPxManager.h"
+
+
+#endif // ifndef NVBLASTEXTPX_H
diff --git a/NvBlast/sdk/extensions/physx/include/NvBlastExtPxActor.h b/NvBlast/sdk/extensions/physx/include/NvBlastExtPxActor.h
new file mode 100644
index 0000000..994ace7
--- /dev/null
+++ b/NvBlast/sdk/extensions/physx/include/NvBlastExtPxActor.h
@@ -0,0 +1,83 @@
+/*
+* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#ifndef NVBLASTEXTPXACTOR_H
+#define NVBLASTEXTPXACTOR_H
+
+#include "NvBlastTypes.h"
+
+
+// Forward declarations
+namespace physx
+{
+ class PxRigidDynamic;
+}
+
+
+namespace Nv
+{
+namespace Blast
+{
+
+// Forward declarations
+class ExtPxFamily;
+class TkActor;
+
+
+/**
+Actor.
+
+Corresponds one to one to PxRigidDynamic and ExtActor.
+*/
+class ExtPxActor
+{
+public:
+ /**
+ Get the number of visible chunks for this actor. May be used in conjunction with getChunkIndices().
+
+ \return the number of visible chunk indices for the actor.
+ */
+ virtual uint32_t getChunkCount() const = 0;
+
+ /**
+ Access actor's array of chunk indices. Use getChunkCount() to get a size of this array.
+
+ \return a pointer to an array of chunk indices of an actor.
+ */
+ virtual const uint32_t* getChunkIndices() const = 0;
+
+ /**
+ Every actor has corresponding PxActor.
+
+	\return a reference to the PxRigidDynamic actor.
+ */
+ virtual physx::PxRigidDynamic& getPhysXActor() const = 0;
+
+ /**
+ Every actor has corresponding TkActor.
+
+	\return a reference to the TkActor.
+ */
+ virtual TkActor& getTkActor() const = 0;
+
+ /**
+ Every actor has corresponding ExtPxFamily.
+
+	\return a reference to the ExtPxFamily.
+ */
+ virtual ExtPxFamily& getFamily() const = 0;
+};
+
+
+} // namespace Blast
+} // namespace Nv
+
+
+#endif // ifndef NVBLASTEXTPXACTOR_H
diff --git a/NvBlast/sdk/extensions/physx/include/NvBlastExtPxAsset.h b/NvBlast/sdk/extensions/physx/include/NvBlastExtPxAsset.h
new file mode 100644
index 0000000..a4dbe0e
--- /dev/null
+++ b/NvBlast/sdk/extensions/physx/include/NvBlastExtPxAsset.h
@@ -0,0 +1,201 @@
+/*
+* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#ifndef NVBLASTEXTPXASSET_H
+#define NVBLASTEXTPXASSET_H
+
+#include "NvBlastTkFramework.h"
+#include "PxConvexMeshGeometry.h"
+#include "PxTransform.h"
+#include "NvBlastPreprocessor.h"
+
+
+// Forward declarations
+namespace physx
+{
+class PxCooking;
+
+namespace general_PxIOStream2
+{
+class PxFileBuf;
+}
+}
+
+
+namespace Nv
+{
+namespace Blast
+{
+
+
+/**
+Descriptor for PxAsset creation.
+
+PxAsset creates TkAsset internally, so TkAssetDesc must be filled.
+In addition it needs physics chunks data. Every chunk can have any number of convexes (subchunks).
+*/
+struct ExtPxAssetDesc : public TkAssetDesc
+{
+	/**
+	Physics Subchunk.
+
+	Represents a convex and its local transform.
+	*/
+	struct SubchunkDesc
+	{
+		physx::PxTransform			transform;		//!<	convex local transform
+		physx::PxConvexMeshGeometry	geometry;		//!<	convex geometry
+	};
+
+	/**
+	Physics Chunk.
+
+	Contains any number of subchunks. An empty subchunks array makes the chunk invisible.
+	*/
+	struct ChunkDesc
+	{
+		SubchunkDesc*	subchunks;		//!<	array of subchunks for chunk, can be empty
+		uint32_t		subchunkCount;	//!<	size of the subchunks array for the chunk, can be 0
+		bool			isStatic;		//!<	is the chunk static; a static chunk makes the PxActor kinematic.
+	};
+
+	ChunkDesc*	pxChunks;	//!<	array of chunks in asset, should be of size chunkCount (@see NvBlastAssetDesc)
+};
+
+
+/**
+Physics Subchunk.
+
+Represents a convex and its local position.
+*/
+struct ExtPxSubchunk
+{
+	physx::PxTransform			transform;		//!<	convex local transform
+	physx::PxConvexMeshGeometry	geometry;		//!<	convex geometry
+};
+
+
+/**
+Physics Chunk.
+
+Contains any number of subchunks.
+*/
+struct ExtPxChunk
+{
+	uint32_t	firstSubchunkIndex;	//!<	first Subchunk index in Subchunk's array in ExtPxAsset
+	uint32_t	subchunkCount;		//!<	Subchunk count. Can be 0.
+	bool		isStatic;			//!<	is chunk static (kinematic)?
+};
+
+
+/**
+Asset.
+
+Keeps all the static data needed for physics.
+*/
+class NV_DLL_EXPORT ExtPxAsset
+{
+public:
+
+	/**
+	Create a new ExtPxAsset.
+
+	\param[in]	desc		The ExtPxAssetDesc descriptor to be used, @see ExtPxAssetDesc.
+	\param[in]	framework	The TkFramework instance to be used to create TkAsset.
+
+	\return the new ExtPxAsset if successful, NULL otherwise.
+	*/
+	static ExtPxAsset* create(const ExtPxAssetDesc& desc, TkFramework& framework);
+
+
+	/**
+	Factory method for deserialization.
+
+	Doesn't specify chunks or subchunks as they'll be fed in during deserialization to avoid copying stuff around.
+
+	*/
+	static ExtPxAsset* create(TkAsset* asset);
+
+
+	/**
+	Deserialize an ExtPxAsset object from the given stream.
+
+	\param[in]	stream		User-defined stream object.
+	\param[in]	framework	The TkFramework instance to be used to deserialize TkAsset.
+	\param[in]	physics		The PxPhysics instance to be used to deserialize PxConvexMesh(s).
+
+	\return a pointer to the deserialized ExtPxAsset object if successful, or NULL if unsuccessful.
+	*/
+	static ExtPxAsset* deserialize(physx::general_PxIOStream2::PxFileBuf& stream, TkFramework& framework, physx::PxPhysics& physics);
+
+	/**
+	Release this ExtPxAsset.
+	*/
+	virtual void release() = 0;
+
+	/**
+	Write the asset's data to the user-defined PxFileBuf stream. The underlying TkAsset is also serialized.
+
+	\param[in]	stream	User-defined stream object.
+	\param[in]	cooking	The PxCooking instance to be used to serialize PxConvexMesh(s).
+
+	\return true if serialization was successful, false otherwise.
+	*/
+	virtual bool serialize(physx::general_PxIOStream2::PxFileBuf& stream, physx::PxCooking& cooking) const = 0;
+
+	/**
+	Every ExtPxAsset has corresponding TkAsset.
+
+	\return a reference to the underlying TkAsset.
+	*/
+	virtual const TkAsset& getTkAsset() const = 0;
+
+	/**
+	Get the number of chunks for this asset. May be used in conjunction with getChunks().
+
+	\return the number of chunks for the asset.
+	*/
+	virtual uint32_t getChunkCount() const = 0;
+
+	/**
+	Access asset's array of chunks. Use getChunkCount() to get the size of this array.
+
+	\return a pointer to an array of chunks of the asset.
+	*/
+	virtual const ExtPxChunk* getChunks() const = 0;
+
+	/**
+	Get the number of subchunks for this asset. May be used in conjunction with getSubchunks().
+	Subchunk count is the maximum value of ExtPxChunk: (firstSubchunkIndex + subchunkCount).
+
+	\return the number of subchunks for the asset.
+	*/
+	virtual uint32_t getSubchunkCount() const = 0;
+
+	/**
+	Access asset's array of subchunks. Use getSubchunkCount() to get the size of this array.
+
+	\return a pointer to an array of subchunks of an asset.
+	*/
+	virtual const ExtPxSubchunk* getSubchunks() const = 0;
+
+	/**
+	Pointer field available to the user.
+	*/
+	void* userData;
+};
+
+
+
+} // namespace Blast
+} // namespace Nv
+
+
+#endif // ifndef NVBLASTEXTPXASSET_H
diff --git a/NvBlast/sdk/extensions/physx/include/NvBlastExtPxFamily.h b/NvBlast/sdk/extensions/physx/include/NvBlastExtPxFamily.h
new file mode 100644
index 0000000..7805c15
--- /dev/null
+++ b/NvBlast/sdk/extensions/physx/include/NvBlastExtPxFamily.h
@@ -0,0 +1,223 @@
+/*
+* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#ifndef NVBLASTEXTPXFAMILY_H
+#define NVBLASTEXTPXFAMILY_H
+
+#include "PxFiltering.h"
+
+
+// Forward declarations
+namespace physx
+{
+class PxRigidDynamic;
+class PxMaterial;
+class PxScene;
+class PxTransform;
+}
+
+
+namespace Nv
+{
+namespace Blast
+{
+
+// Forward declarations
+class ExtPxActor;
+class ExtPxAsset;
+class ExtPxListener;
+class TkFamily;
+
+
+/**
+PxShape Desc.
+
+Used to set settings for newly created PxShapes.
+
+@see PxShape
+*/
+struct ExtPxShapeDescTemplate
+{
+ uint8_t flags; //!< PxShapeFlags flags
+ physx::PxFilterData simulationFilterData; //!< user definable collision filter data
+ physx::PxFilterData queryFilterData; //!< user definable query filter data.
+ float contactOffset; //!< contact offset
+ float restOffset; //!< rest offset
+};
+
+
+/**
+PxActor Desc.
+
+Used to set settings for newly created PxActors.
+*/
+struct ExtPxActorDescTemplate
+{
+ uint8_t flags; //!< actor flags
+};
+
+
+/**
+Physics Spawn Settings.
+
+This struct unifies settings to be used when PhysX actors are created.
+*/
+struct ExtPxSpawnSettings
+{
+	physx::PxScene*		scene;		//!<	PxScene for PxActors to be spawned
+	physx::PxMaterial*	material;	//!<	default PxMaterial
+	float				density;	//!<	default density for PhysX
+};
+
+
+/**
+PxFamily.
+
+A collection of actors. Maps 1 to 1 with TkFamily.
+*/
+class ExtPxFamily
+{
+public:
+	/**
+	Spawn ExtPxFamily. Can be called only once. Actual PhysX actors will be created and placed in the PxScene.
+
+	\param[in]	pose		World transform.
+	\param[in]	scale		Scale applied to spawned actors.
+	\param[in]	settings	Spawn settings.
+
+	\return true if spawn was successful, false otherwise.
+	*/
+	virtual bool spawn(const physx::PxTransform& pose, const physx::PxVec3& scale, const ExtPxSpawnSettings& settings) = 0;
+
+
+	/**
+	Despawn this ExtPxFamily. This removes the PhysX actors from PxScene and deletes them, as well as
+	deleting the created ExtPxActors.
+
+	This does not call release() on the family.
+
+	\returns true if successful.
+	*/
+	virtual bool despawn() = 0;
+
+
+	/**
+	The number of actors currently in this family.
+
+	\return the number of ExtPxActor that currently exist in this family.
+	*/
+	virtual uint32_t getActorCount() const = 0;
+
+	/**
+	Retrieve an array of pointers (into the user-supplied buffer) to actors.
+
+	\param[out]	buffer		A user-supplied array of ExtPxActor pointers.
+	\param[in]	bufferSize	The number of elements available to write into buffer.
+
+	\return the number of ExtPxActor pointers written to the buffer.
+	*/
+	virtual uint32_t getActors(ExtPxActor** buffer, uint32_t bufferSize) const = 0;
+
+	/**
+	Every family has corresponding TkFamily.
+
+	\return a reference to the TkFamily.
+	*/
+	virtual TkFamily& getTkFamily() const = 0;
+
+	/**
+	Access an array of shapes of subchunks. The size of the array is equal to getPxAsset()->getSubchunkCount().
+	For every corresponding subchunk it contains pointer to created PxShape or nullptr.
+
+	\return the pointer to subchunk shapes array.
+	*/
+	virtual const physx::PxShape* const* getSubchunkShapes() const = 0;
+
+	/**
+	Every family has an associated asset.
+
+	\return a reference to the (const) ExtPxAsset object.
+	*/
+	virtual const ExtPxAsset& getPxAsset() const = 0;
+
+	/**
+	Set the default material to be used for PxRigidDynamic creation.
+
+	\param[in]	material	The material to be the new default.
+	*/
+	virtual void setMaterial(physx::PxMaterial& material) = 0;
+
+	/**
+	Set the ExtPxShapeDescTemplate to be used on all newly created PxShapes.
+
+	NOTE: Using it will override marking LEAF_CHUNK in simulationFilterData.word3 now.
+
+	\param[in]	pxShapeDesc		The PxShape desc to be the new default. Can be nullptr.
+	*/
+	virtual void setPxShapeDescTemplate(const ExtPxShapeDescTemplate* pxShapeDesc) = 0;
+
+	/**
+	Get the default ExtPxShapeDescTemplate to be used on all newly created PxShapes.
+
+	\return a pointer to the default PxShape desc. Can be nullptr.
+	*/
+	virtual const ExtPxShapeDescTemplate* getPxShapeDescTemplate() const = 0;
+
+	/**
+	Set the ExtPxActorDescTemplate to be used on all newly created PxActors.
+
+	\param[in]	pxActorDesc		The PxActor desc to be the new default. Can be nullptr.
+	*/
+	virtual void setPxActorDesc(const ExtPxActorDescTemplate* pxActorDesc) = 0;
+
+	/**
+	Get the default ExtPxActorDescTemplate to be used on all newly created PxActors.
+
+	\return a pointer to the default PxActor desc. Can be nullptr.
+	*/
+	virtual const ExtPxActorDescTemplate* getPxActorDesc() const = 0;
+
+	/**
+	Add a user implementation of ExtPxListener to this family's list of listeners.
+
+	\param[in]	listener	The event listener to add.
+	*/
+	virtual void subscribe(ExtPxListener& listener) = 0;
+
+	/**
+	Remove a user implementation of ExtPxListener from this family's list of listeners.
+
+	\param[in]	listener	The event listener to remove.
+	*/
+	virtual void unsubscribe(ExtPxListener& listener) = 0;
+
+	/**
+	Call after split.
+	*/
+	virtual void postSplitUpdate() = 0;
+
+	/**
+	Release this family.
+	*/
+	virtual void release() = 0;
+
+	/**
+	UserData pointer. Free to be used by user in any way.
+	*/
+	void* userData;
+};
+
+
+
+} // namespace Blast
+} // namespace Nv
+
+
+#endif // ifndef NVBLASTEXTPXFAMILY_H
diff --git a/NvBlast/sdk/extensions/physx/include/NvBlastExtPxListener.h b/NvBlast/sdk/extensions/physx/include/NvBlastExtPxListener.h
new file mode 100644
index 0000000..4c43283
--- /dev/null
+++ b/NvBlast/sdk/extensions/physx/include/NvBlastExtPxListener.h
@@ -0,0 +1,55 @@
+/*
+* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#ifndef NVBLASTEXTPXLISTENER_H
+#define NVBLASTEXTPXLISTENER_H
+
+
+namespace Nv
+{
+namespace Blast
+{
+
+// Forward declarations
+class ExtPxFamily;
+class ExtPxActor;
+
+
+/**
+Physics Listener Interface.
+
+Actor create/destroy events listener.
+*/
+class ExtPxListener
+{
+public:
+	/**
+	Interface to be implemented by the user. Will be called when ExtPxFamily creates a new actor.
+
+	\param[in]	family	Corresponding ExtPxFamily with new actor.
+	\param[in]	actor	The new actor.
+	*/
+	virtual void onActorCreated(ExtPxFamily& family, ExtPxActor& actor) = 0;
+
+	/**
+	Interface to be implemented by the user. Will be called when ExtPxFamily destroys an actor.
+
+	\param[in]	family	Corresponding ExtPxFamily.
+	\param[in]	actor	The actor to be destroyed.
+	*/
+	virtual void onActorDestroyed(ExtPxFamily& family, ExtPxActor& actor) = 0;
+};
+
+
+} // namespace Blast
+} // namespace Nv
+
+
+#endif // ifndef NVBLASTEXTPXLISTENER_H
diff --git a/NvBlast/sdk/extensions/physx/include/NvBlastExtPxManager.h b/NvBlast/sdk/extensions/physx/include/NvBlastExtPxManager.h
new file mode 100644
index 0000000..9d73898
--- /dev/null
+++ b/NvBlast/sdk/extensions/physx/include/NvBlastExtPxManager.h
@@ -0,0 +1,245 @@
+/*
+* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#ifndef NVBLASTEXTPXMANAGER_H
+#define NVBLASTEXTPXMANAGER_H
+
+#include "NvBlastTypes.h"
+#include "PxConvexMeshGeometry.h"
+#include "PxTransform.h"
+#include "NvPreprocessor.h"
+
+
+// Forward declarations
+namespace physx
+{
+class PxPhysics;
+class PxRigidDynamic;
+class PxJoint;
+
+namespace general_PxIOStream2
+{
+class PxFileBuf;
+}
+}
+
+
+namespace Nv
+{
+namespace Blast
+{
+
+// Forward declarations
+class ExtPxActor;
+class ExtPxAsset;
+class ExtPxFamily;
+class ExtPxListener;
+class TkFamily;
+class TkFramework;
+class TkGroup;
+class TkJoint;
+
+
+/**
+Family Desc.
+
+Used to create Physics Family.
+*/
+struct ExtPxFamilyDesc
+{
+ const ExtPxAsset* pxAsset; //!< px asset to create from, pointer will be stored in family.
+ NvBlastActorDesc actorDesc; //!< actor descriptor to be used when creating TkActor.
+ TkGroup* group; //!< if not nullptr, created TkActor will be placed in group
+};
+
+
+/**
+Function pointer for PxJoint creation.
+
+It will be called when new joints are being created. It should return valid PxJoint pointer or nullptr.
+*/
+typedef physx::PxJoint*(*ExtPxCreateJointFunction)(ExtPxActor* actor0, const physx::PxTransform& localFrame0, ExtPxActor* actor1, const physx::PxTransform& localFrame1, physx::PxPhysics& physics, TkJoint& joint);
+
+
+/**
+Physics Manager.
+
+Used to create and manage Physics Families.
+*/
+class NV_DLL_EXPORT ExtPxManager
+{
+public:
+	//////// manager creation ////////
+
+	/**
+	Create a new ExtPxManager.
+
+	\param[in]	physics		The PxPhysics instance to be used by ExtPxManager.
+	\param[in]	framework	The TkFramework instance to be used by ExtPxManager.
+	\param[in]	createFn	The function to be used when creating joints, can be nullptr.
+	\param[in]	useUserData	Flag if ExtPxManager is allowed to override PxActor's userData, it will store pointer to PxActor there.
+							This is recommended as the fastest way. If set to 'false' a HashMap will be used.
+
+	\return the new ExtPxManager if successful, NULL otherwise.
+	*/
+	static ExtPxManager* create(physx::PxPhysics& physics, TkFramework& framework, ExtPxCreateJointFunction createFn = nullptr, bool useUserData = true);
+
+	/**
+	Release this manager.
+	*/
+	virtual void release() = 0;
+
+
+	//////// impact ////////
+
+	/**
+	Simulation Filter data to be set on leaf chunk actors
+	*/
+	enum FilterDataAttributes
+	{
+		LEAF_CHUNK = 1,
+	};
+
+
+	//////// interface ////////
+
+	/**
+	Create a px family from the given descriptor.
+
+	\param[in]	desc	The family descriptor (see ExtPxFamilyDesc).
+
+	\return the created family, if the descriptor was valid and memory was available for the operation. Otherwise, returns NULL.
+	*/
+	virtual ExtPxFamily* createFamily(const ExtPxFamilyDesc& desc) = 0;
+
+	/**
+	Create a px joint associated with TkJoint.
+
+	ExtPxCreateJointFunction will be called after this call.
+	ExtPxCreateJointFunction must be set, nothing will happen otherwise.
+
+	\param[in]	joint	TkJoint to be used to create px joint.
+
+	\return true iff Joint was created.
+	*/
+	virtual bool createJoint(TkJoint& joint) = 0;
+
+	/**
+	Destroy a px joint associated with TkJoint.
+
+	\param[in]	joint	TkJoint to be used to destroy px joint.
+	*/
+	virtual void destroyJoint(TkJoint& joint) = 0;
+
+	/**
+	Set ExtPxCreateJointFunction to be used when new joints are being created.
+
+	\param[in]	createFn	Create function pointer to set, can be nullptr.
+	*/
+	virtual void setCreateJointFunction(ExtPxCreateJointFunction createFn) = 0;
+
+	/**
+	The number of families currently in this manager.
+
+	\return the number of ExtPxFamily that currently exist in this manager.
+	*/
+	virtual uint32_t getFamilyCount() const = 0;
+
+	/**
+	Retrieve an array of pointers (into the user-supplied buffer) to families.
+
+	\param[out]	buffer		A user-supplied array of ExtPxFamily pointers.
+	\param[in]	bufferSize	The number of elements available to write into buffer.
+
+	\return the number of ExtPxFamily pointers written to the buffer.
+	*/
+	virtual uint32_t getFamilies(ExtPxFamily** buffer, uint32_t bufferSize) const = 0;
+
+	/**
+	Look up an associated ExtPxFamily by TkFamily pointer.
+
+	\param[in]	family	The TkFamily pointer to look up.
+
+	\return pointer to the ExtPxFamily object if it exists, NULL otherwise.
+	*/
+	virtual ExtPxFamily* getFamilyFromTkFamily(TkFamily& family) const = 0;
+
+	/**
+	Look up an associated ExtPxActor by PxRigidDynamic pointer.
+
+	\param[in]	pxActor	The PxRigidDynamic pointer to look up.
+
+	\return pointer to the ExtPxActor object if it exists, NULL otherwise.
+	*/
+	virtual ExtPxActor* getActorFromPhysXActor(const physx::PxRigidDynamic& pxActor) const = 0;
+
+	/**
+	Get the PxPhysics object used upon manager creation.
+
+	\return a reference to the PxPhysics object.
+	*/
+	virtual physx::PxPhysics& getPhysics() const = 0;
+
+	/**
+	Get the TkFramework object used upon manager creation.
+
+	\return a reference to the TkFramework object.
+	*/
+	virtual TkFramework& getFramework() const = 0;
+
+	/**
+	Get if useUserData was set upon manager creation.
+
+	\return true iff PxActor userData is used by manager.
+	*/
+	virtual bool isPxUserDataUsed() const = 0;
+
+	/**
+	Limits the total number of actors that can exist at a given time. A value of zero disables this (gives no limit).
+
+	\param[in]	limit	If not zero, the maximum number of actors that will be allowed to exist.
+	*/
+	virtual void setActorCountLimit(uint32_t limit) = 0;
+
+	/**
+	Retrieve the limit to the total number of actors that can exist at a given time. A value of zero disables this (gives no limit).
+
+	\return the limit to the total number of actors that can exist at a given time (or zero if there is no limit).
+	*/
+	virtual uint32_t getActorCountLimit() = 0;
+
+	/**
+	The total number of PxActors generated by Blast.
+
+	\return the total number of PxActors generated by Blast.
+	*/
+	virtual uint32_t getPxActorCount() const = 0;
+
+	/**
+	Add a user implementation of ExtPxListener to this manager's list of listeners.
+
+	\param[in]	listener	The event listener to add.
+	*/
+	virtual void subscribe(ExtPxListener& listener) = 0;
+
+	/**
+	Remove a user implementation of ExtPxListener from this manager's list of listeners.
+
+	\param[in]	listener	The event listener to remove.
+	*/
+	virtual void unsubscribe(ExtPxListener& listener) = 0;
+};
+
+
+} // namespace Blast
+} // namespace Nv
+
+
+#endif // ifndef NVBLASTEXTPXMANAGER_H
diff --git a/NvBlast/sdk/extensions/physx/include/NvBlastExtStressSolver.h b/NvBlast/sdk/extensions/physx/include/NvBlastExtStressSolver.h
new file mode 100644
index 0000000..2fd389d
--- /dev/null
+++ b/NvBlast/sdk/extensions/physx/include/NvBlastExtStressSolver.h
@@ -0,0 +1,209 @@
+/*
+* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#ifndef NVBLASTEXTSTRESSSOLVER_H
+#define NVBLASTEXTSTRESSSOLVER_H
+
+#include "common/PxRenderBuffer.h"
+#include <vector>
+#include "NvPreprocessor.h"
+
+
+namespace Nv
+{
+namespace Blast
+{
+
+// forward declarations
+class ExtPxFamily;
+class ExtPxActor;
+
+/**
+Stress Solver Settings
+
+Stress on every bond is calculated as
+stress = bond.linearStress * stressLinearFactor + bond.angularStress * stressAngularFactor
+where:
+bond.linearStress - is linear stress force on particular bond
+bond.angularStress - is angular stress force on particular bond
+stressLinearFactor, stressAngularFactor - are multiplier parameters set by this struct
+
+Support graph reduction:
+2 ^ reduction level = max node count to be aggregated during graph reduction, so level 0 gives 2 ^ 0 = 1, i.e. the unreduced support graph is used.
+So an N node graph will be simplified to contain ~ N / (2 ^ reduction level) nodes
+*/
+struct ExtStressSolverSettings
+{
+	float		stressLinearFactor;			//!<	linear stress on bond multiplier
+	float		stressAngularFactor;		//!<	angular stress on bond multiplier
+	uint32_t	bondIterationsPerFrame;		//!<	number of bond iterations to perform per frame, @see getIterationsPerFrame() below
+	uint32_t	graphReductionLevel;		//!<	graph reduction level
+
+	ExtStressSolverSettings() :
+		stressLinearFactor(0.00004f),
+		stressAngularFactor(0.00007f),
+		bondIterationsPerFrame(18000),
+		graphReductionLevel(3)
+	{}
+};
+
+
+/**
+Stress Solver.
+
+Uses ExtPxFamily, allocates and prepares its graph once when it's created. Then it's being quickly updated on every
+actor split.
+Works on both dynamic and static actors within the family.
+For static actors it applies gravity.
+For dynamic actors it applies centrifugal force.
+Additionally applyImpulse() method can be used to apply external impulse (like impact damage).
+*/
+class NV_DLL_EXPORT ExtStressSolver
+{
+public:
+	//////// creation ////////
+
+	/**
+	Create a new ExtStressSolver.
+
+	\param[in]	family		The ExtPxFamily instance to calculate stress on.
+	\param[in]	settings	The settings to be set on ExtStressSolver.
+
+	\return the new ExtStressSolver if successful, NULL otherwise.
+	*/
+	static ExtStressSolver* create(ExtPxFamily& family, ExtStressSolverSettings settings = ExtStressSolverSettings());
+
+
+	//////// interface ////////
+
+	/**
+	Release this stress solver.
+	*/
+	virtual void release() = 0;
+
+	/**
+	Set stress solver settings.
+	Changing graph reduction level will lead to graph being rebuilt (which is fast, but still not recommended).
+	All other settings are applied instantly and can be changed every frame.
+
+	\param[in]	settings	The settings to be set on ExtStressSolver.
+	*/
+	virtual void setSettings(const ExtStressSolverSettings& settings) = 0;
+
+	/**
+	Get stress solver settings.
+
+	\return the pointer to stress solver settings currently set.
+	*/
+	virtual const ExtStressSolverSettings& getSettings() const = 0;
+
+	/**
+	Apply external impulse on particular actor of family
+
+	\param[in]	actor		The ExtPxActor to apply impulse on.
+	\param[in]	position	Local position in actor's coordinates to apply impulse on.
+	\param[in]	force		Impulse to apply (kg * m / s).
+	*/
+	virtual void applyImpulse(ExtPxActor& actor, physx::PxVec3 position, physx::PxVec3 force) = 0;
+
+	/**
+	Update stress solver.
+
+	Calculate stress and optionally apply damage.
+
+	\param[in]	doDamage	If 'true' damage will be applied after stress solver.
+	*/
+	virtual void update(bool doDamage = true) = 0;
+
+	/**
+	Reset stress solver.
+
+	Stress solver uses warm start internally, calling this function will flush all previous data calculated and also zeroes the frame count.
+	This function is to be used for debug purposes.
+	*/
+	virtual void reset() = 0;
+
+	/**
+	Debug Render Mode
+	*/
+	enum DebugRenderMode
+	{
+		STRESS_GRAPH				= 0,	//!<	render only stress graph
+		STRESS_GRAPH_NODES_IMPULSES	= 1,	//!<	render stress graph + nodes impulses after solving stress
+		STRESS_GRAPH_BONDS_IMPULSES	= 2		//!<	render stress graph + bonds impulses after solving stress
+	};
+
+	/**
+	Fill debug render for passed array of support graph nodes.
+
+	\param[in]	nodes	Node indices of support graph to debug render for.
+	\param[out]	lines	Lines array to fill.
+	\param[in]	mode	Debug render mode.
+	\param[in]	scale	Scale to be applied on impulses.
+	*/
+	virtual void fillDebugRender(const std::vector<uint32_t>& nodes, std::vector<physx::PxDebugLine>& lines, DebugRenderMode mode, float scale = 1.0f) = 0;
+
+	/**
+	Get stress solver linear error.
+
+	\return the total linear error of stress calculation.
+	*/
+	virtual float getStressErrorLinear() const = 0;
+
+	/**
+	Get stress solver angular error.
+
+	\return the total angular error of stress calculation.
+	*/
+	virtual float getStressErrorAngular() const = 0;
+
+	/**
+	Get stress solver total iterations count since it was created (or reset).
+
+	\return the iterations count.
+	*/
+	virtual uint32_t getIterationCount() const = 0;
+
+	/**
+	Get stress solver total frames count (update() calls) since it was created (or reset).
+
+	\return the frames count.
+	*/
+	virtual uint32_t getFrameCount() const = 0;
+
+	/**
+	Get stress solver bonds count, after graph reduction was applied.
+
+	\return the bonds count.
+	*/
+	virtual uint32_t getBondCount() const = 0;
+
+
+	//////// helpers ////////
+
+	/**
+	Get iteration per frame (update() call).
+
+	Helper method to know how many solver iterations are made per frame.
+
+	\return the iterations per frame count.
+	*/
+	uint32_t getIterationsPerFrame() const
+	{
+		uint32_t perFrame = getSettings().bondIterationsPerFrame / (getBondCount() + 1);
+		return perFrame > 0 ? perFrame : 1;
+	}
+};
+
+} // namespace Blast
+} // namespace Nv
+
+
+#endif // ifndef NVBLASTEXTSTRESSSOLVER_H
diff --git a/NvBlast/sdk/extensions/physx/include/NvBlastExtSync.h b/NvBlast/sdk/extensions/physx/include/NvBlastExtSync.h
new file mode 100644
index 0000000..805378a
--- /dev/null
+++ b/NvBlast/sdk/extensions/physx/include/NvBlastExtSync.h
@@ -0,0 +1,213 @@
+/*
+* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#ifndef NVBLASTEXTSYNC_H
+#define NVBLASTEXTSYNC_H
+
+#include "NvBlastTk.h"
+#include "foundation/PxTransform.h"
+#include "foundation/PxAllocatorCallback.h"
+#include "NvPreprocessor.h"
+
+
+namespace Nv
+{
+namespace Blast
+{
+
+class ExtPxFamily;
+class ExtPxManager;
+
+
+/**
+Sync Event types
+*/
+struct ExtSyncEventType
+{
+	enum Enum
+	{
+		Fracture = 0,	//!<	Contains Fracture commands
+		FamilySync,		//!<	Contains a full family serialization blob
+		Physics,		//!<	Contains actor's physical info, like transforms
+
+		Count
+	};
+};
+
+
+/**
+Generic Sync Event
+*/
+struct NV_DLL_EXPORT ExtSyncEvent
+{
+ ExtSyncEvent(ExtSyncEventType::Enum t) : type(t) {}
+ virtual ~ExtSyncEvent() {}
+
+ template<class T>
+ const T* getEvent() const { return reinterpret_cast<const T*>(this); }
+
+ /**
+ Any Event can be copied (cloned).
+
+ \return the pointer to the new copy of event.
+ */
+ virtual ExtSyncEvent* clone() const = 0;
+
+ void release();
+
+ ExtSyncEventType::Enum type; //!< Event type
+ uint64_t timestamp; //!< Event timestamp
+ NvBlastID familyID; //!< TkFamily ID
+};
+
+
+/**
+Generic CRTP for Sync Events
+*/
+template <class T, ExtSyncEventType::Enum eventType>
+struct ExtSyncEventInstance : public ExtSyncEvent
+{
+ ExtSyncEventInstance() : ExtSyncEvent(eventType) {}
+
+ static const ExtSyncEventType::Enum EVENT_TYPE = eventType;
+
+ ExtSyncEvent* clone() const override
+ {
+ return new (NvBlastTkFrameworkGet()->getAllocatorCallback().allocate(sizeof(T), nullptr, __FILE__, __LINE__)) T(*(T*)this);
+ }
+};
+
+
+/**
+Fracture Sync Event
+*/
+struct ExtSyncEventFracture : public ExtSyncEventInstance<ExtSyncEventFracture, ExtSyncEventType::Fracture>
+{
+ std::vector<NvBlastBondFractureData> bondFractures; //!< bond fracture data
+ std::vector<NvBlastChunkFractureData> chunkFractures; //!< chunk fracture data
+};
+
+
+/**
+Family Sync Event
+*/
+struct ExtSyncEventFamilySync : public ExtSyncEventInstance<ExtSyncEventFamilySync, ExtSyncEventType::FamilySync>
+{
+ std::vector<char> family; //!< family binary blob
+};
+
+
+/**
+Physics Sync Event
+*/
+struct ExtSyncEventPhysicsSync : public ExtSyncEventInstance<ExtSyncEventPhysicsSync, ExtSyncEventType::Physics>
+{
+ struct ActorData
+ {
+ uint32_t actorIndex; //!< actor index in family
+ physx::PxTransform transform; //!< actor world transform
+ };
+
+ std::vector<ActorData> data; //!< actors data
+};
+
+
+/**
+Sync Manager.
+
+Implements TkEventListener interface. It can be directly subscribed to listen for family events.
+*/
+class NV_DLL_EXPORT ExtSync : public TkEventListener
+{
+public:
+	//////// creation ////////
+
+	/**
+	Create a new ExtSync.
+
+	\return the new ExtSync if successful, NULL otherwise.
+	*/
+	static ExtSync* create();
+
+
+	//////// common interface ////////
+
+	/**
+	Release Sync manager.
+	*/
+	virtual void release() = 0;
+
+
+	//////// server-side interface ////////
+
+	/**
+	TkEventListener interface.
+
+	\param[in]	events		The array of events being dispatched.
+	\param[in]	eventCount	The number of events in the array.
+	*/
+	virtual void receive(const TkEvent* events, uint32_t eventCount) = 0;
+
+	/**
+	Sync family state. Writes to internal sync buffer.
+
+	\param[in]	family	The TkFamily to sync
+	*/
+	virtual void syncFamily(const TkFamily& family) = 0;
+
+	/**
+	Sync PxFamily state. Writes to internal sync buffer.
+
+	\param[in]	family	The ExtPxFamily to sync
+	*/
+	virtual void syncFamily(const ExtPxFamily& family) = 0;
+
+	/**
+	The size of internal sync buffer (events count).
+
+	\return the number of events in internal sync buffer.
+	*/
+	virtual uint32_t getSyncBufferSize() const = 0;
+
+	/**
+	Acquire internal sync buffer.
+
+	\param[out]	buffer	Reference to sync event buffer pointer to be set.
+	\param[out]	size	Reference to the size of the buffer array to be set.
+	*/
+	virtual void acquireSyncBuffer(const ExtSyncEvent*const*& buffer, uint32_t& size) const = 0;
+
+	/**
+	Clear internal sync buffer.
+	*/
+	virtual void releaseSyncBuffer() = 0;
+
+
+	//////// client-side interface ////////
+
+	/**
+	Apply external sync buffer on TkFramework and possibly ExtPxManager. This function call will result in
+	respective families/actors changes in order to synchronize state.
+
+	\param[in]	framework			The TkFramework instance to be used.
+	\param[in]	buffer				Sync buffer array pointer.
+	\param[in]	size				Sync buffer array size.
+	\param[in]	groupForNewActors	TkGroup to be used for newly created actors. Can be nullptr.
+	\param[in]	manager				The ExtPxManager instance to be used. Can be nullptr, physics sync events will be ignored in that case.
+	*/
+	virtual void applySyncBuffer(TkFramework& framework, const ExtSyncEvent** buffer, uint32_t size, TkGroup* groupForNewActors, ExtPxManager* manager = nullptr) = 0;
+
+};
+
+} // namespace Blast
+} // namespace Nv
+
+
+#endif // ifndef NVBLASTEXTSYNC_H
diff --git a/NvBlast/sdk/extensions/physx/source/physics/NvBlastExtImpactDamageManager.cpp b/NvBlast/sdk/extensions/physx/source/physics/NvBlastExtImpactDamageManager.cpp
new file mode 100644
index 0000000..54d2696
--- /dev/null
+++ b/NvBlast/sdk/extensions/physx/source/physics/NvBlastExtImpactDamageManager.cpp
@@ -0,0 +1,448 @@
+/*
+* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#include "NvBlastExtImpactDamageManager.h"
+#include "NvBlastExtPxManager.h"
+#include "NvBlastExtPxFamily.h"
+#include "NvBlastExtPxActor.h"
+#include "NvBlastExtPxListener.h"
+
+#include "NvBlastAssert.h"
+
+#include "NvBlastExtDamageShaders.h"
+#include "NvBlastExtArray.h"
+#include "NvBlastExtDefs.h"
+
+#include "PxRigidDynamic.h"
+#include "PxSimulationEventCallback.h"
+#include "PxRigidBodyExt.h"
+
+#include "NvBlastTkFramework.h"
+#include "NvBlastTkActor.h"
+#include "NvBlastTkFamily.h"
+#include "NvBlastTkAsset.h"
+
+
+namespace Nv
+{
+namespace Blast
+{
+
+using namespace physx;
+
+// Squared relative-contact-velocity threshold below which no impact damage is generated
+// (see onContact()).  NOTE(review): value of 1.0 presumably assumes meter/second scene
+// units -- confirm against the scene's tolerance scale.
+const float MIN_IMPACT_VELOCITY_SQUARED = 1.0f;
+
+
+/**
+Implementation of ExtImpactDamageManager.
+
+Buffers impact damage events produced in onContact() and applies them to the
+corresponding Blast actors in applyDamage().  Subscribes to the ExtPxManager so
+that queued events referencing a destroyed actor are filtered out before use.
+*/
+class ExtImpactDamageManagerImpl final : public ExtImpactDamageManager
+{
+public:
+    /**
+    \param[in] pxManager Physics manager used to resolve PhysX actors to ExtPxActors.  Must not be nullptr.
+    \param[in] settings  Initial impact damage settings (may be changed later via setSettings()).
+    */
+    ExtImpactDamageManagerImpl(ExtPxManager* pxManager, ExtImpactSettings settings)
+        : m_pxManager(pxManager), m_settings(settings), m_listener(this), m_usePxUserData(false)
+    {
+        // Validate the pointer *before* the first dereference.  (Previously m_usePxUserData
+        // was initialized in the init list, dereferencing pxManager before this assert
+        // could ever fire.)
+        NVBLAST_ASSERT_WITH_MESSAGE(pxManager != nullptr, "ExtImpactDamageManager creation: input ExtPxManager is nullptr.");
+        m_usePxUserData = m_pxManager->isPxUserDataUsed();
+        m_pxManager->subscribe(m_listener);
+
+        // Modest initial capacity; the buffer grows on demand.
+        m_impactDamageBuffer.reserve(32);
+    }
+
+    ~ExtImpactDamageManagerImpl()
+    {
+        m_pxManager->unsubscribe(m_listener);
+    }
+
+    virtual void release() override
+    {
+        NVBLASTEXT_DELETE(this, ExtImpactDamageManagerImpl);
+    }
+
+
+    //////// interface ////////
+
+    virtual void setSettings(const ExtImpactSettings& settings) override
+    {
+        m_settings = settings;
+    }
+
+    virtual void onContact(const PxContactPairHeader& pairHeader, const PxContactPair* pairs, uint32_t nbPairs) override;
+
+    virtual void applyDamage() override;
+
+
+    //////// public methods ////////
+
+    /**
+    Queue one impact damage event for processing by the next applyDamage() call.
+    */
+    void queueImpactDamage(ExtPxActor* actor, PxVec3 force, PxVec3 position, PxShape* shape)
+    {
+        ImpactDamageData data = { actor, force, position, shape };
+        m_impactDamageBuffer.pushBack(data);
+    }
+
+
+private:
+    //////// physics manager listener ////////
+
+    /**
+    Listener that drops queued events referencing actors destroyed before applyDamage() runs.
+    */
+    class PxManagerListener : public ExtPxListener
+    {
+    public:
+        PxManagerListener(ExtImpactDamageManagerImpl* manager) : m_manager(manager) {}
+
+        virtual void onActorCreated(ExtPxFamily&, ExtPxActor&) override {}
+        virtual void onActorDestroyed(ExtPxFamily& family, ExtPxActor& actor) override
+        {
+            NV_UNUSED(family);
+
+            // Filter out the actor from the queued buffer.  replaceWithLast() is a
+            // swap-remove, so re-examine the same index after a removal.
+            auto& buffer = m_manager->m_impactDamageBuffer;
+            for (int32_t i = 0; i < (int32_t)buffer.size(); ++i)
+            {
+                if (buffer[i].actor == &actor)
+                {
+                    buffer.replaceWithLast(i);
+                    i--;
+                }
+            }
+        }
+    private:
+        ExtImpactDamageManagerImpl* m_manager;
+    };
+
+
+    //////// private methods ////////
+
+    // Grow shared fracture scratch buffers for the given actor's asset.
+    void ensureBuffersSize(ExtPxActor* actor);
+    // Apply shear damage at an actor-local position/force.
+    void damageActor(ExtPxActor* actor, PxShape* shape, PxVec3 position, PxVec3 force);
+
+
+    //////// data ////////
+
+    ExtPxManager* m_pxManager;
+    ExtImpactSettings m_settings;
+    PxManagerListener m_listener;
+    ExtArray<PxContactPairPoint>::type m_pairPointBuffer;   // scratch for PxContactPair::extractContacts()
+    bool m_usePxUserData;                                   // true if PxActor::userData holds the ExtPxActor
+
+    // One queued impact event, consumed by applyDamage().
+    struct ImpactDamageData
+    {
+        ExtPxActor* actor;
+        PxVec3 force;
+        PxVec3 position;
+        PxShape* shape;
+    };
+
+    ExtArray<ImpactDamageData>::type m_impactDamageBuffer;
+
+    NvBlastFractureBuffers m_fractureBuffers;               // views into m_fractureData, set up by ensureBuffersSize()
+    ExtArray<uint8_t>::type m_fractureData;
+};
+
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+// ExtImpactDamageManagerImpl
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+/**
+Factory for the ExtImpactDamageManager implementation.  Allocates through the
+extension's allocation macro; destroy the returned object with release().
+*/
+ExtImpactDamageManager* ExtImpactDamageManager::create(ExtPxManager* pxManager, ExtImpactSettings settings)
+{
+    return NVBLASTEXT_NEW(ExtImpactDamageManagerImpl) (pxManager, settings);
+}
+
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+// onContact callback call
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+/**
+Contact callback: resolves the Blast actors involved in each contact pair, computes an
+impact force from the relative contact-point velocities and the pair's reduced mass, and
+queues damage events for applyDamage().  No fracture happens here; this only fills
+m_impactDamageBuffer.
+*/
+void ExtImpactDamageManagerImpl::onContact(const PxContactPairHeader& pairHeader, const PxContactPair* pairs, uint32_t nbPairs)
+{
+    // Ignore pairs where either actor was removed from the scene during the step.
+    if (pairHeader.flags & physx::PxContactPairHeaderFlag::eREMOVED_ACTOR_0 ||
+        pairHeader.flags & physx::PxContactPairHeaderFlag::eREMOVED_ACTOR_1 ||
+        pairHeader.actors[0] == nullptr ||
+        pairHeader.actors[1] == nullptr)
+    {
+        return;
+    }
+
+    PxRigidActor* rigidActor0 = pairHeader.actors[0];
+    PxRigidActor* rigidActor1 = pairHeader.actors[1];
+
+    ExtPxActor* actors[2];
+
+    // Map PhysX actors to Blast actors: either directly via userData (fast path),
+    // or through the manager's lookup table.
+    if (m_usePxUserData)
+    {
+        actors[0] = (ExtPxActor*)rigidActor0->userData;
+        actors[1] = (ExtPxActor*)rigidActor1->userData;
+    }
+    else
+    {
+        PxRigidDynamic* rigidDynamic0 = rigidActor0->is<PxRigidDynamic>();
+        PxRigidDynamic* rigidDynamic1 = rigidActor1->is<PxRigidDynamic>();
+        actors[0] = rigidDynamic0 ? m_pxManager->getActorFromPhysXActor(*rigidDynamic0) : nullptr;
+        actors[1] = rigidDynamic1 ? m_pxManager->getActorFromPhysXActor(*rigidDynamic1) : nullptr;
+    }
+
+
+    // check one of them is blast actor
+    if (actors[0] == nullptr && actors[1] == nullptr)
+    {
+        return;
+    }
+
+    // self-collision check: skip contacts between actors of the same family unless enabled
+    if (actors[0] != nullptr && actors[1] != nullptr)
+    {
+        if (&actors[0]->getFamily() == &actors[1]->getFamily() && !m_settings.isSelfCollissionEnabled)
+            return;
+    }
+
+    for (uint32_t pairIdx = 0; pairIdx < nbPairs; pairIdx++)
+    {
+        const PxContactPair& currentPair = pairs[pairIdx];
+
+        // Skip pairs whose shapes were removed mid-step.
+        if (currentPair.flags & physx::PxContactPairFlag::eREMOVED_SHAPE_0 ||
+            currentPair.flags & physx::PxContactPairFlag::eREMOVED_SHAPE_1 ||
+            currentPair.shapes[0] == nullptr ||
+            currentPair.shapes[1] == nullptr)
+        {
+            continue;
+        }
+
+        // Gather masses; 0 marks a static or kinematic actor (treated as immovable below).
+        float masses[2] = { 0, 0 };
+        {
+            for (int i = 0; i < 2; ++i)
+            {
+                PxRigidDynamic* rigidDynamic = pairHeader.actors[i]->is<physx::PxRigidDynamic>();
+                if (rigidDynamic)
+                {
+                    if (!(rigidDynamic->getRigidBodyFlags() & physx::PxRigidBodyFlag::eKINEMATIC))
+                    {
+                        masses[i] = rigidDynamic->getMass();
+                    }
+                }
+            }
+        };
+
+        // Reduced (effective) mass of the pair; if one body is immovable use the other's mass.
+        float reducedMass;
+        if (masses[0] == 0.0f)
+        {
+            reducedMass = masses[1];
+        }
+        else if (masses[1] == 0.0f)
+        {
+            reducedMass = masses[0];
+        }
+        else
+        {
+            reducedMass = masses[0] * masses[1] / (masses[0] + masses[1]);
+        }
+
+
+        PxVec3 destructibleForces[2] = { PxVec3(0.0f), PxVec3(0.0f) };
+        PxVec3 avgContactPosition = PxVec3(0.0f);
+        PxVec3 avgContactNormal = PxVec3(0.0f);
+        uint32_t numContacts = 0;
+
+        // Extract contact points into the reusable scratch buffer.
+        m_pairPointBuffer.resize(currentPair.contactCount);
+        uint32_t numContactsInStream = currentPair.contactCount > 0 ? currentPair.extractContacts(m_pairPointBuffer.begin(), currentPair.contactCount) : 0;
+
+        for (uint32_t contactIdx = 0; contactIdx < numContactsInStream; contactIdx++)
+        {
+            PxContactPairPoint& currentPoint = m_pairPointBuffer[contactIdx];
+
+            const PxVec3& patchNormal = currentPoint.normal;
+            const PxVec3& position = currentPoint.position;
+            // Point velocities of both bodies at the contact position.
+            PxVec3 velocities[2] = { PxVec3(0.0f), PxVec3(0.0f) };
+            for (int i = 0; i < 2; ++i)
+            {
+                PxRigidBody* rigidBody = pairHeader.actors[i]->is<physx::PxRigidBody>();
+                if (rigidBody)
+                {
+                    velocities[i] = physx::PxRigidBodyExt::getVelocityAtPos(*rigidBody, position);
+                }
+            }
+
+            const PxVec3 velocityDelta = velocities[0] - velocities[1];
+            if (velocityDelta.magnitudeSquared() >= MIN_IMPACT_VELOCITY_SQUARED || reducedMass == 0.0f) // If reduced mass == 0, this is kinematic vs. kinematic. Generate damage.
+            {
+                for (int i = 0; i < 2; ++i)
+                {
+                    if (actors[i])
+                    {
+                        // this is not really physically correct, but at least it's deterministic...
+                        destructibleForces[i] += (patchNormal * patchNormal.dot(velocityDelta)) * reducedMass * (i ? 1.0f : -1.0f);
+                    }
+                }
+                avgContactPosition += position;
+                avgContactNormal += patchNormal;
+                numContacts++;
+            }
+        }
+
+        if (numContacts)
+        {
+            // Average the accumulated contact data and queue one event per Blast actor.
+            avgContactPosition /= (float)numContacts;
+            avgContactNormal.normalize();
+            for (uint32_t i = 0; i < 2; i++)
+            {
+                const PxVec3 force = destructibleForces[i] / (float)numContacts;
+                ExtPxActor* actor = actors[i];
+                if (actor != nullptr)
+                {
+                    if (!force.isZero())
+                    {
+                        queueImpactDamage(actor, force, avgContactPosition, currentPair.shapes[i]);
+                    }
+                    else if (reducedMass == 0.0f) // Handle kinematic vs. kinematic
+                    {
+                        // TODO(review): kinematic-vs-kinematic impacts reach here with zero
+                        // force and are currently dropped -- no damage event is queued.
+                    }
+                }
+            }
+        }
+    }
+}
+
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+// ExtImpactDamageManager damage processing
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+/**
+Linear interpolation between 'from' and 'to' with the parameter clamped to [0, 1],
+so the result never overshoots either endpoint.
+*/
+float clampedLerp(float from, float to, float t)
+{
+    const float s = PxClamp(t, 0.0f, 1.0f);
+    return (1 - s) * from + to * s;
+}
+
+/**
+Process all queued impact events.
+
+For each event: compute an acceleration-based damage factor, optionally defer to the
+user-supplied damage callback, and otherwise apply built-in shear damage via
+damageActor().  The queue is cleared afterwards.
+*/
+void ExtImpactDamageManagerImpl::applyDamage()
+{
+    const auto damageFn = m_settings.damageFunction;
+    const auto damageFnData = m_settings.damageFunctionData;
+
+    for (const ImpactDamageData& data : m_impactDamageBuffer)
+    {
+        float forceMag = data.force.magnitude();
+        float acceleration = forceMag / data.actor->getPhysXActor().getMass();
+        // NOTE(review): 0.001f and the 0.05f cutoff below are empirical tuning constants.
+        float factor = acceleration * m_settings.fragility * 0.001f;
+        if (factor > 0.05f)
+        {
+            // Transform impact data into the actor's local frame for the damage shader.
+            PxTransform t(data.actor->getPhysXActor().getGlobalPose().getInverse());
+            PxVec3 force = t.rotate(data.force);
+            PxVec3 position = t.transform(data.position);
+
+            // The user callback may fully handle the damage (returns true); otherwise
+            // fall back to the built-in shear damage with an empirical force scale.
+            if (!damageFn || !damageFn(damageFnData, data.actor, data.shape, position, force))
+            {
+                damageActor(data.actor, data.shape, position, force*.00001f);
+            }
+        }
+    }
+    m_impactDamageBuffer.clear();
+}
+
+/**
+Size the shared fracture scratch buffers to fit the given actor's asset.
+
+m_fractureData layout: [chunkCount * NvBlastChunkFractureData][bondCount * NvBlastBondFractureData];
+m_fractureBuffers holds typed views into it.
+*/
+void ExtImpactDamageManagerImpl::ensureBuffersSize(ExtPxActor* actor)
+{
+    const TkAsset* tkAsset = actor->getTkActor().getAsset();
+    const uint32_t chunkCount = tkAsset->getChunkCount();
+    const uint32_t bondCount = tkAsset->getBondCount();
+
+    m_fractureBuffers.bondFractureCount = bondCount;
+    m_fractureBuffers.chunkFractureCount = chunkCount;
+    m_fractureData.resize((uint32_t)(m_fractureBuffers.bondFractureCount*sizeof(NvBlastBondFractureData) + m_fractureBuffers.chunkFractureCount*sizeof(NvBlastChunkFractureData))); // chunk count + bond count
+    m_fractureBuffers.chunkFractures = reinterpret_cast<NvBlastChunkFractureData*>(m_fractureData.begin());
+    // Bond fracture data starts immediately after the chunk fracture data.
+    m_fractureBuffers.bondFractures = reinterpret_cast<NvBlastBondFractureData*>(&m_fractureData.begin()[m_fractureBuffers.chunkFractureCount*sizeof(NvBlastChunkFractureData)]);
+}
+
+/**
+Apply shear damage to an actor at an actor-local position with an actor-local force.
+
+Builds a single NvBlastExtShearDamageDesc, runs the shear damage shaders through the
+actor's TkActor, and immediately applies the resulting fracture commands.
+*/
+void ExtImpactDamageManagerImpl::damageActor(ExtPxActor* actor, PxShape* /*shape*/, PxVec3 position, PxVec3 force)
+{
+    ensureBuffersSize(actor);
+
+    NvBlastExtShearDamageDesc damage[] = {
+        {
+            { force[0], force[1], force[2] }, // shear
+            { position[0], position[1], position[2] } // position
+        }
+    };
+
+    // A family-level material overrides the default material below, if present.
+    const void* familyMaterial = actor->getTkActor().getFamily().getMaterial();
+
+    // default material params settings
+    const NvBlastExtMaterial defaultMaterial = { 3.0f, 0.1f, 0.2f, 1.5f + 1e-5f, 0.95f };
+
+    NvBlastProgramParams programParams;
+    programParams.damageDescCount = 1;
+    programParams.damageDescBuffer = &damage;
+    programParams.material = familyMaterial == nullptr ? &defaultMaterial : familyMaterial;
+
+    NvBlastDamageProgram program = {
+        NvBlastExtShearGraphShader,
+        NvBlastExtShearSubgraphShader
+    };
+
+    // Work on a local copy of the buffer views; m_fractureBuffers keeps the full capacities.
+    NvBlastFractureBuffers fractureEvents = m_fractureBuffers;
+
+    actor->getTkActor().generateFracture(&fractureEvents, program, &programParams);
+    actor->getTkActor().applyFracture(nullptr, &fractureEvents);
+}
+
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+// Filter Shader
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+/**
+Simulation filter shader for scenes using ExtImpactDamageManager.
+
+Lets triggers through, suppresses kinematic-vs-static pairs, applies a group mask
+using filter data words 0/1, and requests contact-point and force-threshold
+notifications unless either shape carries the ExtPxManager::LEAF_CHUNK flag in word3.
+
+\param[in]  attributes0       Filter attributes of the first object.
+\param[in]  filterData0       Filter data of the first object (word0/word1: group masks, word3: Blast flags).
+\param[in]  attributes1       Filter attributes of the second object.
+\param[in]  filterData1       Filter data of the second object.
+\param[out] pairFlags         Receives the pair flags for accepted pairs.
+\param[in]  constantBlock     Unused.
+\param[in]  constantBlockSize Unused.
+*/
+PxFilterFlags ExtImpactDamageManager::FilterShader(
+    PxFilterObjectAttributes attributes0,
+    PxFilterData filterData0,
+    PxFilterObjectAttributes attributes1,
+    PxFilterData filterData1,
+    PxPairFlags& pairFlags,
+    const void* constantBlock,
+    uint32_t constantBlockSize)
+{
+    PX_UNUSED(constantBlock);
+    PX_UNUSED(constantBlockSize);
+    // let triggers through
+    if (PxFilterObjectIsTrigger(attributes0) || PxFilterObjectIsTrigger(attributes1))
+    {
+        pairFlags = PxPairFlag::eTRIGGER_DEFAULT;
+        return PxFilterFlags();
+    }
+
+    // suppress kinematic-vs-static pairs entirely
+    if ((PxFilterObjectIsKinematic(attributes0) || PxFilterObjectIsKinematic(attributes1)) &&
+        (PxGetFilterObjectType(attributes0) == PxFilterObjectType::eRIGID_STATIC || PxGetFilterObjectType(attributes1) == PxFilterObjectType::eRIGID_STATIC))
+    {
+        return PxFilterFlag::eSUPPRESS;
+    }
+
+    // use a group-based mechanism if the first two filter data words are not 0
+    uint32_t f0 = filterData0.word0 | filterData0.word1;
+    uint32_t f1 = filterData1.word0 | filterData1.word1;
+    if (f0 && f1 && !(filterData0.word0&filterData1.word1 || filterData1.word0&filterData0.word1))
+        return PxFilterFlag::eSUPPRESS;
+
+    // determine if we should suppress notification (leaf chunks don't report contacts)
+    const bool suppressNotify = ((filterData0.word3 | filterData1.word3) & ExtPxManager::LEAF_CHUNK) != 0;
+
+    pairFlags = PxPairFlag::eCONTACT_DEFAULT;
+    if (!suppressNotify)
+    {
+        pairFlags = pairFlags
+            | PxPairFlag::eNOTIFY_CONTACT_POINTS
+            | PxPairFlag::eNOTIFY_THRESHOLD_FORCE_PERSISTS
+            | PxPairFlag::eNOTIFY_THRESHOLD_FORCE_FOUND
+            | PxPairFlag::eNOTIFY_TOUCH_FOUND
+            | PxPairFlag::eNOTIFY_TOUCH_PERSISTS;
+    }
+
+    // eSOLVE_CONTACT is invalid with kinematic pairs
+    if (PxFilterObjectIsKinematic(attributes0) && PxFilterObjectIsKinematic(attributes1))
+    {
+        pairFlags &= ~PxPairFlag::eSOLVE_CONTACT;
+    }
+
+    return PxFilterFlags();
+}
+
+} // namespace Blast
+} // namespace Nv
diff --git a/NvBlast/sdk/extensions/physx/source/physics/NvBlastExtImpulseStressSolver.cpp b/NvBlast/sdk/extensions/physx/source/physics/NvBlastExtImpulseStressSolver.cpp
new file mode 100644
index 0000000..8329de5
--- /dev/null
+++ b/NvBlast/sdk/extensions/physx/source/physics/NvBlastExtImpulseStressSolver.cpp
@@ -0,0 +1,1312 @@
+/*
+* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#include "NvBlastExtImpulseStressSolver.h"
+#include "NvBlastExtPxAsset.h"
+#include "NvBlastExtPxFamily.h"
+#include "NvBlastExtPxActor.h"
+#include "NvBlastAssert.h"
+#include "NvBlastIndexFns.h"
+#include "NvBlastExtDefs.h"
+
+#include "NvBlastTkAsset.h"
+#include "NvBlastTkActor.h"
+#include "NvBlastTkFamily.h"
+
+#include "PxScene.h"
+#include "PxRigidDynamic.h"
+
+#include <PsVecMath.h>
+#include "PsFPU.h"
+
+#include <algorithm>
+#include <set>
+
+#define USE_SCALAR_IMPL 0
+#define WARM_START 1
+#define USE_PHYSX_CONVEX_DATA 1
+#define GRAPH_INTERGRIRY_CHECK 0
+
+
+namespace Nv
+{
+namespace Blast
+{
+
+using namespace physx;
+
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+// Solver
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+/**
+Sequential impulse velocity solver operating on a node/bond graph.
+
+Each node carries linear/angular velocity plus inverse mass and inverse inertia; each
+bond joins two nodes at an offset and accumulates the linear and angular impulses
+applied while iteratively driving the relative velocity at the bond toward zero.
+The accumulated bond impulses serve as the stress measure (see BondData::getStressHealth).
+
+(Only change from the original: removed a stray empty statement in getNodeCount().)
+*/
+class SequentialImpulseSolver
+{
+public:
+    PX_ALIGN_PREFIX(16)
+    struct BondData
+    {
+        physx::PxVec3 impulseLinear;    // accumulated linear impulse
+        uint32_t node0;
+        physx::PxVec3 impulseAngular;   // accumulated angular impulse
+        uint32_t node1;
+        physx::PxVec3 offset0;          // bond attachment offset relative to node0
+        float invOffsetSqrLength;       // 1 / |offset0|^2, precomputed in addBond()
+
+        // Weighted sum of accumulated impulse magnitudes; values > 1 mark an overstressed bond.
+        float getStressHealth(const ExtStressSolverSettings& settings) const
+        {
+            return (impulseLinear.magnitude() * settings.stressLinearFactor + impulseAngular.magnitude() * settings.stressAngularFactor);
+        }
+    }
+    PX_ALIGN_SUFFIX(16);
+
+    PX_ALIGN_PREFIX(16)
+    struct NodeData
+    {
+        physx::PxVec3 velocityLinear;
+        float invI;                     // inverse inertia (scalar)
+        physx::PxVec3 velocityAngular;
+        float invMass;
+    }
+    PX_ALIGN_SUFFIX(16);
+
+    SequentialImpulseSolver(uint32_t nodeCount, uint32_t maxBondCount)
+    {
+        m_nodesData.resize(nodeCount);
+        m_bondsData.reserve(maxBondCount);
+    }
+
+    NV_INLINE const NodeData& getNodeData(uint32_t node) const
+    {
+        return m_nodesData[node];
+    }
+
+    NV_INLINE const BondData& getBondData(uint32_t bond) const
+    {
+        return m_bondsData[bond];
+    }
+
+    NV_INLINE uint32_t getBondCount() const
+    {
+        return m_bondsData.size();
+    }
+
+    NV_INLINE uint32_t getNodeCount() const
+    {
+        return m_nodesData.size();
+    }
+
+    NV_INLINE void setNodeMassInfo(uint32_t node, float invMass, float invI)
+    {
+        m_nodesData[node].invMass = invMass;
+        m_nodesData[node].invI = invI;
+    }
+
+    // Zero all node velocities (impulse accumulators are handled in solveInit()).
+    NV_INLINE void initialize()
+    {
+        for (auto& node : m_nodesData)
+        {
+            node.velocityLinear = PxVec3(PxZero);
+            node.velocityAngular = PxVec3(PxZero);
+        }
+    }
+
+    NV_INLINE void setNodeVelocities(uint32_t node, const PxVec3& velocityLinear, const PxVec3& velocityAngular)
+    {
+        m_nodesData[node].velocityLinear = velocityLinear;
+        m_nodesData[node].velocityAngular = velocityAngular;
+    }
+
+    // Add a bond between node0 and node1; returns the new bond's index.
+    NV_INLINE uint32_t addBond(uint32_t node0, uint32_t node1, const PxVec3& offset)
+    {
+        const BondData data = {
+            PxVec3(PxZero),
+            node0,
+            PxVec3(PxZero),
+            node1,
+            offset,
+            1.0f / offset.magnitudeSquared()
+        };
+        m_bondsData.pushBack(data);
+        return m_bondsData.size() - 1;
+    }
+
+    // Swap-remove a bond; the bond previously at the back takes bondIndex.
+    NV_INLINE void replaceWithLast(uint32_t bondIndex)
+    {
+        m_bondsData.replaceWithLast(bondIndex);
+    }
+
+    NV_INLINE void reset(uint32_t nodeCount)
+    {
+        m_bondsData.clear();
+        m_nodesData.resize(nodeCount);
+    }
+
+    NV_INLINE void clearBonds()
+    {
+        m_bondsData.clear();
+    }
+
+    // Run the solver: warm-start (or zero) the accumulators, then relax iterationCount times.
+    void solve(uint32_t iterationCount, bool warmStart = false)
+    {
+        solveInit(warmStart);
+
+        for (uint32_t i = 0; i < iterationCount; ++i)
+        {
+            iterate();
+        }
+    }
+
+    // Sums of residual relative velocities over all bonds (convergence measure).
+    void calcError(float& linear, float& angular)
+    {
+        linear = 0.0f;
+        angular = 0.0f;
+        for (BondData& bond : m_bondsData)
+        {
+            NodeData* node0 = &m_nodesData[bond.node0];
+            NodeData* node1 = &m_nodesData[bond.node1];
+
+            const PxVec3 vA = node0->velocityLinear - node0->velocityAngular.cross(bond.offset0);
+            const PxVec3 vB = node1->velocityLinear + node1->velocityAngular.cross(bond.offset0);
+
+            const PxVec3 vErrorLinear = vA - vB;
+            const PxVec3 vErrorAngular = node0->velocityAngular - node1->velocityAngular;
+
+            linear += vErrorLinear.magnitude();
+            angular += vErrorAngular.magnitude();
+        }
+    }
+
+private:
+    // Apply previously accumulated impulses (warm start) or zero the accumulators.
+    void solveInit(bool warmStart = false)
+    {
+        if (warmStart)
+        {
+            for (BondData& bond : m_bondsData)
+            {
+                NodeData* node0 = &m_nodesData[bond.node0];
+                NodeData* node1 = &m_nodesData[bond.node1];
+
+                const PxVec3 velocityLinearCorr0 = bond.impulseLinear * node0->invMass;
+                const PxVec3 velocityLinearCorr1 = bond.impulseLinear * node1->invMass;
+
+                const PxVec3 velocityAngularCorr0 = bond.impulseAngular * node0->invI - bond.offset0.cross(velocityLinearCorr0) * bond.invOffsetSqrLength;
+                const PxVec3 velocityAngularCorr1 = bond.impulseAngular * node1->invI + bond.offset0.cross(velocityLinearCorr1) * bond.invOffsetSqrLength;
+
+                node0->velocityLinear += velocityLinearCorr0;
+                node1->velocityLinear -= velocityLinearCorr1;
+
+                node0->velocityAngular += velocityAngularCorr0;
+                node1->velocityAngular -= velocityAngularCorr1;
+            }
+        }
+        else
+        {
+            for (BondData& bond : m_bondsData)
+            {
+                bond.impulseLinear = PxVec3(PxZero);
+                bond.impulseAngular = PxVec3(PxZero);
+            }
+        }
+    }
+
+
+    // One relaxation pass over all bonds (scalar or SIMD path, chosen by USE_SCALAR_IMPL).
+    NV_INLINE void iterate()
+    {
+        using namespace physx::shdfnd::aos;
+
+        for (BondData& bond : m_bondsData)
+        {
+            NodeData* node0 = &m_nodesData[bond.node0];
+            NodeData* node1 = &m_nodesData[bond.node1];
+
+#if USE_SCALAR_IMPL
+            const PxVec3 vA = node0->velocityLinear - node0->velocityAngular.cross(bond.offset0);
+            const PxVec3 vB = node1->velocityLinear + node1->velocityAngular.cross(bond.offset0);
+
+            const PxVec3 vErrorLinear = vA - vB;
+            const PxVec3 vErrorAngular = node0->velocityAngular - node1->velocityAngular;
+
+            const float weightedMass = 1.0f / (node0->invMass + node1->invMass);
+            const float weightedInertia = 1.0f / (node0->invI + node1->invI);
+
+            const PxVec3 outImpulseLinear = -vErrorLinear * weightedMass * 0.5f;
+            const PxVec3 outImpulseAngular = -vErrorAngular * weightedInertia * 0.5f;
+
+            bond.impulseLinear += outImpulseLinear;
+            bond.impulseAngular += outImpulseAngular;
+
+            const PxVec3 velocityLinearCorr0 = outImpulseLinear * node0->invMass;
+            const PxVec3 velocityLinearCorr1 = outImpulseLinear * node1->invMass;
+
+            const PxVec3 velocityAngularCorr0 = outImpulseAngular * node0->invI - bond.offset0.cross(velocityLinearCorr0) * bond.invOffsetSqrLength;
+            const PxVec3 velocityAngularCorr1 = outImpulseAngular * node1->invI + bond.offset0.cross(velocityLinearCorr1) * bond.invOffsetSqrLength;
+
+            node0->velocityLinear += velocityLinearCorr0;
+            node1->velocityLinear -= velocityLinearCorr1;
+
+            node0->velocityAngular += velocityAngularCorr0;
+            node1->velocityAngular -= velocityAngularCorr1;
+#else
+            // SIMD path: same math as above using the PhysX foundation vector intrinsics.
+            const Vec3V velocityLinear0 = V3LoadUnsafeA(node0->velocityLinear);
+            const Vec3V velocityLinear1 = V3LoadUnsafeA(node1->velocityLinear);
+            const Vec3V velocityAngular0 = V3LoadUnsafeA(node0->velocityAngular);
+            const Vec3V velocityAngular1 = V3LoadUnsafeA(node1->velocityAngular);
+
+            const Vec3V offset = V3LoadUnsafeA(bond.offset0);
+            const Vec3V vA = V3Add(velocityLinear0, V3Neg(V3Cross(velocityAngular0, offset)));
+            const Vec3V vB = V3Add(velocityLinear1, V3Cross(velocityAngular1, offset));
+
+            const Vec3V vErrorLinear = V3Sub(vA, vB);
+            const Vec3V vErrorAngular = V3Sub(velocityAngular0, velocityAngular1);
+
+            const FloatV invM0 = FLoad(node0->invMass);
+            const FloatV invM1 = FLoad(node1->invMass);
+            const FloatV invI0 = FLoad(node0->invI);
+            const FloatV invI1 = FLoad(node1->invI);
+            const FloatV invOffsetSqrLength = FLoad(bond.invOffsetSqrLength);
+
+            const FloatV weightedMass = FLoad(-0.5f / (node0->invMass + node1->invMass));
+            const FloatV weightedInertia = FLoad(-0.5f / (node0->invI + node1->invI));
+
+            const Vec3V outImpulseLinear = V3Scale(vErrorLinear, weightedMass);
+            const Vec3V outImpulseAngular = V3Scale(vErrorAngular, weightedInertia);
+
+            V3StoreA(V3Add(V3LoadUnsafeA(bond.impulseLinear), outImpulseLinear), bond.impulseLinear);
+            V3StoreA(V3Add(V3LoadUnsafeA(bond.impulseAngular), outImpulseAngular), bond.impulseAngular);
+
+            const Vec3V velocityLinearCorr0 = V3Scale(outImpulseLinear, invM0);
+            const Vec3V velocityLinearCorr1 = V3Scale(outImpulseLinear, invM1);
+
+            const Vec3V velocityAngularCorr0 = V3Sub(V3Scale(outImpulseAngular, invI0), V3Scale(V3Cross(offset, velocityLinearCorr0), invOffsetSqrLength));
+            const Vec3V velocityAngularCorr1 = V3Add(V3Scale(outImpulseAngular, invI1), V3Scale(V3Cross(offset, velocityLinearCorr1), invOffsetSqrLength));
+
+            V3StoreA(V3Add(velocityLinear0, velocityLinearCorr0), node0->velocityLinear);
+            V3StoreA(V3Sub(velocityLinear1, velocityLinearCorr1), node1->velocityLinear);
+
+            V3StoreA(V3Add(velocityAngular0, velocityAngularCorr0), node0->velocityAngular);
+            V3StoreA(V3Sub(velocityAngular1, velocityAngularCorr1), node1->velocityAngular);
+#endif
+        }
+    }
+
+    shdfnd::Array<BondData, ExtAlignedAllocator<16>> m_bondsData;
+    shdfnd::Array<NodeData, ExtAlignedAllocator<16>> m_nodesData;
+};
+
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+// Graph Processor
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+#if GRAPH_INTERGRIRY_CHECK
+#define CHECK_GRAPH_INTEGRITY checkGraphIntegrity()
+#else
+#define CHECK_GRAPH_INTEGRITY ((void)0)
+#endif
+
+class SupportGraphProcessor
+{
+
+public:
+ struct BondData
+ {
+ uint32_t node0;
+ uint32_t node1;
+ uint32_t blastBondIndex;
+ };
+
+ struct NodeData
+ {
+ float mass;
+ float volume;
+ PxVec3 localPos;
+ bool isStatic;
+ uint32_t solverNode;
+ uint32_t neighborsCount;
+ PxVec3 impulse;
+ };
+
+ struct SolverNodeData
+ {
+ uint32_t supportNodesCount;
+ PxVec3 localPos;
+ union
+ {
+ float mass;
+ int32_t indexShift;
+ };
+ float volume;
+ bool isStatic;
+ };
+
+ struct SolverBondData
+ {
+ ExtInlineArray<uint32_t, 8>::type blastBondIndices;
+ };
+
+ SupportGraphProcessor(uint32_t nodeCount, uint32_t maxBondCount) : m_solver(nodeCount, maxBondCount), m_nodesDirty(true)
+ {
+ m_nodesData.resize(nodeCount);
+ m_bondsData.reserve(maxBondCount);
+
+ m_solverNodesData.resize(nodeCount);
+ m_solverBondsData.reserve(maxBondCount);
+
+ m_solverBondsMap.reserve(maxBondCount);
+
+ m_blastBondIndexMap.resize(maxBondCount);
+ memset(m_blastBondIndexMap.begin(), 0xFF, m_blastBondIndexMap.size() * sizeof(uint32_t));
+ }
+
+ NV_INLINE const NodeData& getNodeData(uint32_t node) const
+ {
+ return m_nodesData[node];
+ }
+
+ NV_INLINE const BondData& getBondData(uint32_t bond) const
+ {
+ return m_bondsData[bond];
+ }
+
+ NV_INLINE const SolverNodeData& getSolverNodeData(uint32_t node) const
+ {
+ return m_solverNodesData[node];
+ }
+
+ NV_INLINE const SolverBondData& getSolverBondData(uint32_t bond) const
+ {
+ return m_solverBondsData[bond];
+ }
+
+ NV_INLINE const SequentialImpulseSolver::BondData& getSolverInternalBondData(uint32_t bond) const
+ {
+ return m_solver.getBondData(bond);
+ }
+
+ NV_INLINE const SequentialImpulseSolver::NodeData& getSolverInternalNodeData(uint32_t node) const
+ {
+ return m_solver.getNodeData(node);
+ }
+
+ NV_INLINE uint32_t getBondCount() const
+ {
+ return m_bondsData.size();
+ }
+
+ NV_INLINE uint32_t getNodeCount() const
+ {
+ return m_nodesData.size();;
+ }
+
+ NV_INLINE uint32_t getSolverBondCount() const
+ {
+ return m_solverBondsData.size();
+ }
+
+ NV_INLINE uint32_t getSolverNodeCount() const
+ {
+ return m_solverNodesData.size();;
+ }
+
+ NV_INLINE void setNodeInfo(uint32_t node, float mass, float volume, PxVec3 localPos, bool isStatic)
+ {
+ m_nodesData[node].mass = mass;
+ m_nodesData[node].volume = volume;
+ m_nodesData[node].localPos = localPos;
+ m_nodesData[node].isStatic = isStatic;
+ }
+
+ NV_INLINE void setNodeNeighborsCount(uint32_t node, uint32_t neighborsCount)
+ {
+ // neighbors count is expected to be the number of nodes on 1 island/actor.
+ m_nodesData[node].neighborsCount = neighborsCount;
+
+ // check for too huge aggregates (happens after island's split)
+ if (!m_nodesDirty)
+ {
+ m_nodesDirty |= (m_solverNodesData[m_nodesData[node].solverNode].supportNodesCount > neighborsCount / 2);
+ }
+ }
+
+ NV_INLINE void initialize()
+ {
+ sync();
+
+ m_solver.initialize();
+
+ for (auto& node : m_nodesData)
+ {
+ node.impulse = PxVec3(PxZero);
+ }
+ }
+
+ NV_INLINE void addNodeImpulse(uint32_t node, const PxVec3& impulse)
+ {
+ m_nodesData[node].impulse += impulse;
+ }
+
+ NV_INLINE void addNodeVelocity(uint32_t node, const PxVec3& velocity)
+ {
+ PxVec3 impulse = velocity * m_nodesData[node].mass;
+ addNodeImpulse(node, impulse);
+ }
+
+ NV_INLINE void addBond(uint32_t node0, uint32_t node1, uint32_t blastBondIndex)
+ {
+ if (isInvalidIndex(m_blastBondIndexMap[blastBondIndex]))
+ {
+ const BondData data = {
+ node0,
+ node1,
+ blastBondIndex
+ };
+ m_bondsData.pushBack(data);
+ m_blastBondIndexMap[blastBondIndex] = m_bondsData.size() - 1;
+ }
+ }
+
+ NV_INLINE void removeBondIfExists(uint32_t blastBondIndex)
+ {
+ const uint32_t bondIndex = m_blastBondIndexMap[blastBondIndex];
+
+ if (!isInvalidIndex(bondIndex))
+ {
+ const BondData& bond = m_bondsData[bondIndex];
+ const uint32_t solverNode0 = m_nodesData[bond.node0].solverNode;
+ const uint32_t solverNode1 = m_nodesData[bond.node1].solverNode;
+ bool isBondInternal = (solverNode0 == solverNode1);
+
+ if (isBondInternal)
+ {
+ // internal bond sadly requires graph resync (it never happens on reduction level '0')
+ m_nodesDirty = true;
+ }
+ else if (!m_nodesDirty)
+ {
+ // otherwise it's external bond, we can remove it manually and keep graph synced
+ // we don't need to spend time there if (m_nodesDirty == true), graph will be resynced anyways
+
+ BondKey solverBondKey(solverNode0, solverNode1);
+ auto entry = m_solverBondsMap.find(solverBondKey);
+ if (entry)
+ {
+ const uint32_t solverBondIndex = entry->second;
+ auto& blastBondIndices = m_solverBondsData[solverBondIndex].blastBondIndices;
+ blastBondIndices.findAndReplaceWithLast(blastBondIndex);
+ if (blastBondIndices.empty())
+ {
+ // all bonds associated with this solver bond were removed, so let's remove solver bond
+
+ m_solverBondsData.replaceWithLast(solverBondIndex);
+ m_solver.replaceWithLast(solverBondIndex);
+ if (m_solver.getBondCount() > 0)
+ {
+ // update 'previously last' solver bond mapping
+ const auto& solverBond = m_solver.getBondData(solverBondIndex);
+ m_solverBondsMap[BondKey(solverBond.node0, solverBond.node1)] = solverBondIndex;
+ }
+
+ m_solverBondsMap.erase(solverBondKey);
+ }
+ }
+
+ CHECK_GRAPH_INTEGRITY;
+ }
+
+ // remove bond from graph processor's list
+ m_blastBondIndexMap[blastBondIndex] = invalidIndex<uint32_t>();
+ m_bondsData.replaceWithLast(bondIndex);
+ m_blastBondIndexMap[m_bondsData[bondIndex].blastBondIndex] = m_bondsData.size() > bondIndex ? bondIndex : invalidIndex<uint32_t>();
+ }
+ }
+
+ NV_INLINE void setGraphReductionLevel(uint32_t level)
+ {
+ m_graphReductionLevel = level;
+ m_nodesDirty = true;
+ }
+
+ uint32_t getGraphReductionLevel() const
+ {
+ return m_graphReductionLevel;
+ }
+
+ void solve(uint32_t iterationCount, bool warmStart = false)
+ {
+ CHECK_GRAPH_INTEGRITY;
+
+ for (const NodeData& node : m_nodesData)
+ {
+ const SequentialImpulseSolver::NodeData& solverNode = m_solver.getNodeData(node.solverNode);
+ m_solver.setNodeVelocities(node.solverNode, solverNode.velocityLinear + node.impulse * solverNode.invMass, PxVec3(PxZero));
+ }
+
+ m_solver.solve(iterationCount, warmStart);
+ }
+
+ void calcError(float& linear, float& angular)
+ {
+ m_solver.calcError(linear, angular);
+ }
+
+ void generateFracture(ExtArray<NvBlastBondFractureData>::type& bondFractureBuffer, const ExtStressSolverSettings& settings, const float* blastBondHealths)
+ {
+ CHECK_GRAPH_INTEGRITY;
+
+ for (uint32_t i = 0; i < m_solverBondsData.size(); ++i)
+ {
+ const SequentialImpulseSolver::BondData& solverInternalBond = m_solver.getBondData(i);
+ if (solverInternalBond.getStressHealth(settings) > 1.0f)
+ {
+ const auto& blastBondIndices = m_solverBondsData[i].blastBondIndices;
+ for (auto blastBondIndex : blastBondIndices)
+ {
+ const uint32_t bondIndex = m_blastBondIndexMap[blastBondIndex];
+ if (!isInvalidIndex(bondIndex))
+ {
+ const BondData& bond = m_bondsData[bondIndex];
+
+ NVBLAST_ASSERT(getNodeData(bond.node0).solverNode != getNodeData(bond.node1).solverNode);
+ NVBLAST_ASSERT(bond.blastBondIndex == blastBondIndex);
+
+ NvBlastBondFractureData data;
+ data.health = blastBondHealths[blastBondIndex];
+ data.nodeIndex0 = bond.node0;
+ data.nodeIndex1 = bond.node1;
+ bondFractureBuffer.pushBack(data);
+ }
+ }
+ }
+ }
+ }
+
+private:
+
+ NV_INLINE void sync()
+ {
+ if (m_nodesDirty)
+ {
+ syncNodes();
+ }
+ if (m_bondsDirty)
+ {
+ syncBonds();
+ }
+
+ CHECK_GRAPH_INTEGRITY;
+ }
+
    /**
    Rebuild the solver node set from the Blast nodes.

    Starts from a 1:1 Blast-node-to-solver-node mapping, then aggregates bonded
    nodes into larger solver nodes for m_graphReductionLevel passes (each pass
    doubles the allowed aggregate size). The sparse solver node array is then
    compacted, and aggregated mass, volume and centroid are recomputed.
    Ends by calling syncBonds(), since bonds reference solver node indices.
    */
    void syncNodes()
    {
        // init with 1<->1 blast nodes to solver nodes mapping
        m_solverNodesData.resize(m_nodesData.size());
        for (uint32_t i = 0; i < m_nodesData.size(); ++i)
        {
            m_nodesData[i].solverNode = i;
            m_solverNodesData[i].supportNodesCount = 1;
            m_solverNodesData[i].indexShift = 0;
        }

        // for static nodes aggregate size per graph reduction level is lower, it
        // falls behind on few levels. (can be made as parameter)
        const uint32_t STATIC_NODES_COUNT_PENALTY = 2 << 2;

        // reducing graph by aggregating nodes level by level
        for (uint32_t k = 0; k < m_graphReductionLevel; k++)
        {
            const uint32_t maxAggregateSize = 1 << (k + 1);

            for (const BondData& bond : m_bondsData)
            {
                NodeData& node0 = m_nodesData[bond.node0];
                NodeData& node1 = m_nodesData[bond.node1];

                // never merge static with non-static nodes
                if (node0.isStatic != node1.isStatic)
                    continue;

                // already in the same aggregate
                if (node0.solverNode == node1.solverNode)
                    continue;

                SolverNodeData& solverNode0 = m_solverNodesData[node0.solverNode];
                SolverNodeData& solverNode1 = m_solverNodesData[node1.solverNode];

                const int countPenalty = node0.isStatic ? STATIC_NODES_COUNT_PENALTY : 1;
                const uint32_t aggregateSize = std::min<uint32_t>(maxAggregateSize, node0.neighborsCount / 2);

                // skip if either aggregate already reached the size limit for this level
                if (solverNode0.supportNodesCount * countPenalty >= aggregateSize)
                    continue;
                if (solverNode1.supportNodesCount * countPenalty >= aggregateSize)
                    continue;

                // move one endpoint into the other's (not smaller) aggregate
                if (solverNode0.supportNodesCount >= solverNode1.supportNodesCount)
                {
                    solverNode1.supportNodesCount--;
                    solverNode0.supportNodesCount++;
                    node1.solverNode = node0.solverNode;
                }
                else if (solverNode1.supportNodesCount >= solverNode0.supportNodesCount)
                {
                    solverNode1.supportNodesCount++;
                    solverNode0.supportNodesCount--;
                    node0.solverNode = node1.solverNode;
                }
            }
        }

        // Solver Nodes now sparse, a lot of empty ones. Rearrange them by moving all non-empty to the front
        // 2 passes used for that
        {
            uint32_t currentNode = 0;
            for (; currentNode < m_solverNodesData.size(); ++currentNode)
            {
                if (m_solverNodesData[currentNode].supportNodesCount > 0)
                    continue;

                // 'currentNode' is free

                // search next occupied node
                uint32_t k = currentNode + 1;
                for (; k < m_solverNodesData.size(); ++k)
                {
                    if (m_solverNodesData[k].supportNodesCount > 0)
                    {
                        // replace currentNode and keep indexShift
                        m_solverNodesData[currentNode].supportNodesCount = m_solverNodesData[k].supportNodesCount;
                        m_solverNodesData[k].indexShift = k - currentNode;
                        m_solverNodesData[k].supportNodesCount = 0;
                        break;
                    }
                }

                if (k == m_solverNodesData.size())
                {
                    break;
                }
            }
            // second pass: redirect every Blast node to its solver node's compacted index
            for (auto& node : m_nodesData)
            {
                node.solverNode -= m_solverNodesData[node.solverNode].indexShift;
            }

            // now, we know total solver nodes count and which nodes are aggregated into them
            m_solverNodesData.resize(currentNode);
        }


        // calculate all needed data
        for (SolverNodeData& solverNode : m_solverNodesData)
        {
            solverNode.supportNodesCount = 0;
            solverNode.localPos = PxVec3(PxZero);
            solverNode.mass = 0.0f;
            solverNode.volume = 0.0f;
            solverNode.isStatic = false;
        }

        // accumulate per-aggregate totals from the Blast nodes
        for (NodeData& node : m_nodesData)
        {
            SolverNodeData& solverNode = m_solverNodesData[node.solverNode];
            solverNode.supportNodesCount++;
            solverNode.localPos += node.localPos;
            solverNode.mass += node.mass;
            solverNode.volume += node.volume;
            solverNode.isStatic |= node.isStatic;
        }

        // centroid = average of aggregated node positions
        for (SolverNodeData& solverNode : m_solverNodesData)
        {
            solverNode.localPos /= (float)solverNode.supportNodesCount;
        }

        // feed the impulse solver with per-node inverse mass/inertia (static nodes get 0)
        m_solver.reset(m_solverNodesData.size());
        for (uint32_t nodeIndex = 0; nodeIndex < m_solverNodesData.size(); ++nodeIndex)
        {
            const SolverNodeData& solverNode = m_solverNodesData[nodeIndex];

            const float invMass = solverNode.isStatic ? 0.0f : 1.0f / solverNode.mass;
            const float R = PxPow(solverNode.volume * 3.0f * PxInvPi / 4.0f, 1.0f / 3.0f); // sphere volume approximation
            const float invI = invMass / (R * R * 0.4f); // sphere inertia tensor approximation: I = 2/5 * M * R^2 ; invI = 1 / I;
            m_solver.setNodeMassInfo(nodeIndex, invMass, invI);
        }

        m_nodesDirty = false;

        syncBonds();
    }
+
    /**
    Rebuild the solver bond set from the Blast bonds.

    Blast bonds connecting the same pair of solver nodes are aggregated into a
    single solver bond; internal bonds (both endpoints in the same solver node)
    and static-static bonds are skipped.
    */
    void syncBonds()
    {
        // traverse all blast bonds and aggregate
        m_solver.clearBonds();
        m_solverBondsMap.clear();
        m_solverBondsData.clear();
        for (const BondData& bond : m_bondsData)
        {
            const NodeData& node0 = m_nodesData[bond.node0];
            const NodeData& node1 = m_nodesData[bond.node1];

            if (node0.solverNode == node1.solverNode)
                continue; // skip (internal)

            // bonds between two static nodes carry no solvable stress
            if (node0.isStatic && node1.isStatic)
                continue;

            BondKey key(node0.solverNode, node1.solverNode);
            auto entry = m_solverBondsMap.find(key);
            SolverBondData* data;
            if (!entry)
            {
                // first Blast bond between this pair of solver nodes: create a solver bond
                // anchored halfway between the two solver node centroids
                m_solverBondsData.pushBack(SolverBondData());
                data = &m_solverBondsData.back();
                m_solverBondsMap[key] = m_solverBondsData.size() - 1;

                SolverNodeData& solverNode0 = m_solverNodesData[node0.solverNode];
                SolverNodeData& solverNode1 = m_solverNodesData[node1.solverNode];
                m_solver.addBond(node0.solverNode, node1.solverNode, (solverNode1.localPos - solverNode0.localPos) * 0.5f);
            }
            else
            {
                data = &m_solverBondsData[entry->second];
            }
            data->blastBondIndices.pushBack(bond.blastBondIndex);
        }

        m_bondsDirty = false;
    }
+
#if GRAPH_INTERGRIRY_CHECK
    /**
    Debug-only invariant check of the node/bond bookkeeping:
    - solver structures and their mirror arrays agree in size
    - solver bonds are unique per node pair, present in m_solverBondsMap, and
      consistent with the Blast bonds they aggregate
    - no solver bond connects a solver node to itself
    - m_blastBondIndexMap and m_bondsData are mutually consistent.

    NOTE(review): the guard macro name carries a typo ("INTERGRIRY"); it must
    match the definition site, so it is intentionally left as-is here.
    */
    void checkGraphIntegrity()
    {
        NVBLAST_ASSERT(m_solver.getBondCount() == m_solverBondsData.size());
        NVBLAST_ASSERT(m_solver.getNodeCount() == m_solverNodesData.size());

        // every solver bond: unique key, found in the map, matching back-references
        std::set<uint64_t> solverBonds;
        for (uint32_t i = 0; i < m_solverBondsData.size(); ++i)
        {
            const auto& bondData = m_solver.getBondData(i);
            BondKey key(bondData.node0, bondData.node1);
            NVBLAST_ASSERT(solverBonds.find(key) == solverBonds.end());
            solverBonds.emplace(key);
            auto entry = m_solverBondsMap.find(key);
            NVBLAST_ASSERT(entry != nullptr);
            const auto& solverBond = m_solverBondsData[entry->second];
            for (auto& blastBondIndex : solverBond.blastBondIndices)
            {
                if (!isInvalidIndex(m_blastBondIndexMap[blastBondIndex]))
                {
                    auto& b = m_bondsData[m_blastBondIndexMap[blastBondIndex]];
                    BondKey key2(m_nodesData[b.node0].solverNode, m_nodesData[b.node1].solverNode);
                    NVBLAST_ASSERT(key2 == key);
                }
            }
        }

        // no solver bond may be internal to a single solver node
        for (auto& solverBond : m_solverBondsData)
        {
            for (auto& blastBondIndex : solverBond.blastBondIndices)
            {
                if (!isInvalidIndex(m_blastBondIndexMap[blastBondIndex]))
                {
                    auto& b = m_bondsData[m_blastBondIndexMap[blastBondIndex]];
                    NVBLAST_ASSERT(m_nodesData[b.node0].solverNode != m_nodesData[b.node1].solverNode);
                }
            }
        }
        // the blast-bond index map and m_bondsData must reference each other exactly
        uint32_t mappedBondCount = 0;
        for (uint32_t i = 0; i < m_blastBondIndexMap.size(); i++)
        {
            const auto& bondIndex = m_blastBondIndexMap[i];
            if (!isInvalidIndex(bondIndex))
            {
                mappedBondCount++;
                NVBLAST_ASSERT(m_bondsData[bondIndex].blastBondIndex == i);
            }
        }
        NVBLAST_ASSERT(m_bondsData.size() == mappedBondCount);
    }
#endif
+
+ struct BondKey
+ {
+ uint32_t node0;
+ uint32_t node1;
+
+ BondKey(uint32_t n0, uint32_t n1)
+ {
+ node0 = n0 < n1 ? n0 : n1;
+ node1 = n0 < n1 ? n1 : n0;
+ }
+
+ operator uint64_t() const
+ {
+ return static_cast<uint64_t>(node0) + (static_cast<uint64_t>(node1) << 32);
+ }
+ };
+
    SequentialImpulseSolver m_solver;                       // impulse solver operating on the reduced graph
    ExtArray<SolverNodeData>::type m_solverNodesData;       // per-solver-node aggregated data (rebuilt in syncNodes)
    ExtArray<SolverBondData>::type m_solverBondsData;       // per-solver-bond data: the Blast bond indices it aggregates

    uint32_t m_graphReductionLevel;                         // number of aggregation passes applied in syncNodes()

    bool m_nodesDirty;                                      // nodes changed; cleared by syncNodes()
    bool m_bondsDirty;                                      // bonds changed; cleared by syncBonds()

    ExtHashMap<BondKey, uint32_t>::type m_solverBondsMap;   // (solver node pair) -> index into m_solverBondsData

    ExtArray<uint32_t>::type m_blastBondIndexMap;           // Blast bond index -> index into m_bondsData (or invalid)

    ExtArray<BondData>::type m_bondsData;                   // live Blast bonds tracked by the processor
    ExtArray<NodeData>::type m_nodesData;                   // per-Blast-node data, including solver node assignment
+};
+
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+// ExtImpulseStressSolver
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+// Creation
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+ExtImpulseStressSolver::ExtImpulseStressSolver(ExtPxFamily& family, ExtStressSolverSettings settings)
+ : m_family(family), m_settings(settings), m_isDirty(false), m_reset(false),
+ m_errorAngular(std::numeric_limits<float>::max()), m_errorLinear(std::numeric_limits<float>::max()), m_framesCount(0)
+{
+
+ const TkAsset* tkAsset = m_family.getTkFamily().getAsset();
+ const ExtPxAsset& asset = m_family.getPxAsset();
+ const ExtPxChunk* chunks = asset.getChunks();
+ const ExtPxSubchunk* subChunks = asset.getSubchunks();
+ m_graph = tkAsset->getGraph();
+ const uint32_t bondCount = tkAsset->getBondCount();
+
+ TkActor* tkActor;
+ m_family.getTkFamily().getActors(&tkActor, 1);
+ m_bondHealths = tkActor->getBondHealths();
+
+ m_graphProcessor = NVBLASTEXT_NEW(SupportGraphProcessor)(m_graph.nodeCount, bondCount);
+
+ // traverse graph and fill node info
+ for (uint32_t i = 0; i < m_graph.nodeCount; ++i)
+ {
+ uint32_t node0 = i;
+ uint32_t chunkIndex0 = m_graph.chunkIndices[node0];
+ const ExtPxChunk& chunk0 = chunks[chunkIndex0];
+
+ bool isChunkStatic = chunk0.isStatic;
+
+ for (uint32_t adjacencyIndex = m_graph.adjacencyPartition[node0]; adjacencyIndex < m_graph.adjacencyPartition[node0 + 1]; adjacencyIndex++)
+ {
+ uint32_t bondIndex = m_graph.adjacentBondIndices[adjacencyIndex];
+ if (m_bondHealths[bondIndex] <= 0.0f)
+ continue;
+ uint32_t node1 = m_graph.adjacentNodeIndices[adjacencyIndex];
+ uint32_t chunkIndex1 = m_graph.chunkIndices[node1];
+ const ExtPxChunk& chunk1 = chunks[chunkIndex1];
+
+ if (chunk1.subchunkCount == 0 || chunk1.isStatic)
+ {
+ isChunkStatic |= chunk1.isStatic;
+ continue;
+ }
+ }
+
+ // fill node info
+
+ float mass;
+ float volume;
+ PxVec3 localPos;
+ if (chunk0.subchunkCount > 0)
+ {
+#if USE_PHYSX_CONVEX_DATA
+ const ExtPxSubchunk& subChunk = subChunks[chunk0.firstSubchunkIndex];
+ PxVec3 localCenterOfMass;
+ PxMat33 intertia;
+ PxVec3 scale = subChunk.geometry.scale.scale;
+ subChunk.geometry.convexMesh->getMassInformation(mass, intertia, localCenterOfMass);
+ mass *= scale.x * scale.y * scale.z;
+ const PxTransform& chunk0LocalTransform = subChunk.transform;
+ localPos = chunk0LocalTransform.transform(localCenterOfMass);
+ volume = mass / 1.0f; // unit density
+#else
+ volume = solverChunk0.volume;
+ mass = volume * 1.0f; // density
+ localPos = *reinterpret_cast<const PxVec3*>(solverChunk0.centroid);
+#endif
+ }
+ else
+ {
+ mass = 0.0f;
+ volume = 0.0f;
+ localPos = PxVec3(PxZero);
+ isChunkStatic = true;
+ }
+ m_graphProcessor->setNodeInfo(node0, mass, volume, localPos, isChunkStatic);
+ }
+
+ // traverse graph and fill bond info
+ for (uint32_t node0 = 0; node0 < m_graph.nodeCount; ++node0)
+ {
+ for (uint32_t adjacencyIndex = m_graph.adjacencyPartition[node0]; adjacencyIndex < m_graph.adjacencyPartition[node0 + 1]; adjacencyIndex++)
+ {
+ uint32_t bondIndex = m_graph.adjacentBondIndices[adjacencyIndex];
+ if (m_bondHealths[bondIndex] <= 0.0f)
+ continue;
+ uint32_t node1 = m_graph.adjacentNodeIndices[adjacencyIndex];
+
+ if (node0 < node1)
+ {
+ m_graphProcessor->addBond(node0, node1, bondIndex);
+ }
+ }
+ }
+
+ // fire initial actor's created
+ ExtInlineArray<ExtPxActor*, 4>::type actors;;
+ actors.resize((uint32_t)m_family.getActorCount());
+ m_family.getActors(actors.begin(), actors.size());
+ for (const auto actor : actors)
+ {
+ onActorCreated(m_family, *actor);
+ }
+
+ m_family.subscribe(*this);
+}
+
+ExtImpulseStressSolver::~ExtImpulseStressSolver()
+{
+ NVBLASTEXT_DELETE(m_graphProcessor, SupportGraphProcessor);
+ m_family.unsubscribe(*this);
+}
+
+ExtStressSolver* ExtStressSolver::create(ExtPxFamily& family, ExtStressSolverSettings settings)
+{
+ return NVBLASTEXT_NEW(ExtImpulseStressSolver) (family, settings);
+}
+
/**
Self-destruction through the SDK allocator (pairs with NVBLASTEXT_NEW in create()).
*/
void ExtImpulseStressSolver::release()
{
    NVBLASTEXT_DELETE(this, ExtImpulseStressSolver);
}
+
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+// Actors
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+void ExtImpulseStressSolver::onActorCreated(ExtPxFamily& /*family*/, ExtPxActor& actor)
+{
+ if (actor.getTkActor().getGraphNodeCount() > 1)
+ {
+ // update neighbors
+ {
+ const uint32_t graphNodeCount = actor.getTkActor().getGraphNodeCount();
+ uint32_t* graphNodeIndices = getScratchArray<uint32_t>(graphNodeCount);
+ actor.getTkActor().getGraphNodeIndices(graphNodeIndices, graphNodeCount);
+ for (uint32_t i = 0; i < graphNodeCount; ++i)
+ {
+ m_graphProcessor->setNodeNeighborsCount(graphNodeIndices[i], graphNodeCount);
+ }
+ }
+
+ m_actors.insert(&actor);
+ m_isDirty = true;
+ }
+}
+
+void ExtImpulseStressSolver::onActorDestroyed(ExtPxFamily& /*family*/, ExtPxActor& actor)
+{
+ if (m_actors.erase(&actor))
+ {
+ m_isDirty = true;
+ }
+}
+
+void ExtImpulseStressSolver::syncSolver()
+{
+ // traverse graph and remove dead bonds
+ for (uint32_t node0 = 0; node0 < m_graph.nodeCount; ++node0)
+ {
+ for (uint32_t adjacencyIndex = m_graph.adjacencyPartition[node0]; adjacencyIndex < m_graph.adjacencyPartition[node0 + 1]; adjacencyIndex++)
+ {
+ uint32_t node1 = m_graph.adjacentNodeIndices[adjacencyIndex];
+ if (node0 < node1)
+ {
+ uint32_t bondIndex = m_graph.adjacentBondIndices[adjacencyIndex];
+
+ if (m_bondHealths[bondIndex] <= 0.0f)
+ {
+ m_graphProcessor->removeBondIfExists(bondIndex);
+ }
+ }
+ }
+ }
+
+ m_isDirty = false;
+}
+
+
/**
Prepare the graph processor for this frame's solve:
- clears the frame counter after reset()
- removes dead bonds if the family changed (syncSolver)
- applies a changed graph reduction level
- feeds per-node accelerations (gravity for kinematic actors, centrifugal
  terms for dynamic ones) and buffered user impulses into the graph processor.
*/
void ExtImpulseStressSolver::initialize()
{
    if (m_reset)
    {
        m_framesCount = 0;
    }

    if (m_isDirty)
    {
        syncSolver();
    }

    if (m_settings.graphReductionLevel != m_graphProcessor->getGraphReductionLevel())
    {
        m_graphProcessor->setGraphReductionLevel(m_settings.graphReductionLevel);
    }

    m_graphProcessor->initialize();

    for (auto it = m_actors.getIterator(); !it.done(); ++it)
    {
        const ExtPxActor* actor = *it;
        const uint32_t graphNodeCount = actor->getTkActor().getGraphNodeCount();
        uint32_t* graphNodeIndices = getScratchArray<uint32_t>(graphNodeCount);
        actor->getTkActor().getGraphNodeIndices(graphNodeIndices, graphNodeCount);

        PxRigidDynamic& rigidDynamic = actor->getPhysXActor();
        // kinematic actors are treated as static supports
        const bool isStatic = rigidDynamic.getRigidBodyFlags() & PxRigidBodyFlag::eKINEMATIC;
        if (isStatic)
        {
            // feed actor-local gravity into every node of the actor
            PxVec3 gravity = rigidDynamic.getScene()->getGravity();
            gravity = rigidDynamic.getGlobalPose().rotateInv(gravity);

            for (uint32_t i = 0; i < graphNodeCount; ++i)
            {
                const uint32_t node = graphNodeIndices[i];
                m_graphProcessor->addNodeVelocity(node, gravity);
            }
        }
        else
        {
            PxVec3 cMassPose = rigidDynamic.getCMassLocalPose().p;

            PxVec3 angularVelocity = rigidDynamic.getGlobalPose().rotateInv(rigidDynamic.getAngularVelocity());
            //PxVec3 linearVelocity = rigidDynamic.getGlobalPose().rotateInv(rigidDynamic.getLinearVelocity());

            // Apply centrifugal force
            for (uint32_t i = 0; i < graphNodeCount; ++i)
            {
                const uint32_t node = graphNodeIndices[i];
                const auto& localPos = m_graphProcessor->getNodeData(node).localPos;
                // a = w x (w x r)
                const PxVec3 centrifugalAcceleration = angularVelocity.cross(angularVelocity.cross(localPos - cMassPose));
                m_graphProcessor->addNodeVelocity(node, centrifugalAcceleration);
            }
        }

        // apply user impulses buffered by applyImpulse(): snap each to the closest graph node
        const auto entry = m_impulseBuffer.find(actor);
        if (entry)
        {
            for (const ImpulseData& data : entry->second)
            {
                float bestDist = FLT_MAX;
                uint32_t bestNode = invalidIndex<uint32_t>();

                for (uint32_t i = 0; i < graphNodeCount; ++i)
                {
                    const uint32_t node = graphNodeIndices[i];
                    const float sqrDist = (data.position - m_graphProcessor->getNodeData(node).localPos).magnitudeSquared();
                    if (sqrDist < bestDist)
                    {
                        bestDist = sqrDist;
                        bestNode = node;
                    }
                }

                if (!isInvalidIndex(bestNode))
                {
                    m_graphProcessor->addNodeImpulse(bestNode, data.impulse);
                }
            }
            m_impulseBuffer[actor].clear();
        }
    }
}
+
+void ExtImpulseStressSolver::applyImpulse(ExtPxActor& actor, physx::PxVec3 position, physx::PxVec3 force)
+{
+ ImpulseData data = { position, force };
+
+ m_impulseBuffer[&actor].pushBack(data);
+}
+
/**
\return the number of bonds in the (possibly reduced) solver graph.
*/
uint32_t ExtImpulseStressSolver::getBondCount() const
{
    return m_graphProcessor->getSolverBondCount();
}
+
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+// Update
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+void ExtImpulseStressSolver::update(bool doDamage)
+{
+ initialize();
+
+ solve();
+
+ if (doDamage)
+ {
+ applyDamage();
+ }
+
+ m_framesCount++;
+}
+
+void ExtImpulseStressSolver::solve()
+{
+ PX_SIMD_GUARD;
+
+ const uint32_t iterations = getIterationsPerFrame();
+ m_graphProcessor->solve(iterations, WARM_START && !m_reset);
+ m_reset = false;
+
+ m_graphProcessor->calcError(m_errorLinear, m_errorAngular);
+}
+
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+// Damage
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+void ExtImpulseStressSolver::applyDamage()
+{
+ m_bondFractureBuffer.clear();
+ m_graphProcessor->generateFracture(m_bondFractureBuffer, m_settings, m_bondHealths);
+
+ if (m_bondFractureBuffer.size() > 0)
+ {
+ NvBlastFractureBuffers fractureCommands;
+ fractureCommands.chunkFractureCount = 0;
+ fractureCommands.bondFractureCount = m_bondFractureBuffer.size();
+ fractureCommands.bondFractures = m_bondFractureBuffer.begin();
+
+ m_family.getTkFamily().applyFracture(&fractureCommands);
+ }
+}
+
+uint32_t ExtImpulseStressSolver::getIterationCount() const
+{
+ return getFrameCount() * getIterationsPerFrame();
+}
+
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+// Debug Render
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+static PxU32 PxVec4ToU32Color(const PxVec4& color)
+{
+ PxU32 c = 0;
+ c |= (int)(color.w * 255); c <<= 8;
+ c |= (int)(color.z * 255); c <<= 8;
+ c |= (int)(color.y * 255); c <<= 8;
+ c |= (int)(color.x * 255);
+ return c;
+}
+
+static PxVec4 PxVec4Lerp(const PxVec4 v0, const PxVec4 v1, float val)
+{
+ PxVec4 v(
+ v0.x * (1 - val) + v1.x * val,
+ v0.y * (1 - val) + v1.y * val,
+ v0.z * (1 - val) + v1.z * val,
+ v0.w * (1 - val) + v1.w * val
+ );
+ return v;
+}
+
/**
Clamp a value to the [0, 1] range.
*/
inline float clamp01(float v)
{
    if (v < 0.0f)
        return 0.0f;
    if (v > 1.0f)
        return 1.0f;
    return v;
}
+
+inline PxVec4 bondHealthColor(float healthFraction)
+{
+ healthFraction = clamp01(healthFraction);
+
+ const PxVec4 BOND_HEALTHY_COLOR(0.0f, 1.0f, 1.0f, 1.0f);
+ const PxVec4 BOND_MID_COLOR(1.0f, 1.0f, 0.0f, 1.0f);
+ const PxVec4 BOND_BROKEN_COLOR(1.0f, 0.0f, 0.0f, 1.0f);
+
+ return healthFraction < 0.5 ? PxVec4Lerp(BOND_BROKEN_COLOR, BOND_MID_COLOR, 2.0f * healthFraction) : PxVec4Lerp(BOND_MID_COLOR, BOND_HEALTHY_COLOR, 2.0f * healthFraction - 1.0f);
+}
+
/**
Build debug lines for the reduced stress graph.

\param nodes    Blast graph node indices to visualize (their solver nodes are marked).
\param lines    output debug line buffer (appended to).
\param mode     selects extra impulse visualization (per-node or per-bond impulses).
\param scale    length scale for impulse vectors.

No-op while the solver is out of sync with the family (m_isDirty).
*/
void ExtImpulseStressSolver::fillDebugRender(const std::vector<uint32_t>& nodes, std::vector<PxDebugLine>& lines, DebugRenderMode mode, float scale)
{
    const PxVec4 BOND_IMPULSE_LINEAR_COLOR(0.0f, 1.0f, 0.0f, 1.0f);
    const PxVec4 BOND_IMPULSE_ANGULAR_COLOR(1.0f, 0.0f, 0.0f, 1.0f);

    if (m_isDirty)
        return;

    // mark the solver nodes corresponding to the requested Blast nodes (reuses m_scratch)
    ExtArray<uint8_t>::type& nodesSet = m_scratch;

    nodesSet.resize(m_graphProcessor->getSolverNodeCount());
    memset(nodesSet.begin(), 0, nodesSet.size() * sizeof(uint8_t));
    for (auto& nodeIndex : nodes)
    {
        nodesSet[m_graphProcessor->getNodeData(nodeIndex).solverNode] = 1;
    }

    const uint32_t bondCount = m_graphProcessor->getSolverBondCount();
    for (uint32_t i = 0; i < bondCount; ++i)
    {
        const auto& solverInternalBondData = m_graphProcessor->getSolverInternalBondData(i);
        if (nodesSet[solverInternalBondData.node0] != 0)
        {
            NVBLAST_ASSERT(nodesSet[solverInternalBondData.node1] != 0);

            const auto& solverInternalNode0 = m_graphProcessor->getSolverInternalNodeData(solverInternalBondData.node0);
            const auto& solverInternalNode1 = m_graphProcessor->getSolverInternalNodeData(solverInternalBondData.node1);
            const auto& solverNode0 = m_graphProcessor->getSolverNodeData(solverInternalBondData.node0);
            const auto& solverNode1 = m_graphProcessor->getSolverNodeData(solverInternalBondData.node1);

            PxVec3 p0 = solverNode0.localPos;
            PxVec3 p1 = solverNode1.localPos;
            PxVec3 center = (p0 + p1) * 0.5f;

            // color the bond line by its remaining stress health
            const float stress = std::min<float>(solverInternalBondData.getStressHealth(m_settings), 1.0f);
            PxVec4 color = bondHealthColor(1.0f - stress);

            lines.push_back(PxDebugLine(p0, p1, PxVec4ToU32Color(color)));

            float impulseScale = scale;

            if (mode == DebugRenderMode::STRESS_GRAPH_NODES_IMPULSES)
            {
                lines.push_back(PxDebugLine(p0, p0 + solverInternalNode0.velocityLinear * impulseScale, PxVec4ToU32Color(BOND_IMPULSE_LINEAR_COLOR)));
                lines.push_back(PxDebugLine(p0, p0 + solverInternalNode0.velocityAngular * impulseScale, PxVec4ToU32Color(BOND_IMPULSE_ANGULAR_COLOR)));
                lines.push_back(PxDebugLine(p1, p1 + solverInternalNode1.velocityLinear * impulseScale, PxVec4ToU32Color(BOND_IMPULSE_LINEAR_COLOR)));
                lines.push_back(PxDebugLine(p1, p1 + solverInternalNode1.velocityAngular * impulseScale, PxVec4ToU32Color(BOND_IMPULSE_ANGULAR_COLOR)));
            }
            else if (mode == DebugRenderMode::STRESS_GRAPH_BONDS_IMPULSES)
            {
                lines.push_back(PxDebugLine(center, center + solverInternalBondData.impulseLinear * impulseScale, PxVec4ToU32Color(BOND_IMPULSE_LINEAR_COLOR)));
                lines.push_back(PxDebugLine(center, center + solverInternalBondData.impulseAngular * impulseScale, PxVec4ToU32Color(BOND_IMPULSE_ANGULAR_COLOR)));
            }
        }
    }
}
+
+
+} // namespace Blast
+} // namespace Nv
diff --git a/NvBlast/sdk/extensions/physx/source/physics/NvBlastExtImpulseStressSolver.h b/NvBlast/sdk/extensions/physx/source/physics/NvBlastExtImpulseStressSolver.h
new file mode 100644
index 0000000..d274789
--- /dev/null
+++ b/NvBlast/sdk/extensions/physx/source/physics/NvBlastExtImpulseStressSolver.h
@@ -0,0 +1,164 @@
+/*
+* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#ifndef NVBLASTEXTIMPULSESTRESSSOLVER_H
+#define NVBLASTEXTIMPULSESTRESSSOLVER_H
+
+#include "NvBlastExtStressSolver.h"
+#include "NvBlastExtPxManager.h"
+#include "NvBlastExtPxListener.h"
+#include "NvBlastTypes.h"
+#include <NvBlastExtArray.h>
+#include <NvBlastExtHashSet.h>
+#include <NvBlastExtHashMap.h>
+
+namespace Nv
+{
+namespace Blast
+{
+
+
/**
Cached per-node stress data.
NOTE(review): not referenced by this file's visible code — presumably consumed
elsewhere; confirm before changing.
*/
struct ExtStressNodeCachedData
{
    physx::PxVec3 localPos; // node position in actor-local space
    bool isStatic;          // node belongs to a static support
};
+
+
/**
Cached per-bond stress data.
NOTE(review): not referenced by this file's visible code — presumably consumed
elsewhere; confirm before changing.
*/
struct ExtStressBondCachedData
{
    uint32_t bondIndex; // Blast bond index
};
+
+class SupportGraphProcessor;
+
/**
PhysX-backed implementation of ExtStressSolver.

Listens to an ExtPxFamily for actor creation/destruction, feeds external
accelerations and buffered user impulses into a reduced support-graph stress
model (SupportGraphProcessor), solves it iteratively on each update(), and can
convert overstressed bonds into Blast fracture commands.
*/
class ExtImpulseStressSolver : public ExtStressSolver, ExtPxListener
{
	NV_NOCOPY(ExtImpulseStressSolver)

public:
	ExtImpulseStressSolver(ExtPxFamily& family, ExtStressSolverSettings settings);
	virtual void release() override;


	//////// ExtStressSolver interface ////////

	// Replace the solver settings; consulted on the next update().
	virtual void setSettings(const ExtStressSolverSettings& settings) override
	{
		m_settings = settings;
	}

	virtual const ExtStressSolverSettings& getSettings() const override
	{
		return m_settings;
	}

	virtual void applyImpulse(ExtPxActor& actor, physx::PxVec3 position, physx::PxVec3 force) override;

	virtual void update(bool doDamage) override;

	// Request a solver reset: frame counter restarts and warm start is skipped once.
	void reset() override
	{
		m_reset = true;
	}

	virtual float getStressErrorLinear() const override
	{
		return m_errorLinear;
	}

	virtual float getStressErrorAngular() const override
	{
		return m_errorAngular;
	}

	virtual uint32_t getIterationCount() const override;

	virtual uint32_t getFrameCount() const override
	{
		return m_framesCount;
	}

	virtual uint32_t getBondCount() const override;

	virtual void fillDebugRender(const std::vector<uint32_t>& nodes, std::vector<physx::PxDebugLine>& lines, DebugRenderMode mode, float scale) override;


	//////// ExtPxListener interface ////////

	virtual void onActorCreated(ExtPxFamily& family, ExtPxActor& actor) final;

	virtual void onActorDestroyed(ExtPxFamily& family, ExtPxActor& actor) final;


private:
	~ExtImpulseStressSolver();


	//////// private methods ////////

	void solve();

	void applyDamage();

	void initialize();

	NV_INLINE void iterate();

	void syncSolver();

	template<class T>
	NV_INLINE T* getScratchArray(uint32_t size);


	//////// data ////////

	// A buffered user impulse: position and impulse vector (see applyImpulse/initialize).
	struct ImpulseData
	{
		physx::PxVec3 position;
		physx::PxVec3 impulse;
	};

	ExtPxFamily& m_family;                      // family being tracked (subscribed in ctor)
	ExtHashSet<ExtPxActor*>::type m_actors;     // tracked actors (more than one graph node)
	ExtStressSolverSettings m_settings;
	NvBlastSupportGraph m_graph;                // support graph of the family's asset
	bool m_isDirty;                             // family changed; syncSolver() needed
	bool m_reset;                               // reset() requested; skip warm start once
	const float* m_bondHealths;                 // bond healths read from the Tk actor
	SupportGraphProcessor* m_graphProcessor;    // reduced-graph stress model (owned, deleted in dtor)
	float m_errorAngular;
	float m_errorLinear;
	uint32_t m_framesCount;
	ExtArray<NvBlastBondFractureData>::type m_bondFractureBuffer;   // scratch buffer for applyDamage()
	ExtHashMap<const ExtPxActor*, ExtArray<ImpulseData>::type>::type m_impulseBuffer; // impulses queued per actor
	ExtArray<uint8_t>::type m_scratch;          // shared scratch storage (see getScratchArray)
};
+
+
/**
Return a scratch buffer able to hold 'size' elements of type T, growing and
reusing m_scratch as needed. Contents are undefined; the buffer is only valid
until the next call (m_scratch is shared, e.g. with fillDebugRender()).

NOTE(review): m_scratch stores uint8_t; the reinterpret_cast assumes the
array's storage is suitably aligned for T — confirm the allocator guarantees
this for uint32_t and larger element types.
*/
template<class T>
NV_INLINE T* ExtImpulseStressSolver::getScratchArray(uint32_t size)
{
	const uint32_t scratchSize = sizeof(T) * size;
	if (m_scratch.size() < scratchSize)
	{
		m_scratch.resize(scratchSize);
	}
	return reinterpret_cast<T*>(m_scratch.begin());
}
+
+
+} // namespace Blast
+} // namespace Nv
+
+
+#endif // ifndef NVBLASTEXTIMPULSESTRESSSOLVER_H
diff --git a/NvBlast/sdk/extensions/physx/source/physics/NvBlastExtPxActorImpl.cpp b/NvBlast/sdk/extensions/physx/source/physics/NvBlastExtPxActorImpl.cpp
new file mode 100644
index 0000000..7732d18
--- /dev/null
+++ b/NvBlast/sdk/extensions/physx/source/physics/NvBlastExtPxActorImpl.cpp
@@ -0,0 +1,180 @@
+/*
+* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#include "NvBlastExtPxActorImpl.h"
+#include "NvBlastExtPxAsset.h"
+#include "NvBlastExtPxManagerImpl.h"
+#include "NvBlastExtPxFamilyImpl.h"
+
+#include "PxRigidDynamic.h"
+#include "PxPhysics.h"
+
+#include "NvBlastTkActor.h"
+#include "NvBlastTkAsset.h"
+
+#include "PxRigidBodyExt.h"
+
+
+namespace Nv
+{
+namespace Blast
+{
+
+
/**
Create the PhysX representation (a PxRigidDynamic with one shape per subchunk)
for a newly created Blast TkActor.

\param family       owning family implementation (provides asset, manager, templates).
\param tkActor      the Blast actor to represent; its userData is set to this object.
\param pxActorInfo  initial pose and velocities for the rigid body.
*/
ExtPxActorImpl::ExtPxActorImpl(ExtPxFamilyImpl* family, TkActor* tkActor, const PxActorCreateInfo& pxActorInfo)
	: m_family(family), m_tkActor(tkActor)
{
	const ExtPxChunk* pxChunks = m_family->m_pxAsset.getChunks();
	const ExtPxSubchunk* pxSubchunks = m_family->m_pxAsset.getSubchunks();
	const NvBlastChunk* chunks = m_tkActor->getAsset()->getChunks();
	uint32_t nodeCount = m_tkActor->getGraphNodeCount();

	PxFilterData simulationFilterData; // Default constructor = {0,0,0,0}

	// get visible chunk indices list
	{
		auto& chunkIndices = m_family->m_indicesScratch;
		chunkIndices.resize(m_tkActor->getVisibleChunkCount());
		m_tkActor->getVisibleChunkIndices(chunkIndices.begin(), static_cast<uint32_t>(chunkIndices.size()));

		// fill visible chunk indices list with mapped to our asset indices
		m_chunkIndices.reserve(chunkIndices.size());
		for (const uint32_t chunkIndex : chunkIndices)
		{
			const ExtPxChunk& chunk = pxChunks[chunkIndex];
			if (chunk.subchunkCount == 0)
				continue; // chunks without geometry get no shapes
			m_chunkIndices.pushBack(chunkIndex);
		}

		// Single lower-support chunk actors might be leaf actors, check for this and disable contact callbacks if so
		if (nodeCount <= 1)
		{
			PX_ASSERT(chunkIndices.size() == 1);
			if (chunkIndices.size() > 0)
			{
				const NvBlastChunk& chunk = chunks[chunkIndices[0]];
				if (chunk.firstChildIndex == chunk.childIndexStop)
				{
					simulationFilterData.word3 = ExtPxManager::LEAF_CHUNK; // mark as leaf chunk if chunk has no children
				}
			}
		}
	}

	// create rigidDynamic and setup
	PxPhysics& physics = m_family->m_manager.m_physics;
	m_rigidDynamic = physics.createRigidDynamic(pxActorInfo.m_transform);
	if (m_family->m_pxActorDescTemplate != nullptr)
	{
		m_rigidDynamic->setActorFlags(static_cast<physx::PxActorFlags>(m_family->m_pxActorDescTemplate->flags));
	}

	// fill rigidDynamic with shapes
	PxMaterial* material = m_family->m_spawnSettings.material;
	for (uint32_t i = 0; i < m_chunkIndices.size(); ++i)
	{
		uint32_t chunkID = m_chunkIndices[i];
		const ExtPxChunk& chunk = pxChunks[chunkID];
		for (uint32_t c = 0; c < chunk.subchunkCount; c++)
		{
			const uint32_t subchunkIndex = chunk.firstSubchunkIndex + c;
			auto& subchunk = pxSubchunks[subchunkIndex];
			// NOTE(review): createShape() returns a reference-counted shape that is
			// never released here after attachShape(); confirm the extra reference
			// is handled elsewhere (e.g. via m_subchunkShapes) or intended.
			PxShape* shape = physics.createShape(subchunk.geometry, *material);
			shape->setLocalPose(subchunk.transform);

			const ExtPxShapeDescTemplate* pxShapeDesc = m_family->m_pxShapeDescTemplate;
			if (pxShapeDesc != nullptr)
			{
				// apply the user-provided shape template
				shape->setFlags(static_cast<PxShapeFlags>(pxShapeDesc->flags));
				shape->setSimulationFilterData(pxShapeDesc->simulationFilterData);
				shape->setQueryFilterData(pxShapeDesc->queryFilterData);
				shape->setContactOffset(pxShapeDesc->contactOffset);
				shape->setRestOffset(pxShapeDesc->restOffset);
			}
			else
			{
				shape->setSimulationFilterData(simulationFilterData);
			}

			m_rigidDynamic->attachShape(*shape);

			PX_ASSERT_WITH_MESSAGE(m_family->m_subchunkShapes[subchunkIndex] == nullptr, "Chunk has some shapes(live).");
			m_family->m_subchunkShapes[subchunkIndex] = shape;
		}
	}

	// search for static chunk in actor's graph (make actor static if it contains static chunk)
	bool staticFound = false;
	if (nodeCount > 0)
	{
		auto& graphChunkIndices = m_family->m_indicesScratch;
		graphChunkIndices.resize(nodeCount);
		m_tkActor->getGraphNodeIndices(graphChunkIndices.begin(), static_cast<uint32_t>(graphChunkIndices.size()));
		const NvBlastSupportGraph graph = m_tkActor->getAsset()->getGraph();

		for (uint32_t i = 0; !staticFound && i < graphChunkIndices.size(); ++i)
		{
			uint32_t chunkIndex = graph.chunkIndices[graphChunkIndices[i]];
			const ExtPxChunk& chunk = pxChunks[chunkIndex];
			staticFound = chunk.isStatic;
		}
	}
	m_rigidDynamic->setRigidBodyFlag(PxRigidBodyFlag::eKINEMATIC, staticFound);

	// store pointer to actor in px userData
	m_family->m_manager.registerActor(m_rigidDynamic, this);

	// store pointer to actor in blast userData
	m_tkActor->userData = this;

	// update mass properties
	PxRigidBodyExt::updateMassAndInertia(*m_rigidDynamic, m_family->m_spawnSettings.density);

	// set initial velocities
	if (!(m_rigidDynamic->getRigidBodyFlags() & PxRigidBodyFlag::eKINEMATIC))
	{
		m_rigidDynamic->setLinearVelocity(pxActorInfo.m_linearVelocity);
		m_rigidDynamic->setAngularVelocity(pxActorInfo.m_angularVelocity);
	}
}
+
/**
Tear down the actor's PhysX representation. Safe to call more than once
(also invoked by the destructor): the rigid body pointer is nulled after release.
*/
void ExtPxActorImpl::release()
{
	// release the PhysX body first
	if (m_rigidDynamic != nullptr)
	{
		m_family->m_manager.unregisterActor(m_rigidDynamic);
		m_rigidDynamic->release();
		m_rigidDynamic = nullptr;
	}

	// clear the family's subchunk -> shape lookup for every chunk this actor owned
	const ExtPxChunk* pxChunks = m_family->m_pxAsset.getChunks();
	for (uint32_t chunkID : m_chunkIndices)
	{
		const ExtPxChunk& chunk = pxChunks[chunkID];
		for (uint32_t c = 0; c < chunk.subchunkCount; c++)
		{
			const uint32_t subchunkIndex = chunk.firstSubchunkIndex + c;
			m_family->m_subchunkShapes[subchunkIndex] = nullptr;
		}
	}
	m_chunkIndices.clear();

	// unlink from the Tk actor
	m_tkActor->userData = nullptr;
}
+
/**
\return the ExtPxFamily this actor belongs to.
*/
ExtPxFamily& ExtPxActorImpl::getFamily() const
{
	return *m_family;
}
+
+
+} // namespace Blast
+} // namespace Nv
diff --git a/NvBlast/sdk/extensions/physx/source/physics/NvBlastExtPxActorImpl.h b/NvBlast/sdk/extensions/physx/source/physics/NvBlastExtPxActorImpl.h
new file mode 100644
index 0000000..a592293
--- /dev/null
+++ b/NvBlast/sdk/extensions/physx/source/physics/NvBlastExtPxActorImpl.h
@@ -0,0 +1,94 @@
+/*
+* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#ifndef NVBLASTEXTPXACTORIMPL_H
+#define NVBLASTEXTPXACTORIMPL_H
+
+#include "NvBlastExtPxActor.h"
+#include "NvBlastExtArray.h"
+#include "PxTransform.h"
+
+
+using namespace physx;
+
+namespace Nv
+{
+namespace Blast
+{
+
+
+// Forward declarations
+class ExtPxFamilyImpl;
+
+struct PxActorCreateInfo	// initial kinematic state handed to ExtPxActorImpl at creation (copied from parent actor on split)
+{
+	PxTransform	m_transform;	// world pose for the new PxRigidDynamic
+	PxVec3		m_scale;		// geometry scale applied to the actor's shapes
+	PxVec3		m_linearVelocity;	// applied only when the actor is not kinematic
+	PxVec3		m_angularVelocity;	// applied only when the actor is not kinematic
+};
+
+
+class ExtPxActorImpl final : public ExtPxActor	// concrete ExtPxActor: pairs one TkActor with one PxRigidDynamic
+{
+public:
+	//////// ctor ////////
+
+	ExtPxActorImpl(ExtPxFamilyImpl* family, TkActor* tkActor, const PxActorCreateInfo& pxActorInfo);
+
+	~ExtPxActorImpl()
+	{
+		release();	// release() is idempotent; safe even if called explicitly beforehand
+	}
+
+	void release();
+
+
+	//////// interface ////////
+
+	virtual uint32_t		getChunkCount() const override
+	{
+		return static_cast<uint32_t>(m_chunkIndices.size());
+	}
+
+	virtual const uint32_t*	getChunkIndices() const override
+	{
+		return m_chunkIndices.begin();	// contiguous storage; valid for getChunkCount() entries
+	}
+
+	virtual PxRigidDynamic&	getPhysXActor() const override
+	{
+		return *m_rigidDynamic;	// NOTE(review): dereferences unconditionally — invalid after release(); confirm callers respect lifetime
+	}
+
+	virtual TkActor&		getTkActor() const override
+	{
+		return *m_tkActor;
+	}
+
+	virtual ExtPxFamily&	getFamily() const override;
+
+
+private:
+	//////// data ////////
+
+	ExtPxFamilyImpl*					m_family;	// owning family (non-owning back-pointer)
+	TkActor*							m_tkActor;
+	PxRigidDynamic*						m_rigidDynamic;	// owned; released in release()
+	ExtInlineArray<uint32_t, 4>::type	m_chunkIndices;	// inline capacity 4: most actors hold few visible chunks
+};
+
+
+
+} // namespace Blast
+} // namespace Nv
+
+
+#endif // ifndef NVBLASTEXTPXACTORIMPL_H
diff --git a/NvBlast/sdk/extensions/physx/source/physics/NvBlastExtPxAssetImpl.cpp b/NvBlast/sdk/extensions/physx/source/physics/NvBlastExtPxAssetImpl.cpp
new file mode 100644
index 0000000..a0f75fc
--- /dev/null
+++ b/NvBlast/sdk/extensions/physx/source/physics/NvBlastExtPxAssetImpl.cpp
@@ -0,0 +1,315 @@
+/*
+* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#include "NvBlastExtPxAssetImpl.h"
+#include "NvBlastExtHashMap.h"
+
+#include "NvBlastAssert.h"
+#include "NvBlastIndexFns.h"
+
+#include "NvBlastTkAsset.h"
+
+#include "PxIO.h"
+#include "PxPhysics.h"
+#include "PxFileBuf.h"
+#include "cooking/PxCooking.h"
+
+#include <algorithm>
+
+namespace Nv
+{
+namespace Blast
+{
+
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+// Helpers/Wrappers
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+class FileBufToPxInputStream final : public PxInputStream	// adapts PxFileBuf to the PxInputStream interface used by PxPhysics::createConvexMesh
+{
+public:
+	FileBufToPxInputStream(PxFileBuf& filebuf) : m_filebuf(filebuf) {}
+
+	virtual uint32_t read(void* dest, uint32_t count) override	// 'override' added for consistency with FileBufToPxOutputStream::write and compiler signature checking
+	{
+		return m_filebuf.read(dest, count);	// returns bytes actually read, per PxInputStream contract
+	}
+
+private:
+	FileBufToPxInputStream& operator=(const FileBufToPxInputStream&);	// declared, not defined: non-assignable (reference member)
+
+	PxFileBuf& m_filebuf;
+};
+
+
+class FileBufToPxOutputStream final : public PxOutputStream	// adapts PxFileBuf to the PxOutputStream interface used by PxCooking::cookConvexMesh
+{
+public:
+	FileBufToPxOutputStream(PxFileBuf& filebuf) : m_filebuf(filebuf) {}
+
+	virtual uint32_t write(const void* src, uint32_t count) override
+	{
+		return m_filebuf.write(src, count);	// returns bytes actually written, per PxOutputStream contract
+	}
+
+private:
+	FileBufToPxOutputStream& operator=(const FileBufToPxOutputStream&);	// declared, not defined: non-assignable (reference member)
+
+	PxFileBuf& m_filebuf;
+};
+
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+// ExtPxAssetImpl Implementation
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+ExtPxAssetImpl::ExtPxAssetImpl(const ExtPxAssetDesc& desc, TkFramework& framework)	// builds the TkAsset and flattens per-chunk subchunk descriptors into one array
+{
+	m_tkAsset = framework.createAsset(desc);	// NOTE(review): result not checked for nullptr — confirm createAsset cannot fail here
+
+	// count subchunks and reserve memory
+	uint32_t subchunkCount = 0;
+	for (uint32_t i = 0; i < desc.chunkCount; ++i)
+	{
+		const auto& chunk = desc.pxChunks[i];
+		subchunkCount += static_cast<uint32_t>(chunk.subchunkCount);
+	}
+	m_subchunks.reserve(subchunkCount);	// single allocation; firstSubchunkIndex values below assume no reallocation mid-loop
+
+	// fill chunks and subchunks
+	m_chunks.resize(desc.chunkCount);
+	for (uint32_t i = 0; i < desc.chunkCount; ++i)
+	{
+		const auto& chunk = desc.pxChunks[i];
+		m_chunks[i].isStatic = chunk.isStatic;
+		m_chunks[i].firstSubchunkIndex = m_subchunks.size();	// index into the flattened m_subchunks array
+		m_chunks[i].subchunkCount = chunk.subchunkCount;
+		for (uint32_t k = 0; k < chunk.subchunkCount; ++k)
+		{
+			ExtPxSubchunk subchunk =
+			{
+				chunk.subchunks[k].transform,
+				chunk.subchunks[k].geometry
+			};
+			m_subchunks.pushBack(subchunk);
+		}
+	}
+}
+
+ExtPxAssetImpl::ExtPxAssetImpl(TkAsset* tkAsset):	// wraps an existing TkAsset; takes ownership (released in dtor), chunk arrays left empty
+	m_tkAsset(tkAsset)
+{
+
+}
+
+ExtPxAssetImpl::~ExtPxAssetImpl()	// releases the owned TkAsset; convex meshes in m_subchunks are not released here
+{
+	if (m_tkAsset)
+	{
+		m_tkAsset->release();
+	}
+}
+
+void ExtPxAssetImpl::release()	// self-delete through the extension allocator (matches NVBLASTEXT_NEW in ExtPxAsset::create)
+{
+	NVBLASTEXT_DELETE(this, ExtPxAssetImpl);
+}
+
+NV_INLINE bool serializeConvexMesh(const PxConvexMesh& convexMesh, PxCooking& cooking, ExtArray<uint32_t>::type& indicesScratch,
+	ExtArray<PxHullPolygon>::type& hullPolygonsScratch, PxOutputStream& stream)	// FIX: scratch now passed by reference — by-value copied the array every call, discarding its reused capacity
+{
+	// Rebuild a PxConvexMeshDesc from the already-cooked mesh so it can be re-cooked into the stream.
+	PxConvexMeshDesc desc;
+	desc.points.data = convexMesh.getVertices();
+	desc.points.count = convexMesh.getNbVertices();
+	desc.points.stride = sizeof(PxVec3);
+
+	hullPolygonsScratch.resize(convexMesh.getNbPolygons());
+
+	// First pass: find the total index count (max of mIndexBase + mNbVerts over all polygons).
+	uint32_t indexCount = 0;
+	for (uint32_t i = 0; i < convexMesh.getNbPolygons(); i++)
+	{
+		PxHullPolygon polygon;
+		convexMesh.getPolygonData(i, polygon);
+		if (polygon.mNbVerts)
+		{
+			indexCount = std::max<uint32_t>(indexCount, polygon.mIndexBase + polygon.mNbVerts);
+		}
+	}
+	indicesScratch.resize(indexCount);
+
+	// Second pass: widen the mesh's 8-bit index buffer to 32-bit and capture the polygon records.
+	for (uint32_t i = 0; i < convexMesh.getNbPolygons(); i++)
+	{
+		PxHullPolygon polygon;
+		convexMesh.getPolygonData(i, polygon);
+		for (uint32_t j = 0; j < polygon.mNbVerts; j++)
+		{
+			indicesScratch[polygon.mIndexBase + j] = convexMesh.getIndexBuffer()[polygon.mIndexBase + j];
+		}
+
+		hullPolygonsScratch[i] = polygon;
+	}
+
+	desc.indices.count = indexCount;
+	desc.indices.data = indicesScratch.begin();
+	desc.indices.stride = sizeof(uint32_t);
+
+	desc.polygons.count = convexMesh.getNbPolygons();
+	desc.polygons.data = hullPolygonsScratch.begin();
+	desc.polygons.stride = sizeof(PxHullPolygon);
+
+	return cooking.cookConvexMesh(desc, stream);	// writes cooked data into 'stream'; false on cooking failure
+}
+
+bool ExtPxAssetImpl::serialize(PxFileBuf& stream, PxCooking& cooking) const	// wire format: header, TkAsset, chunks, subchunk count, then per-subchunk transform/scale/mesh; deserialize() must read in the same order
+{
+	// Header data
+	stream.storeDword(ClassID);
+	stream.storeDword(Version::Current);
+
+	m_tkAsset->serialize(stream);
+
+	// Chunks
+	const uint32_t chunkCount = m_tkAsset->getChunkCount();	// NOTE(review): assumes m_chunks.size() == tkAsset chunk count; untrue for assets made via create(TkAsset*) — confirm
+	for (uint32_t i = 0; i < chunkCount; ++i)
+	{
+		const ExtPxChunk& chunk = m_chunks[i];
+		stream.storeDword(chunk.firstSubchunkIndex);
+		stream.storeDword(chunk.subchunkCount);
+		stream.storeDword(chunk.isStatic ? 1 : 0);
+	}
+
+	stream.storeDword(m_subchunks.size());
+
+	ExtArray<uint32_t>::type indicesScratch(512);	// reused across subchunks to avoid per-mesh allocation
+	ExtArray<PxHullPolygon>::type hullPolygonsScratch(512);
+	ExtHashMap<PxConvexMesh*, uint32_t>::type convexReuseMap;	// mesh -> first subchunk index that serialized it
+
+	FileBufToPxOutputStream outputStream(stream);
+	for (uint32_t i = 0; i < m_subchunks.size(); ++i)
+	{
+		auto& subchunk = m_subchunks[i];
+
+		// Subchunk transform
+		stream.storeFloat(subchunk.transform.q.x); stream.storeFloat(subchunk.transform.q.y); stream.storeFloat(subchunk.transform.q.z); stream.storeFloat(subchunk.transform.q.w);
+		stream.storeFloat(subchunk.transform.p.x); stream.storeFloat(subchunk.transform.p.y); stream.storeFloat(subchunk.transform.p.z);
+
+		// Subchunk scale
+		stream.storeFloat(subchunk.geometry.scale.scale.x); stream.storeFloat(subchunk.geometry.scale.scale.y); stream.storeFloat(subchunk.geometry.scale.scale.z);
+		stream.storeFloat(subchunk.geometry.scale.rotation.x); stream.storeFloat(subchunk.geometry.scale.rotation.y); stream.storeFloat(subchunk.geometry.scale.rotation.z); stream.storeFloat(subchunk.geometry.scale.rotation.w);
+
+		auto convexMesh = subchunk.geometry.convexMesh;
+		NVBLASTEXT_CHECK_ERROR(convexMesh != nullptr, "ExtPxAssetImpl::serialize: subchunk convexMesh is nullptr.", return false);
+
+		auto entry = convexReuseMap.find(convexMesh);
+		if (entry)
+		{
+			stream.storeDword(entry->second);	// mesh already serialized: store index of the subchunk that owns the cooked data
+		}
+		else
+		{
+			stream.storeDword(invalidIndex<uint32_t>());	// sentinel: cooked mesh data follows inline
+			if (!serializeConvexMesh(*convexMesh, cooking, indicesScratch, hullPolygonsScratch, outputStream))
+			{
+				NVBLASTEXT_LOG_ERROR("ExtPxAssetImpl::serialize: subchunk convexMesh cooking/serialization failed.");
+				return false;
+			}
+			convexReuseMap[convexMesh] = i;
+		}
+	}
+
+	return true;
+}
+
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+// ExtPxAsset Static
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+ExtPxAsset* ExtPxAsset::create(const ExtPxAssetDesc& desc, TkFramework& framework)	// factory: builds TkAsset plus chunk/subchunk tables from the descriptor
+{
+	ExtPxAssetImpl* asset = NVBLASTEXT_NEW(ExtPxAssetImpl)(desc, framework);
+	return asset;
+}
+
+
+Nv::Blast::ExtPxAsset* ExtPxAsset::create(TkAsset* tkAsset)	// factory wrapping an existing TkAsset; ownership transfers to the returned asset
+{
+	ExtPxAssetImpl* asset = NVBLASTEXT_NEW(ExtPxAssetImpl)(tkAsset);
+
+	// Don't populate the chunks or subchunks!
+
+	return asset;
+}
+
+ExtPxAsset* ExtPxAsset::deserialize(PxFileBuf& stream, TkFramework& framework, PxPhysics& physics)	// reads the format written by ExtPxAssetImpl::serialize; returns nullptr on any failure
+{
+	ExtPxAssetImpl::DataHeader header;
+	header.dataType = stream.readDword();
+	header.version = stream.readDword();
+	NVBLASTEXT_CHECK_ERROR(header.dataType == ExtPxAssetImpl::ClassID, "ExtPxAsset::deserialize: wrong data type in filebuf stream.", return nullptr);
+	NVBLASTEXT_CHECK_ERROR(header.version == ExtPxAssetImpl::Version::Current, "ExtPxAsset::deserialize: wrong data version in filebuf stream.", return nullptr);
+
+	TkAsset* tkAsset = static_cast<TkAsset*>(framework.deserialize(stream));
+	NVBLASTEXT_CHECK_ERROR(tkAsset != nullptr, "ExtPxAsset::deserialize: failed to deserialize TkAsset.", return nullptr);
+
+	ExtPxAssetImpl* asset = NVBLASTEXT_NEW(ExtPxAssetImpl)(tkAsset);	// asset now owns tkAsset; asset->release() below also releases it
+
+	asset->m_chunks.resize(asset->m_tkAsset->getChunkCount());
+
+	const uint32_t chunkCount = asset->m_chunks.size();
+	for (uint32_t i = 0; i < chunkCount; ++i)	// chunk records: firstSubchunkIndex, subchunkCount, isStatic — same order as serialize()
+	{
+		ExtPxChunk& chunk = asset->m_chunks[i];
+		chunk.firstSubchunkIndex = stream.readDword();
+		chunk.subchunkCount = stream.readDword();
+		chunk.isStatic = 0 != stream.readDword();
+	}
+
+	const uint32_t subchunkCount = stream.readDword();
+	asset->m_subchunks.resize(subchunkCount);
+
+	FileBufToPxInputStream inputStream(stream);
+	for (uint32_t i = 0; i < asset->m_subchunks.size(); ++i)
+	{
+		ExtPxSubchunk& subChunk = asset->m_subchunks[i];
+
+		// Subchunk transform
+		subChunk.transform.q.x = stream.readFloat(); subChunk.transform.q.y = stream.readFloat(); subChunk.transform.q.z = stream.readFloat(); subChunk.transform.q.w = stream.readFloat();
+		subChunk.transform.p.x = stream.readFloat(); subChunk.transform.p.y = stream.readFloat(); subChunk.transform.p.z = stream.readFloat();
+
+		// Subchunk scale
+		subChunk.geometry.scale.scale.x = stream.readFloat(); subChunk.geometry.scale.scale.y = stream.readFloat(); subChunk.geometry.scale.scale.z = stream.readFloat();
+		subChunk.geometry.scale.rotation.x = stream.readFloat(); subChunk.geometry.scale.rotation.y = stream.readFloat(); subChunk.geometry.scale.rotation.z = stream.readFloat(); subChunk.geometry.scale.rotation.w = stream.readFloat();
+
+		const uint32_t convexReuseIndex = stream.readDword();
+		if (isInvalidIndex(convexReuseIndex))	// sentinel written by serialize(): cooked mesh data follows inline
+		{
+			subChunk.geometry.convexMesh = physics.createConvexMesh(inputStream);
+		}
+		else
+		{
+			NVBLAST_ASSERT_WITH_MESSAGE(convexReuseIndex < i, "ExtPxAsset::deserialize: wrong convexReuseIndex.");	// reused meshes always reference an earlier subchunk
+			subChunk.geometry.convexMesh = asset->m_subchunks[convexReuseIndex].geometry.convexMesh;
+		}
+		if (!subChunk.geometry.convexMesh)
+		{
+			NVBLASTEXT_LOG_ERROR("ExtPxAsset::deserialize: failed to deserialize convex mesh.");
+			asset->release();
+			return nullptr;
+		}
+	}
+
+	return asset;
+}
+
+
+} // namespace Blast
+} // namespace Nv
diff --git a/NvBlast/sdk/extensions/physx/source/physics/NvBlastExtPxAssetImpl.h b/NvBlast/sdk/extensions/physx/source/physics/NvBlastExtPxAssetImpl.h
new file mode 100644
index 0000000..fd95293
--- /dev/null
+++ b/NvBlast/sdk/extensions/physx/source/physics/NvBlastExtPxAssetImpl.h
@@ -0,0 +1,126 @@
+/*
+* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#ifndef NVBLASTEXTPXASSETIMPL_H
+#define NVBLASTEXTPXASSETIMPL_H
+
+#include "NvBlastExtPxAsset.h"
+#include "NvBlastExtArray.h"
+#include "NvBlastExtDefs.h"
+
+
+namespace Nv
+{
+namespace Blast
+{
+
+
+using namespace physx;
+using namespace general_PxIOStream2;
+
+
+class ExtPxAssetImpl final : public ExtPxAsset	// concrete ExtPxAsset: owns a TkAsset plus flattened chunk/subchunk tables
+{
+	NV_NOCOPY(ExtPxAssetImpl)
+
+public:
+	friend class ExtPxAsset;	// grants ExtPxAsset::deserialize access to m_chunks/m_subchunks
+
+	/**
+	Enum which keeps track of the serialized data format.
+	*/
+	enum Version
+	{
+		/** Initial version */
+		Initial,
+
+		// New formats must come before Count.  They should be given descriptive names with more information in comments.
+
+		/** The number of serialized formats. */
+		Count,
+
+		/** The current version.  This should always be Count-1 */
+		Current = Count - 1
+	};
+
+	//////// ctor ////////
+
+	ExtPxAssetImpl(const ExtPxAssetDesc& desc, TkFramework& framework);
+	ExtPxAssetImpl(TkAsset* tkAsset);
+
+	~ExtPxAssetImpl();
+
+
+	//////// interface ////////
+
+	virtual void					release() override;
+
+	virtual const TkAsset&			getTkAsset() const override
+	{
+		return *m_tkAsset;
+	}
+
+	virtual uint32_t				getChunkCount() const override
+	{
+		return m_chunks.size();	// zero for assets created via create(TkAsset*) until populated externally
+	}
+
+	virtual const ExtPxChunk*		getChunks() const override
+	{
+		return m_chunks.begin();
+	}
+
+	virtual uint32_t				getSubchunkCount() const override
+	{
+		return m_subchunks.size();
+	}
+
+	virtual const ExtPxSubchunk*	getSubchunks() const override
+	{
+		return m_subchunks.begin();
+	}
+
+	virtual bool					serialize(PxFileBuf& stream, PxCooking& cooking) const override;
+
+
+	/*
+	Get the underlying array for the chunks.  Used for serialization.
+	*/
+	ExtArray<ExtPxChunk>::type&		getChunksArray() { return m_chunks; }
+
+	/*
+	Get the underlying array for the subchunks.  Used for serialization.
+	*/
+	ExtArray<ExtPxSubchunk>::type&	getSubchunksArray() { return m_subchunks; }
+
+private:
+	//////// serialization data ////////
+
+	struct DataHeader
+	{
+		uint32_t	dataType;	// expected to equal ClassID
+		uint32_t	version;	// one of the Version enum values
+	};
+
+	enum { ClassID = NVBLASTEXT_FOURCC('B', 'P', 'X', 'A') };	// Blast PhysX Asset
+
+
+	//////// data ////////
+
+	TkAsset*						m_tkAsset;	// owned; released in dtor
+	ExtArray<ExtPxChunk>::type		m_chunks;	// one entry per Blast chunk
+	ExtArray<ExtPxSubchunk>::type	m_subchunks;	// flattened; chunks index into this via firstSubchunkIndex
+};
+
+} // namespace Blast
+} // namespace Nv
+
+
+#endif // ifndef NVBLASTEXTPXASSETIMPL_H
diff --git a/NvBlast/sdk/extensions/physx/source/physics/NvBlastExtPxFamilyImpl.cpp b/NvBlast/sdk/extensions/physx/source/physics/NvBlastExtPxFamilyImpl.cpp
new file mode 100644
index 0000000..b2d3a47
--- /dev/null
+++ b/NvBlast/sdk/extensions/physx/source/physics/NvBlastExtPxFamilyImpl.cpp
@@ -0,0 +1,294 @@
+/*
+* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#include "NvBlastExtPxFamilyImpl.h"
+#include "NvBlastExtPxActorImpl.h"
+#include "NvBlastExtPxAssetImpl.h"
+#include "NvBlastExtPxListener.h"
+#include "NvBlastExtPxManagerImpl.h"
+
+#include "NvBlastTkFamily.h"
+#include "NvBlastTkActor.h"
+#include "NvBlastTkJoint.h"
+
+#include "NvBlastAssert.h"
+
+#include "PxRigidDynamic.h"
+#include "PxScene.h"
+
+#include <algorithm>
+
+
+namespace Nv
+{
+namespace Blast
+{
+
+
+ExtPxFamilyImpl::ExtPxFamilyImpl(ExtPxManagerImpl& manager, TkFamily& tkFamily, const ExtPxAsset& pxAsset)
+	: m_manager(manager), m_tkFamily(tkFamily), m_pxAsset(pxAsset), m_pxShapeDescTemplate(nullptr), m_pxActorDescTemplate(nullptr), m_isSpawned(false)
+{
+	m_subchunkShapes.resize(static_cast<uint32_t>(m_pxAsset.getSubchunkCount()));	// one shape slot per asset subchunk; filled/cleared as actors come and go
+
+	userData = nullptr;
+
+	m_manager.registerFamily(*this);	// matched by unregisterFamily in the dtor
+}
+
+ExtPxFamilyImpl::~ExtPxFamilyImpl()	// tears down all live actors (if spawned), then releases the TkFamily
+{
+	m_manager.unregisterFamily(*this);
+
+	if (m_isSpawned)
+	{
+		m_tkFamily.removeListener(*this);	// stop receiving split/joint events before destroying actors
+
+		auto& actors = m_actorsBuffer;	// snapshot the set: destroyActors mutates m_actors while we iterate the copy
+		actors.resize(m_actors.size());
+		uint32_t i = 0;
+		for (auto it = m_actors.getIterator(); !it.done(); ++it)
+		{
+			actors[i++] = *it;
+		}
+		destroyActors(actors.begin(), actors.size());
+	}
+
+	m_tkFamily.release();
+}
+
+void ExtPxFamilyImpl::release()	// self-delete through the extension allocator (matches NVBLASTEXT_NEW in createFamily)
+{
+	NVBLASTEXT_DELETE(this, ExtPxFamilyImpl);
+}
+
+bool ExtPxFamilyImpl::spawn(const physx::PxTransform& pose, const physx::PxVec3& scale, const ExtPxSpawnSettings& settings)	// creates PhysX actors for the family's current TkActors and starts listening for splits
+{
+	NVBLASTEXT_CHECK_ERROR(!m_isSpawned, "Family spawn: family already spawned. Was spawn() called twice?", return false);
+	NVBLASTEXT_CHECK_ERROR(settings.scene != nullptr, "Family creation: desc.scene is nullptr", return false);
+	NVBLASTEXT_CHECK_ERROR(settings.material != nullptr, "Family creation: desc.material is nullptr", return false);
+
+	m_initialTransform = pose;
+	m_spawnSettings = settings;
+
+	// get current tkActors (usually it's only 1, but it can be already in split state)
+	const uint32_t actorCount = (uint32_t)m_tkFamily.getActorCount();
+	m_newActorsBuffer.resize(actorCount);
+	m_tkFamily.getActors(m_newActorsBuffer.begin(), actorCount);
+
+	// calc max split count
+	uint32_t splitMaxActorCount = 0;
+	for (TkActor* actor : m_newActorsBuffer)
+	{
+		splitMaxActorCount = std::max<uint32_t>(splitMaxActorCount, actor->getSplitMaxActorCount());
+	}
+
+	// preallocate memory so receive() never reallocates while handling split events
+	m_newActorsBuffer.resize(splitMaxActorCount);
+	m_newActorCreateInfo.resize(splitMaxActorCount);
+	m_physXActorsBuffer.resize(splitMaxActorCount);
+	m_initialScale = scale;	// FIX: was a duplicated m_physXActorsBuffer.resize(); m_initialScale was never assigned, leaving receive()'s per-split scale uninitialized
+	m_indicesScratch.reserve(splitMaxActorCount);
+
+	// fill initial actor create info
+	for (uint32_t i = 0; i < actorCount; ++i)
+	{
+		PxActorCreateInfo& pxActorInfo = m_newActorCreateInfo[i];
+		pxActorInfo.m_angularVelocity = PxVec3(PxZero);
+		pxActorInfo.m_linearVelocity = PxVec3(PxZero);
+		pxActorInfo.m_transform = pose;
+		pxActorInfo.m_scale = scale;
+	}
+
+	// create first actors in family
+	createActors(m_newActorsBuffer.begin(), m_newActorCreateInfo.begin(), actorCount);
+
+	// listen family for new actors
+	m_tkFamily.addListener(*this);
+
+	m_isSpawned = true;
+
+	return true;
+}
+
+bool ExtPxFamilyImpl::despawn()	// destroys all live PhysX actors; note m_isSpawned is not reset and the family keeps listening
+{
+	NVBLASTEXT_CHECK_ERROR(m_spawnSettings.scene != nullptr, "Family despawn: desc.scene is nullptr", return false);
+
+	auto& actors = m_actorsBuffer;	// snapshot the set: destroyActors mutates m_actors while we iterate the copy
+	actors.resize(m_actors.size());
+	uint32_t i = 0;
+	for (auto it = m_actors.getIterator(); !it.done(); ++it)
+	{
+		actors[i++] = *it;
+	}
+	destroyActors(actors.begin(), actors.size());
+
+	return true;
+}
+
+void ExtPxFamilyImpl::receive(const TkEvent* events, uint32_t eventCount)	// TkEventListener: handles Split events (actor replacement) then JointUpdate events, in that order
+{
+	auto& actorsToDelete = m_actorsBuffer;
+	actorsToDelete.clear();
+	uint32_t totalNewActorsCount = 0;
+
+	for (uint32_t i = 0; i < eventCount; ++i)	// pass 1: gather children from all Split events into the preallocated buffers (sized in spawn())
+	{
+		const TkEvent& e = events[i];
+		if (e.type == TkEvent::Split)
+		{
+			const TkSplitEvent* splitEvent = e.getPayload<TkSplitEvent>();
+
+			uint32_t newActorsCount = splitEvent->numChildren;
+
+			ExtPxActorImpl* parentActor = nullptr;
+			PxRigidDynamic* parentPxActor = nullptr;
+			if (splitEvent->parentData.userData)	// userData holds the ExtPxActorImpl set at creation; may be null if the parent was never physicalized
+			{
+				parentActor = reinterpret_cast<ExtPxActorImpl*>(splitEvent->parentData.userData);
+				parentPxActor = &parentActor->getPhysXActor();
+			}
+
+			for (uint32_t j = totalNewActorsCount; j < totalNewActorsCount + newActorsCount; ++j)
+			{
+				m_newActorCreateInfo[j].m_transform = parentPxActor ? parentPxActor->getGlobalPose() : m_initialTransform;
+
+				//TODO: Get the current scale of the actor!
+				m_newActorCreateInfo[j].m_scale = m_initialScale;
+
+				m_newActorCreateInfo[j].m_linearVelocity = parentPxActor ? parentPxActor->getLinearVelocity() : PxVec3(PxZero);	// children inherit the parent's velocities
+				m_newActorCreateInfo[j].m_angularVelocity = parentPxActor ? parentPxActor->getAngularVelocity() : PxVec3(PxZero);
+
+				m_newActorsBuffer[j] = splitEvent->children[j - totalNewActorsCount];
+			}
+
+			totalNewActorsCount += newActorsCount;
+
+			if (parentActor)
+			{
+				actorsToDelete.pushBack(parentActor);	// parent is replaced by its children; destroyed below
+			}
+		}
+	}
+
+	destroyActors(actorsToDelete.begin(), actorsToDelete.size());	// destroy parents before creating children (frees actor-count budget)
+	if (totalNewActorsCount > 0)
+	{
+		uint32_t cappedNewActorsCount = totalNewActorsCount;	// enforce the manager's global actor-count limit, if any
+		const uint32_t actorCountLimit = m_manager.getActorCountLimit();
+		const uint32_t totalActorCount = m_manager.getPxActorCount();
+		if (actorCountLimit > 0 && cappedNewActorsCount + totalActorCount > actorCountLimit)
+		{
+			cappedNewActorsCount = actorCountLimit > totalActorCount ? actorCountLimit - totalActorCount : 0;
+		}
+		createActors(m_newActorsBuffer.begin(), m_newActorCreateInfo.begin(), cappedNewActorsCount);
+		m_culledActors.reserve(m_culledActors.size() + totalNewActorsCount - cappedNewActorsCount);
+		for (uint32_t i = cappedNewActorsCount; i < totalNewActorsCount; ++i)	// over-limit TkActors get no physics; released later in postSplitUpdate()
+		{
+			m_culledActors.pushBack(m_newActorsBuffer[i]);
+		}
+		totalNewActorsCount = cappedNewActorsCount;	// In case it's used below
+	}
+
+	for (uint32_t i = 0; i < eventCount; ++i)	// pass 2: joint events, after new actors exist so joints can attach to them
+	{
+		const TkEvent& e = events[i];
+		if (e.type == TkEvent::JointUpdate)
+		{
+			const TkJointUpdateEvent* jointEvent = e.getPayload<TkJointUpdateEvent>();
+			NVBLAST_ASSERT(jointEvent->joint);
+			TkJoint& joint = *jointEvent->joint;
+
+			switch (jointEvent->subtype)
+			{
+			case TkJointUpdateEvent::External:
+				m_manager.createJoint(joint);
+				break;
+			case TkJointUpdateEvent::Changed:
+				m_manager.updateJoint(joint);
+				break;
+			case TkJointUpdateEvent::Unreferenced:
+				m_manager.destroyJoint(joint);
+				joint.release();
+				break;
+			}
+		}
+	}
+}
+
+void ExtPxFamilyImpl::createActors(TkActor** tkActors, const PxActorCreateInfo* pxActorInfos, uint32_t count)	// wraps each TkActor in an ExtPxActorImpl and adds all PxActors to the scene in one batch
+{
+	auto actorsToAdd = m_physXActorsBuffer.begin();	// preallocated in spawn(); count never exceeds splitMaxActorCount
+	for (uint32_t i = 0; i < count; ++i)
+	{
+		ExtPxActorImpl* actor = NVBLASTEXT_NEW(ExtPxActorImpl)(this, tkActors[i], pxActorInfos[i]);
+		m_actors.insert(actor);
+		actorsToAdd[i] = &actor->getPhysXActor();
+		dispatchActorCreated(*actor);
+
+		// Handle incomplete joints
+		auto e = m_manager.m_incompleteJointMultiMap.find(tkActors[i]);	// joints deferred in updateJoint() because this TkActor had no physics actor yet
+		if (e != nullptr)
+		{
+			ExtArray<TkJoint*>::type joints = e->second;	// Copying the array
+			m_manager.m_incompleteJointMultiMap.erase(tkActors[i]);	// erase before updating — updateJoint may re-insert if the other side is still missing
+			for (uint32_t j = 0; j < joints.size(); ++j)
+			{
+				m_manager.updateJoint(*joints[j]);
+			}
+		}
+	}
+	m_spawnSettings.scene->addActors(actorsToAdd, static_cast<uint32_t>(count));	// single batched scene insertion
+}
+
+void ExtPxFamilyImpl::destroyActors(ExtPxActor** actors, uint32_t count)	// batch-removes PxActors from the scene, then destroys the wrappers
+{
+	auto pxActorsToRemove = m_physXActorsBuffer.begin();	// preallocated in spawn()
+	for (uint32_t i = 0; i < count; ++i)
+	{
+		pxActorsToRemove[i] = &actors[i]->getPhysXActor();
+	}
+	m_spawnSettings.scene->removeActors(pxActorsToRemove, static_cast<uint32_t>(count));	// single batched scene removal before individual deletes
+
+	for (uint32_t i = 0; i < count; ++i)
+	{
+		ExtPxActorImpl* actor = (ExtPxActorImpl*)actors[i];
+		m_actors.erase(actor);
+		dispatchActorDestroyed(*actor);	// listeners notified while the actor is still alive
+		NVBLASTEXT_DELETE(actor, ExtPxActorImpl);
+	}
+}
+
+void ExtPxFamilyImpl::dispatchActorCreated(ExtPxActor& actor)	// notifies family listeners first, then the manager's listeners
+{
+	for (ExtPxListener* listener : m_listeners)
+		listener->onActorCreated(*this, actor);
+	m_manager.dispatchActorCreated(*this, actor);
+}
+
+void ExtPxFamilyImpl::dispatchActorDestroyed(ExtPxActor& actor)	// notifies family listeners first, then the manager's listeners
+{
+	for (ExtPxListener* listener : m_listeners)
+		listener->onActorDestroyed(*this, actor);
+	m_manager.dispatchActorDestroyed(*this, actor);
+}
+
+void ExtPxFamilyImpl::postSplitUpdate()	// releases TkActors culled by the actor-count limit in receive(); call once per frame after splits
+{
+	for (auto actor : m_culledActors)
+	{
+		actor->release();
+	}
+	m_culledActors.resize(0);	// keeps capacity for the next frame
+}
+
+
+} // namespace Blast
+} // namespace Nv
diff --git a/NvBlast/sdk/extensions/physx/source/physics/NvBlastExtPxFamilyImpl.h b/NvBlast/sdk/extensions/physx/source/physics/NvBlastExtPxFamilyImpl.h
new file mode 100644
index 0000000..5c90346
--- /dev/null
+++ b/NvBlast/sdk/extensions/physx/source/physics/NvBlastExtPxFamilyImpl.h
@@ -0,0 +1,168 @@
+/*
+* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#ifndef NVBLASTEXTPXFAMILYIMPL_H
+#define NVBLASTEXTPXFAMILYIMPL_H
+
+#include "NvBlastExtPxFamily.h"
+#include "NvBlastExtArray.h"
+#include "NvBlastExtHashSet.h"
+#include "PxTransform.h"
+#include "NvBlastTkEvent.h"
+
+
+using namespace physx;
+
+
+namespace Nv
+{
+namespace Blast
+{
+
+// Forward declarations
+class ExtPxManagerImpl;
+class ExtPxActorImpl;
+struct PxActorCreateInfo;
+
+
+class ExtPxFamilyImpl final : public ExtPxFamily, TkEventListener	// concrete ExtPxFamily: bridges one TkFamily's actors to PhysX rigid bodies
+{
+	NV_NOCOPY(ExtPxFamilyImpl)
+
+public:
+	friend ExtPxActorImpl;	// actors reach into m_manager, m_pxAsset, m_subchunkShapes, m_spawnSettings
+	friend ExtPxManagerImpl;
+
+	//////// ctor ////////
+
+	ExtPxFamilyImpl(ExtPxManagerImpl& manager, TkFamily& tkFamily, const ExtPxAsset& pxAsset);
+	~ExtPxFamilyImpl();
+
+	virtual void							release() override;
+
+
+	//////// ExtPxFamily interface ////////
+
+//	virtual bool							spawn(const PxTransform& pose, const ExtPxSpawnSettings& settings) override;
+	virtual bool							spawn(const physx::PxTransform& pose, const physx::PxVec3& scale, const ExtPxSpawnSettings& settings) override;
+	virtual bool							despawn() override;
+
+
+	virtual uint32_t						getActorCount() const override
+	{
+		return m_actors.size();
+	}
+
+	virtual uint32_t						getActors(ExtPxActor** buffer, uint32_t bufferSize) const override
+	{
+		uint32_t index = 0;
+		for (auto it = const_cast<ExtPxFamilyImpl*>(this)->m_actors.getIterator(); !it.done() && index < bufferSize; ++it)	// const_cast: hash-set iterator is non-const only; iteration does not mutate
+		{
+			buffer[index++] = *it;
+		}
+		return index;	// number of actors written, capped at bufferSize
+	}
+
+	virtual TkFamily&						getTkFamily() const override
+	{
+		return m_tkFamily;
+	}
+
+	virtual const physx::PxShape* const*	getSubchunkShapes() const override
+	{
+		return m_subchunkShapes.begin();	// indexed by asset subchunk index; null entries for subchunks with no live actor
+	}
+
+	virtual const ExtPxAsset&				getPxAsset() const override
+	{
+		return m_pxAsset;
+	}
+
+	virtual void							setMaterial(PxMaterial& material) override
+	{
+		m_spawnSettings.material = &material;	// affects actors created after this call
+	}
+
+	virtual void							setPxShapeDescTemplate(const ExtPxShapeDescTemplate* pxShapeDesc) override
+	{
+		m_pxShapeDescTemplate = pxShapeDesc;	// non-owning; caller keeps the template alive
+	}
+
+	virtual const ExtPxShapeDescTemplate*	getPxShapeDescTemplate() const override
+	{
+		return m_pxShapeDescTemplate;
+	}
+
+	virtual void							setPxActorDesc(const ExtPxActorDescTemplate* pxActorDesc) override
+	{
+		m_pxActorDescTemplate = pxActorDesc;	// non-owning; caller keeps the template alive
+	}
+
+	virtual const ExtPxActorDescTemplate*	getPxActorDesc() const override
+	{
+		return m_pxActorDescTemplate;
+	}
+
+	virtual void							subscribe(ExtPxListener& listener) override
+	{
+		m_listeners.pushBack(&listener);
+	}
+
+	virtual void							unsubscribe(ExtPxListener& listener) override
+	{
+		m_listeners.findAndReplaceWithLast(&listener);	// order of remaining listeners is not preserved
+	}
+
+	virtual void							postSplitUpdate() override;
+
+	//////// TkEventListener interface ////////
+
+	virtual void							receive(const TkEvent* events, uint32_t eventCount) override;
+
+
+	//////// events dispatch ////////
+
+	void									dispatchActorCreated(ExtPxActor& actor);
+	void									dispatchActorDestroyed(ExtPxActor& actor);
+
+
+private:
+	//////// private methods ////////
+
+	void									createActors(TkActor** tkActors, const PxActorCreateInfo* pxActorInfos, uint32_t count);
+	void									destroyActors(ExtPxActor** actors, uint32_t count);
+
+	//////// data ////////
+
+	ExtPxManagerImpl&						m_manager;
+	TkFamily&								m_tkFamily;	// released in the dtor
+	const ExtPxAsset&						m_pxAsset;
+	ExtPxSpawnSettings						m_spawnSettings;	// set in spawn(); scene/material/density for actor creation
+	const ExtPxShapeDescTemplate*			m_pxShapeDescTemplate;	// optional, non-owning
+	const ExtPxActorDescTemplate*			m_pxActorDescTemplate;	// optional, non-owning
+	bool									m_isSpawned;
+	PxTransform								m_initialTransform;	// fallback pose for split children whose parent had no physics actor
+	PxVec3									m_initialScale;	// spawn scale reused for split children
+	ExtHashSet<ExtPxActor*>::type			m_actors;	// live actors owned by this family
+	ExtArray<TkActor*>::type				m_culledActors;	// TkActors denied physics by the actor-count limit; released in postSplitUpdate()
+	ExtInlineArray<ExtPxListener*, 4>::type	m_listeners;
+	ExtArray<PxShape*>::type				m_subchunkShapes;	// indexed by asset subchunk index
+	ExtArray<TkActor*>::type				m_newActorsBuffer;	// scratch buffers below are preallocated in spawn() to avoid allocation in receive()
+	ExtArray<PxActorCreateInfo>::type		m_newActorCreateInfo;
+	ExtArray<PxActor*>::type				m_physXActorsBuffer;
+	ExtArray<ExtPxActor*>::type				m_actorsBuffer;
+	ExtArray<uint32_t>::type				m_indicesScratch;
+};
+
+} // namespace Blast
+} // namespace Nv
+
+
+#endif // ifndef NVBLASTEXTPXFAMILYIMPL_H
diff --git a/NvBlast/sdk/extensions/physx/source/physics/NvBlastExtPxManagerImpl.cpp b/NvBlast/sdk/extensions/physx/source/physics/NvBlastExtPxManagerImpl.cpp
new file mode 100644
index 0000000..42266ee
--- /dev/null
+++ b/NvBlast/sdk/extensions/physx/source/physics/NvBlastExtPxManagerImpl.cpp
@@ -0,0 +1,127 @@
+/*
+* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#include "NvBlastExtPxManagerImpl.h"
+#include "NvBlastExtPxAssetImpl.h"
+#include "NvBlastExtPxActorImpl.h"
+#include "NvBlastExtPxFamilyImpl.h"
+
+#include "NvBlastAssert.h"
+
+#include "NvBlastTkActor.h"
+#include "NvBlastTkFamily.h"
+#include "NvBlastTkGroup.h"
+#include "NvBlastTkJoint.h"
+
+#include "PxRigidDynamic.h"
+#include "PxJoint.h"
+
+
+namespace Nv
+{
+namespace Blast
+{
+
+
+ExtPxManager* ExtPxManager::create(PxPhysics& physics, TkFramework& framework, ExtPxCreateJointFunction createFn, bool useUserData)	// factory for the concrete manager; release via ExtPxManager::release()
+{
+	return NVBLASTEXT_NEW(ExtPxManagerImpl)(physics, framework, createFn, useUserData);
+}
+
+void ExtPxManagerImpl::release()	// self-delete through the extension allocator (matches NVBLASTEXT_NEW in ExtPxManager::create)
+{
+	NVBLASTEXT_DELETE(this, ExtPxManagerImpl);
+}
+
+ExtPxFamily* ExtPxManagerImpl::createFamily(const ExtPxFamilyDesc& desc)	// creates the initial TkActor (and its TkFamily) and wraps the family; nullptr on failure
+{
+	NVBLASTEXT_CHECK_ERROR(desc.pxAsset != nullptr, "Family creation: pxAsset is nullptr.", return nullptr);
+
+	// create tk family
+	TkActorDesc tkActorDesc;
+	(&tkActorDesc)->NvBlastActorDesc::operator=(desc.actorDesc);	// copies only the NvBlastActorDesc base-subobject fields, leaving TkActorDesc extras untouched
+	tkActorDesc.asset = &desc.pxAsset->getTkAsset();
+	TkActor* actor = m_framework.createActor(tkActorDesc);
+	NVBLASTEXT_CHECK_ERROR(actor != nullptr, "Family creation: tk actor creation failed.", return nullptr);
+
+	ExtPxFamilyImpl* family = NVBLASTEXT_NEW(ExtPxFamilyImpl)(*this, actor->getFamily(), *desc.pxAsset);
+
+	if (desc.group)	// optionally place the first actor in the caller's processing group
+	{
+		desc.group->addActor(*actor);
+	}
+
+	return family;
+}
+
+bool ExtPxManagerImpl::createJoint(TkJoint& joint)	// creates a PxJoint via the user callback; true only if a new PxJoint was made and stored in joint.userData
+{
+	if (!joint.userData && m_createJointFn)	// skip if already created or no factory callback was provided
+	{
+		const TkJointData data = joint.getData();
+		ExtPxActorImpl* pxActor0 = data.actors[0] != nullptr ? reinterpret_cast<ExtPxActorImpl*>(data.actors[0]->userData) : nullptr;	// TkActor::userData holds the ExtPxActor, set at actor creation
+		ExtPxActorImpl* pxActor1 = data.actors[1] != nullptr ? reinterpret_cast<ExtPxActorImpl*>(data.actors[1]->userData) : nullptr;
+		NVBLAST_ASSERT(pxActor0 || pxActor1);	// a joint with neither side physicalized should not reach here
+		PxTransform lf0(data.attachPositions[0]);
+		PxTransform lf1(data.attachPositions[1]);
+		PxJoint* pxJoint = m_createJointFn(pxActor0, lf0, pxActor1, lf1, m_physics, joint);
+		if (pxJoint)
+		{
+			joint.userData = pxJoint;	// PxJoint ownership tracked through joint.userData; released in destroyJoint()
+			return true;
+		}
+	}
+	return false;
+}
+
+void ExtPxManagerImpl::updateJoint(TkJoint& joint)	// re-binds an existing PxJoint to the current actors; defers if a referenced TkActor has no physics actor yet
+{
+	if (joint.userData)	// only joints that already have a PxJoint (see createJoint)
+	{
+		const TkJointData& data = joint.getData();
+		ExtPxActorImpl* pxActors[2];
+		for (int i = 0; i < 2; ++i)
+		{
+			if (data.actors[i] != nullptr)
+			{
+				pxActors[i] = reinterpret_cast<ExtPxActorImpl*>(data.actors[i]->userData);
+				if (pxActors[i] == nullptr)	// TkActor exists but its ExtPxActor hasn't been created yet
+				{
+					ExtArray<TkJoint*>::type& joints = m_incompleteJointMultiMap[data.actors[i]];	// operator[] creates the entry on first use
+					NVBLAST_ASSERT(joints.find(&joint) == joints.end());	// a joint should not be queued twice for the same actor
+					joints.pushBack(&joint);
+					return; // Wait until the TkActor is received to create this joint
+				}
+			}
+			else
+			{
+				pxActors[i] = nullptr;	// null TkActor means this side attaches to the world
+			}
+		}
+		NVBLAST_ASSERT(pxActors[0] || pxActors[1]);
+		PxJoint* pxJoint = reinterpret_cast<PxJoint*>(joint.userData);
+		pxJoint->setActors(pxActors[0] ? &pxActors[0]->getPhysXActor() : nullptr, pxActors[1] ? &pxActors[1]->getPhysXActor() : nullptr);
+	}
+}
+
+void ExtPxManagerImpl::destroyJoint(TkJoint& joint)	// releases the PxJoint held in joint.userData, if any; safe to call on joints that never got one
+{
+	if (joint.userData)
+	{
+		PxJoint* pxJoint = reinterpret_cast<PxJoint*>(joint.userData);
+		pxJoint->release();
+		joint.userData = nullptr;	// null so repeated destroy/update calls are no-ops
+	}
+}
+
+
+
+} // namespace Blast
+} // namespace Nv
diff --git a/NvBlast/sdk/extensions/physx/source/physics/NvBlastExtPxManagerImpl.h b/NvBlast/sdk/extensions/physx/source/physics/NvBlastExtPxManagerImpl.h
new file mode 100644
index 0000000..1f5e510
--- /dev/null
+++ b/NvBlast/sdk/extensions/physx/source/physics/NvBlastExtPxManagerImpl.h
@@ -0,0 +1,202 @@
+/*
+* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#ifndef NVBLASTEXTPXMANAGERIMPL_H
+#define NVBLASTEXTPXMANAGERIMPL_H
+
+#include "NvBlastExtPxManager.h"
+#include "NvBlastExtArray.h"
+#include "NvBlastExtHashMap.h"
+#include "NvBlastExtPxListener.h"
+#include "NvBlastExtPxFamily.h"
+
+#include "PxRigidDynamic.h"
+
+
+using namespace physx;
+
+
+namespace Nv
+{
+namespace Blast
+{
+
+// Forward declarations
+class TkActor;
+
/**
Concrete implementation of ExtPxManager.

Bridges a TkFramework and a PxPhysics instance: maintains the mappings
TkFamily -> ExtPxFamily and PxRigidDynamic -> ExtPxActor, dispatches actor
lifecycle events to subscribed listeners, and creates/updates PhysX joints
for TkJoints through a user-supplied ExtPxCreateJointFunction.
*/
class ExtPxManagerImpl final : public ExtPxManager
{
    NV_NOCOPY(ExtPxManagerImpl)

public:
    friend class ExtPxActorImpl;
    friend class ExtPxFamilyImpl;

    // createFn may be nullptr; it can be supplied later via setCreateJointFunction().
    // usePxUserData: when true, actor (un)registration also writes/clears
    // PxRigidDynamic::userData to point back at the ExtPxActor wrapper.
    ExtPxManagerImpl(PxPhysics& physics, TkFramework&framework, ExtPxCreateJointFunction createFn, bool usePxUserData)
        : m_physics(physics), m_framework(framework), m_createJointFn(createFn), m_usePxUserData(usePxUserData), m_actorCountLimit(0)
    {
    }

    ~ExtPxManagerImpl()
    {
    }

    virtual void release() override;


    //////// interface ////////

    virtual ExtPxFamily* createFamily(const ExtPxFamilyDesc& desc) override;

    virtual bool createJoint(TkJoint& joint) override;

    virtual void destroyJoint(TkJoint& joint) override;

    virtual void setCreateJointFunction(ExtPxCreateJointFunction createFn) override
    {
        m_createJointFn = createFn;
    }

    virtual uint32_t getFamilyCount() const override
    {
        return m_tkFamiliesMap.size();
    }

    // Copies up to bufferSize family pointers into buffer; returns the number written.
    virtual uint32_t getFamilies(ExtPxFamily** buffer, uint32_t bufferSize) const override
    {
        uint32_t index = 0;
        // const_cast: the hash map exposes only a non-const getIterator(); iteration
        // does not modify the map.
        for (auto it = const_cast<ExtPxManagerImpl*>(this)->m_tkFamiliesMap.getIterator(); !it.done() && index < bufferSize; ++it)
        {
            buffer[index++] = it->second;
        }
        return index;
    }

    // Returns nullptr if the TkFamily has no registered physics family.
    virtual ExtPxFamily* getFamilyFromTkFamily(TkFamily& family) const override
    {
        auto entry = m_tkFamiliesMap.find(&family);
        return entry != nullptr ? entry->second : nullptr;
    }

    // Returns nullptr if the PhysX actor is not managed by this manager.
    virtual ExtPxActor* getActorFromPhysXActor(const PxRigidDynamic& pxActor) const override
    {
        auto it = m_physXActorsMap.find(&pxActor);
        return it != nullptr ? it->second : nullptr;
    }

    virtual PxPhysics& getPhysics() const override
    {
        return m_physics;
    }

    virtual TkFramework& getFramework() const override
    {
        return m_framework;
    }

    virtual bool isPxUserDataUsed() const override
    {
        return m_usePxUserData;
    }

    virtual void subscribe(ExtPxListener& listener) override
    {
        m_listeners.pushBack(&listener);
    }

    virtual void unsubscribe(ExtPxListener& listener) override
    {
        // Removes by replacing with the last element; listener ordering is not preserved.
        m_listeners.findAndReplaceWithLast(&listener);
    }

    virtual void setActorCountLimit(uint32_t limit) override
    {
        m_actorCountLimit = limit;
    }

    virtual uint32_t getActorCountLimit() override
    {
        return m_actorCountLimit;
    }

    virtual uint32_t getPxActorCount() const override
    {
        return m_physXActorsMap.size();
    }


    //////// internal public methods ////////

    // Associates a PhysX actor with its ExtPxActor wrapper (optionally mirroring
    // the wrapper into PxRigidDynamic::userData).
    void registerActor(PxRigidDynamic* pxActor, ExtPxActor* actor)
    {
        if (m_usePxUserData)
        {
            pxActor->userData = actor;
        }
        m_physXActorsMap[pxActor] = actor;
    }

    void unregisterActor(PxRigidDynamic* pxActor)
    {
        if (m_usePxUserData)
        {
            pxActor->userData = nullptr;
        }
        m_physXActorsMap.erase(pxActor);
    }

    void registerFamily(ExtPxFamily& family)
    {
        m_tkFamiliesMap[&family.getTkFamily()] = &family;
    }

    void unregisterFamily(ExtPxFamily& family)
    {
        m_tkFamiliesMap.erase(&family.getTkFamily());
    }

    // Rebinds (or defers) the PhysX joint backing the given TkJoint; see the .cpp.
    void updateJoint(TkJoint& joint);


    //////// events dispatch ////////

    void dispatchActorCreated(ExtPxFamily& family, ExtPxActor& actor)
    {
        for (ExtPxListener* listener : m_listeners)
            listener->onActorCreated(family, actor);
    }

    void dispatchActorDestroyed(ExtPxFamily& family, ExtPxActor&actor)
    {
        for (ExtPxListener* listener : m_listeners)
            listener->onActorDestroyed(family, actor);
    }


private:

    //////// data ////////

    PxPhysics& m_physics;                                   // PhysX SDK handed to the joint-creation callback
    TkFramework& m_framework;                               // Blast toolkit framework
    ExtPxCreateJointFunction m_createJointFn;               // User hook that builds a PxJoint for a TkJoint (may be nullptr)
    bool m_usePxUserData;                                   // Mirror ExtPxActor pointers into PxRigidDynamic::userData
    ExtInlineArray<ExtPxListener*, 8>::type m_listeners;    // Subscribed lifecycle listeners
    ExtHashMap<const PxRigidDynamic*, ExtPxActor*>::type m_physXActorsMap;   // PhysX actor -> wrapper
    ExtHashMap<TkFamily*, ExtPxFamily*>::type m_tkFamiliesMap;               // TkFamily -> physics family
    ExtHashMap<TkActor*, ExtArray<TkJoint*>::type >::type m_incompleteJointMultiMap; // Joints waiting for a TkActor's PhysX actor
    uint32_t m_actorCountLimit;                             // presumably 0 means "no limit" — TODO confirm against usage
};
+
+} // namespace Blast
+} // namespace Nv
+
+
+#endif // ifndef NVBLASTEXTPXMANAGERIMPL_H
diff --git a/NvBlast/sdk/extensions/physx/source/sync/NvBlastExtSync.cpp b/NvBlast/sdk/extensions/physx/source/sync/NvBlastExtSync.cpp
new file mode 100644
index 0000000..5f018d9
--- /dev/null
+++ b/NvBlast/sdk/extensions/physx/source/sync/NvBlastExtSync.cpp
@@ -0,0 +1,235 @@
+/*
+* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+
+#include "NvBlastExtSync.h"
+#include "NvBlastAssert.h"
+#include "NvBlast.h"
+#include "NvBlastExtDefs.h"
+#include "NvBlastExtPxManager.h"
+#include "NvBlastExtPxFamily.h"
+#include "NvBlastExtPxActor.h"
+#include "PxRigidDynamic.h"
+
+#include <chrono>
+using namespace std::chrono;
+
+namespace Nv
+{
+namespace Blast
+{
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+// ExtSyncImpl Definition
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
/**
Default implementation of ExtSync.

Listens for Tk fracture events and records them — together with explicitly
requested family/physics snapshots — into an in-memory event buffer that can
be acquired, transmitted, and re-applied on another TkFramework instance.
*/
class ExtSyncImpl : public ExtSync
{
    NV_NOCOPY(ExtSyncImpl)

public:
    //////// ctor ////////

    ExtSyncImpl();

    ~ExtSyncImpl();


    //////// TkEventListener interface ////////

    // Records FractureCommand events into the sync buffer.
    virtual void receive(const TkEvent* events, uint32_t eventCount) override;


    //////// ExtSync interface ////////

    virtual void release() override;

    // Record a snapshot of the given family (low-level state; the ExtPxFamily
    // overload additionally records actor poses).
    virtual void syncFamily(const TkFamily& family) override;
    virtual void syncFamily(const ExtPxFamily& family) override;

    virtual uint32_t getSyncBufferSize() const override;
    virtual void acquireSyncBuffer(const ExtSyncEvent*const*& buffer, uint32_t& size) const override;
    virtual void releaseSyncBuffer() override;

    virtual void applySyncBuffer(TkFramework& framework, const ExtSyncEvent** buffer, uint32_t size, TkGroup* groupForNewActors, ExtPxManager* manager) override;


private:
    //////// data ////////

    // Owned event buffer; entries are allocated with NVBLASTEXT_NEW and freed
    // in releaseSyncBuffer().
    std::vector<ExtSyncEvent*> m_syncEvents;
};
+
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+// ExtSyncEvent Implementation
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
// Self-deletion entry point for sync events handed out via acquireSyncBuffer().
void ExtSyncEvent::release()
{
    NVBLASTEXT_DELETE(this, ExtSyncEvent);
}
+
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+// ExtSyncImpl Implementation
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
// Factory for the default ExtSync implementation.
ExtSync* ExtSync::create()
{
    return NVBLASTEXT_NEW(ExtSyncImpl) ();
}

// Counterpart of ExtSync::create(); destroys this instance.
void ExtSyncImpl::release()
{
    NVBLASTEXT_DELETE(this, ExtSyncImpl);
}
+
ExtSyncImpl::ExtSyncImpl()
{
}

// Frees any events still held in the sync buffer.
ExtSyncImpl::~ExtSyncImpl()
{
    releaseSyncBuffer();
}
+
+void ExtSyncImpl::receive(const TkEvent* events, uint32_t eventCount)
+{
+ for (uint32_t i = 0; i < eventCount; ++i)
+ {
+ const TkEvent& tkEvent = events[i];
+ if (tkEvent.type == TkEvent::FractureCommand)
+ {
+ const TkFractureCommands* fracEvent = tkEvent.getPayload<TkFractureCommands>();
+ ExtSyncEventFracture* e = NVBLASTEXT_NEW(ExtSyncEventFracture) ();
+ e->timestamp = duration_cast<milliseconds>(steady_clock::now().time_since_epoch()).count();
+ e->familyID = fracEvent->tkActorData.family->getID();
+ e->bondFractures.resize(fracEvent->buffers.bondFractureCount);
+ e->chunkFractures.resize(fracEvent->buffers.chunkFractureCount);
+ memcpy(e->bondFractures.data(), fracEvent->buffers.bondFractures, e->bondFractures.size() * sizeof(NvBlastBondFractureData));
+ memcpy(e->chunkFractures.data(), fracEvent->buffers.chunkFractures, e->chunkFractures.size() * sizeof(NvBlastChunkFractureData));
+ m_syncEvents.push_back(e);
+ }
+ }
+}
+
+void ExtSyncImpl::syncFamily(const TkFamily& family)
+{
+ ExtSyncEventFamilySync* e = NVBLASTEXT_NEW(ExtSyncEventFamilySync) ();
+ e->timestamp = duration_cast<milliseconds>(steady_clock::now().time_since_epoch()).count();
+ e->familyID = family.getID();
+ const NvBlastFamily* familyLL = family.getFamilyLL();
+ const uint32_t size = NvBlastFamilyGetSize(familyLL, NvBlastTkFrameworkGet()->getLogFn());
+ e->family = std::vector<char>((char*)familyLL, (char*)familyLL + size);
+ m_syncEvents.push_back(e);
+}
+
// Records both the low-level family snapshot (via the TkFamily overload) and a
// physics snapshot containing the global pose of every actor in the family.
void ExtSyncImpl::syncFamily(const ExtPxFamily& family)
{
    const TkFamily& tkFamily = family.getTkFamily();

    syncFamily(tkFamily);

    ExtSyncEventPhysicsSync* e = NVBLASTEXT_NEW(ExtSyncEventPhysicsSync) ();
    e->timestamp = duration_cast<milliseconds>(steady_clock::now().time_since_epoch()).count();
    e->familyID = tkFamily.getID();
    std::vector<ExtPxActor*> actors(family.getActorCount());
    family.getActors(actors.data(), static_cast<uint32_t>(actors.size()));
    e->data.reserve(actors.size());
    for (ExtPxActor* actor : actors)
    {
        ExtSyncEventPhysicsSync::ActorData data;
        data.transform = actor->getPhysXActor().getGlobalPose();
        // TkActor index is used to re-match actors when the buffer is applied.
        data.actorIndex = actor->getTkActor().getIndex();
        e->data.push_back(data);
    }

    m_syncEvents.push_back(e);
}
+
// Number of events currently buffered.
uint32_t ExtSyncImpl::getSyncBufferSize() const
{
    return static_cast<uint32_t>(m_syncEvents.size());
}

// Exposes the internal event buffer without transferring ownership; the pointers
// remain valid until releaseSyncBuffer() is called (or more events are recorded).
void ExtSyncImpl::acquireSyncBuffer(const ExtSyncEvent* const*& buffer, uint32_t& size) const
{
    buffer = m_syncEvents.data();
    size = static_cast<uint32_t>(m_syncEvents.size());
}
+
+void ExtSyncImpl::releaseSyncBuffer()
+{
+ for (uint32_t i = 0; i < m_syncEvents.size(); ++i)
+ {
+ NVBLASTEXT_DELETE(m_syncEvents[i], ExtSyncEvent);
+ }
+ m_syncEvents.clear();
+}
+
+void ExtSyncImpl::applySyncBuffer(TkFramework& framework, const ExtSyncEvent** buffer, uint32_t size, TkGroup* groupForNewActors, ExtPxManager* manager)
+{
+ const TkType* familyType = framework.getType(TkTypeIndex::Family);
+ NVBLAST_ASSERT(familyType);
+
+ for (uint32_t i = 0; i < size; ++i)
+ {
+ const ExtSyncEvent* e = buffer[i];
+ const NvBlastID& id = e->familyID;
+ TkIdentifiable* object = framework.findObjectByID(id);
+ if (object && object->getType() == *familyType)
+ {
+ TkFamily* family = static_cast<TkFamily*>(object);
+
+ if (e->type == ExtSyncEventFracture::EVENT_TYPE)
+ {
+ const ExtSyncEventFracture* fractureEvent = e->getEvent<ExtSyncEventFracture>();
+ const NvBlastFractureBuffers commands =
+ {
+ static_cast<uint32_t>(fractureEvent->bondFractures.size()),
+ static_cast<uint32_t>(fractureEvent->chunkFractures.size()),
+ const_cast<NvBlastBondFractureData*>(fractureEvent->bondFractures.data()),
+ const_cast<NvBlastChunkFractureData*>(fractureEvent->chunkFractures.data())
+ };
+ family->applyFracture(&commands);
+ }
+ else if (e->type == ExtSyncEventFamilySync::EVENT_TYPE)
+ {
+ const ExtSyncEventFamilySync* familyEvent = e->getEvent<ExtSyncEventFamilySync>();
+ family->reinitialize((NvBlastFamily*)familyEvent->family.data(), groupForNewActors);
+ }
+ else if (e->type == ExtSyncEventPhysicsSync::EVENT_TYPE && manager)
+ {
+ const ExtSyncEventPhysicsSync* physicsEvent = e->getEvent<ExtSyncEventPhysicsSync>();
+ ExtPxFamily* pxFamily = manager->getFamilyFromTkFamily(*family);
+ if (pxFamily)
+ {
+ std::vector<ExtPxActor*> actors(pxFamily->getActorCount());
+ pxFamily->getActors(actors.data(), static_cast<uint32_t>(actors.size()));
+
+ for (auto data : physicsEvent->data)
+ {
+ for (ExtPxActor* physicsaActor : actors)
+ {
+ if (data.actorIndex == physicsaActor->getTkActor().getIndex())
+ {
+ physicsaActor->getPhysXActor().setGlobalPose(data.transform);
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
+} // namespace Blast
+} // namespace Nv
diff --git a/NvBlast/sdk/extensions/serialization/include/NvBlastExtSerializationInterface.h b/NvBlast/sdk/extensions/serialization/include/NvBlastExtSerializationInterface.h
new file mode 100644
index 0000000..e4f27e0
--- /dev/null
+++ b/NvBlast/sdk/extensions/serialization/include/NvBlastExtSerializationInterface.h
@@ -0,0 +1,38 @@
+/*
+* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+/*
+ Include this file to get the C interface to serialization for all asset types (LL, Tk and Ext)
+*/
+#pragma once
+#include <ostream>
+#include "NvBlastTkAsset.h"
+#include "NvBlastExtPxAsset.h"
+
+#include "NvBlastExtSerializationLLInterface.h"
+
+namespace physx
+{
+ class PxPhysics;
+}
+
+NVBLAST_API void setPhysXSDK(physx::PxPhysics* physXSDK);
+
+NVBLAST_API Nv::Blast::TkAsset* deserializeTkAsset(const unsigned char* input, uint32_t size);
+NVBLAST_API Nv::Blast::TkAsset* deserializeTkAssetFromStream(std::istream &inputStream);
+NVBLAST_API bool serializeTkAssetIntoStream(const Nv::Blast::TkAsset *asset, std::ostream &outputStream);
+NVBLAST_API bool serializeTkAssetIntoNewBuffer(const Nv::Blast::TkAsset *asset, unsigned char **outBuffer, uint32_t &outSize);
+NVBLAST_API bool serializeTkAssetIntoExistingBuffer(const Nv::Blast::TkAsset *asset, unsigned char *buffer, uint32_t maxSize, uint32_t &usedSize);
+
+NVBLAST_API Nv::Blast::ExtPxAsset* deserializeExtPxAsset(const unsigned char* input, uint32_t size);
+NVBLAST_API Nv::Blast::ExtPxAsset* deserializeExtPxAssetFromStream(std::istream &inputStream);
+NVBLAST_API bool serializeExtPxAssetIntoStream(const Nv::Blast::ExtPxAsset *asset, std::ostream &outputStream);
+NVBLAST_API bool serializeExtPxAssetIntoNewBuffer(const Nv::Blast::ExtPxAsset *asset, unsigned char **outBuffer, uint32_t &outSize);
+NVBLAST_API bool serializeExtPxAssetIntoExistingBuffer(const Nv::Blast::ExtPxAsset *asset, unsigned char *buffer, uint32_t maxSize, uint32_t &usedSize);
diff --git a/NvBlast/sdk/extensions/serialization/include/NvBlastExtSerializationLLInterface.h b/NvBlast/sdk/extensions/serialization/include/NvBlastExtSerializationLLInterface.h
new file mode 100644
index 0000000..d21c7ca
--- /dev/null
+++ b/NvBlast/sdk/extensions/serialization/include/NvBlastExtSerializationLLInterface.h
@@ -0,0 +1,43 @@
+/*
+* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+/*
+Include this file to access the C API for serialization, for Low Level assets only.
+
+For Serialization of Tk and Ext assets, include only NvBlastExtSerializationInterface.h, which will include this file as well.
+
+*/
+#pragma once
+#include <ostream>
+#include "NvBlastPreprocessor.h"
+#include "NvBlastTypes.h"
+
+#include "NvBlastExtGlobals.h"
+
+/*
+ Set a global NvBlastAlloc signature allocation function that the deserialization will use when required.
+
+ NOTE: This will NOT be used when using the combined serialization library, as it will use the TkFramework's allocation and logging
+*/
+NVBLAST_API void setAllocator(NvBlastExtAlloc alloc);
+
+/*
+ Set a global NvBlastLog signature logging function that the library will use when required.
+
+ NOTE: This will NOT be used when using the combined serialization library, as it will use the TkFramework's allocation and logging
+*/
+NVBLAST_API void setLog(NvBlastLog log);
+
+
+NVBLAST_API NvBlastAsset* deserializeAsset(const unsigned char* input, uint32_t size);
+NVBLAST_API NvBlastAsset* deserializeAssetFromStream(std::istream &inputStream);
+NVBLAST_API bool serializeAssetIntoStream(const NvBlastAsset *asset, std::ostream &outputStream);
+NVBLAST_API bool serializeAssetIntoNewBuffer(const NvBlastAsset *asset, unsigned char **outBuffer, uint32_t &outSize);
+NVBLAST_API bool serializeAssetIntoExistingBuffer(const NvBlastAsset *asset, unsigned char *buffer, uint32_t maxSize, uint32_t &usedSize);
diff --git a/NvBlast/sdk/extensions/serialization/source/BlastSerialization.capn b/NvBlast/sdk/extensions/serialization/source/BlastSerialization.capn
new file mode 100644
index 0000000..49b1dbb
--- /dev/null
+++ b/NvBlast/sdk/extensions/serialization/source/BlastSerialization.capn
@@ -0,0 +1,162 @@
+@0xaffe4498f275ee58;
+
+using Cxx = import "/capnp/c++.capnp";
+
+$Cxx.namespace("Nv::Blast::Serialization");
+
+struct Asset
+{
+ header @0 :NvBlastDataBlock;
+
+ iD @1 :UUID;
+
+ chunkCount @2 :UInt32;
+
+ graph @3 :NvBlastSupportGraph;
+
+ leafChunkCount @4 :UInt32;
+
+ firstSubsupportChunkIndex @5 :UInt32;
+
+ bondCount @6 :UInt32;
+
+ chunks @7: List(NvBlastChunk);
+
+ bonds @8: List(NvBlastBond);
+
+ subtreeLeafChunkCounts @9: List(UInt32);
+
+ chunkToGraphNodeMap @10: List(UInt32);
+}
+
+struct TkAsset
+{
+ assetLL @0 :Asset;
+
+ jointDescs @1 :List(TkAssetJointDesc);
+
+}
+
+struct ExtPxAsset
+{
+ asset @0 :TkAsset;
+ chunks @1 :List(ExtPxChunk);
+ subchunks @2 :List(ExtPxSubchunk);
+}
+
+struct ExtPxChunk
+{
+ firstSubchunkIndex @0 :UInt32;
+ subchunkCount @1 :UInt32;
+ isStatic @2 :Bool;
+}
+
+struct ExtPxSubchunk
+{
+ transform @0 :PxTransform;
+ geometry @1 :PxConvexMeshGeometry;
+}
+
+struct PxConvexMeshGeometry
+{
+ scale @0 :PxMeshScale;
+ convexMesh @1 :Data;
+ meshFlags @2 :UInt8;
+
+ enum Type
+ {
+ eSPHERE @0;
+ ePLANE @1;
+ eCAPSULE @2;
+ eBOX @3;
+ eCONVEXMESH @4;
+ eTRIANGLEMESH @5;
+ eHEIGHTFIELD @6;
+ }
+
+ type @3 :Type;
+}
+
+struct NvBlastDataBlock
+{
+ enum Type
+ {
+ assetDataBlock @0;
+ instanceDataBlock @1;
+ }
+
+ dataType @0 :Type;
+
+ formatVersion @1 :UInt32;
+
+ size @2 :UInt32;
+}
+
+struct NvBlastChunk
+{
+ centroid @0 :List(Float32);
+
+ volume @1 :Float32;
+
+ parentChunkIndex @2 :UInt32;
+ firstChildIndex @3 :UInt32;
+ childIndexStop @4 :UInt32;
+ userData @5 :UInt32;
+}
+
+struct NvBlastBond
+{
+ normal @0 :List(Float32);
+ area @1 :Float32;
+ centroid @2 :List(Float32);
+ userData @3 :UInt32;
+}
+
+struct TkAssetJointDesc
+{
+ nodeIndices @0 :List(UInt32);
+ attachPositions @1 :List(PxVec3);
+}
+
+struct PxVec3
+{
+ x @0 :Float32;
+ y @1 :Float32;
+ z @2 :Float32;
+}
+
+struct PxQuat
+{
+ x @0 :Float32;
+ y @1 :Float32;
+ z @2 :Float32;
+ w @3 :Float32;
+}
+
+struct PxMeshScale
+{
+ scale @0 :PxVec3;
+ rotation @1 :PxQuat;
+}
+
+struct PxTransform
+{
+ q @0 :PxQuat;
+ p @1 :PxVec3;
+}
+
+struct NvBlastSupportGraph
+{
+ nodeCount @0 : UInt32;
+
+ chunkIndices @1 : List(UInt32);
+ adjacencyPartition @2 : List(UInt32);
+ adjacentNodeIndices @3 : List(UInt32);
+ adjacentBondIndices @4 : List(UInt32);
+}
+
+struct UUID
+{
+ value @0 : Data;
+}
+
diff --git a/NvBlast/sdk/extensions/serialization/source/DTO/AssetDTO.cpp b/NvBlast/sdk/extensions/serialization/source/DTO/AssetDTO.cpp
new file mode 100644
index 0000000..8d035fc
--- /dev/null
+++ b/NvBlast/sdk/extensions/serialization/source/DTO/AssetDTO.cpp
@@ -0,0 +1,187 @@
+/*
+* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#include "AssetDTO.h"
+#include "NvBlastIDDTO.h"
+#include "NvBlastChunkDTO.h"
+#include "NvBlastBondDTO.h"
+#include "NvBlastAsset.h"
+#include "NvBlastExtSerializationLLImpl.h"
+#include "NvBlastExtGlobals.h"
+
+#if !defined(BLAST_LL_ALLOC)
+#include "NvBlastExtAllocator.h"
+#endif
+
+namespace Nv
+{
+ namespace Blast
+ {
+ bool AssetDTO::serialize(Nv::Blast::Serialization::Asset::Builder builder, const Nv::Blast::Asset * poco)
+ {
+ NvBlastIDDTO::serialize(builder.initID(), &poco->m_ID);
+
+ builder.setLeafChunkCount(poco->m_leafChunkCount);
+
+ builder.setFirstSubsupportChunkIndex(poco->m_firstSubsupportChunkIndex);
+
+ capnp::List<Nv::Blast::Serialization::NvBlastChunk>::Builder chunks = builder.initChunks(poco->m_chunkCount);
+
+ builder.setChunkCount(poco->m_chunkCount);
+
+ NVBLAST_ASSERT_WITH_MESSAGE(builder.getChunkCount() == poco->m_chunkCount, "WTF");
+
+ for (uint32_t i = 0; i < poco->m_chunkCount; i++)
+ {
+ NvBlastChunk& chunk = poco->getChunks()[i];
+
+ NvBlastChunkDTO::serialize(chunks[i], &chunk);
+ }
+
+ NVBLAST_ASSERT_WITH_MESSAGE(builder.getChunkCount() == poco->m_chunkCount, "WTF");
+
+ capnp::List<Nv::Blast::Serialization::NvBlastBond>::Builder bonds = builder.initBonds(poco->m_bondCount);
+
+ builder.setBondCount(poco->m_bondCount);
+
+ for (uint32_t i = 0; i < poco->m_bondCount; i++)
+ {
+ NvBlastBond& bond = poco->getBonds()[i];
+
+ NvBlastBondDTO::serialize(bonds[i], &bond);
+ }
+
+ kj::ArrayPtr<uint32_t> stlcArray(poco->getSubtreeLeafChunkCounts(), poco->m_chunkCount);
+ builder.initSubtreeLeafChunkCounts(poco->m_chunkCount);
+ builder.setSubtreeLeafChunkCounts(stlcArray);
+
+ kj::ArrayPtr<uint32_t> ctgnArray(poco->getChunkToGraphNodeMap(), poco->m_chunkCount);
+ builder.setChunkToGraphNodeMap(ctgnArray);
+
+ Nv::Blast::Serialization::NvBlastSupportGraph::Builder graphBulder = builder.initGraph();
+
+ graphBulder.setNodeCount(poco->m_graph.m_nodeCount);
+
+ uint32_t* ciPtr = poco->m_graph.getChunkIndices();
+
+ kj::ArrayPtr<const uint32_t> ciArray(ciPtr, poco->m_graph.m_nodeCount);
+ graphBulder.setChunkIndices(ciArray);
+
+ kj::ArrayPtr<const uint32_t> adjPart(poco->m_graph.getAdjacencyPartition(), poco->m_graph.m_nodeCount + 1);
+ graphBulder.setAdjacencyPartition(adjPart);
+
+ NVBLAST_ASSERT(graphBulder.getAdjacencyPartition().size() == poco->m_graph.m_nodeCount + 1);
+
+ kj::ArrayPtr<const uint32_t> nodeIndices(poco->m_graph.getAdjacentNodeIndices(), poco->m_bondCount * 2);
+ graphBulder.setAdjacentNodeIndices(nodeIndices);
+
+ NVBLAST_ASSERT(graphBulder.getAdjacentNodeIndices().size() == poco->m_bondCount * 2);
+
+ kj::ArrayPtr<const uint32_t> bondIndices(poco->m_graph.getAdjacentBondIndices(), poco->m_bondCount * 2);
+ graphBulder.setAdjacentBondIndices(bondIndices);
+
+ return true;
+ }
+
        // Allocates a fresh low-level asset and populates it from the serialized reader.
        // Returns nullptr if population fails.
        Nv::Blast::Asset* AssetDTO::deserialize(Nv::Blast::Serialization::Asset::Reader reader)
        {
            NvBlastID EmptyId = {};

            // Default to the globally registered allocator/logger; the combined
            // (non-LL) build overrides both with the TkFramework's versions below.
            NvBlastExtAlloc allocFn = gAlloc;
            NvBlastLog logFn = gLog;

#if !defined(BLAST_LL_ALLOC)
            allocFn = ExtAllocator::alignedAlloc16;
            logFn = NvBlastTkFrameworkGet()->getLogFn();
#endif

            // NOTE(review): the buffer is sized from the Cap'n Proto message footprint
            // (total words * 8 bytes), not from a Blast asset-memory-size query —
            // confirm this is always >= the memory initializeAsset requires.
            void* mem = allocFn(reader.totalSize().wordCount * sizeof(uint64_t));

            auto asset = Nv::Blast::initializeAsset(mem, EmptyId, reader.getChunkCount(), reader.getGraph().getNodeCount(), reader.getLeafChunkCount(), reader.getFirstSubsupportChunkIndex(), reader.getBondCount(),
                logFn);

            bool result = deserializeInto(reader, asset);

            return result ? asset : nullptr;
        }
+
        // Copies every serialized field of the reader into an already-initialized
        // asset (ID, bonds, chunks, per-chunk tables, and the support graph).
        // Assumes 'poco' was sized for the reader's counts (see deserialize()).
        bool AssetDTO::deserializeInto(Nv::Blast::Serialization::Asset::Reader reader, Nv::Blast::Asset * poco)
        {
            NvBlastIDDTO::deserializeInto(reader.getID(), &poco->m_ID);

            NvBlastBond* bonds = poco->getBonds();

            uint32_t bondCount = reader.getBondCount();
            for (uint32_t i = 0; i < bondCount; i++)
            {
                auto bondReader = reader.getBonds()[i];

                NvBlastBondDTO::deserializeInto(bondReader, &bonds[i]);
            }

            NvBlastChunk* chunks = poco->getChunks();

            uint32_t chunkCount = reader.getChunkCount();
            for (uint32_t i = 0; i < chunkCount; i++)
            {
                auto chunkReader = reader.getChunks()[i];

                NvBlastChunkDTO::deserializeInto(chunkReader, &chunks[i]);
            }

            poco->m_graph.m_nodeCount = reader.getGraph().getNodeCount();

            // Per-chunk subtree leaf counts (one entry per chunk).
            NVBLAST_ASSERT(reader.getSubtreeLeafChunkCounts().size() == poco->m_chunkCount);
            for (uint32_t i = 0; i < poco->m_chunkCount; i++)
            {
                poco->getSubtreeLeafChunkCounts()[i] = reader.getSubtreeLeafChunkCounts()[i];
            }

            // Chunk -> support-graph-node mapping.
            for (uint32_t i = 0; i < chunkCount; i++)
            {
                poco->getChunkToGraphNodeMap()[i] = reader.getChunkToGraphNodeMap()[i];
            }

            // Support graph: chunk indices per node.
            uint32_t* ciPtr = poco->m_graph.getChunkIndices();

            NVBLAST_ASSERT(reader.getGraph().getChunkIndices().size() == poco->m_graph.m_nodeCount);
            for (uint32_t i = 0; i < poco->m_graph.m_nodeCount; i++)
            {
                ciPtr[i] = reader.getGraph().getChunkIndices()[i];
            }

            // Adjacency partition offsets (serialized length drives the copy).
            uint32_t* adjPartition = poco->m_graph.getAdjacencyPartition();
            uint32_t idx = 0;

            for (uint32_t adjPartIndex : reader.getGraph().getAdjacencyPartition())
            {
                adjPartition[idx++] = adjPartIndex;
            }

            // Adjacent node indices.
            uint32_t* adjNodes = poco->m_graph.getAdjacentNodeIndices();
            idx = 0;

            for (uint32_t adjNodeIndex : reader.getGraph().getAdjacentNodeIndices())
            {
                adjNodes[idx++] = adjNodeIndex;
            }

            // Adjacent bond indices.
            uint32_t* adjBonds = poco->m_graph.getAdjacentBondIndices();
            idx = 0;

            for (uint32_t adjBondIndex : reader.getGraph().getAdjacentBondIndices())
            {
                adjBonds[idx++] = adjBondIndex;
            }

            return true;
        }
+ }
+}
diff --git a/NvBlast/sdk/extensions/serialization/source/DTO/AssetDTO.h b/NvBlast/sdk/extensions/serialization/source/DTO/AssetDTO.h
new file mode 100644
index 0000000..c090b5f
--- /dev/null
+++ b/NvBlast/sdk/extensions/serialization/source/DTO/AssetDTO.h
@@ -0,0 +1,16 @@
+/*
+* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#pragma once
+#include "DTOMacros.h"
+#include "NvBlastAsset.h"
+#include "generated/NvBlastExtSerializationLL.capn.h"
+
+DTO_CLASS_LL(Asset, Nv::Blast::Asset, Nv::Blast::Serialization::Asset)
diff --git a/NvBlast/sdk/extensions/serialization/source/DTO/DTOMacros.h b/NvBlast/sdk/extensions/serialization/source/DTO/DTOMacros.h
new file mode 100644
index 0000000..a234aec
--- /dev/null
+++ b/NvBlast/sdk/extensions/serialization/source/DTO/DTOMacros.h
@@ -0,0 +1,44 @@
+/*
+* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#pragma once
+
// Declares a data-transfer-object helper class named <_NAME>DTO that converts
// between a plain C++ object (_POCO) and its Cap'n Proto serialization type
// (_SERIALIZER).  This variant also carries static PhysX cooking/physics hooks
// for DTOs that must (de)serialize PhysX geometry.
#define DTO_CLASS(_NAME, _POCO, _SERIALIZER) \
namespace Nv { \
namespace Blast { \
class _NAME ## DTO \
{ \
public: \
	static class physx::PxCooking* Cooking; \
	static class physx::PxPhysics* Physics; \
	\
	static bool serialize(_SERIALIZER::Builder builder, const _POCO * poco); \
	static _POCO* deserialize(_SERIALIZER::Reader reader); \
	static bool deserializeInto(_SERIALIZER::Reader reader, _POCO * poco); \
}; \
} \
} \
 \

// Low-level variant of DTO_CLASS: same serialize/deserialize interface but
// without the PhysX hooks (for DTOs that never touch PhysX types).
#define DTO_CLASS_LL(_NAME, _POCO, _SERIALIZER) \
namespace Nv { \
namespace Blast { \
class _NAME ## DTO \
{ \
public: \
	\
	static bool serialize(_SERIALIZER::Builder builder, const _POCO * poco); \
	static _POCO* deserialize(_SERIALIZER::Reader reader); \
	static bool deserializeInto(_SERIALIZER::Reader reader, _POCO * poco); \
}; \
} \
} \
 \

+
diff --git a/NvBlast/sdk/extensions/serialization/source/DTO/ExtPxAssetDTO.cpp b/NvBlast/sdk/extensions/serialization/source/DTO/ExtPxAssetDTO.cpp
new file mode 100644
index 0000000..cf4cadc
--- /dev/null
+++ b/NvBlast/sdk/extensions/serialization/source/DTO/ExtPxAssetDTO.cpp
@@ -0,0 +1,78 @@
+/*
+* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#include "ExtPxAssetDTO.h"
+#include "TkAssetDTO.h"
+#include "ExtPxChunkDTO.h"
+#include "ExtPxSubchunkDTO.h"
+#include "physics/NvBlastExtPxAssetImpl.h"
+#include "NvBlastAssert.h"
+
+namespace Nv
+{
+ namespace Blast
+ {
        // Writes the ExtPxAsset (embedded TkAsset, physics chunks, and subchunks)
        // into the Cap'n Proto builder.  Returns true on success.
        bool ExtPxAssetDTO::serialize(Nv::Blast::Serialization::ExtPxAsset::Builder builder, const Nv::Blast::ExtPxAsset * poco)
        {
            TkAssetDTO::serialize(builder.getAsset(), &poco->getTkAsset());

            // Physics chunk table.
            auto chunks = builder.initChunks(poco->getChunkCount());

            for (uint32_t i = 0; i <poco->getChunkCount(); i++)
            {
                ExtPxChunkDTO::serialize(chunks[i], &poco->getChunks()[i]);
            }

            // Subchunk (geometry) table.
            auto subchunks = builder.initSubchunks(poco->getSubchunkCount());

            for (uint32_t i = 0; i < poco->getSubchunkCount(); i++)
            {
                ExtPxSubchunkDTO::serialize(subchunks[i], &poco->getSubchunks()[i]);
            }

            return true;
        }
+
+ Nv::Blast::ExtPxAsset* ExtPxAssetDTO::deserialize(Nv::Blast::Serialization::ExtPxAsset::Reader reader)
+ {
+ auto tkAsset = TkAssetDTO::deserialize(reader.getAsset());
+
+ Nv::Blast::ExtPxAssetImpl* asset = reinterpret_cast<Nv::Blast::ExtPxAssetImpl*>(Nv::Blast::ExtPxAsset::create(tkAsset));
+
+ NVBLAST_ASSERT(asset != nullptr);
+
+ auto chunks = asset->getChunksArray();
+
+ chunks.resize(reader.getChunks().size());
+ for (uint32_t i = 0; i < reader.getChunks().size(); i++)
+ {
+ ExtPxChunkDTO::deserializeInto(reader.getChunks()[i], &chunks[i]);
+ }
+
+ auto subchunks = asset->getSubchunksArray();
+
+ subchunks.resize(reader.getSubchunks().size());
+ for (uint32_t i = 0; i < reader.getSubchunks().size(); i++)
+ {
+ ExtPxSubchunkDTO::deserializeInto(reader.getSubchunks()[i], &subchunks[i]);
+ }
+
+ return asset;
+ }
+
+ bool ExtPxAssetDTO::deserializeInto(Nv::Blast::Serialization::ExtPxAsset::Reader reader, Nv::Blast::ExtPxAsset * poco)
+ {
+ reader = reader;
+ poco = nullptr;
+ //NOTE: Because of the way this is structured, can't do this.
+ return false;
+ }
+ }
+}
diff --git a/NvBlast/sdk/extensions/serialization/source/DTO/ExtPxAssetDTO.h b/NvBlast/sdk/extensions/serialization/source/DTO/ExtPxAssetDTO.h
new file mode 100644
index 0000000..a35d38a
--- /dev/null
+++ b/NvBlast/sdk/extensions/serialization/source/DTO/ExtPxAssetDTO.h
@@ -0,0 +1,16 @@
+/*
+* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#pragma once
+#include "NvBlastBondDTO.h"
+#include "NvBlastExtPxAsset.h"
+#include "generated/NvBlastExtSerialization.capn.h"
+
+DTO_CLASS(ExtPxAsset, Nv::Blast::ExtPxAsset, Nv::Blast::Serialization::ExtPxAsset)
diff --git a/NvBlast/sdk/extensions/serialization/source/DTO/ExtPxChunkDTO.cpp b/NvBlast/sdk/extensions/serialization/source/DTO/ExtPxChunkDTO.cpp
new file mode 100644
index 0000000..e096bc1
--- /dev/null
+++ b/NvBlast/sdk/extensions/serialization/source/DTO/ExtPxChunkDTO.cpp
@@ -0,0 +1,43 @@
+/*
+* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#include "ExtPxChunkDTO.h"
+
+namespace Nv
+{
+ namespace Blast
+ {
+ bool ExtPxChunkDTO::serialize(Nv::Blast::Serialization::ExtPxChunk::Builder builder, const Nv::Blast::ExtPxChunk * poco)
+ {
+ builder.setFirstSubchunkIndex(poco->firstSubchunkIndex);
+ builder.setSubchunkCount(poco->subchunkCount);
+ builder.setIsStatic(poco->isStatic);
+
+ return true;
+ }
+
+ Nv::Blast::ExtPxChunk* ExtPxChunkDTO::deserialize(Nv::Blast::Serialization::ExtPxChunk::Reader reader)
+ {
+ reader = reader;
+ //TODO: Allocate with ExtContext and return
+
+ return nullptr;
+ }
+
+ bool ExtPxChunkDTO::deserializeInto(Nv::Blast::Serialization::ExtPxChunk::Reader reader, Nv::Blast::ExtPxChunk * poco)
+ {
+ poco->firstSubchunkIndex = reader.getFirstSubchunkIndex();
+ poco->subchunkCount = reader.getSubchunkCount();
+ poco->isStatic = reader.getIsStatic();
+
+ return true;
+ }
+ }
+} \ No newline at end of file
diff --git a/NvBlast/sdk/extensions/serialization/source/DTO/ExtPxChunkDTO.h b/NvBlast/sdk/extensions/serialization/source/DTO/ExtPxChunkDTO.h
new file mode 100644
index 0000000..1ff36df
--- /dev/null
+++ b/NvBlast/sdk/extensions/serialization/source/DTO/ExtPxChunkDTO.h
@@ -0,0 +1,16 @@
+/*
+* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#pragma once
+#include "DTOMacros.h"
+#include "NvBlastExtPxAsset.h"
+#include "generated/NvBlastExtSerialization.capn.h"
+
+DTO_CLASS(ExtPxChunk, Nv::Blast::ExtPxChunk, Nv::Blast::Serialization::ExtPxChunk)
diff --git a/NvBlast/sdk/extensions/serialization/source/DTO/ExtPxSubchunkDTO.cpp b/NvBlast/sdk/extensions/serialization/source/DTO/ExtPxSubchunkDTO.cpp
new file mode 100644
index 0000000..cc2be96
--- /dev/null
+++ b/NvBlast/sdk/extensions/serialization/source/DTO/ExtPxSubchunkDTO.cpp
@@ -0,0 +1,43 @@
+/*
+* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#include "ExtPxSubchunkDTO.h"
+#include "PxTransformDTO.h"
+#include "PxConvexMeshGeometryDTO.h"
+
+namespace Nv
+{
+ namespace Blast
+ {
+ bool ExtPxSubchunkDTO::serialize(Nv::Blast::Serialization::ExtPxSubchunk::Builder builder, const Nv::Blast::ExtPxSubchunk * poco)
+ {
+ PxTransformDTO::serialize(builder.getTransform(), &poco->transform);
+ PxConvexMeshGeometryDTO::serialize(builder.getGeometry(), &poco->geometry);
+
+ return true;
+ }
+
+ Nv::Blast::ExtPxSubchunk* ExtPxSubchunkDTO::deserialize(Nv::Blast::Serialization::ExtPxSubchunk::Reader reader)
+ {
+ reader = reader;
+ //TODO: Allocate with ExtContext and return
+
+ return nullptr;
+ }
+
+ bool ExtPxSubchunkDTO::deserializeInto(Nv::Blast::Serialization::ExtPxSubchunk::Reader reader, Nv::Blast::ExtPxSubchunk * poco)
+ {
+ PxTransformDTO::deserializeInto(reader.getTransform(), &poco->transform);
+
+ return true;
+ }
+
+ }
+}
diff --git a/NvBlast/sdk/extensions/serialization/source/DTO/ExtPxSubchunkDTO.h b/NvBlast/sdk/extensions/serialization/source/DTO/ExtPxSubchunkDTO.h
new file mode 100644
index 0000000..91f78e0
--- /dev/null
+++ b/NvBlast/sdk/extensions/serialization/source/DTO/ExtPxSubchunkDTO.h
@@ -0,0 +1,16 @@
+/*
+* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#pragma once
+#include "NvBlastExtPxAsset.h"
+#include "generated/NvBlastExtSerialization.capn.h"
+#include "DTOMacros.h"
+
+DTO_CLASS(ExtPxSubchunk, Nv::Blast::ExtPxSubchunk, Nv::Blast::Serialization::ExtPxSubchunk)
diff --git a/NvBlast/sdk/extensions/serialization/source/DTO/NvBlastBondDTO.cpp b/NvBlast/sdk/extensions/serialization/source/DTO/NvBlastBondDTO.cpp
new file mode 100644
index 0000000..27cbb11
--- /dev/null
+++ b/NvBlast/sdk/extensions/serialization/source/DTO/NvBlastBondDTO.cpp
@@ -0,0 +1,63 @@
+/*
+* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#include "NvBlastBondDTO.h"
+#include "NvBlastAssert.h"
+
+namespace Nv
+{
+ namespace Blast
+ {
+
+ bool NvBlastBondDTO::serialize(Nv::Blast::Serialization::NvBlastBond::Builder builder, const NvBlastBond * poco)
+ {
+ NVBLAST_ASSERT(poco != nullptr);
+
+ kj::ArrayPtr<const float> normArray(poco->normal, 3);
+
+ builder.setNormal(normArray);
+
+ builder.setArea(poco->area);
+
+ kj::ArrayPtr<const float> centArray(poco->centroid, 3);
+
+ builder.setCentroid(centArray);
+
+ builder.setUserData(poco->userData);
+
+ return true;
+ }
+
+ NvBlastBond* NvBlastBondDTO::deserialize(Nv::Blast::Serialization::NvBlastBond::Reader reader)
+ {
+ //FIXME
+ reader = reader;
+ //TODO: Allocate with ExtContext and return
+ return nullptr;
+ }
+
+ bool NvBlastBondDTO::deserializeInto(Nv::Blast::Serialization::NvBlastBond::Reader reader, NvBlastBond * poco)
+ {
+ poco->area = reader.getArea();
+
+ poco->centroid[0] = reader.getCentroid()[0];
+ poco->centroid[1] = reader.getCentroid()[1];
+ poco->centroid[2] = reader.getCentroid()[2];
+
+ poco->normal[0] = reader.getNormal()[0];
+ poco->normal[1] = reader.getNormal()[1];
+ poco->normal[2] = reader.getNormal()[2];
+
+ poco->userData = reader.getUserData();
+
+ return true;
+ }
+ }
+}
diff --git a/NvBlast/sdk/extensions/serialization/source/DTO/NvBlastBondDTO.h b/NvBlast/sdk/extensions/serialization/source/DTO/NvBlastBondDTO.h
new file mode 100644
index 0000000..8b67bd7
--- /dev/null
+++ b/NvBlast/sdk/extensions/serialization/source/DTO/NvBlastBondDTO.h
@@ -0,0 +1,16 @@
+/*
+* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#pragma once
+#include "DTOMacros.h"
+#include "NvBlastTypes.h"
+#include "generated/NvBlastExtSerializationLL.capn.h"
+
+DTO_CLASS_LL(NvBlastBond, NvBlastBond, Nv::Blast::Serialization::NvBlastBond)
diff --git a/NvBlast/sdk/extensions/serialization/source/DTO/NvBlastChunkDTO.cpp b/NvBlast/sdk/extensions/serialization/source/DTO/NvBlastChunkDTO.cpp
new file mode 100644
index 0000000..38814ed
--- /dev/null
+++ b/NvBlast/sdk/extensions/serialization/source/DTO/NvBlastChunkDTO.cpp
@@ -0,0 +1,60 @@
+/*
+* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#include "NvBlastChunkDTO.h"
+#include "NvBlastAssert.h"
+
+namespace Nv
+{
+ namespace Blast
+ {
+ bool NvBlastChunkDTO::serialize(Nv::Blast::Serialization::NvBlastChunk::Builder builder, const NvBlastChunk* poco)
+ {
+ NVBLAST_ASSERT(poco != nullptr);
+
+ kj::ArrayPtr<const float> centArray(poco->centroid, 3);
+ builder.setCentroid(centArray);
+
+ builder.setVolume(poco->volume);
+
+ builder.setParentChunkIndex(poco->parentChunkIndex);
+ builder.setFirstChildIndex(poco->firstChildIndex);
+ builder.setChildIndexStop(poco->childIndexStop);
+ builder.setUserData(poco->userData);
+
+ return true;
+ }
+
+ NvBlastChunk* NvBlastChunkDTO::deserialize(Nv::Blast::Serialization::NvBlastChunk::Reader reader)
+ {
+ //FIXME
+ reader = reader;
+
+ return nullptr;
+ }
+
+ bool NvBlastChunkDTO::deserializeInto(Nv::Blast::Serialization::NvBlastChunk::Reader reader, NvBlastChunk* target)
+ {
+ NVBLAST_ASSERT(target != nullptr);
+
+ target->centroid[0] = reader.getCentroid()[0];
+ target->centroid[1] = reader.getCentroid()[1];
+ target->centroid[2] = reader.getCentroid()[2];
+
+ target->childIndexStop = reader.getChildIndexStop();
+ target->firstChildIndex = reader.getFirstChildIndex();
+ target->parentChunkIndex = reader.getParentChunkIndex();
+ target->userData = reader.getUserData();
+ target->volume = reader.getVolume();
+
+ return true;
+ }
+ }
+}
diff --git a/NvBlast/sdk/extensions/serialization/source/DTO/NvBlastChunkDTO.h b/NvBlast/sdk/extensions/serialization/source/DTO/NvBlastChunkDTO.h
new file mode 100644
index 0000000..5fec498
--- /dev/null
+++ b/NvBlast/sdk/extensions/serialization/source/DTO/NvBlastChunkDTO.h
@@ -0,0 +1,18 @@
+/*
+* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#pragma once
+#include "DTOMacros.h"
+#include "NvBlastTypes.h"
+#include "generated/NvBlastExtSerializationLL.capn.h"
+
+
+DTO_CLASS_LL(NvBlastChunk, NvBlastChunk, Nv::Blast::Serialization::NvBlastChunk)
+
diff --git a/NvBlast/sdk/extensions/serialization/source/DTO/NvBlastIDDTO.cpp b/NvBlast/sdk/extensions/serialization/source/DTO/NvBlastIDDTO.cpp
new file mode 100644
index 0000000..e540cd8
--- /dev/null
+++ b/NvBlast/sdk/extensions/serialization/source/DTO/NvBlastIDDTO.cpp
@@ -0,0 +1,48 @@
+/*
+* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#include "NvBlastIDDTO.h"
+#include "NvBlastTypes.h"
+#include "NvBlastAssert.h"
+#include "generated/NvBlastExtSerializationLL.capn.h"
+
+
+namespace Nv
+{
+ namespace Blast
+ {
+
+ bool NvBlastIDDTO::serialize(Nv::Blast::Serialization::UUID::Builder builder, const NvBlastID * poco)
+ {
+ capnp::Data::Reader idArrayReader((unsigned char *)poco->data, 16);
+ builder.setValue(idArrayReader);
+
+ return true;
+ }
+
+ NvBlastID* NvBlastIDDTO::deserialize(Nv::Blast::Serialization::UUID::Reader reader)
+ {
+ //FIXME
+ reader = reader;
+ //TODO: Allocate with ExtContext and return
+
+ return nullptr;
+ }
+
+ bool NvBlastIDDTO::deserializeInto(Nv::Blast::Serialization::UUID::Reader reader, NvBlastID * poco)
+ {
+ NVBLAST_ASSERT_WITH_MESSAGE(reader.getValue().size() == 16, "BlastID must be 16 bytes");
+
+ memcpy(poco, reader.getValue().begin(), 16);
+
+ return true;
+ }
+ }
+} \ No newline at end of file
diff --git a/NvBlast/sdk/extensions/serialization/source/DTO/NvBlastIDDTO.h b/NvBlast/sdk/extensions/serialization/source/DTO/NvBlastIDDTO.h
new file mode 100644
index 0000000..afe6cf0
--- /dev/null
+++ b/NvBlast/sdk/extensions/serialization/source/DTO/NvBlastIDDTO.h
@@ -0,0 +1,16 @@
+/*
+* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#pragma once
+#include "NvBlastTypes.h"
+#include "generated/NvBlastExtSerializationLL.capn.h"
+#include "DTOMacros.h"
+
+DTO_CLASS_LL(NvBlastID, NvBlastID, ::Nv::Blast::Serialization::UUID)
diff --git a/NvBlast/sdk/extensions/serialization/source/DTO/PxConvexMeshGeometryDTO.cpp b/NvBlast/sdk/extensions/serialization/source/DTO/PxConvexMeshGeometryDTO.cpp
new file mode 100644
index 0000000..1c46f9e
--- /dev/null
+++ b/NvBlast/sdk/extensions/serialization/source/DTO/PxConvexMeshGeometryDTO.cpp
@@ -0,0 +1,127 @@
+/*
+* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#include "PxConvexMeshGeometryDTO.h"
+#include "PxMeshScaleDTO.h"
+#include "NvBlastAssert.h"
+#include "NvBlastExtKJPxInputStream.h"
+#include "NvBlastExtKJPxOutputStream.h"
+#include "PxConvexMeshDesc.h"
+#include "NvBlastExtSerialization.h"
+#include "PxVec3.h"
+#include <algorithm>
+#include "PxPhysics.h"
+
+
+namespace Nv
+{
+ namespace Blast
+ {
+ physx::PxCooking* PxConvexMeshGeometryDTO::Cooking = nullptr;
+ physx::PxPhysics* PxConvexMeshGeometryDTO::Physics = nullptr;
+
+ bool PxConvexMeshGeometryDTO::serialize(Nv::Blast::Serialization::PxConvexMeshGeometry::Builder builder, const physx::PxConvexMeshGeometry * poco)
+ {
+ PxMeshScaleDTO::serialize(builder.getScale(), &poco->scale);
+
+ //TODO: Use cooking.cookConvexMesh to cook the mesh to a stream - then get that backing buffer and put it into the Data field
+
+ physx::PxConvexMeshDesc desc;
+ desc.points.data = poco->convexMesh->getVertices();
+ desc.points.count = poco->convexMesh->getNbVertices();
+ desc.points.stride = sizeof(physx::PxVec3);
+
+ std::vector<uint32_t> indicesScratch;
+ std::vector<physx::PxHullPolygon> hullPolygonsScratch;
+
+ hullPolygonsScratch.resize(poco->convexMesh->getNbPolygons());
+
+ uint32_t indexCount = 0;
+ for (uint32_t i = 0; i < hullPolygonsScratch.size(); i++)
+ {
+ physx::PxHullPolygon polygon;
+ poco->convexMesh->getPolygonData(i, polygon);
+ if (polygon.mNbVerts)
+ {
+ indexCount = std::max<uint32_t>(indexCount, polygon.mIndexBase + polygon.mNbVerts);
+ }
+ }
+ indicesScratch.resize(indexCount);
+
+ for (uint32_t i = 0; i < hullPolygonsScratch.size(); i++)
+ {
+ physx::PxHullPolygon polygon;
+ poco->convexMesh->getPolygonData(i, polygon);
+ for (uint32_t j = 0; j < polygon.mNbVerts; j++)
+ {
+ indicesScratch[polygon.mIndexBase + j] = poco->convexMesh->getIndexBuffer()[polygon.mIndexBase + j];
+ }
+
+ hullPolygonsScratch[i] = polygon;
+ }
+
+ desc.indices.count = indexCount;
+ desc.indices.data = indicesScratch.data();
+ desc.indices.stride = sizeof(uint32_t);
+
+ desc.polygons.count = poco->convexMesh->getNbPolygons();
+ desc.polygons.data = hullPolygonsScratch.data();
+ desc.polygons.stride = sizeof(physx::PxHullPolygon);
+
+
+ std::vector<unsigned char> buffer;
+ buffer.resize(16 * 1024 * 1024); // No idea how much memory is needed! Allocate 16MB
+ kj::ArrayPtr<unsigned char> bufferArray(buffer.data(), buffer.size());
+
+ Nv::Blast::ExtKJPxOutputStream outputStream(bufferArray);
+
+ bool cookResult = Cooking->cookConvexMesh(desc, outputStream);
+
+ if (!cookResult)
+ {
+ return false;
+ }
+
+ kj::ArrayPtr<unsigned char> cookedBuffer(outputStream.getBuffer().begin(), outputStream.getWrittenBytes());
+
+ builder.setConvexMesh(cookedBuffer);
+
+ // builder.getConvexMesh().
+
+ return true;
+ }
+
+ physx::PxConvexMeshGeometry* PxConvexMeshGeometryDTO::deserialize(Nv::Blast::Serialization::PxConvexMeshGeometry::Reader reader)
+ {
+ NVBLAST_ASSERT(PxConvexMeshGeometryDTO::Cooking != nullptr);
+
+ reader = reader;
+
+ return nullptr;
+ }
+
+ bool PxConvexMeshGeometryDTO::deserializeInto(Nv::Blast::Serialization::PxConvexMeshGeometry::Reader reader, physx::PxConvexMeshGeometry * poco)
+ {
+ NVBLAST_ASSERT(PxConvexMeshGeometryDTO::Cooking != nullptr);
+
+ PxMeshScaleDTO::deserializeInto(reader.getScale(), &poco->scale);
+
+ Nv::Blast::ExtKJPxInputStream inputStream(reader.getConvexMesh());
+
+ //NOTE: Naive approach, no shared convex hulls
+ poco->convexMesh = Physics->createConvexMesh(inputStream);
+
+ return false;
+ }
+
+
+
+ }
+}
diff --git a/NvBlast/sdk/extensions/serialization/source/DTO/PxConvexMeshGeometryDTO.h b/NvBlast/sdk/extensions/serialization/source/DTO/PxConvexMeshGeometryDTO.h
new file mode 100644
index 0000000..27b3754
--- /dev/null
+++ b/NvBlast/sdk/extensions/serialization/source/DTO/PxConvexMeshGeometryDTO.h
@@ -0,0 +1,17 @@
+/*
+* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#pragma once
+#include "DTOMacros.h"
+#include "generated/NvBlastExtSerialization.capn.h"
+#include "PxConvexMeshGeometry.h"
+#include "PxCooking.h"
+
+DTO_CLASS(PxConvexMeshGeometry, physx::PxConvexMeshGeometry, Nv::Blast::Serialization::PxConvexMeshGeometry)
diff --git a/NvBlast/sdk/extensions/serialization/source/DTO/PxMeshScaleDTO.cpp b/NvBlast/sdk/extensions/serialization/source/DTO/PxMeshScaleDTO.cpp
new file mode 100644
index 0000000..8fee6ad
--- /dev/null
+++ b/NvBlast/sdk/extensions/serialization/source/DTO/PxMeshScaleDTO.cpp
@@ -0,0 +1,42 @@
+/*
+* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#include "PxMeshScaleDTO.h"
+#include "PxVec3DTO.h"
+#include "PxQuatDTO.h"
+
+namespace Nv
+{
+ namespace Blast
+ {
+ bool PxMeshScaleDTO::serialize(Nv::Blast::Serialization::PxMeshScale::Builder builder, const physx::PxMeshScale * poco)
+ {
+ PxVec3DTO::serialize(builder.getScale(), &poco->scale);
+ PxQuatDTO::serialize(builder.getRotation(), &poco->rotation);
+
+ return true;
+ }
+
+ physx::PxMeshScale* PxMeshScaleDTO::deserialize(Nv::Blast::Serialization::PxMeshScale::Reader reader)
+ {
+ reader = reader;
+ return nullptr;
+ }
+
+ bool PxMeshScaleDTO::deserializeInto(Nv::Blast::Serialization::PxMeshScale::Reader reader, physx::PxMeshScale * poco)
+ {
+ PxVec3DTO::deserializeInto(reader.getScale(), &poco->scale);
+ PxQuatDTO::deserializeInto(reader.getRotation(), &poco->rotation);
+
+ return true;
+ }
+ }
+}
+
diff --git a/NvBlast/sdk/extensions/serialization/source/DTO/PxMeshScaleDTO.h b/NvBlast/sdk/extensions/serialization/source/DTO/PxMeshScaleDTO.h
new file mode 100644
index 0000000..7b758c8
--- /dev/null
+++ b/NvBlast/sdk/extensions/serialization/source/DTO/PxMeshScaleDTO.h
@@ -0,0 +1,17 @@
+/*
+* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#pragma once
+#include "DTOMacros.h"
+#include "PxMeshScale.h"
+#include "generated/NvBlastExtSerialization.capn.h"
+#include "PxCooking.h"
+
+DTO_CLASS(PxMeshScale, physx::PxMeshScale, Nv::Blast::Serialization::PxMeshScale)
diff --git a/NvBlast/sdk/extensions/serialization/source/DTO/PxQuatDTO.cpp b/NvBlast/sdk/extensions/serialization/source/DTO/PxQuatDTO.cpp
new file mode 100644
index 0000000..8faeaa6
--- /dev/null
+++ b/NvBlast/sdk/extensions/serialization/source/DTO/PxQuatDTO.cpp
@@ -0,0 +1,45 @@
+/*
+* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#include "PxQuatDTO.h"
+
+namespace Nv
+{
+ namespace Blast
+ {
+
+ bool PxQuatDTO::serialize(Nv::Blast::Serialization::PxQuat::Builder builder, const physx::PxQuat * poco)
+ {
+ builder.setX(poco->x);
+ builder.setY(poco->y);
+ builder.setZ(poco->z);
+ builder.setW(poco->w);
+
+ return true;
+ }
+
+ physx::PxQuat* PxQuatDTO::deserialize(Nv::Blast::Serialization::PxQuat::Reader reader)
+ {
+ reader = reader;
+ return nullptr;
+ }
+
+ bool PxQuatDTO::deserializeInto(Nv::Blast::Serialization::PxQuat::Reader reader, physx::PxQuat * poco)
+ {
+ poco->x = reader.getX();
+ poco->y = reader.getY();
+ poco->z = reader.getZ();
+ poco->w = reader.getW();
+
+ return true;
+ }
+
+ }
+}
diff --git a/NvBlast/sdk/extensions/serialization/source/DTO/PxQuatDTO.h b/NvBlast/sdk/extensions/serialization/source/DTO/PxQuatDTO.h
new file mode 100644
index 0000000..460d6c5
--- /dev/null
+++ b/NvBlast/sdk/extensions/serialization/source/DTO/PxQuatDTO.h
@@ -0,0 +1,17 @@
+/*
+* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#pragma once
+#include "DTOMacros.h"
+#include "PxQuat.h"
+#include "generated/NvBlastExtSerialization.capn.h"
+#include "PxCooking.h"
+
+DTO_CLASS(PxQuat, physx::PxQuat, Nv::Blast::Serialization::PxQuat)
diff --git a/NvBlast/sdk/extensions/serialization/source/DTO/PxTransformDTO.cpp b/NvBlast/sdk/extensions/serialization/source/DTO/PxTransformDTO.cpp
new file mode 100644
index 0000000..20a7cbb
--- /dev/null
+++ b/NvBlast/sdk/extensions/serialization/source/DTO/PxTransformDTO.cpp
@@ -0,0 +1,42 @@
+/*
+* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#include "PxTransformDTO.h"
+#include "PxQuatDTO.h"
+#include "PxVec3DTO.h"
+
+namespace Nv
+{
+ namespace Blast
+ {
+
+ bool PxTransformDTO::serialize(Nv::Blast::Serialization::PxTransform::Builder builder, const physx::PxTransform * poco)
+ {
+ PxQuatDTO::serialize(builder.getQ(), &poco->q);
+ PxVec3DTO::serialize(builder.getP(), &poco->p);
+
+ return true;
+ }
+
+ physx::PxTransform* PxTransformDTO::deserialize(Nv::Blast::Serialization::PxTransform::Reader reader)
+ {
+ reader = reader;
+ return nullptr;
+ }
+
+ bool PxTransformDTO::deserializeInto(Nv::Blast::Serialization::PxTransform::Reader reader, physx::PxTransform * poco)
+ {
+ PxQuatDTO::deserializeInto(reader.getQ(), &poco->q);
+ PxVec3DTO::deserializeInto(reader.getP(), &poco->p);
+
+ return true;
+ }
+ }
+}
diff --git a/NvBlast/sdk/extensions/serialization/source/DTO/PxTransformDTO.h b/NvBlast/sdk/extensions/serialization/source/DTO/PxTransformDTO.h
new file mode 100644
index 0000000..49a6b73
--- /dev/null
+++ b/NvBlast/sdk/extensions/serialization/source/DTO/PxTransformDTO.h
@@ -0,0 +1,17 @@
+/*
+* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#pragma once
+#include "DTOMacros.h"
+#include "PxTransform.h"
+#include "generated/NvBlastExtSerialization.capn.h"
+#include "PxCooking.h"
+
+DTO_CLASS(PxTransform, physx::PxTransform, Nv::Blast::Serialization::PxTransform)
diff --git a/NvBlast/sdk/extensions/serialization/source/DTO/PxVec3DTO.cpp b/NvBlast/sdk/extensions/serialization/source/DTO/PxVec3DTO.cpp
new file mode 100644
index 0000000..9827cd0
--- /dev/null
+++ b/NvBlast/sdk/extensions/serialization/source/DTO/PxVec3DTO.cpp
@@ -0,0 +1,45 @@
+/*
+* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#include "PxVec3DTO.h"
+#include "NvBlastAssert.h"
+
+namespace Nv
+{
+ namespace Blast
+ {
+ bool PxVec3DTO::serialize(Nv::Blast::Serialization::PxVec3::Builder builder, const physx::PxVec3 * poco)
+ {
+ NVBLAST_ASSERT(poco != nullptr);
+
+ builder.setX(poco->x);
+ builder.setY(poco->y);
+ builder.setZ(poco->z);
+
+ return true;
+ }
+
+ physx::PxVec3* PxVec3DTO::deserialize(Nv::Blast::Serialization::PxVec3::Reader reader)
+ {
+ //TODO: Allocate using ExtContext and return
+ reader = reader;
+ return nullptr;
+ }
+
+ bool PxVec3DTO::deserializeInto(Nv::Blast::Serialization::PxVec3::Reader reader, physx::PxVec3* target)
+ {
+ target->x = reader.getX();
+ target->y = reader.getY();
+ target->z = reader.getZ();
+
+ return true;
+ }
+ }
+}
diff --git a/NvBlast/sdk/extensions/serialization/source/DTO/PxVec3DTO.h b/NvBlast/sdk/extensions/serialization/source/DTO/PxVec3DTO.h
new file mode 100644
index 0000000..8a04c8b
--- /dev/null
+++ b/NvBlast/sdk/extensions/serialization/source/DTO/PxVec3DTO.h
@@ -0,0 +1,17 @@
+/*
+* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#pragma once
+#include "DTOMacros.h"
+#include "generated/NvBlastExtSerialization.capn.h"
+#include "PxVec3.h"
+#include "PxCooking.h"
+
+DTO_CLASS(PxVec3, physx::PxVec3, Nv::Blast::Serialization::PxVec3)
diff --git a/NvBlast/sdk/extensions/serialization/source/DTO/TkAssetDTO.cpp b/NvBlast/sdk/extensions/serialization/source/DTO/TkAssetDTO.cpp
new file mode 100644
index 0000000..acc55ba
--- /dev/null
+++ b/NvBlast/sdk/extensions/serialization/source/DTO/TkAssetDTO.cpp
@@ -0,0 +1,67 @@
+/*
+* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#include "TkAssetDTO.h"
+#include "AssetDTO.h"
+#include "TkAssetJointDescDTO.h"
+#include <vector>
+#include "NvBlastTkFramework.h"
+
+
+
+namespace Nv
+{
+ namespace Blast
+ {
+ bool TkAssetDTO::serialize(Nv::Blast::Serialization::TkAsset::Builder builder, const Nv::Blast::TkAsset * poco)
+ {
+ const Asset* assetLL = reinterpret_cast<const Nv::Blast::Asset*>(poco->getAssetLL());
+
+ Nv::Blast::AssetDTO::serialize(builder.getAssetLL(), assetLL);
+
+ uint32_t jointDescCount = poco->getJointDescCount();
+
+ capnp::List<Nv::Blast::Serialization::TkAssetJointDesc>::Builder jointDescs = builder.initJointDescs(jointDescCount);
+
+ for (uint32_t i = 0; i < jointDescCount; i++)
+ {
+ TkAssetJointDescDTO::serialize(jointDescs[i], &poco->getJointDescs()[i]);
+ }
+
+ return true;
+ }
+
+ Nv::Blast::TkAsset* TkAssetDTO::deserialize(Nv::Blast::Serialization::TkAsset::Reader reader)
+ {
+ const NvBlastAsset* assetLL = reinterpret_cast<const NvBlastAsset*>(AssetDTO::deserialize(reader.getAssetLL()));
+
+ std::vector<Nv::Blast::TkAssetJointDesc> jointDescs;
+ jointDescs.resize(reader.getJointDescs().size());
+
+ for (uint32_t i = 0; i < jointDescs.size(); i++)
+ {
+ TkAssetJointDescDTO::deserializeInto(reader.getJointDescs()[i], &jointDescs[i]);
+ }
+
+ // Make sure to set ownsAsset to true - this is serialization and no one else owns it.
+ Nv::Blast::TkAsset* asset = NvBlastTkFrameworkGet()->createAsset(assetLL, jointDescs.data(), jointDescs.size(), true);
+
+ return asset;
+ }
+
+ bool TkAssetDTO::deserializeInto(Nv::Blast::Serialization::TkAsset::Reader reader, Nv::Blast::TkAsset * poco)
+ {
+ reader = reader;
+ poco = nullptr;
+ // NOTE: Because of the way TkAsset is currently structured, this won't work.
+ return false;
+ }
+ }
+}
diff --git a/NvBlast/sdk/extensions/serialization/source/DTO/TkAssetDTO.h b/NvBlast/sdk/extensions/serialization/source/DTO/TkAssetDTO.h
new file mode 100644
index 0000000..1b21eba
--- /dev/null
+++ b/NvBlast/sdk/extensions/serialization/source/DTO/TkAssetDTO.h
@@ -0,0 +1,17 @@
+/*
+* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#pragma once
+#include "DTOMacros.h"
+#include "NvBlastTkAsset.h"
+#include "generated/NvBlastExtSerialization.capn.h"
+#include "PxCooking.h"
+
+DTO_CLASS(TkAsset, Nv::Blast::TkAsset, Nv::Blast::Serialization::TkAsset)
diff --git a/NvBlast/sdk/extensions/serialization/source/DTO/TkAssetJointDescDTO.cpp b/NvBlast/sdk/extensions/serialization/source/DTO/TkAssetJointDescDTO.cpp
new file mode 100644
index 0000000..9118d19
--- /dev/null
+++ b/NvBlast/sdk/extensions/serialization/source/DTO/TkAssetJointDescDTO.cpp
@@ -0,0 +1,53 @@
+/*
+* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#include "TkAssetJointDescDTO.h"
+#include "PxVec3DTO.h"
+
+
+namespace Nv
+{
+ namespace Blast
+ {
+
+ bool TkAssetJointDescDTO::serialize(Nv::Blast::Serialization::TkAssetJointDesc::Builder builder, const Nv::Blast::TkAssetJointDesc * poco)
+ {
+ kj::ArrayPtr<const uint32_t> nodeIndices(poco->nodeIndices, 2);
+ builder.setNodeIndices(nodeIndices);
+
+ for (int i = 0; i < 2; i++)
+ {
+ PxVec3DTO::serialize(builder.getAttachPositions()[i], &poco->attachPositions[i]);
+ }
+
+ return true;
+ }
+
+ Nv::Blast::TkAssetJointDesc* TkAssetJointDescDTO::deserialize(Nv::Blast::Serialization::TkAssetJointDesc::Reader reader)
+ {
+ //TODO: Allocate with ExtContent and return
+
+ reader = reader;
+
+ return nullptr;
+ }
+
+ bool TkAssetJointDescDTO::deserializeInto(Nv::Blast::Serialization::TkAssetJointDesc::Reader reader, Nv::Blast::TkAssetJointDesc * poco)
+ {
+ PxVec3DTO::deserializeInto(reader.getAttachPositions()[0], &poco->attachPositions[0]);
+ PxVec3DTO::deserializeInto(reader.getAttachPositions()[1], &poco->attachPositions[1]);
+
+ poco->nodeIndices[0] = reader.getNodeIndices()[0];
+ poco->nodeIndices[1] = reader.getNodeIndices()[1];
+
+ return true;
+ }
+ }
+} \ No newline at end of file
diff --git a/NvBlast/sdk/extensions/serialization/source/DTO/TkAssetJointDescDTO.h b/NvBlast/sdk/extensions/serialization/source/DTO/TkAssetJointDescDTO.h
new file mode 100644
index 0000000..88364bd
--- /dev/null
+++ b/NvBlast/sdk/extensions/serialization/source/DTO/TkAssetJointDescDTO.h
@@ -0,0 +1,17 @@
+/*
+* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#pragma once
+#include "DTOMacros.h"
+#include "NvBlastTkAsset.h"
+#include "generated/NvBlastExtSerialization.capn.h"
+#include "PxCooking.h"
+
+DTO_CLASS(TkAssetJointDesc, Nv::Blast::TkAssetJointDesc, Nv::Blast::Serialization::TkAssetJointDesc)
diff --git a/NvBlast/sdk/extensions/serialization/source/NvBlastExtGlobals.h b/NvBlast/sdk/extensions/serialization/source/NvBlastExtGlobals.h
new file mode 100644
index 0000000..dd50afd
--- /dev/null
+++ b/NvBlast/sdk/extensions/serialization/source/NvBlastExtGlobals.h
@@ -0,0 +1,10 @@
+#pragma once
+
+/**
+Function pointer type for allocation - has same signature as stdlib malloc.
+*/
+typedef void* (*NvBlastExtAlloc)(size_t size);
+
+extern NvBlastExtAlloc gAlloc;
+extern NvBlastLog gLog;
+
diff --git a/NvBlast/sdk/extensions/serialization/source/NvBlastExtInputStream.cpp b/NvBlast/sdk/extensions/serialization/source/NvBlastExtInputStream.cpp
new file mode 100644
index 0000000..7ef9b62
--- /dev/null
+++ b/NvBlast/sdk/extensions/serialization/source/NvBlastExtInputStream.cpp
@@ -0,0 +1,38 @@
+/*
+* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#include "NvBlastExtInputStream.h"
+
+
+Nv::Blast::ExtInputStream::ExtInputStream(std::istream &inputStream):
+ m_inputStream(inputStream)
+{
+
+}
+
+
+size_t Nv::Blast::ExtInputStream::tryRead(void* buffer, size_t /*minBytes*/, size_t maxBytes)
+{
+ m_inputStream.read((char *) buffer, maxBytes);
+
+ if (m_inputStream.fail())
+ {
+ // Throw exception, log error
+// NVBLASTEXT_LOG_ERROR("Failure when reading from stream");
+ }
+
+ // Since we're using a blocking read above, if we don't have maxBytes we're probably done
+ if ((size_t) m_inputStream.gcount() < maxBytes)
+ {
+// NVBLASTEXT_LOG_ERROR("Failed to read requested number of bytes during blocking read.");
+ }
+
+ return m_inputStream.gcount();
+}
diff --git a/NvBlast/sdk/extensions/serialization/source/NvBlastExtInputStream.h b/NvBlast/sdk/extensions/serialization/source/NvBlastExtInputStream.h
new file mode 100644
index 0000000..9b19d9c
--- /dev/null
+++ b/NvBlast/sdk/extensions/serialization/source/NvBlastExtInputStream.h
@@ -0,0 +1,32 @@
+/*
+* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#pragma once
+#include "kj/io.h"
+#include <istream>
+
+namespace Nv
+{
+ namespace Blast
+ {
+ class ExtInputStream : public kj::InputStream
+ {
+ public:
+ ExtInputStream() = delete;
+ ExtInputStream(std::istream &inputStream);
+
+ // Returns a read of maxBytes. This is supposed to be happy doing partial reads, but currently isn't.
+ virtual size_t tryRead(void* buffer, size_t minBytes, size_t maxBytes) override;
+
+ private:
+ std::istream &m_inputStream;
+ };
+ }
+} \ No newline at end of file
diff --git a/NvBlast/sdk/extensions/serialization/source/NvBlastExtKJPxInputStream.cpp b/NvBlast/sdk/extensions/serialization/source/NvBlastExtKJPxInputStream.cpp
new file mode 100644
index 0000000..9af13a9
--- /dev/null
+++ b/NvBlast/sdk/extensions/serialization/source/NvBlastExtKJPxInputStream.cpp
@@ -0,0 +1,32 @@
+/*
+* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#include "NvBlastExtKJPxInputStream.h"
+
+namespace Nv
+{
+ namespace Blast
+ {
+ ExtKJPxInputStream::ExtKJPxInputStream(capnp::Data::Reader inReader) :
+ dataReader(inReader),
+ inputStream(nullptr)
+ {
+ kj::ArrayPtr<const unsigned char> buffer(inReader.begin(), inReader.size());
+
+ inputStream = std::make_shared<kj::ArrayInputStream>(buffer);
+ }
+
+ uint32_t ExtKJPxInputStream::read(void* dest, uint32_t count)
+ {
+ return inputStream->tryRead(dest, count, count);
+ }
+ }
+}
+
diff --git a/NvBlast/sdk/extensions/serialization/source/NvBlastExtKJPxInputStream.h b/NvBlast/sdk/extensions/serialization/source/NvBlastExtKJPxInputStream.h
new file mode 100644
index 0000000..452892d
--- /dev/null
+++ b/NvBlast/sdk/extensions/serialization/source/NvBlastExtKJPxInputStream.h
@@ -0,0 +1,41 @@
+/*
+* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#pragma once
+#include "PxIO.h"
+#include "capnp/common.h"
+#include "kj/io.h"
+#include <memory>
+#include "generated/NvBlastExtSerialization.capn.h"
+
+namespace Nv
+{
+ namespace Blast
+ {
+ /*
+ A wrapper around a Capn Proto Data reader.
+
+ Since it needs to behave like a stream, it's internally wrapped in a stream.
+
+ */
+ class ExtKJPxInputStream : public physx::PxInputStream
+ {
+ public:
+ ExtKJPxInputStream(capnp::Data::Reader inReader);
+ ~ExtKJPxInputStream() = default;
+
+ virtual uint32_t read(void* dest, uint32_t count) override;
+
+ private:
+ capnp::Data::Reader dataReader;
+ std::shared_ptr<kj::ArrayInputStream> inputStream;
+ };
+ }
+}
diff --git a/NvBlast/sdk/extensions/serialization/source/NvBlastExtKJPxOutputStream.cpp b/NvBlast/sdk/extensions/serialization/source/NvBlastExtKJPxOutputStream.cpp
new file mode 100644
index 0000000..0f17a01
--- /dev/null
+++ b/NvBlast/sdk/extensions/serialization/source/NvBlastExtKJPxOutputStream.cpp
@@ -0,0 +1,35 @@
+/*
+* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#include "NvBlastExtKJPxOutputStream.h"
+
+namespace Nv
+{
+ namespace Blast
+ {
+ ExtKJPxOutputStream::ExtKJPxOutputStream(kj::ArrayPtr<unsigned char> inBuffer) :
+ writtenBytes(0),
+ Buffer(inBuffer),
+ outputStream(nullptr)
+ {
+ outputStream = std::make_shared<kj::ArrayOutputStream>(inBuffer);
+ }
+
+ uint32_t ExtKJPxOutputStream::write(const void* src, uint32_t count)
+ {
+ outputStream->write(src, count);
+
+ writtenBytes += count;
+
+ return count;
+ }
+ }
+}
+
diff --git a/NvBlast/sdk/extensions/serialization/source/NvBlastExtKJPxOutputStream.h b/NvBlast/sdk/extensions/serialization/source/NvBlastExtKJPxOutputStream.h
new file mode 100644
index 0000000..0ed563f
--- /dev/null
+++ b/NvBlast/sdk/extensions/serialization/source/NvBlastExtKJPxOutputStream.h
@@ -0,0 +1,40 @@
+/*
+* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#pragma once
+#include "PxIO.h"
+#include "kj/common.h"
+#include <memory>
+#include "kj/io.h"
+
+namespace Nv
+{
+ namespace Blast
+ {
+ class ExtKJPxOutputStream : public physx::PxOutputStream
+ {
+ public:
+ ExtKJPxOutputStream(kj::ArrayPtr<unsigned char> inBuffer);
+ ~ExtKJPxOutputStream() = default;
+
+ virtual uint32_t write(const void* src, uint32_t count) override;
+
+ uint32_t getWrittenBytes() { return writtenBytes; }
+
+ kj::ArrayPtr<unsigned char> getBuffer() { return Buffer; }
+
+ private:
+ uint32_t writtenBytes;
+
+ kj::ArrayPtr<unsigned char> Buffer;
+ std::shared_ptr<kj::ArrayOutputStream> outputStream;
+ };
+ }
+}
diff --git a/NvBlast/sdk/extensions/serialization/source/NvBlastExtOutputStream.cpp b/NvBlast/sdk/extensions/serialization/source/NvBlastExtOutputStream.cpp
new file mode 100644
index 0000000..cf6e31f
--- /dev/null
+++ b/NvBlast/sdk/extensions/serialization/source/NvBlastExtOutputStream.cpp
@@ -0,0 +1,24 @@
+/*
+* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#include "NvBlastExtOutputStream.h"
+
+
+
+Nv::Blast::ExtOutputStream::ExtOutputStream(std::ostream &outputStream):
+ m_outputStream(outputStream)
+{
+
+}
+
+void Nv::Blast::ExtOutputStream::write(const void* buffer, size_t size)
+{
+ m_outputStream.write((char *) buffer, size);
+}
diff --git a/NvBlast/sdk/extensions/serialization/source/NvBlastExtOutputStream.h b/NvBlast/sdk/extensions/serialization/source/NvBlastExtOutputStream.h
new file mode 100644
index 0000000..ba0044e
--- /dev/null
+++ b/NvBlast/sdk/extensions/serialization/source/NvBlastExtOutputStream.h
@@ -0,0 +1,31 @@
+/*
+* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#pragma once
+#include "kj/io.h"
+#include <ostream>
+
+namespace Nv
+{
+ namespace Blast
+ {
+ class ExtOutputStream : public kj::OutputStream
+ {
+
+ public:
+ ExtOutputStream() = delete;
+ ExtOutputStream(std::ostream &outputStream);
+
+ virtual void write(const void* buffer, size_t size) override;
+ private:
+ std::ostream &m_outputStream;
+ };
+ }
+}
diff --git a/NvBlast/sdk/extensions/serialization/source/NvBlastExtSerialization.capn b/NvBlast/sdk/extensions/serialization/source/NvBlastExtSerialization.capn
new file mode 100644
index 0000000..ddc439a
--- /dev/null
+++ b/NvBlast/sdk/extensions/serialization/source/NvBlastExtSerialization.capn
@@ -0,0 +1,95 @@
+# Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+#
+# NVIDIA CORPORATION and its licensors retain all intellectual property
+# and proprietary rights in and to this software, related documentation
+# and any modifications thereto. Any use, reproduction, disclosure or
+# distribution of this software and related documentation without an express
+# license agreement from NVIDIA CORPORATION is strictly prohibited.
+
+@0xaffe4498f275ee58;
+
+using Cxx = import "/capnp/c++.capnp";
+
+using LL = import "NvBlastExtSerializationLL.capn";
+
+$Cxx.namespace("Nv::Blast::Serialization");
+
+struct TkAsset
+{
+ assetLL @0 :LL.Asset;
+
+ jointDescs @1 :List(TkAssetJointDesc);
+}
+
+struct ExtPxAsset
+{
+ asset @0 :TkAsset;
+ chunks @1 :List(ExtPxChunk);
+ subchunks @2 :List(ExtPxSubchunk);
+}
+
+struct ExtPxChunk
+{
+ firstSubchunkIndex @0 :UInt32;
+ subchunkCount @1 :UInt32;
+ isStatic @2 :Bool;
+}
+
+struct ExtPxSubchunk
+{
+ transform @0 :PxTransform;
+ geometry @1 :PxConvexMeshGeometry;
+}
+
+struct PxConvexMeshGeometry
+{
+ scale @0 :PxMeshScale;
+ convexMesh @1 :Data;
+ meshFlags @2 :UInt8;
+
+ enum Type
+ {
+ eSPHERE @0;
+ ePLANE @1;
+ eCAPSULE @2;
+ eBOX @3;
+ eCONVEXMESH @4;
+ eTRIANGLEMESH @5;
+ eHEIGHTFIELD @6;
+ }
+
+ type @3 :Type;
+}
+
+struct TkAssetJointDesc
+{
+ nodeIndices @0 :List(UInt32);
+ attachPositions @1 :List(PxVec3);
+}
+
+struct PxVec3
+{
+ x @0 :Float32;
+ y @1 :Float32;
+ z @2 :Float32;
+}
+
+struct PxQuat
+{
+ x @0 :Float32;
+ y @1 :Float32;
+ z @2 :Float32;
+ w @3 :Float32;
+}
+
+struct PxMeshScale
+{
+ scale @0 :PxVec3;
+ rotation @1 :PxQuat;
+}
+
+struct PxTransform
+{
+ q @0 :PxQuat;
+ p @1 :PxVec3;
+}
diff --git a/NvBlast/sdk/extensions/serialization/source/NvBlastExtSerialization.cpp b/NvBlast/sdk/extensions/serialization/source/NvBlastExtSerialization.cpp
new file mode 100644
index 0000000..28a1553
--- /dev/null
+++ b/NvBlast/sdk/extensions/serialization/source/NvBlastExtSerialization.cpp
@@ -0,0 +1,146 @@
+#include "NvBlastExtSerialization.h"
+#include "BlastSerialization.h"
+#include <memory>
+#include "PxPhysicsVersion.h"
+#include "PxConvexMeshGeometryDTO.h"
+#include "NvBlastExtDefs.h"
+
+
+// This is terrible.
+physx::PxPhysics* g_Physics = nullptr;
+
+
+std::shared_ptr<physx::PxCooking> getCooking()
+{
+ physx::PxCookingParams cookingParams(g_Physics->getTolerancesScale());
+ cookingParams.buildGPUData = true;
+
+ std::shared_ptr<physx::PxCooking> m_Cooking = std::shared_ptr<physx::PxCooking>(PxCreateCooking(PX_PHYSICS_VERSION, g_Physics->getFoundation(), cookingParams), [=](physx::PxCooking* cooking)
+ {
+ cooking->release();
+ });
+
+ NVBLASTEXT_CHECK_ERROR(m_Cooking, "Error: failed to create PhysX Cooking\n", return nullptr);
+
+ return m_Cooking;
+}
+
+
+extern "C"
+{
+ NVBLAST_API void setPhysXSDK(physx::PxPhysics* physXSDK)
+ {
+ g_Physics = physXSDK;
+ }
+
+ NVBLAST_API NvBlastAsset* deserializeAsset(const unsigned char* input, uint32_t size)
+ {
+ return Nv::Blast::BlastSerialization<Nv::Blast::Asset, Nv::Blast::Serialization::Asset::Reader, Nv::Blast::Serialization::Asset::Builder>::deserialize(input, size);
+ }
+
+ NVBLAST_API NvBlastAsset* deserializeAssetFromStream(std::istream &inputStream)
+ {
+ return Nv::Blast::BlastSerialization<Nv::Blast::Asset, Nv::Blast::Serialization::Asset::Reader, Nv::Blast::Serialization::Asset::Builder>::deserializeFromStream(inputStream);
+ }
+
+ NVBLAST_API bool serializeAssetIntoStream(const NvBlastAsset *asset, std::ostream &outputStream)
+ {
+ return Nv::Blast::BlastSerialization<Nv::Blast::Asset, Nv::Blast::Serialization::Asset::Reader, Nv::Blast::Serialization::Asset::Builder>::serializeIntoStream(reinterpret_cast<const Nv::Blast::Asset *>(asset), outputStream);
+ }
+
+ NVBLAST_API bool serializeAssetIntoNewBuffer(const NvBlastAsset *asset, unsigned char **outBuffer, uint32_t &outSize)
+ {
+ return Nv::Blast::BlastSerialization<Nv::Blast::Asset, Nv::Blast::Serialization::Asset::Reader, Nv::Blast::Serialization::Asset::Builder>::serializeIntoNewBuffer(reinterpret_cast<const Nv::Blast::Asset *>(asset), outBuffer, outSize);
+ }
+
+ NVBLAST_API bool serializeAssetIntoExistingBuffer(const NvBlastAsset *asset, unsigned char *buffer, uint32_t maxSize, uint32_t &usedSize)
+ {
+ return Nv::Blast::BlastSerialization<Nv::Blast::Asset, Nv::Blast::Serialization::Asset::Reader, Nv::Blast::Serialization::Asset::Builder>::serializeIntoExistingBuffer(reinterpret_cast<const Nv::Blast::Asset *>(asset), buffer, maxSize, usedSize);
+ }
+
+ //////////////////////////////////////////////////////////////////////////
+ // TkAsset
+ //////////////////////////////////////////////////////////////////////////
+
+ NVBLAST_API Nv::Blast::TkAsset* deserializeTkAsset(const unsigned char* input, uint32_t size)
+ {
+ return Nv::Blast::BlastSerialization<Nv::Blast::TkAsset, Nv::Blast::Serialization::TkAsset::Reader, Nv::Blast::Serialization::TkAsset::Builder>::deserialize(input, size);
+ }
+
+ NVBLAST_API Nv::Blast::TkAsset* deserializeTkAssetFromStream(std::istream &inputStream)
+ {
+ return Nv::Blast::BlastSerialization<Nv::Blast::TkAsset, Nv::Blast::Serialization::TkAsset::Reader, Nv::Blast::Serialization::TkAsset::Builder>::deserializeFromStream(inputStream);
+ }
+
+ NVBLAST_API bool serializeTkAssetIntoStream(const Nv::Blast::TkAsset *asset, std::ostream &outputStream)
+ {
+ return Nv::Blast::BlastSerialization<Nv::Blast::TkAsset, Nv::Blast::Serialization::TkAsset::Reader, Nv::Blast::Serialization::TkAsset::Builder>::serializeIntoStream(reinterpret_cast<const Nv::Blast::TkAsset *>(asset), outputStream);
+ }
+
+ NVBLAST_API bool serializeTkAssetIntoNewBuffer(const Nv::Blast::TkAsset *asset, unsigned char **outBuffer, uint32_t &outSize)
+ {
+ return Nv::Blast::BlastSerialization<Nv::Blast::TkAsset, Nv::Blast::Serialization::TkAsset::Reader, Nv::Blast::Serialization::TkAsset::Builder>::serializeIntoNewBuffer(reinterpret_cast<const Nv::Blast::TkAsset *>(asset), outBuffer, outSize);
+ }
+
+ NVBLAST_API bool serializeTkAssetIntoExistingBuffer(const Nv::Blast::TkAsset *asset, unsigned char *buffer, uint32_t maxSize, uint32_t &usedSize)
+ {
+ return Nv::Blast::BlastSerialization<Nv::Blast::TkAsset, Nv::Blast::Serialization::TkAsset::Reader, Nv::Blast::Serialization::TkAsset::Builder>::serializeIntoExistingBuffer(reinterpret_cast<const Nv::Blast::TkAsset *>(asset), buffer, maxSize, usedSize);
+ }
+
+ //////////////////////////////////////////////////////////////////////////
+ // ExtPxAsset
+ //////////////////////////////////////////////////////////////////////////
+
+ NVBLAST_API Nv::Blast::ExtPxAsset* deserializeExtPxAsset(const unsigned char* input, uint32_t size)
+ {
+ NVBLAST_ASSERT(g_Physics != nullptr);
+
+ return Nv::Blast::BlastSerialization<Nv::Blast::ExtPxAsset, Nv::Blast::Serialization::ExtPxAsset::Reader, Nv::Blast::Serialization::ExtPxAsset::Builder>::deserialize(input, size);
+ }
+
+ NVBLAST_API Nv::Blast::ExtPxAsset* deserializeExtPxAssetFromStream(std::istream &inputStream)
+ {
+ NVBLAST_ASSERT(g_Physics != nullptr);
+
+ return Nv::Blast::BlastSerialization<Nv::Blast::ExtPxAsset, Nv::Blast::Serialization::ExtPxAsset::Reader, Nv::Blast::Serialization::ExtPxAsset::Builder>::deserializeFromStream(inputStream);
+ }
+
+ NVBLAST_API bool serializeExtPxAssetIntoStream(const Nv::Blast::ExtPxAsset *asset, std::ostream &outputStream)
+ {
+ NVBLAST_ASSERT(g_Physics != nullptr);
+
+ auto cooking = getCooking();
+
+ PxConvexMeshGeometryDTO::Cooking = cooking.get();
+ PxConvexMeshGeometryDTO::Physics = g_Physics;
+
+ return Nv::Blast::BlastSerialization<Nv::Blast::ExtPxAsset, Nv::Blast::Serialization::ExtPxAsset::Reader, Nv::Blast::Serialization::ExtPxAsset::Builder>::serializeIntoStream(reinterpret_cast<const Nv::Blast::ExtPxAsset *>(asset), outputStream);
+ }
+
+ NVBLAST_API bool serializeExtPxAssetIntoNewBuffer(const Nv::Blast::ExtPxAsset *asset, unsigned char **outBuffer, uint32_t &outSize)
+ {
+ NVBLAST_ASSERT(g_Physics != nullptr);
+
+ auto cooking = getCooking();
+
+ PxConvexMeshGeometryDTO::Cooking = cooking.get();
+ PxConvexMeshGeometryDTO::Physics = g_Physics;
+
+ return Nv::Blast::BlastSerialization<Nv::Blast::ExtPxAsset, Nv::Blast::Serialization::ExtPxAsset::Reader, Nv::Blast::Serialization::ExtPxAsset::Builder>::serializeIntoNewBuffer(reinterpret_cast<const Nv::Blast::ExtPxAsset *>(asset), outBuffer, outSize);
+ }
+
+ NVBLAST_API bool serializeExtPxAssetIntoExistingBuffer(const Nv::Blast::ExtPxAsset *asset, unsigned char *buffer, uint32_t maxSize, uint32_t &usedSize)
+ {
+ NVBLAST_ASSERT(g_Physics != nullptr);
+
+ auto cooking = getCooking();
+
+ PxConvexMeshGeometryDTO::Cooking = cooking.get();
+ PxConvexMeshGeometryDTO::Physics = g_Physics;
+
+ return Nv::Blast::BlastSerialization<Nv::Blast::ExtPxAsset, Nv::Blast::Serialization::ExtPxAsset::Reader, Nv::Blast::Serialization::ExtPxAsset::Builder>::serializeIntoExistingBuffer(reinterpret_cast<const Nv::Blast::ExtPxAsset *>(asset), buffer, maxSize, usedSize);
+ }
+
+
+}
+
diff --git a/NvBlast/sdk/extensions/serialization/source/NvBlastExtSerialization.h b/NvBlast/sdk/extensions/serialization/source/NvBlastExtSerialization.h
new file mode 100644
index 0000000..5d44554
--- /dev/null
+++ b/NvBlast/sdk/extensions/serialization/source/NvBlastExtSerialization.h
@@ -0,0 +1,172 @@
+/*
+* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#pragma once
+#include "kj/io.h"
+#include "capnp/serialize.h"
+#include "generated/NvBlastExtSerializationLL.capn.h"
+#include <vector>
+#include "NvBlastExtInputStream.h"
+#include "NvBlastExtOutputStream.h"
+
+#if !defined(BLAST_LL_ALLOC)
+#include "NvBlastExtAllocator.h"
+#endif
+#include "NvBlastExtGlobals.h"
+
+namespace Nv
+{
+ namespace Blast
+ {
+ template<typename TAsset, typename TSerializationReader, typename TSerializationBuilder>
+ class ExtSerialization
+ {
+ public:
+ static TAsset* deserialize(const unsigned char* input, uint32_t size);
+ static TAsset* deserializeFromStream(std::istream &inputStream);
+
+ static bool serializeIntoExistingBuffer(const TAsset *asset, unsigned char *buffer, uint32_t maxSize, uint32_t &usedSize);
+ static bool serializeIntoNewBuffer(const TAsset *asset, unsigned char **outBuffer, uint32_t &outSize);
+ static bool serializeIntoStream(const TAsset *asset, std::ostream &outputStream);
+
+ private:
+
+ static void serializeMessageIntoNewBuffer(capnp::MallocMessageBuilder& message, unsigned char ** outBuffer, uint32_t &outSize);
+
+ // Specialized
+ static bool serializeIntoBuilder(TSerializationBuilder& assetBuilder, const TAsset* asset);
+ static bool serializeIntoMessage(capnp::MallocMessageBuilder& message, const TAsset* asset);
+ static TAsset* deserializeFromStreamReader(capnp::InputStreamMessageReader &message);
+ };
+
+ template<typename TAsset, typename TSerializationReader, typename TSerializationBuilder>
+ TAsset* ExtSerialization<TAsset, TSerializationReader, TSerializationBuilder>::deserialize(const unsigned char* input, uint32_t size)
+ {
+ kj::ArrayPtr<const unsigned char> source(input, size);
+
+ kj::ArrayInputStream inputStream(source);
+
+ std::vector<uint64_t> scratch;
+ scratch.resize(size);
+ kj::ArrayPtr<capnp::word> scratchArray((capnp::word*) scratch.data(), size);
+
+ capnp::InputStreamMessageReader message(inputStream, capnp::ReaderOptions(), scratchArray);
+
+ return deserializeFromStreamReader(message);
+ }
+
+ template<typename TAsset, typename TSerializationReader, typename TSerializationBuilder>
+ TAsset* ExtSerialization<TAsset, TSerializationReader, TSerializationBuilder>::deserializeFromStream(std::istream &inputStream)
+ {
+ Nv::Blast::ExtInputStream readStream(inputStream);
+
+ capnp::InputStreamMessageReader message(readStream);
+
+ return deserializeFromStreamReader(message);
+ }
+
+ template<typename TAsset, typename TSerializationReader, typename TSerializationBuilder>
+ bool ExtSerialization<TAsset, TSerializationReader, TSerializationBuilder>::serializeIntoExistingBuffer(const TAsset *asset, unsigned char *buffer, uint32_t maxSize, uint32_t &usedSize)
+ {
+ capnp::MallocMessageBuilder message;
+
+ bool result = serializeIntoMessage(message, asset);
+
+ if (result == false)
+ {
+ usedSize = 0;
+ return false;
+ }
+
+ uint32_t messageSize = computeSerializedSizeInWords(message) * sizeof(uint64_t);
+
+ if (maxSize < messageSize)
+ {
+ NvBlastLog logFn = gLog;
+
+#if !defined(BLAST_LL_ALLOC)
+ logFn = NvBlastTkFrameworkGet()->getLogFn();
+#endif
+
+ NVBLAST_LOG_ERROR(logFn, "When attempting to serialize into an existing buffer, the provided buffer was too small.");
+ usedSize = 0;
+ return false;
+ }
+
+ kj::ArrayPtr<unsigned char> outputBuffer(buffer, maxSize);
+ kj::ArrayOutputStream outputStream(outputBuffer);
+
+ capnp::writeMessage(outputStream, message);
+
+ usedSize = messageSize;;
+ return true;
+ }
+
+ template<typename TAsset, typename TSerializationReader, typename TSerializationBuilder>
+ bool ExtSerialization<TAsset, TSerializationReader, TSerializationBuilder>::serializeIntoNewBuffer(const TAsset *asset, unsigned char **outBuffer, uint32_t &outSize)
+ {
+ capnp::MallocMessageBuilder message;
+
+ bool result = serializeIntoMessage(message, asset);
+
+ if (result == false)
+ {
+ *outBuffer = nullptr;
+ outSize = 0;
+ return false;
+ }
+
+ serializeMessageIntoNewBuffer(message, outBuffer, outSize);
+
+ return true;
+ }
+
+ template<typename TAsset, typename TSerializationReader, typename TSerializationBuilder>
+ bool ExtSerialization<TAsset, TSerializationReader, TSerializationBuilder>::serializeIntoStream(const TAsset *asset, std::ostream &outputStream)
+ {
+ capnp::MallocMessageBuilder message;
+
+ bool result = serializeIntoMessage(message, asset);
+
+ if (result == false)
+ {
+ return false;
+ }
+
+ Nv::Blast::ExtOutputStream blastOutputStream(outputStream);
+
+ writeMessage(blastOutputStream, message);
+
+ return true;
+ }
+
+ template<typename TAsset, typename TSerializationReader, typename TSerializationBuilder>
+ void ExtSerialization<TAsset, TSerializationReader, TSerializationBuilder>::serializeMessageIntoNewBuffer(capnp::MallocMessageBuilder& message, unsigned char ** outBuffer, uint32_t &outSize)
+ {
+ uint32_t messageSize = computeSerializedSizeInWords(message) * sizeof(uint64_t);
+
+ NvBlastExtAlloc allocFn = gAlloc;
+
+#if !defined(BLAST_LL_ALLOC)
+ allocFn = ExtAllocator::alignedAlloc16;
+#endif
+
+ unsigned char* buffer = static_cast<unsigned char *>(allocFn(messageSize));
+
+ kj::ArrayPtr<unsigned char> outputBuffer(buffer, messageSize);
+ kj::ArrayOutputStream outputStream(outputBuffer);
+
+ capnp::writeMessage(outputStream, message);
+
+ *outBuffer = buffer;
+ outSize = messageSize;
+ }
+ }
+}
diff --git a/NvBlast/sdk/extensions/serialization/source/NvBlastExtSerializationImpl.h b/NvBlast/sdk/extensions/serialization/source/NvBlastExtSerializationImpl.h
new file mode 100644
index 0000000..7f9fbc9
--- /dev/null
+++ b/NvBlast/sdk/extensions/serialization/source/NvBlastExtSerializationImpl.h
@@ -0,0 +1,75 @@
+/*
+* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#pragma once
+
+#include "NvBlastExtSerialization.h"
+#include "NvBlastTkAsset.h"
+#include "NvBlastExtPxAsset.h"
+#include "TkAssetDTO.h"
+#include "ExtPxAssetDTO.h"
+
+namespace Nv
+{
+ namespace Blast
+ {
+ /*
+ Specializations here - one set for each top level asset. (TkAsset, ExtPxAsset)
+ */
+
+
+ // TkAsset
+ template<>
+ NV_INLINE bool ExtSerialization<Nv::Blast::TkAsset, Nv::Blast::Serialization::TkAsset::Reader, Nv::Blast::Serialization::TkAsset::Builder>::serializeIntoBuilder(Nv::Blast::Serialization::TkAsset::Builder& assetBuilder, const Nv::Blast::TkAsset* asset)
+ {
+ return TkAssetDTO::serialize(assetBuilder, asset);
+ }
+
+ template<>
+ NV_INLINE Nv::Blast::TkAsset* ExtSerialization<Nv::Blast::TkAsset, Nv::Blast::Serialization::TkAsset::Reader, Nv::Blast::Serialization::TkAsset::Builder>::deserializeFromStreamReader(capnp::InputStreamMessageReader &message)
+ {
+ Nv::Blast::Serialization::TkAsset::Reader reader = message.getRoot<Nv::Blast::Serialization::TkAsset>();
+
+ return TkAssetDTO::deserialize(reader);
+ }
+
+ template<>
+ NV_INLINE bool ExtSerialization<Nv::Blast::TkAsset, Nv::Blast::Serialization::TkAsset::Reader, Nv::Blast::Serialization::TkAsset::Builder>::serializeIntoMessage(capnp::MallocMessageBuilder& message, const Nv::Blast::TkAsset* asset)
+ {
+ Nv::Blast::Serialization::TkAsset::Builder assetBuilder = message.initRoot<Nv::Blast::Serialization::TkAsset>();
+
+ return serializeIntoBuilder(assetBuilder, asset);
+ }
+
+
+ //ExtPxAsset
+ template<>
+ NV_INLINE bool ExtSerialization<Nv::Blast::ExtPxAsset, Nv::Blast::Serialization::ExtPxAsset::Reader, Nv::Blast::Serialization::ExtPxAsset::Builder>::serializeIntoBuilder(Nv::Blast::Serialization::ExtPxAsset::Builder& assetBuilder, const Nv::Blast::ExtPxAsset* asset)
+ {
+ return ExtPxAssetDTO::serialize(assetBuilder, asset);
+ }
+
+ template<>
+ NV_INLINE Nv::Blast::ExtPxAsset* ExtSerialization<Nv::Blast::ExtPxAsset, Nv::Blast::Serialization::ExtPxAsset::Reader, Nv::Blast::Serialization::ExtPxAsset::Builder>::deserializeFromStreamReader(capnp::InputStreamMessageReader &message)
+ {
+ Nv::Blast::Serialization::ExtPxAsset::Reader reader = message.getRoot<Nv::Blast::Serialization::ExtPxAsset>();
+
+ return ExtPxAssetDTO::deserialize(reader);
+ }
+
+ template<>
+ NV_INLINE bool ExtSerialization<Nv::Blast::ExtPxAsset, Nv::Blast::Serialization::ExtPxAsset::Reader, Nv::Blast::Serialization::ExtPxAsset::Builder>::serializeIntoMessage(capnp::MallocMessageBuilder& message, const Nv::Blast::ExtPxAsset* asset)
+ {
+ Nv::Blast::Serialization::ExtPxAsset::Builder assetBuilder = message.initRoot<Nv::Blast::Serialization::ExtPxAsset>();
+
+ return serializeIntoBuilder(assetBuilder, asset);
+ }
+ }
+}
diff --git a/NvBlast/sdk/extensions/serialization/source/NvBlastExtSerializationInterface.cpp b/NvBlast/sdk/extensions/serialization/source/NvBlastExtSerializationInterface.cpp
new file mode 100644
index 0000000..bebee5b
--- /dev/null
+++ b/NvBlast/sdk/extensions/serialization/source/NvBlastExtSerializationInterface.cpp
@@ -0,0 +1,133 @@
+/*
+* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#include "NvBlastExtSerializationImpl.h"
+#include <memory>
+#include "PxPhysicsVersion.h"
+#include "PxConvexMeshGeometryDTO.h"
+#include "NvBlastExtDefs.h"
+#include "PxPhysics.h"
+#include "NvBlastAssert.h"
+
+
+// This is terrible.
+physx::PxPhysics* g_Physics = nullptr;
+
+using namespace Nv::Blast;
+
+ // Creates a fresh PxCooking instance (with GPU data generation enabled),
+ // wrapped in a shared_ptr whose custom deleter calls release().
+ // Precondition: g_Physics must have been set via setPhysXSDK(); it is
+ // dereferenced unconditionally here. Returns nullptr if cooking creation fails.
+ // NOTE(review): a new cooking object is built on every call — callers must
+ // keep the returned shared_ptr alive for as long as any raw pointer taken
+ // from it (via .get()) is in use.
+ std::shared_ptr<physx::PxCooking> getCooking()
+ {
+ physx::PxCookingParams cookingParams(g_Physics->getTolerancesScale());
+ cookingParams.buildGPUData = true;
+
+ std::shared_ptr<physx::PxCooking> m_Cooking = std::shared_ptr<physx::PxCooking>(PxCreateCooking(PX_PHYSICS_VERSION, g_Physics->getFoundation(), cookingParams), [=](physx::PxCooking* cooking)
+ {
+ cooking->release();
+ });
+
+ NVBLASTEXT_CHECK_ERROR(m_Cooking, "Error: failed to create PhysX Cooking\n", return nullptr);
+
+ return m_Cooking;
+ }
+
+
+ // C-linkage entry points for the serialization extension. TkAsset functions
+ // work without PhysX; ExtPxAsset functions require setPhysXSDK() first
+ // (asserted below) because deserialization/cooking needs a PxPhysics instance.
+ extern "C"
+ {
+ // Registers the PhysX SDK instance used by the ExtPxAsset functions below.
+ // Must be called before any ExtPxAsset (de)serialization.
+ NVBLAST_API void setPhysXSDK(physx::PxPhysics* physXSDK)
+ {
+ g_Physics = physXSDK;
+ }
+
+ //////////////////////////////////////////////////////////////////////////
+ // TkAsset
+ //////////////////////////////////////////////////////////////////////////
+
+ // Deserializes a TkAsset from a raw capnp-encoded buffer of 'size' bytes.
+ // Returns nullptr on failure.
+ NVBLAST_API Nv::Blast::TkAsset* deserializeTkAsset(const unsigned char* input, uint32_t size)
+ {
+ return Nv::Blast::ExtSerialization<Nv::Blast::TkAsset, Nv::Blast::Serialization::TkAsset::Reader, Nv::Blast::Serialization::TkAsset::Builder>::deserialize(input, size);
+ }
+
+ // Deserializes a TkAsset from a std::istream. Returns nullptr on failure.
+ NVBLAST_API Nv::Blast::TkAsset* deserializeTkAssetFromStream(std::istream &inputStream)
+ {
+ return Nv::Blast::ExtSerialization<Nv::Blast::TkAsset, Nv::Blast::Serialization::TkAsset::Reader, Nv::Blast::Serialization::TkAsset::Builder>::deserializeFromStream(inputStream);
+ }
+
+ // Serializes a TkAsset into the given output stream. Returns true on success.
+ NVBLAST_API bool serializeTkAssetIntoStream(const Nv::Blast::TkAsset *asset, std::ostream &outputStream)
+ {
+ return Nv::Blast::ExtSerialization<Nv::Blast::TkAsset, Nv::Blast::Serialization::TkAsset::Reader, Nv::Blast::Serialization::TkAsset::Builder>::serializeIntoStream(reinterpret_cast<const Nv::Blast::TkAsset *>(asset), outputStream);
+ }
+
+ // Serializes a TkAsset into a newly allocated buffer returned via outBuffer
+ // (size in outSize). Ownership of the buffer passes to the caller.
+ NVBLAST_API bool serializeTkAssetIntoNewBuffer(const Nv::Blast::TkAsset *asset, unsigned char **outBuffer, uint32_t &outSize)
+ {
+ return Nv::Blast::ExtSerialization<Nv::Blast::TkAsset, Nv::Blast::Serialization::TkAsset::Reader, Nv::Blast::Serialization::TkAsset::Builder>::serializeIntoNewBuffer(reinterpret_cast<const Nv::Blast::TkAsset *>(asset), outBuffer, outSize);
+ }
+
+ // Serializes a TkAsset into a caller-provided buffer of maxSize bytes;
+ // usedSize receives the number of bytes written.
+ NVBLAST_API bool serializeTkAssetIntoExistingBuffer(const Nv::Blast::TkAsset *asset, unsigned char *buffer, uint32_t maxSize, uint32_t &usedSize)
+ {
+ return Nv::Blast::ExtSerialization<Nv::Blast::TkAsset, Nv::Blast::Serialization::TkAsset::Reader, Nv::Blast::Serialization::TkAsset::Builder>::serializeIntoExistingBuffer(reinterpret_cast<const Nv::Blast::TkAsset *>(asset), buffer, maxSize, usedSize);
+ }
+
+ //////////////////////////////////////////////////////////////////////////
+ // ExtPxAsset
+ //////////////////////////////////////////////////////////////////////////
+
+ // Deserializes an ExtPxAsset from a raw buffer. Requires setPhysXSDK().
+ NVBLAST_API Nv::Blast::ExtPxAsset* deserializeExtPxAsset(const unsigned char* input, uint32_t size)
+ {
+ NVBLAST_ASSERT(g_Physics != nullptr);
+
+ return Nv::Blast::ExtSerialization<Nv::Blast::ExtPxAsset, Nv::Blast::Serialization::ExtPxAsset::Reader, Nv::Blast::Serialization::ExtPxAsset::Builder>::deserialize(input, size);
+ }
+
+ // Deserializes an ExtPxAsset from a std::istream. Requires setPhysXSDK().
+ NVBLAST_API Nv::Blast::ExtPxAsset* deserializeExtPxAssetFromStream(std::istream &inputStream)
+ {
+ NVBLAST_ASSERT(g_Physics != nullptr);
+
+ return Nv::Blast::ExtSerialization<Nv::Blast::ExtPxAsset, Nv::Blast::Serialization::ExtPxAsset::Reader, Nv::Blast::Serialization::ExtPxAsset::Builder>::deserializeFromStream(inputStream);
+ }
+
+ // Serializes an ExtPxAsset into a stream. A cooking instance is created for
+ // the duration of the call and handed to PxConvexMeshGeometryDTO via static
+ // members.
+ // NOTE(review): PxConvexMeshGeometryDTO::Cooking is left pointing at the
+ // cooking object after the local shared_ptr releases it on return — a
+ // dangling pointer if anything reads it outside this call. Consider
+ // clearing the statics before returning.
+ NVBLAST_API bool serializeExtPxAssetIntoStream(const Nv::Blast::ExtPxAsset *asset, std::ostream &outputStream)
+ {
+ NVBLAST_ASSERT(g_Physics != nullptr);
+
+ auto cooking = getCooking();
+
+ PxConvexMeshGeometryDTO::Cooking = cooking.get();
+ PxConvexMeshGeometryDTO::Physics = g_Physics;
+
+ return Nv::Blast::ExtSerialization<Nv::Blast::ExtPxAsset, Nv::Blast::Serialization::ExtPxAsset::Reader, Nv::Blast::Serialization::ExtPxAsset::Builder>::serializeIntoStream(reinterpret_cast<const Nv::Blast::ExtPxAsset *>(asset), outputStream);
+ }
+
+ // Serializes an ExtPxAsset into a newly allocated buffer (see the TkAsset
+ // counterpart for ownership semantics). Same dangling-Cooking caveat as above.
+ NVBLAST_API bool serializeExtPxAssetIntoNewBuffer(const Nv::Blast::ExtPxAsset *asset, unsigned char **outBuffer, uint32_t &outSize)
+ {
+ NVBLAST_ASSERT(g_Physics != nullptr);
+
+ auto cooking = getCooking();
+
+ PxConvexMeshGeometryDTO::Cooking = cooking.get();
+ PxConvexMeshGeometryDTO::Physics = g_Physics;
+
+ return Nv::Blast::ExtSerialization<Nv::Blast::ExtPxAsset, Nv::Blast::Serialization::ExtPxAsset::Reader, Nv::Blast::Serialization::ExtPxAsset::Builder>::serializeIntoNewBuffer(reinterpret_cast<const Nv::Blast::ExtPxAsset *>(asset), outBuffer, outSize);
+ }
+
+ // Serializes an ExtPxAsset into a caller-provided buffer. Same
+ // dangling-Cooking caveat as above.
+ NVBLAST_API bool serializeExtPxAssetIntoExistingBuffer(const Nv::Blast::ExtPxAsset *asset, unsigned char *buffer, uint32_t maxSize, uint32_t &usedSize)
+ {
+ NVBLAST_ASSERT(g_Physics != nullptr);
+
+ auto cooking = getCooking();
+
+ PxConvexMeshGeometryDTO::Cooking = cooking.get();
+ PxConvexMeshGeometryDTO::Physics = g_Physics;
+
+ return Nv::Blast::ExtSerialization<Nv::Blast::ExtPxAsset, Nv::Blast::Serialization::ExtPxAsset::Reader, Nv::Blast::Serialization::ExtPxAsset::Builder>::serializeIntoExistingBuffer(reinterpret_cast<const Nv::Blast::ExtPxAsset *>(asset), buffer, maxSize, usedSize);
+ }
+
+
+ }
+
diff --git a/NvBlast/sdk/extensions/serialization/source/NvBlastExtSerializationLL.capn b/NvBlast/sdk/extensions/serialization/source/NvBlastExtSerializationLL.capn
new file mode 100644
index 0000000..026056f
--- /dev/null
+++ b/NvBlast/sdk/extensions/serialization/source/NvBlastExtSerializationLL.capn
@@ -0,0 +1,89 @@
+# Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+#
+# NVIDIA CORPORATION and its licensors retain all intellectual property
+# and proprietary rights in and to this software, related documentation
+# and any modifications thereto. Any use, reproduction, disclosure or
+# distribution of this software and related documentation without an express
+# license agreement from NVIDIA CORPORATION is strictly prohibited.
+
+@0x9a4a58fac38375e0;
+
+using Cxx = import "/capnp/c++.capnp";
+
+$Cxx.namespace("Nv::Blast::Serialization");
+
+ # Serialized form of a low-level NvBlastAsset. Field names mirror the
+ # runtime Nv::Blast::Asset members.
+ struct Asset
+ {
+ # Data-block header (type tag, format version, size).
+ header @0 :NvBlastDataBlock;
+
+ # Globally unique asset identifier.
+ iD @1 :UUID;
+
+ chunkCount @2 :UInt32;
+
+ # Support-graph connectivity for the support chunks.
+ graph @3 :NvBlastSupportGraph;
+
+ leafChunkCount @4 :UInt32;
+
+ firstSubsupportChunkIndex @5 :UInt32;
+
+ bondCount @6 :UInt32;
+
+ chunks @7: List(NvBlastChunk);
+
+ bonds @8: List(NvBlastBond);
+
+ subtreeLeafChunkCounts @9: List(UInt32);
+
+ # Maps each chunk index to its support-graph node index.
+ chunkToGraphNodeMap @10: List(UInt32);
+ }
+
+ # Header common to serialized Blast data blocks; mirrors the runtime
+ # NvBlastDataBlock struct.
+ struct NvBlastDataBlock
+ {
+ enum Type
+ {
+ assetDataBlock @0;
+ instanceDataBlock @1;
+ }
+
+ dataType @0 :Type;
+
+ formatVersion @1 :UInt32;
+
+ size @2 :UInt32;
+ }
+
+ # Serialized chunk: centroid is stored as a list of Float32 (3 components
+ # in the runtime struct).
+ struct NvBlastChunk
+ {
+ centroid @0 :List(Float32);
+
+ volume @1 :Float32;
+
+ parentChunkIndex @2 :UInt32;
+ firstChildIndex @3 :UInt32;
+ childIndexStop @4 :UInt32;
+ userData @5 :UInt32;
+ }
+
+ # Serialized bond between two support chunks; normal and centroid are
+ # Float32 lists (3 components in the runtime struct).
+ struct NvBlastBond
+ {
+ normal @0 :List(Float32);
+ area @1 :Float32;
+ centroid @2 :List(Float32);
+ userData @3 :UInt32;
+ }
+
+ # Serialized support-graph adjacency (CSR-style: adjacencyPartition indexes
+ # into the adjacentNodeIndices/adjacentBondIndices lists).
+ struct NvBlastSupportGraph
+ {
+ nodeCount @0 : UInt32;
+
+ chunkIndices @1 : List(UInt32);
+ adjacencyPartition @2 : List(UInt32);
+ adjacentNodeIndices @3 : List(UInt32);
+ adjacentBondIndices @4 : List(UInt32);
+ }
+
+ # Raw 16-byte (by convention) identifier payload.
+ struct UUID
+ {
+ value @0 : Data;
+ }
+
diff --git a/NvBlast/sdk/extensions/serialization/source/NvBlastExtSerializationLLImpl.h b/NvBlast/sdk/extensions/serialization/source/NvBlastExtSerializationLLImpl.h
new file mode 100644
index 0000000..d7595ba
--- /dev/null
+++ b/NvBlast/sdk/extensions/serialization/source/NvBlastExtSerializationLLImpl.h
@@ -0,0 +1,48 @@
+/*
+* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#pragma once
+
+#include "NvBlastExtSerialization.h"
+#include "NvBlastAsset.h"
+#include "AssetDTO.h"
+
+namespace Nv
+{
+ namespace Blast
+ {
+ /*
+ Specializations here - LL asset only
+ */
+
+ // Asset
+ // Low-level Asset specializations: field mapping is delegated to AssetDTO;
+ // these mirror the TkAsset/ExtPxAsset specializations in the high-level
+ // serialization extension.
+ template<>
+ NV_INLINE bool ExtSerialization<Nv::Blast::Asset, Nv::Blast::Serialization::Asset::Reader, Nv::Blast::Serialization::Asset::Builder>::serializeIntoBuilder(Nv::Blast::Serialization::Asset::Builder& assetBuilder, const Nv::Blast::Asset* asset)
+ {
+ return AssetDTO::serialize(assetBuilder, asset);
+ }
+
+ // Reads the Asset root from the capnp stream reader and rebuilds the
+ // runtime object via AssetDTO.
+ template<>
+ NV_INLINE Nv::Blast::Asset* ExtSerialization<Nv::Blast::Asset, Nv::Blast::Serialization::Asset::Reader, Nv::Blast::Serialization::Asset::Builder>::deserializeFromStreamReader(capnp::InputStreamMessageReader &message)
+ {
+ Nv::Blast::Serialization::Asset::Reader reader = message.getRoot<Nv::Blast::Serialization::Asset>();
+
+ return AssetDTO::deserialize(reader);
+ }
+
+ // Initializes the capnp message root for an Asset, then delegates to
+ // serializeIntoBuilder above.
+ template<>
+ NV_INLINE bool ExtSerialization<Nv::Blast::Asset, Nv::Blast::Serialization::Asset::Reader, Nv::Blast::Serialization::Asset::Builder>::serializeIntoMessage(capnp::MallocMessageBuilder& message, const Nv::Blast::Asset* asset)
+ {
+ Nv::Blast::Serialization::Asset::Builder assetBuilder = message.initRoot<Nv::Blast::Serialization::Asset>();
+
+ return serializeIntoBuilder(assetBuilder, asset);
+ }
+ }
+}
diff --git a/NvBlast/sdk/extensions/serialization/source/NvBlastExtSerializationLLInterface.cpp b/NvBlast/sdk/extensions/serialization/source/NvBlastExtSerializationLLInterface.cpp
new file mode 100644
index 0000000..26d8667
--- /dev/null
+++ b/NvBlast/sdk/extensions/serialization/source/NvBlastExtSerializationLLInterface.cpp
@@ -0,0 +1,101 @@
+/*
+* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#include "NvBlastExtSerializationLLInterface.h"
+#include "NvBlastExtSerializationLLImpl.h"
+#include <memory>
+#include "NvBlastExtSerialization.h"
+#include "NvBlastAsset.h"
+#include <iostream>
+#include "NvBlastExtGlobals.h"
+
+
+ // Global allocator and log callbacks for the low-level serialization
+ // library; set via setAllocator()/setLog() below.
+ NvBlastExtAlloc gAlloc = nullptr;
+ NvBlastLog gLog = nullptr;
+
+ // C-linkage entry points for low-level NvBlastAsset (de)serialization.
+ // When built with BLAST_LL_ALLOC, every function first verifies that both
+ // gAlloc and gLog have been set.
+ // NOTE(review): the guard block below is duplicated five times verbatim —
+ // a shared helper would keep the checks in sync.
+ extern "C"
+ {
+ // Registers the allocator callback (required under BLAST_LL_ALLOC).
+ NVBLAST_API void setAllocator(NvBlastExtAlloc alloc)
+ {
+ gAlloc = alloc;
+ }
+
+ // Registers the log callback (required under BLAST_LL_ALLOC).
+ NVBLAST_API void setLog(NvBlastLog log)
+ {
+ gLog = log;
+ }
+
+ // Deserializes a low-level asset from a raw capnp-encoded buffer.
+ // Returns nullptr on failure (or if the required globals are unset).
+ NVBLAST_API NvBlastAsset* deserializeAsset(const unsigned char* input, uint32_t size)
+ {
+#if defined(BLAST_LL_ALLOC)
+ if (gAlloc == nullptr || gLog == nullptr)
+ {
+ std::cerr << "Must set allocator and log when using low level serialization library. See setAllocator() and setLog() functions." << std::endl;
+ return nullptr;
+ }
+#endif
+
+ return Nv::Blast::ExtSerialization<Nv::Blast::Asset, Nv::Blast::Serialization::Asset::Reader, Nv::Blast::Serialization::Asset::Builder>::deserialize(input, size);
+ }
+
+ // Deserializes a low-level asset from a std::istream.
+ NVBLAST_API NvBlastAsset* deserializeAssetFromStream(std::istream &inputStream)
+ {
+#if defined(BLAST_LL_ALLOC)
+ if (gAlloc == nullptr || gLog == nullptr)
+ {
+ std::cerr << "Must set allocator and log when using low level serialization library. See setAllocator() and setLog() functions." << std::endl;
+ return nullptr;
+ }
+#endif
+
+ return Nv::Blast::ExtSerialization<Nv::Blast::Asset, Nv::Blast::Serialization::Asset::Reader, Nv::Blast::Serialization::Asset::Builder>::deserializeFromStream(inputStream);
+ }
+
+ // Serializes a low-level asset into the given output stream.
+ NVBLAST_API bool serializeAssetIntoStream(const NvBlastAsset *asset, std::ostream &outputStream)
+ {
+#if defined(BLAST_LL_ALLOC)
+ if (gAlloc == nullptr || gLog == nullptr)
+ {
+ std::cerr << "Must set allocator and log when using low level serialization library. See setAllocator() and setLog() functions." << std::endl;
+ return false;
+ }
+#endif
+
+ return Nv::Blast::ExtSerialization<Nv::Blast::Asset, Nv::Blast::Serialization::Asset::Reader, Nv::Blast::Serialization::Asset::Builder>::serializeIntoStream(reinterpret_cast<const Nv::Blast::Asset *>(asset), outputStream);
+ }
+
+ // Serializes into a newly allocated buffer; caller takes ownership of
+ // *outBuffer (outSize bytes).
+ NVBLAST_API bool serializeAssetIntoNewBuffer(const NvBlastAsset *asset, unsigned char **outBuffer, uint32_t &outSize)
+ {
+#if defined(BLAST_LL_ALLOC)
+ if (gAlloc == nullptr || gLog == nullptr)
+ {
+ std::cerr << "Must set allocator and log when using low level serialization library. See setAllocator() and setLog() functions." << std::endl;
+ return false;
+ }
+#endif
+
+ return Nv::Blast::ExtSerialization<Nv::Blast::Asset, Nv::Blast::Serialization::Asset::Reader, Nv::Blast::Serialization::Asset::Builder>::serializeIntoNewBuffer(reinterpret_cast<const Nv::Blast::Asset *>(asset), outBuffer, outSize);
+ }
+
+ // Serializes into a caller-provided buffer of maxSize bytes; usedSize
+ // receives the number of bytes written.
+ NVBLAST_API bool serializeAssetIntoExistingBuffer(const NvBlastAsset *asset, unsigned char *buffer, uint32_t maxSize, uint32_t &usedSize)
+ {
+#if defined(BLAST_LL_ALLOC)
+ if (gAlloc == nullptr || gLog == nullptr)
+ {
+ std::cerr << "Must set allocator and log when using low level serialization library. See setAllocator() and setLog() functions." << std::endl;
+ return false;
+ }
+#endif
+
+ return Nv::Blast::ExtSerialization<Nv::Blast::Asset, Nv::Blast::Serialization::Asset::Reader, Nv::Blast::Serialization::Asset::Builder>::serializeIntoExistingBuffer(reinterpret_cast<const Nv::Blast::Asset *>(asset), buffer, maxSize, usedSize);
+ }
+
+}
+
diff --git a/NvBlast/sdk/extensions/shaders/include/NvBlastExtDamageShaders.h b/NvBlast/sdk/extensions/shaders/include/NvBlastExtDamageShaders.h
new file mode 100644
index 0000000..385bf52
--- /dev/null
+++ b/NvBlast/sdk/extensions/shaders/include/NvBlastExtDamageShaders.h
@@ -0,0 +1,111 @@
+/*
+* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#ifndef NVBLASTEXTDAMAGESHADERS_H
+#define NVBLASTEXTDAMAGESHADERS_H
+
+#include "NvBlastTypes.h"
+#include "NvBlastPreprocessor.h"
+
+/**
+A few example damage shader implementations.
+*/
+
+
+///////////////////////////////////////////////////////////////////////////////
+// Common Material
+///////////////////////////////////////////////////////////////////////////////
+
+/**
+Specific parameters for the material functions here present.
+
+Material function implementers may choose their own set.
+*/
+ // Plain-old-data material parameters consumed by the example shaders below
+ // (passed through NvBlastProgramParams::material).
+ struct NvBlastExtMaterial
+ {
+ float singleChunkThreshold; //!< subsupport chunks only take damage surpassing this value
+ float graphChunkThreshold; //!< support chunks only take damage surpassing this value
+ float bondTangentialThreshold; //!< bond only take damage surpassing this value
+ float bondNormalThreshold; //!< currently unused - forward damage propagation
+ float damageAttenuation; //!< factor of damage attenuation while forwarding
+ };
+
+
+///////////////////////////////////////////////////////////////////////////////
+// Radial Damage
+///////////////////////////////////////////////////////////////////////////////
+
+/**
+Radial Damage Desc
+*/
+ // POD descriptor consumed by the radial falloff/cutter shaders; an array of
+ // these is passed as NvBlastProgramParams::damageDescBuffer.
+ struct NvBlastExtRadialDamageDesc
+ {
+ float compressive; //!< compressive (radial) damage component
+ float position[3]; //!< origin of damage action
+ float minRadius; //!< inner radius of damage action
+ float maxRadius; //!< outer radius of damage action
+ };
+
+/**
+Radial Falloff and Radial Cutter damage for both graph and subgraph shaders.
+
+NOTE: The signature of shader functions are equal to NvBlastGraphShaderFunction and NvBlastSubgraphShaderFunction respectively.
+They are not expected to be called directly.
+@see NvBlastGraphShaderFunction, NvBlastSubgraphShaderFunction
+*/
+NVBLAST_API void NvBlastExtFalloffGraphShader(NvBlastFractureBuffers* commandBuffers, const NvBlastGraphShaderActor* actor, const NvBlastProgramParams* params);
+NVBLAST_API void NvBlastExtFalloffSubgraphShader(NvBlastFractureBuffers* commandBuffers, const NvBlastSubgraphShaderActor* actor, const NvBlastProgramParams* params);
+NVBLAST_API void NvBlastExtCutterGraphShader(NvBlastFractureBuffers* commandBuffers, const NvBlastGraphShaderActor* actor, const NvBlastProgramParams* params);
+NVBLAST_API void NvBlastExtCutterSubgraphShader(NvBlastFractureBuffers* commandBuffers, const NvBlastSubgraphShaderActor* actor, const NvBlastProgramParams* params);
+
+
+/**
+Helper Radial Falloff Damage function.
+
+Basically it calls NvBlastActorGenerateFracture and then NvBlastActorApplyFracture with Radial Falloff shader.
+
+\param[in,out] actor The NvBlastActor to apply fracture to.
+\param[in,out] buffers Target buffers to hold applied command events.
+\param[in] damageDescBuffer Damage descriptors array.
+\param[in] damageDescCount Size of damage descriptors array.
+\param[in] material Material to use.
+\param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL.
+\param[in,out] timers If non-NULL this struct will be filled out with profiling information for the step, in profile build configurations.
+
+\return true iff any fracture was applied.
+*/
+NVBLAST_API bool NvBlastExtDamageActorRadialFalloff(NvBlastActor* actor, NvBlastFractureBuffers* buffers, const NvBlastExtRadialDamageDesc* damageDescBuffer, uint32_t damageDescCount, const NvBlastExtMaterial* material, NvBlastLog logFn, NvBlastTimers* timers);
+
+
+///////////////////////////////////////////////////////////////////////////////
+// Shear Damage
+///////////////////////////////////////////////////////////////////////////////
+
+/**
+Shear Damage Desc
+*/
+ // POD descriptor consumed by the shear shaders; shear encodes direction and
+ // (by its length) magnitude of the applied damage.
+ struct NvBlastExtShearDamageDesc
+ {
+ float shear[3]; //!< directional damage component
+ float position[3]; //!< origin of damage action
+ };
+
+/**
+Shear Damage Shaders
+
+NOTE: The signature of shader functions are equal to NvBlastGraphShaderFunction and NvBlastSubgraphShaderFunction respectively.
+They are not expected to be called directly.
+@see NvBlastGraphShaderFunction, NvBlastSubgraphShaderFunction
+*/
+NVBLAST_API void NvBlastExtShearGraphShader(NvBlastFractureBuffers* commandBuffers, const NvBlastGraphShaderActor* actor, const NvBlastProgramParams* params);
+NVBLAST_API void NvBlastExtShearSubgraphShader(NvBlastFractureBuffers* commandBuffers, const NvBlastSubgraphShaderActor* actor, const NvBlastProgramParams* params);
+
+
+#endif // NVBLASTEXTDAMAGESHADERS_H
diff --git a/NvBlast/sdk/extensions/shaders/source/NvBlastExtRadialShaders.cpp b/NvBlast/sdk/extensions/shaders/source/NvBlastExtRadialShaders.cpp
new file mode 100644
index 0000000..00d1010
--- /dev/null
+++ b/NvBlast/sdk/extensions/shaders/source/NvBlastExtRadialShaders.cpp
@@ -0,0 +1,205 @@
+/*
+* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#include "NvBlastExtDamageShaders.h"
+#include "NvBlastIndexFns.h"
+#include "NvBlastMath.h"
+#include "NvBlastGeometry.h"
+#include "NvBlastAssert.h"
+#include "NvBlast.h"
+#include "stdlib.h" // for abs() on linux
+
+using namespace Nv::Blast;
+using namespace Nv::Blast::VecMath;
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+// Profiles
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+typedef float(*ProfileFunction)(float, float, float, float);
+
+ // Linear falloff profile: returns f for x <= min, 0 for x > max, and a
+ // value interpolated linearly from f down to 0 across (min, max].
+ // NOTE(review): when max == min and x lies exactly on that value, the
+ // interpolation divides by zero — confirm callers guarantee max > min.
+ float falloffProfile(float min, float max, float x, float f = 1.0f)
+ {
+ if (x > max) return 0.0f;
+ if (x < min) return f;
+
+ float y = 1.0f - (x - min) / (max - min);
+ return y * f;
+ }
+
+ // Step ("cutter") profile: returns f when x lies inside [min, max],
+ // otherwise 0 — no falloff.
+ float cutterProfile(float min, float max, float x, float f = 1.0f)
+ {
+ if (x > max || x < min) return 0.0f;
+
+ return f;
+ }
+
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+// Radial Graph Shader Template
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+ // Walks the actor's graph-node linked list and, for every unbroken bond
+ // (visited once via the currentNodeIndex < adjacentNodeIndex ordering),
+ // accumulates damage from all radial descriptors using the given profile
+ // function. A bond-fracture command is emitted per damaged bond; chunk
+ // fractures are not produced by this shader.
+ // NOTE(review): outCount is never checked against the capacity implied by
+ // commandBuffers->bondFractureCount on entry — callers must size the bond
+ // fracture buffer for the worst case or this overruns it.
+ template <ProfileFunction profile>
+ void RadialProfileGraphShader(NvBlastFractureBuffers* commandBuffers, const NvBlastGraphShaderActor* actor, const NvBlastProgramParams* params)
+ {
+ const uint32_t* graphNodeIndexLinks = actor->graphNodeIndexLinks;
+ const uint32_t firstGraphNodeIndex = actor->firstGraphNodeIndex;
+ const uint32_t* adjacencyPartition = actor->adjacencyPartition;
+ const uint32_t* adjacentNodeIndices = actor->adjacentNodeIndices;
+ const uint32_t* adjacentBondIndices = actor->adjacentBondIndices;
+ const NvBlastBond* assetBonds = actor->assetBonds;
+ const float* familyBondHealths = actor->familyBondHealths;
+
+ const NvBlastExtRadialDamageDesc* damageData = reinterpret_cast<const NvBlastExtRadialDamageDesc*>(params->damageDescBuffer);
+ const uint32_t damageCount = params->damageDescCount;
+
+ uint32_t outCount = 0;
+
+ // Iterate the actor's graph nodes via the invalid-index-terminated links.
+ uint32_t currentNodeIndex = firstGraphNodeIndex;
+ while (!Nv::Blast::isInvalidIndex(currentNodeIndex))
+ {
+ for (uint32_t adj = adjacencyPartition[currentNodeIndex]; adj < adjacencyPartition[currentNodeIndex + 1]; adj++)
+ {
+ uint32_t adjacentNodeIndex = adjacentNodeIndices[adj];
+ // Process each bond only once (from its lower-indexed endpoint).
+ if (currentNodeIndex < adjacentNodeIndex)
+ {
+ uint32_t bondIndex = adjacentBondIndices[adj];
+
+ // skip bonds that are already broken or were visited already
+ // TODO: investigate why testing against health > -1.0f seems slower
+ // could reuse the island edge bitmap instead
+ if ((familyBondHealths[bondIndex] > 0.0f))
+ {
+
+ const NvBlastBond& bond = assetBonds[bondIndex];
+
+ float totalBondDamage = 0.0f;
+
+ // Sum the profile contribution of every damage descriptor at
+ // this bond's centroid.
+ for (uint32_t damageIndex = 0; damageIndex < damageCount; damageIndex++)
+ {
+ const NvBlastExtRadialDamageDesc& damage = damageData[damageIndex];
+
+ float relativePosition[3];
+ sub(damage.position, bond.centroid, relativePosition);
+ float distance = sqrtf(dot(relativePosition, relativePosition));
+
+ // NOTE(review): 'dir' is computed but never used below.
+ float dir[3];
+ normal(relativePosition, dir);
+
+ totalBondDamage += profile(damage.minRadius, damage.maxRadius, distance, damage.compressive);
+ }
+
+ if (totalBondDamage > 0.0f)
+ {
+ NvBlastBondFractureData& outCommand = commandBuffers->bondFractures[outCount++];
+ outCommand.nodeIndex0 = currentNodeIndex;
+ outCommand.nodeIndex1 = adjacentNodeIndex;
+ outCommand.health = totalBondDamage;
+ }
+ }
+ }
+ }
+ currentNodeIndex = graphNodeIndexLinks[currentNodeIndex];
+ }
+
+ commandBuffers->bondFractureCount = outCount;
+ commandBuffers->chunkFractureCount = 0;
+ }
+
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+// Radial Single Shader Template
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+ // Single-chunk (subsupport) variant: accumulates radial damage at the
+ // chunk's centroid and emits at most one chunk-fracture command; no bond
+ // fractures are produced.
+ // NOTE(review): assumes commandBuffers->chunkFractures has room for at
+ // least one entry — no capacity check is made.
+ template <ProfileFunction profile>
+ void RadialProfileSubgraphShader(NvBlastFractureBuffers* commandBuffers, const NvBlastSubgraphShaderActor* actor, const NvBlastProgramParams* params)
+ {
+ uint32_t chunkFractureCount = 0;
+ const uint32_t chunkIndex = actor->chunkIndex;
+ const NvBlastChunk* assetChunks = actor->assetChunks;
+ const NvBlastChunk& chunk = assetChunks[chunkIndex];
+
+ float totalDamage = 0.0f;
+ for (uint32_t i = 0; i < params->damageDescCount; ++i)
+ {
+ const NvBlastExtRadialDamageDesc& damage = reinterpret_cast<const NvBlastExtRadialDamageDesc*>(params->damageDescBuffer)[i];
+
+ float relativePosition[3];
+ sub(damage.position, chunk.centroid, relativePosition);
+ float distance = sqrtf(dot(relativePosition, relativePosition));
+
+ totalDamage += profile(damage.minRadius, damage.maxRadius, distance, damage.compressive);
+ }
+
+ if (totalDamage > 0.0f)
+ {
+ NvBlastChunkFractureData& frac = commandBuffers->chunkFractures[chunkFractureCount++];
+ frac.chunkIndex = chunkIndex;
+ frac.health = totalDamage;
+ }
+
+ commandBuffers->bondFractureCount = 0;
+ commandBuffers->chunkFractureCount = chunkFractureCount;
+ }
+
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+// Shader Instantiation
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+ // Public shader entry points: each instantiates the shared radial template
+ // with the appropriate profile function (linear falloff or hard cutter).
+ void NvBlastExtFalloffGraphShader(NvBlastFractureBuffers* commandBuffers, const NvBlastGraphShaderActor* actor, const NvBlastProgramParams* params)
+ {
+ RadialProfileGraphShader<falloffProfile>(commandBuffers, actor, params);
+ }
+
+ void NvBlastExtFalloffSubgraphShader(NvBlastFractureBuffers* commandBuffers, const NvBlastSubgraphShaderActor* actor, const NvBlastProgramParams* params)
+ {
+ RadialProfileSubgraphShader<falloffProfile>(commandBuffers, actor, params);
+ }
+
+ void NvBlastExtCutterGraphShader(NvBlastFractureBuffers* commandBuffers, const NvBlastGraphShaderActor* actor, const NvBlastProgramParams* params)
+ {
+ RadialProfileGraphShader<cutterProfile>(commandBuffers, actor, params);
+ }
+
+ void NvBlastExtCutterSubgraphShader(NvBlastFractureBuffers* commandBuffers, const NvBlastSubgraphShaderActor* actor, const NvBlastProgramParams* params)
+ {
+ RadialProfileSubgraphShader<cutterProfile>(commandBuffers, actor, params);
+ }
+
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+// Helper Functions
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+ // Convenience helper: generates fracture commands with the falloff shaders
+ // and, if any were produced, applies them to the actor in the same call.
+ // Returns true iff at least one bond or chunk fracture was applied.
+ bool NvBlastExtDamageActorRadialFalloff(NvBlastActor* actor, NvBlastFractureBuffers* buffers, const NvBlastExtRadialDamageDesc* damageDescBuffer, uint32_t damageDescCount, const NvBlastExtMaterial* material, NvBlastLog logFn, NvBlastTimers* timers)
+ {
+ NvBlastDamageProgram program =
+ {
+ NvBlastExtFalloffGraphShader,
+ NvBlastExtFalloffSubgraphShader
+ };
+
+ NvBlastProgramParams params =
+ {
+ damageDescBuffer,
+ damageDescCount,
+ material
+ };
+
+ // Generate fills 'buffers' with commands; Apply consumes them in place.
+ NvBlastActorGenerateFracture(buffers, actor, program, &params, logFn, timers);
+ if (buffers->bondFractureCount > 0 || buffers->chunkFractureCount > 0)
+ {
+ NvBlastActorApplyFracture(nullptr, actor, buffers, logFn, timers);
+ return true;
+ }
+
+ return false;
+} \ No newline at end of file
diff --git a/NvBlast/sdk/extensions/shaders/source/NvBlastExtShearShaders.cpp b/NvBlast/sdk/extensions/shaders/source/NvBlastExtShearShaders.cpp
new file mode 100644
index 0000000..26707e1
--- /dev/null
+++ b/NvBlast/sdk/extensions/shaders/source/NvBlastExtShearShaders.cpp
@@ -0,0 +1,149 @@
+/*
+* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#include "NvBlastExtDamageShaders.h"
+#include "NvBlastIndexFns.h"
+#include "NvBlastMath.h"
+#include "NvBlastGeometry.h"
+#include "NvBlastAssert.h"
+#include "stdlib.h" // for abs() on linux
+
+using namespace Nv::Blast;
+using namespace Nv::Blast::VecMath;
+
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+// Graph Shader
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+ // Shear graph shader: for each damage descriptor, finds the graph node
+ // closest to the damage position, then walks "ahead" along the shear
+ // direction node by node, fracturing bonds whose tangential component
+ // exceeds the material threshold. Output counts are clamped to the
+ // capacities passed in via commandBuffers on entry.
+ void NvBlastExtShearGraphShader(NvBlastFractureBuffers* commandBuffers, const NvBlastGraphShaderActor* actor, const NvBlastProgramParams* params)
+ {
+ const NvBlastExtMaterial* materialProperties = reinterpret_cast<const NvBlastExtMaterial*>(params->material);
+ const float graphChunkThreshold = materialProperties->graphChunkThreshold;
+ const float bondTangentialThreshold = materialProperties->bondTangentialThreshold;
+ const float damageAttenuation = 1.0f - materialProperties->damageAttenuation;
+
+ // On entry the counts carry buffer capacities; they are rewritten with
+ // actual counts before returning.
+ uint32_t chunkFractureCount = 0;
+ uint32_t chunkFractureCountMax = commandBuffers->chunkFractureCount;
+ uint32_t bondFractureCount = 0;
+ uint32_t bondFractureCountMax = commandBuffers->bondFractureCount;
+
+ for (uint32_t i = 0; i < params->damageDescCount; ++i)
+ {
+ const NvBlastExtShearDamageDesc& damage = reinterpret_cast<const NvBlastExtShearDamageDesc*>(params->damageDescBuffer)[i];
+
+ const uint32_t* graphNodeIndexLinks = actor->graphNodeIndexLinks;
+ const uint32_t firstGraphNodeIndex = actor->firstGraphNodeIndex;
+ const uint32_t* chunkIndices = actor->chunkIndices;
+ const uint32_t* adjacencyPartition = actor->adjacencyPartition;
+ const uint32_t* adjacentNodeIndices = actor->adjacentNodeIndices;
+ const uint32_t* adjacentBondIndices = actor->adjacentBondIndices;
+ const NvBlastBond* assetBonds = actor->assetBonds;
+ const float* familyBondHealths = actor->familyBondHealths;
+
+ uint32_t closestNode = findNodeByPositionLinked(damage.position, firstGraphNodeIndex, graphNodeIndexLinks, adjacencyPartition, adjacentNodeIndices, adjacentBondIndices, assetBonds, familyBondHealths);
+ NVBLAST_ASSERT(!isInvalidIndex(closestNode));
+
+ // damageMag = |damage.shear|; damageDir = normalized shear direction.
+ float damageDir[3];
+ float damageMag = VecMath::normal(damage.shear, damageDir);
+
+ uint32_t nodeIndex = closestNode;
+ float maxDist = 0.0f;
+ uint32_t nextNode = invalidIndex<uint32_t>();
+
+ // Fracture the starting chunk itself if the shear magnitude exceeds
+ // the graph-chunk threshold.
+ if (damageMag > graphChunkThreshold && chunkFractureCount < chunkFractureCountMax)
+ {
+ NvBlastChunkFractureData& frac = commandBuffers->chunkFractures[chunkFractureCount++];
+ frac.chunkIndex = chunkIndices[nodeIndex];
+ frac.health = damageMag * 2;
+ }
+
+ do {
+ const uint32_t startIndex = adjacencyPartition[nodeIndex];
+ const uint32_t stopIndex = adjacencyPartition[nodeIndex + 1];
+
+
+ for (uint32_t adjacentNodeIndex = startIndex; adjacentNodeIndex < stopIndex; adjacentNodeIndex++)
+ {
+ const uint32_t neighbourIndex = adjacentNodeIndices[adjacentNodeIndex];
+ const uint32_t bondIndex = adjacentBondIndices[adjacentNodeIndex];
+ const NvBlastBond& bond = assetBonds[bondIndex];
+
+ // Skip already-broken bonds.
+ if (!(familyBondHealths[bondIndex] > 0.0f))
+ continue;
+
+ // Tangential component of the (unnormalized) shear w.r.t. the
+ // bond normal.
+ // NOTE(review): 'abs' here comes from stdlib.h — on toolchains
+ // where only the int overload is visible this truncates the
+ // float argument; std::fabs (or <cmath> std::abs) is the safe
+ // spelling. Confirm which overload is selected.
+ float shear = 1 * abs(1 - abs(VecMath::dot(damage.shear, bond.normal)));
+
+ // Track the neighbour farthest ahead along the shear direction
+ // to continue the walk.
+ float d[3]; VecMath::sub(bond.centroid, damage.position, d);
+ float ahead = VecMath::dot(d, damage.shear);
+ if (ahead > maxDist)
+ {
+ maxDist = ahead;
+ nextNode = neighbourIndex;
+ }
+
+ if (shear > bondTangentialThreshold && bondFractureCount < bondFractureCountMax)
+ {
+ NvBlastBondFractureData& frac = commandBuffers->bondFractures[bondFractureCount++];
+ frac.userdata = bond.userData;
+ frac.nodeIndex0 = nodeIndex;
+ frac.nodeIndex1 = neighbourIndex;
+ frac.health = shear;
+ }
+ }
+
+ // Stop when no strictly-farther neighbour was found this hop.
+ if (nodeIndex == nextNode)
+ break;
+
+ nodeIndex = nextNode;
+
+ // NOTE(review): damageMag is attenuated here but not read again
+ // after the initial chunk-fracture test above — the attenuation
+ // currently has no effect. Verify against the intended design
+ // (forward damage propagation per the material doc).
+ damageMag *= damageAttenuation;
+ } while (!isInvalidIndex(nextNode));
+ }
+
+ commandBuffers->bondFractureCount = bondFractureCount;
+ commandBuffers->chunkFractureCount = chunkFractureCount;
+ }
+
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+// Single Shader
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+ // Shear single-chunk (subsupport) shader: sums doubled shear magnitudes of
+ // all descriptors exceeding singleChunkThreshold and emits at most one
+ // chunk-fracture command; no bond fractures.
+ // NOTE(review): no capacity check on chunkFractures (writes at most one
+ // entry), and 'damageDir' is computed only for the magnitude side effect.
+ void NvBlastExtShearSubgraphShader(NvBlastFractureBuffers* commandBuffers, const NvBlastSubgraphShaderActor* actor, const NvBlastProgramParams* params)
+ {
+ const NvBlastExtMaterial* materialProperties = reinterpret_cast<const NvBlastExtMaterial*>(params->material);
+
+ uint32_t chunkFractureCount = 0;
+
+ float totalDamage = 0.0f;
+ for (uint32_t i = 0; i < params->damageDescCount; ++i)
+ {
+ const NvBlastExtShearDamageDesc& damage = reinterpret_cast<const NvBlastExtShearDamageDesc*>(params->damageDescBuffer)[i];
+
+ // damageMag = |damage.shear|; the normalized direction is unused.
+ float damageDir[3];
+ float damageMag = VecMath::normal(damage.shear, damageDir);
+
+ if (damageMag > materialProperties->singleChunkThreshold)
+ {
+ totalDamage += damageMag * 2;
+ }
+ }
+
+ if (totalDamage > 0.0f)
+ {
+ NvBlastChunkFractureData& frac = commandBuffers->chunkFractures[chunkFractureCount++];
+ frac.chunkIndex = actor->chunkIndex;
+ frac.health = totalDamage;
+ }
+
+ commandBuffers->bondFractureCount = 0;
+ commandBuffers->chunkFractureCount = chunkFractureCount;
+ }
diff --git a/NvBlast/sdk/lowlevel/include/NvBlast.h b/NvBlast/sdk/lowlevel/include/NvBlast.h
new file mode 100644
index 0000000..d4c91a7
--- /dev/null
+++ b/NvBlast/sdk/lowlevel/include/NvBlast.h
@@ -0,0 +1,807 @@
+/*
+* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#ifndef NVBLAST_H
+#define NVBLAST_H
+
+
+#include "NvBlastTypes.h"
+
+
+///////////////////////////////////////////////////////////////////////////////
+// NvBlastAsset functions
+///////////////////////////////////////////////////////////////////////////////
+///@{
+
+/**
+Calculates the memory requirements for an asset based upon its descriptor. Use this function
+when building an asset with NvBlastCreateAsset.
+
+\param[in] desc Asset descriptor (see NvBlastAssetDesc).
+\param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL.
+
+\return the memory size (in bytes) required for the asset, or zero if desc is invalid.
+*/
+NVBLAST_API size_t NvBlastGetAssetMemorySize(const NvBlastAssetDesc* desc, NvBlastLog logFn);
+
+
+/**
+Returns the number of bytes of scratch memory that the user must supply to NvBlastCreateAsset,
+based upon the descriptor that will be passed into that function.
+
+\param[in] desc The asset descriptor that will be passed into NvBlastCreateAsset.
+\param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL.
+
+\return the number of bytes of scratch memory required for a call to NvBlastCreateAsset with that descriptor.
+*/
+NVBLAST_API size_t NvBlastGetRequiredScratchForCreateAsset(const NvBlastAssetDesc* desc, NvBlastLog logFn);
+
+
+/**
+Asset-building function.
+
+Constructs an NvBlastAsset in-place at the address given by the user. The address must point to a block
+of memory of at least the size given by NvBlastGetAssetMemorySize(desc, logFn), and must be 16-byte aligned.
+
+Support chunks (marked in the NvBlastChunkDesc struct) must provide full coverage over the asset.
+This means that from any leaf chunk to the root node, exactly one chunk must be support. If this condition
+is not met the function fails to create an asset.
+
+Any bonds described by NvBlastBondDesc descriptors that reference non-support chunks will be removed.
+Duplicate bonds will be removed as well (bonds that are between the same chunk pairs).
+
+Chunks in the asset should be arranged such that sibling chunks (chunks with the same parent) are contiguous.
+Chunks should also be arranged such that leaf chunks (chunks with no children) are at the end of the chunk list.
+If chunks aren't arranged properly the function fails to create an asset.
+
+\param[in] mem Pointer to block of memory of at least the size given by NvBlastGetAssetMemorySize(desc, logFn). Must be 16-byte aligned.
+\param[in] desc Asset descriptor (see NvBlastAssetDesc).
+\param[in] scratch User-supplied scratch memory of size NvBlastGetRequiredScratchForCreateAsset(desc) bytes.
+\param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL.
+
+\return pointer to new NvBlastAsset (will be the same address as mem), or NULL if unsuccessful.
+*/
+NVBLAST_API NvBlastAsset* NvBlastCreateAsset(void* mem, const NvBlastAssetDesc* desc, void* scratch, NvBlastLog logFn);
+
+
+/**
+Calculates the memory requirements for a family based upon an asset. Use this function
+when building a family with NvBlastAssetCreateFamily.
+
+\param[in] asset Asset used to build the family (see NvBlastAsset).
+\param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL.
+
+\return the memory size (in bytes) required for the family, or zero if asset is invalid.
+*/
+NVBLAST_API size_t NvBlastAssetGetFamilyMemorySize(const NvBlastAsset* asset, NvBlastLog logFn);
+
+
+/**
+Family-building function.
+
+Constructs an NvBlastFamily in-place at the address given by the user. The address must point to a block
+of memory of at least the size given by NvBlastAssetGetFamilyMemorySize(asset, logFn), and must be 16-byte aligned.
+
+\param[in] mem Pointer to block of memory of at least the size given by NvBlastAssetGetFamilyMemorySize(asset, logFn). Must be 16-byte aligned.
+\param[in] asset Asset to instance.
+\param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL.
+
+\return the family.
+*/
+NVBLAST_API NvBlastFamily* NvBlastAssetCreateFamily(void* mem, const NvBlastAsset* asset, NvBlastLog logFn);
+
+
+/**
+Retrieve the asset ID.
+
+\param[in] asset The given asset.
+\param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL.
+
+\return the ID of the asset.
+*/
+NVBLAST_API NvBlastID NvBlastAssetGetID(const NvBlastAsset* asset, NvBlastLog logFn);
+
+
+/**
+Set an asset's ID
+
+\param[in] asset The given asset.
+\param[in] id A pointer to the id to copy into the asset.
+\param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL.
+
+\return true iff the id is successfully set.
+*/
+NVBLAST_API bool NvBlastAssetSetID(NvBlastAsset* asset, const NvBlastID* id, NvBlastLog logFn);
+
+
+/**
+Retrieve the data format version for the given asset
+
+\param[in] asset The asset. Cannot be NULL.
+\param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL.
+
+\return the data format version (NvBlastAssetDataFormat).
+*/
+NVBLAST_API uint32_t NvBlastAssetGetFormatVersion(const NvBlastAsset* asset, NvBlastLog logFn);
+
+
+/**
+Retrieve the memory size (in bytes) of the given data asset
+
+\param[in] asset The asset. Cannot be NULL.
+\param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL.
+
+\return the memory size of the asset (in bytes).
+*/
+NVBLAST_API uint32_t NvBlastAssetGetSize(const NvBlastAsset* asset, NvBlastLog logFn);
+
+
+/**
+Get the number of chunks in the given asset.
+
+\param[in] asset The asset.
+\param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL.
+
+\return the number of chunks in the asset.
+*/
+NVBLAST_API uint32_t NvBlastAssetGetChunkCount(const NvBlastAsset* asset, NvBlastLog logFn);
+
+
+/**
+Get the number of leaf chunks in the given asset.
+
+\param[in] asset The asset.
+\param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL.
+
+\return the number of leaf chunks in the asset.
+*/
+NVBLAST_API uint32_t NvBlastAssetGetLeafChunkCount(const NvBlastAsset* asset, NvBlastLog logFn);
+
+
+/**
+Get the first subsupport chunk index in the given asset. Chunks are sorted such that subsupport chunks
+come last. This is the first subsupport chunk index. It equals the total chunk count if there are no
+subsupport chunks.
+
+\param[in] asset The asset.
+\param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL.
+
+\return the first subsupport chunk index in the asset.
+*/
+NVBLAST_API uint32_t NvBlastAssetGetFirstSubsupportChunkIndex(const NvBlastAsset* asset, NvBlastLog logFn);
+
+
+/**
+Get the number of bonds in the given asset.
+
+\param[in] asset The asset.
+\param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL.
+
+\return the number of bonds in the asset.
+*/
+NVBLAST_API uint32_t NvBlastAssetGetBondCount(const NvBlastAsset* asset, NvBlastLog logFn);
+
+
+/**
+Access the support graph for the given asset.
+
+\param[in] asset The asset.
+\param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL.
+
+\return a struct of support graph for the given asset.
+*/
+NVBLAST_API const NvBlastSupportGraph NvBlastAssetGetSupportGraph(const NvBlastAsset* asset, NvBlastLog logFn);
+
+
+/**
+Access a map from chunk index to graph node index.
+Returned map is valid in the domain [0, NvBlastAssetGetChunkCount(asset, logFn)).
+Non-support chunks are mapped to the invalid index 0xFFFFFFFF.
+
+\param[in] asset The asset.
+\param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL.
+
+\return an array of uint32_t values defining the map, of size NvBlastAssetGetChunkCount(asset, logFn).
+*/
+NVBLAST_API const uint32_t* NvBlastAssetGetChunkToGraphNodeMap(const NvBlastAsset* asset, NvBlastLog logFn);
+
+
+/**
+Access an array of chunks of the given asset.
+
+\param[in] asset The asset.
+\param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL.
+
+\return a pointer to an array of chunks of the asset.
+*/
+NVBLAST_API const NvBlastChunk* NvBlastAssetGetChunks(const NvBlastAsset* asset, NvBlastLog logFn);
+
+
+/**
+Access an array of bonds of the given asset.
+
+\param[in] asset The asset.
+\param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL.
+
+\return a pointer to an array of bonds of the asset.
+*/
+NVBLAST_API const NvBlastBond* NvBlastAssetGetBonds(const NvBlastAsset* asset, NvBlastLog logFn);
+
+
+/**
+A buffer size sufficient to serialize an actor instanced from a given asset.
+This function is faster than NvBlastActorGetSerializationSize, and can be used to create a reusable buffer
+for actor serialization.
+
+\param[in] asset The asset.
+\param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL.
+
+\return the required buffer size in bytes.
+*/
+NVBLAST_API uint32_t NvBlastAssetGetActorSerializationSizeUpperBound(const NvBlastAsset* asset, NvBlastLog logFn);
+
+///@} End NvBlastAsset functions
+
+
+///////////////////////////////////////////////////////////////////////////////
+// NvBlastAsset helper functions
+///////////////////////////////////////////////////////////////////////////////
+///@{
+
+/**
+Function to ensure (check and update) support coverage of chunks.
+
+Support chunks (marked in the NvBlastChunkDesc struct) must provide full coverage over the asset.
+This means that from any leaf chunk to the root node, exactly one chunk must be support. If this condition
+is not met, the actual support chunks will be adjusted accordingly.
+
+Chunk order depends on support coverage, so this function should be called before chunk reordering.
+
+\param[in] chunkDescs Array of chunk descriptors of size chunkCount. It will be updated accordingly.
+\param[in] chunkCount The number of chunk descriptors.
+\param[in] scratch User-supplied scratch storage, must point to chunkCount valid bytes of memory.
+\param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL.
+
+\return true iff coverage was already exact.
+*/
+NVBLAST_API bool NvBlastEnsureAssetExactSupportCoverage(NvBlastChunkDesc* chunkDescs, uint32_t chunkCount, void* scratch, NvBlastLog logFn);
+
+
+/**
+Build chunk reorder map.
+
+The NvBlastCreateAsset function requires the NvBlastChunkDesc array to be in the correct order:
+
+1. Root chunks (chunks with invalid parent index) must be first in the asset's chunk list.
+2. Chunks in the asset should be arranged such that sibling chunks (chunks with the same parent) are contiguous.
+3. Chunks should also be arranged such that upper-support chunks (support chunks and their parent chunks) go first in
+the chunk list.
+
+This function builds chunk reorder map which can be used to order chunk descs. Reordering chunk's descriptors
+according to generated map places them in correct order for NvBlastCreateAsset to succeed.
+
+Iff chunks are already ordered correctly, function returns 'true' and identity chunk reorder map. Otherwise 'false' is returned.
+
+\param[out] chunkReorderMap User-supplied map of size chunkCount to fill. For every chunk index this array will contain new chunk position (index).
+\param[in] chunkDescs Array of chunk descriptors of size chunkCount.
+\param[in] chunkCount The number of chunk descriptors.
+\param[in] scratch User-supplied scratch storage, must point to 2 * chunkCount * sizeof(uint32_t) valid bytes of memory.
+\param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL.
+
+\return true iff the chunks did not require reordering (chunkReorderMap is the identity map).
+*/
+NVBLAST_API bool NvBlastBuildAssetDescChunkReorderMap(uint32_t* chunkReorderMap, const NvBlastChunkDesc* chunkDescs, uint32_t chunkCount, void* scratch, NvBlastLog logFn);
+
+
+/**
+Apply chunk reorder map.
+
+Function applies reorder map on NvBlastChunkDesc and NvBlastBondDesc arrays. It reorders chunks, replaces their 'parentChunkIndex' field
+with new indices. Bonds are kept in the same order, but their 'chunkIndices' field is updated with proper indices.
+
+@see NvBlastBuildAssetDescChunkReorderMap
+
+\param[out] reorderedChunkDescs User-supplied array of size chunkCount to fill with new reordered NvBlastChunkDesc's.
+\param[in] chunkDescs Array of chunk descriptors of size chunkCount.
+\param[in] chunkCount The number of chunk descriptors.
+\param[in] bondDescs Array of bond descriptors of size chunkCount. It will be updated accordingly.
+\param[in] bondCount The number of bond descriptors.
+\param[in] chunkReorderMap Chunk reorder map to use, must be of size chunkCount.
+\param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL.
+*/
+NVBLAST_API void NvBlastApplyAssetDescChunkReorderMap
+(
+ NvBlastChunkDesc* reorderedChunkDescs,
+ const NvBlastChunkDesc* chunkDescs,
+ uint32_t chunkCount,
+ NvBlastBondDesc* bondDescs,
+ uint32_t bondCount,
+ const uint32_t* chunkReorderMap,
+ NvBlastLog logFn
+);
+
+
+/**
+Apply chunk reorder map.
+
+Function applies reorder map on NvBlastChunkDesc and NvBlastBondDesc arrays. It reorders chunks, replaces their 'parentChunkIndex' field
+with new indices. Bonds are kept in the same order, but their 'chunkIndices' field is updated with proper indices.
+
+This overload of function reorders chunks in place.
+
+@see NvBlastBuildAssetDescChunkReorderMap
+
+\param[in] chunkDescs Array of chunk descriptors of size chunkCount. It will be updated accordingly.
+\param[in] chunkCount The number of chunk descriptors.
+\param[in] bondDescs Array of bond descriptors of size chunkCount. It will be updated accordingly.
+\param[in] bondCount The number of bond descriptors.
+\param[in] chunkReorderMap Chunk reorder map to use, must be of size chunkCount.
+\param[in] scratch User-supplied scratch storage, must point to chunkCount * sizeof(NvBlastChunkDesc) valid bytes of memory.
+\param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL.
+*/
+NVBLAST_API void NvBlastApplyAssetDescChunkReorderMapInplace(NvBlastChunkDesc* chunkDescs, uint32_t chunkCount, NvBlastBondDesc* bondDescs, uint32_t bondCount, const uint32_t* chunkReorderMap, void* scratch, NvBlastLog logFn);
+
+
+/**
+Build and apply chunk reorder map.
+
+Function simply calls NvBlastBuildAssetDescChunkReorderMap and NvBlastApplyAssetDescChunkReorderMap. Provided for convenience.
+
+\param[in] chunkDescs Array of chunk descriptors of size chunkCount. It will be updated accordingly.
+\param[in] chunkCount The number of chunk descriptors.
+\param[in] bondDescs Array of bond descriptors of size chunkCount. It will be updated accordingly.
+\param[in] bondCount The number of bond descriptors.
+\param[in] chunkReorderMap Chunk reorder map to fill, must be of size chunkCount.
+\param[in] scratch User-supplied scratch storage, must point to chunkCount * sizeof(NvBlastChunkDesc) valid bytes of memory.
+\param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL.
+
+\return true iff the chunks did not require reordering (chunkReorderMap is the identity map).
+*/
+NVBLAST_API bool NvBlastReorderAssetDescChunks(NvBlastChunkDesc* chunkDescs, uint32_t chunkCount, NvBlastBondDesc* bondDescs, uint32_t bondCount, uint32_t* chunkReorderMap, void* scratch, NvBlastLog logFn);
+
+///@} End NvBlastAsset helper functions
+
+
+///////////////////////////////////////////////////////////////////////////////
+// NvBlastFamily functions
+///////////////////////////////////////////////////////////////////////////////
+///@{
+
+/**
+Retrieve the data format version for the given family.
+
+\param[in] family The family.
+\param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL.
+
+\return the family format version.
+*/
+NVBLAST_API uint32_t NvBlastFamilyGetFormatVersion(const NvBlastFamily* family, NvBlastLog logFn);
+
+
+/**
+Set asset to the family. It should be the same asset as the one family was created from (same ID).
+
+\param[in] family The family.
+\param[in] asset Asset to instance.
+\param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL.
+*/
+NVBLAST_API void NvBlastFamilySetAsset(NvBlastFamily* family, const NvBlastAsset* asset, NvBlastLog logFn);
+
+
+/**
+Retrieve the size (in bytes) of the given family.
+
+\param[in] family The family.
+\param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL.
+
+\return the size of the family (in bytes).
+*/
+NVBLAST_API uint32_t NvBlastFamilyGetSize(const NvBlastFamily* family, NvBlastLog logFn);
+
+
+/**
+Retrieve the asset ID of the given family.
+
+\param[in] family The family.
+\param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL.
+
+\return the ID of the asset associated with the family.
+*/
+NVBLAST_API NvBlastID NvBlastFamilyGetAssetID(const NvBlastFamily* family, NvBlastLog logFn);
+
+
+/**
+Returns the number of bytes of scratch memory that the user must supply to NvBlastFamilyCreateFirstActor.
+
+\param[in] family The family from which the first actor will be instanced.
+\param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL.
+
+\return the number of bytes of scratch memory required for a call to NvBlastFamilyCreateFirstActor.
+*/
+NVBLAST_API size_t NvBlastFamilyGetRequiredScratchForCreateFirstActor(const NvBlastFamily* family, NvBlastLog logFn);
+
+
+/**
+Instance the family's asset into a new, unfractured actor.
+
+\param[in] family Family in which to create a new actor. The family must have no other actors in it. (See NvBlastAssetCreateFamily.)
+\param[in] desc Actor descriptor (see NvBlastActorDesc).
+\param[in] scratch User-supplied scratch memory of size NvBlastFamilyGetRequiredScratchForCreateFirstActor(asset) bytes, where 'asset' is the NvBlastAsset from which the family was created.
+\param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL.
+
+\return pointer to new NvBlastActor if successful (the actor was successfully inserted into the family), or NULL if unsuccessful.
+*/
+NVBLAST_API NvBlastActor* NvBlastFamilyCreateFirstActor(NvBlastFamily* family, const NvBlastActorDesc* desc, void* scratch, NvBlastLog logFn);
+
+
+/**
+Retrieve the number of active actors associated with the given family.
+
+\param[in] family The family.
+\param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL.
+
+\return the number of active actors in the family.
+*/
+NVBLAST_API uint32_t NvBlastFamilyGetActorCount(const NvBlastFamily* family, NvBlastLog logFn);
+
+
+/**
+Deserialize a single Actor from a buffer into the given family. The actor will be inserted if it
+is compatible with the current family state. That is, it must not share any chunks or internal
+IDs with the actors already present in the family.
+
+\param[in] family Family in which to deserialize the actor.
+\param[in] buffer User-supplied buffer containing the actor to deserialize.
+\param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL.
+
+\return the deserialized actor if successful, NULL otherwise.
+*/
+NVBLAST_API NvBlastActor* NvBlastFamilyDeserializeActor(NvBlastFamily* family, const void* buffer, NvBlastLog logFn);
+
+
+/**
+Retrieve the active actors associated with the given family.
+
+\param[out] actors User-supplied array to be filled with the returned actor pointers.
+\param[in] actorsSize The size of the actors array. To receive all actor pointers, the size must be at least that given by NvBlastFamilyGetActorCount(family).
+\param[in] family The family.
+\param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL.
+
+\return the number of actor pointers written to actors. This will not exceed actorsSize.
+*/
+NVBLAST_API uint32_t NvBlastFamilyGetActors(NvBlastActor** actors, uint32_t actorsSize, const NvBlastFamily* family, NvBlastLog logFn);
+
+
+/**
+Retrieve the actor associated with the given chunk.
+
+\param[in] family The family.
+\param[in] chunkIndex The index of chunk.
+\param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL.
+
+\return pointer to actor associated with given chunk. NULL if there is no such actor.
+*/
+NVBLAST_API NvBlastActor* NvBlastFamilyGetChunkActor(const NvBlastFamily* family, uint32_t chunkIndex, NvBlastLog logFn);
+
+
+/**
+Retrieve the max active actor count family could have.
+
+\param[in] family The family.
+\param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL.
+
+\return the max number of active actors family could have.
+*/
+NVBLAST_API uint32_t NvBlastFamilyGetMaxActorCount(const NvBlastFamily* family, NvBlastLog logFn);
+
+///@} End NvBlastFamily functions
+
+
+///////////////////////////////////////////////////////////////////////////////////////
+// NvBlastActor accessor, serialization, and deactivation functions
+///////////////////////////////////////////////////////////////////////////////////////
+///@{
+
+/**
+Get the number of visible chunks for this actor. May be used in conjunction with NvBlastActorGetVisibleChunkIndices.
+
+\param[in] actor The actor.
+\param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL.
+
+\return the number of visible chunk indices for the actor.
+*/
+NVBLAST_API uint32_t NvBlastActorGetVisibleChunkCount(const NvBlastActor* actor, NvBlastLog logFn);
+
+
+/**
+Retrieve a list of visible chunk indices for the actor into the given array.
+
+\param[out] visibleChunkIndices User-supplied array to be filled in with indices of visible chunks for this actor.
+\param[in] visibleChunkIndicesSize The size of the visibleChunkIndices array. To receive all visible chunk indices, the size must be at least that given by NvBlastActorGetVisibleChunkCount(actor).
+\param[in] actor The actor.
+\param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL.
+
+\return the number of indices written to visibleChunkIndices. This will not exceed visibleChunkIndicesSize.
+*/
+NVBLAST_API uint32_t NvBlastActorGetVisibleChunkIndices(uint32_t* visibleChunkIndices, uint32_t visibleChunkIndicesSize, const NvBlastActor* actor, NvBlastLog logFn);
+
+
+/**
+Get the number of graph nodes for this actor. May be used in conjunction with NvBlastActorGetGraphNodeIndices.
+
+\param[in] actor The actor.
+\param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL.
+
+\return the number of graph node indices for the actor.
+*/
+NVBLAST_API uint32_t NvBlastActorGetGraphNodeCount(const NvBlastActor* actor, NvBlastLog logFn);
+
+
+/**
+Retrieve a list of graph node indices for the actor into the given array.
+
+\param[out] graphNodeIndices User-supplied array to be filled in with indices of graph nodes for this actor.
+\param[in] graphNodeIndicesSize The size of the graphNodeIndices array. To receive all graph node indices, the size must be at least that given by NvBlastActorGetGraphNodeCount(actor).
+\param[in] actor The actor.
+\param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL.
+
+\return the number of indices written to graphNodeIndices. This will not exceed graphNodeIndicesSize.
+*/
+NVBLAST_API uint32_t NvBlastActorGetGraphNodeIndices(uint32_t* graphNodeIndices, uint32_t graphNodeIndicesSize, const NvBlastActor* actor, NvBlastLog logFn);
+
+
+/**
+Access the bond health data for an actor.
+
+This function returns a pointer to the head of an array of bond healths (floats). This array is the same for any actor that
+has been created from repeated fracturing of the same original instance of an asset (in the same instance family).
+
+The indices obtained from NvBlastSupportGraph::adjacentBondIndices in the asset may be used to access this array.
+
+The size of the array returned is NvBlastAssetGetBondCount(asset, logFn), where 'asset' is the NvBlastAsset
+that was used to create the actor.
+
+This array is valid as long as any actor in the instance family for the input actor exists.
+
+If the input actor is invalid, NULL will be returned.
+
+\param[in] actor The actor.
+\param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL.
+
+\return the array of bond healths for the actor's instance family, or NULL if the actor is invalid.
+*/
+NVBLAST_API const float* NvBlastActorGetBondHealths(const NvBlastActor* actor, NvBlastLog logFn);
+
+
+/**
+The buffer size needed to serialize a single actor. This will give the exact size needed. For an upper bound
+on the buffer size needed for any actor instanced from an NvBlastAsset, use NvBlastAssetGetActorSerializationSizeUpperBound.
+
+\param[in] actor The actor.
+\param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL.
+
+\return the required buffer size in bytes.
+*/
+NVBLAST_API uint32_t NvBlastActorGetSerializationSize(const NvBlastActor* actor, NvBlastLog logFn);
+
+
+/**
+Serialize a single actor to a buffer.
+
+\param[out] buffer User-supplied buffer, must be at least of size given by NvBlastActorGetSerializationSize(actor).
+\param[in] bufferSize The size of the user-supplied buffer.
+\param[in] actor The actor.
+\param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL.
+
+\return the number of bytes written to the buffer, or 0 if there is an error (such as an under-sized buffer).
+*/
+NVBLAST_API uint32_t NvBlastActorSerialize(void* buffer, uint32_t bufferSize, const NvBlastActor* actor, NvBlastLog logFn);
+
+
+/**
+Access to an actor's family.
+
+\param[in] actor The actor.
+\param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL.
+
+\return the family with which the actor is associated.
+*/
+NVBLAST_API NvBlastFamily* NvBlastActorGetFamily(const NvBlastActor* actor, NvBlastLog logFn);
+
+
+/**
+Access to an actor's internal index.
+
+\param[in] actor The actor.
+\param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL.
+
+\return actor's internal index in family.
+*/
+NVBLAST_API uint32_t NvBlastActorGetIndex(const NvBlastActor* actor, NvBlastLog logFn);
+
+
+/**
+Deactivate an actor within its family. Conceptually this is "destroying" the actor, however memory will not be released until the family is released.
+
+\param[in] actor Points to a user-supplied actor struct. May be NULL, in which case this function no-ops.
+\param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL.
+
+\return true iff successful (actor was active).
+*/
+NVBLAST_API bool NvBlastActorDeactivate(NvBlastActor* actor, NvBlastLog logFn);
+
+///@} End NvBlastActor accessor, serialization, and deactivation functions
+
+
+///////////////////////////////////////////////////////////////////////////////
+// NvBlastActor damage and fracturing functions
+///////////////////////////////////////////////////////////////////////////////
+///@{
+
+/**
+Creates fracture commands for the actor using a damage program and program parameters (material and damage descriptions).
+
+\param[in,out] commandBuffers Target buffers to hold generated commands.
+ To avoid data loss, provide an entry for every support chunk and every bond in the original actor.
+\param[in] actor The NvBlastActor to create fracture commands for.
+\param[in] program A NvBlastDamageProgram containing damage shaders.
+\param[in] programParams Parameters for the NvBlastDamageProgram.
+\param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL.
+\param[in,out] timers If non-NULL this struct will be filled out with profiling information for the step, in profile build configurations.
+
+Interpretation of NvBlastFractureBuffers:
+As input:
+ Counters denote available entries for FractureData.
+ Chunk and Bond userdata are not used.
+ Health values are not used.
+
+As output:
+    Counters denote valid entries in FractureData arrays.
+ Chunks and Bond userdata reflect the respective userdata set during asset initialization, where implemented by the material function.
+ Health values denote how much damage is to be applied.
+*/
+NVBLAST_API void NvBlastActorGenerateFracture
+(
+ NvBlastFractureBuffers* commandBuffers,
+ const NvBlastActor* actor,
+ const NvBlastDamageProgram program,
+ const NvBlastProgramParams* programParams,
+ NvBlastLog logFn,
+ NvBlastTimers* timers
+);
+
+
+/**
+Applies the direct fracture and breaks graph bonds/edges as necessary.
+Chunks damaged beyond their respective health fracture their children recursively, creating a NvBlastChunkFractureData for each.
+
+\param[in,out] eventBuffers Target buffers to hold applied fracture events. May be NULL, in which case events are not reported.
+ To avoid data loss, provide an entry for every lower-support chunk and every bond in the original actor.
+\param[in,out] actor The NvBlastActor to apply fracture to.
+\param[in] commands The fracture commands to process.
+\param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL.
+\param[in,out] timers If non-NULL this struct will be filled out with profiling information for the step, in profile build configurations.
+
+Interpretation of NvBlastFractureBuffers:
+commands:
+ Counters denote the number of command entries to process.
+ Chunk and Bond userdata are not used.
+ Health values denote the amount of damage to apply, as a positive value.
+
+eventBuffers as input:
+ Counters denote available entries for FractureData.
+ Chunk and Bond userdata are not used.
+ Health values are not used.
+
+eventBuffers as output:
+    Counters denote valid entries in FractureData arrays.
+ Chunks and Bond userdata reflect the respective userdata set during asset initialization.
+ Health values denote how much health is remaining for the damaged element.
+ Broken elements report a negative value corresponding to the superfluous health damage.
+
+commands and eventBuffers may point to the same memory.
+*/
+NVBLAST_API void NvBlastActorApplyFracture
+(
+ NvBlastFractureBuffers* eventBuffers,
+ NvBlastActor* actor,
+ const NvBlastFractureBuffers* commands,
+ NvBlastLog logFn,
+ NvBlastTimers* timers
+);
+
+
+/**
+Releases the oldActor and creates its children newActors if necessary.
+
+\param[out] result The list of deleted and created NvBlastActor objects.
+\param[in] actor The actor to split.
+\param[in] newActorsMaxCount Number of available NvBlastActor slots. In the worst case, one NvBlastActor may be created for every chunk in the asset.
+\param[in] scratch Scratch Memory used during processing. NvBlastActorGetRequiredScratchForSplit provides the necessary size.
+\param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL.
+\param[in,out] timers If non-NULL this struct will be filled out with profiling information for the step, in profile build configurations
+
+\return 1..n: new actors were created
+\return 0: oldActor is unchanged
+*/
+NVBLAST_API uint32_t NvBlastActorSplit
+(
+ NvBlastActorSplitEvent* result,
+ NvBlastActor* actor,
+ uint32_t newActorsMaxCount,
+ void* scratch,
+ NvBlastLog logFn,
+ NvBlastTimers* timers
+);
+
+
+/**
+Returns the number of bytes of scratch memory that the user must supply to NvBlastActorSplit,
+based upon the actor that will be passed into that function.
+
+\param[in] actor The actor that will be passed into NvBlastActorSplit.
+\param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL.
+
+\return the number of bytes of scratch memory required for a call to NvBlastActorSplit with that actor.
+*/
+NVBLAST_API size_t NvBlastActorGetRequiredScratchForSplit(const NvBlastActor* actor, NvBlastLog logFn);
+
+
+/**
+Returns the upper-bound number of actors which can be created by calling NvBlastActorSplit with that actor, this
+value can't exceed chunk count.
+
+\param[in] actor The actor.
+\param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL.
+
+\return the upper-bound number of actors which can be created by calling NvBlastActorSplit with that actor.
+*/
+NVBLAST_API uint32_t NvBlastActorGetMaxActorCountForSplit(const NvBlastActor* actor, NvBlastLog logFn);
+
+
+/**
+Determines if the actor can fracture further.
+
+\param[in] actor The actor potentially being fractured.
+\param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL.
+
+\return true if any result can be expected from fracturing the actor. false if no further change to the actor is possible.
+*/
+NVBLAST_API bool NvBlastActorCanFracture(const NvBlastActor* actor, NvBlastLog logFn);
+
+///@} End NvBlastActor damage and fracturing functions
+
+
+///////////////////////////////////////////////////////////////////////////////
+// NvBlastTimers functions and helpers
+///////////////////////////////////////////////////////////////////////////////
+///@{
+
+/**
+Resets all values in the given NvBlastTimers struct to zero.
+
+\param[in] timers The NvBlastTimers to set to zero.
+*/
+NVBLAST_API void NvBlastTimersReset(NvBlastTimers* timers);
+
+
+/**
+Convert a tick value from NvBlastTimers to seconds.
+
+\param[in] ticks The tick value.
+
+\return the seconds corresponding to the input tick value.
+*/
+NVBLAST_API double NvBlastTicksToSeconds(int64_t ticks);
+
+///@} End NvBlastTimers functions and helpers
+
+
+#endif // ifndef NVBLAST_H
diff --git a/NvBlast/sdk/lowlevel/include/NvBlastPreprocessor.h b/NvBlast/sdk/lowlevel/include/NvBlastPreprocessor.h
new file mode 100644
index 0000000..25a8516
--- /dev/null
+++ b/NvBlast/sdk/lowlevel/include/NvBlastPreprocessor.h
@@ -0,0 +1,31 @@
+/*
+* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#ifndef NVBLASTPREPROCESSOR_H
+#define NVBLASTPREPROCESSOR_H
+
+
+#include "NvPreprocessor.h"
+
+
+/** Blast API declaration */
+#define NVBLAST_API NV_C_EXPORT NV_DLL_EXPORT
+
+
+/**
+Macros for more convenient logging
+*/
+#define NVBLAST_LOG_ERROR(_logFn, _msg) if (_logFn != nullptr) { _logFn(NvBlastMessage::Error, _msg, __FILE__, __LINE__); } ((void)0)
+#define NVBLAST_LOG_WARNING(_logFn, _msg) if (_logFn != nullptr) { _logFn(NvBlastMessage::Warning, _msg, __FILE__, __LINE__); } ((void)0)
+#define NVBLAST_LOG_INFO(_logFn, _msg) if (_logFn != nullptr) { _logFn(NvBlastMessage::Info, _msg, __FILE__, __LINE__); } ((void)0)
+#define NVBLAST_LOG_DEBUG(_logFn, _msg) if (_logFn != nullptr) { _logFn(NvBlastMessage::Debug, _msg, __FILE__, __LINE__); } ((void)0)
+
+
+#endif // ifndef NVBLASTPREPROCESSOR_H
diff --git a/NvBlast/sdk/lowlevel/include/NvBlastProfiler.h b/NvBlast/sdk/lowlevel/include/NvBlastProfiler.h
new file mode 100644
index 0000000..cd60b4f
--- /dev/null
+++ b/NvBlast/sdk/lowlevel/include/NvBlastProfiler.h
@@ -0,0 +1,52 @@
+/*
+* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#ifndef NVBLASTPROFILER_H
+#define NVBLASTPROFILER_H
+
+#include "NvBlastPreprocessor.h"
+
+namespace physx {
+ class PxProfilerCallback;
+}
+
+struct NvBlastProfilerDetail
+{
+ enum Level
+ {
+ LOW,
+ MEDIUM,
+ HIGH
+ };
+};
+
+/**
+Profiler features are only active in checked, debug and profile builds.
+*/
+
+/**
+Set a callback to PVD or another PxProfilerCallback based profiler.
+*/
+NVBLAST_API void NvBlastProfilerSetCallback(physx::PxProfilerCallback* pcb);
+
+/**
+Enable events for platform specific profiler tools. Currently supported:
+Nsight, PS4, Xbox One
+*/
+NVBLAST_API void NvBlastProfilerEnablePlatform(bool);
+
+/**
+Sets the depth of reported profile zones.
+Higher levels (more nesting) of instrumentation can have a significant impact.
+Defaults to NvBlastProfilerDetail::Level::LOW.
+*/
+NVBLAST_API void NvBlastProfilerSetDetail(NvBlastProfilerDetail::Level);
+
+#endif
diff --git a/NvBlast/sdk/lowlevel/include/NvBlastTypes.h b/NvBlast/sdk/lowlevel/include/NvBlastTypes.h
new file mode 100644
index 0000000..d711842
--- /dev/null
+++ b/NvBlast/sdk/lowlevel/include/NvBlastTypes.h
@@ -0,0 +1,632 @@
+/*
+* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#ifndef NVBLASTTYPES_H
+#define NVBLASTTYPES_H
+
+
+#include "NvBlastPreprocessor.h"
+#include <stdint.h>
+
+
+///////////////////////////////////////////////////////////////////////////////
+// NvBlast common types
+///////////////////////////////////////////////////////////////////////////////
+///@{
+
+/**
+Types of log messages.
+*/
+struct NvBlastMessage
+{
+ enum Type
+ {
+ Error, //!< Error messages
+ Warning, //!< Warning messages
+ Info, //!< Information messages
+ Debug //!< Used only in debug version of dll
+ };
+};
+
+
+/**
+Function pointer type for logging.
+
+When a function with this signature is passed into Blast functions with an NvBlastLog argument,
+Blast will use it to report errors, warnings, and other information.
+*/
+typedef void(*NvBlastLog)(int type, const char* msg, const char* file, int line);
+
+
+/**
+ID used to identify assets.
+*/
+struct NvBlastID
+{
+ char data[16];
+};
+
+
+/**
+Time spent (in ticks) in various parts of Blast.
+These values may be filled in during the execution of various API functions.
+To convert to seconds, use NvBlastTicksToSeconds(ticks).
+
+In profile build configurations, if a pointer to an instance of this struct is passed into
+Blast functions with an NvBlastTimers argument, then Blast will add to appropriate fields
+the time measured in corresponding sections of code. The user must clear the timer fields
+with NvBlastTimersReset to initialize or reset.
+*/
+struct NvBlastTimers
+{
+ int64_t material; //!< Time spent in material function
+ int64_t fracture; //!< Time spent applying damage
+ int64_t island; //!< Time spent discovering islands
+ int64_t partition; //!< Time spent partitioning the graph
+ int64_t visibility; //!< Time spent updating visibility
+};
+
+
+/**
+Generic data block header for all data blocks.
+*/
+struct NvBlastDataBlock
+{
+ /**
+ Enum of data block types
+ */
+ enum Type
+ {
+ AssetDataBlock,
+ FamilyDataBlock,
+
+ Count
+ };
+
+
+ /**
+ A data type keeps value from Type enum
+ */
+ uint32_t dataType;
+
+ /**
+ A number which is incremented every time the data layout changes. Depending on dataType corresponding data
+ format is kept. See NvBlastAssetDataFormat, NvBlastFamilyDataFormat enum.
+ */
+ uint32_t formatVersion;
+
+ /**
+ The size of the family, including this header.
+
+ Memory sizes are restricted to 32-bit representable values.
+ */
+ uint32_t size;
+
+ /**
+ Reserved to be possibly used in future versions
+ */
+ uint32_t reserved;
+};
+
+///@} End NvBlast common types
+
+
+///////////////////////////////////////////////////////////////////////////////
+// NvBlastAsset related types
+///////////////////////////////////////////////////////////////////////////////
+///@{
+
+
+/**
+Struct-enum which keeps track of the asset data format.
+*/
+struct NvBlastAssetDataFormat
+{
+ enum Version
+ {
+ /** Initial version */
+ Initial,
+
+ // New formats must come before Count. They should be given descriptive names with more information in comments.
+
+ /** The number of asset formats. */
+ Count,
+
+ /** The current version. This should always be Count-1 */
+ Current = Count - 1
+ };
+};
+
+
+/**
+Represents a piece of a destructible asset which may be realized as an entity with a physical and graphical component.
+
+Chunks may form a hierarchical decomposition of the asset. They contain parent and child chunk index information which
+defines the hierarchy. The parent and child chunk indices are their positions within the NvBlastAsset::chunks array.
+
+Child chunk indices are contiguous, starting at firstChildIndex and ending with childIndexStop - 1.
+*/
+struct NvBlastChunk
+{
+ /**
+ Central position for the chunk's volume
+ */
+ float centroid[3];
+
+ /**
+ Volume of the chunk
+ */
+ float volume;
+
+ /**
+ Index of parent (UINT32_MAX denotes no parent)
+ */
+ uint32_t parentChunkIndex;
+
+ /**
+ Index of first child
+ */
+ uint32_t firstChildIndex;
+
+ /**
+ Stop for child indices
+ */
+ uint32_t childIndexStop;
+
+ /**
+ Field for user to associate with external data
+ */
+ uint32_t userData;
+};
+
+
+/**
+Represents the interface between two chunks. At most one bond is created for a chunk pair.
+The convention regarding the normal direction is based upon the chunk indices,
+pointing from the lower-indexed chunk to the higher-indexed chunk.
+*/
+struct NvBlastBond
+{
+ /**
+ Interface average normal
+ */
+ float normal[3];
+
+ /**
+ Area of interface
+ */
+ float area;
+
+ /**
+ Central position on the interface between chunks
+ */
+ float centroid[3];
+
+ /**
+ Extra data associated with bond, e.g. whether or not to create a joint
+ */
+ uint32_t userData;
+};
+
+
+/**
+Describes the connectivity between support chunks via bonds.
+
+Vertices in the support graph are termed "nodes," and represent particular chunks (NvBlastChunk) in an NvBlastAsset.
+The indexing for nodes is not the same as that for chunks. Only some chunks are represented by nodes in the graph,
+and these chunks are called "support chunks."
+
+Adjacent node indices and adjacent bond indices are stored for each node, and therefore each bond is represented twice in this graph,
+going from node[i] -> node[j] and from node[j] -> node[i]. Therefore the size of the adjacentNodeIndices and adjacentBondIndices
+arrays are twice the number of bonds stored in the corresponding NvBlastAsset.
+
+The graph is used as follows. Given a NvBlastSupportGraph "graph" and node index i, (0 <= i < graph.nodeCount), one may find all
+adjacent bonds and nodes using:
+
+ // adj is the lookup value in graph.adjacentNodeIndices and graph.adjacentBondIndices
+ for (uint32_t adj = graph.adjacencyPartition[i]; adj < graph.adjacencyPartition[i+1]; ++adj)
+ {
+ // An adjacent node:
+ uint32_t adjacentNodeIndex = graph.adjacentNodeIndices[adj];
+
+        // The corresponding bond (that connects the node with index i to the node with index adjacentNodeIndex):
+ uint32_t adjacentBondIndex = graph.adjacentBondIndices[adj];
+ }
+
+For a graph node with index i, the corresponding asset chunk index is found using graph.chunkIndices[i]. The reverse mapping
+(obtaining a graph node index from an asset chunk index) can be done using the
+
+ NvBlastAssetGetChunkToGraphNodeMap(asset, logFn)
+
+function. See the documentation for its use. The returned "node index" for a non-support chunk is the invalid value 0xFFFFFFFF.
+*/
+struct NvBlastSupportGraph
+{
+ /**
+ Total number of nodes in the support graph.
+ */
+ uint32_t nodeCount;
+
+ /**
+ Indices of chunks represented by the nodes, an array of size nodeCount.
+ */
+ uint32_t* chunkIndices;
+
+ /**
+ Partitions both the adjacentNodeIndices and the adjacentBondIndices arrays into subsets corresponding to each node.
+ The size of this array is nodeCount+1.
+ For 0 <= i < nodeCount, adjacencyPartition[i] is the index of the first element in adjacentNodeIndices (or adjacentBondIndices) for nodes adjacent to the node with index i.
+ adjacencyPartition[nodeCount] is the size of the adjacentNodeIndices and adjacentBondIndices arrays.
+ This allows one to easily count the number of nodes adjacent to a node with index i, using adjacencyPartition[i+1] - adjacencyPartition[i].
+ */
+ uint32_t* adjacencyPartition;
+
+ /**
+ Array composed of subarrays holding the indices of nodes adjacent to a given node. The subarrays may be accessed through the adjacencyPartition array.
+ */
+ uint32_t* adjacentNodeIndices;
+
+ /**
+ Array composed of subarrays holding the indices of bonds (NvBlastBond) for a given node. The subarrays may be accessed through the adjacencyPartition array.
+ */
+ uint32_t* adjacentBondIndices;
+};
+
+
+/**
+Asset (opaque)
+
+Static destructible data, used to create actor families.
+
+Pointer to this struct can be created with NvBlastCreateAsset.
+
+The NvBlastAsset includes an ID which may be used to match it with physics and graphics data.
+*/
+struct NvBlastAsset {};
+
+
+/**
+Chunk descriptor used to build an asset. See NvBlastAssetDesc.
+*/
+struct NvBlastChunkDesc
+{
+ enum Flags
+ {
+ NoFlags = 0,
+
+ /** If this flag is set then the chunk will become a support chunk, unless an ancestor chunk is also marked as support. */
+ SupportFlag = (1 << 0)
+ };
+
+ /** Central position in chunk. */
+ float centroid[3];
+
+ /** Volume of chunk. */
+ float volume;
+
+ /** Index of this chunk's parent. If this is a root chunk, then this value must be UINT32_MAX. */
+ uint32_t parentChunkIndex;
+
+ /** See Flags enum for possible flags. */
+ uint32_t flags;
+
+ /** User-supplied data which will be accessible to the user in chunk fracture events. */
+ uint32_t userData;
+};
+
+
+/**
+Chunk bond descriptor used to build an asset. See NvBlastAssetDesc.
+*/
+struct NvBlastBondDesc
+{
+ /** The indices of the chunks linked by this bond. They must be different support chunk indices. */
+ uint32_t chunkIndices[2];
+
+ /** Bond data (see NvBlastBond). */
+ NvBlastBond bond;
+};
+
+
+/**
+Asset descriptor, used to build an asset with NvBlastCreateAsset
+
+A valid asset descriptor must have a non-zero chunkCount and valid chunkDescs.
+
+The user may create an asset with no bonds (e.g. a single-chunk asset). In this case bondCount should be
+zero and bondDescs is ignored.
+*/
+struct NvBlastAssetDesc
+{
+ /** The number of chunk descriptors. */
+ uint32_t chunkCount;
+
+ /** Array of chunk descriptors of size chunkCount. */
+ const NvBlastChunkDesc* chunkDescs;
+
+ /** The number of bond descriptors. */
+ uint32_t bondCount;
+
+ /** Array of bond descriptors of size bondCount. */
+ const NvBlastBondDesc* bondDescs;
+};
+
+///@} End NvBlastAsset related types
+
+
+///////////////////////////////////////////////////////////////////////////////
+// NvBlastActor related types
+///////////////////////////////////////////////////////////////////////////////
+///@{
+
+
+/**
+Struct-enum which keeps track of the family data format.
+*/
+struct NvBlastFamilyDataFormat
+{
+ enum Version
+ {
+ /** Initial version */
+ Initial,
+
+ // New formats must come before Count. They should be given descriptive names with more information in comments.
+
+ /** The number of family formats. */
+ Count,
+
+ /** The current version. This should always be Count-1 */
+ Current = Count - 1
+ };
+};
+
+
+/**
+Family (opaque)
+
+A family can be created by the NvBlastAssetCreateFamily function and released
+by NvBlastFamilyRelease. A family is needed to create the first actor. All subsequent
+actors, which can be created with the NvBlastActorSplit function (as a result of fracture), will share the same family
+block. NvBlastFamilyGetActorCount can be used to determine whether the family can be safely released.
+*/
+struct NvBlastFamily {};
+
+
+/**
+Actor (opaque)
+
+Actors can be generated by the NvBlastFamilyCreateFirstActor
+and NvBlastActorSplit functions. Opaque NvBlastActor pointers reference data within the family
+generated during NvBlastFamilyCreateFirstActor, and represent the actor in all actor-related API
+functions.
+*/
+struct NvBlastActor {};
+
+
+/**
+Actor descriptor, used to create an instance of an NvBlastAsset with NvBlastFamilyCreateFirstActor
+
+See NvBlastFamilyCreateFirstActor.
+*/
+struct NvBlastActorDesc
+{
+ /**
+ Initial health of all bonds, if initialBondHealths is NULL (see initialBondHealths).
+ */
+ float uniformInitialBondHealth;
+
+ /**
+    Initial bond healths. If not NULL, this array must be of length NvBlastAssetGetBondCount(asset, logFn).
+ If NULL, uniformInitialBondHealth must be set.
+ */
+ const float* initialBondHealths;
+
+ /**
+ Initial health of all lower-support chunks, if initialSupportChunkHealths is NULL (see initialSupportChunkHealths).
+ */
+ float uniformInitialLowerSupportChunkHealth;
+
+ /**
+ Initial health of all support chunks. If not NULL, this must be of length
+    NvBlastAssetGetSupportGraph(asset, logFn).nodeCount. The elements in the initialSupportChunkHealths
+ array will correspond to the chunk indices in the NvBlastAssetGetSupportGraph(asset, logFn).chunkIndices
+ array. Every descendent of a support chunk will have its health initialized to its ancestor support
+ chunk's health, so this initializes all lower-support chunk healths.
+ If NULL, uniformInitialLowerSupportChunkHealth must be set.
+ */
+ const float* initialSupportChunkHealths;
+};
+
+///@} End NvBlastActor related types
+
+
+///////////////////////////////////////////////////////////////////////////////
+// Types used for damage and fracturing
+///////////////////////////////////////////////////////////////////////////////
+///@{
+
+
+/**
+Fracture Data for Chunks
+
+Data interpretation varies depending on the function used.
+@see NvBlastActorGenerateFracture NvBlastActorApplyFracture NvBlastFractureBuffers
+*/
+struct NvBlastChunkFractureData
+{
+ uint32_t userdata; //!< chunk's user data
+ uint32_t chunkIndex; //!< asset chunk index
+ float health; //!< health value (damage or remains)
+};
+
+
+/**
+Fracture Data for Bonds
+
+Data interpretation varies depending on the function used.
+@see NvBlastActorGenerateFracture NvBlastActorApplyFracture NvBlastFractureBuffers
+*/
+struct NvBlastBondFractureData
+{
+ uint32_t userdata; //!< bond's user data
+ uint32_t nodeIndex0; //!< graph node index of bond
+ uint32_t nodeIndex1; //!< pair graph node index of bond
+ float health; //!< health value (damage or remains)
+};
+
+
+/**
+Memory to be used by fracture functions.
+
+Used as input and output target.
+@see NvBlastActorGenerateFracture NvBlastActorApplyFracture
+*/
+struct NvBlastFractureBuffers
+{
+ uint32_t bondFractureCount; //!< available elements in bondFractures
+ uint32_t chunkFractureCount; //!< available elements in chunkFractures
+ NvBlastBondFractureData* bondFractures; //!< memory to be filled by fracture functions
+ NvBlastChunkFractureData* chunkFractures; //!< memory to be filled by fracture functions
+};
+
+
+/**
+Description of a NvBlastActorSplit result.
+This tells the user about changes in the actor, or creation of children.
+*/
+struct NvBlastActorSplitEvent
+{
+ NvBlastActor* deletedActor; //!< deleted actor or nullptr if actor has not changed
+ NvBlastActor** newActors; //!< list of created actors
+};
+
+
+/**
+A single actor's representation used by NvBlastGraphShaderFunction.
+*/
+struct NvBlastGraphShaderActor
+{
+ uint32_t firstGraphNodeIndex; //!< Entry index for graphNodeIndexLinks
+ const uint32_t* graphNodeIndexLinks; //!< Linked index list of connected nodes. Traversable with nextIndex = graphNodeIndexLinks[currentIndex], terminates with 0xFFFFFFFF.
+ const uint32_t* chunkIndices; //!< Graph's map from node index to support chunk index.
+ const uint32_t* adjacencyPartition; //!< See NvBlastSupportGraph::adjacencyPartition.
+ const uint32_t* adjacentNodeIndices; //!< See NvBlastSupportGraph::adjacentNodeIndices.
+ const uint32_t* adjacentBondIndices; //!< See NvBlastSupportGraph::adjacentBondIndices.
+ const NvBlastBond* assetBonds; //!< NvBlastBonds geometry in the NvBlastAsset.
+ const float* familyBondHealths; //!< Actual bond health values for broken bond detection.
+};
+
+
+/**
+Damage program params.
+
+Custom user params to be passed in shader functions. This structure hints at a recommended parameter layout, but it
+is not required to be this way.
+
+The idea of this 'hint' is that damage parameters are basically 2 entities: material + damage description.
+1. Material is something that describes an actor properties (e.g. mass, stiffness, fragility) which are not expected to be changed often.
+2. Damage description is something that describes particular damage event (e.g. position, radius and force of explosion).
+
+Also this damage program hints that there could be more than one damage event happening and processed per one shader call (for efficiency reasons).
+So different damage descriptions can be stacked and passed in one shader call (while material is kept the same obviously).
+*/
+struct NvBlastProgramParams
+{
+ const void* damageDescBuffer; //!< array of damage descriptions
+ uint32_t damageDescCount; //!< number of damage descriptions in array
+ const void* material; //!< pointer to material
+};
+
+
+/**
+A single actor's representation used by NvBlastSubgraphShaderFunction.
+*/
+struct NvBlastSubgraphShaderActor
+{
+ uint32_t chunkIndex; //!< Index of chunk represented by this actor.
+ const NvBlastChunk* assetChunks; //!< NvBlastChunks geometry in the NvBlastAsset.
+};
+
+
+/**
+Damage shader for actors with more than one node in the support graph.
+
+From an input actor data (NvBlastGraphShaderActor) and user custom data (params),
+creates a list of NvBlastFractureCommand to be applied to the respective NvBlastActor.
+
+\param[in,out] commandBuffers The resulting health damage to apply.
+ Typically requires an array of size (number of support chunks) + (number of bonds) of the processed asset
+ but may depend on the actual implementation.
+\param[in] actor The actor representation used for creating commands.
+\param[in] params A set of parameters defined by the damage shader implementer.
+
+Interpretation of NvBlastFractureBuffers:
+As input:
+Counters denote available entries for FractureData.
+Chunk and Bond userdata are not used.
+Health values are not used.
+
+As output:
+Counters denote valid entries in the FractureData arrays.
+Chunk and Bond userdata reflect the respective userdata set during asset initialization.
+Health values denote how much damage is to be applied.
+
+@see NvBlastFractureBuffers NvBlastGraphShaderActor
+*/
+typedef void(*NvBlastGraphShaderFunction)(NvBlastFractureBuffers* commandBuffers, const NvBlastGraphShaderActor* actor, const NvBlastProgramParams* params);
+
+
+/**
+Damage shader for actors with single chunk.
+
+From an input actor data (NvBlastSubgraphShaderActor) and user custom data (params),
+creates a list of NvBlastFractureCommand to be applied to the respective NvBlastActor.
+
+\param[in,out] commandBuffers The resulting health damage to apply.
+ Typically requires an array of size (number of support chunks) + (number of bonds) of the processed asset
+ but may depend on the actual implementation.
+\param[in] actor The actor representation used for creating commands.
+\param[in] params A set of parameters defined by the damage shader implementer.
+
+Interpretation of NvBlastFractureBuffers:
+As input:
+Counters denote available entries for FractureData.
+Chunk and Bond userdata are not used.
+Health values are not used.
+
+As output:
+Counters denote valid entries in the FractureData arrays.
+Chunk and Bond userdata reflect the respective userdata set during asset initialization.
+Health values denote how much damage is to be applied.
+
+@see NvBlastFractureBuffers NvBlastSubgraphShaderActor
+*/
+typedef void(*NvBlastSubgraphShaderFunction)(NvBlastFractureBuffers* commandBuffers, const NvBlastSubgraphShaderActor* actor, const NvBlastProgramParams* params);
+
+
+/**
+Damage Program.
+
+Contains both graph and subgraph shader. When used on actor appropriate shader will be called.
+Any shader can be nullptr to be skipped.
+
+@see NvBlastGraphShaderFunction NvBlastSubgraphShaderFunction
+*/
+struct NvBlastDamageProgram
+{
+ NvBlastGraphShaderFunction graphShaderFunction;
+ NvBlastSubgraphShaderFunction subgraphShaderFunction;
+};
+
+
+///@} End of types used for damage and fracturing
+
+
+#endif // ifndef NVBLASTTYPES_H
diff --git a/NvBlast/sdk/lowlevel/include/NvPreprocessor.h b/NvBlast/sdk/lowlevel/include/NvPreprocessor.h
new file mode 100644
index 0000000..07a3ebc
--- /dev/null
+++ b/NvBlast/sdk/lowlevel/include/NvPreprocessor.h
@@ -0,0 +1,540 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2014 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#ifndef NV_NVFOUNDATION_NVPREPROCESSOR_H
+#define NV_NVFOUNDATION_NVPREPROCESSOR_H
+
+#include <stddef.h>
+
+/** \addtogroup foundation
+ @{
+*/
+
+/*
+The following preprocessor identifiers specify compiler, OS, and architecture.
+All definitions have a value of 1 or 0, use '#if' instead of '#ifdef'.
+*/
+
+/**
+Compiler defines, see http://sourceforge.net/p/predef/wiki/Compilers/
+*/
+#if defined(_MSC_VER)
+#if _MSC_VER >= 1900
+#define NV_VC 14
+#elif _MSC_VER >= 1800
+#define NV_VC 12
+#elif _MSC_VER >= 1700
+#define NV_VC 11
+#elif _MSC_VER >= 1600
+#define NV_VC 10
+#elif _MSC_VER >= 1500
+#define NV_VC 9
+#else
+#error "Unknown VC version"
+#endif
+#elif defined(__clang__)
+#define NV_CLANG 1
+#elif defined(__SNC__)
+#define NV_SNC 1
+#elif defined(__ghs__)
+#define NV_GHS 1
+#elif defined(__GNUC__) // note: __clang__, __SNC__, or __ghs__ imply __GNUC__
+#define NV_GCC 1
+#else
+#error "Unknown compiler"
+#endif
+
+/**
+Operating system defines, see http://sourceforge.net/p/predef/wiki/OperatingSystems/
+*/
+#if defined(WINAPI_FAMILY) && WINAPI_FAMILY == WINAPI_PARTITION_APP
+#define NV_WINRT 1 // Windows Runtime, either on Windows RT or Windows 8
+#elif defined(XBOXONE) || defined(_XBOX_ONE)
+#define NV_XBOXONE 1
+#elif defined(_WIN64) // note: XBOXONE implies _WIN64
+#define NV_WIN64 1
+#elif defined(_M_PPC)
+#define NV_X360 1
+#elif defined(_WIN32) // note: _M_PPC implies _WIN32
+#define NV_WIN32 1
+#elif defined(__ANDROID__)
+#define NV_ANDROID 1
+#elif defined(__linux__) // note: __ANDROID__ implies __linux__
+#define NV_LINUX 1
+#elif defined(__APPLE__) && (defined(__arm__) || defined(__arm64__))
+#define NV_IOS 1
+#elif defined(__APPLE__)
+#define NV_OSX 1
+#elif defined(__CELLOS_LV2__)
+#define NV_PS3 1
+#elif defined(__ORBIS__)
+#define NV_PS4 1
+#elif defined(__SNC__) && defined(__arm__)
+#define NV_PSP2 1
+#elif defined(__ghs__)
+#define NV_WIIU 1
+#else
+#error "Unknown operating system"
+#endif
+
+/**
+Architecture defines, see http://sourceforge.net/p/predef/wiki/Architectures/
+*/
+#if defined(__x86_64__) || defined(_M_X64) // ps4 compiler defines _M_X64 without value
+#define NV_X64 1
+#elif defined(__i386__) || defined(_M_IX86)
+#define NV_X86 1
+#elif defined(__arm64__) || defined(__aarch64__)
+#define NV_A64 1
+#elif defined(__arm__) || defined(_M_ARM)
+#define NV_ARM 1
+#elif defined(__SPU__)
+#define NV_SPU 1
+#elif defined(__ppc__) || defined(_M_PPC) || defined(__CELLOS_LV2__)
+#define NV_PPC 1
+#else
+#error "Unknown architecture"
+#endif
+
+/**
+SIMD defines
+*/
+#if defined(__i386__) || defined(_M_IX86) || defined(__x86_64__) || defined(_M_X64)
+#define NV_SSE2 1
+#endif
+#if defined(_M_ARM) || defined(__ARM_NEON__)
+#define NV_NEON 1
+#endif
+#if defined(_M_PPC) || defined(__CELLOS_LV2__)
+#define NV_VMX 1
+#endif
+
+/**
+define anything not defined on this platform to 0
+*/
+#ifndef NV_VC
+#define NV_VC 0
+#endif
+#ifndef NV_CLANG
+#define NV_CLANG 0
+#endif
+#ifndef NV_SNC
+#define NV_SNC 0
+#endif
+#ifndef NV_GHS
+#define NV_GHS 0
+#endif
+#ifndef NV_GCC
+#define NV_GCC 0
+#endif
+#ifndef NV_WINRT
+#define NV_WINRT 0
+#endif
+#ifndef NV_XBOXONE
+#define NV_XBOXONE 0
+#endif
+#ifndef NV_WIN64
+#define NV_WIN64 0
+#endif
+#ifndef NV_X360
+#define NV_X360 0
+#endif
+#ifndef NV_WIN32
+#define NV_WIN32 0
+#endif
+#ifndef NV_ANDROID
+#define NV_ANDROID 0
+#endif
+#ifndef NV_LINUX
+#define NV_LINUX 0
+#endif
+#ifndef NV_IOS
+#define NV_IOS 0
+#endif
+#ifndef NV_OSX
+#define NV_OSX 0
+#endif
+#ifndef NV_PS3
+#define NV_PS3 0
+#endif
+#ifndef NV_PS4
+#define NV_PS4 0
+#endif
+#ifndef NV_PSP2
+#define NV_PSP2 0
+#endif
+#ifndef NV_WIIU
+#define NV_WIIU 0
+#endif
+#ifndef NV_X64
+#define NV_X64 0
+#endif
+#ifndef NV_X86
+#define NV_X86 0
+#endif
+#ifndef NV_A64
+#define NV_A64 0
+#endif
+#ifndef NV_ARM
+#define NV_ARM 0
+#endif
+#ifndef NV_SPU
+#define NV_SPU 0
+#endif
+#ifndef NV_PPC
+#define NV_PPC 0
+#endif
+#ifndef NV_SSE2
+#define NV_SSE2 0
+#endif
+#ifndef NV_NEON
+#define NV_NEON 0
+#endif
+#ifndef NV_VMX
+#define NV_VMX 0
+#endif
+
+/*
+define anything not defined through the command line to 0
+*/
+#ifndef NV_DEBUG
+#define NV_DEBUG 0
+#endif
+#ifndef NV_CHECKED
+#define NV_CHECKED 0
+#endif
+#ifndef NV_PROFILE
+#define NV_PROFILE 0
+#endif
+#ifndef NV_NVTX
+#define NV_NVTX 0
+#endif
+#ifndef NV_DOXYGEN
+#define NV_DOXYGEN 0
+#endif
+
+/**
+family shortcuts
+*/
+// compiler
+#define NV_GCC_FAMILY (NV_CLANG || NV_SNC || NV_GHS || NV_GCC)
+// os
+#define NV_WINDOWS_FAMILY (NV_WINRT || NV_WIN32 || NV_WIN64)
+#define NV_MICROSOFT_FAMILY (NV_XBOXONE || NV_X360 || NV_WINDOWS_FAMILY)
+#define NV_LINUX_FAMILY (NV_LINUX || NV_ANDROID)
+#define NV_APPLE_FAMILY (NV_IOS || NV_OSX) // equivalent to #if __APPLE__
+#define NV_UNIX_FAMILY (NV_LINUX_FAMILY || NV_APPLE_FAMILY) // shortcut for unix/posix platforms
+// architecture
+#define NV_INTEL_FAMILY (NV_X64 || NV_X86)
+#define NV_ARM_FAMILY (NV_ARM || NV_A64)
+#define NV_P64_FAMILY (NV_X64 || NV_A64) // shortcut for 64-bit architectures
+
+// shortcut for PS3 PPU
+#define NV_PPU (NV_PS3&& NV_PPC)
+
+/**
+Assert macro
+*/
+#ifndef NV_ENABLE_ASSERTS
+#if NV_DEBUG && !defined(__CUDACC__)
+#define NV_ENABLE_ASSERTS 1
+#else
+#define NV_ENABLE_ASSERTS 0
+#endif
+#endif
+
+/**
+DLL export macros
+*/
+#ifndef NV_C_EXPORT
+#if NV_WINDOWS_FAMILY || NV_LINUX || NV_PS4 || NV_XBOXONE
+#define NV_C_EXPORT extern "C"
+#else
+#define NV_C_EXPORT
+#endif
+#endif
+
+#if NV_UNIX_FAMILY && __GNUC__ >= 4
+#define NV_UNIX_EXPORT __attribute__((visibility("default")))
+#else
+#define NV_UNIX_EXPORT
+#endif
+
+#if NV_WINDOWS_FAMILY
+#define NV_DLL_EXPORT __declspec(dllexport)
+#define NV_DLL_IMPORT __declspec(dllimport)
+#else
+#define NV_DLL_EXPORT NV_UNIX_EXPORT
+#define NV_DLL_IMPORT
+#endif
+
+/**
+Define API function declaration
+
+NV_FOUNDATION_DLL=1 - used by the DLL library (PhysXCommon) to export the API
+NV_FOUNDATION_DLL=0 - for windows configurations where the NV_FOUNDATION_API is linked through standard static linking
+no definition - this will allow DLLs and libraries to use the exported API from PhysXCommon
+
+*/
+
+#if NV_WINDOWS_FAMILY && !NV_ARM_FAMILY || NV_WINRT
+#ifndef NV_FOUNDATION_DLL
+#define NV_FOUNDATION_API NV_DLL_IMPORT
+#elif NV_FOUNDATION_DLL
+#define NV_FOUNDATION_API NV_DLL_EXPORT
+#endif
+#elif NV_UNIX_FAMILY
+#ifdef NV_FOUNDATION_DLL
+#define NV_FOUNDATION_API NV_UNIX_EXPORT
+#endif
+#endif
+
+#ifndef NV_FOUNDATION_API
+#define NV_FOUNDATION_API
+#endif
+
+/**
+Calling convention
+*/
+#ifndef NV_CALL_CONV
+#if NV_MICROSOFT_FAMILY
+#define NV_CALL_CONV __cdecl
+#else
+#define NV_CALL_CONV
+#endif
+#endif
+
+/**
+Pack macros - disabled on SPU because they are not supported
+*/
+#if NV_VC
+#define NV_PUSH_PACK_DEFAULT __pragma(pack(push, 8))
+#define NV_POP_PACK __pragma(pack(pop))
+#elif NV_GCC_FAMILY && !NV_SPU
+#define NV_PUSH_PACK_DEFAULT _Pragma("pack(push, 8)")
+#define NV_POP_PACK _Pragma("pack(pop)")
+#else
+#define NV_PUSH_PACK_DEFAULT
+#define NV_POP_PACK
+#endif
+
+/**
+Inline macro
+*/
+#define NV_INLINE inline
+#if NV_MICROSOFT_FAMILY
+#pragma inline_depth(255)
+#endif
+
+/**
+Force inline macro
+*/
+#if NV_VC
+#define NV_FORCE_INLINE __forceinline
+#elif NV_LINUX // Workaround; Fedora Core 3 do not agree with force inline and NvcPool
+#define NV_FORCE_INLINE inline
+#elif NV_GCC_FAMILY
+#define NV_FORCE_INLINE inline __attribute__((always_inline))
+#else
+#define NV_FORCE_INLINE inline
+#endif
+
+/**
+Noinline macro
+*/
+#if NV_MICROSOFT_FAMILY
+#define NV_NOINLINE __declspec(noinline)
+#elif NV_GCC_FAMILY
+#define NV_NOINLINE __attribute__((noinline))
+#else
+#define NV_NOINLINE
+#endif
+
+/**
+Restrict macro
+*/
+#if defined(__CUDACC__)
+#define NV_RESTRICT __restrict__
+#else
+#define NV_RESTRICT __restrict
+#endif
+
+/**
+Noalias macro
+*/
+#if NV_MICROSOFT_FAMILY
+#define NV_NOALIAS __declspec(noalias)
+#else
+#define NV_NOALIAS
+#endif
+
+/**
+Alignment macros
+
+NV_ALIGN_PREFIX and NV_ALIGN_SUFFIX can be used for type alignment instead of aligning individual variables as follows:
+NV_ALIGN_PREFIX(16)
+struct A {
+...
+} NV_ALIGN_SUFFIX(16);
+This declaration style is parsed correctly by Visual Assist.
+
+*/
+#ifndef NV_ALIGN
+#if NV_MICROSOFT_FAMILY
+#define NV_ALIGN(alignment, decl) __declspec(align(alignment)) decl
+#define NV_ALIGN_PREFIX(alignment) __declspec(align(alignment))
+#define NV_ALIGN_SUFFIX(alignment)
+#elif NV_GCC_FAMILY
+#define NV_ALIGN(alignment, decl) decl __attribute__((aligned(alignment)))
+#define NV_ALIGN_PREFIX(alignment)
+#define NV_ALIGN_SUFFIX(alignment) __attribute__((aligned(alignment)))
+#else
+#define NV_ALIGN(alignment, decl)
+#define NV_ALIGN_PREFIX(alignment)
+#define NV_ALIGN_SUFFIX(alignment)
+#endif
+#endif
+
+/**
+Deprecated macro
+- To deprecate a function: Place NV_DEPRECATED at the start of the function header (leftmost word).
+- To deprecate a 'typedef', a 'struct' or a 'class': Place NV_DEPRECATED directly after the keywords ('typedef',
+'struct', 'class').
+
+Use these macro definitions to create warnings for deprecated functions
+#define NV_DEPRECATED __declspec(deprecated) // Microsoft
+#define NV_DEPRECATED __attribute__((deprecated())) // GCC
+*/
+#define NV_DEPRECATED
+
+/**
+General defines
+*/
+
+// static assert
+#if defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 7)) || defined(__ORBIS__)
+#define NV_COMPILE_TIME_ASSERT(exp) typedef char NvCompileTimeAssert_Dummy[(exp) ? 1 : -1] __attribute__((unused))
+#else
+#define NV_COMPILE_TIME_ASSERT(exp) typedef char NvCompileTimeAssert_Dummy[(exp) ? 1 : -1]
+#endif
+
+#if NV_GCC_FAMILY && !NV_SNC && !NV_GHS
+#define NV_OFFSET_OF(X, Y) __builtin_offsetof(X, Y)
+#else
+#define NV_OFFSET_OF(X, Y) offsetof(X, Y)
+#endif
+
+#define NV_OFFSETOF_BASE 0x100 // casting the null ptr takes a special-case code path, which we don't want
+#define NV_OFFSET_OF_RT(Class, Member) \
+ (reinterpret_cast<size_t>(&reinterpret_cast<Class*>(NV_OFFSETOF_BASE)->Member) - size_t(NV_OFFSETOF_BASE))
+
+// check that exactly one of NDEBUG and _DEBUG is defined
+#if !defined(NDEBUG) ^ defined(_DEBUG)
+#error Exactly one of NDEBUG and _DEBUG needs to be defined!
+#endif
+
+// make sure NV_CHECKED is defined in all _DEBUG configurations as well
+#if !defined(NV_CHECKED) && defined(NV_DEBUG)
+#error NV_CHECKED must be defined when NV_DEBUG is defined
+#endif
+
+#ifdef __CUDACC__
+#define NV_CUDA_CALLABLE __host__ __device__
+#else
+#define NV_CUDA_CALLABLE
+#endif
+
+// avoid unreferenced parameter warning
+// preferred solution: omit the parameter's name from the declaration
+// NV_UNUSED(x) takes its argument as an unnamed const reference and does nothing;
+// referencing the parameter in the call is what silences the compiler warning.
+template <class T>
+NV_CUDA_CALLABLE NV_INLINE void NV_UNUSED(T const&)
+{
+}
+
+// Ensure that the application hasn't tweaked the pack value to less than 8, which would break
+// matching between the API headers and the binaries
+// This assert works on win32/win64/360/ps3, but may need further specialization on other platforms.
+// Some GCC compilers need the compiler flag -malign-double to be set.
+// Apparently the apple-clang-llvm compiler doesn't support malign-double.
+// NvPackValidation pairs a 1-byte char with the platform's widest naturally-aligned scalar;
+// the offset of 'a' then reveals the effective struct packing, which the assert below pins to 8.
+#if NV_PS4 || NV_APPLE_FAMILY
+struct NvPackValidation
+{
+    char _;
+    long a;  // presumably 8 bytes on these (LP64) targets — TODO confirm per toolchain
+};
+#elif NV_ANDROID
+struct NvPackValidation
+{
+    char _;
+    double a;
+};
+#else
+struct NvPackValidation
+{
+    char _;
+    long long a;
+};
+#endif
+// Apple builds are excluded because the assert comment above notes apple-clang lacks -malign-double.
+#if !NV_APPLE_FAMILY
+NV_COMPILE_TIME_ASSERT(NV_OFFSET_OF(NvPackValidation, a) == 8);
+#endif
+
+// use in a cpp file to suppress LNK4221
+#if NV_VC
+#define NV_DUMMY_SYMBOL \
+ namespace \
+ { \
+ char NvDummySymbol; \
+ }
+#else
+#define NV_DUMMY_SYMBOL
+#endif
+
+#if NV_GCC_FAMILY && !NV_GHS
+#define NV_WEAK_SYMBOL __attribute__((weak)) // this is to support SIMD constant merging in template specialization
+#else
+#define NV_WEAK_SYMBOL
+#endif
+
+// Macro for avoiding default assignment and copy, because doing this by inheritance can increase class size on some
+// platforms.
+#define NV_NOCOPY(Class) \
+ \
+protected: \
+ Class(const Class&); \
+ Class& operator=(const Class&);
+
+#define NV_STRINGIZE_HELPER(X) #X
+#define NV_STRINGIZE(X) NV_STRINGIZE_HELPER(X)
+
+#define NV_CONCAT_HELPER(X, Y) X##Y
+#define NV_CONCAT(X, Y) NV_CONCAT_HELPER(X, Y)
+
+/** @} */
+#endif // #ifndef NV_NVFOUNDATION_NVPREPROCESSOR_H
diff --git a/NvBlast/sdk/lowlevel/source/NvBlastActor.cpp b/NvBlast/sdk/lowlevel/source/NvBlastActor.cpp
new file mode 100644
index 0000000..b93c47a
--- /dev/null
+++ b/NvBlast/sdk/lowlevel/source/NvBlastActor.cpp
@@ -0,0 +1,1316 @@
+/*
+* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#include "NvBlastActor.h"
+#include "NvBlastFamilyGraph.h"
+#include "NvBlastChunkHierarchy.h"
+#include "NvBlastIndexFns.h"
+#include "NvBlastDLink.h"
+#include "NvBlastGeometry.h"
+#include "NvBlastTime.h"
+#include <float.h>
+#include <algorithm>
+
+
+namespace Nv
+{
+namespace Blast
+{
+
+//////// Local helper functions ////////
+
+#if NVBLAST_CHECK_PARAMS
+/**
+Helper function to validate fracture buffer values being meaningful.
+
+A buffer descriptor is invalid when it advertises a non-zero element count but
+supplies a null array pointer; zero-count buffers are always considered valid.
+*/
+static inline bool isValid(const NvBlastFractureBuffers* buffers)
+{
+    if (buffers->chunkFractureCount != 0 && buffers->chunkFractures == nullptr)
+        return false;
+
+    if (buffers->bondFractureCount != 0 && buffers->bondFractures == nullptr)
+        return false;
+
+    return true;
+}
+#endif
+
+//////// Actor static methods ////////
+
+// Returns the scratch size (bytes) that Actor::create needs for the given family.
+// The requirement is driven entirely by the island search over the asset's support graph.
+size_t Actor::createRequiredScratch(const NvBlastFamily* family)
+{
+#if NVBLAST_CHECK_PARAMS
+    // Checked builds: a family without an asset cannot yield a meaningful size.
+    if (family == nullptr || reinterpret_cast<const FamilyHeader*>(family)->m_asset == nullptr)
+    {
+        NVBLAST_ALWAYS_ASSERT();
+        return 0;
+    }
+#endif
+
+    const Asset& solverAsset = *reinterpret_cast<const FamilyHeader*>(family)->m_asset;
+    return FamilyGraph::findIslandsRequiredScratch(solverAsset.m_graph.m_nodeCount);
+}
+
+
+// Creates the first actor in an (empty) family: initializes chunk/bond healths from the
+// descriptor, claims actor slot 0, links all graph nodes into one chain, and seeds the
+// family graph's island data. Returns nullptr on any invalid input or non-empty family.
+// 'scratch' must be at least createRequiredScratch(family) bytes.
+Actor* Actor::create(NvBlastFamily* family, const NvBlastActorDesc* desc, void* scratch, NvBlastLog logFn)
+{
+    NVBLAST_CHECK(family != nullptr, logFn, "Actor::create: NULL family pointer input.", return nullptr);
+    NVBLAST_CHECK(reinterpret_cast<FamilyHeader*>(family)->m_asset != nullptr, logFn, "Actor::create: family has NULL asset.", return nullptr);
+    NVBLAST_CHECK(reinterpret_cast<FamilyHeader*>(family)->m_asset->m_graph.m_nodeCount != 0, logFn, "Actor::create: family's asset has no support chunks.", return nullptr);
+    NVBLAST_CHECK(desc != nullptr, logFn, "Actor::create: NULL desc pointer input.", return nullptr);
+    NVBLAST_CHECK(scratch != nullptr, logFn, "Actor::create: NULL scratch input.", return nullptr);
+
+    FamilyHeader* header = reinterpret_cast<FamilyHeader*>(family);
+
+    // Creation is only legal on a family with no live actors.
+    if (header->m_actorCount > 0)
+    {
+        NVBLAST_LOG_ERROR(logFn, "Actor::create: input family is not empty.");
+        return nullptr;
+    }
+
+    const Asset& solverAsset = *static_cast<const Asset*>(header->m_asset);
+    const Nv::Blast::SupportGraph& graph = solverAsset.m_graph;
+
+    // Lower support chunk healths - initialize
+    float* lowerSupportChunkHealths = header->getLowerSupportChunkHealths();
+    if (desc->initialSupportChunkHealths != nullptr) // Health array given
+    {
+        // Each support chunk's given health is propagated to its whole subtree
+        // (depth-first walk), stored at contiguous lower-support indices.
+        const uint32_t* supportChunkIndices = graph.getChunkIndices();
+        for (uint32_t supportChunkNum = 0; supportChunkNum < graph.m_nodeCount; ++supportChunkNum)
+        {
+            const float initialHealth = desc->initialSupportChunkHealths[supportChunkNum];
+            for (Asset::DepthFirstIt i(solverAsset, supportChunkIndices[supportChunkNum]); (bool)i; ++i)
+            {
+                lowerSupportChunkHealths[solverAsset.getContiguousLowerSupportIndex((uint32_t)i)] = initialHealth;
+            }
+        }
+    }
+    else // Use uniform initialization
+    {
+        const uint32_t lowerSupportChunkCount = solverAsset.getLowerSupportChunkCount();
+        for (uint32_t i = 0; i < lowerSupportChunkCount; ++i)
+        {
+            lowerSupportChunkHealths[i] = desc->uniformInitialLowerSupportChunkHealth;
+        }
+    }
+
+    // Bond healths - initialize
+    const uint32_t bondCount = solverAsset.getBondCount();
+    float* bondHealths = header->getBondHealths();
+    if (desc->initialBondHealths != nullptr) // Health array given
+    {
+        memcpy(bondHealths, desc->initialBondHealths, bondCount * sizeof(float));
+    }
+    else // Use uniform initialization
+    {
+        for (uint32_t bondNum = 0; bondNum < bondCount; ++bondNum)
+        {
+            bondHealths[bondNum] = desc->uniformInitialBondHealth;
+        }
+    }
+
+    // Get first actor - NOTE: we don't send an event for this! May need to do so for consistency.
+    Actor* actor = header->borrowActor(0); // Using actor[0]
+
+    // Fill in actor fields
+    actor->m_firstGraphNodeIndex = 0;
+    actor->m_graphNodeCount = graph.m_nodeCount;
+    actor->m_leafChunkCount = solverAsset.m_leafChunkCount;
+
+    // Graph node index links - initialize to chain
+    // (node i points to i+1; the final node terminates with the invalid index)
+    uint32_t* graphNodeLinks = header->getGraphNodeIndexLinks();
+    for (uint32_t i = 0; i < graph.m_nodeCount - 1; ++i)
+    {
+        graphNodeLinks[i] = i + 1;
+    }
+    graphNodeLinks[graph.m_nodeCount - 1] = invalidIndex<uint32_t>();
+
+    // Update visible chunks (we assume that all chunks belong to one actor at the beginning)
+    actor->updateVisibleChunksFromGraphNodes();
+
+    // Initialize instance graph with this actor
+    header->getFamilyGraph()->initialize(actor->getIndex(), &graph);
+
+    // Call findIslands to set up the internal instance graph data
+    header->getFamilyGraph()->findIslands(actor->getIndex(), scratch, &graph);
+
+    return actor;
+}
+
+
+//////// Actor member methods ////////
+
+// Looks up the bond joining the two graph nodes and applies healthDamage to it.
+// Returns the bond index, or the invalid index when no bond connects the nodes
+// (in which case the 4-arg overload asserts and performs no damage).
+uint32_t Actor::damageBond(uint32_t nodeIndex0, uint32_t nodeIndex1, float healthDamage)
+{
+    const uint32_t bondIndex = getGraph()->findBond(nodeIndex0, nodeIndex1);
+    damageBond(nodeIndex0, nodeIndex1, bondIndex, healthDamage);
+    return bondIndex;
+}
+
+
+// Subtracts healthDamage from the bond at bondIndex (which joins nodeIndex0/nodeIndex1).
+// If the bond's health drops to zero or below, the family graph is notified that the
+// edge was removed; damage to already-broken bonds (health <= 0) is ignored.
+void Actor::damageBond(uint32_t nodeIndex0, uint32_t nodeIndex1, uint32_t bondIndex, float healthDamage)
+{
+    if (bondIndex == invalidIndex<uint32_t>())
+    {
+        NVBLAST_ALWAYS_ASSERT();
+        return;
+    }
+
+    float* bondHealths = getBondHealths();
+    if (bondHealths[bondIndex] > 0 && healthDamage > 0.0f)
+    {
+        // Subtract health
+        bondHealths[bondIndex] -= healthDamage;
+
+        // Was removed?
+        if (bondHealths[bondIndex] <= 0)
+        {
+            // Notify graph that bond was removed
+            getFamilyGraph()->notifyEdgeRemoved(getIndex(), nodeIndex0, nodeIndex1, bondIndex, getGraph());
+            bondHealths[bondIndex] = 0; // Doing this for single-actor serialization consistency; should not actually be necessary
+        }
+    }
+}
+
+
+// Convenience overload: applies a bond-fracture command. The command's nodeIndex1
+// must be valid (asserted); returns the bond index found between the two nodes.
+uint32_t Actor::damageBond(const NvBlastBondFractureData& cmd)
+{
+    NVBLAST_ASSERT(!isInvalidIndex(cmd.nodeIndex1));
+    return damageBond(cmd.nodeIndex0, cmd.nodeIndex1, cmd.health);
+}
+
+
+// Runs the damage program's shader to fill commandBuffers with fracture commands.
+// Multi-node actors use the graph shader; single-node (or node-less subsupport) actors
+// use the subgraph shader. If no applicable shader is provided, the output counts are
+// zeroed. Elapsed time is accumulated into timers->material in profile builds.
+void Actor::generateFracture(NvBlastFractureBuffers* commandBuffers, const NvBlastDamageProgram& program, const NvBlastProgramParams* programParams,
+    NvBlastLog logFn, NvBlastTimers* timers) const
+{
+    NVBLAST_CHECK(commandBuffers != nullptr, logFn, "Actor::generateFracture: NULL commandBuffers pointer input.", return);
+    NVBLAST_CHECK(isValid(commandBuffers), logFn, "NvBlastActorGenerateFracture: commandBuffers memory is NULL but size is > 0.",
+        commandBuffers->bondFractureCount = 0; commandBuffers->chunkFractureCount = 0; return);
+
+#if NVBLAST_CHECK_PARAMS
+    if (commandBuffers->bondFractureCount == 0 && commandBuffers->chunkFractureCount == 0)
+    {
+        NVBLAST_LOG_WARNING(logFn, "NvBlastActorGenerateFracture: commandBuffers do not provide any space.");
+        return;
+    }
+#endif
+
+#if NV_PROFILE
+    Time time;
+#else
+    NV_UNUSED(timers);
+#endif
+
+    const SupportGraph* graph = getGraph();
+
+    const uint32_t graphNodeCount = getGraphNodeCount();
+    if (graphNodeCount > 1 && program.graphShaderFunction != nullptr)
+    {
+        // Multi-node actor: hand the shader a view of this actor's node chain, graph
+        // adjacency, bonds and bond healths.
+        const NvBlastGraphShaderActor shaderActor = {
+            getFirstGraphNodeIndex(),
+            getGraphNodeIndexLinks(),
+            graph->getChunkIndices(),
+            graph->getAdjacencyPartition(),
+            graph->getAdjacentNodeIndices(),
+            graph->getAdjacentBondIndices(),
+            getBonds(),
+            getBondHealths()
+        };
+
+        program.graphShaderFunction(commandBuffers, &shaderActor, programParams);
+    }
+    else if (graphNodeCount <= 1 && program.subgraphShaderFunction != nullptr)
+    {
+        const NvBlastSubgraphShaderActor shaderActor = {
+            // The conditional (visible vs. support chunk) is needed because we allow single-child chunk chains
+            // This makes it possible that an actor with a single support chunk will have a different visible chunk (ancestor of the support chunk)
+            graphNodeCount == 1 ? graph->getChunkIndices()[getFirstGraphNodeIndex()] : getFirstVisibleChunkIndex(),
+            getChunks()
+        };
+
+        program.subgraphShaderFunction(commandBuffers, &shaderActor, programParams);
+    }
+    else
+    {
+        // No shader applies to this actor shape: report empty command buffers.
+        commandBuffers->bondFractureCount = 0;
+        commandBuffers->chunkFractureCount = 0;
+    }
+
+#if NV_PROFILE
+    if (timers != nullptr)
+    {
+        timers->material += time.getElapsedTicks();
+    }
+#endif
+}
+
+
+// Recursively distributes healthDamage evenly among the children of chunkIndex (no event
+// output). 'suboffset' translates absolute chunk indices into the subsupport-health array.
+// Damage exceeding a child's remaining health continues down to that child's children.
+void Actor::fractureSubSupportNoEvents(uint32_t chunkIndex, uint32_t suboffset, float healthDamage, float* chunkHealths, const NvBlastChunk* chunks)
+{
+    const NvBlastChunk& chunk = chunks[chunkIndex];
+    uint32_t numChildren = chunk.childIndexStop - chunk.firstChildIndex;
+
+    if (numChildren > 0)
+    {
+        // Split the damage equally among children.
+        healthDamage /= numChildren;
+        for (uint32_t childIndex = chunk.firstChildIndex; childIndex < chunk.childIndexStop; childIndex++)
+        {
+            float& health = chunkHealths[childIndex - suboffset];
+            if (health > 0.0f)
+            {
+                // Overshoot beyond this child's health (positive means the child broke with damage to spare).
+                float remainingDamage = healthDamage - health;
+                health -= healthDamage;
+
+                NVBLAST_ASSERT(chunks[childIndex].parentChunkIndex == chunkIndex);
+
+                if (health <= 0.0f && remainingDamage > 0.0f)
+                {
+                    fractureSubSupportNoEvents(childIndex, suboffset, remainingDamage, chunkHealths, chunks);
+                }
+            }
+        }
+    }
+}
+
+
+// Same recursion as fractureSubSupportNoEvents, but also emits a chunk-fracture event per
+// damaged child into outBuffer. *currentIndex is incremented for every event even past
+// maxCount, so the caller can detect overflow (events beyond maxCount are dropped).
+// The emitted event.health may be negative; callers use -health as leftover damage.
+void Actor::fractureSubSupport(uint32_t chunkIndex, uint32_t suboffset, float healthDamage, float* chunkHealths, const NvBlastChunk* chunks, NvBlastChunkFractureData* outBuffer, uint32_t* currentIndex, const uint32_t maxCount)
+{
+    const NvBlastChunk& chunk = chunks[chunkIndex];
+    uint32_t numChildren = chunk.childIndexStop - chunk.firstChildIndex;
+
+    if (numChildren > 0)
+    {
+        // Split the damage equally among children.
+        healthDamage /= numChildren;
+        for (uint32_t childIndex = chunk.firstChildIndex; childIndex < chunk.childIndexStop; childIndex++)
+        {
+            float& health = chunkHealths[childIndex - suboffset];
+            if (health > 0.0f)
+            {
+                float remainingDamage = healthDamage - health;
+                health -= healthDamage;
+
+                NVBLAST_ASSERT(chunks[childIndex].parentChunkIndex == chunkIndex);
+
+                // Record the event only if there is room; always count it.
+                if (*currentIndex < maxCount)
+                {
+                    NvBlastChunkFractureData& event = outBuffer[*currentIndex];
+                    event.userdata = chunks[childIndex].userData;
+                    event.chunkIndex = childIndex;
+                    event.health = health;
+                }
+                (*currentIndex)++;
+
+                if (health <= 0.0f && remainingDamage > 0.0f)
+                {
+                    fractureSubSupport(childIndex, suboffset, remainingDamage, chunkHealths, chunks, outBuffer, currentIndex, maxCount);
+                }
+            }
+        }
+    }
+
+}
+
+
+// Applies chunk-fracture commands without producing events. For each commanded chunk:
+// if it is a graph node on a multi-node actor, all its bonds are zeroed and the node
+// removed from the family graph; then its health is reduced, and any overshoot damage
+// is pushed down the subsupport hierarchy.
+void Actor::fractureNoEvents(uint32_t chunkFractureCount, const NvBlastChunkFractureData* chunkFractures)
+{
+    const Asset& asset = *getAsset();
+    const SupportGraph& graph = *getGraph();
+    const uint32_t* graphAdjacencyPartition = graph.getAdjacencyPartition();
+    const uint32_t* graphAdjacentNodeIndices = graph.getAdjacentNodeIndices();
+    float* bondHealths = getBondHealths();
+    float* chunkHealths = getLowerSupportChunkHealths();
+    float* subChunkHealths = getSubsupportChunkHealths();
+    const NvBlastChunk* chunks = getChunks();
+
+    for (uint32_t i = 0; i < chunkFractureCount; ++i)
+    {
+        const NvBlastChunkFractureData& command = chunkFractures[i];
+        const uint32_t chunkIndex = command.chunkIndex;
+        const uint32_t chunkHealthIndex = asset.getContiguousLowerSupportIndex(chunkIndex);
+        NVBLAST_ASSERT(!isInvalidIndex(chunkHealthIndex));
+        if (isInvalidIndex(chunkHealthIndex))
+        {
+            // Command addressed a chunk with no lower-support health slot; skip it.
+            continue;
+        }
+        float& health = chunkHealths[chunkHealthIndex];
+        if (health > 0.0f && command.health > 0.0f)
+        {
+            const uint32_t nodeIndex = asset.getChunkToGraphNodeMap()[chunkIndex];
+            if (getGraphNodeCount() > 1 && !isInvalidIndex(nodeIndex))
+            {
+                // Break every bond attached to this graph node, then remove the node.
+                for (uint32_t adjacentIndex = graphAdjacencyPartition[nodeIndex]; adjacentIndex < graphAdjacencyPartition[nodeIndex + 1]; adjacentIndex++)
+                {
+
+                    const uint32_t bondIndex = graph.findBond(nodeIndex, graphAdjacentNodeIndices[adjacentIndex]);
+                    NVBLAST_ASSERT(!isInvalidIndex(bondIndex));
+                    if (bondHealths[bondIndex] > 0.0f)
+                    {
+                        bondHealths[bondIndex] = 0.0f;
+                    }
+                }
+                getFamilyGraph()->notifyNodeRemoved(getIndex(), nodeIndex, &graph);
+            }
+
+            health -= command.health;
+
+            const float remainingDamage = -health;
+
+            if (remainingDamage > 0.0f) // node chunk has been damaged beyond its health
+            {
+                uint32_t firstSubOffset = getFirstSubsupportChunkIndex();
+                fractureSubSupportNoEvents(chunkIndex, firstSubOffset, remainingDamage, subChunkHealths, chunks);
+            }
+        }
+    }
+}
+
+
+// Applies chunk-fracture commands, mirroring fractureNoEvents but additionally writing
+// one event per effective command (and per hierarchical child fracture) into 'events'.
+// *count is incremented past eventsSize on overflow so the caller can warn; overflowed
+// events are dropped. Event health may be negative, encoding leftover damage.
+void Actor::fractureWithEvents(uint32_t chunkFractureCount, const NvBlastChunkFractureData* commands, NvBlastChunkFractureData* events, uint32_t eventsSize, uint32_t* count)
+{
+    const Asset& asset = *getAsset();
+    const SupportGraph& graph = *getGraph();
+    const uint32_t* graphAdjacencyPartition = graph.getAdjacencyPartition();
+    const uint32_t* graphAdjacentNodeIndices = graph.getAdjacentNodeIndices();
+    float* bondHealths = getBondHealths();
+    float* chunkHealths = getLowerSupportChunkHealths();
+    float* subChunkHealths = getSubsupportChunkHealths();
+    const NvBlastChunk* chunks = getChunks();
+
+    for (uint32_t i = 0; i < chunkFractureCount; ++i)
+    {
+        const NvBlastChunkFractureData& command = commands[i];
+        const uint32_t chunkIndex = command.chunkIndex;
+        const uint32_t chunkHealthIndex = asset.getContiguousLowerSupportIndex(chunkIndex);
+        NVBLAST_ASSERT(!isInvalidIndex(chunkHealthIndex));
+        if (isInvalidIndex(chunkHealthIndex))
+        {
+            continue;
+        }
+        float& health = chunkHealths[chunkHealthIndex];
+        if (health > 0.0f && command.health > 0.0f)
+        {
+            const uint32_t nodeIndex = asset.getChunkToGraphNodeMap()[chunkIndex];
+            if (getGraphNodeCount() > 1 && !isInvalidIndex(nodeIndex))
+            {
+                // Break every bond attached to this graph node, then remove the node.
+                for (uint32_t adjacentIndex = graphAdjacencyPartition[nodeIndex]; adjacentIndex < graphAdjacencyPartition[nodeIndex + 1]; adjacentIndex++)
+                {
+                    const uint32_t bondIndex = graph.findBond(nodeIndex, graphAdjacentNodeIndices[adjacentIndex]);
+                    NVBLAST_ASSERT(!isInvalidIndex(bondIndex));
+                    if (bondHealths[bondIndex] > 0.0f)
+                    {
+                        bondHealths[bondIndex] = 0.0f;
+                    }
+                }
+                getFamilyGraph()->notifyNodeRemoved(getIndex(), nodeIndex, &graph);
+            }
+
+            health -= command.health;
+
+            // Record the event only if there is room; always count it (overflow detection).
+            if (*count < eventsSize)
+            {
+                NvBlastChunkFractureData& outEvent = events[*count];
+                outEvent.userdata = chunks[chunkIndex].userData;
+                outEvent.chunkIndex = chunkIndex;
+                outEvent.health = health;
+            }
+            (*count)++;
+
+            const float remainingDamage = -health;
+
+            if (remainingDamage > 0.0f) // node chunk has been damaged beyond its health
+            {
+                uint32_t firstSubOffset = getFirstSubsupportChunkIndex();
+                fractureSubSupport(chunkIndex, firstSubOffset, remainingDamage, subChunkHealths, chunks, events, count, eventsSize);
+            }
+        }
+    }
+}
+
+
+// In-place variant used when the command buffer IS the event buffer. Pass 1 applies the
+// top-level commands and compacts the effective ones to the front of 'inoutbuffer' as
+// events. Pass 2 replays those events, appending hierarchical (subsupport) fracture
+// events after them via fractureSubSupport. Event health < 0 encodes leftover damage.
+void Actor::fractureInPlaceEvents(uint32_t chunkFractureCount, NvBlastChunkFractureData* inoutbuffer, uint32_t eventsSize, uint32_t* count)
+{
+    const Asset& asset = *getAsset();
+    const SupportGraph& graph = *getGraph();
+    const uint32_t* graphAdjacencyPartition = graph.getAdjacencyPartition();
+    const uint32_t* graphAdjacentNodeIndices = graph.getAdjacentNodeIndices();
+    float* bondHealths = getBondHealths();
+    float* chunkHealths = getLowerSupportChunkHealths();
+    float* subChunkHealths = getSubsupportChunkHealths();
+    const NvBlastChunk* chunks = getChunks();
+
+    //
+    // First level Chunk Fractures
+    //
+
+    for (uint32_t i = 0; i < chunkFractureCount; ++i)
+    {
+        const NvBlastChunkFractureData& command = inoutbuffer[i];
+        const uint32_t chunkIndex = command.chunkIndex;
+        const uint32_t chunkHealthIndex = asset.getContiguousLowerSupportIndex(chunkIndex);
+        NVBLAST_ASSERT(!isInvalidIndex(chunkHealthIndex));
+        if (isInvalidIndex(chunkHealthIndex))
+        {
+            continue;
+        }
+        float& health = chunkHealths[chunkHealthIndex];
+        if (health > 0.0f && command.health > 0.0f)
+        {
+            const uint32_t nodeIndex = asset.getChunkToGraphNodeMap()[chunkIndex];
+            if (getGraphNodeCount() > 1 && !isInvalidIndex(nodeIndex))
+            {
+                // Break every bond attached to this graph node, then remove the node.
+                for (uint32_t adjacentIndex = graphAdjacencyPartition[nodeIndex]; adjacentIndex < graphAdjacencyPartition[nodeIndex + 1]; adjacentIndex++)
+                {
+                    const uint32_t bondIndex = graph.findBond(nodeIndex, graphAdjacentNodeIndices[adjacentIndex]);
+                    NVBLAST_ASSERT(!isInvalidIndex(bondIndex));
+                    if (bondHealths[bondIndex] > 0.0f)
+                    {
+                        bondHealths[bondIndex] = 0.0f;
+                    }
+                }
+                getFamilyGraph()->notifyNodeRemoved(getIndex(), nodeIndex, &graph);
+            }
+
+            health -= command.health;
+
+            // Compact: overwrite the buffer front with the effective event.
+            // (*count <= i always holds here, so the command being read is never clobbered early.)
+            NvBlastChunkFractureData& outEvent = inoutbuffer[(*count)++];
+            outEvent.userdata = chunks[chunkIndex].userData;
+            outEvent.chunkIndex = chunkIndex;
+            outEvent.health = health;
+        }
+    }
+
+    //
+    // Hierarchical Chunk Fractures
+    //
+
+    uint32_t commandedChunkFractures = *count;
+
+    for (uint32_t i = 0; i < commandedChunkFractures; ++i)
+    {
+        NvBlastChunkFractureData& event = inoutbuffer[i];
+        const uint32_t chunkIndex = event.chunkIndex;
+
+        const float remainingDamage = -event.health;
+        if (remainingDamage > 0.0f) // node chunk has been damaged beyond its health
+        {
+            uint32_t firstSubOffset = getFirstSubsupportChunkIndex();
+            fractureSubSupport(chunkIndex, firstSubOffset, remainingDamage, subChunkHealths, chunks, inoutbuffer, count, eventsSize);
+        }
+    }
+}
+
+
+// Applies fracture commands (chunk fractures first, then bond fractures) to this actor,
+// optionally reporting events into eventBuffers. Chooses between the no-events, with-events
+// and in-place paths depending on whether eventBuffers aliases the command buffer. On
+// overflow, a warning is logged and the buffer counts are left at their input capacity.
+// Profile builds accumulate elapsed time into timers->fracture.
+void Actor::applyFracture(NvBlastFractureBuffers* eventBuffers, const NvBlastFractureBuffers* commands, NvBlastLog logFn, NvBlastTimers* timers)
+{
+    NVBLAST_CHECK(commands != nullptr, logFn, "Actor::applyFracture: NULL commands pointer input.", return);
+    NVBLAST_CHECK(isValid(commands), logFn, "Actor::applyFracture: commands memory is NULL but size is > 0.", return);
+    NVBLAST_CHECK(eventBuffers == nullptr || isValid(eventBuffers), logFn, "NvBlastActorApplyFracture: eventBuffers memory is NULL but size is > 0.",
+        eventBuffers->bondFractureCount = 0; eventBuffers->chunkFractureCount = 0; return);
+
+#if NVBLAST_CHECK_PARAMS
+    if (eventBuffers != nullptr && eventBuffers->bondFractureCount == 0 && eventBuffers->chunkFractureCount == 0)
+    {
+        NVBLAST_LOG_WARNING(logFn, "NvBlastActorApplyFracture: eventBuffers do not provide any space.");
+        return;
+    }
+#endif
+
+#if NV_PROFILE
+    Time time;
+#else
+    NV_UNUSED(timers);
+#endif
+
+    //
+    // Chunk Fracture
+    //
+
+    if (eventBuffers == nullptr || eventBuffers->chunkFractures == nullptr)
+    {
+        // immediate hierarchical fracture
+        fractureNoEvents(commands->chunkFractureCount, commands->chunkFractures);
+    }
+    else if (eventBuffers->chunkFractures != commands->chunkFractures)
+    {
+        // immediate hierarchical fracture
+        uint32_t count = 0;
+        fractureWithEvents(commands->chunkFractureCount, commands->chunkFractures, eventBuffers->chunkFractures, eventBuffers->chunkFractureCount, &count);
+
+        if (count > eventBuffers->chunkFractureCount)
+        {
+            NVBLAST_LOG_WARNING(logFn, "NvBlastActorApplyFracture: eventBuffers too small. Chunk events were lost.");
+        }
+        else
+        {
+            eventBuffers->chunkFractureCount = count;
+        }
+    }
+    else if (eventBuffers->chunkFractures == commands->chunkFractures)
+    {
+        // compacting first
+        // (commands and events share memory, so the in-place path is required)
+        uint32_t count = 0;
+        fractureInPlaceEvents(commands->chunkFractureCount, commands->chunkFractures, eventBuffers->chunkFractureCount, &count);
+
+        if (count > eventBuffers->chunkFractureCount)
+        {
+            NVBLAST_LOG_WARNING(logFn, "NvBlastActorApplyFracture: eventBuffers too small. Chunk events were lost.");
+        }
+        else
+        {
+            eventBuffers->chunkFractureCount = count;
+        }
+    }
+
+    //
+    // Bond Fracture
+    //
+
+    uint32_t outCount = 0;
+    const uint32_t eventBufferSize = eventBuffers ? eventBuffers->bondFractureCount : 0;
+
+    NvBlastBond* bonds = getBonds();
+    float* bondHealths = getBondHealths();
+    for (uint32_t i = 0; i < commands->bondFractureCount; ++i)
+    {
+        const NvBlastBondFractureData& frac = commands->bondFractures[i];
+
+        const uint32_t bondIndex = damageBond(frac.nodeIndex0, frac.nodeIndex1, frac.health);
+
+        if (!isInvalidIndex(bondIndex))
+        {
+            if (eventBuffers && eventBuffers->bondFractures)
+            {
+                // Record the event only if there is room; outCount still advances for overflow detection.
+                if (outCount < eventBufferSize)
+                {
+                    NvBlastBondFractureData& outEvent = eventBuffers->bondFractures[outCount];
+                    outEvent.userdata = bonds[bondIndex].userData;
+                    outEvent.nodeIndex0 = frac.nodeIndex0;
+                    outEvent.nodeIndex1 = frac.nodeIndex1;
+                    outEvent.health = bondHealths[bondIndex];
+                }
+            }
+            outCount++;
+        }
+    }
+
+    if (eventBuffers && eventBuffers->bondFractures)
+    {
+        if (outCount > eventBufferSize)
+        {
+            NVBLAST_LOG_WARNING(logFn, "NvBlastActorApplyFracture: eventBuffers too small. Bond events were lost.");
+        }
+        else
+        {
+            eventBuffers->bondFractureCount = outCount;
+        }
+    }
+
+#if NV_PROFILE
+    if (timers != nullptr)
+    {
+        timers->fracture += time.getElapsedTicks();
+    }
+#endif
+
+}
+
+
+// Returns the scratch size (bytes) that Actor::split needs, which is the island-search
+// requirement for this actor's support graph node count.
+size_t Actor::splitRequiredScratch() const
+{
+    return FamilyGraph::findIslandsRequiredScratch(getGraph()->m_nodeCount);
+}
+
+
+// Splits this actor into child actors, writing them into result->newActors (capacity
+// newActorsMaxCount) and returning the number produced. Single-lower-support-chunk
+// actors partition hierarchically when dead; multi-node actors first run island
+// detection, partition by island, refresh visibility, and then further partition any
+// dead single-chunk children ("brittle" fracture). result->deletedActor is set to this
+// actor if a split occurred, else nullptr. 'scratch' must hold splitRequiredScratch().
+uint32_t Actor::split(NvBlastActorSplitEvent* result, uint32_t newActorsMaxCount, void* scratch, NvBlastLog logFn, NvBlastTimers* timers)
+{
+    NVBLAST_CHECK(result != nullptr, logFn, "Actor::split: NULL result pointer input.", return 0);
+    NVBLAST_CHECK(newActorsMaxCount > 0 && result->newActors != nullptr, logFn, "NvBlastActorSplit: no space for results provided.", return 0);
+    NVBLAST_CHECK(scratch != nullptr, logFn, "Actor::split: NULL scratch pointer input.", return 0);
+
+#if NV_PROFILE
+    Time time;
+#else
+    NV_UNUSED(timers);
+#endif
+
+    Actor** newActors = reinterpret_cast<Actor**>(result->newActors);
+
+    uint32_t actorsCount = 0;
+
+    if (getGraphNodeCount() <= 1)
+    {
+        // Single lower-support chunk: health slot is the actor index for a support chunk,
+        // otherwise computed from the subsupport chunk's position.
+        uint32_t chunkHealthIndex = isSingleSupportChunk() ? getIndex() : getFirstVisibleChunkIndex() - getFirstSubsupportChunkIndex() + getGraph()->m_nodeCount;
+
+        float* chunkHealths = getLowerSupportChunkHealths();
+        if (chunkHealths[chunkHealthIndex] <= 0.0f)
+        {
+            actorsCount = partitionSingleLowerSupportChunk(newActors, newActorsMaxCount, logFn);
+
+            // Keep partitioning any dead children produced ("brittle" cascade); when a child
+            // is replaced by its own children, it is swapped out with the last entry and the
+            // loop index is stepped back to re-examine the swapped-in actor.
+            for (uint32_t i = 0; i < actorsCount; ++i)
+            {
+                Actor* newActor = newActors[i];
+                uint32_t firstVisible = newActor->getFirstVisibleChunkIndex();
+                uint32_t firstSub = newActor->getFirstSubsupportChunkIndex();
+                uint32_t nodeCount = newActor->getGraph()->m_nodeCount;
+                uint32_t newActorIndex = newActor->getIndex();
+                uint32_t healthIndex = newActor->isSubSupportChunk() ? firstVisible - firstSub + nodeCount : newActorIndex;
+
+                if (chunkHealths[healthIndex] <= 0.0f)
+                {
+                    uint32_t brittleActors = newActors[i]->partitionSingleLowerSupportChunk(&newActors[actorsCount], newActorsMaxCount - actorsCount, logFn);
+                    actorsCount += brittleActors;
+
+                    if (brittleActors > 0)
+                    {
+                        actorsCount--;
+                        newActors[i] = newActors[actorsCount];
+                        i--;
+                    }
+                }
+            }
+        }
+
+
+#if NV_PROFILE
+        if (timers != nullptr)
+        {
+            timers->partition += time.getElapsedTicks();
+        }
+#endif
+    }
+    else
+    {
+        // Multi-node actor: recompute islands before partitioning.
+        findIslands(scratch);
+
+#if NV_PROFILE
+        if (timers != nullptr)
+        {
+            timers->island += time.getElapsedTicks();
+        }
+#endif
+
+        actorsCount = partitionMultipleGraphNodes(newActors, newActorsMaxCount, logFn);
+
+        if (actorsCount > 1)
+        {
+#if NV_PROFILE
+            if (timers != nullptr)
+            {
+                timers->partition += time.getElapsedTicks();
+            }
+#endif
+
+            // Recalculate visible chunk lists if the graph nodes have been redistributed
+            for (uint32_t i = 0; i < actorsCount; ++i)
+            {
+                newActors[i]->updateVisibleChunksFromGraphNodes();
+            }
+
+#if NV_PROFILE
+            if (timers != nullptr)
+            {
+                timers->visibility += time.getElapsedTicks();
+            }
+#endif
+
+            // Brittle cascade for single-chunk children, same swap-and-reexamine scheme as above.
+            for (uint32_t i = 0; i < actorsCount; ++i)
+            {
+                Actor* newActor = newActors[i];
+                float* chunkHealths = newActor->getLowerSupportChunkHealths();
+                uint32_t firstVisible = newActor->getFirstVisibleChunkIndex();
+                uint32_t firstSub = newActor->getFirstSubsupportChunkIndex();
+                uint32_t nodeCount = newActor->getGraph()->m_nodeCount;
+                uint32_t newActorIndex = newActor->getIndex();
+                uint32_t healthIndex = newActor->isSubSupportChunk() ? firstVisible - firstSub + nodeCount : newActorIndex;
+
+                if (newActors[i]->getGraphNodeCount() <= 1)
+                {
+                    // this relies on visibility updated, subsupport actors only have m_firstVisibleChunkIndex to identify the chunk
+                    if (chunkHealths[healthIndex] <= 0.0f)
+                    {
+                        uint32_t brittleActors = newActors[i]->partitionSingleLowerSupportChunk(&newActors[actorsCount], newActorsMaxCount - actorsCount, logFn);
+                        actorsCount += brittleActors;
+
+                        if (brittleActors > 0)
+                        {
+                            actorsCount--;
+                            newActors[i] = newActors[actorsCount];
+                            i--;
+                        }
+                    }
+                }
+            }
+
+#if NV_PROFILE
+            if (timers != nullptr)
+            {
+                timers->partition += time.getElapsedTicks();
+            }
+#endif
+        }
+        else
+        {
+            // A single resulting island means no split happened; report zero new actors.
+            actorsCount = 0;
+        }
+    }
+
+    result->deletedActor = actorsCount == 0 ? nullptr : this;
+
+    return actorsCount;
+}
+
+
+// Runs island detection for this actor over the asset's support graph, using the
+// family graph's working data; forwards the result of FamilyGraph::findIslands.
+uint32_t Actor::findIslands(void* scratch)
+{
+    return getFamilyHeader()->getFamilyGraph()->findIslands(getIndex(), scratch, &getAsset()->m_graph);
+}
+
+
+// Redistributes this multi-node actor's graph nodes to new actors according to the
+// island IDs computed by findIslands. Nodes whose island ID equals this actor's index
+// stay; others are unlinked and appended to the actor borrowed for their island ID
+// (invalid island ID means the chunk was removed). Leaf-chunk counts are maintained.
+// If this actor keeps any nodes it is included in the output; otherwise it is released.
+// Returns the number of actors written to newActors (overflow is logged, not fatal).
+uint32_t Actor::partitionMultipleGraphNodes(Actor** newActors, uint32_t newActorsSize, NvBlastLog logFn)
+{
+    NVBLAST_ASSERT(newActorsSize == 0 || newActors != nullptr);
+
+    // Check for single subsupport chunk, no partitioning
+    if (m_graphNodeCount <= 1)
+    {
+        NVBLAST_LOG_WARNING(logFn, "Nv::Blast::Actor::partitionMultipleGraphNodes: actor is a single lower-support chunk, and cannot be partitioned by this function.");
+        return 0;
+    }
+
+    FamilyHeader* header = getFamilyHeader();
+    NVBLAST_ASSERT(header != nullptr); // If m_actorEntryDataIndex is valid, this should be too
+
+    // Get the links for the graph nodes
+    uint32_t* graphNodeIndexLinks = header->getGraphNodeIndexLinks();
+
+    // Get the graph chunk indices and leaf chunk counts
+    const Asset* asset = getAsset();
+    const uint32_t* graphChunkIndices = asset->m_graph.getChunkIndices();
+    const uint32_t* subtreeLeafChunkCounts = asset->getSubtreeLeafChunkCounts();
+
+    // Distribute graph nodes to new actors
+    uint32_t newActorCount = 0;
+    const uint32_t thisActorIndex = getIndex();
+    m_leafChunkCount = 0;
+    const uint32_t* islandIDs = header->getFamilyGraph()->getIslandIds();
+    uint32_t lastGraphNodeIndex = invalidIndex<uint32_t>();
+    uint32_t nextGraphNodeIndex = invalidIndex<uint32_t>();
+    bool overflow = false;
+    for (uint32_t graphNodeIndex = m_firstGraphNodeIndex; !isInvalidIndex(graphNodeIndex); graphNodeIndex = nextGraphNodeIndex)
+    {
+        // Capture the next link before this node is potentially re-linked elsewhere.
+        nextGraphNodeIndex = graphNodeIndexLinks[graphNodeIndex];
+        const uint32_t islandID = islandIDs[graphNodeIndex];
+
+        if (islandID == thisActorIndex)
+        {
+            m_leafChunkCount += subtreeLeafChunkCounts[graphChunkIndices[graphNodeIndex]];
+            lastGraphNodeIndex = graphNodeIndex;
+            continue; // Leave the chunk in this actor
+        }
+
+        // Remove link from this actor
+        if (isInvalidIndex(lastGraphNodeIndex))
+        {
+            m_firstGraphNodeIndex = nextGraphNodeIndex;
+        }
+        else
+        {
+            graphNodeIndexLinks[lastGraphNodeIndex] = nextGraphNodeIndex;
+        }
+        graphNodeIndexLinks[graphNodeIndex] = invalidIndex<uint32_t>();
+        --m_graphNodeCount;
+
+        // See if the chunk had been removed
+        if (islandID == invalidIndex<uint32_t>())
+        {
+            continue;
+        }
+
+        // Get new actor if the islandID is valid
+        Actor* newActor = header->borrowActor(islandID);
+
+        // Check new actor to see if we're adding the first chunk
+        if (isInvalidIndex(newActor->m_firstGraphNodeIndex))
+        {
+            // See if we can fit it in the output list
+            if (newActorCount < newActorsSize)
+            {
+                newActors[newActorCount++] = newActor;
+            }
+            else
+            {
+                overflow = true;
+            }
+        }
+
+        // Put link in new actor (prepend to its node chain)
+        graphNodeIndexLinks[graphNodeIndex] = newActor->m_firstGraphNodeIndex;
+        newActor->m_firstGraphNodeIndex = graphNodeIndex;
+        ++newActor->m_graphNodeCount;
+        // Add to the actor's leaf chunk count
+        newActor->m_leafChunkCount += subtreeLeafChunkCounts[graphChunkIndices[graphNodeIndex]];
+    }
+
+    if (m_graphNodeCount > 0)
+    {
+        // There are still chunks in this actor. See if we can fit this in the output list.
+        if (newActorCount < newActorsSize)
+        {
+            newActors[newActorCount++] = this;
+        }
+        else
+        {
+            overflow = true;
+        }
+    }
+    else
+    {
+        // No more chunks; release this actor.
+        release();
+    }
+
+    if (overflow)
+    {
+        NVBLAST_LOG_WARNING(logFn, "Nv::Blast::Actor::partitionMultipleGraphNodes: input newActors array could not hold all actors generated.");
+    }
+
+    return newActorCount;
+}
+
+
+// Implementation notes: partitions an actor that holds at most one lower-support chunk
+// (m_graphNodeCount <= 1) by activating one new actor per child of that chunk, then
+// releasing this actor.  See the declaration in NvBlastActor.h for the full API contract.
+uint32_t Actor::partitionSingleLowerSupportChunk(Actor** newActors, uint32_t newActorsSize, NvBlastLog logFn)
+{
+ // newActors may only be NULL when the caller passes no output capacity
+ NVBLAST_ASSERT(newActorsSize == 0 || newActors != nullptr);
+
+ // Ensure this is a single subsupport chunk, no partitioning
+ if (m_graphNodeCount > 1)
+ {
+ NVBLAST_LOG_WARNING(logFn, "Nv::Blast::Actor::partitionSingleLowerSupportChunk: actor is not a single lower-support chunk, and cannot be partitioned by this function.");
+ return 0;
+ }
+
+ FamilyHeader* header = getFamilyHeader();
+
+ // The conditional (visible vs. support chunk) is needed because we allow single-child chunk chains
+ // This makes it possible that an actor with a single support chunk will have a different visible chunk (ancestor of the support chunk)
+ const uint32_t chunkIndex = m_graphNodeCount == 0 ? m_firstVisibleChunkIndex : getGraph()->getChunkIndices()[m_firstGraphNodeIndex];
+ // Sanity check: the single visible chunk should have no successor in the visible chunk list
+ NVBLAST_ASSERT(isInvalidIndex(header->getVisibleChunkIndexLinks()[chunkIndex].m_adj[1]));
+
+ const NvBlastChunk& chunk = header->m_asset->getChunks()[chunkIndex];
+ uint32_t childCount = chunk.childIndexStop - chunk.firstChildIndex;
+
+ // Warn if we cannot fit all child chunks in the output list
+ if (childCount > newActorsSize)
+ {
+ NVBLAST_LOG_WARNING(logFn, "Nv::Blast::Actor::partitionSingleLowerSupportChunk: input newActors array will not hold all actors generated.");
+ childCount = newActorsSize;
+ }
+
+ // Return if no chunks will be created.  (A leaf chunk actor is left intact.)
+ if (childCount == 0)
+ {
+ return 0;
+ }
+
+ // Activate a new actor for every child chunk
+ const Asset* asset = getAsset();
+ const NvBlastChunk* chunks = asset->getChunks();
+ const uint32_t firstChildIndex = chunks[chunkIndex].firstChildIndex;
+ for (uint32_t i = 0; i < childCount; ++i)
+ {
+ const uint32_t childIndex = firstChildIndex + i;
+ NVBLAST_ASSERT(childIndex >= asset->m_firstSubsupportChunkIndex);
+ // Children of a lower-support chunk are subsupport chunks; their actor indices
+ // follow the graph-node range (m_graph.m_nodeCount entries) in the family's actor array.
+ const uint32_t actorIndex = asset->m_graph.m_nodeCount + (childIndex - asset->m_firstSubsupportChunkIndex);
+ NVBLAST_ASSERT(!header->isActorActive(actorIndex));
+ newActors[i] = header->borrowActor(actorIndex);
+ newActors[i]->m_firstVisibleChunkIndex = childIndex;
+ newActors[i]->m_visibleChunkCount = 1;
+ newActors[i]->m_leafChunkCount = asset->getSubtreeLeafChunkCounts()[childIndex];
+ }
+
+ // Release this actor; its pointer remains valid only if it appears in newActors (it does not here)
+ release();
+
+ return childCount;
+}
+
+
+// Rebuilds this actor's visible chunk list from its current set of graph nodes.
+// Only meaningful for actors that hold support chunks; actors with no graph nodes
+// (single subsupport chunk actors) are left untouched.
+void Actor::updateVisibleChunksFromGraphNodes()
+{
+ // Only apply this to upper-support chunk actors
+ if (m_graphNodeCount == 0)
+ {
+ return;
+ }
+
+ const Asset* asset = getAsset();
+
+ const uint32_t thisActorIndex = getIndex();
+
+ // Get various arrays from the family header and asset
+ FamilyHeader* header = getFamilyHeader();
+ Actor* actors = header->getActors();
+ IndexDLink<uint32_t>* visibleChunkIndexLinks = header->getVisibleChunkIndexLinks();
+ uint32_t* chunkActorIndices = header->getChunkActorIndices();
+ const Nv::Blast::SupportGraph& graph = asset->m_graph;
+ const uint32_t* graphChunkIndices = graph.getChunkIndices();
+ const NvBlastChunk* chunks = asset->getChunks();
+ const uint32_t upperSupportChunkCount = asset->getUpperSupportChunkCount();
+
+ // Iterate over all graph nodes and update visible chunk list.
+ // updateVisibleChunksFromSupportChunk is the templated helper declared a friend in NvBlastActor.h.
+ const uint32_t* graphNodeIndexLinks = header->getGraphNodeIndexLinks();
+ for (uint32_t graphNodeIndex = m_firstGraphNodeIndex; !isInvalidIndex(graphNodeIndex); graphNodeIndex = graphNodeIndexLinks[graphNodeIndex])
+ {
+ updateVisibleChunksFromSupportChunk<Actor>(actors, visibleChunkIndexLinks, chunkActorIndices, thisActorIndex, graphChunkIndices[graphNodeIndex], chunks, upperSupportChunkCount);
+ }
+}
+
+} // namespace Blast
+} // namespace Nv
+
+
+// API implementation
+
+extern "C"
+{
+
+// C API entry point: creates the first (unfractured) actor in the given family.
+// Thin wrapper that validates pointer arguments (logging through logFn) and forwards
+// to Nv::Blast::Actor::create.  Checks run in argument order, so the first NULL
+// argument determines the logged message.
+NvBlastActor* NvBlastFamilyCreateFirstActor(NvBlastFamily* family, const NvBlastActorDesc* desc, void* scratch, NvBlastLog logFn)
+{
+ NVBLAST_CHECK(family != nullptr, logFn, "NvBlastFamilyCreateFirstActor: NULL family input.", return nullptr);
+ NVBLAST_CHECK(desc != nullptr, logFn, "NvBlastFamilyCreateFirstActor: NULL desc input.", return nullptr);
+ NVBLAST_CHECK(scratch != nullptr, logFn, "NvBlastFamilyCreateFirstActor: NULL scratch input.", return nullptr);
+
+ return Nv::Blast::Actor::create(family, desc, scratch, logFn);
+}
+
+
+// C API entry point: returns the scratch size (bytes) needed by NvBlastFamilyCreateFirstActor.
+// Validates that the family pointer is non-NULL and that the family references an asset
+// before forwarding to Nv::Blast::Actor::createRequiredScratch; returns 0 on invalid input.
+size_t NvBlastFamilyGetRequiredScratchForCreateFirstActor(const NvBlastFamily* family, NvBlastLog logFn)
+{
+ NVBLAST_CHECK(family != nullptr, logFn, "NvBlastFamilyGetRequiredScratchForCreateFirstActor: NULL family input.", return 0);
+ NVBLAST_CHECK(reinterpret_cast<const Nv::Blast::FamilyHeader*>(family)->m_asset != nullptr,
+ logFn, "NvBlastFamilyGetRequiredScratchForCreateFirstActor: family has NULL asset.", return 0);
+
+ return Nv::Blast::Actor::createRequiredScratch(family);
+}
+
+
+// C API entry point: releases (deactivates) an actor.  Returns whether the release
+// actually happened, i.e. whether the actor was active on entry.
+bool NvBlastActorDeactivate(NvBlastActor* actor, NvBlastLog logFn)
+{
+ NVBLAST_CHECK(actor != nullptr, logFn, "NvBlastActorDeactivate: NULL actor input.", return false);
+
+ Nv::Blast::Actor* const actorImpl = static_cast<Nv::Blast::Actor*>(actor);
+
+ // Deactivating an already-inactive actor is tolerated, but worth flagging to the caller.
+ if (!actorImpl->isActive())
+ {
+ NVBLAST_LOG_WARNING(logFn, "NvBlastActorDeactivate: inactive actor input.");
+ }
+
+ return actorImpl->release();
+}
+
+
+// C API entry point: returns the number of chunks in the actor's visible chunk list,
+// or 0 (with an error logged) for NULL or inactive actors.
+uint32_t NvBlastActorGetVisibleChunkCount(const NvBlastActor* actor, NvBlastLog logFn)
+{
+ NVBLAST_CHECK(actor != nullptr, logFn, "NvBlastActorGetVisibleChunkCount: NULL actor input.", return 0);
+
+ const Nv::Blast::Actor* const actorImpl = static_cast<const Nv::Blast::Actor*>(actor);
+
+ if (!actorImpl->isActive())
+ {
+ NVBLAST_LOG_ERROR(logFn, "NvBlastActorGetVisibleChunkCount: inactive actor input.");
+ return 0;
+ }
+
+ return actorImpl->getVisibleChunkCount();
+}
+
+
+// C API entry point: copies the actor's visible chunk indices into the caller's buffer,
+// stopping at the buffer's capacity.  Returns the number of indices written.
+uint32_t NvBlastActorGetVisibleChunkIndices(uint32_t* visibleChunkIndices, uint32_t visibleChunkIndicesSize, const NvBlastActor* actor, NvBlastLog logFn)
+{
+ NVBLAST_CHECK(visibleChunkIndices != nullptr, logFn, "NvBlastActorGetVisibleChunkIndices: NULL visibleChunkIndices pointer input.", return 0);
+ NVBLAST_CHECK(actor != nullptr, logFn, "NvBlastActorGetVisibleChunkIndices: NULL actor pointer input.", return 0);
+
+ const Nv::Blast::Actor* const actorImpl = static_cast<const Nv::Blast::Actor*>(actor);
+
+ if (!actorImpl->isActive())
+ {
+ NVBLAST_LOG_ERROR(logFn, "NvBlastActorGetVisibleChunkIndices: inactive actor pointer input.");
+ return 0;
+ }
+
+ // Walk the visible chunk list, writing until either the list or the buffer is exhausted
+ uint32_t writeCount = 0;
+ Nv::Blast::Actor::VisibleChunkIt it(*actorImpl);
+ while (writeCount < visibleChunkIndicesSize && (bool)it)
+ {
+ visibleChunkIndices[writeCount++] = (uint32_t)it;
+ ++it;
+ }
+
+ return writeCount;
+}
+
+
+// C API entry point: returns the number of graph nodes (support chunks) in the actor,
+// or 0 (with an error logged) for NULL or inactive actors.
+uint32_t NvBlastActorGetGraphNodeCount(const NvBlastActor* actor, NvBlastLog logFn)
+{
+ NVBLAST_CHECK(actor != nullptr, logFn, "NvBlastActorGetGraphNodeCount: NULL actor pointer input.", return 0);
+
+ const Nv::Blast::Actor* const actorImpl = static_cast<const Nv::Blast::Actor*>(actor);
+
+ if (!actorImpl->isActive())
+ {
+ NVBLAST_LOG_ERROR(logFn, "NvBlastActorGetGraphNodeCount: inactive actor pointer input.");
+ return 0;
+ }
+
+ return actorImpl->getGraphNodeCount();
+}
+
+
+// C API entry point: copies the actor's graph node indices into the caller's buffer,
+// stopping at the buffer's capacity.  Returns the number of indices written.
+uint32_t NvBlastActorGetGraphNodeIndices(uint32_t* graphNodeIndices, uint32_t graphNodeIndicesSize, const NvBlastActor* actor, NvBlastLog logFn)
+{
+ NVBLAST_CHECK(graphNodeIndices != nullptr, logFn, "NvBlastActorGetGraphNodeIndices: NULL graphNodeIndices pointer input.", return 0);
+ NVBLAST_CHECK(actor != nullptr, logFn, "NvBlastActorGetGraphNodeIndices: NULL actor pointer input.", return 0);
+
+ const Nv::Blast::Actor* const actorImpl = static_cast<const Nv::Blast::Actor*>(actor);
+
+ if (!actorImpl->isActive())
+ {
+ NVBLAST_LOG_ERROR(logFn, "NvBlastActorGetGraphNodeIndices: inactive actor pointer input.");
+ return 0;
+ }
+
+ // Walk the graph node list, writing until either the list or the buffer is exhausted
+ uint32_t writeCount = 0;
+ Nv::Blast::Actor::GraphNodeIt it(*actorImpl);
+ while (writeCount < graphNodeIndicesSize && (bool)it)
+ {
+ graphNodeIndices[writeCount++] = (uint32_t)it;
+ ++it;
+ }
+
+ return writeCount;
+}
+
+
+// C API entry point: returns the family's bond health array for this actor,
+// or NULL (with an error logged) for NULL or inactive actors.
+const float* NvBlastActorGetBondHealths(const NvBlastActor* actor, NvBlastLog logFn)
+{
+ NVBLAST_CHECK(actor != nullptr, logFn, "NvBlastActorGetBondHealths: NULL actor pointer input.", return nullptr);
+
+ const Nv::Blast::Actor* const actorImpl = static_cast<const Nv::Blast::Actor*>(actor);
+
+ if (!actorImpl->isActive())
+ {
+ NVBLAST_LOG_ERROR(logFn, "NvBlastActorGetBondHealths: inactive actor pointer input.");
+ return nullptr;
+ }
+
+ return actorImpl->getFamilyHeader()->getBondHealths();
+}
+
+
+// C API entry point: returns the opaque family handle that owns this actor,
+// or NULL (with an error logged) for NULL or inactive actors.
+NvBlastFamily* NvBlastActorGetFamily(const NvBlastActor* actor, NvBlastLog logFn)
+{
+ NVBLAST_CHECK(actor != nullptr, logFn, "NvBlastActorGetFamily: NULL actor pointer input.", return nullptr);
+
+ const Nv::Blast::Actor* const actorImpl = static_cast<const Nv::Blast::Actor*>(actor);
+
+ if (!actorImpl->isActive())
+ {
+ NVBLAST_LOG_ERROR(logFn, "NvBlastActorGetFamily: inactive actor pointer input.");
+ return nullptr;
+ }
+
+ // The opaque NvBlastFamily type aliases the internal FamilyHeader
+ return reinterpret_cast<NvBlastFamily*>(actorImpl->getFamilyHeader());
+}
+
+
+// C API entry point: returns the actor's index within its family's actor array,
+// or invalidIndex (with an error logged) for NULL or inactive actors.
+uint32_t NvBlastActorGetIndex(const NvBlastActor* actor, NvBlastLog logFn)
+{
+ NVBLAST_CHECK(actor != nullptr, logFn, "NvBlastActorGetIndex: NULL actor pointer input.", return Nv::Blast::invalidIndex<uint32_t>());
+
+ const Nv::Blast::Actor* const actorImpl = static_cast<const Nv::Blast::Actor*>(actor);
+
+ if (!actorImpl->isActive())
+ {
+ NVBLAST_LOG_ERROR(logFn, "NvBlastActorGetIndex: actor is not active.");
+ return Nv::Blast::invalidIndex<uint32_t>();
+ }
+
+ return actorImpl->getIndex();
+}
+
+
+// C API entry point: runs the damage program to fill commandBuffers with fracture
+// commands for this actor.  For NULL inputs it returns after logging; for inactive
+// actors it zeroes the command counts so stale buffer contents are not misread.
+void NvBlastActorGenerateFracture
+(
+ NvBlastFractureBuffers* commandBuffers,
+ const NvBlastActor* actor,
+ const NvBlastDamageProgram program,
+ const NvBlastProgramParams* programParams,
+ NvBlastLog logFn,
+ NvBlastTimers* timers
+)
+{
+ NVBLAST_CHECK(commandBuffers != nullptr, logFn, "NvBlastActorGenerateFracture: NULL commandBuffers pointer input.", return);
+ NVBLAST_CHECK(actor != nullptr, logFn, "NvBlastActorGenerateFracture: NULL actor pointer input.", return);
+
+ const Nv::Blast::Actor* const actorImpl = static_cast<const Nv::Blast::Actor*>(actor);
+
+ if (!actorImpl->isActive())
+ {
+ NVBLAST_LOG_ERROR(logFn, "NvBlastActorGenerateFracture: actor is not active.");
+ commandBuffers->bondFractureCount = 0;
+ commandBuffers->chunkFractureCount = 0;
+ return;
+ }
+
+ actorImpl->generateFracture(commandBuffers, program, programParams, logFn, timers);
+}
+
+
+// C API entry point: applies fracture commands to the actor, optionally recording
+// events into eventBuffers.  For inactive actors the event counts (when an event
+// buffer was supplied) are zeroed so stale contents are not misread.
+void NvBlastActorApplyFracture
+(
+ NvBlastFractureBuffers* eventBuffers,
+ NvBlastActor* actor,
+ const NvBlastFractureBuffers* commands,
+ NvBlastLog logFn,
+ NvBlastTimers* timers
+)
+{
+ NVBLAST_CHECK(actor != nullptr, logFn, "NvBlastActorApplyFracture: NULL actor pointer input.", return);
+ NVBLAST_CHECK(commands != nullptr, logFn, "NvBlastActorApplyFracture: NULL commands pointer input.", return);
+ NVBLAST_CHECK(Nv::Blast::isValid(commands), logFn, "NvBlastActorApplyFracture: commands memory is NULL but size is > 0.", return);
+
+ Nv::Blast::Actor* const actorImpl = static_cast<Nv::Blast::Actor*>(actor);
+
+ if (!actorImpl->isActive())
+ {
+ NVBLAST_LOG_ERROR(logFn, "NvBlastActorApplyFracture: actor is not active.");
+ if (eventBuffers != nullptr)
+ {
+ eventBuffers->bondFractureCount = 0;
+ eventBuffers->chunkFractureCount = 0;
+ }
+ return;
+ }
+
+ actorImpl->applyFracture(eventBuffers, commands, logFn, timers);
+}
+
+
+// C API entry point: returns the scratch size (bytes) required by NvBlastActorSplit
+// for this actor, or 0 (with an error logged) for NULL or inactive actors.
+size_t NvBlastActorGetRequiredScratchForSplit(const NvBlastActor* actor, NvBlastLog logFn)
+{
+ NVBLAST_CHECK(actor != nullptr, logFn, "NvBlastActorGetRequiredScratchForSplit: NULL actor input.", return 0);
+
+ const Nv::Blast::Actor* const actorImpl = static_cast<const Nv::Blast::Actor*>(actor);
+
+ if (!actorImpl->isActive())
+ {
+ NVBLAST_LOG_ERROR(logFn, "NvBlastActorGetRequiredScratchForSplit: actor is not active.");
+ return 0;
+ }
+
+ return actorImpl->splitRequiredScratch();
+}
+
+
+// C API entry point: upper bound on the number of actors NvBlastActorSplit can produce
+// for this actor.  Returns 0 (with an error logged) for NULL or inactive actors.
+uint32_t NvBlastActorGetMaxActorCountForSplit(const NvBlastActor* actor, NvBlastLog logFn)
+{
+ NVBLAST_CHECK(actor != nullptr, logFn, "NvBlastActorGetMaxActorCountForSplit: NULL actor input.", return 0);
+
+ const Nv::Blast::Actor* const actorImpl = static_cast<const Nv::Blast::Actor*>(actor);
+
+ if (!actorImpl->isActive())
+ {
+ NVBLAST_LOG_ERROR(logFn, "NvBlastActorGetMaxActorCountForSplit: actor is not active.");
+ return 0;
+ }
+
+ // The extra +1 over the leaf chunk count is the GWD-167 workaround
+ return actorImpl->getLeafChunkCount() + 1;
+}
+
+
+// C API entry point: splits an actor into new actors, reporting them through 'result'.
+// Validates all pointer arguments and that the actor is active, then forwards to
+// Nv::Blast::Actor::split.  Returns the number of new actors created, 0 on invalid input.
+uint32_t NvBlastActorSplit
+(
+ NvBlastActorSplitEvent* result,
+ NvBlastActor* actor,
+ uint32_t newActorsMaxCount,
+ void* scratch,
+ NvBlastLog logFn,
+ NvBlastTimers* timers
+)
+{
+ NVBLAST_CHECK(result != nullptr, logFn, "NvBlastActorSplit: NULL result pointer input.", return 0);
+ NVBLAST_CHECK(newActorsMaxCount > 0 && result->newActors != nullptr, logFn, "NvBlastActorSplit: no space for results provided.", return 0);
+ NVBLAST_CHECK(actor != nullptr, logFn, "NvBlastActorSplit: NULL actor pointer input.", return 0);
+ NVBLAST_CHECK(scratch != nullptr, logFn, "NvBlastActorSplit: NULL scratch pointer input.", return 0);
+
+ Nv::Blast::Actor& a = *static_cast<Nv::Blast::Actor*>(actor);
+
+ if (!a.isActive())
+ {
+ // Fixed copy-paste error: this message previously named NvBlastActorGetIndex.
+ NVBLAST_LOG_ERROR(logFn, "NvBlastActorSplit: actor is not active.");
+ return 0;
+ }
+
+ return a.split(result, newActorsMaxCount, scratch, logFn, timers);
+}
+
+
+// C API entry point: reports whether the actor has anything left to fracture.
+// Actors with two or more graph nodes can always fracture (bonds remain); a single
+// lower-support chunk actor can fracture only while its chunk health is positive.
+bool NvBlastActorCanFracture(const NvBlastActor* actor, NvBlastLog logFn)
+{
+ NVBLAST_CHECK(actor != nullptr, logFn, "NvBlastActorCanFracture: NULL actor input.", return false);
+
+ const Nv::Blast::Actor* const actorImpl = static_cast<const Nv::Blast::Actor*>(actor);
+
+ if (!actorImpl->isActive())
+ {
+ NVBLAST_LOG_ERROR(logFn, "NvBlastActorCanFracture: actor is not active.");
+ return false;
+ }
+
+ const uint32_t nodeCount = actorImpl->getGraphNodeCount();
+ if (nodeCount >= 2)
+ {
+ return true;
+ }
+
+ // Single lower-support chunk: look up its health entry.
+ uint32_t healthIndex;
+ if (nodeCount == 0)
+ {
+ // Subsupport chunk actor: subsupport healths are stored after the graph node healths
+ healthIndex = actorImpl->getFirstVisibleChunkIndex() - actorImpl->getFirstSubsupportChunkIndex() + actorImpl->getGraph()->m_nodeCount;
+ }
+ else
+ {
+ healthIndex = actorImpl->getFirstGraphNodeIndex();
+ }
+
+ return actorImpl->getLowerSupportChunkHealths()[healthIndex] > 0.0f;
+}
+
+
+} // extern "C"
+
+
+// deprecated API, still used in tests
+// Returns the support chunk index closest to 'point' among this actor's graph nodes,
+// or invalidIndex for invalid input or non-graph (subsupport) actors.
+uint32_t NvBlastActorClosestChunk(const float point[4], const NvBlastActor* actor, NvBlastLog logFn)
+{
+ // Guard against NULL/inactive input like the rest of the C API (previously dereferenced unchecked).
+ NVBLAST_CHECK(actor != nullptr, logFn, "NvBlastActorClosestChunk: NULL actor pointer input.", return Nv::Blast::invalidIndex<uint32_t>());
+
+ const Nv::Blast::Actor& a = *static_cast<const Nv::Blast::Actor*>(actor);
+
+ if (!a.isActive())
+ {
+ NVBLAST_LOG_ERROR(logFn, "NvBlastActorClosestChunk: inactive actor pointer input.");
+ return Nv::Blast::invalidIndex<uint32_t>();
+ }
+
+ if (a.isSubSupportChunk())
+ {
+ NVBLAST_LOG_WARNING(logFn, "NvBlastActorClosestChunk: not a graph actor.");
+ return Nv::Blast::invalidIndex<uint32_t>();
+ }
+
+ uint32_t closestNode = Nv::Blast::findNodeByPositionLinked(
+ point,
+ a.getFirstGraphNodeIndex(),
+ a.getFamilyHeader()->getGraphNodeIndexLinks(),
+ a.getAsset()->m_graph.getAdjacencyPartition(),
+ a.getAsset()->m_graph.getAdjacentNodeIndices(),
+ a.getAsset()->m_graph.getAdjacentBondIndices(),
+ a.getAsset()->getBonds(),
+ a.getFamilyHeader()->getBondHealths()
+ );
+
+ return a.getAsset()->m_graph.getChunkIndices()[closestNode];
+}
diff --git a/NvBlast/sdk/lowlevel/source/NvBlastActor.h b/NvBlast/sdk/lowlevel/source/NvBlastActor.h
new file mode 100644
index 0000000..42879e7
--- /dev/null
+++ b/NvBlast/sdk/lowlevel/source/NvBlastActor.h
@@ -0,0 +1,732 @@
+/*
+* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#ifndef NVBLASTACTOR_H
+#define NVBLASTACTOR_H
+
+
+#include "NvBlastAsset.h"
+#include "NvBlastDLink.h"
+#include "NvBlastIteratorBase.h"
+#include "NvBlastSupportGraph.h"
+
+#include <cstring>
+
+
+namespace Nv
+{
+namespace Blast
+{
+
+// Forward declarations
+class FamilyGraph;
+struct FamilyHeader;
+
+/**
+Internal implementation of solver actor.
+
+These objects are stored within the family in a single array. A pointer to a Actor class will be given
+to the user through the NvBlastActor opaque type.
+*/
+class Actor : public NvBlastActor
+{
+ friend struct FamilyHeader;
+
+ friend void updateVisibleChunksFromSupportChunk<>(Actor*, IndexDLink<uint32_t>*, uint32_t*, uint32_t, uint32_t, const NvBlastChunk*, uint32_t);
+
+public:
+ Actor() : m_familyOffset(0), m_firstVisibleChunkIndex(UINT32_MAX), m_visibleChunkCount(0), m_firstGraphNodeIndex(UINT32_MAX), m_graphNodeCount(0), m_leafChunkCount(0) {}
+
+ //////// Accessors ////////
+
+ /**
+ Find the family (see FamilyHeader) that this actor belongs to.
+
+ \return a pointer to the FamilyHeader for this actor.
+ */
+ FamilyHeader* getFamilyHeader() const;
+
+ /**
+ Utility to get the asset this actor is associated with, through its family.
+
+ \return the asset associated with this actor.
+ */
+ const Asset* getAsset() const;
+
+ /**
+ Since this object is not deleted (unless the family is deleted), we use m_familyOffset
+ to determine if the actor is valid, or "active." When no actors in an instance return isActive(),
+ it should be safe to delete the family.
+
+ \return true iff this actor is valid for use (active).
+ */
+ bool isActive() const;
+
+ /**
+ Whether or not this actor represents a subsupport chunk. If the actor contains a subsupport chunk, then it can have only that chunk.
+
+ \return true iff this actor contains a chunk which is a descendant of a support chunk.
+ */
+ bool isSubSupportChunk() const;
+
+ /**
+ Whether or not this actor represents a single support chunk. If the actor contains a single support chunk, it can have no other
+ chunks associated with it.
+
+ \return true iff this actor contains exactly one support chunk.
+ */
+ bool isSingleSupportChunk() const;
+
+ /**
+ Utility to calculate actor index.
+
+ \return the index of this actor in the FamilyHeader's getActors() array.
+ */
+ uint32_t getIndex() const;
+
+ /**
+ The number of visible chunks. This is calculated from updateVisibleChunksFromGraphNodes().
+ See also getFirstVisibleChunkIndex.
+
+ \return the number of chunks in the actor's visible chunk index list.
+ */
+ uint32_t getVisibleChunkCount() const;
+
+ /**
+ Access to visible chunk linked list for this actor. The index returned is that of a link in the FamilyHeader's getVisibleChunkIndexLinks().
+
+ \return the index of the head of the visible chunk linked list.
+ */
+ uint32_t getFirstVisibleChunkIndex() const;
+
+ /**
+ The number of graph nodes, corresponding to support chunks, for this actor.
+ See also getFirstGraphNodeIndex.
+
+ \return the number of graph nodes in the actor's graph node index list.
+ */
+ uint32_t getGraphNodeCount() const;
+
+ /**
+ The number of leaf chunks for this actor.
+
+ \return number of leaf chunks for this actor.
+ */
+ uint32_t getLeafChunkCount() const;
+
+ /**
+ Access to graph node linked list for this actor. The index returned is that of a link in the FamilyHeader's getGraphNodeIndexLinks().
+
+ \return the index of the head of the graph node linked list.
+ */
+ uint32_t getFirstGraphNodeIndex() const;
+
+ /**
+ Access to the index of the first subsupport chunk.
+
+ \return the index of the first subsupport chunk.
+ */
+ uint32_t getFirstSubsupportChunkIndex() const;
+
+ /**
+ Access to the support graph.
+
+ \return the support graph associated with this actor.
+ */
+ const SupportGraph* getGraph() const;
+
+ /**
+ Access the instance graph for islands searching.
+
+ Return the dynamic data generated for the support graph. (See FamilyGraph.)
+ This is used to store current connectivity information based upon bond and chunk healths, as well as cached intermediate data for faster incremental updates.
+ */
+ FamilyGraph* getFamilyGraph() const;
+
+ /**
+ Access to the chunks, of type NvBlastChunk.
+
+ \return an array of size m_chunkCount.
+ */
+ NvBlastChunk* getChunks() const;
+
+ /**
+ Access to the bonds, of type NvBlastBond.
+
+ \return an array of size m_bondCount.
+ */
+ NvBlastBond* getBonds() const;
+
+ /**
+ Access to the health for each support chunk and subsupport chunk, of type float.
+
+ Use getAsset()->getContiguousLowerSupportIndex() to map lower-support chunk indices into the range of indices valid for this array.
+
+ \return a float array of chunk healths.
+ */
+ float* getLowerSupportChunkHealths() const;
+
+ /**
+ Access to the start of the subsupport chunk health array.
+
+ \return the array of health values associated with all descendants of support chunks.
+ */
+ float* getSubsupportChunkHealths() const;
+
+ /**
+ Bond health for the interfaces between two chunks, of type float. Since the bond is shared by two chunks, the same bond health is used for chunk[i] -> chunk[j] as for chunk[j] -> chunk[i].
+
+ \return the array of healths associated with all bonds in the support graph.
+ */
+ float* getBondHealths() const;
+
+ /**
+ Graph node index links, of type uint32_t. The successor to index[i] is m_graphNodeIndexLinksOffset[i]. A value of invalidIndex<uint32_t>() indicates no successor.
+
+ getGraphNodeIndexLinks returns an array of size m_asset->m_graphNodeCount.
+ */
+ const uint32_t* getGraphNodeIndexLinks() const;
+
+
+ //////// Iterators ////////
+
+ /**
+ Visible chunk iterator. Usage:
+
+ Given a solver actor a,
+
+ for (Actor::VisibleChunkIt i = a; (bool)i; ++i)
+ {
+ uint32_t visibleChunkIndex = (uint32_t)i;
+
+ // visibleChunkIndex references the asset index list
+ }
+
+ */
+ class VisibleChunkIt : public DListIt<uint32_t>
+ {
+ public:
+ /** Constructed from an actor. */
+ VisibleChunkIt(const Actor& actor);
+ };
+
+ /**
+ Graph node iterator. Usage:
+
+ Given a solver actor a,
+
+ for (Actor::GraphNodeIt i = a; (bool)i; ++i)
+ {
+ uint32_t graphNodeIndex = (uint32_t)i;
+
+ // graphNodeIndex references the asset's graph node index list
+ }
+
+ */
+ class GraphNodeIt : public LListIt<uint32_t>
+ {
+ public:
+ /** Constructed from an actor. */
+ GraphNodeIt(const Actor& actor);
+ };
+
+
+ //////// Operations ////////
+
+ /**
+ Create an actor from a descriptor (creates a family). This actor will represent an unfractured instance of the asset.
+ The asset must be in a valid state, for example each chunk hierarchy in it must contain at least one support chunk (a single
+ support chunk in a hierarchy corresponds to the root chunk). This will always be the case for assets created by NvBlastCreateAsset.
+
+ \param[in] family Family in which to create a new actor. The family must be valid and have no other actors in it. (See createFamily.)
+ \param[in] desc Actor initialization data, must be a valid pointer.
+ \param[in] scratch User-supplied scratch memory of size createRequiredScratch(desc) bytes.
+ \param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL.
+
+ \return the new actor if the input is valid (by the conditions described above), NULL otherwise.
+ */
+ static Actor* create(NvBlastFamily* family, const NvBlastActorDesc* desc, void* scratch, NvBlastLog logFn);
+
+ /**
+ Returns the size of the scratch space (in bytes) required to be passed into the create function, based upon
+ the family that will be passed to the create function.
+
+ \param[in] family The family being instanced.
+
+ \return the number of bytes required.
+ */
+ static size_t createRequiredScratch(const NvBlastFamily* family);
+
+ /**
+ Deserialize a single Actor from a buffer. An actor family must be given, into which
+ the actor will be inserted if it is compatible. That is, it must not share any chunks or internal
+ IDs with the actors already present in the block.
+
+ \param[in] family Family in which to deserialize the actor.
+ \param[in] buffer Buffer containing the serialized actor data.
+ \param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL.
+
+ \return the deserialized actor if successful, NULL otherwise.
+ */
+ static Actor* deserialize(NvBlastFamily* family, const void* buffer, NvBlastLog logFn);
+
+ /**
+ Serialize actor into single-actor buffer.
+
+ \param[out] buffer User-supplied buffer, must be at least of size given by NvBlastActorGetSerializationSize(actor).
+ \param[in] bufferSize The size of the user-supplied buffer. The buffer size must be less than 4GB. If NvBlastActorGetSerializationSize(actor) >= 4GB, this actor cannot be serialized with this method.
+ \param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL.
+
+ \return the number of bytes written to the buffer, or 0 if there is an error (such as an under-sized buffer).
+ */
+ uint32_t serialize(void* buffer, uint32_t bufferSize, NvBlastLog logFn) const;
+
+ /**
+ Calculate the space required to serialize this actor.
+
+ \param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL.
+
+ \return the required buffer size in bytes.
+ */
+ uint32_t serializationRequiredStorage(NvBlastLog logFn) const;
+
+ /**
+ Release this actor's association with a family, if any. This actor should be considered deleted
+ after this function is called.
+
+ \return true if release was successful (actor was active).
+ */
+ bool release();
+
+
+ //////// Damage and fracturing methods ////////
+
+ /**
+ Damage bond between two chunks by health amount (instance graph also will be notified in case bond is broken after).
+ */
+ uint32_t damageBond(uint32_t nodeIndex0, uint32_t nodeIndex1, float healthDamage);
+
+ /**
+ TODO: document
+ */
+ void damageBond(uint32_t nodeIndex0, uint32_t nodeIndex1, uint32_t bondIndex, float healthDamage);
+
+ /**
+ TODO: document
+ */
+ uint32_t damageBond(const NvBlastBondFractureData& cmd);
+
+ /**
+ See NvBlastActorGenerateFracture
+ */
+ void generateFracture(NvBlastFractureBuffers* commandBuffers, const NvBlastDamageProgram& program, const NvBlastProgramParams* programParams, NvBlastLog logFn, NvBlastTimers* timers) const;
+
+ /**
+ Hierarchically distribute damage to child chunks.
+
+ \param chunkIndex asset chunk index to hierarchically damage
+ \param suboffset index of the first sub-support health
+ \param healthDamage damage strength to apply
+ \param chunkHealths instance chunk healths
+ \param chunks asset chunk collection
+ */
+ void fractureSubSupportNoEvents(uint32_t chunkIndex, uint32_t suboffset, float healthDamage, float* chunkHealths, const NvBlastChunk* chunks);
+
+ /**
+ Hierarchically distribute damage to child chunks, recording a fracture event for each health damage applied.
+
+ If outBuffer is too small, events are dropped but the chunks are still damaged.
+
+ \param chunkIndex asset chunk index to hierarchically damage
+ \param suboffset index of the first sub-support health
+ \param healthDamage damage strength to apply
+ \param chunkHealths instance chunk healths
+ \param chunks asset chunk collection
+ \param outBuffer target buffer for fracture events
+ \param currentIndex current position in outBuffer - returns the number of damaged chunks
+ \param maxCount capacity of outBuffer
+ */
+ void fractureSubSupport(uint32_t chunkIndex, uint32_t suboffset, float healthDamage, float* chunkHealths, const NvBlastChunk* chunks, NvBlastChunkFractureData* outBuffer, uint32_t* currentIndex, const uint32_t maxCount);
+
+ /**
+ Apply chunk fracture commands hierarchically.
+
+ \param chunkFractureCount number of chunk fracture commands to apply
+ \param chunkFractures array of chunk fracture commands
+ */
+ void fractureNoEvents(uint32_t chunkFractureCount, const NvBlastChunkFractureData* chunkFractures);
+
+ /**
+ Apply chunk fracture commands hierarchically, recording a fracture event for each health damage applied.
+
+ If events array is too small, events are dropped but the chunks are still damaged.
+
+ \param chunkFractureCount number of chunk fracture commands to apply
+ \param commands array of chunk fracture commands
+ \param events target buffer for fracture events
+ \param eventsSize number of available entries in 'events'
+ \param count returns the number of damaged chunks
+ */
+ void fractureWithEvents(uint32_t chunkFractureCount, const NvBlastChunkFractureData* commands, NvBlastChunkFractureData* events, uint32_t eventsSize, uint32_t* count);
+
+ /**
+ Apply chunk fracture commands hierarchically, recording a fracture event for each health damage applied.
+
+ In-Place version: fracture commands are replaced by fracture events.
+
+ If inoutbuffer array is too small, events are dropped but the chunks are still damaged.
+
+ \param chunkFractureCount number of chunk fracture commands to apply
+ \param inoutbuffer array of chunk fracture commands to be replaced by events
+ \param eventsSize number of available entries in inoutbuffer
+ \param count returns the number of damaged chunks
+ */
+ void fractureInPlaceEvents(uint32_t chunkFractureCount, NvBlastChunkFractureData* inoutbuffer, uint32_t eventsSize, uint32_t* count);
+
+ /**
+ See NvBlastActorApplyFracture
+ */
+ void applyFracture(NvBlastFractureBuffers* eventBuffers, const NvBlastFractureBuffers* commands, NvBlastLog logFn, NvBlastTimers* timers);
+
+ /**
+ The scratch space required to call the findIslands function, or the split function, in bytes.
+
+ \return the number of bytes required.
+ */
+ size_t splitRequiredScratch() const;
+
+ /**
+ See NvBlastActorSplit
+ */
+ uint32_t split(NvBlastActorSplitEvent* result, uint32_t newActorsMaxCount, void* scratch, NvBlastLog logFn, NvBlastTimers* timers);
+
+ /**
+ Perform islands search. Bonds which are broken when their health values drop to zero (or below) may lead
+ to new islands of chunks which need to be split into new actors. This function labels all nodes in the instance
+ graph (see FamilyGraph) with a unique index per island that may be used as actor indices for new islands.
+
+ \param[in] scratch User-supplied scratch memory of size splitRequiredScratch().
+
+ \return the number of new islands found.
+ */
+ uint32_t findIslands(void* scratch);
+
+ /**
+ Partition this actor into smaller pieces.
+
+ If this actor represents a single support or subsupport chunk, then after this operation
+ this actor will be released if child chunks are created (see Return value), and its pointer will no longer be valid for use (unless it appears in the newActors list).
+
+ This function will not split a leaf chunk actor. In that case, the actor is not destroyed and this function returns 0.
+
+ \param[in] newActors user-supplied array of actor pointers to hold the actors generated from this partitioning.
+ This array must be of size equal to the number of leaf chunks in the asset, to guarantee
+ that all actors are reported. (See AssetDataHeader::m_leafChunkCount.)
+ \param[in] newActorsSize The size of the user-supplied newActors array.
+ \param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL.
+
+ \return the number of new actors created. If greater than newActorsSize, some actors are not reported in the newActors array.
+ */
+ uint32_t partition(Actor** newActors, uint32_t newActorsSize, NvBlastLog logFn);
+
+ /**
+ Recalculate the visible chunk list for this actor based upon its graph node list (does not modify subsupport chunk actors)
+ */
+ void updateVisibleChunksFromGraphNodes();
+
+ /**
+ Partition this actor into smaller pieces if it is a single lower-support chunk actor. Use this function on single support or sub-support chunks.
+
+ After this operation, if successful (child chunks created, see Return value), this actor will be released, and its pointer will no longer be valid for use.
+
+ This function will not split a leaf chunk actor. In that case, the actor is not destroyed and this function returns 0.
+
+ \param[in] newActors User-supplied array of actor pointers to hold the actors generated from this partitioning. Note: this actor will be released.
+ This array must be of size equal to the lower-support chunk's child count, to guarantee that all actors are reported.
+ \param[in] newActorsSize The size of the user-supplied newActors array.
+ \param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL.
+
+ \return the number of new actors created.
+ */
+ uint32_t partitionSingleLowerSupportChunk(Actor** newActors, uint32_t newActorsSize, NvBlastLog logFn);
+
+ /**
+ Partition this actor into smaller pieces. Use this function if this actor contains more than one support chunk.
+
+	After this operation, if successful, this actor will be released, and its pointer will no longer be valid for use (unless it appears in the newActors list).
+
+ \param[in] newActors User-supplied array of actor pointers to hold the actors generated from this partitioning. Note: this actor will not be released,
+ but will hold a subset of the graph nodes that it had before the function was called.
+ This array must be of size equal to the number of graph nodes in the asset, to guarantee
+ that all actors are reported.
+ \param[in] newActorsSize The size of the user-supplied newActors array.
+ \param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL.
+
+ \return the number of new actors created.
+ */
+ uint32_t partitionMultipleGraphNodes(Actor** newActors, uint32_t newActorsSize, NvBlastLog logFn);
+
+private:
+
+ //////// Data ////////
+
+ /**
+ Offset to block of memory which holds the data associated with all actors in this actor's lineage.
+ This offset is positive. The block address is this object's pointer _minus_ the m_familyOffset.
+ This value is initialized to 0, which denotes an invalid actor. Actors should be obtained through
+ the FamilyHeader::borrowActor API, which will create a valid offset, and
+ the FamilyHeader::returnActor API, which will zero the offset.
+ */
+ uint32_t m_familyOffset;
+
+ /**
+ The index of the head of a doubly-linked list of visible chunk indices. If m_firstVisibleChunkIndex == invalidIndex<uint32_t>(),
+ then there are no visible chunks.
+ */
+ uint32_t m_firstVisibleChunkIndex;
+
+ /**
+ The number of elements in the visible chunk list.
+ */
+ uint32_t m_visibleChunkCount;
+
+ /**
+ The index of the head of a singly-linked list of graph node indices. If m_firstGraphNodeIndex == invalidIndex<uint32_t>(),
+ then there are no graph nodes.
+ */
+ uint32_t m_firstGraphNodeIndex;
+
+ /**
+ The number of elements in the graph node list.
+ */
+ uint32_t m_graphNodeCount;
+
+ /**
+ The number of leaf chunks in this actor.
+ */
+ uint32_t m_leafChunkCount;
+};
+
+} // namespace Blast
+} // namespace Nv
+
+
+#include "NvBlastFamily.h"
+
+
+namespace Nv
+{
+namespace Blast
+{
+
+//////// Actor inline methods ////////
+
+// Returns the header of the family data block, which lives m_familyOffset bytes
+// _before_ this actor object.  Only valid for active actors (m_familyOffset != 0).
+NV_INLINE FamilyHeader* Actor::getFamilyHeader() const
+{
+    NVBLAST_ASSERT(isActive());
+    return (FamilyHeader*)((uintptr_t)this - (uintptr_t)m_familyOffset);
+}
+
+
+// Returns the asset shared by all actors in this actor's family.
+NV_INLINE const Asset* Actor::getAsset() const
+{
+    return getFamilyHeader()->m_asset;
+}
+
+
+// An actor is active iff it has a valid (non-zero) family offset; see m_familyOffset docs.
+NV_INLINE bool Actor::isActive() const
+{
+    return m_familyOffset != 0;
+}
+
+
+// An actor with no graph nodes represents a single subsupport chunk.
+NV_INLINE bool Actor::isSubSupportChunk() const
+{
+    return m_graphNodeCount == 0;
+}
+
+
+// An actor with exactly one graph node represents a single support chunk.
+NV_INLINE bool Actor::isSingleSupportChunk() const
+{
+    return m_graphNodeCount == 1;
+}
+
+
+// Returns this actor's index within its family, computed from its position in
+// the family's actor buffer (pointer arithmetic against getActors()).
+NV_INLINE uint32_t Actor::getIndex() const
+{
+    NVBLAST_ASSERT(isActive());
+    const FamilyHeader* header = getFamilyHeader();
+    NVBLAST_ASSERT(header != nullptr);
+    const size_t index = this - header->getActors();
+    NVBLAST_ASSERT(index <= UINT32_MAX);
+    return (uint32_t)index;
+}
+
+
+// Number of entries in this actor's visible chunk list.
+NV_INLINE uint32_t Actor::getVisibleChunkCount() const
+{
+    return m_visibleChunkCount;
+}
+
+
+// Head of the visible chunk doubly-linked list; invalidIndex<uint32_t>() if empty.
+NV_INLINE uint32_t Actor::getFirstVisibleChunkIndex() const
+{
+    return m_firstVisibleChunkIndex;
+}
+
+
+// Number of entries in this actor's graph node list.
+NV_INLINE uint32_t Actor::getGraphNodeCount() const
+{
+    return m_graphNodeCount;
+}
+
+
+// Number of leaf chunks belonging to this actor.
+NV_INLINE uint32_t Actor::getLeafChunkCount() const
+{
+    return m_leafChunkCount;
+}
+
+
+// Head of the graph node singly-linked list; invalidIndex<uint32_t>() if empty.
+NV_INLINE uint32_t Actor::getFirstGraphNodeIndex() const
+{
+    return m_firstGraphNodeIndex;
+}
+
+// Asset-wide index of the first subsupport chunk (chunks at or beyond this index are subsupport).
+NV_INLINE uint32_t Actor::getFirstSubsupportChunkIndex() const
+{
+    return getAsset()->m_firstSubsupportChunkIndex;
+}
+
+// The asset's (static) support graph.
+NV_INLINE const SupportGraph* Actor::getGraph() const
+{
+    return &getAsset()->m_graph;
+}
+
+// The family's (per-instance, mutable) graph state.
+NV_INLINE FamilyGraph* Actor::getFamilyGraph() const
+{
+    return getFamilyHeader()->getFamilyGraph();
+}
+
+// The asset's chunk array (shared by all actors in the family).
+NV_INLINE NvBlastChunk* Actor::getChunks() const
+{
+    return getAsset()->getChunks();
+}
+
+// The asset's bond array (shared by all actors in the family).
+NV_INLINE NvBlastBond* Actor::getBonds() const
+{
+    return getAsset()->getBonds();
+}
+
+// Family-wide lower-support chunk health array (indexed by graph node / lower-support chunk).
+NV_INLINE float* Actor::getLowerSupportChunkHealths() const
+{
+    return getFamilyHeader()->getLowerSupportChunkHealths();
+}
+
+// Family-wide subsupport chunk health array.
+NV_INLINE float* Actor::getSubsupportChunkHealths() const
+{
+    return getFamilyHeader()->getSubsupportChunkHealths();
+}
+
+// Family-wide bond health array (indexed by bond index).
+NV_INLINE float* Actor::getBondHealths() const
+{
+    return getFamilyHeader()->getBondHealths();
+}
+
+// Family-wide "next" links for the per-actor graph node singly-linked lists.
+NV_INLINE const uint32_t* Actor::getGraphNodeIndexLinks() const
+{
+    return getFamilyHeader()->getGraphNodeIndexLinks();
+}
+
+
+// Releases this actor back to its family: unlinks all of its graph nodes and
+// visible chunks from the family's shared link arrays, clears subtree chunk
+// ownership, and returns the actor slot to the family (decrementing the
+// family's actor reference count).  Returns false if the actor was already
+// inactive, true otherwise.
+NV_INLINE bool Actor::release()
+{
+    // Do nothing if this actor is not currently active.
+    if (!isActive())
+    {
+        return false;
+    }
+
+    FamilyHeader* header = getFamilyHeader();
+
+    // Clear the graph node list
+    uint32_t* graphNodeIndexLinks = getFamilyHeader()->getGraphNodeIndexLinks();
+    while (!isInvalidIndex(m_firstGraphNodeIndex))
+    {
+        // Pop the head and mark its link invalid so the node reads as unlinked.
+        const uint32_t graphNodeIndex = m_firstGraphNodeIndex;
+        m_firstGraphNodeIndex = graphNodeIndexLinks[m_firstGraphNodeIndex];
+        graphNodeIndexLinks[graphNodeIndex] = invalidIndex<uint32_t>();
+        --m_graphNodeCount;
+    }
+    NVBLAST_ASSERT(m_graphNodeCount == 0);
+
+    const Asset* asset = getAsset();
+
+    // Clear the visible chunk list
+    IndexDLink<uint32_t>* visibleChunkIndexLinks = header->getVisibleChunkIndexLinks();
+    uint32_t* chunkActorIndices = header->getChunkActorIndices();
+    while (!isInvalidIndex(m_firstVisibleChunkIndex))
+    {
+        // Descendants of the visible actor may be accessed again if the actor is deserialized. Clear subtree.
+        for (Asset::DepthFirstIt i(*asset, m_firstVisibleChunkIndex, true); (bool)i; ++i)
+        {
+            chunkActorIndices[(uint32_t)i] = invalidIndex<uint32_t>();
+        }
+        IndexDList<uint32_t>().removeListHead(m_firstVisibleChunkIndex, visibleChunkIndexLinks);
+        --m_visibleChunkCount;
+    }
+    NVBLAST_ASSERT(m_visibleChunkCount == 0);
+
+    // Clear the leaf chunk count
+    m_leafChunkCount = 0;
+
+    // This invalidates the actor and decrements the reference count
+    header->returnActor(*this);
+
+    return true;
+}
+
+
+// Splits this actor into smaller actors, dispatching on its support status:
+// with at most one graph node it is a single lower-support chunk and splits
+// into its child (subsupport) chunks; otherwise it splits along the islands
+// found among its graph nodes.  See the declaration comments for the
+// newActors sizing requirements of each path.
+NV_INLINE uint32_t Actor::partition(Actor** newActors, uint32_t newActorsSize, NvBlastLog logFn)
+{
+    NVBLAST_CHECK(newActorsSize == 0 || newActors != nullptr, logFn, "Nv::Blast::Actor::partition: NULL newActors pointer array input with non-zero newActorCount.", return 0);
+
+    // Call one of two partition functions depending on the actor's support status
+    return m_graphNodeCount <= 1 ?
+        partitionSingleLowerSupportChunk(newActors, newActorsSize, logFn) :    // This actor will partition into subsupport chunks
+        partitionMultipleGraphNodes(newActors, newActorsSize, logFn);    // This actor will partition into support chunks
+}
+
+
+//////// Actor::VisibleChunkIt inline methods ////////
+
+// Iterates this actor's visible chunk indices via the family's shared doubly-linked list storage.
+NV_INLINE Actor::VisibleChunkIt::VisibleChunkIt(const Actor& actor) : DListIt<uint32_t>(actor.m_firstVisibleChunkIndex, actor.getFamilyHeader()->getVisibleChunkIndexLinks())
+{
+}
+
+
+//////// Actor::GraphNodeIt inline methods ////////
+
+// Iterates this actor's graph node indices via the family's shared singly-linked list storage.
+NV_INLINE Actor::GraphNodeIt::GraphNodeIt(const Actor& actor) : LListIt<uint32_t>(actor.m_firstGraphNodeIndex, actor.getFamilyHeader()->getGraphNodeIndexLinks())
+{
+}
+
+} // namespace Blast
+} // namespace Nv
+
+
+/**
+Returns the closest chunk asset index for a supported actor.
+Helper function still used in tests.
+Has become obsolete with introduction of chunkMap and its inverse.
+*/
+uint32_t NvBlastActorClosestChunk(const float point[4], const NvBlastActor* actor, NvBlastLog logFn);
+
+
+#endif // ifndef NVBLASTACTOR_H
diff --git a/NvBlast/sdk/lowlevel/source/NvBlastActorSerializationBlock.cpp b/NvBlast/sdk/lowlevel/source/NvBlastActorSerializationBlock.cpp
new file mode 100644
index 0000000..e496a69
--- /dev/null
+++ b/NvBlast/sdk/lowlevel/source/NvBlastActorSerializationBlock.cpp
@@ -0,0 +1,575 @@
+/*
+* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+
+#include "NvBlastActor.h"
+#include "NvBlastActorSerializationBlock.h"
+#include "NvBlastFamilyGraph.h"
+
+#include <algorithm>
+
+
+namespace Nv
+{
+namespace Blast
+{
+
+//////// Actor static methods for serialization ////////
+
+// Reconstructs an actor within the given family from a serialization block
+// previously written by Actor::serialize.  The actor is restored into the same
+// family actor slot (serHeader->m_index), which must currently be free.
+// Returns the new actor, or NULL on failure (wrong format version, invalid or
+// occupied actor index, or - when NVBLAST_CHECK_PARAMS is set - a visible
+// chunk already owned by another actor).
+// NOTE(review): beyond the checks above, the buffer contents (sizes, chunk and
+// node indices) are trusted; corrupt serialized data is not detected here.
+Actor* Actor::deserialize(NvBlastFamily* family, const void* buffer, NvBlastLog logFn)
+{
+    NVBLAST_CHECK(family != nullptr, logFn, "Actor::deserialize: NULL family pointer input.", return nullptr);
+
+    const ActorSerializationHeader* serHeader = reinterpret_cast<const ActorSerializationHeader*>(buffer);
+    if (serHeader->m_formatVersion != ActorSerializationFormat::Current)
+    {
+        NVBLAST_LOG_ERROR(logFn, "Actor::deserialize: wrong data format. Serialization data must be converted to current version.");
+        return nullptr;
+    }
+
+    FamilyHeader* header = reinterpret_cast<FamilyHeader*>(family);
+    const Asset* asset = header->m_asset;
+    const Nv::Blast::SupportGraph& graph = asset->m_graph;
+    const uint32_t* graphChunkIndices = graph.getChunkIndices();
+    const uint32_t* graphAdjacencyPartition = graph.getAdjacencyPartition();
+    const uint32_t* graphAdjacentNodeIndices = graph.getAdjacentNodeIndices();
+    const uint32_t* graphAdjacentBondIndices = graph.getAdjacentBondIndices();
+
+    Actor* actor = nullptr;
+    const uint32_t actorIndex = serHeader->m_index;
+
+    // The actor can only be restored if its original family slot is free.
+    if (serHeader->m_index < header->getActorBufferSize())
+    {
+        if (!header->getActors()[actorIndex].isActive())
+        {
+            actor = header->borrowActor(serHeader->m_index);
+        }
+    }
+
+    if (actor == nullptr)
+    {
+        NVBLAST_LOG_ERROR(logFn, "Actor::deserialize: invalid actor index in serialized data. Actor not created.");
+        return nullptr;
+    }
+
+    // Commonly used data
+    uint32_t* chunkActorIndices = header->getChunkActorIndices();
+    FamilyGraph* familyGraph = header->getFamilyGraph();
+
+#if NVBLAST_CHECK_PARAMS
+    {
+        // Fail (and return the borrowed actor) if any serialized visible chunk
+        // is already claimed by another actor in this family.
+        const uint32_t* serVisibleChunkIndices = serHeader->getVisibleChunkIndices();
+        for (uint32_t i = 0; i < serHeader->m_visibleChunkCount; ++i)
+        {
+            const uint32_t visibleChunkIndex = serVisibleChunkIndices[i];
+            if (!isInvalidIndex(chunkActorIndices[visibleChunkIndex]))
+            {
+                NVBLAST_LOG_ERROR(logFn, "Actor::deserialize: visible chunk already has an actor in family. Actor not created.");
+                header->returnActor(*actor);
+                return nullptr;
+            }
+        }
+    }
+#endif
+
+    // Visible chunk indices and chunk actor indices
+    {
+        // Add visible chunks, set chunk subtree ownership
+        const uint32_t* serVisibleChunkIndices = serHeader->getVisibleChunkIndices();
+        IndexDLink<uint32_t>* visibleChunkIndexLinks = header->getVisibleChunkIndexLinks();
+        for (uint32_t i = serHeader->m_visibleChunkCount; i--;)    // Reverse-order, so the resulting linked list is in the original order
+        {
+            const uint32_t visibleChunkIndex = serVisibleChunkIndices[i];
+            NVBLAST_ASSERT(isInvalidIndex(visibleChunkIndexLinks[visibleChunkIndex].m_adj[0]) && isInvalidIndex(visibleChunkIndexLinks[visibleChunkIndex].m_adj[1]));
+            IndexDList<uint32_t>().insertListHead(actor->m_firstVisibleChunkIndex, visibleChunkIndexLinks, visibleChunkIndex);
+            // Mark the whole subtree of each visible chunk as owned by this actor.
+            for (Asset::DepthFirstIt j(*asset, visibleChunkIndex, true); (bool)j; ++j)
+            {
+                NVBLAST_ASSERT(isInvalidIndex(chunkActorIndices[(uint32_t)j]));
+                chunkActorIndices[(uint32_t)j] = actorIndex;
+            }
+        }
+        actor->m_visibleChunkCount = serHeader->m_visibleChunkCount;
+    }
+
+    // Graph node indices, leaf chunk count, and island IDs
+    {
+        // Add graph nodes; each restored node's island ID is the actor index.
+        const uint32_t* serGraphNodeIndices = serHeader->getGraphNodeIndices();
+        uint32_t* graphNodeIndexLinks = header->getGraphNodeIndexLinks();
+        uint32_t* islandIDs = familyGraph->getIslandIds();
+        for (uint32_t i = serHeader->m_graphNodeCount; i--;)    // Reverse-order, so the resulting linked list is in the original order
+        {
+            const uint32_t graphNodeIndex = serGraphNodeIndices[i];
+            NVBLAST_ASSERT(isInvalidIndex(graphNodeIndexLinks[graphNodeIndex]));
+            graphNodeIndexLinks[graphNodeIndex] = actor->m_firstGraphNodeIndex;
+            actor->m_firstGraphNodeIndex = graphNodeIndex;
+            islandIDs[graphNodeIndex] = actorIndex;
+        }
+        actor->m_graphNodeCount = serHeader->m_graphNodeCount;
+        actor->m_leafChunkCount = serHeader->m_leafChunkCount;
+    }
+
+    // Lower support chunk healths
+    {
+        const float* serLowerSupportChunkHealths = serHeader->getLowerSupportChunkHealths();
+        float* subsupportHealths = header->getSubsupportChunkHealths();
+        // NOTE(review): despite its name, this holds the UPPER-support chunk count,
+        // used below as the asset index of the first subsupport chunk - presumably
+        // these coincide (support chunks precede subsupport chunks); confirm.
+        const uint32_t subsupportChunkCount = asset->getUpperSupportChunkCount();
+        if (actor->m_graphNodeCount > 0)
+        {
+            // Healths were serialized depth-first from each support chunk, in
+            // graph node list order; restore in the same order.
+            uint32_t serLowerSupportChunkCount = 0;
+            float* graphNodeHealths = header->getLowerSupportChunkHealths();
+            for (Nv::Blast::Actor::GraphNodeIt i = *actor; (bool)i; ++i)
+            {
+                const uint32_t graphNodeIndex = (uint32_t)i;
+                graphNodeHealths[graphNodeIndex] = serLowerSupportChunkHealths[serLowerSupportChunkCount++];
+                const uint32_t supportChunkIndex = graphChunkIndices[graphNodeIndex];
+                Asset::DepthFirstIt j(*asset, supportChunkIndex);
+                NVBLAST_ASSERT((bool)j);
+                ++j;    // Skip first (support) chunk, it's already been handled
+                for (; (bool)j; ++j)
+                {
+                    subsupportHealths[(uint32_t)j] = serLowerSupportChunkHealths[serLowerSupportChunkCount++];
+                }
+            }
+        }
+        else    // Single subsupport chunk
+        if (!isInvalidIndex(actor->m_firstVisibleChunkIndex))
+        {
+            NVBLAST_ASSERT(actor->m_firstVisibleChunkIndex >= subsupportChunkCount);
+            subsupportHealths[actor->m_firstVisibleChunkIndex - subsupportChunkCount] = *serLowerSupportChunkHealths;
+        }
+    }
+
+    // Bond healths
+    // serBondCount also drives the edge-removed restore below; bonds are
+    // enumerated in the same (node order, i < j) traversal in both passes.
+    uint32_t serBondCount = 0;
+    {
+        const float* serBondHealths = serHeader->getBondHealths();
+        float* bondHealths = header->getBondHealths();
+        for (Nv::Blast::Actor::GraphNodeIt i = *actor; (bool)i; ++i)
+        {
+            const uint32_t graphNodeIndex = (uint32_t)i;
+            for (uint32_t adjacentIndex = graphAdjacencyPartition[graphNodeIndex]; adjacentIndex < graphAdjacencyPartition[graphNodeIndex + 1]; ++adjacentIndex)
+            {
+                const uint32_t adjacentNodeIndex = graphAdjacentNodeIndices[adjacentIndex];
+                if (adjacentNodeIndex > graphNodeIndex)    // So as not to double-count
+                {
+                    // Only count if the adjacent node belongs to this actor
+                    const uint32_t adjacentChunkIndex = graphChunkIndices[adjacentNodeIndex];
+                    if (chunkActorIndices[adjacentChunkIndex] == actorIndex)
+                    {
+                        const uint32_t adjacentBondIndex = graphAdjacentBondIndices[adjacentIndex];
+                        bondHealths[adjacentBondIndex] = serBondHealths[serBondCount++];
+                    }
+                }
+            }
+        }
+    }
+
+    // Fast routes
+    {
+        const uint32_t* serFastRoute = serHeader->getFastRoute();
+        uint32_t* fastRoute = header->getFamilyGraph()->getFastRoute();
+        for (Nv::Blast::Actor::GraphNodeIt i = *actor; (bool)i; ++i)
+        {
+            fastRoute[(uint32_t)i] = *serFastRoute++;
+        }
+    }
+
+    // Hop counts
+    {
+        const uint32_t* serHopCounts = serHeader->getHopCounts();
+        uint32_t* hopCounts = header->getFamilyGraph()->getHopCounts();
+        for (Nv::Blast::Actor::GraphNodeIt i = *actor; (bool)i; ++i)
+        {
+            hopCounts[(uint32_t)i] = *serHopCounts++;
+        }
+    }
+
+    // Edge removed array
+    if (serBondCount > 0)
+    {
+        // Replay the same bond traversal as the "Bond healths" pass; for each
+        // bond whose serialized "removed" bit is clear, clear the family bit.
+        uint32_t serBondIndex = 0;
+        const FixedBoolArray* serEdgeRemovedArray = serHeader->getEdgeRemovedArray();
+        FixedBoolArray* edgeRemovedArray = familyGraph->getIsEdgeRemoved();
+        for (Nv::Blast::Actor::GraphNodeIt i = *actor; (bool)i; ++i)
+        {
+            const uint32_t graphNodeIndex = (uint32_t)i;
+            for (uint32_t adjacentIndex = graphAdjacencyPartition[graphNodeIndex]; adjacentIndex < graphAdjacencyPartition[graphNodeIndex + 1]; ++adjacentIndex)
+            {
+                const uint32_t adjacentNodeIndex = graphAdjacentNodeIndices[adjacentIndex];
+                if (adjacentNodeIndex > graphNodeIndex)    // So as not to double-count
+                {
+                    // Only count if the adjacent node belongs to this actor
+                    const uint32_t adjacentChunkIndex = graphChunkIndices[adjacentNodeIndex];
+                    if (chunkActorIndices[adjacentChunkIndex] == actorIndex)
+                    {
+                        if (!serEdgeRemovedArray->test(serBondIndex))
+                        {
+                            const uint32_t adjacentBondIndex = graphAdjacentBondIndices[adjacentIndex];
+                            edgeRemovedArray->reset(adjacentBondIndex);
+                        }
+                        ++serBondIndex;
+                    }
+                }
+            }
+        }
+    }
+
+    return actor;
+}
+
+
+//////// Actor member methods for serialization ////////
+
+// Writes this actor's state (visible chunks, graph nodes, healths, bond
+// healths, fast routes, hop counts, edge-removed bits) into the caller's
+// buffer in the ActorSerializationHeader layout.  Returns the number of bytes
+// written, or 0 on failure (dirty graph nodes, or insufficient bufferSize).
+uint32_t Actor::serialize(void* buffer, uint32_t bufferSize, NvBlastLog logFn) const
+{
+    // Set up pointers and such
+    const Asset* asset = getAsset();
+    const Nv::Blast::SupportGraph& graph = asset->m_graph;
+    const uint32_t* graphChunkIndices = graph.getChunkIndices();
+    const uint32_t* graphAdjacencyPartition = graph.getAdjacencyPartition();
+    const uint32_t* graphAdjacentNodeIndices = graph.getAdjacentNodeIndices();
+    const uint32_t* graphAdjacentBondIndices = graph.getAdjacentBondIndices();
+    const FamilyHeader* header = getFamilyHeader();
+    const uint32_t* chunkActorIndices = header->getChunkActorIndices();
+    const uint32_t thisActorIndex = getIndex();
+
+    // Make sure there are no dirty nodes
+    if (m_graphNodeCount)
+    {
+        const uint32_t* firstDirtyNodeIndices = header->getFamilyGraph()->getFirstDirtyNodeIndices();
+        if (!isInvalidIndex(firstDirtyNodeIndices[thisActorIndex]))
+        {
+            NVBLAST_LOG_ERROR(logFn, "Nv::Blast::Actor::serialize: instance graph has dirty nodes. Call Nv::Blast::Actor::findIslands before serializing.");
+            return 0;
+        }
+    }
+
+    // Running write position within the buffer; 64-bit so the bounds checks
+    // below cannot wrap when sizes are large.
+    uint64_t offset = 0;
+
+    // Header
+    ActorSerializationHeader* serHeader = reinterpret_cast<ActorSerializationHeader*>(buffer);
+    offset = align16(sizeof(ActorSerializationHeader));
+    if (offset > bufferSize)
+    {
+        return 0;    // Buffer size insufficient
+    }
+    serHeader->m_formatVersion = ActorSerializationFormat::Current;
+    serHeader->m_size = 0;    // Will be updated below
+    serHeader->m_index = thisActorIndex;
+    serHeader->m_visibleChunkCount = m_visibleChunkCount;
+    serHeader->m_graphNodeCount = m_graphNodeCount;
+    serHeader->m_leafChunkCount = m_leafChunkCount;
+
+    // Visible chunk indices
+    {
+        serHeader->m_visibleChunkIndicesOffset = (uint32_t)offset;
+        offset = align16(offset + m_visibleChunkCount*sizeof(uint32_t));
+        if (offset > bufferSize)
+        {
+            // NOTE(review): message reads "Actor::Actor::serialize" - doubled scope.
+            NVBLAST_LOG_ERROR(logFn, "Nv::Blast::Actor::Actor::serialize: buffer size exceeded.");
+            return 0;    // Buffer size insufficient
+        }
+        uint32_t* serVisibleChunkIndices = serHeader->getVisibleChunkIndices();
+        uint32_t serVisibleChunkCount = 0;
+        for (Nv::Blast::Actor::VisibleChunkIt i = *this; (bool)i; ++i)
+        {
+            NVBLAST_ASSERT(serVisibleChunkCount < m_visibleChunkCount);
+            serVisibleChunkIndices[serVisibleChunkCount++] = (uint32_t)i;
+        }
+        NVBLAST_ASSERT(serVisibleChunkCount == m_visibleChunkCount);
+    }
+
+    // Graph node indices
+    {
+        serHeader->m_graphNodeIndicesOffset = (uint32_t)offset;
+        offset = align16(offset + m_graphNodeCount*sizeof(uint32_t));
+        if (offset > bufferSize)
+        {
+            NVBLAST_LOG_ERROR(logFn, "Nv::Blast::Actor::serialize: buffer size exceeded.");
+            return 0;    // Buffer size insufficient
+        }
+        uint32_t* serGraphNodeIndices = serHeader->getGraphNodeIndices();
+        uint32_t serGraphNodeCount = 0;
+        for (Nv::Blast::Actor::GraphNodeIt i = *this; (bool)i; ++i)
+        {
+            NVBLAST_ASSERT(serGraphNodeCount < m_graphNodeCount);
+            serGraphNodeIndices[serGraphNodeCount++] = (uint32_t)i;
+        }
+        NVBLAST_ASSERT(serGraphNodeCount == m_graphNodeCount);
+    }
+
+    // Lower support chunk healths
+    // Written depth-first from each support chunk, in graph node list order;
+    // deserialize reads them back in the same order.
+    {
+        serHeader->m_lowerSupportChunkHealthsOffset = (uint32_t)offset;
+        float* serLowerSupportChunkHealths = serHeader->getLowerSupportChunkHealths();
+        const float* subsupportHealths = header->getSubsupportChunkHealths();
+        // NOTE(review): despite its name, this holds the UPPER-support chunk count,
+        // used below as the asset index of the first subsupport chunk - presumably
+        // these coincide (support chunks precede subsupport chunks); confirm.
+        const uint32_t subsupportChunkCount = asset->getUpperSupportChunkCount();
+        if (m_graphNodeCount > 0)
+        {
+            uint32_t serLowerSupportChunkCount = 0;
+            const float* graphNodeHealths = header->getLowerSupportChunkHealths();
+            for (Nv::Blast::Actor::GraphNodeIt i = *this; (bool)i; ++i)
+            {
+                const uint32_t graphNodeIndex = (uint32_t)i;
+                // NOTE(review): this per-node write is not bounds-checked against
+                // bufferSize (the subsupport writes below are) - confirm intended.
+                serLowerSupportChunkHealths[serLowerSupportChunkCount++] = graphNodeHealths[graphNodeIndex];
+                offset += sizeof(float);
+                const uint32_t supportChunkIndex = graphChunkIndices[graphNodeIndex];
+                Asset::DepthFirstIt j(*asset, supportChunkIndex);
+                NVBLAST_ASSERT((bool)j);
+                ++j;    // Skip first (support) chunk, it's already been handled
+                for (; (bool)j; ++j)
+                {
+                    if (offset >= bufferSize)
+                    {
+                        NVBLAST_LOG_ERROR(logFn, "Nv::Blast::Actor::serialize: buffer size exceeded.");
+                        return 0;    // Buffer size insufficient
+                    }
+                    serLowerSupportChunkHealths[serLowerSupportChunkCount++] = subsupportHealths[(uint32_t)j - subsupportChunkCount];
+                    offset += sizeof(float);
+                }
+            }
+        }
+        else    // Single subsupport chunk
+        if (!isInvalidIndex(m_firstVisibleChunkIndex))
+        {
+            NVBLAST_ASSERT(m_firstVisibleChunkIndex >= subsupportChunkCount);
+            if (offset >= bufferSize)
+            {
+                NVBLAST_LOG_ERROR(logFn, "Nv::Blast::Actor::serialize: buffer size exceeded.");
+                return 0;    // Buffer size insufficient
+            }
+            *serLowerSupportChunkHealths = subsupportHealths[m_firstVisibleChunkIndex - subsupportChunkCount];
+            offset += sizeof(float);
+        }
+    }
+    offset = align16(offset);
+
+    // Bond healths
+    // Bonds are enumerated per node in adjacency order, counting each bond once
+    // (adjacentNodeIndex > graphNodeIndex) and only when both endpoints belong
+    // to this actor.  serBondCount also sizes the edge-removed array below.
+    uint32_t serBondCount = 0;
+    {
+        serHeader->m_bondHealthsOffset = (uint32_t)offset;
+        float* serBondHealths = serHeader->getBondHealths();
+        const float* bondHealths = header->getBondHealths();
+        for (Nv::Blast::Actor::GraphNodeIt i = *this; (bool)i; ++i)
+        {
+            const uint32_t graphNodeIndex = (uint32_t)i;
+            for (uint32_t adjacentIndex = graphAdjacencyPartition[graphNodeIndex]; adjacentIndex < graphAdjacencyPartition[graphNodeIndex + 1]; ++adjacentIndex)
+            {
+                const uint32_t adjacentNodeIndex = graphAdjacentNodeIndices[adjacentIndex];
+                if (adjacentNodeIndex > graphNodeIndex)    // So as not to double-count
+                {
+                    // Only count if the adjacent node belongs to this actor
+                    const uint32_t adjacentChunkIndex = graphChunkIndices[adjacentNodeIndex];
+                    if (chunkActorIndices[adjacentChunkIndex] == thisActorIndex)
+                    {
+                        // NOTE(review): "offset >= bufferSize" admits a write at
+                        // offset == bufferSize - sizeof(float) + k for unaligned
+                        // bufferSize; presumably bufferSize is 16-aligned - confirm.
+                        if (offset >= bufferSize)
+                        {
+                            NVBLAST_LOG_ERROR(logFn, "Nv::Blast::Actor::serialize: buffer size exceeded.");
+                            return 0;    // Buffer size insufficient
+                        }
+                        const uint32_t adjacentBondIndex = graphAdjacentBondIndices[adjacentIndex];
+                        serBondHealths[serBondCount++] = bondHealths[adjacentBondIndex];
+                        offset += sizeof(float);
+                    }
+                }
+            }
+        }
+    }
+    offset = align16(offset);
+
+    // Fast routes
+    {
+        serHeader->m_fastRouteOffset = (uint32_t)offset;
+        offset = align16(offset + m_graphNodeCount*sizeof(uint32_t));
+        if (offset > bufferSize)
+        {
+            NVBLAST_LOG_ERROR(logFn, "Nv::Blast::Actor::serialize: buffer size exceeded.");
+            return 0;    // Buffer size insufficient
+        }
+        uint32_t* serFastRoute = serHeader->getFastRoute();
+        const uint32_t* fastRoute = header->getFamilyGraph()->getFastRoute();
+        for (Nv::Blast::Actor::GraphNodeIt i = *this; (bool)i; ++i)
+        {
+            *serFastRoute++ = fastRoute[(uint32_t)i];
+        }
+    }
+
+    // Hop counts
+    {
+        serHeader->m_hopCountsOffset = (uint32_t)offset;
+        offset = align16(offset + m_graphNodeCount*sizeof(uint32_t));
+        if (offset > bufferSize)
+        {
+            NVBLAST_LOG_ERROR(logFn, "Nv::Blast::Actor::serialize: buffer size exceeded.");
+            return 0;    // Buffer size insufficient
+        }
+        uint32_t* serHopCounts = serHeader->getHopCounts();
+        const uint32_t* hopCounts = header->getFamilyGraph()->getHopCounts();
+        for (Nv::Blast::Actor::GraphNodeIt i = *this; (bool)i; ++i)
+        {
+            *serHopCounts++ = hopCounts[(uint32_t)i];
+        }
+    }
+
+    // Edge removed array
+    // Replays the bond traversal above, recording a bit per bond: bits start
+    // set (fill) and are cleared for bonds still present in the family graph.
+    if (serBondCount > 0)
+    {
+        serHeader->m_edgeRemovedArrayOffset = (uint32_t)offset;
+        offset = align16(offset + FixedBoolArray::requiredMemorySize(serBondCount));
+        if (offset > bufferSize)
+        {
+            NVBLAST_LOG_ERROR(logFn, "Nv::Blast::Actor::serialize: buffer size exceeded.");
+            return 0;    // Buffer size insufficient
+        }
+        uint32_t serBondIndex = 0;
+        FixedBoolArray* serEdgeRemovedArray = serHeader->getEdgeRemovedArray();
+        new (serEdgeRemovedArray)FixedBoolArray(serBondCount);
+        serEdgeRemovedArray->fill();    // Reset bits as we find bonds
+        const FixedBoolArray* edgeRemovedArray = header->getFamilyGraph()->getIsEdgeRemoved();
+        for (Nv::Blast::Actor::GraphNodeIt i = *this; (bool)i; ++i)
+        {
+            const uint32_t graphNodeIndex = (uint32_t)i;
+            for (uint32_t adjacentIndex = graphAdjacencyPartition[graphNodeIndex]; adjacentIndex < graphAdjacencyPartition[graphNodeIndex + 1]; ++adjacentIndex)
+            {
+                const uint32_t adjacentNodeIndex = graphAdjacentNodeIndices[adjacentIndex];
+                if (adjacentNodeIndex > graphNodeIndex)    // So as not to double-count
+                {
+                    // Only count if the adjacent node belongs to this actor
+                    const uint32_t adjacentChunkIndex = graphChunkIndices[adjacentNodeIndex];
+                    if (chunkActorIndices[adjacentChunkIndex] == thisActorIndex)
+                    {
+                        const uint32_t adjacentBondIndex = graphAdjacentBondIndices[adjacentIndex];
+                        if (!edgeRemovedArray->test(adjacentBondIndex))
+                        {
+                            serEdgeRemovedArray->reset(serBondIndex);
+                        }
+                        ++serBondIndex;
+                    }
+                }
+            }
+        }
+    }
+
+    // Finally record size
+    serHeader->m_size = static_cast<uint32_t>(offset);
+
+    return serHeader->m_size;
+}
+
+
+// Computes the buffer size (bytes) needed by Actor::serialize for this actor,
+// by counting this actor's lower-support chunks and intra-actor bonds and
+// delegating to getActorSerializationSize.  Returns 0 (with a warning) if the
+// size would exceed 32 bits.
+uint32_t Actor::serializationRequiredStorage(NvBlastLog logFn) const
+{
+    const Asset* asset = getAsset();
+    const Nv::Blast::SupportGraph& graph = asset->m_graph;
+    const uint32_t* graphChunkIndices = graph.getChunkIndices();
+    const uint32_t* graphAdjacencyPartition = graph.getAdjacencyPartition();
+    const uint32_t* graphAdjacentNodeIndices = graph.getAdjacentNodeIndices();
+    const uint32_t* graphNodeIndexLinks = getFamilyHeader()->getGraphNodeIndexLinks();
+    const uint32_t* chunkActorIndices = getFamilyHeader()->getChunkActorIndices();
+    const uint32_t thisActorIndex = getIndex();
+
+    // Lower-support chunk count and bond counts for this actor need to be calculated. Iterate over all support chunks to count these.
+    uint32_t lowerSupportChunkCount = 0;
+    uint32_t bondCount = 0;
+    if (m_graphNodeCount > 0)
+    {
+        for (uint32_t graphNodeIndex = m_firstGraphNodeIndex; !isInvalidIndex(graphNodeIndex); graphNodeIndex = graphNodeIndexLinks[graphNodeIndex])
+        {
+            // Update bond count
+            // Same traversal as Actor::serialize: each bond once (adjacent index
+            // greater), only when both endpoints belong to this actor.
+            const uint32_t supportChunkIndex = graphChunkIndices[graphNodeIndex];
+            for (uint32_t adjacentIndex = graphAdjacencyPartition[graphNodeIndex]; adjacentIndex < graphAdjacencyPartition[graphNodeIndex + 1]; ++adjacentIndex)
+            {
+                const uint32_t adjacentNodeIndex = graphAdjacentNodeIndices[adjacentIndex];
+                if (adjacentNodeIndex > graphNodeIndex)    // So as not to double-count
+                {
+                    // Only count if the adjacent node belongs to this actor
+                    const uint32_t adjacentChunkIndex = graphChunkIndices[adjacentNodeIndex];
+                    if (chunkActorIndices[adjacentChunkIndex] == thisActorIndex)
+                    {
+                        ++bondCount;
+                    }
+                }
+            }
+
+            // Update lower-support chunk count
+            for (Asset::DepthFirstIt i(*asset, supportChunkIndex); (bool)i; ++i)
+            {
+                ++lowerSupportChunkCount;
+            }
+        }
+    }
+    else    // Subsupport chunk
+    {
+        ++lowerSupportChunkCount;
+    }
+
+    const uint64_t dataSize = getActorSerializationSize(m_visibleChunkCount, lowerSupportChunkCount, m_graphNodeCount, bondCount);
+
+    if (dataSize > UINT32_MAX)
+    {
+        NVBLAST_LOG_WARNING(logFn, "Nv::Blast::Actor::serializationRequiredStorage: Serialization block size exceeds 4GB. Returning 0.\n");
+        return 0;
+    }
+
+    return static_cast<uint32_t>(dataSize);
+}
+
+} // namespace Blast
+} // namespace Nv
+
+
+// API implementation
+
+extern "C"
+{
+
+// C API wrapper: validates the actor pointer and its active state, then
+// forwards to Actor::serializationRequiredStorage.  Returns 0 on any failure.
+uint32_t NvBlastActorGetSerializationSize(const NvBlastActor* actor, NvBlastLog logFn)
+{
+    NVBLAST_CHECK(actor != nullptr, logFn, "NvBlastActorGetSerializationSize: NULL actor pointer input.", return 0);
+
+    const Nv::Blast::Actor& a = *static_cast<const Nv::Blast::Actor*>(actor);
+
+    if (!a.isActive())
+    {
+        NVBLAST_LOG_ERROR(logFn, "NvBlastActorGetSerializationSize: inactive actor pointer input.");
+        return 0;
+    }
+
+    return a.serializationRequiredStorage(logFn);
+}
+
+
+// C API wrapper: validates buffer and actor pointers and the actor's active
+// state, then forwards to Actor::serialize.  Returns bytes written, 0 on failure.
+uint32_t NvBlastActorSerialize(void* buffer, uint32_t bufferSize, const NvBlastActor* actor, NvBlastLog logFn)
+{
+    NVBLAST_CHECK(buffer != nullptr, logFn, "NvBlastActorSerialize: NULL buffer pointer input.", return 0);
+    NVBLAST_CHECK(actor != nullptr, logFn, "NvBlastActorSerialize: NULL actor pointer input.", return 0);
+
+    const Nv::Blast::Actor& a = *static_cast<const Nv::Blast::Actor*>(actor);
+
+    if (!a.isActive())
+    {
+        NVBLAST_LOG_ERROR(logFn, "NvBlastActorSerialize: inactive actor pointer input.");
+        return 0;
+    }
+
+    return a.serialize(buffer, bufferSize, logFn);
+}
+
+
+// C API wrapper: validates the family and buffer pointers, then forwards to
+// Actor::deserialize.  Returns the restored actor, or NULL on failure.
+NvBlastActor* NvBlastFamilyDeserializeActor(NvBlastFamily* family, const void* buffer, NvBlastLog logFn)
+{
+    NVBLAST_CHECK(family != nullptr, logFn, "NvBlastFamilyDeserializeActor: NULL family input. No actor deserialized.", return nullptr);
+    NVBLAST_CHECK(buffer != nullptr, logFn, "NvBlastFamilyDeserializeActor: NULL buffer pointer input. No actor deserialized.", return nullptr);
+
+    return Nv::Blast::Actor::deserialize(family, buffer, logFn);
+}
+
+} // extern "C"
diff --git a/NvBlast/sdk/lowlevel/source/NvBlastActorSerializationBlock.h b/NvBlast/sdk/lowlevel/source/NvBlastActorSerializationBlock.h
new file mode 100644
index 0000000..1388a74
--- /dev/null
+++ b/NvBlast/sdk/lowlevel/source/NvBlastActorSerializationBlock.h
@@ -0,0 +1,151 @@
+/*
+* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#ifndef NVBLASTACTORSERIALIZATIONBLOCK_H
+#define NVBLASTACTORSERIALIZATIONBLOCK_H
+
+
+#include "NvBlastFixedBoolArray.h"
+
+
+namespace Nv
+{
+namespace Blast
+{
+
+/**
+Struct-enum which keeps track of the actor serialization format.
+*/
+// Version tags compared against ActorSerializationHeader::m_formatVersion;
+// deserialization rejects any block whose version is not Current.
+struct ActorSerializationFormat
+{
+    enum Version
+    {
+        /** Initial version */
+        Initial,
+
+        // New formats must come before Count. They should be given descriptive names with more information in comments.
+
+        /** The number of serialization formats. */
+        Count,
+
+        /** The current version. This should always be Count-1 */
+        Current = Count - 1
+    };
+};
+
+
+/**
+Data header at the beginning of a NvBlastActor serialization block
+
+The block address may be cast to a valid ActorSerializationHeader pointer.
+
+Serialization state is only valid if partition has been called since the last call to findIslands().
+*/
+// Layout of an actor serialization block.  The NvBlastBlockData /
+// NvBlastBlockArrayData macros each declare a uint32_t byte-offset member
+// (relative to the header start) plus an accessor returning a typed pointer
+// into the block; the offsets are written by Actor::serialize.
+struct ActorSerializationHeader
+{
+    /**
+    A number which is incremented every time the data layout changes.
+    */
+    uint32_t    m_formatVersion;
+
+    /**
+    The size of the serialization block, including this header.
+
+    Memory sizes are restricted to 32-bit representable values.
+    */
+    uint32_t    m_size;
+
+    /**
+    The index of the actor within its family.
+    */
+    uint32_t    m_index;
+
+    /**
+    The number of elements in the visible chunk indices list.
+    */
+    uint32_t    m_visibleChunkCount;
+
+    /**
+    The number of elements in the graph node indices list.
+    */
+    uint32_t    m_graphNodeCount;
+
+    /**
+    The number of leaf chunks in this actor.
+    */
+    uint32_t    m_leafChunkCount;
+
+    /**
+    Visible chunk indices, of type uint32_t.
+    */
+    NvBlastBlockArrayData(uint32_t, m_visibleChunkIndicesOffset, getVisibleChunkIndices, m_visibleChunkCount);
+
+    /**
+    Graph node indices, of type uint32_t.
+    */
+    NvBlastBlockArrayData(uint32_t, m_graphNodeIndicesOffset, getGraphNodeIndices, m_graphNodeCount);
+
+    /**
+    Healths for lower support chunks in this actor, in breadth-first order from the support chunks associated with the graph nodes. Type float.
+    */
+    NvBlastBlockData(float, m_lowerSupportChunkHealthsOffset, getLowerSupportChunkHealths);
+
+    /**
+    Healths for bonds associated with support chunks in this actor, in order of graph adjacency from associated graph nodes, i < j only. Type float.
+    */
+    NvBlastBlockData(float, m_bondHealthsOffset, getBondHealths);
+
+    /**
+    Fast route in instance graph calculated for each graph node in this actor, of type uint32_t.
+    */
+    NvBlastBlockArrayData(uint32_t, m_fastRouteOffset, getFastRoute, m_graphNodeCount);
+
+    /**
+    Hop counts in instance graph calculated for each graph node in this actor, of type uint32_t.
+    */
+    NvBlastBlockArrayData(uint32_t, m_hopCountsOffset, getHopCounts, m_graphNodeCount);
+
+    /**
+    "Edge removed" bits for bonds associated with support chunks in this actor, in order of graph adjacency from associated graph nodes, i < j only. Type FixedBoolArray.
+    */
+    NvBlastBlockData(FixedBoolArray, m_edgeRemovedArrayOffset, getEdgeRemovedArray);
+};
+
+
+//////// Global functions ////////
+
+/**
+A buffer size sufficient to serialize an actor with a given visible chunk count, lower support chunk count, graph node count, and bond count.
+
+\param[in] visibleChunkCount The number of visible chunks
+\param[in] lowerSupportChunkCount The number of lower-support chunks in the asset.
+\param[in] graphNodeCount The number of graph nodes in the asset.
+\param[in] bondCount The number of graph bonds in the asset.
+
+\return the required buffer size in bytes.
+*/
+/**
+A buffer size sufficient to serialize an actor with a given visible chunk count, lower support chunk count, graph node count, and bond count.
+
+Each section is 16-byte aligned, matching the offsets Actor::serialize produces.
+
+\param[in] visibleChunkCount        The number of visible chunks in the actor.
+\param[in] lowerSupportChunkCount    The number of lower-support chunks in the actor.
+\param[in] graphNodeCount            The number of graph nodes in the actor.
+\param[in] bondCount                The number of intra-actor graph bonds.
+
+\return the required buffer size in bytes.
+*/
+NV_INLINE size_t getActorSerializationSize(uint32_t visibleChunkCount, uint32_t lowerSupportChunkCount, uint32_t graphNodeCount, uint32_t bondCount)
+{
+    // Family offsets
+    const size_t visibleChunkIndicesOffset = align16(sizeof(ActorSerializationHeader));    // size = visibleChunkCount*sizeof(uint32_t)
+    const size_t graphNodeIndicesOffset = align16(visibleChunkIndicesOffset + visibleChunkCount*sizeof(uint32_t));    // size = graphNodeCount*sizeof(uint32_t)
+    const size_t lowerSupportHealthsOffset = align16(graphNodeIndicesOffset + graphNodeCount*sizeof(uint32_t));    // size = lowerSupportChunkCount*sizeof(float)
+    const size_t bondHealthsOffset = align16(lowerSupportHealthsOffset + lowerSupportChunkCount*sizeof(float));    // size = bondCount*sizeof(float)
+    const size_t fastRouteOffset = align16(bondHealthsOffset + bondCount*sizeof(float));    // size = graphNodeCount*sizeof(uint32_t)
+    const size_t hopCountsOffset = align16(fastRouteOffset + graphNodeCount*sizeof(uint32_t));    // size = graphNodeCount*sizeof(uint32_t)
+    const size_t edgeRemovedArrayOffset = align16(hopCountsOffset + graphNodeCount*sizeof(uint32_t));    // size = 0 or FixedBoolArray::requiredMemorySize(bondCount)
+    return align16(edgeRemovedArrayOffset + (bondCount == 0 ? 0 : FixedBoolArray::requiredMemorySize(bondCount)));
+}
+
+} // namespace Blast
+} // namespace Nv
+
+
+#endif // ifndef NVBLASTACTORSERIALIZATIONBLOCK_H
diff --git a/NvBlast/sdk/lowlevel/source/NvBlastAsset.cpp b/NvBlast/sdk/lowlevel/source/NvBlastAsset.cpp
new file mode 100644
index 0000000..29fc6b0
--- /dev/null
+++ b/NvBlast/sdk/lowlevel/source/NvBlastAsset.cpp
@@ -0,0 +1,931 @@
+/*
+* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#include "NvBlastAssert.h"
+#include "NvBlastAsset.h"
+#include "NvBlastActor.h"
+#include "NvBlastMath.h"
+#include "NvBlastPreprocessorInternal.h"
+#include "NvBlastIndexFns.h"
+#include "NvBlastActorSerializationBlock.h"
+#include "NvBlastMemory.h"
+
+#include <algorithm>
+
+
+namespace Nv
+{
+namespace Blast
+{
+
+
+//////// Local helper functions ////////
+
+
+/**
+Helper function to validate the input parameters for NvBlastCreateAsset. See NvBlastCreateAsset for parameter definitions.
+*/
+static bool solverAssetBuildValidateInput(void* mem, const NvBlastAssetDesc* desc, void* scratch, NvBlastLog logFn)
+{
+    // Each precondition is checked in turn; the first failure logs a specific error and rejects the build.
+    // Check order is part of the observable behavior (it determines which error message is reported).
+    if (mem == nullptr)
+    {
+        NVBLAST_LOG_ERROR(logFn, "AssetBuildValidateInput: NULL mem pointer input.");
+        return false;
+    }
+
+    if (desc == nullptr)
+    {
+        NVBLAST_LOG_ERROR(logFn, "AssetBuildValidateInput: NULL desc pointer input.");
+        return false;
+    }
+
+    // An asset must contain at least one chunk
+    if (desc->chunkCount == 0)
+    {
+        NVBLAST_LOG_ERROR(logFn, "AssetBuildValidateInput: Zero chunk count not allowed.");
+        return false;
+    }
+
+    if (desc->chunkDescs == nullptr)
+    {
+        NVBLAST_LOG_ERROR(logFn, "AssetBuildValidateInput: NULL chunkDescs pointer input.");
+        return false;
+    }
+
+    // Bond descriptors are only required when bondCount is non-zero
+    if (desc->bondCount != 0 && desc->bondDescs == nullptr)
+    {
+        NVBLAST_LOG_ERROR(logFn, "AssetBuildValidateInput: bondCount non-zero but NULL bondDescs pointer input.");
+        return false;
+    }
+
+    if (scratch == nullptr)
+    {
+        NVBLAST_LOG_ERROR(logFn, "AssetBuildValidateInput: NULL scratch pointer input.");
+        return false;
+    }
+
+    return true;
+}
+
+
+/**
+Byte offsets (relative to the start of the Asset memory block) of each variable-size data section.
+Filled in by createAssetDataOffsets.
+*/
+struct AssetDataOffsets
+{
+    size_t m_chunks;                    // NvBlastChunk array, chunkCount elements
+    size_t m_bonds;                     // NvBlastBond array, bondCount elements
+    size_t m_subtreeLeafChunkCounts;    // uint32_t per chunk
+    size_t m_supportChunkIndices;       // uint32_t per graph node
+    size_t m_chunkToGraphNodeMap;       // uint32_t per chunk
+    size_t m_graphAdjacencyPartition;   // uint32_t per graph node, plus one terminator entry
+    size_t m_graphAdjacentNodeIndices;  // uint32_t per bond, doubled (adjacency is stored symmetrically)
+    size_t m_graphAdjacentBondIndices;  // uint32_t per bond, doubled (adjacency is stored symmetrically)
+};
+
+
+/**
+Computes the offset of each data section within an Asset memory block, using the
+NvBlastCreateOffset* macros (each section is 16-byte aligned, starting after the Asset struct itself).
+
+\param[out] offsets         Receives the per-section byte offsets.
+\param[in]  chunkCount      Total number of chunks in the asset.
+\param[in]  graphNodeCount  Number of support graph nodes.
+\param[in]  bondCount       Number of bonds.
+
+\return the total block size in bytes, 16-byte aligned.
+*/
+static size_t createAssetDataOffsets(AssetDataOffsets& offsets, uint32_t chunkCount, uint32_t graphNodeCount, uint32_t bondCount)
+{
+    NvBlastCreateOffsetStart(sizeof(Asset));
+    NvBlastCreateOffsetAlign16(offsets.m_chunks, chunkCount * sizeof(NvBlastChunk));
+    NvBlastCreateOffsetAlign16(offsets.m_bonds, bondCount * sizeof(NvBlastBond));
+    NvBlastCreateOffsetAlign16(offsets.m_subtreeLeafChunkCounts, chunkCount * sizeof(uint32_t));
+    NvBlastCreateOffsetAlign16(offsets.m_supportChunkIndices, graphNodeCount * sizeof(uint32_t));
+    NvBlastCreateOffsetAlign16(offsets.m_chunkToGraphNodeMap, chunkCount * sizeof(uint32_t));
+    NvBlastCreateOffsetAlign16(offsets.m_graphAdjacencyPartition, (graphNodeCount + 1) * sizeof(uint32_t));
+    NvBlastCreateOffsetAlign16(offsets.m_graphAdjacentNodeIndices, (2 * bondCount) * sizeof(uint32_t));
+    NvBlastCreateOffsetAlign16(offsets.m_graphAdjacentBondIndices, (2 * bondCount) * sizeof(uint32_t));
+    return NvBlastCreateOffsetEndAlign16();
+}
+
+
+/**
+Zero-initializes a block of memory and fills in the Asset header, counts, and data-section offsets.
+Only the fixed-size fields are written here; chunk/bond/graph payload data is filled in by the caller.
+
+\param[in] mem                       16-byte-aligned memory block of at least createAssetDataOffsets(...) bytes.
+\param[in] id                        Asset ID to store.
+\param[in] chunkCount                Total number of chunks.
+\param[in] graphNodeCount            Number of support graph nodes.
+\param[in] leafChunkCount            Number of leaf chunks.
+\param[in] firstSubsupportChunkIndex Index of the first subsupport chunk (chunkCount if none).
+\param[in] bondCount                 Number of bonds.
+\param[in] logFn                     User-supplied message function (see NvBlastLog definition).  May be NULL.
+
+\return the initialized Asset, or nullptr if the computed data size would exceed 4GB.
+*/
+Asset* initializeAsset(void* mem, NvBlastID id, uint32_t chunkCount, uint32_t graphNodeCount, uint32_t leafChunkCount, uint32_t firstSubsupportChunkIndex, uint32_t bondCount, NvBlastLog logFn)
+{
+    // Data offsets
+    AssetDataOffsets offsets;
+    const size_t dataSize = createAssetDataOffsets(offsets, chunkCount, graphNodeCount, bondCount);
+
+    // Restricting our data size to < 4GB so that we may use uint32_t offsets
+    if (dataSize > (size_t)UINT32_MAX)
+    {
+        NVBLAST_LOG_ERROR(logFn, "Nv::Blast::allocateAsset: Asset data size will exceed 4GB. Instance not created.\n");
+        return nullptr;
+    }
+
+    // Zero memory and cast to Asset
+    Asset* asset = reinterpret_cast<Asset*>(memset(mem, 0, dataSize));
+
+    // Fill in fields.  Graph-related offsets are stored relative to the embedded m_graph member,
+    // not to the start of the asset, hence the graphOffset subtraction below.
+    const size_t graphOffset = NV_OFFSET_OF(Asset, m_graph);
+    asset->m_header.dataType = NvBlastDataBlock::AssetDataBlock;
+    asset->m_header.formatVersion = NvBlastAssetDataFormat::Current;
+    asset->m_header.size = (uint32_t)dataSize;
+    asset->m_header.reserved = 0;
+    asset->m_ID = id;
+    asset->m_chunkCount = chunkCount;
+    asset->m_graph.m_nodeCount = graphNodeCount;
+    asset->m_graph.m_chunkIndicesOffset = (uint32_t)(offsets.m_supportChunkIndices - graphOffset);
+    asset->m_graph.m_adjacencyPartitionOffset = (uint32_t)(offsets.m_graphAdjacencyPartition - graphOffset);
+    asset->m_graph.m_adjacentNodeIndicesOffset = (uint32_t)(offsets.m_graphAdjacentNodeIndices - graphOffset);
+    asset->m_graph.m_adjacentBondIndicesOffset = (uint32_t)(offsets.m_graphAdjacentBondIndices - graphOffset);
+    asset->m_leafChunkCount = leafChunkCount;
+    asset->m_firstSubsupportChunkIndex = firstSubsupportChunkIndex;
+    asset->m_bondCount = bondCount;
+    asset->m_chunksOffset = (uint32_t)offsets.m_chunks;
+    asset->m_bondsOffset = (uint32_t)offsets.m_bonds;
+    asset->m_subtreeLeafChunkCountsOffset = (uint32_t)offsets.m_subtreeLeafChunkCounts;
+    asset->m_chunkToGraphNodeMapOffset = (uint32_t)offsets.m_chunkToGraphNodeMap;
+
+    // Ensure Bonds remain aligned
+    NV_COMPILE_TIME_ASSERT((sizeof(NvBlastBond) & 0xf) == 0);
+
+    // Ensure Bonds are aligned - note, this requires that the block be aligned
+    NVBLAST_ASSERT((uintptr_t(asset->getBonds()) & 0xf) == 0);
+
+    return asset;
+}
+
+
+/**
+Tests for a loop in a digraph starting at a given graph vertex.
+
+Using the implied digraph given by the chunkDescs' parentChunkIndex fields, the graph is walked from the chunk descriptor chunkDescs[chunkIndex],
+to determine if that walk leads to a loop.
+
+Input:
+chunkDescs - the chunk descriptors
+chunkIndex - the index of the starting chunk descriptor
+
+Return:
+true if a loop is found, false otherwise.
+*/
+NV_INLINE bool testForLoop(const NvBlastChunkDesc* chunkDescs, uint32_t chunkIndex)
+{
+    NVBLAST_ASSERT(!isInvalidIndex(chunkIndex));
+
+    // Two-speed walk up the parent chain (Floyd-style cycle detection):
+    // chunkIndex1 advances one step per iteration, chunkIndex2 advances two.
+    // If they ever coincide, the parent chain contains a cycle; if the fast
+    // walker reaches an invalid (root) index, the chain terminates normally.
+    uint32_t chunkIndex1 = chunkDescs[chunkIndex].parentChunkIndex;
+    if (isInvalidIndex(chunkIndex1))
+    {
+        return false;
+    }
+
+    uint32_t chunkIndex2 = chunkDescs[chunkIndex1].parentChunkIndex;
+    if (isInvalidIndex(chunkIndex2))
+    {
+        return false;
+    }
+
+    do
+    {
+        // advance index 1
+        chunkIndex1 = chunkDescs[chunkIndex1].parentChunkIndex; // No need to check for termination here. index 2 would find it first.
+
+        // advance index 2 twice and check for incidence with index 1 as well as termination
+        if ((chunkIndex2 = chunkDescs[chunkIndex2].parentChunkIndex) == chunkIndex1)
+        {
+            return true;
+        }
+        if (isInvalidIndex(chunkIndex2))
+        {
+            return false;
+        }
+        if ((chunkIndex2 = chunkDescs[chunkIndex2].parentChunkIndex) == chunkIndex1)
+        {
+            return true;
+        }
+    } while (!isInvalidIndex(chunkIndex2));
+
+    return false;
+}
+
+
+/**
+Tests a set of chunk descriptors to see if the implied hierarchy describes valid trees.
+
+A single tree implies that only one of the chunkDescs has an invalid (invalidIndex<uint32_t>()) parentChunkIndex, and all other
+chunks are descendants of that chunk. The passed set of chunks is checked to contain one or more such single trees.
+
+Input:
+chunkCount - the number of chunk descriptors
+chunkDescs - an array of chunk descriptors of length chunkCount
+logFn - message function (see NvBlastLog definition).
+
+Return:
+true if the descriptors imply valid trees, false otherwise.
+*/
+static bool testForValidTrees(uint32_t chunkCount, const NvBlastChunkDesc* chunkDescs, NvBlastLog logFn)
+{
+    // Reject the chunk set if any walk up a parent chain cycles back on itself
+    uint32_t chunkIndex = 0;
+    while (chunkIndex < chunkCount)
+    {
+        if (testForLoop(chunkDescs, chunkIndex))
+        {
+            NVBLAST_LOG_WARNING(logFn, "testForValidTrees: loop found. Asset will not be created.");
+            return false;
+        }
+        ++chunkIndex;
+    }
+
+    // No loops found starting from any chunk; the hierarchy forms valid trees
+    return true;
+}
+
+
+/**
+Struct to hold chunk indices and bond index for sorting
+
+Utility struct used by NvBlastCreateAsset in order to arrange bond data in a lookup table, and also to easily identify redundant input.
+*/
+struct BondSortData
+{
+    BondSortData(uint32_t c0, uint32_t c1, uint32_t b) : m_c0(c0), m_c1(c1), m_b(b) {}
+
+    uint32_t m_c0;  // First graph node index of the bond (sort key, major)
+    uint32_t m_c1;  // Second graph node index of the bond (sort key, minor)
+    uint32_t m_b;   // Index of the bond in the original descriptor array (not part of the sort key)
+};
+
+
+/**
+Functional class for sorting a list of BondSortData
+*/
+class BondsOrdered
+{
+public:
+    // Lexicographic order on (m_c0, m_c1); m_b does not participate in the comparison.
+    bool operator () (const BondSortData& bond0, const BondSortData& bond1) const
+    {
+        if (bond0.m_c0 != bond1.m_c0)
+        {
+            return bond0.m_c0 < bond1.m_c0;
+        }
+        return bond0.m_c1 < bond1.m_c1;
+    }
+};
+
+
+//////// Asset static functions ////////
+
+size_t Asset::getMemorySize(const NvBlastAssetDesc* desc)
+{
+    NVBLAST_ASSERT(desc != nullptr);
+
+    // The support graph has one node per support-flagged chunk descriptor
+    const uint32_t graphNodeCount = (uint32_t)std::count_if(desc->chunkDescs, desc->chunkDescs + desc->chunkCount,
+        [](const NvBlastChunkDesc& chunkDesc) { return (chunkDesc.flags & NvBlastChunkDesc::SupportFlag) != 0; });
+
+    // Only the total footprint is needed; the individual section offsets are discarded
+    AssetDataOffsets offsets;
+    return createAssetDataOffsets(offsets, desc->chunkCount, graphNodeCount, desc->bondCount);
+}
+
+
+/**
+Returns the scratch size in bytes required by Asset::create for the given descriptor.
+The layout mirrors the scratch consumption in create(): chunk annotation bytes, a chunk-to-graph-node map,
+the symmetrized bond sort array (two entries per bond), and a bond remap array - each section 16-byte
+aligned, plus 16 bytes of padding so that create() can bump the scratch pointer itself to 16-byte alignment.
+*/
+size_t Asset::createRequiredScratch(const NvBlastAssetDesc* desc)
+{
+#if NVBLAST_CHECK_PARAMS
+    if (desc == nullptr)
+    {
+        NVBLAST_ALWAYS_ASSERT();
+        return 0;
+    }
+#endif
+
+    // Aligned and padded
+    return 16 +
+        align16(desc->chunkCount*sizeof(char)) +
+        align16(desc->chunkCount*sizeof(uint32_t)) +
+        align16(2 * desc->bondCount*sizeof(Nv::Blast::BondSortData)) +
+        align16(desc->bondCount*sizeof(uint32_t));
+}
+
+
+/**
+Create an asset in caller-supplied memory from a descriptor.
+
+Build stages: (optional) parameter validation, hierarchy loop check, exact support coverage check,
+chunk order check, bond filtering (invalid/duplicate/non-support bonds are dropped with a warning),
+then the chunk array, support graph adjacency, bond data, and subtree leaf counts are written.
+
+\param[in] mem      Block of memory of at least getMemorySize(desc) bytes.  Must be 16-byte aligned.
+\param[in] desc     Asset descriptor (see NvBlastAssetDesc).
+\param[in] scratch  Scratch memory of at least createRequiredScratch(desc) bytes.
+\param[in] logFn    User-supplied message function (see NvBlastLog definition).  May be NULL.
+
+\return the asset constructed in mem, or nullptr on failure.
+*/
+Asset* Asset::create(void* mem, const NvBlastAssetDesc* desc, void* scratch, NvBlastLog logFn)
+{
+#if NVBLAST_CHECK_PARAMS
+    if (!solverAssetBuildValidateInput(mem, desc, scratch, logFn))
+    {
+        return nullptr;
+    }
+#else
+    NV_UNUSED(solverAssetBuildValidateInput);
+#endif
+
+    NVBLAST_CHECK((reinterpret_cast<uintptr_t>(mem) & 0xF) == 0, logFn, "NvBlastCreateAsset: mem pointer not 16-byte aligned.", return nullptr);
+
+    // Make sure we have valid trees before proceeding
+    if (!testForValidTrees(desc->chunkCount, desc->chunkDescs, logFn))
+    {
+        return nullptr;
+    }
+
+    scratch = (void*)align16((size_t)scratch); // Bump to 16-byte alignment (see padding in NvBlastGetRequiredScratchForCreateAsset)
+
+    // reserve chunkAnnotation on scratch
+    char* chunkAnnotation = reinterpret_cast<char*>(scratch); scratch = Nv::Blast::pointerOffset(scratch, align16(desc->chunkCount));
+
+    // test for coverage, chunkAnnotation will be filled there.
+    uint32_t leafChunkCount;
+    uint32_t supportChunkCount;
+    if (!ensureExactSupportCoverage(supportChunkCount, leafChunkCount, chunkAnnotation, desc->chunkCount, const_cast<NvBlastChunkDesc*>(desc->chunkDescs), true, logFn))
+    {
+        return nullptr;
+    }
+
+    // test for valid chunk order
+    if (!testForValidChunkOrder(desc->chunkCount, desc->chunkDescs, chunkAnnotation, scratch))
+    {
+        NVBLAST_LOG_ERROR(logFn, "NvBlastCreateAsset: chunks order is invalid. Asset will not be created. Use Asset helper functions such as NvBlastBuildAssetDescChunkReorderMap to fix descriptor order.");
+        return nullptr;
+    }
+
+    // Find first subsupport chunk
+    uint32_t firstSubsupportChunkIndex = desc->chunkCount; // Set value to chunk count if no subsupport chunks are found
+    for (uint32_t i = 0; i < desc->chunkCount; ++i)
+    {
+        if ((chunkAnnotation[i] & ChunkAnnotation::UpperSupport) == 0)
+        {
+            firstSubsupportChunkIndex = i;
+            break;
+        }
+    }
+
+    // Create map from global indices to graph node indices and initialize to invalid values
+    uint32_t* graphNodeIndexMap = (uint32_t*)scratch; scratch = Nv::Blast::pointerOffset(scratch, align16(desc->chunkCount * sizeof(uint32_t)));
+    memset(graphNodeIndexMap, 0xFF, desc->chunkCount*sizeof(uint32_t));
+
+    // Fill graphNodeIndexMap.  Graph node indices are assigned in increasing chunk index order,
+    // so the map is order-preserving on support chunks.
+    uint32_t graphNodeCount = 0;
+    for (uint32_t i = 0; i < desc->chunkCount; ++i)
+    {
+        if ((chunkAnnotation[i] & ChunkAnnotation::Support) != 0)
+        {
+            graphNodeIndexMap[i] = graphNodeCount++;
+        }
+    }
+    NVBLAST_ASSERT(graphNodeCount == supportChunkCount);
+
+    // Scratch array for bond sorting, of size 2*desc->bondCount
+    Nv::Blast::BondSortData* bondSortArray = (Nv::Blast::BondSortData*)scratch; scratch = Nv::Blast::pointerOffset(scratch, align16(2 * desc->bondCount*sizeof(Nv::Blast::BondSortData)));
+
+    // Bond remapping array of size desc->bondCount
+    uint32_t* bondMap = (uint32_t*)scratch;
+    memset(bondMap, 0xFF, desc->bondCount*sizeof(uint32_t));
+
+    // Eliminate bad or redundant bonds, finding actual bond count
+    uint32_t bondCount = 0;
+    if (desc->bondCount > 0)
+    {
+        // Check for duplicates from input data as well as non-support chunk indices. All such bonds must be removed.
+        bool invalidFound = false;
+        bool duplicateFound = false;
+        bool nonSupportFound = false;
+
+        // Construct temp array of chunk index pairs and bond indices. This array is symmetrized to hold the reversed chunk indices as well.
+        uint32_t bondSortArraySize = 0;
+        Nv::Blast::BondSortData* t = bondSortArray;
+        for (uint32_t i = 0; i < desc->bondCount; ++i)
+        {
+            const NvBlastBondDesc& bondDesc = desc->bondDescs[i];
+            const uint32_t chunkIndex0 = bondDesc.chunkIndices[0];
+            const uint32_t chunkIndex1 = bondDesc.chunkIndices[1];
+
+            // Out-of-range or self-referencing bonds are invalid
+            if (chunkIndex0 >= desc->chunkCount || chunkIndex1 >= desc->chunkCount || chunkIndex0 == chunkIndex1)
+            {
+                invalidFound = true;
+                continue;
+            }
+
+            // Bonds may only join support chunks (chunks with a graph node)
+            const uint32_t graphIndex0 = graphNodeIndexMap[chunkIndex0];
+            const uint32_t graphIndex1 = graphNodeIndexMap[chunkIndex1];
+            if (Nv::Blast::isInvalidIndex(graphIndex0) || Nv::Blast::isInvalidIndex(graphIndex1))
+            {
+                nonSupportFound = true;
+                continue;
+            }
+
+            t[bondSortArraySize++] = Nv::Blast::BondSortData(graphIndex0, graphIndex1, i);
+            t[bondSortArraySize++] = Nv::Blast::BondSortData(graphIndex1, graphIndex0, i);
+        }
+
+        // Sort the temp array
+        std::sort(bondSortArray, bondSortArray + bondSortArraySize, Nv::Blast::BondsOrdered());
+
+        uint32_t symmetrizedBondCount = 0;
+        for (uint32_t i = 0; i < bondSortArraySize; ++i)
+        {
+            const bool duplicate = i > 0 && bondSortArray[i].m_c0 == bondSortArray[i - 1].m_c0 && bondSortArray[i].m_c1 == bondSortArray[i - 1].m_c1; // Since the array is sorted, uniqueness may be tested by only considering the previous element
+            duplicateFound = duplicateFound || duplicate;
+            if (!duplicate)
+            { // Keep this bond
+                if (symmetrizedBondCount != i)
+                {
+                    bondSortArray[symmetrizedBondCount] = bondSortArray[i]; // Compact array if we've dropped bonds
+                }
+                ++symmetrizedBondCount;
+            }
+        }
+        NVBLAST_ASSERT((symmetrizedBondCount & 1) == 0); // Because we symmetrized, there should be an even number
+
+        bondCount = symmetrizedBondCount / 2;
+
+        // Report warnings
+        if (invalidFound)
+        {
+            NVBLAST_LOG_WARNING(logFn, "NvBlastCreateAsset: Invalid bonds found (non-existent or same chunks referenced) and removed from asset.");
+        }
+        if (duplicateFound)
+        {
+            NVBLAST_LOG_WARNING(logFn, "NvBlastCreateAsset: Duplicate bonds found and removed from asset.");
+        }
+        if (nonSupportFound)
+        {
+            NVBLAST_LOG_WARNING(logFn, "NvBlastCreateAsset: Bonds referencing non-support chunks found and removed from asset.");
+        }
+    }
+
+    // Allocate memory for asset
+    NvBlastID id;
+    memset(&id, 0, sizeof(NvBlastID)); // To do - create an actual id
+    Nv::Blast::Asset* asset = initializeAsset(mem, id, desc->chunkCount, supportChunkCount, leafChunkCount, firstSubsupportChunkIndex, bondCount, logFn);
+
+    // Asset data pointers
+    Nv::Blast::SupportGraph& graph = asset->m_graph;
+    NvBlastChunk* chunks = asset->getChunks();
+    NvBlastBond* bonds = asset->getBonds();
+    uint32_t* subtreeLeafChunkCounts = asset->getSubtreeLeafChunkCounts();
+
+    // Create chunks
+    uint32_t* graphChunkIndices = graph.getChunkIndices();
+    for (uint32_t i = 0; i < desc->chunkCount; ++i)
+    {
+        const NvBlastChunkDesc& chunkDesc = desc->chunkDescs[i];
+        const uint32_t newChunkIndex = i;
+        NvBlastChunk& assetChunk = chunks[newChunkIndex];
+        memcpy(assetChunk.centroid, chunkDesc.centroid, 3 * sizeof(float));
+        assetChunk.volume = chunkDesc.volume;
+        assetChunk.parentChunkIndex = chunkDesc.parentChunkIndex; // Copied directly; an invalid index (no parent) is preserved as-is
+        assetChunk.firstChildIndex = Nv::Blast::invalidIndex<uint32_t>(); // Will be filled in below
+        assetChunk.childIndexStop = assetChunk.firstChildIndex;
+        assetChunk.userData = chunkDesc.userData;
+        if (!Nv::Blast::isInvalidIndex(graphNodeIndexMap[newChunkIndex]))
+        {
+            graphChunkIndices[graphNodeIndexMap[newChunkIndex]] = newChunkIndex;
+        }
+    }
+
+    // Copy chunkToGraphNodeMap
+    memcpy(asset->getChunkToGraphNodeMap(), graphNodeIndexMap, desc->chunkCount * sizeof(uint32_t));
+
+    // Count chunk children
+    for (uint32_t i = 0; i < desc->chunkCount; ++i)
+    {
+        const uint32_t parentChunkIndex = chunks[i].parentChunkIndex;
+        if (!Nv::Blast::isInvalidIndex(parentChunkIndex))
+        {
+            if (chunks[parentChunkIndex].childIndexStop == chunks[parentChunkIndex].firstChildIndex)
+            {
+                chunks[parentChunkIndex].childIndexStop = chunks[parentChunkIndex].firstChildIndex = i;
+            }
+            ++chunks[parentChunkIndex].childIndexStop;
+        }
+    }
+
+    // Create bonds
+    uint32_t* graphAdjacencyPartition = graph.getAdjacencyPartition();
+    uint32_t* graphAdjacentNodeIndices = graph.getAdjacentNodeIndices();
+    uint32_t* graphAdjacentBondIndices = graph.getAdjacentBondIndices();
+    if (bondCount > 0)
+    {
+        // Create the lookup table from the sorted array
+        Nv::Blast::createIndexStartLookup<uint32_t>(graphAdjacencyPartition, 0, graphNodeCount - 1, &bondSortArray->m_c0, 2 * bondCount, sizeof(Nv::Blast::BondSortData));
+
+        // Write the adjacent chunk and bond index data
+        uint32_t bondIndex = 0;
+        for (uint32_t i = 0; i < 2 * bondCount; ++i)
+        {
+            const Nv::Blast::BondSortData& bondSortData = bondSortArray[i];
+            graphAdjacentNodeIndices[i] = bondSortData.m_c1;
+            const uint32_t oldBondIndex = bondSortData.m_b;
+            const NvBlastBondDesc& bondDesc = desc->bondDescs[oldBondIndex];
+            if (Nv::Blast::isInvalidIndex(bondMap[oldBondIndex]))
+            {
+                bonds[bondIndex] = bondDesc.bond;
+                // Our convention is that the bond normal points away from the lower-indexed chunk, towards the higher-indexed chunk.
+                // If our new (graph node) indexing would reverse this direction from the bond descriptor's indexing, we must flip the normal.
+                // NOTE(review): because graph node indices are assigned in increasing chunk index order, the two orderings
+                // below should always agree, making this flip unreachable - confirm whether a mismatch test was intended.
+                const bool nodeIndicesOrdered = bondSortData.m_c0 < bondSortData.m_c1;
+                const bool descNodeIndicesOrdered = bondDesc.chunkIndices[0] < bondDesc.chunkIndices[1];
+                if (descNodeIndicesOrdered && !nodeIndicesOrdered)
+                {
+                    float* normal = bonds[bondIndex].normal;
+                    normal[0] = -normal[0];
+                    normal[1] = -normal[1];
+                    normal[2] = -normal[2];
+                }
+                bondMap[oldBondIndex] = bondIndex++;
+            }
+            graphAdjacentBondIndices[i] = bondMap[oldBondIndex];
+        }
+    }
+    else
+    {
+        // No bonds - zero out all partition elements (including last one, to give zero size for adjacent data arrays)
+        memset(graphAdjacencyPartition, 0, (graphNodeCount + 1)*sizeof(uint32_t));
+    }
+
+    // Count subtree leaf chunks
+    memset(subtreeLeafChunkCounts, 0, desc->chunkCount*sizeof(uint32_t));
+    uint32_t* breadthFirstChunkIndices = graphNodeIndexMap; // Reusing graphNodeIndexMap ... graphNodeIndexMap may no longer be used
+    for (uint32_t startChunkIndex = 0; startChunkIndex < desc->chunkCount; ++startChunkIndex)
+    {
+        if (!Nv::Blast::isInvalidIndex(chunks[startChunkIndex].parentChunkIndex))
+        {
+            break; // Only iterate through root chunks at this level
+        }
+        // Walk the subtree bottom-up (reverse breadth-first order) accumulating leaf counts into parents
+        const uint32_t enumeratedChunkCount = enumerateChunkHierarchyBreadthFirst(breadthFirstChunkIndices, desc->chunkCount, chunks, startChunkIndex, false);
+        for (uint32_t chunkNum = enumeratedChunkCount; chunkNum--;)
+        {
+            const uint32_t chunkIndex = breadthFirstChunkIndices[chunkNum];
+            const NvBlastChunk& chunk = chunks[chunkIndex];
+            if (chunk.childIndexStop <= chunk.firstChildIndex)
+            {
+                subtreeLeafChunkCounts[chunkIndex] = 1;
+            }
+            NVBLAST_ASSERT(!isInvalidIndex(chunk.parentChunkIndex)); // Parent index is valid because root chunk is not included in this list (because of 'false' passed into enumerateChunkHierarchyBreadthFirst, above)
+            subtreeLeafChunkCounts[chunk.parentChunkIndex] += subtreeLeafChunkCounts[chunkIndex];
+        }
+    }
+
+    return asset;
+}
+
+
+/**
+Ensures exact support coverage: every leaf-to-root chain in the chunk hierarchy must contain exactly
+one support-flagged chunk.  When testOnly is true, returns false on the first coverage violation
+without modifying anything; otherwise repairs the support flags (removing redundant support marks,
+keeping the highest, and adding support at the highest possible level where it is missing) and
+reports repairs through logFn.  chunkAnnotation is filled with ChunkAnnotation flags as a side effect.
+
+\return true iff the input already had exact coverage (no repairs were needed).
+*/
+bool Asset::ensureExactSupportCoverage(uint32_t& supportChunkCount, uint32_t& leafChunkCount, char* chunkAnnotation, uint32_t chunkCount, NvBlastChunkDesc* chunkDescs, bool testOnly, NvBlastLog logFn)
+{
+    // Clear leafChunkCount
+    leafChunkCount = 0;
+
+    memset(chunkAnnotation, 0, chunkCount);
+
+    // Walk up the hierarchy from all chunks and mark all parents
+    for (uint32_t i = 0; i < chunkCount; ++i)
+    {
+        if (chunkAnnotation[i] & Asset::ChunkAnnotation::Parent)
+        {
+            continue;   // Already visited via an earlier walk; everything above it is marked too
+        }
+        uint32_t chunkIndex = i;
+        while (!isInvalidIndex(chunkIndex = chunkDescs[chunkIndex].parentChunkIndex))
+        {
+            chunkAnnotation[chunkIndex] = Asset::ChunkAnnotation::Parent; // Note as non-leaf
+        }
+    }
+
+    // Walk up the hierarchy from all leaves (counting them with leafChunkCount) and keep track of the support chunks found on each chain
+    // Exactly one support chunk should be found on each walk. Remove all but the highest support markings if more than one are found.
+    bool redundantCoverage = false;
+    bool insufficientCoverage = false;
+    for (uint32_t i = 0; i < chunkCount; ++i)
+    {
+        if (chunkAnnotation[i] & Asset::ChunkAnnotation::Parent)
+        {
+            continue;   // Only start walks at leaves
+        }
+        ++leafChunkCount;
+        uint32_t supportChunkIndex;
+        supportChunkIndex = invalidIndex<uint32_t>();
+        uint32_t chunkIndex = i;
+        bool doneWithChain = false;
+        do
+        {
+            if (chunkDescs[chunkIndex].flags & NvBlastChunkDesc::SupportFlag)
+            {
+                if (chunkAnnotation[chunkIndex] & Asset::ChunkAnnotation::Support)
+                {
+                    // We've already been up this chain and marked this as support, so we have unique coverage already
+                    doneWithChain = true;
+                }
+                chunkAnnotation[chunkIndex] |= Asset::ChunkAnnotation::Support; // Note as support
+                if (!isInvalidIndex(supportChunkIndex))
+                {
+                    // A second support chunk on the same chain: redundant coverage
+                    if (testOnly)
+                    {
+                        return false;
+                    }
+                    redundantCoverage = true;
+                    chunkAnnotation[supportChunkIndex] &= ~Asset::ChunkAnnotation::Support; // Remove support marking
+                    do // Run up the hierarchy from supportChunkIndex to chunkIndex and remove the supersupport markings
+                    {
+                        supportChunkIndex = chunkDescs[supportChunkIndex].parentChunkIndex;
+                        chunkAnnotation[supportChunkIndex] &= ~Asset::ChunkAnnotation::SuperSupport; // Remove supersupport marking
+                    } while (supportChunkIndex != chunkIndex);
+                }
+                supportChunkIndex = chunkIndex;
+            }
+            else
+            if (!isInvalidIndex(supportChunkIndex))
+            {
+                chunkAnnotation[chunkIndex] |= Asset::ChunkAnnotation::SuperSupport; // Not a support chunk and we've already found a support chunk, so this is super-support
+            }
+        } while (!doneWithChain && !isInvalidIndex(chunkIndex = chunkDescs[chunkIndex].parentChunkIndex));
+        if (isInvalidIndex(supportChunkIndex))
+        {
+            // No support chunk found anywhere on this leaf-to-root chain
+            if (testOnly)
+            {
+                return false;
+            }
+            insufficientCoverage = true;
+        }
+    }
+
+    if (redundantCoverage)
+    {
+        NVBLAST_LOG_INFO(logFn, "NvBlastCreateAsset: some leaf-to-root chains had more than one support chunk. Some support chunks removed.");
+    }
+
+    if (insufficientCoverage)
+    {
+        // If coverage was insufficient, then walk up the hierarchy again and mark all chunks that have a support descendant.
+        // This will allow us to place support chunks at the highest possible level to obtain coverage.
+        for (uint32_t i = 0; i < chunkCount; ++i)
+        {
+            if (chunkAnnotation[i] & Asset::ChunkAnnotation::Parent)
+            {
+                continue;
+            }
+            bool supportFound = false;
+            uint32_t chunkIndex = i;
+            do
+            {
+                if (chunkAnnotation[chunkIndex] & Asset::ChunkAnnotation::Support)
+                {
+                    supportFound = true;
+                }
+                else
+                if (supportFound)
+                {
+                    chunkAnnotation[chunkIndex] |= Asset::ChunkAnnotation::SuperSupport; // Note that a descendant has support
+                }
+            } while (!isInvalidIndex(chunkIndex = chunkDescs[chunkIndex].parentChunkIndex));
+        }
+
+        // Now walk up the hierarchy from each leaf one more time, and make sure there is coverage
+        for (uint32_t i = 0; i < chunkCount; ++i)
+        {
+            if (chunkAnnotation[i] & Asset::ChunkAnnotation::Parent)
+            {
+                continue;
+            }
+            uint32_t previousChunkIndex;
+            previousChunkIndex = invalidIndex<uint32_t>();
+            uint32_t chunkIndex = i;
+            for (;;)
+            {
+                if (chunkAnnotation[chunkIndex] & Asset::ChunkAnnotation::Support)
+                {
+                    break; // There is support along this chain
+                }
+                if (chunkAnnotation[chunkIndex] & Asset::ChunkAnnotation::SuperSupport)
+                {
+                    NVBLAST_ASSERT(!isInvalidIndex(previousChunkIndex)); // This should be impossible
+                    chunkAnnotation[previousChunkIndex] |= Asset::ChunkAnnotation::Support; // There is no support along this chain, and this is the highest place where we can put support
+                    break;
+                }
+                previousChunkIndex = chunkIndex;
+                chunkIndex = chunkDescs[chunkIndex].parentChunkIndex;
+                if (isInvalidIndex(chunkIndex))
+                {
+                    chunkAnnotation[previousChunkIndex] |= Asset::ChunkAnnotation::Support; // There was no support found anywhere in the hierarchy, so we add it at the root
+                    break;
+                }
+            }
+        }
+
+        NVBLAST_LOG_INFO(logFn, "NvBlastCreateAsset: some leaf-to-root chains had no support chunks. Support chunks added.");
+    }
+
+    // Apply changes and count the number of support chunks
+    supportChunkCount = 0;
+    for (uint32_t i = 0; i < chunkCount; ++i)
+    {
+        const bool wasSupport = (chunkDescs[i].flags & NvBlastChunkDesc::SupportFlag) != 0;
+        const bool nowSupport = (chunkAnnotation[i] & Asset::ChunkAnnotation::Support) != 0;
+        if (wasSupport != nowSupport)
+        {
+            chunkDescs[i].flags ^= NvBlastChunkDesc::SupportFlag;   // Toggle the flag to match the annotation
+        }
+        if ((chunkDescs[i].flags & NvBlastChunkDesc::SupportFlag) != 0)
+        {
+            ++supportChunkCount;
+        }
+    }
+
+    return !redundantCoverage && !insufficientCoverage;
+}
+
+
+/**
+Tests whether the chunk descriptors are ordered validly for asset creation:
+- all root chunks (invalid parent index) must come first (a transition to an invalid parent after any
+  non-root chunk fails the test);
+- children of a given parent must form one contiguous run (once a parent's run of children ends, that
+  parent is marked in chunkMarks and may not start a new run);
+- all upper-support chunks (support or above) must precede all subsupport chunks.
+
+\param[in] chunkCount      Number of chunk descriptors.
+\param[in] chunkDescs      The chunk descriptors.
+\param[in] chunkAnnotation ChunkAnnotation flags per chunk (filled by ensureExactSupportCoverage).
+\param[in] scratch         Scratch memory of at least chunkCount bytes.
+
+\return true iff the order is valid.
+*/
+bool Asset::testForValidChunkOrder(uint32_t chunkCount, const NvBlastChunkDesc* chunkDescs, const char* chunkAnnotation, void* scratch)
+{
+    // chunkMarks[p] != 0 means chunk p's run of children has already been closed
+    char* chunkMarks = static_cast<char*>(memset(scratch, 0, chunkCount));
+
+    uint32_t currentParentChunkIndex = invalidIndex<uint32_t>();
+    for (uint32_t i = 0; i < chunkCount; ++i)
+    {
+        const uint32_t parentChunkIndex = chunkDescs[i].parentChunkIndex;
+        if (parentChunkIndex != currentParentChunkIndex)
+        {
+            // A new run of children begins; close out the previous run's parent
+            if (!Nv::Blast::isInvalidIndex(currentParentChunkIndex))
+            {
+                chunkMarks[currentParentChunkIndex] = 1;
+            }
+            currentParentChunkIndex = parentChunkIndex;
+            if (Nv::Blast::isInvalidIndex(currentParentChunkIndex))
+            {
+                return false;   // A root chunk appeared after a non-root chunk
+            }
+            else if (chunkMarks[currentParentChunkIndex] != 0)
+            {
+                return false;   // This parent's run of children was already closed; children are not contiguous
+            }
+        }
+
+        if (i < chunkCount - 1)
+        {
+            // Upper-support chunks must all precede subsupport chunks
+            const bool upperSupport0 = (chunkAnnotation[i] & ChunkAnnotation::UpperSupport) != 0;
+            const bool upperSupport1 = (chunkAnnotation[i + 1] & ChunkAnnotation::UpperSupport) != 0;
+
+            if (!upperSupport0 && upperSupport1)
+            {
+                return false;
+            }
+        }
+    }
+
+    return true;
+}
+
+} // namespace Blast
+} // namespace Nv
+
+
+// API implementation
+
+extern "C"
+{
+
+/// API entry point: scratch size (bytes) required by NvBlastCreateAsset for the given descriptor; 0 on NULL desc.
+size_t NvBlastGetRequiredScratchForCreateAsset(const NvBlastAssetDesc* desc, NvBlastLog logFn)
+{
+    NVBLAST_CHECK(desc != nullptr, logFn, "NvBlastGetRequiredScratchForCreateAsset: NULL desc pointer input.", return 0);
+
+    return Nv::Blast::Asset::createRequiredScratch(desc);
+}
+
+
+/// API entry point: memory size (bytes) required for an asset built from the given descriptor; 0 on NULL desc.
+size_t NvBlastGetAssetMemorySize(const NvBlastAssetDesc* desc, NvBlastLog logFn)
+{
+    NVBLAST_CHECK(desc != nullptr, logFn, "NvBlastGetAssetMemorySize: NULL desc input.", return 0);
+
+    return Nv::Blast::Asset::getMemorySize(desc);
+}
+
+
+/// API entry point: thin pass-through to Asset::create (see its documentation for parameter requirements).
+NvBlastAsset* NvBlastCreateAsset(void* mem, const NvBlastAssetDesc* desc, void* scratch, NvBlastLog logFn)
+{
+    return Nv::Blast::Asset::create(mem, desc, scratch, logFn);
+}
+
+
+/// API entry point: memory size (bytes) required for a family created from the given asset; 0 on NULL asset.
+size_t NvBlastAssetGetFamilyMemorySize(const NvBlastAsset* asset, NvBlastLog logFn)
+{
+    NVBLAST_CHECK(asset != nullptr, logFn, "NvBlastAssetGetFamilyMemorySize: NULL asset pointer input.", return 0);
+
+    return Nv::Blast::getFamilyMemorySize(reinterpret_cast<const Nv::Blast::Asset*>(asset));
+}
+
+
+/// API entry point: returns the asset's ID, or a zeroed ID if asset is NULL.
+NvBlastID NvBlastAssetGetID(const NvBlastAsset* asset, NvBlastLog logFn)
+{
+    NVBLAST_CHECK(asset != nullptr, logFn, "NvBlastAssetGetID: NULL asset pointer input.", NvBlastID zero; memset(&zero, 0, sizeof(NvBlastID)); return zero);
+
+    // static_cast (rather than the previous C-style cast, which silently dropped const) - we only read m_ID.
+    // Consistent with NvBlastAssetGetSupportGraph and NvBlastAssetGetChunkToGraphNodeMap below.
+    return static_cast<const Nv::Blast::Asset*>(asset)->m_ID;
+}
+
+
+/// API entry point: sets the asset's ID.  Returns false (with a log message) on NULL asset or id.
+bool NvBlastAssetSetID(NvBlastAsset* asset, const NvBlastID* id, NvBlastLog logFn)
+{
+    NVBLAST_CHECK(asset != nullptr, logFn, "NvBlastAssetSetID: NULL asset pointer input.", return false);
+    NVBLAST_CHECK(id != nullptr, logFn, "NvBlastAssetSetID: NULL id pointer input.", return false);
+
+    // Named downcast instead of the previous C-style cast; asset is already non-const here.
+    static_cast<Nv::Blast::Asset*>(asset)->m_ID = *id;
+
+    return true;
+}
+
+
+/// API entry point: returns the asset data format version, or UINT32_MAX if asset is NULL.
+uint32_t NvBlastAssetGetFormatVersion(const NvBlastAsset* asset, NvBlastLog logFn)
+{
+    NVBLAST_CHECK(asset != nullptr, logFn, "NvBlastAssetGetFormatVersion: NULL asset input.", return UINT32_MAX);
+
+    // static_cast (rather than a const-dropping C-style cast) - read-only member access.
+    return static_cast<const Nv::Blast::Asset*>(asset)->m_header.formatVersion;
+}
+
+
+/// API entry point: returns the asset's total data size in bytes, or 0 if asset is NULL.
+uint32_t NvBlastAssetGetSize(const NvBlastAsset* asset, NvBlastLog logFn)
+{
+    NVBLAST_CHECK(asset != nullptr, logFn, "NvBlastAssetGetSize: NULL asset input.", return 0);
+
+    // static_cast (rather than a const-dropping C-style cast) - read-only member access.
+    return static_cast<const Nv::Blast::Asset*>(asset)->m_header.size;
+}
+
+
+/// API entry point: returns the asset's chunk count, or 0 if asset is NULL.
+uint32_t NvBlastAssetGetChunkCount(const NvBlastAsset* asset, NvBlastLog logFn)
+{
+    NVBLAST_CHECK(asset != nullptr, logFn, "NvBlastAssetGetChunkCount: NULL asset input.", return 0);
+
+    // static_cast (rather than a const-dropping C-style cast) - read-only member access.
+    return static_cast<const Nv::Blast::Asset*>(asset)->m_chunkCount;
+}
+
+
+/// API entry point: returns the asset's leaf chunk count, or 0 if asset is NULL.
+uint32_t NvBlastAssetGetLeafChunkCount(const NvBlastAsset* asset, NvBlastLog logFn)
+{
+    NVBLAST_CHECK(asset != nullptr, logFn, "NvBlastAssetGetLeafChunkCount: NULL asset input.", return 0);
+
+    // static_cast (rather than a const-dropping C-style cast) - read-only member access.
+    return static_cast<const Nv::Blast::Asset*>(asset)->m_leafChunkCount;
+}
+
+
+/// API entry point: returns the index of the first subsupport chunk (== chunk count if none), or 0 if asset is NULL.
+uint32_t NvBlastAssetGetFirstSubsupportChunkIndex(const NvBlastAsset* asset, NvBlastLog logFn)
+{
+    NVBLAST_CHECK(asset != nullptr, logFn, "NvBlastAssetGetFirstSubsupportChunkIndex: NULL asset input.", return 0);
+
+    // static_cast (rather than a const-dropping C-style cast) - read-only member access.
+    return static_cast<const Nv::Blast::Asset*>(asset)->m_firstSubsupportChunkIndex;
+}
+
+
+/// API entry point: returns the asset's bond count, or 0 if asset is NULL.
+uint32_t NvBlastAssetGetBondCount(const NvBlastAsset* asset, NvBlastLog logFn)
+{
+    NVBLAST_CHECK(asset != nullptr, logFn, "NvBlastAssetGetBondCount: NULL asset input.", return 0);
+
+    // static_cast (rather than a const-dropping C-style cast) - read-only member access.
+    return static_cast<const Nv::Blast::Asset*>(asset)->m_bondCount;
+}
+
+
+/// API entry point: returns a view of the asset's support graph (pointers into asset memory).
+/// On NULL asset, returns a graph with zero node count and NULL array pointers.
+const NvBlastSupportGraph NvBlastAssetGetSupportGraph(const NvBlastAsset* asset, NvBlastLog logFn)
+{
+    NVBLAST_CHECK(asset != nullptr, logFn, "NvBlastAssetGetSupportGraph: NULL asset input.",
+        NvBlastSupportGraph blank; blank.nodeCount = 0; blank.chunkIndices = blank.adjacencyPartition = blank.adjacentNodeIndices = blank.adjacentBondIndices = nullptr; return blank);
+
+    const Nv::Blast::SupportGraph& supportGraph = static_cast<const Nv::Blast::Asset*>(asset)->m_graph;
+
+    // Translate the internal offset-based representation to the public pointer-based struct
+    NvBlastSupportGraph graph;
+    graph.nodeCount = supportGraph.m_nodeCount;
+    graph.chunkIndices = supportGraph.getChunkIndices();
+    graph.adjacencyPartition = supportGraph.getAdjacencyPartition();
+    graph.adjacentNodeIndices = supportGraph.getAdjacentNodeIndices();
+    graph.adjacentBondIndices = supportGraph.getAdjacentBondIndices();
+
+    return graph;
+}
+
+
+/// API entry point: returns the chunk-index -> graph-node-index map (one uint32_t per chunk,
+/// invalid index for non-support chunks), or nullptr if asset is NULL.
+const uint32_t* NvBlastAssetGetChunkToGraphNodeMap(const NvBlastAsset* asset, NvBlastLog logFn)
+{
+    NVBLAST_CHECK(asset != nullptr, logFn, "NvBlastAssetGetChunkToGraphNodeMap: NULL asset input.", return nullptr);
+
+    return static_cast<const Nv::Blast::Asset*>(asset)->getChunkToGraphNodeMap();
+}
+
+
+/// API entry point: returns the asset's chunk array, or nullptr if asset is NULL.
+const NvBlastChunk* NvBlastAssetGetChunks(const NvBlastAsset* asset, NvBlastLog logFn)
+{
+    NVBLAST_CHECK(asset != nullptr, logFn, "NvBlastAssetGetChunks: NULL asset input.", return nullptr);
+
+    // The original C-style cast silently removed const; make that explicit with const_cast
+    // in case getChunks() has no const overload.  Behavior is unchanged.
+    return const_cast<Nv::Blast::Asset*>(static_cast<const Nv::Blast::Asset*>(asset))->getChunks();
+}
+
+
+/// API entry point: returns the asset's bond array, or nullptr if asset is NULL.
+const NvBlastBond* NvBlastAssetGetBonds(const NvBlastAsset* asset, NvBlastLog logFn)
+{
+    NVBLAST_CHECK(asset != nullptr, logFn, "NvBlastAssetGetBonds: NULL asset input.", return nullptr);
+
+    // The original C-style cast silently removed const; make that explicit with const_cast
+    // in case getBonds() has no const overload.  Behavior is unchanged.
+    return const_cast<Nv::Blast::Asset*>(static_cast<const Nv::Blast::Asset*>(asset))->getBonds();
+}
+
+
+/// API entry point: upper bound (bytes) on the serialization size of any actor from this asset.
+/// Returns 0 if asset is NULL or the bound would exceed 4GB.
+uint32_t NvBlastAssetGetActorSerializationSizeUpperBound(const NvBlastAsset* asset, NvBlastLog logFn)
+{
+    NVBLAST_CHECK(asset != nullptr, logFn, "NvBlastAssetGetActorSerializationSizeUpperBound: NULL asset input.", return 0);
+
+    // static_cast instead of the previous C-style cast; only const accessors are used below.
+    const Nv::Blast::Asset& solverAsset = *static_cast<const Nv::Blast::Asset*>(asset);
+    const uint32_t graphNodeCount = solverAsset.m_graph.m_nodeCount;
+
+    // Calculate serialization size for an actor with all graph nodes (and therefore all bonds), and somehow with all graph nodes visible (after all, this is an upper bound).
+    const uint64_t upperBound = Nv::Blast::getActorSerializationSize(graphNodeCount, solverAsset.getLowerSupportChunkCount(), graphNodeCount, solverAsset.getBondCount());
+
+    if (upperBound > UINT32_MAX)
+    {
+        NVBLAST_LOG_WARNING(logFn, "NvBlastAssetGetActorSerializationSizeUpperBound: Serialization block size exceeds 4GB. Returning 0.\n");
+        return 0;
+    }
+
+    return static_cast<uint32_t>(upperBound);
+}
+
+} // extern "C"
diff --git a/NvBlast/sdk/lowlevel/source/NvBlastAsset.h b/NvBlast/sdk/lowlevel/source/NvBlastAsset.h
new file mode 100644
index 0000000..30e8161
--- /dev/null
+++ b/NvBlast/sdk/lowlevel/source/NvBlastAsset.h
@@ -0,0 +1,294 @@
+/*
+* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#ifndef NVBLASTASSET_H
+#define NVBLASTASSET_H
+
+
+#include "NvBlastSupportGraph.h"
+#include "NvBlast.h"
+#include "NvBlastAssert.h"
+#include "NvBlastIndexFns.h"
+#include "NvBlastChunkHierarchy.h"
+
+
+namespace Nv
+{
+namespace Blast
+{
+
/**
Internal implementation of NvBlastAsset.  The asset is stored as a single
memory block: this header is followed by the chunk, bond, and map arrays
accessed through the NvBlastBlockArrayData members below.
*/
class Asset : public NvBlastAsset
{
public:

    /**
    Struct-enum which is used to mark chunk descriptors when building an asset.
    */
    struct ChunkAnnotation
    {
        enum Enum
        {
            Parent = (1 << 0),
            Support = (1 << 1),
            SuperSupport = (1 << 2),

            // Combinations
            UpperSupport = Support | SuperSupport
        };
    };


    /**
    Create an asset from a descriptor.

    \param[in] mem Pointer to block of memory of at least the size given by getMemorySize(desc). Must be 16-byte aligned.
    \param[in] desc Asset descriptor (see NvBlastAssetDesc).
    \param[in] scratch User-supplied scratch memory of size createRequiredScratch(desc) bytes.
    \param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL.

    \return the pointer to the new asset, or nullptr if unsuccessful.
    */
    static Asset* create(void* mem, const NvBlastAssetDesc* desc, void* scratch, NvBlastLog logFn);

    /**
    Returns the number of bytes of memory that an asset created using the given descriptor will require. A pointer
    to a block of memory of at least this size must be passed in as the mem argument of create.

    \param[in] desc The asset descriptor that will be passed into NvBlastCreateAsset.
    */
    static size_t getMemorySize(const NvBlastAssetDesc* desc);

    /**
    Returns the size of the scratch space (in bytes) required to be passed into the create function, based upon
    the input descriptor that will be passed to the create function.

    \param[in] desc The descriptor that will be passed to the create function.

    \return the number of bytes required.
    */
    static size_t createRequiredScratch(const NvBlastAssetDesc* desc);


    /**
    Returns the number of upper-support chunks in this asset.

    \return the number of upper-support chunks.
    */
    uint32_t getUpperSupportChunkCount() const;

    /**
    Returns the number of lower-support chunks in this asset. This is the required actor buffer size for an Actor family.

    \return the number of lower-support chunks.
    */
    uint32_t getLowerSupportChunkCount() const;

    /**
    Returns the number of bonds in this asset's support graph.

    \return the number of bonds in this asset's support graph.
    */
    uint32_t getBondCount() const;

    /**
    Returns the number of separate chunk hierarchies in the asset. This will be the initial number of visible chunks in an actor instanced from this asset.

    \return the number of separate chunk hierarchies in the asset.
    */
    uint32_t getHierarchyCount() const;

    /**
    Maps all lower-support chunk indices to a contiguous range [0, getLowerSupportChunkCount()).

    \param[in] chunkIndex Asset chunk index.

    \return an index in the range [0, getLowerSupportChunkCount()) if it is a lower-support chunk, invalidIndex<uint32_t>() otherwise.
    */
    uint32_t getContiguousLowerSupportIndex(uint32_t chunkIndex) const;


    // Static functions

    /**
    Function to ensure support coverage of chunks.

    Support chunks (marked in the NvBlastChunkDesc struct) must provide full coverage over the asset.
    This means that from any leaf chunk to the root node, exactly one chunk must be support. If this condition
    is not met, the actual support chunks will be adjusted accordingly.

    Chunk order depends on support coverage, so this function should be called before chunk reordering.

    \param[out] supportChunkCount The number of support chunks. NOTE - this value is not meaningful if testOnly = true and the return value is false.
    \param[out] leafChunkCount The number of leaf chunks. NOTE - this value is not meaningful if testOnly = true and the return value is false.
    \param[out] chunkAnnotation User-supplied char array of size chunkCount. NOTE - these values are not meaningful if testOnly = true and the return value is false.
    \param[in] chunkCount The number of chunk descriptors.
    \param[in] chunkDescs Array of chunk descriptors of size chunkCount. It will be updated accordingly.
    \param[in] testOnly If true, this function early-outs if support coverage is not exact. If false, exact coverage is ensured by possibly modifying chunkDescs' flags.
    \param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL.

    \return true iff coverage was already exact.
    */
    static bool ensureExactSupportCoverage(uint32_t& supportChunkCount, uint32_t& leafChunkCount, char* chunkAnnotation, uint32_t chunkCount, NvBlastChunkDesc* chunkDescs, bool testOnly, NvBlastLog logFn);

    /**
    Tests a set of chunk descriptors to see if chunks are in valid chunk order.

    Chunk order conditions checked:
    1. 'all chunks with same parent index should go in a row'.
    2. 'root chunks should go first'.
    3. 'upper-support chunks should come before subsupport chunks'.

    \param[in] chunkCount The number of chunk descriptors.
    \param[in] chunkDescs An array of chunk descriptors of length chunkCount.
    \param[in] chunkAnnotation Annotation generated from ensureExactSupportCoverage (see ensureExactSupportCoverage).
    \param[in] scratch User-supplied scratch memory of chunkCount bytes.

    \return true if the descriptors meet the ordering conditions, false otherwise.
    */
    static bool testForValidChunkOrder(uint32_t chunkCount, const NvBlastChunkDesc* chunkDescs, const char* chunkAnnotation, void* scratch);


    //////// Data ////////

    /**
    Asset data block header.
    */
    NvBlastDataBlock m_header;

    /**
    ID for this asset.
    */
    NvBlastID m_ID;

    /**
    The total number of chunks in the asset, support and non-support.
    */
    uint32_t m_chunkCount;

    /**
    The support graph.
    */
    SupportGraph m_graph;

    /**
    The number of leaf chunks in the asset.
    */
    uint32_t m_leafChunkCount;

    /**
    Chunks are sorted such that subsupport chunks come last. This is the first subsupport chunk index. Equals m_chunkCount if there are no subsupport chunks.
    */
    uint32_t m_firstSubsupportChunkIndex;

    /**
    The number of bonds in the asset.
    */
    uint32_t m_bondCount;

    /**
    Chunks, of type NvBlastChunk.

    getChunks returns an array of size m_chunkCount.
    */
    NvBlastBlockArrayData(NvBlastChunk, m_chunksOffset, getChunks, m_chunkCount);

    /**
    Array of bond data for the interfaces between two chunks. Since the bond is shared by two chunks, the same
    bond data is used for chunk[i] -> chunk[j] as for chunk[j] -> chunk[i].
    The size of the array is m_graph.adjacencyPartition[m_graph.m_nodeCount]/2.
    See NvBlastBond.

    getBonds returns an array of size m_bondCount.
    */
    NvBlastBlockArrayData(NvBlastBond, m_bondsOffset, getBonds, m_bondCount);

    /**
    Caching the number of leaf chunks descended from each chunk (including the chunk itself).
    This data parallels the Chunks array, and is an array of the same size.

    getSubtreeLeafChunkCount returns a uint32_t array of size m_chunkCount.
    */
    NvBlastBlockArrayData(uint32_t, m_subtreeLeafChunkCountsOffset, getSubtreeLeafChunkCounts, m_chunkCount);

    /**
    Mapping from chunk index to graph node index (inverse of m_graph.getChunkIndices()).

    getChunkToGraphNodeMap returns a uint32_t array of size m_chunkCount.
    */
    NvBlastBlockArrayData(uint32_t, m_chunkToGraphNodeMapOffset, getChunkToGraphNodeMap, m_chunkCount);


    //////// Iterators ////////

    /**
    Chunk hierarchy depth-first iterator. Traverses subtree with root given by startChunkIndex.
    If upperSupportOnly == true, then the iterator will not traverse subsupport chunks.
    */
    class DepthFirstIt : public ChunkDepthFirstIt
    {
    public:
        /** Constructed from an asset. */
        DepthFirstIt(const Asset& asset, uint32_t startChunkIndex, bool upperSupportOnly = false) :
            ChunkDepthFirstIt(asset.getChunks(), startChunkIndex, upperSupportOnly ? asset.getUpperSupportChunkCount() : asset.m_chunkCount) {}
    };
};
+
+
+//////// Asset inline member functions ////////
+
// Chunks are sorted with all upper-support chunks before subsupport chunks,
// so the first subsupport chunk index equals the upper-support chunk count.
NV_INLINE uint32_t Asset::getUpperSupportChunkCount() const
{
    return m_firstSubsupportChunkIndex;
}
+
+
// Lower-support chunks = support chunks (one per graph node) plus all
// subsupport chunks (those at or beyond m_firstSubsupportChunkIndex).
NV_INLINE uint32_t Asset::getLowerSupportChunkCount() const
{
    return m_graph.m_nodeCount + (m_chunkCount - m_firstSubsupportChunkIndex);
}
+
+
// The adjacency partition's last entry is the total count of directed edges;
// each bond is stored once per direction, so halve it.
NV_INLINE uint32_t Asset::getBondCount() const
{
    NVBLAST_ASSERT((m_graph.getAdjacencyPartition()[m_graph.m_nodeCount] & 1) == 0); // The bidirectional graph data should have an even number of edges
    return m_graph.getAdjacencyPartition()[m_graph.m_nodeCount] / 2; // Directional bonds, divide by two
}
+
+
+NV_INLINE uint32_t Asset::getHierarchyCount() const
+{
+ const NvBlastChunk* chunks = getChunks();
+ for (uint32_t i = 0; i < m_chunkCount; ++i)
+ {
+ if (!isInvalidIndex(chunks[i].parentChunkIndex))
+ {
+ return i;
+ }
+ }
+ return m_chunkCount;
+}
+
+
+NV_INLINE uint32_t Asset::getContiguousLowerSupportIndex(uint32_t chunkIndex) const
+{
+ NVBLAST_ASSERT(chunkIndex < m_chunkCount);
+
+ return chunkIndex < m_firstSubsupportChunkIndex ? getChunkToGraphNodeMap()[chunkIndex] : (chunkIndex - m_firstSubsupportChunkIndex + m_graph.m_nodeCount);
+}
+
+
+//JDM: Expose this so serialization layer can use it.
+NVBLAST_API Asset* initializeAsset(void* mem, NvBlastID id, uint32_t chunkCount, uint32_t graphNodeCount, uint32_t leafChunkCount, uint32_t firstSubsupportChunkIndex, uint32_t bondCount, NvBlastLog logFn);
+
+} // namespace Blast
+} // namespace Nv
+
+
+#endif // ifndef NVBLASTASSET_H
diff --git a/NvBlast/sdk/lowlevel/source/NvBlastAssetHelper.cpp b/NvBlast/sdk/lowlevel/source/NvBlastAssetHelper.cpp
new file mode 100644
index 0000000..0d6c5d2
--- /dev/null
+++ b/NvBlast/sdk/lowlevel/source/NvBlastAssetHelper.cpp
@@ -0,0 +1,183 @@
+/*
+* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#include "NvBlastAsset.h"
+#include "NvBlastIndexFns.h"
+#include "NvBlastAssert.h"
+#include "NvBlastMemory.h"
+
+#include <algorithm>
+
+
+namespace Nv
+{
+namespace Blast
+{
+
+/**
+Class to hold chunk descriptor and annotation context for sorting a list of indices
+*/
+class ChunksOrdered
+{
+public:
+ ChunksOrdered(const NvBlastChunkDesc* descs, const char* annotation) : m_descs(descs), m_annotation(annotation) {}
+
+ bool operator () (uint32_t i0, uint32_t i1) const
+ {
+ const bool upperSupport0 = (m_annotation[i0] & Asset::ChunkAnnotation::UpperSupport) != 0;
+ const bool upperSupport1 = (m_annotation[i1] & Asset::ChunkAnnotation::UpperSupport) != 0;
+
+ if (upperSupport0 != upperSupport1)
+ {
+ return upperSupport0; // If one is uppersupport and one is subsupport, uppersupport should come first
+ }
+
+ // Parent chunk index (+1 so that UINT32_MAX becomes the lowest value)
+ const uint32_t p0 = m_descs[i0].parentChunkIndex + 1;
+ const uint32_t p1 = m_descs[i1].parentChunkIndex + 1;
+
+ return p0 < p1; // With the same support relationship, order by parent index
+ }
+
+private:
+ const NvBlastChunkDesc* m_descs;
+ const char* m_annotation;
+};
+
+} // namespace Blast
+} // namespace Nv
+
+
+using namespace Nv::Blast;
+
+extern "C"
+{
+
// Builds a map from each chunk descriptor's current index to the index it must
// occupy for a valid asset (see Asset::testForValidChunkOrder conditions).
// Returns true iff the descriptors are already in valid order (the map is then
// the identity).  Requires exact support coverage; fails otherwise.
bool NvBlastBuildAssetDescChunkReorderMap(uint32_t* chunkReorderMap, const NvBlastChunkDesc* chunkDescs, uint32_t chunkCount, void* scratch, NvBlastLog logFn)
{
    NVBLAST_CHECK(chunkCount == 0 || chunkDescs != nullptr, logFn, "NvBlastBuildAssetDescChunkReorderMap: NULL chunkDescs input with non-zero chunkCount", return false);
    NVBLAST_CHECK(chunkReorderMap == nullptr || chunkCount != 0, logFn, "NvBlastBuildAssetDescChunkReorderMap: NULL chunkReorderMap input with non-zero chunkCount", return false);
    NVBLAST_CHECK(chunkCount == 0 || scratch != nullptr, logFn, "NvBlastBuildAssetDescChunkReorderMap: NULL scratch input with non-zero chunkCount", return false);

    // Carve two arrays out of the caller's scratch block; the remaining
    // scratch is passed on to testForValidChunkOrder below.
    uint32_t* chunkMap = static_cast<uint32_t*>(scratch); scratch = pointerOffset(scratch, chunkCount * sizeof(uint32_t));
    char* chunkAnnotation = static_cast<char*>(scratch); scratch = pointerOffset(scratch, chunkCount * sizeof(char));

    uint32_t supportChunkCount;
    uint32_t leafChunkCount;
    // testOnly = true, so ensureExactSupportCoverage does not modify the
    // descriptors here and the const_cast is safe.
    if (!Asset::ensureExactSupportCoverage(supportChunkCount, leafChunkCount, chunkAnnotation, chunkCount, const_cast<NvBlastChunkDesc*>(chunkDescs), true, logFn))
    {
        NVBLAST_LOG_ERROR(logFn, "NvBlastBuildAssetDescChunkReorderMap: chunk descriptors did not have exact coverage, map could not be built. Use NvBlastEnsureAssetExactSupportCoverage to fix descriptors.");
        return false;
    }

    // check order for fast out (identity map)
    if (Asset::testForValidChunkOrder(chunkCount, chunkDescs, chunkAnnotation, scratch))
    {
        for (uint32_t i = 0; i < chunkCount; ++i)
        {
            chunkReorderMap[i] = i;
        }

        return true;
    }

    // Sort chunk indices into valid order, then invert: chunkMap gives
    // old-index-for-new-position, chunkReorderMap gives new-index-for-old.
    for (uint32_t i = 0; i < chunkCount; ++i)
    {
        chunkMap[i] = i;
    }
    std::sort(chunkMap, chunkMap + chunkCount, ChunksOrdered(chunkDescs, chunkAnnotation));

    invertMap(chunkReorderMap, chunkMap, chunkCount);

    return false;
}
+
+
+void NvBlastApplyAssetDescChunkReorderMap
+(
+ NvBlastChunkDesc* reorderedChunkDescs,
+ const NvBlastChunkDesc* chunkDescs,
+ uint32_t chunkCount,
+ NvBlastBondDesc* bondDescs,
+ uint32_t bondCount,
+ const uint32_t* chunkReorderMap,
+ NvBlastLog logFn
+)
+{
+ NVBLAST_CHECK(chunkCount == 0 || chunkDescs != nullptr, logFn, "NvBlastApplyAssetDescChunkReorderMap: NULL chunkDescs input with non-zero chunkCount", return);
+ NVBLAST_CHECK(reorderedChunkDescs == nullptr || chunkCount != 0, logFn, "NvBlastApplyAssetDescChunkReorderMap: NULL reorderedChunkDescs input with non-zero chunkCount", return);
+ NVBLAST_CHECK(chunkReorderMap == nullptr || chunkCount != 0, logFn, "NvBlastApplyAssetDescChunkReorderMap: NULL chunkReorderMap input with non-zero chunkCount", return);
+ NVBLAST_CHECK(bondCount == 0 || bondDescs != nullptr, logFn, "NvBlastApplyAssetDescChunkReorderMap: NULL bondDescs input with non-zero bondCount", return);
+ NVBLAST_CHECK(bondDescs == nullptr || chunkReorderMap != nullptr, logFn, "NvBlastApplyAssetDescChunkReorderMap: NULL bondDescs input with NULL chunkReorderMap", return);
+
+ // Copy chunk descs
+ if (reorderedChunkDescs)
+ {
+ for (uint32_t i = 0; i < chunkCount; ++i)
+ {
+ reorderedChunkDescs[chunkReorderMap[i]] = chunkDescs[i];
+ uint32_t& parentIndex = reorderedChunkDescs[chunkReorderMap[i]].parentChunkIndex;
+ if (parentIndex < chunkCount)
+ {
+ parentIndex = chunkReorderMap[parentIndex]; // If the parent index is valid, remap it too to reflect the new order
+ }
+ }
+ }
+
+ if (bondDescs)
+ {
+ for (uint32_t i = 0; i < bondCount; ++i)
+ {
+ for (int j = 0; j < 2; ++j)
+ {
+ uint32_t& index = bondDescs[i].chunkIndices[j];
+ if (index < chunkCount)
+ {
+ index = chunkReorderMap[index];
+ }
+ }
+ }
+ }
+}
+
+
// In-place variant of NvBlastApplyAssetDescChunkReorderMap: copies the chunk
// descriptors into scratch (must hold chunkCount NvBlastChunkDesc) and writes
// the reordered result back over chunkDescs.  Bond descriptors are remapped
// in place as in the out-of-place version.
void NvBlastApplyAssetDescChunkReorderMapInplace(NvBlastChunkDesc* chunkDescs, uint32_t chunkCount, NvBlastBondDesc* bondDescs, uint32_t bondCount, const uint32_t* chunkReorderMap, void* scratch, NvBlastLog logFn)
{
    NVBLAST_CHECK(chunkCount == 0 || chunkDescs != nullptr, logFn, "NvBlastApplyAssetDescChunkReorderMapInplace: NULL chunkDescs input with non-zero chunkCount", return);
    NVBLAST_CHECK(chunkCount == 0 || scratch != nullptr, logFn, "NvBlastApplyAssetDescChunkReorderMapInplace: NULL scratch input with non-zero chunkCount", return);

    NvBlastChunkDesc* chunksTemp = static_cast<NvBlastChunkDesc*>(scratch);
    memcpy(chunksTemp, chunkDescs, sizeof(NvBlastChunkDesc) * chunkCount);
    NvBlastApplyAssetDescChunkReorderMap(chunkDescs, chunksTemp, chunkCount, bondDescs, bondCount, chunkReorderMap, logFn);
}
+
+
+bool NvBlastReorderAssetDescChunks(NvBlastChunkDesc* chunkDescs, uint32_t chunkCount, NvBlastBondDesc* bondDescs, uint32_t bondCount, uint32_t* chunkReorderMap, void* scratch, NvBlastLog logFn)
+{
+ if (!NvBlastBuildAssetDescChunkReorderMap(chunkReorderMap, chunkDescs, chunkCount, scratch, logFn))
+ {
+ NvBlastApplyAssetDescChunkReorderMapInplace(chunkDescs, chunkCount, bondDescs, bondCount, chunkReorderMap, scratch, logFn);
+ return false;
+ }
+ return true;
+}
+
+
// Forces exact support coverage on the descriptors (testOnly = false), so
// chunk flags may be modified.  scratch must hold at least chunkCount bytes
// (used as the chunk annotation array).  Returns true iff coverage was
// already exact.  The support/leaf count outputs are discarded here.
bool NvBlastEnsureAssetExactSupportCoverage(NvBlastChunkDesc* chunkDescs, uint32_t chunkCount, void* scratch, NvBlastLog logFn)
{
    NVBLAST_CHECK(chunkCount == 0 || chunkDescs != nullptr, logFn, "NvBlastEnsureAssetExactSupportCoverage: NULL chunkDescs input with non-zero chunkCount", return false);
    NVBLAST_CHECK(chunkCount == 0 || scratch != nullptr, logFn, "NvBlastEnsureAssetExactSupportCoverage: NULL scratch input with non-zero chunkCount", return false);

    uint32_t supportChunkCount;
    uint32_t leafChunkCount;
    return Asset::ensureExactSupportCoverage(supportChunkCount, leafChunkCount, static_cast<char*>(scratch), chunkCount, chunkDescs, false, logFn);
}
+
+} // extern "C"
diff --git a/NvBlast/sdk/lowlevel/source/NvBlastChunkHierarchy.h b/NvBlast/sdk/lowlevel/source/NvBlastChunkHierarchy.h
new file mode 100644
index 0000000..e7e05c6
--- /dev/null
+++ b/NvBlast/sdk/lowlevel/source/NvBlastChunkHierarchy.h
@@ -0,0 +1,232 @@
+/*
+* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#ifndef NVBLASTCHUNKHIERARCHY_H
+#define NVBLASTCHUNKHIERARCHY_H
+
+
+#include "NvBlastIndexFns.h"
+#include "NvBlastDLink.h"
+#include "NvBlast.h"
+#include "NvBlastAssert.h"
+#include "NvBlastIteratorBase.h"
+
+
+namespace Nv
+{
+namespace Blast
+{
+
+/**
+Chunk hierarchy depth-first iterator. Traverses subtree with root given by startChunkIndex.
+Will not traverse chunks with index at or beyond chunkIndexLimit.
+*/
/**
Chunk hierarchy depth-first iterator. Traverses subtree with root given by startChunkIndex.
Will not traverse chunks with index at or beyond chunkIndexLimit.
*/
class ChunkDepthFirstIt : public IteratorBase<uint32_t>
{
public:
    /** Constructed from a chunk array. */
    ChunkDepthFirstIt(const NvBlastChunk* chunks, uint32_t startChunkIndex, uint32_t chunkIndexLimit) :
        IteratorBase<uint32_t>(startChunkIndex), m_chunks(chunks), m_stop(startChunkIndex), m_limit(chunkIndexLimit)
    {
        // If the root itself is outside the limit, there is nothing to traverse.
        if (m_curr >= m_limit)
        {
            m_curr = invalidIndex<uint32_t>();
        }
    }

    /** Pre-increment. Only use if valid() == true. */
    uint32_t operator ++ ()
    {
        NVBLAST_ASSERT(!isInvalidIndex(m_curr));
        const NvBlastChunk* chunk = m_chunks + m_curr;
        if (chunk->childIndexStop > chunk->firstChildIndex && chunk->firstChildIndex < m_limit)
        {
            // Descend: the current chunk has at least one child within the limit.
            m_curr = chunk->firstChildIndex;
        }
        else
        {
            // No children to visit: advance to the next sibling, climbing back
            // toward the traversal root whenever a sibling range is exhausted.
            for (;;)
            {
                if (m_curr == m_stop)
                {
                    // Returned to the traversal root: iteration is complete.
                    m_curr = invalidIndex<uint32_t>();
                    break;
                }
                NVBLAST_ASSERT(!isInvalidIndex(chunk->parentChunkIndex)); // This should not be possible with this search
                const NvBlastChunk* parentChunk = m_chunks + chunk->parentChunkIndex;
                if (++m_curr < parentChunk->childIndexStop)
                {
                    break; // Sibling chunk is valid, that's the next chunk
                }
                m_curr = chunk->parentChunkIndex;
                chunk = parentChunk;
            }
        }
        return m_curr;
    }

private:
    const NvBlastChunk* m_chunks;   // Chunk array being traversed
    uint32_t m_stop;                // Traversal root; reaching it again ends iteration
    uint32_t m_limit;               // Chunks with index >= m_limit are not descended into
};
+
+
+/**
+Enumerates chunk indices in a subtree with root given by chunkIndex, in breadth-first order.
+Will not traverse chunks with index at or beyond chunkIndexLimit.
+Returns the number of indices written to the chunkIndex array
+*/
/**
Enumerates chunk indices in a subtree with root given by chunkIndex, in breadth-first order.
Will not traverse chunks with index at or beyond chunkIndexLimit.
Returns the number of indices written to the chunkIndex array
*/
NV_INLINE uint32_t enumerateChunkHierarchyBreadthFirst
(
uint32_t* chunkIndices,
uint32_t chunkIndicesSize,
const NvBlastChunk* chunks,
uint32_t chunkIndex,
bool includeRoot = true,
uint32_t chunkIndexLimit = invalidIndex<uint32_t>()
)
{
    // Nothing can be written into an empty output buffer.
    if (chunkIndicesSize == 0)
    {
        return 0;
    }
    uint32_t chunkIndexCount = 0;
    bool rootHandled = false;
    if (includeRoot)
    {
        chunkIndices[chunkIndexCount++] = chunkIndex;
        rootHandled = true;
    }
    // chunkIndices doubles as the BFS queue.  When the root is excluded,
    // rootHandled is false on the first pass and the root's children are
    // expanded directly from chunkIndex without the root taking a queue slot.
    for (uint32_t curr = 0; !rootHandled || curr < chunkIndexCount;)
    {
        const NvBlastChunk& chunk = chunks[rootHandled ? chunkIndices[curr] : chunkIndex];
        if (chunk.firstChildIndex < chunkIndexLimit)
        {
            // Clip the child range to both the index limit and the space left in the output buffer.
            const uint32_t childIndexStop = chunk.childIndexStop < chunkIndexLimit ? chunk.childIndexStop : chunkIndexLimit;
            const uint32_t childIndexBufferStop = chunk.firstChildIndex + (chunkIndicesSize - chunkIndexCount);
            const uint32_t stop = childIndexStop < childIndexBufferStop ? childIndexStop : childIndexBufferStop;
            for (uint32_t childIndex = chunk.firstChildIndex; childIndex < stop; ++childIndex)
            {
                chunkIndices[chunkIndexCount++] = childIndex;
            }
        }
        if (rootHandled)
        {
            ++curr;
        }
        rootHandled = true;
    }
    return chunkIndexCount;
}
+
+
+/**
+VisibilityRep must have m_firstVisibleChunkIndex and m_visibleChunkCount fields
+*/
/**
VisibilityRep must have m_firstVisibleChunkIndex and m_visibleChunkCount fields.

Assigns the chunk at supportChunkIndex to the actor at actorIndex, then climbs
the chunk hierarchy updating each ancestor's owning actor and maintaining the
per-actor visible-chunk lists: a chunk is "visible" when its actor differs
from its parent's actor.

\param[in,out] actors                 Array of visibility representations, indexed by actor index.
\param[in,out] visibleChunkIndexLinks Doubly-linked list nodes, one per chunk, used for the visibility lists.
\param[in,out] chunkActorIndices      Owning actor index per upper-support chunk.
\param[in]     actorIndex             The actor taking ownership of the support chunk.
\param[in]     supportChunkIndex      The support chunk being (re)assigned.
\param[in]     chunks                 The asset chunk array.
\param[in]     upperSupportChunkCount Chunks with index >= this are subsupport and carry no actor index.
*/
template<class VisibilityRep>
void updateVisibleChunksFromSupportChunk
(
VisibilityRep* actors,
IndexDLink<uint32_t>* visibleChunkIndexLinks,
uint32_t* chunkActorIndices,
uint32_t actorIndex,
uint32_t supportChunkIndex,
const NvBlastChunk* chunks,
uint32_t upperSupportChunkCount
)
{
    uint32_t chunkIndex = supportChunkIndex;
    uint32_t chunkActorIndex = chunkActorIndices[supportChunkIndex];
    uint32_t newChunkActorIndex = actorIndex;
    VisibilityRep& thisActor = actors[actorIndex];

    // Climb from the support chunk toward the root, one parent per iteration.
    do
    {
        if (chunkActorIndex == newChunkActorIndex)
        {
            break; // Nothing to do
        }

        const uint32_t parentChunkIndex = chunks[chunkIndex].parentChunkIndex;
        const uint32_t parentChunkActorIndex = parentChunkIndex != invalidIndex<uint32_t>() ? chunkActorIndices[parentChunkIndex] : invalidIndex<uint32_t>();
        const bool chunkVisible = chunkActorIndex != parentChunkActorIndex;

        // If the chunk is visible, it needs to be removed from its old actor's visibility list
        if (chunkVisible && !isInvalidIndex(chunkActorIndex))
        {
            VisibilityRep& chunkActor = actors[chunkActorIndex];
            IndexDList<uint32_t>().removeFromList(chunkActor.m_firstVisibleChunkIndex, visibleChunkIndexLinks, chunkIndex);
            --chunkActor.m_visibleChunkCount;
        }

        // Now update the chunk's actor index
        const uint32_t oldChunkActorIndex = chunkActorIndices[chunkIndex];
        chunkActorIndices[chunkIndex] = newChunkActorIndex;
        if (newChunkActorIndex != invalidIndex<uint32_t>() && parentChunkActorIndex != newChunkActorIndex)
        {
            // The chunk is now visible. Add it to this actor's visibility list
            IndexDList<uint32_t>().insertListHead(thisActor.m_firstVisibleChunkIndex, visibleChunkIndexLinks, chunkIndex);
            ++thisActor.m_visibleChunkCount;
            // Remove its children from this actor's visibility list
            // (they are now hidden behind this chunk)
            if (actorIndex != oldChunkActorIndex)
            {
                const NvBlastChunk& chunk = chunks[chunkIndex];
                if (chunk.firstChildIndex < upperSupportChunkCount) // Only need to deal with upper-support children
                {
                    for (uint32_t childChunkIndex = chunk.firstChildIndex; childChunkIndex < chunk.childIndexStop; ++childChunkIndex)
                    {
                        if (chunkActorIndices[childChunkIndex] == actorIndex)
                        {
                            IndexDList<uint32_t>().removeFromList(thisActor.m_firstVisibleChunkIndex, visibleChunkIndexLinks, childChunkIndex);
                            --thisActor.m_visibleChunkCount;
                        }
                    }
                }
            }
        }

        if (parentChunkIndex != invalidIndex<uint32_t>())
        {
            // If all of its siblings have the same index, then the parent will too. Otherwise, the parent will have an invalid index and its children will be visible
            const NvBlastChunk& parentChunk = chunks[parentChunkIndex];
            bool uniform = true;
            for (uint32_t childChunkIndex = parentChunk.firstChildIndex; uniform && childChunkIndex < parentChunk.childIndexStop; ++childChunkIndex)
            {
                uniform = (newChunkActorIndex == chunkActorIndices[childChunkIndex]);
            }
            if (!uniform)
            {
                // The parent becomes unowned; any child still owned by the
                // parent's old actor becomes visible for that actor.
                newChunkActorIndex = invalidIndex<uint32_t>();
                for (uint32_t childChunkIndex = parentChunk.firstChildIndex; childChunkIndex < parentChunk.childIndexStop; ++childChunkIndex)
                {
                    const uint32_t childChunkActorIndex = chunkActorIndices[childChunkIndex];
                    if (childChunkActorIndex != invalidIndex<uint32_t>() && childChunkActorIndex == parentChunkActorIndex)
                    {
                        // The child was invisible. Add it to its actor's visibility list
                        VisibilityRep& childChunkActor = actors[childChunkActorIndex];
                        IndexDList<uint32_t>().insertListHead(childChunkActor.m_firstVisibleChunkIndex, visibleChunkIndexLinks, childChunkIndex);
                        ++childChunkActor.m_visibleChunkCount;
                    }
                }
            }
        }

        // Climb the hierarchy
        chunkIndex = parentChunkIndex;
        chunkActorIndex = parentChunkActorIndex;
    } while (chunkIndex != invalidIndex<uint32_t>());
}
+
+} // namespace Blast
+} // namespace Nv
+
+
+#endif // ifndef NVBLASTCHUNKHIERARCHY_H
diff --git a/NvBlast/sdk/lowlevel/source/NvBlastFamily.cpp b/NvBlast/sdk/lowlevel/source/NvBlastFamily.cpp
new file mode 100644
index 0000000..1f517b1
--- /dev/null
+++ b/NvBlast/sdk/lowlevel/source/NvBlastFamily.cpp
@@ -0,0 +1,295 @@
+/*
+* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+
+#include "NvBlastFamily.h"
+#include "NvBlastFamilyGraph.h"
+#include "NvBlastIndexFns.h"
+
+#include <new>
+
+namespace Nv
+{
+namespace Blast
+{
+
+//////// Global functions ////////
+
/**
Byte offsets (from the start of the family memory block) of each runtime
array stored in a family.  Filled in by createFamilyDataOffsets.
*/
struct FamilyDataOffsets
{
    size_t m_actors;                    // Actor array, one per lower-support chunk
    size_t m_visibleChunkIndexLinks;    // IndexDLink per chunk, for visibility lists
    size_t m_chunkActorIndices;         // Owning actor index per upper-support chunk
    size_t m_graphNodeIndexLinks;       // Link per support graph node
    size_t m_lowerSupportChunkHealths;  // Health value per lower-support chunk
    size_t m_graphBondHealths;          // Health value per bond
    size_t m_familyGraph;               // FamilyGraph data block
};
+
+
/**
Computes the byte offset of each family data array and returns the total
(16-byte aligned) block size required for a family of the given asset.

\param[out] offsets Receives the offsets (see FamilyDataOffsets).
\param[in]  asset   The asset the family will be instanced from.

\return the total family memory size in bytes.
*/
static size_t createFamilyDataOffsets(FamilyDataOffsets& offsets, const Asset* asset)
{
    const Nv::Blast::SupportGraph& graph = asset->m_graph;

    // The offset macros accumulate in declaration order, starting after the
    // header; keep this list in sync with the FamilyDataOffsets fields.
    NvBlastCreateOffsetStart(sizeof(FamilyHeader));
    NvBlastCreateOffsetAlign16(offsets.m_actors, asset->getLowerSupportChunkCount() * sizeof(Actor));
    NvBlastCreateOffsetAlign16(offsets.m_visibleChunkIndexLinks, asset->m_chunkCount * sizeof(IndexDLink<uint32_t>));
    NvBlastCreateOffsetAlign16(offsets.m_chunkActorIndices, asset->getUpperSupportChunkCount() * sizeof(uint32_t));
    NvBlastCreateOffsetAlign16(offsets.m_graphNodeIndexLinks, graph.m_nodeCount * sizeof(uint32_t));
    NvBlastCreateOffsetAlign16(offsets.m_lowerSupportChunkHealths, asset->getLowerSupportChunkCount() * sizeof(float));
    NvBlastCreateOffsetAlign16(offsets.m_graphBondHealths, asset->getBondCount() * sizeof(float));
    NvBlastCreateOffsetAlign16(offsets.m_familyGraph, static_cast<size_t>(FamilyGraph::requiredMemorySize(graph.m_nodeCount, asset->getBondCount())));
    return NvBlastCreateOffsetEndAlign16();
}
+
+
/**
Returns the number of bytes required to create a family from the given asset,
or 0 (with an assert, when parameter checking is enabled) if asset is null.
*/
size_t getFamilyMemorySize(const Asset* asset)
{
#if NVBLAST_CHECK_PARAMS
    if (asset == nullptr)
    {
        NVBLAST_ALWAYS_ASSERT();
        return 0;
    }
#endif

    // The size is a byproduct of laying out the data offsets; the offsets
    // themselves are discarded here.
    FamilyDataOffsets offsets;
    return createFamilyDataOffsets(offsets, asset);
}
+
+
/**
Constructs a family in caller-supplied memory for the given asset.

\param[in] mem   Block of at least getFamilyMemorySize(asset) bytes, 16-byte aligned.
\param[in] asset The asset to instance from; must have at least one chunk.
\param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL.

\return the family, or nullptr on invalid input.
*/
NvBlastFamily* createFamily(void* mem, const NvBlastAsset* asset, NvBlastLog logFn)
{
    NVBLAST_CHECK(mem != nullptr, logFn, "createFamily: NULL mem pointer input.", return nullptr);
    NVBLAST_CHECK(asset != nullptr, logFn, "createFamily: NULL asset pointer input.", return nullptr);

    NVBLAST_CHECK((reinterpret_cast<uintptr_t>(mem) & 0xF) == 0, logFn, "createFamily: mem pointer not 16-byte aligned.", return nullptr);

    const Asset& solverAsset = *static_cast<const Asset*>(asset);

    if (solverAsset.m_chunkCount == 0)
    {
        NVBLAST_LOG_ERROR(logFn, "createFamily: Asset has no chunks. Family not created.\n");
        return nullptr;
    }

    const Nv::Blast::SupportGraph& graph = solverAsset.m_graph;

    const uint32_t bondCount = solverAsset.getBondCount();

    // We need to keep this many actor representations around for our island indexing scheme.
    const uint32_t lowerSupportChunkCount = solverAsset.getLowerSupportChunkCount();

    // We need this many chunk actor indices.
    const uint32_t upperSupportChunkCount = solverAsset.getUpperSupportChunkCount();

    // Family offsets
    FamilyDataOffsets offsets;
    const size_t dataSize = createFamilyDataOffsets(offsets, &solverAsset);

    // Restricting our data size to < 4GB so that we may use uint32_t offsets
    if (dataSize > (size_t)UINT32_MAX)
    {
        NVBLAST_LOG_ERROR(logFn, "Nv::Blast::Actor::instanceAllocate: Instance data block size will exceed 4GB. Instance not created.\n");
        return nullptr;
    }

    // Allocate family
    NvBlastFamily* family = (NvBlastFamily*)mem;

    // Fill in family header
    FamilyHeader* header = (FamilyHeader*)family;
    header->dataType = NvBlastDataBlock::FamilyDataBlock;
    header->formatVersion = NvBlastFamilyDataFormat::Current;
    header->size = (uint32_t)dataSize;
    header->m_assetID = solverAsset.m_ID;
    header->m_actorCount = 0;
    header->m_actorsOffset = (uint32_t)offsets.m_actors;
    header->m_visibleChunkIndexLinksOffset = (uint32_t)offsets.m_visibleChunkIndexLinks;
    header->m_chunkActorIndicesOffset = (uint32_t)offsets.m_chunkActorIndices;
    header->m_graphNodeIndexLinksOffset = (uint32_t)offsets.m_graphNodeIndexLinks;
    header->m_lowerSupportChunkHealthsOffset = (uint32_t)offsets.m_lowerSupportChunkHealths;
    header->m_graphBondHealthsOffset = (uint32_t)offsets.m_graphBondHealths;
    header->m_familyGraphOffset = (uint32_t)offsets.m_familyGraph;

    // Runtime data
    header->m_asset = &solverAsset; // NOTE: this should be resolved from m_assetID

    // Initialize family header data:

    // Actors - initialize to defaults, with zero offset value (indicating inactive state)
    Actor* actors = header->getActors(); // This will get the subsupport actors too
    for (uint32_t i = 0; i < lowerSupportChunkCount; ++i)
    {
        new (actors + i) Actor();
    }

    // Visible chunk index links - initialize to solitary links (0xFFFFFFFF fields)
    memset(header->getVisibleChunkIndexLinks(), 0xFF, solverAsset.m_chunkCount*sizeof(IndexDLink<uint32_t>));

    // Chunk actor IDs - initialize to invalid (0xFFFFFFFF)
    memset(header->getChunkActorIndices(), 0xFF, upperSupportChunkCount*sizeof(uint32_t));

    // Graph node index links - initialize to solitary links
    memset(header->getGraphNodeIndexLinks(), 0xFF, graph.m_nodeCount*sizeof(uint32_t));

    // Healths are initialized to 0
    memset(header->getLowerSupportChunkHealths(), 0, lowerSupportChunkCount*sizeof(float));
    memset(header->getBondHealths(), 0, bondCount*sizeof(float));

    // FamilyGraph ctor
    new (header->getFamilyGraph()) FamilyGraph(&graph);

    return family;
}
+
+} // namespace Blast
+} // namespace Nv
+
+
+// API implementation
+
+extern "C"
+{
+
NvBlastFamily* NvBlastAssetCreateFamily(void* mem, const NvBlastAsset* asset, NvBlastLog logFn)
{
    // Thin C API wrapper; all validation and construction is in createFamily.
    return Nv::Blast::createFamily(mem, asset, logFn);
}
+
+
+uint32_t NvBlastFamilyGetFormatVersion(const NvBlastFamily* family, NvBlastLog logFn)
+{
+ NVBLAST_CHECK(family != nullptr, logFn, "NvBlastFamilyGetFormatVersion: NULL family pointer input.", return UINT32_MAX);
+ return reinterpret_cast<const Nv::Blast::FamilyHeader*>(family)->formatVersion;
+}
+
+
+void NvBlastFamilySetAsset(NvBlastFamily* family, const NvBlastAsset* asset, NvBlastLog logFn)
+{
+	NVBLAST_CHECK(family != nullptr, logFn, "NvBlastFamilySetAsset: NULL family pointer input.", return);
+	NVBLAST_CHECK(asset != nullptr, logFn, "NvBlastFamilySetAsset: NULL asset pointer input.", return);
+
+	Nv::Blast::FamilyHeader* header = reinterpret_cast<Nv::Blast::FamilyHeader*>(family);
+	const Nv::Blast::Asset* solverAsset = reinterpret_cast<const Nv::Blast::Asset*>(asset);
+
+	// Resolve the runtime asset pointer only if the passed asset's ID matches the ID stored at family creation.
+	if (memcmp(&header->m_assetID, &solverAsset->m_ID, sizeof(NvBlastID)))
+	{
+		NVBLAST_LOG_ERROR(logFn, "NvBlastFamilySetAsset: wrong asset. Passed asset ID doesn't match family asset ID.");
+		return;
+	}
+
+	header->m_asset = solverAsset;
+}
+
+
+uint32_t NvBlastFamilyGetSize(const NvBlastFamily* family, NvBlastLog logFn)
+{
+	// Returns the serialized block size stored in the family's data-block header; 0 on null input.
+	NVBLAST_CHECK(family != nullptr, logFn, "NvBlastFamilyGetSize: NULL family pointer input.", return 0);
+	return reinterpret_cast<const Nv::Blast::FamilyHeader*>(family)->size;
+}
+
+
+NvBlastID NvBlastFamilyGetAssetID(const NvBlastFamily* family, NvBlastLog logFn)
+{
+	// Returns a default-constructed (zero) ID on null input.
+	NVBLAST_CHECK(family != nullptr, logFn, "NvBlastFamilyGetAssetID: NULL family pointer input.", return NvBlastID());
+	return reinterpret_cast<const Nv::Blast::FamilyHeader*>(family)->m_assetID;
+}
+
+
+uint32_t NvBlastFamilyGetActorCount(const NvBlastFamily* family, NvBlastLog logFn)
+{
+	NVBLAST_CHECK(family != nullptr, logFn, "NvBlastFamilyGetActorCount: NULL family pointer input.", return 0);
+
+	const Nv::Blast::FamilyHeader* header = reinterpret_cast<const Nv::Blast::FamilyHeader*>(family);
+
+	// Refuse to interpret data laid out with a different serialization format version.
+	if (header->formatVersion != NvBlastFamilyDataFormat::Current)
+	{
+		NVBLAST_LOG_ERROR(logFn, "NvBlastFamilyGetActorCount: wrong family format. Family must be converted to current version.");
+		return 0;
+	}
+
+	// m_actorCount is maintained with atomic increments/decrements in borrowActor/returnActor.
+	return header->m_actorCount;
+}
+
+
+uint32_t NvBlastFamilyGetActors(NvBlastActor** actors, uint32_t actorsSize, const NvBlastFamily* family, NvBlastLog logFn)
+{
+	NVBLAST_CHECK(actors != nullptr, logFn, "NvBlastFamilyGetActors: NULL actors pointer input.", return 0);
+	NVBLAST_CHECK(family != nullptr, logFn, "NvBlastFamilyGetActors: NULL family pointer input.", return 0);
+
+	const Nv::Blast::FamilyHeader* header = reinterpret_cast<const Nv::Blast::FamilyHeader*>(family);
+
+	if (header->formatVersion != NvBlastFamilyDataFormat::Current)
+	{
+		NVBLAST_LOG_ERROR(logFn, "NvBlastFamilyGetActors: wrong family format. Family must be converted to current version.");
+		return 0;
+	}
+
+	// Iterate through active actors and write to supplied array.
+	// Output is truncated to actorsSize; the returned count is the number of pointers actually written.
+	const uint32_t familyActorCount = header->getActorBufferSize();
+	Nv::Blast::Actor* familyActor = header->getActors();
+	uint32_t actorCount = 0;
+	for (uint32_t i = 0; actorCount < actorsSize && i < familyActorCount; ++i, ++familyActor)
+	{
+		if (familyActor->isActive())
+		{
+			actors[actorCount++] = familyActor;
+		}
+	}
+
+	return actorCount;
+}
+
+
+NvBlastActor* NvBlastFamilyGetChunkActor(const NvBlastFamily* family, uint32_t chunkIndex, NvBlastLog logFn)
+{
+	NVBLAST_CHECK(family != nullptr, logFn, "NvBlastFamilyGetChunkActor: NULL family pointer input.", return nullptr);
+
+	const Nv::Blast::FamilyHeader* header = reinterpret_cast<const Nv::Blast::FamilyHeader*>(family);
+
+	NVBLAST_CHECK(header->m_asset != nullptr, logFn, "NvBlastFamilyGetChunkActor: NvBlastFamily has null asset set.", return nullptr);
+
+	const Nv::Blast::Asset& solverAsset = *static_cast<const Nv::Blast::Asset*>(header->m_asset);
+	NVBLAST_CHECK(chunkIndex < solverAsset.m_chunkCount, logFn, "NvBlastFamilyGetChunkActor: bad value of chunkIndex for the given family's asset.", return nullptr);
+
+	// get actorIndex from chunkIndex
+	uint32_t actorIndex;
+	if (chunkIndex < solverAsset.getUpperSupportChunkCount())
+	{
+		// Upper-support chunks map through the per-chunk actor index table (may be invalid = unowned).
+		actorIndex = header->getChunkActorIndices()[chunkIndex];
+	}
+	else
+	{
+		// Subsupport chunks map arithmetically: their actor slots follow the graph-node actor slots.
+		actorIndex = chunkIndex - (solverAsset.getUpperSupportChunkCount() - solverAsset.m_graph.m_nodeCount);
+	}
+
+	// get actor from actorIndex; only return it if it is currently active
+	if (!Nv::Blast::isInvalidIndex(actorIndex))
+	{
+		NVBLAST_ASSERT(actorIndex < header->getActorBufferSize());
+		Nv::Blast::Actor* actor = &header->getActors()[actorIndex];
+		if (actor->isActive())
+		{
+			return actor;
+		}
+	}
+	return nullptr;
+}
+
+
+uint32_t NvBlastFamilyGetMaxActorCount(const NvBlastFamily* family, NvBlastLog logFn)
+{
+	NVBLAST_CHECK(family != nullptr, logFn, "NvBlastFamilyGetMaxActorCount: NULL family pointer input.", return 0);
+	const Nv::Blast::FamilyHeader* header = reinterpret_cast<const Nv::Blast::FamilyHeader*>(family);
+	// NOTE(review): getActorBufferSize() dereferences header->m_asset, but unlike the sibling APIs there is
+	// no null-asset or format-version check here - presumably callers resolve the asset first; confirm.
+	return header->getActorBufferSize();
+}
+
+} // extern "C"
diff --git a/NvBlast/sdk/lowlevel/source/NvBlastFamily.h b/NvBlast/sdk/lowlevel/source/NvBlastFamily.h
new file mode 100644
index 0000000..d1cb069
--- /dev/null
+++ b/NvBlast/sdk/lowlevel/source/NvBlastFamily.h
@@ -0,0 +1,238 @@
+/*
+* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#ifndef NVBLASTFAMILY_H
+#define NVBLASTFAMILY_H
+
+
+#include "NvBlastPreprocessorInternal.h"
+#include "NvBlastAsset.h"
+#include "NvBlastPreprocessor.h"
+#include "NvBlastDLink.h"
+#include "NvBlastAtomic.h"
+#include "NvBlastMemory.h"
+
+#include <cstring>
+
+
+struct NvBlastAsset;
+
+
+namespace Nv
+{
+namespace Blast
+{
+
+// Forward declarations
+class FamilyGraph;
+class Actor;
+class Asset;
+
+
+/**
+Data header at the beginning of every NvBlastActor family
+
+The block address may be cast to a valid FamilyHeader pointer.
+*/
+struct FamilyHeader : public NvBlastDataBlock
+{
+	/**
+	The ID for the asset. This will be resolved into a pointer in the runtime data.
+	*/
+	NvBlastID m_assetID;
+
+	/**
+	Actors, of type Actor.
+
+	Actors with support chunks will use this array in the range [0, m_asset->m_graphNodeCount),
+	while subsupport actors will be placed in the range [m_asset->m_graphNodeCount, getActorBufferSize()).
+	*/
+	NvBlastBlockArrayData(Actor, m_actorsOffset, getActors, m_asset->m_graph.m_nodeCount);
+
+	/**
+	Visible chunk index links, of type IndexDLink<uint32_t>.
+
+	getVisibleChunkIndexLinks returns an array of size m_asset->m_chunkCount of IndexDLink<uint32_t> (see IndexDLink).
+	*/
+	NvBlastBlockArrayData(IndexDLink<uint32_t>, m_visibleChunkIndexLinksOffset, getVisibleChunkIndexLinks, m_asset->m_chunkCount);
+
+	/**
+	Chunk actor IDs, of type uint32_t. These correspond to the ID of the actor which owns each chunk. A value of invalidIndex<uint32_t>() indicates no owner.
+
+	getChunkActorIndices returns an array of size m_asset->m_firstSubsupportChunkIndex.
+	*/
+	NvBlastBlockArrayData(uint32_t, m_chunkActorIndicesOffset, getChunkActorIndices, m_asset->m_firstSubsupportChunkIndex);
+
+	/**
+	Graph node index links, of type uint32_t. The successor to index[i] is m_graphNodeIndexLinksOffset[i]. A value of invalidIndex<uint32_t>() indicates no successor.
+
+	getGraphNodeIndexLinks returns an array of size m_asset->m_graphNodeCount.
+	*/
+	NvBlastBlockArrayData(uint32_t, m_graphNodeIndexLinksOffset, getGraphNodeIndexLinks, m_asset->m_graph.m_nodeCount);
+
+	/**
+	Health for each support chunk and subsupport chunk, of type float.
+
+	To access support chunks, use the corresponding graph node index in the array returned by getLowerSupportChunkHealths.
+
+	To access subsupport chunk healths, use getSubsupportChunkHealths (see documentation for details).
+	*/
+	NvBlastBlockArrayData(float, m_lowerSupportChunkHealthsOffset, getLowerSupportChunkHealths, m_asset->getLowerSupportChunkCount());
+
+	/**
+	Utility function to get the start of the subsupport chunk health array.
+
+	To access a subsupport chunk health indexed by i, use getSubsupportChunkHealths()[i - m_asset->m_firstSubsupportChunkIndex]
+
+	\return the array of health values associated with all descendants of support chunks.
+	*/
+	float* getSubsupportChunkHealths() const
+	{
+		NVBLAST_ASSERT(m_asset != nullptr);
+		// Subsupport healths start right after the m_asset->m_graph.m_nodeCount support-chunk healths.
+		return (float*)((uintptr_t)this + m_lowerSupportChunkHealthsOffset) + m_asset->m_graph.m_nodeCount;
+	}
+
+	/**
+	Bond health for the interfaces between two chunks, of type float. Since the bond is shared by two chunks, the same bond health is used for chunk[i] -> chunk[j] as for chunk[j] -> chunk[i].
+
+	getBondHealths returns the array of healths associated with all bonds in the support graph.
+	*/
+	NvBlastBlockArrayData(float, m_graphBondHealthsOffset, getBondHealths, m_asset->getBondCount());
+
+	/**
+	The instance graph for islands searching, of type FamilyGraph.
+
+	Return the dynamic data generated for the support graph. (See FamilyGraph.)
+	This is used to store current connectivity information based upon bond and chunk healths, as well as cached intermediate data for faster incremental updates.
+	*/
+	NvBlastBlockData(FamilyGraph, m_familyGraphOffset, getFamilyGraph);
+
+
+	//////// Runtime data ////////
+
+	/**
+	The number of actors using this block.
+	Modified atomically (see borrowActor/returnActor); not serialized meaningfully - recomputed at runtime.
+	*/
+	volatile uint32_t m_actorCount;
+
+	/**
+	The asset corresponding to all actors in this family.
+	This is runtime data and will be resolved from m_assetID.
+	The union with a uint64_t reserves 8 bytes regardless of the build's pointer size.
+	*/
+	union
+	{
+		const Asset* m_asset;
+		uint64_t m_runtimePlaceholder; // Make sure we reserve enough room for an 8-byte pointer
+	};
+
+
+	//////// Functions ////////
+
+	/**
+	Gets an actor from the actor array and validates it if it is not already valid. This increments the actor reference count.
+
+	\param[in] index The index of the actor to borrow. Must be in the range [0, getActorBufferSize()).
+
+	\return A pointer to the indexed Actor.
+	*/
+	Actor* borrowActor(uint32_t index);
+
+	/**
+	Invalidates the actor if it is not already invalid. This decrements the actor reference count, but does not free this block when the count goes to zero.
+
+	\param[in] actor The actor to invalidate.
+	*/
+	void returnActor(Actor& actor);
+
+	/**
+	Returns the total number of actors in the Actor buffer, active and inactive.
+
+	\return the number of Actors in the actor buffer. See borrowActor.
+	*/
+	uint32_t getActorBufferSize() const;
+
+	/**
+	Returns a value to indicate whether or not the Actor with the given index is valid for use (active).
+
+	\return true iff the indexed actor is active.
+	*/
+	bool isActorActive(uint32_t index) const;
+};
+
+} // namespace Blast
+} // namespace Nv
+
+
+#include "NvBlastActor.h"
+
+
+namespace Nv
+{
+namespace Blast
+{
+
+//////// FamilyHeader inline methods ////////
+
+NV_INLINE Actor* FamilyHeader::borrowActor(uint32_t index)
+{
+	NVBLAST_ASSERT(index < getActorBufferSize());
+	Actor& actor = getActors()[index];
+	// A zero m_familyOffset marks an inactive actor; activate it by storing its byte offset from the header.
+	if (actor.m_familyOffset == 0)
+	{
+		const uintptr_t offset = (uintptr_t)&actor - (uintptr_t)this;
+		NVBLAST_ASSERT(offset <= UINT32_MAX);
+		actor.m_familyOffset = (uint32_t)offset;
+		// NOTE(review): only the count increment is atomic; the offset test-and-set above is not.
+		// Presumably concurrent borrows of the same index are excluded by the caller - confirm.
+		atomicIncrement(reinterpret_cast<volatile int32_t*>(&m_actorCount));
+	}
+	return &actor;
+}
+
+
+NV_INLINE void FamilyHeader::returnActor(Actor& actor)
+{
+	// Deactivate only if currently active (nonzero family offset); idempotent for already-returned actors.
+	if (actor.m_familyOffset != 0)
+	{
+		actor.m_familyOffset = 0;
+		// The actor count should be positive since this actor was valid. Check to be safe.
+		NVBLAST_ASSERT(m_actorCount > 0);
+		atomicDecrement(reinterpret_cast<volatile int32_t*>(&m_actorCount));
+	}
+}
+
+
+NV_INLINE uint32_t FamilyHeader::getActorBufferSize() const
+{
+	// One actor slot per lower-support chunk (support graph nodes plus subsupport chunks).
+	// Requires the runtime asset pointer to have been resolved.
+	NVBLAST_ASSERT(m_asset);
+	return m_asset->getLowerSupportChunkCount();
+}
+
+
+NV_INLINE bool FamilyHeader::isActorActive(uint32_t index) const
+{
+	// An actor is active iff its family offset is nonzero (see borrowActor/returnActor).
+	NVBLAST_ASSERT(index < getActorBufferSize());
+	return getActors()[index].m_familyOffset != 0;
+}
+
+
+//////// Global functions ////////
+
+/**
+Returns the number of bytes of memory that a family created using the given asset will require. A pointer
+to a block of memory of at least this size must be passed in as the mem argument of createFamily.
+
+\param[in] asset The asset that will be passed into NvBlastAssetCreateFamily.
+*/
+size_t getFamilyMemorySize(const Asset* asset);
+
+} // namespace Blast
+} // namespace Nv
+
+
+#endif // ifndef NVBLASTFAMILY_H
diff --git a/NvBlast/sdk/lowlevel/source/NvBlastFamilyGraph.cpp b/NvBlast/sdk/lowlevel/source/NvBlastFamilyGraph.cpp
new file mode 100644
index 0000000..08ed83d
--- /dev/null
+++ b/NvBlast/sdk/lowlevel/source/NvBlastFamilyGraph.cpp
@@ -0,0 +1,629 @@
+/*
+* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#include "NvBlastFamilyGraph.h"
+
+#include "NvBlastAssert.h"
+
+#include <vector>
+#include <stack>
+
+#define SANITY_CHECKS 0
+
+namespace Nv
+{
+namespace Blast
+{
+
+
+size_t FamilyGraph::fillMemory(FamilyGraph* familyGraph, uint32_t nodeCount, uint32_t bondCount)
+{
+	// Computes 16-byte-aligned offsets (relative to the FamilyGraph object) for every internal array,
+	// returning the total byte size. Doubles as a pure size query when familyGraph is null.
+	// calculate all offsets, and dataSize as a result
+	NvBlastCreateOffsetStart(sizeof(FamilyGraph));
+	const size_t NvBlastCreateOffsetAlign16(dirtyNodeLinksOffset, sizeof(NodeIndex) * nodeCount);
+	const size_t NvBlastCreateOffsetAlign16(firstDirtyNodeIndicesOffset, sizeof(uint32_t) * nodeCount);
+	const size_t NvBlastCreateOffsetAlign16(islandIdsOffset, sizeof(IslandId) * nodeCount);
+	const size_t NvBlastCreateOffsetAlign16(fastRouteOffset, sizeof(NodeIndex) * nodeCount);
+	const size_t NvBlastCreateOffsetAlign16(hopCountsOffset, sizeof(uint32_t) * nodeCount);
+	const size_t NvBlastCreateOffsetAlign16(isEdgeRemovedOffset, FixedBoolArray::requiredMemorySize(bondCount));
+	const size_t NvBlastCreateOffsetAlign16(isNodeInDirtyListOffset, FixedBoolArray::requiredMemorySize(nodeCount));
+	const size_t dataSize = NvBlastCreateOffsetEndAlign16();
+
+	// fill only if familyGraph was passed (otherwise we just used this function to get dataSize)
+	if (familyGraph)
+	{
+		familyGraph->m_dirtyNodeLinksOffset = static_cast<uint32_t>(dirtyNodeLinksOffset);
+		familyGraph->m_firstDirtyNodeIndicesOffset = static_cast<uint32_t>(firstDirtyNodeIndicesOffset);
+		familyGraph->m_islandIdsOffset = static_cast<uint32_t>(islandIdsOffset);
+		familyGraph->m_fastRouteOffset = static_cast<uint32_t>(fastRouteOffset);
+		familyGraph->m_hopCountsOffset = static_cast<uint32_t>(hopCountsOffset);
+		familyGraph->m_isEdgeRemovedOffset = static_cast<uint32_t>(isEdgeRemovedOffset);
+		familyGraph->m_isNodeInDirtyListOffset = static_cast<uint32_t>(isNodeInDirtyListOffset);
+
+		// Placement-construct the two bool arrays in their reserved regions.
+		new (familyGraph->getIsEdgeRemoved())FixedBoolArray(bondCount);
+		new (familyGraph->getIsNodeInDirtyList())FixedBoolArray(nodeCount);
+	}
+
+	return dataSize;
+}
+
+
+FamilyGraph::FamilyGraph(const SupportGraph* graph)
+{
+	// fill memory with all internal data
+	// we need chunks count for size calculation
+	const uint32_t nodeCount = graph->m_nodeCount;
+	// Each bond appears twice in the adjacency lists (once per endpoint), hence the division by 2.
+	const uint32_t bondCount = graph->getAdjacencyPartition()[nodeCount] / 2;
+
+	fillMemory(this, nodeCount, bondCount);
+
+	// fill arrays with invalid indices / max value (0xFFFFFFFF)
+	memset(getIslandIds(), 0xFF, nodeCount*sizeof(uint32_t));
+	memset(getFastRoute(), 0xFF, nodeCount*sizeof(uint32_t));
+	memset(getHopCounts(), 0xFF, nodeCount*sizeof(uint32_t)); // Initializing to large value
+	memset(getDirtyNodeLinks(), 0xFF, nodeCount*sizeof(uint32_t)); // No dirty list initially
+	memset(getFirstDirtyNodeIndices(), 0xFF, nodeCount*sizeof(uint32_t));
+
+	// No nodes dirty; all edges marked removed until initialize() is called.
+	getIsNodeInDirtyList()->clear();
+	getIsEdgeRemoved()->fill();
+}
+
+
+/**
+Graph initialization, reset all internal data to initial state. Marks all nodes dirty for this actor.
+First island search probably would be the longest one, as it has to traverse whole graph and set all the optimization stuff like fastRoute and hopCounts for all nodes.
+*/
+void FamilyGraph::initialize(ActorIndex actorIndex, const SupportGraph* graph)
+{
+	// used internal data pointers
+	NodeIndex* dirtyNodeLinks = getDirtyNodeLinks();
+	uint32_t* firstDirtyNodeIndices = getFirstDirtyNodeIndices();
+
+	// link dirty nodes: node i-1 -> i, forming the chain 0 -> 1 -> ... -> nodeCount-1.
+	// NOTE(review): relies on dirtyNodeLinks[nodeCount-1] already holding invalidIndex (set by the
+	// ctor's memset) to terminate the list - confirm for any re-initialization path.
+	for (NodeIndex node = 1; node < graph->m_nodeCount; node++)
+	{
+		dirtyNodeLinks[node-1] = node;
+	}
+	// Node 0 becomes the head of this actor's dirty list.
+	firstDirtyNodeIndices[actorIndex] = 0;
+
+	getIsNodeInDirtyList()->fill();
+	getIsEdgeRemoved()->clear();
+}
+
+
+void FamilyGraph::addToDirtyNodeList(ActorIndex actorIndex, NodeIndex node)
+{
+	// used internal data pointers
+	FixedBoolArray* isNodeInDirtyList = getIsNodeInDirtyList();
+	NodeIndex* dirtyNodeLinks = getDirtyNodeLinks();
+	uint32_t* firstDirtyNodeIndices = getFirstDirtyNodeIndices();
+
+	// check for bitmap first for avoid O(n) list search
+	if (isNodeInDirtyList->test(node))
+		return;
+
+	// add node to dirty node list head (singly-linked, head stored per actor)
+	dirtyNodeLinks[node] = firstDirtyNodeIndices[actorIndex];
+	firstDirtyNodeIndices[actorIndex] = node;
+	isNodeInDirtyList->set(node);
+}
+
+
+/**
+Removes fast routes and marks involved nodes as dirty
+*/
+bool FamilyGraph::notifyEdgeRemoved(ActorIndex actorIndex, NodeIndex node0, NodeIndex node1, const SupportGraph* graph)
+{
+	NVBLAST_ASSERT(node0 < graph->m_nodeCount);
+	NVBLAST_ASSERT(node1 < graph->m_nodeCount);
+
+	// used internal data pointers
+	NodeIndex* fastRoute = getFastRoute();
+	const uint32_t* adjacencyPartition = graph->getAdjacencyPartition();
+	const uint32_t* adjacentBondIndices = graph->getAdjacentBondIndices();
+
+	// search for bond: linear scan of node0's adjacency range; returns false if no such edge exists
+	for (uint32_t adjacencyIndex = adjacencyPartition[node0]; adjacencyIndex < adjacencyPartition[node0 + 1]; adjacencyIndex++)
+	{
+		if (getAdjacentNode(adjacencyIndex, graph) == node1)
+		{
+			// found bond
+			const uint32_t bondIndex = adjacentBondIndices[adjacencyIndex];
+
+			// remove bond
+			getIsEdgeRemoved()->set(bondIndex);
+
+			// broke fast route if it goes through this edge:
+			if (fastRoute[node0] == node1)
+				fastRoute[node0] = invalidIndex<uint32_t>();
+			if (fastRoute[node1] == node0)
+				fastRoute[node1] = invalidIndex<uint32_t>();
+
+			// mark nodes dirty (add to list if doesn't exist)
+			addToDirtyNodeList(actorIndex, node0);
+			addToDirtyNodeList(actorIndex, node1);
+
+			// we don't expect to be more than one bond between 2 nodes
+			return true;
+		}
+	}
+
+	return false;
+}
+
+bool FamilyGraph::notifyEdgeRemoved(ActorIndex actorIndex, NodeIndex node0, NodeIndex node1, uint32_t bondIndex, const SupportGraph* graph)
+{
+	// Overload with a known bondIndex: skips the adjacency search of the sibling overload.
+	// 'graph' is only read by the asserts below, hence NV_UNUSED for non-assert builds.
+	NV_UNUSED(graph);
+	NVBLAST_ASSERT(node0 < graph->m_nodeCount);
+	NVBLAST_ASSERT(node1 < graph->m_nodeCount);
+
+	getIsEdgeRemoved()->set(bondIndex);
+
+
+	NodeIndex* fastRoute = getFastRoute();
+
+	// broke fast route if it goes through this edge:
+	if (fastRoute[node0] == node1)
+		fastRoute[node0] = invalidIndex<uint32_t>();
+	if (fastRoute[node1] == node0)
+		fastRoute[node1] = invalidIndex<uint32_t>();
+
+	// mark nodes dirty (add to list if doesn't exist)
+	addToDirtyNodeList(actorIndex, node0);
+	addToDirtyNodeList(actorIndex, node1);
+
+	return true;
+}
+
+bool FamilyGraph::notifyNodeRemoved(ActorIndex actorIndex, NodeIndex nodeIndex, const SupportGraph* graph)
+{
+	NVBLAST_ASSERT(nodeIndex < graph->m_nodeCount);
+
+	// used internal data pointers
+	NodeIndex* fastRoute = getFastRoute();
+	const uint32_t* adjacencyPartition = graph->getAdjacencyPartition();
+	const uint32_t* adjacentBondIndices = graph->getAdjacentBondIndices();
+
+	// remove all edges leaving this node, breaking fast routes through them and dirtying neighbors
+	for (uint32_t adjacencyIndex = adjacencyPartition[nodeIndex]; adjacencyIndex < adjacencyPartition[nodeIndex + 1]; adjacencyIndex++)
+	{
+		const uint32_t adjacentNodeIndex = getAdjacentNode(adjacencyIndex, graph);
+		if (!isInvalidIndex(adjacentNodeIndex))
+		{
+			const uint32_t bondIndex = adjacentBondIndices[adjacencyIndex];
+			getIsEdgeRemoved()->set(bondIndex);
+
+			if (fastRoute[adjacentNodeIndex] == nodeIndex)
+				fastRoute[adjacentNodeIndex] = invalidIndex<uint32_t>();
+			if (fastRoute[nodeIndex] == adjacentNodeIndex)
+				fastRoute[nodeIndex] = invalidIndex<uint32_t>();
+
+			addToDirtyNodeList(actorIndex, adjacentNodeIndex);
+		}
+	}
+	addToDirtyNodeList(actorIndex, nodeIndex);
+
+	// ignore this node in partition (only needed for "chunk deleted from graph")
+	// getIslandIds()[nodeIndex] = invalidIndex<uint32_t>();
+
+	return true;
+}
+
+void FamilyGraph::unwindRoute(uint32_t traversalIndex, NodeIndex lastNode, uint32_t hopCount, IslandId id, FixedArray<TraversalState>* visitedNodes)
+{
+	// Walks back along the recorded traversal chain (via mPrevIndex), assigning each visited node
+	// its island id, an increasing hop count, and a fast route pointing toward the previously fixed node.
+	// used internal data pointers
+	IslandId* islandIds = getIslandIds();
+	NodeIndex* fastRoute = getFastRoute();
+	uint32_t* hopCounts = getHopCounts();
+
+	uint32_t currIndex = traversalIndex;
+	uint32_t hc = hopCount + 1; //Add on 1 for the hop to the witness/root node.
+	do
+	{
+		TraversalState& state = visitedNodes->at(currIndex);
+		hopCounts[state.mNodeIndex] = hc++;
+		islandIds[state.mNodeIndex] = id;
+		fastRoute[state.mNodeIndex] = lastNode;
+		currIndex = state.mPrevIndex;
+		lastNode = state.mNodeIndex;
+	}
+	while(currIndex != invalidIndex<uint32_t>());
+}
+
+
+bool FamilyGraph::tryFastPath(NodeIndex startNode, NodeIndex targetNode, IslandId islandId, FixedArray<TraversalState>* visitedNodes, FixedBitmap* isNodeWitness, const SupportGraph* graph)
+{
+	// Follows the cached fastRoute chain from startNode toward targetNode, tagging visited nodes as
+	// witnesses. On failure, all marks made here are rolled back. 'graph' is assert-only (NV_UNUSED).
+	NV_UNUSED(graph);
+
+	// used internal data pointers
+	IslandId* islandIds = getIslandIds();
+	NodeIndex* fastRoute = getFastRoute();
+
+	// prepare for iterating path
+	NodeIndex currentNode = startNode;
+	// (sic "Notes") rollback point: entries appended past this size are undone if the fast path fails
+	uint32_t visitedNotesInitialSize = visitedNodes->size();
+	uint32_t depth = 0;
+
+	bool found = false;
+	do
+	{
+		// witness ?
+		if (isNodeWitness->test(currentNode))
+		{
+			// Already visited and not tagged with invalid island == a witness!
+			found = islandIds[currentNode] != invalidIndex<uint32_t>();
+			break;
+		}
+
+		// reached targetNode ?
+		if (currentNode == targetNode)
+		{
+			found = true;
+			break;
+		}
+
+		TraversalState state(currentNode, visitedNodes->size(), visitedNodes->size() - 1, depth++);
+		visitedNodes->pushBack(state);
+
+		NVBLAST_ASSERT(isInvalidIndex(fastRoute[currentNode]) || hasEdge(currentNode, fastRoute[currentNode], graph));
+
+		// Flag as invalid island until the route is confirmed; restored below.
+		islandIds[currentNode] = invalidIndex<uint32_t>();
+		isNodeWitness->set(currentNode);
+
+		currentNode = fastRoute[currentNode];
+	} while (currentNode != invalidIndex<uint32_t>());
+
+	// Restore island ids for everything visited in this attempt (success or failure).
+	for (uint32_t a = visitedNotesInitialSize; a < visitedNodes->size(); ++a)
+	{
+		TraversalState& state = visitedNodes->at(a);
+		islandIds[state.mNodeIndex] = islandId;
+	}
+
+	// if fast path failed we have to remove all isWitness marks on visited nodes and nodes from visited list
+	if (!found)
+	{
+		for (uint32_t a = visitedNotesInitialSize; a < visitedNodes->size(); ++a)
+		{
+			TraversalState& state = visitedNodes->at(a);
+			isNodeWitness->reset(state.mNodeIndex);
+		}
+
+		visitedNodes->forceSize_Unsafe(visitedNotesInitialSize);
+	}
+
+	return found;
+}
+
+
+bool FamilyGraph::findRoute(NodeIndex startNode, NodeIndex targetNode, IslandId islandId, FixedArray<TraversalState>* visitedNodes, FixedBitmap* isNodeWitness, NodePriorityQueue* priorityQueue, const SupportGraph* graph)
+{
+	// Searches for a route from startNode to targetNode (the island root) or to any witness that
+	// already knows the root. Tries the cached fast path first, then a hop-count-guided best-first search.
+	// used internal data pointers
+	IslandId* islandIds = getIslandIds();
+	NodeIndex* fastRoute = getFastRoute();
+	uint32_t* hopCounts = getHopCounts();
+	const uint32_t* adjacencyPartition = graph->getAdjacencyPartition();
+
+	// Firstly, traverse the fast path and tag up witnesses. TryFastPath can fail. In that case, no witnesses are left but this node is permitted to report
+	// that it is still part of the island. Whichever node lost its fast path will be tagged as dirty and will be responsible for recovering the fast path
+	// and tagging up the visited nodes
+	if (fastRoute[startNode] != invalidIndex<uint32_t>())
+	{
+		if (tryFastPath(startNode, targetNode, islandId, visitedNodes, isNodeWitness, graph))
+			return true;
+	}
+
+	// If we got here, there was no fast path. Therefore, we need to fall back on searching for the root node. This is optimized by using "hop counts".
+	// These are per-node counts that indicate the expected number of hops from this node to the root node. These are lazily evaluated and updated
+	// as new edges are formed or when traversals occur to re-establish islands. As a result, they may be inaccurate but they still serve the purpose
+	// of guiding our search to minimize the chances of us doing an exhaustive search to find the root node.
+	islandIds[startNode] = invalidIndex<uint32_t>();
+	TraversalState startTraversal(startNode, visitedNodes->size(), invalidIndex<uint32_t>(), 0);
+	isNodeWitness->set(startNode);
+	QueueElement element(&visitedNodes->pushBack(startTraversal), hopCounts[startNode]);
+	priorityQueue->push(element);
+
+	do
+	{
+		// Expand the queued node with the smallest estimated hop count to the root.
+		QueueElement currentQE = priorityQueue->pop();
+
+		TraversalState& currentState = *currentQE.mState;
+		NodeIndex& currentNode = currentState.mNodeIndex;
+
+		// iterate all edges of currentNode
+		for (uint32_t adjacencyIndex = adjacencyPartition[currentNode]; adjacencyIndex < adjacencyPartition[currentNode + 1]; adjacencyIndex++)
+		{
+			NodeIndex nextIndex = getAdjacentNode(adjacencyIndex, graph);
+
+			if (nextIndex != invalidIndex<uint32_t>())
+			{
+				if (nextIndex == targetNode)
+				{
+					// targetNode found!
+					unwindRoute(currentState.mCurrentIndex, nextIndex, 0, islandId, visitedNodes);
+					return true;
+				}
+
+				if (isNodeWitness->test(nextIndex))
+				{
+					// We already visited this node. This means that it's either in the priority queue already or we
+					// visited in on a previous pass. If it was visited on a previous pass, then it already knows what island it's in.
+					// We now need to test the island id to find out if this node knows the root.
+					// If it has a valid root id, that id *is* our new root. We can guesstimate our hop count based on the node's properties
+
+					IslandId visitedIslandId = islandIds[nextIndex];
+					if (visitedIslandId != invalidIndex<uint32_t>())
+					{
+						// If we get here, we must have found a node that knows a route to our root node. It must not be a different island
+						// because that would caused me to have been visited already because totally separate islands trigger a full traversal on
+						// the orphaned side.
+						NVBLAST_ASSERT(visitedIslandId == islandId);
+						unwindRoute(currentState.mCurrentIndex, nextIndex, hopCounts[nextIndex], islandId, visitedNodes);
+						return true;
+					}
+				}
+				else
+				{
+					// This node has not been visited yet, so we need to push it into the stack and continue traversing
+					TraversalState state(nextIndex, visitedNodes->size(), currentState.mCurrentIndex, currentState.mDepth + 1);
+					QueueElement qe(&visitedNodes->pushBack(state), hopCounts[nextIndex]);
+					priorityQueue->push(qe);
+					isNodeWitness->set(nextIndex);
+					NVBLAST_ASSERT(islandIds[nextIndex] == islandId);
+					islandIds[nextIndex] = invalidIndex<uint32_t>(); //Flag as invalid island until we know whether we can find root or an island id.
+				}
+			}
+		}
+	} while (priorityQueue->size());
+
+	// Queue exhausted without reaching the root: startNode's component is a new island.
+	return false;
+}
+
+
+size_t FamilyGraph::findIslandsRequiredScratch(uint32_t graphNodeCount)
+{
+	// Scratch layout must match the consumption order in findIslands: visited-node array,
+	// witness bitmap, then priority queue - each 16-byte aligned.
+	const size_t visitedNodesSize = align16(FixedArray<TraversalState>::requiredMemorySize(graphNodeCount));
+	const size_t isNodeWitnessSize = align16(FixedBitmap::requiredMemorySize(graphNodeCount));
+	const size_t priorityQueueSize = align16(NodePriorityQueue::requiredMemorySize(graphNodeCount));
+
+	// Aligned and padded (16 extra bytes allow findIslands to bump an unaligned scratch pointer)
+	return 16 + visitedNodesSize
+		+ isNodeWitnessSize
+		+ priorityQueueSize;
+}
+
+
+uint32_t FamilyGraph::findIslands(ActorIndex actorIndex, void* scratch, const SupportGraph* graph)
+{
+	// Processes this actor's dirty-node list, re-routing each dirty node to its island root or
+	// founding a new island rooted at the dirty node. Returns the number of new islands created.
+	// check if we have at least 1 dirty node for this actor before proceeding
+	uint32_t* firstDirtyNodeIndices = getFirstDirtyNodeIndices();
+	if (isInvalidIndex(firstDirtyNodeIndices[actorIndex]))
+		return 0;
+
+	// used internal data pointers
+	IslandId* islandIds = getIslandIds();
+	NodeIndex* fastRoute = getFastRoute();
+	uint32_t* hopCounts = getHopCounts();
+	NodeIndex* dirtyNodeLinks = getDirtyNodeLinks();
+	FixedBoolArray* isNodeInDirtyList = getIsNodeInDirtyList();
+
+	// prepare intermediate data on scratch
+	scratch = (void*)align16((size_t)scratch); // Bump to 16-byte alignment (see padding in findIslandsRequiredScratch)
+	const uint32_t nodeCount = graph->m_nodeCount;
+
+	FixedArray<TraversalState>* visitedNodes = new (scratch)FixedArray<TraversalState>();
+	scratch = pointerOffset(scratch, align16(FixedArray<TraversalState>::requiredMemorySize(nodeCount)));
+
+	FixedBitmap* isNodeWitness = new (scratch)FixedBitmap(nodeCount);
+	scratch = pointerOffset(scratch, align16(FixedBitmap::requiredMemorySize(nodeCount)));
+
+	NodePriorityQueue* priorityQueue = new (scratch)NodePriorityQueue();
+	scratch = pointerOffset(scratch, align16(NodePriorityQueue::requiredMemorySize(nodeCount)));
+
+	// reset nodes visited bitmap
+	isNodeWitness->clear();
+
+	uint32_t newIslandsCount = 0;
+
+	while (!isInvalidIndex(firstDirtyNodeIndices[actorIndex]))
+	{
+		// Pop head off of dirty node's list
+		const NodeIndex dirtyNode = firstDirtyNodeIndices[actorIndex];
+		firstDirtyNodeIndices[actorIndex] = dirtyNodeLinks[dirtyNode];
+		dirtyNodeLinks[dirtyNode] = invalidIndex<uint32_t>();
+		NVBLAST_ASSERT(isNodeInDirtyList->test(dirtyNode));
+		isNodeInDirtyList->reset(dirtyNode);
+
+		// clear PriorityQueue
+		priorityQueue->clear();
+
+		// if we already visited this node before in this loop it's not dirty anymore
+		if (isNodeWitness->test(dirtyNode))
+			continue;
+
+		NodeIndex& islandRootNode = islandIds[dirtyNode];
+		IslandId islandId = islandRootNode; // the same in this implementation
+
+		// if this node is island root node we don't need to do anything
+		if (islandRootNode == dirtyNode)
+			continue;
+
+		// clear visited notes list (to fill during traverse)
+		visitedNodes->clear();
+
+		// try finding island root node from this dirtyNode
+		if (findRoute(dirtyNode, islandRootNode, islandId, visitedNodes, isNodeWitness, priorityQueue, graph))
+		{
+			// We found the root node so let's let every visited node know that we found its root
+			// and we can also update our hop counts because we recorded how many hops it took to reach this
+			// node
+
+			// We already filled in the path to the root/witness with accurate hop counts. Now we just need to fill in the estimates
+			// for the remaining nodes and re-define their islandIds. We approximate their path to the root by just routing them through
+			// the route we already found.
+
+			// This loop works because visitedNodes are recorded in the order they were visited and we already filled in the critical path
+			// so the remainder of the paths will just fork from that path.
+			for (uint32_t b = 0; b < visitedNodes->size(); ++b)
+			{
+				TraversalState& state = visitedNodes->at(b);
+				// Still-invalid entries are off the critical path; estimate via their traversal parent.
+				if (isInvalidIndex(islandIds[state.mNodeIndex]))
+				{
+					hopCounts[state.mNodeIndex] = hopCounts[visitedNodes->at(state.mPrevIndex).mNodeIndex] + 1;
+					fastRoute[state.mNodeIndex] = visitedNodes->at(state.mPrevIndex).mNodeIndex;
+					islandIds[state.mNodeIndex] = islandId;
+				}
+			}
+		}
+		else
+		{
+			// NEW ISLAND BORN!
+
+			// If I traversed and could not find the root node, then I have established a new island. In this island, I am the root node
+			// and I will point all my nodes towards me. Furthermore, I have established how many steps it took to reach all nodes in my island
+
+			// OK. We need to separate the islands. We have a list of nodes that are part of the new island (visitedNodes) and we know that the
+			// first node in that list is the root node.
+
+#if SANITY_CHECKS
+			NVBLAST_ASSERT(!canFindRoot(dirtyNode, islandRootNode, NULL));
+#endif
+
+			// Island id == root node index in this implementation.
+			IslandId newIsland = dirtyNode;
+			newIslandsCount++;
+
+			hopCounts[dirtyNode] = 0;
+			fastRoute[dirtyNode] = invalidIndex<uint32_t>();
+			islandIds[dirtyNode] = newIsland;
+
+			// visitedNodes[0] is the new root; re-point the rest at it (index 1 onward).
+			for (uint32_t a = 1; a < visitedNodes->size(); ++a)
+			{
+				NodeIndex visitedNode = visitedNodes->at(a).mNodeIndex;
+				hopCounts[visitedNode] = visitedNodes->at(a).mDepth; //How many hops to root
+				fastRoute[visitedNode] = visitedNodes->at(visitedNodes->at(a).mPrevIndex).mNodeIndex;
+				islandIds[visitedNode] = newIsland;
+			}
+		}
+	}
+
+	// all dirty nodes processed
+	return newIslandsCount;
+}
+
+
+/**
+!!! Debug/Test function.
+Function to check that root between nodes exists.
+*/
+bool FamilyGraph::canFindRoot(NodeIndex startNode, NodeIndex targetNode, FixedArray<NodeIndex>* visitedNodes, const SupportGraph* graph)
+{
+	// Debug-only exhaustive DFS (std::stack / std::vector allocate; not for runtime use).
+	// Optionally records every visited node into 'visitedNodes'.
+	if (visitedNodes)
+		visitedNodes->pushBack(startNode);
+
+	if (startNode == targetNode)
+		return true;
+
+	std::vector<bool> visitedState;
+	visitedState.resize(graph->m_nodeCount);
+	for (uint32_t i = 0; i < graph->m_nodeCount; i++)
+		visitedState[i] = false;
+
+	std::stack<NodeIndex> stack;
+
+	stack.push(startNode);
+	visitedState[startNode] = true;
+
+	const uint32_t* adjacencyPartition = graph->getAdjacencyPartition();
+	do
+	{
+		NodeIndex currentNode = stack.top();
+		stack.pop();
+
+		for (uint32_t adjacencyIndex = adjacencyPartition[currentNode]; adjacencyIndex < adjacencyPartition[currentNode + 1]; adjacencyIndex++)
+		{
+			NodeIndex nextNode = getAdjacentNode(adjacencyIndex, graph);
+
+			// Invalid adjacency entries correspond to removed edges.
+			if (isInvalidIndex(nextNode))
+				continue;
+
+			if (!visitedState[nextNode])
+			{
+				if (nextNode == targetNode)
+				{
+					return true;
+				}
+
+				visitedState[nextNode] = true;
+				stack.push(nextNode);
+
+				if (visitedNodes)
+					visitedNodes->pushBack(nextNode);
+			}
+		}
+
+	} while (!stack.empty());
+
+	return false;
+}
+
+
+/**
+!!! Debug/Test function.
+Function to check if edge exists.
+*/
+bool FamilyGraph::hasEdge(NodeIndex node0, NodeIndex node1, const SupportGraph* graph) const
+{
+	// Debug-only: true if a live edge exists in either adjacency direction between node0 and node1.
+	const uint32_t* adjacencyPartition = graph->getAdjacencyPartition();
+	uint32_t edges = 0;
+	for (uint32_t adjacencyIndex = adjacencyPartition[node0]; adjacencyIndex < adjacencyPartition[node0 + 1]; adjacencyIndex++)
+	{
+		if (getAdjacentNode(adjacencyIndex, graph) == node1)
+		{
+			edges++;
+			break;
+		}
+	}
+	for (uint32_t adjacencyIndex = adjacencyPartition[node1]; adjacencyIndex < adjacencyPartition[node1 + 1]; adjacencyIndex++)
+	{
+		if (getAdjacentNode(adjacencyIndex, graph) == node0)
+		{
+			edges++;
+			break;
+		}
+	}
+	return edges > 0;
+}
+
+
+/**
+!!! Debug/Test function.
+Function to calculate and return edges count
+*/
+uint32_t FamilyGraph::getEdgesCount(const SupportGraph* graph) const
+{
+	// Debug-only: counts live adjacency entries over all nodes; each edge is seen from both
+	// endpoints, hence the assert on evenness and the division by 2.
+	const uint32_t* adjacencyPartition = graph->getAdjacencyPartition();
+	uint32_t edges = 0;
+	for (NodeIndex n = 0; n < graph->m_nodeCount; n++)
+	{
+		for (uint32_t adjacencyIndex = adjacencyPartition[n]; adjacencyIndex < adjacencyPartition[n + 1]; adjacencyIndex++)
+		{
+			if (getAdjacentNode(adjacencyIndex, graph) != invalidIndex<uint32_t>())
+				edges++;
+		}
+	}
+	NVBLAST_ASSERT(edges % 2 == 0);
+	return edges / 2;
+}
+
+
+
+
+} // namespace Blast
+} // namespace Nv
+
diff --git a/NvBlast/sdk/lowlevel/source/NvBlastFamilyGraph.h b/NvBlast/sdk/lowlevel/source/NvBlastFamilyGraph.h
new file mode 100644
index 0000000..9fa331a
--- /dev/null
+++ b/NvBlast/sdk/lowlevel/source/NvBlastFamilyGraph.h
@@ -0,0 +1,280 @@
+/*
+* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#ifndef NVBLASTFAMILYGRAPH_H
+#define NVBLASTFAMILYGRAPH_H
+
+
+#include "NvBlastSupportGraph.h"
+#include "NvBlastFixedArray.h"
+#include "NvBlastFixedBitmap.h"
+#include "NvBlastFixedBoolArray.h"
+#include "NvBlastMath.h"
+#include "NvBlastFixedPriorityQueue.h"
+#include "NvBlastPreprocessorInternal.h"
+#include "NvBlastMemory.h"
+
+
+namespace Nv
+{
+namespace Blast
+{
+
+
+typedef uint32_t NodeIndex;
+typedef NodeIndex IslandId;
+typedef uint32_t ActorIndex;
+
+/**
+Internal implementation of family graph stored on the family.
+
+It processes full NvBlastSupportGraph graph, stores additional information used for faster islands finding,
+keeps and provides access to current islandId for every node.
+*/
+class FamilyGraph
+{
+public:
+
+ //////// ctor ////////
+
+ /**
+	Constructor. The family graph is meant to be placed (with placement new) in family memory.
+
+ \param[in] graph The graph to instance (see SupportGraph)
+ */
+ FamilyGraph(const SupportGraph* graph);
+
+
+ /**
+ Returns memory needed for this class (see fillMemory).
+
+ \param[in] nodeCount The number of nodes in the graph.
+ \param[in] bondCount The number of bonds in the graph.
+
+ \return the number of bytes required.
+ */
+ static size_t requiredMemorySize(uint32_t nodeCount, uint32_t bondCount)
+ {
+ return fillMemory(nullptr, nodeCount, bondCount);
+ }
+
+
+ //////// API ////////
+
+ /**
+ Function to initialize graph (all nodes added to dirty list for this actor)
+
+ \param[in] actorIndex The index of the actor to initialize graph with. Must be in the range [0, m_nodeCount).
+ \param[in] graph The static graph data for this family.
+ */
+ void initialize(ActorIndex actorIndex, const SupportGraph* graph);
+
+ /**
+	Function to notify the graph about a removed edge. The edge's nodes will be added to the dirty list for this actor. Returns true if the bond was removed.
+
+ \param[in] actorIndex The index of the actor from which the edge is removed. Must be in the range [0, m_nodeCount).
+ \param[in] node0 The index of the first node of removed edge. Must be in the range [0, m_nodeCount).
+ \param[in] node1 The index of the second node of removed edge. Must be in the range [0, m_nodeCount).
+ \param[in] graph The static graph data for this family.
+ */
+ bool notifyEdgeRemoved(ActorIndex actorIndex, NodeIndex node0, NodeIndex node1, const SupportGraph* graph);
+ bool notifyEdgeRemoved(ActorIndex actorIndex, NodeIndex node0, NodeIndex node1, uint32_t bondIndex, const SupportGraph* graph);
+
+ bool notifyNodeRemoved(ActorIndex actorIndex, NodeIndex nodeIndex, const SupportGraph* graph);
+
+ /**
+	Function to find new islands by examining dirty nodes associated with this actor (they can be associated with the actor if
+	notifyEdgeRemoved() was previously called for it).
+
+ \param[in] actorIndex The index of the actor on which graph part (edges + nodes) findIslands will be performed. Must be in the range [0, m_nodeCount).
+ \param[in] scratch User-supplied scratch memory of size findIslandsRequiredScratch(graphNodeCount) bytes.
+ \param[in] graph The static graph data for this family.
+
+ \return the number of new islands found.
+ */
+ uint32_t findIslands(ActorIndex actorIndex, void* scratch, const SupportGraph* graph);
+
+ /**
+ The scratch space required to call the findIslands function, in bytes.
+
+ \param[in] graphNodeCount The number of nodes in the graph.
+
+ \return the number of bytes required.
+ */
+ static size_t findIslandsRequiredScratch(uint32_t graphNodeCount);
+
+
+ //////// data getters ////////
+
+ /**
+ Utility function to get the start of the island ids array. This is an array of size nodeCount.
+ Every islandId == NodeIndex of root node in this island, it is set for every Node.
+
+ \return the array of island ids.
+ */
+ NvBlastBlockData(IslandId, m_islandIdsOffset, getIslandIds);
+
+ /**
+ Utility function to get the start of the dirty node links array. This is an array of size nodeCount.
+ */
+ NvBlastBlockData(NodeIndex, m_dirtyNodeLinksOffset, getDirtyNodeLinks);
+
+ /**
+ Utility function to get the start of the first dirty node indices array. This is an array of size nodeCount.
+ */
+ NvBlastBlockData(uint32_t, m_firstDirtyNodeIndicesOffset, getFirstDirtyNodeIndices);
+
+ /**
+ Utility function to get the start of the fast route array. This is an array of size nodeCount.
+ */
+ NvBlastBlockData(NodeIndex, m_fastRouteOffset, getFastRoute);
+
+ /**
+ Utility function to get the start of the hop counts array. This is an array of size nodeCount.
+ */
+ NvBlastBlockData(uint32_t, m_hopCountsOffset, getHopCounts);
+
+ /**
+	Utility function to get the pointer to the is-edge-removed bitmap. This is a bitmap of size bondCount.
+ */
+ NvBlastBlockData(FixedBoolArray, m_isEdgeRemovedOffset, getIsEdgeRemoved);
+
+ /**
+	Utility function to get the pointer to the is-node-in-dirty-list bitmap. This is a bitmap of size nodeCount.
+ */
+ NvBlastBlockData(FixedBoolArray, m_isNodeInDirtyListOffset, getIsNodeInDirtyList);
+
+
+ //////// Debug/Test ////////
+
+ uint32_t getEdgesCount(const SupportGraph* graph) const;
+ bool hasEdge(NodeIndex node0, NodeIndex node1, const SupportGraph* graph) const;
+ bool canFindRoot(NodeIndex startNode, NodeIndex targetNode, FixedArray<NodeIndex>* visitedNodes, const SupportGraph* graph);
+
+
+private:
+
+ FamilyGraph& operator = (const FamilyGraph&);
+
+ //////// internal types ////////
+
+ /**
+ Used to represent current graph traverse state.
+ */
+ struct TraversalState
+ {
+ NodeIndex mNodeIndex;
+ uint32_t mCurrentIndex;
+ uint32_t mPrevIndex;
+ uint32_t mDepth;
+
+ TraversalState()
+ {
+ }
+
+ TraversalState(NodeIndex nodeIndex, uint32_t currentIndex, uint32_t prevIndex, uint32_t depth) :
+ mNodeIndex(nodeIndex), mCurrentIndex(currentIndex), mPrevIndex(prevIndex), mDepth(depth)
+ {
+ }
+ };
+
+ /**
+ Queue element for graph traversal with priority queue.
+ */
+ struct QueueElement
+ {
+ TraversalState* mState;
+ uint32_t mHopCount;
+
+ QueueElement()
+ {
+ }
+
+ QueueElement(TraversalState* state, uint32_t hopCount) : mState(state), mHopCount(hopCount)
+ {
+ }
+ };
+
+ /**
+ Queue comparator for graph traversal with priority queue.
+ */
+ struct NodeComparator
+ {
+ NodeComparator()
+ {
+ }
+
+ bool operator() (const QueueElement& node0, const QueueElement& node1) const
+ {
+ return node0.mHopCount < node1.mHopCount;
+ }
+ private:
+ NodeComparator& operator = (const NodeComparator&);
+ };
+
+ /**
+ PriorityQueue for graph traversal. Queue element with smallest hopCounts will be always on top.
+ */
+ typedef FixedPriorityQueue<QueueElement, NodeComparator> NodePriorityQueue;
+
+
+ //////// internal operations ////////
+
+ /**
+	Function calculates the needed memory and fills it if familyGraph is passed. FamilyGraph is designed to use
+	memory right after itself, so it should be initialized with a placement new operation on memory of requiredMemorySize() bytes.
+
+ \param[in] familyGraph The pointer to actual FamilyGraph instance which will be filled. Can be nullptr, function will only return required bytes and do nothing.
+ \param[in] nodeCount The number of nodes in the graph.
+ \param[in] bondCount The number of bonds in the graph.
+
+ \return the number of bytes required or filled
+ */
+ static size_t fillMemory(FamilyGraph* familyGraph, uint32_t nodeCount, uint32_t bondCount);
+
+ /**
+	Function to find a route from one node to another. It tries the fast path first as an optimization and then, if that fails, performs a brute-force traverse (with hop count heuristic).
+ */
+ bool findRoute(NodeIndex startNode, NodeIndex targetNode, IslandId islandId, FixedArray<TraversalState>* visitedNodes, FixedBitmap* isNodeWitness, NodePriorityQueue* priorityQueue, const SupportGraph* graph);
+
+ /**
+ Function to try finding targetNode (from startNode) with getFastRoute().
+ */
+ bool tryFastPath(NodeIndex startNode, NodeIndex targetNode, IslandId islandId, FixedArray<TraversalState>* visitedNodes, FixedBitmap* isNodeWitness, const SupportGraph* graph);
+
+ /**
+ Function to unwind route upon successful finding of root node or witness.
+ We have found either a witness *or* the root node with this traversal. In the event of finding the root node, hopCount will be 0. In the event of finding
+ a witness, hopCount will be the hopCount that witness reported as being the distance to the root.
+ */
+ void unwindRoute(uint32_t traversalIndex, NodeIndex lastNode, uint32_t hopCount, IslandId id, FixedArray<TraversalState>* visitedNodes);
+
+ /**
+ Function to add node to dirty node list associated with actor.
+ */
+ void addToDirtyNodeList(ActorIndex actorIndex, NodeIndex node);
+
+ /**
+ Function used to get adjacentNode using index from adjacencyPartition with check for bondHealths (if it's not removed already)
+ */
+ NodeIndex getAdjacentNode(uint32_t adjacencyIndex, const SupportGraph* graph) const
+ {
+ const uint32_t bondIndex = graph->getAdjacentBondIndices()[adjacencyIndex];
+ return getIsEdgeRemoved()->test(bondIndex) ? invalidIndex<uint32_t>() : graph->getAdjacentNodeIndices()[adjacencyIndex];
+ }
+
+};
+
+
+} // namespace Blast
+} // namespace Nv
+
+
+#endif // ifndef NVBLASTFAMILYGRAPH_H
diff --git a/NvBlast/sdk/lowlevel/source/NvBlastSupportGraph.h b/NvBlast/sdk/lowlevel/source/NvBlastSupportGraph.h
new file mode 100644
index 0000000..9ee3fc9
--- /dev/null
+++ b/NvBlast/sdk/lowlevel/source/NvBlastSupportGraph.h
@@ -0,0 +1,134 @@
+/*
+* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#ifndef NVBLASTSUPPORTGRAPH_H
+#define NVBLASTSUPPORTGRAPH_H
+
+
+#include "NvBlastIndexFns.h"
+#include "NvBlastPreprocessorInternal.h"
+#include "NvBlastMemory.h"
+
+namespace Nv
+{
+namespace Blast
+{
+
+/**
+Describes the connectivity between support chunks via bonds.
+
+Vertices in the support graph are termed "nodes," and represent particular chunks (NvBlastChunk) in an NvBlastAsset.
+The indexing for nodes is not the same as that for chunks. Only some chunks are represented by nodes in the graph,
+and these chunks are called "support chunks."
+
+Adjacent node indices and adjacent bond indices are stored for each node, and therefore each bond is represented twice in this graph,
+going from node[i] -> node[j] and from node[j] -> node[i]. Therefore the size of the getAdjacentNodeIndices() and getAdjacentBondIndices()
+arrays are twice the number of bonds stored in the corresponding NvBlastAsset.
+
+The graph is used as follows. Given a SupportGraph "graph" and node index i, (0 <= i < graph.nodeCount), one may find all
+adjacent bonds and nodes using:
+
+ const uint32_t* adjacencyPartition = graph.getAdjacencyPartition();
+ const uint32_t* adjacentNodeIndices = graph.getAdjacentNodeIndices();
+ const uint32_t* adjacentBondIndices = graph.getAdjacentBondIndices();
+
+	// adj is the lookup index into the adjacentNodeIndices and adjacentBondIndices arrays
+ for (uint32_t adj = adjacencyPartition[i]; adj < adjacencyPartition[i+1]; ++adj)
+ {
+ // An adjacent node:
+ uint32_t adjacentNodeIndex = adjacentNodeIndices[adj];
+
+		// The corresponding bond (that connects the node with index i to the node with index adjacentNodeIndex):
+ uint32_t adjacentBondIndex = adjacentBondIndices[adj];
+ }
+
+For a graph node with index i, the corresponding asset chunk index is found using graph.getChunkIndices()[i]. The reverse mapping
+(obtaining a graph node index from an asset chunk index) can be done using the
+
+ NvBlastAssetGetChunkToGraphNodeMap(asset, logFn);
+
+function. See the documentation for its use. The returned "node index" for a non-support chunk is the invalid value 0xFFFFFFFF.
+*/
+struct SupportGraph
+{
+ /**
+ Total number of nodes in the support graph.
+ */
+ uint32_t m_nodeCount;
+
+ /**
+ Indices of chunks represented by the nodes.
+
+ getChunkIndices returns an array of size m_nodeCount.
+ */
+ NvBlastBlockArrayData(uint32_t, m_chunkIndicesOffset, getChunkIndices, m_nodeCount);
+
+ /**
+ Adjacency lookup table, of type uint32_t.
+
+ Partitions both the getAdjacentNodeIndices() and the getAdjacentBondIndices() arrays into subsets corresponding to each node.
+ The size of this array is nodeCount+1.
+ For 0 <= i < nodeCount, getAdjacencyPartition()[i] is the index of the first element in getAdjacentNodeIndices() (or getAdjacentBondIndices()) for nodes adjacent to the node with index i.
+ getAdjacencyPartition()[nodeCount] is the size of the getAdjacentNodeIndices() and getAdjacentBondIndices() arrays.
+ This allows one to easily count the number of nodes adjacent to a node with index i, using getAdjacencyPartition()[i+1] - getAdjacencyPartition()[i].
+
+ getAdjacencyPartition returns an array of size m_nodeCount + 1.
+ */
+ NvBlastBlockArrayData(uint32_t, m_adjacencyPartitionOffset, getAdjacencyPartition, m_nodeCount + 1);
+
+ /**
+ Array of uint32_t composed of subarrays holding the indices of nodes adjacent to a given node. The subarrays may be accessed through the getAdjacencyPartition() array.
+
+ getAdjacentNodeIndices returns an array of size getAdjacencyPartition()[m_nodeCount].
+ */
+ NvBlastBlockArrayData(uint32_t, m_adjacentNodeIndicesOffset, getAdjacentNodeIndices, getAdjacencyPartition()[m_nodeCount]);
+
+ /**
+ Array of uint32_t composed of subarrays holding the indices of bonds (NvBlastBond) for a given node. The subarrays may be accessed through the getAdjacencyPartition() array.
+
+ getAdjacentBondIndices returns an array of size getAdjacencyPartition()[m_nodeCount].
+ */
+ NvBlastBlockArrayData(uint32_t, m_adjacentBondIndicesOffset, getAdjacentBondIndices, getAdjacencyPartition()[m_nodeCount]);
+
+ /**
+ Finds the bond between two given graph nodes (if it exists) and returns the bond index.
+ If no bond exists, returns invalidIndex<uint32_t>().
+
+ \return the index of the bond between the given nodes.
+ */
+ uint32_t findBond(uint32_t nodeIndex0, uint32_t nodeIndex1) const;
+};
+
+
+//////// SupportGraph inline member functions ////////
+
+NV_INLINE uint32_t SupportGraph::findBond(uint32_t nodeIndex0, uint32_t nodeIndex1) const
+{
+ const uint32_t* adjacencyPartition = getAdjacencyPartition();
+ const uint32_t* adjacentNodeIndices = getAdjacentNodeIndices();
+ const uint32_t* adjacentBondIndices = getAdjacentBondIndices();
+
+ // Iterate through all neighbors of nodeIndex0 chunk
+ for (uint32_t i = adjacencyPartition[nodeIndex0]; i < adjacencyPartition[nodeIndex0 + 1]; i++)
+ {
+ if (adjacentNodeIndices[i] == nodeIndex1)
+ {
+ return adjacentBondIndices[i];
+ }
+ }
+
+ return invalidIndex<uint32_t>();
+}
+
+} // namespace Blast
+} // namespace Nv
+
+
+#endif // ifndef NVBLASTSUPPORTGRAPH_H
diff --git a/NvBlast/sdk/profiler/NvBlastProfiler.cpp b/NvBlast/sdk/profiler/NvBlastProfiler.cpp
new file mode 100644
index 0000000..3e82396
--- /dev/null
+++ b/NvBlast/sdk/profiler/NvBlastProfiler.cpp
@@ -0,0 +1,91 @@
+/*
+* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#include "NvBlastProfilerInternal.h"
+#include "PxProfiler.h"
+
+#if NV_PROFILE || NV_CHECKED || NV_DEBUG
+
+#if NV_NVTX
+#include "nvToolsExt.h"
+NV_INLINE void platformZoneStart(const char* name) { nvtxRangePush(name); }
+NV_INLINE void platformZoneEnd(const char*) { nvtxRangePop(); }
+
+#elif NV_XBOXONE
+#include "xboxone/NvBlastProfilerXB1.h"
+
+#elif NV_PS4
+#include "ps4/NvBlastProfilerPS4.h"
+
+#else
+NV_INLINE void platformZoneStart(const char*) { }
+NV_INLINE void platformZoneEnd(const char*) { }
+
+#endif
+
+static const uint64_t blastContextId = 0xb1a57;
+static physx::PxProfilerCallback* sCallback = nullptr;
+static bool sPlatform = false;
+static NvBlastProfilerDetail::Level sDetail = NvBlastProfilerDetail::LOW;
+
+void NvBlastProfilerSetCallback(physx::PxProfilerCallback* pcb)
+{
+ sCallback = pcb;
+}
+
+void NvBlastProfilerEnablePlatform(bool enable)
+{
+ sPlatform = enable;
+}
+
+void NvBlastProfilerBegin(const char* name, NvBlastProfilerDetail::Level level)
+{
+ if (level <= sDetail)
+ {
+ if (sCallback != nullptr)
+ {
+ sCallback->zoneStart(name, false, blastContextId);
+ }
+
+ if (sPlatform)
+ {
+ platformZoneStart(name);
+ }
+ }
+}
+
+void NvBlastProfilerEnd(const char* name, NvBlastProfilerDetail::Level level)
+{
+ if (level <= sDetail)
+ {
+ if (sCallback != nullptr)
+ {
+ sCallback->zoneEnd(nullptr, name, false, blastContextId);
+ }
+
+ if (sPlatform)
+ {
+ platformZoneEnd(name);
+ }
+ }
+}
+
+void NvBlastProfilerSetDetail(NvBlastProfilerDetail::Level level)
+{
+ sDetail = level;
+}
+
+#else
+
+void NvBlastProfilerSetCallback(physx::PxProfilerCallback*) {}
+void NvBlastProfilerEnablePlatform(bool) {}
+void NvBlastProfilerSetDetail(NvBlastProfilerDetail::Level) {}
+
+#endif //NV_PROFILE
diff --git a/NvBlast/sdk/profiler/NvBlastProfilerInternal.h b/NvBlast/sdk/profiler/NvBlastProfilerInternal.h
new file mode 100644
index 0000000..9be3c87
--- /dev/null
+++ b/NvBlast/sdk/profiler/NvBlastProfilerInternal.h
@@ -0,0 +1,58 @@
+/*
+* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#ifndef NVBLASTPROFILERINTERNAL_H
+#define NVBLASTPROFILERINTERNAL_H
+
+#include "NvBlastPreprocessor.h"
+#include "NvBlastProfiler.h"
+
+#if NV_PROFILE || NV_CHECKED || NV_DEBUG
+
+void NvBlastProfilerBegin(const char* name, NvBlastProfilerDetail::Level);
+void NvBlastProfilerEnd(const char* name, NvBlastProfilerDetail::Level);
+
+class ProfileScope
+{
+public:
+ ProfileScope(const char* name, NvBlastProfilerDetail::Level level) :m_name(name), m_level(level)
+ {
+ NvBlastProfilerBegin(m_name, m_level);
+ }
+
+ ~ProfileScope()
+ {
+ NvBlastProfilerEnd(m_name, m_level);
+ }
+
+private:
+ const char* m_name;
+ NvBlastProfilerDetail::Level m_level;
+};
+
+#define PERF_BLAST_PREFIX "Blast: "
+#define PERF_ZONE_BEGIN(name) NvBlastProfilerBegin(PERF_BLAST_PREFIX name, NvBlastProfilerDetail::HIGH)
+#define PERF_ZONE_END(name) NvBlastProfilerEnd(PERF_BLAST_PREFIX name, NvBlastProfilerDetail::HIGH)
+#define PERF_SCOPE(name, detail) ProfileScope PX_CONCAT(_scope,__LINE__) (PERF_BLAST_PREFIX name, detail)
+#define PERF_SCOPE_L(name) PERF_SCOPE(name, NvBlastProfilerDetail::LOW)
+#define PERF_SCOPE_M(name) PERF_SCOPE(name, NvBlastProfilerDetail::MEDIUM)
+#define PERF_SCOPE_H(name) PERF_SCOPE(name, NvBlastProfilerDetail::HIGH)
+
+#else
+
+#define PERF_ZONE_BEGIN(name)
+#define PERF_ZONE_END(name)
+#define PERF_SCOPE_L(name)
+#define PERF_SCOPE_M(name)
+#define PERF_SCOPE_H(name)
+
+#endif
+
+#endif
diff --git a/NvBlast/sdk/toolkit/include/NvBlastTk.h b/NvBlast/sdk/toolkit/include/NvBlastTk.h
new file mode 100644
index 0000000..6471165
--- /dev/null
+++ b/NvBlast/sdk/toolkit/include/NvBlastTk.h
@@ -0,0 +1,31 @@
+/*
+* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#ifndef NVBLASTTK_H
+#define NVBLASTTK_H
+
+
+/**
+This is the main include header for the BlastTk SDK, for users who
+want to use a single #include file.
+
+Alternatively, one can instead directly #include a subset of the below files.
+*/
+
+
+#include "NvBlastTkFramework.h"
+#include "NvBlastTkAsset.h"
+#include "NvBlastTkActor.h"
+#include "NvBlastTkJoint.h"
+#include "NvBlastTkFamily.h"
+#include "NvBlastTkGroup.h"
+
+
+#endif // ifndef NVBLASTTK_H
diff --git a/NvBlast/sdk/toolkit/include/NvBlastTkActor.h b/NvBlast/sdk/toolkit/include/NvBlastTkActor.h
new file mode 100644
index 0000000..a810ee1
--- /dev/null
+++ b/NvBlast/sdk/toolkit/include/NvBlastTkActor.h
@@ -0,0 +1,239 @@
+/*
+* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#ifndef NVBLASTTKACTOR_H
+#define NVBLASTTKACTOR_H
+
+#include "NvBlastTkObject.h"
+#include "NvBlastTypes.h"
+
+// Forward declarations
+struct NvBlastActor;
+struct NvBlastFamily;
+
+
+namespace Nv
+{
+namespace Blast
+{
+
+// Forward declarations
+class TkAsset;
+class TkFamily;
+class TkGroup;
+class TkJoint;
+
+
+/**
+The BlastTk entity which encapsulates an NvBlastActor. Every TkActor represents a group
+of chunks which may correspond to a single physical rigid body. TkActors are created
+using TkFramework::createActor.
+*/
+class TkActor : public TkObject
+{
+public:
+ /**
+ Access to underlying low-level actor.
+
+ \return a pointer to the (const) low-level NvBlastActor object.
+ */
+ virtual const NvBlastActor* getActorLL() const = 0;
+
+ /**
+ Every actor is part of an actor family, even if that family contains a single actor.
+ This function returns a reference to the actor's TkFamily.
+
+ \return a pointer to the actor's TkFamily.
+ */
+ virtual TkFamily& getFamily() const = 0;
+
+ /**
+ Every actor has a unique index within a family. This function returns that index.
+ */
+ virtual uint32_t getIndex() const = 0;
+
+ /**
+ Actors may be part of (no more than) one group. See TkGroup for the functions to add and remove actors.
+ This function returns a pointer to the actor's group, or NULL if it is not in a group.
+ */
+ virtual TkGroup* getGroup() const = 0;
+
+ /**
+ Remove this actor from its group, if it is in one.
+
+ \return the actor's former group if successful, NULL otherwise.
+ */
+ virtual TkGroup* removeFromGroup() = 0;
+
+ /**
+ Every actor has an associated asset.
+
+ \return a pointer to the (const) TkAsset object.
+ */
+ virtual const TkAsset* getAsset() const = 0;
+
+ /**
+ Get the number of visible chunks for this actor. May be used in conjunction with getVisibleChunkIndices.
+
+ NOTE: Wrapper function over low-level function call, see NvBlastActorGetVisibleChunkCount for details.
+
+ \return the number of visible chunk indices for the actor.
+ */
+ virtual uint32_t getVisibleChunkCount() const = 0;
+
+ /**
+ Retrieve a list of visible chunk indices for the actor into the given array.
+
+ NOTE: Wrapper function over low-level function call, see NvBlastActorGetVisibleChunkIndices for details.
+
+ \param[in] visibleChunkIndices User-supplied array to be filled in with indices of visible chunks for this actor.
+ \param[in] visibleChunkIndicesSize The size of the visibleChunkIndices array. To receive all visible chunk indices, the size must be at least that given by getVisibleChunkCount().
+
+ \return the number of indices written to visibleChunkIndices. This will not exceed visibleChunkIndicesSize.
+ */
+ virtual uint32_t getVisibleChunkIndices(uint32_t* visibleChunkIndices, uint32_t visibleChunkIndicesSize) const = 0;
+
+ /**
+ Get the number of graph nodes for this actor. May be used in conjunction with getGraphNodeIndices.
+
+ NOTE: Wrapper function over low-level function call, see NvBlastActorGetGraphNodeCount for details.
+
+ \return the number of graph node indices for the actor.
+ */
+ virtual uint32_t getGraphNodeCount() const = 0;
+
+ /**
+ Retrieve a list of graph node indices for the actor into the given array.
+
+ NOTE: Wrapper function over low-level function call, see NvBlastActorGetGraphNodeIndices for details.
+
+ \param[in] graphNodeIndices User-supplied array to be filled in with indices of graph nodes for this actor.
+ \param[in] graphNodeIndicesSize The size of the graphNodeIndices array. To receive all graph node indices, the size must be at least that given by getGraphNodeCount().
+
+ \return the number of indices written to graphNodeIndices. This will not exceed graphNodeIndicesSize.
+ */
+ virtual uint32_t getGraphNodeIndices(uint32_t* graphNodeIndices, uint32_t graphNodeIndicesSize) const = 0;
+
+ /**
+ Access the bond health data for an actor.
+
+ NOTE: Wrapper function over low-level function call, see NvBlastActorGetBondHealths for details.
+
+ \return the array of bond healths for the actor's family, or NULL if the actor is invalid.
+ */
+ virtual const float* getBondHealths() const = 0;
+
+ /**
+ Returns the upper-bound number of actors which can be created by splitting this actor.
+
+ NOTE: Wrapper function over low-level function call, see NvBlastActorGetMaxActorCountForSplit for details.
+
+ \return the upper-bound number of actors which can be created by splitting this actor.
+ */
+ virtual uint32_t getSplitMaxActorCount() const = 0;
+
+ /**
+ Report whether this actor is in 'pending' state. Being in 'pending' state leads to actor being processed by group.
+
+ \return true iff actor is in 'pending' state.
+ */
+ virtual bool isPending() const = 0;
+
+ /**
+ Apply damage to this actor.
+
+ Actual damage processing is deferred till the group process() call. Sets actor in 'pending' state.
+
+ It's the user's responsibility to keep programParams pointer alive until the group sync() call.
+
+ \param[in] program A NvBlastDamageProgram containing damage shaders.
+ \param[in] programParams Parameters for the NvBlastDamageProgram.
+ */
+ virtual void damage(const NvBlastDamageProgram& program, const NvBlastProgramParams* programParams) = 0;
+
+ /**
+ Apply damage to this actor.
+
+ Actual damage processing is deferred till the group process() call. Sets actor in 'pending' state.
+
+ Damage Desc will be stacked into NvBlastProgramParams. NvBlastProgramParams will be passed into shader.
+
+ Material set on actor's family will be passed into NvBlastProgramParams.
+
+ \param[in] program A NvBlastDamageProgram containing damage shaders.
+ \param[in] damageDesc Parameters to be put in NvBlastProgramParams, have to be POD type (will be copied).
+ \param[in] descSize Size of damageDesc in bytes. Required to copy and store Damage Desc.
+ */
+ virtual void damage(const NvBlastDamageProgram& program, const void* damageDesc, uint32_t descSize) = 0;
+
+ /**
+ Apply damage to this actor.
+
+ Actual damage processing is deferred till the group process() call. Sets actor in 'pending' state.
+
+ Damage Desc will be stacked into NvBlastDamageProgram. NvBlastDamageProgram will be passed into shader.
+
+ This function overload explicitly sets a material to be passed into NvBlastProgramParams, it must be valid until the group sync() call.
+
+ \param[in] program A NvBlastDamageProgram containing damage shaders.
+ \param[in] damageDesc Parameters to be put in NvBlastDamageProgram, have to be POD type (will be copied).
+ \param[in] descSize Size of damageDesc in bytes. Required to copy and store Damage Desc.
+ \param[in] material Material to be passed into NvBlastProgramParams. Must be valid until the group sync() call.
+ */
+ virtual void damage(const NvBlastDamageProgram& program, const void* damageDesc, uint32_t descSize, const void* material) = 0;
+
+ /**
+ Creates fracture commands for the actor using an NvBlastMaterialFunction.
+
+ Cannot be called during group processing, in that case a warning will be raised and function will do nothing.
+
+ NOTE: Wrapper function over low-level function call, see NvBlastActorGenerateFracture for details.
+
+ \param[in,out] commands Target buffers to hold generated commands.
+ To avoid data loss, provide an entry for every support chunk and every bond in the original actor.
+ \param[in] program A NvBlastDamageProgram containing damage shaders.
+ \param[in] programParams Parameters for the NvBlastDamageProgram.
+ */
+ virtual void generateFracture(NvBlastFractureBuffers* commands, const NvBlastDamageProgram& program, const NvBlastProgramParams* programParams) const = 0;
+
+ /**
+ Function applies the direct fracture and breaks graph bonds/edges as necessary. Sets actor in 'pending' state if any bonds or chunks were damaged. Dispatches FractureCommand events.
+
+ NOTE: Calls NvBlastActorApplyFracture internally. see NvBlastActorApplyFracture for details.
+
+ \param[in,out] eventBuffers Target buffers to hold applied fracture events. May be NULL, in which case events are not reported.
+ To avoid data loss, provide an entry for every lower-support chunk and every bond in the original actor.
+ \param[in] commands The fracture commands to process.
+ */
+ virtual void applyFracture(NvBlastFractureBuffers* eventBuffers, const NvBlastFractureBuffers* commands) = 0;
+
+ /**
+ The number of joints currently attached to this actor.
+
+ \return the number of TkJoints that are currently attached to this actor.
+ */
+ virtual uint32_t getJointCount() const = 0;
+
+ /**
+ Retrieve an array of pointers (into the user-supplied buffer) to joints.
+
+ \param[out] joints A user-supplied array of TkJoint pointers.
+ \param[in] jointsSize The number of elements available to write into the joints array.
+
+ \return the number of TkJoint pointers written to the joints array.
+ */
+ virtual uint32_t getJoints(TkJoint** joints, uint32_t jointsSize) const = 0;
+};
+
+} // namespace Blast
+} // namespace Nv
+
+
+#endif // ifndef NVBLASTTKACTOR_H
diff --git a/NvBlast/sdk/toolkit/include/NvBlastTkAsset.h b/NvBlast/sdk/toolkit/include/NvBlastTkAsset.h
new file mode 100644
index 0000000..987da7d
--- /dev/null
+++ b/NvBlast/sdk/toolkit/include/NvBlastTkAsset.h
@@ -0,0 +1,136 @@
+/*
+* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#ifndef NVBLASTTKASSET_H
+#define NVBLASTTKASSET_H
+
+#include "NvBlastTkSerializable.h"
+#include "NvBlastTypes.h"
+#include "PxVec3.h"
+
+// Forward declarations
+struct NvBlastAsset;
+
+
+namespace Nv
+{
+namespace Blast
+{
+
+/**
+A descriptor stored by a TkAsset for an internal joint. Internal joints are created when a TkAsset is instanced into a TkActor.
+*/
+struct TkAssetJointDesc
+{
+ uint32_t nodeIndices[2]; //!< The graph node indices corresponding to the support chunks joined by a joint
+ physx::PxVec3 attachPositions[2]; //!< The joint's attachment positions in asset-local space
+};
+
+
+/**
+The static data associated with a destructible actor. TkAsset encapsulates an NvBlastAsset. In addition to the NvBlastAsset,
+the TkAsset stores joint descriptors (see TkAssetJointDesc).
+*/
+class TkAsset : public TkSerializable
+{
+public:
+ /**
+ Access to underlying low-level asset.
+
+ \return a pointer to the (const) low-level NvBlastAsset object.
+ */
+ virtual const NvBlastAsset* getAssetLL() const = 0;
+
+ /**
+ Get the number of chunks in this asset.
+
+ NOTE: Wrapper function over low-level function call, see NvBlastAssetGetChunkCount for details.
+
+ \return the number of chunks in the asset.
+ */
+ virtual uint32_t getChunkCount() const = 0;
+
+ /**
+ Get the number of leaf chunks in the given asset.
+
+ NOTE: Wrapper function over low-level function call, see NvBlastAssetGetLeafChunkCount for details.
+
+ \return the number of leaf chunks in the asset.
+ */
+ virtual uint32_t getLeafChunkCount() const = 0;
+
+ /**
+ Get the number of bonds in the given asset.
+
+ NOTE: Wrapper function over low-level function call, see NvBlastAssetGetBondCount for details.
+
+ \return the number of bonds in the asset.
+ */
+ virtual uint32_t getBondCount() const = 0;
+
+ /**
+ Access an array of chunks of the given asset.
+
+ NOTE: Wrapper function over low-level function call, see NvBlastAssetGetChunks for details.
+
+ \return a pointer to an array of chunks of the asset.
+ */
+ virtual const NvBlastChunk* getChunks() const = 0;
+
+ /**
+ Access an array of bonds of the given asset.
+
+ NOTE: Wrapper function over low-level function call, see NvBlastAssetGetBonds for details.
+
+ \return a pointer to an array of bonds of the asset.
+ */
+ virtual const NvBlastBond* getBonds() const = 0;
+
+ /**
+ Access the support graph for the given asset.
+
+ NOTE: Wrapper function over low-level function call, see NvBlastAssetGetSupportGraph for details.
+
+ \return a struct describing the support graph of the given asset.
+ */
+ virtual const NvBlastSupportGraph getGraph() const = 0;
+
+ /**
+ Retrieve the size (in bytes) of the LL asset.
+
+ NOTE: Wrapper function over low-level function call, see NvBlastAssetGetSize for details.
+
+ \return the size of the data block (in bytes).
+ */
+ virtual uint32_t getDataSize() const = 0;
+
+ /**
+ The number of internal TkJoint objects that will be created when this asset is instanced into a TkActor
+ (see TkFramework::createActor). These joints will not trigger TkJointUpdateEvent events
+ until this actor is split into actors such that a joint connects two actors. At this time the actor's family
+ will dispatch a TkJointUpdateEvent::External event during a call to TkGroup::sync() (see TkGroup).
+
+ \return the number of descriptors for internal joints.
+ */
+ virtual uint32_t getJointDescCount() const = 0;
+
+ /**
+ The descriptors for the internal joints created when this asset is instanced. (See getJointDescCount.)
+
+ \return a pointer to the array of descriptors for internal joints.
+ */
+ virtual const TkAssetJointDesc* getJointDescs() const = 0;
+};
+
+} // namespace Blast
+} // namespace Nv
+
+
+#endif // ifndef NVBLASTTKASSET_H
diff --git a/NvBlast/sdk/toolkit/include/NvBlastTkEvent.h b/NvBlast/sdk/toolkit/include/NvBlastTkEvent.h
new file mode 100644
index 0000000..1e640f1
--- /dev/null
+++ b/NvBlast/sdk/toolkit/include/NvBlastTkEvent.h
@@ -0,0 +1,166 @@
+/*
+* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#ifndef NVBLASTTKEVENT_H
+#define NVBLASTTKEVENT_H
+
+#include <vector>
+
+#include "NvBlastTypes.h"
+
+
+namespace Nv
+{
+namespace Blast
+{
+
+// Forward declarations
+class TkObject;
+class TkActor;
+class TkFamily;
+class TkJoint;
+
+
+/**
+By the time events are dispatched, a specific TkActor may have been split and become invalid.
+This structure caches the state before invalidation happened.
+*/
+struct TkActorData
+{
+ TkFamily* family; //!< TkFamily of the originating TkActor
+ void* userData; //!< TkActor.userData of the originating TkActor
+ uint32_t index; //!< TkActor::getIndex() of the originating TkActor
+};
+
+
+/**
+Event data dispatched to TkEventListener objects. The user may implement the abstract TkEventListener interface
+and pass the listener object to a BlastTk object which dispatches events. (See for example TkFamily.)
+*/
+struct TkEvent
+{
+ // Enums
+ enum Type
+ {
+ Split, //!< Sent when a TkActor is split. See TkSplitEvent.
+ FractureCommand, //!< Sent when a TkActor generated fracture commands using TkActor::generateFracture.
+ FractureEvent, //!< Sent when a TkActor is fractured using TkActor::applyFracture.
+ JointUpdate, //!< Sent when TkJoints change their attachment state. See TkJointUpdateEvent.
+
+ TypeCount
+ };
+
+ // Data
+ const void* payload; //!< Type-dependent payload data
+ Type type; //!< See the Type enum, above
+
+ /**
+ Casts the payload data into its type-dependent format.
+
+ \return the payload for an event of type T
+ */
+ template<typename T>
+ const T* getPayload() const { return reinterpret_cast<const T*>(payload); }
+};
+
+
+/**
+Payload for TkEvent::Split events
+
+When this event is sent, the parent TkActor that was split is no longer valid. Therefore it is not referenced
+directly in the event data. Instead, its TkFamily, index within the TkFamily, and userData are stored. In addition,
+this event gives the child TkActors generated by the split.
+*/
+struct TkSplitEvent
+{
+ enum { EVENT_TYPE = TkEvent::Split };
+
+ TkActorData parentData; //!< The data of parent TkActor that was split
+ uint32_t numChildren; //!< The number of children into which the parent TkActor was split
+ TkActor** children; //!< An array of pointers to the children into which the TkActor was split
+};
+
+
+/**
+Payload for the TkEvent::FractureCommand events
+
+Fracture Commands used to apply fracture to a TkActor.
+*/
+struct TkFractureCommands
+{
+ enum { EVENT_TYPE = TkEvent::FractureCommand };
+
+ TkActorData tkActorData; //!< The data of TkActor that received the fracture command
+ NvBlastFractureBuffers buffers; //!< The fracture commands used to modify the TkActor
+};
+
+
+/**
+Payload for the TkEvent::FractureEvent events
+
+Fracture Events resulting from applying fracture to a TkActor.
+*/
+struct TkFractureEvents
+{
+ enum { EVENT_TYPE = TkEvent::FractureEvent };
+
+ TkActorData tkActorData; //!< The data of TkActor that received the fracture command
+ NvBlastFractureBuffers buffers; //!< The fracture result of the modified TkActor
+ uint32_t bondsDamaged; //!< number of damaged bonds (health remains)
+ uint32_t bondsBroken; //!< number of broken bonds (health exhausted)
+ uint32_t chunksDamaged; //!< number of damaged chunks (health remains) including child chunks
+ uint32_t chunksBroken; //!< number of broken chunks (health exhausted) including child chunks
+};
+
+
+/**
+Payload for the TkEvent::JointUpdate events
+
+Event type sent when a TkJoint's TkActor references change. This may indicate a joint becoming external,
+simply changing actors when split events occur on one or both of the actors, or when one or both of the actors
+are destroyed.
+*/
+struct TkJointUpdateEvent
+{
+ enum { EVENT_TYPE = TkEvent::JointUpdate };
+
+ enum Subtype
+ {
+ External, //!< A joint that used to be internal to a single TkActor now joins two different TkActors
+ Changed, //!< One or both of the joint's attached TkActors has changed. The previous TkActors were already distinct, differentiating this from the External case
+ Unreferenced //!< The joint's actors have been set to NULL. The joint will not be used again, and the user may release the TkJoint at this time
+ };
+
+ TkJoint* joint; //!< The joint being updated
+ Subtype subtype; //!< The type of update event this is (see Subtype)
+};
+
+
+/**
+Interface for a listener of TkEvent data. The user must implement this interface and pass it
+to the object which will dispatch the events.
+*/
+class TkEventListener
+{
+public:
+ /**
+ Interface to be implemented by the user. Events will be sent by BlastTk through a call to this function.
+
+ \param[in] events The array of events being dispatched.
+ \param[in] eventCount The number of events in the array.
+ */
+ virtual void receive(const TkEvent* events, uint32_t eventCount) = 0;
+};
+
+} // namespace Blast
+} // namespace Nv
+
+
+#endif // ifndef NVBLASTTKEVENT_H
diff --git a/NvBlast/sdk/toolkit/include/NvBlastTkFamily.h b/NvBlast/sdk/toolkit/include/NvBlastTkFamily.h
new file mode 100644
index 0000000..be6edd6
--- /dev/null
+++ b/NvBlast/sdk/toolkit/include/NvBlastTkFamily.h
@@ -0,0 +1,124 @@
+/*
+* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#ifndef NVBLASTTKFAMILY_H
+#define NVBLASTTKFAMILY_H
+
+#include "NvBlastTkSerializable.h"
+
+
+// Forward declarations
+struct NvBlastFamily;
+
+
+namespace Nv
+{
+namespace Blast
+{
+
+// Forward declarations
+class TkActor;
+class TkAsset;
+class TkEventListener;
+class TkGroup;
+
+/**
+The TkFamily is associated with the TkActor that is instanced from a TkAsset, as well as all descendent TkActors generated
+by splitting TkActors within the family. It encapsulates an NvBlastFamily, and also holds a material which will be used
+by default on all TkActors during damage functions.
+*/
+class TkFamily : public TkSerializable
+{
+public:
+ /**
+ Access to underlying low-level family.
+
+ \return a pointer to the (const) low-level NvBlastFamily object.
+ */
+ virtual const NvBlastFamily* getFamilyLL() const = 0;
+
+ /**
+ Every family has an associated asset (the TkAsset which was instanced to create the first member of the family).
+
+ \return a pointer to the (const) TkAsset object.
+ */
+ virtual const TkAsset* getAsset() const = 0;
+
+ /**
+ The number of actors currently in this family.
+
+ \return the number of TkActors that currently exist in this family.
+ */
+ virtual uint32_t getActorCount() const = 0;
+
+ /**
+ Retrieve an array of pointers (into the user-supplied buffer) to actors.
+
+ \param[out] buffer A user-supplied array of TkActor pointers.
+ \param[in] bufferSize The number of elements available to write into buffer.
+ \param[in] indexStart The starting index of the actor.
+
+ \return the number of TkActor pointers written to the buffer.
+ */
+ virtual uint32_t getActors(TkActor** buffer, uint32_t bufferSize, uint32_t indexStart = 0) const = 0;
+
+ /**
+ Add a user implementation of TkEventListener to this family's list of listeners. These listeners will receive
+ all split and fracture events generated by TkActor objects in this family. They will also receive joint update events
+ when TkJoint objects are updated that are (or were) associated with a TkActor in this family.
+
+ \param[in] l The event listener to add.
+ */
+ virtual void addListener(TkEventListener& l) = 0;
+
+ /**
+ Remove a TkEventListener from this family's list of listeners.
+
+ \param[in] l The event listener to remove.
+ */
+ virtual void removeListener(TkEventListener& l) = 0;
+
+ /**
+ This function applies fracture buffers on relevant actors (actor which contains corresponding bond/chunk) in family.
+
+ \param[in] commands The fracture commands to process.
+ */
+ virtual void applyFracture(const NvBlastFractureBuffers* commands) = 0;
+
+ /**
+ A function to reinitialize this family with a new family. The new family must be created from the same low-level asset, but can be
+ in any other state. As a result, split events (TkEvent::Split) will be dispatched reflecting the resulting changes (created and removed actors).
+ Afterwards the family will contain a copy of the new family and all actors' low-level actor pointers will be updated.
+
+ \param[in] newFamily The NvBlastFamily to use to reinitialize this family.
+ \param[in] group The group for new actors to be placed in.
+ */
+ virtual void reinitialize(const NvBlastFamily* newFamily, TkGroup* group = nullptr) = 0;
+
+ /**
+ The default material to be passed into NvBlastDamageProgram when a TkActor in this family is damaged.
+
+ \return a pointer to the default material.
+ */
+ virtual const void* getMaterial() const = 0;
+
+ /**
+ Set the default material to be passed into NvBlastDamageProgram when a TkActor in this family is damaged. Must be valid till group sync() call.
+
+ \param[in] material The material to be the new default.
+ */
+ virtual void setMaterial(const void* material) = 0;
+};
+
+} // namespace Blast
+} // namespace Nv
+
+
+#endif // ifndef NVBLASTTKFAMILY_H
diff --git a/NvBlast/sdk/toolkit/include/NvBlastTkFramework.h b/NvBlast/sdk/toolkit/include/NvBlastTkFramework.h
new file mode 100644
index 0000000..353eebd
--- /dev/null
+++ b/NvBlast/sdk/toolkit/include/NvBlastTkFramework.h
@@ -0,0 +1,365 @@
+/*
+* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#ifndef NVBLASTTKFRAMEWORK_H
+#define NVBLASTTKFRAMEWORK_H
+
+
+#include "NvBlastTkType.h"
+#include "NvBlastTkEvent.h"
+
+#include "NvBlastPreprocessor.h"
+#include "NvBlastTypes.h"
+
+#include "PxVec3.h"
+
+
+// Forward declarations
+namespace physx
+{
+class PxErrorCallback;
+class PxAllocatorCallback;
+class PxTransform;
+namespace general_PxIOStream2
+{
+class PxFileBuf;
+}
+}
+
+
+namespace Nv
+{
+namespace Blast
+{
+
+// Forward declarations
+class TkObject;
+class TkEventDispatcher;
+class TkAsset;
+struct TkGroupDesc;
+class TkGroup;
+class TkActor;
+class TkJoint;
+class TkSerializable;
+class TkIdentifiable;
+struct TkAssetJointDesc;
+
+
+
+/**
+Descriptor for framework creation.
+
+The TkFramework uses PxShared callbacks for messages and allocation.
+*/
+struct TkFrameworkDesc
+{
+ physx::PxErrorCallback* errorCallback; //!< User-defined message callback (see PxErrorCallback)
+ physx::PxAllocatorCallback* allocatorCallback; //!< User-defined allocation callback (see PxAllocatorCallback)
+};
+
+
+/**
+Descriptor for asset creation
+
+Used to create a TkAsset. This may be used by an authoring tool to describe the asset to be created.
+
+The TkAssetDesc is a simple extension of the low-level NvBlastAsset descriptor, NvBlastAssetDesc.
+*/
+struct TkAssetDesc : public NvBlastAssetDesc
+{
+ /**
+ Flags which may be associated with each bond described in the base NvBlastAssetDesc.
+ */
+ enum BondFlags
+ {
+ NoFlags = 0,
+
+ /**
+ If this flag is set then a TkJoint will be created joining the support chunks joined by the bond.
+
+ These joints will remain "quiet" until the actor is split in such a way that the joint joins two
+ different actors. In that case, a TkJointUpdateEvent will be dispatched with subtype External.
+ (See TkJointUpdateEvent.)
+ */
+ BondJointed = (1 << 0)
+ };
+
+ /**
+ An array of size bondCount, see BondFlags.
+ If NULL, all flags are assumed to be NoFlags.
+ */
+ const uint8_t* bondFlags;
+
+ /** Constructor sets sane default values. The zero chunkCount will cause TkFramework::createAsset(...) to fail, though gracefully. */
+ TkAssetDesc() : bondFlags(nullptr)
+ {
+ chunkCount = bondCount = 0;
+ chunkDescs = nullptr;
+ bondDescs = nullptr;
+ }
+};
+
+
+/**
+Descriptor for actor creation.
+
+The TkActorDesc is a simple extension of the low-level NvBlastActor descriptor, NvBlastActorDesc.
+*/
+struct TkActorDesc : public NvBlastActorDesc
+{
+ const TkAsset* asset; //!< The TkAsset to instance
+
+ /** Constructor sets sane default values */
+ TkActorDesc(const TkAsset* inAsset = nullptr) : asset(inAsset)
+ {
+ uniformInitialBondHealth = uniformInitialLowerSupportChunkHealth = 1.0f;
+ initialBondHealths = initialSupportChunkHealths = nullptr;
+ }
+};
+
+
+/**
+Descriptor for joint creation.
+*/
+struct TkJointDesc
+{
+ TkFamily* families[2]; //!< The TkFamily objects containing the chunks joined by the joint
+ uint32_t chunkIndices[2]; //!< The chunk indices within the corresponding TkFamily objects joined by the joint. The indexed chunks will be support chunks.
+ physx::PxVec3 attachPositions[2]; //!< The position of the joint relative to each TkActor which owns the chunks joined by this joint
+};
+
+
+/**
+Struct-enum to index object types handled by the framework
+*/
+struct TkTypeIndex
+{
+ enum Enum
+ {
+ Asset = 0, //!< TkAsset object type
+ Family, //!< TkFamily object type
+ Group, //!< TkGroup object type
+
+ TypeCount
+ };
+};
+
+
+/**
+BlastTk Framework.
+
+The framework exists as a singleton and is used to create objects, deserialize object streams, and hold references
+to identified objects (TkAsset, TkFamily, and TkGroup) which may be recalled by their GUIDs.
+*/
+class TkFramework
+{
+public:
+ /**
+ Release this framework and all contained objects.
+ Global singleton is set to NULL.
+ */
+ virtual void release() = 0;
+
+ /**
+ Access to the error callback set by the user.
+ */
+ virtual physx::PxErrorCallback& getErrorCallback() const = 0;
+
+ /**
+ Access to the allocator callback set by the user.
+ */
+ virtual physx::PxAllocatorCallback& getAllocatorCallback() const = 0;
+
+ /**
+ Access to a log function which can be used in Blast low-level calls.
+ This function uses the user-supplied PxErrorCallback (see TkFrameworkDesc).
+ */
+ virtual NvBlastLog getLogFn() const = 0;
+
+ /**
+ Deserialize an object from the given stream. Only objects derived from TkSerializable may be serialized and
+ deserialized. Use the parent class method TkIdentifiable::getType() to know the type to which to cast the object.
+
+ Notes for different classes:
+
+ TkAsset: deserializing a serialized TkAsset will recreate the asset in memory with the same NvBlastID (see
+ TkIdentifiable::getID()) as the original asset.
+
+ TkFamily: deserializing a serialized TkFamily will generate all TkActor and TkJoint objects that were originally
+ contained in the family. The TkAsset which generated the family must exist at the time the family is deserialized.
+
+ \param[in] stream User-defined stream object.
+
+ \return a pointer to the deserialized object if successful, or NULL if unsuccessful.
+ */
+ virtual TkSerializable* deserialize(physx::general_PxIOStream2::PxFileBuf& stream) = 0;
+
+ /**
+ To find the type information for a given TkIdentifiable-derived class, use this function with the TkTypeIndex::Enum
+ corresponding to the desired class name.
+
+ \param[in] typeIndex Enumerated object type (see TkTypeIndex).
+
+ \return type object associated with the object's class.
+ */
+ virtual const TkType* getType(TkTypeIndex::Enum typeIndex) const = 0;
+
+ /**
+ Look up an object derived from TkIdentifiable by its ID.
+
+ \param[in] id The ID of the object to look up (see NvBlastID).
+
+ \return a pointer to the object if it exists, NULL otherwise.
+ */
+ virtual TkIdentifiable* findObjectByID(const NvBlastID& id) const = 0;
+
+ /**
+ The number of TkIdentifiable-derived objects in the framework of the given type.
+
+ \param[in] type The type object for the given type.
+
+ \return the number of objects that currently exist of the given type.
+ */
+ virtual uint32_t getObjectCount(const TkType& type) const = 0;
+
+ /**
+ Retrieve an array of pointers (into the user-supplied buffer) to TkIdentifiable-derived objects of the given type.
+
+ \param[out] buffer A user-supplied array of TkIdentifiable pointers.
+ \param[in] bufferSize The number of elements available to write into buffer.
+ \param[in] type The type object for the given type.
+ \param[in] indexStart The starting index of the object.
+
+ \return the number of TkIdentifiable pointers written to the buffer.
+ */
+ virtual uint32_t getObjects(TkIdentifiable** buffer, uint32_t bufferSize, const TkType& type, uint32_t indexStart = 0) const = 0;
+
+ //////// Asset creation ////////
+ /**
+ Helper function to build and apply chunk reorder map, so that chunk descriptors are properly ordered for the createAsset function.
+
+ This is a convenience wrapper for the low-level NvBlastReorderAssetDescChunks function.
+
+ This function may modify both the chunkDescs and bondDescs array, since rearranging chunk descriptors requires re-indexing within the bond descriptors.
+
+ \param[in] chunkDescs Array of chunk descriptors of size chunkCount. It will be updated accordingly.
+ \param[in] chunkCount The number of chunk descriptors.
+ \param[in] bondDescs Array of bond descriptors of size bondCount. It will be updated accordingly.
+ \param[in] bondCount The number of bond descriptors.
+ \param[in] chunkReorderMap If not NULL, must be a pointer to a uint32_t array of size desc.chunkCount. Maps old chunk indices to the reordered chunk indices.
+
+ \return true iff the chunks did not require reordering (chunkReorderMap is the identity map).
+ */
+ virtual bool reorderAssetDescChunks(NvBlastChunkDesc* chunkDescs, uint32_t chunkCount, NvBlastBondDesc* bondDescs, uint32_t bondCount, uint32_t* chunkReorderMap = nullptr) const = 0;
+
+ /**
+ Helper function to ensure (check and update) support coverage of chunks, required for asset creation via the createAsset function.
+
+ This is a convenience wrapper for the low-level NvBlastEnsureAssetExactSupportCoverage function.
+
+ The chunk descriptors may have their support flags be modified to ensure exact coverage.
+
+ \param[in] chunkDescs An array of chunk descriptors.
+ \param[in] chunkCount The size of the chunkDescs array.
+
+ \return true iff coverage was already exact.
+ */
+ virtual bool ensureAssetExactSupportCoverage(NvBlastChunkDesc* chunkDescs, uint32_t chunkCount) const = 0;
+
+ /**
+ Create an asset from the given descriptor.
+
+ \param[in] desc The asset descriptor (see TkAssetDesc).
+
+ \return the created asset, if the descriptor was valid and memory was available for the operation. Otherwise, returns NULL.
+ */
+ virtual TkAsset* createAsset(const TkAssetDesc& desc) = 0;
+
+ /**
+ Create an asset from a low-level NvBlastAsset.
+
+ \param[in] assetLL The low-level NvBlastAsset to encapsulate.
+ \param[in] jointDescs Optional joint descriptors to add to the new asset.
+ \param[in] jointDescCount The number of joint descriptors in the jointDescs array. If non-zero, jointDescs cannot be NULL.
+ \param[in] ownsAsset Does this TkAsset own the NvBlastAsset and thus is responsible for freeing it.
+
+ \return the created asset, if memory was available for the operation. Otherwise, returns NULL.
+ */
+ virtual TkAsset* createAsset(const NvBlastAsset* assetLL, Nv::Blast::TkAssetJointDesc* jointDescs = nullptr, uint32_t jointDescCount = 0, bool ownsAsset = false) = 0;
+
+ //////// Group creation ////////
+ /**
+ Create a group from the given descriptor. A group is a processing unit, to which the user may add TkActors. New actors generated
+ from splitting a TkActor are automatically put into the same group. However, any actor may be removed from its group and optionally
+ placed into another group, or left groupless.
+
+ \param[in] desc The group descriptor (see TkGroupDesc).
+
+ \return the created group, if the descriptor was valid and memory was available for the operation. Otherwise, returns NULL.
+ */
+ virtual TkGroup* createGroup(const TkGroupDesc& desc) = 0;
+
+ //////// Actor creation ////////
+ /**
+ Create an actor from the given descriptor. The actor will be the first member of a new TkFamily.
+
+ \param[in] desc The actor descriptor (see TkActorDesc).
+
+ \return the created actor, if the descriptor was valid and memory was available for the operation. Otherwise, returns NULL.
+ */
+ virtual TkActor* createActor(const TkActorDesc& desc) = 0;
+
+ //////// Joint creation ////////
+ /**
+ Create a joint from the given descriptor. The following restrictions apply:
+
+ * Only one joint may be created between any two support chunks.
+
+ * A joint cannot be created between chunks within the same actor using this method. See TkAssetDesc for a description of
+ bond joint flags, which will create internal joints within an actor.
+
+ \param[in] desc The joint descriptor (see TkJointDesc).
+
+ \return the created joint, if the descriptor was valid and memory was available for the operation. Otherwise, returns NULL.
+ */
+ virtual TkJoint* createJoint(const TkJointDesc& desc) = 0;
+
+protected:
+ /**
+ Destructor is virtual and not public - use the release() method instead of explicitly deleting the TkFramework
+ */
+ virtual ~TkFramework() {}
+};
+
+} // namespace Blast
+} // namespace Nv
+
+
+//////// Global API to Create and Access Framework ////////
+
+/**
+Create a new TkFramework. This creates a global singleton, and will fail if a TkFramework object already exists.
+
+\param[in] desc The descriptor used to create the new framework (see TkFrameworkDesc).
+
+\return the new TkFramework if successful, NULL otherwise.
+*/
+NVBLAST_API Nv::Blast::TkFramework* NvBlastTkFrameworkCreate(const Nv::Blast::TkFrameworkDesc& desc);
+
+
+/**
+Retrieve a pointer to the global TkFramework singleton (if it exists).
+
+\return the pointer to the global TkFramework (NULL if none exists).
+*/
+NVBLAST_API Nv::Blast::TkFramework* NvBlastTkFrameworkGet();
+
+
+#endif // ifndef NVBLASTTKFRAMEWORK_H
diff --git a/NvBlast/sdk/toolkit/include/NvBlastTkGroup.h b/NvBlast/sdk/toolkit/include/NvBlastTkGroup.h
new file mode 100644
index 0000000..585cccb
--- /dev/null
+++ b/NvBlast/sdk/toolkit/include/NvBlastTkGroup.h
@@ -0,0 +1,133 @@
+/*
+* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#ifndef NVBLASTTKGROUP_H
+#define NVBLASTTKGROUP_H
+
+#include "NvBlastTkIdentifiable.h"
+
+
+// Forward declarations
+namespace physx
+{
+class PxTaskManager;
+}
+
+
+namespace Nv
+{
+namespace Blast
+{
+
+// Forward declarations
+class TkActor;
+
+
+/**
+Descriptor for a TkGroup. It uses the PxShared PxTaskManager interface to dispatch PxLightCpuTask.
+@see TkWorker
+*/
+struct TkGroupDesc
+{
+ physx::PxTaskManager* pxTaskManager; //!< User-defined task manager
+};
+
+
+/**
+Used to collect internal counters using TkGroup::getStats (for profile builds only)
+@see TkGroup::getStats()
+*/
+struct TkGroupStats
+{
+ NvBlastTimers timers; //!< Accumulated time spent in blast low-level functions, see NvBlastTimers
+ uint32_t processedActorsCount; //!< Accumulated number of processed actors in all TkWorker
+ int64_t workerTime; //!< Accumulated time spent executing TkWorker::run. Unit is ticks, see NvBlastTimers.
+};
+
+
+/**
+A group is a processing unit, to which the user may add TkActors. New actors generated from splitting a TkActor
+are automatically put into the same group. However, any actor may be removed from its group and placed into
+another group (or no group) by the user's choice.
+
+When the group's process function is called, all actors' damage buffers will be processed and turned into fracture events
+and the actor is split if applicable.
+This work is done in separate (possibly multiple) threads. The sync function waits for the processing threads to finish
+and dispatches events for processing that actually occurred.
+*/
+class TkGroup : public TkIdentifiable
+{
+public:
+ /**
+ Add the actor to this group, if the actor does not currently belong to a group.
+
+ \param[in] actor The actor to add.
+
+ \return true if successful, false otherwise.
+ */
+ virtual bool addActor(TkActor& actor) = 0;
+
+ /**
+ The number of actors currently in this group.
+
+ \return the number of TkActors that currently exist in this group.
+ */
+ virtual uint32_t getActorCount() const = 0;
+
+ /**
+ Retrieve an array of pointers (into the user-supplied buffer) to actors.
+
+ \param[out] buffer A user-supplied array of TkActor pointers.
+ \param[in] bufferSize The number of elements available to write into buffer.
+ \param[in] indexStart The starting index of the actor.
+
+ \return the number of TkActor pointers written to the buffer.
+ */
+ virtual uint32_t getActors(TkActor** buffer, uint32_t bufferSize, uint32_t indexStart = 0) const = 0;
+
+ /**
+ TkActors that have been damaged with applyFracture() such that they may be split into separate
+ actors are split by this function. TkActors that have damage queued through the actor's damage() function
+ will be fractured and split by this function.
+ Fracture and splitting work will be run on different threads provided through TkGroupDesc::pxTaskManager.
+ All work is done asynchronously, and the results are gathered by the sync() function.
+
+ Note: The number of threads provided by pxTaskManager must not change over the group's lifetime.
+
+ \return true if processing may be launched (this group is not currently processing), false otherwise.
+ */
+ virtual bool process() = 0;
+
+ /**
+ If all threads spawned by process() have finished, and sync() has not yet been called since, then this
+ function gathers the results of the split operations on the actors in this group. Events will be dispatched
+ to notify listeners of new and deleted actors.
+
+ \param[in] block If true, this function waits until all threads have completed execution, then performs the gather and dispatch work.
+ If false, this function will perform the gather and dispatch work only if threads have completed execution, otherwise it returns immediately.
+
+ \return true if gather and dispatch work have been performed, false otherwise.
+ */
+ virtual bool sync(bool block = true) = 0;
+
+ /**
+ For profile builds only, request stats of the last successful processing. Inactive in other builds.
+ The times and counters reported account for all the TkWorker (accumulated) taking part in the processing.
+
+ \param[out] stats The struct to be filled in.
+ */
+ virtual void getStats(TkGroupStats& stats) const = 0;
+};
+
+} // namespace Blast
+} // namespace Nv
+
+
+#endif // ifndef NVBLASTTKGROUP_H
diff --git a/NvBlast/sdk/toolkit/include/NvBlastTkIdentifiable.h b/NvBlast/sdk/toolkit/include/NvBlastTkIdentifiable.h
new file mode 100644
index 0000000..6efe954
--- /dev/null
+++ b/NvBlast/sdk/toolkit/include/NvBlastTkIdentifiable.h
@@ -0,0 +1,61 @@
+/*
+* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#ifndef NVBLASTTKIDENTIFIABLE_H
+#define NVBLASTTKIDENTIFIABLE_H
+
+
+#include "NvBlastTkObject.h"
+
+#include "NvBlastTypes.h"
+
+
+namespace Nv
+{
+namespace Blast
+{
+
+// Forward declarations
+class TkType;
+
+
+/**
+TkIdentifiable objects have getID and setID methods for individual objects. They also have a type (class) identification.
+*/
+class TkIdentifiable : public TkObject
+{
+public:
+ // Identifiable API
+
+ /**
+ Return the ID associated with this object.
+
+ \return the ID for this object.
+ */
+ virtual const NvBlastID& getID() const = 0;
+
+ /**
+ Set the ID for this object.
+ */
+ virtual void setID(const NvBlastID& id) = 0;
+
+ /**
+ Access to the static (class) type data for this object.
+
+ \return the static type data for this object type.
+ */
+ virtual const TkType& getType() const = 0;
+};
+
+} // namespace Blast
+} // namespace Nv
+
+
+#endif // ifndef NVBLASTTKIDENTIFIABLE_H
diff --git a/NvBlast/sdk/toolkit/include/NvBlastTkJoint.h b/NvBlast/sdk/toolkit/include/NvBlastTkJoint.h
new file mode 100644
index 0000000..1fc41c0
--- /dev/null
+++ b/NvBlast/sdk/toolkit/include/NvBlastTkJoint.h
@@ -0,0 +1,60 @@
+/*
+* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#ifndef NVBLASTTKJOINT_H
+#define NVBLASTTKJOINT_H
+
+#include "NvBlastTkObject.h"
+
+#include "PxVec3.h"
+
+
+namespace Nv
+{
+namespace Blast
+{
+
+/**
+The data contained in a TkJoint.
+*/
+struct TkJointData
+{
+ TkActor* actors[2]; //!< The TkActor objects joined by the joint
+ uint32_t chunkIndices[2]; //!< The chunk indices within the corresponding TkActor objects joined by the joint. The indexed chunks will be support chunks.
+ physx::PxVec3 attachPositions[2]; //!< The position of the joint relative to each TkActor
+};
+
+
+/**
+A TkJoint may join two different TkActors, or be created internally within a single TkActor.
+
+When a TkActor is created from a TkAsset with jointed bonds (the asset is created using a TkAssetDesc with joint flags on bonds, see TkActorDesc) then
+internal TkJoint objects are created and associated with every TkActor created from that TkAsset. The user only gets notification of the internal TkJoint
+objects when the TkActor is split into separate TkActor objects that hold the support chunks joined by an internal TkJoint.
+
+The user will be notified when the TkActor objects that are attached to TkJoint objects change, or are released. In that case, a TkEvent with
+a TkJointUpdateEvent payload is dispatched to TkEventListener objects registered with the TkFamily objects to which the actors belong.
+*/
+class TkJoint : public TkObject
+{
+public:
+ /**
+ Retrieve data in this joint.
+
+ \return a TkJointData containing this joint's data.
+ */
+ virtual const TkJointData getData() const = 0;
+};
+
+} // namespace Blast
+} // namespace Nv
+
+
+#endif // ifndef NVBLASTTKJOINT_H
diff --git a/NvBlast/sdk/toolkit/include/NvBlastTkObject.h b/NvBlast/sdk/toolkit/include/NvBlastTkObject.h
new file mode 100644
index 0000000..085e859
--- /dev/null
+++ b/NvBlast/sdk/toolkit/include/NvBlastTkObject.h
@@ -0,0 +1,57 @@
+/*
+* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#ifndef NVBLASTTKOBJECT_H
+#define NVBLASTTKOBJECT_H
+
+
+namespace Nv
+{
+namespace Blast
+{
+
+/**
+Base class for all objects in Tk. All TkObjects are releasable.
+*/
+class TkObject
+{
+public:
+ /**
+ Constructor clears userData.
+ */
+ TkObject() : userData(nullptr) {}
+
+ // Object API
+
+ /**
+ Release this object and free associated memory.
+ */
+ virtual void release() = 0;
+
+protected:
+ /**
+ Destructor is virtual and not public - use the release() method instead of explicitly deleting a TkObject
+ */
+ virtual ~TkObject() {}
+
+public:
+ // Data
+
+ /**
+ Pointer field available to the user.
+ */
+ void* userData;
+};
+
+} // namespace Blast
+} // namespace Nv
+
+
+#endif // ifndef NVBLASTTKOBJECT_H
diff --git a/NvBlast/sdk/toolkit/include/NvBlastTkSerializable.h b/NvBlast/sdk/toolkit/include/NvBlastTkSerializable.h
new file mode 100644
index 0000000..93b9b47
--- /dev/null
+++ b/NvBlast/sdk/toolkit/include/NvBlastTkSerializable.h
@@ -0,0 +1,60 @@
+/*
+* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#ifndef NVBLASTTKSERIALIZABLE_H
+#define NVBLASTTKSERIALIZABLE_H
+
+
+#include "NvBlastTkIdentifiable.h"
+
+
+// Forward declarations
+namespace physx
+{
+namespace general_PxIOStream2
+{
+class PxFileBuf;
+}
+}
+
+
+namespace Nv
+{
+namespace Blast
+{
+
+/**
+TkSerializable objects support the serialize interface, and are returned by TkFramework::deserialize.
+*/
+class TkSerializable : public TkIdentifiable
+{
+public:
+	/**
+	Write the object data to the user-defined PxFileBuf stream.
+
+	\param[in]	stream	User-defined stream object.
+
+	\return true if serialization was successful, false otherwise.
+	*/
+	virtual bool	serialize(physx::general_PxIOStream2::PxFileBuf& stream) const = 0;
+
+	// Data
+
+	/**
+	Integer field available to the user.  This data is serialized.
+	*/
+	uint64_t		userIntData = 0;	// Fix: zero-initialize so a freshly created object does not serialize an indeterminate value
+};
+
+} // namespace Blast
+} // namespace Nv
+
+
+#endif // ifndef NVBLASTTKSERIALIZABLE_H
diff --git a/NvBlast/sdk/toolkit/include/NvBlastTkType.h b/NvBlast/sdk/toolkit/include/NvBlastTkType.h
new file mode 100644
index 0000000..6f3afbf
--- /dev/null
+++ b/NvBlast/sdk/toolkit/include/NvBlastTkType.h
@@ -0,0 +1,65 @@
+/*
+* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#ifndef NVBLASTTKTYPE_H
+#define NVBLASTTKTYPE_H
+
+#include "NvBlastTypes.h"
+
+
+
+namespace Nv
+{
+namespace Blast
+{
+
+/**
+Interface for static (class) type data. This data is used for identification in streams,
+class-specific object queries in TkFramework, etc. Only classes derived from TkIdentifiable
+use TkType data.
+*/
+class TkType
+{
+public:
+ /**
+ The class name.
+
+ \return the class name.
+ */
+ virtual const char* getName() const = 0;
+
+ /**
+ The data format version for this class. When deserializing, this version must match the
+ current version. If not, the user may convert the data format using the format conversion
+ extension.
+
+ \return the version number.
+ */
+ virtual uint32_t getVersion() const = 0;
+
+ /**
+ Test for equality. This type is used in static (per-class) data, so types are equal exactly
+ when their addresses are equal.
+
+ \param[in] type The TkType to compare with this TkType.
+
+ \return true if this type equals the input type, false otherwise.
+ */
+ bool operator == (const TkType& type) const
+ {
+ return &type == this;
+ }
+};
+
+} // namespace Blast
+} // namespace Nv
+
+
+#endif // ifndef NVBLASTTKTYPE_H
diff --git a/NvBlast/sdk/toolkit/source/NvBlastTkActorImpl.cpp b/NvBlast/sdk/toolkit/source/NvBlastTkActorImpl.cpp
new file mode 100644
index 0000000..028e0f1
--- /dev/null
+++ b/NvBlast/sdk/toolkit/source/NvBlastTkActorImpl.cpp
@@ -0,0 +1,434 @@
+/*
+* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#include "NvBlastPreprocessor.h"
+
+#include "NvBlastTkFrameworkImpl.h"
+#include "NvBlastTkActorImpl.h"
+#include "NvBlastTkGroupImpl.h"
+#include "NvBlastTkAssetImpl.h"
+#include "NvBlastTkFamilyImpl.h"
+#include "NvBlastTkJointImpl.h"
+
+#include "NvBlast.h"
+#include "NvBlastAssert.h"
+#include "NvBlastMemory.h"
+
+#include "Px.h"
+#include "PxFileBuf.h"
+#include "PxAllocatorCallback.h"
+#include "PxTransform.h"
+
+using namespace physx::general_PxIOStream2;
+
+
+namespace Nv
+{
+namespace Blast
+{
+
+TkActorImpl* TkActorImpl::create(const TkActorDesc& desc)
+{
+	const TkAssetImpl* asset = static_cast<const TkAssetImpl*>(desc.asset);
+
+	// A new family is created for every first actor; it must be released if low-level actor creation fails below
+	TkFamilyImpl* family = TkFamilyImpl::create(asset);
+
+	NvBlastFamily* familyLL = family->getFamilyLLInternal();
+	TkArray<char>::type scratch((uint32_t)NvBlastFamilyGetRequiredScratchForCreateFirstActor(familyLL, TkFrameworkImpl::get()->log));
+	NvBlastActor* actorLL = NvBlastFamilyCreateFirstActor(familyLL, &desc, scratch.begin(), TkFrameworkImpl::get()->log);
+	if (actorLL == nullptr)
+	{
+		NVBLASTTK_LOG_ERROR("TkActorImpl::create: low-level actor could not be created.");
+		family->release();	// Fix: release the family created above so it is not leaked on failure
+		return nullptr;
+	}
+
+	TkActorImpl* actor = family->addActor(actorLL);
+
+	// Add internal joints
+	if (actor != nullptr)
+	{
+		const uint32_t internalJointCount = asset->getJointDescCountInternal();
+		const TkAssetJointDesc* jointDescs = asset->getJointDescsInternal();
+		const NvBlastSupportGraph graph = asset->getGraph();
+		TkJointImpl* joints = family->getInternalJoints();
+		for (uint32_t jointNum = 0; jointNum < internalJointCount; ++jointNum)
+		{
+			const TkAssetJointDesc& assetJointDesc = jointDescs[jointNum];
+			NVBLAST_ASSERT(assetJointDesc.nodeIndices[0] < graph.nodeCount && assetJointDesc.nodeIndices[1] < graph.nodeCount);
+			TkJointDesc jointDesc;
+			// Internal joints join chunks within the same family; map asset graph nodes to support chunk indices
+			jointDesc.families[0] = jointDesc.families[1] = family;
+			jointDesc.chunkIndices[0] = graph.chunkIndices[assetJointDesc.nodeIndices[0]];
+			jointDesc.chunkIndices[1] = graph.chunkIndices[assetJointDesc.nodeIndices[1]];
+			jointDesc.attachPositions[0] = assetJointDesc.attachPositions[0];
+			jointDesc.attachPositions[1] = assetJointDesc.attachPositions[1];
+			// Placement-new into the family's preallocated internal joint array
+			TkJointImpl* joint = new (joints + jointNum) TkJointImpl(jointDesc, family);
+			actor->addJoint(joint->m_links[0]);
+		}
+	}
+
+	return actor;
+}
+
+
+//////// Member functions ////////
+
+TkActorImpl::TkActorImpl()
+ : m_actorLL(nullptr)
+ , m_family(nullptr)
+ , m_group(nullptr)
+ , m_groupJobIndex(invalidIndex<uint32_t>())
+ , m_flags(0)
+ , m_jointCount(0)
+{
+#if NV_PROFILE
+ NvBlastTimersReset(&m_timers);
+#endif
+}
+
+
+TkActorImpl::~TkActorImpl()
+{
+}
+
+
+void TkActorImpl::release()
+{
+	// Disassociate all joints
+
+	// Copy joint array for safety against implementation of joint->setActor
+	TkJointImpl** joints = reinterpret_cast<TkJointImpl**>(NvBlastAlloca(sizeof(TkJointImpl*)*getJointCountInternal()));
+	TkJointImpl** stop = joints + getJointCountInternal();
+	TkJointImpl** jointHandle = joints;
+	for (JointIt j(*this); (bool)j; ++j)
+	{
+		*jointHandle++ = *j;
+	}
+	jointHandle = joints;
+	while (jointHandle < stop)
+	{
+		NVBLAST_ASSERT(*jointHandle != nullptr);
+		NVBLAST_ASSERT((*jointHandle)->getDataInternal().actors[0] == this || (*jointHandle)->getDataInternal().actors[1] == this);
+		// Detach this actor from the joint; presumably setActors updates the joint links (and thus m_jointCount) — the assert below relies on that
+		(*jointHandle++)->setActors(nullptr, nullptr);
+	}
+	NVBLAST_ASSERT(getJointCountInternal() == 0);
+
+	// Leave the group (if any) before deactivating the low-level actor
+	if (m_group != nullptr)
+	{
+		m_group->removeActor(*this);
+	}
+
+	// Deactivate the low-level actor; its memory is owned by the family, not freed here
+	if (m_actorLL != nullptr)
+	{
+		NvBlastActorDeactivate(m_actorLL, TkFrameworkImpl::get()->log);
+	}
+
+	if (m_family != nullptr)
+	{
+		m_family->removeActor(this);
+
+		// Make sure we dispatch any remaining events when this family is emptied, since it will no longer be done by any group
+		if (m_family->getActorCountInternal() == 0)
+		{
+			m_family->getQueue().dispatch();
+		}
+	}
+}
+
+
+const NvBlastActor* TkActorImpl::getActorLL() const
+{
+ return m_actorLL;
+}
+
+
+TkFamily& TkActorImpl::getFamily() const
+{
+ return getFamilyImpl();
+}
+
+
+uint32_t TkActorImpl::getIndex() const
+{
+ return getIndexInternal();
+}
+
+
+TkGroup* TkActorImpl::getGroup() const
+{
+ return getGroupImpl();
+}
+
+
+TkGroup* TkActorImpl::removeFromGroup()
+{
+	if (m_group == nullptr)
+	{
+		NVBLASTTK_LOG_WARNING("TkActorImpl::removeFromGroup: actor not in a group.");
+		return nullptr;
+	}
+
+	// Group membership may not change while the group is mid-processing
+	if (m_group->isProcessing())
+	{
+		NVBLASTTK_LOG_ERROR("TkActorImpl::removeFromGroup: cannot alter Group while processing.");
+		return nullptr;
+	}
+
+	// Cache the group pointer: removeActor is presumed to clear m_group on success — confirm against TkGroupImpl
+	TkGroup* group = m_group;
+
+	return m_group->removeActor(*this) ? group : nullptr;
+}
+
+
+NvBlastFamily* TkActorImpl::getFamilyLL() const
+{
+ return m_family->getFamilyLLInternal();
+}
+
+
+const TkAsset* TkActorImpl::getAsset() const
+{
+ return m_family->getAssetImpl();
+}
+
+
+uint32_t TkActorImpl::getVisibleChunkCount() const
+{
+ return NvBlastActorGetVisibleChunkCount(m_actorLL, TkFrameworkImpl::get()->log);
+}
+
+
+uint32_t TkActorImpl::getVisibleChunkIndices(uint32_t* visibleChunkIndices, uint32_t visibleChunkIndicesSize) const
+{
+ return NvBlastActorGetVisibleChunkIndices(visibleChunkIndices, visibleChunkIndicesSize, m_actorLL, TkFrameworkImpl::get()->log);
+}
+
+
+uint32_t TkActorImpl::getGraphNodeCount() const
+{
+ return NvBlastActorGetGraphNodeCount(m_actorLL, TkFrameworkImpl::get()->log);
+}
+
+
+uint32_t TkActorImpl::getGraphNodeIndices(uint32_t* graphNodeIndices, uint32_t graphNodeIndicesSize) const
+{
+ return NvBlastActorGetGraphNodeIndices(graphNodeIndices, graphNodeIndicesSize, m_actorLL, TkFrameworkImpl::get()->log);
+}
+
+
+const float* TkActorImpl::getBondHealths() const
+{
+ return NvBlastActorGetBondHealths(m_actorLL, TkFrameworkImpl::get()->log);
+}
+
+
+uint32_t TkActorImpl::getSplitMaxActorCount() const
+{
+ return NvBlastActorGetMaxActorCountForSplit(m_actorLL, TkFrameworkImpl::get()->log);
+}
+
+
+bool TkActorImpl::isDamaged() const
+{
+ NVBLAST_ASSERT(!m_flags.isSet(TkActorFlag::DAMAGED) || (m_flags.isSet(TkActorFlag::DAMAGED) && m_flags.isSet(TkActorFlag::PENDING)));
+ return m_flags.isSet(TkActorFlag::DAMAGED);
+}
+
+
+void TkActorImpl::markAsDamaged()
+{
+ m_flags |= TkActorFlag::DAMAGED;
+ makePending();
+}
+
+
+void TkActorImpl::makePending()
+{
+ if (m_group != nullptr && !isPending())
+ {
+ m_group->enqueue(this);
+ }
+
+ m_flags |= TkActorFlag::PENDING;
+}
+
+
+TkActorImpl::operator Nv::Blast::TkActorData() const
+{
+ TkActorData data = { m_family, userData, getIndex() };
+ return data;
+}
+
+
+void TkActorImpl::damage(const NvBlastDamageProgram& program, const NvBlastProgramParams* programParams)
+{
+ PERF_SCOPE_L("TkActor::damage");
+
+ if (m_group == nullptr)
+ {
+ NVBLASTTK_LOG_WARNING("TkActor::damage: actor is not in a group, cannot fracture.");
+ return;
+ }
+
+ if (m_group->isProcessing())
+ {
+ NVBLASTTK_LOG_WARNING("TkActor::damage: group is being processed, cannot fracture this actor.");
+ return;
+ }
+
+ if (NvBlastActorCanFracture(m_actorLL, TkFrameworkImpl::get()->log))
+ {
+ m_damageBuffer.pushBack(DamageData(program, programParams));
+ makePending();
+ }
+}
+
+
+void TkActorImpl::damage(const NvBlastDamageProgram& program, const void* damageDesc, uint32_t descSize)
+{
+ damage(program, damageDesc, descSize, m_family->getMaterial());
+}
+
+
+void TkActorImpl::damage(const NvBlastDamageProgram& program, const void* damageDesc, uint32_t descSize, const void* material)
+{
+ PERF_SCOPE_L("TkActor::damage");
+
+ if (m_group == nullptr)
+ {
+ NVBLASTTK_LOG_WARNING("TkActor::damage: actor is not in a group, cannot fracture.");
+ return;
+ }
+
+ if (m_group->isProcessing())
+ {
+ NVBLASTTK_LOG_WARNING("TkActor::damage: group is being processed, cannot fracture this actor.");
+ return;
+ }
+
+ if (NvBlastActorCanFracture(m_actorLL, TkFrameworkImpl::get()->log))
+ {
+ bool appended = false;
+ for (auto& damageData : m_damageBuffer)
+ {
+ if (damageData.tryAppend(program, material, damageDesc, descSize))
+ {
+ appended = true;
+ break;
+ }
+ }
+
+ if (!appended)
+ {
+ m_damageBuffer.pushBack(DamageData(program, material, damageDesc, descSize));
+ }
+
+ makePending();
+ }
+}
+
+
+void TkActorImpl::generateFracture(NvBlastFractureBuffers* commands, const NvBlastDamageProgram& program, const NvBlastProgramParams* programParams) const
+{
+ PERF_SCOPE_L("TkActor::generateFracture");
+
+ if (m_group && m_group->isProcessing())
+ {
+ NVBLASTTK_LOG_WARNING("TkActor::generateFracture: group is being processed, cannot fracture this actor.");
+ return;
+ }
+
+ // const context, must make m_timers mutable otherwise
+ NvBlastActorGenerateFracture(commands, m_actorLL, program, programParams, TkFrameworkImpl::get()->log, const_cast<NvBlastTimers*>(&m_timers));
+}
+
+
+void TkActorImpl::applyFracture(NvBlastFractureBuffers* eventBuffers, const NvBlastFractureBuffers* commands)
+{
+ PERF_SCOPE_L("TkActor::applyFracture");
+
+ if (m_group && m_group->isProcessing())
+ {
+ NVBLASTTK_LOG_WARNING("TkActor::applyFracture: group is being processed, cannot fracture this actor.");
+ return;
+ }
+
+ NvBlastActorApplyFracture(eventBuffers, m_actorLL, commands, TkFrameworkImpl::get()->log, &m_timers);
+
+ if (commands->chunkFractureCount > 0 || commands->bondFractureCount > 0)
+ {
+ markAsDamaged();
+
+ TkFractureCommands* fevt = getFamilyImpl().getQueue().allocData<TkFractureCommands>();
+ fevt->tkActorData = *this;
+ fevt->buffers = *commands;
+ getFamilyImpl().getQueue().addEvent(fevt);
+ getFamilyImpl().getQueue().dispatch();
+ }
+}
+
+
+uint32_t TkActorImpl::getJointCount() const
+{
+ return getJointCountInternal();
+}
+
+
+uint32_t TkActorImpl::getJoints(TkJoint** joints, uint32_t jointsSize) const
+{
+ uint32_t jointsWritten = 0;
+
+ for (JointIt j(*this); (bool)j && jointsWritten < jointsSize; ++j)
+ {
+ joints[jointsWritten++] = *j;
+ }
+
+ return jointsWritten;
+}
+
+
+//////// TkActorImpl::DamageData methods ////////
+
+static bool operator==(const NvBlastDamageProgram& lhs, const NvBlastDamageProgram& rhs)
+{
+ return lhs.graphShaderFunction == rhs.graphShaderFunction && lhs.subgraphShaderFunction == rhs.subgraphShaderFunction;
+}
+
+
+TkActorImpl::DamageData::DamageData(const NvBlastDamageProgram& program, const NvBlastProgramParams* params)
+ : m_program(program), m_programParams(params), m_damageDescCount(0)
+{
+}
+
+
+TkActorImpl::DamageData::DamageData(const NvBlastDamageProgram& program, const void* material, const void* desc, uint32_t descSize)
+ : m_program(program), m_material(material), m_damageDescs((char*)desc, (char*)desc + descSize), m_damageDescCount(1)
+{
+}
+
+
+bool TkActorImpl::DamageData::tryAppend(const NvBlastDamageProgram& program, const void* material, const void* desc, uint32_t descSize)
+{
+	// Batch this damage desc into an existing entry only when the entry is Buffered and uses the same program and material
+	if (getType() == Buffered && m_program == program && m_material == material)
+	{
+		// Buffered implies m_damageDescCount >= 1 (see getType), so the division is safe
+		const uint32_t currentDescSize = m_damageDescs.size() / m_damageDescCount;
+		if (descSize == currentDescSize)	// only uniformly-sized descs may share one buffer
+		{
+			const uint32_t s = m_damageDescs.size();
+			m_damageDescs.resizeUninitialized(s + static_cast<uint32_t>(descSize));
+			memcpy(m_damageDescs.begin() + s, desc, descSize);
+			m_damageDescCount++;
+			return true;
+		}
+	}
+	return false;
+}
+
+} // namespace Blast
+} // namespace Nv
diff --git a/NvBlast/sdk/toolkit/source/NvBlastTkActorImpl.h b/NvBlast/sdk/toolkit/source/NvBlastTkActorImpl.h
new file mode 100644
index 0000000..4d65660
--- /dev/null
+++ b/NvBlast/sdk/toolkit/source/NvBlastTkActorImpl.h
@@ -0,0 +1,375 @@
+/*
+* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#ifndef NVBLASTTKACTORIMPL_H
+#define NVBLASTTKACTORIMPL_H
+
+
+#include "NvBlastTkCommon.h"
+
+#include "NvBlastAssert.h"
+#include "NvBlastDLink.h"
+#include "NvBlastIteratorBase.h"
+
+#include "NvBlastTkJointImpl.h"
+
+#include "NvBlast.h"
+
+#include "NvBlastTkActor.h"
+
+#include "PxFlags.h"
+
+namespace Nv
+{
+namespace Blast
+{
+
+// Forward declarations:
+class TkGroupImpl;
+class TkFamilyImpl;
+class TkAssetImpl;
+class TkJointImpl;
+
+
+/**
+Struct-enum for actor status flags, used in TkGroup processing.
+*/
+struct TkActorFlag
+{
+ enum Enum
+ {
+ DAMAGED = (1 << 0), //!< The actor had fractures applied successfully and will take the split step.
+ PENDING = (1 << 1), //!< The actor will be processed when its group executes, used to update job queues when moving group.
+ };
+};
+
+
+/**
+Implementation of TkActor.
+*/
+class TkActorImpl : public TkActor
+{
+public:
+ TkActorImpl();
+ ~TkActorImpl();
+
+ // Begin TkActor
+ virtual const NvBlastActor* getActorLL() const override;
+
+ virtual TkFamily& getFamily() const override;
+
+ virtual uint32_t getIndex() const override;
+
+ virtual TkGroup* getGroup() const override;
+
+ virtual TkGroup* removeFromGroup() override;
+
+ virtual const TkAsset* getAsset() const override;
+
+ virtual uint32_t getVisibleChunkCount() const override;
+
+ virtual uint32_t getVisibleChunkIndices(uint32_t* visibleChunkIndices, uint32_t visibleChunkIndicesSize) const override;
+
+ virtual uint32_t getGraphNodeCount() const override;
+
+ virtual uint32_t getGraphNodeIndices(uint32_t* graphNodeIndices, uint32_t graphNodeIndicesSize) const override;
+
+ virtual const float* getBondHealths() const override;
+
+ virtual uint32_t getSplitMaxActorCount() const override;
+
+ virtual void damage(const NvBlastDamageProgram& program, const NvBlastProgramParams* programParams) override;
+ virtual void damage(const NvBlastDamageProgram& program, const void* damageDesc, uint32_t descSize) override;
+ virtual void damage(const NvBlastDamageProgram& program, const void* damageDesc, uint32_t descSize, const void* material) override;
+
+ virtual bool isPending() const override;
+
+ virtual void generateFracture(NvBlastFractureBuffers* commands, const NvBlastDamageProgram& program, const NvBlastProgramParams* programParams) const override;
+
+ virtual void applyFracture(NvBlastFractureBuffers* eventBuffers, const NvBlastFractureBuffers* commands) override;
+
+ virtual uint32_t getJointCount() const override;
+
+ virtual uint32_t getJoints(TkJoint** joints, uint32_t jointsSize) const override;
+ // End TkActor
+
+ // Begin TkObject
+ virtual void release() override;
+ // End TkObject
+
+
+ // Public methods
+
+ /**
+ Factory create method.
+
+ \param[in] desc Actor descriptor set by the user.
+
+ \return a pointer to a new TkActorImpl object if successful, NULL otherwise.
+ */
+ static TkActorImpl* create(const TkActorDesc& desc);
+
+ /**
+ TkActorImpl objects are created in an array within a TkFamilyImpl. Actors may become
+ 'inactive' without their memory being freed. If inactive, the actor should be treated as if
+ it has been released.
+
+ \return the active status of this TkActorImpl.
+ */
+ bool isActive() const;
+
+ /**
+ Utility to return the low-level family to which the low-level actor belongs.
+
+ \return a pointer to the NvBlastFamily to which the low-level actor belongs.
+ */
+ NvBlastFamily* getFamilyLL() const;
+
+ /**
+ Utility to access the TkFamily to which this actor belongs.
+
+ \return a reference to the TkFamilyImpl to which this TkActorImpl belongs.
+ */
+ TkFamilyImpl& getFamilyImpl() const;
+
+ /**
+ \return the index of this actor with its TkFamilyImpl.
+ */
+ uint32_t getIndexInternal() const;
+
+ /**
+ Access to the group to which this actor belongs, if any.
+
+ \return a pointer to the TkGroupImpl to which this TkActorImpl belongs, if any. If this actor is not in a group, this function returns NULL.
+ */
+ TkGroupImpl* getGroupImpl() const;
+
+ /**
+ Access to the low-level actor associated with this TkActorImpl.
+
+ \return a pointer to the NvBlastActor associated with this TkActorImpl. If this actor is inactive (see isActive), this function returns NULL.
+ */
+ NvBlastActor* getActorLLInternal() const;
+
+ /**
+ \return the number of TkJointImpl objects that reference this actor.
+ */
+ uint32_t getJointCountInternal() const;
+
+ /**
+ Joint iterator. Usage:
+
+ Given a TkActorImpl a,
+
+ for (TkActorImpl::JointIt i(a); (bool)i; ++i)
+ {
+ TkJointImpl* joint = (TkJointImpl*)i;
+ // ...
+ }
+ */
+ class JointIt : public DList::It
+ {
+ public:
+ /** Constructed from an actor. */
+ JointIt(const TkActorImpl& actor, Direction dir = Forward);
+
+ /** Current joint. */
+ TkJointImpl* operator * () const;
+ };
+
+ /**
+ Implicit converter to TkActorData for events.
+ */
+ operator Nv::Blast::TkActorData() const;
+
+private:
+ /**
+ Used to buffer damage for deferred fracture generation. Unifies 2 different ways to pass and store damage data.
+ */
+ struct DamageData
+ {
+ DamageData(const NvBlastDamageProgram& program, const NvBlastProgramParams* params);
+ DamageData(const NvBlastDamageProgram& program, const void* material, const void* desc, uint32_t descSize);
+
+ bool tryAppend(const NvBlastDamageProgram& program, const void* material, const void* desc, uint32_t descSize);
+ void generateFracture(NvBlastFractureBuffers* commandBuffers, const NvBlastActor* actorLL, NvBlastTimers* timers) const;
+
+ enum Type
+ {
+ Plain,
+ Buffered
+ };
+
+ Type getType() const;
+
+ NvBlastDamageProgram m_program;
+ union
+ {
+ const void* m_material; //!< for Buffered type
+ const NvBlastProgramParams* m_programParams; //!< for Plain type
+ };
+ TkArray<char>::type m_damageDescs;
+ uint32_t m_damageDescCount;
+ };
+
+
+ /**
+ Functions to raise or check 'damaged' state: this actor will take the split step.
+ 'damaged' actors automatically become 'pending' also.
+ */
+ void markAsDamaged();
+ bool isDamaged() const;
+
+ /**
+ Raise actor to 'pending' state: this actor will be processed when its group executes next.
+ Enqueues the actor in its group's job list if a group is set. Otherwise the group will enqueue the actor when it is added.
+ */
+ void makePending();
+
+ /**
+ Functions to add or remove an internal reference to a joint. (Joints and actors mutually reference each other.)
+ */
+ void addJoint(TkJointLink& jointLink);
+ void removeJoint(TkJointLink& jointLink);
+
+
+ // Data
+
+ NvBlastActor* m_actorLL; //!< The low-level actor associated with this actor
+ TkFamilyImpl* m_family; //!< The TkFamilyImpl to which this actor belongs
+ TkGroupImpl* m_group; //!< The TkGroupImpl (if any) to which this actor belongs
+ uint32_t m_groupJobIndex; //!< The index of this actor's job within its group's job list
+ physx::PxFlags<TkActorFlag::Enum, char> m_flags; //!< Status flags for this actor
+ TkArray<DamageData>::type m_damageBuffer; //!< Buffered damage input
+ uint32_t m_jointCount; //!< The number of joints referenced in m_jointList
+ DList m_jointList; //!< A doubly-linked list of joint references
+
+//#if NV_PROFILE
+ NvBlastTimers m_timers; //!< If profiling, each actor stores timing data
+//#endif
+
+ friend class TkWorker; // m_damageBuffer and m_flags
+ friend class TkGroupImpl;
+ friend class TkFamilyImpl;
+ friend class TkJointImpl;
+ friend class TkFrameworkImpl;
+};
+
+
+//////// TkActorImpl inline methods ////////
+
+NV_INLINE TkFamilyImpl& TkActorImpl::getFamilyImpl() const
+{
+ NVBLAST_ASSERT(m_family != nullptr);
+
+ return *m_family;
+}
+
+
+NV_INLINE uint32_t TkActorImpl::getIndexInternal() const
+{
+ NVBLAST_ASSERT(isActive());
+ return NvBlastActorGetIndex(m_actorLL, TkFrameworkImpl::get()->log);
+}
+
+
+NV_INLINE NvBlastActor* TkActorImpl::getActorLLInternal() const
+{
+ return m_actorLL;
+}
+
+
+NV_INLINE uint32_t TkActorImpl::getJointCountInternal() const
+{
+ return m_jointCount;
+}
+
+
+NV_INLINE TkGroupImpl* TkActorImpl::getGroupImpl() const
+{
+ return m_group;
+}
+
+
+NV_INLINE bool TkActorImpl::isActive() const
+{
+ return m_actorLL != nullptr;
+}
+
+
+NV_INLINE bool TkActorImpl::isPending() const
+{
+ return m_flags.isSet(TkActorFlag::PENDING);
+}
+
+
+NV_INLINE void TkActorImpl::addJoint(TkJointLink& jointLink)
+{
+ NVBLAST_ASSERT(m_jointList.isSolitary(jointLink));
+
+ m_jointList.insertHead(jointLink);
+ ++m_jointCount;
+}
+
+
+NV_INLINE void TkActorImpl::removeJoint(TkJointLink& jointLink)
+{
+ NVBLAST_ASSERT(!m_jointList.isSolitary(jointLink));
+ NVBLAST_ASSERT(m_jointCount > 0);
+ if (m_jointCount > 0)
+ {
+ --m_jointCount;
+ m_jointList.remove(jointLink);
+ }
+}
+
+
+//////// TkActorImpl::DamageData inline methods ////////
+
+NV_INLINE TkActorImpl::DamageData::Type TkActorImpl::DamageData::getType() const
+{
+ return m_damageDescCount > 0 ? Buffered : Plain;
+}
+
+
+NV_INLINE void TkActorImpl::DamageData::generateFracture(NvBlastFractureBuffers* commandBuffers, const NvBlastActor* actorLL, NvBlastTimers* timers) const
+{
+ if (getType() == Plain)
+ {
+ NvBlastActorGenerateFracture(commandBuffers, actorLL, m_program, m_programParams, TkFrameworkImpl::get()->log, timers);
+ }
+ else
+ {
+ const NvBlastProgramParams programParams = {
+ m_damageDescs.begin(),
+ m_damageDescCount,
+ m_material,
+ };
+ NvBlastActorGenerateFracture(commandBuffers, actorLL, m_program, &programParams, TkFrameworkImpl::get()->log, timers);
+ }
+}
+
+
+//////// TkActorImpl::JointIt methods ////////
+
+NV_INLINE TkActorImpl::JointIt::JointIt(const TkActorImpl& actor, Direction dir) : DList::It(actor.m_jointList, dir) {}
+
+
+NV_INLINE TkJointImpl* TkActorImpl::JointIt::operator * () const
+{
+ const DLink* link = (const DLink*)(*this);
+ return reinterpret_cast<const TkJointLink*>(link)->m_joint;
+}
+
+} // namespace Blast
+} // namespace Nv
+
+
+#endif // ifndef NVBLASTTKACTORIMPL_H
diff --git a/NvBlast/sdk/toolkit/source/NvBlastTkAllocator.cpp b/NvBlast/sdk/toolkit/source/NvBlastTkAllocator.cpp
new file mode 100644
index 0000000..b1c2c65
--- /dev/null
+++ b/NvBlast/sdk/toolkit/source/NvBlastTkAllocator.cpp
@@ -0,0 +1,22 @@
+/*
+* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#include "NvBlastTkAllocator.h"
+
+
+namespace Nv
+{
+namespace Blast
+{
+
+physx::PxAllocatorCallback* TkAllocator::s_allocatorCallback = nullptr;
+
+} // namespace Blast
+} // namespace Nv
diff --git a/NvBlast/sdk/toolkit/source/NvBlastTkAllocator.h b/NvBlast/sdk/toolkit/source/NvBlastTkAllocator.h
new file mode 100644
index 0000000..abc7b16
--- /dev/null
+++ b/NvBlast/sdk/toolkit/source/NvBlastTkAllocator.h
@@ -0,0 +1,49 @@
+/*
+* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#ifndef NVBLASTTKALLOCATOR_H
+#define NVBLASTTKALLOCATOR_H
+
+#include "PxAllocatorCallback.h"
+
+
+namespace Nv
+{
+namespace Blast
+{
+
+/**
+An allocator which can be used in PxShared containers.
+*/
+class TkAllocator
+{
+public:
+    // The name parameter exists for PxShared container compatibility and is ignored.
+    TkAllocator(const char* = 0)
+    {
+    }
+
+    // Forwards to the framework-wide callback with a null type name.
+    // s_allocatorCallback is dereferenced unchecked: it must be set first.
+    void* allocate(size_t size, const char* file, int line)
+    {
+        return s_allocatorCallback->allocate(size, nullptr, file, line);
+    }
+
+    // Forwards to the framework-wide callback.
+    void deallocate(void* ptr)
+    {
+        return s_allocatorCallback->deallocate(ptr);
+    }
+
+    // Single callback shared by every TkAllocator instance.
+    static physx::PxAllocatorCallback* s_allocatorCallback;
+};
+
+} // namespace Blast
+} // namespace Nv
+
+
+#endif // #ifndef NVBLASTTKALLOCATOR_H
diff --git a/NvBlast/sdk/toolkit/source/NvBlastTkArray.h b/NvBlast/sdk/toolkit/source/NvBlastTkArray.h
new file mode 100644
index 0000000..c07dc11
--- /dev/null
+++ b/NvBlast/sdk/toolkit/source/NvBlastTkArray.h
@@ -0,0 +1,41 @@
+/*
+* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#ifndef NVBLASTTKARRAY_H
+#define NVBLASTTKARRAY_H
+
+
+#include "NvBlastTkAllocator.h"
+#include "PsInlineArray.h"
+
+
+namespace Nv
+{
+namespace Blast
+{
+
+// Typedef helper binding PxShared's Array to the framework allocator.
+template <class T>
+struct TkArray
+{
+    typedef physx::shdfnd::Array<T, TkAllocator> type;
+};
+
+
+// Typedef helper binding PxShared's InlineArray (first N elements stored
+// inline, spilling to the heap beyond that) to the framework allocator.
+template <class T, uint32_t N>
+struct TkInlineArray
+{
+    typedef physx::shdfnd::InlineArray<T, N, TkAllocator> type;
+};
+
+} // namespace Blast
+} // namespace Nv
+
+
+#endif // #ifndef NVBLASTTKARRAY_H
diff --git a/NvBlast/sdk/toolkit/source/NvBlastTkAssetImpl.cpp b/NvBlast/sdk/toolkit/source/NvBlastTkAssetImpl.cpp
new file mode 100644
index 0000000..577d46b
--- /dev/null
+++ b/NvBlast/sdk/toolkit/source/NvBlastTkAssetImpl.cpp
@@ -0,0 +1,337 @@
+/*
+* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+
+#include "NvBlastTkFrameworkImpl.h"
+#include "NvBlastTkAssetImpl.h"
+#include "NvBlastTkFamilyImpl.h"
+
+#include "NvBlast.h"
+#include "NvBlastMemory.h"
+
+#include "Px.h"
+#include "PxFileBuf.h"
+#include "PxAllocatorCallback.h"
+
+
+using namespace physx::general_PxIOStream2;
+
+
+namespace Nv
+{
+namespace Blast
+{
+
+//////// Static data ////////
+
+NVBLASTTK_DEFINE_TYPE_SERIALIZABLE(Asset);
+
+
+//////// Member functions ////////
+
+// Default construct with no low-level asset attached.
+TkAssetImpl::TkAssetImpl()
+    : m_assetLL(nullptr), m_ownsAsset(false)
+{
+}
+
+
+// Construct with a caller-supplied NvBlastID; no low-level asset attached yet.
+TkAssetImpl::TkAssetImpl(const NvBlastID& id)
+    : TkAssetType(id), m_assetLL(nullptr), m_ownsAsset(false)
+{
+}
+
+
+// Free the low-level asset memory, but only if this object owns it
+// (see the ownsAsset flag set by the create() factories and deserialize()).
+TkAssetImpl::~TkAssetImpl()
+{
+    if (m_assetLL != nullptr && m_ownsAsset)
+    {
+        TkFrameworkImpl::get()->free(m_assetLL);
+    }
+}
+
+
+// The TkAsset interface methods below are thin forwarders: each delegates to
+// the corresponding low-level NvBlastAsset query (passing the framework
+// logger) or to an inline internal accessor.
+
+const NvBlastAsset* TkAssetImpl::getAssetLL() const
+{
+    return getAssetLLInternal();
+}
+
+
+uint32_t TkAssetImpl::getChunkCount() const
+{
+    return NvBlastAssetGetChunkCount(m_assetLL, TkFrameworkImpl::get()->log);
+}
+
+
+uint32_t TkAssetImpl::getLeafChunkCount() const
+{
+    return NvBlastAssetGetLeafChunkCount(m_assetLL, TkFrameworkImpl::get()->log);
+}
+
+
+uint32_t TkAssetImpl::getBondCount() const
+{
+    return NvBlastAssetGetBondCount(m_assetLL, TkFrameworkImpl::get()->log);
+}
+
+
+const NvBlastChunk* TkAssetImpl::getChunks() const
+{
+    return NvBlastAssetGetChunks(m_assetLL, TkFrameworkImpl::get()->log);
+}
+
+
+const NvBlastBond* TkAssetImpl::getBonds() const
+{
+    return NvBlastAssetGetBonds(m_assetLL, TkFrameworkImpl::get()->log);
+}
+
+
+const NvBlastSupportGraph TkAssetImpl::getGraph() const
+{
+    return NvBlastAssetGetSupportGraph(m_assetLL, TkFrameworkImpl::get()->log);
+}
+
+
+uint32_t TkAssetImpl::getDataSize() const
+{
+    return NvBlastAssetGetSize(m_assetLL, TkFrameworkImpl::get()->log);
+}
+
+
+uint32_t TkAssetImpl::getJointDescCount() const
+{
+    return getJointDescCountInternal();
+}
+
+
+const TkAssetJointDesc* TkAssetImpl::getJointDescs() const
+{
+    return getJointDescsInternal();
+}
+
+
+// Release this asset.  Families created from this asset depend on it, so every
+// TkFamilyImpl referencing this asset is released first, then the asset deletes
+// itself through the framework allocator.
+void TkAssetImpl::release()
+{
+    const TkType& tkType = TkFamilyImpl::s_type;
+    const uint32_t num = TkFrameworkImpl::get()->getObjectCount(tkType);
+
+    if (num)
+    {
+        // Snapshot the family list first; releasing a family mutates the
+        // framework's object registry while we iterate.
+        TkArray<TkIdentifiable*>::type dependents(num);
+        TkFrameworkImpl::get()->getObjects(dependents.begin(), dependents.size(), tkType);
+
+        for (TkObject* o : dependents)
+        {
+            TkFamilyImpl* f = static_cast<TkFamilyImpl*>(o);
+            if (f->getAssetImpl() == this)
+            {
+                f->release();
+            }
+        }
+    }
+
+    NVBLASTTK_DELETE(this, TkAssetImpl);
+}
+
+
+// Write this asset to the stream: framework header, then the raw low-level
+// asset block (size-prefixed), then the joint descriptors (count-prefixed,
+// written field by field).  deserialize() reads this exact layout back.
+bool TkAssetImpl::serialize(PxFileBuf& stream) const
+{
+    TkFrameworkImpl::get()->serializeHeader(*this, stream);
+
+    // Asset data: byte size followed by the raw NvBlastAsset memory block.
+    const uint32_t assetSize = NvBlastAssetGetSize(m_assetLL, TkFrameworkImpl::get()->log);
+    stream.storeDword(assetSize);
+    stream.write(m_assetLL, assetSize);
+
+    // Joint descs: count, then per-joint node indices and attach positions.
+    stream.storeDword((uint32_t)m_jointDescs.size());
+    for (uint32_t i = 0; i < m_jointDescs.size(); ++i)
+    {
+        const TkAssetJointDesc& jointDesc = m_jointDescs[i];
+        stream.storeDword(jointDesc.nodeIndices[0]);
+        stream.storeDword(jointDesc.nodeIndices[1]);
+        stream.storeFloat(jointDesc.attachPositions[0].x);
+        stream.storeFloat(jointDesc.attachPositions[0].y);
+        stream.storeFloat(jointDesc.attachPositions[0].z);
+        stream.storeFloat(jointDesc.attachPositions[1].x);
+        stream.storeFloat(jointDesc.attachPositions[1].y);
+        stream.storeFloat(jointDesc.attachPositions[1].z);
+    }
+
+    return true;
+}
+
+
+//////// Static functions ////////
+
+// Read an asset back from the stream in the exact layout serialize() wrote:
+// size-prefixed raw low-level asset block, then count-prefixed joint descs.
+// Returns a new TkAssetImpl owning its low-level asset, or nullptr on failure.
+TkSerializable* TkAssetImpl::deserialize(PxFileBuf& stream, const NvBlastID& id)
+{
+    // Allocate
+    TkAssetImpl* asset = NVBLASTTK_NEW(TkAssetImpl)(id);
+    if (asset == nullptr)
+    {
+        NVBLASTTK_LOG_ERROR("TkAssetImpl::deserialize: asset allocation failed.");
+        return nullptr;
+    }
+
+    // Asset data
+    const uint32_t assetSize = stream.readDword();
+    asset->m_assetLL = static_cast<NvBlastAsset*>(TkFrameworkImpl::get()->alloc(assetSize));
+    if (asset->m_assetLL == nullptr)
+    {
+        // Check the allocation *before* reading into it.  The original code
+        // deferred this test until after stream.read and the joint-desc loop,
+        // by which point a failed allocation would already have been written
+        // through as a null pointer.
+        NVBLASTTK_LOG_ERROR("TkAssetImpl::deserialize: low-level asset allocation failed.");
+        asset->release();
+        return nullptr;
+    }
+    asset->m_ownsAsset = true;
+    stream.read(asset->m_assetLL, assetSize);
+
+    // Joint descs
+    const uint32_t jointDescCount = stream.readDword();
+    asset->m_jointDescs.resize(jointDescCount);
+    for (uint32_t i = 0; i < asset->m_jointDescs.size(); ++i)
+    {
+        TkAssetJointDesc& jointDesc = asset->m_jointDescs[i];
+        jointDesc.nodeIndices[0] = stream.readDword();
+        jointDesc.nodeIndices[1] = stream.readDword();
+        jointDesc.attachPositions[0].x = stream.readFloat();
+        jointDesc.attachPositions[0].y = stream.readFloat();
+        jointDesc.attachPositions[0].z = stream.readFloat();
+        jointDesc.attachPositions[1].x = stream.readFloat();
+        jointDesc.attachPositions[1].y = stream.readFloat();
+        jointDesc.attachPositions[1].z = stream.readFloat();
+    }
+
+    return asset;
+}
+
+
+// Factory: build a new low-level asset from the descriptor and wrap it.
+// Bonds flagged BondJointed in the descriptor are translated into internal
+// joint descriptors.  Returns nullptr if low-level asset creation fails.
+TkAssetImpl* TkAssetImpl::create(const TkAssetDesc& desc)
+{
+    TkAssetImpl* asset = NVBLASTTK_NEW(TkAssetImpl);
+
+    // NOTE(review): neither 'asset' nor 'mem' is null-checked before use --
+    // confirm the framework allocator aborts (rather than returns null) on OOM.
+    TkArray<char>::type scratch((uint32_t)NvBlastGetRequiredScratchForCreateAsset(&desc, TkFrameworkImpl::get()->log));
+    void* mem = TkFrameworkImpl::get()->alloc(NvBlastGetAssetMemorySize(&desc, TkFrameworkImpl::get()->log));
+    asset->m_assetLL = NvBlastCreateAsset(mem, &desc, scratch.begin(), TkFrameworkImpl::get()->log);
+    if (asset->m_assetLL == nullptr)
+    {
+        NVBLASTTK_LOG_ERROR("TkAssetImpl::create: low-level asset could not be created.");
+        asset->release();
+        return nullptr;
+    }
+
+    // Translate user bond flags into internal joint descriptors.
+    if (desc.bondFlags != nullptr)
+    {
+        for (uint32_t bondN = 0; bondN < desc.bondCount; ++bondN)
+        {
+            if (0 != (desc.bondFlags[bondN] & TkAssetDesc::BondJointed))
+            {
+                const NvBlastBond& bondDesc = desc.bondDescs[bondN];
+                const uint32_t c0 = bondDesc.chunkIndices[0];
+                const uint32_t c1 = bondDesc.chunkIndices[1];
+                if (c0 >= desc.chunkCount || c1 >= desc.chunkCount)
+                {
+                    NVBLASTTK_LOG_WARNING("TkAssetImpl::create: joint flag set for badly described bond. No joint descriptor created.");
+                    continue;
+                }
+
+                if (!asset->addJointDesc(c0, c1))
+                {
+                    NVBLASTTK_LOG_WARNING("TkAssetImpl::create: no bond corresponds to the user-described bond indices. No joint descriptor created.");
+                }
+            }
+        }
+    }
+
+    // The wrapper owns (and will eventually free) the low-level asset it created.
+    asset->m_ownsAsset = true;
+// asset->setID(NvBlastAssetGetID(asset->m_assetLL, TkFrameworkImpl::get()->log)); // Keeping LL and Tk IDs distinct
+
+    return asset;
+}
+
+
+// Factory: wrap an existing low-level asset.  Ownership of the low-level
+// memory transfers only when ownsAsset is true.  The Tk ID is taken from the
+// low-level asset's own ID (unlike create(desc), which keeps them distinct).
+TkAssetImpl* TkAssetImpl::create(const NvBlastAsset* assetLL, Nv::Blast::TkAssetJointDesc* jointDescs, uint32_t jointDescCount, bool ownsAsset)
+{
+    TkAssetImpl* asset = NVBLASTTK_NEW(TkAssetImpl);
+
+    //NOTE: Why are we passing in a const NvBlastAsset* and then discarding the const?
+    asset->m_assetLL = const_cast<NvBlastAsset*>(assetLL);
+    if (asset->m_assetLL == nullptr)
+    {
+        NVBLASTTK_LOG_ERROR("TkAssetImpl::create: low-level asset could not be created.");
+        asset->release();
+        return nullptr;
+    }
+
+    asset->m_ownsAsset = ownsAsset;
+    asset->setID(NvBlastAssetGetID(asset->m_assetLL, TkFrameworkImpl::get()->log));
+
+    // Copy the caller's joint descriptors into internal storage.
+    asset->m_jointDescs.resize(jointDescCount);
+    for (uint32_t i = 0; i < asset->m_jointDescs.size(); ++i)
+    {
+        asset->m_jointDescs[i] = jointDescs[i];
+    }
+
+    return asset;
+}
+
+/**
+Add an internal joint descriptor between two support chunks.  Both attachment
+positions are set to the centroid of the bond connecting the chunks.
+
+\param[in] chunkIndex0  Asset chunk index of the first attachment.
+\param[in] chunkIndex1  Asset chunk index of the second attachment.
+
+\return true iff both chunks map to valid graph nodes joined by a bond.
+*/
+bool TkAssetImpl::addJointDesc(uint32_t chunkIndex0, uint32_t chunkIndex1)
+{
+    if (m_assetLL == nullptr)
+    {
+        return false;
+    }
+
+    // Both chunks must be upper-support chunks in order to map to graph nodes.
+    const uint32_t upperSupportChunkCount = NvBlastAssetGetFirstSubsupportChunkIndex(m_assetLL, TkFrameworkImpl::get()->log);
+    if (chunkIndex0 >= upperSupportChunkCount || chunkIndex1 >= upperSupportChunkCount)
+    {
+        return false;
+    }
+
+    const uint32_t* chunkToGraphNodeMap = NvBlastAssetGetChunkToGraphNodeMap(m_assetLL, TkFrameworkImpl::get()->log);
+    const uint32_t node0 = chunkToGraphNodeMap[chunkIndex0];
+    const uint32_t node1 = chunkToGraphNodeMap[chunkIndex1];
+    const NvBlastSupportGraph graph = NvBlastAssetGetSupportGraph(m_assetLL, TkFrameworkImpl::get()->log);
+    // Fixed: the original tested (node0 >= nodeCount && node1 >= nodeCount),
+    // which let a single invalid node index slip through the guard and then
+    // indexed graph.adjacencyPartition[node0] out of bounds below.  Either
+    // node being invalid must reject the joint.
+    if (node0 >= graph.nodeCount || node1 >= graph.nodeCount)
+    {
+        return false;
+    }
+
+    // Find bond index: scan node0's adjacency for node1.
+    uint32_t bondIndex = 0xFFFFFFFF;
+    for (uint32_t i = graph.adjacencyPartition[node0]; i < graph.adjacencyPartition[node0 + 1]; i++)
+    {
+        if (graph.adjacentNodeIndices[i] == node1)
+        {
+            bondIndex = graph.adjacentBondIndices[i];
+            break;
+        }
+    }
+
+    // Sentinel (no adjacency found) or out-of-range bond index: fail.
+    if (bondIndex >= NvBlastAssetGetBondCount(m_assetLL, TkFrameworkImpl::get()->log))
+    {
+        return false;
+    }
+
+    const NvBlastBond& bond = NvBlastAssetGetBonds(m_assetLL, TkFrameworkImpl::get()->log)[bondIndex];
+
+    // Both attachment points start at the bond centroid.
+    TkAssetJointDesc jointDesc;
+    jointDesc.attachPositions[0] = jointDesc.attachPositions[1] = physx::PxVec3(bond.centroid[0], bond.centroid[1], bond.centroid[2]);
+    jointDesc.nodeIndices[0] = node0;
+    jointDesc.nodeIndices[1] = node1;
+    m_jointDescs.pushBack(jointDesc);
+
+    return true;
+}
+
+} // namespace Blast
+} // namespace Nv
diff --git a/NvBlast/sdk/toolkit/source/NvBlastTkAssetImpl.h b/NvBlast/sdk/toolkit/source/NvBlastTkAssetImpl.h
new file mode 100644
index 0000000..ae68af8
--- /dev/null
+++ b/NvBlast/sdk/toolkit/source/NvBlastTkAssetImpl.h
@@ -0,0 +1,162 @@
+/*
+* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#ifndef NVBLASTTKASSETIMPL_H
+#define NVBLASTTKASSETIMPL_H
+
+
+#include "NvBlastTkCommon.h"
+#include "NvBlastTkJoint.h"
+#include "NvBlastTkAsset.h"
+#include "NvBlastTkTypeImpl.h"
+#include "NvBlastTkArray.h"
+
+
+// Forward declarations
+struct NvBlastAsset;
+
+
+namespace Nv
+{
+namespace Blast
+{
+
+/**
+Implementation of TkAsset
+*/
+NVBLASTTK_IMPL_DECLARE(Asset)
+{
+public:
+    /**
+    Enum which keeps track of the serialized data format.
+    */
+    enum Version
+    {
+        /** Initial version */
+        Initial,
+
+        // New formats must come before Count. They should be given descriptive names with more information in comments.
+
+        /** The number of serialized formats. */
+        Count,
+
+        /** The current version. This should always be Count-1 */
+        Current = Count - 1
+    };
+
+    TkAssetImpl();
+    TkAssetImpl(const NvBlastID& id);
+    ~TkAssetImpl();
+
+    // ClassID fourcc "ASST"; also declares release(), serialize(), and the static deserialize().
+    NVBLASTTK_IMPL_DEFINE_SERIALIZABLE('A', 'S', 'S', 'T');
+
+    // Public methods
+
+    /**
+    Factory create method. This method creates a low-level asset and stores a reference to it.
+
+    \param[in] desc Asset descriptor set by the user.
+
+    \return a pointer to a new TkAssetImpl object if successful, NULL otherwise.
+    */
+    static TkAssetImpl* create(const TkAssetDesc& desc);
+
+    /**
+    Static method to create an asset from an existing low-level asset.
+
+    \param[in] assetLL A valid low-level asset passed in by the user.
+    \param[in] jointDescs Optional joint descriptors to add to the new asset.
+    \param[in] jointDescCount The number of joint descriptors in the jointDescs array. If non-zero, jointDescs cannot be NULL.
+    \param[in] ownsAsset Whether or not to let this TkAssetImpl object release the low-level NvBlastAsset memory upon its own release.
+
+    \return a pointer to a new TkAssetImpl object if successful, NULL otherwise.
+    */
+    static TkAssetImpl* create(const NvBlastAsset* assetLL, Nv::Blast::TkAssetJointDesc* jointDescs = nullptr, uint32_t jointDescCount = 0, bool ownsAsset = false);
+
+    /**
+    \return a pointer to the underlying low-level NvBlastAsset associated with this asset.
+    */
+    const NvBlastAsset* getAssetLLInternal() const;
+
+    /**
+    \return the number of internal joint descriptors stored with this asset.
+    */
+    uint32_t getJointDescCountInternal() const;
+
+    /**
+    \return the array of internal joint descriptors stored with this asset, with size given by getJointDescCountInternal().
+    */
+    const TkAssetJointDesc* getJointDescsInternal() const;
+
+    // Begin TkAsset
+    // (These overrides forward to the low-level NvBlastAsset queries or to the
+    // inline internal accessors above; see the .cpp for the implementations.)
+    virtual const NvBlastAsset* getAssetLL() const override;
+
+    virtual uint32_t getChunkCount() const override;
+
+    virtual uint32_t getLeafChunkCount() const override;
+
+    virtual uint32_t getBondCount() const override;
+
+    virtual const NvBlastChunk* getChunks() const override;
+
+    virtual const NvBlastBond* getBonds() const override;
+
+    virtual const NvBlastSupportGraph getGraph() const override;
+
+    virtual uint32_t getDataSize() const override;
+
+    virtual uint32_t getJointDescCount() const override;
+
+    virtual const TkAssetJointDesc* getJointDescs() const override;
+    // End TkAsset
+
+private:
+    /**
+    Utility to add a joint descriptor between the indexed chunks. The two chunks
+    must be support chunks, and there must exist a bond between them. The joint's
+    attachment positions will be the bond centroid.
+
+    \param[in] chunkIndex0 The first chunk index.
+    \param[in] chunkIndex1 The second chunk index.
+
+    \return true iff successful.
+    */
+    bool addJointDesc(uint32_t chunkIndex0, uint32_t chunkIndex1);
+
+    NvBlastAsset* m_assetLL; //!< The underlying low-level asset.
+    TkArray<TkAssetJointDesc>::type m_jointDescs; //!< The array of internal joint descriptors.
+    bool m_ownsAsset; //!< Whether or not this asset should release its low-level asset upon its own release.
+};
+
+
+//////// TkAssetImpl inline methods ////////
+
+// Non-virtual accessor for the low-level asset (avoids the vtable hop).
+NV_INLINE const NvBlastAsset* TkAssetImpl::getAssetLLInternal() const
+{
+    return m_assetLL;
+}
+
+
+// Number of internal joint descriptors stored with this asset.
+NV_INLINE uint32_t TkAssetImpl::getJointDescCountInternal() const
+{
+    return m_jointDescs.size();
+}
+
+
+// Pointer to the internal joint descriptor array (size: getJointDescCountInternal()).
+NV_INLINE const TkAssetJointDesc* TkAssetImpl::getJointDescsInternal() const
+{
+    return m_jointDescs.begin();
+}
+
+} // namespace Blast
+} // namespace Nv
+
+
+#endif // ifndef NVBLASTTKASSETIMPL_H
diff --git a/NvBlast/sdk/toolkit/source/NvBlastTkCommon.h b/NvBlast/sdk/toolkit/source/NvBlastTkCommon.h
new file mode 100644
index 0000000..edc1a91
--- /dev/null
+++ b/NvBlast/sdk/toolkit/source/NvBlastTkCommon.h
@@ -0,0 +1,110 @@
+/*
+* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#ifndef NVBLASTTKCOMMON_H
+#define NVBLASTTKCOMMON_H
+
+
+#include "NvPreprocessor.h"
+#include "NvBlastTkGUID.h"
+
+
+// Macro to load a uint32_t (or larger) with four characters
+// (_a lands in the lowest byte, _d in the highest).
+#define NVBLASTTK_FOURCC(_a, _b, _c, _d) ( (uint32_t)(_a) | (uint32_t)(_b)<<8 | (uint32_t)(_c)<<16 | (uint32_t)(_d)<<24 )
+
+
+// Macro to define standard object classes. An intermediate class is defined which holds common implementations.
+// The intermediate Tk<Name>Type class implements TkIdentifiable (ID storage and
+// framework create/destroy/ID-change notifications); the macro ends with the
+// opening of "class Tk<Name>Impl final", so a use site must supply the class
+// body and trailing semicolon (see NVBLASTTK_IMPL_DECLARE(Asset) for an example).
+#define NVBLASTTK_IMPL_DECLARE(_name) \
+class Tk##_name##Type : public Tk##_name \
+{ \
+public: \
+    /* Blank constructor generates a new NvBlastID and informs framework */ \
+    Tk##_name##Type() \
+    { \
+        memset(&m_ID, 0, sizeof(NvBlastID)); \
+        setID(TkGenerateGUID(this)); \
+        TkFrameworkImpl::get()->onCreate(*this); \
+    } \
+    \
+    /* This constructor takes an existing NvBlastID and informs framework */ \
+    Tk##_name##Type(const NvBlastID& id) \
+    { \
+        memset(&m_ID, 0, sizeof(NvBlastID)); \
+        setID(id); \
+        TkFrameworkImpl::get()->onCreate(*this); \
+    } \
+    \
+    /* Destructor informs framework */ \
+    ~Tk##_name##Type() { TkFrameworkImpl::get()->onDestroy(*this); } \
+    \
+    /* Begin TkIdentifiable */ \
+    virtual void setID(const NvBlastID& id) override \
+    { \
+        /* Inform framework of ID change */ \
+        TkFrameworkImpl::get()->onIDChange(*this, m_ID, id); \
+        m_ID = id; \
+    } \
+    virtual const NvBlastID& getID() const override { return getIDInternal(); } \
+    virtual const TkType& getType() const override { return s_type; } \
+    /* End TkIdentifiable */ \
+    \
+    /* Begin public API */ \
+    \
+    /* Inline method for internal access to NvBlastID */ \
+    const NvBlastID& getIDInternal() const { return m_ID; } \
+    \
+    /* End public API */ \
+    \
+    /* Static type information */ \
+    static TkTypeImpl s_type; \
+    \
+private: \
+    NvBlastID m_ID; /* NvBlastID for a TkIdentifiable object */ \
+}; \
+ \
+/* Derive object implementation from common implementation class above */ \
+class Tk##_name##Impl final : public Tk##_name##Type
+
+
+// Macro to declare standard object interfaces, enums, etc.
+// Intended for use inside the Tk<Name>Impl class body opened by NVBLASTTK_IMPL_DECLARE.
+#define NVBLASTTK_IMPL_DEFINE_IDENTIFIABLE(_id0, _id1, _id2, _id3) \
+    /* Begin TkObject */ \
+    virtual void release() override; \
+    /* End TkObject */ \
+    \
+    /* Enums */ \
+    \
+    /* Generate a ClassID enum used to identify this TkIdentifiable. */ \
+    enum { ClassID = NVBLASTTK_FOURCC(_id0, _id1, _id2, _id3) }
+
+
+// Macro to declare standard object interfaces, enums, etc (serializable version)
+// Adds the TkSerializable override plus the static deserialize entry point.
+#define NVBLASTTK_IMPL_DEFINE_SERIALIZABLE(_id0, _id1, _id2, _id3) \
+    NVBLASTTK_IMPL_DEFINE_IDENTIFIABLE(_id0, _id1, _id2, _id3); \
+    \
+    /* Begin TkSerializable */ \
+    virtual bool serialize(physx::general_PxIOStream2::PxFileBuf& stream) const override; \
+    /* End TkSerializable */ \
+    \
+    /* Static deserialization function, called by TkFrameworkImpl::deserialize after header data */ \
+    static TkSerializable* deserialize(physx::general_PxIOStream2::PxFileBuf& stream, const NvBlastID& id)
+
+
+// Macro to define class type data
+// (non-serializable: version 0 and no deserialize function)
+#define NVBLASTTK_DEFINE_TYPE_IDENTIFIABLE(_name) \
+    TkTypeImpl Tk##_name##Type::s_type("Tk" #_name, Tk##_name##Impl::ClassID, 0, nullptr)
+
+
+// Macro to define class type data (serializable version)
+#define NVBLASTTK_DEFINE_TYPE_SERIALIZABLE(_name) \
+    TkTypeImpl Tk##_name##Type::s_type("Tk" #_name, Tk##_name##Impl::ClassID, Tk##_name##Impl::Version::Current, Tk##_name##Impl::deserialize)
+
+
+#endif // ifndef NVBLASTTKCOMMON_H
diff --git a/NvBlast/sdk/toolkit/source/NvBlastTkEventQueue.h b/NvBlast/sdk/toolkit/source/NvBlastTkEventQueue.h
new file mode 100644
index 0000000..00a1a61
--- /dev/null
+++ b/NvBlast/sdk/toolkit/source/NvBlastTkEventQueue.h
@@ -0,0 +1,231 @@
+/*
+* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#ifndef NVBLASTTKEVENTQUEUE_H
+#define NVBLASTTKEVENTQUEUE_H
+
+#include <algorithm>
+#include <vector>
+
+#include <mutex>
+#include <atomic>
+
+#include "PxAllocatorCallback.h"
+#include "NvBlastTkFrameworkImpl.h"
+#include "NvBlastAssert.h"
+
+
+namespace Nv {
+namespace Blast {
+
+/**
+A dispatcher queue providing preallocation and thread-safe insertions therein.
+
+Typical usage:
+- preallocate space for events and payload:
+ - reserveEvents, reserveData
+- enable asserts to detect undersized storage (allocations are not thread safe):
+ - protect(true)
+- get pointers to payload data and events to fill in, thread safe for preallocated memory:
+ - allocData, addEvent
+- back on main thread, ensure consistency:
+ - protect(false)
+
+- continue adding events and payload on main thread if necessary like above (allocations are safe here)
+eventually dispatch, or reset if dispatched by proxy
+*/
+class TkEventQueue
+{
+public:
+    TkEventQueue() : m_currentEvent(0), m_poolCapacity(0), m_pool(nullptr), m_allowAllocs(true) {}
+
+    /**
+    Peek events queue for dispatch.
+    Do not use in protected state.
+    */
+    operator const TkArray<TkEvent>::type&()
+    {
+        // Both asserts verify we are back in single-threaded (unprotected) mode
+        // with the array size reconciled to the atomic counter.
+        NVBLAST_ASSERT(m_allowAllocs);
+        NVBLAST_ASSERT(m_currentEvent == m_events.size());
+        return m_events;
+    }
+
+    /**
+    Debug help to catch (unwanted) allocations during task work.
+    Note that this will not actually avoid allocations, but assert in debug builds.
+
+    Set true before using in distributed environment.
+    Set false to return to single-thread mode.
+    */
+    void protect(bool enable)
+    {
+        // During parallel use, m_events.size() and m_currentEvent are allowed to diverge.
+        // This is fine because resizeUninitialized does not alter the stored data.
+        NVBLAST_ASSERT(m_currentEvent <= m_events.capacity());
+        m_events.resizeUninitialized(m_currentEvent);
+        m_allowAllocs = !enable;
+    }
+
+    /**
+    Restores initial state.
+    Data memory is currently not being reused. To be improved.
+    */
+    void reset()
+    {
+        m_events.clear();
+        m_currentEvent = 0;
+        // m_pool was allocated through allocDataBySize and therefore lives in
+        // m_memory; this loop frees it along with every other payload block.
+        for (void* mem : m_memory)
+        {
+            NVBLASTTK_FREE(mem);
+        }
+        m_memory.clear();
+        m_currentData = 0;
+        m_allowAllocs = true;
+        m_poolCapacity = 0;
+        m_pool = nullptr;
+    }
+
+    /**
+    Queue an event with a payload.
+    */
+    template<class T>
+    void addEvent(T* payload)
+    {
+        // Atomically reserve a slot; multiple threads may insert concurrently
+        // as long as capacity was preallocated via reserveEvents.
+        uint32_t index = m_currentEvent.fetch_add(1);
+
+        // Should not allocate in protected state.
+        NVBLAST_ASSERT(m_allowAllocs || m_currentEvent <= m_events.capacity());
+
+        // NOTE(review): concurrent resizeUninitialized calls race on the array's
+        // size field; this appears to rely on preallocated capacity making the
+        // writes benign -- confirm against PxShared Array internals.
+        m_events.resizeUninitialized(m_currentEvent);
+
+        // During parallel use, m_events.size() and m_currentEvent are allowed to diverge.
+        // Consistency is restored in protect().
+        NVBLAST_ASSERT(!m_allowAllocs || m_currentEvent == m_events.size());
+
+        TkEvent& evt = m_events[index];
+        evt.type = TkEvent::Type(T::EVENT_TYPE);
+        evt.payload = payload;
+    }
+
+    /**
+    Request storage for payload.
+    */
+    template<typename T>
+    T* allocData()
+    {
+        // Atomically reserve sizeof(T) bytes from the current pool.
+        uint32_t index = m_currentData.fetch_add(sizeof(T));
+        if (m_currentData <= m_poolCapacity)
+        {
+            return reinterpret_cast<T*>(&m_pool[index]);
+        }
+        else
+        {
+            // Pool exhausted: allocate a fresh block.  This fallback path is
+            // not thread safe (see class comment -- allocations must happen
+            // outside the protected state).
+            // Could do larger block allocation here.
+            reserveData(sizeof(T));
+            // Account for the requested size.
+            m_currentData = sizeof(T);
+            return reinterpret_cast<T*>(&m_pool[0]);
+        }
+    }
+
+    /**
+    Preallocate a memory block of size Bytes for payload data.
+    Note that this will inevitably allocate a new memory block.
+    Subsequent calls to allocData will use this memory piecewise.
+    */
+    void reserveData(size_t size)
+    {
+        NVBLAST_ASSERT(m_allowAllocs);
+        m_pool = reinterpret_cast<uint8_t*>(allocDataBySize(size));
+        m_poolCapacity = size;
+        m_currentData = 0;
+    }
+
+    /**
+    Preallocate space for events.
+    */
+    void reserveEvents(uint32_t n)
+    {
+        NVBLAST_ASSERT(m_allowAllocs);
+        m_events.reserve(m_events.size() + n);
+    }
+
+    /**
+    Add a listener to dispatch to.
+    */
+    void addListener(TkEventListener& l)
+    {
+        m_listeners.pushBack(&l);
+    }
+
+    /**
+    Remove a listener from dispatch list.
+    (Swap-with-last removal: listener order is not preserved.)
+    */
+    void removeListener(TkEventListener& l)
+    {
+        m_listeners.findAndReplaceWithLast(&l);
+    }
+
+    /**
+    Dispatch the stored events to the registered listeners.
+    After dispatch, all data is invalidated.
+    */
+    void dispatch()
+    {
+        // The conversion operator hands our own event array to the proxy
+        // dispatch below; reset() then frees all event payload memory.
+        dispatch(*this);
+        reset();
+    }
+
+    /**
+    Proxy function to dispatch events to this queue's listeners.
+    */
+    void dispatch(const TkArray<TkEvent>::type& events) const
+    {
+        if (events.size())
+        {
+            for (TkEventListener* l : m_listeners)
+            {
+                PERF_SCOPE_M("TkEventQueue::dispatch");
+                l->receive(events.begin(), events.size());
+            }
+        }
+    }
+
+private:
+    /**
+    Allocates and stores a block of size Bytes of payload data.
+    The block is tracked in m_memory so reset() can free it.
+    */
+    void* allocDataBySize(size_t size)
+    {
+        void* memory = nullptr;
+        if (size > 0)
+        {
+            memory = NVBLASTTK_ALLOC(size, "TkEventQueue Data");
+            m_memory.pushBack(memory);
+        }
+        return memory;
+    }
+
+
+    TkArray<TkEvent>::type m_events; //!< holds events
+    TkArray<void*>::type m_memory; //!< holds allocated data memory blocks
+    std::atomic<uint32_t> m_currentEvent; //!< reference index for event insertion
+    std::atomic<uint32_t> m_currentData; //!< reference index for data insertion
+    size_t m_poolCapacity; //!< size of the currently active memory block (m_pool)
+    uint8_t* m_pool; //!< the current memory block allocData() uses
+    bool m_allowAllocs; //!< assert guard
+    TkInlineArray<TkEventListener*,4>::type m_listeners; //!< objects to dispatch to
+};
+
+} // namespace Blast
+} // namespace Nv
+
+
+#endif // ifndef NVBLASTTKEVENTQUEUE_H
diff --git a/NvBlast/sdk/toolkit/source/NvBlastTkFamilyImpl.cpp b/NvBlast/sdk/toolkit/source/NvBlastTkFamilyImpl.cpp
new file mode 100644
index 0000000..33baafe
--- /dev/null
+++ b/NvBlast/sdk/toolkit/source/NvBlastTkFamilyImpl.cpp
@@ -0,0 +1,815 @@
+/*
+* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#include "NvBlastTkFrameworkImpl.h"
+#include "NvBlastTkFamilyImpl.h"
+#include "NvBlastTkGroupImpl.h"
+#include "NvBlastTkAssetImpl.h"
+#include "NvBlastTkActorImpl.h"
+#include "NvBlastTkJointImpl.h"
+
+#include "Px.h"
+#include "PxFileBuf.h"
+#include "PxAllocatorCallback.h"
+
+#include "NvBlastIndexFns.h"
+#include "NvBlastMemory.h"
+
+using namespace physx::general_PxIOStream2;
+
+
+namespace Nv
+{
+namespace Blast
+{
+
+//////// Static data ////////
+
+NVBLASTTK_DEFINE_TYPE_SERIALIZABLE(Family);
+
+
+//////// Member functions ////////
+
+// Default constructor: family starts unbound (no LL family, no asset, no material, no internal joints).
+TkFamilyImpl::TkFamilyImpl() : m_familyLL(nullptr), m_internalJointCount(0), m_asset(nullptr), m_material(nullptr)
+{
+}
+
+
+// Constructor with an explicit object ID (used by deserialization); state otherwise identical to default ctor.
+TkFamilyImpl::TkFamilyImpl(const NvBlastID& id) : TkFamilyType(id), m_familyLL(nullptr), m_internalJointCount(0), m_asset(nullptr), m_material(nullptr)
+{
+}
+
+
+// Destructor: frees the low-level family block.  Warns if LL actors still exist,
+// since release() is expected to have released all Tk actors first.
+TkFamilyImpl::~TkFamilyImpl()
+{
+    if (m_familyLL != nullptr)
+    {
+        uint32_t familyActorCount = NvBlastFamilyGetActorCount(m_familyLL, TkFrameworkImpl::get()->log);
+        if (familyActorCount != 0)
+        {
+            NVBLASTTK_LOG_WARNING("TkFamilyImpl::~TkFamilyImpl(): family actor count is not 0.");
+        }
+        TkFrameworkImpl::get()->free(m_familyLL);
+    }
+}
+
+
+// Releases every active actor, clears the actor array, then deletes this family.
+void TkFamilyImpl::release()
+{
+    for (TkActorImpl& actor : m_actors)
+    {
+        if (actor.isActive())
+        {
+            actor.release();
+        }
+    }
+
+    m_actors.clear();
+
+    NVBLASTTK_DELETE(this, TkFamilyImpl);
+}
+
+
+// Public accessor for the underlying low-level family (const view).
+const NvBlastFamily* TkFamilyImpl::getFamilyLL() const
+{
+    return m_familyLL;
+}
+
+
+// Binds the pre-allocated TkActorImpl slot matching actorLL's family-local index
+// to the given LL actor and to this family, and returns it.
+// NOTE(review): despite the name, nothing is inserted — m_actors is sized to the
+// max actor count up front and this only activates an existing slot.
+TkActorImpl* TkFamilyImpl::addActor(NvBlastActor* actorLL)
+{
+    TkActorImpl* actor = getActorByActorLL(actorLL);
+    NVBLAST_ASSERT(actor);
+    actor->m_actorLL = actorLL;
+    actor->m_family = this;
+    return actor;
+}
+
+
+// Deactivates an actor slot by clearing its LL actor pointer.
+// m_family is deliberately left set (see commented line) so the actor can still
+// report which family it belonged to.
+void TkFamilyImpl::removeActor(TkActorImpl* actor)
+{
+    NVBLAST_ASSERT(actor != nullptr && actor->m_family == this);
+    //actor->m_family = nullptr;
+    actor->m_actorLL = nullptr;
+}
+
+
+// Public actor count; forwards to the low-level family query (see getActorCountInternal).
+uint32_t TkFamilyImpl::getActorCount() const
+{
+    return getActorCountInternal();
+}
+
+
+/**
+Copies pointers to this family's active actors into the user buffer.
+
+\param buffer      destination array of TkActor pointers
+\param bufferSize  capacity of buffer
+\param indexStart  index (within the active-actor sequence) to start copying from
+
+\return the number of pointers written, which is min(activeCount - indexStart, bufferSize);
+        0 (with a warning) if indexStart is past the end.
+*/
+uint32_t TkFamilyImpl::getActors(TkActor** buffer, uint32_t bufferSize, uint32_t indexStart /*= 0*/) const
+{
+    uint32_t actorCount = getActorCount();
+    if (actorCount <= indexStart)
+    {
+        NVBLASTTK_LOG_WARNING("TkFamilyImpl::getActors: indexStart beyond end of actor list.");
+        return 0;
+    }
+
+    // Clamp to the caller's buffer.  (bufferSize is already uint32_t; the original
+    // carried a redundant static_cast here.)
+    actorCount -= indexStart;
+    if (actorCount > bufferSize)
+    {
+        actorCount = bufferSize;
+    }
+
+    // Walk the slot array, counting only active actors, and stop as soon as the
+    // requested number of pointers has been written.
+    uint32_t index = 0;
+    uint32_t written = 0;
+    for (const TkActorImpl& actor : m_actors)
+    {
+        if (!actor.isActive())
+        {
+            continue;
+        }
+        if (index++ < indexStart)
+        {
+            continue;
+        }
+        buffer[written++] = const_cast<TkActorImpl*>(&actor);
+        if (written == actorCount)
+        {
+            break;
+        }
+    }
+
+    return actorCount;
+}
+
+
+// Returns true iff two low-level actors have the same graph node count and the
+// same visible chunk index list.  'scratch' is a caller-provided buffer reused
+// across calls to avoid per-call allocation; it is resized to 2 * chunkCount.
+NV_INLINE bool areLLActorsEqual(const NvBlastActor* actor0, const NvBlastActor* actor1, TkArray<uint32_t>::type& scratch)
+{
+    if (NvBlastActorGetGraphNodeCount(actor0, TkFrameworkImpl::get()->log) != NvBlastActorGetGraphNodeCount(actor1, TkFrameworkImpl::get()->log))
+    {
+        return false;
+    }
+
+    const uint32_t chunkCount = NvBlastActorGetVisibleChunkCount(actor0, TkFrameworkImpl::get()->log);
+    if (chunkCount != NvBlastActorGetVisibleChunkCount(actor1, TkFrameworkImpl::get()->log))
+    {
+        return false;
+    }
+
+    // Both index lists are written into scratch back-to-back and compared bytewise.
+    // NOTE(review): assumes both queries return indices in the same order — confirm.
+    scratch.resize(chunkCount * 2);
+    NvBlastActorGetVisibleChunkIndices(scratch.begin(), chunkCount, actor0, TkFrameworkImpl::get()->log);
+    NvBlastActorGetVisibleChunkIndices(scratch.begin() + chunkCount, chunkCount, actor1, TkFrameworkImpl::get()->log);
+    return memcmp(scratch.begin(), scratch.begin() + chunkCount, chunkCount * sizeof(uint32_t)) == 0;
+}
+
+
+/**
+Replaces this family's low-level family with a copy of newFamily (which must come
+from the same asset and have the same memory size), rebinds every Tk actor to its
+corresponding new LL actor, emits TkSplitEvent remove/create notifications for
+actors that changed or disappeared, moves changed actors into 'group', refreshes
+joint attachments, and finally dispatches the queued events.
+*/
+void TkFamilyImpl::reinitialize(const NvBlastFamily* newFamily, TkGroup* group)
+{
+    NVBLAST_ASSERT(newFamily);
+#if NV_ENABLE_ASSERTS
+    NvBlastID id0 = NvBlastFamilyGetAssetID(m_familyLL, TkFrameworkImpl::get()->log);
+    NvBlastID id1 = NvBlastFamilyGetAssetID(newFamily, TkFrameworkImpl::get()->log);
+    NVBLAST_ASSERT(TkGUIDsEqual(&id0, &id1));
+#endif
+    NVBLAST_ASSERT(NvBlastFamilyGetSize(m_familyLL, TkFrameworkImpl::get()->log) == NvBlastFamilyGetSize(newFamily, TkFrameworkImpl::get()->log));
+
+    // alloc and init new family (deep copy so the caller keeps ownership of newFamily)
+    const uint32_t blockSize = NvBlastFamilyGetSize(newFamily, TkFrameworkImpl::get()->log);
+    NvBlastFamily* newFamilyCopy = (NvBlastFamily*)TkFrameworkImpl::get()->alloc(blockSize);
+    memcpy(newFamilyCopy, newFamily, blockSize);
+    NvBlastFamilySetAsset(newFamilyCopy, m_asset->getAssetLL(), TkFrameworkImpl::get()->log);
+
+    // get actors from new family
+    TkArray<NvBlastActor*>::type newLLActors(NvBlastFamilyGetActorCount(newFamilyCopy, TkFrameworkImpl::get()->log));
+    uint32_t actorCount = NvBlastFamilyGetActors(newLLActors.begin(), newLLActors.size(), newFamilyCopy, TkFrameworkImpl::get()->log);
+
+    // reset actor families to nullptr (we use it as a flag later)
+    for (TkActorImpl& actor : m_actors)
+    {
+        if (actor.isActive())
+        {
+            actor.m_family = nullptr;
+        }
+    }
+
+    // prepare split event with new actors.
+    // 'children' is constructed with capacity actorCount and then emptied, so the
+    // pushBack calls below never reallocate and children.begin() stored in the
+    // event stays valid.
+    auto newActorsSplitEvent = getQueue().allocData<TkSplitEvent>();
+    TkArray<TkActor*>::type children(actorCount);
+    children.resizeUninitialized(0);
+    newActorsSplitEvent->children = children.begin();
+
+    // scratch buffer for areLLActorsEqual, sized for the worst case
+    TkArray<uint32_t>::type scratch(m_asset->getChunkCount());
+
+    for (uint32_t i = 0; i < actorCount; ++i)
+    {
+        NvBlastActor* newLLActor = newLLActors[i];
+        uint32_t actorIndex = NvBlastActorGetIndex(newLLActor, TkFrameworkImpl::get()->log);
+        TkActorImpl& tkActor = *getActorByIndex(actorIndex);
+
+        tkActor.m_family = this;
+
+        if (!tkActor.isActive() || !areLLActorsEqual(newLLActor, tkActor.m_actorLL, scratch))
+        {
+            // Actor is new or changed: emit a removal event for the old incarnation...
+            if (tkActor.isActive())
+            {
+                auto removeSplitEvent = getQueue().allocData<TkSplitEvent>();
+                removeSplitEvent->parentData.family = this;
+                removeSplitEvent->numChildren = 0;
+                removeSplitEvent->parentData.userData = tkActor.userData;
+                removeSplitEvent->parentData.index = tkActor.getIndex();
+                getQueue().addEvent(removeSplitEvent);
+            }
+
+            tkActor.m_actorLL = newLLActor;
+
+            // ...switch groups...
+            TkGroupImpl* prevGroup = tkActor.m_group;
+            if (prevGroup != group)
+            {
+                if (prevGroup)
+                {
+                    prevGroup->removeActor(tkActor);
+                }
+                if (group)
+                {
+                    group->addActor(tkActor);
+                }
+            }
+
+            // ...and report it as a child of the family-wide split event below.
+            children.pushBack(&tkActor);
+        }
+        else
+        {
+            // Unchanged actor: just swap in the new LL pointer silently.
+            tkActor.m_actorLL = newLLActor;
+        }
+    }
+
+    // if m_family is still nullptr for an active actor -> remove it. It doesn't exist in new family.
+    for (TkActorImpl& tkActor : m_actors)
+    {
+        if (tkActor.isActive() && tkActor.m_family == nullptr)
+        {
+            tkActor.m_family = this;
+            if (tkActor.m_group)
+            {
+                tkActor.m_group->removeActor(tkActor);
+            }
+
+            auto removeSplitEvent = getQueue().allocData<TkSplitEvent>();
+            removeSplitEvent->parentData.family = this;
+            removeSplitEvent->numChildren = 0;
+            removeSplitEvent->parentData.userData = tkActor.userData;
+            removeSplitEvent->parentData.index = tkActor.getIndex();
+            getQueue().addEvent(removeSplitEvent);
+
+            tkActor.m_actorLL = nullptr;
+        }
+    }
+
+    // add split event with all new actors; an invalid parent index marks this as
+    // a family-wide reinitialization rather than an actor split.
+    newActorsSplitEvent->parentData.family = this;
+    newActorsSplitEvent->parentData.userData = 0;
+    newActorsSplitEvent->parentData.index = invalidIndex<uint32_t>();
+    newActorsSplitEvent->numChildren = children.size();
+    if (newActorsSplitEvent->numChildren > 0)
+    {
+        getQueue().addEvent(newActorsSplitEvent);
+    }
+
+    // replace family
+    TkFrameworkImpl::get()->free(m_familyLL);
+    m_familyLL = newFamilyCopy;
+
+    // update joints
+    for (TkActorImpl& tkActor : m_actors)
+    {
+        if (!tkActor.m_jointList.isEmpty())
+        {
+            updateJoints(&tkActor);
+        }
+    }
+
+    getQueue().dispatch();
+}
+
+
+// Returns the Tk actor currently owning the given chunk, or nullptr (with a
+// warning for an out-of-range index, silently if no LL actor owns the chunk).
+TkActorImpl* TkFamilyImpl::getActorByChunk(uint32_t chunk)
+{
+    if (chunk >= NvBlastAssetGetChunkCount(m_asset->getAssetLLInternal(), TkFrameworkImpl::get()->log))
+    {
+        NVBLASTTK_LOG_WARNING("TkFamilyImpl::getActorByChunk: invalid chunk index. Returning NULL.");
+        return nullptr;
+    }
+
+    NvBlastActor* actorLL = NvBlastFamilyGetChunkActor(m_familyLL, chunk, TkFrameworkImpl::get()->log);
+    return actorLL ? getActorByActorLL(actorLL) : nullptr;
+}
+
+
+/**
+Routes a mixed buffer of fracture commands to the actors they belong to.
+Commands are scanned in order and grouped into maximal runs targeting the same
+actor; each run is forwarded as a batch via TkActorImpl::applyFracture.  Runs
+attributed to a null actor (e.g. bond commands whose endpoints are already on
+different actors) are skipped but their pointers are still advanced.
+*/
+void TkFamilyImpl::applyFractureInternal(const NvBlastFractureBuffers* commands)
+{
+    NvBlastSupportGraph graph = getAsset()->getGraph();
+
+    // apply bond fracture commands on relevant actors
+    {
+        TkActorImpl* currActor = nullptr;
+        NvBlastBondFractureData* bondFractures = commands->bondFractures;
+        uint32_t bondFracturesCount = 0;
+
+        // Flush helper: applies the accumulated run to currActor (if any) and
+        // advances the window past it.
+        auto applyFracture = [&]()
+        {
+            if (bondFracturesCount > 0)
+            {
+                if (currActor != nullptr && currActor->isActive())
+                {
+                    NvBlastFractureBuffers newCommands;
+                    newCommands.bondFractures = bondFractures;
+                    newCommands.bondFractureCount = bondFracturesCount;
+                    newCommands.chunkFractures = nullptr;
+                    newCommands.chunkFractureCount = 0;
+                    currActor->applyFracture(nullptr, &newCommands);
+                }
+
+                bondFractures += bondFracturesCount;
+                bondFracturesCount = 0;
+            }
+        };
+
+        // bondFracturesCount counts commands seen since the last flush; it is
+        // incremented in the loop step, i.e. after the body has decided whether
+        // command i starts a new run.
+        for (uint32_t i = 0; i < commands->bondFractureCount; ++i, ++bondFracturesCount)
+        {
+            const NvBlastBondFractureData& command = commands->bondFractures[i];
+            uint32_t chunk0 = graph.chunkIndices[command.nodeIndex0];
+            uint32_t chunk1 = graph.chunkIndices[command.nodeIndex1];
+            TkActorImpl* actor0 = getActorByChunk(chunk0);
+            TkActorImpl* actor1 = getActorByChunk(chunk1);
+            if (actor0 != actor1)
+            {
+                // skipping this event, bond already broken
+                actor0 = nullptr;
+            }
+            if (actor0 != currActor)
+            {
+                applyFracture();
+                currActor = actor0;
+            }
+        }
+
+        // Flush the trailing run.
+        if (bondFracturesCount > 0)
+        {
+            applyFracture();
+        }
+    }
+
+    // apply chunk fracture commands on relevant actors (same batching scheme)
+    {
+        TkActorImpl* currActor = nullptr;
+        NvBlastChunkFractureData* chunkFractures = commands->chunkFractures;
+        uint32_t chunkFracturesCount = 0;
+
+        auto applyFracture = [&]()
+        {
+            if (chunkFracturesCount > 0)
+            {
+                if (currActor != nullptr && currActor->isActive())
+                {
+                    NvBlastFractureBuffers newCommands;
+                    newCommands.bondFractures = nullptr;
+                    newCommands.bondFractureCount = 0;
+                    newCommands.chunkFractures = chunkFractures;
+                    newCommands.chunkFractureCount = chunkFracturesCount;
+                    currActor->applyFracture(nullptr, &newCommands);
+                }
+
+                chunkFractures += chunkFracturesCount;
+                chunkFracturesCount = 0;
+            }
+        };
+
+        for (uint32_t i = 0; i < commands->chunkFractureCount; ++i, ++chunkFracturesCount)
+        {
+            const NvBlastChunkFractureData& command = commands->chunkFractures[i];
+            TkActorImpl* actor = getActorByChunk(command.chunkIndex);
+            if (actor != currActor)
+            {
+                applyFracture();
+                currActor = actor;
+            }
+        }
+        if (chunkFracturesCount > 0)
+        {
+            applyFracture();
+        }
+    }
+}
+
+
+/**
+Re-resolves both endpoints of every joint attached to 'actor' (each endpoint is
+looked up by chunk in its own family) and notifies each joint via setActors.
+Events go to alternateQueue if provided, otherwise to the joint's default queue.
+*/
+void TkFamilyImpl::updateJoints(TkActorImpl* actor, TkEventQueue* alternateQueue)
+{
+    // Copy joint array for safety against implementation of joint->setActor
+    // (setActors may unlink the joint from the actor's list while we iterate).
+    // The copy lives on the stack via alloca.
+    TkJointImpl** joints = reinterpret_cast<TkJointImpl**>(NvBlastAlloca(sizeof(TkJointImpl*)*actor->getJointCountInternal()));
+    TkJointImpl** stop = joints + actor->getJointCountInternal();
+    TkJointImpl** jointHandle = joints;
+    for (TkActorImpl::JointIt j(*actor); (bool)j; ++j)
+    {
+        *jointHandle++ = *j;
+    }
+    jointHandle = joints;
+    while (jointHandle < stop)
+    {
+        TkJointImpl* joint = *jointHandle++;
+
+        const TkJointData& data = joint->getDataInternal();
+
+        // Each side's current actor is whichever actor now owns the attachment chunk.
+        TkActorImpl* actor0 = data.actors[0] != nullptr ?
+            static_cast<TkActorImpl&>(*data.actors[0]).getFamilyImpl().getActorByChunk(data.chunkIndices[0]) : nullptr;
+
+        TkActorImpl* actor1 = data.actors[1] != nullptr ?
+            static_cast<TkActorImpl&>(*data.actors[1]).getFamilyImpl().getActorByChunk(data.chunkIndices[1]) : nullptr;
+
+        joint->setActors(actor0, actor1, alternateQueue);
+    }
+}
+
+
+// Public accessor for the asset this family was instanced from.
+const TkAsset* TkFamilyImpl::getAsset() const
+{
+    return m_asset;
+}
+
+
+/**
+Writes this family to the stream: object header, asset ID, the raw LL family
+block, the internal joint count, the external-joint family ID table, and a joint
+list per active actor.  The material pointer is not serialized (a warning is
+logged if one is set).
+
+\return true on success; false (after logging) if the family has no asset, no LL
+        family, or the asset's ID is zero, or if joint bookkeeping is inconsistent.
+*/
+bool TkFamilyImpl::serialize(PxFileBuf& stream) const
+{
+    TkFrameworkImpl::get()->serializeHeader(*this, stream);
+
+    if (m_material != nullptr)
+    {
+        NVBLASTTK_LOG_WARNING("TkFamilyImpl::serialize(): Material pointer is not nullptr, it will be lost during serialization.");
+    }
+
+    NVBLASTTK_CHECK_ERROR(m_asset != nullptr, "TkFamilyImpl::serialize(): TkFamily asset is nullptr, can't be serialized.", return false);
+    NVBLASTTK_CHECK_ERROR(m_familyLL != nullptr, "TkFamilyImpl::serialize(): TkFamily family is nullptr, can't be serialized.", return false);
+
+    // Asset ID (used to re-resolve the asset on deserialization)
+    const NvBlastID& assetID = m_asset->getID();
+    NVBLASTTK_CHECK_ERROR(!TkGUIDIsZero(&assetID), "TkFamilyImpl::serialize(): Associated asset doesn't have an ID set.", return false);
+    stream.write(&assetID, sizeof(NvBlastID));
+
+    // Family: size-prefixed raw block
+    const uint32_t familySize = NvBlastFamilyGetSize(m_familyLL, TkFrameworkImpl::get()->log);
+    stream.storeDword(familySize);
+    stream.write(m_familyLL, familySize);
+
+    //// Joints ////
+
+    // Internal joint data
+    stream.storeDword(m_internalJointCount);
+
+    // External joint family ID list
+    stream.storeDword(m_jointSets.size());
+    for (uint32_t i = 0; i < m_jointSets.size(); ++i)
+    {
+        const JointSet* jointSet = m_jointSets[i];
+        stream.write(&jointSet->m_familyID, sizeof(NvBlastID));
+    }
+
+    // Actor joint lists
+    TkJointImpl* internalJoints = getInternalJoints();
+    for (uint32_t actorNum = 0; actorNum < m_actors.size(); ++actorNum)
+    {
+        const TkActorImpl& actor = m_actors[actorNum];
+        if (!actor.isActive())
+        {
+            continue; // We may need a better way of iterating through active actors
+        }
+
+        stream.storeDword(actor.getJointCount());
+
+        for (TkActorImpl::JointIt j(actor); (bool)j; ++j)
+        {
+            TkJointImpl* joint = *j;
+
+            const TkJointData& jointData = joint->getDataInternal();
+            NVBLAST_ASSERT(jointData.actors[0] == &actor || jointData.actors[1] == &actor);
+
+            // attachmentFlags: bit 0 set if this actor is endpoint 0, bit 1 if endpoint 1.
+            const uint32_t attachmentFlags = (uint32_t)(jointData.actors[0] == &actor) | (uint32_t)(jointData.actors[1] == &actor) << 1;
+            stream.storeDword(attachmentFlags);
+
+            const TkActorImpl* otherActor = static_cast<const TkActorImpl*>(jointData.actors[(attachmentFlags >> 1) ^ 1]);
+
+            if (joint->m_owner == this)
+            {
+                // Internal joint - write internal joint index
+                const uint32_t jointIndex = static_cast<uint32_t>(joint - internalJoints);
+                stream.storeDword(jointIndex);
+                if (otherActor != nullptr && otherActor->getIndexInternal() < actorNum) // No need to write the joint data, it has already been written
+                {
+                    continue;
+                }
+            }
+            else
+            {
+                // External joint - write external family index and joint information
+                stream.storeDword(invalidIndex<uint32_t>()); // Denotes external joint
+
+                const FamilyIDMap::Entry* e = m_familyIDMap.find(getFamilyID(otherActor));
+                // Fixed: this message previously said "TkFamilyImpl::deserialize()" (copy-paste error).
+                NVBLASTTK_CHECK_ERROR(e != nullptr, "TkFamilyImpl::serialize(): Bad data - attached family's ID not recorded.", return false);
+
+                stream.storeDword(e->second); // Write family ID index
+            }
+
+            // Write joint data
+            for (int side = 0; side < 2; ++side)
+            {
+                stream.storeDword(jointData.chunkIndices[side]);
+                const physx::PxVec3& attachPosition = jointData.attachPositions[side];
+                stream.storeFloat(attachPosition.x); stream.storeFloat(attachPosition.y); stream.storeFloat(attachPosition.z);
+            }
+        }
+    }
+
+    return true;
+}
+
+
+//////// Static functions ////////
+
+/**
+Reconstructs a TkFamilyImpl from the stream format written by serialize():
+resolves the asset by ID, reads the raw LL family block, rebinds actor slots,
+then rebuilds internal and external joints and their actor attachment lists.
+Returns nullptr (after logging) on any failure.
+*/
+TkSerializable* TkFamilyImpl::deserialize(PxFileBuf& stream, const NvBlastID& id)
+{
+    // Asset resolve
+    NvBlastID assetID;
+    stream.read(&assetID, sizeof(NvBlastID));
+    TkIdentifiable* object = TkFrameworkImpl::get()->findObjectByIDInternal(assetID);
+    NVBLASTTK_CHECK_ERROR(object && object->getType() == TkAssetImpl::s_type, "TkFamilyImpl::deserialize: can't find asset with corresponding ID.", return nullptr);
+    TkAssetImpl* asset = static_cast<TkAssetImpl*>(object);
+
+    // Allocate
+    TkFamilyImpl* family = NVBLASTTK_NEW(TkFamilyImpl)(id);
+    NVBLASTTK_CHECK_ERROR(family != nullptr, "TkFamilyImpl::deserialize: family allocation failed.", return nullptr);
+
+    // associate with found asset
+    family->m_asset = asset;
+
+    // Family: size-prefixed raw block written by serialize()
+    const uint32_t familySize = stream.readDword();
+    family->m_familyLL = static_cast<NvBlastFamily*>(TkFrameworkImpl::get()->alloc(familySize));
+    stream.read(family->m_familyLL, familySize);
+
+    if (family->m_familyLL == nullptr)
+    {
+        NVBLASTTK_LOG_ERROR("TkFamilyImpl::deserialize: low-level family could not be created.");
+        family->release();
+        return nullptr;
+    }
+
+#if NV_ENABLE_ASSERTS && 0
+    // NOTE(review): disabled block; if re-enabled, the local 'id' shadows the function parameter.
+    NvBlastID id = NvBlastFamilyGetAssetID(family->m_familyLL, TkFrameworkImpl::get()->log);
+    NVBLAST_ASSERT(TkGUIDsEqual(&id, &assetID));
+#endif
+
+    // preallocate actors (one slot per possible LL actor index)
+    uint32_t maxActorCount = NvBlastFamilyGetMaxActorCount(family->m_familyLL, TkFrameworkImpl::get()->log);
+    family->m_actors.resize(maxActorCount);
+
+    // get actors from family
+    TkArray<NvBlastActor*>::type newLLActors(NvBlastFamilyGetActorCount(family->m_familyLL, TkFrameworkImpl::get()->log));
+    uint32_t actorCount = NvBlastFamilyGetActors(newLLActors.begin(), newLLActors.size(), family->m_familyLL, TkFrameworkImpl::get()->log);
+
+    // fill actors: bind each LL actor to the slot matching its family-local index
+    for (uint32_t i = 0; i < actorCount; ++i)
+    {
+        NvBlastActor* newLLActor = newLLActors[i];
+        uint32_t actorIndex = NvBlastActorGetIndex(newLLActor, TkFrameworkImpl::get()->log);
+        TkActorImpl& tkActor = *family->getActorByIndex(actorIndex);
+
+        tkActor.m_family = family;
+        tkActor.m_actorLL = newLLActor;
+    }
+
+    //// Create joints ////
+
+    // internal: zero-filled storage; joints are constructed lazily below on first reference
+    family->m_internalJointCount = stream.readDword();
+    family->m_internalJointBuffer.resize(family->m_internalJointCount * sizeof(TkJointImpl), '\0');
+    TkJointImpl* internalJoints = family->getInternalJoints();
+
+    // external joint family ID list
+    const uint32_t jointSetCount = stream.readDword();
+    family->m_jointSets.resize(jointSetCount);
+    for (uint32_t i = 0; i < jointSetCount; ++i)
+    {
+        family->m_jointSets[i] = NVBLASTTK_NEW(JointSet);
+        stream.read(&family->m_jointSets[i]->m_familyID, sizeof(NvBlastID));
+        family->m_familyIDMap[family->m_jointSets[i]->m_familyID] = i;
+    }
+
+    // fill actor joint lists
+    for (uint32_t actorNum = 0; actorNum < family->m_actors.size(); ++actorNum)
+    {
+        TkActorImpl& actor = family->m_actors[actorNum];
+        if (!actor.isActive())
+        {
+            continue; // We may need a better way of iterating through active actors
+        }
+
+        // Read joint information
+        uint32_t jointCount = stream.readDword();
+        while (jointCount--)
+        {
+            // attachmentFlags: bit 0 = this actor is endpoint 0, bit 1 = endpoint 1 (see serialize()).
+            const uint32_t attachmentFlags = stream.readDword();
+            const uint32_t jointIndex = stream.readDword();
+            if (!isInvalidIndex(jointIndex))
+            {
+                // Internal joint
+                TkJointImpl& joint = internalJoints[jointIndex];
+                TkJointData& jointData = joint.getDataWritable();
+
+                // Initialize joint if it has not been encountered yet
+                NVBLAST_ASSERT((joint.m_links[0].m_joint == nullptr) == (joint.m_links[1].m_joint == nullptr));
+                if (joint.m_links[0].m_joint == nullptr)
+                {
+                    new (&joint) TkJointImpl;
+                    joint.m_owner = family;
+                    for (int side = 0; side < 2; ++side)
+                    {
+                        jointData.chunkIndices[side] = stream.readDword();
+                        physx::PxVec3& attachPosition = jointData.attachPositions[side];
+                        attachPosition.x = stream.readFloat(); attachPosition.y = stream.readFloat(); attachPosition.z = stream.readFloat();
+                    }
+                }
+
+                if (attachmentFlags & 1)
+                {
+                    jointData.actors[0] = &actor;
+                    actor.addJoint(joint.m_links[0]);
+                }
+
+                if (attachmentFlags & 2)
+                {
+                    jointData.actors[1] = &actor;
+                    // Both ends on the same actor: only link once.
+                    if (jointData.actors[0] != jointData.actors[1])
+                    {
+                        actor.addJoint(joint.m_links[1]);
+                    }
+                }
+            }
+            else
+            {
+                // External joint
+                const uint32_t otherFamilyIndex = stream.readDword();
+                // NOTE(review): this message ("family allocation failed") is copy-pasted from the
+                // allocation check above; it actually reports an out-of-range family index.
+                NVBLASTTK_CHECK_ERROR(otherFamilyIndex < family->m_jointSets.size(), "TkFamilyImpl::deserialize: family allocation failed.", return nullptr);
+                const NvBlastID& otherFamilyID = family->m_jointSets[otherFamilyIndex]->m_familyID;
+                TkFamilyImpl* otherFamily = static_cast<TkFamilyImpl*>(TkFrameworkImpl::get()->findObjectByIDInternal(otherFamilyID));
+
+                TkJointDesc jointDesc;
+                for (int side = 0; side < 2; ++side)
+                {
+                    jointDesc.chunkIndices[side] = stream.readDword();
+                    physx::PxVec3& attachPosition = jointDesc.attachPositions[side];
+                    attachPosition.x = stream.readFloat(); attachPosition.y = stream.readFloat(); attachPosition.z = stream.readFloat();
+                }
+
+                NVBLASTTK_CHECK_ERROR(attachmentFlags != 3, "TkFamilyImpl::deserialize: both attached actors are the same in an external joint.", return nullptr);
+
+                const uint32_t attachmentIndex = attachmentFlags >> 1;
+
+                TkJointImpl** jointHandle = family->createExternalJointHandle(otherFamilyID, jointDesc.chunkIndices[attachmentIndex], jointDesc.chunkIndices[attachmentIndex ^ 1]);
+                NVBLASTTK_CHECK_ERROR(jointHandle != nullptr, "TkFamilyImpl::deserialize: joint handle could not be created.", return nullptr);
+
+                if (otherFamily == nullptr)
+                {
+                    // Other family does not exist yet, we'll create the joint
+                    jointDesc.families[attachmentIndex] = family;
+                    jointDesc.families[attachmentIndex ^ 1] = nullptr;
+
+                    // NOTE(review): "createion" typo in the message below.
+                    TkJointImpl* joint = NVBLASTTK_NEW(TkJointImpl)(jointDesc, nullptr);
+                    NVBLASTTK_CHECK_ERROR(joint != nullptr, "TkFamilyImpl::deserialize: joint createion failed.", return nullptr);
+
+                    *jointHandle = joint;
+
+                    actor.addJoint(joint->m_links[attachmentIndex]);
+                }
+                else
+                {
+                    // Other family exists, and should have created the joint
+                    TkJointImpl* joint = otherFamily->findExternalJoint(family, ExternalJointKey(jointDesc.chunkIndices[attachmentIndex ^ 1], jointDesc.chunkIndices[attachmentIndex]));
+                    NVBLASTTK_CHECK_ERROR(joint != nullptr, "TkFamilyImpl::deserialize: other family should have created joint, but did not.", return nullptr);
+
+                    *jointHandle = joint;
+
+                    // Add the joint to its actor(s)
+                    joint->getDataWritable().actors[attachmentIndex] = &actor;
+                    actor.addJoint(joint->m_links[attachmentIndex]);
+                }
+            }
+        }
+    }
+
+    return family;
+}
+
+
+/**
+Factory: creates a family for 'asset', allocates and initializes the LL family
+memory, pre-sizes the actor slot array to the max actor count, and reserves
+zeroed storage for the asset's internal joints.  Returns nullptr (after logging
+and self-release) if the LL family cannot be created.
+*/
+TkFamilyImpl* TkFamilyImpl::create(const TkAssetImpl* asset)
+{
+    TkFamilyImpl* family = NVBLASTTK_NEW(TkFamilyImpl);
+    family->m_asset = asset;
+    void* mem = TkFrameworkImpl::get()->alloc(NvBlastAssetGetFamilyMemorySize(asset->getAssetLL(), TkFrameworkImpl::get()->log));
+    family->m_familyLL = NvBlastAssetCreateFamily(mem, asset->getAssetLL(), TkFrameworkImpl::get()->log);
+    //family->addListener(*TkFrameworkImpl::get());
+
+    if (family->m_familyLL == nullptr)
+    {
+        NVBLASTTK_LOG_ERROR("TkFamilyImpl::create: low-level family could not be created.");
+        family->release();
+        return nullptr;
+    }
+
+    uint32_t maxActorCount = NvBlastFamilyGetMaxActorCount(family->m_familyLL, TkFrameworkImpl::get()->log);
+    family->m_actors.resize(maxActorCount);
+
+    // Internal joint storage is zero-filled; joints are constructed in place later.
+    family->m_internalJointBuffer.resize(asset->getJointDescCountInternal() * sizeof(TkJointImpl), 0);
+    family->m_internalJointCount = asset->getJointDescCountInternal();
+
+    return family;
+}
+
+
+/**
+Returns the address of a (new, null-initialized) joint pointer slot keyed by
+(chunkIndex0, chunkIndex1) in the joint set for otherFamilyID, creating the
+joint set on first use.  Returns nullptr (with a warning) if a joint with that
+key already exists.
+NOTE(review): log messages below say "addExternalJoint" — presumably the
+function's former name.
+*/
+TkJointImpl** TkFamilyImpl::createExternalJointHandle(const NvBlastID& otherFamilyID, uint32_t chunkIndex0, uint32_t chunkIndex1)
+{
+    JointSet* jointSet;
+    const FamilyIDMap::Entry* jointSetIndexEntry = m_familyIDMap.find(otherFamilyID);
+    uint32_t otherFamilyIndex;
+    if (jointSetIndexEntry != nullptr)
+    {
+        otherFamilyIndex = jointSetIndexEntry->second;
+        jointSet = m_jointSets[otherFamilyIndex];
+    }
+    else
+    {
+        // First joint to this family: create and register a new joint set.
+        jointSet = NVBLASTTK_NEW(JointSet);
+        NVBLASTTK_CHECK_ERROR(jointSet != nullptr, "TkFamilyImpl::addExternalJoint: failed to create joint set for other family ID.", return nullptr);
+        jointSet->m_familyID = otherFamilyID;
+        otherFamilyIndex = m_jointSets.size();
+        m_familyIDMap[otherFamilyID] = otherFamilyIndex;
+        m_jointSets.pushBack(jointSet);
+    }
+
+    const ExternalJointKey key(chunkIndex0, chunkIndex1);
+    const bool jointExists = jointSet->m_joints.find(key) != nullptr;
+    NVBLASTTK_CHECK_WARNING(!jointExists, "TkFamilyImpl::addExternalJoint: joint already added.", return nullptr);
+
+    // operator[] inserts the key with a default (null) value and returns the slot.
+    return &jointSet->m_joints[key];
+}
+
+
+/**
+Erases the joint handle keyed by (chunkIndex0, chunkIndex1) in the joint set for
+otherFamilyID.  On success the stored joint pointer is returned through 'joint'
+and true is returned; an empty joint set is deleted and the swap-with-last slot
+in m_jointSets / m_familyIDMap is re-pointed.  Returns false if no such set or key.
+*/
+bool TkFamilyImpl::deleteExternalJointHandle(TkJointImpl*& joint, const NvBlastID& otherFamilyID, uint32_t chunkIndex0, uint32_t chunkIndex1)
+{
+    const FamilyIDMap::Entry* jointSetIndexEntry = m_familyIDMap.find(otherFamilyID);
+    if (jointSetIndexEntry != nullptr)
+    {
+        const uint32_t jointSetIndex = jointSetIndexEntry->second;
+        TkHashMap<ExternalJointKey, TkJointImpl*>::type::Entry e;
+        if (m_jointSets[jointSetIndex]->m_joints.erase(ExternalJointKey(chunkIndex0, chunkIndex1), e))
+        {
+            // Delete the joint set if it is empty
+            if (m_jointSets[jointSetIndex]->m_joints.size() == 0)
+            {
+                NVBLASTTK_DELETE(m_jointSets[jointSetIndex], JointSet);
+                m_jointSets.replaceWithLast(jointSetIndex);
+                m_familyIDMap.erase(otherFamilyID);
+                // If a set was moved into the vacated slot, fix its index in the ID map.
+                if (jointSetIndex < m_jointSets.size())
+                {
+                    m_familyIDMap[m_jointSets[jointSetIndex]->m_familyID] = jointSetIndex;
+                }
+            }
+
+            // Return value that was stored
+            joint = e.second;
+            return true;
+        }
+    }
+
+    return false;
+}
+
+
+// Looks up the external joint keyed by 'key' in the joint set associated with
+// otherFamily's ID.  Returns nullptr if no such set or no such key exists.
+TkJointImpl* TkFamilyImpl::findExternalJoint(const TkFamilyImpl* otherFamily, ExternalJointKey key) const
+{
+    // First resolve the joint set for the other family's ID...
+    const FamilyIDMap::Entry* setEntry = m_familyIDMap.find(getFamilyID(otherFamily));
+    if (setEntry == nullptr)
+    {
+        return nullptr;
+    }
+
+    // ...then look the chunk-pair key up within that set.
+    const TkHashMap<ExternalJointKey, TkJointImpl*>::type::Entry* jointEntry = m_jointSets[setEntry->second]->m_joints.find(key);
+    return jointEntry != nullptr ? jointEntry->second : nullptr;
+}
+
+} // namespace Blast
+} // namespace Nv
diff --git a/NvBlast/sdk/toolkit/source/NvBlastTkFamilyImpl.h b/NvBlast/sdk/toolkit/source/NvBlastTkFamilyImpl.h
new file mode 100644
index 0000000..571ee75
--- /dev/null
+++ b/NvBlast/sdk/toolkit/source/NvBlastTkFamilyImpl.h
@@ -0,0 +1,245 @@
+/*
+* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#ifndef NVBLASTTKFAMILYIMPL_H
+#define NVBLASTTKFAMILYIMPL_H
+
+#include "NvBlastTkCommon.h"
+
+#include "NvBlastTkFamily.h"
+#include "NvBlastTkTypeImpl.h"
+#include "NvBlastTkActorImpl.h"
+
+#include "NvBlastTkEventQueue.h"
+#include "NvBlastTkHashSet.h"
+#include "NvBlastTkHashMap.h"
+
+#include "NvBlast.h"
+#include "NvBlastAssert.h"
+#include "NvBlastDLink.h"
+
+
+// Forward declarations
+struct NvBlastFamily;
+
+namespace Nv
+{
+namespace Blast
+{
+
+// Forward declarations
+class TkGroupImpl;
+class TkAssetImpl;
+
+
+/**
+Implementation of TkFamily: owns the low-level NvBlastFamily block, the array of
+actor slots (indexed by LL actor index), the internal joint storage, and the
+external joint bookkeeping (per-attached-family joint sets).
+*/
+NVBLASTTK_IMPL_DECLARE(Family)
+{
+public:
+    /**
+    Enum which keeps track of the serialized data format.
+    */
+    enum Version
+    {
+        /** Initial version */
+        Initial,
+
+        // New formats must come before Count.  They should be given descriptive names with more information in comments.
+
+        /** The number of serialized formats. */
+        Count,
+
+        /** The current version.  This should always be Count-1 */
+        Current = Count - 1
+    };
+
+    TkFamilyImpl();
+    TkFamilyImpl(const NvBlastID& id);
+    ~TkFamilyImpl();
+
+    NVBLASTTK_IMPL_DEFINE_SERIALIZABLE('A', 'C', 'T', 'F');
+
+    // Begin TkFamily
+    virtual const NvBlastFamily* getFamilyLL() const override;
+
+    virtual uint32_t getActorCount() const override;
+
+    virtual uint32_t getActors(TkActor** buffer, uint32_t bufferSize, uint32_t indexStart = 0) const override;
+
+    virtual void addListener(TkEventListener& l) override { m_queue.addListener(l); }
+
+    virtual void removeListener(TkEventListener& l) override { m_queue.removeListener(l); }
+
+    virtual void applyFracture(const NvBlastFractureBuffers* commands) override { applyFractureInternal(commands); }
+
+    virtual const TkAsset* getAsset() const override;
+
+    virtual void reinitialize(const NvBlastFamily* newFamily, TkGroup* group) override;
+
+    virtual const void* getMaterial() const override;
+
+    virtual void setMaterial(const void* material) override;
+    // End TkFamily
+
+    // Public methods
+    static TkFamilyImpl* create(const TkAssetImpl* asset);
+
+    const TkAssetImpl* getAssetImpl() const;
+
+    NvBlastFamily* getFamilyLLInternal() const;
+
+    uint32_t getActorCountInternal() const;
+
+    TkActorImpl* addActor(NvBlastActor* actorLL);
+
+    void applyFractureInternal(const NvBlastFractureBuffers* commands);
+
+    void removeActor(TkActorImpl* actorLL);
+
+    TkEventQueue& getQueue() { return m_queue; }
+
+    TkActorImpl* getActorByActorLL(const NvBlastActor* actorLL);
+
+    void updateJoints(TkActorImpl* actor, TkEventQueue* alternateQueue = nullptr);
+
+    TkArray<TkActorImpl>::type& getActorsInternal();
+
+    uint32_t getInternalJointCount() const;
+
+    TkJointImpl* getInternalJoints() const;
+
+    TkJointImpl** createExternalJointHandle(const NvBlastID& otherFamilyID, uint32_t chunkIndex0, uint32_t chunkIndex1);
+
+    bool deleteExternalJointHandle(TkJointImpl*& joint, const NvBlastID& otherFamilyID, uint32_t chunkIndex0, uint32_t chunkIndex1);
+
+    void releaseJoint(TkJointImpl& joint);
+
+    TkActorImpl* getActorByChunk(uint32_t chunkIndex);
+
+    typedef physx::shdfnd::Pair<uint32_t, uint32_t> ExternalJointKey;    //!< The chunk indices within the two TkFamilies joined by the joint.  These will be support chunks.
+
+    TkJointImpl* findExternalJoint(const TkFamilyImpl* otherFamily, ExternalJointKey key) const;
+
+private:
+    TkActorImpl* getActorByIndex(uint32_t index);
+
+    /** Per-attached-family collection of external joints, keyed by chunk index pair. */
+    struct JointSet
+    {
+        NvBlastID m_familyID;
+        TkHashMap<ExternalJointKey, TkJointImpl*>::type m_joints;
+    };
+
+    typedef TkHashMap<NvBlastID, uint32_t>::type FamilyIDMap;
+
+    NvBlastFamily* m_familyLL;                      //!< owned low-level family block
+    TkArray<TkActorImpl>::type m_actors;            //!< actor slots, indexed by LL actor index
+    uint32_t m_internalJointCount;                  //!< number of joints in m_internalJointBuffer
+    TkArray<uint8_t>::type m_internalJointBuffer;   //!< raw storage for internal TkJointImpl objects
+    TkArray<JointSet*>::type m_jointSets;           //!< external joint sets, one per attached family
+    FamilyIDMap m_familyIDMap;                      //!< family ID -> index into m_jointSets
+    const TkAssetImpl* m_asset;                     //!< asset this family was instanced from
+    const void* m_material;                         //!< opaque user material pointer (not serialized)
+
+    TkEventQueue m_queue;                           //!< event queue for split/fracture notifications
+};
+
+
+//////// TkFamilyImpl inline methods ////////
+
+// Internal (non-virtual) accessor for the owning asset.
+NV_INLINE const TkAssetImpl* TkFamilyImpl::getAssetImpl() const
+{
+    return m_asset;
+}
+
+
+// Internal accessor for the LL family, non-const (unlike the public getFamilyLL()).
+NV_INLINE NvBlastFamily* TkFamilyImpl::getFamilyLLInternal() const
+{
+    return m_familyLL;
+}
+
+
+// Number of live LL actors, queried directly from the low-level family.
+NV_INLINE uint32_t TkFamilyImpl::getActorCountInternal() const
+{
+    NVBLAST_ASSERT(m_familyLL != nullptr);
+
+    return NvBlastFamilyGetActorCount(m_familyLL, TkFrameworkImpl::get()->log);
+}
+
+
+// Slot lookup by LL actor index; the slot may be inactive.  Asserts bounds.
+NV_INLINE TkActorImpl* TkFamilyImpl::getActorByIndex(uint32_t index)
+{
+    NVBLAST_ASSERT(index < m_actors.size());
+    return &m_actors[index];
+}
+
+
+// Maps a low-level actor to its Tk actor slot via the actor's family-local index.
+NV_INLINE TkActorImpl* TkFamilyImpl::getActorByActorLL(const NvBlastActor* actorLL)
+{
+    return getActorByIndex(NvBlastActorGetIndex(actorLL, TkFrameworkImpl::get()->log));
+}
+
+
+// Returns the opaque user material pointer (may be nullptr).
+NV_INLINE const void* TkFamilyImpl::getMaterial() const
+{
+    return m_material;
+}
+
+
+// Stores the opaque user material pointer; not owned and not serialized (see serialize()).
+NV_INLINE void TkFamilyImpl::setMaterial(const void* material)
+{
+    m_material = material;
+}
+
+
+// Mutable access to the full actor slot array (includes inactive slots).
+NV_INLINE TkArray<TkActorImpl>::type& TkFamilyImpl::getActorsInternal()
+{
+    return m_actors;
+}
+
+
+// Number of internal joints stored in m_internalJointBuffer.
+NV_INLINE uint32_t TkFamilyImpl::getInternalJointCount() const
+{
+    return m_internalJointCount;
+}
+
+
+// Returns the internal joint array as typed pointers into the raw byte buffer.
+// const_cast is needed because this const method hands out mutable joints.
+NV_INLINE TkJointImpl* TkFamilyImpl::getInternalJoints() const
+{
+    return const_cast<TkJointImpl*>(reinterpret_cast<const TkJointImpl*>(m_internalJointBuffer.begin()));
+}
+
+
+/**
+Destroys an internal joint owned by this family and marks its slot free by
+nulling m_owner.  The joint must live inside this family's internal joint buffer.
+*/
+NV_INLINE void TkFamilyImpl::releaseJoint(TkJointImpl& joint)
+{
+    NVBLAST_ASSERT(joint.m_owner == this);
+    // Fixed bounds check: pointer arithmetic on TkJointImpl* is already scaled by
+    // sizeof(TkJointImpl).  The original multiplied the count by sizeof again,
+    // making the upper bound far too large and the assert ineffective.
+    NVBLAST_ASSERT(&joint >= getInternalJoints() && &joint < getInternalJoints() + getInternalJointCount());
+
+    joint.~TkJointImpl();
+    // NOTE(review): writing a member after the destructor has run relies on the
+    // storage remaining valid; it does here because the buffer outlives the joint.
+    joint.m_owner = nullptr;
+}
+
+
+//////// Inline global functions ////////
+
+// Returns the family ID of the actor's family, or a zero ID for nullptr.
+// The zero ID is a 16-byte all-zero string literal reinterpreted as NvBlastID —
+// assumes sizeof(NvBlastID) == 16 and byte alignment suffices; TODO confirm.
+NV_INLINE const NvBlastID& getFamilyID(const TkActor* actor)
+{
+    return actor != nullptr ? static_cast<const TkActorImpl*>(actor)->getFamilyImpl().getIDInternal() : *reinterpret_cast<const NvBlastID*>("\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0");
+}
+
+// Returns the family's ID, or a zero ID for nullptr (same string-literal trick
+// as the TkActor overload above — assumes sizeof(NvBlastID) == 16; TODO confirm).
+NV_INLINE const NvBlastID& getFamilyID(const TkFamilyImpl* family)
+{
+    return family != nullptr ? family->getIDInternal() : *reinterpret_cast<const NvBlastID*>("\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0");
+}
+
+} // namespace Blast
+} // namespace Nv
+
+
+#endif // ifndef NVBLASTTKFAMILYIMPL_H
diff --git a/NvBlast/sdk/toolkit/source/NvBlastTkFrameworkImpl.cpp b/NvBlast/sdk/toolkit/source/NvBlastTkFrameworkImpl.cpp
new file mode 100644
index 0000000..6201101
--- /dev/null
+++ b/NvBlast/sdk/toolkit/source/NvBlastTkFrameworkImpl.cpp
@@ -0,0 +1,613 @@
+/*
+* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#include "NvBlastAssert.h"
+
+#include "NvBlastTkFrameworkImpl.h"
+#include "NvBlastTkAssetImpl.h"
+#include "NvBlastTkFamilyImpl.h"
+#include "NvBlastTkGroupImpl.h"
+#include "NvBlastTkActorImpl.h"
+#include "NvBlastTkJointImpl.h"
+#include "NvBlastTkTypeImpl.h"
+
+#include "PxAllocatorCallback.h"
+#include "PxErrorCallback.h"
+#include "PxFileBuf.h"
+
+#include <algorithm>
+
+
+using namespace physx;
+using namespace physx::shdfnd;
+using namespace physx::general_PxIOStream2;
+
+
+// Lexicographic (byte-wise memcmp) ordering for NvBlastID, enabling use of IDs
+// as keys in ordered containers.
+NV_INLINE bool operator < (const NvBlastID& id1, const NvBlastID& id2)
+{
+    return memcmp(&id1, &id2, sizeof(NvBlastID)) < 0;
+}
+
+
+namespace Nv
+{
+namespace Blast
+{
+
+//////// Local definitions ////////
+
+// Map type ID to static type data
+#define NVBLASTTK_REGISTER_TYPE(_name) \
+ if (!Tk##_name##Impl::s_type.indexIsValid()) \
+ { \
+ Tk##_name##Impl::s_type.setIndex(TkTypeIndex::_name); \
+ } \
+ m_types[TkTypeIndex::_name] = &Tk##_name##Impl::s_type; \
+ m_typeIDToIndex[Tk##_name##Impl::s_type.getID()] = TkTypeIndex::_name
+
+
+#define NVBLASTTK_RELEASE_TYPE(_name) \
+ { \
+ TkTypeImpl& type = Tk##_name##Impl::s_type; \
+ auto& toRelease = m_objects[type.getIndex()]; \
+ for (TkObject* obj : toRelease) \
+ { \
+ obj->release(); \
+ } \
+ }
+
+
+//////// TkFrameworkImpl static variables ////////
+
+TkFrameworkImpl* TkFrameworkImpl::s_framework = nullptr;
+
+
+//////// TkFrameworkImpl static function ////////
+
+// Returns the global singleton framework, or NULL if none has been created.
+TkFrameworkImpl* TkFrameworkImpl::get()
+{
+    return s_framework;
+}
+
+
+// Installs the global singleton.  Passing NULL destroys the current framework
+// (destructor + deallocation through its own allocator).  Setting a new
+// framework while one exists is an error and returns false.
+bool TkFrameworkImpl::set(TkFrameworkImpl* framework)
+{
+    if (s_framework != nullptr)
+    {
+        if (framework != nullptr)
+        {
+            NVBLASTTK_LOG_ERROR("TkFrameworkImpl::set: framework already set. Pass NULL to this function to destroy framework.");
+            return false;
+        }
+
+        // Grab the allocator before destroying the framework that owns it.
+        PxAllocatorCallback& allocator = s_framework->getAllocatorCallbackInternal();
+        s_framework->~TkFrameworkImpl();
+        allocator.deallocate(s_framework);
+    }
+
+    s_framework = framework;
+
+    return true;
+}
+
+
+// NvBlastLog-compatible logging function: translates NvBlastMessage types to
+// PxErrorCode values and forwards to the user's PxErrorCallback.  No-op when
+// no framework exists.
+void TkFrameworkImpl::log(int type, const char* msg, const char* file, int line)
+{
+    if (s_framework == nullptr)
+    {
+        return;
+    }
+
+    PxErrorCode::Enum pxErrorCode = PxErrorCode::eNO_ERROR;
+    switch (type)
+    {
+    case NvBlastMessage::Error:    pxErrorCode = PxErrorCode::eINVALID_OPERATION;    break;
+    case NvBlastMessage::Warning:  pxErrorCode = PxErrorCode::eDEBUG_WARNING;        break;
+    case NvBlastMessage::Info:     pxErrorCode = PxErrorCode::eDEBUG_INFO;           break;
+    case NvBlastMessage::Debug:    pxErrorCode = PxErrorCode::eNO_ERROR;             break;
+    }
+
+    s_framework->getErrorCallback().reportError(pxErrorCode, msg, file, line);
+}
+
+
+// std::malloc-style allocation returning Alignment-byte (16) aligned memory.
+// Over-allocates by Alignment bytes, aligns the pointer, and stores the
+// adjustment in the byte immediately preceding the returned pointer so that
+// TkFrameworkImpl::free can recover the original allocation address.
+void* TkFrameworkImpl::alloc(size_t size)
+{
+    if (s_framework == nullptr)
+    {
+        return nullptr;
+    }
+
+    NV_COMPILE_TIME_ASSERT(Alignment > 0 && Alignment <= 256);
+
+    unsigned char* mem = reinterpret_cast<unsigned char*>(s_framework->m_allocatorCallback->allocate(size + (size_t)Alignment, "NvBlast", __FILE__, __LINE__));
+
+    // offset is the number of padding bytes minus one; storing it just before
+    // the returned pointer lets free() undo the adjustment.
+    const unsigned char offset = (unsigned char)((uintptr_t)Alignment - (uintptr_t)mem % (size_t)Alignment - 1);
+    mem += offset;
+    *mem++ = offset;
+
+    return mem;
+}
+
+
+// Companion to TkFrameworkImpl::alloc: reads the alignment-offset byte stored
+// immediately before the user pointer and deallocates the original block.
+void TkFrameworkImpl::free(void* mem)
+{
+    if (s_framework == nullptr)
+    {
+        return;
+    }
+
+    // Match std::free semantics: freeing NULL is a no-op.  Without this guard
+    // the offset read below would access invalid memory.
+    if (mem == nullptr)
+    {
+        return;
+    }
+
+    unsigned char* ptr = reinterpret_cast<unsigned char*>(mem);
+    const unsigned char offset = *--ptr;
+
+    s_framework->m_allocatorCallback->deallocate(ptr - offset);
+}
+
+
+//////// TkFrameworkImpl methods ////////
+
+// Constructs the framework from the user descriptor and registers the static
+// type data for all TkIdentifiable types (Asset, Family, Group).
+TkFrameworkImpl::TkFrameworkImpl(const TkFrameworkDesc& desc)
+    : TkFramework()
+    , m_errorCallback(desc.errorCallback)
+    , m_allocatorCallback(desc.allocatorCallback)
+{
+    // Static create() function should ensure these pointers are not NULL
+    NVBLAST_ASSERT(m_errorCallback != nullptr);
+    NVBLAST_ASSERT(m_allocatorCallback != nullptr);
+
+    // Register types
+    m_types.resize(TkTypeIndex::TypeCount);
+    m_objects.resize(TkTypeIndex::TypeCount);
+    NVBLASTTK_REGISTER_TYPE(Asset);
+    NVBLASTTK_REGISTER_TYPE(Family);
+    NVBLASTTK_REGISTER_TYPE(Group);
+}
+
+
+// Trivial destructor; object teardown is driven by release()/set(nullptr).
+TkFrameworkImpl::~TkFrameworkImpl()
+{
+}
+
+
+// Releases all tracked objects and destroys the framework singleton.
+void TkFrameworkImpl::release()
+{
+    // Special release of joints, which are not TkIdentifiable:
+    // Copy joint pointers into a temporary array first, because joint release()
+    // mutates m_joints while we would otherwise be iterating it.
+    TkArray<TkJointImpl*>::type joints;    // Since the EraseIterator is not exposed
+    joints.reserve(m_joints.size());
+    for (auto j = m_joints.getIterator(); !j.done(); ++j)
+    {
+        joints.pushBack(*j);
+    }
+    for (uint32_t i = 0; i < joints.size(); ++i)
+    {
+        joints[i]->release();
+    }
+    NVBLAST_ASSERT(m_joints.size() == 0);
+    joints.reset();    // Since we will be deleting the allocator
+
+    // Groups are released before assets.
+    // NOTE(review): NVBLASTTK_RELEASE_TYPE(Family) is not invoked here;
+    // presumably families are torn down via their assets/actors — confirm.
+    NVBLASTTK_RELEASE_TYPE(Group);
+    NVBLASTTK_RELEASE_TYPE(Asset);
+    set(nullptr);
+    Nv::Blast::TkAllocator::s_allocatorCallback = nullptr;
+}
+
+
+// Public accessor for the user-supplied error callback.
+physx::PxErrorCallback& TkFrameworkImpl::getErrorCallback() const
+{
+    return getErrorCallbackInternal();
+}
+
+
+// Public accessor for the user-supplied allocator callback.
+physx::PxAllocatorCallback& TkFrameworkImpl::getAllocatorCallback() const
+{
+    return getAllocatorCallbackInternal();
+}
+
+
+// Exposes the static log function for use with low-level NvBlast API calls.
+NvBlastLog TkFrameworkImpl::getLogFn() const
+{
+    return TkFrameworkImpl::log;
+}
+
+
+// Deserializes one TkSerializable from the stream.  Validates the framework
+// ClassID, the object type ID, and the type version before dispatching to the
+// type's deserialize function.  Returns NULL on any validation failure.
+TkSerializable* TkFrameworkImpl::deserialize(PxFileBuf& stream)
+{
+    // Read framework ID
+    if (stream.readDword() != ClassID)
+    {
+        NVBLASTTK_LOG_ERROR("TkFrameworkImpl::deserialize: stream does not contain a BlastTk object.");
+        return nullptr;
+    }
+
+    // Read object class ID and get class type data
+    // (hash map find returns a pointer to the entry, or NULL if absent)
+    const auto it = m_typeIDToIndex.find(stream.readDword());
+    if (it == nullptr)
+    {
+        NVBLASTTK_LOG_ERROR("TkFrameworkImpl::deserialize: BlastTk object type unrecognized.");
+        return nullptr;
+    }
+
+    const uint32_t index = (*it).second;
+    NVBLAST_ASSERT(index < m_types.size());
+
+    const TkTypeImpl* type = m_types[index];
+
+    // Read object class version and ensure it's current
+    if (stream.readDword() != type->getVersionInternal())
+    {
+        NVBLASTTK_LOG_ERROR("TkFrameworkImpl::deserialize: BlastTk object version does not equal the current version for the loaded type.");
+        return nullptr;
+    }
+
+    // Object ID
+    NvBlastID id;
+    stream.read(&id, sizeof(NvBlastID));
+
+    // Serializable user data (userIntData), stored as two dwords: low then high
+    const uint32_t lsq = stream.readDword();
+    const uint32_t msq = stream.readDword();
+
+    // All checks out, deserialize
+    TkSerializable* object = type->getDeserializeFn()(stream, id);
+
+    // Set serializable user data if deserialization was successful
+    if (object != nullptr)
+    {
+        object->userIntData = static_cast<uint64_t>(msq) << 32 | static_cast<uint64_t>(lsq);
+    }
+
+    return object;
+}
+
+
+// Returns static type data for a type index, or NULL (with a warning) if the
+// index is out of range.
+const TkType* TkFrameworkImpl::getType(TkTypeIndex::Enum typeIndex) const
+{
+    if (typeIndex < 0 || typeIndex >= TkTypeIndex::TypeCount)
+    {
+        NVBLASTTK_LOG_WARNING("TkFrameworkImpl::getType: invalid typeIndex.");
+        return nullptr;
+    }
+
+    return m_types[typeIndex];
+}
+
+
+// Public ID lookup: thin wrapper over findObjectByIDInternal that logs a
+// warning when no object is found.
+TkIdentifiable* TkFrameworkImpl::findObjectByID(const NvBlastID& id) const
+{
+    TkIdentifiable* object = findObjectByIDInternal(id);
+
+    if (object == nullptr)
+    {
+        NVBLASTTK_LOG_WARNING("TkFrameworkImpl::findObjectByID: object not found.");
+    }
+
+    return object;
+}
+
+
+// Returns the number of tracked objects of the given type; 0 (with an error)
+// if the type's index is unrecognized.
+uint32_t TkFrameworkImpl::getObjectCount(const TkType& type) const
+{
+    const uint32_t index = static_cast<const TkTypeImpl&>(type).getIndex();
+
+    if (index >= m_objects.size())
+    {
+        NVBLASTTK_LOG_ERROR("TkFrameworkImpl::getObjectCount: BlastTk object type unrecognized.");
+        return 0;
+
+    }
+
+    return m_objects[index].size();
+}
+
+
+// Copies up to bufferSize pointers to tracked objects of the given type into
+// buffer, starting at indexStart.  Returns the number of pointers written;
+// 0 if the type is unrecognized or indexStart is past the end of the list.
+uint32_t TkFrameworkImpl::getObjects(TkIdentifiable** buffer, uint32_t bufferSize, const TkType& type, uint32_t indexStart /* = 0 */) const
+{
+    const uint32_t index = static_cast<const TkTypeImpl&>(type).getIndex();
+
+    if (index >= m_objects.size())
+    {
+        // Fixed copy-paste: message previously named getObjectCount.
+        NVBLASTTK_LOG_ERROR("TkFrameworkImpl::getObjects: BlastTk object type unrecognized.");
+        return 0;
+    }
+
+    const auto& objectArray = m_objects[index];
+
+    uint32_t objectCount = objectArray.size();
+    if (objectCount <= indexStart)
+    {
+        NVBLASTTK_LOG_WARNING("TkFrameworkImpl::getObjects: indexStart beyond end of object list.");
+        return 0;
+    }
+
+    // Clamp the copy count to the caller's buffer size.
+    objectCount -= indexStart;
+    if (objectCount > bufferSize)
+    {
+        objectCount = bufferSize;
+    }
+
+    memcpy(buffer, objectArray.begin() + indexStart, objectCount * sizeof(TkObject*));
+
+    return objectCount;
+}
+
+
+// Wraps NvBlastReorderAssetDescChunks, supplying scratch memory and (if the
+// caller did not provide one) a temporary chunk reorder map.
+bool TkFrameworkImpl::reorderAssetDescChunks(NvBlastChunkDesc* chunkDescs, uint32_t chunkCount, NvBlastBondDesc* bondDescs, uint32_t bondCount, uint32_t* chunkReorderMap /*= nullptr*/) const
+{
+    // Use the caller's map if given, otherwise allocate a temporary one.
+    uint32_t* map = chunkReorderMap != nullptr ? chunkReorderMap : static_cast<uint32_t*>(NVBLASTTK_ALLOC(chunkCount * sizeof(uint32_t), "reorderAssetDescChunks:chunkReorderMap"));
+    void* scratch = NVBLASTTK_ALLOC(chunkCount * sizeof(NvBlastChunkDesc), "reorderAssetDescChunks:scratch");
+    const bool result = NvBlastReorderAssetDescChunks(chunkDescs, chunkCount, bondDescs, bondCount, map, scratch, log);
+    NVBLASTTK_FREE(scratch);
+    // Only free the map if we allocated it ourselves.
+    if (chunkReorderMap == nullptr)
+    {
+        NVBLASTTK_FREE(map);
+    }
+    return result;
+}
+
+
+// Wraps NvBlastEnsureAssetExactSupportCoverage, supplying scratch memory
+// (chunkCount bytes, per the low-level API's scratch requirement).
+bool TkFrameworkImpl::ensureAssetExactSupportCoverage(NvBlastChunkDesc* chunkDescs, uint32_t chunkCount) const
+{
+    void* scratch = NVBLASTTK_ALLOC(chunkCount, "ensureAssetExactSupportCoverage:scratch");
+    const bool result = NvBlastEnsureAssetExactSupportCoverage(chunkDescs, chunkCount, scratch, log);
+    NVBLASTTK_FREE(scratch);
+    return result;
+}
+
+
+// Creates an asset from a descriptor; logs an error and returns NULL on failure.
+TkAsset* TkFrameworkImpl::createAsset(const TkAssetDesc& desc)
+{
+    TkAssetImpl* asset = TkAssetImpl::create(desc);
+    if (asset == nullptr)
+    {
+        NVBLASTTK_LOG_ERROR("TkFrameworkImpl::createAsset: failed to create asset.");
+    }
+
+    return asset;
+}
+
+
+// Creates an asset wrapping an existing low-level NvBlastAsset, optionally
+// taking ownership and attaching joint descriptors.
+TkAsset* TkFrameworkImpl::createAsset(const NvBlastAsset* assetLL, Nv::Blast::TkAssetJointDesc* jointDescs, uint32_t jointDescCount, bool ownsAsset)
+{
+    TkAssetImpl* asset = TkAssetImpl::create(assetLL, jointDescs, jointDescCount, ownsAsset);
+    if (asset == nullptr)
+    {
+        NVBLASTTK_LOG_ERROR("TkFrameworkImpl::createAsset: failed to create asset.");
+    }
+
+    return asset;
+}
+
+
+// Creates a processing group; logs an error and returns NULL on failure.
+TkGroup* TkFrameworkImpl::createGroup(const TkGroupDesc& desc)
+{
+    TkGroupImpl* group = TkGroupImpl::create(desc);
+    if (group == nullptr)
+    {
+        NVBLASTTK_LOG_ERROR("TkFrameworkImpl::createGroup: failed to create group.");
+    }
+
+    return group;
+}
+
+
+// Creates an actor from a descriptor; logs an error and returns NULL on failure.
+TkActor* TkFrameworkImpl::createActor(const TkActorDesc& desc)
+{
+    TkActor* actor = TkActorImpl::create(desc);
+    if (actor == nullptr)
+    {
+        NVBLASTTK_LOG_ERROR("TkFrameworkImpl::createActor: failed to create actor.");
+    }
+
+    return actor;
+}
+
+
+// Creates an external joint between chunks in (up to) two families.
+// Validates the descriptor, creates a joint handle in each non-NULL family,
+// then constructs the joint and patches the handles to point at it.
+// The handle-then-joint ordering is load-bearing: handles are created first so
+// a failure aborts before the joint object exists.
+TkJoint* TkFrameworkImpl::createJoint(const TkJointDesc& desc)
+{
+    TkJointImpl** handle0 = nullptr;
+    TkJointImpl** handle1 = nullptr;
+
+    TkFamilyImpl* family0 = static_cast<TkFamilyImpl*>(desc.families[0]);
+    TkFamilyImpl* family1 = static_cast<TkFamilyImpl*>(desc.families[1]);
+
+    // At least one family must be supplied, and each supplied chunk index must
+    // be in range for its family's asset.
+    NVBLASTTK_CHECK_ERROR(family0 != nullptr || family1 != nullptr, "TkFrameworkImpl::createJoint: at least one family in the TkJointDesc must be valid.", return nullptr);
+
+    NVBLASTTK_CHECK_ERROR(family0 == nullptr || desc.chunkIndices[0] < family0->getAssetImpl()->getChunkCount(), "TkFrameworkImpl::createJoint: desc.chunkIndices[0] is invalid.", return nullptr);
+    NVBLASTTK_CHECK_ERROR(family1 == nullptr || desc.chunkIndices[1] < family1->getAssetImpl()->getChunkCount(), "TkFrameworkImpl::createJoint: desc.chunkIndices[1] is invalid.", return nullptr);
+
+    // A joint may not connect a chunk to another chunk of the same actor.
+    const bool actorsAreTheSame = family0 == family1 && family0->getActorByChunk(desc.chunkIndices[0]) == family1->getActorByChunk(desc.chunkIndices[1]);
+    NVBLASTTK_CHECK_ERROR(!actorsAreTheSame, "TkFrameworkImpl::createJoint: the chunks listed in the TkJointDesc must be in different actors.", return nullptr);
+
+    // Each attached chunk must be a support chunk (mapped to a graph node).
+    if (family0 != nullptr)
+    {
+        const bool isSupportChunk = !isInvalidIndex(NvBlastAssetGetChunkToGraphNodeMap(family0->getAssetImpl()->getAssetLLInternal(), log)[desc.chunkIndices[0]]);
+        NVBLASTTK_CHECK_ERROR(isSupportChunk, "TkFrameworkImpl::createJoint: desc.chunkIndices[0] is not a support chunk in the asset for desc.families[0]. Joint not created.", return nullptr);
+        handle0 = family0->createExternalJointHandle(getFamilyID(family1), desc.chunkIndices[0], desc.chunkIndices[1]);
+        NVBLASTTK_CHECK_ERROR(handle0 != nullptr, "TkFrameworkImpl::createJoint: could not create joint handle in family[0]. Joint not created.", return nullptr);
+    }
+
+    if (family1 != nullptr)
+    {
+        const bool isSupportChunk = !isInvalidIndex(NvBlastAssetGetChunkToGraphNodeMap(family1->getAssetImpl()->getAssetLLInternal(), log)[desc.chunkIndices[1]]);
+        NVBLASTTK_CHECK_ERROR(isSupportChunk, "TkFrameworkImpl::createJoint: desc.chunkIndices[1] is not a support chunk in the asset for desc.families[1]. Joint not created.", return nullptr);
+        // Only one handle per family pair; skip if both ends share a family.
+        if (family1 != family0)
+        {
+            handle1 = family1->createExternalJointHandle(getFamilyID(family0), desc.chunkIndices[1], desc.chunkIndices[0]);
+            NVBLASTTK_CHECK_ERROR(handle1 != nullptr, "TkFrameworkImpl::createJoint: could not create joint handle in family[1]. Joint not created.", return nullptr);
+        }
+    }
+
+    TkJointImpl* joint = NVBLASTTK_NEW(TkJointImpl)(desc, nullptr);
+    NVBLASTTK_CHECK_ERROR(joint != nullptr, "TkFrameworkImpl::createJoint: failed to create joint.", return nullptr);
+
+    const TkJointData& jointData = joint->getDataInternal();
+
+    // Point the family handles at the new joint and link it into each actor's
+    // joint list (once per distinct actor).
+    if (handle0 != nullptr)
+    {
+        *handle0 = joint;
+        static_cast<TkActorImpl*>(jointData.actors[0])->addJoint(joint->m_links[0]);
+    }
+
+    if (handle1 != nullptr)
+    {
+        *handle1 = joint;
+        if (jointData.actors[0] != jointData.actors[1])
+        {
+            static_cast<TkActorImpl*>(jointData.actors[1])->addJoint(joint->m_links[1]);
+        }
+    }
+
+    return joint;
+}
+
+
+// Writes the standard serialization header for a TkSerializable: framework
+// ClassID, type ID, type version, object ID, and userIntData (low dword then
+// high dword — matching the read order in deserialize()).
+bool TkFrameworkImpl::serializeHeader(const TkSerializable& object, PxFileBuf& stream)
+{
+    const TkTypeImpl& type = static_cast<const TkTypeImpl&>(object.getType());
+
+    // Tk framework identifier
+    stream.storeDword(ClassID);
+
+    // Object header
+    stream.storeDword(type.getID());
+    stream.storeDword(type.getVersionInternal());
+
+    // Object ID
+    stream.write(&object.getID(), sizeof(NvBlastID));
+
+    // Serializable user data
+    stream.storeDword(static_cast<uint32_t>(object.userIntData & 0xFFFFFFFF));
+    stream.storeDword(static_cast<uint32_t>(object.userIntData >> 32));
+
+    return true;
+}
+
+
+// Registers a newly created TkIdentifiable in the per-type object catalog.
+// An invalid type index is tolerated silently (object simply not tracked);
+// any other out-of-range index is reported as an error.
+void TkFrameworkImpl::onCreate(TkIdentifiable& object)
+{
+    const TkTypeImpl& type = static_cast<const TkTypeImpl&>(object.getType());
+
+    const uint32_t index = type.getIndex();
+
+    if (index >= m_objects.size())
+    {
+        if (!isInvalidIndex(index))
+        {
+            NVBLASTTK_LOG_ERROR("TkFrameworkImpl::addObject: object type unrecognized.");
+        }
+        return;
+    }
+
+    auto& objectArray = m_objects[index];
+    NVBLAST_ASSERT(objectArray.find(&object) == objectArray.end());
+    objectArray.pushBack(&object);
+}
+
+
+// Unregisters a TkIdentifiable from both the ID map (if it has a nonzero ID)
+// and the per-type object catalog.
+void TkFrameworkImpl::onDestroy(TkIdentifiable& object)
+{
+    // remove from id map if present
+    const auto id = object.getID();
+    if (!TkGUIDIsZero(&id))
+    {
+        m_IDToObject.erase(id);
+    }
+
+    // remove from object list
+    const TkTypeImpl& type = static_cast<const TkTypeImpl&>(object.getType());
+
+    const uint32_t index = type.getIndex();
+
+    if (index >= m_objects.size())
+    {
+        if (!isInvalidIndex(index))
+        {
+            NVBLASTTK_LOG_ERROR("TkFrameworkImpl::removeObject: object type unrecognized.");
+        }
+        return;
+    }
+
+    // Order is not preserved in the catalog (swap-with-last removal).
+    auto& objectArray = m_objects[index];
+    objectArray.findAndReplaceWithLast(&object);
+}
+
+
+// Tracks a newly created joint (joints are not TkIdentifiable, so they get
+// their own set instead of the typed catalog).
+void TkFrameworkImpl::onCreate(TkJointImpl& joint)
+{
+    NVBLASTTK_CHECK_ERROR(m_joints.insert(&joint), "TkFrameworkImpl::onCreate: Joint already tracked.", return);
+}
+
+
+// Stops tracking a joint on destruction.
+void TkFrameworkImpl::onDestroy(TkJointImpl& joint)
+{
+    NVBLASTTK_CHECK_ERROR(m_joints.erase(&joint), "TkFrameworkImpl::onDestroy: Joint not tracked.", return);
+}
+
+
+// Keeps the ID->object map consistent when an object's ID changes.  Zero IDs
+// are treated as "no ID" and are never stored in the map.
+void TkFrameworkImpl::onIDChange(TkIdentifiable& object, const NvBlastID& IDPrev, const NvBlastID& IDCurr)
+{
+    if (!TkGUIDIsZero(&IDPrev))
+    {
+        if (!m_IDToObject.erase(IDPrev))
+        {
+            NVBLASTTK_LOG_ERROR("TkFrameworkImpl::reportIDChanged: object with previous ID doesn't exist.");
+        }
+    }
+
+    if (!TkGUIDIsZero(&IDCurr))
+    {
+        // operator[] inserts a default (NULL) entry; a non-NULL value means the
+        // new ID is already taken by another object.
+        auto& value = m_IDToObject[IDCurr];
+        if (value != nullptr)
+        {
+            NVBLASTTK_LOG_ERROR("TkFrameworkImpl::reportIDChanged: object with new ID already exists.");
+            return;
+        }
+        value = &object;
+    }
+}
+
+} // namespace Blast
+} // namespace Nv
+
+
+//////// Global API implementation ////////
+
+// Global factory for the TkFramework singleton.  Requires non-NULL error and
+// allocator callbacks in the descriptor, and fails if a framework already
+// exists.  The framework instance is allocated via the user's allocator.
+Nv::Blast::TkFramework* NvBlastTkFrameworkCreate(const Nv::Blast::TkFrameworkDesc& desc)
+{
+    if (desc.errorCallback == nullptr)
+    {
+        // No error callback to report through; fail silently.
+        return nullptr;
+    }
+
+    if (desc.allocatorCallback == nullptr)
+    {
+        desc.errorCallback->reportError(PxErrorCode::eINVALID_OPERATION, "TkFramework::create: NULL allocator given in descriptor.", __FILE__, __LINE__);
+        return nullptr;
+    }
+
+    if (Nv::Blast::TkFrameworkImpl::get() != nullptr)
+    {
+        desc.errorCallback->reportError(PxErrorCode::eINVALID_OPERATION, "TkFramework::create: framework already created. Use TkFramework::get() to access.", __FILE__, __LINE__);
+        return nullptr;
+    }
+
+    // Install the allocator before constructing, so the constructor may allocate.
+    Nv::Blast::TkAllocator::s_allocatorCallback = desc.allocatorCallback;
+
+    Nv::Blast::TkFrameworkImpl* framework = new (desc.allocatorCallback->allocate(sizeof(Nv::Blast::TkFrameworkImpl), "TkFrameworkImpl", __FILE__, __LINE__)) Nv::Blast::TkFrameworkImpl(desc);
+    Nv::Blast::TkFrameworkImpl::set(framework);
+
+    return Nv::Blast::TkFrameworkImpl::get();
+}
+
+
+// Global accessor for the TkFramework singleton (NULL if not created).
+Nv::Blast::TkFramework* NvBlastTkFrameworkGet()
+{
+    return Nv::Blast::TkFrameworkImpl::get();
+}
diff --git a/NvBlast/sdk/toolkit/source/NvBlastTkFrameworkImpl.h b/NvBlast/sdk/toolkit/source/NvBlastTkFrameworkImpl.h
new file mode 100644
index 0000000..ceeff06
--- /dev/null
+++ b/NvBlast/sdk/toolkit/source/NvBlastTkFrameworkImpl.h
@@ -0,0 +1,225 @@
+/*
+* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#ifndef NVBLASTTKFRAMEWORKIMPL_H
+#define NVBLASTTKFRAMEWORKIMPL_H
+
+#include "NvBlastTkFramework.h"
+#include "NvBlastProfilerInternal.h"
+
+#include "NvBlastTkCommon.h"
+
+#include "NvBlastTkArray.h"
+#include "NvBlastTkHashMap.h"
+#include "NvBlastTkHashSet.h"
+
+
+//////// Log macros that use the TkFrameworkImpl::log function ////////
+
+#define NVBLASTTK_LOG_ERROR(_msg) NVBLAST_LOG_ERROR(TkFrameworkImpl::log, _msg)
+#define NVBLASTTK_LOG_WARNING(_msg) NVBLAST_LOG_WARNING(TkFrameworkImpl::log, _msg)
+#define NVBLASTTK_LOG_INFO(_msg) NVBLAST_LOG_INFO(TkFrameworkImpl::log, _msg)
+#define NVBLASTTK_LOG_DEBUG(_msg) NVBLAST_LOG_DEBUG(TkFrameworkImpl::log, _msg)
+
+#define NVBLASTTK_CHECK(_expr, _messageType, _msg, _onFail) \
+ { \
+ if(!(_expr)) \
+ { \
+ TkFrameworkImpl::log(_messageType, _msg, __FILE__, __LINE__); \
+ { _onFail; }; \
+ } \
+ }
+
+#define NVBLASTTK_CHECK_ERROR(_expr, _msg, _onFail) NVBLASTTK_CHECK(_expr, NvBlastMessage::Error, _msg, _onFail)
+#define NVBLASTTK_CHECK_WARNING(_expr, _msg, _onFail) NVBLASTTK_CHECK(_expr, NvBlastMessage::Warning, _msg, _onFail)
+#define NVBLASTTK_CHECK_INFO(_expr, _msg, _onFail) NVBLASTTK_CHECK(_expr, NvBlastMessage::Info, _msg, _onFail)
+#define NVBLASTTK_CHECK_DEBUG(_expr, _msg, _onFail) NVBLASTTK_CHECK(_expr, NvBlastMessage::Debug, _msg, _onFail)
+
+
+//////// Allocator macros ////////
+
+#define NVBLASTTK_ALLOC(_size, _name) TkFrameworkImpl::get()->getAllocatorCallbackInternal().allocate(_size, _name, __FILE__, __LINE__)
+#define NVBLASTTK_FREE(_mem) TkFrameworkImpl::get()->getAllocatorCallbackInternal().deallocate(_mem)
+
+#define NVBLASTTK_NEW(T) new (NVBLASTTK_ALLOC(sizeof(T), #T)) T
+#define NVBLASTTK_DELETE(obj, T) \
+ (obj)->~T(); \
+ NVBLASTTK_FREE(obj)
+
+
+
+
+
+namespace Nv
+{
+namespace Blast
+{
+
+// Forward declarations
+class TkTypeImpl;
+class TkJointImpl;
+
+/**
+Implementation of TkFramework
+*/
+class TkFrameworkImpl : public TkFramework
+{
+public:
+    TkFrameworkImpl(const TkFrameworkDesc& desc);
+    ~TkFrameworkImpl();
+
+    // Begin TkFramework
+    // (These overrides are documented on the TkFramework interface.)
+    virtual void release() override;
+
+    virtual physx::PxErrorCallback& getErrorCallback() const override;
+
+    virtual physx::PxAllocatorCallback& getAllocatorCallback() const override;
+
+    virtual NvBlastLog getLogFn() const override;
+
+    virtual TkSerializable* deserialize(physx::general_PxIOStream2::PxFileBuf& stream) override;
+
+    virtual const TkType* getType(TkTypeIndex::Enum typeIndex) const override;
+
+    virtual TkIdentifiable* findObjectByID(const NvBlastID& id) const override;
+
+    virtual uint32_t getObjectCount(const TkType& type) const override;
+
+    virtual uint32_t getObjects(TkIdentifiable** buffer, uint32_t bufferSize, const TkType& type, uint32_t indexStart = 0) const override;
+
+    virtual bool reorderAssetDescChunks(NvBlastChunkDesc* chunkDescs, uint32_t chunkCount, NvBlastBondDesc* bondDescs, uint32_t bondCount, uint32_t* chunkReorderMap = nullptr) const override;
+
+    virtual bool ensureAssetExactSupportCoverage(NvBlastChunkDesc* chunkDescs, uint32_t chunkCount) const override;
+
+    virtual TkAsset* createAsset(const TkAssetDesc& desc) override;
+
+    virtual TkAsset* createAsset(const NvBlastAsset* assetLL, Nv::Blast::TkAssetJointDesc* jointDescs = nullptr, uint32_t jointDescCount = 0, bool ownsAsset = false) override;
+
+    virtual TkGroup* createGroup(const TkGroupDesc& desc) override;
+
+    virtual TkActor* createActor(const TkActorDesc& desc) override;
+
+    virtual TkJoint* createJoint(const TkJointDesc& desc) override;
+    // End TkFramework
+
+    // Public methods
+    /**
+    Access to the error callback set by the user.
+    */
+    physx::PxErrorCallback& getErrorCallbackInternal() const;
+
+    /**
+    Access to the allocator callback set by the user.
+    */
+    physx::PxAllocatorCallback& getAllocatorCallbackInternal() const;
+
+    /**
+    To be called by any TkIdentifiable object when it is created, so the framework can track it.
+    */
+    void onCreate(TkIdentifiable& object);
+
+    /**
+    To be called by any TkIdentifiable object when it is deleted, so the framework can stop tracking it.
+    */
+    void onDestroy(TkIdentifiable& object);
+
+    /**
+    Special onCreate method for joints, since they are not TkIdentifiable.
+    */
+    void onCreate(TkJointImpl& joint);
+
+    /**
+    Special onDestroy method for joints, since they are not TkIdentifiable.
+    */
+    void onDestroy(TkJointImpl& joint);
+
+    /**
+    Must be called whenever a TkIdentifiable object's ID is changed, so that the framework can associate the new ID with it.
+    */
+    void onIDChange(TkIdentifiable& object, const NvBlastID& IDPrev, const NvBlastID& IDCurr);
+
+    /**
+    Internal (non-virtual) method to find a TkIdentifiable object based upon its NvBlastID.
+    */
+    TkIdentifiable* findObjectByIDInternal(const NvBlastID& id) const;
+
+    /**
+    Serialize a TkSerializable's standard header data, including its type ID, type version, object ID, and TkObject::userIntData.
+    */
+    bool serializeHeader(const TkSerializable& object, physx::general_PxIOStream2::PxFileBuf& stream);
+
+    // Access to singleton
+
+    /** Retrieve the global singleton. */
+    static TkFrameworkImpl* get();
+
+    /** Set the global singleton, if it's not already set, or set it to NULL. Returns true iff successful. */
+    static bool set(TkFrameworkImpl* framework);
+
+    // Blast LL context functions
+    static void log(int type, const char* msg, const char* file, int line); //!< A function with the NvBlastLog signature which can be used in NvBlast low-level function calls
+    static void* alloc(size_t size); //!< A function with the std::malloc signature which returns 16-byte aligned memory
+    static void free(void* mem); //!< A function with the std::free signature which can deallocate memory created by alloc
+
+private:
+    // Enums
+    enum { Alignment = 16 }; //!< Memory alignment used for allocations
+    enum { ClassID = NVBLASTTK_FOURCC('T', 'K', 'F', 'W') }; //!< TkFramework identifier token, used in serialization
+
+    // Static data
+    static TkFrameworkImpl* s_framework; //!< Global (singleton) object pointer
+
+    // Callbacks
+    physx::PxErrorCallback* m_errorCallback; //!< User-supplied error callback
+    physx::PxAllocatorCallback* m_allocatorCallback; //!< User-supplied allocator callback
+
+    // Types
+    TkInlineArray<const TkTypeImpl*, TkTypeIndex::TypeCount>::type m_types; //!< TkIdentifiable static type data
+    TkHashMap<uint32_t, uint32_t>::type m_typeIDToIndex; //!< Map to type data keyed by ClassID
+
+    // Objects and object names
+    TkHashMap<NvBlastID, TkIdentifiable*>::type m_IDToObject; //!< Map to all TkIdentifiable objects, keyed by NvBlastID
+    TkInlineArray<TkArray<TkIdentifiable*>::type, TkTypeIndex::TypeCount>::type m_objects; //!< Catalog of all TkIdentifiable objects, grouped by type. (Revisit implementation.)
+
+    // Track external joints (to do: make this a pool)
+    TkHashSet<TkJointImpl*>::type m_joints; //!< All internal joints
+};
+
+
+//////// TkFrameworkImpl inline methods ////////
+
+// Non-virtual accessor for the user error callback (asserted non-NULL in ctor).
+NV_INLINE physx::PxErrorCallback& TkFrameworkImpl::getErrorCallbackInternal() const
+{
+    return *m_errorCallback;
+}
+
+
+// Non-virtual accessor for the user allocator callback (asserted non-NULL in ctor).
+NV_INLINE physx::PxAllocatorCallback& TkFrameworkImpl::getAllocatorCallbackInternal() const
+{
+    return *m_allocatorCallback;
+}
+
+
+// ID-map lookup without logging; returns NULL if the ID is not registered.
+// (The hash map's find returns a pointer to the entry, or NULL.)
+NV_INLINE TkIdentifiable* TkFrameworkImpl::findObjectByIDInternal(const NvBlastID& id) const
+{
+    const auto entry = m_IDToObject.find(id);
+    if (entry == nullptr)
+    {
+        return nullptr;
+    }
+
+    return entry->second;
+}
+
+} // namespace Blast
+} // namespace Nv
+
+
+#endif // ifndef NVBLASTTKFRAMEWORKIMPL_H
diff --git a/NvBlast/sdk/toolkit/source/NvBlastTkGUID.h b/NvBlast/sdk/toolkit/source/NvBlastTkGUID.h
new file mode 100644
index 0000000..a770092
--- /dev/null
+++ b/NvBlast/sdk/toolkit/source/NvBlastTkGUID.h
@@ -0,0 +1,135 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation.
+/*
+* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#ifndef NVBLASTTKGUID_H
+#define NVBLASTTKGUID_H
+
+#include "NvPreprocessor.h"
+
+#if NV_WINDOWS_FAMILY
+#include <rpc.h>
+#else
+//#include <uuid/uuid.h>
+#include "NvBlastTime.h"
+#endif
+
+#include "PsHash.h"
+
+namespace Nv
+{
+namespace Blast
+{
+
+#if NV_WINDOWS_FAMILY
+
+// Windows GUID generation: delegates to the RPC UuidCreate API.  The ptr
+// argument is unused on this platform (kept for signature parity with the
+// non-Windows variant).
+NV_INLINE NvBlastID TkGenerateGUID(void* ptr)
+{
+    NV_UNUSED(ptr);
+
+    NV_COMPILE_TIME_ASSERT(sizeof(UUID) == sizeof(NvBlastID));
+
+    NvBlastID guid;
+    UuidCreate(reinterpret_cast<UUID*>(&guid));
+
+    return guid;
+}
+
+#else
+
+// Non-Windows GUID generation: packs the caller-supplied pointer into the low
+// 8 bytes and the current tick count into the high 8 bytes.
+// NOTE(review): this is NOT a true UUID — uniqueness relies on distinct
+// (pointer, tick) pairs; the commented-out uuid_generate path suggests a
+// proper implementation was intended — confirm adequacy for serialization IDs.
+NV_INLINE NvBlastID TkGenerateGUID(void* ptr)
+{
+//    NV_COMPILE_TIME_ASSERT(sizeof(uuid_t) == sizeof(NvBlastID));
+    Time time;
+
+    NvBlastID guid;
+    //    uuid_generate_random(reinterpret_cast<uuid_t&>(guid));
+
+    *reinterpret_cast<uint64_t*>(guid.data) = reinterpret_cast<uintptr_t>(ptr);
+    *reinterpret_cast<int64_t*>(guid.data + 8) = time.getLastTickCount();
+
+    return guid;
+}
+
+#endif
+
+
+/**
+Compares two NvBlastIDs.
+
+\param[in] id1 A pointer to the first id to compare.
+\param[in] id2 A pointer to the second id to compare.
+
+\return true iff ids are equal.
+*/
+// Byte-wise equality of two NvBlastIDs.
+NV_INLINE bool TkGUIDsEqual(const NvBlastID* id1, const NvBlastID* id2)
+{
+    return !memcmp(id1, id2, sizeof(NvBlastID));
+}
+
+
+/**
+Clears an NvBlastID (sets all of its fields to zero).
+
+\param[out] id A pointer to the ID to clear.
+*/
+// Zeroes all bytes of the ID.
+NV_INLINE void TkGUIDReset(NvBlastID* id)
+{
+    memset(id, 0, sizeof(NvBlastID));
+}
+
+
+/**
+Tests an NvBlastID to determine if it's zeroed. After calling TkGUIDReset
+on an ID, passing it to this function will return a value of true.
+
+\param[in] id A pointer to the ID to test.
+*/
+// Returns true iff every byte of the ID is zero (i.e. the state produced by
+// TkGUIDReset).  Compares byte-wise rather than reinterpreting the char
+// buffer as uint64_t, which relied on 8-byte alignment of the data array and
+// violated strict-aliasing rules.
+NV_INLINE bool TkGUIDIsZero(const NvBlastID* id)
+{
+    for (size_t i = 0; i < sizeof(id->data); ++i)
+    {
+        if (id->data[i] != 0)
+        {
+            return false;
+        }
+    }
+    return true;
+}
+
+} // namespace Blast
+} // namespace Nv
+
+
+namespace physx
+{
+namespace shdfnd
+{
+
+// hash specialization for NvBlastID
+// hash specialization for NvBlastID
+// Enables NvBlastID keys in shdfnd hash containers (e.g. the framework's
+// m_IDToObject map).
+template <>
+struct Hash<NvBlastID>
+{
+    uint32_t operator()(const NvBlastID& k) const
+    {
+        // "DJB" string hash over the 16 ID bytes
+        uint32_t h = 5381;
+        for (uint32_t i = 0; i < sizeof(k.data) / sizeof(k.data[0]); ++i)
+            h = ((h << 5) + h) ^ uint32_t(k.data[i]);
+        return h;
+    }
+    bool equal(const NvBlastID& k0, const NvBlastID& k1) const
+    {
+        return Nv::Blast::TkGUIDsEqual(&k0, &k1);
+    }
+};
+
+} // namespace shdfnd
+} // namespace physx
+
+
+#endif // #ifndef NVBLASTTKGUID_H
diff --git a/NvBlast/sdk/toolkit/source/NvBlastTkGroupImpl.cpp b/NvBlast/sdk/toolkit/source/NvBlastTkGroupImpl.cpp
new file mode 100644
index 0000000..d9a4b29
--- /dev/null
+++ b/NvBlast/sdk/toolkit/source/NvBlastTkGroupImpl.cpp
@@ -0,0 +1,592 @@
+/*
+* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#include "NvPreprocessor.h"
+
+#include "NvBlastAssert.h"
+#include "NvBlast.h"
+
+#include "NvBlastTkFrameworkImpl.h"
+#include "NvBlastTkGroupImpl.h"
+#include "NvBlastTkActorImpl.h"
+#include "NvBlastTkFamilyImpl.h"
+#include "NvBlastTkAssetImpl.h"
+#include "NvBlastTkTaskImpl.h"
+
+#include "Px.h"
+#include "PxFileBuf.h"
+#include "PxAllocatorCallback.h"
+#include "task/PxCpuDispatcher.h"
+
+#undef max
+#undef min
+#include <algorithm>
+
+using namespace physx;
+using namespace physx::general_PxIOStream2;
+
+
+namespace Nv
+{
+namespace Blast
+{
+
+//////// Static data ////////
+
+NVBLASTTK_DEFINE_TYPE_IDENTIFIABLE(Group);
+
+
+//////// Local (static) functions ////////
+
+// Returns the worker thread count of the task manager's CPU dispatcher,
+// or 0 when either the task manager or its dispatcher is absent.
+static uint32_t getNumThreads(PxTaskManager* tm)
+{
+    PxCpuDispatcher* dispatcher = (tm != nullptr) ? tm->getCpuDispatcher() : nullptr;
+    return (dispatcher != nullptr) ? dispatcher->getWorkerCount() : 0;
+}
+
+
+//////// Member functions ////////
+
+// Constructs an empty group: no actors, not processing, sync counter at zero.
+TkGroupImpl::TkGroupImpl() : m_actorCount(0), m_isProcessing(false), m_sync(0)
+{
+#if NV_PROFILE
+    // Profiling builds accumulate worker stats; start from a clean slate.
+    memset(&m_stats, 0, sizeof(TkGroupStats));
+#endif
+}
+
+
+// Destructor: by this point release() must have detached all actors and
+// freed all group-family shared memory (asserted in debug builds).
+TkGroupImpl::~TkGroupImpl()
+{
+    NVBLAST_ASSERT(getActorCount() == 0);
+    NVBLAST_ASSERT(m_sharedMemory.size() == 0);
+}
+
+
+// Releases the group: detaches every actor still in it, frees all per-family
+// shared memory and the group's temporary data blocks, then deletes itself.
+// Refuses to run while workers are processing (would race with them).
+void TkGroupImpl::release()
+{
+    if (isProcessing())
+    {
+        // abort all processing?
+        NVBLASTTK_LOG_ERROR("TkGroup::release: cannot release Group while processing.");
+        NVBLAST_ALWAYS_ASSERT_MESSAGE("TkGroup::release: cannot release Group while processing.");
+        return;
+    }
+
+    for (auto it = m_sharedMemory.getIterator(); !it.done(); ++it)
+    {
+        TkFamilyImpl* family = it->first;
+        // Detach every actor of this family that still belongs to this group.
+        for (TkActorImpl& actor : family->getActorsInternal())
+        {
+            if (actor.m_group == this)
+            {
+                removeActorInternal(actor);
+            }
+        }
+        // Free the group-family shared memory regardless of remaining references.
+        SharedMemory* mem = it->second;
+        mem->release();
+        NVBLASTTK_DELETE(mem, SharedMemory);
+    }
+    m_sharedMemory.clear();
+
+    // Release the per-worker temporary and event data blocks.
+    m_bondTempDataBlock.release();
+    m_chunkTempDataBlock.release();
+    m_bondEventDataBlock.release();
+    m_chunkEventDataBlock.release();
+    m_splitScratchBlock.release();
+
+    NVBLASTTK_DELETE(this, TkGroupImpl);
+}
+
+
+// Adds each actor in the given array to this group.
+// Performs no group-family shared memory management (see addActor for that).
+void TkGroupImpl::addActorsInternal(TkActorImpl** actors, uint32_t numActors)
+{
+    for (uint32_t actorNum = 0; actorNum < numActors; ++actorNum)
+    {
+        addActorInternal(*actors[actorNum]);
+    }
+}
+
+
+// Marks the actor as belonging to this group and bumps the actor count.
+// Precondition (asserted): the actor is not in any group yet.
+// No shared memory bookkeeping happens here.
+void TkGroupImpl::addActorInternal(TkActorImpl& tkActor)
+{
+    NVBLAST_ASSERT(tkActor.getGroup() == nullptr);
+    tkActor.m_group = this;
+    m_actorCount++;
+}
+
+
+// Adds an actor to this group. Fails (returns false) if the actor already has
+// a group or if this group is currently processing. On the first actor of a
+// family, allocates the group-family shared memory and (re)sizes the group's
+// per-worker scratch/temp/event blocks to cover that family's asset.
+//
+// \param[in] actor The actor to add.
+//
+// \return true if the actor was added, false otherwise.
+bool TkGroupImpl::addActor(TkActor& actor)
+{
+    TkActorImpl& tkActor = static_cast<TkActorImpl&>(actor);
+    if (tkActor.getGroup() != nullptr)
+    {
+        NVBLASTTK_LOG_ERROR("TkGroup::addActor: actor already belongs to a Group. Remove from current group first.");
+        return false;
+    }
+
+    if (isProcessing())
+    {
+        NVBLASTTK_LOG_ERROR("TkGroup::addActor: cannot alter Group while processing.");
+        return false;
+    }
+
+    // mark the actor that it now belongs to this group
+    addActorInternal(tkActor);
+
+    // actors that were fractured already or have damage requested
+    // must be enqueued to be processed
+    if (tkActor.isPending())
+    {
+        enqueue(&tkActor);
+    }
+
+    TkFamilyImpl& family = tkActor.getFamilyImpl();
+    SharedMemory* mem = m_sharedMemory[&family];
+    if (mem == nullptr)
+    {
+        // the actor belongs to a family not involved in this group yet
+        // shared memory must be allocated and temporary buffers adjusted accordingly
+
+        PERF_ZONE_BEGIN("family memory");
+        mem = NVBLASTTK_NEW(SharedMemory);
+        mem->allocate(family);
+        m_sharedMemory[&family] = mem;
+        PERF_ZONE_END("family memory");
+
+        PERF_ZONE_BEGIN("group memory");
+
+        const uint32_t numThreads = getNumThreads(m_pxTaskManager);
+        // one worker always exists, even if it is the main thread (when numThreads is 0)
+        const uint32_t numWorkers = std::max(numThreads, (uint32_t)1);
+
+        // the number of threads could have changed, however this is unexpected and handled in process()
+
+
+        NvBlastLog theLog = TkFrameworkImpl::get()->log;
+
+        // this group's tasks will use one temporary buffer each, which is of max size of, for all families involved
+        const size_t requiredScratch = NvBlastActorGetRequiredScratchForSplit(tkActor.getActorLL(), theLog);
+        if (static_cast<size_t>(m_splitScratchBlock.numElementsPerBlock()) < requiredScratch)
+        {
+            // grow-only: reallocate the scratch block to the new maximum
+            m_splitScratchBlock.release();
+            m_splitScratchBlock.allocate(static_cast<uint32_t>(requiredScratch), numWorkers);
+        }
+
+        // generate and apply fracture may create an entry for each bond
+        const uint32_t bondCount = NvBlastAssetGetBondCount(tkActor.getAsset()->getAssetLL(), theLog);
+        if (m_bondTempDataBlock.numElementsPerBlock() < bondCount)
+        {
+            m_bondTempDataBlock.release();
+            m_bondTempDataBlock.allocate(bondCount, numWorkers);
+            m_bondEventDataBlock.release();
+            m_bondEventDataBlock.allocate(bondCount, numWorkers);
+        }
+
+        // apply fracture may create an entry for each lower-support chunk
+        const uint32_t graphNodeCount = NvBlastAssetGetSupportGraph(tkActor.getAsset()->getAssetLL(), theLog).nodeCount;
+        const uint32_t subsupportChunkCount
+            = NvBlastAssetGetChunkCount(tkActor.getAsset()->getAssetLL(), theLog)
+            - NvBlastAssetGetFirstSubsupportChunkIndex(tkActor.getAsset()->getAssetLL(), theLog);
+        const uint32_t chunkCount = graphNodeCount + subsupportChunkCount;
+        if (m_chunkTempDataBlock.numElementsPerBlock() < chunkCount)
+        {
+            m_chunkTempDataBlock.release();
+            m_chunkTempDataBlock.allocate(chunkCount, numWorkers);
+            m_chunkEventDataBlock.release();
+            m_chunkEventDataBlock.allocate(chunkCount, numWorkers);
+        }
+        PERF_ZONE_END("group memory");
+    }
+    // account for one more actor of this family in the group
+    mem->addReference();
+
+    return true;
+}
+
+
+// Copies up to bufferSize pointers to this group's actors into buffer,
+// skipping the first indexStart actors. Actors are enumerated by walking the
+// families in m_sharedMemory and picking actors whose m_group is this group.
+//
+// \return the number of pointers written (0 if indexStart is out of range).
+uint32_t TkGroupImpl::getActors(TkActor** buffer, uint32_t bufferSize, uint32_t indexStart /* = 0 */) const
+{
+    PERF_SCOPE_L("TkGroup::getActors");
+
+    uint32_t actorCount = m_actorCount;
+    if (actorCount <= indexStart)
+    {
+        NVBLASTTK_LOG_WARNING("TkGroup::getActors: indexStart beyond end of actor list.");
+        return 0;
+    }
+
+    // clamp the number of actors to return to the caller's buffer size
+    actorCount -= indexStart;
+    if (actorCount > bufferSize)
+    {
+        actorCount = bufferSize;
+    }
+
+    uint32_t index = 0;
+    bool done = false;
+    // const_cast: the hash map only exposes a non-const iterator; iteration does not mutate the group
+    for (auto it = const_cast<TkGroupImpl*>(this)->m_sharedMemory.getIterator(); !it.done();++it)
+    {
+        TkFamilyImpl* fam = it->first;
+        for (TkActorImpl& actor : fam->getActorsInternal())
+        {
+            if (actor.m_group == this)
+            {
+                NVBLAST_ASSERT(actor.isActive());
+
+                // only start writing once we've skipped indexStart actors
+                if (index >= indexStart)
+                {
+                    *buffer++ = &actor;
+                }
+
+                index++;
+                done = (index - indexStart) >= actorCount;
+            }
+            if (done) break;
+        }
+        if (done) break;
+    }
+
+    return actorCount;
+}
+
+
+// Detaches the actor from this group and decrements the actor count.
+// Precondition (asserted): the actor currently belongs to this group.
+// No shared memory or job queue bookkeeping happens here.
+void TkGroupImpl::removeActorInternal(TkActorImpl& tkActor)
+{
+    NVBLAST_ASSERT(tkActor.m_group == this);
+    tkActor.m_group = nullptr;
+    m_actorCount--;
+}
+
+
+// Frees the group-family shared memory for the given family and removes its
+// map entry. Called when the last actor of a family leaves this group.
+void TkGroupImpl::releaseSharedMemory(TkFamilyImpl* fam, SharedMemory* mem)
+{
+    NVBLAST_ASSERT(mem != nullptr && m_sharedMemory[fam] == mem);
+    mem->release();
+    m_sharedMemory.erase(fam);
+    NVBLASTTK_DELETE(mem, SharedMemory);
+}
+
+
+// Removes an actor from this group. Fails (returns false) if the actor does
+// not belong to this group or the group is processing. Dequeues the actor's
+// pending job (if any) and releases the family's shared memory when this was
+// the last actor of its family in the group.
+bool TkGroupImpl::removeActor(TkActor& actor)
+{
+    TkActorImpl& tkActor = static_cast<TkActorImpl&>(actor);
+
+    if (tkActor.getGroup() != this)
+    {
+        NVBLASTTK_LOG_ERROR("TkGroup::removeActor: actor does not belong to this Group.");
+        return false;
+    }
+
+    if (isProcessing())
+    {
+        NVBLASTTK_LOG_ERROR("TkGroup::removeActor: cannot alter Group while processing.");
+        return false;
+    }
+
+    removeActorInternal(tkActor);
+
+    // pending actors must be removed from the job queue as well
+    if(tkActor.isPending())
+    {
+        // swap-and-pop: the last job fills the removed slot, so fix up the
+        // moved actor's stored job index to its new position
+        uint32_t index = tkActor.m_groupJobIndex;
+        tkActor.m_groupJobIndex = invalidIndex<uint32_t>();
+        m_jobs.replaceWithLast(index);
+        if (index < m_jobs.size())
+        {
+            NVBLAST_ASSERT(m_jobs[index].m_tkActor->m_groupJobIndex == m_jobs.size());
+            NVBLAST_ASSERT(m_jobs[index].m_tkActor->isPending());
+            m_jobs[index].m_tkActor->m_groupJobIndex = index;
+        }
+    }
+
+    // if the actor is the last of its family in this group
+    // the group-family memory can be released
+    TkFamilyImpl* family = &tkActor.getFamilyImpl();
+    SharedMemory* mem = getSharedMemory(family);
+    if (mem->removeReference())
+    {
+        releaseSharedMemory(family, mem);
+    }
+
+    return true;
+}
+
+
+// Factory: creates a group from the descriptor, storing the task manager and
+// recording the current worker thread count (used by process() to detect
+// changes). A NULL task manager is allowed (work then runs on the caller's
+// thread) but triggers a warning.
+TkGroupImpl* TkGroupImpl::create(const TkGroupDesc& desc)
+{
+    if (desc.pxTaskManager == nullptr)
+    {
+        NVBLASTTK_LOG_WARNING("TkGroup::create: attempting to create a Group with a NULL pxTaskManager.");
+    }
+
+    TkGroupImpl* group = NVBLASTTK_NEW(TkGroupImpl);
+
+    group->m_pxTaskManager = desc.pxTaskManager;
+    group->m_initialNumThreads = getNumThreads(group->m_pxTaskManager);
+
+    return group;
+}
+
+
+// Kicks off processing of all pending jobs. Enters the processing state
+// (fails if already processing), reallocates per-worker memory if the thread
+// count changed, preallocates event memory per family, then dispatches
+// TkWorker tasks through the task manager — or runs a single worker inline on
+// this thread when no worker threads exist. Pair with sync() to finish.
+bool TkGroupImpl::process()
+{
+    PERF_SCOPE_L("TkGroup::process");
+
+    if (!setProcessing(true))
+    {
+        NVBLASTTK_LOG_WARNING("TkGroup::process: Group is still processing, call TkGroup::sync first.");
+        return false;
+    }
+
+    if (m_jobs.size() > 0)
+    {
+        PERF_ZONE_BEGIN("task setup");
+
+        PERF_ZONE_BEGIN("task memory");
+        const uint32_t numThreads = getNumThreads(m_pxTaskManager);
+        // one worker always exists, even if it is the main thread (when numThreads is 0)
+        const uint32_t numWorkers = std::max(numThreads, (uint32_t)1);
+
+        if (numThreads != m_initialNumThreads)
+        {
+            // per-worker blocks were sized for the old worker count; rebuild them
+            NVBLASTTK_LOG_WARNING("TkGroup::process: number of threads has changed, memory is being reallocated.");
+            m_initialNumThreads = numThreads;
+
+            const uint32_t bondCount = m_bondTempDataBlock.numElementsPerBlock();
+            if (bondCount > 0)
+            {
+                m_bondTempDataBlock.release();
+                m_bondTempDataBlock.allocate(bondCount, numWorkers);
+                m_bondEventDataBlock.release();
+                m_bondEventDataBlock.allocate(bondCount, numWorkers);
+            }
+            const uint32_t chunkCount = m_chunkTempDataBlock.numElementsPerBlock();
+            m_chunkTempDataBlock.release();
+            m_chunkTempDataBlock.allocate(chunkCount, numWorkers);
+            m_chunkEventDataBlock.release();
+            m_chunkEventDataBlock.allocate(chunkCount, numWorkers);
+            const uint32_t scratchSize = m_splitScratchBlock.numElementsPerBlock();
+            m_splitScratchBlock.release();
+            m_splitScratchBlock.allocate(scratchSize, numWorkers);
+        }
+        PERF_ZONE_END("task memory");
+
+
+        PERF_ZONE_BEGIN("setup job queue");
+        for (const auto& job : m_jobs)
+        {
+            const TkActorImpl* a = job.m_tkActor;
+            SharedMemory* mem = getSharedMemory(&a->getFamilyImpl());
+
+            const uint32_t damageCount = a->m_damageBuffer.size();
+
+            // applyFracture'd actor do not necessarily have damage queued
+            NVBLAST_ASSERT(damageCount > 0 || a->m_flags.isSet(TkActorFlag::DAMAGED));
+
+            // no reason to be here without these
+            NVBLAST_ASSERT(a->m_flags.isSet(TkActorFlag::PENDING));
+            NVBLAST_ASSERT(a->m_group == this);
+
+            // collect the amount of event payload memory to preallocate for TkWorkers
+            mem->m_eventsMemory += damageCount * (sizeof(TkFractureCommands) + sizeof(TkFractureEvents)) + sizeof(TkSplitEvent);
+
+            // collect the amount of event entries to preallocate for TkWorkers
+            // (two TkFracture* events per damage plus one TkSplitEvent)
+            mem->m_eventsCount += 2 * damageCount + 1;
+        }
+        PERF_ZONE_END("setup job queue");
+
+        PERF_ZONE_BEGIN("memory protect");
+        for (auto it = m_sharedMemory.getIterator(); !it.done(); ++it)
+        {
+            // preallocate the event memory for TkWorkers
+            SharedMemory* mem = it->second;
+            mem->m_events.reserveData(mem->m_eventsMemory);
+            mem->m_events.reserveEvents(mem->m_eventsCount);
+
+            // these counters are not used anymore
+            // reset them immediately for next time
+            mem->m_eventsCount = 0;
+            mem->m_eventsMemory = 0;
+
+            // switch to parallel mode
+            mem->m_events.protect(true);
+        }
+        PERF_ZONE_END("memory protect");
+
+        PERF_ZONE_END("task setup");
+
+        // ready queue for the workers
+        const uint32_t numJobs = m_jobs.size();
+        m_jobQueue.init(m_jobs.begin(), numJobs);
+
+        // do not start more workers than there are jobs
+        const uint32_t workersToRun = std::min(numWorkers, numJobs);
+        m_workers.resize(workersToRun);
+        m_sync.setCount(workersToRun);
+
+        uint32_t workerId = 0;
+        if (numThreads > 0)
+        {
+            // dispatch each worker as a PhysX task
+            for (auto& task : m_workers)
+            {
+                PERF_SCOPE_M("task release");
+                task.m_id = workerId++;
+                task.m_group = this;
+                task.setContinuation(*m_pxTaskManager, nullptr);
+                // mind m_sync.setCount above, immediately removing reference would not work with a continuation task
+                task.removeReference();
+            }
+        }
+        else
+        {
+            // let this thread do the work
+            NVBLAST_ASSERT(m_workers.size() == 1);
+            for (auto& task : m_workers)
+            {
+                task.m_id = workerId++;
+                task.m_group = this;
+                task.run();
+                task.release();
+            }
+        }
+    }
+
+
+    return true;
+}
+
+
+// Finishes a processing pass started by process(). Optionally blocks until
+// all workers are done; then accumulates profiling stats, updates group
+// membership for newly split actors, updates joints, clears the job queue,
+// dispatches queued family events, and leaves the processing state.
+//
+// \return true if finalization ran (workers finished), false otherwise.
+bool TkGroupImpl::sync(bool block /*= true*/)
+{
+    if (!m_sync.isDone() && block)
+    {
+        PERF_SCOPE_L("TkGroupImpl::sync wait");
+        m_sync.wait();
+    }
+
+    // only finalize when a pass is active and all workers have signaled
+    if (isProcessing() && m_sync.isDone())
+    {
+        PERF_SCOPE_L("TkGroupImpl::sync finalize");
+
+        if (m_jobs.size() > 0)
+        {
+#if NV_PROFILE
+            PERF_ZONE_BEGIN("accumulate timers");
+            NvBlastTimers accumulated;
+            NvBlastTimersReset(&accumulated);
+            uint32_t jobCount = 0;
+            int64_t workerTime = 0;
+            for (TkWorker& worker : m_workers)
+            {
+                accumulated += worker.m_stats.timers;
+                jobCount += worker.m_stats.processedActorsCount;
+                workerTime += worker.m_stats.workerTime;
+            }
+            m_stats.timers = accumulated;
+            m_stats.processedActorsCount = jobCount;
+            m_stats.workerTime = workerTime;
+            PERF_ZONE_END("accumulate timers");
+#endif
+
+            PERF_ZONE_BEGIN("job update");
+            for (auto& j : m_jobs)
+            {
+                if (j.m_newActorsCount)
+                {
+                    TkFamilyImpl* fam = &j.m_tkActor->getFamilyImpl();
+                    SharedMemory* mem = getSharedMemory(fam);
+
+                    // as LL is implemented, where newActorsCount the parent is always deleted
+                    removeActorInternal(*j.m_tkActor);
+                    mem->removeReference();
+                    addActorsInternal(j.m_newActors, j.m_newActorsCount);
+                    mem->addReference(j.m_newActorsCount);
+
+                    // Update joints
+                    mem->m_events.protect(false); // allow allocations again
+                    fam->updateJoints(j.m_tkActor, &mem->m_events);
+                }
+
+                // virtually dequeue the actor
+                // the queue itself is cleared right after this loop
+                j.m_tkActor->m_flags.clear(TkActorFlag::PENDING);
+                j.m_tkActor->m_groupJobIndex = invalidIndex<uint32_t>();
+                j.m_tkActor->m_damageBuffer.clear();
+            }
+            m_jobs.clear();
+            PERF_ZONE_END("job update");
+
+            PERF_ZONE_BEGIN("event dispatch");
+            for (auto it = m_sharedMemory.getIterator(); !it.done(); ++it)
+            {
+                TkFamilyImpl* family = it->first;
+                SharedMemory* mem = it->second;
+
+                NVBLAST_ASSERT(family != nullptr);
+                NVBLAST_ASSERT(mem != nullptr && mem->isUsed());
+
+                // where no actor of a family has split,
+                // its group/family event queue has not been
+                // unprotected in the jobs loop above
+                mem->m_events.protect(false);
+
+                family->getQueue().dispatch(mem->m_events);
+
+                mem->m_events.reset();
+                mem->reset();
+            }
+            PERF_ZONE_END("event dispatch");
+
+            PERF_ZONE_BEGIN("event memory release");
+            for (auto& worker : m_workers)
+            {
+                worker.m_bondBuffer.clear();
+                worker.m_chunkBuffer.clear();
+            }
+            PERF_ZONE_END("event memory release");
+        }
+
+        // leave the processing state; must succeed since we were processing
+        bool success = setProcessing(false);
+        NVBLAST_ASSERT(success);
+        return success;
+    }
+
+    return false;
+}
+
+
+// Atomically transitions the processing flag to 'value', but only if it
+// currently holds the opposite value (CAS). Returns false when the state was
+// already 'value' — e.g. process() called twice without an intervening sync().
+bool TkGroupImpl::setProcessing(bool value)
+{
+    bool expected = !value;
+    return m_isProcessing.compare_exchange_strong(expected, value);
+}
+
+
+// Appends a job for the actor to this group's job queue and stores the job
+// index on the actor. Must not be called while processing; the actor must
+// belong to this group and must not already be enqueued (asserted; the debug
+// build additionally scans the queue for duplicates).
+void TkGroupImpl::enqueue(TkActorImpl* tkActor)
+{
+    NVBLAST_ASSERT(tkActor->getGroupImpl() != nullptr);
+    NVBLAST_ASSERT(tkActor->getGroupImpl() == this);
+    NVBLAST_ASSERT(isInvalidIndex(tkActor->m_groupJobIndex));
+    NVBLAST_ASSERT(isProcessing() == false);
+#if NV_DEBUG
+    for (TkWorkerJob& j : m_jobs)
+    {
+        NVBLAST_ASSERT(j.m_tkActor != tkActor);
+    }
+#endif
+
+    // record where the job lives so removeActor can swap-and-pop it later
+    tkActor->m_groupJobIndex = m_jobs.size();
+    TkWorkerJob& j = m_jobs.insert();
+    j.m_tkActor = tkActor;
+}
+
+
+} // namespace Blast
+} // namespace Nv
diff --git a/NvBlast/sdk/toolkit/source/NvBlastTkGroupImpl.h b/NvBlast/sdk/toolkit/source/NvBlastTkGroupImpl.h
new file mode 100644
index 0000000..db7e7c1
--- /dev/null
+++ b/NvBlast/sdk/toolkit/source/NvBlastTkGroupImpl.h
@@ -0,0 +1,174 @@
+/*
+* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#ifndef NVBLASTTKGROUPIMPL_H
+#define NVBLASTTKGROUPIMPL_H
+
+
+#include "NvBlastTkTaskImpl.h"
+#include "NvBlastTkGroup.h"
+#include "NvBlastTkTypeImpl.h"
+
+
+namespace Nv
+{
+namespace Blast
+{
+
+class TkActorImpl;
+class TkFamilyImpl;
+
+// Implementation of TkGroup: a set of actors processed together as a batch of
+// damage/fracture/split jobs by TkWorker tasks (see process()/sync()).
+NVBLASTTK_IMPL_DECLARE(Group)
+{
+    ~TkGroupImpl();
+
+public:
+    TkGroupImpl();
+
+    NVBLASTTK_IMPL_DEFINE_IDENTIFIABLE('G', 'R', 'P', '\0');
+
+    static TkGroupImpl* create(const TkGroupDesc& desc);
+
+    // Begin TkGroup
+    virtual bool addActor(TkActor& actor) override;
+
+    virtual uint32_t getActorCount() const override;
+
+    virtual uint32_t getActors(TkActor** buffer, uint32_t bufferSize, uint32_t indexStart = 0) const override;
+
+    virtual bool process() override;
+
+    virtual bool sync(bool block = true) override;
+
+    virtual void getStats(TkGroupStats& stats) const override;
+    // End TkGroup
+
+    // TkGroupImpl API
+
+    /**
+    Remove the actor from this group if the actor actually belongs to it and the group is not processing.
+
+    \param[in] actor The TkActor to remove.
+
+    \return true if removing succeeded, false otherwise
+    */
+    bool removeActor(TkActor& actor);
+
+    /**
+    Add the actor to this group's job queue.
+    It is the caller's responsibility to add an actor only once. This condition is checked in debug builds.
+    */
+    void enqueue(TkActorImpl* tkActor);
+
+    /**
+    Atomically check if this group is processing actors. @see setProcessing()
+
+    \return true between process() and sync() calls, false otherwise
+    */
+    bool isProcessing() const;
+
+private:
+    /**
+    Atomically set the processing state. This function checks for the current state
+    before changing it. @see isProcessing()
+
+    \param[in] value the value of the new state
+
+    \return true if the new state could be set, false otherwise
+    */
+    bool setProcessing(bool value);
+
+    /**
+    Get the group-family shared memory for the specified family. To be used when the memory is expected to already exist.
+    */
+    SharedMemory* getSharedMemory(TkFamilyImpl* family);
+    void releaseSharedMemory(TkFamilyImpl* fam, SharedMemory* mem);
+
+    // functions to add/remove actors _without_ group-family memory management
+    void addActorInternal(TkActorImpl& tkActor);
+    void addActorsInternal(TkActorImpl** actors, uint32_t numActors);
+    void removeActorInternal(TkActorImpl& tkActor);
+
+
+    physx::PxTaskManager* m_pxTaskManager; //!< the task manager used to dispatch workers
+    uint32_t m_initialNumThreads; //!< tracks the number of worker threads
+
+    uint32_t m_actorCount; //!< number of actors in this group
+
+    TkHashMap<TkFamilyImpl*, SharedMemory*>::type m_sharedMemory; //!< memory sharable by actors in the same family in this group
+
+    // it is assumed no more than the asset's number of bond and chunks fracture commands are produced
+    SharedBlock<NvBlastChunkFractureData> m_chunkTempDataBlock; //!< chunk data for damage/fracture
+    SharedBlock<NvBlastBondFractureData> m_bondTempDataBlock; //!< bond data for damage/fracture
+    SharedBlock<NvBlastChunkFractureData> m_chunkEventDataBlock; //!< initial memory block for event data
+    SharedBlock<NvBlastBondFractureData> m_bondEventDataBlock; //!< initial memory block for event data
+    SharedBlock<char> m_splitScratchBlock; //!< split scratch memory
+
+    std::atomic<bool> m_isProcessing; //!< true while workers are processing
+    TaskSync m_sync; //!< keeps track of finished workers
+
+    TkArray<TkWorker>::type m_workers; //!< this group's workers
+    TkAtomicJobQueue m_jobQueue; //!< shared job queue for workers
+
+    TkArray<TkWorkerJob>::type m_jobs; //!< this group's process jobs
+
+// NOTE: m_stats is declared unconditionally (the NV_PROFILE guard is commented
+// out) so the class layout does not depend on the build configuration.
+//#if NV_PROFILE
+    TkGroupStats m_stats; //!< accumulated group's worker stats
+//#endif
+
+    friend class TkWorker;
+};
+
+
+// Atomic read of the processing flag (set by process(), cleared by sync()).
+NV_INLINE bool TkGroupImpl::isProcessing() const
+{
+    return m_isProcessing.load();
+}
+
+
+// Copies the accumulated worker stats into the caller's struct.
+// In non-profiling builds the stats are not maintained, so this is a no-op.
+NV_INLINE void TkGroupImpl::getStats(TkGroupStats& stats) const
+{
+#if NV_PROFILE
+    memcpy(&stats, &m_stats, sizeof(TkGroupStats));
+#else
+    NV_UNUSED(stats);
+#endif
+}
+
+
+// Returns the number of actors currently in this group.
+NV_INLINE uint32_t TkGroupImpl::getActorCount() const
+{
+    return m_actorCount;
+}
+
+
+// Looks up the group-family shared memory for 'family'. The entry is expected
+// to exist (asserted); note operator[] would default-insert a null entry if
+// it did not.
+NV_INLINE SharedMemory* TkGroupImpl::getSharedMemory(TkFamilyImpl* family)
+{
+    SharedMemory* mem = m_sharedMemory[family];
+    NVBLAST_ASSERT(mem != nullptr);
+    return mem;
+}
+
+
+// Accumulates each timing category of rhs into lhs (used by TkGroupImpl::sync
+// to sum per-worker NvBlastTimers into the group stats).
+// Fixed copy-paste bug: lhs.island previously accumulated rhs.fracture
+// instead of rhs.island, double-counting fracture time and dropping island time.
+NV_FORCE_INLINE void operator +=(NvBlastTimers& lhs, const NvBlastTimers& rhs)
+{
+    lhs.material += rhs.material;
+    lhs.fracture += rhs.fracture;
+    lhs.island += rhs.island;
+    lhs.partition += rhs.partition;
+    lhs.visibility += rhs.visibility;
+}
+
+
+} // namespace Blast
+} // namespace Nv
+
+
+#endif // ifndef NVBLASTTKGROUPIMPL_H
diff --git a/NvBlast/sdk/toolkit/source/NvBlastTkHashMap.h b/NvBlast/sdk/toolkit/source/NvBlastTkHashMap.h
new file mode 100644
index 0000000..14730c6
--- /dev/null
+++ b/NvBlast/sdk/toolkit/source/NvBlastTkHashMap.h
@@ -0,0 +1,34 @@
+/*
+* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#ifndef NVBLASTTKHASHMAP_H
+#define NVBLASTTKHASHMAP_H
+
+
+#include "NvBlastTkAllocator.h"
+#include "PsHashMap.h"
+
+
+namespace Nv
+{
+namespace Blast
+{
+
+// Alias template: TkHashMap<K, V>::type is the PhysX foundation hash map
+// bound to the toolkit's allocator, with a customizable hash functor.
+template <class Key, class Value, class HashFn = physx::shdfnd::Hash<Key>>
+struct TkHashMap
+{
+    typedef physx::shdfnd::HashMap<Key, Value, HashFn, TkAllocator> type;
+};
+
+} // namespace Blast
+} // namespace Nv
+
+
+#endif // #ifndef NVBLASTTKHASHMAP_H
diff --git a/NvBlast/sdk/toolkit/source/NvBlastTkHashSet.h b/NvBlast/sdk/toolkit/source/NvBlastTkHashSet.h
new file mode 100644
index 0000000..4a3a3a0
--- /dev/null
+++ b/NvBlast/sdk/toolkit/source/NvBlastTkHashSet.h
@@ -0,0 +1,34 @@
+/*
+* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#ifndef NVBLASTTKHASHSET_H
+#define NVBLASTTKHASHSET_H
+
+
+#include "NvBlastTkAllocator.h"
+#include "PsHashSet.h"
+
+
+namespace Nv
+{
+namespace Blast
+{
+
+// Alias template: TkHashSet<K>::type is the PhysX foundation hash set
+// bound to the toolkit's allocator, with a customizable hash functor.
+template <class Key, class HashFn = physx::shdfnd::Hash<Key>>
+struct TkHashSet
+{
+    typedef physx::shdfnd::HashSet<Key, HashFn, TkAllocator> type;
+};
+
+} // namespace Blast
+} // namespace Nv
+
+
+#endif // #ifndef NVBLASTTKHASHSET_H
diff --git a/NvBlast/sdk/toolkit/source/NvBlastTkJointImpl.cpp b/NvBlast/sdk/toolkit/source/NvBlastTkJointImpl.cpp
new file mode 100644
index 0000000..46d6378
--- /dev/null
+++ b/NvBlast/sdk/toolkit/source/NvBlastTkJointImpl.cpp
@@ -0,0 +1,183 @@
+/*
+* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#include "NvBlastTkFrameworkImpl.h"
+#include "NvBlastTkJointImpl.h"
+#include "NvBlastTkActorImpl.h"
+#include "NvBlastTkAssetImpl.h"
+#include "NvBlastTkFamilyImpl.h"
+
+#include "Px.h"
+#include "PxFileBuf.h"
+#include "PxAllocatorCallback.h"
+
+using namespace physx::general_PxIOStream2;
+
+
+namespace Nv
+{
+namespace Blast
+{
+
+//////// Member functions ////////
+
+// Constructs a joint from a descriptor. 'owner' is the family that owns an
+// internal joint, or NULL for an external joint created via the framework.
+// Resolves each side's attached actor from its family and chunk index, and
+// points both links back at this joint. External joints are registered with
+// the framework.
+TkJointImpl::TkJointImpl(const TkJointDesc& desc, TkFamilyImpl* owner) : m_owner(owner)
+{
+    userData = nullptr;
+
+    // Do not fire off a creation event. Creation events will only be fired when a family-internal joint is created.
+    NVBLAST_ASSERT(desc.families[0] != nullptr || desc.families[1] != nullptr);
+    NVBLAST_ASSERT(desc.families[0] == nullptr || desc.chunkIndices[0] < static_cast<TkFamilyImpl*>(desc.families[0])->getAssetImpl()->getChunkCount());
+    NVBLAST_ASSERT(desc.attachPositions[0].isFinite());
+    NVBLAST_ASSERT(desc.families[1] == nullptr || desc.chunkIndices[1] < static_cast<TkFamilyImpl*>(desc.families[1])->getAssetImpl()->getChunkCount());
+    NVBLAST_ASSERT(desc.attachPositions[1].isFinite());
+
+    for (int i = 0; i < 2; ++i)
+    {
+        m_data.actors[i] = desc.families[i] != nullptr ? static_cast<TkFamilyImpl*>(desc.families[i])->getActorByChunk(desc.chunkIndices[i]) : nullptr;
+        m_data.chunkIndices[i] = desc.chunkIndices[i];
+        m_data.attachPositions[i] = desc.attachPositions[i];
+        m_links[i].m_joint = this;
+    }
+
+    if (owner == nullptr)
+    {
+        // External joint: notify the framework of its creation.
+        TkFrameworkImpl::get()->onCreate(*this);
+    }
+}
+
+
+// Releases the joint. Detaches it from its actors, then either hands it back
+// to the owning family (internal joint) or unregisters it from the families
+// and framework and deletes it (external joint).
+void TkJointImpl::release()
+{
+    removeReferencesInActors();
+
+    if (m_owner != nullptr)
+    {
+        // Internal joint
+        m_owner->releaseJoint(*this);
+    }
+    else
+    {
+        // External joint
+        removeReferencesInFamilies();
+        TkFrameworkImpl::get()->onDestroy(*this);
+        NVBLASTTK_DELETE(this, TkJointImpl);
+    }
+}
+
+
+// Re-attaches the joint to (possibly new) actors after a split, queuing a
+// TkJointUpdateEvent (Unreferenced / External / Changed) when the attachment
+// meaningfully changes. A previously-attached side becoming NULL detaches the
+// joint entirely and signals Unreferenced. Events go to the family queue of
+// the changed side unless alternateQueue is provided.
+void TkJointImpl::setActors(TkActorImpl* actor0, TkActorImpl* actor1, TkEventQueue* alternateQueue)
+{
+    NVBLAST_ASSERT(m_data.actors[0] != nullptr || m_data.actors[1] != nullptr);
+
+    // unreferenced: a side that used to have an actor no longer has one
+    const bool unreferenced = (actor0 == nullptr && m_data.actors[0] != nullptr) || (actor1 == nullptr && m_data.actors[1] != nullptr);
+
+    removeReferencesInActors();
+
+    if (!unreferenced)
+    {
+        if (actor0 != nullptr)
+        {
+            actor0->addJoint(m_links[0]);
+        }
+
+        if (actor1 != nullptr && actor1 != actor0) // If the actors are the same, we only need one joint reference
+        {
+            actor1->addJoint(m_links[1]);
+        }
+    }
+
+    // We do _not_ return if m_data.m_actors[0] == actor0 && m_data.m_actors[1] == actor1 since
+    // this leads to a bug. This function will only be called when an actor is split. It is
+    // possible that the two TkActors in a joint are the same as before, but in this case one
+    // of the actors will be the split actor. Since will be represented by a different
+    // physical actor, this case still needs to be reported in an event. Returning when neither
+    // TkActor has changed will prevent that, and lead to unwanted joint disconnection.
+
+    // pick the side whose actor changed to supply the event queue
+    const uint32_t familyToUse = m_data.actors[0] != actor0 ? 0 : 1;
+
+    TkEventQueue* q = alternateQueue == nullptr ?
+        &static_cast<TkActorImpl*>(m_data.actors[familyToUse])->getFamilyImpl().getQueue()
+        : alternateQueue;
+
+    const bool jointWasInternal = m_data.actors[0] == m_data.actors[1];
+
+    if (unreferenced)
+    {
+        removeReferencesInFamilies();
+        actor0 = actor1 = nullptr; // Make both new actors NULL
+    }
+
+    if (!jointWasInternal || actor0 != actor1)
+    {
+        // The original actors were different, or they are now, signal a joint update
+        TkJointUpdateEvent* e = q->allocData<TkJointUpdateEvent>();
+        e->joint = this;
+        e->subtype = unreferenced ? TkJointUpdateEvent::Unreferenced : (jointWasInternal ? TkJointUpdateEvent::External : TkJointUpdateEvent::Changed);
+        m_data.actors[0] = actor0;
+        m_data.actors[1] = actor1;
+        q->addEvent(e);
+    }
+    else
+    if (jointWasInternal)
+    {
+        // The joint was originally created within the same actor and now it remains within the same actor.
+        m_data.actors[0] = m_data.actors[1] = actor0;
+    }
+}
+
+
+// Public accessor: returns a copy of the joint's data (attached actors,
+// chunk indices, and attachment positions).
+const TkJointData TkJointImpl::getData() const
+{
+    const TkJointData& data = getDataInternal();
+    return data;
+}
+
+
+// Unlinks this joint from both attached actors' joint lists.
+// When both sides are the same actor, only link 0 was registered, so only
+// link 0 is removed.
+void TkJointImpl::removeReferencesInActors()
+{
+    TkActorImpl* actor0 = static_cast<TkActorImpl*>(m_data.actors[0]);
+    TkActorImpl* actor1 = static_cast<TkActorImpl*>(m_data.actors[1]);
+
+    if (actor0 != nullptr)
+    {
+        actor0->removeJoint(m_links[0]);
+    }
+
+    if (actor1 != nullptr && actor1 != actor0) // If the actors are the same, we only had one joint reference
+    {
+        actor1->removeJoint(m_links[1]);
+    }
+}
+
+
+// For external joints only: removes this joint's handle from the external
+// joint tables of both attached actors' families. Internal joints are tracked
+// by their owning family instead and return immediately.
+void TkJointImpl::removeReferencesInFamilies()
+{
+    if (m_owner != nullptr)
+    {
+        return; // Only concerned with external joints
+    }
+
+    NVBLAST_ASSERT(m_data.actors[0] != m_data.actors[1] || m_data.actors[0] == nullptr); // This is enforced by the initial assumption in TkFrameworkImpl::createJoint.
+
+    for (int i = 0; i < 2; ++i)
+    {
+        if (m_data.actors[i] != nullptr)
+        {
+            // i ^ 1 is the opposite side of the joint
+            TkFamilyImpl& family = static_cast<TkActorImpl*>(m_data.actors[i])->getFamilyImpl();
+            TkJointImpl* joint = nullptr;
+            const bool found = family.deleteExternalJointHandle(joint, getFamilyID(m_data.actors[i ^ 1]), m_data.chunkIndices[i], m_data.chunkIndices[i ^ 1]);
+            NVBLAST_ASSERT((!found && m_data.actors[i ^ 1] == nullptr) || joint == this); // Might not be found if the actors in a family are in the process of being deleted
+            NV_UNUSED(found);
+        }
+    }
+}
+
+} // namespace Blast
+} // namespace Nv
diff --git a/NvBlast/sdk/toolkit/source/NvBlastTkJointImpl.h b/NvBlast/sdk/toolkit/source/NvBlastTkJointImpl.h
new file mode 100644
index 0000000..ec57309
--- /dev/null
+++ b/NvBlast/sdk/toolkit/source/NvBlastTkJointImpl.h
@@ -0,0 +1,146 @@
+/*
+* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#ifndef NVBLASTTKJOINTIMPL_H
+#define NVBLASTTKJOINTIMPL_H
+
+
+#include "NvBlastTkJoint.h"
+#include "NvBlastTkCommon.h"
+#include "NvBlastIndexFns.h"
+
+#include "NvBlastAssert.h"
+#include "NvBlastDLink.h"
+
+#include <atomic>
+
+
+namespace Nv
+{
+namespace Blast
+{
+
+// Forward declarations
+class TkActorImpl;
+class TkJointImpl;
+class TkFamilyImpl;
+class TkEventQueue;
+
+
+/**
+Double-sided link (DLink) which holds a reference back to a joint which contains it.
+*/
+// Intrusive list node: the DLink base provides the list linkage, while
+// m_joint lets list walkers recover the owning joint from the node.
+struct TkJointLink : public DLink
+{
+    TkJointImpl* m_joint; //!< The joint containing this link.
+};
+
+
+/**
+Implementation of TkJoint.
+*/
+// Implementation of TkJoint. A joint connects (up to) two actors at specific
+// chunks; it is owned either by a family (internal joint) or, when created
+// through the framework, by the user (external joint, m_owner == NULL).
+class TkJointImpl : public TkJoint
+{
+public:
+    /** Blank constructor only creates valid TkJointLinks (point back to this object) */
+    TkJointImpl();
+
+    /**
+    This constructor sets all internal data. If the joint is defined in an asset, the family
+    instanced from that asset will own this joint, and the 'owner' parameter is that family.
+    Otherwise, in the case where a joint is created from TkFramwork::createJoint, the joint
+    is not owned by a family and 'owner' will be NULL.
+    */
+    TkJointImpl(const TkJointDesc& desc, TkFamilyImpl* owner);
+
+    // Begin TkObject
+    virtual void release() override;
+    // End TkObject
+
+    // Begin TkJoint
+    virtual const TkJointData getData() const override;
+    // End TkJoint
+
+    // Public API
+
+    /**
+    Internal method to access a const reference to the joint data.
+
+    \return a const reference to the joint data.
+    */
+    const TkJointData& getDataInternal() const;
+
+    /**
+    Internal method to access a non-const reference to the joint data.
+
+    \return a non-const reference to the joint data.
+    */
+    TkJointData& getDataWritable();
+
+    /**
+    Set the actors that this joint attaches to. When the actors are different from the joint's current actors,
+    an event will be generated on one of the actors' families event queues to signal the change. Alternatively,
+    if alternateQueue is not NULL then it will be used to hold the event.
+
+    If a non-NULL attached actor becomes NULL, then this joint will detach its references to both actors (if
+    they exist) and send an event of subtype Unreferenced. This signals the user that the joint may be deleted.
+
+    \param[in] actor0 The new TkActor to replace the first attached actor.
+    \param[in] actor1 The new TkActor to replace the second attached actor.
+    \param[in] alternateQueue If not NULL, this queue will be used to hold events generated by this function.
+    */
+    void setActors(TkActorImpl* actor0, TkActorImpl* actor1, TkEventQueue* alternateQueue = nullptr);
+
+    /**
+    Ensures that any attached actors no longer refer to this joint.
+    */
+    void removeReferencesInActors();
+
+    /**
+    Ensures that any attached actors' families no longer refer to this joint. External joints (created using
+    TkFramework::createJoint) are referenced by the attached actors' families.
+    */
+    void removeReferencesInFamilies();
+
+private:
+    TkJointData m_data; //!< The data given to the user: attached actors, chunk indices, and actor-local attachment positions.
+    TkJointLink m_links[2]; //!< One link for each actor in m_data.m_actors. If m_data.m_actors[0] == m_data.m_actors[1], then only m_links[0] is used.
+    TkFamilyImpl* m_owner; //!< The owning family if this is an internal joint created during TkFramework::createActor() from a TkAssetDesc with joint flags.
+
+    friend class TkFrameworkImpl;
+    friend class TkFamilyImpl;
+    friend class TkActorImpl;
+};
+
+
+//////// TkJointImpl inline methods ////////
+
+// Default constructor: only wires both links back to this joint; all other
+// members are left for the descriptor constructor or later assignment.
+NV_INLINE TkJointImpl::TkJointImpl()
+{
+    m_links[0].m_joint = m_links[1].m_joint = this;
+}
+
+
+// Const accessor to the joint data without the copy made by getData().
+NV_INLINE const TkJointData& TkJointImpl::getDataInternal() const
+{
+    return m_data;
+}
+
+
+// Mutable accessor to the joint data, for internal (friend) use.
+NV_INLINE TkJointData& TkJointImpl::getDataWritable()
+{
+    return m_data;
+}
+
+} // namespace Blast
+} // namespace Nv
+
+
+#endif // ifndef NVBLASTTKJOINTIMPL_H
diff --git a/NvBlast/sdk/toolkit/source/NvBlastTkTaskImpl.cpp b/NvBlast/sdk/toolkit/source/NvBlastTkTaskImpl.cpp
new file mode 100644
index 0000000..3249928
--- /dev/null
+++ b/NvBlast/sdk/toolkit/source/NvBlastTkTaskImpl.cpp
@@ -0,0 +1,263 @@
+/*
+* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#include "NvBlastTime.h"
+
+#include "NvBlastTkTaskImpl.h"
+#include "NvBlastTkFamilyImpl.h"
+#include "NvBlastTkAssetImpl.h"
+#include "NvBlastTkGroupImpl.h"
+
+
+using namespace Nv::Blast;
+
+
+void SharedMemory::allocate(TkFamilyImpl& tkFamily)
+{
+ NVBLAST_ASSERT(m_refCount == 0);
+ const NvBlastAsset* assetLL = tkFamily.getAsset()->getAssetLL();
+
+ // at most leafChunkCount actors can be created within a family
+ // tasks will grab their portion out of these memory blocks
+ uint32_t leafChunkCount = NvBlastAssetGetLeafChunkCount(assetLL, TkFrameworkImpl::get()->log);
+ m_newActorBuffers.allocate(2 * leafChunkCount); // GWD-167 workaround (2*)
+ m_newTkActorBuffers.allocate(leafChunkCount);
+}
+
+
+/**
+Creates a TkEvent::FractureCommand according to the input buffer for tkActor
+into events queue using the LocalBuffers to store the actual event data.
+*/
+NV_FORCE_INLINE void reportFractureCommands(
+ const NvBlastFractureBuffers& buffer,
+ LocalBuffer<NvBlastBondFractureData>& bondBuffer, LocalBuffer<NvBlastChunkFractureData>& chunkBuffer,
+ TkEventQueue& events, const TkActorImpl* tkActor)
+{
+
+ NvBlastBondFractureData* bdata = nullptr;
+ if (buffer.bondFractureCount > 0)
+ {
+ bdata = bondBuffer.allocate(buffer.bondFractureCount);
+ memcpy(bdata, buffer.bondFractures, sizeof(NvBlastBondFractureData)*buffer.bondFractureCount);
+ }
+
+ NvBlastChunkFractureData* cdata = nullptr;
+ if (buffer.chunkFractureCount > 0)
+ {
+ cdata = chunkBuffer.allocate(buffer.chunkFractureCount);
+ memcpy(cdata, buffer.chunkFractures, sizeof(NvBlastChunkFractureData)*buffer.chunkFractureCount);
+ }
+
+ TkFractureCommands* fevt = events.allocData<TkFractureCommands>();
+ fevt->tkActorData = *tkActor;
+ fevt->buffers = { buffer.bondFractureCount, buffer.chunkFractureCount, bdata, cdata };
+ events.addEvent(fevt);
+}
+
+
+/**
+Creates a TkEvent::FractureEvent according to the input buffer for tkActor
+into events queue using the LocalBuffers to store the actual event data.
+*/
+NV_FORCE_INLINE void reportFractureEvents(
+ const NvBlastFractureBuffers& buffer,
+ LocalBuffer<NvBlastBondFractureData>& bondBuffer, LocalBuffer<NvBlastChunkFractureData>& chunkBuffer,
+ TkEventQueue& events, const TkActorImpl* tkActor)
+{
+ uint32_t result[4] = { 0,0,0,0 };
+
+ NvBlastBondFractureData* bdata = nullptr;
+ if (buffer.bondFractureCount > 0)
+ {
+ bdata = bondBuffer.allocate(buffer.bondFractureCount);
+ for (uint32_t b = 0; b < buffer.bondFractureCount; ++b)
+ {
+ bdata[b] = buffer.bondFractures[b];
+ result[buffer.bondFractures[b].health > 0 ? 0 : 1]++;
+ }
+ }
+
+ NvBlastChunkFractureData* cdata = nullptr;
+ if (buffer.chunkFractureCount > 0)
+ {
+ cdata = chunkBuffer.allocate(buffer.chunkFractureCount);
+ for (uint32_t c = 0; c < buffer.chunkFractureCount; ++c)
+ {
+ cdata[c] = buffer.chunkFractures[c];
+ result[buffer.chunkFractures[c].health > 0 ? 2 : 3]++;
+ }
+ }
+
+ TkFractureEvents* fevt = events.allocData<TkFractureEvents>();
+ fevt->tkActorData = *tkActor;
+ fevt->buffers = { buffer.bondFractureCount, buffer.chunkFractureCount, bdata, cdata };
+ fevt->bondsDamaged = result[0];
+ fevt->bondsBroken = result[1];
+ fevt->chunksDamaged = result[2];
+ fevt->chunksBroken = result[3];
+ events.addEvent(fevt);
+}
+
+
+void TkWorker::run()
+{
+ PERF_SCOPE_L("TkWorker Task");
+
+ NvBlastTimers* timers = nullptr;
+
+#if NV_PROFILE
+ NvBlastTimers myTimers;
+ timers = &myTimers;
+ NvBlastTimersReset(timers);
+ uint32_t jobCount = 0;
+ Time workTime;
+#endif
+
+ // temporary memory used to generate and apply fractures
+ // it must fit for the largest family involved in the group that owns this worker
+ NvBlastBondFractureData* bondFractureData = m_group->m_bondTempDataBlock.getBlock(m_id);
+ uint32_t bondFractureCount = m_group->m_bondTempDataBlock.numElementsPerBlock();
+ NvBlastChunkFractureData* chunkFractureData = m_group->m_chunkTempDataBlock.getBlock(m_id);
+ uint32_t chunkFractureCount = m_group->m_chunkTempDataBlock.numElementsPerBlock();
+ const NvBlastFractureBuffers tempBuffer = { bondFractureCount, chunkFractureCount, bondFractureData, chunkFractureData };
+
+ // temporary memory used to split the actor
+ // large enough for the largest family involved
+ void* splitScratch = m_group->m_splitScratchBlock.getBlock(m_id);
+
+ // to avoid unnecessary allocations, preallocated memory exists to fit all chunks and bonds taking damage once
+ // where multiple damage occurs, more memory will be allocated on demand (this may thwart other threads doing the same)
+ m_bondBuffer.initialize(m_group->m_bondEventDataBlock.getBlock(m_id), m_group->m_bondEventDataBlock.numElementsPerBlock());
+ m_chunkBuffer.initialize(m_group->m_chunkEventDataBlock.getBlock(m_id), m_group->m_chunkEventDataBlock.numElementsPerBlock());
+
+ TkAtomicJobQueue& q = m_group->m_jobQueue;
+ TkWorkerJob* j;
+
+ while ((j = q.next()) != nullptr)
+ {
+ PERF_SCOPE_M("TkActor");
+
+ TkActorImpl* tkActor = j->m_tkActor;
+ const uint32_t tkActorIndex = tkActor->getIndex();
+ NvBlastActor* actorLL = tkActor->getActorLLInternal();
+ TkFamilyImpl& family = tkActor->getFamilyImpl();
+ SharedMemory* mem = m_group->getSharedMemory(&family);
+ TkEventQueue& events = mem->m_events;
+
+ NVBLAST_ASSERT(tkActor->getGroupImpl() == m_group);
+
+#if NV_PROFILE
+ *timers += tkActor->m_timers;
+ NvBlastTimersReset(&tkActor->m_timers);
+ jobCount++;
+#endif
+
+ // generate and apply fracture for all damage requested on this actor
+ // and queue events accordingly
+ for (const auto& damage : tkActor->m_damageBuffer)
+ {
+ NvBlastFractureBuffers commandBuffer = tempBuffer;
+
+ PERF_ZONE_BEGIN("Material");
+ damage.generateFracture(&commandBuffer, actorLL, timers);
+ PERF_ZONE_END("Material");
+
+ if (commandBuffer.chunkFractureCount > 0 || commandBuffer.bondFractureCount > 0)
+ {
+ PERF_SCOPE_M("Fill Command Events");
+ reportFractureCommands(commandBuffer, m_bondBuffer, m_chunkBuffer, events, tkActor);
+ }
+
+ NvBlastFractureBuffers eventBuffer = tempBuffer;
+
+ PERF_ZONE_BEGIN("Fracture");
+ NvBlastActorApplyFracture(&eventBuffer, actorLL, &commandBuffer, TkFrameworkImpl::get()->log, timers);
+ PERF_ZONE_END("Fracture");
+
+ if (eventBuffer.chunkFractureCount > 0 || eventBuffer.bondFractureCount > 0)
+ {
+ PERF_SCOPE_M("Fill Fracture Events");
+ tkActor->m_flags |= (TkActorFlag::DAMAGED);
+ reportFractureEvents(eventBuffer, m_bondBuffer, m_chunkBuffer, events, tkActor);
+ }
+ }
+
+
+        // split the actor, which could have been damaged directly through the TkActor's fracture functions
+ // i.e. it did not have damage queued for the above loop
+
+ NvBlastActorSplitEvent splitEvent = { nullptr, nullptr };
+ if (tkActor->isDamaged())
+ {
+ PERF_ZONE_BEGIN("Split Memory");
+ uint32_t maxActorCount = NvBlastActorGetMaxActorCountForSplit(actorLL, TkFrameworkImpl::get()->log);
+ splitEvent.newActors = mem->reserveNewActors(maxActorCount);
+ PERF_ZONE_END("Split Memory");
+ PERF_ZONE_BEGIN("Split");
+ j->m_newActorsCount = NvBlastActorSplit(&splitEvent, actorLL, maxActorCount, splitScratch, TkFrameworkImpl::get()->log, timers);
+ PERF_ZONE_END("Split");
+
+ tkActor->m_flags.clear(TkActorFlag::DAMAGED);
+ }
+ else
+ {
+ j->m_newActorsCount = 0;
+ }
+
+
+ // update the TkActor according to the LL split results and queue events accordingly
+ if (j->m_newActorsCount > 0)
+ {
+ NVBLAST_ASSERT(splitEvent.deletedActor == tkActor->getActorLL());
+
+ PERF_ZONE_BEGIN("memory new actors");
+
+ auto tkSplitEvent = events.allocData<TkSplitEvent>();
+
+ tkSplitEvent->children = mem->reserveNewTkActors(j->m_newActorsCount);
+ tkSplitEvent->numChildren = j->m_newActorsCount;
+
+ tkSplitEvent->parentData.family = &family;
+ tkSplitEvent->parentData.userData = tkActor->userData;
+ tkSplitEvent->parentData.index = tkActorIndex;
+ family.removeActor(tkActor);
+
+ PERF_ZONE_END("memory new actors");
+
+
+ PERF_ZONE_BEGIN("create new actors");
+ for (uint32_t i = 0; i < j->m_newActorsCount; ++i)
+ {
+ TkActorImpl* newActor = family.addActor(splitEvent.newActors[i]);
+ tkSplitEvent->children[i] = newActor;
+ }
+ j->m_newActors = reinterpret_cast<TkActorImpl**>(tkSplitEvent->children);
+ PERF_ZONE_END("create new actors");
+
+ PERF_ZONE_BEGIN("split event");
+ events.addEvent(tkSplitEvent);
+ PERF_ZONE_END("split event");
+ }
+ }
+
+#if NV_PROFILE
+ PERF_ZONE_BEGIN("write timers");
+ m_stats.timers = *timers;
+ m_stats.processedActorsCount = jobCount;
+ m_stats.workerTime = workTime.getElapsedTicks();
+ PERF_ZONE_END("write timers");
+#endif
+}
+
+void TkWorker::release()
+{
+ m_group->m_sync.notify();
+}
diff --git a/NvBlast/sdk/toolkit/source/NvBlastTkTaskImpl.h b/NvBlast/sdk/toolkit/source/NvBlastTkTaskImpl.h
new file mode 100644
index 0000000..75f92f8
--- /dev/null
+++ b/NvBlast/sdk/toolkit/source/NvBlastTkTaskImpl.h
@@ -0,0 +1,444 @@
+/*
+* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#ifndef NVBLASTTKTASKIMPL_H
+#define NVBLASTTKTASKIMPL_H
+
+#include "NvBlast.h"
+
+#include "NvBlastTkFrameworkImpl.h"
+#include "NvBlastTkEventQueue.h"
+#include "NvBlastTkArray.h"
+
+#include <atomic>
+#include <mutex>
+#include <condition_variable>
+
+#include "task/PxTask.h"
+#include "NvBlastAssert.h"
+
+#include "NvBlastTkGroup.h" // TkGroupStats
+
+
+namespace Nv
+{
+namespace Blast
+{
+
+class TkGroupImpl;
+class TkActorImpl;
+class TkFamilyImpl;
+
+
+/**
+Transient structure describing a job and its results.
+*/
+struct TkWorkerJob
+{
+ TkActorImpl* m_tkActor; //!< the actor to process
+ TkActorImpl** m_newActors; //!< list of child actors created by splitting
+ uint32_t m_newActorsCount; //!< the number of child actors created
+};
+
+
+/**
+Counting synchronization object for waiting on TkWorkers to finish.
+*/
+class TaskSync
+{
+public:
+ /**
+ Initializes with an expected number of notifications.
+ */
+ TaskSync(uint32_t count) : m_count(count) {}
+
+ /**
+ Blocks until the expected number of notifications happened.
+ */
+ void wait()
+ {
+ std::unique_lock<std::mutex> lk(m_mutex);
+ m_cv.wait(lk, [&]{ return m_count == 0; });
+ }
+
+ /**
+ Decrement the wait() count by one.
+ */
+ void notify()
+ {
+ PERF_SCOPE_H("TaskSync::notify");
+ std::unique_lock<std::mutex> lk(m_mutex);
+ m_count--;
+ if (m_count == 0)
+ {
+ lk.unlock();
+ m_cv.notify_one();
+ }
+ }
+
+ /**
+ Peek if notifications are pending.
+ */
+ bool isDone()
+ {
+ std::unique_lock<std::mutex> lk(m_mutex);
+ return m_count == 0;
+ }
+
+ /**
+ Sets the expected number of notifications for wait() to unblock.
+ */
+ void setCount(uint32_t count)
+ {
+ m_count = count;
+ }
+
+private:
+ std::mutex m_mutex;
+ std::condition_variable m_cv;
+ uint32_t m_count;
+};
+
+
+/**
+A list of equally sized memory blocks sharable between tasks.
+*/
+template<typename T>
+class SharedBlock
+{
+public:
+
+ SharedBlock() : m_numElementsPerBlock(0), m_numBlocks(0), m_buffer(nullptr) {}
+
+ /**
+ Allocates one large memory block of elementsPerBlock*numBlocks elements.
+ */
+ void allocate(uint32_t elementsPerBlock, uint32_t numBlocks)
+ {
+ NVBLAST_ASSERT(elementsPerBlock > 0 && numBlocks > 0);
+
+ m_buffer = reinterpret_cast<T*>(NVBLASTTK_ALLOC(elementsPerBlock*numBlocks*sizeof(T), "SharedBlock"));
+ m_numElementsPerBlock = elementsPerBlock;
+ m_numBlocks = numBlocks;
+ }
+
+ /**
+ Returns the pointer to the first element of a block of numElementsPerBlock() elements.
+ */
+ T* getBlock(uint32_t id)
+ {
+ NVBLAST_ASSERT(id < m_numBlocks || 0 == m_numElementsPerBlock);
+ return &m_buffer[id*m_numElementsPerBlock];
+ }
+
+ /**
+ The number of elements available per block.
+ */
+ uint32_t numElementsPerBlock() const
+ {
+ return m_numElementsPerBlock;
+ }
+
+ /**
+ Frees the whole memory block.
+ */
+ void release()
+ {
+ m_numBlocks = 0;
+ m_numElementsPerBlock = 0;
+ NVBLASTTK_FREE(m_buffer);
+ m_buffer = nullptr;
+ }
+
+private:
+ uint32_t m_numElementsPerBlock; //!< elements available in one block
+ uint32_t m_numBlocks; //!< number of virtual blocks available
+ T* m_buffer; //!< contiguous memory for all blocks
+};
+
+
+/**
+A preallocated, shared array that tasks can allocate from.
+Intended to be used when the maximum amount of data (e.g. for a family)
+is known in advance. No further allocations take place on exhaustion.
+Exhaustion asserts in debug builds and overruns the buffer otherwise.
+*/
+template<typename T>
+class SharedBuffer
+{
+public:
+ SharedBuffer() : m_capacity(0), m_used(0), m_buffer(nullptr) {}
+
+ /**
+ Atomically gets a pointer to the first element of an array of n elements.
+ */
+ T* reserve(size_t n)
+ {
+ NVBLAST_ASSERT(m_used + n <= m_capacity);
+ size_t start = m_used.fetch_add(n);
+ return &m_buffer[start];
+ }
+
+ /**
+ Preallocates memory for capacity elements.
+ */
+ void allocate(size_t capacity)
+ {
+ NVBLAST_ASSERT(m_buffer == nullptr);
+ m_buffer = reinterpret_cast<T*>(NVBLASTTK_ALLOC(capacity*sizeof(T), "SplitMemory"));
+ m_capacity = capacity;
+ }
+
+ /**
+ Preserves the memory allocated but resets to reserve from the beginning of the array.
+ */
+ void reset()
+ {
+ m_used = 0;
+ }
+
+ /**
+ Frees the preallocated array.
+ */
+ void release()
+ {
+ NVBLAST_ASSERT(m_buffer != nullptr);
+ NVBLASTTK_FREE(m_buffer);
+ m_buffer = nullptr;
+ m_capacity = m_used = 0;
+ }
+
+private:
+ size_t m_capacity; //!< available elements in the buffer
+ std::atomic<size_t> m_used; //!< used elements in the buffer
+ T* m_buffer; //!< the memory containing T's
+};
+
+
+/**
+Allocates from a preallocated, externally owned memory block set with initialize().
+When blocks run out of space, new ones are allocated and owned by this class.
+*/
+template<typename T>
+class LocalBuffer
+{
+public:
+ /**
+ Returns the pointer to the first element of an array of n elements.
+ Allocates a new block of memory when exhausted, its size being the larger of n and capacity set with initialize().
+ */
+ T* allocate(size_t n)
+ {
+ if (m_used + n > m_capacity)
+ {
+ allocateNewBlock(n > m_capacity ? n : m_capacity);
+ }
+
+ size_t index = m_used;
+ m_used += n;
+ return &m_currentBlock[index];
+ }
+
+ /**
+ Release the additionally allocated memory blocks.
+ The externally owned memory block remains untouched.
+ */
+ void clear()
+ {
+ for (void* block : m_memoryBlocks)
+ {
+ NVBLASTTK_FREE(block);
+ }
+ m_memoryBlocks.clear();
+ }
+
+ /**
+ Set the externally owned memory block to start allocating from,
+ with a size of capacity elements.
+ */
+ void initialize(T* block, size_t capacity)
+ {
+ m_currentBlock = block;
+ m_capacity = capacity;
+ m_used = 0;
+ }
+
+private:
+ /**
+ Allocates space for capacity elements.
+ */
+ void allocateNewBlock(size_t capacity)
+ {
+ PERF_SCOPE_L("Local Buffer allocation");
+ m_capacity = capacity;
+ m_currentBlock = static_cast<T*>(NVBLASTTK_ALLOC(capacity*sizeof(T), "Blast LocalBuffer"));
+ m_memoryBlocks.pushBack(m_currentBlock);
+ m_used = 0;
+ }
+
+ TkInlineArray<void*, 4>::type m_memoryBlocks; //!< storage for memory blocks
+ T* m_currentBlock; //!< memory block used to allocate from
+ size_t m_used; //!< elements used in current block
+ size_t m_capacity; //!< elements available in current block
+};
+
+
+/**
+Holds the memory used by TkWorker for each family in each group.
+*/
+class SharedMemory
+{
+public:
+ SharedMemory() : m_eventsMemory(0), m_eventsCount(0), m_refCount(0) {}
+
+ /**
+ Reserves n entries from preallocated memory.
+ */
+ NvBlastActor** reserveNewActors(size_t n)
+ {
+ return m_newActorBuffers.reserve(n);
+ }
+
+ /**
+ Reserves n entries from preallocated memory.
+ */
+ TkActor** reserveNewTkActors(size_t n)
+ {
+ return m_newTkActorBuffers.reserve(n);
+ }
+
+ /**
+    Allocates buffers to hold the new actors created when splitting within the given family.
+ */
+ void allocate(TkFamilyImpl&);
+
+ /**
+ Resets the internal buffers to reserve from their beginning.
+ Preserves the allocated memory.
+ */
+ void reset()
+ {
+ m_newActorBuffers.reset();
+ m_newTkActorBuffers.reset();
+ }
+
+ /**
+ Increments the reference count.
+ */
+ void addReference() { m_refCount++; }
+
+ /**
+ Increments the reference count by n.
+ */
+ void addReference(size_t n) { m_refCount += n; }
+
+ /**
+ Decrements the reference count.
+ Returns true if the count reached zero.
+ */
+ bool removeReference()
+ {
+ m_refCount--;
+ return !isUsed();
+ }
+
+ /**
+ Checks if the reference count is not zero.
+ */
+ bool isUsed()
+ {
+ return m_refCount > 0;
+ }
+
+ /**
+ Release the internal buffers' memory.
+ */
+ void release()
+ {
+ m_newActorBuffers.release();
+ m_newTkActorBuffers.release();
+ }
+
+ TkEventQueue m_events; //!< event queue shared across a group's actors of the same family
+ uint32_t m_eventsMemory; //!< expected memory size for event data
+ uint32_t m_eventsCount; //!< expected number of events
+
+private:
+ size_t m_refCount; //!< helper for usage and releasing memory
+
+ SharedBuffer<NvBlastActor*> m_newActorBuffers; //!< memory for splitting
+ SharedBuffer<TkActor*> m_newTkActorBuffers; //!< memory for split events
+};
+
+
+/**
+Shared job queue from which TkWorkers atomically fetch the next job.
+*/
+template <typename T>
+class TkAtomicQueue
+{
+public:
+ /**
+ Initialize for a new batch of jobs.
+ */
+ void init(TkWorkerJob* jobs, uint32_t numJobs)
+ {
+ m_jobs = jobs;
+ m_maxCount = numJobs;
+ m_current = 0;
+ }
+
+ /**
+ Fetch a pointer to the next job. Returns nullptr when exhausted.
+ */
+ T* next()
+ {
+ size_t index = m_current.fetch_add(1, std::memory_order_relaxed);
+ if (index < m_maxCount)
+ {
+ return &m_jobs[index];
+ }
+ return nullptr;
+ }
+
+private:
+ T* m_jobs; //!< the list of jobs
+ size_t m_maxCount; //!< number of jobs available in the list
+ std::atomic<size_t> m_current; //!< current job counter
+};
+
+typedef TkAtomicQueue<TkWorkerJob> TkAtomicJobQueue;
+
+
+/**
+Thread worker fracturing and splitting actors sequentially.
+The list of actual jobs is provided by the group owning this worker.
+*/
+class TkWorker : public physx::PxLightCpuTask
+{
+public:
+ void run();
+ void release();
+ const char* getName() const { return "TkWorker"; }
+
+ uint32_t m_id; //!< this worker's id
+ TkGroupImpl* m_group; //!< the group owning this worker
+
+ LocalBuffer<NvBlastChunkFractureData> m_chunkBuffer; //!< memory manager for chunk event data
+ LocalBuffer<NvBlastBondFractureData> m_bondBuffer; //!< memory manager for bonds event data
+
+#if NV_PROFILE
+ TkGroupStats m_stats;
+#endif
+};
+}
+}
+
+#endif // NVBLASTTKTASKIMPL_H
diff --git a/NvBlast/sdk/toolkit/source/NvBlastTkTypeImpl.h b/NvBlast/sdk/toolkit/source/NvBlastTkTypeImpl.h
new file mode 100644
index 0000000..a29c32f
--- /dev/null
+++ b/NvBlast/sdk/toolkit/source/NvBlastTkTypeImpl.h
@@ -0,0 +1,174 @@
+/*
+* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+*
+* NVIDIA CORPORATION and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA CORPORATION is strictly prohibited.
+*/
+
+#ifndef NVBLASTTKTYPEIMPL_H
+#define NVBLASTTKTYPEIMPL_H
+
+
+#include "NvPreprocessor.h"
+
+#include "NvBlastTkType.h"
+
+
+// Forward declarations
+namespace physx
+{
+namespace general_PxIOStream2
+{
+class PxFileBuf;
+}
+}
+
+
+namespace Nv
+{
+namespace Blast
+{
+
+// Forward declarations
+class TkSerializable;
+
+
+// Serialization function signature
+typedef TkSerializable* (*TkDeserializeFn)(physx::general_PxIOStream2::PxFileBuf&, const NvBlastID& id);
+
+
+/**
+Implementation of TkType, storing class information for TkIdentifiable-derived classes.
+*/
+class TkTypeImpl : public TkType
+{
+public:
+ TkTypeImpl(const char* typeName, uint32_t typeID, uint32_t version, TkDeserializeFn deserializeFn);
+
+ // Begin TkType
+ virtual const char* getName() const override { return getNameInternal(); }
+
+ virtual uint32_t getVersion() const override { return getVersionInternal(); }
+ // End TkType
+
+ // Public methods
+
+ /**
+ Access to the class name.
+
+ \return a C string pointer to the class name.
+ */
+ const char* getNameInternal() const;
+
+ /**
+    Access to the data format version for the class (used if it is TkSerializable-derived).
+
+ \return the data format version.
+ */
+ uint32_t getVersionInternal() const;
+
+ /**
+ Access to a unique identifier for the class (set using the NVBLASTTK_IMPL_DEFINE_IDENTIFIABLE or NVBLASTTK_IMPL_DEFINE_SERIALIZABLE macro).
+
+ \return the class's unique identifier.
+ */
+ uint32_t getID() const;
+
+ /**
+ \return the class's deserialization function.
+ */
+ TkDeserializeFn getDeserializeFn() const;
+
+ /**
+ Access to a runtime-unique small index for the class.
+
+ \return the index for the class.
+ */
+ uint32_t getIndex() const;
+
+ /**
+ \return whether or not the index has been set (see setIndex) to a valid value.
+ */
+ bool indexIsValid() const;
+
+private:
+ enum { InvalidIndex = 0xFFFFFFFF };
+
+ /**
+ Sets the type index.
+
+ \param[in] index The index to set.
+ */
+ void setIndex(uint32_t index);
+
+ const char* m_name; //!< The name of the class, set by the constructor.
+ uint32_t m_ID; //!< The unique identifier for the class, set by the constructor.
+ uint32_t m_version; //!< The data format version for the class, set by the constructor.
+ TkDeserializeFn m_deserializeFn; //!< The class deserialization function, set by the constructor.
+ uint32_t m_index; //!< The index set for this class, set using setIndex().
+
+ friend class TkFrameworkImpl;
+};
+
+
+//////// TkTypeImpl inline methods ////////
+
+NV_INLINE TkTypeImpl::TkTypeImpl(const char* typeName, uint32_t typeID, uint32_t version, TkDeserializeFn deserializeFn)
+ : m_name(typeName)
+ , m_ID(typeID)
+ , m_version(version)
+ , m_deserializeFn(deserializeFn)
+ , m_index((uint32_t)InvalidIndex)
+{
+}
+
+
+NV_INLINE const char* TkTypeImpl::getNameInternal() const
+{
+ return m_name;
+}
+
+
+NV_INLINE uint32_t TkTypeImpl::getVersionInternal() const
+{
+ return m_version;
+}
+
+
+NV_INLINE uint32_t TkTypeImpl::getID() const
+{
+ return m_ID;
+}
+
+
+NV_INLINE TkDeserializeFn TkTypeImpl::getDeserializeFn() const
+{
+ return m_deserializeFn;
+}
+
+
+NV_INLINE uint32_t TkTypeImpl::getIndex() const
+{
+ return m_index;
+}
+
+
+NV_INLINE bool TkTypeImpl::indexIsValid() const
+{
+ return m_index != (uint32_t)InvalidIndex;
+}
+
+
+NV_INLINE void TkTypeImpl::setIndex(uint32_t index)
+{
+ m_index = index;
+}
+
+} // namespace Blast
+} // namespace Nv
+
+
+#endif // ifndef NVBLASTTKTYPEIMPL_H