aboutsummaryrefslogtreecommitdiff
path: root/NvCloth/src
diff options
context:
space:
mode:
Diffstat (limited to 'NvCloth/src')
-rw-r--r--NvCloth/src/Allocator.cpp48
-rw-r--r--NvCloth/src/BoundingBox.h103
-rw-r--r--NvCloth/src/Callbacks.cpp133
-rw-r--r--NvCloth/src/ClothBase.h131
-rw-r--r--NvCloth/src/ClothClone.h230
-rw-r--r--NvCloth/src/ClothImpl.h1393
-rw-r--r--NvCloth/src/Factory.cpp101
-rw-r--r--NvCloth/src/IndexPair.h45
-rw-r--r--NvCloth/src/IterationState.h405
-rw-r--r--NvCloth/src/MovingAverage.h151
-rw-r--r--NvCloth/src/NvSimd/NvSimd4f.h623
-rw-r--r--NvCloth/src/NvSimd/NvSimd4i.h365
-rw-r--r--NvCloth/src/NvSimd/NvSimdTypes.h226
-rw-r--r--NvCloth/src/NvSimd/neon/NvNeonSimd4f.h585
-rw-r--r--NvCloth/src/NvSimd/neon/NvNeonSimd4i.h303
-rw-r--r--NvCloth/src/NvSimd/neon/NvNeonSimdTypes.h71
-rw-r--r--NvCloth/src/NvSimd/scalar/NvScalarSimd4f.h464
-rw-r--r--NvCloth/src/NvSimd/scalar/NvScalarSimd4i.h272
-rw-r--r--NvCloth/src/NvSimd/scalar/NvScalarSimdTypes.h154
-rw-r--r--NvCloth/src/NvSimd/sse2/NvSse2Simd4f.h471
-rw-r--r--NvCloth/src/NvSimd/sse2/NvSse2Simd4i.h259
-rw-r--r--NvCloth/src/NvSimd/sse2/NvSse2SimdTypes.h95
-rw-r--r--NvCloth/src/PhaseConfig.cpp68
-rw-r--r--NvCloth/src/PointInterpolator.h167
-rw-r--r--NvCloth/src/Simd.h43
-rw-r--r--NvCloth/src/StackAllocator.h163
-rw-r--r--NvCloth/src/SwCloth.cpp321
-rw-r--r--NvCloth/src/SwCloth.h203
-rw-r--r--NvCloth/src/SwClothData.cpp155
-rw-r--r--NvCloth/src/SwClothData.h151
-rw-r--r--NvCloth/src/SwCollision.cpp1936
-rw-r--r--NvCloth/src/SwCollision.h139
-rw-r--r--NvCloth/src/SwCollisionHelpers.h84
-rw-r--r--NvCloth/src/SwFabric.cpp199
-rw-r--r--NvCloth/src/SwFabric.h108
-rw-r--r--NvCloth/src/SwFactory.cpp299
-rw-r--r--NvCloth/src/SwFactory.h93
-rw-r--r--NvCloth/src/SwInterCollision.cpp703
-rw-r--r--NvCloth/src/SwInterCollision.h142
-rw-r--r--NvCloth/src/SwSelfCollision.cpp416
-rw-r--r--NvCloth/src/SwSelfCollision.h83
-rw-r--r--NvCloth/src/SwSolver.cpp267
-rw-r--r--NvCloth/src/SwSolver.h136
-rw-r--r--NvCloth/src/SwSolverKernel.cpp851
-rw-r--r--NvCloth/src/SwSolverKernel.h84
-rw-r--r--NvCloth/src/TripletScheduler.cpp242
-rw-r--r--NvCloth/src/TripletScheduler.h55
-rw-r--r--NvCloth/src/Vec4T.h139
-rw-r--r--NvCloth/src/avx/SwSolveConstraints.cpp340
-rw-r--r--NvCloth/src/cuda/CuCheckSuccess.h44
-rw-r--r--NvCloth/src/cuda/CuCloth.cpp512
-rw-r--r--NvCloth/src/cuda/CuCloth.h223
-rw-r--r--NvCloth/src/cuda/CuClothClone.cpp86
-rw-r--r--NvCloth/src/cuda/CuClothData.cpp238
-rw-r--r--NvCloth/src/cuda/CuClothData.h192
-rw-r--r--NvCloth/src/cuda/CuCollision.h1572
-rw-r--r--NvCloth/src/cuda/CuContextLock.cpp54
-rw-r--r--NvCloth/src/cuda/CuContextLock.h57
-rw-r--r--NvCloth/src/cuda/CuDevicePointer.h215
-rw-r--r--NvCloth/src/cuda/CuDeviceVector.h224
-rw-r--r--NvCloth/src/cuda/CuFabric.cpp208
-rw-r--r--NvCloth/src/cuda/CuFabric.h105
-rw-r--r--NvCloth/src/cuda/CuFactory.cpp414
-rw-r--r--NvCloth/src/cuda/CuFactory.h109
-rw-r--r--NvCloth/src/cuda/CuPhaseConfig.h51
-rw-r--r--NvCloth/src/cuda/CuPinnedAllocator.h182
-rw-r--r--NvCloth/src/cuda/CuSelfCollision.h478
-rw-r--r--NvCloth/src/cuda/CuSolver.cpp677
-rw-r--r--NvCloth/src/cuda/CuSolver.h162
-rw-r--r--NvCloth/src/cuda/CuSolverKernel.cu1443
-rw-r--r--NvCloth/src/cuda/CuSolverKernel.h93
-rw-r--r--NvCloth/src/dx/DxBatchedVector.h335
-rw-r--r--NvCloth/src/dx/DxCheckSuccess.h63
-rw-r--r--NvCloth/src/dx/DxCloth.cpp533
-rw-r--r--NvCloth/src/dx/DxCloth.h229
-rw-r--r--NvCloth/src/dx/DxClothClone.cpp94
-rw-r--r--NvCloth/src/dx/DxClothData.cpp177
-rw-r--r--NvCloth/src/dx/DxClothData.h215
-rw-r--r--NvCloth/src/dx/DxContextLock.cpp63
-rw-r--r--NvCloth/src/dx/DxContextLock.h56
-rw-r--r--NvCloth/src/dx/DxDeviceVector.h388
-rw-r--r--NvCloth/src/dx/DxFabric.cpp208
-rw-r--r--NvCloth/src/dx/DxFabric.h96
-rw-r--r--NvCloth/src/dx/DxFactory.cpp478
-rw-r--r--NvCloth/src/dx/DxFactory.h160
-rw-r--r--NvCloth/src/dx/DxSolver.cpp542
-rw-r--r--NvCloth/src/dx/DxSolver.h148
-rw-r--r--NvCloth/src/dx/DxSolverKernel.hlsl1665
-rw-r--r--NvCloth/src/dx/DxSolverKernelBlob.h24502
-rw-r--r--NvCloth/src/dx/DxSortKernel.inc193
-rw-r--r--NvCloth/src/neon/NeonCollision.cpp34
-rw-r--r--NvCloth/src/neon/NeonSelfCollision.cpp34
-rw-r--r--NvCloth/src/neon/NeonSolverKernel.cpp49
-rw-r--r--NvCloth/src/neon/SwCollisionHelpers.h87
-rw-r--r--NvCloth/src/scalar/SwCollisionHelpers.h92
-rw-r--r--NvCloth/src/sse2/SwCollisionHelpers.h96
-rw-r--r--NvCloth/src/sse2/SwSolveConstraints.h134
97 files changed, 52225 insertions, 0 deletions
diff --git a/NvCloth/src/Allocator.cpp b/NvCloth/src/Allocator.cpp
new file mode 100644
index 0000000..33a593e
--- /dev/null
+++ b/NvCloth/src/Allocator.cpp
@@ -0,0 +1,48 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2017 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+
+#include "NvCloth/Allocator.h"
+#include "PsAllocator.h"
+
+namespace nv
+{
+
// Allocate n bytes through the user-registered allocator callback.
// A zero-sized request returns a null pointer so callers can pass sizes
// straight through without special-casing empty allocations.
// NOTE(review): __FILE__/__LINE__ always report this wrapper, not the real
// allocation site, so allocator diagnostics lose caller information.
void* cloth::allocate(size_t n)
{
	return n ? GetNvClothAllocator()->allocate(n, "", __FILE__, __LINE__) : 0;
}
+
// Return memory obtained from cloth::allocate() to the user allocator.
// Null pointers are ignored, matching allocate()'s null return for n == 0.
void cloth::deallocate(void* ptr)
{
	if (ptr)
		GetNvClothAllocator()->deallocate(ptr);
}
+
+}
diff --git a/NvCloth/src/BoundingBox.h b/NvCloth/src/BoundingBox.h
new file mode 100644
index 0000000..74bc0ff
--- /dev/null
+++ b/NvCloth/src/BoundingBox.h
@@ -0,0 +1,103 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2017 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#pragma once
+
+#include "Simd.h"
+
+namespace nv
+{
+
+namespace cloth
+{
+
// Axis-aligned bounding box stored as two SIMD corner vectors.
// All helpers below operate lane-wise on both corners.
template <typename Simd4f>
struct BoundingBox
{
	Simd4f mLower; // minimum corner
	Simd4f mUpper; // maximum corner
};
+
// Load a bounding box from 6 packed floats: xyz of the lower corner followed
// by xyz of the upper corner.
// NOTE(review): assuming load() is a 4-wide load, the two loads overlap
// (ptr[3] lands in both mLower.w and mUpper.x) and read ptr[0..6], so the
// w lanes hold neighboring data — callers must not rely on them. Confirm
// against the Simd4f load() implementation.
template <typename Simd4f>
inline BoundingBox<Simd4f> loadBounds(const float* ptr)
{
	BoundingBox<Simd4f> result;
	result.mLower = load(ptr);
	result.mUpper = load(ptr + 3);
	return result;
}
+
+template <typename Simd4f>
+inline BoundingBox<Simd4f> emptyBounds()
+{
+ BoundingBox<Simd4f> result;
+
+ result.mLower = gSimd4fFloatMax;
+ result.mUpper = -result.mLower;
+
+ return result;
+}
+
+template <typename Simd4f>
+inline BoundingBox<Simd4f> expandBounds(const BoundingBox<Simd4f>& bounds, const Simd4f* pIt, const Simd4f* pEnd)
+{
+ BoundingBox<Simd4f> result = bounds;
+ for (; pIt != pEnd; ++pIt)
+ {
+ result.mLower = min(result.mLower, *pIt);
+ result.mUpper = max(result.mUpper, *pIt);
+ }
+ return result;
+}
+
+template <typename Simd4f>
+inline BoundingBox<Simd4f> expandBounds(const BoundingBox<Simd4f>& a, const BoundingBox<Simd4f>& b)
+{
+ BoundingBox<Simd4f> result;
+ result.mLower = min(a.mLower, b.mLower);
+ result.mUpper = max(a.mUpper, b.mUpper);
+ return result;
+}
+
+template <typename Simd4f>
+inline BoundingBox<Simd4f> intersectBounds(const BoundingBox<Simd4f>& a, const BoundingBox<Simd4f>& b)
+{
+ BoundingBox<Simd4f> result;
+ result.mLower = max(a.mLower, b.mLower);
+ result.mUpper = min(a.mUpper, b.mUpper);
+ return result;
+}
+
+template <typename Simd4f>
+inline bool isEmptyBounds(const BoundingBox<Simd4f>& a)
+{
+ return anyGreater(a.mLower, a.mUpper) != 0;
+}
+}
+}
diff --git a/NvCloth/src/Callbacks.cpp b/NvCloth/src/Callbacks.cpp
new file mode 100644
index 0000000..22ba03d
--- /dev/null
+++ b/NvCloth/src/Callbacks.cpp
@@ -0,0 +1,133 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2017 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#include "NvCloth/Callbacks.h"
+#include "NvCloth/Allocator.h"
+#include <foundation/PxAllocatorCallback.h>
+#include <foundation/PxErrorCallback.h>
+#include <stdarg.h>
+#include <stdio.h>
+
+using namespace physx;
+
+namespace nv
+{
+namespace cloth
+{
+
// Globally registered user callbacks, populated once by InitializeNvCloth()
// and read through the accessors in this file.
struct NvClothContext
{
	PxAllocatorCallback* mAllocator;       // must be non-null (asserted in InitializeNvCloth)
	PxErrorCallback* mErrorCallback;       // may be null; LogFn drops messages if so
	PxAssertHandler* mAssertHandler;       // returned by GetNvClothAssertHandler()
	PxProfilerCallback* mProfilerCallback; // returned by GetNvClothProfiler()
};

// Zero-initialized static storage; all callbacks start out null until
// InitializeNvCloth() is called.
static NvClothContext sContext;
+
// Install the user-supplied callbacks. Must be called before any other
// library entry point that allocates (GetNvClothAllocator asserts otherwise).
// autoDllIDCheck guards against mixing headers and a DLL from different
// releases; the check lives inside the assert, so it only fires in builds
// where NV_CLOTH_ASSERT is enabled.
NV_CLOTH_API(void) InitializeNvCloth(PxAllocatorCallback* allocatorCallback, PxErrorCallback* errorCallback, PxAssertHandler* assertHandler, PxProfilerCallback* profilerCallback, int autoDllIDCheck)
{
	PX_UNUSED(autoDllIDCheck); // referenced only inside the assert below
	NV_CLOTH_ASSERT(("NvCloth dll id mismatch, ensure you compile with matching headers/run with matching dll.", NV_CLOTH_DLL_ID == autoDllIDCheck));
	NV_CLOTH_ASSERT(("NvCloth initialized with invalid allocator", allocatorCallback != nullptr));
	sContext.mAllocator = allocatorCallback;
	sContext.mErrorCallback = errorCallback;
	sContext.mAssertHandler = assertHandler;
	sContext.mProfilerCallback = profilerCallback;
}
+
+}
+}
+
// Accessor for the registered allocator; asserts (in assert-enabled builds)
// that InitializeNvCloth() was called first.
PxAllocatorCallback* GetNvClothAllocator()
{
	NV_CLOTH_ASSERT(("NvCloth used before calling InitializeNvCloth", nv::cloth::sContext.mAllocator != nullptr));
	return nv::cloth::sContext.mAllocator;
}
+
+
+namespace nv
+{
+namespace cloth
+{
+void LogFn(PxErrorCode::Enum errorLevel, const char* fileName, int lineNumber, const char* msg, va_list additionalArguments)
+{
+ if (!sContext.mErrorCallback)
+ return;
+ char buf[2048];
+#if PX_VC
+ //Pre VC2015 doesn't support the standard library properly.
+ _vsnprintf_s(buf, 2048, _TRUNCATE, msg, additionalArguments);
+#else
+ //This should also work with VC2015
+ vsnprintf(buf, 2048, msg, additionalArguments);
+#endif
+ sContext.mErrorCallback->reportError(errorLevel, buf, fileName, lineNumber);
+}
// printf-style entry point: report an internal error through LogFn.
void LogErrorFn(const char* fileName, int lineNumber, const char* msg, ...)
{
	va_list args;
	va_start(args, msg);
	LogFn(::PxErrorCode::eINTERNAL_ERROR, fileName, lineNumber, msg, args);
	va_end(args);
}
// printf-style entry point: report an invalid-parameter error through LogFn.
void LogInvalidParameterFn(const char* fileName, int lineNumber, const char* msg, ...)
{
	va_list args;
	va_start(args, msg);
	LogFn(::PxErrorCode::eINVALID_PARAMETER, fileName, lineNumber, msg, args);
	va_end(args);
}
// printf-style entry point: report a debug warning through LogFn.
void LogWarningFn(const char* fileName, int lineNumber, const char* msg, ...)
{
	va_list args;
	va_start(args, msg);
	LogFn(::PxErrorCode::eDEBUG_WARNING, fileName, lineNumber, msg, args);
	va_end(args);
}
// printf-style entry point: report a debug info message through LogFn.
void LogInfoFn(const char* fileName, int lineNumber, const char* msg, ...)
{
	va_list args;
	va_start(args, msg);
	LogFn(::PxErrorCode::eDEBUG_INFO, fileName, lineNumber, msg, args);
	va_end(args);
}
+
// Accessor for the registered assert handler. Unlike GetNvClothAllocator,
// this does not assert, so it returns null if InitializeNvCloth was not
// called or was passed a null handler.
NV_CLOTH_API(PxAssertHandler*) GetNvClothAssertHandler()
{
	return sContext.mAssertHandler;
}
+
// Accessor for the registered profiler callback; may return null, since the
// callback is optional and never validated.
PxProfilerCallback* GetNvClothProfiler()
{
	return sContext.mProfilerCallback;
}
+
+}//cloth
+}//nv
diff --git a/NvCloth/src/ClothBase.h b/NvCloth/src/ClothBase.h
new file mode 100644
index 0000000..8d75a72
--- /dev/null
+++ b/NvCloth/src/ClothBase.h
@@ -0,0 +1,131 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2017 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#pragma once
+
+namespace nv
+{
+namespace cloth
+{
+
+/* helper functions shared between SwCloth and CuCloth */
+
// Set every common simulation parameter of a freshly created cloth to its
// library default and compute the particle bounds from [pIt, pEnd).
// Shared by the SwCloth and CuCloth constructors (see file comment above).
template <typename Cloth>
void initialize(Cloth& cloth, const physx::PxVec4* pIt, const physx::PxVec4* pEnd)
{
	// initialize particles bounding box
	physx::PxVec4 lower = physx::PxVec4(FLT_MAX), upper = -lower;
	for (; pIt != pEnd; ++pIt)
	{
		lower = lower.minimum(*pIt);
		upper = upper.maximum(*pIt);
	}
	// store in center/half-extent form; the PxVec3 casts drop the w component
	physx::PxVec4 center = (upper + lower) * 0.5f;
	physx::PxVec4 extent = (upper - lower) * 0.5f;
	cloth.mParticleBoundsCenter = reinterpret_cast<const physx::PxVec3&>(center);
	cloth.mParticleBoundsHalfExtent = reinterpret_cast<const physx::PxVec3&>(extent);

	// external forces and damping/drag default to "no effect"
	cloth.mGravity = physx::PxVec3(0.0f);
	cloth.mLogDamping = physx::PxVec3(0.0f);
	cloth.mLinearLogDrag = physx::PxVec3(0.0f);
	cloth.mAngularLogDrag = physx::PxVec3(0.0f);
	// inertia scales of 1 = fully apply frame motion to particles
	cloth.mLinearInertia = physx::PxVec3(1.0f);
	cloth.mAngularInertia = physx::PxVec3(1.0f);
	cloth.mCentrifugalInertia = physx::PxVec3(1.0f);
	cloth.mSolverFrequency = 300.0f;
	cloth.mStiffnessFrequency = 10.0f;
	// local frame starts at identity with zero velocity
	cloth.mTargetMotion = physx::PxTransform(physx::PxIdentity);
	cloth.mCurrentMotion = physx::PxTransform(physx::PxIdentity);
	cloth.mLinearVelocity = physx::PxVec3(0.0f);
	cloth.mAngularVelocity = physx::PxVec3(0.0f);
	cloth.mPrevIterDt = 0.0f;
	cloth.mIterDtAvg = MovingAverage(30);
	// NOTE(review): -FLT_MAX_EXP appears to be the "disabled" sentinel for
	// log-encoded stiffness values — confirm against the solver's decoding.
	cloth.mTetherConstraintLogStiffness = float(-FLT_MAX_EXP);
	cloth.mTetherConstraintScale = 1.0f;
	cloth.mMotionConstraintScale = 1.0f;
	cloth.mMotionConstraintBias = 0.0f;
	cloth.mMotionConstraintLogStiffness = float(-FLT_MAX_EXP);
	// wind/aerodynamics off by default
	cloth.mWind = physx::PxVec3(0.0f);
	cloth.mDragLogCoefficient = 0.0f;
	cloth.mLiftLogCoefficient = 0.0f;
	cloth.mEnableContinuousCollision = false;
	cloth.mCollisionMassScale = 0.0f;
	cloth.mFriction = 0.0f;
	cloth.mSelfCollisionDistance = 0.0f;
	cloth.mSelfCollisionLogStiffness = float(-FLT_MAX_EXP);
	// uint32_t(-1) presumably means "never run the sleep test" — verify
	cloth.mSleepTestInterval = uint32_t(-1);
	cloth.mSleepAfterCount = uint32_t(-1);
	cloth.mSleepThreshold = 0.0f;
	cloth.mSleepPassCounter = 0;
	cloth.mSleepTestCounter = 0;
}
+
// Field-by-field copy of all common simulation state from one cloth to
// another; used when cloning a cloth across factories/backends.
// NOTE(review): this also transfers mUserData, which the initialize()
// helper in this header never sets — confirm user data is meant to follow
// a clone.
template <typename DstCloth, typename SrcCloth>
void copy(DstCloth& dstCloth, const SrcCloth& srcCloth)
{
	dstCloth.mParticleBoundsCenter = srcCloth.mParticleBoundsCenter;
	dstCloth.mParticleBoundsHalfExtent = srcCloth.mParticleBoundsHalfExtent;
	dstCloth.mGravity = srcCloth.mGravity;
	dstCloth.mLogDamping = srcCloth.mLogDamping;
	dstCloth.mLinearLogDrag = srcCloth.mLinearLogDrag;
	dstCloth.mAngularLogDrag = srcCloth.mAngularLogDrag;
	dstCloth.mLinearInertia = srcCloth.mLinearInertia;
	dstCloth.mAngularInertia = srcCloth.mAngularInertia;
	dstCloth.mCentrifugalInertia = srcCloth.mCentrifugalInertia;
	dstCloth.mSolverFrequency = srcCloth.mSolverFrequency;
	dstCloth.mStiffnessFrequency = srcCloth.mStiffnessFrequency;
	dstCloth.mTargetMotion = srcCloth.mTargetMotion;
	dstCloth.mCurrentMotion = srcCloth.mCurrentMotion;
	dstCloth.mLinearVelocity = srcCloth.mLinearVelocity;
	dstCloth.mAngularVelocity = srcCloth.mAngularVelocity;
	dstCloth.mPrevIterDt = srcCloth.mPrevIterDt;
	dstCloth.mIterDtAvg = srcCloth.mIterDtAvg;
	dstCloth.mTetherConstraintLogStiffness = srcCloth.mTetherConstraintLogStiffness;
	dstCloth.mTetherConstraintScale = srcCloth.mTetherConstraintScale;
	dstCloth.mMotionConstraintScale = srcCloth.mMotionConstraintScale;
	dstCloth.mMotionConstraintBias = srcCloth.mMotionConstraintBias;
	dstCloth.mMotionConstraintLogStiffness = srcCloth.mMotionConstraintLogStiffness;
	dstCloth.mWind = srcCloth.mWind;
	dstCloth.mDragLogCoefficient = srcCloth.mDragLogCoefficient;
	dstCloth.mLiftLogCoefficient = srcCloth.mLiftLogCoefficient;
	dstCloth.mEnableContinuousCollision = srcCloth.mEnableContinuousCollision;
	dstCloth.mCollisionMassScale = srcCloth.mCollisionMassScale;
	dstCloth.mFriction = srcCloth.mFriction;
	dstCloth.mSelfCollisionDistance = srcCloth.mSelfCollisionDistance;
	dstCloth.mSelfCollisionLogStiffness = srcCloth.mSelfCollisionLogStiffness;
	dstCloth.mSleepTestInterval = srcCloth.mSleepTestInterval;
	dstCloth.mSleepAfterCount = srcCloth.mSleepAfterCount;
	dstCloth.mSleepThreshold = srcCloth.mSleepThreshold;
	dstCloth.mSleepPassCounter = srcCloth.mSleepPassCounter;
	dstCloth.mSleepTestCounter = srcCloth.mSleepTestCounter;
	dstCloth.mUserData = srcCloth.mUserData;
}
+
+} // namespace cloth
+} // namespace nv
diff --git a/NvCloth/src/ClothClone.h b/NvCloth/src/ClothClone.h
new file mode 100644
index 0000000..bdfdb2d
--- /dev/null
+++ b/NvCloth/src/ClothClone.h
@@ -0,0 +1,230 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2017 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#pragma once
+
+#include "../SwFactory.h"
+#include "../SwFabric.h"
+#include "../SwCloth.h"
+
+#include "../ClothImpl.h"
+#include "../ClothBase.h"
+#include "NvCloth/Allocator.h"
+
+namespace nv
+{
+namespace cloth
+{
+class DxFactory;
+class CuFactory;
+
+// make range from vector
+template <typename T, typename A>
+Range<T> makeRange(physx::shdfnd::Array<T, A>& vec)
+{
+ T* ptr = vec.empty() ? 0 : vec.begin();
+ return Range<T>(ptr, ptr + vec.size());
+}
+
+template <typename T, typename A>
+Range<const T> makeRange(const physx::shdfnd::Array<T, A>& vec)
+{
+ const T* ptr = vec.empty() ? 0 : vec.begin();
+ return Range<const T>(ptr, ptr + vec.size());
+}
+
+// fabric conversion
// Convert (or share) a fabric for use with another factory backend.
// If dstFactory already owns a fabric with the same id, its refcount is
// bumped and it is reused; otherwise all fabric data is extracted from the
// source factory and a new fabric is created in the destination factory.
// Ownership: the returned fabric carries one reference for the caller
// (either via incRefCount or via creation).
template <typename SrcClothType, typename DstFactoryType>
typename DstFactoryType::FabricType* convertFabric(const SrcClothType& srcFabric, DstFactoryType& dstFactory)
{
	typedef typename DstFactoryType::FabricType DstFabricType;

	// see if dstFactory already has a Fabric with this id
	DstFabricType* const* fIt = dstFactory.mFabrics.begin();
	DstFabricType* const* fEnd = dstFactory.mFabrics.end();
	for (; fIt != fEnd; ++fIt)
		if ((*fIt)->mId == srcFabric.mId)
		{
			(*fIt)->incRefCount();
			return *fIt; // found id, return existing fabric
		}

	// fabric does not exist so create a new one
	// scratch buffers sized from the source fabric's element counts
	Vector<uint32_t>::Type phaseIndices(srcFabric.getNumPhases());
	Vector<uint32_t>::Type sets(srcFabric.getNumSets());
	Vector<float>::Type restvalues(srcFabric.getNumRestvalues());
	Vector<float>::Type stiffnessValues(srcFabric.getNumStiffnessValues());
	Vector<uint32_t>::Type indices(srcFabric.getNumIndices());
	Vector<uint32_t>::Type anchors(srcFabric.getNumTethers());
	Vector<float>::Type tetherLengths(srcFabric.getNumTethers());
	Vector<uint32_t>::Type triangles(srcFabric.getNumTriangles() * 3); // 3 indices per triangle

	Range<uint32_t> phaseIndexRange = makeRange(phaseIndices);
	Range<float> restvalueRange = makeRange(restvalues);
	Range<float> stiffnessValueRange = makeRange(stiffnessValues);
	Range<uint32_t> setRange = makeRange(sets);
	Range<uint32_t> indexRange = makeRange(indices);
	Range<uint32_t> anchorRange = makeRange(anchors);
	Range<float> lengthRange = makeRange(tetherLengths);
	Range<uint32_t> triangleRange = makeRange(triangles);

	// pull the data out of the source factory...
	srcFabric.mFactory.extractFabricData(srcFabric, phaseIndexRange, setRange, restvalueRange, stiffnessValueRange, indexRange, anchorRange,
	                                     lengthRange, triangleRange);

	// ...and build the destination fabric from it
	DstFabricType* dstFabric =
	    static_cast<DstFabricType*>(dstFactory.createFabric(srcFabric.mNumParticles, phaseIndexRange, setRange, restvalueRange, stiffnessValueRange,
	                                                        indexRange, anchorRange, lengthRange, triangleRange));

	// give new fabric the same id as the source so it can be matched
	dstFabric->mId = srcFabric.mId;

	return dstFabric;
}
+
// Thin accessors giving convertCloth() access to SwCloth internals that are
// not part of the public Cloth interface.
inline Range<const PhaseConfig> getPhaseConfigs(const SwCloth& cloth)
{
	return makeRange(cloth.mPhaseConfigs);
}
// Replace the cloth's phase configs with a copy of the given range.
inline void setPhaseConfigs(SwCloth& cloth, Range<const PhaseConfig> phaseConfigs)
{
	cloth.mPhaseConfigs.assign(phaseConfigs.begin(), phaseConfigs.end());
}
// Read-only view of the per-particle acceleration buffer.
inline Range<const physx::PxVec4> getParticleAccelerations(const SwCloth& cloth)
{
	return makeRange(cloth.mParticleAccelerations);
}
// Read-only view of the self-collision particle index buffer.
inline Range<const uint32_t> getSelfCollisionIndices(const SwCloth& cloth)
{
	return makeRange(cloth.mSelfCollisionIndices);
}
+
+// cloth conversion
// Clone a cloth into another factory backend (e.g. SW -> CUDA/DX).
// Extracts every piece of per-cloth state from the source via its factory and
// replays it into a newly created destination cloth. The fabric is converted
// (or shared) first; the extra reference from convertFabric is released once
// the new cloth holds its own.
// Returns the new cloth impl; ownership passes to the caller.
template <typename DstFactoryType, typename SrcImplType>
typename DstFactoryType::ImplType* convertCloth(DstFactoryType& dstFactory, const SrcImplType& srcImpl)
{
	typedef typename DstFactoryType::FabricType DstFabricType;
	typedef typename DstFactoryType::ImplType DstImplType;
	typedef typename DstImplType::ClothType DstClothType;
	typedef typename SrcImplType::ClothType SrcClothType;

	const SrcClothType& srcCloth = srcImpl.mCloth;
	const Factory& srcFactory = srcCloth.mFactory;

	// acquire both backends' context locks for the duration of the clone
	typename DstClothType::ContextLockType dstLock(dstFactory);
	typename SrcClothType::ContextLockType srcLock(srcCloth.mFactory);

	// particles
	MappedRange<const physx::PxVec4> curParticles = srcImpl.getCurrentParticles();

	// fabric
	DstFabricType& dstFabric = *convertFabric(srcCloth.mFabric, dstFactory);

	// create new cloth
	DstImplType* dstImpl = static_cast<DstImplType*>(dstFactory.createCloth(curParticles, dstFabric));
	DstClothType& dstCloth = dstImpl->mCloth;
	dstFabric.decRefCount(); // createCloth took its own reference

	// copy across common parameters
	copy(dstCloth, srcCloth);

	// copy across previous particles
	MappedRange<const physx::PxVec4> prevParticles = srcImpl.getPreviousParticles();
	memcpy(dstImpl->getPreviousParticles().begin(), prevParticles.begin(), prevParticles.size() * sizeof(physx::PxVec4));

	// copy across transformed phase configs
	setPhaseConfigs(dstCloth, getPhaseConfigs(srcCloth));

	// collision data: extract into scratch buffers, then replay the setters
	Vector<physx::PxVec4>::Type spheres(srcImpl.getNumSpheres(), physx::PxVec4(0.0f));
	physx::PxVec4* spherePtr = spheres.empty() ? 0 : &spheres.front();
	Range<physx::PxVec4> sphereRange(spherePtr, spherePtr + spheres.size());
	Vector<uint32_t>::Type capsules(srcImpl.getNumCapsules() * 2); // 2 sphere indices per capsule
	Range<uint32_t> capsuleRange = makeRange(capsules);
	Vector<physx::PxVec4>::Type planes(srcImpl.getNumPlanes(), physx::PxVec4(0.0f));
	physx::PxVec4* planePtr = planes.empty() ? 0 : &planes.front();
	Range<physx::PxVec4> planeRange(planePtr, planePtr + planes.size());
	Vector<uint32_t>::Type convexes(srcImpl.getNumConvexes());
	Range<uint32_t> convexRange = makeRange(convexes);
	Vector<physx::PxVec3>::Type triangles(srcImpl.getNumTriangles() * 3, physx::PxVec3(0.0f));
	physx::PxVec3* trianglePtr = triangles.empty() ? 0 : &triangles.front();
	Range<physx::PxVec3> triangleRange(trianglePtr, trianglePtr + triangles.size());

	srcFactory.extractCollisionData(srcImpl, sphereRange, capsuleRange, planeRange, convexRange, triangleRange);
	dstImpl->setSpheres(sphereRange, 0, 0);
	dstImpl->setCapsules(capsuleRange, 0, 0);
	dstImpl->setPlanes(planeRange, 0, 0);
	dstImpl->setConvexes(convexRange, 0, 0);
	dstImpl->setTriangles(triangleRange, 0, 0);

	// motion constraints, copy directly into new cloth buffer
	if (srcImpl.getNumMotionConstraints())
		srcFactory.extractMotionConstraints(srcImpl, dstImpl->getMotionConstraints());

	// separation constraints, copy directly into new cloth buffer
	if (srcImpl.getNumSeparationConstraints())
		srcFactory.extractSeparationConstraints(srcImpl, dstImpl->getSeparationConstraints());

	// particle accelerations
	if (srcImpl.getNumParticleAccelerations())
	{
		Range<const physx::PxVec4> accelerations = getParticleAccelerations(srcCloth);
		memcpy(dstImpl->getParticleAccelerations().begin(), accelerations.begin(),
		       accelerations.size() * sizeof(physx::PxVec4));
	}

	// self-collision indices
	dstImpl->setSelfCollisionIndices(getSelfCollisionIndices(srcCloth));

	// rest positions
	Vector<physx::PxVec4>::Type restPositions(srcImpl.getNumRestPositions());
	srcFactory.extractRestPositions(srcImpl, makeRange(restPositions));
	dstImpl->setRestPositions(makeRange(restPositions));

	// virtual particles
	if (srcImpl.getNumVirtualParticles())
	{
		Vector<Vec4u>::Type indices(srcImpl.getNumVirtualParticles());
		Vector<physx::PxVec3>::Type weights(srcImpl.getNumVirtualParticleWeights(), physx::PxVec3(0.0f));

		uint32_t(*indicesPtr)[4] = indices.empty() ? 0 : &array(indices.front());
		Range<uint32_t[4]> indicesRange(indicesPtr, indicesPtr + indices.size());

		physx::PxVec3* weightsPtr = weights.empty() ? 0 : &weights.front();
		Range<physx::PxVec3> weightsRange(weightsPtr, weightsPtr + weights.size());

		srcFactory.extractVirtualParticles(srcImpl, indicesRange, weightsRange);

		dstImpl->setVirtualParticles(indicesRange, weightsRange);
	}

	return dstImpl;
}
+
+} // namespace cloth
+} // namespace nv
diff --git a/NvCloth/src/ClothImpl.h b/NvCloth/src/ClothImpl.h
new file mode 100644
index 0000000..85d201a
--- /dev/null
+++ b/NvCloth/src/ClothImpl.h
@@ -0,0 +1,1393 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2017 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#pragma once
+
+#include "NvCloth/Cloth.h"
+#include "NvCloth/Fabric.h"
+#include <foundation/PxVec4.h>
+#include <foundation/PxVec3.h>
+#include <PsMathUtils.h>
+#include <cmath>
+
+namespace nv
+{
+namespace cloth
+{
+
+// SwCloth or CuCloth aggregate implementing the Cloth interface
+// Member specializations are implemented in Sw/CuCloth.cpp
+template <typename T>
+class ClothImpl : public Cloth
+{
+ ClothImpl(const ClothImpl&);
+
+ public:
+ ClothImpl& operator = (const ClothImpl&);
+
+ typedef T ClothType;
+ typedef typename ClothType::FactoryType FactoryType;
+ typedef typename ClothType::FabricType FabricType;
+ typedef typename ClothType::ContextLockType ContextLockType;
+
+ ClothImpl(Factory&, Fabric&, Range<const physx::PxVec4>);
+ ClothImpl(Factory&, const ClothImpl&);
+
+ virtual Cloth* clone(Factory& factory) const;
+
+ virtual Fabric& getFabric() const;
+ virtual Factory& getFactory() const;
+
+ virtual uint32_t getNumParticles() const;
+ virtual void lockParticles() const;
+ virtual void unlockParticles() const;
+ virtual MappedRange<physx::PxVec4> getCurrentParticles();
+ virtual MappedRange<const physx::PxVec4> getCurrentParticles() const;
+ virtual MappedRange<physx::PxVec4> getPreviousParticles();
+ virtual MappedRange<const physx::PxVec4> getPreviousParticles() const;
+ virtual GpuParticles getGpuParticles();
+
+ virtual void setTranslation(const physx::PxVec3& trans);
+ virtual void setRotation(const physx::PxQuat& rot);
+
+ virtual const physx::PxVec3& getTranslation() const;
+ virtual const physx::PxQuat& getRotation() const;
+
+ virtual void clearInertia();
+
+ virtual void teleport(const physx::PxVec3& delta);
+
+ virtual float getPreviousIterationDt() const;
+ virtual void setGravity(const physx::PxVec3& gravity);
+ virtual physx::PxVec3 getGravity() const;
+ virtual void setDamping(const physx::PxVec3& damping);
+ virtual physx::PxVec3 getDamping() const;
+ virtual void setLinearDrag(const physx::PxVec3& drag);
+ virtual physx::PxVec3 getLinearDrag() const;
+ virtual void setAngularDrag(const physx::PxVec3& drag);
+ virtual physx::PxVec3 getAngularDrag() const;
+ virtual void setLinearInertia(const physx::PxVec3& inertia);
+ virtual physx::PxVec3 getLinearInertia() const;
+ virtual void setAngularInertia(const physx::PxVec3& inertia);
+ virtual physx::PxVec3 getAngularInertia() const;
+ virtual void setCentrifugalInertia(const physx::PxVec3& inertia);
+ virtual physx::PxVec3 getCentrifugalInertia() const;
+
+ virtual void setSolverFrequency(float frequency);
+ virtual float getSolverFrequency() const;
+
+ virtual void setStiffnessFrequency(float frequency);
+ virtual float getStiffnessFrequency() const;
+
+ virtual void setAcceleationFilterWidth(uint32_t);
+ virtual uint32_t getAccelerationFilterWidth() const;
+
+ virtual void setPhaseConfig(Range<const PhaseConfig> configs);
+
+ virtual void setSpheres(Range<const physx::PxVec4>, uint32_t first, uint32_t last);
+ virtual uint32_t getNumSpheres() const;
+
+ virtual void setCapsules(Range<const uint32_t>, uint32_t first, uint32_t last);
+ virtual uint32_t getNumCapsules() const;
+
+ virtual void setPlanes(Range<const physx::PxVec4>, uint32_t first, uint32_t last);
+ virtual uint32_t getNumPlanes() const;
+
+ virtual void setConvexes(Range<const uint32_t>, uint32_t first, uint32_t last);
+ virtual uint32_t getNumConvexes() const;
+
+ virtual void setTriangles(Range<const physx::PxVec3>, uint32_t first, uint32_t last);
+ virtual void setTriangles(Range<const physx::PxVec3>, Range<const physx::PxVec3>, uint32_t first);
+ virtual uint32_t getNumTriangles() const;
+
+ virtual bool isContinuousCollisionEnabled() const;
+ virtual void enableContinuousCollision(bool);
+
+ virtual float getCollisionMassScale() const;
+ virtual void setCollisionMassScale(float);
+ virtual void setFriction(float friction);
+ virtual float getFriction() const;
+
+ virtual void setVirtualParticles(Range<const uint32_t[4]>, Range<const physx::PxVec3>);
+ virtual uint32_t getNumVirtualParticles() const;
+ virtual uint32_t getNumVirtualParticleWeights() const;
+
+ virtual void setTetherConstraintScale(float scale);
+ virtual float getTetherConstraintScale() const;
+ virtual void setTetherConstraintStiffness(float stiffness);
+ virtual float getTetherConstraintStiffness() const;
+
+ virtual Range<physx::PxVec4> getMotionConstraints();
+ virtual void clearMotionConstraints();
+ virtual uint32_t getNumMotionConstraints() const;
+ virtual void setMotionConstraintScaleBias(float scale, float bias);
+ virtual float getMotionConstraintScale() const;
+ virtual float getMotionConstraintBias() const;
+ virtual void setMotionConstraintStiffness(float stiffness);
+ virtual float getMotionConstraintStiffness() const;
+
+ virtual Range<physx::PxVec4> getSeparationConstraints();
+ virtual void clearSeparationConstraints();
+ virtual uint32_t getNumSeparationConstraints() const;
+
+ virtual void clearInterpolation();
+
+ virtual Range<physx::PxVec4> getParticleAccelerations();
+ virtual void clearParticleAccelerations();
+ virtual uint32_t getNumParticleAccelerations() const;
+
+ virtual void setWindVelocity(physx::PxVec3);
+ virtual physx::PxVec3 getWindVelocity() const;
+ virtual void setDragCoefficient(float);
+ virtual float getDragCoefficient() const;
+ virtual void setLiftCoefficient(float);
+ virtual float getLiftCoefficient() const;
+
+ virtual void setSelfCollisionDistance(float);
+ virtual float getSelfCollisionDistance() const;
+ virtual void setSelfCollisionStiffness(float);
+ virtual float getSelfCollisionStiffness() const;
+
+ virtual void setSelfCollisionIndices(Range<const uint32_t>);
+ virtual uint32_t getNumSelfCollisionIndices() const;
+
+ virtual void setRestPositions(Range<const physx::PxVec4>);
+ virtual uint32_t getNumRestPositions() const;
+
+ virtual const physx::PxVec3& getBoundingBoxCenter() const;
+ virtual const physx::PxVec3& getBoundingBoxScale() const;
+
+ virtual void setSleepThreshold(float);
+ virtual float getSleepThreshold() const;
+ virtual void setSleepTestInterval(uint32_t);
+ virtual uint32_t getSleepTestInterval() const;
+ virtual void setSleepAfterCount(uint32_t);
+ virtual uint32_t getSleepAfterCount() const;
+ virtual uint32_t getSleepPassCount() const;
+ virtual bool isAsleep() const;
+ virtual void putToSleep();
+ virtual void wakeUp();
+
+ virtual void setUserData(void*);
+ virtual void* getUserData() const;
+
+ // helper function
+ template <typename U>
+ MappedRange<U> getMappedParticles(U* data) const;
+
+ ClothType mCloth;
+};
+
// Forward declarations of the per-backend cloth types; each ClothImpl
// instantiation wraps exactly one of these.
class SwCloth;
typedef ClothImpl<SwCloth> SwClothImpl;  // software (CPU) backend

class CuCloth;
typedef ClothImpl<CuCloth> CuClothImpl;  // presumably the CUDA backend

class DxCloth;
typedef ClothImpl<DxCloth> DxClothImpl;  // presumably the DirectCompute backend

// Construct a cloth from a fabric and the initial particle array
// (PxVec4 per particle; w is presumably the inverse mass — confirm in Cloth.h).
template <typename T>
ClothImpl<T>::ClothImpl(Factory& factory, Fabric& fabric, Range<const physx::PxVec4> particles)
: mCloth(static_cast<FactoryType&>(factory), static_cast<FabricType&>(fabric), particles)
{
	// fabric and cloth need to be created by the same factory
	NV_CLOTH_ASSERT(&fabric.getFactory() == &factory);
}

// Copy-construct (clone) an existing cloth, possibly into another factory of
// the same backend type.
template <typename T>
ClothImpl<T>::ClothImpl(Factory& factory, const ClothImpl& impl)
: mCloth(static_cast<FactoryType&>(factory), impl.mCloth)
{
}
+
// Returns the fabric (shared constraint topology) this cloth was created with.
template <typename T>
inline Fabric& ClothImpl<T>::getFabric() const
{
	return mCloth.mFabric;
}

// Returns the factory that created this cloth.
template <typename T>
inline Factory& ClothImpl<T>::getFactory() const
{
	return mCloth.mFactory;
}
+
+template <typename T>
+inline void ClothImpl<T>::setTranslation(const physx::PxVec3& trans)
+{
+ physx::PxVec3 t = reinterpret_cast<const physx::PxVec3&>(trans);
+ if (t == mCloth.mTargetMotion.p)
+ return;
+
+ mCloth.mTargetMotion.p = t;
+ mCloth.wakeUp();
+}
+
// Sets the target rotation of the cloth's local frame.
template <typename T>
inline void ClothImpl<T>::setRotation(const physx::PxQuat& q)
{
	// exact-equality early out: only a bit-identical quaternion skips the update
	if ((q - mCloth.mTargetMotion.q).magnitudeSquared() == 0.0f)
		return;

	mCloth.mTargetMotion.q = q;
	mCloth.wakeUp();
}

// Returns the target translation of the local frame.
template <typename T>
inline const physx::PxVec3& ClothImpl<T>::getTranslation() const
{
	return mCloth.mTargetMotion.p;
}

// Returns the target rotation of the local frame.
template <typename T>
inline const physx::PxQuat& ClothImpl<T>::getRotation() const
{
	return mCloth.mTargetMotion.q;
}

// Snaps the current pose to the target pose and zeroes the frame velocities,
// so the pending transform change produces no inertial effect on particles.
template <typename T>
inline void ClothImpl<T>::clearInertia()
{
	mCloth.mCurrentMotion = mCloth.mTargetMotion;
	mCloth.mLinearVelocity = physx::PxVec3(0.0);
	mCloth.mAngularVelocity = physx::PxVec3(0.0);

	mCloth.wakeUp();
}

// Fixed 4505:local function has been removed
// Shifts both the current and target frame by delta, so the move generates no
// inertia. NOTE(review): unlike the other mutators this does not call
// wakeUp() — confirm a sleeping cloth is meant to stay asleep on teleport.
template <typename T>
inline void ClothImpl<T>::teleport(const physx::PxVec3& delta)
{
	mCloth.mCurrentMotion.p += delta;
	mCloth.mTargetMotion.p += delta;
}
+
// Returns the iteration time step used in the previous simulation frame.
template <typename T>
inline float ClothImpl<T>::getPreviousIterationDt() const
{
	return mCloth.mPrevIterDt;
}

// Sets the gravity acceleration applied to the particles.
template <typename T>
inline void ClothImpl<T>::setGravity(const physx::PxVec3& gravity)
{
	physx::PxVec3 value = gravity;
	if (value == mCloth.mGravity)
		return;

	mCloth.mGravity = value;
	mCloth.wakeUp();
}

template <typename T>
inline physx::PxVec3 ClothImpl<T>::getGravity() const
{
	return mCloth.mGravity;
}
+
+inline float safeLog2(float x)
+{
+ return x ? physx::shdfnd::log2(x) : -FLT_MAX_EXP;
+}
+
+inline physx::PxVec3 safeLog2(const physx::PxVec3& v)
+{
+ return physx::PxVec3( safeLog2(v.x), safeLog2(v.y), safeLog2(v.z) );
+}
+
+inline float safeExp2(float x)
+{
+ if (x <= -FLT_MAX_EXP)
+ return 0.0f;
+ else
+ return physx::shdfnd::exp2(x);
+}
+
+inline physx::PxVec3 safeExp2(const physx::PxVec3& v)
+{
+ return physx::PxVec3( safeExp2(v.x), safeExp2(v.y), safeExp2(v.z) );
+}
+
// Damping and drag are stored as log2(1 - value) (see safeLog2/safeExp2);
// the getters invert the encoding. Presumably the log space lets the solver
// scale the coefficient with dt via exp2 — the solver side is not visible here.
template <typename T>
inline void ClothImpl<T>::setDamping(const physx::PxVec3& damping)
{
	physx::PxVec3 value = safeLog2(physx::PxVec3(1.f) - damping);
	if (value == mCloth.mLogDamping)
		return;

	mCloth.mLogDamping = value;
	mCloth.wakeUp();
}

template <typename T>
inline physx::PxVec3 ClothImpl<T>::getDamping() const
{
	// inverse of setDamping's log2 encoding
	return physx::PxVec3(1.f) - safeExp2(mCloth.mLogDamping);
}

// Linear drag, same log2 encoding as damping.
template <typename T>
inline void ClothImpl<T>::setLinearDrag(const physx::PxVec3& drag)
{
	physx::PxVec3 value = safeLog2(physx::PxVec3(1.f) - drag);
	if (value == mCloth.mLinearLogDrag)
		return;

	mCloth.mLinearLogDrag = value;
	mCloth.wakeUp();
}

template <typename T>
inline physx::PxVec3 ClothImpl<T>::getLinearDrag() const
{
	return physx::PxVec3(1.f) - safeExp2(mCloth.mLinearLogDrag);
}

// Angular drag, same log2 encoding as damping.
template <typename T>
inline void ClothImpl<T>::setAngularDrag(const physx::PxVec3& drag)
{
	physx::PxVec3 value = safeLog2(physx::PxVec3(1.f) - drag);
	if (value == mCloth.mAngularLogDrag)
		return;

	mCloth.mAngularLogDrag = value;
	mCloth.wakeUp();
}

template <typename T>
inline physx::PxVec3 ClothImpl<T>::getAngularDrag() const
{
	return physx::PxVec3(1.f) - safeExp2(mCloth.mAngularLogDrag);
}
+
// Inertia settings: per-axis factors stored for the solver (exact use is in
// the solver, not visible here). All three follow the same set/get pattern
// with a bit-identical early out.
template <typename T>
inline void ClothImpl<T>::setLinearInertia(const physx::PxVec3& inertia)
{
	physx::PxVec3 value = inertia;
	if (value == mCloth.mLinearInertia)
		return;

	mCloth.mLinearInertia = value;
	mCloth.wakeUp();
}

template <typename T>
inline physx::PxVec3 ClothImpl<T>::getLinearInertia() const
{
	return mCloth.mLinearInertia;
}

template <typename T>
inline void ClothImpl<T>::setAngularInertia(const physx::PxVec3& inertia)
{
	physx::PxVec3 value = inertia;
	if (value == mCloth.mAngularInertia)
		return;

	mCloth.mAngularInertia = value;
	mCloth.wakeUp();
}

template <typename T>
inline physx::PxVec3 ClothImpl<T>::getAngularInertia() const
{
	return mCloth.mAngularInertia;
}

template <typename T>
inline void ClothImpl<T>::setCentrifugalInertia(const physx::PxVec3& inertia)
{
	physx::PxVec3 value = inertia;
	if (value == mCloth.mCentrifugalInertia)
		return;

	mCloth.mCentrifugalInertia = value;
	mCloth.wakeUp();
}

template <typename T>
inline physx::PxVec3 ClothImpl<T>::getCentrifugalInertia() const
{
	return mCloth.mCentrifugalInertia;
}
+
// Solver frequency; changing it marks the cached cloth cost dirty and resets
// the iteration-dt moving average.
template <typename T>
inline void ClothImpl<T>::setSolverFrequency(float frequency)
{
	if (frequency == mCloth.mSolverFrequency)
		return;

	mCloth.mSolverFrequency = frequency;
	mCloth.mClothCostDirty = true;
	mCloth.mIterDtAvg.reset();
	mCloth.wakeUp();
}

template <typename T>
inline float ClothImpl<T>::getSolverFrequency() const
{
	return mCloth.mSolverFrequency;
}

// Stiffness frequency; stored as-is, read by the solver (not visible here).
template <typename T>
inline void ClothImpl<T>::setStiffnessFrequency(float frequency)
{
	if (frequency == mCloth.mStiffnessFrequency)
		return;

	mCloth.mStiffnessFrequency = frequency;
	mCloth.wakeUp();
}

template <typename T>
inline float ClothImpl<T>::getStiffnessFrequency() const
{
	return mCloth.mStiffnessFrequency;
}

// Width of the moving-average filter over iteration time steps.
// (The setter name carries an interface typo: 'Acceleation'.)
template <typename T>
inline void ClothImpl<T>::setAcceleationFilterWidth(uint32_t n)
{
	mCloth.mIterDtAvg.resize(n);
}

template <typename T>
inline uint32_t ClothImpl<T>::getAccelerationFilterWidth() const
{
	return mCloth.mIterDtAvg.size();
}
+
// move a subarray
// Copies elements [first, last) to position 'result', handling overlap like
// memmove: copies backward when shifting up, forward when shifting down.
template <typename Iter>
void move(Iter it, uint32_t first, uint32_t last, uint32_t result)
{
	if (result > first)
	{
		// shifting up: walk backwards so overlapping source isn't clobbered
		uint32_t src = last;
		uint32_t dst = result + (last - first);
		while (src > first)
			it[--dst] = it[--src];
	}
	else
	{
		// shifting down (or no overlap): walk forwards
		uint32_t src = first;
		uint32_t dst = result;
		while (src < last)
			it[dst++] = it[src++];
	}
}
+
// update capsule index
// Shifts an index at or above 'first' by 'delta'; returns true when the
// shifted index falls below 'first', i.e. it referenced a removed element.
inline bool updateIndex(uint32_t& index, uint32_t first, int32_t delta)
{
	if (index < first)
		return false; // precedes the edited range: left untouched

	index += delta; // unsigned wrap matches signed addition for negative delta
	return int32_t(index) < int32_t(first);
}
+
// Replaces collision spheres [first, last) with 'spheres' (xyz = center,
// w = radius). When spheres already exist, the new positions are written to
// the target buffer (interpolated towards over the next frame); on first use
// they are written directly to the start buffer.
template <typename T>
inline void ClothImpl<T>::setSpheres(Range<const physx::PxVec4> spheres, uint32_t first, uint32_t last)
{
	uint32_t oldSize = uint32_t(mCloth.mStartCollisionSpheres.size());
	// total count after replacing [first, last) with 'spheres'
	uint32_t newSize = uint32_t(spheres.size()) + oldSize - last + first;

	NV_CLOTH_ASSERT(newSize <= 32); // sphere count limited to 32
	NV_CLOTH_ASSERT(first <= oldSize);
	NV_CLOTH_ASSERT(last <= oldSize);

#if PX_DEBUG
	// radii (w component) must be non-negative
	for (const physx::PxVec4* it = spheres.begin(); it < spheres.end(); ++it)
		NV_CLOTH_ASSERT(it->w >= 0.0f);
#endif

	if (!oldSize && !newSize)
		return;

	if (!oldSize)
	{
		// no spheres yet: write directly into the start buffer
		ContextLockType contextLock(mCloth.mFactory);
		mCloth.mStartCollisionSpheres.assign(spheres.begin(), spheres.end());
		mCloth.notifyChanged();
	}
	else
	{
		// reserve under the context lock if either buffer would reallocate
		if (std::max(oldSize, newSize) >
		    std::min(mCloth.mStartCollisionSpheres.capacity(), mCloth.mTargetCollisionSpheres.capacity()))
		{
			ContextLockType contextLock(mCloth.mFactory);
			mCloth.mStartCollisionSpheres.reserve(newSize);
			mCloth.mTargetCollisionSpheres.reserve(std::max(oldSize, newSize));
		}

		typename T::MappedVec4fVectorType start = mCloth.mStartCollisionSpheres;
		typename T::MappedVec4fVectorType target = mCloth.mTargetCollisionSpheres;

		// fill target from start
		for (uint32_t i = uint32_t(target.size()); i < oldSize; ++i)
			target.pushBack(start[i]);

		// resize to larger of oldSize and newSize
		start.resize(std::max(oldSize, newSize), physx::PxVec4(0.0f));
		target.resize(std::max(oldSize, newSize), physx::PxVec4(0.0f));

		if (int32_t delta = int32_t(newSize - oldSize))
		{
			// move past-range elements to new place
			move(start.begin(), last, oldSize, last + delta);
			move(target.begin(), last, oldSize, last + delta);

			// fill new elements from spheres; for negative delta the unsigned
			// bound last + delta is below last, so this loop is skipped
			for (uint32_t i = last; i < last + delta; ++i)
				start[i] = spheres[i - first];

			// adjust capsule indices; capsules that referenced a removed
			// sphere are deleted via replaceWithLast
			typename T::MappedIndexVectorType indices = mCloth.mCapsuleIndices;
			Vector<IndexPair>::Type::Iterator cIt, cEnd = indices.end();
			for (cIt = indices.begin(); cIt != cEnd;)
			{
				bool removed = false;
				removed |= updateIndex(cIt->first, last + std::min(0, delta), int32_t(delta));
				removed |= updateIndex(cIt->second, last + std::min(0, delta), int32_t(delta));
				if (!removed)
					++cIt;
				else
				{
					indices.replaceWithLast(cIt);
					cEnd = indices.end();
				}
			}

			start.resize(newSize);
			target.resize(newSize);

			mCloth.notifyChanged();
		}

		// fill target elements with spheres
		for (uint32_t i = 0; i < spheres.size(); ++i)
			target[first + i] = spheres[i];
	}

	mCloth.wakeUp();
}

// Returns the number of collision spheres.
template <typename T>
inline uint32_t ClothImpl<T>::getNumSpheres() const
{
	return uint32_t(mCloth.mStartCollisionSpheres.size());
}
+
+// Fixed 4505:local function has been removed
+template <typename T>
+inline void ClothImpl<T>::setCapsules(Range<const uint32_t> capsules, uint32_t first, uint32_t last)
+{
+ uint32_t oldSize = uint32_t(mCloth.mCapsuleIndices.size());
+ uint32_t newSize = uint32_t(capsules.size() / 2) + oldSize - last + first;
+
+ NV_CLOTH_ASSERT(newSize <= 32);
+ NV_CLOTH_ASSERT(first <= oldSize);
+ NV_CLOTH_ASSERT(last <= oldSize);
+
+ const IndexPair* srcIndices = reinterpret_cast<const IndexPair*>(capsules.begin());
+
+ if (mCloth.mCapsuleIndices.capacity() < newSize)
+ {
+ ContextLockType contextLock(mCloth.mFactory);
+ mCloth.mCapsuleIndices.reserve(newSize);
+ }
+
+ // resize to larger of oldSize and newSize
+ mCloth.mCapsuleIndices.resize(std::max(oldSize, newSize));
+
+ typename T::MappedIndexVectorType dstIndices = mCloth.mCapsuleIndices;
+
+ if (uint32_t delta = newSize - oldSize)
+ {
+ // move past-range elements to new place
+ move(dstIndices.begin(), last, oldSize, last + delta);
+
+ // fill new elements from capsules
+ for (uint32_t i = last; i < last + delta; ++i)
+ dstIndices[i] = srcIndices[i - first];
+
+ dstIndices.resize(newSize);
+ mCloth.notifyChanged();
+ }
+
+ // fill existing elements from capsules
+ for (uint32_t i = first; i < last; ++i)
+ dstIndices[i] = srcIndices[i - first];
+
+ mCloth.wakeUp();
+}
+
+template <typename T>
+inline uint32_t ClothImpl<T>::getNumCapsules() const
+{
+ return uint32_t(mCloth.mCapsuleIndices.size());
+}
+
// Replaces collision planes [first, last) with 'planes' (xyz = unit normal,
// validated below; w is presumably the plane offset). Plane index shifts are
// propagated into the convex bit masks so convexes keep referencing the same
// planes; convexes left without any plane are removed.
template <typename T>
inline void ClothImpl<T>::setPlanes(Range<const physx::PxVec4> planes, uint32_t first, uint32_t last)
{
	uint32_t oldSize = uint32_t(mCloth.mStartCollisionPlanes.size());
	// total count after replacing [first, last) with 'planes'
	uint32_t newSize = uint32_t(planes.size()) + oldSize - last + first;

	NV_CLOTH_ASSERT(newSize <= 32); // plane indices must fit a 32-bit convex mask
	NV_CLOTH_ASSERT(first <= oldSize);
	NV_CLOTH_ASSERT(last <= oldSize);
#if PX_DEBUG || PX_CHECKED
	// log the first non-normalized plane normal, then only a total count
	int logCount = 0;
	for (int i = 0; i<static_cast<int>(planes.size()); i++)
	{
		if (fabsf(planes[i].getXYZ().magnitudeSquared() - 1.0f) > 0.01f)
		{
			if (logCount == 0)
				NV_CLOTH_LOG_INVALID_PARAMETER("The plane normals passed to Cloth::setPlanes are not normalized. First error encounterd at plane %d (%f, %f, %f, %f)",
					i, static_cast<double>(planes[i].x), static_cast<double>(planes[i].y), static_cast<double>(planes[i].z), static_cast<double>(planes[i].w));
			logCount++;
		}
	}
	if (logCount>1)
	{
		NV_CLOTH_LOG_INVALID_PARAMETER("This error was encountered %d more times.", logCount-1);
	}
#endif

	if (!oldSize && !newSize)
		return;

	if (!oldSize)
	{
		// no planes yet: write directly into the start buffer
		ContextLockType contextLock(mCloth.mFactory);
		mCloth.mStartCollisionPlanes.assign(planes.begin(), planes.end());
		mCloth.notifyChanged();
	}
	else
	{
		// reserve under the context lock if either buffer would reallocate
		if (std::max(oldSize, newSize) >
		    std::min(mCloth.mStartCollisionPlanes.capacity(), mCloth.mTargetCollisionPlanes.capacity()))
		{
			ContextLockType contextLock(mCloth.mFactory);
			mCloth.mStartCollisionPlanes.reserve(newSize);
			mCloth.mTargetCollisionPlanes.reserve(std::max(oldSize, newSize));
		}

		typename T::MappedVec4fVectorType start = mCloth.mStartCollisionPlanes;
		typename T::MappedVec4fVectorType target = mCloth.mTargetCollisionPlanes;

		// fill target from start
		for (uint32_t i = target.size(); i < oldSize; ++i)
			target.pushBack(start[i]);

		// resize to larger of oldSize and newSize
		start.resize(std::max(oldSize, newSize), physx::PxVec4(0.0f));
		target.resize(std::max(oldSize, newSize), physx::PxVec4(0.0f));

		if (int32_t delta = int32_t(newSize - oldSize))
		{
			// move past-range elements to new place
			move(start.begin(), last, oldSize, last + delta);
			move(target.begin(), last, oldSize, last + delta);

			// fill new elements from planes; for negative delta the unsigned
			// bound last + delta is below last, so this loop is skipped
			for (uint32_t i = last; i < last + delta; ++i)
				start[i] = planes[i - first];

			// adjust convex indices: bits below the edit point are kept as-is,
			// bits above it are shifted by delta; empty masks are removed
			uint32_t mask = (uint32_t(1) << (last + std::min(delta, 0))) - 1;
			typename T::MappedMaskVectorType masks = mCloth.mConvexMasks;
			Vector<uint32_t>::Type::Iterator cIt, cEnd = masks.end();
			for (cIt = masks.begin(); cIt != cEnd;)
			{
				uint32_t convex = (*cIt & mask);
				if (delta < 0)
					convex |= *cIt >> -delta & ~mask;
				else
					convex |= (*cIt & ~mask) << delta;
				if (convex)
					*cIt++ = convex;
				else
				{
					// all planes of this convex were removed
					masks.replaceWithLast(cIt);
					cEnd = masks.end();
				}
			}

			start.resize(newSize);
			target.resize(newSize);

			mCloth.notifyChanged();
		}

		// fill target elements with planes
		for (uint32_t i = 0; i < planes.size(); ++i)
			target[first + i] = planes[i];
	}

	mCloth.wakeUp();
}

// Returns the number of collision planes.
template <typename T>
inline uint32_t ClothImpl<T>::getNumPlanes() const
{
	return uint32_t(mCloth.mStartCollisionPlanes.size());
}
+
// Replaces convex masks [first, last) with the given bit masks; each set bit
// selects a collision plane by index ((1<<planeIndex1)|(1<<planeIndex2)|...).
// NOTE(review): unlike setSpheres/setPlanes there is no "fill existing
// elements" loop here, so a same-size replacement (newSize == oldSize) leaves
// the masks in [first, last) unchanged — confirm whether that is intended.
template <typename T>
inline void ClothImpl<T>::setConvexes(Range<const uint32_t> convexMasks, uint32_t first, uint32_t last)
{
	uint32_t oldSize = uint32_t(mCloth.mConvexMasks.size());
	uint32_t newSize = uint32_t(convexMasks.size()) + oldSize - last + first;

	NV_CLOTH_ASSERT(newSize <= 32);
	NV_CLOTH_ASSERT(first <= oldSize);
	NV_CLOTH_ASSERT(last <= oldSize);
#if PX_DEBUG || PX_CHECKED
	// a zero mask references no planes and is invalid
	for (int i = 0; i<static_cast<int>(convexMasks.size()); i++)
	{
		if (convexMasks[i] == 0)
		{
			NV_CLOTH_LOG_INVALID_PARAMETER("Cloth::setConvexes expects bit masks of the form (1<<planeIndex1)|(1<<planeIndex2). 0 is not a valid mask/plane index. Error found in location %d", i);
			continue;
		}
	}
#endif

	if (mCloth.mConvexMasks.capacity() < newSize)
	{
		ContextLockType contextLock(mCloth.mFactory);
		mCloth.mConvexMasks.reserve(newSize);
	}

	// resize to larger of oldSize and newSize
	mCloth.mConvexMasks.resize(std::max(oldSize, newSize));

	// delta is unsigned: when shrinking it wraps, which still yields the right
	// destination for move() and makes the fill loop below a no-op
	if (uint32_t delta = newSize - oldSize)
	{
		typename T::MappedMaskVectorType masks = mCloth.mConvexMasks;

		// move past-range elements to new place
		move(masks.begin(), last, oldSize, last + delta);

		// fill new elements from capsules
		for (uint32_t i = last; i < last + delta; ++i)
			masks[i] = convexMasks[i - first];

		masks.resize(newSize);
		mCloth.notifyChanged();
	}

	mCloth.wakeUp();
}

// Returns the number of collision convexes (plane masks).
template <typename T>
inline uint32_t ClothImpl<T>::getNumConvexes() const
{
	return uint32_t(mCloth.mConvexMasks.size());
}
+
+template <typename T>
+inline void ClothImpl<T>::setTriangles(Range<const physx::PxVec3> triangles, uint32_t first, uint32_t last)
+{
+ // convert from triangle to vertex count
+ first *= 3;
+ last *= 3;
+
+ triangles = mCloth.clampTriangleCount(triangles, last - first);
+ NV_CLOTH_ASSERT(0 == triangles.size() % 3);
+
+ uint32_t oldSize = uint32_t(mCloth.mStartCollisionTriangles.size());
+ uint32_t newSize = uint32_t(triangles.size()) + oldSize - last + first;
+
+ NV_CLOTH_ASSERT(first <= oldSize);
+ NV_CLOTH_ASSERT(last <= oldSize);
+
+ if (!oldSize && !newSize)
+ return;
+
+ if (!oldSize)
+ {
+ ContextLockType contextLock(mCloth.mFactory);
+ mCloth.mStartCollisionTriangles.assign(triangles.begin(), triangles.end());
+ mCloth.notifyChanged();
+ }
+ else
+ {
+ if (std::max(oldSize, newSize) >
+ std::min(mCloth.mStartCollisionTriangles.capacity(), mCloth.mTargetCollisionTriangles.capacity()))
+ {
+ ContextLockType contextLock(mCloth.mFactory);
+ mCloth.mStartCollisionTriangles.reserve(newSize);
+ mCloth.mTargetCollisionTriangles.reserve(std::max(oldSize, newSize));
+ }
+
+ typename T::MappedVec3fVectorType start = mCloth.mStartCollisionTriangles;
+ typename T::MappedVec3fVectorType target = mCloth.mTargetCollisionTriangles;
+
+ // fill target from start
+ for (uint32_t i = target.size(); i < oldSize; ++i)
+ target.pushBack(start[i]);
+
+ // resize to larger of oldSize and newSize
+ start.resize(std::max(oldSize, newSize), physx::PxVec3(0.0f));
+ target.resize(std::max(oldSize, newSize), physx::PxVec3(0.0f));
+
+ if (int32_t delta = int32_t(newSize - oldSize))
+ {
+ // move past-range elements to new place
+ move(start.begin(), last, oldSize, last + delta);
+ move(target.begin(), last, oldSize, last + delta);
+
+ // fill new elements from planes
+ for (uint32_t i = last; i < last + delta; ++i)
+ start[i] = triangles[i - first];
+
+ start.resize(newSize);
+ target.resize(newSize);
+
+ mCloth.notifyChanged();
+ }
+ //////////////////////
+
+ // if (std::max(oldSize, newSize) >
+ // std::min(mCloth.mStartCollisionTriangles.capacity(), mCloth.mTargetCollisionTriangles.capacity()))
+ // {
+ // ContextLockType contextLock(mCloth.mFactory);
+ // mCloth.mStartCollisionTriangles.reserve(newSize);
+ // mCloth.mTargetCollisionTriangles.reserve(std::max(oldSize, newSize));
+ // }
+ //
+ // // fill target from start
+ // for (uint32_t i = mCloth.mTargetCollisionTriangles.size(); i < oldSize; ++i)
+ // mCloth.mTargetCollisionTriangles.pushBack(mCloth.mStartCollisionTriangles[i]);
+ //
+ // // resize to larger of oldSize and newSize
+ // mCloth.mStartCollisionTriangles.resize(std::max(oldSize, newSize));
+ // mCloth.mTargetCollisionTriangles.resize(std::max(oldSize, newSize));
+ //
+ // if (uint32_t delta = newSize - oldSize)
+ // {
+ // // move past-range elements to new place
+ // move(mCloth.mStartCollisionTriangles.begin(), last, oldSize, last + delta);
+ // move(mCloth.mTargetCollisionTriangles.begin(), last, oldSize, last + delta);
+ //
+ // // fill new elements from triangles
+ // for (uint32_t i = last; i < last + delta; ++i)
+ // mCloth.mStartCollisionTriangles[i] = triangles[i - first];
+ //
+ // mCloth.mStartCollisionTriangles.resize(newSize);
+ // mCloth.mTargetCollisionTriangles.resize(newSize);
+ //
+ // mCloth.notifyChanged();
+ // }
+
+ // fill target elements with triangles
+ // for (uint32_t i = 0; i < triangles.size(); ++i)
+ // mCloth.mTargetCollisionTriangles[first + i] = triangles[i];
+
+ // fill target elements with planes
+ for (uint32_t i = 0; i < triangles.size(); ++i)
+ target[first + i] = triangles[i];
+ }
+
+ mCloth.wakeUp();
+}
+
// Replaces the tail of the collision triangles (from 'first' to the end) with
// caller-supplied start AND target vertex sets, i.e. both interpolation
// endpoints are given explicitly.
template <typename T>
inline void ClothImpl<T>::setTriangles(Range<const physx::PxVec3> startTriangles, Range<const physx::PxVec3> targetTriangles,
                                       uint32_t first)
{
	NV_CLOTH_ASSERT(startTriangles.size() == targetTriangles.size());

	// convert from triangle to vertex count
	first *= 3;

	uint32_t last = uint32_t(mCloth.mStartCollisionTriangles.size());

	startTriangles = mCloth.clampTriangleCount(startTriangles, last - first);
	targetTriangles = mCloth.clampTriangleCount(targetTriangles, last - first);

	uint32_t oldSize = uint32_t(mCloth.mStartCollisionTriangles.size());
	uint32_t newSize = uint32_t(startTriangles.size()) + oldSize - last + first;

	NV_CLOTH_ASSERT(first <= oldSize);
	NV_CLOTH_ASSERT(last <= oldSize);

	if (!oldSize && !newSize)
		return;

	if (newSize > std::min(mCloth.mStartCollisionTriangles.capacity(), mCloth.mTargetCollisionTriangles.capacity()))
	{
		ContextLockType contextLock(mCloth.mFactory);
		// NOTE(review): assign() replaces the ENTIRE buffers with the input,
		// not just the tail starting at 'first' — for first > 0 the retained
		// prefix is lost. An earlier draft appended onto the prefix with
		// pushBack; confirm which behavior is intended.
		mCloth.mStartCollisionTriangles.assign(startTriangles.begin(), startTriangles.end());
		mCloth.mTargetCollisionTriangles.assign(targetTriangles.begin(), targetTriangles.end());
		mCloth.notifyChanged();
	}
	else
	{
		uint32_t retainSize = oldSize - last + first;
		// NOTE(review): this resize() to the retained prefix is immediately
		// undone by the assign() below, which clears before copying — see the
		// note above.
		mCloth.mStartCollisionTriangles.resize(retainSize);
		mCloth.mTargetCollisionTriangles.resize(retainSize);

		mCloth.mStartCollisionTriangles.assign(startTriangles.begin(), startTriangles.end());
		mCloth.mTargetCollisionTriangles.assign(targetTriangles.begin(), targetTriangles.end());

		if (newSize - oldSize)
			mCloth.notifyChanged();
	}

	mCloth.wakeUp();
}

// Returns the number of collision triangles (vertex count / 3).
template <typename T>
inline uint32_t ClothImpl<T>::getNumTriangles() const
{
	return uint32_t(mCloth.mStartCollisionTriangles.size()) / 3;
}
+
// Returns whether continuous collision detection is enabled.
template <typename T>
inline bool ClothImpl<T>::isContinuousCollisionEnabled() const
{
	return mCloth.mEnableContinuousCollision;
}

// Enables/disables continuous collision detection.
template <typename T>
inline void ClothImpl<T>::enableContinuousCollision(bool enable)
{
	if (enable == mCloth.mEnableContinuousCollision)
		return;

	mCloth.mEnableContinuousCollision = enable;
	mCloth.notifyChanged();
	mCloth.wakeUp();
}

// Mass scale applied during collision (semantics live in the solver).
template <typename T>
inline float ClothImpl<T>::getCollisionMassScale() const
{
	return mCloth.mCollisionMassScale;
}

template <typename T>
inline void ClothImpl<T>::setCollisionMassScale(float scale)
{
	if (scale == mCloth.mCollisionMassScale)
		return;

	mCloth.mCollisionMassScale = scale;
	mCloth.notifyChanged();
	mCloth.wakeUp();
}

// Friction coefficient. NOTE(review): unlike the neighboring setters there is
// no unchanged-value early out and no notifyChanged() here — presumably
// friction feeds no derived data; confirm against the solver.
template <typename T>
inline void ClothImpl<T>::setFriction(float friction)
{
	mCloth.mFriction = friction;
	mCloth.wakeUp();
}

template <typename T>
inline float ClothImpl<T>::getFriction() const
{
	return mCloth.mFriction;
}
+
// Returns the number of virtual particle weights (PxVec3 entries).
template <typename T>
inline uint32_t ClothImpl<T>::getNumVirtualParticleWeights() const
{
	return uint32_t(mCloth.mVirtualParticleWeights.size());
}

// Scale factor applied to tether constraints (used by the solver).
template <typename T>
inline void ClothImpl<T>::setTetherConstraintScale(float scale)
{
	if (scale == mCloth.mTetherConstraintScale)
		return;

	mCloth.mTetherConstraintScale = scale;
	mCloth.notifyChanged();
	mCloth.wakeUp();
}

template <typename T>
inline float ClothImpl<T>::getTetherConstraintScale() const
{
	return mCloth.mTetherConstraintScale;
}

// Stiffness is stored as log2(1 - stiffness) (see safeLog2); getter inverts it.
template <typename T>
inline void ClothImpl<T>::setTetherConstraintStiffness(float stiffness)
{
	float value = safeLog2(1 - stiffness);
	if (value == mCloth.mTetherConstraintLogStiffness)
		return;

	mCloth.mTetherConstraintLogStiffness = value;
	mCloth.notifyChanged();
	mCloth.wakeUp();
}

template <typename T>
inline float ClothImpl<T>::getTetherConstraintStiffness() const
{
	return 1.f - safeExp2(mCloth.mTetherConstraintLogStiffness);
}
+
// Returns a writable range over the motion constraint buffer, waking the
// cloth first so edits take effect.
template <typename T>
inline Range<physx::PxVec4> ClothImpl<T>::getMotionConstraints()
{
	mCloth.wakeUp();
	return mCloth.push(mCloth.mMotionConstraints);
}

// Removes all motion constraints and wakes the cloth.
template <typename T>
inline void ClothImpl<T>::clearMotionConstraints()
{
	mCloth.clear(mCloth.mMotionConstraints);
	mCloth.wakeUp();
}
+
// Returns the number of motion constraints (size of the start buffer).
template <typename T>
inline uint32_t ClothImpl<T>::getNumMotionConstraints() const
{
	return uint32_t(mCloth.mMotionConstraints.mStart.size());
}
+
// Sets the scale and bias applied to motion constraints.
// Early-outs only when BOTH values are unchanged.
template <typename T>
inline void ClothImpl<T>::setMotionConstraintScaleBias(float scale, float bias)
{
	if (scale == mCloth.mMotionConstraintScale && bias == mCloth.mMotionConstraintBias)
		return;

	mCloth.mMotionConstraintScale = scale;
	mCloth.mMotionConstraintBias = bias;
	mCloth.notifyChanged();
	mCloth.wakeUp();
}

// Returns the motion constraint scale.
template <typename T>
inline float ClothImpl<T>::getMotionConstraintScale() const
{
	return mCloth.mMotionConstraintScale;
}

// Returns the motion constraint bias.
template <typename T>
inline float ClothImpl<T>::getMotionConstraintBias() const
{
	return mCloth.mMotionConstraintBias;
}
+
+template <typename T>
+inline void ClothImpl<T>::setMotionConstraintStiffness(float stiffness)
+{
+ float value = safeLog2(1 - stiffness);
+ if (value == mCloth.mMotionConstraintLogStiffness)
+ return;
+
+ mCloth.mMotionConstraintLogStiffness = value;
+ mCloth.notifyChanged();
+ mCloth.wakeUp();
+}
+
+template <typename T>
+inline float ClothImpl<T>::getMotionConstraintStiffness() const
+{
+ return 1.f - safeExp2(mCloth.mMotionConstraintLogStiffness);
+}
+
// Returns a writable range over the separation constraint buffer, waking the
// cloth first so edits take effect.
template <typename T>
inline Range<physx::PxVec4> ClothImpl<T>::getSeparationConstraints()
{
	mCloth.wakeUp();
	return mCloth.push(mCloth.mSeparationConstraints);
}

// Removes all separation constraints and wakes the cloth.
template <typename T>
inline void ClothImpl<T>::clearSeparationConstraints()
{
	mCloth.clear(mCloth.mSeparationConstraints);
	mCloth.wakeUp();
}
+
// Discards in-flight interpolation targets: pending target collision spheres
// become the new start spheres, and queued motion/separation constraint
// targets are popped. Wakes the cloth afterwards.
template <typename T>
inline void ClothImpl<T>::clearInterpolation()
{
	if (!mCloth.mTargetCollisionSpheres.empty())
	{
		// lock the factory context while swapping shared collision buffers
		ContextLockType contextLock(mCloth.mFactory);
		physx::shdfnd::swap(mCloth.mStartCollisionSpheres, mCloth.mTargetCollisionSpheres);
		mCloth.mTargetCollisionSpheres.resize(0);
	}
	mCloth.mMotionConstraints.pop();
	mCloth.mSeparationConstraints.pop();
	mCloth.wakeUp();
}
+
// Returns the number of separation constraints (size of the start buffer).
template <typename T>
inline uint32_t ClothImpl<T>::getNumSeparationConstraints() const
{
	return uint32_t(mCloth.mSeparationConstraints.mStart.size());
}

// Returns the number of per-particle acceleration entries.
template <typename T>
inline uint32_t ClothImpl<T>::getNumParticleAccelerations() const
{
	return uint32_t(mCloth.mParticleAccelerations.size());
}
+
// Sets the wind velocity. Early-outs when unchanged; otherwise marks the
// cloth changed and wakes it.
template <typename T>
inline void ClothImpl<T>::setWindVelocity(physx::PxVec3 wind)
{
	if (wind == mCloth.mWind)
		return;

	mCloth.mWind = wind;
	mCloth.notifyChanged();
	mCloth.wakeUp();
}

// Returns the current wind velocity.
template <typename T>
inline physx::PxVec3 ClothImpl<T>::getWindVelocity() const
{
	return mCloth.mWind;
}
+
// Sets the drag coefficient; must be < 1 because it is stored as
// log2(1 - coefficient). Early-outs when the encoded value is unchanged.
template <typename T>
inline void ClothImpl<T>::setDragCoefficient(float coefficient)
{
	NV_CLOTH_ASSERT(coefficient < 1.f);

	float value = safeLog2(1.f - coefficient);
	if (value == mCloth.mDragLogCoefficient)
		return;

	mCloth.mDragLogCoefficient = value;
	mCloth.notifyChanged();
	mCloth.wakeUp();
}

// Returns the drag coefficient, inverting the log2 encoding.
template <typename T>
inline float ClothImpl<T>::getDragCoefficient() const
{
	return 1.f - safeExp2(mCloth.mDragLogCoefficient);
}

// Sets the lift coefficient; same log2(1 - x) encoding and < 1 requirement
// as the drag coefficient.
template <typename T>
inline void ClothImpl<T>::setLiftCoefficient(float coefficient)
{
	NV_CLOTH_ASSERT(coefficient < 1.f);

	float value = safeLog2(1.f - coefficient);
	if (value == mCloth.mLiftLogCoefficient)
		return;

	mCloth.mLiftLogCoefficient = value;
	mCloth.notifyChanged();
	mCloth.wakeUp();
}

// Returns the lift coefficient, inverting the log2 encoding.
template <typename T>
inline float ClothImpl<T>::getLiftCoefficient() const
{
	return 1.f - safeExp2(mCloth.mLiftLogCoefficient);
}
+
// Returns the number of self-collision particle indices.
template <typename T>
inline uint32_t ClothImpl<T>::getNumSelfCollisionIndices() const
{
	return uint32_t(mCloth.mSelfCollisionIndices.size());
}
+
// Fixed warning 4505: unreferenced local function has been removed.
// Replaces the rest positions wholesale; the range must be empty or contain
// exactly one entry per particle.
template <typename T>
inline void ClothImpl<T>::setRestPositions(Range<const physx::PxVec4> restPositions)
{
	NV_CLOTH_ASSERT(restPositions.empty() || restPositions.size() == getNumParticles());
	// lock the factory context while replacing the shared buffer
	ContextLockType contextLock(mCloth.mFactory);
	mCloth.mRestPositions.assign(restPositions.begin(), restPositions.end());
	mCloth.wakeUp();
}

// Returns the number of rest positions (0 when none were set).
template <typename T>
inline uint32_t ClothImpl<T>::getNumRestPositions() const
{
	return uint32_t(mCloth.mRestPositions.size());
}
+
// Sets the self-collision distance. Early-outs when unchanged; otherwise
// marks the cloth changed and wakes it.
template <typename T>
inline void ClothImpl<T>::setSelfCollisionDistance(float distance)
{
	if (distance == mCloth.mSelfCollisionDistance)
		return;

	mCloth.mSelfCollisionDistance = distance;
	mCloth.notifyChanged();
	mCloth.wakeUp();
}

// Returns the self-collision distance.
template <typename T>
inline float ClothImpl<T>::getSelfCollisionDistance() const
{
	return mCloth.mSelfCollisionDistance;
}
+
+template <typename T>
+inline void ClothImpl<T>::setSelfCollisionStiffness(float stiffness)
+{
+ float value = safeLog2(1 - stiffness);
+ if (value == mCloth.mSelfCollisionLogStiffness)
+ return;
+
+ mCloth.mSelfCollisionLogStiffness = value;
+ mCloth.notifyChanged();
+ mCloth.wakeUp();
+}
+
+template <typename T>
+inline float ClothImpl<T>::getSelfCollisionStiffness() const
+{
+ return 1.f - safeExp2(mCloth.mSelfCollisionLogStiffness);
+}
+
// Returns the center of the particle bounding box (updated by the solver).
template <typename T>
inline const physx::PxVec3& ClothImpl<T>::getBoundingBoxCenter() const
{
	return mCloth.mParticleBoundsCenter;
}

// Returns the half-extent of the particle bounding box.
template <typename T>
inline const physx::PxVec3& ClothImpl<T>::getBoundingBoxScale() const
{
	return mCloth.mParticleBoundsHalfExtent;
}
+
// Sets the sleep velocity threshold. Early-outs when unchanged; otherwise
// marks the cloth changed and wakes it.
template <typename T>
inline void ClothImpl<T>::setSleepThreshold(float threshold)
{
	if (threshold == mCloth.mSleepThreshold)
		return;

	mCloth.mSleepThreshold = threshold;
	mCloth.notifyChanged();
	mCloth.wakeUp();
}

// Returns the sleep threshold.
template <typename T>
inline float ClothImpl<T>::getSleepThreshold() const
{
	return mCloth.mSleepThreshold;
}
+
// Sets the number of iterations between sleep tests. Early-outs when
// unchanged; otherwise marks the cloth changed and wakes it.
template <typename T>
inline void ClothImpl<T>::setSleepTestInterval(uint32_t interval)
{
	if (interval == mCloth.mSleepTestInterval)
		return;

	mCloth.mSleepTestInterval = interval;
	mCloth.notifyChanged();
	mCloth.wakeUp();
}

// Returns the sleep test interval.
template <typename T>
inline uint32_t ClothImpl<T>::getSleepTestInterval() const
{
	return mCloth.mSleepTestInterval;
}
+
// Sets the number of passed sleep tests after which the cloth falls asleep.
// Early-outs when unchanged; otherwise marks the cloth changed and wakes it.
template <typename T>
inline void ClothImpl<T>::setSleepAfterCount(uint32_t afterCount)
{
	if (afterCount == mCloth.mSleepAfterCount)
		return;

	mCloth.mSleepAfterCount = afterCount;
	mCloth.notifyChanged();
	mCloth.wakeUp();
}

// Returns the sleep-after count.
template <typename T>
inline uint32_t ClothImpl<T>::getSleepAfterCount() const
{
	return mCloth.mSleepAfterCount;
}

// Returns the current sleep pass counter.
template <typename T>
inline uint32_t ClothImpl<T>::getSleepPassCount() const
{
	return mCloth.mSleepPassCounter;
}
+
// Forwards to the cloth's sleeping query.
template <typename T>
inline bool ClothImpl<T>::isAsleep() const
{
	return mCloth.isSleeping();
}

// Forces the cloth asleep by saturating the sleep pass counter.
template <typename T>
inline void ClothImpl<T>::putToSleep()
{
	mCloth.mSleepPassCounter = mCloth.mSleepAfterCount;
}

// Forwards to the cloth's wakeUp().
template <typename T>
inline void ClothImpl<T>::wakeUp()
{
	mCloth.wakeUp();
}
+
// Stores an opaque user pointer; not interpreted by this class.
template <typename T>
inline void ClothImpl<T>::setUserData(void* data)
{
	mCloth.mUserData = data;
}

// Returns the opaque user pointer.
template <typename T>
inline void* ClothImpl<T>::getUserData() const
{
	return mCloth.mUserData;
}
+
// Wraps a raw particle pointer in a MappedRange that calls
// Cloth::lockParticles/unlockParticles around access.
template <typename T>
template <typename U>
inline MappedRange<U> ClothImpl<T>::getMappedParticles(U* data) const
{
	return MappedRange<U>(data, data + getNumParticles(), *this, &Cloth::lockParticles, &Cloth::unlockParticles);
}
+
+
+} // namespace cloth
+} // namespace nv
diff --git a/NvCloth/src/Factory.cpp b/NvCloth/src/Factory.cpp
new file mode 100644
index 0000000..1307223
--- /dev/null
+++ b/NvCloth/src/Factory.cpp
@@ -0,0 +1,101 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2017 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#ifndef NV_CLOTH_ENABLE_CUDA
+#define NV_CLOTH_ENABLE_CUDA 1
+#endif
+
+#include "SwFactory.h"
+
+using namespace physx;
+
+namespace nv
+{
+namespace cloth
+{
// Returns a process-wide unique id for each new fabric.
// NOTE(review): plain static increment — not atomic, so concurrent fabric
// creation from multiple threads could yield duplicate ids; confirm callers
// serialize through a factory/context lock.
uint32_t getNextFabricId()
{
	static uint32_t sNextFabricId = 0;
	return sNextFabricId++;
}
+
+}
+}
+
+
// Creates the software (CPU) solver factory. Caller releases it via
// NvClothDestroyFactory.
nv::cloth::Factory* NvClothCreateFactoryCPU()
{
	return NV_CLOTH_NEW(nv::cloth::SwFactory)();
}
+
#if NV_CLOTH_ENABLE_CUDA
#include "cuda/CuFactory.h"

// Creates a CUDA-backed factory bound to the given CUDA context.
nv::cloth::Factory* NvClothCreateFactoryCUDA(CUcontext context)
{
	return NV_CLOTH_NEW(nv::cloth::CuFactory)(context);
}
#else
// CUDA support compiled out: always returns null.
nv::cloth::Factory* NvClothCreateFactoryCUDA(CUcontext context)
{
	PX_UNUSED(context);
	return nullptr;
}
#endif
+
#if NV_CLOTH_ENABLE_DX11
#include "dx/dxFactory.h"
#include "NvCloth/DxContextManagerCallback.h"
// Creates a DX11 compute factory; requires feature level 11.0 or better,
// otherwise returns null.
// NOTE(review): graphicsContextManager is dereferenced without a null check —
// confirm the API contract forbids passing null.
nv::cloth::Factory* NvClothCreateFactoryDX11(nv::cloth::DxContextManagerCallback* graphicsContextManager)
{
	if (graphicsContextManager->getDevice()->GetFeatureLevel() >= D3D_FEATURE_LEVEL_11_0)
		return NV_CLOTH_NEW(nv::cloth::DxFactory)(graphicsContextManager);
	return nullptr;
}
#else
// DX11 support compiled out: always returns null.
nv::cloth::Factory* NvClothCreateFactoryDX11(nv::cloth::DxContextManagerCallback* graphicsContextManager)
{
	PX_UNUSED(graphicsContextManager);
	return nullptr;
}
#endif
+
// Destroys a factory created by any of the NvClothCreateFactory* functions.
NV_CLOTH_API(void) NvClothDestroyFactory(nv::cloth::Factory* factory)
{
	NV_CLOTH_DELETE(factory);
}
+
// Reports whether this build was compiled with CUDA solver support.
NV_CLOTH_API(bool) NvClothCompiledWithCudaSupport()
{
	return NV_CLOTH_ENABLE_CUDA == 1;
}
// Reports whether this build was compiled with DX11 solver support.
// NOTE(review): unlike NV_CLOTH_ENABLE_CUDA (defaulted at the top of this
// file), NV_CLOTH_ENABLE_DX11 has no fallback definition here — it must be
// supplied by the build system or this line fails to compile.
NV_CLOTH_API(bool) NvClothCompiledWithDxSupport()
{
	return NV_CLOTH_ENABLE_DX11 == 1;
}
diff --git a/NvCloth/src/IndexPair.h b/NvCloth/src/IndexPair.h
new file mode 100644
index 0000000..2e8432f
--- /dev/null
+++ b/NvCloth/src/IndexPair.h
@@ -0,0 +1,45 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2017 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#pragma once
+
+
+namespace nv
+{
+namespace cloth
+{
+
// Plain pair of 32-bit indices.
// NOTE(review): uses uint32_t but this header includes nothing itself —
// presumably <cstdint> arrives via a prefix/common header; confirm before
// including this header standalone.
struct IndexPair
{
	uint32_t first;
	uint32_t second;
};
+
+} // namespace cloth
+} // namespace nv
diff --git a/NvCloth/src/IterationState.h b/NvCloth/src/IterationState.h
new file mode 100644
index 0000000..f199663
--- /dev/null
+++ b/NvCloth/src/IterationState.h
@@ -0,0 +1,405 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2017 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#pragma once
+
+#include "Simd.h"
+#include <foundation/PxVec4.h>
+#include <foundation/PxVec3.h>
+#include <foundation/PxMat44.h>
+#include <foundation/PxMat33.h>
+#include <PsMathUtils.h>
+#include "Vec4T.h"
+#include <algorithm>
+
+namespace nv
+{
+
+/* function object to perform solver iterations on one cloth */
+
+// todo: performance optimization: cache this object and test if velocity/iterDt has changed
+// c'tor takes about 5% of the iteration time of a 20x20 cloth
+
+namespace cloth
+{
+
+/* helper functions */
+
/* Squares a value of any type supporting operator* (used for dt^2 below). */
template <typename T>
T sqr(const T& value)
{
	const T squared = value * value;
	return squared;
}
+
// Logarithmic map of a quaternion: returns the imaginary part rescaled by
// asin(theta)/theta (half-angle axis vector). The fsel flips the sign when
// q.w < 0 to stay on the principal branch.
// NOTE(review): asin(|imaginary|) assumes q is normalized — confirm callers.
inline physx::PxVec3 log(const physx::PxQuat& q)
{
	float theta = q.getImaginaryPart().magnitude();
	// guard against division by ~0 for near-identity rotations
	float scale = theta > PX_EPS_REAL ? physx::PxAsin(theta) / theta : 1.0f;
	scale = physx::intrinsics::fsel(q.w, scale, -scale);
	return physx::PxVec3(q.x * scale, q.y * scale, q.z * scale);
}
+
// Exponential map: converts a half-angle axis vector back into a quaternion
// (inverse of log above).
inline physx::PxQuat exp(const physx::PxVec3& v)
{
	float theta = v.magnitude();
	// guard against division by ~0 for near-zero rotations
	float scale = theta > PX_EPS_REAL ? physx::PxSin(theta) / theta : 1.0f;
	return physx::PxQuat(v.x * scale, v.y * scale, v.z * scale, physx::PxCos(theta));
}
+
// Loads the first N columns of a PxMat44 into an array of SIMD registers.
template <typename Simd4f, uint32_t N>
inline void assign(Simd4f (&columns)[N], const physx::PxMat44& matrix)
{
	for (uint32_t i = 0; i < N; ++i)
		columns[i] = load(nv::cloth::array(matrix[i]));
}
+
// Linear transform: combines the three columns weighted by vec.x/y/z
// (vec.w is not used).
template <typename Simd4f>
inline Simd4f transform(const Simd4f (&columns)[3], const Simd4f& vec)
{
	return splat<0>(vec) * columns[0] + splat<1>(vec) * columns[1] + splat<2>(vec) * columns[2];
}
+
// Affine transform: same as above plus a translation term.
template <typename Simd4f>
inline Simd4f transform(const Simd4f (&columns)[3], const Simd4f& translate, const Simd4f& vec)
{
	return translate + splat<0>(vec) * columns[0] + splat<1>(vec) * columns[1] + splat<2>(vec) * columns[2];
}
+
+template <typename>
+struct IterationState; // forward declaration
+
// Computes per-frame iteration parameters from a cloth and the frame dt, and
// produces IterationState objects for the solver. Constructing the factory
// also advances the cloth's velocity/pose bookkeeping (see the ctor below).
struct IterationStateFactory
{
	template <typename MyCloth>
	IterationStateFactory(MyCloth& cloth, float frameDt);

	template <typename Simd4f, typename MyCloth>
	IterationState<Simd4f> create(MyCloth const& cloth) const;

	// squared length of the xyz lanes (dot3 ignores w)
	template <typename Simd4f>
	static Simd4f lengthSqr(Simd4f const& v)
	{
		return dot3(v, v);
	}

	// Reinterprets the first three floats of a SIMD register as a PxVec3.
	// NOTE(review): relies on layout compatibility of Simd4f and PxVec3;
	// strict-aliasing concern — confirm this is sanctioned by the SIMD layer.
	template <typename Simd4f>
	static physx::PxVec3 castToPxVec3(const Simd4f& v)
	{
		return *reinterpret_cast<const physx::PxVec3*>(reinterpret_cast<const char*>(&v));
	}

	int mNumIterations;      // solver iterations this frame (>= 1)
	float mInvNumIterations; // 1 / mNumIterations
	float mIterDt, mIterDtRatio, mIterDtAverage; // per-iteration dt, ratio to previous frame's, smoothed average
	physx::PxQuat mCurrentRotation;    // frame rotation at the start of the frame
	physx::PxVec3 mPrevLinearVelocity; // frame velocities captured from the previous frame
	physx::PxVec3 mPrevAngularVelocity;
};
+
/* solver iterations helper functor */
// Per-iteration solver state produced by IterationStateFactory::create().
template <typename Simd4f>
struct IterationState
{
	// call after each iteration
	void update();

	// normalized solver progress in [0,1]
	inline float getCurrentAlpha() const;
	inline float getPreviousAlpha() const;

  public:
	Simd4f mRotationMatrix[3]; // should rename to 'mRotation'

	Simd4f mCurBias;  // in local space
	Simd4f mPrevBias; // in local space
	Simd4f mWind;     // delta position per iteration

	Simd4f mPrevMatrix[3];
	Simd4f mCurMatrix[3];
	Simd4f mDampScaleUpdate; // first-iteration dt-ratio correction; zeroed after the first update()

	// iteration counter
	uint32_t mRemainingIterations;

	// reciprocal total number of iterations
	float mInvNumIterations;

	// time step size per iteration
	float mIterDt;

	bool mIsTurning; // if false, mPositionScale = mPrevMatrix[0]
};
+
+} // namespace cloth
+
// Normalized solver progress [0..1] after the current iteration completes.
template <typename Simd4f>
inline float cloth::IterationState<Simd4f>::getCurrentAlpha() const
{
	return getPreviousAlpha() + mInvNumIterations;
}

// Normalized solver progress at the start of the current iteration.
template <typename Simd4f>
inline float cloth::IterationState<Simd4f>::getPreviousAlpha() const
{
	return 1.0f - mRemainingIterations * mInvNumIterations;
}
+
// Derives iteration count and per-iteration time step for one frame, captures
// the previous frame's velocities, then advances the cloth's bookkeeping
// (velocities from target pose, dt average, current pose := target pose).
template <typename MyCloth>
cloth::IterationStateFactory::IterationStateFactory(MyCloth& cloth, float frameDt)
{
	// round frameDt * solverFrequency to the nearest iteration count, at least 1
	mNumIterations = std::max(1, int(frameDt * cloth.mSolverFrequency + 0.5f));
	mInvNumIterations = 1.0f / mNumIterations;
	mIterDt = frameDt * mInvNumIterations;

	// ratio of this frame's iteration dt to the previous one (1 on first frame)
	mIterDtRatio = cloth.mPrevIterDt ? mIterDt / cloth.mPrevIterDt : 1.0f;
	mIterDtAverage = cloth.mIterDtAvg.empty() ? mIterDt : cloth.mIterDtAvg.average();

	mCurrentRotation = cloth.mCurrentMotion.q;
	mPrevLinearVelocity = cloth.mLinearVelocity;
	mPrevAngularVelocity = cloth.mAngularVelocity;

	// update cloth
	float invFrameDt = 1.0f / frameDt;
	cloth.mLinearVelocity = invFrameDt * (cloth.mTargetMotion.p - cloth.mCurrentMotion.p);
	// angular velocity from the delta rotation via the quaternion log map
	physx::PxQuat dq = cloth.mTargetMotion.q * cloth.mCurrentMotion.q.getConjugate();
	cloth.mAngularVelocity = log(dq) * invFrameDt;

	cloth.mPrevIterDt = mIterDt;
	cloth.mIterDtAvg.push(static_cast<uint32_t>(mNumIterations), mIterDt);
	cloth.mCurrentMotion = cloth.mTargetMotion;
}
+
+/*
+momentum conservation:
+m2*x2 - m1*x1 = m1*x1 - m0*x0 + g*dt2, m = r+t
+r2*x2+t2 = 2(r1*x1+t1) - (r0*x0+t0) + g*dt2
+r2*x2 = r1*x1 + r1*x1 - r0*x0 - (t2-2t1+t0) + g*dt2
+substitute r1*x1 - r0*x0 = r1*(x1-x0) + (r1-r0)*x0
+and r1*x1 = r2*x1 - (r2-r1)*x1
+
+x2 = x1 + r2'*g*dt2
+ + r2'r1*(x1-x0) //< damp
+ + (r2'r1-r2'r0)*x0 - (1-r2'r1)*x1 - r2'*(t2-2t1+t0) //< inertia
+ + (1-r2'r1)x1 + t2-t1 //< drag (not momentum conserving)
+
+x2 = x0 + a0*x0 + a1*x1 + b with
+a0 = (inertia-damp)*r2'r1 - inertia*r2'r0 - eye
+a1 = (1-inertia-drag)*eye + (damp+inertia+drag)*r2'r1
+b = r2'*(g*dt2 - (inertia+drag)*(t2-t1) + inertia*(t1-t0))
+
+Velocities are used to deal with multiple iterations and varying dt. Only b needs
+to be updated from one iteration to the next. Specifically, it is multiplied
+by (r2'r1)^1/numIterations. a0 and a1 are unaffected by that multiplication.
+
+The centrifugal and coriolis forces of non-inertial (turning) reference frame are
+not generally captured in these formulas. The 'inertia' term above contains radial
+acceleration plus centrifugal and coriolis force for a single iteration.
+For multiple iterations, or when the centrifugal forces are scaled differently
+than angular inertia, we need to add explicit centrifugal and coriolis forces.
+We only use them to correct the above formula because their discretization is
+not accurate.
+
+Possible improvements: multiply coriolis and centrifugal matrix by curInvRotation
+from the left. Do the alpha trick of linearInertia also for angularInertia, write
+prevParticle after multiplying it with matrix.
+
+If you change anything in this function, make sure that ClothCustomFloating and
+ClothInertia haven't regressed for any choice of solver frequency.
+*/
+
// Builds the per-iteration state for one simulation frame (see the derivation
// in the comment block above). Precomputes local-space bias, wind and the
// damping/inertia matrices; the turning (rotating reference frame) path also
// folds explicit centrifugal and coriolis corrections into the matrices.
template <typename Simd4f, typename MyCloth>
cloth::IterationState<Simd4f> cloth::IterationStateFactory::create(MyCloth const& cloth) const
{
	IterationState<Simd4f> result;

	result.mRemainingIterations = static_cast<uint32_t>(mNumIterations);
	result.mInvNumIterations = mInvNumIterations;
	result.mIterDt = mIterDt;

	Simd4f curLinearVelocity = load(array(cloth.mLinearVelocity));
	Simd4f prevLinearVelocity = load(array(mPrevLinearVelocity));

	Simd4f iterDt = simd4f(mIterDt);
	Simd4f dampExponent = simd4f(cloth.mStiffnessFrequency) * iterDt;

	// frame translation per iteration
	Simd4f translation = iterDt * curLinearVelocity;

	// gravity delta per iteration
	Simd4f gravity = load(array(cloth.mGravity)) * static_cast<Simd4f>(simd4f(sqr(mIterDtAverage)));

	// scale of local particle velocity per iteration
	Simd4f dampScale = exp2(load(array(cloth.mLogDamping)) * dampExponent);
	// adjust for the change in time step during the first iteration
	Simd4f firstDampScale = dampScale * simd4f(mIterDtRatio);

	// portion of negative frame velocity to transfer to particle
	Simd4f linearDrag = (gSimd4fOne - exp2(load(array(cloth.mLinearLogDrag)) * dampExponent)) * translation;

	// portion of frame acceleration to transfer to particle
	Simd4f linearInertia = load(array(cloth.mLinearInertia)) * iterDt * (prevLinearVelocity - curLinearVelocity);

	// for inertia, we want to violate newton physics to
	// match velocity and position as given by the user, which means:
	// vt = v0 + a * t and xt = x0 + v0 * t + (!) a * t^2
	// this is achieved by applying a different portion to cur and prev
	// position, compared to the normal +0.5 and -0.5 for '... 1/2 a*t^2'.
	// specifically, the portion is alpha=(n+1)/2n and 1-alpha.

	float linearAlpha = (mNumIterations + 1) * 0.5f * mInvNumIterations;
	Simd4f curLinearInertia = linearInertia * simd4f(linearAlpha);

	// rotate to local space (use mRotationMatrix temporarily to hold matrix)
	physx::PxMat44 invRotation = physx::PxMat44(mCurrentRotation.getConjugate());
	assign(result.mRotationMatrix, invRotation);

	// lane mask selecting xyz and zeroing w
	Simd4f maskXYZ = simd4f(simd4i(~0, ~0, ~0, 0));

	// Previously, we split the bias between previous and current position to
	// get correct discretized position and velocity. However, this made a
	// hanging cloth experience a downward velocity, which is problematic
	// when scaled by the iterDt ratio and results in jitter under variable
	// timesteps. Instead, we now apply the entire bias to current position
	// and accept a less noticeable error for a free falling cloth.

	Simd4f bias = gravity - linearDrag;
	result.mCurBias = transform(result.mRotationMatrix, curLinearInertia + bias) & maskXYZ;
	result.mPrevBias = transform(result.mRotationMatrix, linearInertia - curLinearInertia) & maskXYZ;

	Simd4f wind = load(array(cloth.mWind)) * iterDt;
	result.mWind = transform(result.mRotationMatrix, translation - wind) & maskXYZ;

	// turning if either the previous or current frame has angular velocity
	result.mIsTurning = mPrevAngularVelocity.magnitudeSquared() + cloth.mAngularVelocity.magnitudeSquared() > 0.0f;

	if (result.mIsTurning)
	{
		Simd4f curAngularVelocity = load(array(invRotation.rotate(cloth.mAngularVelocity)));
		Simd4f prevAngularVelocity = load(array(invRotation.rotate(mPrevAngularVelocity)));

		// rotation for one iteration in local space
		Simd4f curInvAngle = -iterDt * curAngularVelocity;
		Simd4f prevInvAngle = -iterDt * prevAngularVelocity;

		physx::PxQuat curInvRotation = exp(castToPxVec3(curInvAngle));
		physx::PxQuat prevInvRotation = exp(castToPxVec3(prevInvAngle));

		physx::PxMat44 curMatrix = physx::PxMat44(curInvRotation);
		physx::PxMat44 prevMatrix = physx::PxMat44(prevInvRotation * curInvRotation);

		assign(result.mRotationMatrix, curMatrix);

		Simd4f angularDrag = gSimd4fOne - exp2(load(array(cloth.mAngularLogDrag)) * dampExponent);
		Simd4f centrifugalInertia = load(array(cloth.mCentrifugalInertia));
		Simd4f angularInertia = load(array(cloth.mAngularInertia));
		Simd4f angularAcceleration = curAngularVelocity - prevAngularVelocity;

		Simd4f epsilon = simd4f(sqrtf(FLT_MIN)); // requirement: sqr(epsilon) > 0
		Simd4f velocityLengthSqr = lengthSqr(curAngularVelocity) + epsilon;
		Simd4f dragLengthSqr = lengthSqr(Simd4f(curAngularVelocity * angularDrag)) + epsilon;
		Simd4f centrifugalLengthSqr = lengthSqr(Simd4f(curAngularVelocity * centrifugalInertia)) + epsilon;
		Simd4f accelerationLengthSqr = lengthSqr(angularAcceleration) + epsilon;
		Simd4f inertiaLengthSqr = lengthSqr(Simd4f(angularAcceleration * angularInertia)) + epsilon;

		// scalar projections of the per-axis drag/inertia vectors onto the velocity
		float dragScale = array(rsqrt(velocityLengthSqr * dragLengthSqr) * dragLengthSqr)[0];
		float inertiaScale =
		    mInvNumIterations * array(rsqrt(accelerationLengthSqr * inertiaLengthSqr) * inertiaLengthSqr)[0];

		// magic factor found by comparing to global space simulation:
		// some centrifugal force is in inertia part, remainder is 2*(n-1)/n
		// after scaling the inertia part, we get for centrifugal:
		float centrifugalAlpha = (2 * mNumIterations - 1) * mInvNumIterations;
		float centrifugalScale =
		    centrifugalAlpha * array(rsqrt(velocityLengthSqr * centrifugalLengthSqr) * centrifugalLengthSqr)[0] -
		    inertiaScale;

		// slightly better in ClothCustomFloating than curInvAngle alone
		Simd4f centrifugalVelocity = (prevInvAngle + curInvAngle) * simd4f(0.5f);
		const Simd4f data = lengthSqr(centrifugalVelocity);
		float centrifugalSqrLength = array(data)[0] * centrifugalScale;

		// coriolis term as a cross-product (star/skew-symmetric) matrix
		Simd4f coriolisVelocity = centrifugalVelocity * simd4f(centrifugalScale);
		physx::PxMat33 coriolisMatrix = physx::shdfnd::star(castToPxVec3(coriolisVelocity));

		const float* dampScalePtr = array(firstDampScale);
		const float* centrifugalPtr = array(centrifugalVelocity);

		// fold damping, inertia, coriolis and centrifugal terms into the
		// previous/current position matrices (see derivation comment above)
		for (unsigned int j = 0; j < 3; ++j)
		{
			float centrifugalJ = -centrifugalPtr[j] * centrifugalScale;
			for (unsigned int i = 0; i < 3; ++i)
			{
				float damping = dampScalePtr[j];
				float coriolis = coriolisMatrix(i, j);
				float centrifugal = centrifugalPtr[i] * centrifugalJ;

				prevMatrix(i, j) = centrifugal - coriolis + curMatrix(i, j) * (inertiaScale - damping) -
				                   prevMatrix(i, j) * inertiaScale;
				curMatrix(i, j) = centrifugal + coriolis + curMatrix(i, j) * (inertiaScale + damping + dragScale);
			}
			curMatrix(j, j) += centrifugalSqrLength - inertiaScale - dragScale;
			prevMatrix(j, j) += centrifugalSqrLength;
		}

		assign(result.mPrevMatrix, prevMatrix);
		assign(result.mCurMatrix, curMatrix);
	}
	else
	{
		// non-turning: identity rotation encoded as -1 sentinel, plain damping
		Simd4f minusOne = -static_cast<Simd4f>(gSimd4fOne);
		result.mRotationMatrix[0] = minusOne;
		result.mPrevMatrix[0] = select(maskXYZ, firstDampScale, minusOne);
	}

	// difference of damp scale between first and other iterations
	result.mDampScaleUpdate = (dampScale - firstDampScale) & maskXYZ;

	return result;
}
+
// Advances the state by one solver iteration: re-rotates the cached bias and
// wind vectors into the new local frame, and removes the first-iteration
// time-step-ratio correction from the damping matrices.
template <typename Simd4f>
void cloth::IterationState<Simd4f>::update()
{
	if (mIsTurning)
	{
		// only need to turn bias, matrix is unaffected (todo: verify)
		mCurBias = transform(mRotationMatrix, mCurBias);
		mPrevBias = transform(mRotationMatrix, mPrevBias);
		mWind = transform(mRotationMatrix, mWind);
	}

	// remove time step ratio in damp scale after first iteration
	for (uint32_t i = 0; i < 3; ++i)
	{
		mPrevMatrix[i] = mPrevMatrix[i] - mRotationMatrix[i] * mDampScaleUpdate;
		mCurMatrix[i] = mCurMatrix[i] + mRotationMatrix[i] * mDampScaleUpdate;
	}
	mDampScaleUpdate = gSimd4fZero; // only once

	--mRemainingIterations;
}
+
+} // namespace nv
diff --git a/NvCloth/src/MovingAverage.h b/NvCloth/src/MovingAverage.h
new file mode 100644
index 0000000..f524d95
--- /dev/null
+++ b/NvCloth/src/MovingAverage.h
@@ -0,0 +1,151 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2017 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#pragma once
+
+#include "NvCloth/Allocator.h"
+#include <algorithm>
+#include "NvCloth/Callbacks.h"
+
+namespace nv
+{
+namespace cloth
+{
+
// Weighted moving average over (at most) the last mSize samples, stored
// run-length encoded: consecutive equal values share one entry.
struct MovingAverage
{
	struct Element
	{
		uint32_t mCount; // number of consecutive samples sharing mValue
		float mValue;
	};

  public:
	MovingAverage(uint32_t n = 1) : mCount(0), mSize(n)
	{
	}

	// True when no samples have been pushed yet.
	bool empty() const
	{
		return mData.empty();
	}

	// Window capacity (not the number of samples currently held).
	uint32_t size() const
	{
		return mSize;
	}

	// Changes the window capacity (must be > 0) and drops the oldest samples
	// that no longer fit.
	void resize(uint32_t n)
	{
		NV_CLOTH_ASSERT(n);
		mSize = n;
		trim();
	}

	// Discards all samples.
	void reset()
	{
		mData.resize(0);
		mCount = 0;
	}

	// Adds 'value' n times (clamped to the window size), merging into the
	// newest run when the value repeats.
	void push(uint32_t n, float value)
	{
		n = std::min(n, mSize);

		if (mData.empty() || mData.back().mValue != value)
		{
			Element element = { n, value };
			mData.pushBack(element);
		}
		else
		{
			mData.back().mCount += n;
		}

		mCount += n;
		trim();
	}

	// Returns the average of the held samples, with linear weight ramps over
	// the oldest and newest eighth of the window for smoother output.
	// Requires at least one sample (asserted).
	float average() const
	{
		NV_CLOTH_ASSERT(!mData.empty());

		// plain weighted sum over all runs
		float sum = 0.0f;
		Vector<Element>::Type::ConstIterator it = mData.begin(), end = mData.end();
		for (; it != end; ++it)
			sum += it->mCount * it->mValue;

		// linear weight ramps at both ends for smoother average
		uint32_t n = mCount / 8;
		float ramp = 0.0f, temp = 0.0f;
		uint32_t countLo = (it = mData.begin())->mCount;
		uint32_t countHi = (--end)->mCount;
		for (uint32_t i = 0; i < n; ++i)
		{
			// advance to the next run when the current one is exhausted
			if (i == countLo)
				countLo += (++it)->mCount;
			if (i == countHi)
				countHi += (--end)->mCount;

			temp += it->mValue + end->mValue;
			ramp += temp;
		}

		uint32_t num = (mCount - n) * (n + 1);
		return (sum * (n + 1) - ramp) / num;
	}

  private:
	// remove oldest (front) values until mCount<=mSize
	void trim()
	{
		// Walk runs from the front, consuming counts until the remainder fits;
		// a partially consumed run keeps only its overflow remainder.
		Vector<Element>::Type::Iterator it = mData.begin();
		for (uint32_t k = mSize; k < mCount; it += k <= mCount)
		{
			k += it->mCount;
			it->mCount = k - mCount;
		}

		if (it != mData.begin())
		{
			// compact by copying the surviving runs into a fresh vector
			Vector<Element>::Type tmp;
			tmp.assign(it, mData.end());
			tmp.swap(mData);
		}

		mCount = std::min(mCount, mSize);
	}

	Vector<Element>::Type mData; // run-length encoded samples, oldest first

	uint32_t mCount; // total samples currently represented
	uint32_t mSize;  // window capacity
};
+}
+}
diff --git a/NvCloth/src/NvSimd/NvSimd4f.h b/NvCloth/src/NvSimd/NvSimd4f.h
new file mode 100644
index 0000000..cb863e7
--- /dev/null
+++ b/NvCloth/src/NvSimd/NvSimd4f.h
@@ -0,0 +1,623 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2017 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#pragma once
+
+#include "NvSimdTypes.h"
+#include <float.h>
+#include <math.h>
+#include <string.h> // memcpy
+
+NV_SIMD_NAMESPACE_BEGIN
+
+// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+// factories
+// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+
+/*! \brief Creates Simd4f with all components set to zero.
+* \relates Simd4f */
+struct Simd4fZeroFactory
+{
+	// conversions are implemented by the platform-specific headers
+	// included at the bottom of this file
+	inline operator Simd4f() const;
+	inline operator Scalar4f() const;
+};
+
+/*! \brief Creates Simd4f with all components set to one.
+* \relates Simd4f */
+struct Simd4fOneFactory
+{
+	// conversions are implemented by the platform-specific headers
+	// included at the bottom of this file
+	inline operator Simd4f() const;
+	inline operator Scalar4f() const;
+};
+
+/*! \brief Replicates float into all four Simd4f components.
+* \relates Simd4f */
+struct Simd4fScalarFactory
+{
+	// captures the scalar by value; replication happens on conversion
+	explicit Simd4fScalarFactory(const float& s) : value(s)
+	{
+	}
+	Simd4fScalarFactory& operator = (const Simd4fScalarFactory&); // not implemented (const member)
+	inline operator Simd4f() const;
+	inline operator Scalar4f() const;
+
+	const float value;
+};
+
+/*! \brief Creates Simd4f from four floats.
+* \relates Simd4f */
+struct Simd4fTupleFactory
+{
+	// Stores the four float components into the 16-byte aligned tuple.
+	Simd4fTupleFactory(float x, float y, float z, float w)
+	// c++11: : tuple{ x, y, z, w }
+	{
+		tuple[0] = x, tuple[1] = y, tuple[2] = z, tuple[3] = w;
+	}
+	// Stores four raw 32-bit patterns (e.g. masks such as ~0u) into the
+	// float storage. memcpy is used instead of writing through a
+	// reinterpret_cast'ed unsigned* because a write to float storage
+	// through an incompatible pointer type is strict-aliasing undefined
+	// behavior; memcpy of the bit pattern is well defined.
+	Simd4fTupleFactory(unsigned x, unsigned y, unsigned z, unsigned w)
+	{
+		const unsigned bits[4] = { x, y, z, w };
+		memcpy(tuple, bits, sizeof(tuple));
+	}
+	Simd4fTupleFactory& operator = (const Simd4fTupleFactory&); // not implemented
+	inline operator Simd4f() const;
+	inline operator Scalar4f() const;
+	NV_SIMD_ALIGN(16, float) tuple[4];
+};
+
+/*! \brief Loads Simd4f from (unaligned) pointer.
+* \relates Simd4f */
+struct Simd4fLoadFactory
+{
+	// captures the pointer only; data is read when the factory is
+	// converted, so the pointer must stay valid until then
+	explicit Simd4fLoadFactory(const float* p) : ptr(p)
+	{
+	}
+	Simd4fLoadFactory& operator = (const Simd4fLoadFactory&); // not implemented (const member)
+	inline operator Simd4f() const;
+	inline operator Scalar4f() const;
+	const float* const ptr;
+};
+
+/*! \brief Loads Simd4f from (aligned) pointer.
+* \relates Simd4f */
+struct Simd4fAlignedLoadFactory
+{
+	// captures the pointer only; p must be 16 byte aligned and stay
+	// valid until the factory is converted
+	explicit Simd4fAlignedLoadFactory(const float* p) : ptr(p)
+	{
+	}
+	Simd4fAlignedLoadFactory& operator = (const Simd4fAlignedLoadFactory&); // not implemented (const member)
+	inline operator Simd4f() const;
+	inline operator Scalar4f() const;
+	const float* const ptr;
+};
+
+/*! \brief Loads Simd4f from (unaligned) pointer to 3 floats; the 4th component is set to zero.
+* \relates Simd4f */
+struct Simd4fLoad3Factory
+{
+	// captures a pointer to 3 floats; data is read on conversion
+	// (see load3(): the 4th component becomes zero)
+	explicit Simd4fLoad3Factory(const float* p) : ptr(p)
+	{
+	}
+	Simd4fLoad3Factory& operator = (const Simd4fLoad3Factory&); // not implemented (const member)
+	inline operator Simd4f() const;
+	inline operator Scalar4f() const;
+	const float* const ptr;
+};
+
+/*! \brief Loads Simd4f from (unaligned) pointer, which points to 3 floats in memory. 4th component will be initialized
+* with w
+* \relates Simd4f */
+struct Simd4fLoad3SetWFactory
+{
+	// captures a pointer to 3 floats plus an explicit w value that the
+	// conversion stores into the 4th component
+	explicit Simd4fLoad3SetWFactory(const float* p, const float wComponent) : ptr(p), w(wComponent)
+	{
+	}
+	Simd4fLoad3SetWFactory& operator = (const Simd4fLoad3SetWFactory&); // not implemented (const members)
+	inline operator Simd4f() const;
+	inline operator Scalar4f() const;
+	const float* const ptr;
+	const float w;
+};
+
+/*! \brief Loads Simd4f from (aligned) pointer with offset in bytes.
+* \relates Simd4f */
+struct Simd4fOffsetLoadFactory
+{
+	// captures pointer and byte offset; ptr+offset must be 16 byte
+	// aligned (see loadAligned(ptr, offset))
+	Simd4fOffsetLoadFactory(const float* p, unsigned int off) : ptr(p), offset(off)
+	{
+	}
+	Simd4fOffsetLoadFactory& operator = (const Simd4fOffsetLoadFactory&); // not implemented (const members)
+	inline operator Simd4f() const;
+	inline operator Scalar4f() const;
+	const float* const ptr;
+	const unsigned int offset;
+};
+
+// forward declaration
+struct Simd4iScalarFactory;
+struct Simd4iTupleFactory;
+
+// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+// expression templates
+// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+
+#if NV_SIMD_FUSE_MULTIPLY_ADD
+/*! \brief Expression template to fuse multiply-adds.
+* \relates Simd4f */
+struct ProductExpr
+{
+	// captures the two factors of a multiplication; the fused
+	// multiply-add is emitted when the expression is combined with
+	// operator+/- or converted to a plain Simd4f
+	inline ProductExpr(Simd4f const& v0_, Simd4f const& v1_) : v0(v0_), v1(v1_)
+	{
+	}
+	inline operator Simd4f() const;
+	const Simd4f v0, v1;
+
+  private:
+	ProductExpr& operator = (const ProductExpr&); // not implemented
+};
+#else // NV_SIMD_FUSE_MULTIPLY_ADD
+typedef Simd4f ProductExpr;
+#endif // NV_SIMD_FUSE_MULTIPLY_ADD
+
+// multiply-add expression templates
+inline Simd4f operator + (const ProductExpr&, const Simd4f&);
+inline Simd4f operator + (const Simd4f&, const ProductExpr&);
+inline Simd4f operator + (const ProductExpr&, const ProductExpr&);
+inline Simd4f operator - (const Simd4f&, const ProductExpr&);
+inline Simd4f operator - (const ProductExpr&, const ProductExpr&);
+
+// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+// operators
+// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+
+// note: operator?= missing because they don't have corresponding intrinsics.
+
+/*! \brief Test for equality of two vectors.
+* \return Vector of per element result mask (all bits set for 'true', none set for 'false').
+* \note QNaNs aren't handled on SPU: comparing two QNaNs will return true.
+* \relates Simd4f */
+inline Simd4f operator == (const Simd4f& v0, const Simd4f& v1);
+
+// no operator!= because VMX128 does not support it, use ~operator== and handle QNaNs
+
+/*! \brief Less-compare all elements of two vectors.
+* \return Vector of per element result mask (all bits set for 'true', none set for 'false').
+* \note QNaNs aren't handled on SPU: comparisons against QNaNs don't necessarily return false.
+* \relates Simd4f */
+inline Simd4f operator<(const Simd4f& v0, const Simd4f& v1);
+
+/*! \brief Less-or-equal-compare all elements of two vectors.
+* \return Vector of per element result mask (all bits set for 'true', none set for 'false').
+* \note QNaNs aren't handled on SPU: comparisons against QNaNs don't necessarily return false.
+* \relates Simd4f */
+inline Simd4f operator <= (const Simd4f& v0, const Simd4f& v1);
+
+/*! \brief Greater-compare all elements of two vectors.
+* \return Vector of per element result mask (all bits set for 'true', none set for 'false').
+* \note QNaNs aren't handled on SPU: comparisons against QNaNs don't necessarily return false.
+* \relates Simd4f */
+inline Simd4f operator>(const Simd4f& v0, const Simd4f& v1);
+
+/*! \brief Greater-or-equal-compare all elements of two vectors.
+* \return Vector of per element result mask (all bits set for 'true', none set for 'false').
+* \note QNaNs aren't handled on SPU: comparisons against QNaNs don't necessarily return false.
+* \relates Simd4f */
+inline Simd4f operator >= (const Simd4f& v0, const Simd4f& v1);
+
+/*! \brief Vector bit-wise NOT operator
+* \return A vector holding the bit-negate of \a v.
+* \relates Simd4f */
+inline ComplementExpr<Simd4f> operator~(const Simd4f& v);
+
+/*! \brief Vector bit-wise AND operator
+* \return A vector holding the bit-wise AND of \a v0 and \a v1.
+* \relates Simd4f */
+inline Simd4f operator&(const Simd4f& v0, const Simd4f& v1);
+
+/*! \brief Vector bit-wise OR operator
+* \return A vector holding the bit-wise OR of \a v0 and \a v1.
+* \relates Simd4f */
+inline Simd4f operator|(const Simd4f& v0, const Simd4f& v1);
+
+/*! \brief Vector bit-wise XOR operator
+* \return A vector holding the bit-wise XOR of \a v0 and \a v1.
+* \relates Simd4f */
+inline Simd4f operator^(const Simd4f& v0, const Simd4f& v1);
+
+/*! \brief Vector logical left shift.
+* \return A vector with 4 elements of \a v0, each shifted left by \a shift bits.
+* \relates Simd4f */
+inline Simd4f operator<<(const Simd4f& v, int shift);
+
+/*! \brief Vector logical right shift.
+* \return A vector with 4 elements of \a v0, each shifted right by \a shift bits.
+* \relates Simd4f */
+inline Simd4f operator>>(const Simd4f& v, int shift);
+
+#if NV_SIMD_SHIFT_BY_VECTOR
+/*! \brief Vector logical left shift.
+* \return A vector with 4 elements of \a v0, each shifted left by \a shift bits.
+* \relates Simd4f */
+inline Simd4f operator<<(const Simd4f& v, const Simd4f& shift);
+
+/*! \brief Vector logical right shift.
+* \return A vector with 4 elements of \a v0, each shifted right by \a shift bits.
+* \relates Simd4f */
+inline Simd4f operator>>(const Simd4f& v, const Simd4f& shift);
+#endif
+
+/*! \brief Unary vector addition operator.
+* \return A vector holding the component-wise copy of \a v.
+* \relates Simd4f */
+inline Simd4f operator + (const Simd4f& v);
+
+/*! \brief Vector addition operator
+* \return A vector holding the component-wise sum of \a v0 and \a v1.
+* \relates Simd4f */
+inline Simd4f operator + (const Simd4f& v0, const Simd4f& v1);
+
+/*! \brief Unary vector negation operator.
+* \return A vector holding the component-wise negation of \a v.
+* \relates Simd4f */
+inline Simd4f operator - (const Simd4f& v);
+
+/*! \brief Vector subtraction operator.
+* \return A vector holding the component-wise difference of \a v0 and \a v1.
+* \relates Simd4f */
+inline Simd4f operator - (const Simd4f& v0, const Simd4f& v1);
+
+/*! \brief Vector multiplication.
+* \return Element-wise product of \a v0 and \a v1.
+* \note For VMX, returns expression template to fuse multiply-add.
+* \relates Simd4f */
+inline ProductExpr operator*(const Simd4f& v0, const Simd4f& v1);
+
+/*! \brief Vector division.
+* \return Element-wise division of \a v0 and \a v1.
+* \relates Simd4f */
+inline Simd4f operator/(const Simd4f& v0, const Simd4f& v1);
+
+// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+// functions
+// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+
+/*! \brief Load float value into all vector components.
+* \relates Simd4f */
+inline Simd4fScalarFactory simd4f(const float& s)
+{
+	// wrap the scalar; replication happens in the factory's conversion
+	Simd4fScalarFactory factory(s);
+	return factory;
+}
+
+/*! \brief Load 4 float values into vector.
+* \relates Simd4f */
+inline Simd4fTupleFactory simd4f(float x, float y, float z, float w)
+{
+	// wrap the four components; the vector is built on conversion
+	Simd4fTupleFactory factory(x, y, z, w);
+	return factory;
+}
+
+/*! \brief Reinterpret Simd4i as Simd4f.
+* \return A copy of \a v, but reinterpreted as Simd4f.
+* \relates Simd4f */
+inline Simd4f simd4f(const Simd4i& v);
+
+/*! \brief Reinterpret Simd4iScalarFactory as Simd4fScalarFactory.
+* \relates Simd4f */
+inline Simd4fScalarFactory simd4f(const Simd4iScalarFactory& v)
+{
+	// NOTE(review): relies on Simd4iScalarFactory and Simd4fScalarFactory
+	// being layout-compatible (single 4-byte value member); the int bits
+	// are reinterpreted, not converted — use convert() for value conversion
+	return reinterpret_cast<const Simd4fScalarFactory&>(v);
+}
+
+/*! \brief Reinterpret Simd4iTupleFactory as Simd4fTupleFactory.
+* \relates Simd4f */
+inline Simd4fTupleFactory simd4f(const Simd4iTupleFactory& v)
+{
+	// NOTE(review): relies on the two tuple factories being
+	// layout-compatible (aligned 4-element 32-bit tuple); bits are
+	// reinterpreted, not converted
+	return reinterpret_cast<const Simd4fTupleFactory&>(v);
+}
+
+/*! \brief Convert Simd4i to Simd4f.
+* \relates Simd4f */
+inline Simd4f convert(const Simd4i& v);
+
+/*! \brief Return reference to contiguous array of vector elements
+* \relates Simd4f */
+inline float (&array(Simd4f& v))[4];
+
+/*! \brief Return constant reference to contiguous array of vector elements
+* \relates Simd4f */
+inline const float (&array(const Simd4f& v))[4];
+
+/*! \brief Create vector from float array.
+* \relates Simd4f */
+inline Simd4fLoadFactory load(const float* ptr)
+{
+	// capture the (possibly unaligned) pointer; the load itself
+	// happens when the factory is converted
+	Simd4fLoadFactory factory(ptr);
+	return factory;
+}
+
+/*! \brief Create vector from aligned float array.
+* \note \a ptr needs to be 16 byte aligned.
+* \relates Simd4f */
+inline Simd4fAlignedLoadFactory loadAligned(const float* ptr)
+{
+	// capture the 16-byte-aligned pointer; load happens on conversion
+	Simd4fAlignedLoadFactory factory(ptr);
+	return factory;
+}
+
+/*! \brief Create vector from float[3] \a ptr array. 4th component of simd4f will be equal to 0.0
+* \relates Simd4f */
+inline Simd4fLoad3Factory load3(const float* ptr)
+{
+	// capture a pointer to 3 floats; w becomes zero on conversion
+	Simd4fLoad3Factory factory(ptr);
+	return factory;
+}
+
+/*! \brief Create vector from float[3] \a ptr array and extra \a wComponent
+* \relates Simd4f */
+inline Simd4fLoad3SetWFactory load3(const float* ptr, const float wComponent)
+{
+	// capture a pointer to 3 floats plus the explicit w component
+	Simd4fLoad3SetWFactory factory(ptr, wComponent);
+	return factory;
+}
+
+/*! \brief Create vector from aligned float array.
+* \param offset pointer offset in bytes.
+* \note \a ptr+offset needs to be 16 byte aligned.
+* \relates Simd4f */
+inline Simd4fOffsetLoadFactory loadAligned(const float* ptr, unsigned int offset)
+{
+	// capture pointer and byte offset; ptr+offset must be 16 byte aligned
+	Simd4fOffsetLoadFactory factory(ptr, offset);
+	return factory;
+}
+
+/*! \brief Store vector \a v to float array \a ptr.
+* \relates Simd4f */
+inline void store(float* ptr, Simd4f const& v);
+
+/*! \brief Store vector \a v to float[3] array \a ptr.
+* \relates Simd4f */
+inline void store3(float* ptr, Simd4f const& v);
+
+/*! \brief Store vector \a v to aligned float array \a ptr.
+* \note \a ptr needs to be 16 byte aligned.
+* \relates Simd4f */
+inline void storeAligned(float* ptr, Simd4f const& v);
+
+/*! \brief Store vector \a v to aligned float array \a ptr.
+* \param offset pointer offset in bytes.
+* \note \a ptr+offset needs to be 16 byte aligned.
+* \relates Simd4f */
+inline void storeAligned(float* ptr, unsigned int offset, Simd4f const& v);
+
+/*! \brief replicate i-th component into all vector components.
+* \return Vector with all elements set to \a v[i].
+* \relates Simd4f */
+template <size_t i>
+inline Simd4f splat(Simd4f const& v);
+
+/*! \brief Select \a v0 or \a v1 based on \a mask.
+* \return mask ? v0 : v1
+* \relates Simd4f */
+inline Simd4f select(Simd4f const& mask, Simd4f const& v0, Simd4f const& v1);
+
+/*! \brief Per element absolute value.
+* \return Vector with absolute values of \a v.
+* \relates Simd4f */
+inline Simd4f abs(const Simd4f& v);
+
+/*! \brief Per element floor value.
+* \note Result undefined for values outside of the integer range.
+* \note Translates to 6 instructions on SSE and NEON.
+* \relates Simd4f */
+inline Simd4f floor(const Simd4f& v);
+
+#if !defined max
+/*! \brief Per-component maximum of two vectors
+* \note Result undefined for QNaN elements.
+* \relates Simd4f */
+inline Simd4f max(const Simd4f& v0, const Simd4f& v1);
+#endif
+
+#if !defined min
+/*! \brief Per-component minimum of two vectors
+* \note Result undefined for QNaN elements.
+* \relates Simd4f */
+inline Simd4f min(const Simd4f& v0, const Simd4f& v1);
+#endif
+
+/*! \brief Return reciprocal estimate of a vector.
+* \return Vector of per-element reciprocal estimate.
+* \relates Simd4f */
+inline Simd4f recip(const Simd4f& v);
+
+/*! \brief Return reciprocal of a vector.
+* \return Vector of per-element reciprocal.
+* \note Performs \a n Newton-Raphson iterations on initial estimate.
+* \relates Simd4f */
+template <int n>
+inline Simd4f recip(const Simd4f& v);
+
+/*! \brief Return square root of a vector.
+* \return Vector of per-element square root.
+* \note Result undefined for negative elements.
+* \relates Simd4f */
+inline Simd4f sqrt(const Simd4f& v);
+
+/*! \brief Return inverse square root estimate of a vector.
+* \return Vector of per-element inverse square root estimate.
+* \note Result undefined for negative, zero, and infinity elements.
+* \relates Simd4f */
+inline Simd4f rsqrt(const Simd4f& v);
+
+/*! \brief Return inverse square root of a vector.
+* \return Vector of per-element inverse square root.
+* \note Performs \a n Newton-Raphson iterations on initial estimate.
+* \note The result is undefined for negative and infinity elements.
+* \relates Simd4f */
+template <int n>
+inline Simd4f rsqrt(const Simd4f& v);
+
+/*! \brief Return 2 raised to the power of v.
+* \note Result only defined for finite elements.
+* \relates Simd4f */
+inline Simd4f exp2(const Simd4f& v);
+
+/*! \brief Return logarithm of v to base 2.
+* \note Result undefined for QNaN elements.
+* \relates Simd4f */
+inline Simd4f log2(const Simd4f& v);
+
+/*! \brief Return dot product of two 3-vectors.
+* \note The result is replicated across all 4 components.
+* \relates Simd4f */
+inline Simd4f dot3(const Simd4f& v0, const Simd4f& v1);
+
+/*! \brief Return cross product of two 3-vectors.
+* \note The 4th component is undefined.
+* \note Result only defined for finite x, y, and z values.
+* \relates Simd4f */
+inline Simd4f cross3(const Simd4f& v0, const Simd4f& v1);
+
+/*! \brief Transposes 4x4 matrix represented by \a x, \a y, \a z, and \a w.
+* \relates Simd4f */
+inline void transpose(Simd4f& x, Simd4f& y, Simd4f& z, Simd4f& w);
+
+/*! \brief Interleave elements.
+* \a v0 becomes {x0, x1, y0, y1}, v1 becomes {z0, z1, w0, w1}.
+* \relates Simd4f */
+inline void zip(Simd4f& v0, Simd4f& v1);
+
+/*! \brief De-interleave elements.
+* \a v0 becomes {x0, z0, x1, z1}, v1 becomes {y0, w0, y1, w1}.
+* \relates Simd4f */
+inline void unzip(Simd4f& v0, Simd4f& v1);
+
+/*! \brief Swaps quad words.
+* Returns {z0, w0, x0, y0}
+* \relates Simd4f */
+inline Simd4f swaphilo(const Simd4f& v);
+
+/*! \brief returns non-zero if all elements of \a v0 and \a v1 are equal
+* \note QNaNs aren't handled on SPU: comparing two QNaNs will return true.
+* \relates Simd4f */
+inline int allEqual(const Simd4f& v0, const Simd4f& v1);
+
+/*! \brief returns non-zero if all elements of \a v0 and \a v1 are equal
+* \param outMask holds the result of \a v0 == \a v1.
+* \note QNaNs aren't handled on SPU: comparing two QNaNs will return true.
+* \relates Simd4f */
+inline int allEqual(const Simd4f& v0, const Simd4f& v1, Simd4f& outMask);
+
+/*! \brief returns non-zero if any elements of \a v0 and \a v1 are equal
+* \note QNaNs aren't handled on SPU: comparing two QNaNs will return true.
+* \relates Simd4f */
+inline int anyEqual(const Simd4f& v0, const Simd4f& v1);
+
+/*! \brief returns non-zero if any elements of \a v0 and \a v1 are equal
+* \param outMask holds the result of \a v0 == \a v1.
+* \note QNaNs aren't handled on SPU: comparing two QNaNs will return true.
+* \relates Simd4f */
+inline int anyEqual(const Simd4f& v0, const Simd4f& v1, Simd4f& outMask);
+
+/*! \brief returns non-zero if all elements of \a v0 and \a v1 are greater
+* \note QNaNs aren't handled on SPU: comparisons against QNaNs don't necessarily return false.
+* \relates Simd4f */
+inline int allGreater(const Simd4f& v0, const Simd4f& v1);
+
+/*! \brief returns non-zero if all elements of \a v0 and \a v1 are greater
+* \param outMask holds the result of \a v0 > \a v1.
+* \note QNaNs aren't handled on SPU: comparisons against QNaNs don't necessarily return false.
+* \relates Simd4f */
+inline int allGreater(const Simd4f& v0, const Simd4f& v1, Simd4f& outMask);
+
+/*! \brief returns non-zero if any elements of \a v0 and \a v1 are greater
+* \note QNaNs aren't handled on SPU: comparisons against QNaNs don't necessarily return false.
+* \relates Simd4f */
+inline int anyGreater(const Simd4f& v0, const Simd4f& v1);
+
+/*! \brief returns non-zero if any elements of \a v0 and \a v1 are greater
+* \param outMask holds the result of \a v0 > \a v1.
+* \note QNaNs aren't handled on SPU: comparisons against QNaNs don't necessarily return false.
+* \relates Simd4f */
+inline int anyGreater(const Simd4f& v0, const Simd4f& v1, Simd4f& outMask);
+
+/*! \brief returns non-zero if all elements of \a v0 and \a v1 are greater or equal
+* \note QNaNs aren't handled on SPU: comparisons against QNaNs don't necessarily return false.
+* \relates Simd4f */
+inline int allGreaterEqual(const Simd4f& v0, const Simd4f& v1);
+
+/*! \brief returns non-zero if all elements of \a v0 and \a v1 are greater or equal
+* \param outMask holds the result of \a v0 >= \a v1.
+* \note QNaNs aren't handled on SPU: comparisons against QNaNs don't necessarily return false.
+* \relates Simd4f */
+inline int allGreaterEqual(const Simd4f& v0, const Simd4f& v1, Simd4f& outMask);
+
+/*! \brief returns non-zero if any elements of \a v0 and \a v1 are greater or equal
+* \note QNaNs aren't handled on SPU: comparisons against QNaNs don't necessarily return false.
+* \relates Simd4f */
+inline int anyGreaterEqual(const Simd4f& v0, const Simd4f& v1);
+
+/*! \brief returns non-zero if any elements of \a v0 and \a v1 are greater or equal
+* \param outMask holds the result of \a v0 >= \a v1.
+* \note QNaNs aren't handled on SPU: comparisons against QNaNs don't necessarily return false.
+* \relates Simd4f */
+inline int anyGreaterEqual(const Simd4f& v0, const Simd4f& v1, Simd4f& outMask);
+
+/*! \brief returns non-zero if all elements are true
+* \note Undefined if parameter is not result of a comparison.
+* \relates Simd4f */
+inline int allTrue(const Simd4f& v);
+
+/*! \brief returns non-zero if any element is true
+* \note Undefined if parameter is not result of a comparison.
+* \relates Simd4f */
+inline int anyTrue(const Simd4f& v);
+
+// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+// constants
+// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+
+NV_SIMD_GLOBAL_CONSTANT Simd4fZeroFactory gSimd4fZero = Simd4fZeroFactory();
+NV_SIMD_GLOBAL_CONSTANT Simd4fOneFactory gSimd4fOne = Simd4fOneFactory();
+NV_SIMD_GLOBAL_CONSTANT Simd4fScalarFactory gSimd4fMinusOne = simd4f(-1.0f);
+NV_SIMD_GLOBAL_CONSTANT Simd4fScalarFactory gSimd4fHalf = simd4f(0.5f);
+NV_SIMD_GLOBAL_CONSTANT Simd4fScalarFactory gSimd4fTwo = simd4f(2.0f);
+NV_SIMD_GLOBAL_CONSTANT Simd4fScalarFactory gSimd4fPi = simd4f(3.14159265358979323846f);
+NV_SIMD_GLOBAL_CONSTANT Simd4fScalarFactory gSimd4fEpsilon = simd4f(FLT_EPSILON);
+NV_SIMD_GLOBAL_CONSTANT Simd4fScalarFactory gSimd4fFloatMax = simd4f(FLT_MAX);
+NV_SIMD_GLOBAL_CONSTANT Simd4fTupleFactory gSimd4fMaskX = Simd4fTupleFactory(~0u, 0u, 0u, 0u);
+NV_SIMD_GLOBAL_CONSTANT Simd4fTupleFactory gSimd4fMaskXYZ = Simd4fTupleFactory(~0u, ~0u, ~0u, 0u);
+
+NV_SIMD_NAMESPACE_END
+
+// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+// platform specific includes
+// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+
+#if NV_SIMD_SSE2
+#include "sse2/NvSse2Simd4f.h"
+#elif NV_SIMD_NEON
+#include "neon/NvNeonSimd4f.h"
+#endif
+
+#if NV_SIMD_SCALAR
+#include "scalar/NvScalarSimd4f.h"
+#endif
diff --git a/NvCloth/src/NvSimd/NvSimd4i.h b/NvCloth/src/NvSimd/NvSimd4i.h
new file mode 100644
index 0000000..3da6169
--- /dev/null
+++ b/NvCloth/src/NvSimd/NvSimd4i.h
@@ -0,0 +1,365 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2017 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#pragma once
+
+#include "NvSimdTypes.h"
+
+NV_SIMD_NAMESPACE_BEGIN
+
+/*! \brief Creates Simd4i with all components set to zero.
+* \relates Simd4i */
+struct Simd4iZeroFactory
+{
+	// conversions are implemented by the platform-specific headers
+	inline operator Simd4i() const;
+	inline operator Scalar4i() const;
+};
+
+/*! \brief Replicates int into all four Simd4i components.
+* \relates Simd4i */
+struct Simd4iScalarFactory
+{
+	// captures the scalar by value; replication happens on conversion
+	explicit Simd4iScalarFactory(const int& s) : value(s)
+	{
+	}
+	Simd4iScalarFactory& operator = (const Simd4iScalarFactory&); // not implemented (const member)
+	inline operator Simd4i() const;
+	inline operator Scalar4i() const;
+	const int value;
+};
+
+/*! \brief Creates Simd4i from four ints.
+* \relates Simd4i */
+struct Simd4iTupleFactory
+{
+	// Stores the four int components into the 16-byte aligned tuple.
+	Simd4iTupleFactory(int x, int y, int z, int w)
+	// c++11: : tuple{ x, y, z, w }
+	{
+		tuple[0] = x, tuple[1] = y, tuple[2] = z, tuple[3] = w;
+	}
+	Simd4iTupleFactory& operator = (const Simd4iTupleFactory&); // not implemented
+	inline operator Simd4i() const;
+	inline operator Scalar4i() const;
+	NV_SIMD_ALIGN(16, int) tuple[4];
+};
+
+/*! \brief Loads Simd4i from (unaligned) pointer.
+* \relates Simd4i */
+struct Simd4iLoadFactory
+{
+	// captures the pointer only; data is read when the factory is
+	// converted, so the pointer must stay valid until then
+	explicit Simd4iLoadFactory(const int* p) : ptr(p)
+	{
+	}
+	Simd4iLoadFactory& operator = (const Simd4iLoadFactory&); // not implemented (const member)
+	inline operator Simd4i() const;
+	inline operator Scalar4i() const;
+	const int* const ptr;
+};
+
+/*! \brief Loads Simd4i from (aligned) pointer.
+* \relates Simd4i */
+struct Simd4iAlignedLoadFactory
+{
+	// captures the pointer only; p must be 16 byte aligned and stay
+	// valid until the factory is converted
+	explicit Simd4iAlignedLoadFactory(const int* p) : ptr(p)
+	{
+	}
+	Simd4iAlignedLoadFactory& operator = (const Simd4iAlignedLoadFactory&); // not implemented (const member)
+	inline operator Simd4i() const;
+	inline operator Scalar4i() const;
+	const int* const ptr;
+};
+
+/*! \brief Loads Simd4i from (aligned) pointer with offset.
+* \relates Simd4i */
+struct Simd4iOffsetLoadFactory
+{
+	// captures pointer and byte offset; ptr+offset must be 16 byte aligned
+	Simd4iOffsetLoadFactory(const int* p, unsigned int off) : ptr(p), offset(off)
+	{
+	}
+	Simd4iOffsetLoadFactory& operator = (const Simd4iOffsetLoadFactory&); // not implemented (const members)
+	inline operator Simd4i() const;
+	inline operator Scalar4i() const;
+	const int* const ptr;
+	const unsigned int offset;
+};
+
+// map Simd4f/Scalar4f to Simd4i/Scalar4i
+template <typename>
+struct Simd4fToSimd4i;
+template <>
+struct Simd4fToSimd4i<Simd4f>
+{
+ typedef Simd4i Type;
+};
+template <>
+struct Simd4fToSimd4i<Scalar4f>
+{
+ typedef Scalar4i Type;
+};
+
+// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+// operators
+// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+
+/*! \brief Vector bit-wise NOT operator
+* \return A vector holding the bit-negate of \a v.
+* \relates Simd4i */
+inline ComplementExpr<Simd4i> operator~(const Simd4i& v);
+
+/*! \brief Vector bit-wise AND operator
+* \return A vector holding the bit-wise AND of \a v0 and \a v1.
+* \relates Simd4i */
+inline Simd4i operator&(const Simd4i& v0, const Simd4i& v1);
+
+/*! \brief Vector bit-wise OR operator
+* \return A vector holding the bit-wise OR of \a v0 and \a v1.
+* \relates Simd4i */
+inline Simd4i operator|(const Simd4i& v0, const Simd4i& v1);
+
+/*! \brief Vector bit-wise XOR operator
+* \return A vector holding the bit-wise XOR of \a v0 and \a v1.
+* \relates Simd4i */
+inline Simd4i operator^(const Simd4i& v0, const Simd4i& v1);
+
+/*! \brief Vector logical left shift.
+* \return A vector with 4 elements of \a v0, each shifted left by \a shift bits.
+* \relates Simd4i */
+inline Simd4i operator<<(const Simd4i& v, int shift);
+
+/*! \brief Vector logical right shift.
+* \return A vector with 4 elements of \a v0, each shifted right by \a shift bits.
+* \relates Simd4i */
+inline Simd4i operator>>(const Simd4i& v, int shift);
+
+#if NV_SIMD_SHIFT_BY_VECTOR
+
+/*! \brief Vector logical left shift.
+* \return A vector with 4 elements of \a v0, each shifted left by \a shift bits.
+* \relates Simd4i */
+inline Simd4i operator<<(const Simd4i& v, const Simd4i& shift);
+
+/*! \brief Vector logical right shift.
+* \return A vector with 4 elements of \a v0, each shifted right by \a shift bits.
+* \relates Simd4i */
+inline Simd4i operator>>(const Simd4i& v, const Simd4i& shift);
+
+#endif // NV_SIMD_SHIFT_BY_VECTOR
+
+// note: operator?= missing because they don't have corresponding intrinsics.
+
+/*! \brief Test for equality of two vectors.
+* \return Vector of per element result mask (all bits set for 'true', none set for 'false').
+* \relates Simd4i */
+inline Simd4i operator == (const Simd4i& v0, const Simd4i& v1);
+
+// no !=, <=, >= because VMX128/SSE don't support it, use ~operator== etc.
+
+/*! \brief Less-compare all elements of two *signed* vectors.
+* \return Vector of per element result mask (all bits set for 'true', none set for 'false').
+* \relates Simd4i */
+inline Simd4i operator<(const Simd4i& v0, const Simd4i& v1);
+
+/*! \brief Greater-compare all elements of two *signed* vectors.
+* \return Vector of per element result mask (all bits set for 'true', none set for 'false').
+* \relates Simd4i */
+inline Simd4i operator>(const Simd4i& v0, const Simd4i& v1);
+
+/*! \brief Unary vector addition operator.
+* \return A vector holding the component-wise copy of \a v.
+* \relates Simd4i */
+inline Simd4i operator + (const Simd4i& v);
+
+/*! \brief Vector addition operator
+* \return A vector holding the component-wise sum of \a v0 and \a v1.
+* \relates Simd4i */
+inline Simd4i operator + (const Simd4i& v0, const Simd4i& v1);
+
+/*! \brief Unary vector negation operator.
+* \return A vector holding the component-wise negation of \a v.
+* \relates Simd4i */
+inline Simd4i operator - (const Simd4i& v);
+
+/*! \brief Vector subtraction operator.
+* \return A vector holding the component-wise difference of \a v0 and \a v1.
+* \relates Simd4i */
+inline Simd4i operator - (const Simd4i& v0, const Simd4i& v1);
+
+// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+// functions
+// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+
+/*! \brief Load int value into all vector components.
+* \relates Simd4i */
+inline Simd4iScalarFactory simd4i(const int& s)
+{
+	// wrap the scalar; replication happens in the factory's conversion
+	Simd4iScalarFactory factory(s);
+	return factory;
+}
+
+/*! \brief Load 4 int values into vector.
+* \relates Simd4i */
+inline Simd4iTupleFactory simd4i(int x, int y, int z, int w)
+{
+	// wrap the four components; the vector is built on conversion
+	Simd4iTupleFactory factory(x, y, z, w);
+	return factory;
+}
+
+/*! \brief Reinterpret Simd4f as Simd4i.
+* \return A copy of \a v, but reinterpreted as Simd4i.
+* \relates Simd4i */
+inline Simd4i simd4i(const Simd4f& v);
+
+/*! \brief Truncate Simd4f to Simd4i.
+* \relates Simd4i */
+inline Simd4i truncate(const Simd4f& v);
+
+/*! \brief return reference to contiguous array of vector elements
+* \relates Simd4i */
+inline int (&array(Simd4i& v))[4];
+
+/*! \brief return constant reference to contiguous array of vector elements
+* \relates Simd4i */
+inline const int (&array(const Simd4i& v))[4];
+
+/*! \brief Create vector from int array.
+* \relates Simd4i */
+inline Simd4iLoadFactory load(const int* ptr)
+{
+ return Simd4iLoadFactory(ptr);
+}
+
+/*! \brief Create vector from aligned int array.
+* \note \a ptr needs to be 16 byte aligned.
+* \relates Simd4i */
+inline Simd4iAlignedLoadFactory loadAligned(const int* ptr)
+{
+ return Simd4iAlignedLoadFactory(ptr);
+}
+
+/*! \brief Create vector from aligned int array.
+* \param offset pointer offset in bytes.
+* \note \a ptr+offset needs to be 16 byte aligned.
+* \relates Simd4i */
+inline Simd4iOffsetLoadFactory loadAligned(const int* ptr, unsigned int offset)
+{
+ return Simd4iOffsetLoadFactory(ptr, offset);
+}
+
+/*! \brief Store vector \a v to int array \a ptr.
+* \relates Simd4i */
+inline void store(int* ptr, const Simd4i& v);
+
+/*! \brief Store vector \a v to aligned int array \a ptr.
+* \note \a ptr needs to be 16 byte aligned.
+* \relates Simd4i */
+inline void storeAligned(int* ptr, const Simd4i& v);
+
+/*! \brief Store vector \a v to aligned int array \a ptr.
+* \param offset pointer offset in bytes.
+* \note \a ptr+offset needs to be 16 byte aligned.
+* \relates Simd4i */
+inline void storeAligned(int* ptr, unsigned int offset, const Simd4i& v);
+
+/*! \brief replicate i-th component into all vector components.
+* \return Vector with all elements set to \a v[i].
+* \relates Simd4i */
+template <size_t i>
+inline Simd4i splat(const Simd4i& v);
+
+/*! \brief Select \a v0 or \a v1 based on \a mask.
+* \return mask ? v0 : v1
+* \relates Simd4i */
+inline Simd4i select(const Simd4i& mask, const Simd4i& v0, const Simd4i& v1);
+
+/*! \brief returns non-zero if all elements of \a v0 and \a v1 are equal
+* \relates Simd4i */
+inline int allEqual(const Simd4i& v0, const Simd4i& v1);
+
+/*! \brief returns non-zero if all elements of \a v0 and \a v1 are equal
+* \param outMask holds the result of \a v0 == \a v1.
+* \relates Simd4i */
+inline int allEqual(const Simd4i& v0, const Simd4i& v1, Simd4i& outMask);
+
+/*! \brief returns non-zero if any elements of \a v0 and \a v1 are equal
+* \relates Simd4i */
+inline int anyEqual(const Simd4i& v0, const Simd4i& v1);
+
+/*! \brief returns non-zero if any elements of \a v0 and \a v1 are equal
+* \param outMask holds the result of \a v0 == \a v1.
+* \relates Simd4i */
+inline int anyEqual(const Simd4i& v0, const Simd4i& v1, Simd4i& outMask);
+
+/*! \brief returns non-zero if all *signed* elements of \a v0 are greater than those of \a v1
+* \relates Simd4i */
+inline int allGreater(const Simd4i& v0, const Simd4i& v1);
+
+/*! \brief returns non-zero if all *signed* elements of \a v0 are greater than those of \a v1
+* \param outMask holds the result of \a v0 > \a v1.
+* \relates Simd4i */
+inline int allGreater(const Simd4i& v0, const Simd4i& v1, Simd4i& outMask);
+
+/*! \brief returns non-zero if any *signed* elements of \a v0 are greater than those of \a v1
+* \relates Simd4i */
+inline int anyGreater(const Simd4i& v0, const Simd4i& v1);
+
+/*! \brief returns non-zero if any *signed* elements of \a v0 are greater than those of \a v1
+* \param outMask holds the result of \a v0 > \a v1.
+* \relates Simd4i */
+inline int anyGreater(const Simd4i& v0, const Simd4i& v1, Simd4i& outMask);
+
+/*! \brief returns non-zero if all elements are true
+* \note undefined if parameter is not result of a comparison.
+* \relates Simd4i */
+inline int allTrue(const Simd4i& v);
+
+/*! \brief returns non-zero if any element is true
+* \note undefined if parameter is not result of a comparison.
+* \relates Simd4i */
+inline int anyTrue(const Simd4i& v);
+
+// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+// constants
+// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+
+NV_SIMD_GLOBAL_CONSTANT Simd4iZeroFactory gSimd4iZero = Simd4iZeroFactory();
+NV_SIMD_GLOBAL_CONSTANT Simd4iScalarFactory gSimd4iOne = simd4i(1);
+
+NV_SIMD_NAMESPACE_END
+
+// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+// platform specific includes
+// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+
+#if NV_SIMD_SSE2
+#include "sse2/NvSse2Simd4i.h"
+#elif NV_SIMD_NEON
+#include "neon/NvNeonSimd4i.h"
+#endif
+
+#if NV_SIMD_SCALAR
+#include "scalar/NvScalarSimd4i.h"
+#endif
diff --git a/NvCloth/src/NvSimd/NvSimdTypes.h b/NvCloth/src/NvSimd/NvSimdTypes.h
new file mode 100644
index 0000000..57b4c36
--- /dev/null
+++ b/NvCloth/src/NvSimd/NvSimdTypes.h
@@ -0,0 +1,226 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2017 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#pragma once
+
+/*! @file
+This set of files provides an abstraction to SSE2 and NEON SIMD instructions and provides
+a scalar fallback for other architectures. The documentation of Simd4f and Simd4i contain
+everything to get started.
+
+The following design choices have been made:
+- Use typedef for SSE2 data on MSVC (implies global namespace, see NV_SIMD_USE_NAMESPACE for options)
+- Exposing SIMD types as float/integer values as well as bit patterns
+- Free functions and overloaded operators for better code readability
+- Expression templates for common use cases (and-not and multiply-add)
+- Support for constants with same or individual values (see Scalar/TupleFactory)
+- Documentation (!)
+- Altivec/VMX128 support has been removed
+
+The following areas could still use some work:
+- generic shuffling instructions
+- matrix and quaternion types
+
+Here is a simple example of how to use the SIMD library:
+
+\code
+void foo(const float* ptr)
+{
+ assert(!((uintptr_t)ptr & 0xf)); // make sure ptr is 16 byte aligned
+ using namespace nv::simd;
+ Simd4f a = loadAligned(ptr);
+ Simd4f b = simd4f(0.0f, 1.0f, 0.0f, 1.0f);
+ Simd4f c = simd4f(3.0f);
+ Simd4f d = a * b + gSimd4fOne; // maps to FMA on NEON
+ Simd4f mask, e;
+ // same result as e = max(c, d);
+ if (anyGreater(c, d, mask))
+ e = select(mask, c, d);
+ Simd4f f = splat<2>(d) - rsqrt(e);
+ printf("%f\n", array(f)[0]);
+}
+\endcode
+*/
+
+/*! \def NV_SIMD_SIMD
+* Define Simd4f and Simd4i, which map to four 32bit float or integer tuples.
+* */
+// note: ps4 compiler defines _M_X64 without value
+#define NV_SIMD_SSE2 (defined _M_IX86 || defined _M_X64 || defined __i386__ || defined __x86_64__)
+#define NV_SIMD_NEON (defined _M_ARM || defined __ARM_NEON__)
+#define NV_SIMD_SIMD (NV_SIMD_SSE2 || NV_SIMD_NEON)
+
+/*! \def NV_SIMD_SCALAR
+* Define Scalar4f and Scalar4i (default: 0 if SIMD is supported, 1 otherwise).
+* Scalar4f and Scalar4i can be typedef'd to Simd4f and Simd4i respectively to replace
+* the SIMD classes, or they can be used in combination as template parameters to
+* implement a scalar run-time fallback. */
+#if !defined NV_SIMD_SCALAR
+#define NV_SIMD_SCALAR !NV_SIMD_SIMD
+#endif
+
+// use template expression to fuse multiply-adds into a single instruction
+#define NV_SIMD_FUSE_MULTIPLY_ADD (NV_SIMD_NEON)
+// support shift by vector operations
+#define NV_SIMD_SHIFT_BY_VECTOR (NV_SIMD_NEON)
+// support inline assembler
+#define NV_SIMD_INLINE_ASSEMBLER !(defined _M_ARM || defined SN_TARGET_PSP2 || defined __arm64__)
+
+/*! \def NV_SIMD_USE_NAMESPACE
+* \brief Set to 1 to define the SIMD library types and functions inside the nv::simd namespace.
+* By default, the types and functions defined in this header live in the global namespace.
+* This is because MSVC (prior to version 12, Visual Studio 2013) does an inferior job at optimizing
+* SSE2 code when __m128 is wrapped in a struct (the cloth solver for example is more than 50% slower).
+* Therefore, Simd4f is typedef'd to __m128 on MSVC, and for name lookup to work, all related functions
+* live in the global namespace. This behavior can be overridden by defining NV_SIMD_USE_NAMESPACE to 1.
+* The types and functions of the SIMD library are then defined inside the nv::simd namespace, but
+* performance on MSVC version 11 and earlier is expected to be lower in this mode because __m128 and
+* __m128i are wrapped into structs. Arguments need to be passed by reference in this mode.
+* \see NV_SIMD_VECTORCALL, Simd4fArg */
+
+#if defined NV_SIMD_USE_NAMESPACE&& NV_SIMD_USE_NAMESPACE
+#define NV_SIMD_NAMESPACE_BEGIN \
+ namespace nv \
+ { \
+ namespace simd \
+ {
+#define NV_SIMD_NAMESPACE_END \
+ } \
+ }
+#else
+#define NV_SIMD_NAMESPACE_BEGIN
+#define NV_SIMD_NAMESPACE_END
+#endif
+
+// alignment struct to \c alignment byte
+#ifdef _MSC_VER
+#define NV_SIMD_ALIGN(alignment, decl) __declspec(align(alignment)) decl
+#else
+#define NV_SIMD_ALIGN(alignment, decl) decl __attribute__((aligned(alignment)))
+#endif
+
+// define a global constant
+#ifdef _MSC_VER
+#define NV_SIMD_GLOBAL_CONSTANT extern const __declspec(selectany)
+#else
+#define NV_SIMD_GLOBAL_CONSTANT extern const __attribute__((weak))
+#endif
+
+// suppress warning of unused identifiers
+#if defined(__GNUC__)
+#define NV_SIMD_UNUSED __attribute__((unused))
+#else
+#define NV_SIMD_UNUSED
+#endif
+
+// disable warning
+#if defined _MSC_VER
+#if _MSC_VER < 1700
+#pragma warning(disable : 4347) // behavior change: 'function template' is called instead of 'function'
+#endif
+#pragma warning(disable : 4350) // behavior change: 'member1' called instead of 'member2'
+#endif
+
+NV_SIMD_NAMESPACE_BEGIN
+
+// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+// expression templates
+// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+
+/*! \brief Expression template to fuse and-not. */
+template <typename T>
+struct ComplementExpr
+{
+ inline explicit ComplementExpr(T const& v_) : v(v_)
+ {
+ }
+ ComplementExpr& operator = (const ComplementExpr&); // not implemented
+ inline operator T() const;
+ const T v;
+};
+
+template <typename T>
+inline T operator&(const ComplementExpr<T>&, const T&);
+template <typename T>
+inline T operator&(const T&, const ComplementExpr<T>&);
+
+NV_SIMD_NAMESPACE_END
+
+// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+// platform specific includes
+// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+
+#if NV_SIMD_SSE2
+#include "sse2/NvSse2SimdTypes.h"
+#elif NV_SIMD_NEON
+#include "neon/NvNeonSimdTypes.h"
+#elif NV_SIMD_SIMD
+#error unknown SIMD architecture
+#else
+struct Simd4f;
+struct Simd4i;
+#endif
+
+#if NV_SIMD_SCALAR
+#include "scalar/NvScalarSimdTypes.h"
+#else
+struct Scalar4f;
+struct Scalar4i;
+#endif
+
+NV_SIMD_NAMESPACE_BEGIN
+
+/*! \typedef Simd4fArg
+* Maps to Simd4f value or reference, whichever is faster. */
+
+/*! \def NV_SIMD_VECTORCALL
+* MSVC passes aligned arguments by pointer, unless the vector calling convention
+* introduced in Visual Studio 2013 is being used. For the last bit of performance
+* of non-inlined functions, use the following pattern:
+* Simd4f NV_SIMD_VECTORCALL foo(Simd4fArg x);
+* This will pass the argument in register where possible (instead of by pointer).
+* For inlined functions, the compiler will remove the store/load (except for MSVC
+* when NV_SIMD_USE_NAMESPACE is set to 1).
+* Non-inlined functions are rarely perf-critical, so it might be simpler
+* to always pass by reference instead: Simd4f foo(const Simd4f&); */
+
+#if defined _MSC_VER
+#if _MSC_VER >= 1800 // Visual Studio 2013
+typedef Simd4f Simd4fArg;
+#define NV_SIMD_VECTORCALL __vectorcall
+#else
+typedef const Simd4f& Simd4fArg;
+#define NV_SIMD_VECTORCALL
+#endif
+#else
+typedef Simd4f Simd4fArg;
+#define NV_SIMD_VECTORCALL
+#endif
+
+NV_SIMD_NAMESPACE_END
diff --git a/NvCloth/src/NvSimd/neon/NvNeonSimd4f.h b/NvCloth/src/NvSimd/neon/NvNeonSimd4f.h
new file mode 100644
index 0000000..e9c9097
--- /dev/null
+++ b/NvCloth/src/NvSimd/neon/NvNeonSimd4f.h
@@ -0,0 +1,585 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2017 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#pragma once
+
+NV_SIMD_NAMESPACE_BEGIN
+
+// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+// factory implementation
+// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+
+Simd4fZeroFactory::operator Simd4f() const
+{
+ return vdupq_n_u32(0);
+}
+
+Simd4fOneFactory::operator Simd4f() const
+{
+ return vdupq_n_f32(1.0f);
+}
+
+Simd4fScalarFactory::operator Simd4f() const
+{
+ return vdupq_n_f32(reinterpret_cast<const float32_t&>(value));
+}
+
+Simd4fTupleFactory::operator Simd4f() const
+{
+ return reinterpret_cast<const Simd4f&>(tuple);
+}
+
+Simd4fLoadFactory::operator Simd4f() const
+{
+ return vld1q_f32(static_cast<const float32_t*>(ptr));
+}
+
+Simd4fLoad3Factory::operator Simd4f() const
+{
+#if 0
+ float32x2_t xy = vld1_f32(ptr);
+ float32x2_t zz = vld1_dup_f32(ptr + 2);
+ return vcombine_f32(xy, zz);
+#else
+ float fltArray[] = { ptr[0], ptr[1], ptr[2], 0.0 };
+ return vld1q_f32(static_cast<const float32_t*>(fltArray));
+#endif
+}
+
+Simd4fLoad3SetWFactory::operator Simd4f() const
+{
+#if 0
+ float32x2_t xy = vld1_f32(ptr);
+ float32x2_t zz = vld1_dup_f32(ptr + 2);
+ return vcombine_f32(xy, zz);
+#else
+ float fltArray[] = { ptr[0], ptr[1], ptr[2], w };
+ return vld1q_f32(static_cast<const float32_t*>(fltArray));
+#endif
+}
+
+Simd4fAlignedLoadFactory::operator Simd4f() const
+{
+ return vld1q_f32(static_cast<const float32_t*>(ptr));
+}
+
+Simd4fOffsetLoadFactory::operator Simd4f() const
+{
+ return vld1q_f32(reinterpret_cast<const float32_t*>(reinterpret_cast<const char*>(ptr) + offset));
+}
+
+// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+// expression templates
+// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+
+template <>
+inline ComplementExpr<Simd4f>::operator Simd4f() const
+{
+ return vbicq_u32(vdupq_n_u32(0xffffffff), v.u4);
+}
+
+template <>
+inline Simd4f operator&(const ComplementExpr<Simd4f>& complement, const Simd4f& v)
+{
+ return vbicq_u32(v.u4, complement.v.u4);
+}
+
+template <>
+inline Simd4f operator&(const Simd4f& v, const ComplementExpr<Simd4f>& complement)
+{
+ return vbicq_u32(v.u4, complement.v.u4);
+}
+
+ProductExpr::operator Simd4f() const
+{
+ return vmulq_f32(v0.f4, v1.f4);
+}
+
+Simd4f operator + (const ProductExpr& p, const Simd4f& v)
+{
+ return vmlaq_f32(v.f4, p.v0.f4, p.v1.f4);
+}
+
+Simd4f operator + (const Simd4f& v, const ProductExpr& p)
+{
+ return vmlaq_f32(v.f4, p.v0.f4, p.v1.f4);
+}
+
+Simd4f operator + (const ProductExpr& p0, const ProductExpr& p1)
+{
+ // cast calls operator Simd4f() which evaluates the other ProductExpr
+ return vmlaq_f32(static_cast<Simd4f>(p0).f4, p1.v0.f4, p1.v1.f4);
+}
+
+Simd4f operator - (const Simd4f& v, const ProductExpr& p)
+{
+ return vmlsq_f32(v.f4, p.v0.f4, p.v1.f4);
+}
+
+Simd4f operator - (const ProductExpr& p0, const ProductExpr& p1)
+{
+ // cast calls operator Simd4f() which evaluates the other ProductExpr
+ return vmlsq_f32(static_cast<Simd4f>(p0).f4, p1.v0.f4, p1.v1.f4);
+}
+
+// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+// operator implementations
+// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+
+Simd4f operator == (const Simd4f& v0, const Simd4f& v1)
+{
+ return vceqq_f32(v0.f4, v1.f4);
+}
+
+Simd4f operator<(const Simd4f& v0, const Simd4f& v1)
+{
+ return vcltq_f32(v0.f4, v1.f4);
+}
+
+Simd4f operator <= (const Simd4f& v0, const Simd4f& v1)
+{
+ return vcleq_f32(v0.f4, v1.f4);
+}
+
+Simd4f operator>(const Simd4f& v0, const Simd4f& v1)
+{
+ return vcgtq_f32(v0.f4, v1.f4);
+}
+
+Simd4f operator >= (const Simd4f& v0, const Simd4f& v1)
+{
+ return vcgeq_f32(v0.f4, v1.f4);
+}
+
+ComplementExpr<Simd4f> operator~(const Simd4f& v)
+{
+ return ComplementExpr<Simd4f>(v);
+}
+
+Simd4f operator&(const Simd4f& v0, const Simd4f& v1)
+{
+ return vandq_u32(v0.u4, v1.u4);
+}
+
+Simd4f operator|(const Simd4f& v0, const Simd4f& v1)
+{
+ return vorrq_u32(v0.u4, v1.u4);
+}
+
+Simd4f operator^(const Simd4f& v0, const Simd4f& v1)
+{
+ return veorq_u32(v0.u4, v1.u4);
+}
+
+Simd4f operator<<(const Simd4f& v, int shift)
+{
+ return vshlq_u32(v.u4, vdupq_n_s32(shift));
+}
+
+Simd4f operator>>(const Simd4f& v, int shift)
+{
+ return vshlq_u32(v.u4, vdupq_n_s32(-shift));
+}
+
+Simd4f operator<<(const Simd4f& v, const Simd4f& shift)
+{
+ return vshlq_u32(v.u4, shift.i4);
+}
+
+Simd4f operator>>(const Simd4f& v, const Simd4f& shift)
+{
+ return vshlq_u32(v.u4, vnegq_s32(shift.i4));
+}
+
+Simd4f operator + (const Simd4f& v)
+{
+ return v;
+}
+
+Simd4f operator + (const Simd4f& v0, const Simd4f& v1)
+{
+ return vaddq_f32(v0.f4, v1.f4);
+}
+
+Simd4f operator - (const Simd4f& v)
+{
+ return vnegq_f32(v.f4);
+}
+
+Simd4f operator - (const Simd4f& v0, const Simd4f& v1)
+{
+ return vsubq_f32(v0.f4, v1.f4);
+}
+
+ProductExpr operator*(const Simd4f& v0, const Simd4f& v1)
+{
+ return ProductExpr(v0, v1);
+}
+
+Simd4f operator/(const Simd4f& v0, const Simd4f& v1)
+{
+ return v0 * vrecpeq_f32(v1.f4); // reciprocal estimate
+}
+
+// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+// function implementations
+// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+
+Simd4f simd4f(const Simd4i& v)
+{
+ return v.u4;
+}
+
+Simd4f convert(const Simd4i& v)
+{
+ return vcvtq_f32_s32(v.i4);
+}
+
+float (&array(Simd4f& v))[4]
+{
+ return reinterpret_cast<float(&)[4]>(v);
+}
+
+const float (&array(const Simd4f& v))[4]
+{
+ return reinterpret_cast<const float(&)[4]>(v);
+}
+
+void store(float* ptr, Simd4f const& v)
+{
+ return vst1q_f32(reinterpret_cast<float32_t*>(ptr), v.f4);
+}
+
+void storeAligned(float* ptr, Simd4f const& v)
+{
+ return vst1q_f32(reinterpret_cast<float32_t*>(ptr), v.f4);
+}
+
+void store3(float* dst, const Simd4f& v)
+{
+ const float* __restrict src = array(v);
+ dst[0] = src[0];
+ dst[1] = src[1];
+ dst[2] = src[2];
+}
+
+void storeAligned(float* ptr, unsigned int offset, Simd4f const& v)
+{
+ return storeAligned(reinterpret_cast<float*>(reinterpret_cast<char*>(ptr) + offset), v);
+}
+
+template <size_t i>
+Simd4f splat(Simd4f const& v)
+{
+ return vdupq_n_f32(array(v)[i]);
+}
+
+Simd4f select(Simd4f const& mask, Simd4f const& v0, Simd4f const& v1)
+{
+ return vbslq_f32(mask.u4, v0.f4, v1.f4);
+}
+
+Simd4f abs(const Simd4f& v)
+{
+ return vabsq_f32(v.f4);
+}
+
+Simd4f floor(const Simd4f& v)
+{
+ int32x4_t i = vcvtq_s32_f32(v.f4);
+ int32x4_t s = vreinterpretq_s32_u32(vcgtq_f32(vcvtq_f32_s32(i), v.f4));
+ return vcvtq_f32_s32(vsubq_s32(i, vshrq_n_s32(s, 31)));
+}
+
+#if !defined max
+Simd4f max(const Simd4f& v0, const Simd4f& v1)
+{
+ return vmaxq_f32(v0.f4, v1.f4);
+}
+#endif
+
+#if !defined min
+Simd4f min(const Simd4f& v0, const Simd4f& v1)
+{
+ return vminq_f32(v0.f4, v1.f4);
+}
+#endif
+
+Simd4f recip(const Simd4f& v)
+{
+ return recip<0>(v);
+}
+
+template <int n>
+Simd4f recip(const Simd4f& v)
+{
+ Simd4f r = vrecpeq_f32(v.f4);
+ // n+1 newton iterations because initial approximation is crude
+ for (int i = 0; i <= n; ++i)
+ r = vrecpsq_f32(v.f4, r.f4) * r;
+ return r;
+}
+
+Simd4f sqrt(const Simd4f& v)
+{
+ return (v > gSimd4fZero) & (v * rsqrt(v));
+}
+
+Simd4f rsqrt(const Simd4f& v)
+{
+ return rsqrt<0>(v);
+}
+
+template <int n>
+Simd4f rsqrt(const Simd4f& v)
+{
+ Simd4f r = vrsqrteq_f32(v.f4);
+ // n+1 newton iterations because initial approximation is crude
+ for (int i = 0; i <= n; ++i)
+ r = vrsqrtsq_f32(vmulq_f32(v.f4, r.f4), r.f4) * r;
+ return r;
+}
+
+Simd4f exp2(const Simd4f& v)
+{
+ // http://www.netlib.org/cephes/
+
+ Simd4f limit = simd4f(127.4999f);
+ Simd4f x = min(max(-limit, v), limit);
+
+ // separate into integer and fractional part
+
+ Simd4f fx = x + simd4f(0.5f);
+ Simd4i ix = vsubq_s32(vcvtq_s32_f32(fx.f4), vreinterpretq_s32_u32(vshrq_n_u32(fx.u4, 31)));
+ fx = x - vcvtq_f32_s32(ix.i4);
+
+ // exp2(fx) ~ 1 + 2 * P(fx) / (Q(fx) - P(fx))
+
+ Simd4f fx2 = fx * fx;
+
+ Simd4f px = fx * (simd4f(1.51390680115615096133e+3f) +
+ fx2 * (simd4f(2.02020656693165307700e+1f) + fx2 * simd4f(2.30933477057345225087e-2f)));
+ Simd4f qx = simd4f(4.36821166879210612817e+3f) + fx2 * (simd4f(2.33184211722314911771e+2f) + fx2);
+
+ Simd4f exp2fx = px * recip(qx - px);
+ exp2fx = gSimd4fOne + exp2fx + exp2fx;
+
+ // exp2(ix)
+
+ Simd4f exp2ix = vreinterpretq_f32_s32(vshlq_n_s32(vaddq_s32(ix.i4, vdupq_n_s32(0x7f)), 23));
+
+ return exp2fx * exp2ix;
+}
+
+Simd4f log2(const Simd4f& v)
+{
+ Simd4f scale = simd4f(1.44269504088896341f); // 1/ln(2)
+ const float* ptr = array(v);
+ return simd4f(::logf(ptr[0]), ::logf(ptr[1]), ::logf(ptr[2]), ::logf(ptr[3])) * scale;
+}
+
+Simd4f dot3(const Simd4f& v0, const Simd4f& v1)
+{
+ Simd4f tmp = v0 * v1;
+ return splat<0>(tmp) + splat<1>(tmp) + splat<2>(tmp);
+}
+
+Simd4f cross3(const Simd4f& v0, const Simd4f& v1)
+{
+ float32x2_t x0_y0 = vget_low_f32(v0.f4);
+ float32x2_t z0_w0 = vget_high_f32(v0.f4);
+ float32x2_t x1_y1 = vget_low_f32(v1.f4);
+ float32x2_t z1_w1 = vget_high_f32(v1.f4);
+
+ float32x2_t y1_z1 = vext_f32(x1_y1, z1_w1, 1);
+ float32x2_t y0_z0 = vext_f32(x0_y0, z0_w0, 1);
+
+ float32x2_t z0x1_w0y1 = vmul_f32(z0_w0, x1_y1);
+ float32x2_t x0y1_y0z1 = vmul_f32(x0_y0, y1_z1);
+
+ float32x2_t y2_w2 = vmls_f32(z0x1_w0y1, x0_y0, z1_w1);
+ float32x2_t z2_x2 = vmls_f32(x0y1_y0z1, y0_z0, x1_y1);
+ float32x2_t x2_y2 = vext_f32(z2_x2, y2_w2, 1);
+
+ return vcombine_f32(x2_y2, z2_x2);
+}
+
+void transpose(Simd4f& x, Simd4f& y, Simd4f& z, Simd4f& w)
+{
+#if NV_SIMD_INLINE_ASSEMBLER
+ asm volatile("vzip.f32 %q0, %q2 \n\t"
+ "vzip.f32 %q1, %q3 \n\t"
+ "vzip.f32 %q0, %q1 \n\t"
+ "vzip.f32 %q2, %q3 \n\t"
+ : "+w"(x.f4), "+w"(y.f4), "+w"(z.f4), "+w"(w.f4));
+#else
+ float32x4x2_t v0v1 = vzipq_f32(x.f4, z.f4);
+ float32x4x2_t v2v3 = vzipq_f32(y.f4, w.f4);
+ float32x4x2_t zip0 = vzipq_f32(v0v1.val[0], v2v3.val[0]);
+ float32x4x2_t zip1 = vzipq_f32(v0v1.val[1], v2v3.val[1]);
+
+ x = zip0.val[0];
+ y = zip0.val[1];
+ z = zip1.val[0];
+ w = zip1.val[1];
+#endif
+}
+
+void zip(Simd4f& v0, Simd4f& v1)
+{
+#if NV_SIMD_INLINE_ASSEMBLER
+ asm volatile("vzip.f32 %q0, %q1 \n\t" : "+w"(v0.f4), "+w"(v1.f4));
+#else
+ float32x4x2_t uzp = vzipq_f32(v0.f4, v1.f4);
+ v0 = uzp.val[0];
+ v1 = uzp.val[1];
+#endif
+}
+
+void unzip(Simd4f& v0, Simd4f& v1)
+{
+#if NV_SIMD_INLINE_ASSEMBLER
+ asm volatile("vuzp.f32 %q0, %q1 \n\t" : "+w"(v0.f4), "+w"(v1.f4));
+#else
+ float32x4x2_t uzp = vuzpq_f32(v0.f4, v1.f4);
+ v0 = uzp.val[0];
+ v1 = uzp.val[1];
+#endif
+}
+
+Simd4f swaphilo(const Simd4f& v)
+{
+ return vcombine_f32(vget_high_f32(v.f4), vget_low_f32(v.f4));
+}
+
+int allEqual(const Simd4f& v0, const Simd4f& v1)
+{
+ return allTrue(v0 == v1);
+}
+
+int allEqual(const Simd4f& v0, const Simd4f& v1, Simd4f& outMask)
+{
+ return allTrue(outMask = v0 == v1);
+}
+
+int anyEqual(const Simd4f& v0, const Simd4f& v1)
+{
+ return anyTrue(v0 == v1);
+}
+
+int anyEqual(const Simd4f& v0, const Simd4f& v1, Simd4f& outMask)
+{
+ return anyTrue(outMask = v0 == v1);
+}
+
+int allGreater(const Simd4f& v0, const Simd4f& v1)
+{
+ return allTrue(v0 > v1);
+}
+
+int allGreater(const Simd4f& v0, const Simd4f& v1, Simd4f& outMask)
+{
+ return allTrue(outMask = v0 > v1);
+}
+
+int anyGreater(const Simd4f& v0, const Simd4f& v1)
+{
+ return anyTrue(v0 > v1);
+}
+
+int anyGreater(const Simd4f& v0, const Simd4f& v1, Simd4f& outMask)
+{
+ return anyTrue(outMask = v0 > v1);
+}
+
+int allGreaterEqual(const Simd4f& v0, const Simd4f& v1)
+{
+ return allTrue(v0 >= v1);
+}
+
+int allGreaterEqual(const Simd4f& v0, const Simd4f& v1, Simd4f& outMask)
+{
+ return allTrue(outMask = v0 >= v1);
+}
+
+int anyGreaterEqual(const Simd4f& v0, const Simd4f& v1)
+{
+ return anyTrue(v0 >= v1);
+}
+
+int anyGreaterEqual(const Simd4f& v0, const Simd4f& v1, Simd4f& outMask)
+{
+ return anyTrue(outMask = v0 >= v1);
+}
+
+int allTrue(const Simd4f& v)
+{
+#if NV_SIMD_INLINE_ASSEMBLER
+ int result;
+ asm volatile("vmovq q0, %q1 \n\t"
+ "vand.u32 d0, d0, d1 \n\t"
+ "vpmin.u32 d0, d0, d0 \n\t"
+ "vcmp.f32 s0, #0 \n\t"
+ "fmrx %0, fpscr"
+ : "=r"(result)
+ : "w"(v.f4)
+ : "q0");
+ return result >> 28 & 0x1;
+#else
+ uint16x4_t hi = vget_high_u16(vreinterpretq_u16_u32(v.u4));
+ uint16x4_t lo = vmovn_u32(v.u4);
+ uint16x8_t combined = vcombine_u16(lo, hi);
+ uint32x2_t reduced = vreinterpret_u32_u8(vmovn_u16(combined));
+ return vget_lane_u32(reduced, 0) == 0xffffffff;
+#endif
+}
+
+int anyTrue(const Simd4f& v)
+{
+#if NV_SIMD_INLINE_ASSEMBLER
+ int result;
+ asm volatile("vmovq q0, %q1 \n\t"
+ "vorr.u32 d0, d0, d1 \n\t"
+ "vpmax.u32 d0, d0, d0 \n\t"
+ "vcmp.f32 s0, #0 \n\t"
+ "fmrx %0, fpscr"
+ : "=r"(result)
+ : "w"(v.f4)
+ : "q0");
+ return result >> 28 & 0x1;
+#else
+ uint16x4_t hi = vget_high_u16(vreinterpretq_u16_u32(v.u4));
+ uint16x4_t lo = vmovn_u32(v.u4);
+ uint16x8_t combined = vcombine_u16(lo, hi);
+ uint32x2_t reduced = vreinterpret_u32_u8(vmovn_u16(combined));
+ return vget_lane_u32(reduced, 0) != 0x0;
+#endif
+}
+
+NV_SIMD_NAMESPACE_END
diff --git a/NvCloth/src/NvSimd/neon/NvNeonSimd4i.h b/NvCloth/src/NvSimd/neon/NvNeonSimd4i.h
new file mode 100644
index 0000000..66b3b92
--- /dev/null
+++ b/NvCloth/src/NvSimd/neon/NvNeonSimd4i.h
@@ -0,0 +1,303 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2017 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#pragma once
+
+NV_SIMD_NAMESPACE_BEGIN
+
+// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+// factory implementation
+// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+
+// Broadcasts zero into all four integer lanes.
+Simd4iZeroFactory::operator Simd4i() const
+{
+ return vdupq_n_s32(0);
+}
+
+// Broadcasts a single scalar into all four lanes.
+Simd4iScalarFactory::operator Simd4i() const
+{
+ return vdupq_n_s32(value);
+}
+
+// Bitwise reinterpretation of the pre-packed 4-tuple as a vector.
+Simd4iTupleFactory::operator Simd4i() const
+{
+ return reinterpret_cast<const Simd4i&>(tuple);
+}
+
+// Load of 4 consecutive ints; vld1q_s32 has no alignment requirement.
+Simd4iLoadFactory::operator Simd4i() const
+{
+ return vld1q_s32(ptr);
+}
+
+// Aligned load; NEON uses the same intrinsic as the unaligned case.
+Simd4iAlignedLoadFactory::operator Simd4i() const
+{
+ return vld1q_s32(ptr);
+}
+
+// Load from (char*)ptr + offset bytes.
+Simd4iOffsetLoadFactory::operator Simd4i() const
+{
+ return vld1q_s32(reinterpret_cast<const int*>(reinterpret_cast<const char*>(ptr) + offset));
+}
+
+// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+// expression template
+// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+
+// Materializes the lazy bitwise-NOT: BIC(a, b) computes a & ~b, so
+// ~v == BIC(all-ones, v).
+template <>
+inline ComplementExpr<Simd4i>::operator Simd4i() const
+{
+ return vbicq_u32(vdupq_n_u32(0xffffffff), v.u4);
+}
+
+// (~a) & b folds into a single BIC instruction instead of NOT + AND.
+template <>
+inline Simd4i operator&(const ComplementExpr<Simd4i>& complement, const Simd4i& v)
+{
+ return vbicq_u32(v.u4, complement.v.u4);
+}
+
+// a & (~b), same single-BIC folding as above.
+template <>
+inline Simd4i operator&(const Simd4i& v, const ComplementExpr<Simd4i>& complement)
+{
+ return vbicq_u32(v.u4, complement.v.u4);
+}
+
+// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+// operator implementations
+// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+
+// Comparison operators return per-lane masks (all-ones where true).
+// NOTE(review): these header definitions carry no 'inline' keyword; they
+// presumably rely on the declarations in NvSimd4i.h being marked inline —
+// confirm to avoid ODR violations.
+Simd4i operator == (const Simd4i& v0, const Simd4i& v1)
+{
+ return vceqq_u32(v0.u4, v1.u4);
+}
+
+// Signed per-lane less-than.
+Simd4i operator<(const Simd4i& v0, const Simd4i& v1)
+{
+ return vcltq_s32(v0.i4, v1.i4);
+}
+
+// Signed per-lane greater-than.
+Simd4i operator>(const Simd4i& v0, const Simd4i& v1)
+{
+ return vcgtq_s32(v0.i4, v1.i4);
+}
+
+// Returns a lazy complement expression so (~a) & b can fold into one BIC.
+ComplementExpr<Simd4i> operator~(const Simd4i& v)
+{
+ return ComplementExpr<Simd4i>(v);
+}
+
+Simd4i operator&(const Simd4i& v0, const Simd4i& v1)
+{
+ return vandq_u32(v0.u4, v1.u4);
+}
+
+Simd4i operator|(const Simd4i& v0, const Simd4i& v1)
+{
+ return vorrq_u32(v0.u4, v1.u4);
+}
+
+Simd4i operator^(const Simd4i& v0, const Simd4i& v1)
+{
+ return veorq_u32(v0.u4, v1.u4);
+}
+
+// NEON has no variable right-shift: both directions use vshlq, with a
+// negated count for shifting right. Right shifts are logical (unsigned).
+Simd4i operator<<(const Simd4i& v, int shift)
+{
+ return vshlq_u32(v.u4, vdupq_n_s32(shift));
+}
+
+Simd4i operator>>(const Simd4i& v, int shift)
+{
+ return vshlq_u32(v.u4, vdupq_n_s32(-shift));
+}
+
+// Per-lane variable shift counts.
+Simd4i operator<<(const Simd4i& v, const Simd4i& shift)
+{
+ return vshlq_u32(v.u4, shift.i4)
+}
+
+Simd4i operator>>(const Simd4i& v, const Simd4i& shift)
+{
+ return vshlq_u32(v.u4, vnegq_s32(shift.i4));
+}
+
+// Unary plus: identity.
+Simd4i operator + (const Simd4i& v)
+{
+ return v;
+}
+
+// Wrapping (modular) addition; add/sub are identical for signed/unsigned.
+Simd4i operator + (const Simd4i& v0, const Simd4i& v1)
+{
+ return vaddq_u32(v0.u4, v1.u4);
+}
+
+// Per-lane negation.
+Simd4i operator - (const Simd4i& v)
+{
+ return vnegq_s32(v.i4);
+}
+
+// Wrapping (modular) subtraction.
+Simd4i operator - (const Simd4i& v0, const Simd4i& v1)
+{
+ return vsubq_u32(v0.u4, v1.u4);
+}
+
+// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+// function implementations
+// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+
+// Bitwise reinterpretation of a float vector as an int vector (no convert).
+Simd4i simd4i(const Simd4f& v)
+{
+ return v.u4;
+}
+
+// Float-to-int conversion, truncating toward zero.
+Simd4i truncate(const Simd4f& v)
+{
+ return vcvtq_s32_f32(v.f4);
+}
+
+// View the vector as a mutable int[4] (lane 0 first).
+int (&array(Simd4i& v))[4]
+{
+ return reinterpret_cast<int(&)[4]>(v);
+}
+
+// Read-only int[4] view of the vector.
+const int (&array(const Simd4i& v))[4]
+{
+ return reinterpret_cast<const int(&)[4]>(v);
+}
+
+// Stores v to 4 consecutive ints at ptr; vst1q_s32 has no alignment
+// requirement. (Dropped the stray 'return' before a void expression to
+// match the sibling storeAligned below.)
+void store(int* ptr, const Simd4i& v)
+{
+ vst1q_s32(ptr, v.i4);
+}
+
+// Aligned store; on NEON this is the same intrinsic as store().
+void storeAligned(int* ptr, const Simd4i& v)
+{
+ vst1q_s32(ptr, v.i4);
+}
+
+// Stores v at byte offset 'offset' from ptr.
+void storeAligned(int* ptr, unsigned int offset, const Simd4i& v)
+{
+ storeAligned(reinterpret_cast<int*>(reinterpret_cast<char*>(ptr) + offset), v);
+}
+
+// Broadcasts lane i into all four lanes (goes through the array() view).
+template <size_t i>
+Simd4i splat(Simd4i const& v)
+{
+ return vdupq_n_s32(array(v)[i]);
+}
+
+// Per-bit blend: result bit = mask ? v0 : v1 (vbsl selects bitwise).
+Simd4i select(Simd4i const& mask, Simd4i const& v0, Simd4i const& v1)
+{
+ return vbslq_u32(mask.u4, v0.u4, v1.u4);
+}
+
+// Horizontal reductions of the lane-wise comparisons. The explicit
+// operator==/operator> call syntax is used here; presumably this is to
+// pick the Simd4i overload unambiguously — NOTE(review): confirm.
+int allEqual(const Simd4i& v0, const Simd4i& v1)
+{
+ return allTrue(operator==(v0, v1));
+}
+
+// Variant that also returns the per-lane comparison mask via outMask.
+int allEqual(const Simd4i& v0, const Simd4i& v1, Simd4i& outMask)
+{
+ return allTrue(outMask = operator==(v0, v1));
+}
+
+int anyEqual(const Simd4i& v0, const Simd4i& v1)
+{
+ return anyTrue(operator==(v0, v1));
+}
+
+int anyEqual(const Simd4i& v0, const Simd4i& v1, Simd4i& outMask)
+{
+ return anyTrue(outMask = operator==(v0, v1));
+}
+
+int allGreater(const Simd4i& v0, const Simd4i& v1)
+{
+ return allTrue(operator>(v0, v1));
+}
+
+int allGreater(const Simd4i& v0, const Simd4i& v1, Simd4i& outMask)
+{
+ return allTrue(outMask = operator>(v0, v1));
+}
+
+int anyGreater(const Simd4i& v0, const Simd4i& v1)
+{
+ return anyTrue(operator>(v0, v1));
+}
+
+int anyGreater(const Simd4i& v0, const Simd4i& v1, Simd4i& outMask)
+{
+ return anyTrue(outMask = operator>(v0, v1));
+}
+}
+
+// Returns non-zero when every lane of the mask v is set.
+// NOTE(review): assumes canonical masks (lanes all-ones or all-zeros).
+int allTrue(const Simd4i& v)
+{
+#if NV_SIMD_INLINE_ASSEMBLER
+ int result;
+ // AND-reduce, then float-compare against 0: an all-ones lane is a NaN,
+ // setting FPSCR.V (bit 28); a zero lane compares equal (V clear).
+ asm volatile("vmovq q0, %q1 \n\t"
+ "vand.u32 d0, d0, d1 \n\t"
+ "vpmin.u32 d0, d0, d0 \n\t"
+ "vcmp.f32 s0, #0 \n\t"
+ "fmrx %0, fpscr"
+ : "=r"(result)
+ : "w"(v.u4)
+ : "q0");
+ return result >> 28 & 0x1;
+#else
+ // Byte-narrowing reduction; the tested u32 lane packs one byte per input
+ // lane (from 'lo'), so 0xffffffff means all four lanes were set.
+ uint16x4_t hi = vget_high_u16(vreinterpretq_u16_u32(v.u4));
+ uint16x4_t lo = vmovn_u32(v.u4);
+ uint16x8_t combined = vcombine_u16(lo, hi);
+ uint32x2_t reduced = vreinterpret_u32_u8(vmovn_u16(combined));
+ return vget_lane_u32(reduced, 0) == 0xffffffff;
+#endif
+}
+
+// Returns non-zero when at least one lane of the mask v is set.
+int anyTrue(const Simd4i& v)
+{
+#if NV_SIMD_INLINE_ASSEMBLER
+ int result;
+ // OR-reduce, then float-compare against 0 (see allTrue above).
+ asm volatile("vmovq q0, %q1 \n\t"
+ "vorr.u32 d0, d0, d1 \n\t"
+ "vpmax.u32 d0, d0, d0 \n\t"
+ "vcmp.f32 s0, #0 \n\t"
+ "fmrx %0, fpscr"
+ : "=r"(result)
+ : "w"(v.u4)
+ : "q0");
+ return result >> 28 & 0x1;
+#else
+ // Byte-narrowing reduction; any non-zero packed byte means some lane set.
+ uint16x4_t hi = vget_high_u16(vreinterpretq_u16_u32(v.u4));
+ uint16x4_t lo = vmovn_u32(v.u4);
+ uint16x8_t combined = vcombine_u16(lo, hi);
+ uint32x2_t reduced = vreinterpret_u32_u8(vmovn_u16(combined));
+ return vget_lane_u32(reduced, 0) != 0x0;
+#endif
+}
+
+NV_SIMD_NAMESPACE_END
diff --git a/NvCloth/src/NvSimd/neon/NvNeonSimdTypes.h b/NvCloth/src/NvSimd/neon/NvNeonSimdTypes.h
new file mode 100644
index 0000000..a1d6820
--- /dev/null
+++ b/NvCloth/src/NvSimd/neon/NvNeonSimdTypes.h
@@ -0,0 +1,71 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2017 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#pragma once
+
+#include <arm_neon.h>
+
+NV_SIMD_NAMESPACE_BEGIN
+
+// 4-lane 128-bit NEON vector; the union members alias the same register so
+// code can reinterpret between float/unsigned/signed lanes at no cost.
+union Simd4f
+{
+ // Intentionally leaves the lanes uninitialized (performance).
+ Simd4f()
+ {
+ }
+ Simd4f(const float32x4_t& v) : f4(v)
+ {
+ }
+#ifndef _M_ARM // all *32x4_t map to the same type
+ // On MSVC ARM this ctor would redeclare the one above, hence the guard.
+ Simd4f(const uint32x4_t& v) : u4(v)
+ {
+ }
+#endif
+ float32x4_t f4;
+ uint32x4_t u4;
+ int32x4_t i4;
+};
+
+// 4-lane 128-bit NEON integer vector with signed/unsigned aliasing views.
+union Simd4i
+{
+ // Intentionally leaves the lanes uninitialized (performance).
+ Simd4i()
+ {
+ }
+ Simd4i(const uint32x4_t& v) : u4(v)
+ {
+ }
+#ifndef _M_ARM // all *32x4_t map to the same type
+ Simd4i(const int32x4_t& v) : i4(v)
+ {
+ }
+#endif
+ uint32x4_t u4;
+ int32x4_t i4;
+};
+
+NV_SIMD_NAMESPACE_END
diff --git a/NvCloth/src/NvSimd/scalar/NvScalarSimd4f.h b/NvCloth/src/NvSimd/scalar/NvScalarSimd4f.h
new file mode 100644
index 0000000..f655a6d
--- /dev/null
+++ b/NvCloth/src/NvSimd/scalar/NvScalarSimd4f.h
@@ -0,0 +1,464 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2017 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#pragma once
+
+NV_SIMD_NAMESPACE_BEGIN
+
+// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+// factory implementation
+// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+
+// Scalar (non-SIMD) reference backend: each factory fills the four lanes
+// element by element.
+Simd4fZeroFactory::operator Scalar4f() const
+{
+ return Scalar4f(0.0f, 0.0f, 0.0f, 0.0f);
+}
+
+Simd4fOneFactory::operator Scalar4f() const
+{
+ return Scalar4f(1.0f, 1.0f, 1.0f, 1.0f);
+}
+
+// Broadcast a single scalar into all lanes.
+Simd4fScalarFactory::operator Scalar4f() const
+{
+ return Scalar4f(value, value, value, value);
+}
+
+// Bitwise reinterpretation of the pre-packed 4-tuple.
+Simd4fTupleFactory::operator Scalar4f() const
+{
+ return reinterpret_cast<const Scalar4f&>(tuple);
+}
+
+Simd4fLoadFactory::operator Scalar4f() const
+{
+ return Scalar4f(ptr[0], ptr[1], ptr[2], ptr[3]);
+}
+
+// 3-element load; w lane is cleared.
+Simd4fLoad3Factory::operator Scalar4f() const
+{
+ return Scalar4f(ptr[0], ptr[1], ptr[2], 0.0f);
+}
+
+// 3-element load with an explicit w value.
+Simd4fLoad3SetWFactory::operator Scalar4f() const
+{
+ return Scalar4f(ptr[0], ptr[1], ptr[2], ptr[3]);
+}
+
+Simd4fAlignedLoadFactory::operator Scalar4f() const
+{
+ return Scalar4f(ptr[0], ptr[1], ptr[2], ptr[3]);
+}
+
+// Load from (char*)ptr + offset bytes, delegating to the aligned factory.
+Simd4fOffsetLoadFactory::operator Scalar4f() const
+{
+ return Simd4fAlignedLoadFactory(reinterpret_cast<const float*>(reinterpret_cast<const char*>(ptr) + offset));
+}
+
+// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+// expression template
+// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+
+// Materializes the lazy bitwise complement lane by lane.
+template <>
+inline ComplementExpr<Scalar4f>::operator Scalar4f() const
+{
+ return Scalar4f(~v.u4[0], ~v.u4[1], ~v.u4[2], ~v.u4[3]);
+}
+
+// (~a) & b computed without materializing ~a separately.
+template <>
+inline Scalar4f operator&<Scalar4f>(const ComplementExpr<Scalar4f>& complement, const Scalar4f& v)
+{
+ return Scalar4f(v.u4[0] & ~complement.v.u4[0], v.u4[1] & ~complement.v.u4[1], v.u4[2] & ~complement.v.u4[2],
+ v.u4[3] & ~complement.v.u4[3]);
+}
+
+// a & (~b), mirror of the overload above.
+template <>
+inline Scalar4f operator&<Scalar4f>(const Scalar4f& v, const ComplementExpr<Scalar4f>& complement)
+{
+ return Scalar4f(v.u4[0] & ~complement.v.u4[0], v.u4[1] & ~complement.v.u4[1], v.u4[2] & ~complement.v.u4[2],
+ v.u4[3] & ~complement.v.u4[3]);
+}
+
+// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+// operator implementations
+// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+
+// Per-lane comparisons. NOTE(review): the bool results are passed to the
+// Scalar4f constructor (defined in NvScalarSimdTypes.h, not visible here);
+// select() below blends bitwise, so that constructor must map true to an
+// all-ones lane mask — confirm against the type definition.
+inline Scalar4f operator == (const Scalar4f& v0, const Scalar4f& v1)
+{
+ return Scalar4f(v0.f4[0] == v1.f4[0], v0.f4[1] == v1.f4[1], v0.f4[2] == v1.f4[2], v0.f4[3] == v1.f4[3]);
+}
+
+inline Scalar4f operator<(const Scalar4f& v0, const Scalar4f& v1)
+{
+ return Scalar4f(v0.f4[0] < v1.f4[0], v0.f4[1] < v1.f4[1], v0.f4[2] < v1.f4[2], v0.f4[3] < v1.f4[3]);
+}
+
+inline Scalar4f operator <= (const Scalar4f& v0, const Scalar4f& v1)
+{
+ return Scalar4f(v0.f4[0] <= v1.f4[0], v0.f4[1] <= v1.f4[1], v0.f4[2] <= v1.f4[2], v0.f4[3] <= v1.f4[3]);
+}
+
+inline Scalar4f operator>(const Scalar4f& v0, const Scalar4f& v1)
+{
+ return Scalar4f(v0.f4[0] > v1.f4[0], v0.f4[1] > v1.f4[1], v0.f4[2] > v1.f4[2], v0.f4[3] > v1.f4[3]);
+}
+
+inline Scalar4f operator >= (const Scalar4f& v0, const Scalar4f& v1)
+{
+ return Scalar4f(v0.f4[0] >= v1.f4[0], v0.f4[1] >= v1.f4[1], v0.f4[2] >= v1.f4[2], v0.f4[3] >= v1.f4[3]);
+}
+
+// Lazy complement expression (see the specializations above).
+inline ComplementExpr<Scalar4f> operator~(const Scalar4f& v)
+{
+ return ComplementExpr<Scalar4f>(v);
+}
+
+// Bitwise ops operate on the integer aliasing view of the lanes.
+inline Scalar4f operator&(const Scalar4f& v0, const Scalar4f& v1)
+{
+ return Scalar4f(v0.u4[0] & v1.u4[0], v0.u4[1] & v1.u4[1], v0.u4[2] & v1.u4[2], v0.u4[3] & v1.u4[3]);
+}
+
+inline Scalar4f operator|(const Scalar4f& v0, const Scalar4f& v1)
+{
+ return Scalar4f(v0.u4[0] | v1.u4[0], v0.u4[1] | v1.u4[1], v0.u4[2] | v1.u4[2], v0.u4[3] | v1.u4[3]);
+}
+
+inline Scalar4f operator^(const Scalar4f& v0, const Scalar4f& v1)
+{
+ return Scalar4f(v0.u4[0] ^ v1.u4[0], v0.u4[1] ^ v1.u4[1], v0.u4[2] ^ v1.u4[2], v0.u4[3] ^ v1.u4[3]);
+}
+
+// Shifts act on the unsigned bit patterns (logical shifts).
+inline Scalar4f operator<<(const Scalar4f& v, int shift)
+{
+ return Scalar4f(v.u4[0] << shift, v.u4[1] << shift, v.u4[2] << shift, v.u4[3] << shift);
+}
+
+inline Scalar4f operator>>(const Scalar4f& v, int shift)
+{
+ return Scalar4f(v.u4[0] >> shift, v.u4[1] >> shift, v.u4[2] >> shift, v.u4[3] >> shift);
+}
+
+// Unary plus: identity.
+inline Scalar4f operator + (const Scalar4f& v)
+{
+ return v;
+}
+
+inline Scalar4f operator + (const Scalar4f& v0, const Scalar4f& v1)
+{
+ return Scalar4f(v0.f4[0] + v1.f4[0], v0.f4[1] + v1.f4[1], v0.f4[2] + v1.f4[2], v0.f4[3] + v1.f4[3]);
+}
+
+inline Scalar4f operator - (const Scalar4f& v)
+{
+ return Scalar4f(-v.f4[0], -v.f4[1], -v.f4[2], -v.f4[3]);
+}
+
+inline Scalar4f operator - (const Scalar4f& v0, const Scalar4f& v1)
+{
+ return Scalar4f(v0.f4[0] - v1.f4[0], v0.f4[1] - v1.f4[1], v0.f4[2] - v1.f4[2], v0.f4[3] - v1.f4[3]);
+}
+
+inline Scalar4f operator*(const Scalar4f& v0, const Scalar4f& v1)
+{
+ return Scalar4f(v0.f4[0] * v1.f4[0], v0.f4[1] * v1.f4[1], v0.f4[2] * v1.f4[2], v0.f4[3] * v1.f4[3]);
+}
+
+// Per-lane division; no zero check, matching hardware SIMD semantics.
+inline Scalar4f operator/(const Scalar4f& v0, const Scalar4f& v1)
+{
+ return Scalar4f(v0.f4[0] / v1.f4[0], v0.f4[1] / v1.f4[1], v0.f4[2] / v1.f4[2], v0.f4[3] / v1.f4[3]);
+}
+
+// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+// function implementations
+// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+
+// Bitwise reinterpretation of an int vector as a float vector (no convert).
+inline Scalar4f simd4f(const Scalar4i& v)
+{
+ return reinterpret_cast<const Scalar4f&>(v);
+}
+
+// Numeric int-to-float conversion, lane by lane.
+inline Scalar4f convert(const Scalar4i& v)
+{
+ return Scalar4f(float(v.i4[0]), float(v.i4[1]), float(v.i4[2]), float(v.i4[3]));
+}
+
+// Mutable float[4] view of the lanes.
+inline float (&array(Scalar4f& v))[4]
+{
+ return v.f4;
+}
+
+// Read-only float[4] view of the lanes.
+inline const float (&array(const Scalar4f& v))[4]
+{
+ return v.f4;
+}
+
+// Stores all four lanes to consecutive floats.
+inline void store(float* ptr, const Scalar4f& v)
+{
+ ptr[0] = v.f4[0];
+ ptr[1] = v.f4[1];
+ ptr[2] = v.f4[2];
+ ptr[3] = v.f4[3];
+}
+
+// No alignment distinction in the scalar backend.
+inline void storeAligned(float* ptr, const Scalar4f& v)
+{
+ store(ptr, v);
+}
+
+// Stores only the x/y/z lanes.
+inline void store3(float* ptr, const Scalar4f& v)
+{
+ ptr[0] = v.f4[0];
+ ptr[1] = v.f4[1];
+ ptr[2] = v.f4[2];
+}
+
+// Stores at byte offset 'offset' from ptr.
+inline void storeAligned(float* ptr, unsigned int offset, const Scalar4f& v)
+{
+ storeAligned(reinterpret_cast<float*>(reinterpret_cast<char*>(ptr) + offset), v);
+}
+
+// Broadcasts lane i into all four lanes.
+template <size_t i>
+inline Scalar4f splat(const Scalar4f& v)
+{
+ return Scalar4f(v.f4[i], v.f4[i], v.f4[i], v.f4[i]);
+}
+
+// Bitwise blend: result = (mask & v0) | (~mask & v1), written branch-free.
+inline Scalar4f select(const Scalar4f& mask, const Scalar4f& v0, const Scalar4f& v1)
+{
+ return ((v0 ^ v1) & mask) ^ v1;
+}
+
+inline Scalar4f abs(const Scalar4f& v)
+{
+ return Scalar4f(::fabsf(v.f4[0]), ::fabsf(v.f4[1]), ::fabsf(v.f4[2]), ::fabsf(v.f4[3]));
+}
+
+inline Scalar4f floor(const Scalar4f& v)
+{
+ return Scalar4f(::floorf(v.f4[0]), ::floorf(v.f4[1]), ::floorf(v.f4[2]), ::floorf(v.f4[3]));
+}
+
+// Guarded against Windows' min/max macros leaking from <windows.h>.
+#if !defined max
+inline Scalar4f max(const Scalar4f& v0, const Scalar4f& v1)
+{
+ return Scalar4f(std::max(v0.f4[0], v1.f4[0]), std::max(v0.f4[1], v1.f4[1]), std::max(v0.f4[2], v1.f4[2]),
+ std::max(v0.f4[3], v1.f4[3]));
+}
+#endif
+
+#if !defined min
+inline Scalar4f min(const Scalar4f& v0, const Scalar4f& v1)
+{
+ return Scalar4f(std::min(v0.f4[0], v1.f4[0]), std::min(v0.f4[1], v1.f4[1]), std::min(v0.f4[2], v1.f4[2]),
+ std::min(v0.f4[3], v1.f4[3]));
+}
+#endif
+
+// Exact reciprocal (SIMD backends approximate; scalar computes directly).
+inline Scalar4f recip(const Scalar4f& v)
+{
+ return Scalar4f(1 / v.f4[0], 1 / v.f4[1], 1 / v.f4[2], 1 / v.f4[3]);
+}
+
+// n = Newton-Raphson iteration count in SIMD backends; irrelevant here
+// because the scalar result is already exact.
+template <int n>
+inline Scalar4f recip(const Scalar4f& v)
+{
+ return recip(v);
+}
+
+inline Scalar4f sqrt(const Scalar4f& v)
+{
+ return Scalar4f(::sqrtf(v.f4[0]), ::sqrtf(v.f4[1]), ::sqrtf(v.f4[2]), ::sqrtf(v.f4[3]));
+}
+
+inline Scalar4f rsqrt(const Scalar4f& v)
+{
+ return recip(sqrt(v));
+}
+
+// Iteration count ignored, as for recip<n> above.
+template <int n>
+inline Scalar4f rsqrt(const Scalar4f& v)
+{
+ return rsqrt(v);
+}
+
+// 2^x computed as e^(x*ln 2).
+inline Scalar4f exp2(const Scalar4f& v)
+{
+ float scale = 0.69314718055994531f; // ::logf(2.0f);
+ return Scalar4f(::expf(v.f4[0] * scale), ::expf(v.f4[1] * scale), ::expf(v.f4[2] * scale), ::expf(v.f4[3] * scale));
+}
+
+// log2(x) computed as ln(x)/ln(2).
+inline Scalar4f log2(const Scalar4f& v)
+{
+ float scale = 1.44269504088896341f; // 1/ln(2)
+ return Scalar4f(::logf(v.f4[0]) * scale, ::logf(v.f4[1]) * scale, ::logf(v.f4[2]) * scale, ::logf(v.f4[3]) * scale);
+}
+
+// 3-component dot product, broadcast into all lanes via the scalar factory
+// (the simd4f(float) overload — declared elsewhere, not visible here).
+inline Scalar4f dot3(const Scalar4f& v0, const Scalar4f& v1)
+{
+ return simd4f(v0.f4[0] * v1.f4[0] + v0.f4[1] * v1.f4[1] + v0.f4[2] * v1.f4[2]);
+}
+
+// 3-component cross product; the w lane is cleared.
+inline Scalar4f cross3(const Scalar4f& v0, const Scalar4f& v1)
+{
+ return simd4f(v0.f4[1] * v1.f4[2] - v0.f4[2] * v1.f4[1], v0.f4[2] * v1.f4[0] - v0.f4[0] * v1.f4[2],
+ v0.f4[0] * v1.f4[1] - v0.f4[1] * v1.f4[0], 0.0f);
+}
+
+// In-place 4x4 transpose of the matrix whose rows are x, y, z, w.
+// The six temporaries hold the upper-triangle values that would otherwise
+// be overwritten before they are moved below the diagonal.
+inline void transpose(Scalar4f& x, Scalar4f& y, Scalar4f& z, Scalar4f& w)
+{
+ float x1 = x.f4[1], x2 = x.f4[2], x3 = x.f4[3];
+ float y2 = y.f4[2], y3 = y.f4[3], z3 = z.f4[3];
+
+ x.f4[1] = y.f4[0];
+ x.f4[2] = z.f4[0];
+ x.f4[3] = w.f4[0];
+ y.f4[0] = x1;
+ y.f4[2] = z.f4[1];
+ y.f4[3] = w.f4[1];
+ z.f4[0] = x2;
+ z.f4[1] = y2;
+ z.f4[3] = w.f4[2];
+ w.f4[0] = x3;
+ w.f4[1] = y3;
+ w.f4[2] = z3;
+}
+
+// Interleaves the lanes of v0 and v1 in place (scalar vzip equivalent).
+inline void zip(Scalar4f& v0, Scalar4f& v1)
+{
+ float z0 = v0.f4[2];
+ v0.f4[2] = v0.f4[1];
+ v0.f4[1] = v1.f4[0];
+ v1.f4[0] = z0;
+
+ float z1 = v1.f4[2];
+ v1.f4[2] = v0.f4[3];
+ v0.f4[3] = v1.f4[1];
+ v1.f4[1] = z1;
+}
+
+// Inverse of zip(): de-interleaves even/odd lanes in place.
+inline void unzip(Scalar4f& v0, Scalar4f& v1)
+{
+ float z0 = v0.f4[2];
+ v0.f4[2] = v1.f4[0];
+ v1.f4[0] = v0.f4[1];
+ v0.f4[1] = z0;
+
+ float z1 = v1.f4[2];
+ v1.f4[2] = v1.f4[1];
+ v1.f4[1] = v0.f4[3];
+ v0.f4[3] = z1;
+}
+
+// Swaps the low (x,y) and high (z,w) halves.
+inline Scalar4f swaphilo(const Scalar4f& v)
+{
+ return Scalar4f(v.f4[2], v.f4[3], v.f4[0], v.f4[1]);
+}
+
+// Horizontal comparison reductions. The outMask overloads additionally
+// report the per-lane result (built from bools via the Scalar4f ctor).
+inline int allEqual(const Scalar4f& v0, const Scalar4f& v1)
+{
+ return v0.f4[0] == v1.f4[0] && v0.f4[1] == v1.f4[1] && v0.f4[2] == v1.f4[2] && v0.f4[3] == v1.f4[3];
+}
+
+inline int allEqual(const Scalar4f& v0, const Scalar4f& v1, Scalar4f& outMask)
+{
+ bool b0 = v0.f4[0] == v1.f4[0], b1 = v0.f4[1] == v1.f4[1], b2 = v0.f4[2] == v1.f4[2], b3 = v0.f4[3] == v1.f4[3];
+ outMask = Scalar4f(b0, b1, b2, b3);
+ return b0 && b1 && b2 && b3;
+}
+
+inline int anyEqual(const Scalar4f& v0, const Scalar4f& v1)
+{
+ return v0.f4[0] == v1.f4[0] || v0.f4[1] == v1.f4[1] || v0.f4[2] == v1.f4[2] || v0.f4[3] == v1.f4[3];
+}
+
+inline int anyEqual(const Scalar4f& v0, const Scalar4f& v1, Scalar4f& outMask)
+{
+ bool b0 = v0.f4[0] == v1.f4[0], b1 = v0.f4[1] == v1.f4[1], b2 = v0.f4[2] == v1.f4[2], b3 = v0.f4[3] == v1.f4[3];
+ outMask = Scalar4f(b0, b1, b2, b3);
+ return b0 || b1 || b2 || b3;
+}
+
+inline int allGreater(const Scalar4f& v0, const Scalar4f& v1)
+{
+ return v0.f4[0] > v1.f4[0] && v0.f4[1] > v1.f4[1] && v0.f4[2] > v1.f4[2] && v0.f4[3] > v1.f4[3];
+}
+
+inline int allGreater(const Scalar4f& v0, const Scalar4f& v1, Scalar4f& outMask)
+{
+ bool b0 = v0.f4[0] > v1.f4[0], b1 = v0.f4[1] > v1.f4[1], b2 = v0.f4[2] > v1.f4[2], b3 = v0.f4[3] > v1.f4[3];
+ outMask = Scalar4f(b0, b1, b2, b3);
+ return b0 && b1 && b2 && b3;
+}
+
+inline int anyGreater(const Scalar4f& v0, const Scalar4f& v1)
+{
+ return v0.f4[0] > v1.f4[0] || v0.f4[1] > v1.f4[1] || v0.f4[2] > v1.f4[2] || v0.f4[3] > v1.f4[3];
+}
+
+inline int anyGreater(const Scalar4f& v0, const Scalar4f& v1, Scalar4f& outMask)
+{
+ bool b0 = v0.f4[0] > v1.f4[0], b1 = v0.f4[1] > v1.f4[1], b2 = v0.f4[2] > v1.f4[2], b3 = v0.f4[3] > v1.f4[3];
+ outMask = Scalar4f(b0, b1, b2, b3);
+ return b0 || b1 || b2 || b3;
+}
+
+inline int allGreaterEqual(const Scalar4f& v0, const Scalar4f& v1)
+{
+ return v0.f4[0] >= v1.f4[0] && v0.f4[1] >= v1.f4[1] && v0.f4[2] >= v1.f4[2] && v0.f4[3] >= v1.f4[3];
+}
+
+inline int allGreaterEqual(const Scalar4f& v0, const Scalar4f& v1, Scalar4f& outMask)
+{
+ bool b0 = v0.f4[0] >= v1.f4[0], b1 = v0.f4[1] >= v1.f4[1], b2 = v0.f4[2] >= v1.f4[2], b3 = v0.f4[3] >= v1.f4[3];
+ outMask = Scalar4f(b0, b1, b2, b3);
+ return b0 && b1 && b2 && b3;
+}
+
+inline int anyGreaterEqual(const Scalar4f& v0, const Scalar4f& v1)
+{
+ return v0.f4[0] >= v1.f4[0] || v0.f4[1] >= v1.f4[1] || v0.f4[2] >= v1.f4[2] || v0.f4[3] >= v1.f4[3];
+}
+
+inline int anyGreaterEqual(const Scalar4f& v0, const Scalar4f& v1, Scalar4f& outMask)
+{
+ bool b0 = v0.f4[0] >= v1.f4[0], b1 = v0.f4[1] >= v1.f4[1], b2 = v0.f4[2] >= v1.f4[2], b3 = v0.f4[3] >= v1.f4[3];
+ outMask = Scalar4f(b0, b1, b2, b3);
+ return b0 || b1 || b2 || b3;
+}
+
+// Non-zero when all lanes share set bits. NOTE(review): relies on lanes
+// holding canonical masks (0 / all-ones) or identical truthy values —
+// arbitrary non-zero lanes could AND to zero. Confirm callers.
+inline int allTrue(const Scalar4f& v)
+{
+ return v.i4[0] & v.i4[1] & v.i4[2] & v.i4[3];
+}
+
+// Non-zero when any lane has a set bit.
+inline int anyTrue(const Scalar4f& v)
+{
+ return v.i4[0] | v.i4[1] | v.i4[2] | v.i4[3];
+}
+
+NV_SIMD_NAMESPACE_END
diff --git a/NvCloth/src/NvSimd/scalar/NvScalarSimd4i.h b/NvCloth/src/NvSimd/scalar/NvScalarSimd4i.h
new file mode 100644
index 0000000..4e3fd48
--- /dev/null
+++ b/NvCloth/src/NvSimd/scalar/NvScalarSimd4i.h
@@ -0,0 +1,272 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2017 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#pragma once
+
+NV_SIMD_NAMESPACE_BEGIN
+
+// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+// factory implementation
+// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+
+// Scalar (non-SIMD) backend factories: lanes filled element by element.
+Simd4iZeroFactory::operator Scalar4i() const
+{
+ return Scalar4i(0, 0, 0, 0);
+}
+
+// Broadcast a single scalar into all lanes.
+Simd4iScalarFactory::operator Scalar4i() const
+{
+ return Scalar4i(value, value, value, value);
+}
+
+// Bitwise reinterpretation of the pre-packed 4-tuple.
+Simd4iTupleFactory::operator Scalar4i() const
+{
+ return reinterpret_cast<const Scalar4i&>(tuple);
+}
+
+Simd4iLoadFactory::operator Scalar4i() const
+{
+ return Scalar4i(ptr[0], ptr[1], ptr[2], ptr[3]);
+}
+
+// No alignment distinction in the scalar backend.
+Simd4iAlignedLoadFactory::operator Scalar4i() const
+{
+ return Scalar4i(ptr[0], ptr[1], ptr[2], ptr[3]);
+}
+
+// Load from (char*)ptr + offset bytes, delegating to the aligned factory.
+Simd4iOffsetLoadFactory::operator Scalar4i() const
+{
+ return Simd4iAlignedLoadFactory(reinterpret_cast<const int*>(reinterpret_cast<const char*>(ptr) + offset));
+}
+
+// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+// expression template
+// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+
+// Materializes the lazy bitwise complement lane by lane.
+template <>
+inline ComplementExpr<Scalar4i>::operator Scalar4i() const
+{
+ return Scalar4i(~v.u4[0], ~v.u4[1], ~v.u4[2], ~v.u4[3]);
+}
+
+// (~a) & b computed without materializing ~a separately.
+template <>
+inline Scalar4i operator&(const ComplementExpr<Scalar4i>& complement, const Scalar4i& v)
+{
+ return Scalar4i(v.u4[0] & ~complement.v.u4[0], v.u4[1] & ~complement.v.u4[1], v.u4[2] & ~complement.v.u4[2],
+ v.u4[3] & ~complement.v.u4[3]);
+}
+
+// a & (~b), mirror of the overload above.
+template <>
+inline Scalar4i operator&(const Scalar4i& v, const ComplementExpr<Scalar4i>& complement)
+{
+ return Scalar4i(v.u4[0] & ~complement.v.u4[0], v.u4[1] & ~complement.v.u4[1], v.u4[2] & ~complement.v.u4[2],
+ v.u4[3] & ~complement.v.u4[3]);
+}
+
+// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+// operator implementations
+// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+
+// Lazy complement expression (see the specializations above).
+inline ComplementExpr<Scalar4i> operator~(const Scalar4i& v)
+{
+ return ComplementExpr<Scalar4i>(v);
+}
+
+// Bitwise ops use the unsigned aliasing view of the lanes.
+inline Scalar4i operator&(const Scalar4i& v0, const Scalar4i& v1)
+{
+ return Scalar4i(v0.u4[0] & v1.u4[0], v0.u4[1] & v1.u4[1], v0.u4[2] & v1.u4[2], v0.u4[3] & v1.u4[3]);
+}
+
+inline Scalar4i operator|(const Scalar4i& v0, const Scalar4i& v1)
+{
+ return Scalar4i(v0.u4[0] | v1.u4[0], v0.u4[1] | v1.u4[1], v0.u4[2] | v1.u4[2], v0.u4[3] | v1.u4[3]);
+}
+
+inline Scalar4i operator^(const Scalar4i& v0, const Scalar4i& v1)
+{
+ return Scalar4i(v0.u4[0] ^ v1.u4[0], v0.u4[1] ^ v1.u4[1], v0.u4[2] ^ v1.u4[2], v0.u4[3] ^ v1.u4[3]);
+}
+
+// Logical shifts (unsigned view), matching the NEON backend.
+inline Scalar4i operator<<(const Scalar4i& v, int shift)
+{
+ return Scalar4i(v.u4[0] << shift, v.u4[1] << shift, v.u4[2] << shift, v.u4[3] << shift);
+}
+
+inline Scalar4i operator>>(const Scalar4i& v, int shift)
+{
+ return Scalar4i(v.u4[0] >> shift, v.u4[1] >> shift, v.u4[2] >> shift, v.u4[3] >> shift);
+}
+
+// Comparisons yield 0/1 per lane through the Scalar4i constructor.
+// NOTE(review): select() blends bitwise, so the constructor must map true
+// to an all-ones lane — confirm against NvScalarSimdTypes.h.
+inline Scalar4i operator == (const Scalar4i& v0, const Scalar4i& v1)
+{
+ return Scalar4i(v0.i4[0] == v1.i4[0], v0.i4[1] == v1.i4[1], v0.i4[2] == v1.i4[2], v0.i4[3] == v1.i4[3]);
+}
+
+inline Scalar4i operator<(const Scalar4i& v0, const Scalar4i& v1)
+{
+ return Scalar4i(v0.i4[0] < v1.i4[0], v0.i4[1] < v1.i4[1], v0.i4[2] < v1.i4[2], v0.i4[3] < v1.i4[3]);
+}
+
+inline Scalar4i operator>(const Scalar4i& v0, const Scalar4i& v1)
+{
+ return Scalar4i(v0.i4[0] > v1.i4[0], v0.i4[1] > v1.i4[1], v0.i4[2] > v1.i4[2], v0.i4[3] > v1.i4[3]);
+}
+
+// Unary plus: identity.
+inline Scalar4i operator + (const Scalar4i& v)
+{
+ return v;
+}
+
+inline Scalar4i operator + (const Scalar4i& v0, const Scalar4i& v1)
+{
+ return Scalar4i(v0.i4[0] + v1.i4[0], v0.i4[1] + v1.i4[1], v0.i4[2] + v1.i4[2], v0.i4[3] + v1.i4[3]);
+}
+
+inline Scalar4i operator - (const Scalar4i& v)
+{
+ return Scalar4i(-v.i4[0], -v.i4[1], -v.i4[2], -v.i4[3]);
+}
+
+inline Scalar4i operator - (const Scalar4i& v0, const Scalar4i& v1)
+{
+ return Scalar4i(v0.i4[0] - v1.i4[0], v0.i4[1] - v1.i4[1], v0.i4[2] - v1.i4[2], v0.i4[3] - v1.i4[3]);
+}
+
+// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+// function implementations
+// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+
+// Bitwise reinterpretation of a float vector as an int vector (no convert).
+inline Scalar4i simd4i(const Scalar4f& v)
+{
+ return reinterpret_cast<const Scalar4i&>(v);
+}
+
+// Float-to-int conversion, truncating toward zero.
+inline Scalar4i truncate(const Scalar4f& v)
+{
+ return Scalar4i(int(v.f4[0]), int(v.f4[1]), int(v.f4[2]), int(v.f4[3]));
+}
+
+// Mutable int[4] view of the lanes.
+inline int (&array(Scalar4i& v))[4]
+{
+ return v.i4;
+}
+
+// Read-only int[4] view of the lanes.
+inline const int (&array(const Scalar4i& v))[4]
+{
+ return v.i4;
+}
+
+// Stores all four lanes to consecutive ints.
+inline void store(int* ptr, const Scalar4i& v)
+{
+ ptr[0] = v.i4[0];
+ ptr[1] = v.i4[1];
+ ptr[2] = v.i4[2];
+ ptr[3] = v.i4[3];
+}
+
+// No alignment distinction in the scalar backend.
+inline void storeAligned(int* ptr, const Scalar4i& v)
+{
+ store(ptr, v);
+}
+
+// Stores at byte offset 'offset' from ptr.
+inline void storeAligned(int* ptr, unsigned int offset, const Scalar4i& v)
+{
+ store(reinterpret_cast<int*>(reinterpret_cast<char*>(ptr) + offset), v);
+}
+
+// Broadcasts lane i into all four lanes.
+template <size_t i>
+inline Scalar4i splat(const Scalar4i& v)
+{
+ return Scalar4i(v.u4[i], v.u4[i], v.u4[i], v.u4[i]);
+}
+
+// Bitwise blend: result = (mask & v0) | (~mask & v1), written branch-free.
+inline Scalar4i select(const Scalar4i& mask, const Scalar4i& v0, const Scalar4i& v1)
+{
+ return ((v0 ^ v1) & mask) ^ v1;
+}
+
+// Horizontal comparison reductions; the outMask overloads also report the
+// per-lane result.
+inline int allEqual(const Scalar4i& v0, const Scalar4i& v1)
+{
+ return v0.i4[0] == v1.i4[0] && v0.i4[1] == v1.i4[1] && v0.i4[2] == v1.i4[2] && v0.i4[3] == v1.i4[3];
+}
+
+inline int allEqual(const Scalar4i& v0, const Scalar4i& v1, Scalar4i& outMask)
+{
+ bool b0 = v0.i4[0] == v1.i4[0], b1 = v0.i4[1] == v1.i4[1], b2 = v0.i4[2] == v1.i4[2], b3 = v0.i4[3] == v1.i4[3];
+ outMask = Scalar4i(b0, b1, b2, b3);
+ return b0 && b1 && b2 && b3;
+}
+
+inline int anyEqual(const Scalar4i& v0, const Scalar4i& v1)
+{
+ return v0.i4[0] == v1.i4[0] || v0.i4[1] == v1.i4[1] || v0.i4[2] == v1.i4[2] || v0.i4[3] == v1.i4[3];
+}
+
+inline int anyEqual(const Scalar4i& v0, const Scalar4i& v1, Scalar4i& outMask)
+{
+ bool b0 = v0.i4[0] == v1.i4[0], b1 = v0.i4[1] == v1.i4[1], b2 = v0.i4[2] == v1.i4[2], b3 = v0.i4[3] == v1.i4[3];
+ outMask = Scalar4i(b0, b1, b2, b3);
+ return b0 || b1 || b2 || b3;
+}
+
+inline int allGreater(const Scalar4i& v0, const Scalar4i& v1)
+{
+ return v0.i4[0] > v1.i4[0] && v0.i4[1] > v1.i4[1] && v0.i4[2] > v1.i4[2] && v0.i4[3] > v1.i4[3];
+}
+
+inline int allGreater(const Scalar4i& v0, const Scalar4i& v1, Scalar4i& outMask)
+{
+ bool b0 = v0.i4[0] > v1.i4[0], b1 = v0.i4[1] > v1.i4[1], b2 = v0.i4[2] > v1.i4[2], b3 = v0.i4[3] > v1.i4[3];
+ outMask = Scalar4i(b0, b1, b2, b3);
+ return b0 && b1 && b2 && b3;
+}
+
+inline int anyGreater(const Scalar4i& v0, const Scalar4i& v1)
+{
+ return v0.i4[0] > v1.i4[0] || v0.i4[1] > v1.i4[1] || v0.i4[2] > v1.i4[2] || v0.i4[3] > v1.i4[3];
+}
+
+inline int anyGreater(const Scalar4i& v0, const Scalar4i& v1, Scalar4i& outMask)
+{
+ bool b0 = v0.i4[0] > v1.i4[0], b1 = v0.i4[1] > v1.i4[1], b2 = v0.i4[2] > v1.i4[2], b3 = v0.i4[3] > v1.i4[3];
+ outMask = Scalar4i(b0, b1, b2, b3);
+ return b0 || b1 || b2 || b3;
+}
+
+// Non-zero when all lanes share set bits. NOTE(review): correct for
+// canonical masks (0 / all-ones) or 0/1 booleans; arbitrary non-zero
+// lanes could AND to zero — confirm callers.
+inline int allTrue(const Scalar4i& v)
+{
+ return v.i4[0] & v.i4[1] & v.i4[2] & v.i4[3];
+}
+
+// Non-zero when any lane has a set bit.
+inline int anyTrue(const Scalar4i& v)
+{
+ return v.i4[0] | v.i4[1] | v.i4[2] | v.i4[3];
+}
+
+NV_SIMD_NAMESPACE_END
diff --git a/NvCloth/src/NvSimd/scalar/NvScalarSimdTypes.h b/NvCloth/src/NvSimd/scalar/NvScalarSimdTypes.h
new file mode 100644
index 0000000..35814b7
--- /dev/null
+++ b/NvCloth/src/NvSimd/scalar/NvScalarSimdTypes.h
@@ -0,0 +1,154 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2017 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#pragma once
+
+#if NV_WIIU
+#pragma ghs nowarning 193 // warning #193-D: zero used for undefined preprocessing identifier
+#endif
+
+#include <algorithm>
+
+#if NV_WIIU
+#pragma ghs endnowarning
+#endif
+
+#ifdef _MSC_VER
+typedef __int32 int32_t;
+typedef unsigned __int32 uint32_t;
+#endif
+
+NV_SIMD_NAMESPACE_BEGIN
+
+/** \brief Scalar fallback for SIMD containing 4 floats */
+union Scalar4f
+{
+ Scalar4f()
+ {
+ }
+
+ Scalar4f(float x, float y, float z, float w)
+ {
+ f4[0] = x;
+ f4[1] = y;
+ f4[2] = z;
+ f4[3] = w;
+ }
+
+ Scalar4f(uint32_t x, uint32_t y, uint32_t z, uint32_t w)
+ {
+ u4[0] = x;
+ u4[1] = y;
+ u4[2] = z;
+ u4[3] = w;
+ }
+
+ Scalar4f(bool x, bool y, bool z, bool w)
+ {
+ u4[0] = ~(uint32_t(x) - 1);
+ u4[1] = ~(uint32_t(y) - 1);
+ u4[2] = ~(uint32_t(z) - 1);
+ u4[3] = ~(uint32_t(w) - 1);
+ }
+
+ Scalar4f(const Scalar4f& other)
+ {
+ f4[0] = other.f4[0];
+ f4[1] = other.f4[1];
+ f4[2] = other.f4[2];
+ f4[3] = other.f4[3];
+ }
+
+ Scalar4f& operator = (const Scalar4f& other)
+ {
+ f4[0] = other.f4[0];
+ f4[1] = other.f4[1];
+ f4[2] = other.f4[2];
+ f4[3] = other.f4[3];
+ return *this;
+ }
+
+ float f4[4];
+ int32_t i4[4];
+ uint32_t u4[4];
+};
+
+/** \brief Scalar fallback for SIMD containing 4 integers */
+union Scalar4i
+{
+ Scalar4i()
+ {
+ }
+
+ Scalar4i(int32_t x, int32_t y, int32_t z, int32_t w)
+ {
+ i4[0] = x;
+ i4[1] = y;
+ i4[2] = z;
+ i4[3] = w;
+ }
+
+ Scalar4i(uint32_t x, uint32_t y, uint32_t z, uint32_t w)
+ {
+ u4[0] = x;
+ u4[1] = y;
+ u4[2] = z;
+ u4[3] = w;
+ }
+
+ Scalar4i(bool x, bool y, bool z, bool w)
+ {
+ u4[0] = ~(uint32_t(x) - 1);
+ u4[1] = ~(uint32_t(y) - 1);
+ u4[2] = ~(uint32_t(z) - 1);
+ u4[3] = ~(uint32_t(w) - 1);
+ }
+
+ Scalar4i(const Scalar4i& other)
+ {
+ u4[0] = other.u4[0];
+ u4[1] = other.u4[1];
+ u4[2] = other.u4[2];
+ u4[3] = other.u4[3];
+ }
+
+ Scalar4i& operator = (const Scalar4i& other)
+ {
+ u4[0] = other.u4[0];
+ u4[1] = other.u4[1];
+ u4[2] = other.u4[2];
+ u4[3] = other.u4[3];
+ return *this;
+ }
+
+ int32_t i4[4];
+ uint32_t u4[4];
+};
+
+NV_SIMD_NAMESPACE_END
diff --git a/NvCloth/src/NvSimd/sse2/NvSse2Simd4f.h b/NvCloth/src/NvSimd/sse2/NvSse2Simd4f.h
new file mode 100644
index 0000000..f280e35
--- /dev/null
+++ b/NvCloth/src/NvSimd/sse2/NvSse2Simd4f.h
@@ -0,0 +1,471 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2017 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#pragma once
+
+NV_SIMD_NAMESPACE_BEGIN
+
+// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+// factory implementation
+// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+
+Simd4fZeroFactory::operator Simd4f() const
+{
+ return _mm_setzero_ps();
+}
+
+Simd4fOneFactory::operator Simd4f() const
+{
+ return _mm_set1_ps(1.0f);
+}
+
+Simd4fScalarFactory::operator Simd4f() const
+{
+ return _mm_set1_ps(value);
+}
+
+Simd4fTupleFactory::operator Simd4f() const
+{
+ return reinterpret_cast<const Simd4f&>(tuple);
+}
+
+Simd4fLoadFactory::operator Simd4f() const
+{
+ return _mm_loadu_ps(ptr);
+}
+
+Simd4fLoad3Factory::operator Simd4f() const
+{
+ /* [f0 f1 f2 f3] = [ptr[0] ptr[1] 0 0] */
+ __m128i xy = _mm_loadl_epi64(reinterpret_cast<const __m128i*>(ptr));
+ __m128 z = _mm_load_ss(ptr + 2);
+ return _mm_movelh_ps(_mm_castsi128_ps(xy), z);
+}
+
+Simd4fLoad3SetWFactory::operator Simd4f() const
+{
+ __m128 z = _mm_load_ss(ptr + 2);
+ __m128 wTmp = _mm_load_ss(&w);
+
+ __m128i xy = _mm_loadl_epi64(reinterpret_cast<const __m128i*>(ptr));
+ __m128 zw = _mm_movelh_ps(z, wTmp);
+
+ return _mm_shuffle_ps(_mm_castsi128_ps(xy), zw, _MM_SHUFFLE(2, 0, 1, 0));
+}
+
+Simd4fAlignedLoadFactory::operator Simd4f() const
+{
+ return _mm_load_ps(ptr);
+}
+
+Simd4fOffsetLoadFactory::operator Simd4f() const
+{
+ return _mm_load_ps(reinterpret_cast<const float*>(reinterpret_cast<const char*>(ptr) + offset));
+}
+
+// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+// expression template
+// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+
+template <>
+inline ComplementExpr<Simd4f>::operator Simd4f() const
+{
+ return _mm_andnot_ps(v, _mm_castsi128_ps(_mm_set1_epi32(-1)));
+}
+
+template <>
+inline Simd4f operator&(const ComplementExpr<Simd4f>& complement, const Simd4f& v)
+{
+ return _mm_andnot_ps(complement.v, v);
+}
+
+template <>
+inline Simd4f operator&(const Simd4f& v, const ComplementExpr<Simd4f>& complement)
+{
+ return _mm_andnot_ps(complement.v, v);
+}
+
+// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+// operator implementations
+// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+
+Simd4f operator == (const Simd4f& v0, const Simd4f& v1)
+{
+ return _mm_cmpeq_ps(v0, v1);
+}
+
+Simd4f operator<(const Simd4f& v0, const Simd4f& v1)
+{
+ return _mm_cmplt_ps(v0, v1);
+}
+
+Simd4f operator <= (const Simd4f& v0, const Simd4f& v1)
+{
+ return _mm_cmple_ps(v0, v1);
+}
+
+Simd4f operator>(const Simd4f& v0, const Simd4f& v1)
+{
+ return _mm_cmpgt_ps(v0, v1);
+}
+
+Simd4f operator >= (const Simd4f& v0, const Simd4f& v1)
+{
+ return _mm_cmpge_ps(v0, v1);
+}
+
+ComplementExpr<Simd4f> operator~(const Simd4f& v)
+{
+ return ComplementExpr<Simd4f>(v);
+}
+
+Simd4f operator&(const Simd4f& v0, const Simd4f& v1)
+{
+ return _mm_and_ps(v0, v1);
+}
+
+Simd4f operator|(const Simd4f& v0, const Simd4f& v1)
+{
+ return _mm_or_ps(v0, v1);
+}
+
+Simd4f operator^(const Simd4f& v0, const Simd4f& v1)
+{
+ return _mm_xor_ps(v0, v1);
+}
+
+Simd4f operator<<(const Simd4f& v, int shift)
+{
+ return _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(v), shift));
+}
+
+Simd4f operator>>(const Simd4f& v, int shift)
+{
+ return _mm_castsi128_ps(_mm_srli_epi32(_mm_castps_si128(v), shift));
+}
+
+Simd4f operator + (const Simd4f& v)
+{
+ return v;
+}
+
+Simd4f operator + (const Simd4f& v0, const Simd4f& v1)
+{
+ return _mm_add_ps(v0, v1);
+}
+
+Simd4f operator - (const Simd4f& v)
+{
+ return _mm_xor_ps(_mm_castsi128_ps(_mm_set1_epi32(0x80000000)), v);
+}
+
+Simd4f operator - (const Simd4f& v0, const Simd4f& v1)
+{
+ return _mm_sub_ps(v0, v1);
+}
+
+Simd4f operator*(const Simd4f& v0, const Simd4f& v1)
+{
+ return _mm_mul_ps(v0, v1);
+}
+
+Simd4f operator/(const Simd4f& v0, const Simd4f& v1)
+{
+ return _mm_div_ps(v0, v1);
+}
+
+// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+// function implementations
+// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+
+Simd4f simd4f(const Simd4i& v)
+{
+ return _mm_castsi128_ps(v);
+}
+
+Simd4f convert(const Simd4i& v)
+{
+ return _mm_cvtepi32_ps(v);
+}
+
+float (&array(Simd4f& v))[4]
+{
+ return reinterpret_cast<float(&)[4]>(v);
+}
+
+const float (&array(const Simd4f& v))[4]
+{
+ return reinterpret_cast<const float(&)[4]>(v);
+}
+
+void store(float* ptr, Simd4f const& v)
+{
+ _mm_storeu_ps(ptr, v);
+}
+
+void storeAligned(float* ptr, Simd4f const& v)
+{
+ _mm_store_ps(ptr, v);
+}
+
+void store3(float* dst, const Simd4f& v)
+{
+ const float* __restrict src = array(v);
+ dst[0] = src[0];
+ dst[1] = src[1];
+ dst[2] = src[2];
+}
+
+void storeAligned(float* ptr, unsigned int offset, Simd4f const& v)
+{
+ _mm_store_ps(reinterpret_cast<float*>(reinterpret_cast<char*>(ptr) + offset), v);
+}
+
+template <size_t i>
+Simd4f splat(Simd4f const& v)
+{
+ return _mm_shuffle_ps(v, v, _MM_SHUFFLE(i, i, i, i));
+}
+
+Simd4f select(Simd4f const& mask, Simd4f const& v0, Simd4f const& v1)
+{
+ return _mm_xor_ps(v1, _mm_and_ps(mask, _mm_xor_ps(v1, v0)));
+}
+
+Simd4f abs(const Simd4f& v)
+{
+ return _mm_andnot_ps(_mm_castsi128_ps(_mm_set1_epi32(0x80000000)), v);
+}
+
+Simd4f floor(const Simd4f& v)
+{
+ // SSE 4.1: return _mm_floor_ps(v);
+ Simd4i i = _mm_cvttps_epi32(v);
+ Simd4i s = _mm_castps_si128(_mm_cmpgt_ps(_mm_cvtepi32_ps(i), v));
+ return _mm_cvtepi32_ps(_mm_sub_epi32(i, _mm_srli_epi32(s, 31)));
+}
+
+#if !defined max
+Simd4f max(const Simd4f& v0, const Simd4f& v1)
+{
+ return _mm_max_ps(v0, v1);
+}
+#endif
+
+#if !defined min
+Simd4f min(const Simd4f& v0, const Simd4f& v1)
+{
+ return _mm_min_ps(v0, v1);
+}
+#endif
+
+Simd4f recip(const Simd4f& v)
+{
+ return _mm_rcp_ps(v);
+}
+
+template <int n>
+Simd4f recip(const Simd4f& v)
+{
+ Simd4f two = simd4f(2.0f);
+ Simd4f r = recip(v);
+ for (int i = 0; i < n; ++i)
+ r = r * (two - v * r);
+ return r;
+}
+
+Simd4f sqrt(const Simd4f& v)
+{
+ return _mm_sqrt_ps(v);
+}
+
+Simd4f rsqrt(const Simd4f& v)
+{
+ return _mm_rsqrt_ps(v);
+}
+
+template <int n>
+Simd4f rsqrt(const Simd4f& v)
+{
+ Simd4f halfV = v * simd4f(0.5f);
+ Simd4f threeHalf = simd4f(1.5f);
+ Simd4f r = rsqrt(v);
+ for (int i = 0; i < n; ++i)
+ r = r * (threeHalf - halfV * r * r);
+ return r;
+}
+
+Simd4f exp2(const Simd4f& v)
+{
+ // http://www.netlib.org/cephes/
+
+ Simd4f limit = simd4f(127.4999f);
+ Simd4f x = min(max(-limit, v), limit);
+
+ // separate into integer and fractional part
+
+ Simd4f fx = x + simd4f(0.5f);
+ Simd4i ix = _mm_sub_epi32(_mm_cvttps_epi32(fx), _mm_srli_epi32(_mm_castps_si128(fx), 31));
+ fx = x - Simd4f(_mm_cvtepi32_ps(ix));
+
+ // exp2(fx) ~ 1 + 2 * P(fx) / (Q(fx) - P(fx))
+
+ Simd4f fx2 = fx * fx;
+
+ Simd4f px = fx * (simd4f(1.51390680115615096133e+3f) +
+ fx2 * (simd4f(2.02020656693165307700e+1f) + fx2 * simd4f(2.30933477057345225087e-2f)));
+ Simd4f qx = simd4f(4.36821166879210612817e+3f) + fx2 * (simd4f(2.33184211722314911771e+2f) + fx2);
+
+ Simd4f exp2fx = px * recip(qx - px);
+ exp2fx = gSimd4fOne + exp2fx + exp2fx;
+
+ // exp2(ix)
+
+ Simd4f exp2ix = _mm_castsi128_ps(_mm_slli_epi32(_mm_add_epi32(ix, _mm_set1_epi32(0x7f)), 23));
+
+ return exp2fx * exp2ix;
+}
+
+Simd4f log2(const Simd4f& v)
+{
+ // todo: fast approximate implementation like exp2
+ Simd4f scale = simd4f(1.44269504088896341f); // 1/ln(2)
+ const float* ptr = array(v);
+ return simd4f(::logf(ptr[0]), ::logf(ptr[1]), ::logf(ptr[2]), ::logf(ptr[3])) * scale;
+}
+
+Simd4f dot3(const Simd4f& v0, const Simd4f& v1)
+{
+ Simd4f tmp = v0 * v1;
+ return splat<0>(tmp) + splat<1>(tmp) + splat<2>(tmp);
+}
+
+Simd4f cross3(const Simd4f& v0, const Simd4f& v1)
+{
+ Simd4f t0 = _mm_shuffle_ps(v0, v0, 0xc9); // w z y x -> w x z y
+ Simd4f t1 = _mm_shuffle_ps(v1, v1, 0xc9);
+ Simd4f tmp = v0 * t1 - t0 * v1;
+ return _mm_shuffle_ps(tmp, tmp, 0xc9);
+}
+
+void transpose(Simd4f& x, Simd4f& y, Simd4f& z, Simd4f& w)
+{
+ _MM_TRANSPOSE4_PS(x, y, z, w);
+}
+
+void zip(Simd4f& v0, Simd4f& v1)
+{
+ Simd4f t0 = v0;
+ v0 = _mm_unpacklo_ps(v0, v1);
+ v1 = _mm_unpackhi_ps(t0, v1);
+}
+
+void unzip(Simd4f& v0, Simd4f& v1)
+{
+ Simd4f t0 = v0;
+ v0 = _mm_shuffle_ps(v0, v1, _MM_SHUFFLE(2, 0, 2, 0));
+ v1 = _mm_shuffle_ps(t0, v1, _MM_SHUFFLE(3, 1, 3, 1));
+}
+
+Simd4f swaphilo(const Simd4f& v)
+{
+ return _mm_shuffle_ps(v, v, _MM_SHUFFLE(1, 0, 3, 2));
+}
+
+int allEqual(const Simd4f& v0, const Simd4f& v1)
+{
+ return allTrue(v0 == v1);
+}
+
+int allEqual(const Simd4f& v0, const Simd4f& v1, Simd4f& outMask)
+{
+ return allTrue(outMask = v0 == v1);
+}
+
+int anyEqual(const Simd4f& v0, const Simd4f& v1)
+{
+ return anyTrue(v0 == v1);
+}
+
+int anyEqual(const Simd4f& v0, const Simd4f& v1, Simd4f& outMask)
+{
+ return anyTrue(outMask = v0 == v1);
+}
+
+int allGreater(const Simd4f& v0, const Simd4f& v1)
+{
+ return allTrue(v0 > v1);
+}
+
+int allGreater(const Simd4f& v0, const Simd4f& v1, Simd4f& outMask)
+{
+ return allTrue(outMask = v0 > v1);
+}
+
+int anyGreater(const Simd4f& v0, const Simd4f& v1)
+{
+ return anyTrue(v0 > v1);
+}
+
+int anyGreater(const Simd4f& v0, const Simd4f& v1, Simd4f& outMask)
+{
+ return anyTrue(outMask = v0 > v1);
+}
+
+int allGreaterEqual(const Simd4f& v0, const Simd4f& v1)
+{
+ return allTrue(v0 >= v1);
+}
+
+int allGreaterEqual(const Simd4f& v0, const Simd4f& v1, Simd4f& outMask)
+{
+ return allTrue(outMask = v0 >= v1);
+}
+
+int anyGreaterEqual(const Simd4f& v0, const Simd4f& v1)
+{
+ return anyTrue(v0 >= v1);
+}
+
+int anyGreaterEqual(const Simd4f& v0, const Simd4f& v1, Simd4f& outMask)
+{
+ return anyTrue(outMask = v0 >= v1);
+}
+
+int allTrue(const Simd4f& v)
+{
+ return _mm_movemask_ps(v) == 0xf;
+}
+
+int anyTrue(const Simd4f& v)
+{
+ return _mm_movemask_ps(v);
+}
+
+NV_SIMD_NAMESPACE_END
diff --git a/NvCloth/src/NvSimd/sse2/NvSse2Simd4i.h b/NvCloth/src/NvSimd/sse2/NvSse2Simd4i.h
new file mode 100644
index 0000000..1b1439d
--- /dev/null
+++ b/NvCloth/src/NvSimd/sse2/NvSse2Simd4i.h
@@ -0,0 +1,259 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2017 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#pragma once
+
+NV_SIMD_NAMESPACE_BEGIN
+
+// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+// factory implementation
+// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+
+Simd4iZeroFactory::operator Simd4i() const
+{
+ return _mm_setzero_si128();
+}
+
+Simd4iScalarFactory::operator Simd4i() const
+{
+ return _mm_set1_epi32(value);
+}
+
+Simd4iTupleFactory::operator Simd4i() const
+{
+ return reinterpret_cast<const Simd4i&>(tuple);
+}
+
+Simd4iLoadFactory::operator Simd4i() const
+{
+ return _mm_loadu_si128(reinterpret_cast<const __m128i*>(ptr));
+}
+
+Simd4iAlignedLoadFactory::operator Simd4i() const
+{
+ return _mm_load_si128(reinterpret_cast<const __m128i*>(ptr));
+}
+
+Simd4iOffsetLoadFactory::operator Simd4i() const
+{
+ return _mm_load_si128(reinterpret_cast<const __m128i*>(reinterpret_cast<const char*>(ptr) + offset));
+}
+
+// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+// expression template
+// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+
+template <>
+inline ComplementExpr<Simd4i>::operator Simd4i() const
+{
+ return _mm_andnot_si128(v, _mm_set1_epi32(0xffffffff));
+}
+
+template <>
+inline Simd4i operator&(const ComplementExpr<Simd4i>& complement, const Simd4i& v)
+{
+ return _mm_andnot_si128(complement.v, v);
+}
+
+template <>
+inline Simd4i operator&(const Simd4i& v, const ComplementExpr<Simd4i>& complement)
+{
+ return _mm_andnot_si128(complement.v, v);
+}
+
+// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+// operator implementations
+// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+
+Simd4i operator == (const Simd4i& v0, const Simd4i& v1)
+{
+ return _mm_cmpeq_epi32(v0, v1);
+}
+
+Simd4i operator<(const Simd4i& v0, const Simd4i& v1)
+{
+ return _mm_cmplt_epi32(v0, v1);
+}
+
+Simd4i operator>(const Simd4i& v0, const Simd4i& v1)
+{
+ return _mm_cmpgt_epi32(v0, v1);
+}
+
+ComplementExpr<Simd4i> operator~(const Simd4i& v)
+{
+ return ComplementExpr<Simd4i>(v);
+}
+
+Simd4i operator&(const Simd4i& v0, const Simd4i& v1)
+{
+ return _mm_and_si128(v0, v1);
+}
+
+Simd4i operator|(const Simd4i& v0, const Simd4i& v1)
+{
+ return _mm_or_si128(v0, v1);
+}
+
+Simd4i operator^(const Simd4i& v0, const Simd4i& v1)
+{
+ return _mm_xor_si128(v0, v1);
+}
+
+Simd4i operator<<(const Simd4i& v, int shift)
+{
+ return _mm_slli_epi32(v, shift);
+}
+
+Simd4i operator>>(const Simd4i& v, int shift)
+{
+ return _mm_srli_epi32(v, shift);
+}
+
+Simd4i operator + (const Simd4i& v)
+{
+ return v;
+}
+
+Simd4i operator + (const Simd4i& v0, const Simd4i& v1)
+{
+ return _mm_add_epi32(v0, v1);
+}
+
+Simd4i operator - (const Simd4i& v)
+{
+ return _mm_sub_epi32(_mm_setzero_si128(), v);
+}
+
+Simd4i operator - (const Simd4i& v0, const Simd4i& v1)
+{
+ return _mm_sub_epi32(v0, v1);
+}
+
+// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+// function implementations
+// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+
+Simd4i simd4i(const Simd4f& v)
+{
+ return _mm_castps_si128(v);
+}
+
+Simd4i truncate(const Simd4f& v)
+{
+ return _mm_cvttps_epi32(v);
+}
+
+int (&array(Simd4i& v))[4]
+{
+ return reinterpret_cast<int(&)[4]>(v);
+}
+
+const int (&array(const Simd4i& v))[4]
+{
+ return reinterpret_cast<const int(&)[4]>(v);
+}
+
+void store(int* ptr, const Simd4i& v)
+{
+ _mm_storeu_si128(reinterpret_cast<__m128i*>(ptr), v);
+}
+
+void storeAligned(int* ptr, const Simd4i& v)
+{
+ _mm_store_si128(reinterpret_cast<__m128i*>(ptr), v);
+}
+
+void storeAligned(int* ptr, unsigned int offset, const Simd4i& v)
+{
+ _mm_store_si128(reinterpret_cast<__m128i*>(reinterpret_cast<char*>(ptr) + offset), v);
+}
+
+template <size_t i>
+Simd4i splat(const Simd4i& v)
+{
+ return _mm_shuffle_epi32(v, _MM_SHUFFLE(i, i, i, i));
+}
+
+Simd4i select(const Simd4i& mask, const Simd4i& v0, const Simd4i& v1)
+{
+ return _mm_xor_si128(v1, _mm_and_si128(mask, _mm_xor_si128(v1, v0)));
+}
+
+int allEqual(const Simd4i& v0, const Simd4i& v1)
+{
+ return allTrue(operator == (v0, v1));
+}
+
+int allEqual(const Simd4i& v0, const Simd4i& v1, Simd4i& outMask)
+{
+ return allTrue(outMask = operator == (v0, v1));
+}
+
+int anyEqual(const Simd4i& v0, const Simd4i& v1)
+{
+ return anyTrue(operator == (v0, v1));
+}
+
+int anyEqual(const Simd4i& v0, const Simd4i& v1, Simd4i& outMask)
+{
+ return anyTrue(outMask = operator == (v0, v1));
+}
+
+int allGreater(const Simd4i& v0, const Simd4i& v1)
+{
+ return allTrue(operator>(v0, v1));
+}
+
+int allGreater(const Simd4i& v0, const Simd4i& v1, Simd4i& outMask)
+{
+ return allTrue(outMask = operator>(v0, v1));
+}
+
+int anyGreater(const Simd4i& v0, const Simd4i& v1)
+{
+ return anyTrue(operator>(v0, v1));
+}
+
+int anyGreater(const Simd4i& v0, const Simd4i& v1, Simd4i& outMask)
+{
+ return anyTrue(outMask = operator>(v0, v1));
+}
+
+int allTrue(const Simd4i& v)
+{
+ return _mm_movemask_ps(_mm_castsi128_ps(v)) == 0xf;
+}
+
+int anyTrue(const Simd4i& v)
+{
+ return _mm_movemask_ps(_mm_castsi128_ps(v));
+}
+
+NV_SIMD_NAMESPACE_END
diff --git a/NvCloth/src/NvSimd/sse2/NvSse2SimdTypes.h b/NvCloth/src/NvSimd/sse2/NvSse2SimdTypes.h
new file mode 100644
index 0000000..353f17a
--- /dev/null
+++ b/NvCloth/src/NvSimd/sse2/NvSse2SimdTypes.h
@@ -0,0 +1,95 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2017 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#pragma once
+
+// SSE + SSE2 (don't include intrin.h!)
+#include <emmintrin.h>
+
+#if defined _MSC_VER && !(defined NV_SIMD_USE_NAMESPACE && NV_SIMD_USE_NAMESPACE)
+
+// SIMD libarary lives in global namespace and Simd4f is
+// typedef'd to __m128 so it can be passed by value on MSVC.
+
+typedef __m128 Simd4f;
+typedef __m128i Simd4i;
+
+#else
+
+NV_SIMD_NAMESPACE_BEGIN
+
+/** \brief SIMD type containing 4 floats */
+struct Simd4f
+{
+ Simd4f()
+ {
+ }
+ Simd4f(__m128 x) : m128(x)
+ {
+ }
+
+ operator __m128&()
+ {
+ return m128;
+ }
+ operator const __m128&() const
+ {
+ return m128;
+ }
+
+ private:
+ __m128 m128;
+};
+
+/** \brief SIMD type containing 4 integers */
+struct Simd4i
+{
+ Simd4i()
+ {
+ }
+ Simd4i(__m128i x) : m128i(x)
+ {
+ }
+
+ operator __m128i&()
+ {
+ return m128i;
+ }
+ operator const __m128i&() const
+ {
+ return m128i;
+ }
+
+ private:
+ __m128i m128i;
+};
+
+NV_SIMD_NAMESPACE_END
+
+#endif
diff --git a/NvCloth/src/PhaseConfig.cpp b/NvCloth/src/PhaseConfig.cpp
new file mode 100644
index 0000000..7ea97f3
--- /dev/null
+++ b/NvCloth/src/PhaseConfig.cpp
@@ -0,0 +1,68 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2017 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#include "NvCloth/PhaseConfig.h"
+#include "PsMathUtils.h"
+#include <algorithm>
+
+using namespace physx;
+
+namespace nv
+{
+namespace cloth
+{
+PhaseConfig transform(const PhaseConfig&);
+}
+}
+
+using namespace nv;
+
+namespace
+{
+float safeLog2(float x)
+{
+ float saturated = std::max(0.0f, std::min(x, 1.0f));
+ return saturated ? shdfnd::log2(saturated) : -FLT_MAX_EXP;
+}
+}
+
+// convert from user input to solver format
+cloth::PhaseConfig cloth::transform(const PhaseConfig& config)
+{
+ PhaseConfig result(config.mPhaseIndex);
+
+ result.mStiffness = safeLog2(1.0f - config.mStiffness);
+ result.mStiffnessMultiplier = safeLog2(config.mStiffnessMultiplier);
+
+ // negative for compression, positive for stretch
+ result.mCompressionLimit = 1.f - 1.f / config.mCompressionLimit;
+ result.mStretchLimit = 1.f - 1.f / config.mStretchLimit;
+
+ return result;
+}
diff --git a/NvCloth/src/PointInterpolator.h b/NvCloth/src/PointInterpolator.h
new file mode 100644
index 0000000..b9db131
--- /dev/null
+++ b/NvCloth/src/PointInterpolator.h
@@ -0,0 +1,167 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2017 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#pragma once
+
+
+namespace nv
+{
+
+namespace cloth
+{
+
+// acts as a poor mans random access iterator
+template <typename Simd4f, typename BaseIterator>
+class LerpIterator
+{
+
+ LerpIterator& operator = (const LerpIterator&); // not implemented
+
+ public:
+ LerpIterator(BaseIterator start, BaseIterator target, float alpha)
+ : mAlpha(simd4f(alpha)), mStart(start), mTarget(target)
+ {
+ }
+
+ // return the interpolated point at a given index
+ inline Simd4f operator[](size_t index) const
+ {
+ return mStart[index] + (mTarget[index] - mStart[index]) * mAlpha;
+ }
+
+ inline Simd4f operator*() const
+ {
+ return (*this)[0];
+ }
+
+ // prefix increment only
+ inline LerpIterator& operator ++ ()
+ {
+ ++mStart;
+ ++mTarget;
+ return *this;
+ }
+
+ private:
+ // interpolation parameter
+ const Simd4f mAlpha;
+
+ BaseIterator mStart;
+ BaseIterator mTarget;
+};
+
+template <typename Simd4f, size_t Stride>
+class UnalignedIterator
+{
+
+ UnalignedIterator& operator = (const UnalignedIterator&); // not implemented
+
+ public:
+ UnalignedIterator(const float* pointer) : mPointer(pointer)
+ {
+ }
+
+ inline Simd4f operator[](size_t index) const
+ {
+ return load(mPointer + index * Stride);
+ }
+
+ inline Simd4f operator*() const
+ {
+ return (*this)[0];
+ }
+
+ // prefix increment only
+ inline UnalignedIterator& operator ++ ()
+ {
+ mPointer += Stride;
+ return *this;
+ }
+
+ private:
+ const float* mPointer;
+};
+
+// acts as an iterator but returns a constant
+template <typename Simd4f>
+class ConstantIterator
+{
+ public:
+ ConstantIterator(const Simd4f& value) : mValue(value)
+ {
+ }
+
+ inline Simd4f operator*() const
+ {
+ return mValue;
+ }
+
+ inline ConstantIterator& operator ++ ()
+ {
+ return *this;
+ }
+
+ private:
+ ConstantIterator& operator = (const ConstantIterator&);
+ const Simd4f mValue;
+};
+
+// wraps an iterator with constant scale and bias
+template <typename Simd4f, typename BaseIterator>
+class ScaleBiasIterator
+{
+ public:
+ ScaleBiasIterator(BaseIterator base, const Simd4f& scale, const Simd4f& bias)
+ : mScale(scale), mBias(bias), mBaseIterator(base)
+ {
+ }
+
+ inline Simd4f operator*() const
+ {
+ return (*mBaseIterator) * mScale + mBias;
+ }
+
+ inline ScaleBiasIterator& operator ++ ()
+ {
+ ++mBaseIterator;
+ return *this;
+ }
+
+ private:
+ ScaleBiasIterator& operator = (const ScaleBiasIterator&);
+
+ const Simd4f mScale;
+ const Simd4f mBias;
+
+ BaseIterator mBaseIterator;
+};
+
+} // namespace cloth
+
+} // namespace nv
diff --git a/NvCloth/src/Simd.h b/NvCloth/src/Simd.h
new file mode 100644
index 0000000..eaf7a41
--- /dev/null
+++ b/NvCloth/src/Simd.h
@@ -0,0 +1,43 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2017 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#pragma once
+
+// cloth solver is 50% slower (!) on MSVC 11 and earlier when Simd4f lives in a namespace
+#define NV_SIMD_USE_NAMESPACE 0
+
+#include "NvSimd/NvSimd4f.h"
+#include "NvSimd/NvSimd4i.h"
+
+namespace nv
+{
+#if NV_SIMD_USE_NAMESPACE
+using namespace simd;
+#endif
+}
diff --git a/NvCloth/src/StackAllocator.h b/NvCloth/src/StackAllocator.h
new file mode 100644
index 0000000..7191517
--- /dev/null
+++ b/NvCloth/src/StackAllocator.h
@@ -0,0 +1,163 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2017 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#pragma once
+
+#include "NvCloth/Callbacks.h"
+
+#if NV_LINUX_FAMILY
+#include <stdint.h> // intptr_t
+#endif
+
+namespace nv
+{
+namespace cloth
+{
+
+// LIFO (stack-order) allocator carving aligned blocks out of a caller-owned
+// buffer. Each allocation is preceded by a Header linking back to the previous
+// live allocation. Blocks may be deallocated in any order: deallocate() only
+// marks a block free, and the top of the stack is unwound once the topmost
+// blocks are all free. The allocator never owns or frees the buffer itself.
+template <size_t align>
+class StackAllocator
+{
+ typedef unsigned char byte;
+
+ // todo: switch to offsets so size is consistent on x64
+ // mSize is just for book keeping so could be 4 bytes
+ // note: mSize is a 31-bit field, so a single allocation is limited to 2^31-1 bytes
+ struct Header
+ {
+ Header* mPrev;
+ size_t mSize : 31;
+ size_t mFree : 1;
+ };
+
+ StackAllocator(const StackAllocator&);
+ StackAllocator& operator = (const StackAllocator&);
+
+ public:
+ StackAllocator(void* buffer, size_t bufferSize)
+ : mBuffer(reinterpret_cast<byte*>(buffer)), mBufferSize(bufferSize), mFreeStart(mBuffer), mTop(0)
+ {
+ }
+
+ ~StackAllocator()
+ {
+ // all allocations must have been returned before destruction
+ NV_CLOTH_ASSERT(userBytes() == 0);
+ }
+
+ // returns a pointer aligned to the 'align' template parameter, or 0 for a
+ // zero-byte request
+ void* allocate(size_t numBytes)
+ {
+ // this is non-standard
+ if (!numBytes)
+ return 0;
+
+ uintptr_t unalignedStart = uintptr_t(mFreeStart) + sizeof(Header);
+
+ byte* allocStart = reinterpret_cast<byte*>((unalignedStart + (align - 1)) & ~(align - 1));
+ byte* allocEnd = allocStart + numBytes;
+
+ // ensure there is space for the alloc
+ // NOTE(review): buffer exhaustion is only asserted, not checked; in a
+ // build where NV_CLOTH_ASSERT compiles out this would write past the
+ // buffer -- confirm callers size the buffer conservatively
+ NV_CLOTH_ASSERT(allocEnd <= mBuffer + mBufferSize);
+
+ Header* h = getHeader(allocStart);
+ h->mPrev = mTop;
+ h->mSize = numBytes;
+ h->mFree = false;
+
+ mTop = h;
+ mFreeStart = allocEnd;
+
+ return allocStart;
+ }
+
+ // marks the block free; space is only reclaimed once every block above it
+ // on the stack is free as well
+ void deallocate(void* p)
+ {
+ if (!p)
+ return;
+
+ Header* h = getHeader(p);
+ h->mFree = true;
+
+ // unwind the stack to the next live alloc
+ while (mTop && mTop->mFree)
+ {
+ mFreeStart = reinterpret_cast<byte*>(mTop);
+ mTop = mTop->mPrev;
+ }
+ }
+
+ private:
+ // return the header for an allocation
+ inline Header* getHeader(void* p) const
+ {
+ NV_CLOTH_ASSERT((reinterpret_cast<uintptr_t>(p) & (align - 1)) == 0);
+ NV_CLOTH_ASSERT(reinterpret_cast<byte*>(p) >= mBuffer + sizeof(Header));
+ NV_CLOTH_ASSERT(reinterpret_cast<byte*>(p) < mBuffer + mBufferSize);
+
+ return reinterpret_cast<Header*>(p) - 1;
+ }
+
+ public:
+ // total user-allocated bytes not including any overhead
+ size_t userBytes() const
+ {
+ size_t total = 0;
+ Header* iter = mTop;
+ while (iter)
+ {
+ total += iter->mSize;
+ iter = iter->mPrev;
+ }
+
+ return total;
+ }
+
+ // total user-allocated bytes + overhead
+ size_t totalUsedBytes() const
+ {
+ return mFreeStart - mBuffer;
+ }
+
+ size_t remainingBytes() const
+ {
+ return mBufferSize - totalUsedBytes();
+ }
+
+ // overhead bytes: headers, alignment padding, and freed-but-not-unwound blocks
+ size_t wastedBytes() const
+ {
+ return totalUsedBytes() - userBytes();
+ }
+
+ private:
+ byte* const mBuffer;
+ const size_t mBufferSize;
+
+ byte* mFreeStart; // start of free space
+ Header* mTop; // top allocation header
+};
+
+} // namespace cloth
+} // namespace nv
diff --git a/NvCloth/src/SwCloth.cpp b/NvCloth/src/SwCloth.cpp
new file mode 100644
index 0000000..f409ad6
--- /dev/null
+++ b/NvCloth/src/SwCloth.cpp
@@ -0,0 +1,321 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2017 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#include "SwCloth.h"
+#include "SwFabric.h"
+#include "SwFactory.h"
+#include "TripletScheduler.h"
+#include "ClothBase.h"
+#include <foundation/PxMat44.h>
+#include "NvCloth/Allocator.h"
+
+using namespace physx;
+
+namespace nv
+{
+namespace cloth
+{
+PhaseConfig transform(const PhaseConfig&); // from PhaseConfig.cpp
+}
+}
+
+using namespace nv;
+
+cloth::SwCloth::SwCloth(SwFactory& factory, SwFabric& fabric, Range<const PxVec4> particles)
+: mFactory(factory), mFabric(fabric), mNumVirtualParticles(0), mUserData(0)
+{
+ NV_CLOTH_ASSERT(!particles.empty());
+
+ initialize(*this, particles.begin(), particles.end());
+
+#if PX_WINDOWS_FAMILY
+ const uint32_t kSimdWidth = 8; // avx
+#else
+ const uint32_t kSimdWidth = 4; // sse
+#endif
+
+ NV_CLOTH_ASSERT(particles.size() == fabric.getNumParticles());
+
+ mCurParticles.reserve(particles.size() + kSimdWidth - 1);
+ mCurParticles.assign(reinterpret_cast<const PxVec4*>(particles.begin()),
+ reinterpret_cast<const PxVec4*>(particles.end()));
+
+ // 7 dummy particles used in SIMD solver
+ mCurParticles.resize(particles.size() + kSimdWidth - 1, PxVec4(0.0f));
+ mPrevParticles = mCurParticles;
+
+ mCurParticles.resize(particles.size());
+ mPrevParticles.resize(particles.size());
+
+ mFabric.incRefCount();
+}
+
+namespace
+{
+// copy vector and make same capacity
+// (the extra capacity holds zero-initialized dummy data for the SIMD solver)
+void copyVector(nv::cloth::Vector<PxVec4>::Type& dst, const nv::cloth::Vector<PxVec4>::Type& src)
+{
+ dst.reserve(src.capacity());
+ dst.assign(src.begin(), src.end());
+
+ // ensure valid dummy data
+ dst.resize(src.capacity(), PxVec4(0.0f));
+ dst.resize(src.size());
+}
+}
+
+// copy constructor, supports rebinding to a different factory
+cloth::SwCloth::SwCloth(SwFactory& factory, const SwCloth& cloth)
+: mFactory(factory)
+, mFabric(cloth.mFabric)
+, mPhaseConfigs(cloth.mPhaseConfigs)
+, mCapsuleIndices(cloth.mCapsuleIndices)
+, mStartCollisionSpheres(cloth.mStartCollisionSpheres)
+, mTargetCollisionSpheres(cloth.mTargetCollisionSpheres)
+, mStartCollisionPlanes(cloth.mStartCollisionPlanes)
+, mTargetCollisionPlanes(cloth.mTargetCollisionPlanes)
+, mStartCollisionTriangles(cloth.mStartCollisionTriangles)
+, mTargetCollisionTriangles(cloth.mTargetCollisionTriangles)
+, mVirtualParticleIndices(cloth.mVirtualParticleIndices)
+, mVirtualParticleWeights(cloth.mVirtualParticleWeights)
+, mNumVirtualParticles(cloth.mNumVirtualParticles)
+, mSelfCollisionIndices(cloth.mSelfCollisionIndices)
+, mRestPositions(cloth.mRestPositions)
+{
+ copy(*this, cloth);
+
+ // carry over capacity (using as dummy particles)
+ copyVector(mCurParticles, cloth.mCurParticles);
+ copyVector(mPrevParticles, cloth.mPrevParticles);
+ copyVector(mMotionConstraints.mStart, cloth.mMotionConstraints.mStart);
+ copyVector(mMotionConstraints.mTarget, cloth.mMotionConstraints.mTarget);
+ copyVector(mSeparationConstraints.mStart, cloth.mSeparationConstraints.mStart);
+ copyVector(mSeparationConstraints.mTarget, cloth.mSeparationConstraints.mTarget);
+ copyVector(mParticleAccelerations, cloth.mParticleAccelerations);
+
+ //Both cloth and this have a reference to fabric. The factory that created fabric does not have to be the same as mFactory.
+ //mFabric needs to outlive both cloth instances. (this is checked with refcount asserts).
+ mFabric.incRefCount();
+}
+
+// releases this cloth's reference on the shared fabric
+cloth::SwCloth::~SwCloth()
+{
+ mFabric.decRefCount();
+}
+
+// bounds = lower[3], upper[3]
+// converts the axis-aligned min/max box into the center/half-extent
+// representation stored on the cloth
+void cloth::SwCloth::setParticleBounds(const float* bounds)
+{
+ for (uint32_t i = 0; i < 3; ++i)
+ {
+ array(mParticleBoundsCenter)[i] = (bounds[3 + i] + bounds[i]) * 0.5f;
+ array(mParticleBoundsHalfExtent)[i] = (bounds[3 + i] - bounds[i]) * 0.5f;
+ }
+}
+
+// Returns a writable range of mTarget sized to the particle count, allocating
+// it (rounded up to a multiple of 4 for SIMD) on first use. If mStart has
+// never been filled, the freshly sized buffer is swapped into mStart so both
+// ends of the start/target interpolation are valid.
+cloth::Range<PxVec4> cloth::SwCloth::push(SwConstraints& constraints)
+{
+ uint32_t n = uint32_t(mCurParticles.size());
+
+ if (!constraints.mTarget.capacity())
+ constraints.mTarget.resize((n + 3) & ~3, PxVec4(0.0f)); // reserve multiple of 4 for SIMD
+
+ constraints.mTarget.resizeUninitialized(n);
+ PxVec4* data = &constraints.mTarget.front();
+ Range<PxVec4> result(data, data + constraints.mTarget.size());
+
+ if (constraints.mStart.empty()) // initialize start first
+ constraints.mStart.swap(constraints.mTarget);
+
+ return result;
+}
+
+// releases constraint storage (swap with a temporary frees the capacity,
+// unlike resize(0))
+void cloth::SwCloth::clear(SwConstraints& constraints)
+{
+ Vector<PxVec4>::Type().swap(constraints.mStart);
+ Vector<PxVec4>::Type().swap(constraints.mTarget);
+}
+
+// the CPU solver has no triangle budget, so the range is passed through unclamped
+cloth::Range<const PxVec3> cloth::SwCloth::clampTriangleCount(Range<const PxVec3> range, uint32_t)
+{
+ return range;
+}
+
+#include "ClothImpl.h"
+
+namespace nv
+{
+namespace cloth
+{
+
+// --- ClothImpl specializations for the CPU (Sw) backend ---
+
+template <>
+Cloth* ClothImpl<SwCloth>::clone(Factory& factory) const
+{
+ return factory.clone(*this);
+}
+
+template <>
+uint32_t ClothImpl<SwCloth>::getNumParticles() const
+{
+ return uint32_t(mCloth.mCurParticles.size());
+}
+
+// CPU particles live in host memory; no mapping/locking is required
+template <>
+void ClothImpl<SwCloth>::lockParticles() const
+{
+}
+
+template <>
+void ClothImpl<SwCloth>::unlockParticles() const
+{
+}
+
+template <>
+MappedRange<PxVec4> ClothImpl<SwCloth>::getCurrentParticles()
+{
+ return getMappedParticles(&mCloth.mCurParticles.front());
+}
+
+template <>
+MappedRange<const PxVec4> ClothImpl<SwCloth>::getCurrentParticles() const
+{
+ return getMappedParticles(&mCloth.mCurParticles.front());
+}
+
+template <>
+MappedRange<PxVec4> ClothImpl<SwCloth>::getPreviousParticles()
+{
+ return getMappedParticles(&mCloth.mPrevParticles.front());
+}
+
+template <>
+MappedRange<const PxVec4> ClothImpl<SwCloth>::getPreviousParticles() const
+{
+ return getMappedParticles(&mCloth.mPrevParticles.front());
+}
+
+// no device-side buffers exist for the CPU backend
+template <>
+GpuParticles ClothImpl<SwCloth>::getGpuParticles()
+{
+ GpuParticles result = { 0, 0, 0 };
+ return result;
+}
+
+// Replaces the phase configuration; configs with zero stiffness are dropped
+// and the rest are converted to solver form via transform() (PhaseConfig.cpp).
+template <>
+void ClothImpl<SwCloth>::setPhaseConfig(Range<const PhaseConfig> configs)
+{
+ mCloth.mPhaseConfigs.resize(0);
+
+ // transform phase config to use in solver
+ for (; !configs.empty(); configs.popFront())
+ if (configs.front().mStiffness > 0.0f)
+ mCloth.mPhaseConfigs.pushBack(transform(configs.front()));
+
+ mCloth.wakeUp();
+}
+
+template <>
+void ClothImpl<SwCloth>::setSelfCollisionIndices(Range<const uint32_t> indices)
+{
+ ContextLockType lock(mCloth.mFactory);
+ mCloth.mSelfCollisionIndices.assign(indices.begin(), indices.end());
+ mCloth.notifyChanged();
+ mCloth.wakeUp();
+}
+
+template <>
+uint32_t ClothImpl<SwCloth>::getNumVirtualParticles() const
+{
+ return uint32_t(mCloth.mNumVirtualParticles);
+}
+
+// Lazily allocates one zeroed acceleration per particle and returns the
+// writable range; the buffer persists until clearParticleAccelerations().
+template <>
+Range<PxVec4> ClothImpl<SwCloth>::getParticleAccelerations()
+{
+ if (mCloth.mParticleAccelerations.empty())
+ {
+ uint32_t n = uint32_t(mCloth.mCurParticles.size());
+ mCloth.mParticleAccelerations.resize(n, PxVec4(0.0f));
+ }
+
+ mCloth.wakeUp();
+
+ PxVec4* data = &mCloth.mParticleAccelerations.front();
+ return Range<PxVec4>(data, data + mCloth.mParticleAccelerations.size());
+}
+
+// swap with a temporary releases the acceleration storage entirely
+template <>
+void ClothImpl<SwCloth>::clearParticleAccelerations()
+{
+ Vector<PxVec4>::Type().swap(mCloth.mParticleAccelerations);
+ mCloth.wakeUp();
+}
+
+// Rebuilds the virtual-particle tables: triplets are scheduled into
+// independent SIMD sets of 4, each set is padded to a multiple of 4 with
+// dummy indices past the real particle range, and the weights are stored
+// together with the precomputed 1/dot(w,w) normalization factor.
+template <>
+void ClothImpl<SwCloth>::setVirtualParticles(Range<const uint32_t[4]> indices, Range<const PxVec3> weights)
+{
+ mCloth.mNumVirtualParticles = 0;
+
+ // shuffle indices to form independent SIMD sets
+ uint16_t numParticles = uint16_t(mCloth.mCurParticles.size());
+ TripletScheduler scheduler(indices);
+ scheduler.simd(numParticles, 4);
+
+ // convert indices to byte offset
+ Vec4us dummy(numParticles, uint16_t(numParticles + 1), uint16_t(numParticles + 2), 0);
+ Vector<uint32_t>::Type::ConstIterator sIt = scheduler.mSetSizes.begin();
+ Vector<uint32_t>::Type::ConstIterator sEnd = scheduler.mSetSizes.end();
+ TripletScheduler::ConstTripletIter tIt = scheduler.mTriplets.begin(), tLast;
+ mCloth.mVirtualParticleIndices.resize(0);
+ mCloth.mVirtualParticleIndices.reserve(indices.size() + 3 * uint32_t(sEnd - sIt));
+ for (; sIt != sEnd; ++sIt)
+ {
+ uint32_t setSize = *sIt;
+ for (tLast = tIt + setSize; tIt != tLast; ++tIt, ++mCloth.mNumVirtualParticles)
+ mCloth.mVirtualParticleIndices.pushBack(Vec4us(*tIt));
+ // pad each set to a multiple of 4 entries with dummy indices
+ mCloth.mVirtualParticleIndices.resize((mCloth.mVirtualParticleIndices.size() + 3) & ~3, dummy);
+ }
+ // shrink-to-fit via copy-and-swap
+ Vector<Vec4us>::Type(mCloth.mVirtualParticleIndices.begin(), mCloth.mVirtualParticleIndices.end())
+ .swap(mCloth.mVirtualParticleIndices);
+
+ // precompute 1/dot(w,w)
+ Vector<PxVec4>::Type().swap(mCloth.mVirtualParticleWeights);
+ mCloth.mVirtualParticleWeights.reserve(weights.size());
+ for (; !weights.empty(); weights.popFront())
+ {
+ PxVec3 w = weights.front();
+ // NOTE(review): divides by zero for an all-zero weight triple --
+ // assumes callers supply nonzero weights; confirm upstream validation
+ float scale = 1 / w.magnitudeSquared();
+ mCloth.mVirtualParticleWeights.pushBack(PxVec4( w.x, w.y, w.z, scale ));
+ }
+
+ mCloth.notifyChanged();
+}
+
+} // namespace cloth
+} // namespace nv
diff --git a/NvCloth/src/SwCloth.h b/NvCloth/src/SwCloth.h
new file mode 100644
index 0000000..2730d9d
--- /dev/null
+++ b/NvCloth/src/SwCloth.h
@@ -0,0 +1,203 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2017 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#pragma once
+
+#include "NvCloth/Range.h"
+#include "NvCloth/PhaseConfig.h"
+#include "MovingAverage.h"
+#include "IndexPair.h"
+#include "Vec4T.h"
+#include <foundation/PxVec4.h>
+#include <foundation/PxVec3.h>
+#include <foundation/PxTransform.h>
+
+namespace nv
+{
+
+namespace cloth
+{
+
+class SwFabric;
+class SwFactory;
+
+// Double-buffered constraint storage: the solver interpolates from mStart
+// towards mTarget; pop() promotes the target buffer to the new start.
+struct SwConstraints
+{
+ void pop()
+ {
+ if (!mTarget.empty())
+ {
+ mStart.swap(mTarget);
+ mTarget.resize(0);
+ }
+ }
+
+ Vector<physx::PxVec4>::Type mStart;
+ Vector<physx::PxVec4>::Type mTarget;
+};
+
+// CPU (software) cloth instance: owns the per-cloth simulation state
+// (particles, phase configs, constraints, collision shapes, virtual
+// particles, sleep state) and holds a reference-counted SwFabric.
+class SwCloth
+{
+ SwCloth& operator = (const SwCloth&); // not implemented
+ // no-op lock: the CPU backend needs no context synchronization
+ struct SwContextLock
+ {
+ SwContextLock(const SwFactory&)
+ {
+ }
+ };
+
+ public:
+ typedef SwFactory FactoryType;
+ typedef SwFabric FabricType;
+ typedef SwContextLock ContextLockType;
+
+ typedef Vector<physx::PxVec3>::Type& MappedVec3fVectorType;
+ typedef Vector<physx::PxVec4>::Type& MappedVec4fVectorType;
+ typedef Vector<IndexPair>::Type& MappedIndexVectorType;
+ typedef Vector<uint32_t>::Type& MappedMaskVectorType;
+
+ SwCloth(SwFactory&, SwFabric&, Range<const physx::PxVec4>);
+ SwCloth(SwFactory&, const SwCloth&);
+ ~SwCloth(); // not virtual on purpose
+
+ public:
+ // asleep once enough consecutive sleep tests have passed
+ bool isSleeping() const
+ {
+ return mSleepPassCounter >= mSleepAfterCount;
+ }
+ void wakeUp()
+ {
+ mSleepPassCounter = 0;
+ }
+
+ // no cached device state to invalidate on the CPU backend
+ void notifyChanged()
+ {
+ }
+
+ void setParticleBounds(const float*);
+
+ Range<physx::PxVec4> push(SwConstraints&);
+ static void clear(SwConstraints&);
+
+ static Range<const physx::PxVec3> clampTriangleCount(Range<const physx::PxVec3>, uint32_t);
+
+ public:
+ SwFactory& mFactory;
+ SwFabric& mFabric;
+
+ bool mClothCostDirty;
+
+ // current and previous-iteration particle positions
+ Vector<physx::PxVec4>::Type mCurParticles;
+ Vector<physx::PxVec4>::Type mPrevParticles;
+
+ physx::PxVec3 mParticleBoundsCenter;
+ physx::PxVec3 mParticleBoundsHalfExtent;
+
+ physx::PxVec3 mGravity;
+ physx::PxVec3 mLogDamping;
+ physx::PxVec3 mLinearLogDrag;
+ physx::PxVec3 mAngularLogDrag;
+ physx::PxVec3 mLinearInertia;
+ physx::PxVec3 mAngularInertia;
+ physx::PxVec3 mCentrifugalInertia;
+ float mSolverFrequency;
+ float mStiffnessFrequency;
+
+ physx::PxTransform mTargetMotion;
+ physx::PxTransform mCurrentMotion;
+ physx::PxVec3 mLinearVelocity;
+ physx::PxVec3 mAngularVelocity;
+
+ float mPrevIterDt;
+ MovingAverage mIterDtAvg;
+
+ Vector<PhaseConfig>::Type mPhaseConfigs; // transformed!
+
+ // tether constraints stuff
+ float mTetherConstraintLogStiffness;
+ float mTetherConstraintScale;
+
+ // motion constraints stuff
+ SwConstraints mMotionConstraints;
+ float mMotionConstraintScale;
+ float mMotionConstraintBias;
+ float mMotionConstraintLogStiffness;
+
+ // separation constraints stuff
+ SwConstraints mSeparationConstraints;
+
+ // particle acceleration stuff
+ Vector<physx::PxVec4>::Type mParticleAccelerations;
+
+ // wind
+ physx::PxVec3 mWind;
+ float mDragLogCoefficient;
+ float mLiftLogCoefficient;
+
+ // collision stuff
+ Vector<IndexPair>::Type mCapsuleIndices;
+ Vector<physx::PxVec4>::Type mStartCollisionSpheres;
+ Vector<physx::PxVec4>::Type mTargetCollisionSpheres;
+ Vector<uint32_t>::Type mConvexMasks;
+ Vector<physx::PxVec4>::Type mStartCollisionPlanes;
+ Vector<physx::PxVec4>::Type mTargetCollisionPlanes;
+ Vector<physx::PxVec3>::Type mStartCollisionTriangles;
+ Vector<physx::PxVec3>::Type mTargetCollisionTriangles;
+ bool mEnableContinuousCollision;
+ float mCollisionMassScale;
+ float mFriction;
+
+ // virtual particles
+ Vector<Vec4us>::Type mVirtualParticleIndices;
+ Vector<physx::PxVec4>::Type mVirtualParticleWeights;
+ uint32_t mNumVirtualParticles;
+
+ // self collision
+ float mSelfCollisionDistance;
+ float mSelfCollisionLogStiffness;
+
+ Vector<uint32_t>::Type mSelfCollisionIndices;
+
+ Vector<physx::PxVec4>::Type mRestPositions;
+
+ // sleeping
+ uint32_t mSleepTestInterval; // how often to test for movement
+ uint32_t mSleepAfterCount; // number of tests to pass before sleep
+ float mSleepThreshold; // max movement delta to pass test
+ uint32_t mSleepPassCounter; // how many tests passed
+ uint32_t mSleepTestCounter; // how many iterations since tested
+
+ // unused for CPU simulation
+ void* mUserData;
+
+};
+
+} // namespace cloth
+}
diff --git a/NvCloth/src/SwClothData.cpp b/NvCloth/src/SwClothData.cpp
new file mode 100644
index 0000000..eddd821
--- /dev/null
+++ b/NvCloth/src/SwClothData.cpp
@@ -0,0 +1,155 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2017 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#include "SwClothData.h"
+#include "SwCloth.h"
+#include "SwFabric.h"
+#include <foundation/Px.h>
+#include <PsUtilities.h>
+
+using namespace physx;
+using namespace nv;
+
+// Gathers raw pointers and solver-ready scalars from the cloth and fabric
+// into a flat POD snapshot consumed by the solver kernels. Per-second
+// log-stiffness values are converted to per-iteration multipliers via
+// 1 - exp(stiffnessFrequency * dt * ln(2) * logStiffness).
+cloth::SwClothData::SwClothData(SwCloth& cloth, const SwFabric& fabric)
+{
+ mNumParticles = uint32_t(cloth.mCurParticles.size());
+ mCurParticles = array(cloth.mCurParticles.front());
+ mPrevParticles = array(cloth.mPrevParticles.front());
+
+ // expand center/half-extent into a min[3]/max[3] box
+ const float* center = array(cloth.mParticleBoundsCenter);
+ const float* extent = array(cloth.mParticleBoundsHalfExtent);
+ for (uint32_t i = 0; i < 3; ++i)
+ {
+ mCurBounds[i] = center[i] - extent[i];
+ mCurBounds[i + 3] = center[i] + extent[i];
+ }
+
+ // avoid reading uninitialized data from mPrevBounds, even though it's never used.
+ mPrevBounds[0] = 0.0f;
+
+ mConfigBegin = cloth.mPhaseConfigs.empty() ? 0 : &cloth.mPhaseConfigs.front();
+ mConfigEnd = mConfigBegin + cloth.mPhaseConfigs.size();
+
+ mPhases = &fabric.mPhases.front();
+ mNumPhases = uint32_t(fabric.mPhases.size());
+
+ mSets = &fabric.mSets.front();
+ mNumSets = uint32_t(fabric.mSets.size());
+
+ mRestvalues = &fabric.mRestvalues.front();
+ mNumRestvalues = uint32_t(fabric.mRestvalues.size());
+ mStiffnessValues = fabric.mStiffnessValues.empty()?nullptr:&fabric.mStiffnessValues.front();
+
+ mIndices = &fabric.mIndices.front();
+ mNumIndices = uint32_t(fabric.mIndices.size());
+
+ float stiffnessExponent = cloth.mStiffnessFrequency * cloth.mPrevIterDt * 0.69314718055994531f; // logf(2.0f);
+
+ mTethers = fabric.mTethers.begin();
+ mNumTethers = uint32_t(fabric.mTethers.size());
+ mTetherConstraintStiffness = 1.0f - expf(stiffnessExponent * cloth.mTetherConstraintLogStiffness);
+ mTetherConstraintScale = cloth.mTetherConstraintScale * fabric.mTetherLengthScale;
+
+ mTriangles = fabric.mTriangles.begin();
+ mNumTriangles = uint32_t(fabric.mTriangles.size()) / 3;
+ mDragCoefficient = 1.0f - expf(stiffnessExponent * cloth.mDragLogCoefficient);
+ mLiftCoefficient = 1.0f - expf(stiffnessExponent * cloth.mLiftLogCoefficient);
+
+ // constraint pointers are null when the corresponding buffer is unused
+ mStartMotionConstraints = cloth.mMotionConstraints.mStart.size() ? array(cloth.mMotionConstraints.mStart.front()) : 0;
+ mTargetMotionConstraints =
+ !cloth.mMotionConstraints.mTarget.empty() ? array(cloth.mMotionConstraints.mTarget.front()) : 0;
+ mMotionConstraintStiffness = 1.0f - expf(stiffnessExponent * cloth.mMotionConstraintLogStiffness);
+
+ mStartSeparationConstraints =
+ cloth.mSeparationConstraints.mStart.size() ? array(cloth.mSeparationConstraints.mStart.front()) : 0;
+ mTargetSeparationConstraints =
+ !cloth.mSeparationConstraints.mTarget.empty() ? array(cloth.mSeparationConstraints.mTarget.front()) : 0;
+
+ mParticleAccelerations = cloth.mParticleAccelerations.size() ? array(cloth.mParticleAccelerations.front()) : 0;
+
+ // collision targets fall back to the start buffers when no target is set
+ mStartCollisionSpheres = cloth.mStartCollisionSpheres.empty() ? 0 : array(cloth.mStartCollisionSpheres.front());
+ mTargetCollisionSpheres =
+ cloth.mTargetCollisionSpheres.empty() ? mStartCollisionSpheres : array(cloth.mTargetCollisionSpheres.front());
+ mNumSpheres = uint32_t(cloth.mStartCollisionSpheres.size());
+
+ mCapsuleIndices = cloth.mCapsuleIndices.empty() ? 0 : &cloth.mCapsuleIndices.front();
+ mNumCapsules = uint32_t(cloth.mCapsuleIndices.size());
+
+ mStartCollisionPlanes = cloth.mStartCollisionPlanes.empty() ? 0 : array(cloth.mStartCollisionPlanes.front());
+ mTargetCollisionPlanes =
+ cloth.mTargetCollisionPlanes.empty() ? mStartCollisionPlanes : array(cloth.mTargetCollisionPlanes.front());
+ mNumPlanes = uint32_t(cloth.mStartCollisionPlanes.size());
+
+ mConvexMasks = cloth.mConvexMasks.empty() ? 0 : &cloth.mConvexMasks.front();
+ mNumConvexes = uint32_t(cloth.mConvexMasks.size());
+
+ mStartCollisionTriangles = cloth.mStartCollisionTriangles.empty() ? 0 : array(cloth.mStartCollisionTriangles.front());
+ mTargetCollisionTriangles = cloth.mTargetCollisionTriangles.empty() ? mStartCollisionTriangles
+ : array(cloth.mTargetCollisionTriangles.front());
+ mNumCollisionTriangles = uint32_t(cloth.mStartCollisionTriangles.size()) / 3;
+
+ // each Vec4us entry contributes 4 uint16 indices
+ mVirtualParticlesBegin = cloth.mVirtualParticleIndices.empty() ? 0 : array(cloth.mVirtualParticleIndices.front());
+ mVirtualParticlesEnd = mVirtualParticlesBegin + 4 * cloth.mVirtualParticleIndices.size();
+ mVirtualParticleWeights = cloth.mVirtualParticleWeights.empty() ? 0 : array(cloth.mVirtualParticleWeights.front());
+ mNumVirtualParticleWeights = uint32_t(cloth.mVirtualParticleWeights.size());
+
+ mEnableContinuousCollision = cloth.mEnableContinuousCollision;
+ mCollisionMassScale = cloth.mCollisionMassScale;
+ mFrictionScale = cloth.mFriction;
+
+ mSelfCollisionDistance = cloth.mSelfCollisionDistance;
+ mSelfCollisionStiffness = 1.0f - expf(stiffnessExponent * cloth.mSelfCollisionLogStiffness);
+
+ // with no explicit index list, all particles participate in self collision
+ mSelfCollisionIndices = cloth.mSelfCollisionIndices.empty() ? nullptr : cloth.mSelfCollisionIndices.begin();
+ mNumSelfCollisionIndices = mSelfCollisionIndices ? uint32_t(cloth.mSelfCollisionIndices.size()) : mNumParticles;
+
+ mRestPositions = cloth.mRestPositions.size() ? array(cloth.mRestPositions.front()) : 0;
+
+ mSleepPassCounter = cloth.mSleepPassCounter;
+ mSleepTestCounter = cloth.mSleepTestCounter;
+}
+
+// writes simulation results (particle bounds, sleep counters) back to the cloth
+void cloth::SwClothData::reconcile(SwCloth& cloth) const
+{
+ cloth.setParticleBounds(mCurBounds);
+ cloth.mSleepTestCounter = mSleepTestCounter;
+ cloth.mSleepPassCounter = mSleepPassCounter;
+}
+
+// sanity-checks collision data references (asserts only; no-op in release)
+void cloth::SwClothData::verify() const
+{
+ // checks needs to be run after the constructor because
+ // data isn't immediately available on SPU at that stage
+ // perhaps a good reason to construct SwClothData on PPU instead
+
+ // every capsule must reference an existing collision sphere
+ NV_CLOTH_ASSERT(!mNumCapsules ||
+ mNumSpheres > *shdfnd::maxElement(&mCapsuleIndices->first, &(mCapsuleIndices + mNumCapsules)->first));
+
+ // every convex mask bit must correspond to an existing collision plane:
+ // the largest mask must fit inside (1 << numPlanes) - 1
+ NV_CLOTH_ASSERT(!mNumConvexes || (static_cast<uint64_t>(1) << static_cast<uint64_t>(mNumPlanes)) - static_cast<uint64_t>(1) >= static_cast<uint64_t>(*shdfnd::maxElement(mConvexMasks, mConvexMasks + mNumConvexes)));
+}
diff --git a/NvCloth/src/SwClothData.h b/NvCloth/src/SwClothData.h
new file mode 100644
index 0000000..d2387b5
--- /dev/null
+++ b/NvCloth/src/SwClothData.h
@@ -0,0 +1,151 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2017 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#pragma once
+
+#include <foundation/Px.h>
+
+namespace nv
+{
+namespace simd
+{
+}
+}
+
+namespace nv
+{
+namespace cloth
+{
+
+class SwCloth;
+class SwFabric;
+struct PhaseConfig;
+struct IndexPair;
+struct SwTether;
+
+// reference to cloth instance bulk data (POD)
+// Plain-old-data snapshot of everything the CPU solver kernel needs for one
+// simulation step. Built from a SwCloth/SwFabric pair in the constructor,
+// consumed by the solver, and written back via reconcile(). Pointers alias
+// memory owned by the cloth/fabric; this struct owns nothing.
+struct SwClothData
+{
+	SwClothData(SwCloth&, const SwFabric&);
+	void reconcile(SwCloth&) const; // write bounds + sleep counters back to the cloth
+	void verify() const;            // asserts on capsule/convex index validity
+
+	// particle data
+	uint32_t mNumParticles;
+	float* mCurParticles;
+	float* mPrevParticles;
+
+	float mCurBounds[6]; // lower[3], upper[3]
+	float mPrevBounds[6];
+	float mPadding; // write as simd
+
+	// distance constraints
+	const PhaseConfig* mConfigBegin;
+	const PhaseConfig* mConfigEnd;
+
+	const uint32_t* mPhases;
+	uint32_t mNumPhases;
+
+	const uint32_t* mSets;
+	uint32_t mNumSets;
+
+	const float* mRestvalues;
+	uint32_t mNumRestvalues;
+	const float* mStiffnessValues;
+
+	const uint16_t* mIndices;
+	uint32_t mNumIndices;
+
+	const SwTether* mTethers;
+	uint32_t mNumTethers;
+	float mTetherConstraintStiffness;
+	float mTetherConstraintScale;
+
+	// wind data
+	const uint16_t* mTriangles;
+	uint32_t mNumTriangles;
+	float mDragCoefficient;
+	float mLiftCoefficient;
+
+	// motion constraint data
+	const float* mStartMotionConstraints;
+	const float* mTargetMotionConstraints;
+	float mMotionConstraintStiffness;
+
+	// separation constraint data
+	const float* mStartSeparationConstraints;
+	const float* mTargetSeparationConstraints;
+
+	// particle acceleration data
+	const float* mParticleAccelerations;
+
+	// collision stuff
+	const float* mStartCollisionSpheres;
+	const float* mTargetCollisionSpheres;
+	uint32_t mNumSpheres;
+
+	const IndexPair* mCapsuleIndices;
+	uint32_t mNumCapsules;
+
+	const float* mStartCollisionPlanes;
+	const float* mTargetCollisionPlanes;
+	uint32_t mNumPlanes;
+
+	const uint32_t* mConvexMasks;
+	uint32_t mNumConvexes;
+
+	const float* mStartCollisionTriangles;
+	const float* mTargetCollisionTriangles;
+	uint32_t mNumCollisionTriangles;
+
+	const uint16_t* mVirtualParticlesBegin;
+	const uint16_t* mVirtualParticlesEnd;
+
+	const float* mVirtualParticleWeights;
+	uint32_t mNumVirtualParticleWeights;
+
+	bool mEnableContinuousCollision;
+	float mFrictionScale;
+	float mCollisionMassScale;
+
+	float mSelfCollisionDistance;
+	float mSelfCollisionStiffness;
+
+	// when mSelfCollisionIndices is null, all mNumParticles particles self-collide
+	uint32_t mNumSelfCollisionIndices;
+	const uint32_t* mSelfCollisionIndices;
+
+	float* mRestPositions;
+
+	// sleep data
+	uint32_t mSleepPassCounter;
+	uint32_t mSleepTestCounter;
+
+};
+}
+}
diff --git a/NvCloth/src/SwCollision.cpp b/NvCloth/src/SwCollision.cpp
new file mode 100644
index 0000000..3774795
--- /dev/null
+++ b/NvCloth/src/SwCollision.cpp
@@ -0,0 +1,1936 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2017 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#include "SwCollision.h"
+#include "SwCloth.h"
+#include "SwClothData.h"
+#include "IterationState.h"
+#include "BoundingBox.h"
+#include "PointInterpolator.h"
+#include "SwCollisionHelpers.h"
+#include <foundation/PxProfiler.h>
+#include <cstring> // for memset
+#include <PsSort.h>
+
+using namespace nv;
+using namespace physx;
+
+// the particle trajectory needs to penetrate more than 0.2 * radius to trigger continuous collision
+template <typename Simd4f>
+const Simd4f cloth::SwCollision<Simd4f>::sSkeletonWidth = simd4f(cloth::sqr(1 - 0.2f) - 1);
+
+// Per-ISA constants for the Gather<Simd4i> helper, which emulates a 4-lane
+// gather from the 8-entry grid arrays (SSE2 lacks a native gather; NEON
+// builds one from a byte shuffle).
+#if NV_SIMD_SSE2
+const Simd4i cloth::Gather<Simd4i>::sIntSignBit = simd4i(0x80000000);
+const Simd4i cloth::Gather<Simd4i>::sSignedMask = sIntSignBit | simd4i(0x7);
+#elif NV_SIMD_NEON
+const Simd4i cloth::Gather<Simd4i>::sPack = simd4i(0x00000000, 0x04040404, 0x08080808, 0x0c0c0c0c);
+const Simd4i cloth::Gather<Simd4i>::sOffset = simd4i(0x03020100);
+const Simd4i cloth::Gather<Simd4i>::sShift = simd4i(2);
+const Simd4i cloth::Gather<Simd4i>::sMask = simd4i(7);
+#endif
+
+namespace
+{
+// Lane-select masks and scalar constants shared by the collision code below.
+const Simd4fTupleFactory sMaskX = simd4f(simd4i(~0, 0, 0, 0));
+const Simd4fTupleFactory sMaskZ = simd4f(simd4i(0, 0, ~0, 0));
+const Simd4fTupleFactory sMaskW = simd4f(simd4i(0, 0, 0, ~0));
+const Simd4fTupleFactory gSimd4fOneXYZ = simd4f(1.0f, 1.0f, 1.0f, 0.0f);
+const Simd4fScalarFactory sGridLength = simd4f(8 - 1e-3f); // sGridSize
+const Simd4fScalarFactory sGridExpand = simd4f(1e-4f);
+const Simd4fTupleFactory sMinusFloatMaxXYZ = simd4f(-FLT_MAX, -FLT_MAX, -FLT_MAX, 0.0f);
+
+#if PX_PROFILE || PX_DEBUG
+// Rounded sum of all four lanes; used for profiling counters only.
+template <typename Simd4f>
+uint32_t horizontalSum(const Simd4f& x)
+{
+	const float* p = array(x);
+	return uint32_t(0.5f + p[0] + p[1] + p[2] + p[3]);
+}
+#endif
+
+// 7 elements are written to ptr!
+// (each store() writes 4 floats, so lower[0..3] then upper overlapping at ptr+3)
+template <typename Simd4f>
+void storeBounds(float* ptr, const cloth::BoundingBox<Simd4f>& bounds)
+{
+	store(ptr, bounds.mLower);
+	store(ptr + 3, bounds.mUpper);
+}
+}
+
+// Collision sphere laid out as 4 floats so it can be loaded as one SIMD vector.
+struct cloth::SphereData
+{
+	PxVec3 center;
+	float radius;
+};
+
+// Tapered capsule (cone) derived from a pair of spheres; see generateCones().
+struct cloth::ConeData
+{
+	PxVec3 center;
+	float radius; // cone radius at center
+	PxVec3 axis;
+	float slope; // tan(alpha)
+
+	float sqrCosine; // cos^2(alpha)
+	float halfLength;
+
+	// bit masks identifying the end spheres in the sphere-mask grid
+	uint32_t firstMask;
+	uint32_t bothMask;
+};
+
+// Precomputed triangle quantities for point-triangle distance queries,
+// filled by generateTriangles(). Layout is SIMD-store friendly (4-float rows).
+struct cloth::TriangleData
+{
+	PxVec3 base;
+	float edge0DotEdge1;
+
+	PxVec3 edge0;
+	float edge0SqrLength;
+
+	PxVec3 edge1;
+	float edge1SqrLength;
+
+	PxVec3 normal;
+	float padding;
+
+	// reciprocals produced with recip<1>() in generateTriangles()
+	float det;
+	float denom;
+
+	float edge0InvSqrLength;
+	float edge1InvSqrLength;
+};
+
+namespace nv
+{
+namespace cloth
+{
+// Grows bbox to enclose the given spheres (center +/- radius on each axis).
+// Radius is kept in the w lane by splatting it across all lanes.
+template <typename Simd4f>
+BoundingBox<Simd4f> expandBounds(const BoundingBox<Simd4f>& bbox, const SphereData* sIt, const SphereData* sEnd)
+{
+	BoundingBox<Simd4f> result = bbox;
+	for (; sIt != sEnd; ++sIt)
+	{
+		Simd4f p = loadAligned(array(sIt->center));
+		Simd4f r = splat<3>(p);
+		result.mLower = min(result.mLower, p - r);
+		result.mUpper = max(result.mUpper, p + r);
+	}
+	return result;
+}
+}
+}
+
+namespace
+{
+// Copies 'count' spheres from src to dIt, clamping negative radii (w lane)
+// to zero via the max with sMinusFloatMaxXYZ (w component 0).
+template <typename Simd4f, typename SrcIterator>
+void generateSpheres(Simd4f* dIt, const SrcIterator& src, uint32_t count)
+{
+	// have to copy out iterator to ensure alignment is maintained
+	for (SrcIterator sIt = src; 0 < count--; ++sIt, ++dIt)
+		*dIt = max(sMinusFloatMaxXYZ, *sIt); // clamp radius to 0
+}
+
+// Builds one ConeData per capsule from its two end spheres: center/axis are
+// the midpoint/half-difference, slope is the radius change per unit length.
+void generateCones(cloth::ConeData* dst, const cloth::SphereData* sourceSpheres, const cloth::IndexPair* capsuleIndices,
+                   uint32_t numCones)
+{
+	cloth::ConeData* cIt = dst;
+	for (const cloth::IndexPair* iIt = capsuleIndices, *iEnd = iIt + numCones; iIt != iEnd; ++iIt, ++cIt)
+	{
+		// reinterpret SphereData (PxVec3 + float) as PxVec4; relies on identical layout
+		PxVec4 first = reinterpret_cast<const PxVec4&>(sourceSpheres[iIt->first]);
+		PxVec4 second = reinterpret_cast<const PxVec4&>(sourceSpheres[iIt->second]);
+
+		PxVec4 center = (second + first) * 0.5f;
+		PxVec4 axis = (second - first) * 0.5f;
+
+		float sqrAxisLength = axis.x * axis.x + axis.y * axis.y + axis.z * axis.z;
+		// negative when one sphere is contained in the other (no cone surface)
+		float sqrConeLength = sqrAxisLength - cloth::sqr(axis.w);
+
+		// NOTE(review): sqrtf may see a non-positive argument here; the result is
+		// discarded by the guard below, but the intermediate can be NaN/inf.
+		float invAxisLength = 1 / sqrtf(sqrAxisLength);
+		float invConeLength = 1 / sqrtf(sqrConeLength);
+
+		// degenerate capsule: disable by zeroing both reciprocals (radius becomes 0)
+		if (sqrConeLength <= 0.0f)
+			invAxisLength = invConeLength = 0.0f;
+
+		float axisLength = sqrAxisLength * invAxisLength;
+		float slope = axis.w * invConeLength;
+
+		cIt->center = PxVec3(center.x, center.y, center.z );
+		cIt->radius = (axis.w + first.w) * invConeLength * axisLength;
+		cIt->axis = PxVec3(axis.x, axis.y, axis.z) * invAxisLength;
+		cIt->slope = slope;
+
+		cIt->sqrCosine = 1.0f - cloth::sqr(axis.w * invAxisLength);
+		cIt->halfLength = axisLength;
+
+		// one bit per sphere index; the grids hold at most 32 shapes per mask word
+		uint32_t firstMask = 0x1u << iIt->first;
+		cIt->firstMask = firstMask;
+		cIt->bothMask = firstMask | 0x1u << iIt->second;
+	}
+}
+
+// Plain element-wise copy of 'count' planes from src to dIt.
+template <typename Simd4f, typename SrcIterator>
+void generatePlanes(Simd4f* dIt, const SrcIterator& src, uint32_t count)
+{
+	// have to copy out iterator to ensure alignment is maintained
+	for (SrcIterator sIt = src; 0 < count--; ++sIt, ++dIt)
+		*dIt = *sIt;
+}
+
+// Consumes 3 source vertices per triangle and fills a TriangleData with the
+// edge vectors, unit normal, and the reciprocals (det/denom/edge lengths)
+// used by the point-triangle distance test.
+template <typename Simd4f, typename SrcIterator>
+void generateTriangles(cloth::TriangleData* dIt, const SrcIterator& src, uint32_t count)
+{
+	// have to copy out iterator to ensure alignment is maintained
+	for (SrcIterator sIt = src; 0 < count--; ++dIt)
+	{
+		Simd4f p0 = *sIt;
+		++sIt;
+		Simd4f p1 = *sIt;
+		++sIt;
+		Simd4f p2 = *sIt;
+		++sIt;
+
+		Simd4f edge0 = p1 - p0;
+		Simd4f edge1 = p2 - p0;
+		Simd4f normal = cross3(edge0, edge1);
+
+		Simd4f edge0SqrLength = dot3(edge0, edge0);
+		Simd4f edge1SqrLength = dot3(edge1, edge1);
+		Simd4f edge0DotEdge1 = dot3(edge0, edge1);
+		Simd4f normalInvLength = rsqrt(dot3(normal, normal));
+
+		Simd4f det = edge0SqrLength * edge1SqrLength - edge0DotEdge1 * edge0DotEdge1;
+		Simd4f denom = edge0SqrLength + edge1SqrLength - edge0DotEdge1 - edge0DotEdge1;
+
+		// there are definitely faster ways...
+		// pack (det, denom, e0SqrLen, e1SqrLen) into one vector so a single
+		// recip<1>() produces all four reciprocals stored at &dIt->det
+		Simd4f aux = select(sMaskX, det, denom);
+		aux = select(sMaskZ, edge0SqrLength, aux);
+		aux = select(sMaskW, edge1SqrLength, aux);
+
+		storeAligned(&dIt->base.x, select(sMaskW, edge0DotEdge1, p0));
+		storeAligned(&dIt->edge0.x, select(sMaskW, edge0SqrLength, edge0));
+		storeAligned(&dIt->edge1.x, select(sMaskW, edge1SqrLength, edge1));
+		storeAligned(&dIt->normal.x, normal * normalInvLength);
+		storeAligned(&dIt->det, recip<1>(aux));
+	}
+}
+
+} // namespace
+
+// Empty collision data; buffers are filled in by SwCollision::allocate().
+template <typename Simd4f>
+cloth::SwCollision<Simd4f>::CollisionData::CollisionData()
+: mSpheres(0), mCones(0)
+{
+}
+
+// Allocates the current-frame shape buffers. When continuous collision or
+// friction is enabled a second (previous-frame) buffer set is allocated and
+// seeded from the start poses so the first frame has valid previous shapes.
+template <typename Simd4f>
+cloth::SwCollision<Simd4f>::SwCollision(SwClothData& clothData, SwKernelAllocator& alloc)
+: mClothData(clothData), mAllocator(alloc)
+{
+	allocate(mCurData);
+
+	if (mClothData.mEnableContinuousCollision || mClothData.mFrictionScale > 0.0f)
+	{
+		allocate(mPrevData);
+
+		generateSpheres(reinterpret_cast<Simd4f*>(mPrevData.mSpheres),
+		                reinterpret_cast<const Simd4f*>(clothData.mStartCollisionSpheres), clothData.mNumSpheres);
+
+		generateCones(mPrevData.mCones, mPrevData.mSpheres, clothData.mCapsuleIndices, clothData.mNumCapsules);
+	}
+}
+
+// Returns both shape buffer sets to the kernel allocator.
+template <typename Simd4f>
+cloth::SwCollision<Simd4f>::~SwCollision()
+{
+	deallocate(mCurData);
+	deallocate(mPrevData);
+}
+
+// Runs one collision pass for the given solver iteration: convexes and
+// triangles first (discrete, no friction), then sphere/cone collision via
+// the grid acceleration structure. Shape buffers are swapped at the end so
+// this frame's shapes become next frame's "previous" shapes.
+template <typename Simd4f>
+void cloth::SwCollision<Simd4f>::operator()(const IterationState<Simd4f>& state)
+{
+	mNumCollisions = 0;
+
+	collideConvexes(state);  // discrete convex collision, no friction
+	collideTriangles(state); // discrete triangle collision, no friction
+
+	computeBounds();
+
+	if (!mClothData.mNumSpheres)
+		return;
+
+	bool lastIteration = state.mRemainingIterations == 1;
+
+	const Simd4f* targetSpheres = reinterpret_cast<const Simd4f*>(mClothData.mTargetCollisionSpheres);
+
+	// generate sphere and cone collision data
+	if (!lastIteration)
+	{
+		// interpolate spheres
+		LerpIterator<Simd4f, const Simd4f*> pIter(reinterpret_cast<const Simd4f*>(mClothData.mStartCollisionSpheres),
+		                                          targetSpheres, state.getCurrentAlpha());
+		generateSpheres(reinterpret_cast<Simd4f*>(mCurData.mSpheres), pIter, mClothData.mNumSpheres);
+	}
+	else
+	{
+		// otherwise use the target spheres directly
+		generateSpheres(reinterpret_cast<Simd4f*>(mCurData.mSpheres), targetSpheres, mClothData.mNumSpheres);
+	}
+
+	// generate cones even if test below fails because
+	// continuous collision might need it in next iteration
+	generateCones(mCurData.mCones, mCurData.mSpheres, mClothData.mCapsuleIndices, mClothData.mNumCapsules);
+
+	// only collide when particle and shape bounds actually overlap
+	if (buildAcceleration())
+	{
+		if (mClothData.mEnableContinuousCollision)
+			collideContinuousParticles();
+
+		mergeAcceleration(reinterpret_cast<uint32_t*>(mSphereGrid));
+		mergeAcceleration(reinterpret_cast<uint32_t*>(mConeGrid));
+
+		if (!mClothData.mEnableContinuousCollision)
+			collideParticles();
+
+		collideVirtualParticles();
+	}
+
+	// keep a copy of this frame's shapes only when a previous buffer exists
+	// (i.e. continuous collision or friction is enabled)
+	if (mPrevData.mSpheres)
+		shdfnd::swap(mCurData, mPrevData);
+}
+
+// Scratch-memory upper bound for one collision pass: triangle data and the
+// (double-buffered) plane data are never live at the same time, so the
+// larger of the two suffices.
+template <typename Simd4f>
+size_t cloth::SwCollision<Simd4f>::estimateTemporaryMemory(const SwCloth& cloth)
+{
+	size_t numTriangles = cloth.mStartCollisionTriangles.size();
+	size_t numPlanes = cloth.mStartCollisionPlanes.size();
+
+	const size_t kTriangleDataSize = sizeof(TriangleData) * numTriangles;
+	const size_t kPlaneDataSize = sizeof(PxVec4) * numPlanes * 2;
+
+	return std::max(kTriangleDataSize, kPlaneDataSize);
+}
+
+// Persistent-memory bound: sphere and cone buffers, times two for the
+// cur/prev double buffering (worst case; prev is only allocated when
+// continuous collision or friction is on).
+template <typename Simd4f>
+size_t cloth::SwCollision<Simd4f>::estimatePersistentMemory(const SwCloth& cloth)
+{
+	size_t numCapsules = cloth.mCapsuleIndices.size();
+	size_t numSpheres = cloth.mStartCollisionSpheres.size();
+
+	size_t sphereDataSize = sizeof(SphereData) * numSpheres * 2;
+	size_t coneDataSize = sizeof(ConeData) * numCapsules * 2;
+
+	return sphereDataSize + coneDataSize;
+}
+
+// Allocates one sphere buffer and one cone buffer sized from the cloth data.
+template <typename Simd4f>
+void cloth::SwCollision<Simd4f>::allocate(CollisionData& data)
+{
+	data.mSpheres = static_cast<SphereData*>(mAllocator.allocate(sizeof(SphereData) * mClothData.mNumSpheres));
+
+	data.mCones = static_cast<ConeData*>(mAllocator.allocate(sizeof(ConeData) * mClothData.mNumCapsules));
+}
+
+// Releases a buffer set; safe on a default-constructed (null) CollisionData.
+template <typename Simd4f>
+void cloth::SwCollision<Simd4f>::deallocate(const CollisionData& data)
+{
+	mAllocator.deallocate(data.mSpheres);
+	mAllocator.deallocate(data.mCones);
+}
+
+// Computes the current particle bounding box in one sweep over the particles
+// and rotates mCurBounds into mPrevBounds. Also restores each particle's
+// previous inverse mass into the w lane when the current w is positive.
+template <typename Simd4f>
+void cloth::SwCollision<Simd4f>::computeBounds()
+{
+	NV_CLOTH_PROFILE_ZONE("cloth::SwSolverKernel::computeBounds", /*ProfileContext::None*/ 0);
+
+	Simd4f* prevIt = reinterpret_cast<Simd4f*>(mClothData.mPrevParticles);
+	Simd4f* curIt = reinterpret_cast<Simd4f*>(mClothData.mCurParticles);
+	Simd4f* curEnd = curIt + mClothData.mNumParticles;
+	// (FLT_MAX, FLT_MAX, FLT_MAX, 0) — w lane 0 so only w > 0 triggers the select below
+	Simd4f floatMaxXYZ = -static_cast<Simd4f>(sMinusFloatMaxXYZ);
+
+	Simd4f lower = simd4f(FLT_MAX), upper = -lower;
+	for (; curIt < curEnd; ++curIt, ++prevIt)
+	{
+		Simd4f current = *curIt;
+		lower = min(lower, current);
+		upper = max(upper, current);
+		// if (current.w > 0) current.w = previous.w
+		// NOTE(review): the select mask also compares xyz against FLT_MAX, which
+		// only fires at FLT_MAX positions — effectively a w-lane-only update.
+		*curIt = select(current > floatMaxXYZ, *prevIt, current);
+	}
+
+	BoundingBox<Simd4f> curBounds;
+	curBounds.mLower = lower;
+	curBounds.mUpper = upper;
+
+	// don't change this order, storeBounds writes 7 floats
+	BoundingBox<Simd4f> prevBounds = loadBounds<Simd4f>(mClothData.mCurBounds);
+	storeBounds(mClothData.mCurBounds, curBounds);
+	storeBounds(mClothData.mPrevBounds, prevBounds);
+}
+
+namespace
+{
+template <typename Simd4i>
+Simd4i andNotIsZero(const Simd4i& left, const Simd4i& right)
+{
+ return (left & ~right) == gSimd4iZero;
+}
+}
+
+// build per-axis mask arrays of spheres on the right/left of grid cell
+// For each sphere (one bit per sphere), marks every grid cell at or beyond
+// its lower extent in the "first" array and at or below its upper extent in
+// the "last" array; mergeAcceleration() later ANDs them into overlap masks.
+template <typename Simd4f>
+void cloth::SwCollision<Simd4f>::buildSphereAcceleration(const SphereData* sIt)
+{
+	static const int maxIndex = sGridSize - 1;
+
+	const SphereData* sEnd = sIt + mClothData.mNumSpheres;
+	for (uint32_t mask = 0x1; sIt != sEnd; ++sIt, mask <<= 1)
+	{
+		Simd4f sphere = loadAligned(array(sIt->center));
+		Simd4f radius = splat<3>(sphere);
+
+		// grid cell range covered by the sphere, clamped to [0, sGridSize)
+		Simd4i first = intFloor(max((sphere - radius) * mGridScale + mGridBias, gSimd4fZero));
+		Simd4i last = intFloor(min((sphere + radius) * mGridScale + mGridBias, sGridLength));
+
+		const int* firstIdx = array(first);
+		const int* lastIdx = array(last);
+
+		uint32_t* firstIt = reinterpret_cast<uint32_t*>(mSphereGrid);
+		uint32_t* lastIt = firstIt + 3 * sGridSize;
+
+		for (uint32_t i = 0; i < 3; ++i, firstIt += sGridSize, lastIt += sGridSize)
+		{
+			for (int j = firstIdx[i]; j <= maxIndex; ++j)
+				firstIt[j] |= mask;
+
+			for (int j = lastIdx[i]; j >= 0; --j)
+				lastIt[j] |= mask;
+		}
+	}
+}
+
+// generate cone masks from sphere masks
+// A cone occupies every cell touched by either of its end spheres, so its
+// grid bit is the OR of the two sphere bits (bothMask). Degenerate cones
+// (radius == 0, see generateCones) are skipped entirely.
+template <typename Simd4f>
+void cloth::SwCollision<Simd4f>::buildConeAcceleration()
+{
+	const ConeData* coneIt = mCurData.mCones;
+	const ConeData* coneEnd = coneIt + mClothData.mNumCapsules;
+	for (uint32_t coneMask = 0x1; coneIt != coneEnd; ++coneIt, coneMask <<= 1)
+	{
+		if (coneIt->radius == 0.0f)
+			continue;
+
+		uint32_t spheresMask = coneIt->bothMask;
+
+		uint32_t* sphereIt = reinterpret_cast<uint32_t*>(mSphereGrid);
+		uint32_t* sphereEnd = sphereIt + 6 * sGridSize;
+		uint32_t* gridIt = reinterpret_cast<uint32_t*>(mConeGrid);
+		for (; sphereIt != sphereEnd; ++sphereIt, ++gridIt)
+			if (*sphereIt & spheresMask)
+				*gridIt |= coneMask;
+	}
+}
+
+// convert right/left mask arrays into single overlap array
+// ANDs the "first" (lower-extent) half of the grid with the "last"
+// (upper-extent) half in place, leaving per-cell intersection masks.
+template <typename Simd4f>
+void cloth::SwCollision<Simd4f>::mergeAcceleration(uint32_t* firstIt)
+{
+	uint32_t* firstEnd = firstIt + 3 * sGridSize;
+	uint32_t* lastIt = firstEnd;
+	for (; firstIt != firstEnd; ++firstIt, ++lastIt)
+		*firstIt &= *lastIt;
+}
+
+// build mask of spheres/cones touching a regular grid along each axis
+// Sets up the grid transform (scale/bias) over the intersection of sphere
+// and particle bounds, then fills the sphere and cone mask grids.
+// Returns false when the bounds don't overlap (nothing can collide).
+template <typename Simd4f>
+bool cloth::SwCollision<Simd4f>::buildAcceleration()
+{
+	// determine sphere bbox
+	BoundingBox<Simd4f> sphereBounds =
+	    expandBounds(emptyBounds<Simd4f>(), mCurData.mSpheres, mCurData.mSpheres + mClothData.mNumSpheres);
+	BoundingBox<Simd4f> particleBounds = loadBounds<Simd4f>(mClothData.mCurBounds);
+	if (mClothData.mEnableContinuousCollision)
+	{
+		// continuous collision sweeps over the previous frame too
+		sphereBounds = expandBounds(sphereBounds, mPrevData.mSpheres, mPrevData.mSpheres + mClothData.mNumSpheres);
+		particleBounds = expandBounds(particleBounds, loadBounds<Simd4f>(mClothData.mPrevBounds));
+	}
+
+	BoundingBox<Simd4f> bounds = intersectBounds(sphereBounds, particleBounds);
+	// w lane masked off: only xyz extents decide whether the boxes overlap
+	Simd4f edgeLength = (bounds.mUpper - bounds.mLower) & ~static_cast<Simd4f>(sMaskW);
+	if (!allGreaterEqual(edgeLength, gSimd4fZero))
+		return false;
+
+	// calculate an expanded bounds to account for numerical inaccuracy
+	const Simd4f expandedLower = bounds.mLower - abs(bounds.mLower) * sGridExpand;
+	const Simd4f expandedUpper = bounds.mUpper + abs(bounds.mUpper) * sGridExpand;
+	const Simd4f expandedEdgeLength = max(expandedUpper - expandedLower, gSimd4fEpsilon);
+
+	// make grid minimal thickness and strict upper bound of spheres
+	mGridScale = sGridLength * recip<1>(expandedEdgeLength);
+	mGridBias = -expandedLower * mGridScale;
+	array(mGridBias)[3] = 1.0f; // needed for collideVirtualParticles()
+
+	NV_CLOTH_ASSERT(allTrue(((bounds.mLower * mGridScale + mGridBias) >= simd4f(0.0f)) | sMaskW));
+	NV_CLOTH_ASSERT(allTrue(((bounds.mUpper * mGridScale + mGridBias) < simd4f(8.0f)) | sMaskW));
+
+	// 6 * sGridSize words: first/last arrays for each of the 3 axes
+	memset(mSphereGrid, 0, sizeof(uint32_t) * 6 * (sGridSize));
+	if (mClothData.mEnableContinuousCollision)
+		buildSphereAcceleration(mPrevData.mSpheres);
+	buildSphereAcceleration(mCurData.mSpheres);
+
+	memset(mConeGrid, 0, sizeof(uint32_t) * 6 * (sGridSize));
+	buildConeAcceleration();
+
+	return true;
+}
+
+#ifdef _MSC_VER
+#define FORCE_INLINE __forceinline
+#else
+#define FORCE_INLINE inline __attribute__((always_inline))
+#endif
+
+// Member-wise copy; spelled out (with FORCE_INLINE) instead of relying on
+// the implicit operator.
+template <typename Simd4f>
+FORCE_INLINE typename cloth::SwCollision<Simd4f>::ShapeMask& cloth::SwCollision<Simd4f>::ShapeMask::
+operator = (const ShapeMask& right)
+{
+	mCones = right.mCones;
+	mSpheres = right.mSpheres;
+	return *this;
+}
+
+// Intersects both shape masks in place (per-lane bitwise AND).
+template <typename Simd4f>
+FORCE_INLINE typename cloth::SwCollision<Simd4f>::ShapeMask& cloth::SwCollision<Simd4f>::ShapeMask::
+operator &= (const ShapeMask& right)
+{
+	mCones = mCones & right.mCones;
+	mSpheres = mSpheres & right.mSpheres;
+	return *this;
+}
+
+// Gathers the cone and sphere masks for the grid cells addressed by the
+// (already grid-space) positions of 4 particles.
+template <typename Simd4f>
+FORCE_INLINE typename cloth::SwCollision<Simd4f>::ShapeMask
+cloth::SwCollision<Simd4f>::getShapeMask(const Simd4f& position, const Simd4i* __restrict sphereGrid,
+                                         const Simd4i* __restrict coneGrid)
+{
+	Gather<Simd4i> gather(intFloor(position));
+
+	ShapeMask result;
+	result.mCones = gather(coneGrid);
+	result.mSpheres = gather(sphereGrid);
+	return result;
+}
+
+// lookup acceleration structure and return mask of potential intersectors
+// Transforms 4 particle positions (SoA x/y/z) into grid space and ANDs the
+// per-axis cell masks together.
+template <typename Simd4f>
+FORCE_INLINE typename cloth::SwCollision<Simd4f>::ShapeMask
+cloth::SwCollision<Simd4f>::getShapeMask(const Simd4f* __restrict positions) const
+{
+	Simd4f posX = positions[0] * splat<0>(mGridScale) + splat<0>(mGridBias);
+	Simd4f posY = positions[1] * splat<1>(mGridScale) + splat<1>(mGridBias);
+	Simd4f posZ = positions[2] * splat<2>(mGridScale) + splat<2>(mGridBias);
+
+	// each axis uses its own pair of grid planes (offset by 2 Simd4i per axis)
+	ShapeMask result = getShapeMask(posX, mSphereGrid, mConeGrid);
+	result &= getShapeMask(posY, mSphereGrid + 2, mConeGrid + 2);
+	result &= getShapeMask(posZ, mSphereGrid + 4, mConeGrid + 4);
+
+	return result;
+}
+
+// lookup acceleration structure and return mask of potential intersectors
+// Swept variant for continuous collision: uses the axis-aligned box spanned
+// by each particle's previous and current position, looking up the "first"
+// masks at the max corner and the "last" masks (second half of each grid)
+// at the min corner.
+template <typename Simd4f>
+FORCE_INLINE typename cloth::SwCollision<Simd4f>::ShapeMask
+cloth::SwCollision<Simd4f>::getShapeMask(const Simd4f* __restrict prevPos, const Simd4f* __restrict curPos) const
+{
+	Simd4f scaleX = splat<0>(mGridScale);
+	Simd4f scaleY = splat<1>(mGridScale);
+	Simd4f scaleZ = splat<2>(mGridScale);
+
+	Simd4f biasX = splat<0>(mGridBias);
+	Simd4f biasY = splat<1>(mGridBias);
+	Simd4f biasZ = splat<2>(mGridBias);
+
+	Simd4f prevX = prevPos[0] * scaleX + biasX;
+	Simd4f prevY = prevPos[1] * scaleY + biasY;
+	Simd4f prevZ = prevPos[2] * scaleZ + biasZ;
+
+	Simd4f curX = curPos[0] * scaleX + biasX;
+	Simd4f curY = curPos[1] * scaleY + biasY;
+	Simd4f curZ = curPos[2] * scaleZ + biasZ;
+
+	// clamp the sweep box to the grid extents
+	Simd4f maxX = min(max(prevX, curX), sGridLength);
+	Simd4f maxY = min(max(prevY, curY), sGridLength);
+	Simd4f maxZ = min(max(prevZ, curZ), sGridLength);
+
+	ShapeMask result = getShapeMask(maxX, mSphereGrid, mConeGrid);
+	result &= getShapeMask(maxY, mSphereGrid + 2, mConeGrid + 2);
+	result &= getShapeMask(maxZ, mSphereGrid + 4, mConeGrid + 4);
+
+	Simd4f zero = gSimd4fZero;
+	Simd4f minX = max(min(prevX, curX), zero);
+	Simd4f minY = max(min(prevY, curY), zero);
+	Simd4f minZ = max(min(prevZ, curZ), zero);
+
+	// +6 skips to the "last" half of the grids (see buildSphereAcceleration)
+	result &= getShapeMask(minX, mSphereGrid + 6, mConeGrid + 6);
+	result &= getShapeMask(minY, mSphereGrid + 8, mConeGrid + 8);
+	result &= getShapeMask(minZ, mSphereGrid + 10, mConeGrid + 10);
+
+	return result;
+}
+
+// Accumulates per-particle collision impulses and contact velocities for
+// 4 particles at a time (SoA lanes). mNumCollisions starts at epsilon so a
+// later division by it is safe when no contact occurred.
+template <typename Simd4f>
+struct cloth::SwCollision<Simd4f>::ImpulseAccumulator
+{
+	ImpulseAccumulator()
+	: mDeltaX(gSimd4fZero)
+	, mDeltaY(mDeltaX)
+	, mDeltaZ(mDeltaX)
+	, mVelX(mDeltaX)
+	, mVelY(mDeltaX)
+	, mVelZ(mDeltaX)
+	, mNumCollisions(gSimd4fEpsilon)
+	{
+	}
+
+	// Adds scale * (x, y, z) to the delta for lanes selected by mask and
+	// counts a collision per selected lane. The self-comparison asserts
+	// catch NaNs in masked lanes (NaN != NaN).
+	void add(const Simd4f& x, const Simd4f& y, const Simd4f& z, const Simd4f& scale, const Simd4f& mask)
+	{
+		NV_CLOTH_ASSERT(allTrue((mask & x) == (mask & x)));
+		NV_CLOTH_ASSERT(allTrue((mask & y) == (mask & y)));
+		NV_CLOTH_ASSERT(allTrue((mask & z) == (mask & z)));
+		NV_CLOTH_ASSERT(allTrue((mask & scale) == (mask & scale)));
+
+		Simd4f maskedScale = scale & mask;
+		mDeltaX = mDeltaX + x * maskedScale;
+		mDeltaY = mDeltaY + y * maskedScale;
+		mDeltaZ = mDeltaZ + z * maskedScale;
+		mNumCollisions = mNumCollisions + (gSimd4fOne & mask);
+	}
+
+	// Accumulates contact-point velocity (for friction) on masked lanes.
+	void addVelocity(const Simd4f& vx, const Simd4f& vy, const Simd4f& vz, const Simd4f& mask)
+	{
+		NV_CLOTH_ASSERT(allTrue((mask & vx) == (mask & vx)));
+		NV_CLOTH_ASSERT(allTrue((mask & vy) == (mask & vy)));
+		NV_CLOTH_ASSERT(allTrue((mask & vz) == (mask & vz)));
+
+		mVelX = mVelX + (vx & mask);
+		mVelY = mVelY + (vy & mask);
+		mVelZ = mVelZ + (vz & mask);
+	}
+
+	// Same as add() with the delta direction negated (used when 'scale' is
+	// a negative penetration factor).
+	void subtract(const Simd4f& x, const Simd4f& y, const Simd4f& z, const Simd4f& scale, const Simd4f& mask)
+	{
+		NV_CLOTH_ASSERT(allTrue((mask & x) == (mask & x)));
+		NV_CLOTH_ASSERT(allTrue((mask & y) == (mask & y)));
+		NV_CLOTH_ASSERT(allTrue((mask & z) == (mask & z)));
+		NV_CLOTH_ASSERT(allTrue((mask & scale) == (mask & scale)));
+
+		Simd4f maskedScale = scale & mask;
+		mDeltaX = mDeltaX - x * maskedScale;
+		mDeltaY = mDeltaY - y * maskedScale;
+		mDeltaZ = mDeltaZ - z * maskedScale;
+		mNumCollisions = mNumCollisions + (gSimd4fOne & mask);
+	}
+
+	Simd4f mDeltaX, mDeltaY, mDeltaZ;
+	Simd4f mVelX, mVelY, mVelZ;
+	Simd4f mNumCollisions;
+};
+
+// Discrete sphere collision for 4 particles (SoA positions). Iterates the
+// set bits of sphereMask (one bit per sphere), pushing penetrating particles
+// out along the center-to-particle direction; sphere velocity is accumulated
+// for friction when enabled.
+template <typename Simd4f>
+FORCE_INLINE void cloth::SwCollision<Simd4f>::collideSpheres(const Simd4i& sphereMask, const Simd4f* positions,
+                                                             ImpulseAccumulator& accum) const
+{
+	const float* __restrict spherePtr = array(mCurData.mSpheres->center);
+
+	bool frictionEnabled = mClothData.mFrictionScale > 0.0f;
+
+	// OR all 4 lanes: a sphere is tested if any particle's cell contains it
+	Simd4i mask4 = horizontalOr(sphereMask);
+	uint32_t mask = uint32_t(array(mask4)[0]);
+	while (mask)
+	{
+		// isolate and clear the lowest set bit; its index selects the sphere
+		uint32_t test = mask - 1;
+		uint32_t offset = findBitSet(mask & ~test) * sizeof(SphereData);
+		mask = mask & test;
+
+		Simd4f sphere = loadAligned(spherePtr, offset);
+
+		Simd4f deltaX = positions[0] - splat<0>(sphere);
+		Simd4f deltaY = positions[1] - splat<1>(sphere);
+		Simd4f deltaZ = positions[2] - splat<2>(sphere);
+
+		// epsilon keeps rsqrt finite when a particle sits exactly at the center
+		Simd4f sqrDistance = gSimd4fEpsilon + deltaX * deltaX + deltaY * deltaY + deltaZ * deltaZ;
+		// negative inside the sphere: 1 - radius/distance
+		Simd4f negativeScale = gSimd4fOne - rsqrt(sqrDistance) * splat<3>(sphere);
+
+		Simd4f contactMask;
+		if (!anyGreater(gSimd4fZero, negativeScale, contactMask))
+			continue;
+
+		accum.subtract(deltaX, deltaY, deltaZ, negativeScale, contactMask);
+
+		if (frictionEnabled)
+		{
+			// load previous sphere pos
+			const float* __restrict prevSpherePtr = array(mPrevData.mSpheres->center);
+
+			Simd4f prevSphere = loadAligned(prevSpherePtr, offset);
+			Simd4f velocity = sphere - prevSphere;
+
+			accum.addVelocity(splat<0>(velocity), splat<1>(velocity), splat<2>(velocity), contactMask);
+		}
+	}
+}
+
+// Discrete cone (tapered capsule) collision for 4 particles. Iterates the
+// set bits of the cone mask, resolving contacts against each cone's lateral
+// surface. Particles beyond either cap are delegated to sphere collision:
+// the returned mask is the sphere mask with bits cleared for spheres already
+// handled (covered by a cone contact).
+template <typename Simd4f>
+FORCE_INLINE typename cloth::SwCollision<Simd4f>::Simd4i
+cloth::SwCollision<Simd4f>::collideCones(const Simd4f* __restrict positions, ImpulseAccumulator& accum) const
+{
+	const float* __restrict centerPtr = array(mCurData.mCones->center);
+	const float* __restrict axisPtr = array(mCurData.mCones->axis);
+	// (sqrCosine, halfLength, firstMask, bothMask) loaded as one int vector
+	const int32_t* __restrict auxiliaryPtr = reinterpret_cast<const int32_t*>(&mCurData.mCones->sqrCosine);
+
+	bool frictionEnabled = mClothData.mFrictionScale > 0.0f;
+
+	ShapeMask shapeMask = getShapeMask(positions);
+	Simd4i mask4 = horizontalOr(shapeMask.mCones);
+	uint32_t mask = uint32_t(array(mask4)[0]);
+	while (mask)
+	{
+		// isolate and clear the lowest set bit; its index selects the cone
+		uint32_t test = mask - 1;
+		uint32_t coneIndex = findBitSet(mask & ~test);
+		uint32_t offset = coneIndex * sizeof(ConeData);
+		mask = mask & test;
+
+		// lanes where this cone was culled by the per-particle grid mask
+		Simd4i test4 = mask4 - gSimd4iOne;
+		Simd4f culled = simd4f(andNotIsZero(shapeMask.mCones, test4));
+		mask4 = mask4 & test4;
+
+		Simd4f center = loadAligned(centerPtr, offset);
+
+		Simd4f deltaX = positions[0] - splat<0>(center);
+		Simd4f deltaY = positions[1] - splat<1>(center);
+		Simd4f deltaZ = positions[2] - splat<2>(center);
+
+		Simd4f axis = loadAligned(axisPtr, offset);
+
+		Simd4f axisX = splat<0>(axis);
+		Simd4f axisY = splat<1>(axis);
+		Simd4f axisZ = splat<2>(axis);
+		Simd4f slope = splat<3>(axis);
+
+		// dot = signed distance along the axis; radius = cone radius there
+		Simd4f dot = deltaX * axisX + deltaY * axisY + deltaZ * axisZ;
+		Simd4f radius = dot * slope + splat<3>(center);
+
+		// set radius to zero if cone is culled
+		radius = max(radius, gSimd4fZero) & ~culled;
+
+		// squared distance from the axis line
+		Simd4f sqrDistance = deltaX * deltaX + deltaY * deltaY + deltaZ * deltaZ - dot * dot;
+
+		Simd4i auxiliary = loadAligned(auxiliaryPtr, offset);
+		Simd4i bothMask = splat<3>(auxiliary);
+
+		Simd4f contactMask;
+		if (!anyGreater(radius * radius, sqrDistance, contactMask))
+		{
+			// cone only culled when spheres culled, ok to clear those too
+			shapeMask.mSpheres = shapeMask.mSpheres & ~bothMask;
+			continue;
+		}
+
+		// clamp to a small positive epsilon to avoid numerical error
+		// making sqrDistance negative when point lies on the cone axis
+		sqrDistance = max(sqrDistance, gSimd4fEpsilon);
+
+		Simd4f invDistance = rsqrt(sqrDistance);
+		// foot of the contact normal along the axis (accounts for the slope)
+		Simd4f base = dot + slope * sqrDistance * invDistance;
+
+		// force left/rightMask to false if not inside cone
+		base = base & contactMask;
+
+		Simd4f halfLength = splat<1>(simd4f(auxiliary));
+		Simd4i leftMask = simd4i(base < -halfLength);
+		Simd4i rightMask = simd4i(base > halfLength);
+
+		// we use both mask because of the early out above.
+		// particles past a cap keep the corresponding sphere's bit set so the
+		// caller's sphere pass handles them
+		Simd4i firstMask = splat<2>(auxiliary);
+		Simd4i secondMask = firstMask ^ bothMask;
+		shapeMask.mSpheres = shapeMask.mSpheres & ~(firstMask & ~leftMask);
+		shapeMask.mSpheres = shapeMask.mSpheres & ~(secondMask & ~rightMask);
+
+		// project delta onto the plane perpendicular to the axis at 'base'
+		deltaX = deltaX - base * axisX;
+		deltaY = deltaY - base * axisY;
+		deltaZ = deltaZ - base * axisZ;
+
+		Simd4f sqrCosine = splat<0>(simd4f(auxiliary));
+		Simd4f scale = radius * invDistance * sqrCosine - sqrCosine;
+
+		contactMask = contactMask & ~simd4f(leftMask | rightMask);
+
+		if (!anyTrue(contactMask))
+			continue;
+
+		accum.add(deltaX, deltaY, deltaZ, scale, contactMask);
+
+		if (frictionEnabled)
+		{
+			uint32_t s0 = mClothData.mCapsuleIndices[coneIndex].first;
+			uint32_t s1 = mClothData.mCapsuleIndices[coneIndex].second;
+
+			float* prevSpheres = reinterpret_cast<float*>(mPrevData.mSpheres);
+			float* curSpheres = reinterpret_cast<float*>(mCurData.mSpheres);
+
+			// todo: could pre-compute sphere velocities or it might be
+			// faster to compute cur/prev sphere positions directly
+			Simd4f s0p0 = loadAligned(prevSpheres, s0 * sizeof(SphereData));
+			Simd4f s0p1 = loadAligned(curSpheres, s0 * sizeof(SphereData));
+
+			Simd4f s1p0 = loadAligned(prevSpheres, s1 * sizeof(SphereData));
+			Simd4f s1p1 = loadAligned(curSpheres, s1 * sizeof(SphereData));
+
+			Simd4f v0 = s0p1 - s0p0;
+			Simd4f v1 = s1p1 - s1p0;
+			Simd4f vd = v1 - v0;
+
+			// dot is in the range -1 to 1, scale and bias to 0 to 1
+			// NOTE(review): 'dot' here is the axial distance reused as an
+			// interpolation parameter between the end-sphere velocities — confirm
+			// the intended normalization (axis is unit length, dot spans
+			// [-halfLength, halfLength]).
+			dot = dot * gSimd4fHalf + gSimd4fHalf;
+
+			// interpolate velocity at contact points
+			Simd4f vx = splat<0>(v0) + dot * splat<0>(vd);
+			Simd4f vy = splat<1>(v0) + dot * splat<1>(vd);
+			Simd4f vz = splat<2>(v0) + dot * splat<2>(vd);
+
+			accum.addVelocity(vx, vy, vz, contactMask);
+		}
+	}
+
+	return shapeMask.mSpheres;
+}
+
+// Discrete and continuous (time-of-impact) collision of four particles
+// (SoA x/y/z lanes in prevPos/curPos) against the spheres selected by
+// sphereMask. Collision impulses, and contact velocities when friction is
+// enabled, are accumulated into accum; curPos is moved back along the
+// particle trajectory on a continuous hit.
+template <typename Simd4f>
+FORCE_INLINE void cloth::SwCollision<Simd4f>::collideSpheres(const Simd4i& sphereMask, const Simd4f* __restrict prevPos,
+ Simd4f* __restrict curPos, ImpulseAccumulator& accum) const
+{
+ const float* __restrict prevSpheres = array(mPrevData.mSpheres->center);
+ const float* __restrict curSpheres = array(mCurData.mSpheres->center);
+
+ bool frictionEnabled = mClothData.mFrictionScale > 0.0f;
+
+ // merge the four per-lane masks, then iterate over the set bits; each
+ // set bit is the index of one sphere that some lane may collide with
+ Simd4i mask4 = horizontalOr(sphereMask);
+ uint32_t mask = uint32_t(array(mask4)[0]);
+ while (mask)
+ {
+ // isolate lowest set bit to get the next sphere index, then clear it
+ uint32_t test = mask - 1;
+ uint32_t offset = findBitSet(mask & ~test) * sizeof(SphereData);
+ mask = mask & test;
+
+ // particle positions relative to the sphere center, at both ends of the step
+ Simd4f prevSphere = loadAligned(prevSpheres, offset);
+ Simd4f prevX = prevPos[0] - splat<0>(prevSphere);
+ Simd4f prevY = prevPos[1] - splat<1>(prevSphere);
+ Simd4f prevZ = prevPos[2] - splat<2>(prevSphere);
+ Simd4f prevRadius = splat<3>(prevSphere);
+
+ Simd4f curSphere = loadAligned(curSpheres, offset);
+ Simd4f curX = curPos[0] - splat<0>(curSphere);
+ Simd4f curY = curPos[1] - splat<1>(curSphere);
+ Simd4f curZ = curPos[2] - splat<2>(curSphere);
+ Simd4f curRadius = splat<3>(curSphere);
+
+ // epsilon keeps the later rsqrt well-defined when the particle sits on the center
+ Simd4f sqrDistance = gSimd4fEpsilon + curX * curX + curY * curY + curZ * curZ;
+
+ // quadratic coefficients for |p(t)| = r(t) along the lerped trajectory
+ // (sphere radius is interpolated together with the position)
+ Simd4f dotPrevPrev = prevX * prevX + prevY * prevY + prevZ * prevZ - prevRadius * prevRadius;
+ Simd4f dotPrevCur = prevX * curX + prevY * curY + prevZ * curZ - prevRadius * curRadius;
+ Simd4f dotCurCur = sqrDistance - curRadius * curRadius;
+
+ Simd4f discriminant = dotPrevCur * dotPrevCur - dotCurCur * dotPrevPrev;
+ Simd4f sqrtD = sqrt(discriminant);
+ Simd4f halfB = dotPrevCur - dotPrevPrev;
+ Simd4f minusA = dotPrevCur - dotCurCur + halfB;
+
+ // time of impact or 0 if prevPos inside sphere
+ Simd4f toi = recip(minusA) * min(gSimd4fZero, halfB + sqrtD);
+ Simd4f collisionMask = (toi < gSimd4fOne) & (halfB < sqrtD);
+
+ // skip continuous collision if the (un-clamped) particle
+ // trajectory only touches the outer skin of the sphere.
+ Simd4f rMin = prevRadius + halfB * minusA * (curRadius - prevRadius);
+ collisionMask = collisionMask & (discriminant > minusA * rMin * rMin * sSkeletonWidth);
+
+ // a is negative when one sphere is contained in the other,
+ // which is already handled by discrete collision.
+ collisionMask = collisionMask & (minusA < -static_cast<Simd4f>(gSimd4fEpsilon));
+
+ if (!allEqual(collisionMask, gSimd4fZero))
+ {
+ Simd4f deltaX = prevX - curX;
+ Simd4f deltaY = prevY - curY;
+ Simd4f deltaZ = prevZ - curZ;
+
+ Simd4f oneMinusToi = (gSimd4fOne - toi) & collisionMask;
+
+ // reduce ccd impulse if (clamped) particle trajectory stays in sphere skin,
+ // i.e. scale by exp2(-k) or 1/(1+k) with k = (tmin - toi) / (1 - toi)
+ Simd4f minusK = sqrtD * recip(minusA * oneMinusToi) & (oneMinusToi > gSimd4fEpsilon);
+ oneMinusToi = oneMinusToi * recip(gSimd4fOne - minusK);
+
+ // move current position back to the (damped) time of impact
+ curX = curX + deltaX * oneMinusToi;
+ curY = curY + deltaY * oneMinusToi;
+ curZ = curZ + deltaZ * oneMinusToi;
+
+ curPos[0] = splat<0>(curSphere) + curX;
+ curPos[1] = splat<1>(curSphere) + curY;
+ curPos[2] = splat<2>(curSphere) + curZ;
+
+ sqrDistance = gSimd4fEpsilon + curX * curX + curY * curY + curZ * curZ;
+ }
+
+ // discrete pass: push particles still inside the sphere out along the normal
+ Simd4f negativeScale = gSimd4fOne - rsqrt(sqrDistance) * curRadius;
+
+ Simd4f contactMask;
+ if (!anyGreater(gSimd4fZero, negativeScale, contactMask))
+ continue;
+
+ accum.subtract(curX, curY, curZ, negativeScale, contactMask);
+
+ if (frictionEnabled)
+ {
+ // sphere velocity over the step drives the friction response
+ Simd4f velocity = curSphere - prevSphere;
+ accum.addVelocity(splat<0>(velocity), splat<1>(velocity), splat<2>(velocity), contactMask);
+ }
+ }
+}
+
+// Discrete and continuous (time-of-impact) collision of four particles
+// against all active cones (tapered capsules). Also maintains
+// shapeMask.mSpheres: sphere tests that are fully covered (or made
+// unnecessary) by the cone result are culled from the returned mask, which
+// the caller feeds into collideSpheres().
+template <typename Simd4f>
+FORCE_INLINE typename cloth::SwCollision<Simd4f>::Simd4i
+cloth::SwCollision<Simd4f>::collideCones(const Simd4f* __restrict prevPos, Simd4f* __restrict curPos,
+ ImpulseAccumulator& accum) const
+{
+ const float* __restrict prevCenterPtr = array(mPrevData.mCones->center);
+ const float* __restrict prevAxisPtr = array(mPrevData.mCones->axis);
+ const int32_t* __restrict prevAuxiliaryPtr = reinterpret_cast<const int32_t*>(&mPrevData.mCones->sqrCosine);
+
+ const float* __restrict curCenterPtr = array(mCurData.mCones->center);
+ const float* __restrict curAxisPtr = array(mCurData.mCones->axis);
+ const int32_t* __restrict curAuxiliaryPtr = reinterpret_cast<const int32_t*>(&mCurData.mCones->sqrCosine);
+
+ bool frictionEnabled = mClothData.mFrictionScale > 0.0f;
+
+ // broad-phase: which cones/spheres can touch which lanes at all
+ ShapeMask shapeMask = getShapeMask(prevPos, curPos);
+ Simd4i mask4 = horizontalOr(shapeMask.mCones);
+ uint32_t mask = uint32_t(array(mask4)[0]);
+ while (mask)
+ {
+ // isolate lowest set bit to get the next cone index, then clear it
+ uint32_t test = mask - 1;
+ uint32_t coneIndex = findBitSet(mask & ~test);
+ uint32_t offset = coneIndex * sizeof(ConeData);
+ mask = mask & test;
+
+ // per-lane flag: cone passed the broad phase for this lane or not
+ Simd4i test4 = mask4 - gSimd4iOne;
+ Simd4f culled = simd4f(andNotIsZero(shapeMask.mCones, test4));
+ mask4 = mask4 & test4;
+
+ // positions relative to the cone frame at the start of the step;
+ // (t,u,v) is the cross product of position and axis, so |t,u,v| is the
+ // distance to the (infinite) cone axis times |axis|
+ Simd4f prevCenter = loadAligned(prevCenterPtr, offset);
+ Simd4f prevAxis = loadAligned(prevAxisPtr, offset);
+ Simd4f prevAxisX = splat<0>(prevAxis);
+ Simd4f prevAxisY = splat<1>(prevAxis);
+ Simd4f prevAxisZ = splat<2>(prevAxis);
+ Simd4f prevSlope = splat<3>(prevAxis);
+
+ Simd4f prevX = prevPos[0] - splat<0>(prevCenter);
+ Simd4f prevY = prevPos[1] - splat<1>(prevCenter);
+ Simd4f prevZ = prevPos[2] - splat<2>(prevCenter);
+ Simd4f prevT = prevY * prevAxisZ - prevZ * prevAxisY;
+ Simd4f prevU = prevZ * prevAxisX - prevX * prevAxisZ;
+ Simd4f prevV = prevX * prevAxisY - prevY * prevAxisX;
+ Simd4f prevDot = prevX * prevAxisX + prevY * prevAxisY + prevZ * prevAxisZ;
+ Simd4f prevRadius = prevDot * prevSlope + splat<3>(prevCenter);
+
+ // same quantities at the end of the step
+ Simd4f curCenter = loadAligned(curCenterPtr, offset);
+ Simd4f curAxis = loadAligned(curAxisPtr, offset);
+ Simd4f curAxisX = splat<0>(curAxis);
+ Simd4f curAxisY = splat<1>(curAxis);
+ Simd4f curAxisZ = splat<2>(curAxis);
+ Simd4f curSlope = splat<3>(curAxis);
+ Simd4i curAuxiliary = loadAligned(curAuxiliaryPtr, offset);
+
+ Simd4f curX = curPos[0] - splat<0>(curCenter);
+ Simd4f curY = curPos[1] - splat<1>(curCenter);
+ Simd4f curZ = curPos[2] - splat<2>(curCenter);
+ Simd4f curT = curY * curAxisZ - curZ * curAxisY;
+ Simd4f curU = curZ * curAxisX - curX * curAxisZ;
+ Simd4f curV = curX * curAxisY - curY * curAxisX;
+ Simd4f curDot = curX * curAxisX + curY * curAxisY + curZ * curAxisZ;
+ Simd4f curRadius = curDot * curSlope + splat<3>(curCenter);
+
+ Simd4f curSqrDistance = gSimd4fEpsilon + curT * curT + curU * curU + curV * curV;
+
+ // set radius to zero if cone is culled
+ prevRadius = max(prevRadius, gSimd4fZero) & ~culled;
+ curRadius = max(curRadius, gSimd4fZero) & ~culled;
+
+ // quadratic coefficients for the lerped axis-distance vs. lerped radius
+ Simd4f dotPrevPrev = prevT * prevT + prevU * prevU + prevV * prevV - prevRadius * prevRadius;
+ Simd4f dotPrevCur = prevT * curT + prevU * curU + prevV * curV - prevRadius * curRadius;
+ Simd4f dotCurCur = curSqrDistance - curRadius * curRadius;
+
+ Simd4f discriminant = dotPrevCur * dotPrevCur - dotCurCur * dotPrevPrev;
+ Simd4f sqrtD = sqrt(discriminant);
+ Simd4f halfB = dotPrevCur - dotPrevPrev;
+ Simd4f minusA = dotPrevCur - dotCurCur + halfB;
+
+ // time of impact or 0 if prevPos inside cone
+ Simd4f toi = recip(minusA) * min(gSimd4fZero, halfB + sqrtD);
+ Simd4f collisionMask = (toi < gSimd4fOne) & (halfB < sqrtD);
+
+ // skip continuous collision if the (un-clamped) particle
+ // trajectory only touches the outer skin of the cone.
+ Simd4f rMin = prevRadius + halfB * minusA * (curRadius - prevRadius);
+ collisionMask = collisionMask & (discriminant > minusA * rMin * rMin * sSkeletonWidth);
+
+ // a is negative when one cone is contained in the other,
+ // which is already handled by discrete collision.
+ collisionMask = collisionMask & (minusA < -static_cast<Simd4f>(gSimd4fEpsilon));
+
+ // test if any particle hits infinite cone (and 0<time of impact<1)
+ if (!allEqual(collisionMask, gSimd4fZero))
+ {
+ Simd4f deltaX = prevX - curX;
+ Simd4f deltaY = prevY - curY;
+ Simd4f deltaZ = prevZ - curZ;
+
+ // interpolate delta at toi
+ Simd4f posX = prevX - deltaX * toi;
+ Simd4f posY = prevY - deltaY * toi;
+ Simd4f posZ = prevZ - deltaZ * toi;
+
+ // auxiliary lane 1 holds the cone half length (see halfLength below)
+ Simd4f curScaledAxis = curAxis * splat<1>(simd4f(curAuxiliary));
+ Simd4i prevAuxiliary = loadAligned(prevAuxiliaryPtr, offset);
+ Simd4f deltaScaledAxis = curScaledAxis - prevAxis * splat<1>(simd4f(prevAuxiliary));
+
+ Simd4f oneMinusToi = gSimd4fOne - toi;
+
+ // interpolate axis at toi
+ Simd4f axisX = splat<0>(curScaledAxis) - splat<0>(deltaScaledAxis) * oneMinusToi;
+ Simd4f axisY = splat<1>(curScaledAxis) - splat<1>(deltaScaledAxis) * oneMinusToi;
+ Simd4f axisZ = splat<2>(curScaledAxis) - splat<2>(deltaScaledAxis) * oneMinusToi;
+ Simd4f slope = (prevSlope * oneMinusToi + curSlope * toi);
+
+ Simd4f sqrHalfLength = axisX * axisX + axisY * axisY + axisZ * axisZ;
+ Simd4f invHalfLength = rsqrt(sqrHalfLength);
+ Simd4f dot = (posX * axisX + posY * axisY + posZ * axisZ) * invHalfLength;
+
+ // distance of the impact point from the cone axis; the extra mask
+ // zeroes invDistance when the point lies exactly on the axis
+ Simd4f sqrDistance = posX * posX + posY * posY + posZ * posZ - dot * dot;
+ Simd4f invDistance = rsqrt(sqrDistance) & (sqrDistance > gSimd4fZero);
+
+ // scale in [-1,1] parameterizes the impact point along the cone axis
+ Simd4f base = dot + slope * sqrDistance * invDistance;
+ Simd4f scale = base * invHalfLength & collisionMask;
+
+ Simd4f cullMask = (abs(scale) < gSimd4fOne) & collisionMask;
+
+ // test if any impact position is in cone section
+ if (!allEqual(cullMask, gSimd4fZero))
+ {
+ deltaX = deltaX + splat<0>(deltaScaledAxis) * scale;
+ deltaY = deltaY + splat<1>(deltaScaledAxis) * scale;
+ deltaZ = deltaZ + splat<2>(deltaScaledAxis) * scale;
+
+ oneMinusToi = oneMinusToi & cullMask;
+
+ // reduce ccd impulse if (clamped) particle trajectory stays in cone skin,
+ // i.e. scale by exp2(-k) or 1/(1+k) with k = (tmin - toi) / (1 - toi)
+ // oneMinusToi = oneMinusToi * recip(gSimd4fOne - sqrtD * recip(minusA * oneMinusToi));
+ Simd4f minusK = sqrtD * recip(minusA * oneMinusToi) & (oneMinusToi > gSimd4fEpsilon);
+ oneMinusToi = oneMinusToi * recip(gSimd4fOne - minusK);
+
+ // move current position back to the (damped) time of impact and
+ // refresh the cone-frame quantities for the discrete pass below
+ curX = curX + deltaX * oneMinusToi;
+ curY = curY + deltaY * oneMinusToi;
+ curZ = curZ + deltaZ * oneMinusToi;
+
+ curDot = curX * curAxisX + curY * curAxisY + curZ * curAxisZ;
+ curRadius = curDot * curSlope + splat<3>(curCenter);
+ curRadius = max(curRadius, gSimd4fZero) & ~culled;
+ curSqrDistance = curX * curX + curY * curY + curZ * curZ - curDot * curDot;
+
+ curPos[0] = splat<0>(curCenter) + curX;
+ curPos[1] = splat<1>(curCenter) + curY;
+ curPos[2] = splat<2>(curCenter) + curZ;
+ }
+ }
+
+ // curPos inside cone (discrete collision)
+ Simd4f contactMask;
+ int anyContact = anyGreater(curRadius * curRadius, curSqrDistance, contactMask);
+
+ // auxiliary lane 3: mask selecting both capsule end spheres
+ Simd4i bothMask = splat<3>(curAuxiliary);
+
+ // instead of culling continuous collision for ~collisionMask, and discrete
+ // collision for ~contactMask, disable both if ~collisionMask & ~contactMask
+ Simd4i cullMask = bothMask & ~simd4i(collisionMask | contactMask);
+ shapeMask.mSpheres = shapeMask.mSpheres & ~cullMask;
+
+ if (!anyContact)
+ continue;
+
+ Simd4f invDistance = rsqrt(curSqrDistance) & (curSqrDistance > gSimd4fZero);
+ Simd4f base = curDot + curSlope * curSqrDistance * invDistance;
+
+ // left/right of the finite cone section -> end-sphere territory
+ Simd4f halfLength = splat<1>(simd4f(curAuxiliary));
+ Simd4i leftMask = simd4i(base < -halfLength);
+ Simd4i rightMask = simd4i(base > halfLength);
+
+ // can only skip continuous sphere collision if post-ccd position
+ // is on cone side *and* particle had cone-ccd collision.
+ Simd4i firstMask = splat<2>(curAuxiliary);
+ Simd4i secondMask = firstMask ^ bothMask;
+ cullMask = (firstMask & ~leftMask) | (secondMask & ~rightMask);
+ shapeMask.mSpheres = shapeMask.mSpheres & ~(cullMask & simd4i(collisionMask));
+
+ // push-out direction: perpendicular from the cone axis
+ Simd4f deltaX = curX - base * curAxisX;
+ Simd4f deltaY = curY - base * curAxisY;
+ Simd4f deltaZ = curZ - base * curAxisZ;
+
+ Simd4f sqrCosine = splat<0>(simd4f(curAuxiliary));
+ Simd4f scale = curRadius * invDistance * sqrCosine - sqrCosine;
+
+ contactMask = contactMask & ~simd4f(leftMask | rightMask);
+
+ if (!anyTrue(contactMask))
+ continue;
+
+ accum.add(deltaX, deltaY, deltaZ, scale, contactMask);
+
+ if (frictionEnabled)
+ {
+ uint32_t s0 = mClothData.mCapsuleIndices[coneIndex].first;
+ uint32_t s1 = mClothData.mCapsuleIndices[coneIndex].second;
+
+ float* prevSpheres = reinterpret_cast<float*>(mPrevData.mSpheres);
+ float* curSpheres = reinterpret_cast<float*>(mCurData.mSpheres);
+
+ // todo: could pre-compute sphere velocities or it might be
+ // faster to compute cur/prev sphere positions directly
+ Simd4f s0p0 = loadAligned(prevSpheres, s0 * sizeof(SphereData));
+ Simd4f s0p1 = loadAligned(curSpheres, s0 * sizeof(SphereData));
+
+ Simd4f s1p0 = loadAligned(prevSpheres, s1 * sizeof(SphereData));
+ Simd4f s1p1 = loadAligned(curSpheres, s1 * sizeof(SphereData));
+
+ Simd4f v0 = s0p1 - s0p0;
+ Simd4f v1 = s1p1 - s1p0;
+ Simd4f vd = v1 - v0;
+
+ // dot is in the range -1 to 1, scale and bias to 0 to 1
+ curDot = curDot * gSimd4fHalf + gSimd4fHalf;
+
+ // interpolate velocity at contact points
+ Simd4f vx = splat<0>(v0) + curDot * splat<0>(vd);
+ Simd4f vy = splat<1>(v0) + curDot * splat<1>(vd);
+ Simd4f vz = splat<2>(v0) + curDot * splat<2>(vd);
+
+ accum.addVelocity(vx, vy, vz, contactMask);
+ }
+ }
+
+ return shapeMask.mSpheres;
+}
+
+namespace
+{
+
+// Computes a per-lane tangential friction impulse for four particles.
+// delta*  : accumulated collision impulse (its direction is the contact normal)
+// vel*    : accumulated shape velocity at the contact points
+// curPos/prevPos : particle positions (SoA), used to form relative velocity
+// scale   : 1/numCollisions, normalizes the accumulated sums
+// coefficient : friction scale; mask selects lanes that actually collided
+// impulse : out, x/y/z of the friction impulse (zero in unmasked lanes)
+template <typename Simd4f>
+PX_INLINE void calculateFrictionImpulse(const Simd4f& deltaX, const Simd4f& deltaY, const Simd4f& deltaZ,
+ const Simd4f& velX, const Simd4f& velY, const Simd4f& velZ,
+ const Simd4f* curPos, const Simd4f* prevPos, const Simd4f& scale,
+ const Simd4f& coefficient, const Simd4f& mask, Simd4f* impulse)
+{
+ // calculate collision normal
+ Simd4f deltaSq = deltaX * deltaX + deltaY * deltaY + deltaZ * deltaZ;
+
+ // epsilon keeps rsqrt finite when the impulse is zero
+ Simd4f rcpDelta = rsqrt(deltaSq + gSimd4fEpsilon);
+
+ Simd4f nx = deltaX * rcpDelta;
+ Simd4f ny = deltaY * rcpDelta;
+ Simd4f nz = deltaZ * rcpDelta;
+
+ // calculate relative velocity scaled by number of collisions
+ Simd4f rvx = curPos[0] - prevPos[0] - velX * scale;
+ Simd4f rvy = curPos[1] - prevPos[1] - velY * scale;
+ Simd4f rvz = curPos[2] - prevPos[2] - velZ * scale;
+
+ // calculate magnitude of relative normal velocity
+ Simd4f rvn = rvx * nx + rvy * ny + rvz * nz;
+
+ // calculate relative tangential velocity
+ Simd4f rvtx = rvx - rvn * nx;
+ Simd4f rvty = rvy - rvn * ny;
+ Simd4f rvtz = rvz - rvn * nz;
+
+ // calculate magnitude of vt
+ Simd4f rcpVt = rsqrt(rvtx * rvtx + rvty * rvty + rvtz * rvtz + gSimd4fEpsilon);
+
+ // magnitude of friction impulse (cannot be greater than -vt)
+ Simd4f j = max(-coefficient * deltaSq * rcpDelta * rcpVt, gSimd4fMinusOne) & mask;
+
+ impulse[0] = rvtx * j;
+ impulse[1] = rvty * j;
+ impulse[2] = rvtz * j;
+}
+
+} // anonymous namespace
+
+// Discrete collision of all regular particles against cones and spheres.
+// Particles are processed four at a time: loaded as AoS (xyzw), transposed
+// to SoA for the collision kernels, then transposed back and stored.
+// Applies averaged collision impulses, optional friction (adjusts previous
+// positions) and optional collision mass scaling (adjusts w = inverse mass).
+template <typename Simd4f>
+void cloth::SwCollision<Simd4f>::collideParticles()
+{
+ const bool massScalingEnabled = mClothData.mCollisionMassScale > 0.0f;
+ const Simd4f massScale = simd4f(mClothData.mCollisionMassScale);
+
+ const bool frictionEnabled = mClothData.mFrictionScale > 0.0f;
+ const Simd4f frictionScale = simd4f(mClothData.mFrictionScale);
+
+ Simd4f curPos[4];
+ Simd4f prevPos[4];
+
+ // 4 particles * 4 floats per iteration (16 floats)
+ float* __restrict prevIt = mClothData.mPrevParticles;
+ float* __restrict pIt = mClothData.mCurParticles;
+ float* __restrict pEnd = pIt + mClothData.mNumParticles * 4;
+ for (; pIt < pEnd; pIt += 16, prevIt += 16)
+ {
+ curPos[0] = loadAligned(pIt, 0);
+ curPos[1] = loadAligned(pIt, 16);
+ curPos[2] = loadAligned(pIt, 32);
+ curPos[3] = loadAligned(pIt, 48);
+ transpose(curPos[0], curPos[1], curPos[2], curPos[3]);
+
+ // cones first; they return the reduced sphere mask to test afterwards
+ ImpulseAccumulator accum;
+ Simd4i sphereMask = collideCones(curPos, accum);
+ collideSpheres(sphereMask, curPos, accum);
+
+ // early out for lanes without any collision
+ Simd4f mask;
+ if (!anyGreater(accum.mNumCollisions, gSimd4fEpsilon, mask))
+ continue;
+
+ Simd4f invNumCollisions = recip(accum.mNumCollisions);
+
+ if (frictionEnabled)
+ {
+ prevPos[0] = loadAligned(prevIt, 0);
+ prevPos[1] = loadAligned(prevIt, 16);
+ prevPos[2] = loadAligned(prevIt, 32);
+ prevPos[3] = loadAligned(prevIt, 48);
+ transpose(prevPos[0], prevPos[1], prevPos[2], prevPos[3]);
+
+ Simd4f frictionImpulse[3];
+ calculateFrictionImpulse(accum.mDeltaX, accum.mDeltaY, accum.mDeltaZ, accum.mVelX, accum.mVelY, accum.mVelZ,
+ curPos, prevPos, invNumCollisions, frictionScale, mask, frictionImpulse);
+
+ // friction is applied by shifting previous positions (verlet-style)
+ prevPos[0] = prevPos[0] - frictionImpulse[0];
+ prevPos[1] = prevPos[1] - frictionImpulse[1];
+ prevPos[2] = prevPos[2] - frictionImpulse[2];
+
+ transpose(prevPos[0], prevPos[1], prevPos[2], prevPos[3]);
+ storeAligned(prevIt, 0, prevPos[0]);
+ storeAligned(prevIt, 16, prevPos[1]);
+ storeAligned(prevIt, 32, prevPos[2]);
+ storeAligned(prevIt, 48, prevPos[3]);
+ }
+
+ if (massScalingEnabled)
+ {
+ // calculate the inverse mass scale based on the collision impulse magnitude
+ Simd4f dSq = invNumCollisions * invNumCollisions *
+ (accum.mDeltaX * accum.mDeltaX + accum.mDeltaY * accum.mDeltaY + accum.mDeltaZ * accum.mDeltaZ);
+
+ Simd4f scale = recip(gSimd4fOne + massScale * dSq);
+
+ // scale invmass
+ curPos[3] = select(mask, curPos[3] * scale, curPos[3]);
+ }
+
+ // apply averaged collision impulse to positions
+ curPos[0] = curPos[0] + accum.mDeltaX * invNumCollisions;
+ curPos[1] = curPos[1] + accum.mDeltaY * invNumCollisions;
+ curPos[2] = curPos[2] + accum.mDeltaZ * invNumCollisions;
+
+ transpose(curPos[0], curPos[1], curPos[2], curPos[3]);
+ storeAligned(pIt, 0, curPos[0]);
+ storeAligned(pIt, 16, curPos[1]);
+ storeAligned(pIt, 32, curPos[2]);
+ storeAligned(pIt, 48, curPos[3]);
+
+#if PX_PROFILE || PX_DEBUG
+ mNumCollisions += horizontalSum(accum.mNumCollisions);
+#endif
+ }
+}
+
+// Discrete collision of virtual particles (barycentric points on triangles)
+// against cones and spheres. Each group of 4 virtual particles is defined by
+// 12 real-particle indices plus 4 weight indices (16 uint16_t per iteration;
+// indices 3,7,11,15 reference weight records, and every 4th real index slot
+// doubles as padding via the dummy particle). The collision response is
+// distributed back onto the referenced real particles using the weights.
+template <typename Simd4f>
+void cloth::SwCollision<Simd4f>::collideVirtualParticles()
+{
+ const bool massScalingEnabled = mClothData.mCollisionMassScale > 0.0f;
+ const Simd4f massScale = simd4f(mClothData.mCollisionMassScale);
+
+ const bool frictionEnabled = mClothData.mFrictionScale > 0.0f;
+ const Simd4f frictionScale = simd4f(mClothData.mFrictionScale);
+
+ Simd4f curPos[3];
+
+ const float* __restrict weights = mClothData.mVirtualParticleWeights;
+ float* __restrict particles = mClothData.mCurParticles;
+ float* __restrict prevParticles = mClothData.mPrevParticles;
+
+ // move dummy particles outside of collision range
+ Simd4f* __restrict dummy = mClothData.mNumParticles + reinterpret_cast<Simd4f*>(mClothData.mCurParticles);
+ Simd4f invGridScale = recip(mGridScale) & (mGridScale > gSimd4fEpsilon);
+ dummy[0] = dummy[1] = dummy[2] = invGridScale * mGridBias - invGridScale;
+
+ const uint16_t* __restrict vpIt = mClothData.mVirtualParticlesBegin;
+ const uint16_t* __restrict vpEnd = mClothData.mVirtualParticlesEnd;
+ for (; vpIt != vpEnd; vpIt += 16)
+ {
+ // load 12 particles and 4 weights
+ Simd4f p0v0 = loadAligned(particles, vpIt[0] * sizeof(PxVec4));
+ Simd4f p0v1 = loadAligned(particles, vpIt[1] * sizeof(PxVec4));
+ Simd4f p0v2 = loadAligned(particles, vpIt[2] * sizeof(PxVec4));
+ Simd4f w0 = loadAligned(weights, vpIt[3] * sizeof(PxVec4));
+
+ Simd4f p1v0 = loadAligned(particles, vpIt[4] * sizeof(PxVec4));
+ Simd4f p1v1 = loadAligned(particles, vpIt[5] * sizeof(PxVec4));
+ Simd4f p1v2 = loadAligned(particles, vpIt[6] * sizeof(PxVec4));
+ Simd4f w1 = loadAligned(weights, vpIt[7] * sizeof(PxVec4));
+
+ Simd4f p2v0 = loadAligned(particles, vpIt[8] * sizeof(PxVec4));
+ Simd4f p2v1 = loadAligned(particles, vpIt[9] * sizeof(PxVec4));
+ Simd4f p2v2 = loadAligned(particles, vpIt[10] * sizeof(PxVec4));
+ Simd4f w2 = loadAligned(weights, vpIt[11] * sizeof(PxVec4));
+
+ // NOTE(review): p3v1 is loaded before p3v0 here; the loads are
+ // independent so the order is harmless, but it breaks the pattern of
+ // the three groups above — worth normalizing in a cleanup pass.
+ Simd4f p3v1 = loadAligned(particles, vpIt[13] * sizeof(PxVec4));
+ Simd4f p3v0 = loadAligned(particles, vpIt[12] * sizeof(PxVec4));
+ Simd4f p3v2 = loadAligned(particles, vpIt[14] * sizeof(PxVec4));
+ Simd4f w3 = loadAligned(weights, vpIt[15] * sizeof(PxVec4));
+
+ // interpolate particles and transpose
+ Simd4f px = p0v0 * splat<0>(w0) + p0v1 * splat<1>(w0) + p0v2 * splat<2>(w0);
+ Simd4f py = p1v0 * splat<0>(w1) + p1v1 * splat<1>(w1) + p1v2 * splat<2>(w1);
+ Simd4f pz = p2v0 * splat<0>(w2) + p2v1 * splat<1>(w2) + p2v2 * splat<2>(w2);
+ Simd4f pw = p3v0 * splat<0>(w3) + p3v1 * splat<1>(w3) + p3v2 * splat<2>(w3);
+ transpose(px, py, pz, pw);
+
+ curPos[0] = px;
+ curPos[1] = py;
+ curPos[2] = pz;
+
+ ImpulseAccumulator accum;
+ Simd4i sphereMask = collideCones(curPos, accum);
+ collideSpheres(sphereMask, curPos, accum);
+
+ // early out for lanes without any collision
+ Simd4f mask;
+ if (!anyGreater(accum.mNumCollisions, gSimd4fEpsilon, mask))
+ continue;
+
+ Simd4f invNumCollisions = recip(accum.mNumCollisions);
+
+ // displacement and transpose back
+ Simd4f d0 = accum.mDeltaX * invNumCollisions;
+ Simd4f d1 = accum.mDeltaY * invNumCollisions;
+ Simd4f d2 = accum.mDeltaZ * invNumCollisions;
+ Simd4f d3 = gSimd4fZero;
+ transpose(d0, d1, d2, d3);
+
+ // scale weights by 1/dot(w,w)
+ Simd4f rw0 = w0 * splat<3>(w0);
+ Simd4f rw1 = w1 * splat<3>(w1);
+ Simd4f rw2 = w2 * splat<3>(w2);
+ Simd4f rw3 = w3 * splat<3>(w3);
+
+ if (frictionEnabled)
+ {
+ Simd4f q0v0 = loadAligned(prevParticles, vpIt[0] * sizeof(PxVec4));
+ Simd4f q0v1 = loadAligned(prevParticles, vpIt[1] * sizeof(PxVec4));
+ Simd4f q0v2 = loadAligned(prevParticles, vpIt[2] * sizeof(PxVec4));
+
+ Simd4f q1v0 = loadAligned(prevParticles, vpIt[4] * sizeof(PxVec4));
+ Simd4f q1v1 = loadAligned(prevParticles, vpIt[5] * sizeof(PxVec4));
+ Simd4f q1v2 = loadAligned(prevParticles, vpIt[6] * sizeof(PxVec4));
+
+ Simd4f q2v0 = loadAligned(prevParticles, vpIt[8] * sizeof(PxVec4));
+ Simd4f q2v1 = loadAligned(prevParticles, vpIt[9] * sizeof(PxVec4));
+ Simd4f q2v2 = loadAligned(prevParticles, vpIt[10] * sizeof(PxVec4));
+
+ Simd4f q3v0 = loadAligned(prevParticles, vpIt[12] * sizeof(PxVec4));
+ Simd4f q3v1 = loadAligned(prevParticles, vpIt[13] * sizeof(PxVec4));
+ Simd4f q3v2 = loadAligned(prevParticles, vpIt[14] * sizeof(PxVec4));
+
+ // calculate previous interpolated positions
+ Simd4f qx = q0v0 * splat<0>(w0) + q0v1 * splat<1>(w0) + q0v2 * splat<2>(w0);
+ Simd4f qy = q1v0 * splat<0>(w1) + q1v1 * splat<1>(w1) + q1v2 * splat<2>(w1);
+ Simd4f qz = q2v0 * splat<0>(w2) + q2v1 * splat<1>(w2) + q2v2 * splat<2>(w2);
+ Simd4f qw = q3v0 * splat<0>(w3) + q3v1 * splat<1>(w3) + q3v2 * splat<2>(w3);
+ transpose(qx, qy, qz, qw);
+
+ Simd4f prevPos[3] = { qx, qy, qz };
+ Simd4f frictionImpulse[4];
+ frictionImpulse[3] = gSimd4fZero;
+
+ calculateFrictionImpulse(accum.mDeltaX, accum.mDeltaY, accum.mDeltaZ, accum.mVelX, accum.mVelY, accum.mVelZ,
+ curPos, prevPos, invNumCollisions, frictionScale, mask, frictionImpulse);
+
+ transpose(frictionImpulse[0], frictionImpulse[1], frictionImpulse[2], frictionImpulse[3]);
+
+ // distribute the friction impulse back onto the previous positions of
+ // the real particles, weighted by the (normalized) barycentric weights
+ q0v0 = q0v0 - (splat<0>(rw0) * frictionImpulse[0]);
+ q0v1 = q0v1 - (splat<1>(rw0) * frictionImpulse[0]);
+ q0v2 = q0v2 - (splat<2>(rw0) * frictionImpulse[0]);
+
+ q1v0 = q1v0 - (splat<0>(rw1) * frictionImpulse[1]);
+ q1v1 = q1v1 - (splat<1>(rw1) * frictionImpulse[1]);
+ q1v2 = q1v2 - (splat<2>(rw1) * frictionImpulse[1]);
+
+ q2v0 = q2v0 - (splat<0>(rw2) * frictionImpulse[2]);
+ q2v1 = q2v1 - (splat<1>(rw2) * frictionImpulse[2]);
+ q2v2 = q2v2 - (splat<2>(rw2) * frictionImpulse[2]);
+
+ q3v0 = q3v0 - (splat<0>(rw3) * frictionImpulse[3]);
+ q3v1 = q3v1 - (splat<1>(rw3) * frictionImpulse[3]);
+ q3v2 = q3v2 - (splat<2>(rw3) * frictionImpulse[3]);
+
+ // write back prev particles
+ storeAligned(prevParticles, vpIt[0] * sizeof(PxVec4), q0v0);
+ storeAligned(prevParticles, vpIt[1] * sizeof(PxVec4), q0v1);
+ storeAligned(prevParticles, vpIt[2] * sizeof(PxVec4), q0v2);
+
+ storeAligned(prevParticles, vpIt[4] * sizeof(PxVec4), q1v0);
+ storeAligned(prevParticles, vpIt[5] * sizeof(PxVec4), q1v1);
+ storeAligned(prevParticles, vpIt[6] * sizeof(PxVec4), q1v2);
+
+ storeAligned(prevParticles, vpIt[8] * sizeof(PxVec4), q2v0);
+ storeAligned(prevParticles, vpIt[9] * sizeof(PxVec4), q2v1);
+ storeAligned(prevParticles, vpIt[10] * sizeof(PxVec4), q2v2);
+
+ storeAligned(prevParticles, vpIt[12] * sizeof(PxVec4), q3v0);
+ storeAligned(prevParticles, vpIt[13] * sizeof(PxVec4), q3v1);
+ storeAligned(prevParticles, vpIt[14] * sizeof(PxVec4), q3v2);
+ }
+
+ if (massScalingEnabled)
+ {
+ // calculate the inverse mass scale based on the collision impulse
+ Simd4f dSq = invNumCollisions * invNumCollisions *
+ (accum.mDeltaX * accum.mDeltaX + accum.mDeltaY * accum.mDeltaY + accum.mDeltaZ * accum.mDeltaZ);
+
+ Simd4f weightScale = recip(gSimd4fOne + massScale * dSq);
+
+ // scale only the w component (inverse mass) of each real particle,
+ // leaving xyz untouched (sMaskW selects the w lane)
+ weightScale = weightScale - gSimd4fOne;
+ Simd4f s0 = gSimd4fOne + splat<0>(weightScale) * (w0 & splat<0>(mask));
+ Simd4f s1 = gSimd4fOne + splat<1>(weightScale) * (w1 & splat<1>(mask));
+ Simd4f s2 = gSimd4fOne + splat<2>(weightScale) * (w2 & splat<2>(mask));
+ Simd4f s3 = gSimd4fOne + splat<3>(weightScale) * (w3 & splat<3>(mask));
+
+ p0v0 = p0v0 * (gSimd4fOneXYZ | (splat<0>(s0) & sMaskW));
+ p0v1 = p0v1 * (gSimd4fOneXYZ | (splat<1>(s0) & sMaskW));
+ p0v2 = p0v2 * (gSimd4fOneXYZ | (splat<2>(s0) & sMaskW));
+
+ p1v0 = p1v0 * (gSimd4fOneXYZ | (splat<0>(s1) & sMaskW));
+ p1v1 = p1v1 * (gSimd4fOneXYZ | (splat<1>(s1) & sMaskW));
+ p1v2 = p1v2 * (gSimd4fOneXYZ | (splat<2>(s1) & sMaskW));
+
+ p2v0 = p2v0 * (gSimd4fOneXYZ | (splat<0>(s2) & sMaskW));
+ p2v1 = p2v1 * (gSimd4fOneXYZ | (splat<1>(s2) & sMaskW));
+ p2v2 = p2v2 * (gSimd4fOneXYZ | (splat<2>(s2) & sMaskW));
+
+ p3v0 = p3v0 * (gSimd4fOneXYZ | (splat<0>(s3) & sMaskW));
+ p3v1 = p3v1 * (gSimd4fOneXYZ | (splat<1>(s3) & sMaskW));
+ p3v2 = p3v2 * (gSimd4fOneXYZ | (splat<2>(s3) & sMaskW));
+ }
+
+ // distribute the collision displacement onto the real particles
+ p0v0 = p0v0 + (splat<0>(rw0) * d0);
+ p0v1 = p0v1 + (splat<1>(rw0) * d0);
+ p0v2 = p0v2 + (splat<2>(rw0) * d0);
+
+ p1v0 = p1v0 + (splat<0>(rw1) * d1);
+ p1v1 = p1v1 + (splat<1>(rw1) * d1);
+ p1v2 = p1v2 + (splat<2>(rw1) * d1);
+
+ p2v0 = p2v0 + (splat<0>(rw2) * d2);
+ p2v1 = p2v1 + (splat<1>(rw2) * d2);
+ p2v2 = p2v2 + (splat<2>(rw2) * d2);
+
+ p3v0 = p3v0 + (splat<0>(rw3) * d3);
+ p3v1 = p3v1 + (splat<1>(rw3) * d3);
+ p3v2 = p3v2 + (splat<2>(rw3) * d3);
+
+ // write back particles
+ storeAligned(particles, vpIt[0] * sizeof(PxVec4), p0v0);
+ storeAligned(particles, vpIt[1] * sizeof(PxVec4), p0v1);
+ storeAligned(particles, vpIt[2] * sizeof(PxVec4), p0v2);
+
+ storeAligned(particles, vpIt[4] * sizeof(PxVec4), p1v0);
+ storeAligned(particles, vpIt[5] * sizeof(PxVec4), p1v1);
+ storeAligned(particles, vpIt[6] * sizeof(PxVec4), p1v2);
+
+ storeAligned(particles, vpIt[8] * sizeof(PxVec4), p2v0);
+ storeAligned(particles, vpIt[9] * sizeof(PxVec4), p2v1);
+ storeAligned(particles, vpIt[10] * sizeof(PxVec4), p2v2);
+
+ storeAligned(particles, vpIt[12] * sizeof(PxVec4), p3v0);
+ storeAligned(particles, vpIt[13] * sizeof(PxVec4), p3v1);
+ storeAligned(particles, vpIt[14] * sizeof(PxVec4), p3v2);
+
+#if PX_PROFILE || PX_DEBUG
+ mNumCollisions += horizontalSum(accum.mNumCollisions);
+#endif
+ }
+}
+
+// Continuous (swept) collision of all regular particles against cones and
+// spheres. Same structure as collideParticles(), but passes the previous
+// positions to the two-argument collideCones/collideSpheres overloads so a
+// time-of-impact test runs in addition to the discrete push-out.
+template <typename Simd4f>
+void cloth::SwCollision<Simd4f>::collideContinuousParticles()
+{
+ Simd4f curPos[4];
+ Simd4f prevPos[4];
+
+ const bool massScalingEnabled = mClothData.mCollisionMassScale > 0.0f;
+ const Simd4f massScale = simd4f(mClothData.mCollisionMassScale);
+
+ const bool frictionEnabled = mClothData.mFrictionScale > 0.0f;
+ const Simd4f frictionScale = simd4f(mClothData.mFrictionScale);
+
+ // 4 particles * 4 floats per iteration (16 floats)
+ float* __restrict prevIt = mClothData.mPrevParticles;
+ float* __restrict curIt = mClothData.mCurParticles;
+ float* __restrict curEnd = curIt + mClothData.mNumParticles * 4;
+
+ for (; curIt < curEnd; curIt += 16, prevIt += 16)
+ {
+ // AoS -> SoA for both endpoints of the particle trajectories
+ prevPos[0] = loadAligned(prevIt, 0);
+ prevPos[1] = loadAligned(prevIt, 16);
+ prevPos[2] = loadAligned(prevIt, 32);
+ prevPos[3] = loadAligned(prevIt, 48);
+ transpose(prevPos[0], prevPos[1], prevPos[2], prevPos[3]);
+
+ curPos[0] = loadAligned(curIt, 0);
+ curPos[1] = loadAligned(curIt, 16);
+ curPos[2] = loadAligned(curIt, 32);
+ curPos[3] = loadAligned(curIt, 48);
+ transpose(curPos[0], curPos[1], curPos[2], curPos[3]);
+
+ ImpulseAccumulator accum;
+ Simd4i sphereMask = collideCones(prevPos, curPos, accum);
+ collideSpheres(sphereMask, prevPos, curPos, accum);
+
+ // early out for lanes without any collision
+ Simd4f mask;
+ if (!anyGreater(accum.mNumCollisions, gSimd4fEpsilon, mask))
+ continue;
+
+ Simd4f invNumCollisions = recip(accum.mNumCollisions);
+
+ if (frictionEnabled)
+ {
+ Simd4f frictionImpulse[3];
+ calculateFrictionImpulse(accum.mDeltaX, accum.mDeltaY, accum.mDeltaZ, accum.mVelX, accum.mVelY, accum.mVelZ,
+ curPos, prevPos, invNumCollisions, frictionScale, mask, frictionImpulse);
+
+ // friction is applied by shifting previous positions (verlet-style)
+ prevPos[0] = prevPos[0] - frictionImpulse[0];
+ prevPos[1] = prevPos[1] - frictionImpulse[1];
+ prevPos[2] = prevPos[2] - frictionImpulse[2];
+
+ transpose(prevPos[0], prevPos[1], prevPos[2], prevPos[3]);
+ storeAligned(prevIt, 0, prevPos[0]);
+ storeAligned(prevIt, 16, prevPos[1]);
+ storeAligned(prevIt, 32, prevPos[2]);
+ storeAligned(prevIt, 48, prevPos[3]);
+ }
+
+ if (massScalingEnabled)
+ {
+ // calculate the inverse mass scale based on the collision impulse magnitude
+ Simd4f dSq = invNumCollisions * invNumCollisions *
+ (accum.mDeltaX * accum.mDeltaX + accum.mDeltaY * accum.mDeltaY + accum.mDeltaZ * accum.mDeltaZ);
+
+ Simd4f weightScale = recip(gSimd4fOne + massScale * dSq);
+
+ // scale invmass
+ curPos[3] = select(mask, curPos[3] * weightScale, curPos[3]);
+ }
+
+ // apply averaged collision impulse to positions
+ curPos[0] = curPos[0] + accum.mDeltaX * invNumCollisions;
+ curPos[1] = curPos[1] + accum.mDeltaY * invNumCollisions;
+ curPos[2] = curPos[2] + accum.mDeltaZ * invNumCollisions;
+
+ transpose(curPos[0], curPos[1], curPos[2], curPos[3]);
+ storeAligned(curIt, 0, curPos[0]);
+ storeAligned(curIt, 16, curPos[1]);
+ storeAligned(curIt, 32, curPos[2]);
+ storeAligned(curIt, 48, curPos[3]);
+
+#if PX_PROFILE || PX_DEBUG
+ mNumCollisions += horizontalSum(accum.mNumCollisions);
+#endif
+ }
+}
+
+// Collides all particles against convex shapes (intersections of planes).
+// Planes are interpolated toward their targets across iterations (except on
+// the final iteration, which uses the targets directly), expanded into a
+// scratch buffer, and tested per particle block by the overload below.
+template <typename Simd4f>
+void cloth::SwCollision<Simd4f>::collideConvexes(const IterationState<Simd4f>& state)
+{
+ if (!mClothData.mNumConvexes)
+ return;
+
+ // times 2 for plane equation result buffer
+ Simd4f* planes = static_cast<Simd4f*>(mAllocator.allocate(sizeof(Simd4f) * mClothData.mNumPlanes * 2));
+
+ const Simd4f* targetPlanes = reinterpret_cast<const Simd4f*>(mClothData.mTargetCollisionPlanes);
+
+ // generate plane collision data
+ if (state.mRemainingIterations != 1)
+ {
+ // interpolate planes
+ LerpIterator<Simd4f, const Simd4f*> planeIter(reinterpret_cast<const Simd4f*>(mClothData.mStartCollisionPlanes),
+ targetPlanes, state.getCurrentAlpha());
+
+ // todo: normalize plane equations
+ generatePlanes(planes, planeIter, mClothData.mNumPlanes);
+ }
+ else
+ {
+ // otherwise use the target planes directly
+ generatePlanes(planes, targetPlanes, mClothData.mNumPlanes);
+ }
+
+ Simd4f curPos[4], prevPos[4];
+
+ const bool frictionEnabled = mClothData.mFrictionScale > 0.0f;
+ const Simd4f frictionScale = simd4f(mClothData.mFrictionScale);
+
+ // 4 particles * 4 floats per iteration (16 floats)
+ float* __restrict curIt = mClothData.mCurParticles;
+ float* __restrict curEnd = curIt + mClothData.mNumParticles * 4;
+ float* __restrict prevIt = mClothData.mPrevParticles;
+ for (; curIt < curEnd; curIt += 16, prevIt += 16)
+ {
+ curPos[0] = loadAligned(curIt, 0);
+ curPos[1] = loadAligned(curIt, 16);
+ curPos[2] = loadAligned(curIt, 32);
+ curPos[3] = loadAligned(curIt, 48);
+ transpose(curPos[0], curPos[1], curPos[2], curPos[3]);
+
+ ImpulseAccumulator accum;
+ collideConvexes(planes, curPos, accum);
+
+ // early out for lanes without any collision
+ Simd4f mask;
+ if (!anyGreater(accum.mNumCollisions, gSimd4fEpsilon, mask))
+ continue;
+
+ Simd4f invNumCollisions = recip(accum.mNumCollisions);
+
+ if (frictionEnabled)
+ {
+ prevPos[0] = loadAligned(prevIt, 0);
+ prevPos[1] = loadAligned(prevIt, 16);
+ prevPos[2] = loadAligned(prevIt, 32);
+ prevPos[3] = loadAligned(prevIt, 48);
+ transpose(prevPos[0], prevPos[1], prevPos[2], prevPos[3]);
+
+ Simd4f frictionImpulse[3];
+ calculateFrictionImpulse(accum.mDeltaX, accum.mDeltaY, accum.mDeltaZ, accum.mVelX, accum.mVelY, accum.mVelZ,
+ curPos, prevPos, invNumCollisions, frictionScale, mask, frictionImpulse);
+
+ // friction is applied by shifting previous positions (verlet-style)
+ prevPos[0] = prevPos[0] - frictionImpulse[0];
+ prevPos[1] = prevPos[1] - frictionImpulse[1];
+ prevPos[2] = prevPos[2] - frictionImpulse[2];
+
+ transpose(prevPos[0], prevPos[1], prevPos[2], prevPos[3]);
+ storeAligned(prevIt, 0, prevPos[0]);
+ storeAligned(prevIt, 16, prevPos[1]);
+ storeAligned(prevIt, 32, prevPos[2]);
+ storeAligned(prevIt, 48, prevPos[3]);
+ }
+
+ // apply averaged collision impulse to positions
+ curPos[0] = curPos[0] + accum.mDeltaX * invNumCollisions;
+ curPos[1] = curPos[1] + accum.mDeltaY * invNumCollisions;
+ curPos[2] = curPos[2] + accum.mDeltaZ * invNumCollisions;
+
+ transpose(curPos[0], curPos[1], curPos[2], curPos[3]);
+ storeAligned(curIt, 0, curPos[0]);
+ storeAligned(curIt, 16, curPos[1]);
+ storeAligned(curIt, 32, curPos[2]);
+ storeAligned(curIt, 48, curPos[3]);
+
+#if PX_PROFILE || PX_DEBUG
+ mNumCollisions += horizontalSum(accum.mNumCollisions);
+#endif
+ }
+
+ mAllocator.deallocate(planes);
+}
+
+// Narrow phase for convex collision volumes. curPos[0..2] hold the x/y/z
+// components of 4 particles (SoA after transpose). A convex is a bitmask over
+// the shared plane array; a particle is inside the convex iff it lies behind
+// every plane of the mask. Penetrating particles accumulate an impulse along
+// the plane of least penetration (largest, i.e. least negative, distance).
+template <typename Simd4f>
+void cloth::SwCollision<Simd4f>::collideConvexes(const Simd4f* __restrict planes, Simd4f* __restrict curPos,
+ ImpulseAccumulator& accum)
+{
+ // per-particle lane bitmask: bit i is set if the particle is behind plane i
+ Simd4i result = gSimd4iZero;
+ Simd4i mask4 = gSimd4iOne;
+
+ // evaluate each plane equation for all 4 particles; distances are cached in
+ // the scratch half of the buffer at planes + mNumPlanes (caller allocated
+ // the buffer times 2 for exactly this purpose)
+ const Simd4f* __restrict pIt, *pEnd = planes + mClothData.mNumPlanes;
+ Simd4f* __restrict dIt = const_cast<Simd4f*>(pEnd);
+ for (pIt = planes; pIt != pEnd; ++pIt, ++dIt)
+ {
+ *dIt = splat<3>(*pIt) + curPos[2] * splat<2>(*pIt) + curPos[1] * splat<1>(*pIt) + curPos[0] * splat<0>(*pIt);
+ result = result | (mask4 & simd4i(*dIt < gSimd4fZero));
+ mask4 = mask4 << 1; // todo: shift by Simd4i on consoles
+ }
+
+ // early out: no particle is behind any plane at all
+ if (allEqual(result, gSimd4iZero))
+ return;
+
+ const uint32_t* __restrict cIt = mClothData.mConvexMasks;
+ const uint32_t* __restrict cEnd = cIt + mClothData.mNumConvexes;
+ for (; cIt != cEnd; ++cIt)
+ {
+ uint32_t mask = *cIt;
+ mask4 = simd4i(int(mask));
+ // skip this convex unless some particle is behind *all* of its planes;
+ // note anyEqual also writes the per-lane comparison result back into
+ // mask4, which is later used to select the penetrating lanes
+ if (!anyEqual(mask4 & result, mask4, mask4))
+ continue;
+
+ // walk the set bits of mask (mask & ~(mask - 1) isolates the lowest bit),
+ // tracking the plane with the maximum cached distance — the closest face
+ // through which to push the particle out
+ uint32_t test = mask - 1;
+ uint32_t planeIndex = findBitSet(mask & ~test);
+ Simd4f plane = planes[planeIndex];
+ Simd4f planeX = splat<0>(plane);
+ Simd4f planeY = splat<1>(plane);
+ Simd4f planeZ = splat<2>(plane);
+ Simd4f planeD = pEnd[planeIndex]; // distances cached by the loop above
+ while (mask &= test)
+ {
+ test = mask - 1;
+ planeIndex = findBitSet(mask & ~test);
+ plane = planes[planeIndex];
+ Simd4f dist = pEnd[planeIndex];
+ Simd4f closer = dist > planeD;
+ planeX = select(closer, splat<0>(plane), planeX);
+ planeY = select(closer, splat<1>(plane), planeY);
+ planeZ = select(closer, splat<2>(plane), planeZ);
+ planeD = max(dist, planeD);
+ }
+
+ // simd4f(mask4) selects only the lanes that are inside this convex
+ accum.subtract(planeX, planeY, planeZ, planeD, simd4f(mask4));
+ }
+}
+
+// Per-iteration entry point for triangle mesh collision. Builds (and, except
+// on the final iteration, interpolates) the triangle collision data, then
+// resolves each group of 4 particles against all triangles and applies the
+// averaged positional impulses in place.
+template <typename Simd4f>
+void cloth::SwCollision<Simd4f>::collideTriangles(const IterationState<Simd4f>& state)
+{
+ if (!mClothData.mNumCollisionTriangles)
+ return;
+
+ // temporary per-triangle precomputed data (edges, normal, determinant)
+ TriangleData* triangles =
+ static_cast<TriangleData*>(mAllocator.allocate(sizeof(TriangleData) * mClothData.mNumCollisionTriangles));
+
+ UnalignedIterator<Simd4f, 3> targetTriangles(mClothData.mTargetCollisionTriangles);
+
+ // generate triangle collision data
+ if (state.mRemainingIterations != 1)
+ {
+ // interpolate triangles between start and target pose for this iteration
+ LerpIterator<Simd4f, UnalignedIterator<Simd4f, 3> > triangleIter(mClothData.mStartCollisionTriangles,
+ targetTriangles, state.getCurrentAlpha());
+
+ generateTriangles<Simd4f>(triangles, triangleIter, mClothData.mNumCollisionTriangles);
+ }
+ else
+ {
+ // otherwise use the target triangles directly
+ generateTriangles<Simd4f>(triangles, targetTriangles, mClothData.mNumCollisionTriangles);
+ }
+
+ Simd4f positions[4];
+
+ // particles are stored xyzw-interleaved; load 4 at a time and transpose to
+ // SoA so each register holds one component of 4 particles
+ float* __restrict pIt = mClothData.mCurParticles;
+ float* __restrict pEnd = pIt + mClothData.mNumParticles * 4;
+ for (; pIt < pEnd; pIt += 16)
+ {
+ positions[0] = loadAligned(pIt, 0);
+ positions[1] = loadAligned(pIt, 16);
+ positions[2] = loadAligned(pIt, 32);
+ positions[3] = loadAligned(pIt, 48);
+ transpose(positions[0], positions[1], positions[2], positions[3]);
+
+ ImpulseAccumulator accum;
+ collideTriangles(triangles, positions, accum);
+
+ Simd4f mask;
+ if (!anyGreater(accum.mNumCollisions, gSimd4fEpsilon, mask))
+ continue; // none of these 4 particles collided
+
+ // average the accumulated deltas over the number of collisions per lane
+ Simd4f invNumCollisions = recip(accum.mNumCollisions);
+
+ positions[0] = positions[0] + accum.mDeltaX * invNumCollisions;
+ positions[1] = positions[1] + accum.mDeltaY * invNumCollisions;
+ positions[2] = positions[2] + accum.mDeltaZ * invNumCollisions;
+
+ // transpose back to interleaved layout and write out
+ transpose(positions[0], positions[1], positions[2], positions[3]);
+ storeAligned(pIt, 0, positions[0]);
+ storeAligned(pIt, 16, positions[1]);
+ storeAligned(pIt, 32, positions[2]);
+ storeAligned(pIt, 48, positions[3]);
+
+#if PX_PROFILE || PX_DEBUG
+ mNumCollisions += horizontalSum(accum.mNumCollisions);
+#endif
+ }
+
+ mAllocator.deallocate(triangles);
+}
+
+// Narrow phase for triangle collision. For 4 particles at once (curPos[0..2]
+// = x/y/z lanes), finds the closest point on each triangle via clamped
+// barycentric coordinates, keeps the nearest triangle per lane, and
+// accumulates an impulse along that triangle's normal for lanes that are on
+// the back side of it (deltaDotNormal < 0).
+template <typename Simd4f>
+void cloth::SwCollision<Simd4f>::collideTriangles(const TriangleData* __restrict triangles, Simd4f* __restrict curPos,
+ ImpulseAccumulator& accum)
+{
+ // per-lane nearest triangle: normal and signed distance along the normal
+ Simd4f normalX, normalY, normalZ, normalD;
+ normalX = normalY = normalZ = normalD = gSimd4fZero;
+ Simd4f minSqrLength = gSimd4fFloatMax;
+
+ const TriangleData* __restrict tIt, *tEnd = triangles + mClothData.mNumCollisionTriangles;
+ for (tIt = triangles; tIt != tEnd; ++tIt)
+ {
+ // TriangleData packs auxiliary scalars in the w components (see splat<3>
+ // uses below): base.w = edge0 dot edge1, edge0.w/edge1.w = squared edge
+ // lengths, and 'aux' holds precomputed reciprocals/determinant
+ Simd4f base = loadAligned(&tIt->base.x);
+ Simd4f edge0 = loadAligned(&tIt->edge0.x);
+ Simd4f edge1 = loadAligned(&tIt->edge1.x);
+ Simd4f normal = loadAligned(&tIt->normal.x);
+ Simd4f aux = loadAligned(&tIt->det);
+
+ // vector from triangle base to each particle
+ Simd4f dx = curPos[0] - splat<0>(base);
+ Simd4f dy = curPos[1] - splat<1>(base);
+ Simd4f dz = curPos[2] - splat<2>(base);
+
+ Simd4f e0x = splat<0>(edge0);
+ Simd4f e0y = splat<1>(edge0);
+ Simd4f e0z = splat<2>(edge0);
+
+ Simd4f e1x = splat<0>(edge1);
+ Simd4f e1y = splat<1>(edge1);
+ Simd4f e1z = splat<2>(edge1);
+
+ Simd4f nx = splat<0>(normal);
+ Simd4f ny = splat<1>(normal);
+ Simd4f nz = splat<2>(normal);
+
+ Simd4f deltaDotEdge0 = dx * e0x + dy * e0y + dz * e0z;
+ Simd4f deltaDotEdge1 = dx * e1x + dy * e1y + dz * e1z;
+ Simd4f deltaDotNormal = dx * nx + dy * ny + dz * nz;
+
+ Simd4f edge0DotEdge1 = splat<3>(base);
+ Simd4f edge0SqrLength = splat<3>(edge0);
+ Simd4f edge1SqrLength = splat<3>(edge1);
+
+ // unnormalized barycentric coordinates of the projected point
+ Simd4f s = edge1SqrLength * deltaDotEdge0 - edge0DotEdge1 * deltaDotEdge1;
+ Simd4f t = edge0SqrLength * deltaDotEdge1 - edge0DotEdge1 * deltaDotEdge0;
+
+ Simd4f sPositive = s > gSimd4fZero;
+ Simd4f tPositive = t > gSimd4fZero;
+
+ Simd4f det = splat<0>(aux);
+
+ // if one coordinate is negative, project onto the opposite edge instead
+ // (aux.z/aux.w look like precomputed 1/edgeSqrLength factors — the exact
+ // packing is defined by generateTriangles, outside this view)
+ s = select(tPositive, s * det, deltaDotEdge0 * splat<2>(aux));
+ t = select(sPositive, t * det, deltaDotEdge1 * splat<3>(aux));
+
+ // region beyond the far edge: recompute s along the edge1-edge0 diagonal
+ Simd4f clamp = gSimd4fOne < s + t;
+ Simd4f numerator = edge1SqrLength - edge0DotEdge1 + deltaDotEdge0 - deltaDotEdge1;
+
+ s = select(clamp, numerator * splat<1>(aux), s);
+
+ // clamp barycentrics to the triangle
+ s = max(gSimd4fZero, min(gSimd4fOne, s));
+ t = max(gSimd4fZero, min(gSimd4fOne - s, t));
+
+ // vector from the closest point on the triangle to the particle
+ dx = dx - e0x * s - e1x * t;
+ dy = dy - e0y * s - e1y * t;
+ dz = dz - e0z * s - e1z * t;
+
+ Simd4f sqrLength = dx * dx + dy * dy + dz * dz;
+
+ // slightly increase distance for colliding triangles
+ Simd4f slack = (gSimd4fZero > deltaDotNormal) & simd4f(1e-4f);
+ sqrLength = sqrLength + sqrLength * slack;
+
+ // keep nearest triangle per lane
+ Simd4f mask = sqrLength < minSqrLength;
+
+ normalX = select(mask, nx, normalX);
+ normalY = select(mask, ny, normalY);
+ normalZ = select(mask, nz, normalZ);
+ normalD = select(mask, deltaDotNormal, normalD);
+
+ minSqrLength = min(sqrLength, minSqrLength);
+ }
+
+ // only lanes behind their nearest triangle (normalD < 0) receive an impulse
+ Simd4f mask;
+ if (!anyGreater(gSimd4fZero, normalD, mask))
+ return;
+
+ accum.subtract(normalX, normalY, normalZ, normalD, mask);
+}
+
+// explicit template instantiation
+#if NV_SIMD_SIMD
+template class cloth::SwCollision<Simd4f>;
+#endif
+#if NV_SIMD_SCALAR
+template class cloth::SwCollision<Scalar4f>;
+#endif
+
+/*
+namespace
+{
+ using namespace cloth;
+
+ int test()
+ {
+ Simd4f vertices[] = {
+ simd4f(0.0f, 0.0f, 0.0f, 0.0f),
+ simd4f(0.1f, 0.0f, 0.0f, 0.0f),
+ simd4f(0.0f, 0.1f, 0.0f, 0.0f)
+ };
+ TriangleData triangle;
+ generateTriangles<Simd4f>(&triangle, &*vertices, 1);
+
+ char buffer[1000];
+ SwKernelAllocator alloc(buffer, 1000);
+
+ SwClothData* cloth = static_cast<SwClothData*>(malloc(sizeof(SwClothData)));
+ memset(cloth, 0, sizeof(SwClothData));
+ cloth->mNumTriangles = 1;
+
+ SwCollision<Simd4f> collision(*cloth, alloc);
+ SwCollision<Simd4f>::ImpulseAccumulator accum;
+
+ Simd4f particles[4] = {};
+ for (float y = -0.1f; y < 0.0f; y += 0.2f)
+ {
+ for (float x = -0.1f; x < 0.0f; x += 0.2f)
+ {
+ particles[0] = simd4f(x);
+ particles[1] = simd4f(y);
+ particles[2] = simd4f(-1.0f);
+
+ collision.collideTriangles(&triangle, particles, accum);
+ }
+ }
+
+ return 0;
+ }
+
+ static int blah = test();
+}
+*/
diff --git a/NvCloth/src/SwCollision.h b/NvCloth/src/SwCollision.h
new file mode 100644
index 0000000..8d5746c
--- /dev/null
+++ b/NvCloth/src/SwCollision.h
@@ -0,0 +1,139 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2017 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#pragma once
+
+#include <foundation/Px.h>
+#include "StackAllocator.h"
+#include "Simd.h"
+
+namespace nv
+{
+namespace cloth
+{
+
+class SwCloth;
+struct SwClothData;
+template <typename>
+struct IterationState;
+struct IndexPair;
+struct SphereData;
+struct ConeData;
+struct TriangleData;
+
+typedef StackAllocator<16> SwKernelAllocator;
+
+/**
+ Collision handler for SwSolver. Resolves particle collisions against
+ spheres, cones (capsules), convexes (plane sets) and triangle meshes,
+ using a small grid acceleration structure for spheres/cones.
+ */
+template <typename Simd4f>
+class SwCollision
+{
+ typedef typename Simd4fToSimd4i<Simd4f>::Type Simd4i;
+
+ public:
+ // per-particle bitmasks of candidate cones/spheres from the grid query
+ struct ShapeMask
+ {
+ Simd4i mCones;
+ Simd4i mSpheres;
+
+ ShapeMask& operator = (const ShapeMask&);
+ ShapeMask& operator &= (const ShapeMask&);
+ };
+
+ // sphere/cone collision shapes for one frame (prev or current)
+ struct CollisionData
+ {
+ CollisionData();
+ SphereData* mSpheres;
+ ConeData* mCones;
+ };
+
+ struct ImpulseAccumulator;
+
+ public:
+ SwCollision(SwClothData& clothData, SwKernelAllocator& alloc);
+ ~SwCollision();
+
+ // runs collision for one solver iteration
+ void operator()(const IterationState<Simd4f>& state);
+
+ static size_t estimateTemporaryMemory(const SwCloth& cloth);
+ static size_t estimatePersistentMemory(const SwCloth& cloth);
+
+ private:
+ SwCollision& operator = (const SwCollision&); // not implemented
+ void allocate(CollisionData&);
+ void deallocate(const CollisionData&);
+
+ void computeBounds();
+
+ // acceleration structure construction
+ void buildSphereAcceleration(const SphereData*);
+ void buildConeAcceleration();
+ static void mergeAcceleration(uint32_t*);
+ bool buildAcceleration();
+
+ static ShapeMask getShapeMask(const Simd4f&, const Simd4i*, const Simd4i*);
+ ShapeMask getShapeMask(const Simd4f*) const;
+ ShapeMask getShapeMask(const Simd4f*, const Simd4f*) const;
+
+ void collideSpheres(const Simd4i&, const Simd4f*, ImpulseAccumulator&) const;
+ Simd4i collideCones(const Simd4f*, ImpulseAccumulator&) const;
+
+ void collideSpheres(const Simd4i&, const Simd4f*, Simd4f*, ImpulseAccumulator&) const;
+ Simd4i collideCones(const Simd4f*, Simd4f*, ImpulseAccumulator&) const;
+
+ void collideParticles();
+ void collideVirtualParticles();
+ void collideContinuousParticles();
+
+ void collideConvexes(const IterationState<Simd4f>&);
+ void collideConvexes(const Simd4f*, Simd4f*, ImpulseAccumulator&);
+
+ void collideTriangles(const IterationState<Simd4f>&);
+ void collideTriangles(const TriangleData*, Simd4f*, ImpulseAccumulator&);
+
+ public:
+ // acceleration structure
+ static const uint32_t sGridSize = 8;
+ // bitmask grids over 6 axis-aligned slabs (packed 4 cells per Simd4i)
+ Simd4i mSphereGrid[6 * sGridSize / 4];
+ Simd4i mConeGrid[6 * sGridSize / 4];
+ // world-to-grid transform: cell = position * scale + bias
+ Simd4f mGridScale, mGridBias;
+
+ // shapes at the previous and current collision state (for interpolation)
+ CollisionData mPrevData;
+ CollisionData mCurData;
+
+ SwClothData& mClothData;
+ SwKernelAllocator& mAllocator;
+
+ // collision counter, only accumulated in profile/debug builds
+ uint32_t mNumCollisions;
+
+ static const Simd4f sSkeletonWidth;
+};
+
+} // namespace cloth
+} // namespace nv
diff --git a/NvCloth/src/SwCollisionHelpers.h b/NvCloth/src/SwCollisionHelpers.h
new file mode 100644
index 0000000..9168a3b
--- /dev/null
+++ b/NvCloth/src/SwCollisionHelpers.h
@@ -0,0 +1,84 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2017 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#pragma once
+
+#include "Simd.h"
+
+// platform specific helpers
+
+namespace nv
+{
+namespace cloth
+{
+
+// returns the index of the lowest set bit; mask must be non-zero
+inline uint32_t findBitSet(uint32_t mask);
+
+// intFloor(-1.0f) returns -2 on SSE and NEON!
+inline Simd4i intFloor(const Simd4f& v);
+
+// OR of all four lanes replicated into every lane
+inline Simd4i horizontalOr(const Simd4i& mask);
+
+// 4-lane table lookup: constructed from a vector of indices, operator()
+// gathers the corresponding elements from a Simd4i array. Platform-specific
+// members below implement the permute; mOutOfRange presumably masks lanes
+// whose index is outside the table — confirm in the per-platform headers.
+template <typename>
+struct Gather;
+
+#if NV_SIMD_SIMD
+template <>
+struct Gather<Simd4i>
+{
+ inline Gather(const Simd4i& index);
+ inline Simd4i operator()(const Simd4i*) const;
+
+#if NV_SIMD_SSE2
+ Simd4i mSelectQ, mSelectD, mSelectW;
+ static const Simd4i sIntSignBit;
+ static const Simd4i sSignedMask;
+#elif NV_SIMD_NEON
+ Simd4i mPermute;
+ static const Simd4i sPack;
+ static const Simd4i sOffset;
+ static const Simd4i sShift;
+ static const Simd4i sMask;
+#endif
+ Simd4i mOutOfRange;
+};
+#endif
+
+} // namespace cloth
+} // namespace nv
+
+#if NV_SIMD_SSE2
+#include "sse2/SwCollisionHelpers.h"
+#elif NV_SIMD_NEON
+#include "neon/SwCollisionHelpers.h"
+#endif
+
+#if NV_SIMD_SCALAR
+#include "scalar/SwCollisionHelpers.h"
+#endif
diff --git a/NvCloth/src/SwFabric.cpp b/NvCloth/src/SwFabric.cpp
new file mode 100644
index 0000000..6309c80
--- /dev/null
+++ b/NvCloth/src/SwFabric.cpp
@@ -0,0 +1,199 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2017 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#include "foundation/PxAssert.h"
+#include "SwFabric.h"
+#include "SwFactory.h"
+#include "PsSort.h"
+#include "limits.h" // for USHRT_MAX
+#include <algorithm>
+#include "PsUtilities.h"
+
+using namespace nv;
+using namespace physx;
+
+// Tether constraint: anchors particle mAnchor at maximum distance mLength.
+cloth::SwTether::SwTether(uint16_t anchor, float length) : mAnchor(anchor), mLength(length)
+{
+}
+
+// Builds the shared fabric data: copies phases/sets/restvalues/indices while
+// padding each constraint set to a multiple of the SIMD width, converts
+// 32-bit indices to 16-bit, and copies tether and triangle data. Registers
+// itself with the owning factory.
+cloth::SwFabric::SwFabric(SwFactory& factory, uint32_t numParticles, Range<const uint32_t> phaseIndices,
+ Range<const uint32_t> sets, Range<const float> restvalues, Range<const float> stiffnessValues,
+ Range<const uint32_t> indices, Range<const uint32_t> anchors, Range<const float> tetherLengths,
+ Range<const uint32_t> triangles, uint32_t id)
+: mFactory(factory)
+, mNumParticles(numParticles)
+, mTetherLengthScale(1.0f), mId(id)
+{
+ // should no longer be prefixed with 0
+ NV_CLOTH_ASSERT(sets.front() != 0);
+
+#if PX_WINDOWS_FAMILY
+ const uint32_t kSimdWidth = 8; // avx
+#else
+ const uint32_t kSimdWidth = 4;
+#endif
+
+ // consistency check
+ NV_CLOTH_ASSERT(sets.back() == restvalues.size());
+ NV_CLOTH_ASSERT(restvalues.size() * 2 == indices.size());
+ NV_CLOTH_ASSERT(restvalues.size() == stiffnessValues.size() || stiffnessValues.size() == 0);
+ NV_CLOTH_ASSERT(mNumParticles > *shdfnd::maxElement(indices.begin(), indices.end()));
+ // indices are stored as uint16_t; padding may reference particles up to
+ // mNumParticles + kSimdWidth - 1, so that range must fit too
+ NV_CLOTH_ASSERT(mNumParticles + kSimdWidth - 1 <= USHRT_MAX);
+
+ mPhases.assign(phaseIndices.begin(), phaseIndices.end());
+ mSets.reserve(sets.size() + 1);
+ mSets.pushBack(0); // prefix with 0
+
+ // remember the unpadded count so the getters can report original sizes
+ mOriginalNumRestvalues = uint32_t(restvalues.size());
+
+ // padd indices for SIMD
+ const uint32_t* iBegin = indices.begin(), *iIt = iBegin;
+ const float* rBegin = restvalues.begin(), *rIt = rBegin;
+ const float* stBegin = stiffnessValues.begin(), *stIt = stBegin;
+ const uint32_t* sIt, *sEnd = sets.end();
+ for (sIt = sets.begin(); sIt != sEnd; ++sIt)
+ {
+ // each set entry is the exclusive prefix count of constraints
+ const float* rEnd = rBegin + *sIt;
+ const float* stEnd = stBegin + *sIt;
+ const uint32_t* iEnd = iBegin + *sIt * 2;
+ uint32_t numConstraints = uint32_t(rEnd - rIt);
+
+ for (; rIt != rEnd; ++rIt)
+ {
+ mRestvalues.pushBack(*rIt);
+ }
+ if (!stiffnessValues.empty())
+ {
+ for (; stIt != stEnd; ++stIt)
+ {
+ mStiffnessValues.pushBack(*stIt);
+ }
+ }
+ for (; iIt != iEnd; ++iIt)
+ mIndices.pushBack(uint16_t(*iIt));
+
+ // add dummy indices to make multiple of 4
+ // (dummy constraints use restvalue -FLT_MAX and self-paired indices
+ // referencing padding particles just past the real particle range)
+ for (; numConstraints &= kSimdWidth - 1; ++numConstraints)
+ {
+ mRestvalues.pushBack(-FLT_MAX);
+ if (!stiffnessValues.empty())
+ mStiffnessValues.pushBack(-FLT_MAX);
+ uint32_t index = mNumParticles + numConstraints - 1;
+ mIndices.pushBack(uint16_t(index));
+ mIndices.pushBack(uint16_t(index));
+ }
+
+ mSets.pushBack(uint32_t(mRestvalues.size()));
+ }
+
+ // trim overallocations
+ RestvalueContainer(mRestvalues.begin(), mRestvalues.end()).swap(mRestvalues);
+ RestvalueContainer(mStiffnessValues.begin(), mStiffnessValues.end()).swap(mStiffnessValues);
+ Vector<uint16_t>::Type(mIndices.begin(), mIndices.end()).swap(mIndices);
+
+ // tethers
+ NV_CLOTH_ASSERT(anchors.size() == tetherLengths.size());
+
+ // pad to allow for direct 16 byte (unaligned) loads
+ mTethers.reserve(anchors.size() + 2);
+ for (; !anchors.empty(); anchors.popFront(), tetherLengths.popFront())
+ mTethers.pushBack(SwTether(uint16_t(anchors.front()), tetherLengths.front()));
+
+ // triangles
+ mTriangles.reserve(triangles.size());
+ const uint32_t* iEnd = triangles.end();
+ for (iIt = triangles.begin(); iIt != iEnd; ++iIt)
+ mTriangles.pushBack(uint16_t(*iIt));
+
+ // register with the factory; the destructor removes the entry again
+ mFactory.mFabrics.pushBack(this);
+}
+
+// Unregisters this fabric from the factory's fabric list (O(1) swap-remove).
+cloth::SwFabric::~SwFabric()
+{
+ Vector<SwFabric*>::Type::Iterator fIt = mFactory.mFabrics.find(this);
+ NV_CLOTH_ASSERT(fIt != mFactory.mFabrics.end());
+ mFactory.mFabrics.replaceWithLast(fIt);
+}
+
+// Returns the factory that owns this fabric.
+cloth::Factory& cloth::SwFabric::getFactory() const
+{
+ return mFactory;
+}
+
+// Number of constraint phases (one set index per phase).
+uint32_t cloth::SwFabric::getNumPhases() const
+{
+ return uint32_t(mPhases.size());
+}
+
+// Number of restvalues as originally supplied, excluding SIMD padding.
+uint32_t cloth::SwFabric::getNumRestvalues() const
+{
+ return mOriginalNumRestvalues;
+}
+
+// Number of per-constraint stiffness values, excluding SIMD padding. An
+// empty array means per-phase stiffness is used, so zero is reported.
+uint32_t cloth::SwFabric::getNumStiffnessValues() const
+{
+ if (mStiffnessValues.empty())
+ return 0;
+ return mOriginalNumRestvalues;
+}
+
+// Number of constraint sets; mSets carries a 0 prefix, hence the minus one.
+uint32_t cloth::SwFabric::getNumSets() const
+{
+ return uint32_t(mSets.size() - 1);
+}
+
+// Number of particle indices (two per constraint), excluding SIMD padding.
+uint32_t cloth::SwFabric::getNumIndices() const
+{
+ return 2 * mOriginalNumRestvalues;
+}
+
+// Number of particles this fabric was built for.
+uint32_t cloth::SwFabric::getNumParticles() const
+{
+ return mNumParticles;
+}
+
+// Number of tether constraints.
+uint32_t cloth::SwFabric::getNumTethers() const
+{
+ return uint32_t(mTethers.size());
+}
+
+// Number of triangles; mTriangles stores three indices per triangle.
+uint32_t cloth::SwFabric::getNumTriangles() const
+{
+ return uint32_t(mTriangles.size()) / 3;
+}
+
+// Multiplies every restvalue (including padding entries) by scale in place.
+void cloth::SwFabric::scaleRestvalues(float scale)
+{
+ RestvalueContainer::Iterator rIt, rEnd = mRestvalues.end();
+ for (rIt = mRestvalues.begin(); rIt != rEnd; ++rIt)
+ *rIt *= scale;
+}
+
+// Tether lengths are scaled lazily: the factor accumulates here and is
+// applied when the tethers are consumed, rather than rewriting mTethers.
+void cloth::SwFabric::scaleTetherLengths(float scale)
+{
+ mTetherLengthScale *= scale;
+}
diff --git a/NvCloth/src/SwFabric.h b/NvCloth/src/SwFabric.h
new file mode 100644
index 0000000..29d0921
--- /dev/null
+++ b/NvCloth/src/SwFabric.h
@@ -0,0 +1,108 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2017 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#pragma once
+
+#include "NvCloth/Fabric.h"
+#include "NvCloth/Range.h"
+
+namespace nv
+{
+
+namespace cloth
+{
+
+class SwFactory;
+
+// One tether constraint: limits a particle's distance from anchor particle
+// mAnchor to mLength (scaled by SwFabric::mTetherLengthScale).
+struct SwTether
+{
+ SwTether(uint16_t, float);
+ uint16_t mAnchor; // index of the anchor particle
+ float mLength; // maximum tether length
+};
+
+// CPU-solver fabric: immutable constraint topology (restvalues, stiffness,
+// index pairs, tethers, triangles) shared between cloth instances. Internal
+// arrays are padded to the SIMD width; the getters report original sizes.
+class SwFabric : public Fabric
+{
+ public:
+#if PX_WINDOWS_FAMILY
+ typedef AlignedVector<float, 32>::Type RestvalueContainer; // avx
+#else
+ typedef AlignedVector<float, 16>::Type RestvalueContainer;
+#endif
+
+ SwFabric(SwFactory& factory, uint32_t numParticles, Range<const uint32_t> phasesIndices, Range<const uint32_t> sets,
+ Range<const float> restvalues, Range<const float> stiffnessValues, Range<const uint32_t> indices, Range<const uint32_t> anchors,
+ Range<const float> tetherLengths, Range<const uint32_t> triangles, uint32_t id);
+
+ SwFabric& operator = (const SwFabric&);
+
+ virtual ~SwFabric();
+
+ virtual Factory& getFactory() const;
+
+ virtual uint32_t getNumPhases() const;
+ virtual uint32_t getNumRestvalues() const;
+ virtual uint32_t getNumStiffnessValues() const;
+
+ virtual uint32_t getNumSets() const;
+ virtual uint32_t getNumIndices() const;
+
+ virtual uint32_t getNumParticles() const;
+
+ virtual uint32_t getNumTethers() const;
+
+ virtual uint32_t getNumTriangles() const;
+
+ virtual void scaleRestvalues(float);
+ virtual void scaleTetherLengths(float);
+
+ public:
+ SwFactory& mFactory;
+
+ uint32_t mNumParticles;
+
+ Vector<uint32_t>::Type mPhases; // index of set to use
+ Vector<uint32_t>::Type mSets; // offset of first restvalue, with 0 prefix
+
+ RestvalueContainer mRestvalues; // rest values (edge length)
+ RestvalueContainer mStiffnessValues; // constraint stiffnesses, uses phase config if empty
+ Vector<uint16_t>::Type mIndices; // particle index pairs
+
+ Vector<SwTether>::Type mTethers;
+ float mTetherLengthScale; // accumulated scaleTetherLengths() factor
+
+ Vector<uint16_t>::Type mTriangles; // three particle indices per triangle
+
+ uint32_t mId;
+
+ // restvalue count before SIMD padding; reported by the getters above
+ uint32_t mOriginalNumRestvalues;
+
+};
+}
+}
diff --git a/NvCloth/src/SwFactory.cpp b/NvCloth/src/SwFactory.cpp
new file mode 100644
index 0000000..418eb13
--- /dev/null
+++ b/NvCloth/src/SwFactory.cpp
@@ -0,0 +1,299 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2017 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#include "SwFactory.h"
+#include "SwFabric.h"
+#include "SwCloth.h"
+#include "SwSolver.h"
+#include "ClothImpl.h"
+#include <string.h> // for memcpy
+
+using namespace nv;
+using namespace physx;
+
+namespace nv
+{
+namespace cloth
+{
+// defined in Factory.cpp
+uint32_t getNextFabricId();
+}
+}
+
+// Default construction; member containers start empty.
+cloth::SwFactory::SwFactory()
+{
+}
+
+cloth::SwFactory::~SwFactory()
+{
+ // (message, condition) comma idiom: the string documents the failing condition
+ // in the assert expansion; fabrics must not outlive their factory.
+ NV_CLOTH_ASSERT(("All fabrics created by this factory need to be deleted before this factory is destroyed.", mFabrics.size() == 0));
+}
+
+// Allocates a new CPU fabric from the raw constraint data; the caller owns the
+// returned object and must destroy it before the factory is destroyed.
+cloth::Fabric* cloth::SwFactory::createFabric(uint32_t numParticles, Range<const uint32_t> phaseIndices,
+ Range<const uint32_t> sets, Range<const float> restvalues, Range<const float> stiffnessValues,
+ Range<const uint32_t> indices, Range<const uint32_t> anchors,
+ Range<const float> tetherLengths, Range<const uint32_t> triangles)
+{
+ return NV_CLOTH_NEW(SwFabric)(*this, numParticles, phaseIndices, sets, restvalues, stiffnessValues, indices, anchors, tetherLengths, triangles,
+ getNextFabricId());
+}
+
+// Allocates a new CPU cloth instance over the given fabric; caller owns the result.
+cloth::Cloth* cloth::SwFactory::createCloth(Range<const PxVec4> particles, Fabric& fabric)
+{
+ return NV_CLOTH_NEW(SwClothImpl)(*this, fabric, particles);
+}
+
+// Allocates a new CPU solver; caller owns the result.
+cloth::Solver* cloth::SwFactory::createSolver()
+{
+ return NV_CLOTH_NEW(SwSolver)();
+}
+
+// Clones a cloth into this (CPU) factory. Cloths living on another platform
+// delegate to their own clone() so they can convert their data first.
+cloth::Cloth* cloth::SwFactory::clone(const Cloth& cloth)
+{
+ if (cloth.getFactory().getPlatform() != Platform::CPU)
+ return cloth.clone(*this); // forward to CuCloth
+
+ // copy construct
+ return NV_CLOTH_NEW(SwClothImpl)(*this, static_cast<const SwClothImpl&>(cloth));
+}
+
+// Copies fabric data back out into caller-provided ranges. Any range may be
+// empty to skip that piece of data; non-empty ranges must match the fabric's
+// reported sizes. Constraints that reference particles >= mNumParticles
+// (internal padding) are filtered out, so per-set counts are recomputed.
+void cloth::SwFactory::extractFabricData(const Fabric& fabric, Range<uint32_t> phaseIndices, Range<uint32_t> sets,
+ Range<float> restvalues, Range<float> stiffnessValues, Range<uint32_t> indices, Range<uint32_t> anchors,
+ Range<float> tetherLengths, Range<uint32_t> triangles) const
+{
+ const SwFabric& swFabric = static_cast<const SwFabric&>(fabric);
+
+ NV_CLOTH_ASSERT(phaseIndices.empty() || phaseIndices.size() == swFabric.getNumPhases());
+ NV_CLOTH_ASSERT(restvalues.empty() || restvalues.size() == swFabric.getNumRestvalues());
+ NV_CLOTH_ASSERT(stiffnessValues.empty() || stiffnessValues.size() == swFabric.getNumStiffnessValues());
+ NV_CLOTH_ASSERT(sets.empty() || sets.size() == swFabric.getNumSets());
+ NV_CLOTH_ASSERT(indices.empty() || indices.size() == swFabric.getNumIndices());
+ NV_CLOTH_ASSERT(anchors.empty() || anchors.size() == swFabric.getNumTethers());
+ NV_CLOTH_ASSERT(tetherLengths.empty() || tetherLengths.size() == swFabric.getNumTethers());
+
+ for (uint32_t i = 0; !phaseIndices.empty(); ++i, phaseIndices.popFront())
+ phaseIndices.front() = swFabric.mPhases[i];
+
+ typedef SwFabric::RestvalueContainer::ConstIterator RestvalueIterator;
+
+ Vector<uint32_t>::Type::ConstIterator sEnd = swFabric.mSets.end(), sIt;
+ RestvalueIterator rBegin = swFabric.mRestvalues.begin(), rIt = rBegin;
+ RestvalueIterator stIt = swFabric.mStiffnessValues.begin();
+ Vector<uint16_t>::Type::ConstIterator iIt = swFabric.mIndices.begin();
+
+ uint32_t* sDst = sets.begin();
+ float* rDst = restvalues.begin();
+ float* stDst = stiffnessValues.begin();
+ uint32_t* iDst = indices.begin();
+
+ uint32_t numConstraints = 0;
+ // mSets stores offsets with a 0 prefix; pre-increment skips the prefix entry.
+ for (sIt = swFabric.mSets.begin(); ++sIt != sEnd;)
+ {
+ RestvalueIterator rEnd = rBegin + *sIt;
+ for (; rIt != rEnd; ++rIt, ++stIt)
+ {
+ uint16_t i0 = *iIt++;
+ uint16_t i1 = *iIt++;
+
+ // skip constraints that touch padding particles
+ if (std::max(i0, i1) >= swFabric.mNumParticles)
+ continue;
+
+ if (!restvalues.empty())
+ *rDst++ = *rIt;
+ if (!stiffnessValues.empty())
+ *stDst++ = *stIt;
+
+ if (!indices.empty())
+ {
+ *iDst++ = i0;
+ *iDst++ = i1;
+ }
+
+ ++numConstraints;
+ }
+
+ // sets output is a running (cumulative) constraint count, matching the
+ // "offset of first restvalue" encoding minus the 0 prefix
+ if (!sets.empty())
+ *sDst++ = numConstraints;
+ }
+
+ for (uint32_t i = 0; !anchors.empty(); ++i, anchors.popFront())
+ anchors.front() = swFabric.mTethers[i].mAnchor;
+
+ // tether lengths are stored unscaled; apply the fabric's current scale on the way out
+ for (uint32_t i = 0; !tetherLengths.empty(); ++i, tetherLengths.popFront())
+ tetherLengths.front() = swFabric.mTethers[i].mLength * swFabric.mTetherLengthScale;
+
+ for (uint32_t i = 0; !triangles.empty(); ++i, triangles.popFront())
+ triangles.front() = swFabric.mTriangles[i];
+}
+
+// Copies the cloth's collision shapes (spheres, capsule index pairs, planes,
+// convex masks, triangle vertices) into caller-provided ranges. Empty ranges
+// skip that shape type; non-empty ranges must match the stored sizes.
+void cloth::SwFactory::extractCollisionData(const Cloth& cloth, Range<PxVec4> spheres, Range<uint32_t> capsules,
+ Range<PxVec4> planes, Range<uint32_t> convexes, Range<PxVec3> triangles) const
+{
+ NV_CLOTH_ASSERT(&cloth.getFactory() == this);
+
+ const SwCloth& swCloth = static_cast<const SwClothImpl&>(cloth).mCloth;
+
+ NV_CLOTH_ASSERT(spheres.empty() || spheres.size() == swCloth.mStartCollisionSpheres.size());
+ NV_CLOTH_ASSERT(capsules.empty() || capsules.size() == swCloth.mCapsuleIndices.size() * 2);
+ NV_CLOTH_ASSERT(planes.empty() || planes.size() == swCloth.mStartCollisionPlanes.size());
+ NV_CLOTH_ASSERT(convexes.empty() || convexes.size() == swCloth.mConvexMasks.size());
+ NV_CLOTH_ASSERT(triangles.empty() || triangles.size() == swCloth.mStartCollisionTriangles.size());
+
+ if (!swCloth.mStartCollisionSpheres.empty() && !spheres.empty())
+ memcpy(spheres.begin(), &swCloth.mStartCollisionSpheres.front(),
+ swCloth.mStartCollisionSpheres.size() * sizeof(PxVec4));
+
+ // each IndexPair provides two uint32_t entries in the capsules output
+ if (!swCloth.mCapsuleIndices.empty() && !capsules.empty())
+ memcpy(capsules.begin(), &swCloth.mCapsuleIndices.front(), swCloth.mCapsuleIndices.size() * sizeof(IndexPair));
+
+ if (!swCloth.mStartCollisionPlanes.empty() && !planes.empty())
+ memcpy(planes.begin(), &swCloth.mStartCollisionPlanes.front(),
+ swCloth.mStartCollisionPlanes.size() * sizeof(PxVec4));
+
+ if (!swCloth.mConvexMasks.empty() && !convexes.empty())
+ memcpy(convexes.begin(), &swCloth.mConvexMasks.front(), swCloth.mConvexMasks.size() * sizeof(uint32_t));
+
+ if (!swCloth.mStartCollisionTriangles.empty() && !triangles.empty())
+ memcpy(triangles.begin(), &swCloth.mStartCollisionTriangles.front(),
+ swCloth.mStartCollisionTriangles.size() * sizeof(PxVec3));
+}
+
+// Copies the cloth's motion constraints out; prefers the target (most recently
+// set) constraints, falling back to the start constraints.
+void cloth::SwFactory::extractMotionConstraints(const Cloth& cloth, Range<PxVec4> destConstraints) const
+{
+ NV_CLOTH_ASSERT(&cloth.getFactory() == this);
+
+ const SwCloth& swCloth = static_cast<const SwClothImpl&>(cloth).mCloth;
+
+ Vector<PxVec4>::Type const& srcConstraints = !swCloth.mMotionConstraints.mTarget.empty()
+ ? swCloth.mMotionConstraints.mTarget
+ : swCloth.mMotionConstraints.mStart;
+
+ if (!srcConstraints.empty())
+ {
+ // make sure dest array is big enough
+ NV_CLOTH_ASSERT(destConstraints.size() == srcConstraints.size());
+
+ memcpy(destConstraints.begin(), &srcConstraints.front(), srcConstraints.size() * sizeof(PxVec4));
+ }
+}
+
+// Copies the cloth's separation constraints out; same target-then-start
+// preference as extractMotionConstraints.
+void cloth::SwFactory::extractSeparationConstraints(const Cloth& cloth, Range<PxVec4> destConstraints) const
+{
+ NV_CLOTH_ASSERT(&cloth.getFactory() == this);
+
+ const SwCloth& swCloth = static_cast<const SwClothImpl&>(cloth).mCloth;
+
+ Vector<PxVec4>::Type const& srcConstraints = !swCloth.mSeparationConstraints.mTarget.empty()
+ ? swCloth.mSeparationConstraints.mTarget
+ : swCloth.mSeparationConstraints.mStart;
+
+ if (!srcConstraints.empty())
+ {
+ // make sure dest array is big enough
+ NV_CLOTH_ASSERT(destConstraints.size() == srcConstraints.size());
+
+ memcpy(destConstraints.begin(), &srcConstraints.front(), srcConstraints.size() * sizeof(PxVec4));
+ }
+}
+
+// Copies the cloth's per-particle accelerations out, if any are set.
+void cloth::SwFactory::extractParticleAccelerations(const Cloth& cloth, Range<PxVec4> destAccelerations) const
+{
+ NV_CLOTH_ASSERT(&cloth.getFactory() == this);
+
+ const SwCloth& swCloth = static_cast<const SwClothImpl&>(cloth).mCloth;
+
+ if (!swCloth.mParticleAccelerations.empty())
+ {
+ // make sure dest array is big enough
+ NV_CLOTH_ASSERT(destAccelerations.size() == swCloth.mParticleAccelerations.size());
+
+ memcpy(destAccelerations.begin(), &swCloth.mParticleAccelerations.front(),
+ swCloth.mParticleAccelerations.size() * sizeof(PxVec4));
+ }
+}
+
+// Copies virtual particle data out, converting from the internal storage
+// formats: weights PxVec4 -> PxVec3 (w dropped), indices Vec4us -> Vec4u.
+// Internal dummy entries (x >= numParticles) are skipped on the index path.
+void cloth::SwFactory::extractVirtualParticles(const Cloth& cloth, Range<uint32_t[4]> indices, Range<PxVec3> weights) const
+{
+ NV_CLOTH_ASSERT(this == &cloth.getFactory());
+
+ const SwCloth& swCloth = static_cast<const SwClothImpl&>(cloth).mCloth;
+
+ uint32_t numIndices = cloth.getNumVirtualParticles();
+ uint32_t numWeights = cloth.getNumVirtualParticleWeights();
+
+ NV_CLOTH_ASSERT(indices.size() == numIndices || indices.empty());
+ NV_CLOTH_ASSERT(weights.size() == numWeights || weights.empty());
+
+ if (weights.size() == numWeights)
+ {
+ PxVec3* wDestIt = reinterpret_cast<PxVec3*>(weights.begin());
+
+ // convert weights from vec4 to vec3
+ Vector<PxVec4>::Type::ConstIterator wIt = swCloth.mVirtualParticleWeights.begin();
+ Vector<PxVec4>::Type::ConstIterator wEnd = wIt + numWeights;
+
+ for (; wIt != wEnd; ++wIt, ++wDestIt)
+ *wDestIt = PxVec3( wIt->x, wIt->y, wIt->z );
+
+ NV_CLOTH_ASSERT(wDestIt == weights.end());
+ }
+ if (indices.size() == numIndices)
+ {
+ // convert indices
+ Vec4u* iDestIt = reinterpret_cast<Vec4u*>(indices.begin());
+ Vector<Vec4us>::Type::ConstIterator iIt = swCloth.mVirtualParticleIndices.begin();
+ Vector<Vec4us>::Type::ConstIterator iEnd = swCloth.mVirtualParticleIndices.end();
+
+ uint32_t numParticles = uint32_t(swCloth.mCurParticles.size());
+
+ for (; iIt != iEnd; ++iIt)
+ {
+ // skip dummy indices
+ if (iIt->x < numParticles)
+ // byte offset to element index
+ *iDestIt++ = Vec4u(*iIt);
+ }
+
+ // NOTE(review): forms a reference to the one-past-the-end element via
+ // *iDestIt before taking its address — works in practice but is
+ // technically past-end access; confirm against array() semantics.
+ NV_CLOTH_ASSERT(&array(*iDestIt) == indices.end());
+ }
+}
+
+// Copies the cloth's self-collision particle indices out; destination must
+// match the stored count exactly.
+void cloth::SwFactory::extractSelfCollisionIndices(const Cloth& cloth, Range<uint32_t> destIndices) const
+{
+ const SwCloth& swCloth = static_cast<const SwClothImpl&>(cloth).mCloth;
+ NV_CLOTH_ASSERT(destIndices.size() == swCloth.mSelfCollisionIndices.size());
+ memcpy(destIndices.begin(), swCloth.mSelfCollisionIndices.begin(), destIndices.size() * sizeof(uint32_t));
+}
+
+// Copies the cloth's rest positions out; destination must match the stored
+// count exactly.
+void cloth::SwFactory::extractRestPositions(const Cloth& cloth, Range<PxVec4> destRestPositions) const
+{
+ const SwCloth& swCloth = static_cast<const SwClothImpl&>(cloth).mCloth;
+ NV_CLOTH_ASSERT(destRestPositions.size() == swCloth.mRestPositions.size());
+ memcpy(destRestPositions.begin(), swCloth.mRestPositions.begin(), destRestPositions.size() * sizeof(PxVec4));
+}
diff --git a/NvCloth/src/SwFactory.h b/NvCloth/src/SwFactory.h
new file mode 100644
index 0000000..e0caf87
--- /dev/null
+++ b/NvCloth/src/SwFactory.h
@@ -0,0 +1,93 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2017 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#pragma once
+
+#include "NvCloth/Factory.h"
+#include <foundation/PxVec4.h>
+#include <foundation/PxVec3.h>
+
+namespace nv
+{
+
+namespace cloth
+{
+
+class SwFabric;
+class SwCloth;
+template <typename>
+class ClothImpl;
+
+// CPU backend of nv::cloth::Factory. Creates SwFabric/SwCloth/SwSolver
+// objects and provides data extraction for cloths it owns.
+class SwFactory : public Factory
+{
+ public:
+ typedef SwFabric FabricType;
+ typedef ClothImpl<SwCloth> ImplType;
+
+ SwFactory();
+ virtual ~SwFactory();
+
+ virtual Platform getPlatform() const { return Platform::CPU; }
+
+ // Factory interface; see Factory.h for the public contract of each method.
+ virtual Fabric* createFabric(uint32_t numParticles, Range<const uint32_t> phaseIndices, Range<const uint32_t> sets,
+ Range<const float> restvalues, Range<const float> stiffnessValues, Range<const uint32_t> indices,
+ Range<const uint32_t> anchors, Range<const float> tetherLengths,
+ Range<const uint32_t> triangles);
+
+ virtual Cloth* createCloth(Range<const physx::PxVec4> particles, Fabric& fabric);
+
+ virtual Solver* createSolver();
+
+ virtual Cloth* clone(const Cloth& cloth);
+
+ virtual void extractFabricData(const Fabric& fabric, Range<uint32_t> phaseIndices, Range<uint32_t> sets,
+ Range<float> restvalues, Range<float> stiffnessValues, Range<uint32_t> indices, Range<uint32_t> anchors,
+ Range<float> tetherLengths, Range<uint32_t> triangles) const;
+
+ virtual void extractCollisionData(const Cloth& cloth, Range<physx::PxVec4> spheres, Range<uint32_t> capsules,
+ Range<physx::PxVec4> planes, Range<uint32_t> convexes, Range<physx::PxVec3> triangles) const;
+
+ virtual void extractMotionConstraints(const Cloth& cloth, Range<physx::PxVec4> destConstraints) const;
+
+ virtual void extractSeparationConstraints(const Cloth& cloth, Range<physx::PxVec4> destConstraints) const;
+
+ virtual void extractParticleAccelerations(const Cloth& cloth, Range<physx::PxVec4> destAccelerations) const;
+
+ virtual void extractVirtualParticles(const Cloth& cloth, Range<uint32_t[4]> destIndices,
+ Range<physx::PxVec3> destWeights) const;
+
+ virtual void extractSelfCollisionIndices(const Cloth& cloth, Range<uint32_t> destIndices) const;
+
+ virtual void extractRestPositions(const Cloth& cloth, Range<physx::PxVec4> destRestPositions) const;
+
+ public:
+ // live fabrics created by this factory; must be empty at destruction
+ Vector<SwFabric*>::Type mFabrics;
+};
+}
+}
diff --git a/NvCloth/src/SwInterCollision.cpp b/NvCloth/src/SwInterCollision.cpp
new file mode 100644
index 0000000..6d5e013
--- /dev/null
+++ b/NvCloth/src/SwInterCollision.cpp
@@ -0,0 +1,703 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2017 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#include "NvCloth/Callbacks.h"
+#include "SwInterCollision.h"
+#include "SwCollisionHelpers.h"
+#include "BoundingBox.h"
+#include <foundation/PxMat44.h>
+#include <foundation/PxBounds3.h>
+#include <algorithm>
+#include <PsSort.h>
+#include "NvCloth/Allocator.h"
+
+using namespace nv;
+using namespace physx;
+
+namespace
+{
+
+// SIMD select masks / constants shared by the inter-collision kernels
+const Simd4fTupleFactory sMaskXYZ = simd4f(simd4i(~0, ~0, ~0, 0)); // select xyz lanes
+const Simd4fTupleFactory sMaskW = simd4f(simd4i(0, 0, 0, ~0)); // select w lane
+const Simd4fScalarFactory sEpsilon = simd4f(FLT_EPSILON);
+const Simd4fTupleFactory sZeroW = simd4f(-FLT_MAX, -FLT_MAX, -FLT_MAX, 0.0f);
+
+// returns sorted indices, output needs to be at least 2*(last - first) + 1024
+void radixSort(const uint32_t* first, const uint32_t* last, uint32_t* out)
+{
+ uint32_t n = uint32_t(last - first);
+
+ uint32_t* buffer = out + 2 * n;
+ uint32_t* __restrict histograms[] = { buffer, buffer + 256, buffer + 512, buffer + 768 };
+
+ memset(buffer, 0, 1024 * sizeof(uint32_t));
+
+ // build 3 histograms in one pass
+ for (const uint32_t* __restrict it = first; it != last; ++it)
+ {
+ uint32_t key = *it;
+ ++histograms[0][0xff & key];
+ ++histograms[1][0xff & (key >> 8)];
+ ++histograms[2][0xff & (key >> 16)];
+ ++histograms[3][key >> 24];
+ }
+
+ // convert histograms to offset tables in-place
+ uint32_t sums[4] = {};
+ for (uint32_t i = 0; i < 256; ++i)
+ {
+ uint32_t temp0 = histograms[0][i] + sums[0];
+ histograms[0][i] = sums[0], sums[0] = temp0;
+
+ uint32_t temp1 = histograms[1][i] + sums[1];
+ histograms[1][i] = sums[1], sums[1] = temp1;
+
+ uint32_t temp2 = histograms[2][i] + sums[2];
+ histograms[2][i] = sums[2], sums[2] = temp2;
+
+ uint32_t temp3 = histograms[3][i] + sums[3];
+ histograms[3][i] = sums[3], sums[3] = temp3;
+ }
+
+ NV_CLOTH_ASSERT(sums[0] == n && sums[1] == n && sums[2] == n && sums[3] == n);
+
+#if PX_DEBUG
+ memset(out, 0xff, 2 * n * sizeof(uint32_t));
+#endif
+
+ // sort 8 bits per pass
+
+ uint32_t* __restrict indices[] = { out, out + n };
+
+ for (uint32_t i = 0; i != n; ++i)
+ indices[1][histograms[0][0xff & first[i]]++] = i;
+
+ for (uint32_t i = 0, index; index = indices[1][i], i != n; ++i)
+ indices[0][histograms[1][0xff & (first[index] >> 8)]++] = index;
+
+ for (uint32_t i = 0, index; index = indices[0][i], i != n; ++i)
+ indices[1][histograms[2][0xff & (first[index] >> 16)]++] = index;
+
+ for (uint32_t i = 0, index; index = indices[1][i], i != n; ++i)
+ indices[0][histograms[3][first[index] >> 24]++] = index;
+}
+
+// Returns the index (0, 1, or 2) of the largest of the first three lanes
+// of edgeLength; ties resolve toward the later axis.
+template <typename Simd4f>
+uint32_t longestAxis(const Simd4f& edgeLength)
+{
+ const float* e = array(edgeLength);
+
+ if (e[0] > e[1])
+ return uint32_t(e[0] > e[2] ? 0 : 2);
+ else
+ return uint32_t(e[1] > e[2] ? 1 : 2);
+}
+}
+
+// Sets up inter-cloth collision over 'n' cloth instances: precomputes the
+// collision distance (xyz lanes, w zero), its square, the stiffness splat,
+// and the total particle count across all instances (used to size the
+// per-iteration index buffers in operator()).
+template <typename Simd4f>
+cloth::SwInterCollision<Simd4f>::SwInterCollision(const cloth::SwInterCollisionData* instances, uint32_t n,
+ float colDist, float stiffness, uint32_t iterations,
+ InterCollisionFilter filter, cloth::SwKernelAllocator& alloc)
+: mInstances(instances)
+, mNumInstances(n)
+, mClothIndices(NULL)
+, mParticleIndices(NULL)
+, mNumParticles(0)
+, mTotalParticles(0)
+, mFilter(filter)
+, mAllocator(alloc)
+{
+ NV_CLOTH_ASSERT(mFilter);
+
+ mCollisionDistance = simd4f(colDist, colDist, colDist, 0.0f);
+ mCollisionSquareDistance = mCollisionDistance * mCollisionDistance;
+ mStiffness = simd4f(stiffness);
+ mNumIterations = iterations;
+
+ // calculate particle size
+ for (uint32_t i = 0; i < n; ++i)
+ mTotalParticles += instances[i].mNumParticles;
+}
+
+// No owned resources; per-run buffers are allocated and freed inside operator().
+template <typename Simd4f>
+cloth::SwInterCollision<Simd4f>::~SwInterCollision()
+{
+}
+
+namespace
+{
+// multiply x by affine matrix m (columns m[0..3]), leaving w component of x intact
+template <typename Simd4f>
+PX_INLINE Simd4f transform(const Simd4f m[4], const Simd4f& x)
+{
+ const Simd4f a = m[3] + splat<0>(x) * m[0] + splat<1>(x) * m[1] + splat<2>(x) * m[2];
+ return select(sMaskXYZ, a, x);
+}
+
+// rotate x by the 3x3 part of m (no translation), leaving w component intact
+template <typename Simd4f>
+PX_INLINE Simd4f rotate(const Simd4f m[4], const Simd4f& x)
+{
+ const Simd4f a = splat<0>(x) * m[0] + splat<1>(x) * m[1] + splat<2>(x) * m[2];
+ return select(sMaskXYZ, a, x);
+}
+
+// Comparison functor for sorting cloth indices by the minimum extent of their
+// bounding box along a chosen axis (sweep-and-prune preparation).
+template <typename Simd4f>
+struct ClothSorter
+{
+ typedef cloth::BoundingBox<Simd4f> BoundingBox;
+
+ ClothSorter(BoundingBox* bounds, uint32_t n, uint32_t axis) : mBounds(bounds), mNumBounds(n), mAxis(axis)
+ {
+ }
+
+ // strict weak ordering on lower bound along mAxis
+ bool operator()(uint32_t i, uint32_t j) const
+ {
+ NV_CLOTH_ASSERT(i < mNumBounds);
+ NV_CLOTH_ASSERT(j < mNumBounds);
+
+ return array(mBounds[i].mLower)[mAxis] < array(mBounds[j].mLower)[mAxis];
+ }
+
+ BoundingBox* mBounds;
+ uint32_t mNumBounds;
+ uint32_t mAxis;
+};
+
+// for the given cloth array this function calculates the set of particles
+// which potentially interact, the potential colliders are returned with their
+// cloth index and particle index in clothIndices and particleIndices, the
+// function returns the number of potential colliders
+// for the given cloth array this function calculates the set of particles
+// which potentially interact, the potential colliders are returned with their
+// cloth index and particle index in clothIndices and particleIndices, the
+// function returns the number of potential colliders
+//
+// Side effects: selected particles (and their previous positions, converted to
+// impulses) are transformed to world space in-place; 'bounds' is expanded to
+// the world bounds of the selected particles; overlapMasks[i] receives a bit
+// mask of the cloths overlapping cloth i.
+template <typename Simd4f>
+uint32_t calculatePotentialColliders(const cloth::SwInterCollisionData* cBegin, const cloth::SwInterCollisionData* cEnd,
+ const Simd4f& colDist, uint16_t* clothIndices, uint32_t* particleIndices,
+ cloth::BoundingBox<Simd4f>& bounds, uint32_t* overlapMasks,
+ cloth::InterCollisionFilter filter, cloth::SwKernelAllocator& allocator)
+{
+ using namespace cloth;
+
+ typedef BoundingBox<Simd4f> BoundingBox;
+
+ uint32_t numParticles = 0;
+ const uint32_t numCloths = uint32_t(cEnd - cBegin);
+
+ // bounds of each cloth objects in world space
+ BoundingBox* const clothBounds = static_cast<BoundingBox*>(allocator.allocate(numCloths * sizeof(BoundingBox)));
+ BoundingBox* const overlapBounds = static_cast<BoundingBox*>(allocator.allocate(numCloths * sizeof(BoundingBox)));
+
+ // union of all cloth world bounds
+ BoundingBox totalClothBounds = emptyBounds<Simd4f>();
+
+ uint32_t* sortedIndices = static_cast<uint32_t*>(allocator.allocate(numCloths * sizeof(uint32_t)));
+
+ // compute world-space bounds of each cloth, inflated by the collision distance
+ for (uint32_t i = 0; i < numCloths; ++i)
+ {
+ const SwInterCollisionData& c = cBegin[i];
+
+ // transform bounds from b local space to local space of a
+ PxBounds3 lcBounds = PxBounds3::centerExtents(c.mBoundsCenter, c.mBoundsHalfExtent + PxVec3(array(colDist)[0]));
+ NV_CLOTH_ASSERT(!lcBounds.isEmpty());
+ PxBounds3 cWorld = PxBounds3::transformFast(c.mGlobalPose,lcBounds);
+
+ BoundingBox cBounds = { simd4f(cWorld.minimum.x, cWorld.minimum.y, cWorld.minimum.z, 0.0f),
+ simd4f(cWorld.maximum.x, cWorld.maximum.y, cWorld.maximum.z, 0.0f) };
+
+ sortedIndices[i] = i;
+ clothBounds[i] = cBounds;
+
+ totalClothBounds = expandBounds(totalClothBounds, cBounds);
+ }
+
+ // sort indices by their minimum extent on the longest axis
+ const uint32_t sweepAxis = longestAxis(totalClothBounds.mUpper - totalClothBounds.mLower);
+
+ ClothSorter<Simd4f> predicate(clothBounds, numCloths, sweepAxis);
+ shdfnd::sort(sortedIndices, numCloths, predicate, nv::cloth::NonTrackingAllocator());
+
+ // sweep-and-prune along the chosen axis to find overlapping cloth pairs
+ for (uint32_t i = 0; i < numCloths; ++i)
+ {
+ NV_CLOTH_ASSERT(sortedIndices[i] < numCloths);
+
+ const SwInterCollisionData& a = cBegin[sortedIndices[i]];
+
+ // local bounds
+ const Simd4f aCenter = load(reinterpret_cast<const float*>(&a.mBoundsCenter));
+ const Simd4f aHalfExtent = load(reinterpret_cast<const float*>(&a.mBoundsHalfExtent)) + colDist;
+ const BoundingBox aBounds = { aCenter - aHalfExtent, aCenter + aHalfExtent };
+
+ const PxMat44 aToWorld = PxMat44(a.mGlobalPose);
+ const PxTransform aToLocal = a.mGlobalPose.getInverse();
+
+ const float axisMin = array(clothBounds[sortedIndices[i]].mLower)[sweepAxis];
+ const float axisMax = array(clothBounds[sortedIndices[i]].mUpper)[sweepAxis];
+
+ uint32_t overlapMask = 0;
+ uint32_t numOverlaps = 0;
+
+ // scan back to find first intersecting bounding box
+ uint32_t startIndex = i;
+ while (startIndex > 0 && array(clothBounds[sortedIndices[startIndex]].mUpper)[sweepAxis] > axisMin)
+ --startIndex;
+
+ // compute all overlapping bounds
+ for (uint32_t j = startIndex; j < numCloths; ++j)
+ {
+ // ignore self-collision
+ if (i == j)
+ continue;
+
+ // early out if no more cloths along axis intersect us
+ if (array(clothBounds[sortedIndices[j]].mLower)[sweepAxis] > axisMax)
+ break;
+
+ const SwInterCollisionData& b = cBegin[sortedIndices[j]];
+
+ // check if collision between these shapes is filtered
+ if (!filter(a.mUserData, b.mUserData))
+ continue;
+
+ // set mask bit for this cloth
+ overlapMask |= 1 << sortedIndices[j];
+
+ // transform bounds from b local space to local space of a
+ PxBounds3 lcBounds = PxBounds3::centerExtents(b.mBoundsCenter, b.mBoundsHalfExtent + PxVec3(array(colDist)[0]));
+ NV_CLOTH_ASSERT(!lcBounds.isEmpty());
+ PxBounds3 bLocal = PxBounds3::transformFast(aToLocal * b.mGlobalPose,lcBounds);
+
+ BoundingBox bBounds = { simd4f(bLocal.minimum.x, bLocal.minimum.y, bLocal.minimum.z, 0.0f),
+ simd4f(bLocal.maximum.x, bLocal.maximum.y, bLocal.maximum.z, 0.0f) };
+
+ BoundingBox iBounds = intersectBounds(aBounds, bBounds);
+
+ // setup bounding box w to make point containment test cheaper
+ // (w spans [-FLT_MAX, FLT_MAX] so the 4-lane compare never rejects on w)
+ Simd4f floatMax = gSimd4fFloatMax & static_cast<Simd4f>(sMaskW);
+ iBounds.mLower = (iBounds.mLower & sMaskXYZ) | -floatMax;
+ iBounds.mUpper = (iBounds.mUpper & sMaskXYZ) | floatMax;
+
+ if (!isEmptyBounds(iBounds))
+ overlapBounds[numOverlaps++] = iBounds;
+ }
+
+ //----------------------------------------------------------------
+ // cull all particles to overlapping bounds and transform particles to world space
+
+ const uint32_t clothIndex = sortedIndices[i];
+ overlapMasks[clothIndex] = overlapMask;
+
+ Simd4f* pBegin = reinterpret_cast<Simd4f*>(a.mParticles);
+ Simd4f* qBegin = reinterpret_cast<Simd4f*>(a.mPrevParticles);
+
+ const Simd4f xform[4] = { load(reinterpret_cast<const float*>(&aToWorld.column0)),
+ load(reinterpret_cast<const float*>(&aToWorld.column1)),
+ load(reinterpret_cast<const float*>(&aToWorld.column2)),
+ load(reinterpret_cast<const float*>(&aToWorld.column3)) };
+
+ Simd4f impulseInvScale = recip(Simd4f(simd4f(cBegin[clothIndex].mImpulseScale)));
+
+ for (uint32_t k = 0; k < a.mNumParticles; ++k)
+ {
+ // mIndices (when present) restricts collision to a subset of particles
+ Simd4f* pIt = a.mIndices ? pBegin + a.mIndices[k] : pBegin + k;
+ Simd4f* qIt = a.mIndices ? qBegin + a.mIndices[k] : qBegin + k;
+
+ const Simd4f p = *pIt;
+
+ for (const BoundingBox* oIt = overlapBounds, *oEnd = overlapBounds + numOverlaps; oIt != oEnd; ++oIt)
+ {
+ // point in box test
+ if (anyGreater(oIt->mLower, p) != 0)
+ continue;
+ if (anyGreater(p, oIt->mUpper) != 0)
+ continue;
+
+ // transform particle to world space in-place
+ // (will be transformed back after collision)
+ *pIt = transform(xform, p);
+
+ // store the scaled impulse (rotated to world space) in the prev-position slot
+ Simd4f impulse = (p - *qIt) * impulseInvScale;
+ *qIt = rotate(xform, impulse);
+
+ // update world bounds
+ bounds = expandBounds(bounds, pIt, pIt + 1);
+
+ // add particle to output arrays
+ clothIndices[numParticles] = uint16_t(clothIndex);
+ particleIndices[numParticles] = uint32_t(pIt - pBegin);
+
+ // output each particle only once
+ ++numParticles;
+ break;
+ }
+ }
+ }
+
+ // LIFO deallocation order matches the stack allocator's expectations
+ allocator.deallocate(sortedIndices);
+ allocator.deallocate(overlapBounds);
+ allocator.deallocate(clothBounds);
+
+ return numParticles;
+}
+}
+
+// Resolves a flat collider index (into the per-iteration candidate arrays)
+// to a mutable reference to the corresponding particle of its cloth instance.
+template <typename Simd4f>
+PX_INLINE Simd4f& cloth::SwInterCollision<Simd4f>::getParticle(uint32_t index)
+{
+ NV_CLOTH_ASSERT(index < mNumParticles);
+
+ uint16_t clothIndex = mClothIndices[index];
+ uint32_t particleIndex = mParticleIndices[index];
+
+ NV_CLOTH_ASSERT(clothIndex < mNumInstances);
+
+ return reinterpret_cast<Simd4f&>(mInstances[clothIndex].mParticles[particleIndex]);
+}
+
+template <typename Simd4f>
+void cloth::SwInterCollision<Simd4f>::operator()()
+{
+ mNumTests = mNumCollisions = 0;
+
+ mClothIndices = static_cast<uint16_t*>(mAllocator.allocate(sizeof(uint16_t) * mTotalParticles));
+ mParticleIndices = static_cast<uint32_t*>(mAllocator.allocate(sizeof(uint32_t) * mTotalParticles));
+ mOverlapMasks = static_cast<uint32_t*>(mAllocator.allocate(sizeof(uint32_t*) * mNumInstances));
+
+ for (uint32_t k = 0; k < mNumIterations; ++k)
+ {
+ // world bounds of particles
+ BoundingBox<Simd4f> bounds = emptyBounds<Simd4f>();
+
+ // calculate potentially colliding set
+ {
+ NV_CLOTH_PROFILE_ZONE("cloth::SwInterCollision::BroadPhase", /*ProfileContext::None*/ 0);
+
+ mNumParticles =
+ calculatePotentialColliders(mInstances, mInstances + mNumInstances, mCollisionDistance, mClothIndices,
+ mParticleIndices, bounds, mOverlapMasks, mFilter, mAllocator);
+ }
+
+ // collide
+ if (mNumParticles)
+ {
+ NV_CLOTH_PROFILE_ZONE("cloth::SwInterCollision::Collide", /*ProfileContext::None*/ 0);
+
+ Simd4f lowerBound = bounds.mLower;
+ Simd4f edgeLength = max(bounds.mUpper - lowerBound, sEpsilon);
+
+ // sweep along longest axis
+ uint32_t sweepAxis = longestAxis(edgeLength);
+ uint32_t hashAxis0 = (sweepAxis + 1) % 3;
+ uint32_t hashAxis1 = (sweepAxis + 2) % 3;
+
+ // reserve 0, 127, and 65535 for sentinel
+ Simd4f cellSize = max(mCollisionDistance, simd4f(1.0f / 253) * edgeLength);
+ array(cellSize)[sweepAxis] = array(edgeLength)[sweepAxis] / 65533;
+
+ Simd4f one = gSimd4fOne;
+ Simd4f gridSize = simd4f(254.0f);
+ array(gridSize)[sweepAxis] = 65534.0f;
+
+ Simd4f gridScale = recip<1>(cellSize);
+ Simd4f gridBias = -lowerBound * gridScale + one;
+
+ void* buffer = mAllocator.allocate(getBufferSize(mNumParticles));
+
+ uint32_t* __restrict sortedIndices = reinterpret_cast<uint32_t*>(buffer);
+ uint32_t* __restrict sortedKeys = sortedIndices + mNumParticles;
+ uint32_t* __restrict keys = std::max(sortedKeys + mNumParticles, sortedIndices + 2 * mNumParticles + 1024);
+
+ typedef typename Simd4fToSimd4i<Simd4f>::Type Simd4i;
+
+ // create keys
+ for (uint32_t i = 0; i < mNumParticles; ++i)
+ {
+ // grid coordinate
+ Simd4f indexf = getParticle(i) * gridScale + gridBias;
+
+ // need to clamp index because shape collision potentially
+ // pushes particles outside of their original bounds
+ Simd4i indexi = intFloor(max(one, min(indexf, gridSize)));
+
+ const int32_t* ptr = array(indexi);
+ keys[i] = uint32_t(ptr[sweepAxis] | (ptr[hashAxis0] << 16) | (ptr[hashAxis1] << 24));
+ }
+
+ // compute sorted keys indices
+ radixSort(keys, keys + mNumParticles, sortedIndices);
+
+ // snoop histogram: offset of first index with 8 msb > 1 (0 is sentinel)
+ uint32_t firstColumnSize = sortedIndices[2 * mNumParticles + 769];
+
+ // sort keys
+ for (uint32_t i = 0; i < mNumParticles; ++i)
+ sortedKeys[i] = keys[sortedIndices[i]];
+ sortedKeys[mNumParticles] = uint32_t(-1); // sentinel
+
+ // calculate the number of buckets we need to search forward
+ const Simd4i data = intFloor(gridScale * mCollisionDistance);
+ uint32_t collisionDistance = uint32_t(2 + array(data)[sweepAxis]);
+
+ // collide particles
+ collideParticles(sortedKeys, firstColumnSize, sortedIndices, mNumParticles, collisionDistance);
+
+ mAllocator.deallocate(buffer);
+ }
+
+ /*
+ // verify against brute force (disable collision response when testing)
+ uint32_t numCollisions = mNumCollisions;
+ mNumCollisions = 0;
+
+ for (uint32_t i = 0; i < mNumParticles; ++i)
+ for (uint32_t j = i + 1; j < mNumParticles; ++j)
+ if (mOverlapMasks[mClothIndices[i]] & (1 << mClothIndices[j]))
+ collideParticles(getParticle(i), getParticle(j));
+
+ static uint32_t iter = 0; ++iter;
+ if (numCollisions != mNumCollisions)
+ printf("%u: %u != %u\n", iter, numCollisions, mNumCollisions);
+ */
+
+ // transform back to local space
+ {
+ NV_CLOTH_PROFILE_ZONE("cloth::SwInterCollision::PostTransform", /*ProfileContext::None*/ 0);
+
+ Simd4f toLocal[4], impulseScale;
+ uint16_t lastCloth = uint16_t(0xffff);
+
+ for (uint32_t i = 0; i < mNumParticles; ++i)
+ {
+ uint16_t clothIndex = mClothIndices[i];
+ const SwInterCollisionData* instance = mInstances + clothIndex;
+
+ // todo: could pre-compute these inverses
+ if (clothIndex != lastCloth)
+ {
+ const PxMat44 xform = PxMat44(instance->mGlobalPose.getInverse());
+
+ toLocal[0] = load(reinterpret_cast<const float*>(&xform.column0));
+ toLocal[1] = load(reinterpret_cast<const float*>(&xform.column1));
+ toLocal[2] = load(reinterpret_cast<const float*>(&xform.column2));
+ toLocal[3] = load(reinterpret_cast<const float*>(&xform.column3));
+
+ impulseScale = simd4f(instance->mImpulseScale);
+
+ lastCloth = mClothIndices[i];
+ }
+
+ uint32_t particleIndex = mParticleIndices[i];
+ Simd4f& particle = reinterpret_cast<Simd4f&>(instance->mParticles[particleIndex]);
+ Simd4f& impulse = reinterpret_cast<Simd4f&>(instance->mPrevParticles[particleIndex]);
+
+ particle = transform(toLocal, particle);
+ // avoid w becoming negative due to numerical inaccuracies
+ impulse = max(sZeroW, particle - rotate(toLocal, Simd4f(impulse * impulseScale)));
+ }
+ }
+ }
+
+ mAllocator.deallocate(mOverlapMasks);
+ mAllocator.deallocate(mParticleIndices);
+ mAllocator.deallocate(mClothIndices);
+}
+
+template <typename Simd4f>
+size_t cloth::SwInterCollision<Simd4f>::estimateTemporaryMemory(SwInterCollisionData* cloths, uint32_t n)
+{
+ // count total particles
+ uint32_t numParticles = 0;
+ for (uint32_t i = 0; i < n; ++i)
+ numParticles += cloths[i].mNumParticles;
+
+ uint32_t boundsSize = 2 * n * sizeof(BoundingBox<Simd4f>) + n * sizeof(uint32_t);
+ uint32_t clothIndicesSize = numParticles * sizeof(uint16_t);
+ uint32_t particleIndicesSize = numParticles * sizeof(uint32_t);
+ uint32_t masksSize = n * sizeof(uint32_t);
+
+ return boundsSize + clothIndicesSize + particleIndicesSize + masksSize + getBufferSize(numParticles);
+}
+
+template <typename Simd4f>
+size_t cloth::SwInterCollision<Simd4f>::getBufferSize(uint32_t numParticles)
+{
+ uint32_t keysSize = numParticles * sizeof(uint32_t);
+ uint32_t indicesSize = numParticles * sizeof(uint32_t);
+ uint32_t histogramSize = 1024 * sizeof(uint32_t);
+
+ return keysSize + indicesSize + std::max(indicesSize + histogramSize, keysSize);
+}
+
// Tests the particle at 'index' against the currently loaded particle
// (mParticle) and, on overlap, pushes both apart along their separation axis.
// Positions and accumulated impulses are updated in place; the per-particle
// weights are read from the w components of the two positions.
template <typename Simd4f>
void cloth::SwInterCollision<Simd4f>::collideParticle(uint32_t index)
{
	uint16_t clothIndex = mClothIndices[index];

	// skip pairs whose cloths are not flagged for mutual collision
	if ((1 << clothIndex) & ~mClothMask)
		return;

	const SwInterCollisionData* instance = mInstances + clothIndex;

	uint32_t particleIndex = mParticleIndices[index];
	Simd4f& particle = reinterpret_cast<Simd4f&>(instance->mParticles[particleIndex]);

	Simd4f diff = particle - mParticle;
	Simd4f distSqr = dot3(diff, diff);

#if PX_DEBUG
	++mNumTests;
#endif

	// reject if the pair is farther apart than the collision radius
	if (allGreater(distSqr, mCollisionSquareDistance))
		return;

	// per-particle weights live in the w lanes of the positions
	Simd4f w0 = splat<3>(mParticle);
	Simd4f w1 = splat<3>(particle);

	Simd4f ratio = mCollisionDistance * rsqrt<1>(distSqr);
	// epsilon keeps the reciprocal finite when both weights are zero
	Simd4f scale = mStiffness * recip<1>(sEpsilon + w0 + w1);
	// mask leaves the w lanes untouched so only xyz are displaced
	Simd4f delta = (scale * (diff - diff * ratio)) & sMaskXYZ;

	mParticle = mParticle + delta * w0;
	particle = particle - delta * w1;

	Simd4f& impulse = reinterpret_cast<Simd4f&>(instance->mPrevParticles[particleIndex]);

	// during inter-collision the prev-particle arrays double as impulse
	// accumulators (see the post-transform pass in operator())
	mImpulse = mImpulse + delta * w0;
	impulse = impulse - delta * w1;

#if PX_DEBUG || PX_PROFILE
	++mNumCollisions;
#endif
}
+
// Sweeps over all candidate particles in sorted key order and collides each
// one against candidates in its own grid cell and the four neighboring hash
// columns.
// keys:              sorted cell keys, terminated by a ~0 sentinel
// firstColumnSize:   offset of the first key whose 8 msb are > 1 (snooped
//                    from the radix histogram; 0 is reserved as sentinel)
// indices:           sort permutation mapping key order to candidate slots
// collisionDistance: search radius along the sweep axis, in buckets
template <typename Simd4f>
void cloth::SwInterCollision<Simd4f>::collideParticles(const uint32_t* keys, uint32_t firstColumnSize,
                                                       const uint32_t* indices, uint32_t numParticles,
                                                       uint32_t collisionDistance)
{
	// the low 16 bits of a key hold the sweep-axis bucket (see key creation)
	const uint32_t bucketMask = uint16_t(-1);

	// key offsets of the current column and the four neighbor hash columns
	const uint32_t keyOffsets[] = { 0, 0x00010000, 0x00ff0000, 0x01000000, 0x01010000 };

	const uint32_t* __restrict kFirst[5];
	const uint32_t* __restrict kLast[5];

	{
		// optimization: scan forward iterator starting points once instead of 9 times
		const uint32_t* __restrict kIt = keys;

		uint32_t key = *kIt;
		uint32_t firstKey = key - std::min(collisionDistance, key & bucketMask);
		uint32_t lastKey = std::min(key + collisionDistance, key | bucketMask);

		kFirst[0] = kIt;
		while (*kIt < lastKey)
			++kIt;
		kLast[0] = kIt;

		for (uint32_t k = 1; k < 5; ++k)
		{
			for (uint32_t n = firstKey + keyOffsets[k]; *kIt < n;)
				++kIt;
			kFirst[k] = kIt;

			for (uint32_t n = lastKey + keyOffsets[k]; *kIt < n;)
				++kIt;
			kLast[k] = kIt;

			// jump forward once to second column
			kIt = keys + firstColumnSize;
			firstColumnSize = 0;
		}
	}

	const uint32_t* __restrict iIt = indices;
	const uint32_t* __restrict iEnd = indices + numParticles;

	const uint32_t* __restrict jIt;
	const uint32_t* __restrict jEnd;

	// kFirst[0] advances in lockstep with iIt: keys and indices are parallel
	for (; iIt != iEnd; ++iIt, ++kFirst[0])
	{
		// load current particle once outside of inner loop
		uint32_t index = *iIt;
		NV_CLOTH_ASSERT(index < mNumParticles);
		mClothIndex = mClothIndices[index];
		NV_CLOTH_ASSERT(mClothIndex < mNumInstances);
		mClothMask = mOverlapMasks[mClothIndex];

		const SwInterCollisionData* instance = mInstances + mClothIndex;

		mParticleIndex = mParticleIndices[index];
		mParticle = reinterpret_cast<const Simd4f&>(instance->mParticles[mParticleIndex]);
		mImpulse = reinterpret_cast<const Simd4f&>(instance->mPrevParticles[mParticleIndex]);

		uint32_t key = *kFirst[0];

		// range of keys we need to check against for this particle
		uint32_t firstKey = key - std::min(collisionDistance, key & bucketMask);
		uint32_t lastKey = std::min(key + collisionDistance, key | bucketMask);

		// scan forward end point (the ~0 sentinel key bounds the scan)
		while (*kLast[0] < lastKey)
			++kLast[0];

		// process potential colliders of same cell
		jEnd = indices + (kLast[0] - keys);
		for (jIt = iIt + 1; jIt != jEnd; ++jIt)
			collideParticle(*jIt);

		// process neighbor cells
		for (uint32_t k = 1; k < 5; ++k)
		{
			// scan forward start point
			for (uint32_t n = firstKey + keyOffsets[k]; *kFirst[k] < n;)
				++kFirst[k];

			// scan forward end point
			for (uint32_t n = lastKey + keyOffsets[k]; *kLast[k] < n;)
				++kLast[k];

			// process potential colliders
			jEnd = indices + (kLast[k] - keys);
			for (jIt = indices + (kFirst[k] - keys); jIt != jEnd; ++jIt)
				collideParticle(*jIt);
		}

		// write back particle and impulse
		reinterpret_cast<Simd4f&>(instance->mParticles[mParticleIndex]) = mParticle;
		reinterpret_cast<Simd4f&>(instance->mPrevParticles[mParticleIndex]) = mImpulse;
	}
}
+
// explicit template instantiation for the SIMD and scalar particle layouts
#if NV_SIMD_SIMD
template class cloth::SwInterCollision<Simd4f>;
#endif
#if NV_SIMD_SCALAR
template class cloth::SwInterCollision<Scalar4f>;
#endif
diff --git a/NvCloth/src/SwInterCollision.h b/NvCloth/src/SwInterCollision.h
new file mode 100644
index 0000000..656fbbc
--- /dev/null
+++ b/NvCloth/src/SwInterCollision.h
@@ -0,0 +1,142 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2017 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#pragma once
+
+#include "StackAllocator.h"
+#include "Simd.h"
+#include <foundation/PxVec4.h>
+#include <foundation/PxVec3.h>
+#include <foundation/PxTransform.h>
+
+namespace nv
+{
+namespace cloth
+{
+
+class SwCloth;
+struct SwClothData;
+
+typedef StackAllocator<16> SwKernelAllocator;
+
+typedef bool (*InterCollisionFilter)(void* cloth0, void* cloth1);
+
// Per-cloth data handed to the inter-collision stage. Particle arrays are in
// cloth local space; mGlobalPose maps them into a common frame for collision
// (operator() transforms back to local space with its inverse afterwards).
struct SwInterCollisionData
{
	SwInterCollisionData()
	{
	}
	SwInterCollisionData(physx::PxVec4* particles, physx::PxVec4* prevParticles, uint32_t numParticles, uint32_t* indices,
	                     const physx::PxTransform& globalPose, const physx::PxVec3& boundsCenter, const physx::PxVec3& boundsHalfExtents,
	                     float impulseScale, void* userData)
	: mParticles(particles)
	, mPrevParticles(prevParticles)
	, mNumParticles(numParticles)
	, mIndices(indices)
	, mGlobalPose(globalPose)
	, mBoundsCenter(boundsCenter)
	, mBoundsHalfExtent(boundsHalfExtents)
	, mImpulseScale(impulseScale)
	, mUserData(userData)
	{
	}

	physx::PxVec4* mParticles;       // current positions; w lane used as collision weight
	physx::PxVec4* mPrevParticles;   // previous positions; reused as impulse accumulators
	uint32_t mNumParticles;
	uint32_t* mIndices;              // particle index list; usage not visible in this stage — confirm
	physx::PxTransform mGlobalPose;  // cloth local-to-common-space transform
	physx::PxVec3 mBoundsCenter;     // cloth bounds (center/half-extent form)
	physx::PxVec3 mBoundsHalfExtent;
	float mImpulseScale;             // scale applied to accumulated impulses on write-back
	void* mUserData;                 // opaque cloth handle; presumably passed to InterCollisionFilter — confirm
};
+
// Collides particles of multiple cloth instances against each other using a
// sorted sweep over a virtual uniform grid (see SwInterCollision.cpp).
template <typename Simd4f>
class SwInterCollision
{

  public:
	SwInterCollision(const SwInterCollisionData* cloths, uint32_t n, float colDist, float stiffness,
	                 uint32_t iterations, InterCollisionFilter filter, cloth::SwKernelAllocator& alloc);

	~SwInterCollision();

	// runs the inter-collision passes over all registered cloth instances
	void operator()();

	// conservative upper bound on scratch memory used by operator()
	static size_t estimateTemporaryMemory(SwInterCollisionData* cloths, uint32_t n);

  private:
	SwInterCollision& operator = (const SwInterCollision&); // not implemented

	// scratch memory needed to sort the given number of candidate particles
	static size_t getBufferSize(uint32_t);

	// sweeps sorted cell keys and runs pairwise collision on candidate pairs
	void collideParticles(const uint32_t* keys, uint32_t firstColumnSize, const uint32_t* sortedIndices,
	                      uint32_t numParticles, uint32_t collisionDistance);

	Simd4f& getParticle(uint32_t index);

	// better wrap these in a struct
	// collides the particle at 'index' against the current particle (mParticle)
	void collideParticle(uint32_t index);

	Simd4f mParticle; // position of the particle currently being processed
	Simd4f mImpulse;  // accumulated impulse of that particle

	Simd4f mCollisionDistance;       // collision radius
	Simd4f mCollisionSquareDistance; // squared radius for the distance test
	Simd4f mStiffness;               // collision response stiffness

	uint16_t mClothIndex;    // cloth owning the current particle
	uint32_t mClothMask;     // overlap mask of that cloth
	uint32_t mParticleIndex; // index of the current particle inside its cloth

	uint32_t mNumIterations;

	const SwInterCollisionData* mInstances;
	uint32_t mNumInstances;

	uint16_t* mClothIndices;    // per candidate particle: owning cloth index
	uint32_t* mParticleIndices; // per candidate particle: index inside its cloth
	uint32_t mNumParticles;     // number of candidate particles this iteration
	uint32_t* mOverlapMasks;    // per cloth: bitmask of cloths it may collide with

	uint32_t mTotalParticles;

	InterCollisionFilter mFilter; // user callback deciding which cloth pairs collide

	SwKernelAllocator& mAllocator;

  public:
	mutable uint32_t mNumTests;      // diagnostics, counted in PX_DEBUG builds
	mutable uint32_t mNumCollisions; // diagnostics, counted in PX_DEBUG/PX_PROFILE builds
};
+
+} // namespace cloth
+
+} // namespace nv
diff --git a/NvCloth/src/SwSelfCollision.cpp b/NvCloth/src/SwSelfCollision.cpp
new file mode 100644
index 0000000..6b3e267
--- /dev/null
+++ b/NvCloth/src/SwSelfCollision.cpp
@@ -0,0 +1,416 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2017 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#include "SwSelfCollision.h"
+#include "SwCloth.h"
+#include "SwClothData.h"
+#include "SwCollisionHelpers.h"
+
+#ifdef _MSC_VER
+#pragma warning(disable : 4127) // conditional expression is constant
+#endif
+
+using namespace nv;
+
+namespace
+{
+
+const Simd4fTupleFactory sMaskXYZ = simd4f(simd4i(~0, ~0, ~0, 0));
+
// returns sorted indices, output needs to be at least 2*(last - first) + 1024
// Stable 4x8-bit LSD radix sort over 32-bit keys: 'out' receives the sort
// permutation in its first (last - first) entries; the rest of the buffer
// serves as ping-pong index storage and as four 256-entry histograms.
void radixSort(const uint32_t* first, const uint32_t* last, uint16_t* out)
{
	uint16_t n = uint16_t(last - first);

	// histogram storage lives past the two n-entry index buffers
	uint16_t* buffer = out + 2 * n;
	uint16_t* __restrict histograms[] = { buffer, buffer + 256, buffer + 512, buffer + 768 };

	memset(buffer, 0, 1024 * sizeof(uint16_t));

	// build 4 histograms (one per byte) in one pass
	for (const uint32_t* __restrict it = first; it != last; ++it)
	{
		uint32_t key = *it;
		++histograms[0][0xff & key];
		++histograms[1][0xff & (key >> 8)];
		++histograms[2][0xff & (key >> 16)];
		++histograms[3][key >> 24];
	}

	// convert histograms to offset tables in-place (exclusive prefix sums)
	uint16_t sums[4] = {};
	for (uint32_t i = 0; i < 256; ++i)
	{
		uint16_t temp0 = uint16_t(histograms[0][i] + sums[0]);
		histograms[0][i] = sums[0], sums[0] = temp0;

		uint16_t temp1 = uint16_t(histograms[1][i] + sums[1]);
		histograms[1][i] = sums[1], sums[1] = temp1;

		uint16_t temp2 = uint16_t(histograms[2][i] + sums[2]);
		histograms[2][i] = sums[2], sums[2] = temp2;

		uint16_t temp3 = uint16_t(histograms[3][i] + sums[3]);
		histograms[3][i] = sums[3], sums[3] = temp3;
	}

	// each offset table must end exactly at n
	NV_CLOTH_ASSERT(sums[0] == n && sums[1] == n && sums[2] == n && sums[3] == n);

#if PX_DEBUG
	memset(out, 0xff, 2 * n * sizeof(uint16_t));
#endif

	// sort 8 bits per pass, ping-ponging between the two index buffers

	uint16_t* __restrict indices[] = { out, out + n };

	for (uint16_t i = 0; i != n; ++i)
		indices[1][histograms[0][0xff & first[i]]++] = i;

	// NOTE(review): the comma condition loads indices[...][i] before testing
	// i != n, so the last test reads one slot past the active index buffer;
	// that slot lies inside this same allocation (the other buffer/histogram
	// area), so the read stays in bounds and the value is discarded
	for (uint16_t i = 0, index; index = indices[1][i], i != n; ++i)
		indices[0][histograms[1][0xff & (first[index] >> 8)]++] = index;

	for (uint16_t i = 0, index; index = indices[0][i], i != n; ++i)
		indices[1][histograms[2][0xff & (first[index] >> 16)]++] = index;

	for (uint16_t i = 0, index; index = indices[1][i], i != n; ++i)
		indices[0][histograms[3][first[index] >> 24]++] = index;
}
+
// returns the index (0, 1, or 2) of the largest of the first three
// components of edgeLength; ties resolve exactly as strict '>' dictates
template <typename Simd4f>
uint32_t longestAxis(const Simd4f& edgeLength)
{
	const float* e = array(edgeLength);

	uint32_t axis = e[0] > e[1] ? 0u : 1u;
	return e[axis] > e[2] ? axis : 2u;
}
+
+bool isSelfCollisionEnabled(const cloth::SwClothData& cloth)
+{
+ return std::min(cloth.mSelfCollisionDistance, cloth.mSelfCollisionStiffness) > 0.0f;
+}
+
+bool isSelfCollisionEnabled(const cloth::SwCloth& cloth)
+{
+ return std::min(cloth.mSelfCollisionDistance, -cloth.mSelfCollisionLogStiffness) > 0.0f;
+}
+
// rounds x up to the next even number (x + 1 at UINT32_MAX wraps to 0,
// matching unsigned arithmetic of the original formulation)
inline uint32_t align2(uint32_t x)
{
	return x + (x & 1u);
}
+
+} // anonymous namespace
+
+template <typename Simd4f>
+cloth::SwSelfCollision<Simd4f>::SwSelfCollision(cloth::SwClothData& clothData, cloth::SwKernelAllocator& alloc)
+: mClothData(clothData), mAllocator(alloc)
+{
+ mCollisionDistance = simd4f(mClothData.mSelfCollisionDistance);
+ mCollisionSquareDistance = mCollisionDistance * mCollisionDistance;
+ mStiffness = sMaskXYZ & static_cast<Simd4f>(simd4f(mClothData.mSelfCollisionStiffness));
+}
+
template <typename Simd4f>
cloth::SwSelfCollision<Simd4f>::~SwSelfCollision()
{
	// nothing to release: scratch memory is deallocated at the end of operator()
}
+
// Runs one self-collision pass: quantizes particles into a virtual uniform
// grid keyed as (sweep-axis bucket | hash0 << 16 | hash1 << 24), radix-sorts
// the keys, then sweeps the sorted keys colliding nearby pairs in place.
template <typename Simd4f>
void cloth::SwSelfCollision<Simd4f>::operator()()
{
	mNumTests = mNumCollisions = 0;

	if (!isSelfCollisionEnabled(mClothData))
		return;

	// current cloth bounds define the grid extents
	Simd4f lowerBound = load(mClothData.mCurBounds);
	Simd4f edgeLength = max(load(mClothData.mCurBounds + 3) - lowerBound, gSimd4fEpsilon);

	// sweep along longest axis
	uint32_t sweepAxis = longestAxis(edgeLength);
	uint32_t hashAxis0 = (sweepAxis + 1) % 3;
	uint32_t hashAxis1 = (sweepAxis + 2) % 3;

	// reserve 0, 127, and 65535 for sentinel
	Simd4f cellSize = max(mCollisionDistance, simd4f(1.0f / 253) * edgeLength);
	array(cellSize)[sweepAxis] = array(edgeLength)[sweepAxis] / 65533;

	Simd4f one = gSimd4fOne;
	Simd4f gridSize = simd4f(254.0f);
	array(gridSize)[sweepAxis] = 65534.0f;

	Simd4f gridScale = recip<1>(cellSize);
	Simd4f gridBias = -lowerBound * gridScale + one;

	uint32_t numIndices = mClothData.mNumSelfCollisionIndices;
	void* buffer = mAllocator.allocate(getBufferSize(numIndices));

	// carve the scratch buffer into keys / sort permutation / sorted keys
	const uint32_t* __restrict indices = mClothData.mSelfCollisionIndices;
	uint32_t* __restrict keys = reinterpret_cast<uint32_t*>(buffer);
	uint16_t* __restrict sortedIndices = reinterpret_cast<uint16_t*>(keys + numIndices);
	uint32_t* __restrict sortedKeys = reinterpret_cast<uint32_t*>(sortedIndices + align2(numIndices));

	const Simd4f* particles = reinterpret_cast<const Simd4f*>(mClothData.mCurParticles);

	// create keys
	for (uint32_t i = 0; i < numIndices; ++i)
	{
		// with no explicit index list, every particle participates
		uint32_t index = indices ? indices[i] : i;

		// grid coordinate
		Simd4f keyf = particles[index] * gridScale + gridBias;

		// need to clamp index because shape collision potentially
		// pushes particles outside of their original bounds
		Simd4i keyi = intFloor(max(one, min(keyf, gridSize)));

		const int32_t* ptr = array(keyi);
		keys[i] = uint32_t(ptr[sweepAxis] | (ptr[hashAxis0] << 16) | (ptr[hashAxis1] << 24));
	}

	// compute sorted keys indices
	radixSort(keys, keys + numIndices, sortedIndices);

	// snoop histogram: offset of first index with 8 msb > 1 (0 is sentinel)
	uint16_t firstColumnSize = sortedIndices[2 * numIndices + 769];

	// sort keys
	for (uint32_t i = 0; i < numIndices; ++i)
		sortedKeys[i] = keys[sortedIndices[i]];
	sortedKeys[numIndices] = uint32_t(-1); // sentinel

	if (indices)
	{
		// sort indices (into no-longer-needed keys array)
		const uint16_t* __restrict permutation = sortedIndices;
		sortedIndices = reinterpret_cast<uint16_t*>(keys);
		for (uint32_t i = 0; i < numIndices; ++i)
			sortedIndices[i] = uint16_t(indices[permutation[i]]);
	}

	// calculate the number of buckets we need to search forward
	const Simd4i data = intFloor(gridScale * mCollisionDistance);
	uint32_t collisionDistance = 2 + static_cast<uint32_t>(array(data)[sweepAxis]);

	// collide particles; rest positions (if present) let us skip pairs that
	// are already close in the rest configuration
	if (mClothData.mRestPositions)
		collideParticles<true>(sortedKeys, firstColumnSize, sortedIndices, collisionDistance);
	else
		collideParticles<false>(sortedKeys, firstColumnSize, sortedIndices, collisionDistance);

	mAllocator.deallocate(buffer);

	// verify against brute force (disable collision response when testing)
	/*
	uint32_t numCollisions = mNumCollisions;
	mNumCollisions = 0;

	Simd4f* qarticles = reinterpret_cast<
	    Simd4f*>(mClothData.mCurParticles);
	for (uint32_t i = 0; i < numIndices; ++i)
	{
	    uint32_t indexI = indices ? indices[i] : i;
	    for (uint32_t j = i + 1; j < numIndices; ++j)
	    {
	        uint32_t indexJ = indices ? indices[j] : j;
	        collideParticles(qarticles[indexI], qarticles[indexJ]);
	    }
	}

	static uint32_t iter = 0; ++iter;
	if (numCollisions != mNumCollisions)
	    printf("%u: %u != %u\n", iter, numCollisions, mNumCollisions);
	*/
}
+
+template <typename Simd4f>
+size_t cloth::SwSelfCollision<Simd4f>::estimateTemporaryMemory(const SwCloth& cloth)
+{
+ uint32_t numIndices =
+ uint32_t(cloth.mSelfCollisionIndices.empty() ? cloth.mCurParticles.size() : cloth.mSelfCollisionIndices.size());
+ return isSelfCollisionEnabled(cloth) ? getBufferSize(numIndices) : 0;
+}
+
+template <typename Simd4f>
+size_t cloth::SwSelfCollision<Simd4f>::getBufferSize(uint32_t numIndices)
+{
+ uint32_t keysSize = numIndices * sizeof(uint32_t);
+ uint32_t indicesSize = align2(numIndices) * sizeof(uint16_t);
+ uint32_t radixSize = (numIndices + 1024) * sizeof(uint16_t);
+ return keysSize + indicesSize + std::max(radixSize, keysSize + uint32_t(sizeof(uint32_t)));
+}
+
// Pairwise response: if the two positions are closer than the collision
// distance (and, when useRestParticles is set, not already that close in the
// rest configuration), push them apart in proportion to the weights stored
// in their w lanes. Both positions are updated in place.
template <typename Simd4f>
template <bool useRestParticles>
void cloth::SwSelfCollision<Simd4f>::collideParticles(Simd4f& pos0, Simd4f& pos1, const Simd4f& pos0rest,
                                                      const Simd4f& pos1rest)
{
	Simd4f diff = pos1 - pos0;
	Simd4f distSqr = dot3(diff, diff);

#if PX_DEBUG
	++mNumTests;
#endif

	// reject pairs beyond the collision radius
	if (allGreater(distSqr, mCollisionSquareDistance))
		return;

	if (useRestParticles)
	{
		// calculate distance in rest configuration, if less than collision
		// distance then ignore collision between particles in deformed config
		Simd4f restDiff = pos1rest - pos0rest;
		Simd4f restDistSqr = dot3(restDiff, restDiff);

		if (allGreater(mCollisionSquareDistance, restDistSqr))
			return;
	}

	// per-particle weights live in the w lanes
	Simd4f w0 = splat<3>(pos0);
	Simd4f w1 = splat<3>(pos1);

	Simd4f ratio = mCollisionDistance * rsqrt(distSqr);
	// epsilon keeps the reciprocal finite when both weights are zero
	Simd4f scale = mStiffness * recip(gSimd4fEpsilon + w0 + w1);
	// mask leaves the w lanes untouched so only xyz are displaced
	Simd4f delta = (scale * (diff - diff * ratio)) & sMaskXYZ;

	pos0 = pos0 + delta * w0;
	pos1 = pos1 - delta * w1;

#if PX_DEBUG || PX_PROFILE
	++mNumCollisions;
#endif
}
+
// Sweeps over the sorted cell keys and collides each particle against
// candidates in its own grid cell and the four neighboring hash columns.
// keys:              sorted cell keys, terminated by a ~0 sentinel
// firstColumnSize:   offset of the first key whose 8 msb are > 1 (snooped
//                    from the radix histogram; 0 is reserved as sentinel)
// indices:           sorted particle indices parallel to 'keys'
// collisionDistance: search radius along the sweep axis, in buckets
template <typename Simd4f>
template <bool useRestParticles>
void cloth::SwSelfCollision<Simd4f>::collideParticles(const uint32_t* keys, uint16_t firstColumnSize,
                                                      const uint16_t* indices, uint32_t collisionDistance)
{
	Simd4f* __restrict particles = reinterpret_cast<Simd4f*>(mClothData.mCurParticles);
	// when there are no rest positions, alias the current particles so the
	// (dead) rest reads stay valid
	Simd4f* __restrict restParticles =
	    useRestParticles ? reinterpret_cast<Simd4f*>(mClothData.mRestPositions) : particles;

	// the low 16 bits of a key hold the sweep-axis bucket (see key creation)
	const uint32_t bucketMask = uint16_t(-1);

	// key offsets of the current column and the four neighbor hash columns
	const uint32_t keyOffsets[] = { 0, 0x00010000, 0x00ff0000, 0x01000000, 0x01010000 };

	const uint32_t* __restrict kFirst[5];
	const uint32_t* __restrict kLast[5];

	{
		// optimization: scan forward iterator starting points once instead of 9 times
		const uint32_t* __restrict kIt = keys;

		uint32_t key = *kIt;
		uint32_t firstKey = key - std::min(collisionDistance, key & bucketMask);
		uint32_t lastKey = std::min(key + collisionDistance, key | bucketMask);

		kFirst[0] = kIt;
		while (*kIt < lastKey)
			++kIt;
		kLast[0] = kIt;

		for (uint32_t k = 1; k < 5; ++k)
		{
			for (uint32_t n = firstKey + keyOffsets[k]; *kIt < n;)
				++kIt;
			kFirst[k] = kIt;

			for (uint32_t n = lastKey + keyOffsets[k]; *kIt < n;)
				++kIt;
			kLast[k] = kIt;

			// jump forward once to second column
			kIt = keys + firstColumnSize;
			firstColumnSize = 0;
		}
	}

	const uint16_t* __restrict iIt = indices;
	const uint16_t* __restrict iEnd = indices + mClothData.mNumSelfCollisionIndices;

	const uint16_t* __restrict jIt;
	const uint16_t* __restrict jEnd;

	// kFirst[0] advances in lockstep with iIt: keys and indices are parallel
	for (; iIt != iEnd; ++iIt, ++kFirst[0])
	{
		NV_CLOTH_ASSERT(*iIt < mClothData.mNumParticles);

		// load current particle once outside of inner loop
		Simd4f particle = particles[*iIt];
		Simd4f restParticle = restParticles[*iIt];

		uint32_t key = *kFirst[0];

		// range of keys we need to check against for this particle
		uint32_t firstKey = key - std::min(collisionDistance, key & bucketMask);
		uint32_t lastKey = std::min(key + collisionDistance, key | bucketMask);

		// scan forward end point (the ~0 sentinel key bounds the scan)
		while (*kLast[0] < lastKey)
			++kLast[0];

		// process potential colliders of same cell
		jEnd = indices + (kLast[0] - keys);
		for (jIt = iIt + 1; jIt != jEnd; ++jIt)
			collideParticles<useRestParticles>(particle, particles[*jIt], restParticle, restParticles[*jIt]);

		// process neighbor cells
		for (uint32_t k = 1; k < 5; ++k)
		{
			// scan forward start point
			for (uint32_t n = firstKey + keyOffsets[k]; *kFirst[k] < n;)
				++kFirst[k];

			// scan forward end point
			for (uint32_t n = lastKey + keyOffsets[k]; *kLast[k] < n;)
				++kLast[k];

			// process potential colliders
			jEnd = indices + (kLast[k] - keys);
			for (jIt = indices + (kFirst[k] - keys); jIt != jEnd; ++jIt)
				collideParticles<useRestParticles>(particle, particles[*jIt], restParticle, restParticles[*jIt]);
		}

		// store current particle
		particles[*iIt] = particle;
	}
}
+
// explicit template instantiation for the SIMD and scalar particle layouts
#if NV_SIMD_SIMD
template class cloth::SwSelfCollision<Simd4f>;
#endif
#if NV_SIMD_SCALAR
template class cloth::SwSelfCollision<Scalar4f>;
#endif
diff --git a/NvCloth/src/SwSelfCollision.h b/NvCloth/src/SwSelfCollision.h
new file mode 100644
index 0000000..0348e29
--- /dev/null
+++ b/NvCloth/src/SwSelfCollision.h
@@ -0,0 +1,83 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2017 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#pragma once
+
+#include "StackAllocator.h"
+#include <foundation/Px.h>
+#include "Simd.h"
+
+namespace nv
+{
+namespace cloth
+{
+
+class SwCloth;
+struct SwClothData;
+
+typedef StackAllocator<16> SwKernelAllocator;
+
// Collides a cloth's particles against each other using a sorted sweep over
// a virtual uniform grid (see SwSelfCollision.cpp).
template <typename Simd4f>
class SwSelfCollision
{
	typedef typename Simd4fToSimd4i<Simd4f>::Type Simd4i;

  public:
	SwSelfCollision(SwClothData& clothData, SwKernelAllocator& alloc);
	~SwSelfCollision();

	// runs one self-collision pass over the cloth's particles
	void operator()();

	// scratch memory operator() will need, or 0 when self collision is off
	static size_t estimateTemporaryMemory(const SwCloth&);

  private:
	SwSelfCollision& operator = (const SwSelfCollision&); // not implemented
	// scratch memory needed to sort the given number of particle indices
	static size_t getBufferSize(uint32_t);

	// pairwise response between two particles (rest-position gated variant)
	template <bool useRestParticles>
	void collideParticles(Simd4f&, Simd4f&, const Simd4f&, const Simd4f&);

	// sweep over sorted cell keys, colliding candidate pairs
	template <bool useRestParticles>
	void collideParticles(const uint32_t*, uint16_t, const uint16_t*, uint32_t);

	Simd4f mCollisionDistance;       // collision radius (splatted)
	Simd4f mCollisionSquareDistance; // squared radius for the distance test
	Simd4f mStiffness;               // response stiffness, w lane masked to 0

	SwClothData& mClothData;
	SwKernelAllocator& mAllocator;

  public:
	mutable uint32_t mNumTests;      // diagnostics, counted in PX_DEBUG builds
	mutable uint32_t mNumCollisions; // diagnostics, counted in PX_DEBUG/PX_PROFILE builds
};
+
+} // namespace cloth
+
+} // namespace nv
diff --git a/NvCloth/src/SwSolver.cpp b/NvCloth/src/SwSolver.cpp
new file mode 100644
index 0000000..c83eed8
--- /dev/null
+++ b/NvCloth/src/SwSolver.cpp
@@ -0,0 +1,267 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2017 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#include <foundation/PxProfiler.h>
+#include "SwSolver.h"
+#include "SwCloth.h"
+#include "ClothImpl.h"
+#include "SwFabric.h"
+#include "SwFactory.h"
+#include "SwClothData.h"
+#include "SwSolverKernel.h"
+#include "SwInterCollision.h"
+#include <PsFPU.h>
+#include <PsSort.h>
+
+using namespace physx;
+
+namespace nv
+{
+namespace cloth
+{
+bool neonSolverKernel(SwCloth const&, SwClothData&, SwKernelAllocator&, IterationStateFactory&);
+}
+}
+
+using namespace nv;
+
+#if NV_SIMD_SIMD
+typedef Simd4f Simd4fType;
+#else
+typedef Scalar4f Simd4fType;
+#endif
+
+// Inter-collision starts disabled (distance 0); scratch memory and the
+// profiling event handle are allocated lazily.
+cloth::SwSolver::SwSolver()
+: mInterCollisionDistance(0.0f)
+, mInterCollisionStiffness(1.0f)
+, mInterCollisionIterations(1)
+, mInterCollisionFilter(nullptr)
+, mInterCollisionScratchMem(nullptr)
+, mInterCollisionScratchMemSize(0)
+, mSimulateProfileEventData(nullptr)
+{
+}
+
+cloth::SwSolver::~SwSolver()
+{
+ // Release the inter-collision scratch buffer, if one was ever allocated.
+ if (mInterCollisionScratchMem)
+ NV_CLOTH_FREE(mInterCollisionScratchMem);
+
+ // All cloths must have been removed (removeCloth) before destruction.
+ NV_CLOTH_ASSERT(mSimulatedCloths.empty());
+}
+
+namespace
+{
+// Orders tasks by particle count, largest cloth first, so the most expensive
+// work is scheduled earliest.
+template <typename T>
+bool clothSizeGreater(const T& t0, const T& t1)
+{
+ return t0.mCloth->mCurParticles.size() > t1.mCloth->mCurParticles.size();
+}
+
+// Sorts a task array in descending cloth size using clothSizeGreater.
+template <typename T>
+void sortTasks(shdfnd::Array<T, cloth::NonTrackingAllocator>& tasks)
+{
+ shdfnd::sort(tasks.begin(), tasks.size(), &clothSizeGreater<T>, nv::cloth::NonTrackingAllocator());
+}
+}
+
+// Registers a cloth with this solver. The cloth must be an SwClothImpl
+// (CPU cloth); the task list is re-sorted so larger cloths run first.
+void cloth::SwSolver::addCloth(Cloth* cloth)
+{
+ SwCloth& swCloth = static_cast<SwClothImpl&>(*cloth).mCloth;
+
+ mSimulatedCloths.pushBack(SimulatedCloth(swCloth, this));
+ sortTasks(mSimulatedCloths);
+}
+
+// Unregisters a cloth; a no-op if the cloth was never added.
+void cloth::SwSolver::removeCloth(Cloth* cloth)
+{
+ SwCloth& swCloth = static_cast<SwClothImpl&>(*cloth).mCloth;
+
+ // Linear search for the entry owning this cloth.
+ Vector<SimulatedCloth>::Type::Iterator tIt = mSimulatedCloths.begin();
+ Vector<SimulatedCloth>::Type::Iterator tEnd = mSimulatedCloths.end();
+ while (tIt != tEnd && tIt->mCloth != &swCloth)
+ ++tIt;
+
+ if (tIt != tEnd)
+ {
+ // Free the per-cloth scratch buffer before removal; replaceWithLast is
+ // O(1) but breaks the size ordering, so the list is re-sorted.
+ NV_CLOTH_FREE(tIt->mScratchMemory);
+ mSimulatedCloths.replaceWithLast(tIt);
+ sortTasks(mSimulatedCloths);
+ }
+}
+
+// Starts a simulation frame with time step dt.
+// Returns false (and does nothing) when there are no cloths to simulate.
+bool cloth::SwSolver::beginSimulation(float dt)
+{
+ if (mSimulatedCloths.empty())
+ return false;
+
+ mCurrentDt = dt;
+ beginFrame(); // opens the cross-thread profiling zone
+
+ return true;
+}
+// Simulates one chunk (= one cloth; see getSimulationChunkCount).
+// Must be called between beginSimulation and endSimulation.
+void cloth::SwSolver::simulateChunk(int idx)
+{
+ NV_CLOTH_ASSERT(!mSimulatedCloths.empty());
+ mSimulatedCloths[idx].Simulate();
+ mSimulatedCloths[idx].Destroy(); // per-frame cleanup (constraints, targets)
+}
+// Finishes the frame: resolves collisions between different cloths, then
+// closes the profiling zone opened by beginFrame.
+void cloth::SwSolver::endSimulation()
+{
+ NV_CLOTH_ASSERT(!mSimulatedCloths.empty());
+ interCollision();
+ endFrame();
+}
+
+// Each registered cloth is simulated as its own chunk.
+int cloth::SwSolver::getSimulationChunkCount() const
+{
+ const uint32_t clothCount = mSimulatedCloths.size();
+ return int(clothCount);
+}
+
+// Resolves collisions between particles of different cloths. Runs once per
+// frame from endSimulation; requires a user-supplied filter to decide which
+// cloth pairs may interact.
+void cloth::SwSolver::interCollision()
+{
+ // Disabled when no iterations are requested or the distance is zero.
+ if (!mInterCollisionIterations || mInterCollisionDistance == 0.0f)
+ return;
+ if (mInterCollisionFilter == nullptr)
+ {
+ NV_CLOTH_LOG_WARNING("Inter collision will not work unless an inter collision filter is set using Solver::setInterCollisionFilter.");
+ return;
+ }
+
+ float elasticity = 1.0f;
+
+ // rebuild cloth instance array
+ mInterCollisionInstances.resize(0);
+ for (uint32_t i = 0; i < mSimulatedCloths.size(); ++i)
+ {
+ SwCloth* c = mSimulatedCloths[i].mCloth;
+ float invNumIterations = mSimulatedCloths[i].mInvNumIterations;
+
+ // If the cloth has explicit self-collision indices only those particles
+ // participate; otherwise every particle does.
+ mInterCollisionInstances.pushBack(SwInterCollisionData(
+ c->mCurParticles.begin(), c->mPrevParticles.begin(),
+ c->mSelfCollisionIndices.empty() ? c->mCurParticles.size() : c->mSelfCollisionIndices.size(),
+ c->mSelfCollisionIndices.empty() ? NULL : &c->mSelfCollisionIndices[0], c->mTargetMotion,
+ c->mParticleBoundsCenter, c->mParticleBoundsHalfExtent, elasticity * invNumIterations, c->mUserData));
+ }
+
+ const uint32_t requiredTempMemorySize = uint32_t(SwInterCollision<Simd4fType>::estimateTemporaryMemory(
+ &mInterCollisionInstances[0], mInterCollisionInstances.size()));
+
+ // realloc temp memory if necessary (buffer only ever grows)
+ if (mInterCollisionScratchMemSize < requiredTempMemorySize)
+ {
+ if (mInterCollisionScratchMem)
+ NV_CLOTH_FREE(mInterCollisionScratchMem);
+
+ mInterCollisionScratchMem = NV_CLOTH_ALLOC(requiredTempMemorySize, "cloth::SwSolver::mInterCollisionScratchMem");
+ mInterCollisionScratchMemSize = requiredTempMemorySize;
+ }
+
+ SwKernelAllocator allocator(mInterCollisionScratchMem, mInterCollisionScratchMemSize);
+
+ // run inter-collision
+ SwInterCollision<Simd4fType> collider(mInterCollisionInstances.begin(), mInterCollisionInstances.size(),
+ mInterCollisionDistance, mInterCollisionStiffness, mInterCollisionIterations,
+ mInterCollisionFilter, allocator);
+
+ collider();
+}
+
+// Opens the cross-thread "simulate" profiling zone; the returned event data
+// is kept so endFrame can close the matching zone.
+void cloth::SwSolver::beginFrame() const
+{
+ mSimulateProfileEventData = NV_CLOTH_PROFILE_START_CROSSTHREAD("cloth::SwSolver::simulate", 0);
+}
+
+// Closes the profiling zone opened by beginFrame.
+void cloth::SwSolver::endFrame() const
+{
+ NV_CLOTH_PROFILE_STOP_CROSSTHREAD(mSimulateProfileEventData,"cloth::SwSolver::simulate", 0);
+}
+
+// Per-cloth task entry; scratch memory is allocated on first Simulate().
+cloth::SwSolver::SimulatedCloth::SimulatedCloth(SwCloth& cloth, SwSolver* parent)
+ : mCloth(&cloth), mScratchMemorySize(0), mScratchMemory(0), mInvNumIterations(0.0f), mParent(parent)
+{
+
+}
+
+// Per-frame cleanup after Simulate(): retires consumed constraint targets
+// and promotes interpolation targets to the new start state.
+void cloth::SwSolver::SimulatedCloth::Destroy()
+{
+ // Drop the constraint sets that were consumed this frame.
+ mCloth->mMotionConstraints.pop();
+ mCloth->mSeparationConstraints.pop();
+
+ // For each collision shape type: if a target state was provided, it becomes
+ // the start state for the next frame's interpolation.
+ if (!mCloth->mTargetCollisionSpheres.empty())
+ {
+ swap(mCloth->mStartCollisionSpheres, mCloth->mTargetCollisionSpheres);
+ mCloth->mTargetCollisionSpheres.resize(0);
+ }
+
+ if (!mCloth->mTargetCollisionPlanes.empty())
+ {
+ swap(mCloth->mStartCollisionPlanes, mCloth->mTargetCollisionPlanes);
+ mCloth->mTargetCollisionPlanes.resize(0);
+ }
+
+ if (!mCloth->mTargetCollisionTriangles.empty())
+ {
+ swap(mCloth->mStartCollisionTriangles, mCloth->mTargetCollisionTriangles);
+ mCloth->mTargetCollisionTriangles.resize(0);
+ }
+}
+// Simulates one frame of this cloth: sizes the scratch buffer, derives the
+// iteration state from the frame dt, and runs the SIMD solver kernel.
+void cloth::SwSolver::SimulatedCloth::Simulate()
+{
+ // check if we need to reallocate the temp memory buffer
+ // (number of shapes may have changed)
+ uint32_t requiredTempMemorySize = uint32_t(SwSolverKernel<Simd4fType>::estimateTemporaryMemory(*mCloth));
+
+ if (mScratchMemorySize < requiredTempMemorySize)
+ {
+ NV_CLOTH_FREE(mScratchMemory);
+
+ mScratchMemory = NV_CLOTH_ALLOC(requiredTempMemorySize, "cloth::SwSolver::mScratchMemory");
+ mScratchMemorySize = requiredTempMemorySize;
+ }
+
+ // NOTE(review): the (re)allocation above happens even when dt == 0 skips
+ // the simulation below — harmless, but could be moved after this check.
+ if (mParent->mCurrentDt == 0.0f)
+ return;
+
+ IterationStateFactory factory(*mCloth, mParent->mCurrentDt);
+ mInvNumIterations = factory.mInvNumIterations; // saved for interCollision
+
+ // Ensures a consistent FPU/SIMD control state for the kernel.
+ shdfnd::SIMDGuard simdGuard;
+
+ SwClothData data(*mCloth, mCloth->mFabric);
+ SwKernelAllocator allocator(mScratchMemory, uint32_t(mScratchMemorySize));
+
+ // construct kernel functor and execute
+#if NV_ANDROID
+ // if (!neonSolverKernel(cloth, data, allocator, factory))
+#endif
+ SwSolverKernel<Simd4fType>(*mCloth, data, allocator, factory)();
+
+ data.reconcile(*mCloth); // update cloth
+}
diff --git a/NvCloth/src/SwSolver.h b/NvCloth/src/SwSolver.h
new file mode 100644
index 0000000..0c503ac
--- /dev/null
+++ b/NvCloth/src/SwSolver.h
@@ -0,0 +1,136 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2017 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#pragma once
+
+#include "NvCloth/Solver.h"
+#include "SwInterCollision.h"
+
+namespace nv
+{
+
+namespace cloth
+{
+
+class SwCloth;
+class SwFactory;
+
+/// CPU/SSE based cloth solver
+class SwSolver : public Solver
+{
+ // Per-cloth bookkeeping: the cloth, its solver scratch buffer, and the
+ // iteration scaling of the last simulated frame (used by inter-collision).
+ struct SimulatedCloth
+ {
+ SimulatedCloth(SwCloth& cloth, SwSolver* parent);
+ void Destroy();
+ void Simulate();
+
+ SwCloth* mCloth;
+ uint32_t mScratchMemorySize; // capacity of mScratchMemory in bytes
+ void* mScratchMemory; // per-cloth kernel scratch buffer
+ float mInvNumIterations; // 1 / iteration count of the last frame
+
+ SwSolver* mParent; // owning solver; provides the frame delta time
+ };
+ friend struct SimulatedCloth;
+
+  public:
+ SwSolver();
+ virtual ~SwSolver();
+
+ virtual void addCloth(Cloth*) override;
+ virtual void removeCloth(Cloth*) override;
+
+ // functions executing the simulation work.
+ virtual bool beginSimulation(float dt) override;
+ virtual void simulateChunk(int idx) override;
+ virtual void endSimulation() override;
+ virtual int getSimulationChunkCount() const override;
+
+ // Inter-collision parameters; a distance of 0 disables inter-collision.
+ virtual void setInterCollisionDistance(float distance) override
+ {
+ mInterCollisionDistance = distance;
+ }
+ virtual float getInterCollisionDistance() const override
+ {
+ return mInterCollisionDistance;
+ }
+
+ virtual void setInterCollisionStiffness(float stiffness) override
+ {
+ mInterCollisionStiffness = stiffness;
+ }
+ virtual float getInterCollisionStiffness() const override
+ {
+ return mInterCollisionStiffness;
+ }
+
+ virtual void setInterCollisionNbIterations(uint32_t nbIterations) override
+ {
+ mInterCollisionIterations = nbIterations;
+ }
+ virtual uint32_t getInterCollisionNbIterations() const override
+ {
+ return mInterCollisionIterations;
+ }
+
+ // Callback deciding which cloth pairs may collide; required for
+ // inter-collision to run (see SwSolver::interCollision).
+ virtual void setInterCollisionFilter(InterCollisionFilter filter) override
+ {
+ mInterCollisionFilter = filter;
+ }
+
+ // The CPU solver has no asynchronous failure mode to report.
+ virtual bool hasError() const override
+ {
+ return false;
+ }
+
+ private:
+ void beginFrame() const;
+ void endFrame() const;
+
+ void interCollision();
+
+ private:
+ Vector<SimulatedCloth>::Type mSimulatedCloths; // sorted, largest first
+
+
+ float mInterCollisionDistance;
+ float mInterCollisionStiffness;
+ uint32_t mInterCollisionIterations;
+ InterCollisionFilter mInterCollisionFilter;
+
+ // Scratch buffer shared by all inter-collision passes; grows on demand.
+ void* mInterCollisionScratchMem;
+ uint32_t mInterCollisionScratchMemSize;
+ Vector<SwInterCollisionData>::Type mInterCollisionInstances;
+
+ float mCurrentDt; //The delta time for the current simulated frame
+
+ mutable void* mSimulateProfileEventData; // handle for the profiling zone
+};
+}
+}
diff --git a/NvCloth/src/SwSolverKernel.cpp b/NvCloth/src/SwSolverKernel.cpp
new file mode 100644
index 0000000..f53c9ab
--- /dev/null
+++ b/NvCloth/src/SwSolverKernel.cpp
@@ -0,0 +1,851 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2017 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#include "SwSolverKernel.h"
+#include "SwCloth.h"
+#include "SwClothData.h"
+#include "SwFabric.h"
+#include "SwFactory.h"
+#include "PointInterpolator.h"
+#include "BoundingBox.h"
+#include <foundation/PxProfiler.h>
+
+using namespace physx;
+
+#define NV_AVX (NV_SIMD_SIMD&&(PX_WIN32 || PX_WIN64) && PX_VC >= 10)
+#ifdef _MSC_VER
+#pragma warning(disable : 4127) // conditional expression is constant
+#endif
+
+#if NV_AVX
+namespace avx
+{
+// defined in SwSolveConstraints.cpp
+
+void initialize();
+
+template <bool, uint32_t>
+void solveConstraints(float* __restrict posIt, const float* __restrict rIt, const float* __restrict stIt, const float* __restrict rEnd,
+ const uint16_t* __restrict iIt, const __m128& stiffnessEtc, const __m128& stiffnessExponent);
+}
+
+namespace
+{
+// Returns 0 (no AVX), 1 (AVX), or 2 (AVX + FMA3). Also calls
+// avx::initialize() once support is confirmed.
+uint32_t getAvxSupport()
+{
+// Checking for AVX requires 3 things:
+// 1) CPUID indicates that the OS uses XSAVE and XRSTORE
+// 2) CPUID indicates support for AVX
+// 3) XGETBV indicates registers are saved and restored on context switch
+
+#if _MSC_FULL_VER < 160040219 || !defined(_XCR_XFEATURE_ENABLED_MASK)
+ // need at least VC10 SP1 and compile on at least Win7 SP1
+ return 0;
+#else
+ int cpuInfo[4];
+ __cpuid(cpuInfo, 1);
+ int avxFlags = 3 << 27; // checking 1) and 2) above (OSXSAVE + AVX bits)
+ if ((cpuInfo[2] & avxFlags) != avxFlags)
+ return 0; // xgetbv not enabled or no AVX support
+
+ if ((_xgetbv(_XCR_XFEATURE_ENABLED_MASK) & 0x6) != 0x6)
+ return 0; // OS does not save YMM registers
+
+ avx::initialize();
+
+#if _MSC_VER < 1700
+ return 1; // pre-VC11 compilers cannot emit FMA3
+#else
+ int fmaFlags = 1 << 12; // CPUID.1:ECX bit 12 = FMA3
+ if ((cpuInfo[2] & fmaFlags) != fmaFlags)
+ return 1; // no FMA3 support
+
+ /* only using fma at the moment, don't lock out AMD's piledriver by requiring avx2
+ __cpuid(cpuInfo, 7);
+ int avx2Flags = 1 << 5;
+ if ((cpuInfo[1] & avx2Flags) != avx2Flags)
+ return 1; // no AVX2 support
+ */
+
+ return 2;
+#endif // _MSC_VER
+#endif // _MSC_FULL_VER
+}
+
+// Evaluated once at static-init time; read by the kernel to pick a code path.
+const uint32_t sAvxSupport = getAvxSupport(); // 0: no AVX, 1: AVX, 2: AVX+FMA
+}
+#endif
+
+using namespace nv;
+
+namespace
+{
+/* simd constants */
+
+// Lane-select masks for 4-float particle vectors (xyz = position,
+// w = inverse mass).
+const Simd4fTupleFactory sMaskW = simd4f(simd4i(0, 0, 0, ~0));
+const Simd4fTupleFactory sMaskXY = simd4f(simd4i(~0, ~0, 0, 0));
+const Simd4fTupleFactory sMaskXYZ = simd4f(simd4i(~0, ~0, ~0, 0));
+const Simd4fTupleFactory sMaskYZW = simd4f(simd4i(0, ~0, ~0, ~0));
+// Negates xyz while keeping w: b + a*this = (b - a, a.w + b.w).
+const Simd4fTupleFactory sMinusOneXYZOneW = simd4f(-1.0f, -1.0f, -1.0f, 1.0f);
+const Simd4fTupleFactory sFloatMaxW = simd4f(0.0f, 0.0f, 0.0f, FLT_MAX);
+const Simd4fTupleFactory sMinusFloatMaxXYZ = simd4f(-FLT_MAX, -FLT_MAX, -FLT_MAX, 0.0f);
+
+/* static worker functions */
+
+/**
+ This function performs explicit Euler integration based on position, where
+ x_next = x_cur + (x_cur - x_prev) * dt_cur/dt_prev * damping + g * dt * dt
+ The g * dt * dt term is folded into accelIt.
+ */
+
+// Non-turning variant: damped position-Verlet step with a uniform scale.
+// scale = dt_cur/dt_prev * damping; accelIt already contains g*dt*dt.
+// Particles with non-finite mass encoding (w) are left in place.
+template <typename Simd4f, typename AccelerationIterator>
+void integrateParticles(Simd4f* __restrict curIt, Simd4f* __restrict curEnd, Simd4f* __restrict prevIt,
+ const Simd4f& scale, const AccelerationIterator& aIt, const Simd4f& prevBias)
+{
+ // local copy to avoid LHS
+ AccelerationIterator accelIt(aIt);
+
+ for (; curIt != curEnd; ++curIt, ++prevIt, ++accelIt)
+ {
+ Simd4f current = *curIt;
+ Simd4f previous = *prevIt;
+ // if (current.w == 0) current.w = previous.w
+ current = select(current > sMinusFloatMaxXYZ, current, previous);
+ Simd4f finiteMass = splat<3>(previous) > sFloatMaxW;
+ Simd4f delta = (current - previous) * scale + *accelIt;
+ *curIt = current + (delta & finiteMass);
+ // previous keeps its own w (inverse mass) but takes current's xyz.
+ *prevIt = select(sMaskW, previous, current) + (prevBias & finiteMass);
+ }
+}
+
+// Turning variant: the frame rotates, so the step applies full 3x3 matrices
+// to the current and previous positions instead of a scalar scale.
+template <typename Simd4f, typename AccelerationIterator>
+void integrateParticles(Simd4f* __restrict curIt, Simd4f* __restrict curEnd, Simd4f* __restrict prevIt,
+ const Simd4f (&prevMatrix)[3], const Simd4f (&curMatrix)[3], const AccelerationIterator& aIt,
+ const Simd4f& prevBias)
+{
+ // local copy to avoid LHS
+ AccelerationIterator accelIt(aIt);
+
+ for (; curIt != curEnd; ++curIt, ++prevIt, ++accelIt)
+ {
+ Simd4f current = *curIt;
+ Simd4f previous = *prevIt;
+ // if (current.w == 0) current.w = previous.w
+ current = select(current > sMinusFloatMaxXYZ, current, previous);
+ Simd4f finiteMass = splat<3>(previous) > sFloatMaxW;
+ // curMatrix * current + prevMatrix * previous + accel
+ Simd4f delta = cloth::transform(curMatrix, cloth::transform(prevMatrix, *accelIt, previous), current);
+ *curIt = current + (delta & finiteMass);
+ *prevIt = select(sMaskW, previous, current) + (prevBias & finiteMass);
+ }
+}
+
+// Pulls particles toward their motion-constraint spheres with the given
+// stiffness. Works on 4 particles per loop step (curIt advances by 4, so the
+// particle range is expected to be a multiple of 4). A constraint radius
+// scaled/biased to <= 0 also zeroes the particle's inverse mass, locking it.
+template <typename Simd4f, typename ConstraintIterator>
+void constrainMotion(Simd4f* __restrict curIt, const Simd4f* __restrict curEnd, const ConstraintIterator& spheres,
+ const Simd4f& scaleBiasStiffness)
+{
+ Simd4f scale = splat<0>(scaleBiasStiffness);
+ Simd4f bias = splat<1>(scaleBiasStiffness);
+ Simd4f stiffness = splat<3>(scaleBiasStiffness);
+
+ // local copy of iterator to maintain alignment
+ ConstraintIterator sphIt = spheres;
+
+ for (; curIt < curEnd; curIt += 4)
+ {
+ // todo: use msub where available
+ Simd4f curPos0 = curIt[0];
+ Simd4f curPos1 = curIt[1];
+ Simd4f curPos2 = curIt[2];
+ Simd4f curPos3 = curIt[3];
+
+ //delta.xyz = sphereCenter - currentPosition
+ //delta.w = sphereRadius
+ Simd4f delta0 = *sphIt - (sMaskXYZ & curPos0);
+ ++sphIt;
+ Simd4f delta1 = *sphIt - (sMaskXYZ & curPos1);
+ ++sphIt;
+ Simd4f delta2 = *sphIt - (sMaskXYZ & curPos2);
+ ++sphIt;
+ Simd4f delta3 = *sphIt - (sMaskXYZ & curPos3);
+ ++sphIt;
+
+ // Transpose to structure-of-arrays so all 4 particles are handled at once.
+ Simd4f deltaX = delta0, deltaY = delta1, deltaZ = delta2, deltaW = delta3;
+ transpose(deltaX, deltaY, deltaZ, deltaW);
+
+ Simd4f sqrLength = gSimd4fEpsilon + deltaX * deltaX + deltaY * deltaY + deltaZ * deltaZ;
+ Simd4f radius = max(gSimd4fZero, deltaW * scale + bias);
+
+ // slack > 0 means the particle lies outside its constraint sphere.
+ Simd4f slack = gSimd4fOne - radius * rsqrt(sqrLength);
+
+ // if slack <= 0.0f then we don't want to affect particle
+ // and can skip if all particles are unaffected
+ Simd4f isPositive;
+ if (anyGreater(slack, gSimd4fZero, isPositive))
+ {
+ // set invMass to zero if radius is zero
+ curPos0 = curPos0 & (splat<0>(radius) > sMinusFloatMaxXYZ);
+ curPos1 = curPos1 & (splat<1>(radius) > sMinusFloatMaxXYZ);
+ curPos2 = curPos2 & (splat<2>(radius) > sMinusFloatMaxXYZ);
+ curPos3 = curPos3 & ((radius) > sMinusFloatMaxXYZ);
+
+ slack = slack * stiffness & isPositive;
+
+ // Move each particle toward its sphere center by stiffness * slack.
+ curIt[0] = curPos0 + (delta0 & sMaskXYZ) * splat<0>(slack);
+ curIt[1] = curPos1 + (delta1 & sMaskXYZ) * splat<1>(slack);
+ curIt[2] = curPos2 + (delta2 & sMaskXYZ) * splat<2>(slack);
+ curIt[3] = curPos3 + (delta3 & sMaskXYZ) * splat<3>(slack);
+ }
+ }
+}
+
+// Pushes particles out of their separation spheres (the inverse of
+// constrainMotion: only negative slack, i.e. particles inside the sphere,
+// are corrected). Processes 4 particles per loop step.
+template <typename Simd4f, typename ConstraintIterator>
+void constrainSeparation(Simd4f* __restrict curIt, const Simd4f* __restrict curEnd, const ConstraintIterator& spheres)
+{
+ // local copy of iterator to maintain alignment
+ ConstraintIterator sphIt = spheres;
+
+ for (; curIt < curEnd; curIt += 4)
+ {
+ // todo: use msub where available
+ Simd4f curPos0 = curIt[0];
+ Simd4f curPos1 = curIt[1];
+ Simd4f curPos2 = curIt[2];
+ Simd4f curPos3 = curIt[3];
+
+ //delta.xyz = sphereCenter - currentPosition
+ //delta.w = sphereRadius
+ Simd4f delta0 = *sphIt - (sMaskXYZ & curPos0);
+ ++sphIt;
+ Simd4f delta1 = *sphIt - (sMaskXYZ & curPos1);
+ ++sphIt;
+ Simd4f delta2 = *sphIt - (sMaskXYZ & curPos2);
+ ++sphIt;
+ Simd4f delta3 = *sphIt - (sMaskXYZ & curPos3);
+ ++sphIt;
+
+ // Transpose to structure-of-arrays layout for the distance computation.
+ Simd4f deltaX = delta0, deltaY = delta1, deltaZ = delta2, deltaW = delta3;
+ transpose(deltaX, deltaY, deltaZ, deltaW);
+
+ Simd4f sqrLength = gSimd4fEpsilon + deltaX * deltaX + deltaY * deltaY + deltaZ * deltaZ;
+
+ Simd4f slack = gSimd4fOne - deltaW * rsqrt<1>(sqrLength);
+
+ // if slack >= 0.0f then we don't want to affect particle
+ // and can skip if all particles are unaffected
+ Simd4f isNegative;
+ if (anyGreater(gSimd4fZero, slack, isNegative))
+ {
+ slack = slack & isNegative;
+
+ // Negative slack moves particles away from the sphere center.
+ curIt[0] = curPos0 + (delta0 & sMaskXYZ) * splat<0>(slack);
+ curIt[1] = curPos1 + (delta1 & sMaskXYZ) * splat<1>(slack);
+ curIt[2] = curPos2 + (delta2 & sMaskXYZ) * splat<2>(slack);
+ curIt[3] = curPos3 + (delta3 & sMaskXYZ) * splat<3>(slack);
+ }
+ }
+}
+
+/**
+ Traditional Gauss-Seidel internal (distance) constraint solver, processing
+ 4 constraints per loop step via SIMD lanes.
+ posIt particle positions: xyz = position, w = inverse mass (PxVec4 stride)
+ rIt,rEnd rest length per constraint, 4 per step
+ stIt optional per-constraint stiffness values; may be nullptr, in which
+ case the uniform stiffness (stiffnessEtc.x) is used for all edges
+ iIt index pairs, 8 indices (4 edges) per step
+ stiffnessEtc x = stiffness, y = multiplier, z = compression limit,
+ w = stretch limit (y/z/w only read when useMultiplier)
+ stiffnessExponent exponent mapping per-constraint stiffness via exp2
+ */
+template <bool useMultiplier, typename Simd4f>
+void solveConstraints(float* __restrict posIt, const float* __restrict rIt, const float* __restrict stIt, const float* __restrict rEnd,
+ const uint16_t* __restrict iIt, const Simd4f& stiffnessEtc, const Simd4f& stiffnessExponent)
+{
+ Simd4f stretchLimit, compressionLimit, multiplier;
+ if (useMultiplier)
+ {
+ stretchLimit = splat<3>(stiffnessEtc);
+ compressionLimit = splat<2>(stiffnessEtc);
+ multiplier = splat<1>(stiffnessEtc);
+ }
+ Simd4f stiffness = splat<0>(stiffnessEtc);
+ bool useStiffnessPerConstraint = stIt!=nullptr;
+
+ // stIt is only advanced when it is non-null: the previous version did
+ // "stIt += 4" unconditionally, which is pointer arithmetic on a null
+ // pointer (undefined behavior) whenever per-constraint stiffness is absent.
+ for (; rIt != rEnd; rIt += 4, iIt += 8)
+ {
+ //Calculate particle byte offsets from the indices
+ uint32_t p0i = iIt[0] * sizeof(PxVec4);
+ uint32_t p0j = iIt[1] * sizeof(PxVec4);
+ uint32_t p1i = iIt[2] * sizeof(PxVec4);
+ uint32_t p1j = iIt[3] * sizeof(PxVec4);
+ uint32_t p2i = iIt[4] * sizeof(PxVec4);
+ uint32_t p2j = iIt[5] * sizeof(PxVec4);
+ uint32_t p3i = iIt[6] * sizeof(PxVec4);
+ uint32_t p3j = iIt[7] * sizeof(PxVec4);
+
+ //Load particle positions
+ //v.w = invMass
+ Simd4f v0i = loadAligned(posIt, p0i);
+ Simd4f v0j = loadAligned(posIt, p0j);
+ Simd4f v1i = loadAligned(posIt, p1i);
+ Simd4f v1j = loadAligned(posIt, p1j);
+ Simd4f v2i = loadAligned(posIt, p2i);
+ Simd4f v2j = loadAligned(posIt, p2j);
+ Simd4f v3i = loadAligned(posIt, p3i);
+ Simd4f v3j = loadAligned(posIt, p3j);
+
+ //offset.xyz = posB - posA
+ //offset.w = invMassB + invMassA
+ Simd4f h0ij = v0j + v0i * sMinusOneXYZOneW;
+ Simd4f h1ij = v1j + v1i * sMinusOneXYZOneW;
+ Simd4f h2ij = v2j + v2i * sMinusOneXYZOneW;
+ Simd4f h3ij = v3j + v3i * sMinusOneXYZOneW;
+
+ //h xyz = offset
+ //vw = invMass sum
+ Simd4f hxij = h0ij, hyij = h1ij, hzij = h2ij, vwij = h3ij;
+ transpose(hxij, hyij, hzij, vwij);
+
+ //load rest lengths
+ Simd4f rij = loadAligned(rIt);
+
+ //Load/calculate the constraint stiffness; advance stIt only if present.
+ Simd4f stij;
+ if (useStiffnessPerConstraint)
+ {
+ stij = gSimd4fOne - exp2(stiffnessExponent * static_cast<Simd4f>(loadAligned(stIt)));
+ stIt += 4;
+ }
+ else
+ {
+ stij = stiffness;
+ }
+
+ //squared distance between particles: e2 = epsilon + |h|^2
+ Simd4f e2ij = gSimd4fEpsilon + hxij * hxij + hyij * hyij + hzij * hzij;
+
+ //slack: er = 1 - r / sqrt(e2)
+ // or er = 0 if rest length < epsilon
+ Simd4f erij = (gSimd4fOne - rij * rsqrt(e2ij)) & (rij > gSimd4fEpsilon);
+
+ if (useMultiplier)
+ {
+ erij = erij - multiplier * max(compressionLimit, min(erij, stretchLimit));
+ }
+
+ //ex = er * stiffness / sqrt(epsilon + vw)
+ Simd4f exij = erij * stij * recip(gSimd4fEpsilon + vwij);
+
+ //h = h * ex
+ h0ij = h0ij * splat<0>(exij) & sMaskXYZ;
+ h1ij = h1ij * splat<1>(exij) & sMaskXYZ;
+ h2ij = h2ij * splat<2>(exij) & sMaskXYZ;
+ h3ij = h3ij * splat<3>(exij) & sMaskXYZ;
+
+ //pos = pos + h * invMass
+ storeAligned(posIt, p0i, v0i + h0ij * splat<3>(v0i));
+ storeAligned(posIt, p0j, v0j - h0ij * splat<3>(v0j));
+ storeAligned(posIt, p1i, v1i + h1ij * splat<3>(v1i));
+ storeAligned(posIt, p1j, v1j - h1ij * splat<3>(v1j));
+ storeAligned(posIt, p2i, v2i + h2ij * splat<3>(v2i));
+ storeAligned(posIt, p2j, v2j - h2ij * splat<3>(v2j));
+ storeAligned(posIt, p3i, v3i + h3ij * splat<3>(v3i));
+ storeAligned(posIt, p3j, v3j - h3ij * splat<3>(v3j));
+ }
+}
+
+#if PX_WINDOWS_FAMILY
+#include "sse2/SwSolveConstraints.h"
+#endif
+
+// Returns the component-wise maximum |cur - prev| over all particles
+// (an upper bound of the per-step position deltas); w lane is cleared.
+template <typename Simd4f>
+Simd4f calculateMaxDelta(const Simd4f* prevIt, const Simd4f* curIt, const Simd4f* curEnd)
+{
+ Simd4f result = gSimd4fZero;
+ while (curIt < curEnd)
+ {
+ result = max(result, abs(*curIt - *prevIt));
+ ++curIt;
+ ++prevIt;
+ }
+ return result & sMaskXYZ;
+}
+
+// Applies aerodynamic drag and lift per triangle: the triangle's velocity
+// relative to the wind is estimated from the motion of its centroid, then a
+// drag/lift impulse is distributed to its three vertices (scaled by each
+// vertex's inverse mass in w). IsTurning additionally accounts for the
+// rotation of the local frame.
+template <bool IsTurning, typename Simd4f>
+void applyWind(Simd4f* __restrict curIt, const Simd4f* __restrict prevIt, const uint16_t* __restrict tIt,
+ const uint16_t* __restrict tEnd, Simd4f dragCoefficient, Simd4f liftCoefficient, Simd4f wind,
+ const Simd4f (&rotation)[3])
+{
+ const Simd4f oneThird = simd4f(1.0f / 3.0f);
+
+ for (; tIt < tEnd; tIt += 3)
+ {
+ //Get the triangle vertex indices
+ uint16_t i0 = tIt[0];
+ uint16_t i1 = tIt[1];
+ uint16_t i2 = tIt[2];
+
+ //Get the current particle positions
+ Simd4f c0 = curIt[i0];
+ Simd4f c1 = curIt[i1];
+ Simd4f c2 = curIt[i2];
+
+ //Previous positions
+ Simd4f p0 = prevIt[i0];
+ Simd4f p1 = prevIt[i1];
+ Simd4f p2 = prevIt[i2];
+
+ // Triangle centroids, current and previous.
+ Simd4f current = oneThird * (c0 + c1 + c2);
+ Simd4f previous = oneThird * (p0 + p1 + p2);
+
+ //offset of the triangle center, including wind
+ Simd4f delta = current - previous + wind;
+
+ if (IsTurning)
+ {
+ // add rotation of frame
+ //rotation = inverse local space rotation for one iteration
+ delta = cloth::transform(rotation, current, delta - current);
+ }
+
+ //not normalized
+ Simd4f normal = cross3(c2 - c0, c1 - c0);
+
+ Simd4f doubleArea = sqrt(dot3(normal, normal));
+
+ Simd4f invSqrScale = dot3(delta, delta);
+ Simd4f isZero = invSqrScale < gSimd4fEpsilon; // guards rsqrt of ~0 below
+ Simd4f scale = rsqrt(invSqrScale);
+
+ //scale 'normalizes' delta, doubleArea normalized normal
+ Simd4f cosTheta = dot3(normal, delta) * scale / doubleArea;
+ Simd4f sinTheta = sqrt(max(gSimd4fZero, gSimd4fOne - cosTheta * cosTheta));
+
+ // orthogonal to delta, in delta-normal plane, same length as delta
+ Simd4f liftDir = cross3(cross3(delta, normal), delta * scale);
+
+ // sin(theta) * cos(theta) = 0.5 * sin(2 * theta)
+ Simd4f lift = liftCoefficient * cosTheta * sinTheta * liftDir;
+ Simd4f drag = dragCoefficient * abs(cosTheta) * doubleArea * delta; //dragCoefficient should compensate for double area
+
+ // Zero the impulse for degenerate (near-stationary) triangles.
+ Simd4f impulse = (lift + drag) & ~isZero;
+
+ // Distribute the impulse, weighted by each vertex's inverse mass (w).
+ curIt[i0] = c0 - impulse * splat<3>(c0);
+ curIt[i1] = c1 - impulse * splat<3>(c1);
+ curIt[i2] = c2 - impulse * splat<3>(c2);
+ }
+}
+
+} // anonymous namespace
+
+// Gathers everything one solver step needs: the immutable cloth description,
+// the writable per-frame cloth data, the scratch allocator, and the iteration
+// state derived from the frame's delta time.
+template <typename Simd4f>
+cloth::SwSolverKernel<Simd4f>::SwSolverKernel(SwCloth const& cloth, SwClothData& clothData,
+ SwKernelAllocator& allocator, IterationStateFactory& factory)
+: mCloth(cloth)
+, mClothData(clothData)
+, mAllocator(allocator)
+, mCollision(clothData, allocator)
+, mSelfCollision(clothData, allocator)
+, mState(factory.create<Simd4f>(cloth))
+{
+ mClothData.verify(); // sanity-check the collected pointers and sizes
+}
+
+// Entry point: runs one full simulation step for this cloth by forwarding to
+// simulateCloth() (defined later in this file).
+template <typename Simd4f>
+void cloth::SwSolverKernel<Simd4f>::operator()()
+{
+ simulateCloth();
+}
+
+// Upper bound of the scratch memory one solver step needs for this cloth,
+// including a conservative allowance for allocator book-keeping.
+template <typename Simd4f>
+size_t cloth::SwSolverKernel<Simd4f>::estimateTemporaryMemory(const SwCloth& cloth)
+{
+ // Collision and self-collision scratch buffers are not live at the same
+ // time, so only the larger of the two estimates must be reserved.
+ size_t scratchBytes = std::max(SwCollision<Simd4f>::estimateTemporaryMemory(cloth),
+ SwSelfCollision<Simd4f>::estimateTemporaryMemory(cloth));
+
+ // Collision data that persists for the duration of the step.
+ size_t persistentBytes = SwCollision<Simd4f>::estimatePersistentMemory(cloth);
+
+ // account for any allocator overhead (this could be exposed in the allocator)
+ const size_t maxAllocationCount = 32;
+ const size_t overheadPerAllocation = 32;
+
+ return maxAllocationCount * overheadPerAllocation + persistentBytes + scratchBytes;
+}
+
+// Dispatches particle integration to the appropriate free-function variant:
+// scalar-scale when the frame is not rotating, matrix-based when it is.
+template <typename Simd4f>
+template <typename AccelerationIterator>
+void cloth::SwSolverKernel<Simd4f>::integrateParticles(AccelerationIterator& accelIt, const Simd4f& prevBias)
+{
+ Simd4f* curIt = reinterpret_cast<Simd4f*>(mClothData.mCurParticles);
+ Simd4f* curEnd = curIt + mClothData.mNumParticles;
+ Simd4f* prevIt = reinterpret_cast<Simd4f*>(mClothData.mPrevParticles);
+
+ if (!mState.mIsTurning)
+ {
+ //We use mPrevMatrix to store the scale if we are not rotating
+ ::integrateParticles(curIt, curEnd, prevIt, mState.mPrevMatrix[0], accelIt, prevBias);
+ }
+ else
+ {
+ ::integrateParticles(curIt, curEnd, prevIt, mState.mPrevMatrix, mState.mCurMatrix, accelIt, prevBias);
+ }
+}
+
+// Integrates all particles for one iteration, choosing between a constant
+// acceleration (gravity only) and per-particle accelerations when provided.
+template <typename Simd4f>
+void cloth::SwSolverKernel<Simd4f>::integrateParticles()
+{
+ NV_CLOTH_PROFILE_ZONE("cloth::SwSolverKernel::integrateParticles", /*ProfileContext::None*/ 0);
+
+ const Simd4f* startAccelIt = reinterpret_cast<const Simd4f*>(mClothData.mParticleAccelerations);
+
+ // dt^2 (todo: should this be the smoothed dt used for gravity?)
+ const Simd4f sqrIterDt = simd4f(sqr(mState.mIterDt)) & static_cast<Simd4f>(sMaskXYZ);
+
+ if (!startAccelIt)
+ {
+ // no per-particle accelerations, use a constant
+ ConstantIterator<Simd4f> accelIt(mState.mCurBias);
+ integrateParticles(accelIt, mState.mPrevBias);
+ }
+ else
+ {
+ // iterator implicitly scales by dt^2 and adds gravity
+ ScaleBiasIterator<Simd4f, const Simd4f*> accelIt(startAccelIt, sqrIterDt, mState.mCurBias);
+ integrateParticles(accelIt, mState.mPrevBias);
+ }
+}
+
+// Enforces tether (long-range attachment) constraints: each particle is
+// pulled toward its anchors when it strays beyond the scaled tether length.
+// No-op when stiffness is zero or there are no tethers.
+template <typename Simd4f>
+void cloth::SwSolverKernel<Simd4f>::constrainTether()
+{
+ if (0.0f == mClothData.mTetherConstraintStiffness || !mClothData.mNumTethers)
+ return;
+
+ NV_CLOTH_PROFILE_ZONE("cloth::SwSolverKernel::solveTethers", /*ProfileContext::None*/ 0);
+
+ uint32_t numParticles = mClothData.mNumParticles;
+ uint32_t numTethers = mClothData.mNumTethers;
+ NV_CLOTH_ASSERT(0 == numTethers % numParticles); // the particles can have multiple tethers, but each particle has the same amount
+
+ //particle iterators
+ float* __restrict curIt = mClothData.mCurParticles;
+ const float* __restrict curFirst = curIt;
+ const float* __restrict curEnd = curIt + 4 * numParticles;
+
+ //Tether iterators
+ typedef const SwTether* __restrict TetherIter;
+ TetherIter tFirst = mClothData.mTethers;
+ TetherIter tEnd = tFirst + numTethers;
+
+ //Tether properties
+ // Stiffness is divided by the tether count per particle so that multiple
+ // tethers on one particle sum to the configured stiffness.
+ Simd4f stiffness =
+ static_cast<Simd4f>(sMaskXYZ) & simd4f(numParticles * mClothData.mTetherConstraintStiffness / numTethers);
+ Simd4f scale = simd4f(mClothData.mTetherConstraintScale);
+
+ //Loop through all particles
+ for (; curIt != curEnd; curIt += 4, ++tFirst)
+ {
+ Simd4f position = loadAligned(curIt); //Get the first particle
+ Simd4f offset = gSimd4fZero; //We accumulate the offset in this variable
+
+ //Loop through all tethers connected to our particle
+ // (tethers are interleaved with stride numParticles)
+ for (TetherIter tIt = tFirst; tIt < tEnd; tIt += numParticles)
+ {
+ NV_CLOTH_ASSERT(tIt->mAnchor < numParticles);
+
+ //Get the particle on the other end of the tether
+ Simd4f anchor = loadAligned(curFirst, tIt->mAnchor * sizeof(PxVec4));
+ Simd4f delta = anchor - position;
+ Simd4f sqrLength = gSimd4fEpsilon + dot3(delta, delta);
+
+ Simd4f tetherLength = load(&tIt->mLength);
+ tetherLength = splat<0>(tetherLength);
+
+ Simd4f radius = tetherLength * scale;
+ // slack > 0 only when the particle is farther than the tether allows.
+ Simd4f slack = gSimd4fOne - radius * rsqrt(sqrLength);
+
+ offset = offset + delta * max(slack, gSimd4fZero);
+ }
+
+ storeAligned(curIt, position + offset * stiffness); //Apply accumulated offset
+ }
+}
+
+// Solves the fabric's distance (edge) constraints, one phase config at a time.
+// Each phase references a set of constraints (rest values + particle index
+// pairs); the per-phase stiffness is remapped from a per-millisecond error
+// fraction to a per-iteration value before dispatching to the SIMD kernel.
+template <typename Simd4f>
+void cloth::SwSolverKernel<Simd4f>::solveFabric()
+{
+ NV_CLOTH_PROFILE_ZONE("cloth::SwSolverKernel::solveFabric", /*ProfileContext::None*/ 0);
+
+ float* pIt = mClothData.mCurParticles;
+
+ //Phase configuration
+ const PhaseConfig* cIt = mClothData.mConfigBegin;
+ const PhaseConfig* cEnd = mClothData.mConfigEnd;
+
+ const uint32_t* pBegin = mClothData.mPhases;
+ const float* rBegin = mClothData.mRestvalues;
+ const float* stBegin = mClothData.mStiffnessValues;
+
+ const uint32_t* sBegin = mClothData.mSets;
+ const uint16_t* iBegin = mClothData.mIndices;
+
+ // NOTE(review): accumulated below but not consumed in this function —
+ // presumably kept for profiling/debugging; confirm before removing.
+ uint32_t totalConstraints = 0;
+
+ Simd4f stiffnessExponent = simd4f(mCloth.mStiffnessFrequency * mState.mIterDt);
+
+ //Loop through all phase configs
+ for (; cIt != cEnd; ++cIt)
+ {
+ //Get the set for this config
+ const uint32_t* sIt = sBegin + pBegin[cIt->mPhaseIndex];
+
+ //Get rest value iterators from set
+ const float* rIt = rBegin + sIt[0];
+ const float* rEnd = rBegin + sIt[1]; //start of next set is the end of ours
+ // per-constraint stiffness values are optional
+ const float* stIt = stBegin?stBegin + sIt[0]:nullptr;
+
+ //Constraint particle indices
+ const uint16_t* iIt = iBegin + sIt[0] * 2; //x2 as we have 2 indices for every rest length
+
+ totalConstraints += uint32_t(rEnd - rIt);
+
+ // (stiffness, multiplier, compressionLimit, stretchLimit)
+ Simd4f config = load(&cIt->mStiffness);
+ // stiffness specified as fraction of constraint error per-millisecond
+ Simd4f scaledConfig = gSimd4fOne - exp2(config * stiffnessExponent);
+ Simd4f stiffness = select(sMaskXY, scaledConfig, config);
+
+ // non-zero when multiplier/limits are all neutral, selecting the cheaper kernel
+ int neutralMultiplier = allEqual(sMaskYZW & stiffness, gSimd4fZero);
+
+#if NV_AVX
+ // runtime dispatch: 2 = AVX2 kernel (only compiled with VS2012+), 1 = AVX
+ switch(sAvxSupport)
+ {
+ case 2:
+#if _MSC_VER >= 1700
+ neutralMultiplier ? avx::solveConstraints<false, 2>(pIt, rIt, stIt, rEnd, iIt, stiffness, stiffnessExponent)
+ : avx::solveConstraints<true, 2>(pIt, rIt, stIt, rEnd, iIt, stiffness, stiffnessExponent);
+ break;
+#endif
+ // intentional fallthrough to the AVX1 kernel when the AVX2 path is not compiled in
+ case 1:
+ neutralMultiplier ? avx::solveConstraints<false, 1>(pIt, rIt, stIt, rEnd, iIt, stiffness, stiffnessExponent)
+ : avx::solveConstraints<true, 1>(pIt, rIt, stIt, rEnd, iIt, stiffness, stiffnessExponent);
+ break;
+ default:
+#endif
+ neutralMultiplier ? solveConstraints<false>(pIt, rIt, stIt, rEnd, iIt, stiffness, stiffnessExponent)
+ : solveConstraints<true>(pIt, rIt, stIt, rEnd, iIt, stiffness, stiffnessExponent);
+#if NV_AVX
+ break;
+ }
+#endif
+ }
+}
+
+// Applies aerodynamic drag and lift per triangle. No-op when both
+// coefficients are zero. Dispatches on whether the local frame is rotating
+// so the kernel can account for the rotation matrix.
+template <typename Simd4f>
+void cloth::SwSolverKernel<Simd4f>::applyWind()
+{
+ if (mClothData.mDragCoefficient == 0.0f && mClothData.mLiftCoefficient == 0.0f)
+ return;
+
+ NV_CLOTH_PROFILE_ZONE("cloth::SwSolverKernel::applyWind", /*ProfileContext::None*/ 0);
+
+ Simd4f* curIt = reinterpret_cast<Simd4f*>(mClothData.mCurParticles);
+ Simd4f* prevIt = reinterpret_cast<Simd4f*>(mClothData.mPrevParticles);
+
+ // triangle list: 3 particle indices per triangle
+ const uint16_t* tIt = mClothData.mTriangles;
+ const uint16_t* tEnd = tIt + 3 * mClothData.mNumTriangles;
+
+ Simd4f dragCoefficient = simd4f(mClothData.mDragCoefficient);
+ Simd4f liftCoefficient = simd4f(mClothData.mLiftCoefficient);
+
+ if (mState.mIsTurning)
+ {
+ ::applyWind<true>(curIt, prevIt, tIt, tEnd, dragCoefficient, liftCoefficient, mState.mWind,
+ mState.mRotationMatrix);
+ }
+ else
+ {
+ ::applyWind<false>(curIt, prevIt, tIt, tEnd, dragCoefficient, liftCoefficient, mState.mWind,
+ mState.mRotationMatrix);
+ }
+}
+
+// Clamps particles to their motion-constraint spheres. When both start and
+// target constraint buffers exist, the sphere positions are interpolated
+// across iterations (exactly the targets on the last iteration).
+template <typename Simd4f>
+void cloth::SwSolverKernel<Simd4f>::constrainMotion()
+{
+ if (!mClothData.mStartMotionConstraints)
+ return;
+
+ NV_CLOTH_PROFILE_ZONE("cloth::SwSolverKernel::constrainMotion", /*ProfileContext::None*/ 0);
+
+ Simd4f* curIt = reinterpret_cast<Simd4f*>(mClothData.mCurParticles);
+ Simd4f* curEnd = curIt + mClothData.mNumParticles;
+
+ const Simd4f* startIt = reinterpret_cast<const Simd4f*>(mClothData.mStartMotionConstraints);
+ const Simd4f* targetIt = reinterpret_cast<const Simd4f*>(mClothData.mTargetMotionConstraints);
+
+ // pack (scale, bias, -, stiffness): xyz from scale/bias, w from stiffness
+ Simd4f scaleBias = load(&mCloth.mMotionConstraintScale);
+ Simd4f stiffness = simd4f(mClothData.mMotionConstraintStiffness);
+ Simd4f scaleBiasStiffness = select(sMaskXYZ, scaleBias, stiffness);
+
+ if (!mClothData.mTargetMotionConstraints)
+ {
+ // no interpolation, use the start positions
+ return ::constrainMotion(curIt, curEnd, startIt, scaleBiasStiffness);
+ }
+
+ if (mState.mRemainingIterations == 1)
+ {
+ // use the target positions on last iteration
+ return ::constrainMotion(curIt, curEnd, targetIt, scaleBiasStiffness);
+ }
+
+ // otherwise use an interpolating iterator
+ LerpIterator<Simd4f, const Simd4f*> interpolator(startIt, targetIt, mState.getCurrentAlpha());
+ ::constrainMotion(curIt, curEnd, interpolator, scaleBiasStiffness);
+}
+
+// Enforces separation constraints (keep particles outside spheres), with the
+// same start/target interpolation scheme as constrainMotion().
+template <typename Simd4f>
+void cloth::SwSolverKernel<Simd4f>::constrainSeparation()
+{
+ if (!mClothData.mStartSeparationConstraints)
+ return;
+
+ NV_CLOTH_PROFILE_ZONE("cloth::SwSolverKernel::constrainSeparation", /*ProfileContext::None*/ 0);
+
+ Simd4f* curIt = reinterpret_cast<Simd4f*>(mClothData.mCurParticles);
+ Simd4f* curEnd = curIt + mClothData.mNumParticles;
+
+ const Simd4f* startIt = reinterpret_cast<const Simd4f*>(mClothData.mStartSeparationConstraints);
+ const Simd4f* targetIt = reinterpret_cast<const Simd4f*>(mClothData.mTargetSeparationConstraints);
+
+ if (!mClothData.mTargetSeparationConstraints)
+ {
+ // no interpolation, use the start positions
+ return ::constrainSeparation(curIt, curEnd, startIt);
+ }
+
+ if (mState.mRemainingIterations == 1)
+ {
+ // use the target positions on last iteration
+ return ::constrainSeparation(curIt, curEnd, targetIt);
+ }
+ // otherwise use an interpolating iterator
+ LerpIterator<Simd4f, const Simd4f*> interpolator(startIt, targetIt, mState.getCurrentAlpha());
+ ::constrainSeparation(curIt, curEnd, interpolator);
+}
+
+// Runs character collision by invoking the SwCollision functor with the
+// current iteration state.
+template <typename Simd4f>
+void cloth::SwSolverKernel<Simd4f>::collideParticles()
+{
+ NV_CLOTH_PROFILE_ZONE("cloth::SwSolverKernel::collideParticles", /*ProfileContext::None*/ 0);
+
+ mCollision(mState);
+}
+
+// Runs cloth self collision by invoking the SwSelfCollision functor.
+template <typename Simd4f>
+void cloth::SwSolverKernel<Simd4f>::selfCollideParticles()
+{
+ NV_CLOTH_PROFILE_ZONE("cloth::SwSolverKernel::selfCollideParticles", /*ProfileContext::None*/ 0);
+
+ mSelfCollision();
+}
+
+// Advances the sleep test: every mSleepTestInterval milliseconds of simulated
+// time, measures the largest per-particle displacement since the previous
+// iteration; the pass counter is reset whenever movement exceeds the
+// velocity-derived threshold (the caller decides sleeping from the counter).
+template <typename Simd4f>
+void cloth::SwSolverKernel<Simd4f>::updateSleepState()
+{
+ NV_CLOTH_PROFILE_ZONE("cloth::SwSolverKernel::updateSleepState", /*ProfileContext::None*/ 0);
+
+ // counter advances in (at least one) milliseconds of iteration time
+ mClothData.mSleepTestCounter += std::max(1u, uint32_t(mState.mIterDt * 1000));
+ if (mClothData.mSleepTestCounter >= mCloth.mSleepTestInterval)
+ {
+ const Simd4f* prevIt = reinterpret_cast<Simd4f*>(mClothData.mPrevParticles);
+ const Simd4f* curIt = reinterpret_cast<Simd4f*>(mClothData.mCurParticles);
+ const Simd4f* curEnd = curIt + mClothData.mNumParticles;
+
+ // calculate max particle delta since last iteration
+ Simd4f maxDelta = calculateMaxDelta(prevIt, curIt, curEnd);
+
+ ++mClothData.mSleepPassCounter;
+ // threshold scales with dt so it represents a velocity limit
+ Simd4f threshold = simd4f(mCloth.mSleepThreshold * mState.mIterDt);
+ if (anyGreaterEqual(maxDelta, threshold))
+ mClothData.mSleepPassCounter = 0;
+
+ mClothData.mSleepTestCounter -= mCloth.mSleepTestInterval;
+ }
+}
+
+// Performs one full solver iteration: integration, wind, all constraint
+// passes, collision, and the sleep test, in the fixed order below.
+template <typename Simd4f>
+void cloth::SwSolverKernel<Simd4f>::iterateCloth()
+{
+ // note on invMass (stored in current/previous positions.w):
+ // integrateParticles()
+ // - if (current.w == 0) current.w = previous.w
+ // constraintMotion()
+ // - if (constraint.radius <= 0) current.w = 0
+ // computeBounds()
+ // - if (current.w > 0) current.w = previous.w
+ // collideParticles()
+ // - if (collides) current.w *= 1/massScale
+ // after simulate()
+ // - previous.w: original invMass as set by user
+ // - current.w: zeroed by motion constraints and mass-scaled by collision
+
+ // integrate positions
+ integrateParticles();
+
+ // apply drag and lift
+ applyWind();
+
+ // motion constraints
+ constrainMotion();
+
+ // solve tether constraints
+ constrainTether();
+
+ // solve edge constraints
+ solveFabric();
+
+ // separation constraints
+ constrainSeparation();
+
+ // perform character collision
+ collideParticles();
+
+ // perform self collision
+ selfCollideParticles();
+
+ // test wake / sleep conditions
+ updateSleepState();
+}
+
+// Runs iterateCloth() until the iteration state reports no remaining
+// iterations; mState.update() advances counters between iterations.
+template <typename Simd4f>
+void cloth::SwSolverKernel<Simd4f>::simulateCloth()
+{
+ while (mState.mRemainingIterations)
+ {
+ iterateCloth();
+ mState.update();
+ }
+}
+
+// explicit template instantiation
+#if NV_SIMD_SIMD
+template class cloth::SwSolverKernel<Simd4f>;
+#endif
+#if NV_SIMD_SCALAR
+template class cloth::SwSolverKernel<Scalar4f>;
+#endif
diff --git a/NvCloth/src/SwSolverKernel.h b/NvCloth/src/SwSolverKernel.h
new file mode 100644
index 0000000..f71514d
--- /dev/null
+++ b/NvCloth/src/SwSolverKernel.h
@@ -0,0 +1,84 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2017 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#pragma once
+
+#include "IterationState.h"
+#include "SwCollision.h"
+#include "SwSelfCollision.h"
+
+namespace nv
+{
+namespace cloth
+{
+
+class SwCloth;
+struct SwClothData;
+
+// CPU cloth solver kernel, templated on the SIMD vector type (Simd4f or
+// Scalar4f). Owns the per-solve collision helpers and iteration state;
+// invoked through operator() to simulate one cloth instance.
+template <typename Simd4f>
+class SwSolverKernel
+{
+ public:
+ SwSolverKernel(SwCloth const&, SwClothData&, SwKernelAllocator&, IterationStateFactory&);
+
+ // runs the full simulation for this cloth
+ void operator()();
+
+ // returns a conservative estimate of the
+ // total memory requirements during a solve
+ static size_t estimateTemporaryMemory(const SwCloth& c);
+
+ private:
+ // individual solver passes, executed in order by iterateCloth()
+ void integrateParticles();
+ void constrainTether();
+ void solveFabric();
+ void applyWind();
+ void constrainMotion();
+ void constrainSeparation();
+ void collideParticles();
+ void selfCollideParticles();
+ void updateSleepState();
+
+ void iterateCloth();
+ void simulateCloth();
+
+ SwCloth const& mCloth;
+ SwClothData& mClothData;
+ SwKernelAllocator& mAllocator;
+
+ SwCollision<Simd4f> mCollision;
+ SwSelfCollision<Simd4f> mSelfCollision;
+ IterationState<Simd4f> mState;
+
+ private:
+ // non-assignable (references members)
+ SwSolverKernel<Simd4f>& operator = (const SwSolverKernel<Simd4f>&);
+ // integration core shared by the acceleration-iterator variants
+ template <typename AccelerationIterator>
+ void integrateParticles(AccelerationIterator& accelIt, const Simd4f&);
+};
+}
+}
diff --git a/NvCloth/src/TripletScheduler.cpp b/NvCloth/src/TripletScheduler.cpp
new file mode 100644
index 0000000..e051131
--- /dev/null
+++ b/NvCloth/src/TripletScheduler.cpp
@@ -0,0 +1,242 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2017 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#include "TripletScheduler.h"
+#include <algorithm>
+#include <PsUtilities.h>
+
+using namespace physx;
+using namespace nv;
+
+// Copies the input triplet range (each entry: three particle indices plus
+// padding) into the internal Vec4u vector for scheduling.
+cloth::TripletScheduler::TripletScheduler(Range<const uint32_t[4]> triplets)
+: mTriplets(reinterpret_cast<const Vec4u*>(triplets.begin()), reinterpret_cast<const Vec4u*>(triplets.end()))
+{
+}
+
+// SSE version
+// Reorders mTriplets into consecutive sets where each aligned group of
+// simdWidth triplets shares no particle, so a SIMD kernel can process them
+// in parallel; set sizes (multiples of simdWidth, except possibly the last)
+// are appended to mSetSizes.
+void cloth::TripletScheduler::simd(uint32_t numParticles, uint32_t simdWidth)
+{
+ if (mTriplets.empty())
+ return;
+
+ // mark[p] == setIndex means particle p is already used in the current batch
+ Vector<uint32_t>::Type mark(numParticles, uint32_t(-1));
+
+ uint32_t setIndex = 0, setSize = 0;
+ for (TripletIter tIt = mTriplets.begin(), tEnd = mTriplets.end(); tIt != tEnd; ++setIndex)
+ {
+ TripletIter tLast = tIt + std::min(simdWidth, uint32_t(tEnd - tIt));
+ TripletIter tSwap = tEnd;
+
+ for (; tIt != tLast && tIt != tSwap; ++tIt, ++setSize)
+ {
+ // swap from tail until independent triplet found
+ while ((mark[tIt->x] == setIndex || mark[tIt->y] == setIndex || mark[tIt->z] == setIndex) && tIt != --tSwap)
+ std::iter_swap(tIt, tSwap);
+
+ if (tIt == tSwap)
+ break; // no independent triplet found
+
+ // mark vertices to be used in simdIndex
+ mark[tIt->x] = setIndex;
+ mark[tIt->y] = setIndex;
+ mark[tIt->z] = setIndex;
+ }
+
+ if (tIt == tSwap) // remaining triplets depend on current set
+ {
+ if (setSize > simdWidth) // trim set to multiple of simdWidth
+ {
+ uint32_t overflow = setSize % simdWidth;
+ setSize -= overflow;
+ tIt -= overflow; // trimmed triplets are rescheduled into the next set
+ }
+ mSetSizes.pushBack(setSize);
+ setSize = 0;
+ }
+ }
+}
+
+namespace
+{
+// Book-keeping for one warp-sized scheduling set: tracks which triplet last
+// touched it (mMark) and, per vertex slot, how often each of the 32 shared
+// memory banks is hit (mNumConflicts) plus the resulting replay count.
+struct TripletSet
+{
+ TripletSet() : mMark(0xFFFFFFFF)
+ {
+ mNumReplays[0] = mNumReplays[1] = mNumReplays[2] = 1;
+ // 32 == sizeof(mNumConflicts[i]): one uint8 counter per bank
+ memset(mNumConflicts[0], 0, 32);
+ memset(mNumConflicts[1], 0, 32);
+ memset(mNumConflicts[2], 0, 32);
+ }
+
+ uint32_t mMark; // triplet index
+ uint8_t mNumReplays[3];
+ uint8_t mNumConflicts[3][32];
+};
+
+/*
+struct GreaterSum
+{
+ typedef cloth::Vector<uint32_t>::Type Container;
+
+ GreaterSum(const Container& cont)
+ : mContainer(cont)
+ {}
+
+ bool operator()(const cloth::Vec4u& a, const cloth::Vec4u& b) const
+ {
+ return mContainer[a.x] + mContainer[a.y] + mContainer[a.z]
+ > mContainer[b.x] + mContainer[b.y] + mContainer[b.z];
+ }
+
+ const Container& mContainer;
+};
+*/
+
+// calculate the inclusive prefix sum, equivalent of std::partial_sum
+// (safe for in-place use: dest may equal first, as warp() relies on)
+template <typename Iter>
+void prefixSum(Iter first, Iter last, Iter dest)
+{
+ if (first == last)
+ return;
+ else
+ {
+ *(dest++) = *(first++);
+
+ for (; first != last; ++first, ++dest)
+ *dest = *(dest - 1) + *first;
+ }
+}
+}
+
+// CUDA version
+// Greedily colors triplets into warp-sized sets so that no two triplets in a
+// set share a particle, preferring the set that adds the fewest shared-memory
+// bank-conflict replays; finally reorders mTriplets by set and fills
+// mSetSizes. (Bank count of 32 is implied by the conflict tables above.)
+void cloth::TripletScheduler::warp(uint32_t numParticles, uint32_t warpWidth)
+{
+ // NV_CLOTH_ASSERT(warpWidth == 32 || warpWidth == 16);
+
+ if (mTriplets.empty())
+ return;
+
+ TripletIter tIt, tEnd = mTriplets.end();
+ uint32_t tripletIndex;
+
+ // count number of triplets per particle
+ Vector<uint32_t>::Type adjacentCount(numParticles + 1, uint32_t(0));
+ for (tIt = mTriplets.begin(); tIt != tEnd; ++tIt)
+ for (int i = 0; i < 3; ++i)
+ ++adjacentCount[(*tIt)[i]];
+
+ /* neither of those were really improving number of batches:
+ // run simd version to pre-sort particles
+ simd(numParticles, blockWidth); mSetSizes.resize(0);
+ // sort according to triplet degree (estimated by sum of adjacentCount)
+ std::sort(mTriplets.begin(), tEnd, GreaterSum(adjacentCount));
+ */
+
+ // lower bound on the number of sets: a particle in k triplets needs k sets
+ uint32_t maxTripletCount = *shdfnd::maxElement(adjacentCount.begin(), adjacentCount.end());
+
+ // compute in place prefix sum (inclusive)
+ prefixSum(adjacentCount.begin(), adjacentCount.end(), adjacentCount.begin());
+
+ // initialize adjacencies (for each particle, collect touching triplets)
+ // also converts partial sum in adjacentCount from inclusive to exclusive
+ // (CSR layout: adjacentCount[p]..adjacentCount[p+1] indexes adjacencies)
+ Vector<uint32_t>::Type adjacencies(adjacentCount.back());
+ for (tIt = mTriplets.begin(), tripletIndex = 0; tIt != tEnd; ++tIt, ++tripletIndex)
+ for (int i = 0; i < 3; ++i)
+ adjacencies[--adjacentCount[(*tIt)[i]]] = tripletIndex;
+
+ uint32_t warpMask = warpWidth - 1;
+
+ uint32_t numSets = maxTripletCount; // start with minimum number of sets
+ Vector<TripletSet>::Type sets(numSets);
+ Vector<uint32_t>::Type setIndices(mTriplets.size(), uint32_t(-1));
+ mSetSizes.resize(numSets);
+
+ // color triplets (assign to sets)
+ Vector<uint32_t>::Type::ConstIterator aBegin = adjacencies.begin(), aIt, aEnd;
+ for (tIt = mTriplets.begin(), tripletIndex = 0; tIt != tEnd; ++tIt, ++tripletIndex)
+ {
+ // mark sets of adjacent triplets
+ // (a marked set must not receive this triplet: it would share a particle)
+ for (int i = 0; i < 3; ++i)
+ {
+ uint32_t particleIndex = (*tIt)[i];
+ aIt = aBegin + adjacentCount[particleIndex];
+ aEnd = aBegin + adjacentCount[particleIndex + 1];
+ for (uint32_t setIndex; aIt != aEnd; ++aIt)
+ if (numSets > (setIndex = setIndices[*aIt]))
+ sets[setIndex].mMark = tripletIndex;
+ }
+
+ // find valid set with smallest number of bank conflicts
+ uint32_t bestIndex = numSets;
+ uint32_t minReplays = 4;
+ for (uint32_t setIndex = 0; setIndex < numSets && minReplays; ++setIndex)
+ {
+ const TripletSet& set = sets[setIndex];
+
+ if (set.mMark == tripletIndex)
+ continue; // triplet collision
+
+ // a vertex adds a replay when its bank already has mNumReplays hits
+ uint32_t numReplays = 0;
+ for (uint32_t i = 0; i < 3; ++i)
+ numReplays += set.mNumReplays[i] == set.mNumConflicts[i][warpMask & (*tIt)[i]];
+
+ if (minReplays > numReplays)
+ minReplays = numReplays, bestIndex = setIndex;
+ }
+
+ // add new set if none found
+ if (bestIndex == numSets)
+ {
+ sets.pushBack(TripletSet());
+ mSetSizes.pushBack(0);
+ ++numSets;
+ }
+
+ // increment bank conflicts or reset if warp filled
+ TripletSet& set = sets[bestIndex];
+ if (++mSetSizes[bestIndex] & warpMask)
+ for (uint32_t i = 0; i < 3; ++i)
+ set.mNumReplays[i] = std::max(set.mNumReplays[i], ++set.mNumConflicts[i][warpMask & (*tIt)[i]]);
+ else
+ set = TripletSet();
+
+ setIndices[tripletIndex] = bestIndex;
+ }
+
+ // reorder triplets
+ // setOffsets[s] starts one past the end of set s; decrementing while
+ // scattering fills each set back-to-front (order within a set is reversed)
+ Vector<uint32_t>::Type setOffsets(mSetSizes.size());
+ prefixSum(mSetSizes.begin(), mSetSizes.end(), setOffsets.begin());
+
+ Vector<Vec4u>::Type triplets(mTriplets.size());
+ Vector<uint32_t>::Type::ConstIterator iIt = setIndices.begin();
+ for (tIt = mTriplets.begin(), tripletIndex = 0; tIt != tEnd; ++tIt, ++iIt)
+ triplets[--setOffsets[*iIt]] = *tIt;
+
+ mTriplets.swap(triplets);
+}
diff --git a/NvCloth/src/TripletScheduler.h b/NvCloth/src/TripletScheduler.h
new file mode 100644
index 0000000..26119cf
--- /dev/null
+++ b/NvCloth/src/TripletScheduler.h
@@ -0,0 +1,55 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2017 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#pragma once
+
+#include "NvCloth/Range.h"
+#include "NvCloth/Allocator.h"
+#include "Vec4T.h"
+
+namespace nv
+{
+
+namespace cloth
+{
+
+// Schedules constraint triplets (three particle indices each) into
+// independent batches for parallel solving: simd() for CPU SIMD lanes,
+// warp() for CUDA warps. Results are left in mTriplets/mSetSizes.
+struct TripletScheduler
+{
+ typedef Vector<Vec4u>::Type::ConstIterator ConstTripletIter;
+ typedef Vector<Vec4u>::Type::Iterator TripletIter;
+
+ TripletScheduler(Range<const uint32_t[4]>);
+ void simd(uint32_t numParticles, uint32_t simdWidth);
+ void warp(uint32_t numParticles, uint32_t warpWidth);
+
+ Vector<Vec4u>::Type mTriplets; // reordered in place by simd()/warp()
+ Vector<uint32_t>::Type mSetSizes; // number of triplets per scheduled set
+};
+}
+}
diff --git a/NvCloth/src/Vec4T.h b/NvCloth/src/Vec4T.h
new file mode 100644
index 0000000..3aed016
--- /dev/null
+++ b/NvCloth/src/Vec4T.h
@@ -0,0 +1,139 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2017 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#pragma once
+
+#include <foundation/PxVec3.h>
+#include <foundation/PxVec4.h>
+
+namespace nv
+{
+namespace cloth
+{
+
+// Minimal POD 4-component vector used by the schedulers.
+template <typename T>
+struct Vec4T
+{
+ // intentionally leaves x/y/z/w uninitialized (POD semantics)
+ Vec4T()
+ {
+ }
+
+ Vec4T(T a, T b, T c, T d) : x(a), y(b), z(c), w(d)
+ {
+ }
+
+ // element-wise converting copy from a Vec4T of another component type
+ template <typename S>
+ Vec4T(const Vec4T<S>& other)
+ {
+ x = T(other.x);
+ y = T(other.y);
+ z = T(other.z);
+ w = T(other.w);
+ }
+
+ // index access treats the struct as a contiguous T[4]
+ // (relies on x,y,z,w being laid out without padding)
+ template <typename Index>
+ T& operator[](Index i)
+ {
+ return reinterpret_cast<T*>(this)[i];
+ }
+
+ template <typename Index>
+ const T& operator[](Index i) const
+ {
+ return reinterpret_cast<const T*>(this)[i];
+ }
+
+ T x, y, z, w;
+};
+
+// component-wise scalar multiply
+template <typename T>
+Vec4T<T> operator*(const Vec4T<T>& vec, T scalar)
+{
+ return Vec4T<T>(vec.x * scalar, vec.y * scalar, vec.z * scalar, vec.w * scalar);
+}
+
+// component-wise scalar divide (no zero check; caller's responsibility)
+template <typename T>
+Vec4T<T> operator/(const Vec4T<T>& vec, T scalar)
+{
+ return Vec4T<T>(vec.x / scalar, vec.y / scalar, vec.z / scalar, vec.w / scalar);
+}
+
+// Views a Vec4T as a reference to T[4] (same no-padding assumption as
+// Vec4T::operator[]).
+template <typename T>
+T (&array(Vec4T<T>& vec))[4]
+{
+ return reinterpret_cast<T(&)[4]>(vec);
+}
+
+template <typename T>
+const T (&array(const Vec4T<T>& vec))[4]
+{
+ return reinterpret_cast<const T(&)[4]>(vec);
+}
+
+// Raw float-array views over the PhysX math types. These assume the PhysX
+// structs contain only their float members with no padding — TODO confirm
+// for PxMat33/PxMat44. NOTE(review): PxMat33/PxMat44 are used here but only
+// PxVec3/PxVec4 headers are included above; presumably pulled in
+// transitively — verify.
+inline float(&array(physx::PxVec3& v))[3]
+{
+ return reinterpret_cast<float(&)[3]>(v);
+}
+inline const float(&array(const physx::PxVec3& v))[3]
+{
+ return reinterpret_cast<const float(&)[3]>(v);
+}
+inline float(&array(physx::PxVec4& v))[4]
+{
+ return reinterpret_cast<float(&)[4]>(v);
+}
+inline const float(&array(const physx::PxVec4& v))[4]
+{
+ return reinterpret_cast<const float(&)[4]>(v);
+}
+inline float(&array(physx::PxMat33& v))[3][3]
+{
+ return reinterpret_cast<float(&)[3][3]>(v);
+}
+inline const float(&array(const physx::PxMat33& v))[3][3]
+{
+ return reinterpret_cast<const float(&)[3][3]>(v);
+}
+inline float(&array(physx::PxMat44& v))[4][4]
+{
+ return reinterpret_cast<float(&)[4][4]>(v);
+}
+inline const float(&array(const physx::PxMat44& v))[4][4]
+{
+ return reinterpret_cast<const float(&)[4][4]>(v);
+}
+
+
+
+typedef Vec4T<uint32_t> Vec4u;
+typedef Vec4T<uint16_t> Vec4us;
+
+} // namespace cloth
+
+} // namespace nv
diff --git a/NvCloth/src/avx/SwSolveConstraints.cpp b/NvCloth/src/avx/SwSolveConstraints.cpp
new file mode 100644
index 0000000..db250b7
--- /dev/null
+++ b/NvCloth/src/avx/SwSolveConstraints.cpp
@@ -0,0 +1,340 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2017 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#pragma warning(push)
+#pragma warning(disable : 4668) //'symbol' is not defined as a preprocessor macro, replacing with '0' for 'directives'
+#pragma warning(disable : 4987) // nonstandard extension used: 'throw (...)'
+#include <intrin.h>
+#pragma warning(pop)
+
+#pragma warning(disable : 4127) // conditional expression is constant
+
+typedef unsigned __int16 uint16_t;
+typedef unsigned __int32 uint32_t;
+
+namespace avx
+{
+// File-scope SIMD constants shared by the kernels below. initialize() must be
+// called once before solveConstraints() is used (these are not constexpr
+// because the AVX intrinsics are not constant expressions).
+__m128 sMaskYZW;
+__m256 sOne, sEpsilon, sMinusOneXYZOneW, sMaskXY;
+
+void initialize()
+{
+ sMaskYZW = _mm_castsi128_ps(_mm_setr_epi32(0, ~0, ~0, ~0)); // lane mask: clears x, keeps y/z/w
+ sOne = _mm256_set1_ps(1.0f);
+ sEpsilon = _mm256_set1_ps(1.192092896e-07f); // FLT_EPSILON; guards rsqrt/rcp against zero
+ sMinusOneXYZOneW = _mm256_setr_ps(-1.0f, -1.0f, -1.0f, 1.0f, -1.0f, -1.0f, -1.0f, 1.0f);
+ sMaskXY = _mm256_castsi256_ps(_mm256_setr_epi32(~0, ~0, 0, 0, ~0, ~0, 0, 0)); // keeps x,y of each 128-bit lane
+}
+
+// fmadd_ps<avx>(a,b,c) = a*b + c; fnmadd_ps<avx>(a,b,c) = c - a*b.
+// The unspecialized templates emulate FMA with separate mul/add (AVX1 path);
+// the <2> specializations below map to real FMA instructions and are only
+// compiled with VS2012+ toolchains (_MSC_VER >= 1700), which provide them.
+template <uint32_t>
+__m256 fmadd_ps(__m256 a, __m256 b, __m256 c)
+{
+ return _mm256_add_ps(_mm256_mul_ps(a, b), c);
+}
+template <uint32_t>
+__m256 fnmadd_ps(__m256 a, __m256 b, __m256 c)
+{
+ return _mm256_sub_ps(c, _mm256_mul_ps(a, b));
+}
+#if _MSC_VER >= 1700
+template <>
+__m256 fmadd_ps<2>(__m256 a, __m256 b, __m256 c)
+{
+ return _mm256_fmadd_ps(a, b, c);
+}
+template <>
+__m256 fnmadd_ps<2>(__m256 a, __m256 b, __m256 c)
+{
+ return _mm256_fnmadd_ps(a, b, c);
+}
+#endif
+
+// Fast vectorized 2^v approximation (AVX1 path), adapted from the cephes
+// math library (http://www.netlib.org/cephes/). Splits v into integer and
+// fractional parts, 2^v = 2^i * 2^f: 2^f via a rational polynomial, 2^i by
+// writing the biased exponent field of a float directly.
+template <uint32_t>
+__m256 exp2(const __m256& v)
+{
+ // http://www.netlib.org/cephes/
+
+ // clamp so the exponent-bit construction below cannot overflow/underflow
+ __m256 x = _mm256_min_ps(_mm256_max_ps(_mm256_set1_ps(-127.4999f), v), _mm256_set1_ps(127.4999f));
+
+ // separate into integer and fractional part
+
+ __m256 fx = _mm256_add_ps(x,_mm256_set1_ps(0.5f));
+ __m128 fx0 = _mm256_extractf128_ps(fx,0);
+ __m128 fx1 = _mm256_extractf128_ps(fx,1);
+
+ // floor(fx) per lane: truncate toward zero, then subtract the sign bit
+ __m128i ix0 = _mm_sub_epi32(_mm_cvttps_epi32(fx0), _mm_srli_epi32(_mm_castps_si128(fx0), 31));
+ __m128i ix1 = _mm_sub_epi32(_mm_cvttps_epi32(fx1), _mm_srli_epi32(_mm_castps_si128(fx1), 31));
+ // _mm256_loadu2_m128i takes (hiaddr, loaddr): ix1 was extracted from the
+ // upper lane, so it must be the first argument. The previous order
+ // (&ix0,&ix1) swapped the two 128-bit lanes relative to x, corrupting fx
+ // (and the result) whenever the lanes differ.
+ __m256i ix = _mm256_loadu2_m128i(&ix1,&ix0);
+ fx = _mm256_sub_ps(x,_mm256_cvtepi32_ps(ix));
+
+ // exp2(fx) ~ 1 + 2 * P(fx) / (Q(fx) - P(fx))
+
+ __m256 fx2 = _mm256_mul_ps(fx,fx);
+
+ __m256 px = _mm256_mul_ps(fx,
+ _mm256_add_ps(_mm256_add_ps(
+ _mm256_set1_ps(1.51390680115615096133e+3f),
+ _mm256_mul_ps(fx2, _mm256_set1_ps(2.02020656693165307700e+1f))),
+ _mm256_mul_ps(fx2, _mm256_set1_ps(2.30933477057345225087e-2f))
+ )
+ );
+
+ __m256 qx = _mm256_add_ps(
+ _mm256_set1_ps(4.36821166879210612817e+3f),
+ _mm256_mul_ps(
+ fx2,
+ _mm256_add_ps(_mm256_set1_ps(2.33184211722314911771e+2f), fx2)
+ )
+ );
+
+ __m256 exp2fx = _mm256_mul_ps(px,_mm256_rcp_ps(_mm256_sub_ps(qx, px)));
+ exp2fx = _mm256_add_ps(_mm256_add_ps(sOne, exp2fx), exp2fx);
+
+ // exp2(ix): place (ix + 127) into the float exponent field
+
+ __m128 exp2ix0 = _mm_castsi128_ps(_mm_slli_epi32(_mm_add_epi32(ix0, _mm_set1_epi32(0x7f)), 23));
+ __m128 exp2ix1 = _mm_castsi128_ps(_mm_slli_epi32(_mm_add_epi32(ix1, _mm_set1_epi32(0x7f)), 23));
+ // same (hiaddr, loaddr) ordering fix as for ix above
+ __m256 exp2ix = _mm256_loadu2_m128((float*)&exp2ix1,(float*)&exp2ix0);
+
+ return _mm256_mul_ps(exp2fx, exp2ix);
+}
+#if _MSC_VER >= 1700
+//AVX2
+// AVX2 specialization of exp2: same cephes-derived algorithm as the generic
+// version, but uses full-width 256-bit integer intrinsics, so no lane
+// splitting/recombination is needed.
+template <>
+__m256 exp2<2>(const __m256& v)
+{
+ // http://www.netlib.org/cephes/
+
+ // clamp so the exponent-bit construction below cannot overflow/underflow
+ __m256 x = _mm256_min_ps(_mm256_max_ps(_mm256_set1_ps(-127.4999f), v), _mm256_set1_ps(127.4999f));
+
+ // separate into integer and fractional part
+
+ __m256 fx = _mm256_add_ps(x,_mm256_set1_ps(0.5f));
+ // floor(fx) per lane: truncate toward zero, then subtract the sign bit
+ __m256i ix = _mm256_sub_epi32(_mm256_cvttps_epi32(fx), _mm256_srli_epi32(_mm256_castps_si256(fx), 31));
+ fx = _mm256_sub_ps(x,_mm256_cvtepi32_ps(ix));
+
+ // exp2(fx) ~ 1 + 2 * P(fx) / (Q(fx) - P(fx))
+
+ __m256 fx2 = _mm256_mul_ps(fx,fx);
+
+ __m256 px = _mm256_mul_ps(fx,
+ _mm256_add_ps(_mm256_add_ps(
+ _mm256_set1_ps(1.51390680115615096133e+3f),
+ _mm256_mul_ps(fx2, _mm256_set1_ps(2.02020656693165307700e+1f))),
+ _mm256_mul_ps(fx2, _mm256_set1_ps(2.30933477057345225087e-2f))
+ )
+ );
+
+
+
+ __m256 qx = _mm256_add_ps(
+ _mm256_set1_ps(4.36821166879210612817e+3f),
+ _mm256_mul_ps(
+ fx2,
+ _mm256_add_ps(_mm256_set1_ps(2.33184211722314911771e+2f), fx2)
+ )
+ );
+
+ __m256 exp2fx = _mm256_mul_ps(px,_mm256_rcp_ps(_mm256_sub_ps(qx, px)));
+ exp2fx = _mm256_add_ps(_mm256_add_ps(sOne, exp2fx), exp2fx);
+
+ // exp2(ix): place (ix + 127) into the float exponent field
+
+ __m256 exp2ix = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_add_epi32(ix, _mm256_set1_epi32(0x7f)), 23));
+
+ return _mm256_mul_ps(exp2fx, exp2ix);
+}
+#endif
+
+// roughly same perf as SSE2 intrinsics, the asm version below is about 10% faster
+template <bool useMultiplier, uint32_t avx>
+void solveConstraints(float* __restrict posIt, const float* __restrict rIt, const float* __restrict stIt, const float* __restrict rEnd,
+ const uint16_t* __restrict iIt, const __m128& stiffnessEtc, const __m128& stiffnessExponent)
+{
+ __m256 stiffness, stretchLimit, compressionLimit, multiplier;
+
+ if (useMultiplier)
+ {
+ // A0 A1 A2 A3 B0 B1 B2 B3
+ // (stiffness, multiplier, compressionLimit, stretchLimit, stiffness, multiplier, compressionLimit, stretchLimit)
+ // float*[0], float*[1], etc..
+ stiffness = _mm256_broadcast_ps(&stiffnessEtc);
+ stretchLimit = _mm256_permute_ps(stiffness, 0xff); //(A3, A3, A3, A3, B3, B3, B3, B3)
+ compressionLimit = _mm256_permute_ps(stiffness, 0xaa); //(A2, A2, A2, A2, B2, B2, B2, B2)
+ multiplier = _mm256_permute_ps(stiffness, 0x55); //(A1, A1, A1, A1, B1, B1, B1, B1)
+ stiffness = _mm256_permute_ps(stiffness, 0x00); //(A0, A0, A0, A0, B0, B0, B0, B0)
+ }
+ else
+ {
+ stiffness = _mm256_broadcast_ss((const float*)&stiffnessEtc); // (float*[0], float*[0], ..., float*[0]);
+ }
+
+ bool useStiffnessPerConstraint = stIt!=nullptr;
+
+ for (; rIt < rEnd; rIt += 8, iIt += 16, stIt += 8)
+ {
+ float* p0i = posIt + iIt[0] * 4;
+ float* p4i = posIt + iIt[8] * 4;
+ float* p0j = posIt + iIt[1] * 4;
+ float* p4j = posIt + iIt[9] * 4;
+ float* p1i = posIt + iIt[2] * 4;
+ float* p5i = posIt + iIt[10] * 4;
+ float* p1j = posIt + iIt[3] * 4;
+ float* p5j = posIt + iIt[11] * 4;
+
+ __m128 v0i = _mm_load_ps(p0i);
+ __m128 v4i = _mm_load_ps(p4i);
+ __m128 v0j = _mm_load_ps(p0j);
+ __m128 v4j = _mm_load_ps(p4j);
+ __m128 v1i = _mm_load_ps(p1i);
+ __m128 v5i = _mm_load_ps(p5i);
+ __m128 v1j = _mm_load_ps(p1j);
+ __m128 v5j = _mm_load_ps(p5j);
+
+ __m256 v04i = _mm256_insertf128_ps(_mm256_castps128_ps256(v0i), v4i, 1);
+ __m256 v04j = _mm256_insertf128_ps(_mm256_castps128_ps256(v0j), v4j, 1);
+ __m256 v15i = _mm256_insertf128_ps(_mm256_castps128_ps256(v1i), v5i, 1);
+ __m256 v15j = _mm256_insertf128_ps(_mm256_castps128_ps256(v1j), v5j, 1);
+
+ __m256 h04ij = fmadd_ps<avx>(sMinusOneXYZOneW, v04i, v04j);
+ __m256 h15ij = fmadd_ps<avx>(sMinusOneXYZOneW, v15i, v15j);
+
+ float* p2i = posIt + iIt[4] * 4;
+ float* p6i = posIt + iIt[12] * 4;
+ float* p2j = posIt + iIt[5] * 4;
+ float* p6j = posIt + iIt[13] * 4;
+ float* p3i = posIt + iIt[6] * 4;
+ float* p7i = posIt + iIt[14] * 4;
+ float* p3j = posIt + iIt[7] * 4;
+ float* p7j = posIt + iIt[15] * 4;
+
+ __m128 v2i = _mm_load_ps(p2i);
+ __m128 v6i = _mm_load_ps(p6i);
+ __m128 v2j = _mm_load_ps(p2j);
+ __m128 v6j = _mm_load_ps(p6j);
+ __m128 v3i = _mm_load_ps(p3i);
+ __m128 v7i = _mm_load_ps(p7i);
+ __m128 v3j = _mm_load_ps(p3j);
+ __m128 v7j = _mm_load_ps(p7j);
+
+ __m256 v26i = _mm256_insertf128_ps(_mm256_castps128_ps256(v2i), v6i, 1);
+ __m256 v26j = _mm256_insertf128_ps(_mm256_castps128_ps256(v2j), v6j, 1);
+ __m256 v37i = _mm256_insertf128_ps(_mm256_castps128_ps256(v3i), v7i, 1);
+ __m256 v37j = _mm256_insertf128_ps(_mm256_castps128_ps256(v3j), v7j, 1);
+
+ __m256 h26ij = fmadd_ps<avx>(sMinusOneXYZOneW, v26i, v26j);
+ __m256 h37ij = fmadd_ps<avx>(sMinusOneXYZOneW, v37i, v37j);
+
+ __m256 a = _mm256_unpacklo_ps(h04ij, h26ij);
+ __m256 b = _mm256_unpackhi_ps(h04ij, h26ij);
+ __m256 c = _mm256_unpacklo_ps(h15ij, h37ij);
+ __m256 d = _mm256_unpackhi_ps(h15ij, h37ij);
+
+ __m256 hxij = _mm256_unpacklo_ps(a, c);
+ __m256 hyij = _mm256_unpackhi_ps(a, c);
+ __m256 hzij = _mm256_unpacklo_ps(b, d);
+ __m256 vwij = _mm256_unpackhi_ps(b, d);
+
+ __m256 e2ij = fmadd_ps<avx>(hxij, hxij, fmadd_ps<avx>(hyij, hyij, fmadd_ps<avx>(hzij, hzij, sEpsilon)));
+
+ __m256 rij = _mm256_load_ps(rIt);
+ __m256 stij = useStiffnessPerConstraint?_mm256_sub_ps(sOne, exp2<avx>(_mm256_mul_ps(_mm256_load_ps(stIt),_mm256_broadcast_ps(&stiffnessExponent)))):stiffness;
+ __m256 mask = _mm256_cmp_ps(rij, sEpsilon, _CMP_GT_OQ);
+ __m256 erij = _mm256_and_ps(fnmadd_ps<avx>(rij, _mm256_rsqrt_ps(e2ij), sOne), mask);
+
+ if (useMultiplier)
+ {
+ erij = fnmadd_ps<avx>(multiplier, _mm256_max_ps(compressionLimit, _mm256_min_ps(erij, stretchLimit)), erij);
+ }
+
+ __m256 exij = _mm256_mul_ps(erij, _mm256_mul_ps(stij, _mm256_rcp_ps(_mm256_add_ps(sEpsilon, vwij))));
+
+ // replace these two instructions with _mm_maskstore_ps below?
+ __m256 exlo = _mm256_and_ps(sMaskXY, exij);
+ __m256 exhi = _mm256_andnot_ps(sMaskXY, exij);
+
+ __m256 f04ij = _mm256_mul_ps(h04ij, _mm256_permute_ps(exlo, 0xc0));
+ __m256 u04i = fmadd_ps<avx>(f04ij, _mm256_permute_ps(v04i, 0xff), v04i);
+ __m256 u04j = fnmadd_ps<avx>(f04ij, _mm256_permute_ps(v04j, 0xff), v04j);
+
+ _mm_store_ps(p0i, _mm256_extractf128_ps(u04i, 0));
+ _mm_store_ps(p0j, _mm256_extractf128_ps(u04j, 0));
+ _mm_store_ps(p4i, _mm256_extractf128_ps(u04i, 1));
+ _mm_store_ps(p4j, _mm256_extractf128_ps(u04j, 1));
+
+ __m256 f15ij = _mm256_mul_ps(h15ij, _mm256_permute_ps(exlo, 0xd5));
+ __m256 u15i = fmadd_ps<avx>(f15ij, _mm256_permute_ps(v15i, 0xff), v15i);
+ __m256 u15j = fnmadd_ps<avx>(f15ij, _mm256_permute_ps(v15j, 0xff), v15j);
+
+ _mm_store_ps(p1i, _mm256_extractf128_ps(u15i, 0));
+ _mm_store_ps(p1j, _mm256_extractf128_ps(u15j, 0));
+ _mm_store_ps(p5i, _mm256_extractf128_ps(u15i, 1));
+ _mm_store_ps(p5j, _mm256_extractf128_ps(u15j, 1));
+
+ __m256 f26ij = _mm256_mul_ps(h26ij, _mm256_permute_ps(exhi, 0x2a));
+ __m256 u26i = fmadd_ps<avx>(f26ij, _mm256_permute_ps(v26i, 0xff), v26i);
+ __m256 u26j = fnmadd_ps<avx>(f26ij, _mm256_permute_ps(v26j, 0xff), v26j);
+
+ _mm_store_ps(p2i, _mm256_extractf128_ps(u26i, 0));
+ _mm_store_ps(p2j, _mm256_extractf128_ps(u26j, 0));
+ _mm_store_ps(p6i, _mm256_extractf128_ps(u26i, 1));
+ _mm_store_ps(p6j, _mm256_extractf128_ps(u26j, 1));
+
+ __m256 f37ij = _mm256_mul_ps(h37ij, _mm256_permute_ps(exhi, 0x3f));
+ __m256 u37i = fmadd_ps<avx>(f37ij, _mm256_permute_ps(v37i, 0xff), v37i);
+ __m256 u37j = fnmadd_ps<avx>(f37ij, _mm256_permute_ps(v37j, 0xff), v37j);
+
+ _mm_store_ps(p3i, _mm256_extractf128_ps(u37i, 0));
+ _mm_store_ps(p3j, _mm256_extractf128_ps(u37j, 0));
+ _mm_store_ps(p7i, _mm256_extractf128_ps(u37i, 1));
+ _mm_store_ps(p7j, _mm256_extractf128_ps(u37j, 1));
+ }
+
+ _mm256_zeroupper();
+}
+
+
+// Explicit instantiations for all supported configurations:
+// useMultiplier = {false, true} x instruction set avx = {1 (AVX), 2 (AVX2/FMA)}.
+template void solveConstraints<false, 1>(float* __restrict, const float* __restrict, const float* __restrict, const float* __restrict,
+ const uint16_t* __restrict, const __m128&, const __m128&);
+
+template void solveConstraints<true, 1>(float* __restrict, const float* __restrict, const float* __restrict, const float* __restrict,
+ const uint16_t* __restrict, const __m128&, const __m128&);
+
+template void solveConstraints<false, 2>(float* __restrict, const float* __restrict, const float* __restrict, const float* __restrict,
+ const uint16_t* __restrict, const __m128&, const __m128&);
+
+template void solveConstraints<true, 2>(float* __restrict, const float* __restrict, const float* __restrict, const float* __restrict,
+ const uint16_t* __restrict, const __m128&, const __m128&);
+
+
+} // namespace avx
diff --git a/NvCloth/src/cuda/CuCheckSuccess.h b/NvCloth/src/cuda/CuCheckSuccess.h
new file mode 100644
index 0000000..3e9b968
--- /dev/null
+++ b/NvCloth/src/cuda/CuCheckSuccess.h
@@ -0,0 +1,44 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2017 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#pragma once
+
+#include <cuda.h>
+
+namespace nv
+{
+namespace cloth
+{
+// implemented in CuFactory.cpp
+void checkSuccessImpl(CUresult, const char*, const int);
+}
+
+// safe cuda calls
+#define checkSuccess(err) cloth::checkSuccessImpl(err, __FILE__, __LINE__)
+}
diff --git a/NvCloth/src/cuda/CuCloth.cpp b/NvCloth/src/cuda/CuCloth.cpp
new file mode 100644
index 0000000..93b2370
--- /dev/null
+++ b/NvCloth/src/cuda/CuCloth.cpp
@@ -0,0 +1,512 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2017 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#include "CuCloth.h"
+#include "CuFabric.h"
+#include "CuFactory.h"
+#include "CuContextLock.h"
+#include "CuCheckSuccess.h"
+#include "CuClothData.h"
+#include "CuSolver.h"
+#include "../TripletScheduler.h"
+#include "../ClothBase.h"
+#include <foundation/PxMat44.h>
+#include <PsFoundation.h>
+
+using namespace physx;
+
+#if PX_VC
+#pragma warning(disable : 4365) // 'action' : conversion from 'type_1' to 'type_2', signed/unsigned mismatch
+#endif
+
+namespace nv
+{
+namespace cloth
+{
+PhaseConfig transform(const PhaseConfig&); // from PhaseConfig.cpp
+}
+}
+
+using namespace nv;
+
+namespace
+{
+// Self collision is active only when both the distance is positive and the
+// log-stiffness is negative (min of distance and negated log-stiffness > 0).
+bool isSelfCollisionEnabled(const cloth::CuCloth& cloth)
+{
+ return std::min(cloth.mSelfCollisionDistance, -cloth.mSelfCollisionLogStiffness) > 0.0f;
+}
+}
+
+// Construct a cloth instance from an initial particle set. Device buffers are
+// bound to the factory's CUDA context; the CuContextLock base acquires that
+// context for the duration of construction and is released at the end.
+cloth::CuCloth::CuCloth(CuFactory& factory, CuFabric& fabric, Range<const PxVec4> particles)
+: CuContextLock(factory)
+, mFactory(factory)
+, mFabric(fabric)
+, mClothDataDirty(false)
+, mNumParticles(uint32_t(particles.size()))
+, mParticles(mFactory.mContext)
+, mParticlesHostCopy(mFactory.mContext)
+, mDeviceParticlesDirty(false)
+, mHostParticlesDirty(true)
+, mPhaseConfigs(mFactory.mContext)
+, mMotionConstraints(mFactory.mContext)
+, mSeparationConstraints(mFactory.mContext)
+, mParticleAccelerations(mFactory.mContext)
+, mParticleAccelerationsHostCopy(mFactory.mContext)
+, mCapsuleIndices(mFactory.mContext)
+, mStartCollisionSpheres(mFactory.mContext)
+, mTargetCollisionSpheres(mFactory.mContext)
+, mConvexMasks(mFactory.mContext)
+, mStartCollisionPlanes(mFactory.mContext)
+, mTargetCollisionPlanes(mFactory.mContext)
+, mStartCollisionTriangles(mFactory.mContext)
+, mTargetCollisionTriangles(mFactory.mContext)
+, mVirtualParticleSetSizes(mFactory.mContext)
+, mVirtualParticleIndices(mFactory.mContext)
+, mVirtualParticleWeights(mFactory.mContext)
+, mRestPositions(mFactory.mContext)
+, mSelfCollisionIndices(mFactory.mContext)
+, mSelfCollisionData(mFactory.mContext)
+, mSharedMemorySize(0)
+, mUserData(0)
+{
+ NV_CLOTH_ASSERT(!particles.empty());
+ NV_CLOTH_ASSERT(particles.size() == fabric.getNumParticles());
+
+ initialize(*this, particles.begin(), particles.end());
+
+ // double-buffered particle storage: current positions followed by previous
+ // positions, both initialized to the input set
+ mParticles.reserve(2 * mNumParticles);
+ mParticles.push_back(particles.begin(), particles.end());
+ mParticles.push_back(particles.begin(), particles.end());
+ mParticlesHostCopy.resizeUninitialized(2 * mNumParticles);
+
+ mFabric.incRefCount();
+
+ CuContextLock::release();
+}
+
+// Clone constructor: copies all per-cloth state into (possibly) another
+// factory's CUDA context. Self-collision scratch data is rebuilt lazily
+// rather than copied. copy(*this, cloth) transfers the remaining common
+// state (see ClothClone).
+cloth::CuCloth::CuCloth(CuFactory& factory, const CuCloth& cloth)
+: CuContextLock(factory)
+, mFactory(factory)
+, mFabric(cloth.mFabric)
+, mClothDataDirty(true) // was left uninitialized; force a CuClothData rebuild for the clone
+, mNumParticles(cloth.mNumParticles)
+, mParticles(cloth.mParticles)
+, mParticlesHostCopy(cloth.mParticlesHostCopy)
+, mDeviceParticlesDirty(cloth.mDeviceParticlesDirty)
+, mHostParticlesDirty(cloth.mHostParticlesDirty)
+, mPhaseConfigs(cloth.mPhaseConfigs)
+, mHostPhaseConfigs(cloth.mHostPhaseConfigs)
+, mMotionConstraints(cloth.mMotionConstraints)
+, mSeparationConstraints(cloth.mSeparationConstraints)
+, mParticleAccelerations(cloth.mParticleAccelerations)
+, mParticleAccelerationsHostCopy(cloth.mParticleAccelerationsHostCopy)
+, mCapsuleIndices(cloth.mCapsuleIndices)
+, mStartCollisionSpheres(cloth.mStartCollisionSpheres)
+, mTargetCollisionSpheres(cloth.mTargetCollisionSpheres)
+, mConvexMasks(cloth.mConvexMasks) // was missing: clones silently dropped convex collision masks
+, mStartCollisionPlanes(cloth.mStartCollisionPlanes)
+, mTargetCollisionPlanes(cloth.mTargetCollisionPlanes)
+, mStartCollisionTriangles(cloth.mStartCollisionTriangles)
+, mTargetCollisionTriangles(cloth.mTargetCollisionTriangles)
+, mVirtualParticleSetSizes(cloth.mVirtualParticleSetSizes)
+, mVirtualParticleIndices(cloth.mVirtualParticleIndices)
+, mVirtualParticleWeights(cloth.mVirtualParticleWeights)
+, mRestPositions(cloth.mRestPositions)
+, mSelfCollisionIndices(cloth.mSelfCollisionIndices)
+, mSelfCollisionData(mFactory.mContext)
+, mSharedMemorySize(cloth.mSharedMemorySize)
+, mUserData(cloth.mUserData)
+{
+ copy(*this, cloth);
+
+ mFabric.incRefCount();
+
+ CuContextLock::release();
+}
+
+// Destructor: re-acquires the CUDA context (released at end of construction)
+// so device buffers can be freed, and drops the fabric reference.
+cloth::CuCloth::~CuCloth()
+{
+ CuContextLock::acquire();
+
+ mFabric.decRefCount();
+}
+
+// Marks the cached CuClothData stale; updateClothData() will rebuild it.
+void cloth::CuCloth::notifyChanged()
+{
+ mClothDataDirty = true;
+}
+
+bool cloth::CuCloth::updateClothData(CuClothData& clothData)
+{
+ // test particle pointer to detect when cloth data array has been reordered
+ if (!mClothDataDirty && clothData.mParticles == array(*mParticles.begin().get()))
+ {
+ NV_CLOTH_ASSERT(mSharedMemorySize == getSharedMemorySize());
+ return false;
+ }
+
+ mSharedMemorySize = getSharedMemorySize();
+
+ if (mSelfCollisionData.empty() && isSelfCollisionEnabled(*this))
+ {
+ uint32_t numSelfCollisionIndices =
+ mSelfCollisionIndices.empty() ? mNumParticles : uint32_t(mSelfCollisionIndices.size());
+
+ uint32_t particleSize = 4 * mNumParticles;
+ uint32_t keySize = 2 * numSelfCollisionIndices; // 2x for radix buffer
+ uint32_t cellStartSize = (129 + 128 * 128 + 130) / 2 + 1; // half because type is int16_t
+
+ // use 16bit indices for cellStart array (128x128 grid)
+ mSelfCollisionData.resize(particleSize + keySize + cellStartSize);
+ checkSuccess(cuMemsetD32((mSelfCollisionData.begin() + particleSize + keySize).dev(), 0xffffffff, cellStartSize));
+ }
+
+ clothData = CuClothData(*this);
+ mClothDataDirty = false;
+
+ return true;
+}
+
+// Computes the per-block CUDA shared memory footprint (in bytes) the solver
+// kernel needs for this cloth: phase configs plus the larger of the
+// discrete-collision and self-collision scratch areas, plus continuous
+// collision state when previous-frame data must be kept.
+uint32_t cloth::CuCloth::getSharedMemorySize() const
+{
+ uint32_t numPhases = uint32_t(mPhaseConfigs.size());
+ uint32_t numSpheres = uint32_t(mStartCollisionSpheres.size());
+ uint32_t numCones = uint32_t(mCapsuleIndices.size());
+ uint32_t numPlanes = uint32_t(mStartCollisionPlanes.size());
+ uint32_t numConvexes = uint32_t(mConvexMasks.size());
+ uint32_t numTriangles = uint32_t(mStartCollisionTriangles.size() / 3);
+
+ uint32_t phaseConfigSize = numPhases * sizeof(CuPhaseConfig);
+
+ // previous-frame collision shapes are only needed for CCD or friction
+ bool storePrevCollisionData = mEnableContinuousCollision || mFriction > 0.0f;
+ uint32_t continuousCollisionSize = storePrevCollisionData ? 4 * numSpheres + 10 * numCones : 0;
+ continuousCollisionSize += 4 * numCones + numConvexes; // capsule and convex masks
+ uint32_t discreteCollisionSize = 4 * numSpheres + std::max(10 * numCones + 96, 208u);
+ discreteCollisionSize = std::max(discreteCollisionSize, std::max(4 * numPlanes, 19 * numTriangles));
+
+ // scratch memory for prefix sum and histogram
+ uint32_t selfCollisionSize = isSelfCollisionEnabled(*this) ? 571u : 0; //Note: different sizes between PhysX and APEX, took APEX size as it was larger
+
+ // see CuSolverKenel.cu::gSharedMemory comment for details
+ return phaseConfigSize + sizeof(float) * (continuousCollisionSize + std::max(selfCollisionSize, discreteCollisionSize));
+}
+
+// Stores the host-side phase configs and builds the device-side CuPhaseConfig
+// array, resolving each phase index into the fabric's per-phase constraint
+// buffers (rest values, stiffness values, indices).
+void cloth::CuCloth::setPhaseConfig(Range<const PhaseConfig> configs)
+{
+ mHostPhaseConfigs.assign(configs.begin(), configs.end());
+
+ Vector<CuPhaseConfig>::Type deviceConfigs;
+ deviceConfigs.reserve(configs.size());
+ const PhaseConfig* cEnd = configs.end();
+ for (const PhaseConfig* cIt = configs.begin(); cIt != cEnd; ++cIt)
+ {
+ CuPhaseConfig config;
+
+ config.mStiffness = cIt->mStiffness;
+ config.mStiffnessMultiplier = cIt->mStiffnessMultiplier;
+ config.mCompressionLimit = cIt->mCompressionLimit;
+ config.mStretchLimit = cIt->mStretchLimit;
+
+ // look up this phase's constraint data in the fabric
+ uint16_t phaseIndex = cIt->mPhaseIndex;
+ config.mNumConstraints = mFabric.mNumConstraintsInPhase[phaseIndex];
+ config.mRestvalues = mFabric.mRestvaluesInPhase[phaseIndex].get();
+ config.mStiffnessValues = mFabric.mStiffnessValuesInPhase[phaseIndex].get();
+ config.mIndices = mFabric.mIndicesInPhase[phaseIndex].get();
+
+ deviceConfigs.pushBack(config);
+ }
+
+ // uploading to the device vector requires the CUDA context
+ CuContextLock contextLock(mFactory);
+ mPhaseConfigs.assign(deviceConfigs.begin(), deviceConfigs.end());
+}
+
+// Prepares the constraint double buffer for a new target set and returns a
+// writable host range the caller fills in. Device allocations are done under
+// the CUDA context lock; the first call initializes the start buffer by
+// swapping the (empty-host) target into it.
+cloth::Range<PxVec4> cloth::CuCloth::push(cloth::CuConstraints& constraints)
+{
+ if (!constraints.mTarget.capacity())
+ {
+ CuContextLock contextLock(mFactory);
+ constraints.mTarget.reserve(mNumParticles);
+ }
+ if (constraints.mHostCopy.empty())
+ constraints.mTarget.resize(mNumParticles);
+
+ if (constraints.mStart.empty()) // initialize start first
+ constraints.mStart.swap(constraints.mTarget);
+
+ if (!constraints.mHostCopy.capacity())
+ {
+ CuContextLock contextLock(mFactory);
+ constraints.mHostCopy.reserve(mNumParticles);
+ }
+ constraints.mHostCopy.resizeUninitialized(mNumParticles);
+
+ PxVec4* data = &constraints.mHostCopy.front();
+ return Range<PxVec4>(data, data + constraints.mHostCopy.size());
+}
+
+// Releases the device-side constraint buffers (swap with fresh empty vectors
+// to actually free the GPU memory).
+void cloth::CuCloth::clear(cloth::CuConstraints& constraints)
+{
+ CuContextLock contextLock(mFactory);
+ CuDeviceVector<PxVec4>(mFactory.mContext).swap(constraints.mStart);
+ CuDeviceVector<PxVec4>(mFactory.mContext).swap(constraints.mTarget);
+}
+
+// Uploads the host particle copy (current + previous, hence 2x) to the device
+// if the host side was modified since the last sync.
+void cloth::CuCloth::syncDeviceParticles()
+{
+ if (mDeviceParticlesDirty)
+ {
+ CuContextLock contextLock(mFactory);
+ checkSuccess(
+ cuMemcpyHtoD(mParticles.begin().dev(), mParticlesHostCopy.begin(), 2 * mNumParticles * sizeof(PxVec4)));
+ mDeviceParticlesDirty = false;
+ }
+}
+
+// Downloads the device particles to the host copy if the device side was
+// modified since the last sync (mirror of syncDeviceParticles).
+void cloth::CuCloth::syncHostParticles()
+{
+ if (mHostParticlesDirty)
+ {
+ CuContextLock contextLock(mFactory);
+ checkSuccess(cuMemcpyDtoH(mParticlesHostCopy.begin(), mParticles.begin().dev(), 2 * mNumParticles * sizeof(PxVec4)));
+ mHostParticlesDirty = false;
+ }
+}
+
+// Limits the total collision-triangle vertex count (existing minus replaced
+// plus new) to 1500 vertices / 500 triangles so the solver kernel's shared
+// memory budget is not exceeded; excess input triangles are dropped with a
+// warning.
+cloth::Range<const PxVec3> cloth::CuCloth::clampTriangleCount(Range<const PxVec3> range, uint32_t replaceSize)
+{
+ // clamp to 500 triangles (1500 vertices) to prevent running out of shared memory
+ uint32_t removedSize = mStartCollisionTriangles.size() - replaceSize;
+ const PxVec3* clamp = range.begin() + 1500 - removedSize;
+
+ if (range.end() > clamp)
+ {
+ NV_CLOTH_LOG_WARNING("Too many collision triangles specified for cloth, dropping all but first 500.\n");
+ }
+
+ return Range<const PxVec3>(range.begin(), std::min(range.end(), clamp));
+}
+
+#include "../ClothImpl.h"
+
+namespace nv
+{
+namespace cloth
+{
+
+// ClothImpl<CuCloth>::clone() implemented in CuClothClone.cpp
+
+// ClothImpl<CuCloth> specializations: particle access. "Locking" particles
+// for the CUDA backend means syncing the device copy down to the host; the
+// mutable accessors additionally mark the device copy dirty so the next
+// device sync uploads the caller's edits.
+template <>
+uint32_t ClothImpl<CuCloth>::getNumParticles() const
+{
+ return mCloth.mNumParticles;
+}
+
+template <>
+void ClothImpl<CuCloth>::lockParticles() const
+{
+ // const_cast: syncing is a logically-const caching operation
+ const_cast<CuCloth&>(mCloth).syncHostParticles();
+}
+
+template <>
+void ClothImpl<CuCloth>::unlockParticles() const
+{
+}
+
+template <>
+MappedRange<PxVec4> ClothImpl<CuCloth>::getCurrentParticles()
+{
+ mCloth.wakeUp();
+ lockParticles();
+ mCloth.mDeviceParticlesDirty = true;
+ return getMappedParticles(mCloth.mParticlesHostCopy.begin());
+}
+
+template <>
+MappedRange<const PxVec4> ClothImpl<CuCloth>::getCurrentParticles() const
+{
+ lockParticles();
+ return getMappedParticles(mCloth.mParticlesHostCopy.begin());
+}
+
+// previous-frame particles live in the second half of the double buffer
+template <>
+MappedRange<PxVec4> ClothImpl<CuCloth>::getPreviousParticles()
+{
+ mCloth.wakeUp();
+ lockParticles();
+ mCloth.mDeviceParticlesDirty = true;
+ return getMappedParticles(mCloth.mParticlesHostCopy.begin() + mCloth.mNumParticles);
+}
+
+template <>
+MappedRange<const PxVec4> ClothImpl<CuCloth>::getPreviousParticles() const
+{
+ lockParticles();
+ return getMappedParticles(mCloth.mParticlesHostCopy.begin() + mCloth.mNumParticles);
+}
+
+// Raw device pointers for interop; marks the host copy stale since the
+// caller may write through them on the GPU.
+template <>
+GpuParticles ClothImpl<CuCloth>::getGpuParticles()
+{
+ mCloth.syncDeviceParticles();
+ mCloth.mHostParticlesDirty = true;
+ PxVec4* particles = mCloth.mParticles.begin().get();
+ GpuParticles result = { particles, particles + mCloth.mNumParticles, 0 };
+ return result;
+}
+
+// Filters out zero-stiffness phases, converts the rest to solver form via
+// transform() (PhaseConfig.cpp), and hands them to the cloth.
+template <>
+void ClothImpl<CuCloth>::setPhaseConfig(Range<const PhaseConfig> configs)
+{
+ Vector<PhaseConfig>::Type transformedConfigs;
+ transformedConfigs.reserve(configs.size());
+
+ // transform phase config to use in solver
+ for (; !configs.empty(); configs.popFront())
+ if (configs.front().mStiffness > 0.0f)
+ transformedConfigs.pushBack(transform(configs.front()));
+
+ mCloth.setPhaseConfig(Range<const PhaseConfig>(transformedConfigs.begin(), transformedConfigs.end()));
+ mCloth.notifyChanged();
+ mCloth.wakeUp();
+}
+
+// Stores the self-collision index subset on both device and host mirrors.
+template <>
+void ClothImpl<CuCloth>::setSelfCollisionIndices(Range<const uint32_t> indices)
+{
+ ContextLockType lock(mCloth.mFactory);
+ mCloth.mSelfCollisionIndices.assign(indices.begin(), indices.end());
+ mCloth.mSelfCollisionIndicesHost.assign(indices.begin(), indices.end());
+ mCloth.notifyChanged();
+ mCloth.wakeUp();
+}
+
+template <>
+uint32_t ClothImpl<CuCloth>::getNumVirtualParticles() const
+{
+ return uint32_t(mCloth.mVirtualParticleIndices.size());
+}
+
+// Lazily allocates the device and host particle-acceleration buffers and
+// returns a writable host range; device uploads happen elsewhere during solve.
+template <>
+Range<PxVec4> ClothImpl<CuCloth>::getParticleAccelerations()
+{
+ if (mCloth.mParticleAccelerations.empty())
+ {
+ CuContextLock contextLock(mCloth.mFactory);
+ mCloth.mParticleAccelerations.resize(mCloth.mNumParticles);
+ }
+
+ if (!mCloth.mParticleAccelerationsHostCopy.capacity())
+ {
+ CuContextLock contextLock(mCloth.mFactory);
+ mCloth.mParticleAccelerationsHostCopy.reserve(mCloth.mNumParticles);
+ }
+ mCloth.mParticleAccelerationsHostCopy.resizeUninitialized(mCloth.mNumParticles);
+
+ mCloth.wakeUp();
+
+ PxVec4* data = mCloth.mParticleAccelerationsHostCopy.begin();
+ return Range<PxVec4>(data, mCloth.mParticleAccelerationsHostCopy.end());
+}
+
+// Frees both acceleration buffers (swap-with-temporary releases the memory).
+template <>
+void ClothImpl<CuCloth>::clearParticleAccelerations()
+{
+ CuContextLock contextLock(mCloth.mFactory);
+ CuDeviceVector<PxVec4>(mCloth.mFactory.mContext).swap(mCloth.mParticleAccelerations);
+ CuHostVector<PxVec4, CU_MEMHOSTALLOC_DEVICEMAP>::Type().swap(mCloth.mParticleAccelerationsHostCopy);
+ mCloth.wakeUp();
+}
+
+namespace
+{
+ // Diagnostic helper (used only by the commented-out printf below): counts,
+ // per 32-triplet batch, how often a warp lane index (value & 31) repeats in
+ // each of the three triplet slots — i.e. how many replayed conflict passes
+ // the GPU kernel would need.
+ uint32_t calculateNumReplays(const nv::cloth::Vector<cloth::Vec4u>::Type& triplets, const nv::cloth::Vector<uint32_t>::Type setSizes)
+ {
+ uint32_t result = 0;
+
+ auto tIt = triplets.begin();
+ uint32_t index = 0;
+ for (auto sIt = setSizes.begin(), sEnd = setSizes.end(); sIt != sEnd; ++sIt, ++index)
+ {
+ auto tEnd = tIt + *sIt, tLast = tIt;
+ while (tLast != tEnd)
+ {
+ // histogram of lane collisions per slot within one 32-triplet batch
+ uint8_t numConflicts[3][32] = {};
+ uint8_t numReplays[3] = {};
+
+ for (tLast += std::min(ptrdiff_t(32), tEnd - tLast); tIt != tLast; ++tIt)
+ for (int i = 0; i < 3; ++i)
+ numReplays[i] = std::max(numReplays[i], ++numConflicts[i][(*tIt)[i] & 31]);
+
+ result += numReplays[0] + numReplays[1] + numReplays[2];
+ }
+ }
+
+ return result;
+ }
+}
+
+// Installs virtual particles: schedules the triplets into warp-independent
+// sets (32-wide), narrows indices to 16 bit for the device, and appends a
+// normalization factor (1/|w|^2) to each weight triple.
+template <>
+void ClothImpl<CuCloth>::setVirtualParticles(Range<const uint32_t[4]> indices, Range<const PxVec3> weights)
+{
+ // shuffle indices to form independent SIMD sets
+ TripletScheduler scheduler(indices);
+ scheduler.warp(mCloth.mNumParticles, 32);
+
+ // convert to 16bit indices
+ Vector<Vec4us>::Type hostIndices;
+ hostIndices.reserve(indices.size());
+ TripletScheduler::ConstTripletIter tIt = scheduler.mTriplets.begin();
+ TripletScheduler::ConstTripletIter tEnd = scheduler.mTriplets.end();
+ for (; tIt != tEnd; ++tIt)
+ hostIndices.pushBack(Vec4us(*tIt));
+
+ // printf("num sets = %u, num replays = %u\n", scheduler.mSetSizes.size(),
+ // calculateNumReplays(scheduler.mTriplets, scheduler.mSetSizes));
+
+ // add normalization weight
+ Vector<PxVec4>::Type hostWeights;
+ hostWeights.reserve(weights.size());
+ for (; !weights.empty(); weights.popFront())
+ {
+ PxVec3 w = reinterpret_cast<const PxVec3&>(weights.front());
+ float scale = 1 / w.magnitudeSquared();
+ hostWeights.pushBack(PxVec4( w.x, w.y, w.z, scale ));
+ }
+
+ CuContextLock contextLock(mCloth.mFactory);
+
+ // todo: 'swap' these to force reallocation?
+ mCloth.mVirtualParticleIndices = hostIndices;
+ mCloth.mVirtualParticleSetSizes = scheduler.mSetSizes;
+ mCloth.mVirtualParticleWeights = hostWeights;
+
+ mCloth.notifyChanged();
+ mCloth.wakeUp();
+}
+
+} // namespace cloth
+} // namespace nv
diff --git a/NvCloth/src/cuda/CuCloth.h b/NvCloth/src/cuda/CuCloth.h
new file mode 100644
index 0000000..1f86343
--- /dev/null
+++ b/NvCloth/src/cuda/CuCloth.h
@@ -0,0 +1,223 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2017 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#pragma once
+
+#include "NvCloth/Range.h"
+#include "NvCloth/PhaseConfig.h"
+#include "../MovingAverage.h"
+#include "../IndexPair.h"
+#include "../BoundingBox.h"
+#include "../Vec4T.h"
+#include "CuPhaseConfig.h"
+#include "CuPinnedAllocator.h"
+#include "CuContextLock.h"
+#include "CuDeviceVector.h"
+#include <foundation/PxVec4.h>
+#include <foundation/PxVec3.h>
+#include <foundation/PxTransform.h>
+#include "NvCloth/Allocator.h"
+
+namespace nv
+{
+namespace cloth
+{
+
+class CuFabric;
+class CuFactory;
+struct CuClothData;
+
+// Double-buffered constraint storage on the device (mStart / mTarget) plus a
+// pinned, device-mapped host mirror (mHostCopy) for host-side updates.
+struct CuConstraints
+{
+ CuConstraints(CUcontext ctx)
+ : mStart(ctx), mTarget(ctx), mHostCopy(ctx)
+ {
+ }
+
+ // Advance the double buffer: the previous target becomes the new start and
+ // the target is emptied. No-op if no target was pushed.
+ void pop()
+ {
+ if (!mTarget.empty())
+ {
+ mStart.swap(mTarget);
+ mTarget.resize(0);
+ }
+ }
+
+ CuDeviceVector<physx::PxVec4> mStart;
+ CuDeviceVector<physx::PxVec4> mTarget;
+ // pinned host copy, mapped into the device address space
+ CuHostVector<physx::PxVec4, CU_MEMHOSTALLOC_DEVICEMAP>::Type mHostCopy;
+};
+
+// CUDA backend cloth instance. Holds all per-cloth simulation state, split
+// between device vectors (CuDeviceVector) and pinned, device-mapped host
++// vectors (CuHostVector with CU_MEMHOSTALLOC_DEVICEMAP).
+class CuCloth : protected CuContextLock
+{
+ CuCloth(); // not implemented
+
+ public:
+ CuCloth& operator = (const CuCloth&);
+ typedef CuFactory FactoryType;
+ typedef CuFabric FabricType;
+ typedef CuContextLock ContextLockType;
+
+ // references to pinned, device-mapped host vectors handed out to callers
+ typedef CuHostVector<physx::PxVec3, CU_MEMHOSTALLOC_DEVICEMAP>::Type& MappedVec3fVectorType;
+ typedef CuHostVector<physx::PxVec4, CU_MEMHOSTALLOC_DEVICEMAP>::Type& MappedVec4fVectorType;
+ typedef CuHostVector<IndexPair, CU_MEMHOSTALLOC_DEVICEMAP>::Type& MappedIndexVectorType;
+ typedef CuHostVector<uint32_t, CU_MEMHOSTALLOC_DEVICEMAP>::Type& MappedMaskVectorType;
+
+ CuCloth(CuFactory&, CuFabric&, Range<const physx::PxVec4>);
+ CuCloth(CuFactory&, const CuCloth&);
+ ~CuCloth(); // not virtual on purpose
+
+ public:
+ // asleep once mSleepAfterCount consecutive passes stayed below the threshold
+ bool isSleeping() const
+ {
+ return mSleepPassCounter >= mSleepAfterCount;
+ }
+ void wakeUp()
+ {
+ mSleepPassCounter = 0;
+ }
+
+ void notifyChanged();
+
+ bool updateClothData(CuClothData&); // expects acquired context
+ uint32_t getSharedMemorySize() const; // without particle data
+
+ // expects transformed configs, doesn't call notifyChanged()
+ void setPhaseConfig(Range<const PhaseConfig>);
+
+ // double-buffer helpers for motion/separation constraints
+ Range<physx::PxVec4> push(CuConstraints&);
+ void clear(CuConstraints&);
+
+ // copy particle data between the host mirror and the device vector
+ void syncDeviceParticles();
+ void syncHostParticles();
+
+ Range<const physx::PxVec3> clampTriangleCount(Range<const physx::PxVec3>, uint32_t);
+
+ public:
+ CuFactory& mFactory;
+ CuFabric& mFabric;
+
+ bool mClothDataDirty;
+ bool mClothCostDirty;
+
+ // particle data
+ uint32_t mNumParticles;
+ CuDeviceVector<physx::PxVec4> mParticles; // cur, prev
+ CuHostVector<physx::PxVec4, CU_MEMHOSTALLOC_DEVICEMAP>::Type mParticlesHostCopy;
+ // dirty flags track which copy (device or host) is stale
+ bool mDeviceParticlesDirty;
+ bool mHostParticlesDirty;
+
+ physx::PxVec3 mParticleBoundsCenter;
+ physx::PxVec3 mParticleBoundsHalfExtent;
+
+ physx::PxVec3 mGravity;
+ physx::PxVec3 mLogDamping;
+ physx::PxVec3 mLinearLogDrag;
+ physx::PxVec3 mAngularLogDrag;
+ physx::PxVec3 mLinearInertia;
+ physx::PxVec3 mAngularInertia;
+ physx::PxVec3 mCentrifugalInertia;
+ float mSolverFrequency;
+ float mStiffnessFrequency;
+
+ physx::PxTransform mTargetMotion;
+ physx::PxTransform mCurrentMotion;
+ physx::PxVec3 mLinearVelocity;
+ physx::PxVec3 mAngularVelocity;
+
+ float mPrevIterDt;
+ MovingAverage mIterDtAvg;
+
+ CuDeviceVector<CuPhaseConfig> mPhaseConfigs; // transformed!
+ Vector<PhaseConfig>::Type mHostPhaseConfigs; // transformed!
+
+ // tether constraints stuff
+ float mTetherConstraintLogStiffness;
+ float mTetherConstraintScale;
+
+ // motion constraints stuff
+ CuConstraints mMotionConstraints;
+ float mMotionConstraintScale;
+ float mMotionConstraintBias;
+ float mMotionConstraintLogStiffness;
+
+ // separation constraints stuff
+ CuConstraints mSeparationConstraints;
+
+ // particle acceleration stuff
+ CuDeviceVector<physx::PxVec4> mParticleAccelerations;
+ CuHostVector<physx::PxVec4, CU_MEMHOSTALLOC_DEVICEMAP>::Type mParticleAccelerationsHostCopy;
+
+ // wind
+ physx::PxVec3 mWind;
+ float mDragLogCoefficient;
+ float mLiftLogCoefficient;
+
+ // collision stuff (start/target pairs are interpolated over the frame)
+ CuHostVector<IndexPair, CU_MEMHOSTALLOC_DEVICEMAP>::Type mCapsuleIndices;
+ CuHostVector<physx::PxVec4, CU_MEMHOSTALLOC_DEVICEMAP>::Type mStartCollisionSpheres;
+ CuHostVector<physx::PxVec4, CU_MEMHOSTALLOC_DEVICEMAP>::Type mTargetCollisionSpheres;
+ CuHostVector<uint32_t, CU_MEMHOSTALLOC_DEVICEMAP>::Type mConvexMasks;
+ CuHostVector<physx::PxVec4, CU_MEMHOSTALLOC_DEVICEMAP>::Type mStartCollisionPlanes;
+ CuHostVector<physx::PxVec4, CU_MEMHOSTALLOC_DEVICEMAP>::Type mTargetCollisionPlanes;
+ CuHostVector<physx::PxVec3, CU_MEMHOSTALLOC_DEVICEMAP>::Type mStartCollisionTriangles;
+ CuHostVector<physx::PxVec3, CU_MEMHOSTALLOC_DEVICEMAP>::Type mTargetCollisionTriangles;
+ bool mEnableContinuousCollision;
+ float mCollisionMassScale;
+ float mFriction;
+
+ // virtual particles
+ CuDeviceVector<uint32_t> mVirtualParticleSetSizes;
+ CuDeviceVector<Vec4us> mVirtualParticleIndices;
+ CuDeviceVector<physx::PxVec4> mVirtualParticleWeights;
+
+ // self collision
+ float mSelfCollisionDistance;
+ float mSelfCollisionLogStiffness;
+
+ CuDeviceVector<physx::PxVec4> mRestPositions;
+ CuDeviceVector<uint32_t> mSelfCollisionIndices;
+ Vector<uint32_t>::Type mSelfCollisionIndicesHost;
+
+ // 4 (position) + 2 (key) per particle + cellStart (8322)
+ CuDeviceVector<float> mSelfCollisionData;
+
+ // sleeping (see SwCloth for comments)
+ uint32_t mSleepTestInterval;
+ uint32_t mSleepAfterCount;
+ float mSleepThreshold;
+ uint32_t mSleepPassCounter;
+ uint32_t mSleepTestCounter;
+
+ uint32_t mSharedMemorySize;
+
+ void* mUserData;
+};
+}
+}
diff --git a/NvCloth/src/cuda/CuClothClone.cpp b/NvCloth/src/cuda/CuClothClone.cpp
new file mode 100644
index 0000000..df43c66
--- /dev/null
+++ b/NvCloth/src/cuda/CuClothClone.cpp
@@ -0,0 +1,86 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2017 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#include "ClothClone.h"
+
+#include "CuFactory.h"
+#include "CuFabric.h"
+#include "CuCloth.h"
+
+using namespace physx;
+
+namespace nv
+{
+namespace cloth
+{
+// Cloning accessor: returns the host-side (already transformed, see CuCloth.h)
+// phase configs. Presumably consumed by the generic convertCloth() machinery.
+Range<const PhaseConfig> getPhaseConfigs(const CuCloth& cloth)
+{
+ return makeRange(cloth.mHostPhaseConfigs);
+}
+// Cloning mutator: forwards to CuCloth::setPhaseConfig, which expects
+// transformed configs and does not call notifyChanged() (per CuCloth.h).
+void setPhaseConfigs(CuCloth& cloth, Range<const PhaseConfig> phaseConfigs)
+{
+ cloth.setPhaseConfig(phaseConfigs);
+}
+// Cloning accessor: exposes the pinned host mirror of the particle accelerations.
+Range<const PxVec4> getParticleAccelerations(const CuCloth& cloth)
+{
+ return makeRange(cloth.mParticleAccelerationsHostCopy);
+}
+// Cloning accessor: exposes the host-side copy of the self-collision indices.
+Range<const uint32_t> getSelfCollisionIndices(const CuCloth& cloth)
+{
+ return makeRange(cloth.mSelfCollisionIndicesHost);
+}
+
+// Clones this CUDA cloth into (possibly) another factory. A same-factory
+// clone is copy constructed directly; otherwise the clone is converted to the
+// destination factory's platform via convertCloth.
+template <>
+Cloth* ClothImpl<CuCloth>::clone(Factory& factory) const
+{
+ if (&mCloth.mFactory == &factory)
+ return NV_CLOTH_NEW(ClothImpl<CuCloth>)(factory, *this); // copy construct directly
+
+ switch(factory.getPlatform())
+ {
+ case Platform::CPU:
+ return convertCloth(static_cast<SwFactory&>(factory), *this);
+ case Platform::CUDA:
+ return convertCloth(static_cast<CuFactory&>(factory), *this);
+ case Platform::DX11:
+ default:
+ // CUDA -> DX11 conversion is not supported here
+ return NULL;
+ }
+}
+
+// Factory-level clone entry point: converts CPU cloths directly to this CUDA
+// factory; for everything else defers to the cloth's own clone() (which
+// handles same-factory copies and the remaining platform combinations).
+Cloth* CuFactory::clone(const Cloth& cloth)
+{
+ if (cloth.getFactory().getPlatform() == Platform::CPU)
+ return convertCloth(*this, static_cast<const SwClothImpl&>(cloth));
+
+ return cloth.clone(*this);
+}
+
+} // namespace cloth
+} // namespace nv
diff --git a/NvCloth/src/cuda/CuClothData.cpp b/NvCloth/src/cuda/CuClothData.cpp
new file mode 100644
index 0000000..decfd2c
--- /dev/null
+++ b/NvCloth/src/cuda/CuClothData.cpp
@@ -0,0 +1,238 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2017 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#include "CuClothData.h"
+#include "CuCloth.h"
+#include "CuFabric.h"
+#include "CuCheckSuccess.h"
+#include "CuContextLock.h"
+#include "../IterationState.h"
+
+using namespace nv;
+
+// Flattens a CuCloth into the POD CuClothData mirror read by the solver
+// kernel: raw device pointers are extracted from the device/host vectors and
+// scalar parameters are copied. No ownership is taken; the CuCloth must
+// outlive this snapshot.
+cloth::CuClothData::CuClothData(CuCloth& cloth)
+{
+ mNumParticles = cloth.mNumParticles;
+ mParticles = array(*cloth.mParticles.begin().get());
+
+ mParticlesHostCopy = array(*getDevicePointer(cloth.mParticlesHostCopy));
+
+ mNumPhases = uint32_t(cloth.mPhaseConfigs.size());
+ mPhaseConfigs = cloth.mPhaseConfigs.begin().get();
+
+ mTethers = cloth.mFabric.mTethers.begin().get();
+ mNumTethers = uint32_t(cloth.mFabric.mTethers.size());
+ // fold the fabric's tether length scale into the per-cloth scale once, on the host
+ mTetherConstraintScale = cloth.mTetherConstraintScale * cloth.mFabric.mTetherLengthScale;
+
+ // three indices per triangle
+ mTriangles = cloth.mFabric.mTriangles.begin().get();
+ mNumTriangles = uint32_t(cloth.mFabric.mTriangles.size()) / 3;
+
+ mMotionConstraintScale = cloth.mMotionConstraintScale;
+ mMotionConstraintBias = cloth.mMotionConstraintBias;
+
+ mNumSpheres = uint32_t(cloth.mStartCollisionSpheres.size());
+ mNumCapsules = uint32_t(cloth.mCapsuleIndices.size());
+ mCapsuleIndices = getDevicePointer(cloth.mCapsuleIndices);
+
+ mNumPlanes = uint32_t(cloth.mStartCollisionPlanes.size());
+ mNumConvexes = uint32_t(cloth.mConvexMasks.size());
+ mConvexMasks = getDevicePointer(cloth.mConvexMasks);
+
+ // three vertices per collision triangle
+ mNumCollisionTriangles = uint32_t(cloth.mStartCollisionTriangles.size()) / 3;
+
+ mVirtualParticleSetSizesBegin = cloth.mVirtualParticleSetSizes.begin().get();
+ mVirtualParticleSetSizesEnd = mVirtualParticleSetSizesBegin + cloth.mVirtualParticleSetSizes.size();
+ mVirtualParticleIndices = array(*cloth.mVirtualParticleIndices.begin().get());
+ mVirtualParticleWeights = array(*cloth.mVirtualParticleWeights.begin().get());
+
+ mEnableContinuousCollision = cloth.mEnableContinuousCollision;
+ mCollisionMassScale = cloth.mCollisionMassScale;
+ mFrictionScale = cloth.mFriction;
+
+ mSelfCollisionDistance = cloth.mSelfCollisionDistance;
+ mSelfCollisionIndices = cloth.mSelfCollisionIndices.empty() ? 0 : cloth.mSelfCollisionIndices.begin().get();
+ // without an explicit index list, every particle participates in self collision
+ mNumSelfCollisionIndices = mSelfCollisionIndices ? uint32_t(cloth.mSelfCollisionIndices.size()) : mNumParticles;
+
+ if (!cloth.mSelfCollisionData.empty())
+ {
+ // mSelfCollisionData layout (floats): [4 per particle | 2 keys per index | cell start table]
+ uint32_t keySize = 2 * mNumSelfCollisionIndices;
+ uint32_t particleSize = 4 * mNumParticles;
+
+ mSelfCollisionParticles = cloth.mSelfCollisionData.begin().get();
+ mSelfCollisionKeys = (uint32_t*)(mSelfCollisionParticles + particleSize);
+ mSelfCollisionCellStart = (uint16_t*)(mSelfCollisionKeys + keySize);
+ }
+ else
+ {
+ mSelfCollisionParticles = 0;
+ mSelfCollisionKeys = 0;
+ mSelfCollisionCellStart = 0;
+ }
+
+ mSleepTestInterval = cloth.mSleepTestInterval;
+ mSleepAfterCount = cloth.mSleepAfterCount;
+ mSleepThreshold = cloth.mSleepThreshold;
+}
+
+// Snapshots the per-frame solver inputs from a CuCloth into pinned memory.
+// Log-space stiffness values are converted to per-iteration stiffness via
+// 1 - 2^(logStiffness * stiffnessFrequency * iterDt), and the motion /
+// separation constraint double buffers are resolved so that the target
+// pointer falls back to the start buffer when no new target was pushed.
+cloth::CuFrameData::CuFrameData(CuCloth& cloth, uint32_t numSharedPositions, const IterationState<Simd4f>& state,
+ const CuIterationData* iterationData)
+{
+ mDeviceParticlesDirty = cloth.mDeviceParticlesDirty;
+
+ mNumSharedPositions = numSharedPositions;
+
+ mIterDt = state.mIterDt;
+ mNumIterations = state.mRemainingIterations;
+ mIterationData = iterationData;
+
+ // batch the three stiffness conversions into one SIMD exp2
+ Simd4f logStiffness = simd4f(0.0f, cloth.mSelfCollisionLogStiffness, cloth.mMotionConstraintLogStiffness,
+ cloth.mTetherConstraintLogStiffness);
+ Simd4f stiffnessExponent = simd4f(cloth.mStiffnessFrequency * mIterDt);
+ Simd4f stiffness = gSimd4fOne - exp2(logStiffness * stiffnessExponent);
+
+ mTetherConstraintStiffness = array(stiffness)[3];
+ mMotionConstraintStiffness = array(stiffness)[2];
+ mSelfCollisionStiffness = array(stiffness)[1];
+
+ // same conversion for the wind drag/lift coefficients
+ logStiffness = simd4f(cloth.mDragLogCoefficient, cloth.mLiftLogCoefficient, 0.0f, 0.0f);
+ stiffness = gSimd4fOne - exp2(logStiffness * stiffnessExponent);
+ mDragCoefficient = array(stiffness)[0];
+ mLiftCoefficient = array(stiffness)[1];
+ // unpack the 3x3 rotation matrix row by row
+ for (int i = 0; i < 9; ++i)
+ mRotation[i] = array(state.mRotationMatrix[i / 3])[i % 3];
+
+ mTargetMotionConstraints = 0;
+ if (!cloth.mMotionConstraints.mStart.empty())
+ {
+ mTargetMotionConstraints = array(*cloth.mMotionConstraints.mStart.begin().get());
+ }
+
+ // start points at the start buffer; target keeps pointing there too unless a
+ // new target buffer was pushed this frame
+ mStartMotionConstraints = mTargetMotionConstraints;
+ if (!cloth.mMotionConstraints.mTarget.empty())
+ {
+ mTargetMotionConstraints = array(*cloth.mMotionConstraints.mTarget.begin().get());
+ }
+
+ mHostMotionConstraints = array(*getDevicePointer(cloth.mMotionConstraints.mHostCopy));
+
+ mTargetSeparationConstraints = 0;
+ if (!cloth.mSeparationConstraints.mStart.empty())
+ {
+ mTargetSeparationConstraints = array(*cloth.mSeparationConstraints.mStart.begin().get());
+ }
+
+ // same start/target fallback as for the motion constraints above
+ mStartSeparationConstraints = mTargetSeparationConstraints;
+ if (!cloth.mSeparationConstraints.mTarget.empty())
+ {
+ mTargetSeparationConstraints = array(*cloth.mSeparationConstraints.mTarget.begin().get());
+ }
+
+ mHostSeparationConstraints = array(*getDevicePointer(cloth.mSeparationConstraints.mHostCopy));
+
+ mParticleAccelerations = 0;
+ if (!cloth.mParticleAccelerations.empty())
+ {
+ mParticleAccelerations = array(*cloth.mParticleAccelerations.begin().get());
+ }
+
+ mHostParticleAccelerations = array(*getDevicePointer(cloth.mParticleAccelerationsHostCopy));
+
+ mRestPositions = 0;
+ if (!cloth.mRestPositions.empty())
+ {
+ mRestPositions = array(*cloth.mRestPositions.begin().get());
+ }
+
+ // collision shapes: a null target falls back to the start shapes
+ mStartCollisionSpheres = array(*getDevicePointer(cloth.mStartCollisionSpheres));
+ mTargetCollisionSpheres = array(*getDevicePointer(cloth.mTargetCollisionSpheres));
+
+ if (!mTargetCollisionSpheres)
+ mTargetCollisionSpheres = mStartCollisionSpheres;
+
+ mStartCollisionPlanes = array(*getDevicePointer(cloth.mStartCollisionPlanes));
+ mTargetCollisionPlanes = array(*getDevicePointer(cloth.mTargetCollisionPlanes));
+
+ if (!mTargetCollisionPlanes)
+ mTargetCollisionPlanes = mStartCollisionPlanes;
+
+ mStartCollisionTriangles = array(*getDevicePointer(cloth.mStartCollisionTriangles));
+ mTargetCollisionTriangles = array(*getDevicePointer(cloth.mTargetCollisionTriangles));
+
+ if (!mTargetCollisionTriangles)
+ mTargetCollisionTriangles = mStartCollisionTriangles;
+
+ // pack bounds as (maxX, -minX, maxY, -minY, maxZ, -minZ); see CuFrameData
+ for (uint32_t i = 0; i < 3; ++i)
+ {
+ float c = array(cloth.mParticleBoundsCenter)[i];
+ float r = array(cloth.mParticleBoundsHalfExtent)[i];
+ mParticleBounds[i * 2 + 0] = r + c;
+ mParticleBounds[i * 2 + 1] = r - c;
+ }
+
+ mSleepPassCounter = cloth.mSleepPassCounter;
+ mSleepTestCounter = cloth.mSleepTestCounter;
+
+ mStiffnessExponent = cloth.mStiffnessFrequency * mIterDt;
+}
+
namespace
{
// Copies the upper-left 3x3 block of a row-major matrix whose rows are
// 4 floats apart into a densely packed 3x3 row-major array, transposing it
// on the way (dst[row][col] = src[col][row]).
void copySquareTransposed(float* dst, const float* src)
{
	for (int row = 0; row < 3; ++row)
		for (int col = 0; col < 3; ++col)
			dst[row * 3 + col] = src[col * 4 + row];
}
}
+
+// Packs per-iteration integration data into the pinned CuIterationData.
+// mIntegrationTrafo layout: [0..2] prev bias, [3..5] cur bias,
+// [6..14] prev 3x3 matrix (transposed), [15..23] cur 3x3 matrix (transposed).
+cloth::CuIterationData::CuIterationData(const IterationState<Simd4f>& state)
+{
+ mIntegrationTrafo[0] = array(state.mPrevBias)[0];
+ mIntegrationTrafo[1] = array(state.mPrevBias)[1];
+ mIntegrationTrafo[2] = array(state.mPrevBias)[2];
+
+ mIntegrationTrafo[3] = array(state.mCurBias)[0];
+ mIntegrationTrafo[4] = array(state.mCurBias)[1];
+ mIntegrationTrafo[5] = array(state.mCurBias)[2];
+
+ copySquareTransposed(mIntegrationTrafo + 6, array(*state.mPrevMatrix));
+ copySquareTransposed(mIntegrationTrafo + 15, array(*state.mCurMatrix));
+
+ mWind[0] = array(state.mWind)[0];
+ mWind[1] = array(state.mWind)[1];
+ mWind[2] = array(state.mWind)[2];
+
+ mIsTurning = state.mIsTurning ? 0x3F800000u : 0; // 1.0f to avoid ftz
+}
diff --git a/NvCloth/src/cuda/CuClothData.h b/NvCloth/src/cuda/CuClothData.h
new file mode 100644
index 0000000..0e4cda0
--- /dev/null
+++ b/NvCloth/src/cuda/CuClothData.h
@@ -0,0 +1,192 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2017 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#pragma once
+
+#include <foundation/Px.h>
+
+#ifndef __CUDACC__
+#include "simd.h"
+#endif
+
+namespace nv
+{
+namespace cloth
+{
+
+class CuCloth;
+struct CuPhaseConfig;
+template <typename>
+struct IterationState;
+struct IndexPair;
+struct CuIterationData;
+struct CuTether;
+
+// reference to cloth instance bulk data (POD)
+// should not need frequent updates (stored on device)
+// reference to cloth instance bulk data (POD)
+// should not need frequent updates (stored on device)
+struct CuClothData
+{
+ // leaves all members uninitialized; filled in by CuClothData(CuCloth&)
+ CuClothData()
+ {
+ }
+ CuClothData(CuCloth&);
+
+ // particle data (raw device pointers extracted from CuCloth's vectors)
+ uint32_t mNumParticles;
+ float* mParticles;
+ float* mParticlesHostCopy;
+
+ // fabric constraints
+ uint32_t mNumPhases;
+ const CuPhaseConfig* mPhaseConfigs;
+
+ const CuTether* mTethers;
+ uint32_t mNumTethers;
+ // already multiplied by the fabric's tether length scale
+ float mTetherConstraintScale;
+
+ const uint16_t* mTriangles;
+ uint32_t mNumTriangles;
+
+ // motion constraint data
+ float mMotionConstraintScale;
+ float mMotionConstraintBias;
+
+ // collision data
+ uint32_t mNumSpheres; // don't change this order, it's
+ uint32_t mNumCapsules; // needed by mergeAcceleration()
+ const IndexPair* mCapsuleIndices;
+ uint32_t mNumPlanes;
+ uint32_t mNumConvexes;
+ const uint32_t* mConvexMasks;
+ uint32_t mNumCollisionTriangles;
+
+ // virtual particle data
+ const uint32_t* mVirtualParticleSetSizesBegin;
+ const uint32_t* mVirtualParticleSetSizesEnd;
+ const uint16_t* mVirtualParticleIndices;
+ const float* mVirtualParticleWeights;
+
+ bool mEnableContinuousCollision;
+ float mCollisionMassScale;
+ float mFrictionScale;
+
+ float mSelfCollisionDistance;
+ // mNumParticles when no explicit index list is set (see CuClothData.cpp)
+ uint32_t mNumSelfCollisionIndices;
+ const uint32_t* mSelfCollisionIndices;
+ // layout: [4 floats per particle | 2 keys per index | cell start table]
+ float* mSelfCollisionParticles;
+ uint32_t* mSelfCollisionKeys;
+ uint16_t* mSelfCollisionCellStart;
+
+ // sleep data
+ uint32_t mSleepTestInterval;
+ uint32_t mSleepAfterCount;
+ float mSleepThreshold;
+};
+
+// per-frame data (stored in pinned memory)
+struct CuFrameData
+{
+ CuFrameData()
+ {
+ } // not initializing pointers to 0!
+
+#ifndef __CUDACC__
+ explicit CuFrameData(CuCloth&, uint32_t, const IterationState<Simd4f>&, const CuIterationData*);
+#endif
+
+ bool mDeviceParticlesDirty;
+
+ // number of particle copies that fit in shared memory (0, 1, or 2)
+ uint32_t mNumSharedPositions;
+
+ // iteration data
+ float mIterDt;
+ uint32_t mNumIterations;
+ const CuIterationData* mIterationData;
+
+ float mTetherConstraintStiffness;
+
+ // wind data
+ float mDragCoefficient;
+ float mLiftCoefficient;
+ float mRotation[9];
+
+ // motion constraint data
+ const float* mStartMotionConstraints;
+ float* mTargetMotionConstraints;
+ const float* mHostMotionConstraints;
+ float mMotionConstraintStiffness;
+
+ // separation constraint data
+ const float* mStartSeparationConstraints;
+ float* mTargetSeparationConstraints;
+ const float* mHostSeparationConstraints;
+
+ // particle acceleration data
+ float* mParticleAccelerations;
+ const float* mHostParticleAccelerations;
+
+ // rest positions
+ const float* mRestPositions;
+
+ // collision data
+ const float* mStartCollisionSpheres;
+ const float* mTargetCollisionSpheres;
+ const float* mStartCollisionPlanes;
+ const float* mTargetCollisionPlanes;
+ const float* mStartCollisionTriangles;
+ const float* mTargetCollisionTriangles;
+
+ float mSelfCollisionStiffness;
+
+ float mParticleBounds[6]; // maxX, -minX, maxY, ...
+
+ uint32_t mSleepPassCounter;
+ uint32_t mSleepTestCounter;
+
+ float mStiffnessExponent;
+};
+
+// per-iteration data (stored in pinned memory)
+struct CuIterationData
+{
+ CuIterationData()
+ {
+ } // not initializing!
+
+#ifndef __CUDACC__
+ explicit CuIterationData(const IterationState<Simd4f>&);
+#endif
+
+ // [0..2] prev bias, [3..5] cur bias, [6..14] prev matrix (transposed),
+ // [15..23] cur matrix (transposed) — see CuIterationData's constructor
+ float mIntegrationTrafo[24];
+ float mWind[3];
+ // 0x3F800000 (1.0f bit pattern) when turning, 0 otherwise — avoids ftz
+ uint32_t mIsTurning;
+};
+}
+}
diff --git a/NvCloth/src/cuda/CuCollision.h b/NvCloth/src/cuda/CuCollision.h
new file mode 100644
index 0000000..aeb2bda
--- /dev/null
+++ b/NvCloth/src/cuda/CuCollision.h
@@ -0,0 +1,1572 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2017 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#pragma once
+
+#ifndef CU_SOLVER_KERNEL_CU
+#error include CuCollision.h only from CuSolverKernel.cu
+#endif
+
+#include "../IndexPair.h"
+
+namespace
+{
+// Capsule (sphere/cone), convex and triangle collision for the CUDA cloth
+// solver. All shape data lives in shared memory in structure-of-arrays form.
+// NOTE(review): member order is load-bearing - the constructor and the
+// file-local allocate() index across adjacent members with pointer
+// arithmetic (e.g. (&mShapeGrid)[-14 * i]); do not reorder fields.
+struct CuCollision
+{
+ // bitmasks of spheres/cones that can potentially collide with a particle
+ struct ShapeMask
+ {
+ uint32_t mSpheres;
+ uint32_t mCones;
+
+ // component-wise intersection of two candidate sets
+ __device__ friend ShapeMask& operator &= (ShapeMask& left, const ShapeMask& right)
+ {
+ left.mSpheres = left.mSpheres & right.mSpheres;
+ left.mCones = left.mCones & right.mCones;
+ return left;
+ }
+ };
+
+ // SoA shared-memory arrays: 4 sphere components (x/y/z/radius) followed
+ // by 10 derived cone components (filled by generateCones). Sub-array
+ // pointers are assigned by the file-local allocate().
+ struct CollisionData
+ {
+ Pointer<Shared, float> mSphereX;
+ Pointer<Shared, float> mSphereY;
+ Pointer<Shared, float> mSphereZ;
+ Pointer<Shared, float> mSphereW;
+
+ Pointer<Shared, float> mConeCenterX;
+ Pointer<Shared, float> mConeCenterY;
+ Pointer<Shared, float> mConeCenterZ;
+ Pointer<Shared, float> mConeRadius;
+ Pointer<Shared, float> mConeAxisX;
+ Pointer<Shared, float> mConeAxisY;
+ Pointer<Shared, float> mConeAxisZ;
+ Pointer<Shared, float> mConeSlope;
+ Pointer<Shared, float> mConeSqrCosine;
+ Pointer<Shared, float> mConeHalfLength;
+ };
+
+ public:
+ __device__ CuCollision(Pointer<Shared, uint32_t>);
+
+ // collides 'current' particles against shapes interpolated at 'alpha'
+ template <typename CurrentT, typename PreviousT>
+ __device__ void operator()(CurrentT& current, PreviousT& previous, float alpha);
+
+ private:
+ __device__ void buildSphereAcceleration(const CollisionData&);
+ __device__ void buildConeAcceleration();
+ __device__ void mergeAcceleration();
+
+ template <typename CurrentT>
+ __device__ bool buildAcceleration(const CurrentT&, float);
+
+ __device__ static ShapeMask readShapeMask(const float&, Pointer<Shared, const uint32_t>);
+ template <typename CurPos>
+ __device__ ShapeMask getShapeMask(const CurPos&) const;
+ template <typename PrevPos, typename CurPos>
+ __device__ ShapeMask getShapeMask(const PrevPos&, const CurPos&) const;
+
+ template <typename CurPos>
+ __device__ int32_t collideCapsules(const CurPos&, float3&, float3&) const;
+ template <typename PrevPos, typename CurPos>
+ __device__ int32_t collideCapsules(const PrevPos&, CurPos&, float3&, float3&) const;
+
+ template <typename CurrentT, typename PreviousT>
+ __device__ void collideCapsules(CurrentT& current, PreviousT& previous) const;
+ template <typename CurrentT, typename PreviousT>
+ __device__ void collideVirtualCapsules(CurrentT& current, PreviousT& previous) const;
+ template <typename CurrentT, typename PreviousT>
+ __device__ void collideContinuousCapsules(CurrentT& current, PreviousT& previous) const;
+
+ template <typename CurrentT, typename PreviousT>
+ __device__ void collideConvexes(CurrentT& current, PreviousT& previous, float alpha);
+ template <typename CurPos>
+ __device__ int32_t collideConvexes(const CurPos&, float3&) const;
+
+ template <typename CurrentT>
+ __device__ void collideTriangles(CurrentT& current, float alpha);
+ template <typename CurrentT>
+ __device__ void collideTriangles(CurrentT& current, int32_t i);
+
+ public:
+ Pointer<Shared, uint32_t> mCapsuleIndices; // 2 sphere indices per capsule
+ Pointer<Shared, uint32_t> mCapsuleMasks; // per capsule: {first sphere bit, both sphere bits}
+ Pointer<Shared, uint32_t> mConvexMasks; // copied from gClothData.mConvexMasks
+
+ CollisionData mPrevData;
+ CollisionData mCurData;
+
+ // acceleration structure: 12 bitmask arrays of sGridSize cells each
+ // (3 axes x {2 sides} x {spheres, cones}); see buildAcceleration()
+ Pointer<Shared, uint32_t> mShapeGrid;
+ float mGridScale[3];
+ float mGridBias[3];
+ static const uint32_t sGridSize = 8;
+};
+
+// Exchanges the contents of a and b via a temporary copy.
+template <typename T>
+__device__ void swap(T& a, T& b)
+{
+ T tmp = a;
+ a = b;
+ b = tmp;
+}
+}
+
+// Block-wide collision context; uninitialized<> defers construction
+// (NOTE(review): confirm where placement-construction happens in the kernel)
+__shared__ uninitialized<CuCollision> gCollideParticles;
+
+namespace
+{
+// Distributes sub-array pointers over the 15 consecutive Pointer slots
+// starting at data.mSphereX: the first 4 arrays (sphere x/y/z/w) get
+// mNumSpheres floats each, the next 10 (cone data) get mNumCapsules floats
+// (min(threadIdx.x, 4) accounts for the size difference). The 15th write is
+// deliberately one pointer past CollisionData and initializes the adjacent
+// member of the enclosing CuCollision (see allocate(mCurData) in the ctor,
+// which thereby also sets mShapeGrid).
+__device__ void allocate(CuCollision::CollisionData& data)
+{
+ if (threadIdx.x < 15)
+ {
+ Pointer<Shared, float>* ptr = &data.mSphereX;
+ ptr[threadIdx.x] = *ptr + threadIdx.x * gClothData.mNumCapsules +
+ min(threadIdx.x, 4) * (gClothData.mNumSpheres - gClothData.mNumCapsules);
+ }
+}
+
+// Interpolates collision spheres between start and target state by alpha and
+// transposes from AoS (xyzw per sphere) to SoA (all x, then y, z, w).
+// The radius (every 4th component) is clamped to be non-negative.
+// NOTE(review): assumes blockDim.x >= 4 * mNumSpheres - components beyond
+// blockDim.x would never be written; confirm the kernel launch guarantees it.
+__device__ void generateSpheres(CuCollision::CollisionData& data, float alpha)
+{
+ // interpolate spheres and transpose
+ if (threadIdx.x < gClothData.mNumSpheres * 4)
+ {
+ float start = __ldg(gFrameData.mStartCollisionSpheres + threadIdx.x);
+ float target = __ldg(gFrameData.mTargetCollisionSpheres + threadIdx.x);
+ float value = start + (target - start) * alpha;
+ if (threadIdx.x % 4 == 3)
+ value = max(value, 0.0f);
+ // transposed destination index: component * numSpheres + sphere
+ int32_t j = threadIdx.x % 4 * gClothData.mNumSpheres + threadIdx.x / 4;
+ data.mSphereX[j] = value;
+ }
+
+ __syncthreads();
+}
+
+// Derives one tapered capsule ('cone') per capsule from its two endpoint
+// spheres: center, unit axis, half length, plus radius/slope at the center.
+// iIt points at this thread's pair of sphere indices in mCapsuleIndices.
+// If the radius difference exceeds the sphere distance (sqrConeLength <= 0)
+// the cone surface is degenerate; both inverse lengths are zeroed so the
+// derived radius becomes 0 and buildConeAcceleration skips the cone.
+__device__ void generateCones(CuCollision::CollisionData& data, Pointer<Shared, const uint32_t> iIt)
+{
+ // generate cones
+ if (threadIdx.x < gClothData.mNumCapsules)
+ {
+ uint32_t firstIndex = iIt[0];
+ uint32_t secondIndex = iIt[1];
+
+ float firstX = data.mSphereX[firstIndex];
+ float firstY = data.mSphereY[firstIndex];
+ float firstZ = data.mSphereZ[firstIndex];
+ float firstW = data.mSphereW[firstIndex];
+
+ float secondX = data.mSphereX[secondIndex];
+ float secondY = data.mSphereY[secondIndex];
+ float secondZ = data.mSphereZ[secondIndex];
+ float secondW = data.mSphereW[secondIndex];
+
+ // half difference of the endpoints; w is the radius difference
+ float axisX = (secondX - firstX) * 0.5f;
+ float axisY = (secondY - firstY) * 0.5f;
+ float axisZ = (secondZ - firstZ) * 0.5f;
+ float axisW = (secondW - firstW) * 0.5f;
+
+ float sqrAxisLength = axisX * axisX + axisY * axisY + axisZ * axisZ;
+ float sqrConeLength = sqrAxisLength - axisW * axisW;
+
+ float invAxisLength = rsqrtf(sqrAxisLength);
+ float invConeLength = rsqrtf(sqrConeLength);
+
+ if (sqrConeLength <= 0.0f)
+ invAxisLength = invConeLength = 0.0f;
+
+ float axisLength = sqrAxisLength * invAxisLength;
+
+ data.mConeCenterX[threadIdx.x] = (secondX + firstX) * 0.5f;
+ data.mConeCenterY[threadIdx.x] = (secondY + firstY) * 0.5f;
+ data.mConeCenterZ[threadIdx.x] = (secondZ + firstZ) * 0.5f;
+ data.mConeRadius[threadIdx.x] = (axisW + firstW) * invConeLength * axisLength;
+
+ data.mConeAxisX[threadIdx.x] = axisX * invAxisLength;
+ data.mConeAxisY[threadIdx.x] = axisY * invAxisLength;
+ data.mConeAxisZ[threadIdx.x] = axisZ * invAxisLength;
+ data.mConeSlope[threadIdx.x] = axisW * invConeLength;
+
+ // sine of the cone half-angle; store 1 - sin^2 = cos^2
+ float sine = axisW * invAxisLength;
+ data.mConeSqrCosine[threadIdx.x] = 1 - sine * sine;
+ data.mConeHalfLength[threadIdx.x] = axisLength;
+ }
+
+ __syncthreads();
+}
+}
+
+// Carves the shared scratch area into capsule index/mask tables, convex
+// masks and the shape-grid/sphere/cone arrays, then (if CCD or friction is
+// on) pre-generates the 'previous' sphere and cone state.
+__device__ CuCollision::CuCollision(Pointer<Shared, uint32_t> scratchPtr)
+{
+ int32_t numCapsules2 = 2 * gClothData.mNumCapsules;
+ int32_t numCapsules4 = 4 * gClothData.mNumCapsules;
+ int32_t numConvexes = gClothData.mNumConvexes;
+
+ if (threadIdx.x < 3)
+ {
+ // threads 0..2 set mCapsuleIndices/mCapsuleMasks/mConvexMasks via
+ // pointer arithmetic over the three adjacent members
+ (&mCapsuleIndices)[threadIdx.x] = scratchPtr + threadIdx.x * numCapsules2;
+ // -14 steps back over one CollisionData (14 pointers): thread 0 sets
+ // mShapeGrid, thread 1 mCurData.mSphereX, thread 2 mPrevData.mSphereX,
+ // all to the same base; allocate() later offsets the sub-arrays
+ (&mShapeGrid)[-14 * int32_t(threadIdx.x)] = scratchPtr + numCapsules4 + numConvexes;
+ }
+
+ Pointer<Shared, uint32_t> indexPtr = scratchPtr + threadIdx.x;
+ if (threadIdx.x < numCapsules2)
+ {
+ uint32_t index = (&gClothData.mCapsuleIndices->first)[threadIdx.x];
+ *indexPtr = index;
+
+ // masks[2j] = bit of first sphere; masks[2j+1] = bits of both spheres
+ // (odd lanes OR in their even neighbor's bit; volatile enforces the
+ // read-after-write ordering within the warp - pre-Volta idiom)
+ volatile uint32_t* maskPtr = generic(indexPtr + numCapsules2);
+ *maskPtr = 1u << index;
+ *maskPtr |= maskPtr[-int32_t(threadIdx.x & 1)];
+ }
+ indexPtr += numCapsules4;
+
+ if (threadIdx.x < numConvexes)
+ *indexPtr = gClothData.mConvexMasks[threadIdx.x];
+
+ if (gClothData.mEnableContinuousCollision || gClothData.mFrictionScale > 0.0f)
+ {
+ allocate(mPrevData);
+
+ __syncthreads(); // mPrevData raw hazard
+
+ generateSpheres(mPrevData, 0.0f);
+ generateCones(mPrevData, mCapsuleIndices + 2 * threadIdx.x);
+ }
+
+ allocate(mCurData); // also initializes mShapeGrid (!)
+}
+
+// Runs one collision step for all particles: convexes, triangles, then
+// capsule collision (continuous or discrete, plus virtual particles) guarded
+// by the grid acceleration test. Finally saves the current sphere/cone state
+// as 'previous' for friction / CCD in the next iteration.
+template <typename CurrentT, typename PreviousT>
+__device__ void CuCollision::operator()(CurrentT& current, PreviousT& previous, float alpha)
+{
+ ProfileDetailZone zone(cloth::CuProfileZoneIds::COLLIDE);
+
+ // if (current.w > 0) current.w = previous.w (see SwSolverKernel::computeBounds())
+ for (int32_t i = threadIdx.x; i < gClothData.mNumParticles; i += blockDim.x)
+ {
+ if (current(i, 3) > 0.0f)
+ current(i, 3) = previous(i, 3);
+ }
+
+ collideConvexes(current, previous, alpha);
+ collideTriangles(current, alpha);
+
+ if (buildAcceleration(current, alpha))
+ {
+ if (gClothData.mEnableContinuousCollision)
+ collideContinuousCapsules(current, previous);
+ else
+ collideCapsules(current, previous);
+
+ collideVirtualCapsules(current, previous);
+ }
+
+ // sync otherwise first threads overwrite sphere data before
+ // remaining ones have had a chance to use it leading to incorrect
+ // velocity calculation for friction / ccd
+
+ __syncthreads();
+
+ if (gClothData.mEnableContinuousCollision || gClothData.mFrictionScale > 0.0f)
+ {
+ // store current collision data for next iteration; copies the whole
+ // contiguous prev region (all 14 sphere/cone arrays) in one strided loop
+ Pointer<Shared, float> dstIt = mPrevData.mSphereX + threadIdx.x;
+ Pointer<Shared, const float> srcIt = mCurData.mSphereX + threadIdx.x;
+ for (; dstIt < mCurData.mSphereX; dstIt += blockDim.x, srcIt += blockDim.x)
+ *dstIt = *srcIt;
+ }
+
+ // __syncthreads() called in updateSleepState()
+}
+
+// build per-axis mask arrays of spheres on the right/left of grid cell.
+// Uses 192 threads = 6 warps: warp = axis (0..2) x side (min/max); lane =
+// sphere index (supports up to 32 spheres). Each warp owns one sGridSize
+// array of mShapeGrid, so the |= __ballot writes do not race across warps.
+__device__ void CuCollision::buildSphereAcceleration(const CollisionData& data)
+{
+ if (threadIdx.x >= 192)
+ return;
+
+ int32_t sphereIdx = threadIdx.x & 31;
+ int32_t axisIdx = threadIdx.x >> 6; // coordinate index (x, y, or z)
+ int32_t signi = threadIdx.x << 26 & 0x80000000; // sign bit (min or max)
+
+ // +1 for the max side, -1 for the min side
+ float signf = copysignf(1.0f, reinterpret_cast<const float&>(signi));
+ float pos = signf * data.mSphereW[sphereIdx] + data.mSphereX[sphereIdx + gClothData.mNumSpheres * axisIdx];
+
+ // use overflow so we can test for non-positive
+ uint32_t index = signi - uint32_t(floorf(pos * mGridScale[axisIdx] + mGridBias[axisIdx]));
+
+ // min-side warps target arrays 3..5 (offset by 3 axes)
+ axisIdx += (uint32_t(signi) >> 31) * 3;
+ Pointer<Shared, uint32_t> dst = mShapeGrid + sGridSize * axisIdx;
+ // #pragma unroll
+ for (int32_t i = 0; i < sGridSize; ++i, ++index)
+ dst[i] |= __ballot(int32_t(index) <= 0);
+}
+
+// generate cone masks from sphere masks: a cone occupies a grid cell iff
+// either of its endpoint spheres does (mCapsuleMasks[2j+1] holds both sphere
+// bits). Degenerate cones (radius 0, see generateCones) are skipped.
+__device__ void CuCollision::buildConeAcceleration()
+{
+ if (threadIdx.x >= 192)
+ return;
+
+ int32_t coneIdx = threadIdx.x & 31;
+
+ uint32_t sphereMask =
+ mCurData.mConeRadius[coneIdx] && coneIdx < gClothData.mNumCapsules ? mCapsuleMasks[2 * coneIdx + 1] : 0;
+
+ // each warp reads one sphere grid array and writes the cone array 6 slots later
+ int32_t offset = threadIdx.x / 32 * sGridSize;
+ Pointer<Shared, uint32_t> src = mShapeGrid + offset;
+ Pointer<Shared, uint32_t> dst = src + 6 * sGridSize;
+
+ // #pragma unroll
+ for (int32_t i = 0; i < sGridSize; ++i)
+ dst[i] |= __ballot(src[i] & sphereMask);
+}
+
+// convert right/left mask arrays into single overlap array (intersection of
+// the two sides), except when continuous collision keeps both sides separate
+__device__ void CuCollision::mergeAcceleration()
+{
+ if (threadIdx.x < sGridSize * 12)
+ {
+ Pointer<Shared, uint32_t> dst = mShapeGrid + threadIdx.x;
+ if (!(gClothData.mEnableContinuousCollision || threadIdx.x * 43 & 1024))
+ *dst &= dst[sGridSize * 3]; // above is same as 'threadIdx.x/24 & 1'
+
+ // mask garbage bits written by buildSphereAcceleration / buildConeAcceleration
+ // NOTE(review): (&mNumSpheres)[1] relies on mNumCapsules being the next
+ // field of CuClothData - confirm field order there
+ int32_t shapeIdx = threadIdx.x >= sGridSize * 6; // spheres=0, cones=1
+ *dst &= (1 << (&gClothData.mNumSpheres)[shapeIdx]) - 1;
+ }
+}
+
+namespace
+{
+#if __CUDA_ARCH__ >= 300
+// Warp max-reduction over 32 consecutive shared floats via shuffles; the
+// full maximum is only guaranteed in the first lane of each 32-thread group.
+__device__ float mergeBounds(Pointer<Shared, float> buffer)
+{
+ float value = *buffer;
+ value = max(value, __shfl_down(value, 1));
+ value = max(value, __shfl_down(value, 2));
+ value = max(value, __shfl_down(value, 4));
+ value = max(value, __shfl_down(value, 8));
+ return max(value, __shfl_down(value, 16));
+}
+#else
+// Pre-sm_30 fallback: in-place tree max-reduction through volatile shared
+// memory, relying on warp-synchronous execution instead of __syncthreads
+// (valid on the pre-Volta architectures this path targets).
+__device__ float mergeBounds(Pointer<Shared, float> buffer)
+{
+ // ensure that writes to buffer are visible to all threads
+ __threadfence_block();
+
+ volatile float* ptr = generic(buffer);
+ *ptr = max(*ptr, ptr[16]);
+ *ptr = max(*ptr, ptr[8]);
+ *ptr = max(*ptr, ptr[4]);
+ *ptr = max(*ptr, ptr[2]);
+ return max(*ptr, ptr[1]);
+}
+#endif
+// computes maxX, -minX, maxY, ... with a stride of 32, threadIdx.x must be < 192.
+// Layout mirrors buildSphereAcceleration: warp = axis x side, lane = sphere.
+// Result is the warp's max of (center +/- radius); fully valid in lane 0.
+__device__ float computeSphereBounds(const CuCollision::CollisionData& data, Pointer<Shared, float> buffer)
+{
+ assert(threadIdx.x < 192);
+
+ int32_t sphereIdx = min(threadIdx.x & 31, gClothData.mNumSpheres - 1); // sphere index
+ int32_t axisIdx = threadIdx.x >> 6; // coordinate index (x, y, or z)
+ int32_t signi = threadIdx.x << 26; // sign bit (min or max)
+ float signf = copysignf(1.0f, reinterpret_cast<const float&>(signi));
+
+ *buffer = data.mSphereW[sphereIdx] + signf * data.mSphereX[sphereIdx + gClothData.mNumSpheres * axisIdx];
+
+ return mergeBounds(buffer);
+}
+
+#if __CUDA_ARCH__ >= 300
+// Computes particle bounds as (maxX, -minX, maxY, ...) with stride 32.
+// Phase 1: the block is split into three equal warp-aligned groups, one per
+// axis; each group strides over the particles and shuffle-reduces min/max
+// per warp, storing per-warp results into 'buffer'. Phase 2: the first 192
+// threads shuffle-reduce those per-warp partials (one warp per axis/side).
+template <typename CurrentT>
+__device__ float computeParticleBounds(const CurrentT& current, Pointer<Shared, float> buffer)
+{
+ int32_t numThreadsPerAxis = blockDim.x * 342 >> 10 & ~31; // same as / 3
+ int32_t axis = (threadIdx.x >= numThreadsPerAxis) + (threadIdx.x >= 2 * numThreadsPerAxis);
+ int32_t threadIdxInAxis = threadIdx.x - axis * numThreadsPerAxis;
+ int laneIdx = threadIdx.x & 31;
+
+ if (threadIdxInAxis < numThreadsPerAxis)
+ {
+ typename CurrentT::ConstPointerType posIt = current[axis];
+ int32_t i = min(threadIdxInAxis, gClothData.mNumParticles - 1);
+ float minX = posIt[i], maxX = minX;
+ while (i += numThreadsPerAxis, i < gClothData.mNumParticles)
+ {
+ float posX = posIt[i];
+ minX = min(minX, posX);
+ maxX = max(maxX, posX);
+ }
+
+ // warp-wide min/max reduction; lane 0 holds the warp result
+ minX = min(minX, __shfl_down(minX, 1));
+ maxX = max(maxX, __shfl_down(maxX, 1));
+ minX = min(minX, __shfl_down(minX, 2));
+ maxX = max(maxX, __shfl_down(maxX, 2));
+ minX = min(minX, __shfl_down(minX, 4));
+ maxX = max(maxX, __shfl_down(maxX, 4));
+ minX = min(minX, __shfl_down(minX, 8));
+ maxX = max(maxX, __shfl_down(maxX, 8));
+ minX = min(minX, __shfl_down(minX, 16));
+ maxX = max(maxX, __shfl_down(minX, 16));
+
+ if (!laneIdx)
+ {
+ // per-warp partials: max at [axis*64 + warp], -min 32 slots later
+ Pointer<Shared, float> dst = buffer - threadIdx.x + (threadIdxInAxis >> 5) + (axis << 6);
+ dst[0] = maxX;
+ dst[32] = -minX;
+ }
+ }
+
+ __syncthreads();
+
+ if (threadIdx.x >= 192)
+ return 0.0f;
+
+ float value = *buffer;
+ if (laneIdx >= (numThreadsPerAxis >> 5))
+ value = -FLT_MAX; // lanes past the number of partial results
+
+ // blockDim.x <= 3 * 512, increase to 3 * 1024 by adding a shfl by 16
+ assert(numThreadsPerAxis <= 16 * 32);
+
+ value = max(value, __shfl_down(value, 1));
+ value = max(value, __shfl_down(value, 2));
+ value = max(value, __shfl_down(value, 4));
+ return max(value, __shfl_down(value, 8));
+}
+#else
+// Pre-sm_30 fallback: 192 threads (warp = axis x side, lane strides over
+// particles); each lane accumulates a signed max into its shared slot, then
+// mergeBounds reduces the warp. Returns (maxX, -minX, maxY, ...) stride 32.
+template <typename CurrentT>
+__device__ float computeParticleBounds(const CurrentT& current, Pointer<Shared, float> buffer)
+{
+ if (threadIdx.x >= 192)
+ return 0.0f;
+
+ int32_t axisIdx = threadIdx.x >> 6; // x, y, or z
+ int32_t signi = threadIdx.x << 26; // sign bit (min or max)
+ float signf = copysignf(1.0f, reinterpret_cast<const float&>(signi));
+
+ typename CurrentT::ConstPointerType pIt = current[axisIdx];
+ typename CurrentT::ConstPointerType pEnd = pIt + gClothData.mNumParticles;
+ pIt += min(threadIdx.x & 31, gClothData.mNumParticles - 1);
+
+ // multiplying by signf turns the min into a max of the negated values
+ *buffer = *pIt * signf;
+ while (pIt += 32, pIt < pEnd)
+ *buffer = max(*buffer, *pIt * signf);
+
+ return mergeBounds(buffer);
+}
+#endif
+}
+
+// build mask of spheres/cones touching a regular grid along each axis.
+// Computes cloth and sphere bounds, intersects them to size an 8-cell grid
+// per axis (scale/bias map a world coordinate into [0, sGridSize)), then
+// fills and merges the per-axis occupancy masks. Returns false when there
+// are no spheres or the cloth/sphere bounds do not intersect.
+template <typename CurrentT>
+__device__ bool CuCollision::buildAcceleration(const CurrentT& current, float alpha)
+{
+ ProfileDetailZone zone(cloth::CuProfileZoneIds::COLLIDE_ACCELERATION);
+
+ // use still unused cone data as buffer for bounds computation
+ Pointer<Shared, float> buffer = mCurData.mConeCenterX + threadIdx.x;
+ float curParticleBounds = computeParticleBounds(current, buffer);
+ int32_t warpIdx = threadIdx.x >> 5;
+
+ if (!gClothData.mNumSpheres)
+ {
+ // still publish particle bounds for sleep/inter-collision consumers
+ if (threadIdx.x < 192 && !(threadIdx.x & 31))
+ gFrameData.mParticleBounds[warpIdx] = curParticleBounds;
+ return false;
+ }
+
+ generateSpheres(mCurData, alpha);
+
+ if (threadIdx.x < 192)
+ {
+ float sphereBounds = computeSphereBounds(mCurData, buffer);
+ float particleBounds = curParticleBounds;
+ if (gClothData.mEnableContinuousCollision)
+ {
+ // CCD sweeps through last frame's volume as well
+ sphereBounds = max(sphereBounds, computeSphereBounds(mPrevData, buffer));
+ float prevParticleBounds = gFrameData.mParticleBounds[warpIdx];
+ particleBounds = max(particleBounds, prevParticleBounds);
+ }
+
+ // min of the two upper bounds = upper bound of the intersection
+ // (remember: odd entries store negated lower bounds)
+ float bounds = min(sphereBounds, particleBounds);
+ float expandedBounds = bounds + abs(bounds) * 1e-4f;
+
+ // store bounds data in shared memory
+ if (!(threadIdx.x & 31))
+ {
+ mGridScale[warpIdx] = expandedBounds;
+ gFrameData.mParticleBounds[warpIdx] = curParticleBounds;
+ }
+ }
+
+ __syncthreads(); // mGridScale raw hazard
+
+ if (threadIdx.x < 3)
+ {
+ // convert {upper, -lower} pairs into scale/bias per axis
+ float negativeLower = mGridScale[threadIdx.x * 2 + 1];
+ float edgeLength = mGridScale[threadIdx.x * 2] + negativeLower;
+ float divisor = max(edgeLength, FLT_EPSILON);
+ mGridScale[threadIdx.x] = __fdividef(sGridSize - 1e-3, divisor);
+ mGridBias[threadIdx.x] = negativeLower * mGridScale[threadIdx.x];
+ if (edgeLength < 0.0f)
+ mGridScale[0] = 0.0f; // mark empty intersection
+ }
+
+ // initialize sphere *and* cone grid to 0
+ if (threadIdx.x < 2 * 6 * sGridSize)
+ mShapeGrid[threadIdx.x] = 0;
+
+ __syncthreads(); // mGridScale raw hazard
+
+ // generate cones even if test below fails because
+ // continuous collision might need it in next iteration
+ generateCones(mCurData, mCapsuleIndices + 2 * threadIdx.x);
+
+ if (mGridScale[0] == 0.0f)
+ return false; // early out for empty intersection
+
+ if (gClothData.mEnableContinuousCollision)
+ buildSphereAcceleration(mPrevData);
+ buildSphereAcceleration(mCurData);
+ __syncthreads(); // mCurData raw hazard
+
+ buildConeAcceleration();
+ __syncthreads(); // mShapeGrid raw hazard
+
+ mergeAcceleration();
+ __syncthreads(); // mShapeGrid raw hazard
+
+ return true;
+}
+
+// Reads the sphere and cone occupancy masks of the grid cell containing
+// 'position' (already in grid units). Positions at or beyond the upper grid
+// bound yield empty masks via outMask; the '& sGridSize - 1' wrap keeps the
+// cell index in range. NOTE(review): positions below the grid wrap to a high
+// cell - callers appear to rely on clamping/bias keeping the index valid.
+__device__ CuCollision::ShapeMask CuCollision::readShapeMask(const float& position,
+ Pointer<Shared, const uint32_t> sphereGrid)
+{
+ ShapeMask result;
+ int32_t index = int32_t(floorf(position));
+ uint32_t outMask = (index < sGridSize) - 1;
+
+ Pointer<Shared, const uint32_t> gridPtr = sphereGrid + (index & sGridSize - 1);
+ result.mSpheres = gridPtr[0] & ~outMask;
+ result.mCones = gridPtr[sGridSize * 6] & ~outMask;
+
+ return result;
+}
+
+// lookup acceleration structure and return mask of potential intersectors:
+// intersection of the per-axis cell masks for the particle's position
+template <typename CurPos>
+__device__ CuCollision::ShapeMask CuCollision::getShapeMask(const CurPos& positions) const
+{
+ ShapeMask result;
+
+ result = readShapeMask(positions.x * mGridScale[0] + mGridBias[0], mShapeGrid);
+ result &= readShapeMask(positions.y * mGridScale[1] + mGridBias[1], mShapeGrid + 8);
+ result &= readShapeMask(positions.z * mGridScale[2] + mGridBias[2], mShapeGrid + 16);
+
+ return result;
+}
+
+// Continuous-collision variant: tests the axis-aligned box swept by the
+// particle's motion (prevPos -> curPos) against the grid. The upper corner
+// is tested against the first 3 grid arrays and the lower corner against the
+// opposite-side arrays (+24), giving a per-axis interval overlap test;
+// corners are clamped to the [0, 7] cell range.
+template <typename PrevPos, typename CurPos>
+__device__ CuCollision::ShapeMask CuCollision::getShapeMask(const PrevPos& prevPos, const CurPos& curPos) const
+{
+ ShapeMask result;
+
+ float prevX = prevPos.x * mGridScale[0] + mGridBias[0];
+ float prevY = prevPos.y * mGridScale[1] + mGridBias[1];
+ float prevZ = prevPos.z * mGridScale[2] + mGridBias[2];
+
+ float curX = curPos.x * mGridScale[0] + mGridBias[0];
+ float curY = curPos.y * mGridScale[1] + mGridBias[1];
+ float curZ = curPos.z * mGridScale[2] + mGridBias[2];
+
+ float maxX = min(max(prevX, curX), 7.0f);
+ float maxY = min(max(prevY, curY), 7.0f);
+ float maxZ = min(max(prevZ, curZ), 7.0f);
+
+ result = readShapeMask(maxX, mShapeGrid);
+ result &= readShapeMask(maxY, mShapeGrid + 8);
+ result &= readShapeMask(maxZ, mShapeGrid + 16);
+
+ float minX = max(min(prevX, curX), 0.0f);
+ float minY = max(min(prevY, curY), 0.0f);
+ float minZ = max(min(prevZ, curZ), 0.0f);
+
+ result &= readShapeMask(minX, mShapeGrid + 24);
+ result &= readShapeMask(minY, mShapeGrid + 32);
+ result &= readShapeMask(minZ, mShapeGrid + 40);
+
+ return result;
+}
+
+// Discrete capsule collision for a single particle. Accumulates the total
+// push-out vector in 'delta' and (when friction is enabled) the summed shape
+// velocity in 'velocity'; returns the number of colliding shapes. Cones are
+// tested first and prune the sphere mask: endpoint spheres are only tested
+// where the particle projects beyond the cone's ends or misses it entirely.
+template <typename CurPos>
+__device__ int32_t CuCollision::collideCapsules(const CurPos& curPos, float3& delta, float3& velocity) const
+{
+ ShapeMask shapeMask = getShapeMask(curPos);
+
+ delta.x = delta.y = delta.z = 0.0f;
+ velocity.x = velocity.y = velocity.z = 0.0f;
+
+ int32_t numCollisions = 0;
+ bool frictionEnabled = gClothData.mFrictionScale > 0.0f;
+
+ // cone collision: iterate set bits lowest-first
+ for (; shapeMask.mCones; shapeMask.mCones &= shapeMask.mCones - 1)
+ {
+ int32_t j = __ffs(shapeMask.mCones) - 1;
+
+ float deltaX = curPos.x - mCurData.mConeCenterX[j];
+ float deltaY = curPos.y - mCurData.mConeCenterY[j];
+ float deltaZ = curPos.z - mCurData.mConeCenterZ[j];
+
+ float axisX = mCurData.mConeAxisX[j];
+ float axisY = mCurData.mConeAxisY[j];
+ float axisZ = mCurData.mConeAxisZ[j];
+ float slope = mCurData.mConeSlope[j];
+
+ // axial offset and tapered radius at that offset
+ float dot = deltaX * axisX + deltaY * axisY + deltaZ * axisZ;
+ float radius = max(dot * slope + mCurData.mConeRadius[j], 0.0f);
+ float sqrDistance = deltaX * deltaX + deltaY * deltaY + deltaZ * deltaZ - dot * dot;
+
+ Pointer<Shared, const uint32_t> mIt = mCapsuleMasks + 2 * j;
+ uint32_t bothMask = mIt[1];
+
+ if (sqrDistance > radius * radius)
+ {
+ // outside the infinite cone: neither endpoint sphere can hit either
+ shapeMask.mSpheres &= ~bothMask;
+ continue;
+ }
+
+ sqrDistance = max(sqrDistance, FLT_EPSILON);
+ float invDistance = rsqrtf(sqrDistance);
+
+ // axial position of the closest point on the cone surface
+ float base = dot + slope * sqrDistance * invDistance;
+
+ float halfLength = mCurData.mConeHalfLength[j];
+ uint32_t leftMask = base < -halfLength;
+ uint32_t rightMask = base > halfLength;
+
+ uint32_t firstMask = mIt[0];
+ uint32_t secondMask = firstMask ^ bothMask;
+
+ // keep only the endpoint sphere on the side the particle lies beyond
+ // (flagMask - 1 is all-ones when the flag is 0)
+ shapeMask.mSpheres &= ~(firstMask & leftMask - 1);
+ shapeMask.mSpheres &= ~(secondMask & rightMask - 1);
+
+ if (!leftMask && !rightMask)
+ {
+ // radial component of the offset (perpendicular to the axis)
+ deltaX = deltaX - base * axisX;
+ deltaY = deltaY - base * axisY;
+ deltaZ = deltaZ - base * axisZ;
+
+ float sqrCosine = mCurData.mConeSqrCosine[j];
+ float scale = radius * invDistance * sqrCosine - sqrCosine;
+
+ delta.x = delta.x + deltaX * scale;
+ delta.y = delta.y + deltaY * scale;
+ delta.z = delta.z + deltaZ * scale;
+
+ if (frictionEnabled)
+ {
+ int32_t s0 = mCapsuleIndices[2 * j];
+ int32_t s1 = mCapsuleIndices[2 * j + 1];
+
+ // load previous sphere pos
+ float s0vx = mCurData.mSphereX[s0] - mPrevData.mSphereX[s0];
+ float s0vy = mCurData.mSphereY[s0] - mPrevData.mSphereY[s0];
+ float s0vz = mCurData.mSphereZ[s0] - mPrevData.mSphereZ[s0];
+
+ float s1vx = mCurData.mSphereX[s1] - mPrevData.mSphereX[s1];
+ float s1vy = mCurData.mSphereY[s1] - mPrevData.mSphereY[s1];
+ float s1vz = mCurData.mSphereZ[s1] - mPrevData.mSphereZ[s1];
+
+ // interpolate velocity between the two spheres
+ float t = dot * 0.5f + 0.5f;
+
+ velocity.x += s0vx + t * (s1vx - s0vx);
+ velocity.y += s0vy + t * (s1vy - s0vy);
+ velocity.z += s0vz + t * (s1vz - s0vz);
+ }
+
+ ++numCollisions;
+ }
+ }
+
+ // sphere collision
+ for (; shapeMask.mSpheres; shapeMask.mSpheres &= shapeMask.mSpheres - 1)
+ {
+ int32_t j = __ffs(shapeMask.mSpheres) - 1;
+
+ float deltaX = curPos.x - mCurData.mSphereX[j];
+ float deltaY = curPos.y - mCurData.mSphereY[j];
+ float deltaZ = curPos.z - mCurData.mSphereZ[j];
+
+ float sqrDistance = FLT_EPSILON + deltaX * deltaX + deltaY * deltaY + deltaZ * deltaZ;
+ float relDistance = rsqrtf(sqrDistance) * mCurData.mSphereW[j];
+
+ // relDistance > 1 means the particle is inside the sphere
+ if (relDistance > 1.0f)
+ {
+ float scale = relDistance - 1.0f;
+
+ delta.x = delta.x + deltaX * scale;
+ delta.y = delta.y + deltaY * scale;
+ delta.z = delta.z + deltaZ * scale;
+
+ if (frictionEnabled)
+ {
+ velocity.x += mCurData.mSphereX[j] - mPrevData.mSphereX[j];
+ velocity.y += mCurData.mSphereY[j] - mPrevData.mSphereY[j];
+ velocity.z += mCurData.mSphereZ[j] - mPrevData.mSphereZ[j];
+ }
+
+ ++numCollisions;
+ }
+ }
+
+ return numCollisions;
+}
+
+// Relative squared-radius 'skin' threshold, (1 - 0.2)^2 - 1: continuous
+// collision is skipped when the trajectory only grazes the outer 20% skin
+// of a shape (see the discriminant tests in the continuous capsule code)
+static const __device__ float gSkeletonWidth = (1 - 0.2f) * (1 - 0.2f) - 1;
+
+template <typename PrevPos, typename CurPos>
+__device__ int32_t
+CuCollision::collideCapsules(const PrevPos& prevPos, CurPos& curPos, float3& delta, float3& velocity) const
+{
+ ShapeMask shapeMask = getShapeMask(prevPos, curPos);
+
+ delta.x = delta.y = delta.z = 0.0f;
+ velocity.x = velocity.y = velocity.z = 0.0f;
+
+ int32_t numCollisions = 0;
+ bool frictionEnabled = gClothData.mFrictionScale > 0.0f;
+
+ // cone collision
+ for (; shapeMask.mCones; shapeMask.mCones &= shapeMask.mCones - 1)
+ {
+ int32_t j = __ffs(shapeMask.mCones) - 1;
+
+ float prevAxisX = mPrevData.mConeAxisX[j];
+ float prevAxisY = mPrevData.mConeAxisY[j];
+ float prevAxisZ = mPrevData.mConeAxisZ[j];
+ float prevSlope = mPrevData.mConeSlope[j];
+
+ float prevX = prevPos.x - mPrevData.mConeCenterX[j];
+ float prevY = prevPos.y - mPrevData.mConeCenterY[j];
+ float prevZ = prevPos.z - mPrevData.mConeCenterZ[j];
+ float prevT = prevY * prevAxisZ - prevZ * prevAxisY;
+ float prevU = prevZ * prevAxisX - prevX * prevAxisZ;
+ float prevV = prevX * prevAxisY - prevY * prevAxisX;
+ float prevDot = prevX * prevAxisX + prevY * prevAxisY + prevZ * prevAxisZ;
+ float prevRadius = max(prevDot * prevSlope + mCurData.mConeRadius[j], 0.0f);
+
+ float curAxisX = mCurData.mConeAxisX[j];
+ float curAxisY = mCurData.mConeAxisY[j];
+ float curAxisZ = mCurData.mConeAxisZ[j];
+ float curSlope = mCurData.mConeSlope[j];
+
+ float curX = curPos.x - mCurData.mConeCenterX[j];
+ float curY = curPos.y - mCurData.mConeCenterY[j];
+ float curZ = curPos.z - mCurData.mConeCenterZ[j];
+ float curT = curY * curAxisZ - curZ * curAxisY;
+ float curU = curZ * curAxisX - curX * curAxisZ;
+ float curV = curX * curAxisY - curY * curAxisX;
+ float curDot = curX * curAxisX + curY * curAxisY + curZ * curAxisZ;
+ float curRadius = max(curDot * curSlope + mCurData.mConeRadius[j], 0.0f);
+
+ float curSqrDistance = FLT_EPSILON + curT * curT + curU * curU + curV * curV;
+
+ float dotPrevPrev = prevT * prevT + prevU * prevU + prevV * prevV - prevRadius * prevRadius;
+ float dotPrevCur = prevT * curT + prevU * curU + prevV * curV - prevRadius * curRadius;
+ float dotCurCur = curSqrDistance - curRadius * curRadius;
+
+ float discriminant = dotPrevCur * dotPrevCur - dotCurCur * dotPrevPrev;
+ float sqrtD = sqrtf(discriminant);
+ float halfB = dotPrevCur - dotPrevPrev;
+ float minusA = dotPrevCur - dotCurCur + halfB;
+
+ // time of impact or 0 if prevPos inside cone
+ float toi = __fdividef(min(0.0f, halfB + sqrtD), minusA);
+ bool hasCollision = toi < 1.0f && halfB < sqrtD;
+
+ // skip continuous collision if the (un-clamped) particle
+ // trajectory only touches the outer skin of the cone.
+ float rMin = prevRadius + halfB * minusA * (curRadius - prevRadius);
+ hasCollision = hasCollision && (discriminant > minusA * rMin * rMin * gSkeletonWidth);
+
+ // a is negative when one cone is contained in the other,
+ // which is already handled by discrete collision.
+ hasCollision = hasCollision && minusA < -FLT_EPSILON;
+
+ if (hasCollision)
+ {
+ float deltaX = prevX - curX;
+ float deltaY = prevY - curY;
+ float deltaZ = prevZ - curZ;
+
+ // interpolate delta at toi
+ float posX = prevX - deltaX * toi;
+ float posY = prevY - deltaY * toi;
+ float posZ = prevZ - deltaZ * toi;
+
+ float curHalfLength = mCurData.mConeHalfLength[j];
+ float curScaledAxisX = curAxisX * curHalfLength;
+ float curScaledAxisY = curAxisY * curHalfLength;
+ float curScaledAxisZ = curAxisZ * curHalfLength;
+
+ float prevHalfLength = mPrevData.mConeHalfLength[j];
+ float deltaScaledAxisX = curScaledAxisX - prevAxisX * prevHalfLength;
+ float deltaScaledAxisY = curScaledAxisY - prevAxisY * prevHalfLength;
+ float deltaScaledAxisZ = curScaledAxisZ - prevAxisZ * prevHalfLength;
+
+ float oneMinusToi = 1.0f - toi;
+
+ // interpolate axis at toi
+ float axisX = curScaledAxisX - deltaScaledAxisX * oneMinusToi;
+ float axisY = curScaledAxisY - deltaScaledAxisY * oneMinusToi;
+ float axisZ = curScaledAxisZ - deltaScaledAxisZ * oneMinusToi;
+ float slope = prevSlope * oneMinusToi + curSlope * toi;
+
+ float sqrHalfLength = axisX * axisX + axisY * axisY + axisZ * axisZ;
+ float invHalfLength = rsqrtf(sqrHalfLength);
+ float dot = (posX * axisX + posY * axisY + posZ * axisZ) * invHalfLength;
+
+ float sqrDistance = posX * posX + posY * posY + posZ * posZ - dot * dot;
+ float invDistance = sqrDistance > 0.0f ? rsqrtf(sqrDistance) : 0.0f;
+
+ float base = dot + slope * sqrDistance * invDistance;
+ float scale = base * invHalfLength;
+
+ if (abs(scale) < 1.0f)
+ {
+ deltaX = deltaX + deltaScaledAxisX * scale;
+ deltaY = deltaY + deltaScaledAxisY * scale;
+ deltaZ = deltaZ + deltaScaledAxisZ * scale;
+
+ // reduce ccd impulse if (clamped) particle trajectory stays in cone skin,
+ // i.e. scale by exp2(-k) or 1/(1 + k) with k = (tmin - toi) / (1 - toi)
+ float minusK = __fdividef(sqrtD, minusA * oneMinusToi);
+ oneMinusToi = __fdividef(oneMinusToi, 1.f - minusK);
+
+ curX = curX + deltaX * oneMinusToi;
+ curY = curY + deltaY * oneMinusToi;
+ curZ = curZ + deltaZ * oneMinusToi;
+
+ curDot = curX * curAxisX + curY * curAxisY + curZ * curAxisZ;
+ curRadius = max(curDot * curSlope + mCurData.mConeRadius[j], 0.0f);
+ curSqrDistance = curX * curX + curY * curY + curZ * curZ - curDot * curDot;
+
+ curPos.x = mCurData.mConeCenterX[j] + curX;
+ curPos.y = mCurData.mConeCenterY[j] + curY;
+ curPos.z = mCurData.mConeCenterZ[j] + curZ;
+ }
+ }
+
+ // curPos inside cone (discrete collision)
+ bool hasContact = curRadius * curRadius > curSqrDistance;
+
+ Pointer<Shared, const uint32_t> mIt = mCapsuleMasks + 2 * j;
+ uint32_t bothMask = mIt[1];
+
+ uint32_t cullMask = bothMask & (hasCollision | hasContact) - 1;
+ shapeMask.mSpheres &= ~cullMask;
+
+ if (!hasContact)
+ continue;
+
+ float invDistance = curSqrDistance > 0.0f ? rsqrtf(curSqrDistance) : 0.0f;
+ float base = curDot + curSlope * curSqrDistance * invDistance;
+
+ float halfLength = mCurData.mConeHalfLength[j];
+ uint32_t leftMask = base < -halfLength;
+ uint32_t rightMask = base > halfLength;
+
+ // can only skip continuous sphere collision if post-ccd position
+ // is on cone side *and* particle had cone-ccd collision.
+ uint32_t firstMask = mIt[0];
+ uint32_t secondMask = firstMask ^ bothMask;
+ cullMask = (firstMask & leftMask - 1) | (secondMask & rightMask - 1);
+ shapeMask.mSpheres &= ~cullMask | hasCollision - 1;
+
+ if (!leftMask && !rightMask)
+ {
+ float deltaX = curX - base * curAxisX;
+ float deltaY = curY - base * curAxisY;
+ float deltaZ = curZ - base * curAxisZ;
+
+ float sqrCosine = mCurData.mConeSqrCosine[j];
+ float scale = curRadius * invDistance * sqrCosine - sqrCosine;
+
+ delta.x = delta.x + deltaX * scale;
+ delta.y = delta.y + deltaY * scale;
+ delta.z = delta.z + deltaZ * scale;
+
+ if (frictionEnabled)
+ {
+ int32_t s0 = mCapsuleIndices[2 * j];
+ int32_t s1 = mCapsuleIndices[2 * j + 1];
+
+ // load previous sphere pos
+ float s0vx = mCurData.mSphereX[s0] - mPrevData.mSphereX[s0];
+ float s0vy = mCurData.mSphereY[s0] - mPrevData.mSphereY[s0];
+ float s0vz = mCurData.mSphereZ[s0] - mPrevData.mSphereZ[s0];
+
+ float s1vx = mCurData.mSphereX[s1] - mPrevData.mSphereX[s1];
+ float s1vy = mCurData.mSphereY[s1] - mPrevData.mSphereY[s1];
+ float s1vz = mCurData.mSphereZ[s1] - mPrevData.mSphereZ[s1];
+
+ // interpolate velocity between the two spheres
+ float t = curDot * 0.5f + 0.5f;
+
+ velocity.x += s0vx + t * (s1vx - s0vx);
+ velocity.y += s0vy + t * (s1vy - s0vy);
+ velocity.z += s0vz + t * (s1vz - s0vz);
+ }
+
+ ++numCollisions;
+ }
+ }
+
+ // sphere collision
+ for (; shapeMask.mSpheres; shapeMask.mSpheres &= shapeMask.mSpheres - 1)
+ {
+ int32_t j = __ffs(shapeMask.mSpheres) - 1;
+
+ float prevX = prevPos.x - mPrevData.mSphereX[j];
+ float prevY = prevPos.y - mPrevData.mSphereY[j];
+ float prevZ = prevPos.z - mPrevData.mSphereZ[j];
+ float prevRadius = mPrevData.mSphereW[j];
+
+ float curX = curPos.x - mCurData.mSphereX[j];
+ float curY = curPos.y - mCurData.mSphereY[j];
+ float curZ = curPos.z - mCurData.mSphereZ[j];
+ float curRadius = mCurData.mSphereW[j];
+
+ float sqrDistance = FLT_EPSILON + curX * curX + curY * curY + curZ * curZ;
+
+ float dotPrevPrev = prevX * prevX + prevY * prevY + prevZ * prevZ - prevRadius * prevRadius;
+ float dotPrevCur = prevX * curX + prevY * curY + prevZ * curZ - prevRadius * curRadius;
+ float dotCurCur = sqrDistance - curRadius * curRadius;
+
+ float discriminant = dotPrevCur * dotPrevCur - dotCurCur * dotPrevPrev;
+ float sqrtD = sqrtf(discriminant);
+ float halfB = dotPrevCur - dotPrevPrev;
+ float minusA = dotPrevCur - dotCurCur + halfB;
+
+ // time of impact or 0 if prevPos inside sphere
+ float toi = __fdividef(min(0.0f, halfB + sqrtD), minusA);
+ bool hasCollision = toi < 1.0f && halfB < sqrtD;
+
+ // skip continuous collision if the (un-clamped) particle
+ // trajectory only touches the outer skin of the cone.
+ float rMin = prevRadius + halfB * minusA * (curRadius - prevRadius);
+ hasCollision = hasCollision && (discriminant > minusA * rMin * rMin * gSkeletonWidth);
+
+ // a is negative when one cone is contained in the other,
+ // which is already handled by discrete collision.
+ hasCollision = hasCollision && minusA < -FLT_EPSILON;
+
+ if (hasCollision)
+ {
+ float deltaX = prevX - curX;
+ float deltaY = prevY - curY;
+ float deltaZ = prevZ - curZ;
+
+ float oneMinusToi = 1.0f - toi;
+
+ // reduce ccd impulse if (clamped) particle trajectory stays in cone skin,
+ // i.e. scale by exp2(-k) or 1/(1 + k) with k = (tmin - toi) / (1 - toi)
+ float minusK = __fdividef(sqrtD, minusA * oneMinusToi);
+ oneMinusToi = __fdividef(oneMinusToi, 1 - minusK);
+
+ curX = curX + deltaX * oneMinusToi;
+ curY = curY + deltaY * oneMinusToi;
+ curZ = curZ + deltaZ * oneMinusToi;
+
+ curPos.x = mCurData.mSphereX[j] + curX;
+ curPos.y = mCurData.mSphereY[j] + curY;
+ curPos.z = mCurData.mSphereZ[j] + curZ;
+
+ sqrDistance = FLT_EPSILON + curX * curX + curY * curY + curZ * curZ;
+ }
+
+ float relDistance = rsqrtf(sqrDistance) * curRadius;
+
+ if (relDistance > 1.0f)
+ {
+ float scale = relDistance - 1.0f;
+
+ delta.x = delta.x + curX * scale;
+ delta.y = delta.y + curY * scale;
+ delta.z = delta.z + curZ * scale;
+
+ if (frictionEnabled)
+ {
+ velocity.x += mCurData.mSphereX[j] - mPrevData.mSphereX[j];
+ velocity.y += mCurData.mSphereY[j] - mPrevData.mSphereY[j];
+ velocity.z += mCurData.mSphereZ[j] - mPrevData.mSphereZ[j];
+ }
+
+ ++numCollisions;
+ }
+ }
+
+ return numCollisions;
+}
+
namespace
{
// Computes a tangential friction impulse for a particle that received the
// (summed) positional collision response 'collisionImpulse' this iteration.
//
// prevPos, curPos   particle position before/after this iteration
// shapeVelocity     summed velocity of the colliding shapes; averaged via 'scale'
// scale             1/numCollisions, averages the accumulated sums
// collisionImpulse  summed collision delta; its direction serves as the contact
//                   normal, its magnitude scales the friction strength
//
// Returns the impulse to subtract from the previous position (caller applies it).
template <typename PrevPos, typename CurPos>
__device__ inline float3 calcFrictionImpulse(const PrevPos& prevPos, const CurPos& curPos, const float3& shapeVelocity,
                                             float scale, const float3& collisionImpulse)
{
	const float frictionScale = gClothData.mFrictionScale;

	// calculate collision normal
	float deltaSq = collisionImpulse.x * collisionImpulse.x + collisionImpulse.y * collisionImpulse.y +
	                collisionImpulse.z * collisionImpulse.z;

	// FLT_EPSILON guards the rsqrt against a vanishing impulse
	float rcpDelta = rsqrtf(deltaSq + FLT_EPSILON);

	float nx = collisionImpulse.x * rcpDelta;
	float ny = collisionImpulse.y * rcpDelta;
	float nz = collisionImpulse.z * rcpDelta;

	// calculate relative velocity scaled by number of collisions
	float rvx = curPos.x - prevPos.x - shapeVelocity.x * scale;
	float rvy = curPos.y - prevPos.y - shapeVelocity.y * scale;
	float rvz = curPos.z - prevPos.z - shapeVelocity.z * scale;

	// calculate magnitude of relative normal velocity
	float rvn = rvx * nx + rvy * ny + rvz * nz;

	// calculate relative tangential velocity
	float rvtx = rvx - rvn * nx;
	float rvty = rvy - rvn * ny;
	float rvtz = rvz - rvn * nz;

	// calculate magnitude of vt
	float rcpVt = rsqrtf(rvtx * rvtx + rvty * rvty + rvtz * rvtz + FLT_EPSILON);

	// magnitude of friction impulse (cannot be larger than -|vt|)
	float j = max(-frictionScale * deltaSq * rcpDelta * scale * rcpVt, -1.0f);

	return make_float3(rvtx * j, rvty * j, rvtz * j);
}
}
+
// Discrete capsule collision for all particles: pushes each colliding particle
// out of the capsules, applies friction by adjusting the previous position, and
// optionally damps element 3 of the particle (presumably the inverse mass, given
// it is *divided* by the mass scale -- confirm against the particle layout).
// Each thread handles particles i = threadIdx.x, threadIdx.x + blockDim.x, ...
template <typename CurrentT, typename PreviousT>
__device__ void CuCollision::collideCapsules(CurrentT& current, PreviousT& previous) const
{
	ProfileDetailZone zone(cloth::CuProfileZoneIds::COLLIDE_CAPSULES);

	bool frictionEnabled = gClothData.mFrictionScale > 0.0f;
	bool massScaleEnabled = gClothData.mCollisionMassScale > 0.0f;

	for (int32_t i = threadIdx.x; i < gClothData.mNumParticles; i += blockDim.x)
	{
		typename CurrentT::VectorType curPos = current(i);

		// delta/velocity are sums over all capsule contacts of this particle
		float3 delta, velocity;
		if (int32_t numCollisions = collideCapsules(curPos, delta, velocity))
		{
			// average the accumulated response over the number of contacts
			float scale = __fdividef(1.0f, numCollisions);

			if (frictionEnabled)
			{
				typename PreviousT::VectorType prevPos = previous(i);
				float3 frictionImpulse = calcFrictionImpulse(prevPos, curPos, velocity, scale, delta);

				prevPos.x -= frictionImpulse.x;
				prevPos.y -= frictionImpulse.y;
				prevPos.z -= frictionImpulse.z;

				previous(i) = prevPos;
			}

			curPos.x += delta.x * scale;
			curPos.y += delta.y * scale;
			curPos.z += delta.z * scale;

			current(i) = curPos;

			if (massScaleEnabled)
			{
				// larger collision response => stronger damping of element 3
				float deltaLengthSq = delta.x * delta.x + delta.y * delta.y + delta.z * delta.z;
				float massScale = 1.0f + gClothData.mCollisionMassScale * deltaLengthSq;
				current(i, 3) = __fdividef(current(i, 3), massScale);
			}
		}
	}
}
+
#define NEW_LERP_AND_APPLY 1
namespace
{
#if NEW_LERP_AND_APPLY
// Interpolates the positions of the three particles indices.{x,y,z} with the
// weights weights.{x,y,z}. Note: posX/posY/posZ are the three particle
// positions (not coordinate components).
template <typename ParticleDataT>
__device__ float3 lerp(const ParticleDataT& particleData, const int4& indices, const float4& weights)
{
	typename ParticleDataT::VectorType posX, posY, posZ;
	posX = particleData(indices.x);
	posY = particleData(indices.y);
	posZ = particleData(indices.z);

	float3 result;
	result.x = posX.x * weights.x + posY.x * weights.y + posZ.x * weights.z;
	result.y = posX.y * weights.x + posY.y * weights.y + posZ.y * weights.z;
	result.z = posX.z * weights.x + posY.z * weights.y + posZ.z * weights.z;
	return result;
}

// Distributes delta * scale back onto the three source particles, weighted by
// the same interpolation weights used in lerp().
template <typename ParticleDataT>
__device__ void apply(ParticleDataT& particleData, const int4& indices, const float4& weights, float3 delta, float scale)
{
	typename ParticleDataT::VectorType posX, posY, posZ;
	posX = particleData(indices.x);
	posY = particleData(indices.y);
	posZ = particleData(indices.z);

	delta.x *= scale;
	delta.y *= scale;
	delta.z *= scale;

	posX.x += delta.x * weights.x; posY.x += delta.x * weights.y; posZ.x += delta.x * weights.z;
	posX.y += delta.y * weights.x; posY.y += delta.y * weights.y; posZ.y += delta.y * weights.z;
	posX.z += delta.z * weights.x; posY.z += delta.z * weights.y; posZ.z += delta.z * weights.z;

	particleData(indices.x) = posX;
	particleData(indices.y) = posY;
	particleData(indices.z) = posZ;
}
#else
// Per-coordinate variant: 'pos' is a pointer to one coordinate array, so the
// caller invokes lerp/apply once per component.
template <typename PointerT>
__device__ float lerp(PointerT pos, const int4& indices, const float4& weights)
{
	return pos[indices.x] * weights.x + pos[indices.y] * weights.y + pos[indices.z] * weights.z;
}

template <typename PointerT>
__device__ void apply(PointerT pos, const int4& indices, const float4& weights, float delta)
{
	pos[indices.x] += delta * weights.x;
	pos[indices.y] += delta * weights.y;
	pos[indices.z] += delta * weights.z;
}
#endif
}
+
// Capsule collision for virtual particles: sample points interpolated from
// three mesh particles. Collision deltas and friction are distributed back to
// the source particles via apply(). Only discrete collision is performed here.
template <typename CurrentT, typename PreviousT>
__device__ void CuCollision::collideVirtualCapsules(CurrentT& current, PreviousT& previous) const
{
	const uint32_t* __restrict setSizeIt = gClothData.mVirtualParticleSetSizesBegin;

	// cloth has no virtual particles
	if (!setSizeIt)
		return;

	if (gClothData.mEnableContinuousCollision)
	{
		// copied from mergeAcceleration
		Pointer<Shared, uint32_t> dst = mShapeGrid + threadIdx.x;
		if (!(threadIdx.x * 43 & 1024) && threadIdx.x < sGridSize * 12)
			*dst &= dst[sGridSize * 3];
		__syncthreads(); // mShapeGrid raw hazard
	}

	ProfileDetailZone zone(cloth::CuProfileZoneIds::COLLIDE_VIRTUAL_CAPSULES);

	const uint32_t* __restrict setSizeEnd = gClothData.mVirtualParticleSetSizesEnd;
	const uint16_t* __restrict indicesEnd = gClothData.mVirtualParticleIndices;
	const float4* __restrict weightsIt = reinterpret_cast<const float4*>(gClothData.mVirtualParticleWeights);

	bool frictionEnabled = gClothData.mFrictionScale > 0.0f;
	bool massScaleEnabled = gClothData.mCollisionMassScale > 0.0f;

	// sets are processed one after another with a barrier in between --
	// presumably particles within one set don't alias, so threads of a set can
	// write back concurrently (confirm against the set construction code)
	for (; setSizeIt != setSizeEnd; ++setSizeIt)
	{
		__syncthreads();

		// each entry holds 4 uint16: three particle indices plus an index
		// into the weights table (indices.w below)
		const uint16_t* __restrict indicesIt = indicesEnd + threadIdx.x * 4;
		for (indicesEnd += *setSizeIt * 4; indicesIt < indicesEnd; indicesIt += blockDim.x * 4)
		{
			int4 indices = make_int4(indicesIt[0], indicesIt[1], indicesIt[2], indicesIt[3]);

			float4 weights = weightsIt[indices.w];

#if NEW_LERP_AND_APPLY
			float3 curPos = lerp(current, indices, weights);
#else
			float3 curPos;
			curPos.x = lerp(current[0], indices, weights);
			curPos.y = lerp(current[1], indices, weights);
			curPos.z = lerp(current[2], indices, weights);

#endif
			float3 delta, velocity;
			if (int32_t numCollisions = collideCapsules(curPos, delta, velocity))
			{
				float scale = __fdividef(1.0f, numCollisions);
				// weights.w is an extra per-entry scale applied when pushing
				// the delta back onto the source particles
				float wscale = weights.w * scale;

#if NEW_LERP_AND_APPLY
				apply(current, indices, weights, delta, wscale);
#else
				apply(current[0], indices, weights, delta.x * wscale);
				apply(current[1], indices, weights, delta.y * wscale);
				apply(current[2], indices, weights, delta.z * wscale);
#endif
				if (frictionEnabled)
				{
#if NEW_LERP_AND_APPLY
					float3 prevPos = lerp(previous, indices, weights);
#else
					float3 prevPos;
					prevPos.x = lerp(previous[0], indices, weights);
					prevPos.y = lerp(previous[1], indices, weights);
					prevPos.z = lerp(previous[2], indices, weights);
#endif

					float3 frictionImpulse = calcFrictionImpulse(prevPos, curPos, velocity, scale, delta);

#if NEW_LERP_AND_APPLY
					apply(previous, indices, weights, frictionImpulse, -weights.w);
#else
					apply(previous[0], indices, weights, frictionImpulse.x * -weights.w);
					apply(previous[1], indices, weights, frictionImpulse.y * -weights.w);
					apply(previous[2], indices, weights, frictionImpulse.z * -weights.w);
#endif
				}

				if (massScaleEnabled)
				{
					float deltaLengthSq = (delta.x * delta.x + delta.y * delta.y + delta.z * delta.z) * scale * scale;
					float invMassScale = __fdividef(1.0f, 1.0f + gClothData.mCollisionMassScale * deltaLengthSq);

					// not multiplying by weights[3] here because unlike applying velocity
					// deltas where we want the interpolated position to obtain a particular
					// value, we instead just require that the total change is equal to invMassScale
					invMassScale = invMassScale - 1.0f;
					current(indices.x, 3) *= 1.0f + weights.x * invMassScale;
					current(indices.y, 3) *= 1.0f + weights.y * invMassScale;
					current(indices.z, 3) *= 1.0f + weights.z * invMassScale;
				}
			}
		}
	}
}
+
// Continuous (swept) capsule collision for all particles. Same structure as
// collideCapsules(CurrentT&, PreviousT&) above, but calls the CCD overload of
// collideCapsules which also takes the previous position to sweep against the
// moving capsules.
template <typename CurrentT, typename PreviousT>
__device__ void CuCollision::collideContinuousCapsules(CurrentT& current, PreviousT& previous) const
{
	ProfileDetailZone zone(cloth::CuProfileZoneIds::COLLIDE_CONTINUOUS_CAPSULES);

	bool frictionEnabled = gClothData.mFrictionScale > 0.0f;
	bool massScaleEnabled = gClothData.mCollisionMassScale > 0.0f;

	for (int32_t i = threadIdx.x; i < gClothData.mNumParticles; i += blockDim.x)
	{
		typename PreviousT::VectorType prevPos = previous(i);
		typename CurrentT::VectorType curPos = current(i);

		float3 delta, velocity;
		if (int32_t numCollisions = collideCapsules(prevPos, curPos, delta, velocity))
		{
			// average accumulated response over the number of contacts
			float scale = __fdividef(1.0f, numCollisions);

			if (frictionEnabled)
			{
				float3 frictionImpulse = calcFrictionImpulse(prevPos, curPos, velocity, scale, delta);

				prevPos.x -= frictionImpulse.x;
				prevPos.y -= frictionImpulse.y;
				prevPos.z -= frictionImpulse.z;

				previous(i) = prevPos;
			}

			curPos.x += delta.x * scale;
			curPos.y += delta.y * scale;
			curPos.z += delta.z * scale;

			current(i) = curPos;

			if (massScaleEnabled)
			{
				// damp element 3 (presumably inverse mass -- confirm) in
				// proportion to the collision response
				float deltaLengthSq = delta.x * delta.x + delta.y * delta.y + delta.z * delta.z;
				float massScale = 1.0f + gClothData.mCollisionMassScale * deltaLengthSq;
				current(i, 3) = __fdividef(current(i, 3), massScale);
			}
		}
	}
}
+
// Tests one particle against all convexes (each convex = bitmask over the
// shared plane arrays) and accumulates the push-out delta. Returns the number
// of convexes the particle was inside.
template <typename CurPos>
__device__ int32_t CuCollision::collideConvexes(const CurPos& positions, float3& delta) const
{
	ProfileDetailZone zone(cloth::CuProfileZoneIds::COLLIDE_CONVEXES);

	delta.x = delta.y = delta.z = 0.0f;

	// planes are stored transposed (all x, then y, z, w) in shared memory,
	// reusing the mCurData.mSphereX storage (written by the kernel overload)
	Pointer<Shared, const float> planeX = mCurData.mSphereX;
	Pointer<Shared, const float> planeY = planeX + gClothData.mNumPlanes;
	Pointer<Shared, const float> planeZ = planeY + gClothData.mNumPlanes;
	Pointer<Shared, const float> planeW = planeZ + gClothData.mNumPlanes;

	int32_t numCollisions = 0;
	Pointer<Shared, const uint32_t> cIt = mConvexMasks;
	Pointer<Shared, const uint32_t> cEnd = cIt + gClothData.mNumConvexes;
	for (; cIt != cEnd; ++cIt)
	{
		uint32_t mask = *cIt;

		// track the largest signed plane distance; the particle is inside the
		// convex iff it ends up negative for every plane of the mask
		int32_t maxIndex = __ffs(mask) - 1;
		float maxDist = planeW[maxIndex] + positions.z * planeZ[maxIndex] + positions.y * planeY[maxIndex] +
		                positions.x * planeX[maxIndex];

		// mask &= mask - 1 clears the lowest set bit; loop stops early once a
		// non-negative distance proves the particle is outside
		while ((maxDist < 0.0f) && (mask &= mask - 1))
		{
			int32_t i = __ffs(mask) - 1;
			float dist = planeW[i] + positions.z * planeZ[i] + positions.y * planeY[i] + positions.x * planeX[i];
			if (dist > maxDist)
				maxDist = dist, maxIndex = i;
		}

		if (maxDist < 0.0f)
		{
			// push out along the least-penetrated plane
			delta.x -= planeX[maxIndex] * maxDist;
			delta.y -= planeY[maxIndex] * maxDist;
			delta.z -= planeZ[maxIndex] * maxDist;

			++numCollisions;
		}
	}

	return numCollisions;
}
+
// Convex collision for all particles. First interpolates the collision planes
// between start and target by 'alpha' and stores them transposed in shared
// memory, then resolves each particle against the convexes.
template <typename CurrentT, typename PreviousT>
__device__ void CuCollision::collideConvexes(CurrentT& current, PreviousT& previous, float alpha)
{
	if (!gClothData.mNumConvexes)
		return;

	// interpolate planes and transpose
	// note: assumes blockDim.x >= mNumPlanes * 4 so one thread per component
	// suffices -- confirm against the launch configuration
	if (threadIdx.x < gClothData.mNumPlanes * 4)
	{
		float start = gFrameData.mStartCollisionPlanes[threadIdx.x];
		float target = gFrameData.mTargetCollisionPlanes[threadIdx.x];
		// map from xyzw-interleaved input to planar (all x, all y, ...) layout
		int32_t j = threadIdx.x % 4 * gClothData.mNumPlanes + threadIdx.x / 4;
		mCurData.mSphereX[j] = start + (target - start) * alpha;
	}

	__syncthreads();

	bool frictionEnabled = gClothData.mFrictionScale > 0.0f;

	for (int32_t i = threadIdx.x; i < gClothData.mNumParticles; i += blockDim.x)
	{
		typename CurrentT::VectorType curPos = current(i);

		float3 delta;
		if (int32_t numCollisions = collideConvexes(curPos, delta))
		{
			float scale = __fdividef(1.0f, numCollisions);

			if (frictionEnabled)
			{
				typename PreviousT::VectorType prevPos = previous(i);

				// planes are treated as static here: zero shape velocity
				float3 frictionImpulse =
				    calcFrictionImpulse(prevPos, curPos, make_float3(0.0f, 0.0f, 0.0f), scale, delta);

				prevPos.x -= frictionImpulse.x;
				prevPos.y -= frictionImpulse.y;
				prevPos.z -= frictionImpulse.z;

				previous(i) = prevPos;
			}

			curPos.x += delta.x * scale;
			curPos.y += delta.y * scale;
			curPos.z += delta.z * scale;

			current(i) = curPos;
		}
	}

	__syncthreads();
}
+
namespace
{
// Per-triangle data used by collideTriangles(). The first 9 floats are filled
// with the three vertex positions; initialize() then converts them in-place to
// base + edges and computes the derived quantities below.
struct TriangleData
{
	float baseX, baseY, baseZ;       // vertex 0
	float edge0X, edge0Y, edge0Z;    // vertex 1 on input; v1 - v0 after initialize()
	float edge1X, edge1Y, edge1Z;    // vertex 2 on input; v2 - v0 after initialize()
	float normalX, normalY, normalZ; // unit normal = normalize(edge0 x edge1)

	float edge0DotEdge1;
	float edge0SqrLength;
	float edge1SqrLength;

	// det:   reciprocal of the Gram determinant |e0|^2 |e1|^2 - (e0.e1)^2,
	//        used to normalize the barycentric coordinates s, t
	// denom: reciprocal used when projecting onto the s + t = 1 edge
	float det;
	float denom;

	float edge0InvSqrLength;
	float edge1InvSqrLength;

	// initialize struct after vertices have been stored in first 9 members
	__device__ void initialize()
	{
		edge0X -= baseX, edge0Y -= baseY, edge0Z -= baseZ;
		edge1X -= baseX, edge1Y -= baseY, edge1Z -= baseZ;

		normalX = edge0Y * edge1Z - edge0Z * edge1Y;
		normalY = edge0Z * edge1X - edge0X * edge1Z;
		normalZ = edge0X * edge1Y - edge0Y * edge1X;

		float normalInvLength = rsqrtf(normalX * normalX + normalY * normalY + normalZ * normalZ);
		normalX *= normalInvLength;
		normalY *= normalInvLength;
		normalZ *= normalInvLength;

		edge0DotEdge1 = edge0X * edge1X + edge0Y * edge1Y + edge0Z * edge1Z;
		edge0SqrLength = edge0X * edge0X + edge0Y * edge0Y + edge0Z * edge0Z;
		edge1SqrLength = edge1X * edge1X + edge1Y * edge1Y + edge1Z * edge1Z;

		det = __fdividef(1.0f, edge0SqrLength * edge1SqrLength - edge0DotEdge1 * edge0DotEdge1);
		denom = __fdividef(1.0f, edge0SqrLength + edge1SqrLength - edge0DotEdge1 - edge0DotEdge1);

		edge0InvSqrLength = __fdividef(1.0f, edge0SqrLength);
		edge1InvSqrLength = __fdividef(1.0f, edge1SqrLength);
	}
};
}
+
// Collides particle i against all collision triangles: finds the closest
// triangle (closest point via clamped barycentric coordinates s, t) and, if the
// particle is on the negative side of that triangle's plane (normalD < 0),
// projects it back onto the plane.
template <typename CurrentT>
__device__ void CuCollision::collideTriangles(CurrentT& current, int32_t i)
{
	ProfileDetailZone zone(cloth::CuProfileZoneIds::COLLIDE_TRIANGLES);

	float posX = current(i, 0);
	float posY = current(i, 1);
	float posZ = current(i, 2);

	// triangle data lives in shared memory, reusing mCurData.mSphereX storage
	// (written by the kernel overload below)
	const TriangleData* __restrict tIt = reinterpret_cast<const TriangleData*>(generic(mCurData.mSphereX));
	const TriangleData* __restrict tEnd = tIt + gClothData.mNumCollisionTriangles;

	// normalX/Y/Z are only read when normalD < 0, which implies the loop ran;
	// normalD = 0 makes the final branch a no-op when there are no triangles
	float normalX, normalY, normalZ, normalD = 0.0f;
	float minSqrLength = FLT_MAX;

	for (; tIt != tEnd; ++tIt)
	{
		float dx = posX - tIt->baseX;
		float dy = posY - tIt->baseY;
		float dz = posZ - tIt->baseZ;

		float deltaDotEdge0 = dx * tIt->edge0X + dy * tIt->edge0Y + dz * tIt->edge0Z;
		float deltaDotEdge1 = dx * tIt->edge1X + dy * tIt->edge1Y + dz * tIt->edge1Z;
		float deltaDotNormal = dx * tIt->normalX + dy * tIt->normalY + dz * tIt->normalZ;

		// unnormalized barycentric coordinates of the projected point
		float s = tIt->edge1SqrLength * deltaDotEdge0 - tIt->edge0DotEdge1 * deltaDotEdge1;
		float t = tIt->edge0SqrLength * deltaDotEdge1 - tIt->edge0DotEdge1 * deltaDotEdge0;

		// outside an edge: fall back to projection onto that edge
		s = t > 0.0f ? s * tIt->det : deltaDotEdge0 * tIt->edge0InvSqrLength;
		t = s > 0.0f ? t * tIt->det : deltaDotEdge1 * tIt->edge1InvSqrLength;

		if (s + t > 1.0f)
		{
			// project onto the s + t = 1 edge
			s = (tIt->edge1SqrLength - tIt->edge0DotEdge1 + deltaDotEdge0 - deltaDotEdge1) * tIt->denom;
		}

		s = fmaxf(0.0f, fminf(1.0f, s));
		t = fmaxf(0.0f, fminf(1.0f - s, t));

		// vector from closest point on triangle to the particle
		dx = dx - tIt->edge0X * s - tIt->edge1X * t;
		dy = dy - tIt->edge0Y * s - tIt->edge1Y * t;
		dz = dz - tIt->edge0Z * s - tIt->edge1Z * t;

		float sqrLength = dx * dx + dy * dy + dz * dz;

		// slightly inflate the distance when behind the triangle plane,
		// presumably to prefer front-facing triangles on (near-)ties
		if (0.0f > deltaDotNormal)
			sqrLength *= 1.0001f;

		if (sqrLength < minSqrLength)
		{
			normalX = tIt->normalX;
			normalY = tIt->normalY;
			normalZ = tIt->normalZ;
			normalD = deltaDotNormal;
			minSqrLength = sqrLength;
		}
	}

	if (normalD < 0.0f)
	{
		// behind the closest triangle: move back onto its plane
		current(i, 0) = posX - normalX * normalD;
		current(i, 1) = posY - normalY * normalD;
		current(i, 2) = posZ - normalZ * normalD;
	}
}
+
namespace
{
// Number of derived floats per TriangleData beyond the 9 vertex floats that
// are streamed in (3 vertices * xyz); used to compute the shared-memory stride.
static const int32_t sTrianglePadding = sizeof(TriangleData) / sizeof(float) - 9;
}
+
// Triangle collision for all particles. Interpolates the triangle vertices
// between start and target by 'alpha' into shared memory (reusing the
// mCurData.mSphereX storage), derives the per-triangle quantities, then
// resolves each particle.
template <typename CurrentT>
__device__ void CuCollision::collideTriangles(CurrentT& current, float alpha)
{
	if (!gClothData.mNumCollisionTriangles)
		return;

	// interpolate triangle vertices and store in shared memory
	for (int32_t i = threadIdx.x, n = gClothData.mNumCollisionTriangles * 9; i < n; i += blockDim.x)
	{
		float start = gFrameData.mStartCollisionTriangles[i];
		float target = gFrameData.mTargetCollisionTriangles[i];
		int32_t idx = i * 7282 >> 16; // same as i/9 (for the index range used here)
		// spread the flat 9-float vertex stream to the TriangleData stride
		int32_t offset = i + idx * sTrianglePadding;
		mCurData.mSphereX[offset] = start + (target - start) * alpha;
	}

	__syncthreads();

	// compute edges, normal and derived terms in-place
	for (int32_t i = threadIdx.x; i < gClothData.mNumCollisionTriangles; i += blockDim.x)
	{
		reinterpret_cast<TriangleData*>(generic(mCurData.mSphereX))[i].initialize();
	}

	__syncthreads();

	for (int32_t i = threadIdx.x; i < gClothData.mNumParticles; i += blockDim.x)
		collideTriangles(current, i);

	__syncthreads();
}
diff --git a/NvCloth/src/cuda/CuContextLock.cpp b/NvCloth/src/cuda/CuContextLock.cpp
new file mode 100644
index 0000000..c034ed4
--- /dev/null
+++ b/NvCloth/src/cuda/CuContextLock.cpp
@@ -0,0 +1,54 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2017 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#include "CuContextLock.h"
+#include "CuFactory.h"
+#include "CuCheckSuccess.h"
+
+using namespace nv;
+
+cloth::CuContextLock::CuContextLock(const CuFactory& factory) : mFactory(factory)
+{
+ acquire();
+}
+
+cloth::CuContextLock::~CuContextLock()
+{
+ release();
+}
+
+void cloth::CuContextLock::acquire()
+{
+ checkSuccess(cuCtxPushCurrent(mFactory.mContext));
+}
+
+void cloth::CuContextLock::release()
+{
+ checkSuccess(cuCtxPopCurrent(nullptr));
+}
diff --git a/NvCloth/src/cuda/CuContextLock.h b/NvCloth/src/cuda/CuContextLock.h
new file mode 100644
index 0000000..fd6b1d4
--- /dev/null
+++ b/NvCloth/src/cuda/CuContextLock.h
@@ -0,0 +1,57 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2017 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#pragma once
+
+namespace nv
+{
+
+namespace cloth
+{
+
class CuFactory;

// acquires cuda context for the lifetime of the instance;
// acquire()/release() allow pushing/popping the context again mid-lifetime
class CuContextLock
{
  protected:
	// copy operations declared but not defined here (non-copyable idiom --
	// copying would unbalance the context push/pop)
	CuContextLock(const CuContextLock&);
	CuContextLock& operator = (const CuContextLock&);

  public:
	CuContextLock(const CuFactory&);
	~CuContextLock();

	void acquire();
	void release();

	// factory whose CUDA context is pushed/popped
	const CuFactory& mFactory;
};
+}
+}
diff --git a/NvCloth/src/cuda/CuDevicePointer.h b/NvCloth/src/cuda/CuDevicePointer.h
new file mode 100644
index 0000000..ad88074
--- /dev/null
+++ b/NvCloth/src/cuda/CuDevicePointer.h
@@ -0,0 +1,215 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2017 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#pragma once
+
+#include "CuCheckSuccess.h"
+
+namespace nv
+{
+
+namespace cloth
+{
+
// Strips a top-level const qualifier from T (pre-C++11 std::remove_const).
template <typename T>
struct RemoveConst
{
	typedef T Type;
};
template <typename T>
struct RemoveConst<const T>
{
	typedef T Type;
};

template <typename>
class CuDeviceReference; // forward declare

// pointer to POD type in CUDA device memory
// Behaves like a raw pointer (arithmetic, comparison) but dereferencing yields
// a CuDeviceReference proxy that copies across the host/device boundary.
template <typename T>
class CuDevicePointer
{
	template <typename>
	friend class CuDevicePointer;

	typedef typename RemoveConst<T>::Type ValueType;

  public:
	// c'tors
	CuDevicePointer() : mPointer(0)
	{
	}
	template <class U>
	explicit CuDevicePointer(U* ptr)
	: mPointer(ptr)
	{
	}
	// allow constructing a pointer-to-const from a pointer-to-mutable
	CuDevicePointer(const CuDevicePointer<ValueType>& ptr) : mPointer(ptr.get())
	{
	}

	// conversion
	template <typename U>
	operator CuDevicePointer<U>(void) const
	{
		return CuDevicePointer<U>(static_cast<U*>(mPointer));
	}
	// raw (device-space) pointer; do not dereference on the host
	T* get() const
	{
		return mPointer;
	}
	// same address as a CUDA driver API handle
	CUdeviceptr dev() const
	{
		return reinterpret_cast<CUdeviceptr>(mPointer);
	}

	// operators (standard pointer arithmetic, element-granular)
	CuDevicePointer operator + (const ptrdiff_t& rhs) const
	{
		return CuDevicePointer(mPointer + rhs);
	}
	CuDevicePointer operator - (const ptrdiff_t& rhs) const
	{
		return CuDevicePointer(mPointer - rhs);
	}
	CuDevicePointer& operator ++ (void)
	{
		++mPointer;
		return *this;
	}
	CuDevicePointer operator ++ (int)
	{
		CuDevicePointer copy(*this);
		++(*this);
		return copy;
	}
	CuDevicePointer& operator -- (void)
	{
		--mPointer;
		return *this;
	}
	CuDevicePointer operator -- (int)
	{
		CuDevicePointer copy(*this);
		--(*this);
		return copy;
	}
	CuDevicePointer& operator += (ptrdiff_t rhs)
	{
		mPointer += rhs;
		return *this;
	}
	CuDevicePointer& operator -= (ptrdiff_t rhs)
	{
		mPointer -= rhs;
		return *this;
	}
	ptrdiff_t operator - (const CuDevicePointer& rhs) const
	{
		return mPointer - rhs.mPointer;
	}

	template <typename U>
	bool operator == (const CuDevicePointer<U>& other) const
	{
		return mPointer == other.mPointer;
	}
	template <typename U>
	bool operator!=(const CuDevicePointer<U>& other) const
	{
		return mPointer != other.mPointer;
	}

	// dereference: returns a proxy that performs the actual device memcpy
	CuDeviceReference<T> operator[](const ptrdiff_t&) const; // (implemented below)
	CuDeviceReference<T> operator*(void) const
	{
		return operator[](0);
	}

  private:
	T* mPointer;
};
+
// Proxy for a single element in device memory, produced by dereferencing a
// CuDevicePointer. Assignment copies host->device (or device->device from
// another reference); reading converts via a device->host copy. Each access
// is a synchronous driver-API memcpy of sizeof(T) bytes.
template <typename T>
class CuDeviceReference
{
	template <typename>
	friend class CuDeviceReference;
	template <typename>
	friend class CuDevicePointer;

	typedef typename RemoveConst<T>::Type ValueType;

	// only constructible by CuDevicePointer (private)
	template <typename U>
	CuDeviceReference(CuDevicePointer<U> pointer)
	: mPointer(static_cast<T*>(pointer.get()))
	{
	}

  public:
	template <typename U>
	CuDeviceReference(CuDeviceReference<U> reference)
	: mPointer(static_cast<T*>(reference.mPointer))
	{
	}

	// address-of returns the device pointer, mirroring built-in references
	CuDevicePointer<T> operator&() const
	{
		return CuDevicePointer<T>(mPointer);
	}

	// write: host value -> device
	CuDeviceReference& operator = (const T& v)
	{
		checkSuccess(cuMemcpyHtoD(CUdeviceptr(mPointer), &v, sizeof(T)));
		return *this;
	}
	// write: device -> device
	CuDeviceReference& operator = (const CuDeviceReference& ref)
	{
		checkSuccess(cuMemcpyDtoD(CUdeviceptr(mPointer), CUdeviceptr(ref.mPointer), sizeof(T)));
		return *this;
	}
	// read: device -> host
	operator ValueType() const
	{
		ValueType result;
		checkSuccess(cuMemcpyDtoH(&result, CUdeviceptr(mPointer), sizeof(T)));
		return result;
	}

  private:
	T* mPointer;
};
+}
+
// Element access: returns a proxy reference to the i-th element in device
// memory (defined out-of-line because CuDeviceReference is declared later).
template <typename T>
cloth::CuDeviceReference<T> cloth::CuDevicePointer<T>::operator[](const ptrdiff_t& i) const
{
	return CuDeviceReference<T>(*this + i);
}
+}
diff --git a/NvCloth/src/cuda/CuDeviceVector.h b/NvCloth/src/cuda/CuDeviceVector.h
new file mode 100644
index 0000000..4d97e5d
--- /dev/null
+++ b/NvCloth/src/cuda/CuDeviceVector.h
@@ -0,0 +1,224 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2017 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#pragma once
+
+#include "CuDevicePointer.h"
+#include "PsArray.h"
+#include "PsUtilities.h"
+#include <algorithm>
+
+namespace nv
+{
+#if PX_VC
+#pragma warning(push)
+#pragma warning(disable : 4365) // 'action' : conversion from 'type_1' to 'type_2', signed/unsigned mismatch
+#endif
+
+namespace cloth
+{
+
+// STL-style vector that holds POD types in CUDA device memory. The interface
+// is not complete, add whatever you need from the std::vector interface.
+// STL-style vector that holds POD types in CUDA device memory. The interface
+// is not complete, add whatever you need from the std::vector interface.
+// All element access goes through CuDevicePointer/CuDeviceReference proxies,
+// so reads and writes are synchronous cuMemcpy calls. Elements are treated as
+// POD: no constructors or destructors are run, resize() leaves new slots
+// uninitialized, and growth copies bytes with cuMemcpyDtoD.
+template <typename T>
+class CuDeviceVector
+{
+ public:
+ typedef CuDevicePointer<T> iterator;
+ typedef CuDevicePointer<const T> const_iterator;
+
+ // Empty vector bound to a CUDA context; the caller must ensure the context
+ // is current (e.g. via CuContextLock) around any operation that allocates.
+ CuDeviceVector(CUcontext ctx) : mContext(ctx)
+ {
+ NV_CLOTH_ASSERT(mContext);
+ }
+
+ // Deep copy (device-to-device). NOTE(review): correctness relies on
+ // CuDevicePointer's default constructor producing a null pointer so that
+ // the cuMemFree inside reserve() is a no-op on first growth — confirm.
+ CuDeviceVector(const CuDeviceVector& other)
+ : mContext(other.mContext)
+ {
+ NV_CLOTH_ASSERT(mContext);
+ operator=(other);
+ }
+
+ // Construct from a host range [first, last) (host-to-device copy).
+ CuDeviceVector(CUcontext ctx, const T* first, const T* last)
+ : mContext(ctx)
+ {
+ NV_CLOTH_ASSERT(mContext);
+ assign(first, last);
+ }
+
+ // NOTE(review): this converting constructor never initializes the const
+ // mContext member — presumably callers only use it where a context is
+ // already current; verify against call sites.
+ template <typename Alloc>
+ CuDeviceVector(const physx::shdfnd::Array<T, Alloc>& other)
+ {
+ operator=(other);
+ }
+
+ // Frees the device allocation; cuMemFree on a null pointer is tolerated.
+ ~CuDeviceVector()
+ {
+ checkSuccess(cuMemFree(mFirst.dev()));
+ }
+
+ // Device-to-device copy assignment; grows capacity as needed.
+ CuDeviceVector& operator = (const CuDeviceVector& other)
+ {
+ resize(other.size());
+ checkSuccess(cuMemcpyDtoD(mFirst.dev(), other.mFirst.dev(), other.size() * sizeof(T)));
+ return *this;
+ }
+
+ // Upload a host-side shdfnd::Array (host-to-device copy).
+ template <typename Alloc>
+ CuDeviceVector& operator = (const physx::shdfnd::Array<T, Alloc>& other)
+ {
+ const T* first = other.empty() ? 0 : &other.front();
+ assign(first, first + other.size());
+ return *this;
+ }
+
+ bool empty() const
+ {
+ return mLast == mFirst;
+ }
+ size_t size() const
+ {
+ return size_t(mLast - mFirst);
+ }
+ size_t capacity() const
+ {
+ return mEnd - mFirst;
+ }
+
+ iterator begin()
+ {
+ return mFirst;
+ }
+ iterator end()
+ {
+ return mLast;
+ }
+ const_iterator begin() const
+ {
+ return mFirst;
+ }
+ const_iterator end() const
+ {
+ return mLast;
+ }
+
+ // Append one element; doubles capacity when full. The write through
+ // *mLast is a synchronous host-to-device copy via CuDeviceReference.
+ void push_back(const T& v)
+ {
+ if (mLast == mEnd)
+ reserve(std::max<size_t>(1, capacity() * 2));
+
+ *mLast++ = v;
+ }
+
+ // Append a host range [first, last) with a single cuMemcpyHtoD.
+ void push_back(const T* first, const T* last)
+ {
+ if (mEnd - mLast < last - first)
+ reserve(std::max<size_t>(2 * capacity(), mLast - mFirst + last - first));
+
+ if (first != last)
+ checkSuccess(cuMemcpyHtoD(mLast.dev(), first, sizeof(T) * (last - first)));
+
+ mLast += last - first;
+ }
+
+ // Remove the element at 'it', shifting the tail down by one. Because
+ // cuMemcpyDtoD does not allow overlapping ranges, the tail is staged
+ // through a temporary device buffer.
+ void erase(iterator it)
+ {
+ size_t byteSize = (mLast - it - 1) * sizeof(T);
+ if (byteSize)
+ {
+ CUdeviceptr tmp = 0, dst = it.dev();
+ checkSuccess(cuMemAlloc(&tmp, byteSize));
+ checkSuccess(cuMemcpyDtoD(tmp, dst + sizeof(T), byteSize));
+ checkSuccess(cuMemcpyDtoD(dst, tmp, byteSize));
+ checkSuccess(cuMemFree(tmp));
+ }
+ --mLast;
+ }
+
+ // Grow capacity to at least n elements; existing contents are copied to
+ // the new allocation and the old allocation is freed. Never shrinks.
+ void reserve(size_t n)
+ {
+ if (n <= capacity())
+ return;
+
+ CUdeviceptr newFirst = 0, oldFirst = mFirst.dev();
+
+ checkSuccess(cuMemAlloc(&newFirst, n * sizeof(T)));
+ checkSuccess(cuMemcpyDtoD(newFirst, oldFirst, size() * sizeof(T)));
+ checkSuccess(cuMemFree(oldFirst));
+
+ iterator first(reinterpret_cast<T*>(newFirst));
+ mEnd = first + n;
+ mLast = first + size(); // size() still reflects the old mFirst/mLast pair here
+ mFirst = first;
+ }
+
+ // Set the logical size to n; newly exposed elements are uninitialized (POD).
+ void resize(size_t n)
+ {
+ if (capacity() < n)
+ reserve(std::max(n, capacity() * 2));
+
+ mLast = mFirst + n;
+ }
+
+ // Replace contents with the host range [first, last) in one upload.
+ template <typename Iter>
+ void assign(Iter first, Iter last)
+ {
+ size_t n = last - first;
+ resize(n);
+ if (n)
+ checkSuccess(cuMemcpyHtoD(mFirst.dev(), &*first, n * sizeof(T)));
+ }
+
+ // O(1) exchange of buffers; contexts are assumed compatible by the caller.
+ void swap(CuDeviceVector& other)
+ {
+ std::swap(mFirst, other.mFirst);
+ std::swap(mLast, other.mLast);
+ std::swap(mEnd, other.mEnd);
+ }
+
+ private:
+ CUcontext const mContext; // owning CUDA context (never changes after construction)
+ iterator mFirst, mLast, mEnd; // [mFirst, mLast) = contents, [mFirst, mEnd) = capacity
+};
+
+} // namespace cloth
+} // namespace nv
+
+#if PX_VC
+#pragma warning(pop)
+#endif
+
+namespace std
+{
+// Dispatch std::swap to the O(1) member swap (avoids three deep device copies).
+// NOTE(review): adding an overload (rather than a specialization) to namespace
+// std is formally not permitted by the C++ standard; an ADL-visible swap in
+// nv::cloth would be the conforming alternative — verify project policy.
+template <typename T>
+void swap(nv::cloth::CuDeviceVector<T>& first, nv::cloth::CuDeviceVector<T>& second)
+{
+ first.swap(second);
+}
+}
diff --git a/NvCloth/src/cuda/CuFabric.cpp b/NvCloth/src/cuda/CuFabric.cpp
new file mode 100644
index 0000000..9bc20db
--- /dev/null
+++ b/NvCloth/src/cuda/CuFabric.cpp
@@ -0,0 +1,208 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2017 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#include "CuFabric.h"
+#include "CuContextLock.h"
+#include "CuFactory.h"
+#include <PsUtilities.h>
+
+using namespace physx;
+
+#if PX_VC
+#pragma warning(disable : 4365) // 'action' : conversion from 'type_1' to 'type_2', signed/unsigned mismatch
+#endif
+
+using namespace nv;
+
+// Tether constraint packed to 4 bytes: particle anchor index and quantized length.
+cloth::CuTether::CuTether(uint16_t anchor, uint16_t length) : mAnchor(anchor), mLength(length)
+{
+}
+
+// Build a fabric by validating the host-side constraint data, converting it to
+// compact GPU-friendly layouts (16-bit indices, quantized tether lengths), and
+// uploading everything into device vectors. The CuContextLock base makes the
+// factory's CUDA context current for the duration of the uploads.
+cloth::CuFabric::CuFabric(CuFactory& factory, uint32_t numParticles, Range<const uint32_t> phaseIndices,
+ Range<const uint32_t> sets, Range<const float> restvalues, Range<const float> stiffnessValues, Range<const uint32_t> indices,
+ Range<const uint32_t> anchors, Range<const float> tetherLengths,
+ Range<const uint32_t> triangles, uint32_t id)
+: CuContextLock(factory)
+, mFactory(factory)
+, mNumParticles(numParticles)
+, mPhases(mFactory.mContext, phaseIndices.begin(), phaseIndices.end())
+, mSets(mFactory.mContext)
+, mRestvalues(mFactory.mContext, restvalues.begin(), restvalues.end())
+, mStiffnessValues(mFactory.mContext, stiffnessValues.begin(), stiffnessValues.end())
+, mIndices(mFactory.mContext)
+, mTethers(mFactory.mContext)
+, mTriangles(mFactory.mContext)
+, mId(id)
+{
+ // should no longer be prefixed with 0
+ NV_CLOTH_ASSERT(sets.front() != 0);
+
+ // Each set entry is the cumulative constraint count; the last must match
+ // the restvalue count, and every constraint has exactly two indices.
+ NV_CLOTH_ASSERT(sets.back() == restvalues.size());
+ NV_CLOTH_ASSERT(restvalues.size() * 2 == indices.size());
+ NV_CLOTH_ASSERT(restvalues.size() == stiffnessValues.size() || stiffnessValues.size() == 0);
+ // NOTE(review): maxElement on an empty indices range would be invalid —
+ // presumably callers guarantee at least one constraint; confirm.
+ NV_CLOTH_ASSERT(mNumParticles > *shdfnd::maxElement(indices.begin(), indices.end()));
+
+ // copy to device, add leading zero
+ mSets.reserve(sets.size() + 1);
+ mSets.push_back(0);
+ mSets.push_back(sets.begin(), sets.end());
+
+ // manually convert uint32_t indices to uint16_t in temp memory
+ Vector<uint16_t>::Type hostIndices;
+ hostIndices.resizeUninitialized(indices.size());
+ Vector<uint16_t>::Type::Iterator dIt = hostIndices.begin();
+
+ const uint32_t* it = indices.begin();
+ const uint32_t* end = indices.end();
+ for (; it != end; ++it, ++dIt)
+ *dIt = uint16_t(*it);
+
+ // copy to device vector in one go
+ mIndices.assign(hostIndices.begin(), hostIndices.end());
+
+ // gather data per phase: per-phase device pointers into the flat arrays,
+ // so the solver kernel can address each phase's slice directly
+ mNumConstraintsInPhase.reserve(phaseIndices.size());
+ CuDevicePointer<const float> devRestvalues = mRestvalues.begin();
+ CuDevicePointer<const float> devStiffnessValues = mStiffnessValues.begin();
+ CuDevicePointer<const uint16_t> devIndices = mIndices.begin();
+ for (const uint32_t* pIt = phaseIndices.begin(); pIt != phaseIndices.end(); ++pIt)
+ {
+ uint32_t setIndex = *pIt;
+ uint32_t firstIndex = setIndex ? sets[setIndex - 1] : 0;
+ uint32_t lastIndex = sets[setIndex];
+ mNumConstraintsInPhase.pushBack(lastIndex - firstIndex);
+ mRestvaluesInPhase.pushBack(devRestvalues + firstIndex);
+ mStiffnessValuesInPhase.pushBack(stiffnessValues.size()?devStiffnessValues + firstIndex : CuDevicePointer<const float>());
+ mIndicesInPhase.pushBack(devIndices + 2 * firstIndex);
+ }
+
+ // tethers: quantize lengths to uint16_t against the max length, storing the
+ // scale so the original float lengths can be reconstructed
+ NV_CLOTH_ASSERT(anchors.size() == tetherLengths.size());
+ mTetherLengthScale =
+ tetherLengths.empty() ? 1.0f : *shdfnd::maxElement(tetherLengths.begin(), tetherLengths.end()) / USHRT_MAX;
+ float inverseScale = 1 / (mTetherLengthScale + FLT_EPSILON); // epsilon avoids divide-by-zero for all-zero lengths
+ Vector<CuTether>::Type tethers;
+ tethers.reserve(anchors.size());
+ for (; !anchors.empty(); anchors.popFront(), tetherLengths.popFront())
+ {
+ // +0.5f rounds to nearest when truncating to uint16_t
+ tethers.pushBack(CuTether(uint16_t(anchors.front()), uint16_t(tetherLengths.front() * inverseScale + 0.5f)));
+ }
+ mTethers.assign(tethers.begin(), tethers.end());
+
+ // triangles: narrow to 16-bit indices, reusing the temp buffer
+ hostIndices.resizeUninitialized(triangles.size());
+ dIt = hostIndices.begin();
+
+ it = triangles.begin();
+ end = triangles.end();
+ for (; it != end; ++it, ++dIt)
+ *dIt = uint16_t(*it);
+
+ mTriangles.assign(hostIndices.begin(), hostIndices.end());
+
+ // uploads are done; pop the CUDA context
+ CuContextLock::release();
+
+ // add to factory
+ mFactory.mFabrics.pushBack(this);
+}
+
+// Unregister from the factory. The context is re-acquired so the device
+// vector destructors that follow can free their allocations; presumably the
+// CuContextLock base destructor releases it again — confirm in CuContextLock.
+cloth::CuFabric::~CuFabric()
+{
+ CuContextLock::acquire();
+
+ Vector<CuFabric*>::Type::Iterator fIt = mFactory.mFabrics.find(this);
+
+ NV_CLOTH_ASSERT(fIt != mFactory.mFabrics.end());
+ mFactory.mFabrics.replaceWithLast(fIt); // O(1) unordered removal
+}
+
+// --- trivial accessors implementing the Fabric interface ---
+
+cloth::Factory& cloth::CuFabric::getFactory() const
+{
+ return mFactory;
+}
+
+uint32_t cloth::CuFabric::getNumPhases() const
+{
+ return uint32_t(mPhases.size());
+}
+
+uint32_t cloth::CuFabric::getNumRestvalues() const
+{
+ return uint32_t(mRestvalues.size());
+}
+
+uint32_t cloth::CuFabric::getNumStiffnessValues() const
+{
+ return uint32_t(mStiffnessValues.size());
+}
+
+// mSets carries a leading zero prefix on the device, hence the -1.
+uint32_t cloth::CuFabric::getNumSets() const
+{
+ return uint32_t(mSets.size() - 1);
+}
+
+uint32_t cloth::CuFabric::getNumIndices() const
+{
+ return uint32_t(mIndices.size());
+}
+
+uint32_t cloth::CuFabric::getNumParticles() const
+{
+ return mNumParticles;
+}
+
+uint32_t cloth::CuFabric::getNumTethers() const
+{
+ return uint32_t(mTethers.size());
+}
+
+// mTriangles stores three vertex indices per triangle.
+uint32_t cloth::CuFabric::getNumTriangles() const
+{
+ return uint32_t(mTriangles.size()) / 3;
+}
+
+// Multiply every rest value by 'scale'. The data lives in device memory, so
+// it is round-tripped: download to a host buffer, scale, and re-upload.
+void cloth::CuFabric::scaleRestvalues(float scale)
+{
+ CuContextLock contextLock(mFactory);
+
+ Vector<float>::Type restvalues(uint32_t(mRestvalues.size()));
+ mFactory.copyToHost(mRestvalues.begin().get(), mRestvalues.end().get(), restvalues.begin());
+
+ Vector<float>::Type::Iterator rIt, rEnd = restvalues.end();
+ for (rIt = restvalues.begin(); rIt != rEnd; ++rIt)
+ *rIt *= scale;
+
+ // re-upload in place; capacity is unchanged so no reallocation occurs
+ mRestvalues = restvalues;
+}
+
+// Scale all tether lengths by adjusting the shared quantization scale —
+// the per-tether uint16_t values themselves are untouched.
+void cloth::CuFabric::scaleTetherLengths(float scale)
+{
+ // cloth instances won't pick this up until CuClothData is dirty!
+ mTetherLengthScale *= scale;
+}
diff --git a/NvCloth/src/cuda/CuFabric.h b/NvCloth/src/cuda/CuFabric.h
new file mode 100644
index 0000000..b7aab07
--- /dev/null
+++ b/NvCloth/src/cuda/CuFabric.h
@@ -0,0 +1,105 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2017 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#pragma once
+
+#include "NvCloth/Fabric.h"
+#include "NvCloth/Range.h"
+#include "CuContextLock.h"
+#include "CuDeviceVector.h"
+
+namespace nv
+{
+
+namespace cloth
+{
+
+struct CuTether
+{
+ CuTether(uint16_t, uint16_t);
+ uint16_t mAnchor;
+ uint16_t mLength;
+};
+
+class CuFabric : private CuContextLock, public Fabric
+{
+ CuFabric(const CuFabric&); // not implemented
+ CuFabric& operator = (const CuFabric&); // not implemented
+
+ public:
+ CuFabric(CuFactory& factory, uint32_t numParticles, Range<const uint32_t> phaseIndices, Range<const uint32_t> sets,
+ Range<const float> restvalues, Range<const float> stiffnessValues, Range<const uint32_t> indices, Range<const uint32_t> anchors,
+ Range<const float> tetherLengths, Range<const uint32_t> triangles, uint32_t id);
+
+ virtual ~CuFabric();
+
+ virtual Factory& getFactory() const;
+
+ virtual uint32_t getNumPhases() const;
+ virtual uint32_t getNumRestvalues() const;
+ virtual uint32_t getNumStiffnessValues() const;
+
+ virtual uint32_t getNumSets() const;
+ virtual uint32_t getNumIndices() const;
+
+ virtual uint32_t getNumParticles() const;
+
+ virtual uint32_t getNumTethers() const;
+
+ virtual uint32_t getNumTriangles() const;
+
+ virtual void scaleRestvalues(float);
+ virtual void scaleTetherLengths(float);
+
+public:
+ CuFactory& mFactory;
+
+ uint32_t mNumParticles;
+
+ CuDeviceVector<uint32_t> mPhases; // index of set to use
+ CuDeviceVector<uint32_t> mSets; // offset of first restvalue, with 0 prefix
+
+ CuDeviceVector<float> mRestvalues;
+ CuDeviceVector<float> mStiffnessValues;
+ CuDeviceVector<uint16_t> mIndices;
+
+ CuDeviceVector<CuTether> mTethers;
+ float mTetherLengthScale;
+
+ CuDeviceVector<uint16_t> mTriangles;
+
+ Vector<uint32_t>::Type mNumConstraintsInPhase;
+ Vector<CuDevicePointer<const float> >::Type mRestvaluesInPhase;
+ Vector<CuDevicePointer<const float> >::Type mStiffnessValuesInPhase;
+ Vector<CuDevicePointer<const uint16_t> >::Type mIndicesInPhase;
+
+ uint32_t mId;
+};
+}
+}
diff --git a/NvCloth/src/cuda/CuFactory.cpp b/NvCloth/src/cuda/CuFactory.cpp
new file mode 100644
index 0000000..460cebc
--- /dev/null
+++ b/NvCloth/src/cuda/CuFactory.cpp
@@ -0,0 +1,414 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2017 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#include "CuFactory.h"
+#include "CuFabric.h"
+#include "CuCloth.h"
+#include "CuSolver.h"
+#include "../ClothImpl.h"
+#include "CuCheckSuccess.h"
+#include "CuContextLock.h"
+#include <cuda.h>
+#include <PsFoundation.h>
+#include "CuSolverKernelBlob.h"
+
+#if PX_VC
+#pragma warning(disable : 4061 4062) // enumerator 'identifier' in switch of enum 'enumeration' is not handled
+#endif
+
+using namespace physx;
+using namespace nv;
+
+namespace nv
+{
+namespace cloth
+{
+// defined in Factory.cpp
+uint32_t getNextFabricId();
+
+typedef Vec4T<uint32_t> Vec4u;
+}
+}
+
+// Backing function for the checkSuccess() macro/wrapper: on a failing CUresult,
+// map the code to its symbolic name and log an error with the call site.
+// Note this only logs — execution continues after a CUDA failure.
+void cloth::checkSuccessImpl(CUresult err, const char* file, const int line)
+{
+ if (err != CUDA_SUCCESS)
+ {
+ const char* code = "Unknown";
+ switch(err)
+ {
+// stringize each known error code into a case label
+#define ADD_CASE(X) \
+ case X: \
+ code = #X; \
+ break
+ ADD_CASE(CUDA_ERROR_INVALID_VALUE);
+ ADD_CASE(CUDA_ERROR_OUT_OF_MEMORY);
+ ADD_CASE(CUDA_ERROR_NOT_INITIALIZED);
+ ADD_CASE(CUDA_ERROR_DEINITIALIZED);
+ ADD_CASE(CUDA_ERROR_NO_DEVICE);
+ ADD_CASE(CUDA_ERROR_INVALID_DEVICE);
+ ADD_CASE(CUDA_ERROR_INVALID_IMAGE);
+ ADD_CASE(CUDA_ERROR_INVALID_CONTEXT);
+ ADD_CASE(CUDA_ERROR_MAP_FAILED);
+ ADD_CASE(CUDA_ERROR_UNMAP_FAILED);
+ ADD_CASE(CUDA_ERROR_ARRAY_IS_MAPPED);
+ ADD_CASE(CUDA_ERROR_ALREADY_MAPPED);
+ ADD_CASE(CUDA_ERROR_NO_BINARY_FOR_GPU);
+ ADD_CASE(CUDA_ERROR_ALREADY_ACQUIRED);
+ ADD_CASE(CUDA_ERROR_NOT_MAPPED);
+ ADD_CASE(CUDA_ERROR_NOT_MAPPED_AS_ARRAY);
+ ADD_CASE(CUDA_ERROR_NOT_MAPPED_AS_POINTER);
+ ADD_CASE(CUDA_ERROR_ECC_UNCORRECTABLE);
+ ADD_CASE(CUDA_ERROR_UNSUPPORTED_LIMIT);
+ ADD_CASE(CUDA_ERROR_INVALID_SOURCE);
+ ADD_CASE(CUDA_ERROR_FILE_NOT_FOUND);
+ ADD_CASE(CUDA_ERROR_SHARED_OBJECT_SYMBOL_NOT_FOUND);
+ ADD_CASE(CUDA_ERROR_SHARED_OBJECT_INIT_FAILED);
+ ADD_CASE(CUDA_ERROR_OPERATING_SYSTEM);
+ ADD_CASE(CUDA_ERROR_INVALID_HANDLE);
+ ADD_CASE(CUDA_ERROR_NOT_FOUND);
+ ADD_CASE(CUDA_ERROR_NOT_READY);
+ ADD_CASE(CUDA_ERROR_LAUNCH_FAILED);
+ ADD_CASE(CUDA_ERROR_LAUNCH_OUT_OF_RESOURCES);
+ ADD_CASE(CUDA_ERROR_LAUNCH_TIMEOUT);
+ ADD_CASE(CUDA_ERROR_LAUNCH_INCOMPATIBLE_TEXTURING);
+ // default deliberately shares the CUDA_ERROR_UNKNOWN case body, so any
+ // unlisted code reports as CUDA_ERROR_UNKNOWN
+ default:
+ ADD_CASE(CUDA_ERROR_UNKNOWN);
+#undef ADD_CASE
+ }
+ NV_CLOTH_LOG_ERROR("CUDA error: %s at %s:%d", code, file, line);
+ }
+}
+
+namespace
+{
+// returns max threads as specified by launch bounds in CuSolverKernel.cu
+// returns max threads as specified by launch bounds in CuSolverKernel.cu
+// Queries the device's compute-capability major version (with the context
+// temporarily pushed current) and maps it to the kernel's launch-bounds limit.
+uint32_t getMaxThreadsPerBlock(CUcontext context)
+{
+ checkSuccess(cuCtxPushCurrent(context));
+
+ CUdevice device;
+ checkSuccess(cuCtxGetDevice(&device));
+
+ int major = 0;
+ checkSuccess(cuDeviceGetAttribute(&major, CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR, device));
+
+ checkSuccess(cuCtxPopCurrent(nullptr));
+
+ if (major >= 3)
+ return 1024; // Kepler and newer
+ if (major >= 2)
+ return 512; // Fermi
+ return 192; // Should be the same logic as APEX used
+}
+}
+
+// Bind the factory to an existing CUDA context and load the precompiled
+// solver kernel fat binary (kCuSolverKernel from CuSolverKernelBlob.h).
+cloth::CuFactory::CuFactory(CUcontext context)
+: mContext(context)
+, mNumThreadsPerBlock(getMaxThreadsPerBlock(context))
+, mMaxThreadsPerBlock(mNumThreadsPerBlock)
+, mSolverCount(0)
+{
+ checkSuccess(cuModuleLoadFatBinary(&mKernelModule, kCuSolverKernel));
+}
+
+// Destruction-order contract: all fabrics and solvers created through this
+// factory must already be gone (they hold references into the factory).
+cloth::CuFactory::~CuFactory()
+{
+ NV_CLOTH_ASSERT(("All fabrics created by this factory need to be deleted before this factory is destroyed.", mFabrics.size() == 0));
+ NV_CLOTH_ASSERT(("All solvers created by this factory need to be deleted before this factory is destroyed.", mSolverCount == 0));
+}
+
+// Factory method for CuFabric; ownership passes to the caller (the fabric
+// unregisters itself from mFabrics in its destructor).
+cloth::Fabric* cloth::CuFactory::createFabric(uint32_t numParticles, Range<const uint32_t> phaseIndices,
+ Range<const uint32_t> sets, Range<const float> restvalues, Range<const float> stiffnessValues,
+ Range<const uint32_t> indices, Range<const uint32_t> anchors,
+ Range<const float> tetherLengths, Range<const uint32_t> triangles)
+{
+ return NV_CLOTH_NEW(CuFabric)(*this, numParticles, phaseIndices, sets, restvalues, stiffnessValues, indices, anchors, tetherLengths, triangles,
+ getNextFabricId());
+}
+
+// Factory method for a CUDA cloth instance; ownership passes to the caller.
+cloth::Cloth* cloth::CuFactory::createCloth(Range<const PxVec4> particles, Fabric& fabric)
+{
+ return NV_CLOTH_NEW(CuClothImpl)(*this, fabric, particles);
+}
+
+// Factory method for a CUDA solver. Returns NULL (and destroys the partially
+// constructed solver) if CuSolver reports an initialization error.
+cloth::Solver* cloth::CuFactory::createSolver()
+{
+ CuSolver* solver = NV_CLOTH_NEW(CuSolver)(*this);
+
+ if (solver->hasError())
+ {
+ NV_CLOTH_DELETE(solver);
+ return NULL;
+ }
+
+ return solver;
+}
+
+// CuFactory::clone() implemented in CuClothClone.cpp
+
+// Synchronous device-to-host copy of the byte range [srcIt, srcEnd); the
+// src pointers are device addresses despite their host pointer type.
+void cloth::CuFactory::copyToHost(const void* srcIt, const void* srcEnd, void* dstIt) const
+{
+ CuContextLock contextLock(*this);
+
+ checkSuccess(cuMemcpyDtoH(dstIt, CUdeviceptr(srcIt), size_t(intptr_t(srcEnd) - intptr_t(srcIt))));
+}
+
+// Download the fabric's device-resident data back into caller-provided host
+// ranges. Each output range may be empty to skip that field; non-empty ranges
+// must exactly match the fabric's stored sizes. Reverses the 16-bit/quantized
+// packing done in the CuFabric constructor.
+void cloth::CuFactory::extractFabricData(const Fabric& fabric, Range<uint32_t> phaseIndices, Range<uint32_t> sets,
+ Range<float> restvalues, Range<float> stiffnessValues, Range<uint32_t> indices, Range<uint32_t> anchors,
+ Range<float> tetherLengths, Range<uint32_t> triangles) const
+{
+ CuContextLock contextLock(*this);
+
+ const CuFabric& cuFabric = static_cast<const CuFabric&>(fabric);
+
+ if (!phaseIndices.empty())
+ {
+ NV_CLOTH_ASSERT(phaseIndices.size() == cuFabric.mPhases.size());
+ const uint32_t* devicePhases = cuFabric.mPhases.begin().get();
+ copyToHost(devicePhases, devicePhases + cuFabric.mPhases.size(), phaseIndices.begin());
+ }
+
+ if (!restvalues.empty())
+ {
+ NV_CLOTH_ASSERT(restvalues.size() == cuFabric.mRestvalues.size());
+ const float* deviceRestvalues = cuFabric.mRestvalues.begin().get();
+ copyToHost(deviceRestvalues, deviceRestvalues + cuFabric.mRestvalues.size(), restvalues.begin());
+ }
+
+ if (!stiffnessValues.empty())
+ {
+ NV_CLOTH_ASSERT(stiffnessValues.size() == cuFabric.mStiffnessValues.size());
+ const float* deviceStiffnessValues = cuFabric.mStiffnessValues.begin().get();
+ copyToHost(deviceStiffnessValues, deviceStiffnessValues + cuFabric.mStiffnessValues.size(), stiffnessValues.begin());
+ }
+
+ if (!sets.empty())
+ {
+ // skip the leading zero prefix that was added on upload
+ NV_CLOTH_ASSERT(sets.size() == cuFabric.mSets.size() - 1);
+ const uint32_t* deviceSets = cuFabric.mSets.begin().get();
+ copyToHost(deviceSets + 1, deviceSets + cuFabric.mSets.size(), sets.begin());
+ }
+
+ if (!indices.empty())
+ {
+ // download the packed 16-bit indices into the front half of the 32-bit
+ // output buffer, then widen in place back-to-front to avoid overlap
+ NV_CLOTH_ASSERT(indices.size() == cuFabric.mIndices.size());
+ const uint16_t* deviceIndices = cuFabric.mIndices.begin().get();
+ uint16_t* hostIndices = reinterpret_cast<uint16_t*>(indices.begin());
+ copyToHost(deviceIndices, deviceIndices + cuFabric.mIndices.size(), hostIndices);
+
+ // convert from 16bit to 32bit indices
+ for (uint32_t i = indices.size(); 0 < i--;)
+ indices[i] = hostIndices[i];
+ }
+
+ if (!anchors.empty() || !tetherLengths.empty())
+ {
+ // download packed tethers once, then split into anchors and de-quantized lengths
+ uint32_t numTethers = uint32_t(cuFabric.mTethers.size());
+ Vector<CuTether>::Type tethers(numTethers, CuTether(0, 0));
+ const CuTether* deviceTethers = cuFabric.mTethers.begin().get();
+ copyToHost(deviceTethers, deviceTethers + numTethers, tethers.begin());
+
+ NV_CLOTH_ASSERT(anchors.empty() || anchors.size() == tethers.size());
+ for (uint32_t i = 0; !anchors.empty(); ++i, anchors.popFront())
+ anchors.front() = tethers[i].mAnchor;
+
+ NV_CLOTH_ASSERT(tetherLengths.empty() || tetherLengths.size() == tethers.size());
+ for (uint32_t i = 0; !tetherLengths.empty(); ++i, tetherLengths.popFront())
+ tetherLengths.front() = tethers[i].mLength * cuFabric.mTetherLengthScale;
+ }
+
+ if (!triangles.empty())
+ {
+ // todo triangles
+ // NOTE(review): triangle extraction is unimplemented — a non-empty
+ // 'triangles' range is silently left untouched.
+ }
+}
+
+// Copy the cloth's current collision shapes into host ranges. All of this
+// collision state is kept in host-accessible (pinned) memory on CuCloth, so
+// plain memcpy suffices — no CUDA context or device copy needed.
+void cloth::CuFactory::extractCollisionData(const Cloth& cloth, Range<PxVec4> spheres, Range<uint32_t> capsules,
+ Range<PxVec4> planes, Range<uint32_t> convexes, Range<PxVec3> triangles) const
+{
+ NV_CLOTH_ASSERT(&cloth.getFactory() == this);
+
+ const CuCloth& cuCloth = static_cast<const CuClothImpl&>(cloth).mCloth;
+
+ NV_CLOTH_ASSERT(spheres.empty() || spheres.size() == cuCloth.mStartCollisionSpheres.size());
+ NV_CLOTH_ASSERT(capsules.empty() || capsules.size() == cuCloth.mCapsuleIndices.size() * 2);
+ NV_CLOTH_ASSERT(planes.empty() || planes.size() == cuCloth.mStartCollisionPlanes.size());
+ NV_CLOTH_ASSERT(convexes.empty() || convexes.size() == cuCloth.mConvexMasks.size());
+ NV_CLOTH_ASSERT(triangles.empty() || triangles.size() == cuCloth.mStartCollisionTriangles.size());
+
+ // collision spheres are in pinned memory, so memcpy directly
+ if (!cuCloth.mStartCollisionSpheres.empty() && !spheres.empty())
+ memcpy(spheres.begin(), &cuCloth.mStartCollisionSpheres.front(),
+ cuCloth.mStartCollisionSpheres.size() * sizeof(PxVec4));
+
+ // each IndexPair expands to two uint32_t entries in the output
+ if (!cuCloth.mCapsuleIndices.empty() && !capsules.empty())
+ memcpy(capsules.begin(), &cuCloth.mCapsuleIndices.front(), cuCloth.mCapsuleIndices.size() * sizeof(IndexPair));
+
+ if (!cuCloth.mStartCollisionPlanes.empty() && !planes.empty())
+ memcpy(planes.begin(), &cuCloth.mStartCollisionPlanes.front(),
+ cuCloth.mStartCollisionPlanes.size() * sizeof(PxVec4));
+
+ if (!cuCloth.mConvexMasks.empty() && !convexes.empty())
+ memcpy(convexes.begin(), &cuCloth.mConvexMasks.front(), cuCloth.mConvexMasks.size() * sizeof(uint32_t));
+
+ if (!cuCloth.mStartCollisionTriangles.empty() && !triangles.empty())
+ memcpy(triangles.begin(), &cuCloth.mStartCollisionTriangles.front(),
+ cuCloth.mStartCollisionTriangles.size() * sizeof(PxVec3));
+}
+
+// Copy the cloth's motion constraints to the host. Prefers the host-side
+// shadow copy (plain memcpy); otherwise downloads from device memory, using
+// the target constraints when an interpolation target is set, else the start.
+void cloth::CuFactory::extractMotionConstraints(const Cloth& cloth, Range<PxVec4> destConstraints) const
+{
+ NV_CLOTH_ASSERT(&cloth.getFactory() == this);
+
+ const CuCloth& cuCloth = static_cast<const CuClothImpl&>(cloth).mCloth;
+
+ if (cuCloth.mMotionConstraints.mHostCopy.size())
+ {
+ NV_CLOTH_ASSERT(destConstraints.size() == cuCloth.mMotionConstraints.mHostCopy.size());
+
+ memcpy(destConstraints.begin(), cuCloth.mMotionConstraints.mHostCopy.begin(),
+ sizeof(PxVec4) * cuCloth.mMotionConstraints.mHostCopy.size());
+ }
+ else
+ {
+ CuContextLock contextLock(*this);
+
+ CuDeviceVector<PxVec4> const& srcConstraints = !cuCloth.mMotionConstraints.mTarget.empty()
+ ? cuCloth.mMotionConstraints.mTarget
+ : cuCloth.mMotionConstraints.mStart;
+
+ NV_CLOTH_ASSERT(destConstraints.size() == srcConstraints.size());
+
+ copyToHost(srcConstraints.begin().get(), srcConstraints.end().get(), destConstraints.begin());
+ }
+}
+
+// Copy the cloth's separation constraints to the host; same host-copy /
+// device-download strategy as extractMotionConstraints.
+void cloth::CuFactory::extractSeparationConstraints(const Cloth& cloth, Range<PxVec4> destConstraints) const
+{
+ NV_CLOTH_ASSERT(&cloth.getFactory() == this);
+
+ const CuCloth& cuCloth = static_cast<const CuClothImpl&>(cloth).mCloth;
+
+ if (cuCloth.mSeparationConstraints.mHostCopy.size())
+ {
+ NV_CLOTH_ASSERT(destConstraints.size() == cuCloth.mSeparationConstraints.mHostCopy.size());
+
+ memcpy(destConstraints.begin(), cuCloth.mSeparationConstraints.mHostCopy.begin(),
+ sizeof(PxVec4) * cuCloth.mSeparationConstraints.mHostCopy.size());
+ }
+ else
+ {
+ CuContextLock contextLock(*this);
+
+ CuDeviceVector<PxVec4> const& srcConstraints = !cuCloth.mSeparationConstraints.mTarget.empty()
+ ? cuCloth.mSeparationConstraints.mTarget
+ : cuCloth.mSeparationConstraints.mStart;
+
+ NV_CLOTH_ASSERT(destConstraints.size() == srcConstraints.size());
+
+ copyToHost(srcConstraints.begin().get(), srcConstraints.end().get(), destConstraints.begin());
+ }
+}
+
+// Copy the host shadow of per-particle accelerations. If no host copy exists
+// this is a silent no-op (unlike the constraint extractors, there is no
+// device-download fallback here).
+void cloth::CuFactory::extractParticleAccelerations(const Cloth& cloth, Range<PxVec4> destAccelerations) const
+{
+ NV_CLOTH_ASSERT(&cloth.getFactory() == this);
+
+ const CuCloth& cuCloth = static_cast<const CuClothImpl&>(cloth).mCloth;
+
+ if (cuCloth.mParticleAccelerationsHostCopy.size())
+ {
+ NV_CLOTH_ASSERT(destAccelerations.size() == cuCloth.mParticleAccelerationsHostCopy.size());
+
+ memcpy(destAccelerations.begin(), cuCloth.mParticleAccelerationsHostCopy.begin(),
+ sizeof(PxVec4) * cuCloth.mParticleAccelerationsHostCopy.size());
+ }
+}
+
+// Download virtual-particle data, widening from the device's compact formats:
+// weights are stored as PxVec4 on device but returned as PxVec3, and indices
+// are stored as 16-bit quads (Vec4us) but returned as 32-bit quads.
+void cloth::CuFactory::extractVirtualParticles(const Cloth& cloth, Range<uint32_t[4]> destIndices,
+ Range<PxVec3> destWeights) const
+{
+ NV_CLOTH_ASSERT(&cloth.getFactory() == this);
+
+ CuContextLock contextLock(*this);
+
+ const CuCloth& cuCloth = static_cast<const CuClothImpl&>(cloth).mCloth;
+
+ if (destWeights.size() > 0)
+ {
+ uint32_t numWeights = cloth.getNumVirtualParticleWeights();
+
+ Vector<PxVec4>::Type hostWeights(numWeights, PxVec4(0.0f));
+ copyToHost(cuCloth.mVirtualParticleWeights.begin().get(), cuCloth.mVirtualParticleWeights.end().get(),
+ &hostWeights.front());
+
+ // convert weights to Vec3 (drop the unused w component)
+ PxVec3* destIt = reinterpret_cast<PxVec3*>(destWeights.begin());
+ Vector<PxVec4>::Type::ConstIterator srcIt = hostWeights.begin();
+ Vector<PxVec4>::Type::ConstIterator srcEnd = srcIt + numWeights;
+ for (; srcIt != srcEnd; ++srcIt, ++destIt)
+ *destIt = reinterpret_cast<const PxVec3&>(*srcIt);
+
+ NV_CLOTH_ASSERT(destIt <= destWeights.end());
+ }
+
+ if (destIndices.size() > 0)
+ {
+ uint32_t numIndices = cloth.getNumVirtualParticles();
+
+ Vector<Vec4us>::Type hostIndices(numIndices);
+ copyToHost(cuCloth.mVirtualParticleIndices.begin().get(), cuCloth.mVirtualParticleIndices.end().get(),
+ &hostIndices.front());
+
+ // convert indices to 32 bit
+ Vec4u* destIt = reinterpret_cast<Vec4u*>(destIndices.begin());
+ Vector<Vec4us>::Type::ConstIterator srcIt = hostIndices.begin();
+ Vector<Vec4us>::Type::ConstIterator srcEnd = srcIt + numIndices;
+ for (; srcIt != srcEnd; ++srcIt, ++destIt)
+ *destIt = Vec4u(*srcIt);
+
+ NV_CLOTH_ASSERT(&array(*destIt) <= destIndices.end());
+ }
+}
+
+// Download the cloth's self-collision particle indices into destIndices,
+// which must match the stored count exactly. copyToHost acquires the CUDA
+// context lock internally, so none is taken here.
+void cloth::CuFactory::extractSelfCollisionIndices(const Cloth& cloth, Range<uint32_t> destIndices) const
+{
+ // consistency with the other extract* methods: the cloth must belong to this factory
+ NV_CLOTH_ASSERT(&cloth.getFactory() == this);
+
+ const CuCloth& cuCloth = static_cast<const CuClothImpl&>(cloth).mCloth;
+ NV_CLOTH_ASSERT(destIndices.size() == cuCloth.mSelfCollisionIndices.size());
+ copyToHost(cuCloth.mSelfCollisionIndices.begin().get(), cuCloth.mSelfCollisionIndices.end().get(),
+ destIndices.begin());
+}
+
+// Download the cloth's rest positions into destRestPositions, which must
+// match the stored count exactly. copyToHost acquires the CUDA context lock
+// internally, so none is taken here.
+void cloth::CuFactory::extractRestPositions(const Cloth& cloth, Range<PxVec4> destRestPositions) const
+{
+ // consistency with the other extract* methods: the cloth must belong to this factory
+ NV_CLOTH_ASSERT(&cloth.getFactory() == this);
+
+ const CuCloth& cuCloth = static_cast<const CuClothImpl&>(cloth).mCloth;
+ NV_CLOTH_ASSERT(destRestPositions.size() == cuCloth.mRestPositions.size());
+ copyToHost(cuCloth.mRestPositions.begin().get(), cuCloth.mRestPositions.end().get(), destRestPositions.begin());
+}
diff --git a/NvCloth/src/cuda/CuFactory.h b/NvCloth/src/cuda/CuFactory.h
new file mode 100644
index 0000000..81799ec
--- /dev/null
+++ b/NvCloth/src/cuda/CuFactory.h
@@ -0,0 +1,109 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2017 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#pragma once
+
+#include "NvCloth/Factory.h"
+#include <PsArray.h>
+#include <foundation/PxVec4.h>
+#include <foundation/PxVec3.h>
+#include <cuda.h>
+
+namespace nv
+{
+namespace cloth
+{
+
+class CuFabric;
+class CuCloth;
+template <typename>
+class ClothImpl;
+
+// CUDA implementation of the Factory interface: creates fabrics, cloths and
+// solvers that run on the CUcontext passed to the constructor, and provides
+// the extract* methods that copy simulation data back from device memory.
+class CuFactory : public Factory
+{
+ protected:
+ // Non-copyable: assignment declared but intentionally left undefined.
+ CuFactory& operator = (const CuFactory&);
+
+ public:
+ typedef CuFabric FabricType;
+ typedef ClothImpl<CuCloth> ImplType;
+
+ CuFactory(CUcontext);
+ virtual ~CuFactory();
+
+ virtual Platform getPlatform() const { return Platform::CUDA; }
+
+ // Factory interface: object creation.
+ virtual Fabric* createFabric(uint32_t numParticles, Range<const uint32_t> phaseIndices, Range<const uint32_t> sets,
+ Range<const float> restvalues, Range<const float> stiffnessValues, Range<const uint32_t> indices,
+ Range<const uint32_t> anchors, Range<const float> tetherLengths,
+ Range<const uint32_t> triangles);
+
+ virtual Cloth* createCloth(Range<const physx::PxVec4> particles, Fabric& fabric);
+
+ virtual Solver* createSolver();
+
+ virtual Cloth* clone(const Cloth& cloth);
+
+ // Factory interface: device-to-host data extraction. Each method asserts
+ // that the destination ranges match the corresponding device buffers.
+ virtual void extractFabricData(const Fabric& fabric, Range<uint32_t> phaseIndices, Range<uint32_t> sets,
+ Range<float> restvalues, Range<float> stiffnessValues, Range<uint32_t> indices, Range<uint32_t> anchors,
+ Range<float> tetherLengths, Range<uint32_t> triangles) const;
+
+ virtual void extractCollisionData(const Cloth& cloth, Range<physx::PxVec4> spheres, Range<uint32_t> capsules,
+ Range<physx::PxVec4> planes, Range<uint32_t> convexes, Range<physx::PxVec3> triangles) const;
+
+ virtual void extractMotionConstraints(const Cloth& cloth, Range<physx::PxVec4> destConstraints) const;
+
+ virtual void extractSeparationConstraints(const Cloth& cloth, Range<physx::PxVec4> destConstraints) const;
+
+ virtual void extractParticleAccelerations(const Cloth& cloth, Range<physx::PxVec4> destAccelerations) const;
+
+ virtual void extractVirtualParticles(const Cloth& cloth, Range<uint32_t[4]> destIndices,
+ Range<physx::PxVec3> destWeights) const;
+
+ virtual void extractSelfCollisionIndices(const Cloth& cloth, Range<uint32_t> destIndices) const;
+
+ virtual void extractRestPositions(const Cloth& cloth, Range<physx::PxVec4> destRestPositions) const;
+
+ public:
+ // Synchronous device-to-host copy of the byte range [srcIt, srcEnd).
+ void copyToHost(const void* srcIt, const void* srcEnd, void* dstIt) const;
+
+ public:
+ // Fabrics created by this factory; presumably used for lifetime
+ // tracking/cleanup — confirm against CuFactory.cpp.
+ Vector<CuFabric*>::Type mFabrics;
+ int mSolverCount; //Tracks how many cuda solvers are alive
+
+ // CUDA context all allocations and launches from this factory use.
+ CUcontext mContext;
+
+ // Loaded module containing the solver kernel(s) — TODO confirm loader site.
+ CUmodule mKernelModule;
+
+ uint32_t mNumThreadsPerBlock;
+
+ const uint32_t mMaxThreadsPerBlock;
+};
+}
+}
diff --git a/NvCloth/src/cuda/CuPhaseConfig.h b/NvCloth/src/cuda/CuPhaseConfig.h
new file mode 100644
index 0000000..d913b8b
--- /dev/null
+++ b/NvCloth/src/cuda/CuPhaseConfig.h
@@ -0,0 +1,51 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2017 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#pragma once
+#include <stdint.h>
+
+namespace nv
+{
+namespace cloth
+{
+
+// Per-phase constraint-solver configuration mirrored to the device.
+// Plain-old-data so it can be copied to GPU memory directly.
+struct CuPhaseConfig
+{
+ float mStiffness;
+ float mStiffnessMultiplier;
+ float mCompressionLimit;
+ float mStretchLimit;
+
+ // Number of constraints in this phase; the three pointers below are
+ // device-side arrays of that length (mIndices holds particle index pairs
+ // as 16-bit values — see CuPhaseConfig consumers to confirm layout).
+ uint32_t mNumConstraints;
+ const float* mRestvalues;
+ const float* mStiffnessValues;
+ const uint16_t* mIndices;
+};
+}
+}
diff --git a/NvCloth/src/cuda/CuPinnedAllocator.h b/NvCloth/src/cuda/CuPinnedAllocator.h
new file mode 100644
index 0000000..8b1787b
--- /dev/null
+++ b/NvCloth/src/cuda/CuPinnedAllocator.h
@@ -0,0 +1,182 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2017 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#pragma once
+
+#include "CuCheckSuccess.h"
+#include "NvCloth/Allocator.h"
+
+// todo: rename file to CuHostAllocator.h
+
+namespace nv
+{
+namespace cloth
+{
+
+// Allocator for page-locked (pinned) host memory via cuMemHostAlloc.
+// If Flags contains CU_MEMHOSTALLOC_DEVICEMAP, the matching device pointer of
+// the most recent allocation is cached in mDevicePtr (see getDevicePointer()).
+template <typename T, unsigned Flags = 0>
+class CuHostAllocator
+{
+public:
+ typedef T value_type;
+
+ CuHostAllocator(CUcontext ctx = nullptr)
+ : mContext(ctx), mDevicePtr(0)
+ {}
+
+ // Rebind-copy from an allocator of another element type; the cached
+ // device pointer is deliberately not carried over.
+ template <class U>
+ CuHostAllocator(const CuHostAllocator<U, Flags>& other)
+ : mContext(other.mContext), mDevicePtr(0)
+ {}
+
+ template <class U>
+ struct rebind
+ {
+ typedef CuHostAllocator<U, Flags> other;
+ };
+
+ // Allocates pinned memory for num elements. The name/line parameters
+ // (unused here) match the shdfnd allocator signature.
+ T* allocate(size_t num, const char*, int)
+ {
+ void* ptr = nullptr;
+ checkSuccess(cuMemHostAlloc(&ptr, num * sizeof(T), Flags));
+#if PX_VC
+#pragma warning(push)
+#pragma warning(disable : 4127) // conditional expression is constant
+#endif
+ if (Flags & CU_MEMHOSTALLOC_DEVICEMAP)
+ checkSuccess(cuMemHostGetDevicePointer(&mDevicePtr, ptr, 0));
+#if PX_VC
+#pragma warning(pop)
+#endif
+ return static_cast<T*>(ptr);
+ }
+
+ void deallocate(void* ptr)
+ {
+ checkSuccess(cuMemFreeHost(ptr));
+ }
+
+#if PX_VC
+ // MSVC path: single move-construct overload.
+ template <class U>
+ void construct(U* p, T&& val)
+ {
+ new (static_cast<void*>(p)) U(std::forward<T>(val));
+ }
+#else
+ typedef T* pointer;
+ typedef const T* const_pointer;
+ typedef T& reference;
+ typedef const T& const_reference;
+
+ // Perfect-forwarding placement construction.
+ template <class U, class... Args>
+ void construct(U* p, Args&&... args)
+ {
+ ::new (static_cast<void*>(p)) U(std::forward<Args>(args)...);
+ }
+#endif
+
+ void destroy(T* ptr)
+ {
+ core::unused(ptr); // silence unused warning for trivially-destructible T
+ ptr->~T();
+ }
+
+ CUcontext mContext;
+ CUdeviceptr mDevicePtr; // device pointer of last allocation
+};
+
+// All CuHostAllocator instances are treated as interchangeable by containers;
+// note this ignores mContext — presumably all allocators share one context.
+template <class T1, unsigned Flag1, class T2, unsigned Flag2>
+bool operator == (const CuHostAllocator<T1, Flag1>&, const CuHostAllocator<T2, Flag2>&)
+{
+ return true;
+}
+
+template <class T1, unsigned Flag1, class T2, unsigned Flag2>
+bool operator!=(const CuHostAllocator<T1, Flag1>&, const CuHostAllocator<T2, Flag2>&)
+{
+ return false;
+}
+
+//Use CuHostVectorImpl instead of physx::shdfnd::Array<T, typename CuHostAllocator<T, Flags>>
+//This entire class is just to make sure that the mDevicePtr from the CuHostAllocator is properly swapped together with mData
+template <typename T, unsigned Flags = 0>
+class CuHostVectorImpl : public physx::shdfnd::Array<T, typename CuHostAllocator<T, Flags>>
+{
+ typedef physx::shdfnd::Array<T, typename CuHostAllocator<T, Flags>> Super;
+ typedef typename CuHostAllocator<T, Flags> Alloc;
+public:
+ explicit CuHostVectorImpl(const physx::PxEMPTY v):Super(v){}
+ PX_INLINE explicit CuHostVectorImpl(const Alloc& alloc = Alloc()):Super(alloc){}
+
+ PX_INLINE explicit CuHostVectorImpl(uint32_t size, const T& a = T(), const Alloc& alloc = Alloc()):Super(size,a,alloc){}
+
+ template <class A>
+ PX_INLINE explicit CuHostVectorImpl(const physx::shdfnd::Array<T, A>& other, const Alloc& alloc = Alloc()):Super(other,alloc){}
+
+ PX_INLINE CuHostVectorImpl(const CuHostVectorImpl& other, const Alloc& alloc = Alloc()):Super(other,alloc){}
+
+ PX_INLINE explicit CuHostVectorImpl(const T* first, const T* last, const Alloc& alloc = Alloc()):Super(first,last,alloc){}
+
+ void swap(physx::shdfnd::Array<T, typename CuHostAllocator<T, Flags>>& other)
+ {
+ PX_ASSERT(mContext == other.mContext);
+ physx::shdfnd::swap(mDevicePtr, other.mDevicePtr);
+ Super::swap(other);
+ }
+};
+
+
+// Convenience alias: CuHostVector<T, Flags>::Type is the pinned-memory vector.
+template <typename T, unsigned Flags = 0>
+struct CuHostVector
+{
+ typedef CuHostVectorImpl<T,Flags> Type;
+};
+
+// Returns the device-mapped pointer of the vector's current allocation,
+// or null for an empty vector (the cached pointer would be stale).
+// Only meaningful when Flags includes CU_MEMHOSTALLOC_DEVICEMAP.
+template <typename T, unsigned Flags /*= 0*/>
+T* getDevicePointer(nv::cloth::CuHostVectorImpl<T, Flags>& vector)
+{
+ // cached device pointer only valid if non-empty
+ return vector.empty() ? 0 : reinterpret_cast<T*>(vector.getAllocator().mDevicePtr);
+}
+
+} // namespace cloth
+
+} // namespace nv
+
+namespace physx
+{
+namespace shdfnd
+{
+ //Make sure we call the correct swap function when using the free function swap
+ //(otherwise shdfnd::swap would bypass the device-pointer swap above)
+ template <class T, unsigned Flags>
+ PX_INLINE void swap(nv::cloth::CuHostVectorImpl<T, Flags>& x, nv::cloth::CuHostVectorImpl<T, Flags>& y)
+ {
+ x.swap(y);
+ }
+}
+}
diff --git a/NvCloth/src/cuda/CuSelfCollision.h b/NvCloth/src/cuda/CuSelfCollision.h
new file mode 100644
index 0000000..19d2723
--- /dev/null
+++ b/NvCloth/src/cuda/CuSelfCollision.h
@@ -0,0 +1,478 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2017 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#pragma once
+
+#ifndef CU_SOLVER_KERNEL_CU
+#error include CuSelfCollision.h only from CuSolverKernel.cu
+#endif
+
+#ifndef UINT16_MAX
+#define UINT16_MAX 0xffff
+#endif
+
+namespace
+{
+// Inclusive prefix sum of one int32 per lane across a warp, in place.
+// sm_30+: register shuffle (shfl.up) in inline PTX — no shared-memory
+// traffic; the predicate skips the add for lanes with no source lane.
+#if __CUDA_ARCH__ >= 300
+template <int>
+__device__ void scanWarp(Pointer<Shared, int32_t> counts)
+{
+ asm volatile("{"
+ " .reg .s32 tmp;"
+ " .reg .pred p;"
+ " shfl.up.b32 tmp|p, %0, 0x01, 0x0;"
+ "@p add.s32 %0, tmp, %0;"
+ " shfl.up.b32 tmp|p, %0, 0x02, 0x0;"
+ "@p add.s32 %0, tmp, %0;"
+ " shfl.up.b32 tmp|p, %0, 0x04, 0x0;"
+ "@p add.s32 %0, tmp, %0;"
+ " shfl.up.b32 tmp|p, %0, 0x08, 0x0;"
+ "@p add.s32 %0, tmp, %0;"
+ " shfl.up.b32 tmp|p, %0, 0x10, 0x0;"
+ "@p add.s32 %0, tmp, %0;"
+ "}"
+ : "+r"(*generic(counts))
+ :);
+}
+#else
+// Pre-sm_30 fallback: Hillis-Steele scan over shared memory, with elements
+// laid out 'stride' apart. Relies on warp-synchronous execution (volatile,
+// no explicit sync). Note: warpSize - 1 binds as warpSize - 1 (&-precedence).
+template <int stride>
+__device__ void scanWarp(Pointer<Shared, int32_t> counts)
+{
+ volatile int32_t* ptr = generic(counts);
+ const int32_t laneIdx = threadIdx.x & warpSize - 1;
+ if (laneIdx >= 1)
+ *ptr += ptr[-stride];
+ if (laneIdx >= 2)
+ *ptr += ptr[-2 * stride];
+ if (laneIdx >= 4)
+ *ptr += ptr[-4 * stride];
+ if (laneIdx >= 8)
+ *ptr += ptr[-8 * stride];
+ if (laneIdx >= 16)
+ *ptr += ptr[-16 * stride];
+}
+#endif
+
+// sorts array by upper 16bits
+// [keys] must be at least 2 * n in length, in/out in first n elements
+// [histogram] must be at least 34 * 16 = 544 in length
+// sorts array by upper 16bits
+// [keys] must be at least 2 * n in length, in/out in first n elements
+// [histogram] must be at least 34 * 16 = 544 in length
+// Block-cooperative LSD radix sort: four 4-bit passes over bits 16..31,
+// ping-ponging between the two halves of [keys]. Since the pass count is
+// even, the final result lands back in the first n elements. Per-warp 16-bin
+// histograms are built with __ballot/__popc, then turned into global bucket
+// offsets via the scanWarp prefix sums.
+__device__ void radixSort(int32_t* keys, int32_t n, Pointer<Shared, int32_t> histogram)
+{
+ const int32_t numWarps = blockDim.x >> 5;
+ const int32_t warpIdx = threadIdx.x >> 5;
+ const int32_t laneIdx = threadIdx.x & warpSize - 1;
+
+ // Masks used to select, per lane, the ballot bits matching this lane's
+ // digit: maskN is all-ones when the lane's bit N is 0, all-zeros otherwise.
+ const uint32_t laneMask = (1u << laneIdx) - 1;
+ const uint32_t mask1 = (threadIdx.x & 1) - 1;
+ const uint32_t mask2 = !!(threadIdx.x & 2) - 1;
+ const uint32_t mask4 = !!(threadIdx.x & 4) - 1;
+ const uint32_t mask8 = !!(threadIdx.x & 8) - 1;
+
+ // Each warp processes a contiguous slab of tn*32 keys; endIndex is rounded
+ // up to a full warp so every __ballot sees all 32 lanes participating.
+ const int32_t tn = (n + blockDim.x - 1) / blockDim.x;
+ const int32_t startIndex = tn * (threadIdx.x - laneIdx) + laneIdx;
+ const int32_t endIndex = min(startIndex + tn * warpSize, n + 31 & ~31); // full warps for ballot
+
+ int32_t* srcKeys = keys;
+ int32_t* dstKeys = keys + n;
+
+ Pointer<Shared, int32_t> hIt = histogram + 16 * warpIdx;
+ Pointer<Shared, int32_t> pIt = histogram + 16 * laneIdx + 16;
+ Pointer<Shared, int32_t> tIt = histogram + 16 * numWarps + laneIdx;
+
+ for (int32_t p = 16; p < 32; p += 4) // radix passes (4 bits each)
+ {
+ // gather bucket histograms per warp
+ // out-of-range lanes contribute digit 15 (the sentinel bucket)
+ int32_t warpCount = 0;
+ for (int32_t i = startIndex; i < endIndex; i += 32)
+ {
+ int32_t key = i < n ? srcKeys[i] >> p : 15;
+ uint32_t ballot1 = __ballot(key & 1);
+ uint32_t ballot2 = __ballot(key & 2);
+ uint32_t ballot4 = __ballot(key & 4);
+ uint32_t ballot8 = __ballot(key & 8);
+ warpCount += __popc((mask1 ^ ballot1) & (mask2 ^ ballot2) & (mask4 ^ ballot4) & (mask8 ^ ballot8));
+ }
+
+ if (laneIdx >= 16)
+ hIt[laneIdx] = warpCount;
+
+ __syncthreads();
+
+ // prefix sum of histogram buckets
+ for (int32_t i = warpIdx; i < 16; i += numWarps)
+ scanWarp<16>(pIt + i);
+
+ __syncthreads();
+
+ // prefix sum of bucket totals (exclusive)
+ // (!threadIdx.x - 1) is 0 for thread 0 and ~0 otherwise, zeroing the
+ // first element to make the scan exclusive.
+ if (threadIdx.x < 16)
+ {
+ *tIt = tIt[-1] & !threadIdx.x - 1;
+ scanWarp<1>(tIt);
+ hIt[threadIdx.x] = 0;
+ }
+
+ __syncthreads();
+
+ if (laneIdx < 16)
+ hIt[laneIdx] += *tIt;
+
+ // split indices
+ // scatter: each key's destination is its bucket base plus the count of
+ // same-digit keys in lower lanes (popc over laneMask).
+ for (int32_t i = startIndex; i < endIndex; i += 32)
+ {
+ int32_t key = i < n ? srcKeys[i] >> p : 15;
+ uint32_t ballot1 = __ballot(key & 1);
+ uint32_t ballot2 = __ballot(key & 2);
+ uint32_t ballot4 = __ballot(key & 4);
+ uint32_t ballot8 = __ballot(key & 8);
+ uint32_t bits = ((key & 1) - 1 ^ ballot1) & (!!(key & 2) - 1 ^ ballot2) & (!!(key & 4) - 1 ^ ballot4) &
+ (!!(key & 8) - 1 ^ ballot8);
+ int32_t index = hIt[key & 15] + __popc(bits & laneMask);
+
+ if (i < n)
+ dstKeys[index] = srcKeys[i];
+
+ if (laneIdx < 16)
+ hIt[laneIdx] += __popc((mask1 ^ ballot1) & (mask2 ^ ballot2) & (mask4 ^ ballot4) & (mask8 ^ ballot8));
+ }
+
+ __syncthreads();
+
+ ::swap(srcKeys, dstKeys);
+ }
+
+#ifndef NDEBUG
+ // debug: verify the first n elements are sorted by their upper 16 bits
+ for (int32_t i = threadIdx.x; i < n; i += blockDim.x)
+ assert(!i || keys[i - 1] >> 16 <= keys[i] >> 16);
+#endif
+}
+}
+
+namespace
+{
+// Device-side functor performing cloth self-collision for one cloth:
+// builds a 2D grid acceleration structure over the two longest bounding-box
+// axes, then resolves particle-particle overlaps closer than the
+// self-collision distance.
+struct CuSelfCollision
+{
+ template <typename CurrentT>
+ __device__ void operator()(CurrentT& current);
+
+ private:
+ template <typename CurrentT>
+ __device__ void buildAcceleration(const CurrentT& current);
+ template <bool useRestPositions, typename CurrentT>
+ __device__ void collideParticles(CurrentT& current) const;
+
+ public:
+ // Grid transform per axis: cellIndex = pos * mPosScale + mPosBias.
+ // mPosPtr[0..2] point at the particle coordinate arrays, permuted so
+ // that index 0 is the shortest bounding-box axis (see operator()).
+ float mPosBias[3];
+ float mPosScale[3];
+ const float* mPosPtr[3];
+};
+}
+
+// Shared-memory instance used by the solver kernel (constructed lazily).
+__shared__ uninitialized<CuSelfCollision> gSelfCollideParticles;
+
+// Entry point: sets up the cell grid over the cloth's bounding box and runs
+// the collision pass. Early-outs when self collision is disabled (distance or
+// stiffness <= 0).
+template <typename CurrentT>
+__device__ void CuSelfCollision::operator()(CurrentT& current)
+{
+ if (min(gClothData.mSelfCollisionDistance, gFrameData.mSelfCollisionStiffness) <= 0.0f)
+ return;
+
+ ProfileDetailZone zone(cloth::CuProfileZoneIds::SELFCOLLIDE);
+
+ // Threads 0..2 each handle one axis of the bounding box.
+ // mParticleBounds stores (upper, -lower) pairs per axis.
+ if (threadIdx.x < 3)
+ {
+ float upper = gFrameData.mParticleBounds[threadIdx.x * 2];
+ float negativeLower = gFrameData.mParticleBounds[threadIdx.x * 2 + 1];
+
+ // expand bounds
+ float eps = (upper + negativeLower) * 1e-4f;
+ float expandedUpper = upper + eps;
+ float expandedNegativeLower = negativeLower + eps;
+ float expandedEdgeLength = expandedUpper + expandedNegativeLower;
+
+ float* edgeLength = mPosBias; // use as temp
+ edgeLength[threadIdx.x] = expandedEdgeLength;
+
+ // make the temp edge lengths visible to the other two threads
+ __threadfence_block();
+
+ // calculate shortest axis
+ int32_t shortestAxis = edgeLength[0] > edgeLength[1];
+ if (edgeLength[shortestAxis] > edgeLength[2])
+ shortestAxis = 2;
+
+ // permute axes so the shortest one maps to slot 0 (wrap negatives)
+ uint32_t writeAxis = threadIdx.x - shortestAxis;
+ writeAxis += writeAxis >> 30;
+
+ // cell size >= collision distance, capped so the 127-cell grid covers
+ // the expanded bounding box
+ float maxInvCellSize = __fdividef(127.0f, expandedEdgeLength);
+ float invCollisionDistance = __fdividef(1.0f, gClothData.mSelfCollisionDistance);
+ float invCellSize = min(maxInvCellSize, invCollisionDistance);
+
+ mPosScale[writeAxis] = invCellSize;
+ mPosBias[writeAxis] = invCellSize * expandedNegativeLower;
+ mPosPtr[writeAxis] = generic(current[threadIdx.x]);
+ }
+
+ __syncthreads();
+
+ buildAcceleration(current);
+
+ // compile-time branch on whether rest positions filter collision pairs
+ if (gFrameData.mRestPositions)
+ collideParticles<true>(current);
+ else
+ collideParticles<false>(current);
+}
+
+// Builds the grid acceleration structure: computes a 14-bit cell key per
+// (indexed) particle from its row/column cell, sorts (key, index) pairs by
+// key with radixSort, and records per-cell start offsets in cellStart.
+template <typename CurrentT>
+__device__ void CuSelfCollision::buildAcceleration(const CurrentT& current)
+{
+ ProfileDetailZone zone(cloth::CuProfileZoneIds::SELFCOLLIDE_ACCELERATION);
+
+ int32_t numIndices = gClothData.mNumSelfCollisionIndices;
+ const int32_t* indices = reinterpret_cast<const int32_t*>(gClothData.mSelfCollisionIndices);
+ int32_t* sortedKeys = reinterpret_cast<int32_t*>(gClothData.mSelfCollisionKeys);
+ int16_t* cellStart = reinterpret_cast<int16_t*>(gClothData.mSelfCollisionCellStart);
+
+ // row = grid axis 1, column = grid axis 2 (axis 0, the shortest, is unused
+ // for cell hashing)
+ typedef typename CurrentT::ConstPointerType ConstPointerType;
+ ConstPointerType rowPtr = ConstPointerType(mPosPtr[1]);
+ ConstPointerType colPtr = ConstPointerType(mPosPtr[2]);
+
+ float rowScale = mPosScale[1], rowBias = mPosBias[1];
+ float colScale = mPosScale[2], colBias = mPosBias[2];
+
+ // calculate keys
+ for (int32_t i = threadIdx.x; i < numIndices; i += blockDim.x)
+ {
+ // null indices means all particles self-collide
+ int32_t index = indices ? indices[i] : i;
+ assert(index < gClothData.mNumParticles);
+
+ int32_t rowIndex = int32_t(max(0.0f, min(rowPtr[index] * rowScale + rowBias, 127.5f)));
+ int32_t colIndex = int32_t(max(0.0f, min(colPtr[index] * colScale + colBias, 127.5f)));
+ assert(rowIndex >= 0 && rowIndex < 128 && colIndex >= 0 && colIndex < 128);
+
+ int32_t key = (colIndex << 7 | rowIndex) + 129; // + row and column sentinel
+ assert(key <= 0x4080);
+
+ sortedKeys[i] = key << 16 | index; // (key, index) pair in a single int32_t
+ }
+ __syncthreads();
+
+ // get scratch shared mem buffer used for radix sort(histogram)
+ Pointer<Shared, int32_t> buffer =
+ reinterpret_cast<Pointer<Shared, int32_t> const&>(gCollideParticles.get().mCurData.mSphereX);
+
+ // sort keys (__synchthreads inside radix sort)
+ radixSort(sortedKeys, numIndices, buffer);
+
+ // mark cell start if keys are different between neighboring threads
+ // cellStart[key] = first sorted index of that cell; cellStart[prevKey+1]
+ // doubles as the end marker of the previous cell
+ for (int32_t i = threadIdx.x; i < numIndices; i += blockDim.x)
+ {
+ int32_t key = sortedKeys[i] >> 16;
+ int32_t prevKey = i ? sortedKeys[i - 1] >> 16 : key - 1;
+ if (key != prevKey)
+ {
+ cellStart[key] = i;
+ cellStart[prevKey + 1] = i;
+ }
+ }
+ __syncthreads();
+}
+
+// Resolves particle-particle collisions using the grid built above.
+// Particles closer than the collision distance are pushed apart by a
+// stiffness-weighted impulse; when useRestPositions is set, pairs that are
+// already close in the rest configuration are skipped.
+// Uses global-memory arrays (sortedParticles) as staging so that shared
+// memory can temporarily hold only the sorted/indexed particles.
+template <bool useRestPositions, typename CurrentT>
+__device__ void CuSelfCollision::collideParticles(CurrentT& current) const
+{
+ ProfileDetailZone zone(cloth::CuProfileZoneIds::SELFCOLLIDE_PARTICLES);
+
+ const int32_t* sortedKeys = reinterpret_cast<const int32_t*>(gClothData.mSelfCollisionKeys);
+ float* sortedParticles = gClothData.mSelfCollisionParticles;
+ int16_t* cellStart = reinterpret_cast<int16_t*>(gClothData.mSelfCollisionCellStart);
+
+ const float cdist = gClothData.mSelfCollisionDistance;
+ const float cdistSq = cdist * cdist;
+
+ const int32_t numIndices = gClothData.mNumSelfCollisionIndices;
+ const int32_t numParticles = gClothData.mNumParticles;
+
+ // point to particle copied in device memory that is being updated
+ // (SoA layout: x, y, z, w planes of numParticles floats each)
+ float* xPtr = sortedParticles;
+ float* yPtr = sortedParticles + numParticles;
+ float* zPtr = sortedParticles + 2 * numParticles;
+ float* wPtr = sortedParticles + 3 * numParticles;
+
+ // copy current particles to temporary array
+ for (int32_t i = threadIdx.x; i < numParticles; i += blockDim.x)
+ {
+ xPtr[i] = current(i, 0);
+ yPtr[i] = current(i, 1);
+ zPtr[i] = current(i, 2);
+ wPtr[i] = current(i, 3);
+ }
+ __syncthreads();
+
+ // copy only sorted (indexed) particles to shared mem
+ // after this, current(i, ...) holds the particle at sorted position i
+ for (int32_t i = threadIdx.x; i < numIndices; i += blockDim.x)
+ {
+ int32_t index = sortedKeys[i] & UINT16_MAX;
+ current(i, 0) = xPtr[index];
+ current(i, 1) = yPtr[index];
+ current(i, 2) = zPtr[index];
+ current(i, 3) = wPtr[index];
+ }
+ __syncthreads();
+
+ typedef typename CurrentT::ConstPointerType ConstPointerType;
+ ConstPointerType rowPtr = ConstPointerType(mPosPtr[1]);
+ ConstPointerType colPtr = ConstPointerType(mPosPtr[2]);
+
+ float rowScale = mPosScale[1], rowBias = mPosBias[1];
+ float colScale = mPosScale[2], colBias = mPosBias[2];
+
+ // one sorted particle per thread iteration
+ for (int32_t i = threadIdx.x; i < numIndices; i += blockDim.x)
+ {
+ const int32_t index = sortedKeys[i] & UINT16_MAX;
+ assert(index < gClothData.mNumParticles);
+
+ float restX, restY, restZ;
+ if (useRestPositions)
+ {
+ const float* restIt = gFrameData.mRestPositions + index * 4;
+ restX = restIt[0];
+ restY = restIt[1];
+ restZ = restIt[2];
+ }
+
+ float posX = current(i, 0);
+ float posY = current(i, 1);
+ float posZ = current(i, 2);
+ float posW = current(i, 3);
+
+ // accumulated displacement; deltaW accumulates ratios for the
+ // weighted average (epsilon avoids division by zero)
+ float deltaX = 0.0f;
+ float deltaY = 0.0f;
+ float deltaZ = 0.0f;
+ float deltaW = FLT_EPSILON;
+
+ // get cell index for this particle
+ int32_t rowIndex = int32_t(max(0.0f, min(rowPtr[i] * rowScale + rowBias, 127.5f)));
+ int32_t colIndex = int32_t(max(0.0f, min(colPtr[i] * colScale + colBias, 127.5f)));
+ assert(rowIndex >= 0 && rowIndex < 128 && colIndex >= 0 && colIndex < 128);
+
+ int32_t key = colIndex << 7 | rowIndex;
+ assert(key <= 0x4080);
+
+ // check cells in 3 columns
+ for (int32_t keyEnd = key + 256; key <= keyEnd; key += 128)
+ {
+ // cellStart keys of unoccupied cells have a value of -1
+ uint32_t startIndex; // min<unsigned>(cellStart[key + 0..2])
+ uint32_t endIndex; // max<signed>(0, cellStart[key + 1..3])
+
+ // PTX: load four consecutive int16 cellStart entries and reduce to
+ // the [startIndex, endIndex) range covering 3 rows of this column
+ asm volatile("{\n\t"
+ " .reg .u32 start1, start2;\n\t"
+ " ld.global.s16 %1, [%2+6];\n\t"
+ " ld.global.s16 %0, [%2+0];\n\t"
+ " ld.global.s16 start1, [%2+2];\n\t"
+ " ld.global.s16 start2, [%2+4];\n\t"
+ " max.s32 %1, %1, 0;\n\t"
+ " min.u32 %0, %0, start1;\n\t"
+ " max.s32 %1, %1, start1;\n\t"
+ " min.u32 %0, %0, start2;\n\t"
+ " max.s32 %1, %1, start2;\n\t"
+ "}\n\t"
+ : "=r"(startIndex), "=r"(endIndex)
+ : POINTER_CONSTRAINT(cellStart + key));
+
+ // comparison must be unsigned to skip cells with negative startIndex
+ for (uint32_t j = startIndex; j < endIndex; ++j)
+ {
+ if (j != i) // avoid same particle
+ {
+ float dx = posX - current(j, 0);
+ float dy = posY - current(j, 1);
+ float dz = posZ - current(j, 2);
+
+ float distSqr = dx * dx + dy * dy + dz * dz;
+ if (distSqr > cdistSq)
+ continue;
+
+ if (useRestPositions)
+ {
+ const int32_t jndex = sortedKeys[j] & UINT16_MAX;
+ assert(jndex < gClothData.mNumParticles);
+
+ // calculate distance in rest configuration
+ const float* restJt = gFrameData.mRestPositions + jndex * 4;
+ float rx = restX - restJt[0];
+ float ry = restY - restJt[1];
+ float rz = restZ - restJt[2];
+
+ // skip pairs that are close by design (e.g. neighbors)
+ if (rx * rx + ry * ry + rz * rz <= cdistSq)
+ continue;
+ }
+
+ // premultiply ratio for weighted average
+ float ratio = fmaxf(0.0f, cdist * rsqrtf(FLT_EPSILON + distSqr) - 1.0f);
+ float scale = __fdividef(ratio * ratio, FLT_EPSILON + posW + current(j, 3));
+
+ deltaX += scale * dx;
+ deltaY += scale * dy;
+ deltaZ += scale * dz;
+ deltaW += ratio;
+ }
+ }
+ }
+
+ const float stiffness = gFrameData.mSelfCollisionStiffness * posW;
+ float scale = __fdividef(stiffness, deltaW);
+
+ // apply collision impulse
+ // written to the staging arrays, not shared memory, so concurrent reads
+ // in this loop still see the pre-impulse positions
+ xPtr[index] += deltaX * scale;
+ yPtr[index] += deltaY * scale;
+ zPtr[index] += deltaZ * scale;
+
+ assert(!isnan(xPtr[index] + yPtr[index] + zPtr[index]));
+ }
+ __syncthreads();
+
+ // copy temporary particle array back to shared mem
+ // (need to copy whole array)
+ for (int32_t i = threadIdx.x; i < numParticles; i += blockDim.x)
+ {
+ current(i, 0) = xPtr[i];
+ current(i, 1) = yPtr[i];
+ current(i, 2) = zPtr[i];
+ current(i, 3) = wPtr[i];
+ }
+
+ // unmark occupied cells to empty again (faster than clearing all the cells)
+ for (int32_t i = threadIdx.x; i < numIndices; i += blockDim.x)
+ {
+ int32_t key = sortedKeys[i] >> 16;
+ cellStart[key] = 0xffff;
+ cellStart[key + 1] = 0xffff;
+ }
+ __syncthreads();
+}
diff --git a/NvCloth/src/cuda/CuSolver.cpp b/NvCloth/src/cuda/CuSolver.cpp
new file mode 100644
index 0000000..7927a42
--- /dev/null
+++ b/NvCloth/src/cuda/CuSolver.cpp
@@ -0,0 +1,677 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2017 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#include "CuSolver.h"
+#include "CuCloth.h"
+#include "../ClothImpl.h"
+#include "CuFabric.h"
+#include "CuFactory.h"
+#include "CuSolverKernel.h"
+#include "CuContextLock.h"
+#include "CuCheckSuccess.h"
+#include "../IterationState.h"
+#include <PsSort.h>
+#include <foundation/PxProfiler.h>
+
+#if NV_NVTX
+#include "nvToolsExt.h"
+#endif
+
+#define NV_CUPTI 0
+
+#if NV_CUPTI
+#pragma warning(disable : 4324)
+#include "cupti_activity.h"
+#include "cupti_metrics.h"
+#include "cupti_driver_cbid.h"
+#include <cstdio>
+
+namespace
+{
+// CUPTI activity-buffer callbacks (active only when NV_CUPTI is enabled).
+
+// Hands CUPTI a 32 MiB, 16-byte-aligned buffer for activity records.
+void CUPTIAPI bufferRequested(uint8_t** buffer, size_t* size, size_t* maxNumRecords)
+{
+    *buffer = (uint8_t*)PX_ALIGNED16_ALLOC(*size = 32 * 1024 * 1024);
+    *maxNumRecords = 0; // 0 = let CUPTI fill the buffer with as many records as fit
+}
+
+// Drains a completed activity buffer: sums the GPU runtime of all records that
+// belong to the cloth solver kernel, prints the average, and frees the buffer.
+void CUPTIAPI bufferCompleted(CUcontext context, uint32_t streamId, uint8_t* buffer, size_t /*size*/, size_t validSize)
+{
+    CUpti_Activity* record = NULL;
+    uint64_t totalTime = 0, numRecords = 0;
+    while (CUPTI_SUCCESS == cuptiActivityGetNextRecord(buffer, validSize, &record))
+    {
+        // only kernel-launch records are of interest
+        if (record->kind != CUPTI_ACTIVITY_KIND_KERNEL)
+            continue;
+
+        // skip kernels other than the cloth solver kernel
+        CUpti_ActivityKernel3* kernel = (CUpti_ActivityKernel3*)record;
+        if (strcmp(kernel->name, cloth::getKernelFunctionName()))
+            continue;
+
+        totalTime += kernel->end - kernel->start; // CUPTI timestamps are in nanoseconds
+        ++numRecords;
+    }
+
+    if (numRecords)
+    {
+        printf("%u kernel records, average runtime is %u ns\n", unsigned(numRecords), unsigned(totalTime / numRecords));
+    }
+
+    // report records CUPTI dropped because the buffer filled up
+    size_t dropped;
+    cuptiActivityGetNumDroppedRecords(context, streamId, &dropped);
+    if (dropped)
+    {
+        printf("Dropped %u activity records\n", unsigned(dropped));
+    }
+
+    PX_ALIGNED16_FREE(buffer);
+}
+
+// Measures average "active_cycles" per cuLaunchKernel call via the CUPTI
+// event API. Subscribes to the driver-API launch callback; on entry to each
+// launch it enables event collection, on exit it reads and accumulates the
+// counter. The average is printed on destruction.
+struct CuptiEventProfiler
+{
+    CuptiEventProfiler() : mActiveCycles(0), mNumEvents(0)
+    {
+        // set up the event group for "active_cycles" on the current device/context
+        CUdevice device = 0;
+        cuCtxGetDevice(&device);
+        CUcontext context = 0;
+        cuCtxGetCurrent(&context);
+        cuptiEventGetIdFromName(device, "active_cycles", &mEventId);
+        cuptiEventGroupCreate(context, &mEventGroup, 0);
+        cuptiEventGroupAddEvent(mEventGroup, mEventId);
+        // get called around every cuLaunchKernel in this process
+        cuptiSubscribe(&mSubscriber, eventCallback, this);
+        cuptiEnableCallback(1, mSubscriber, CUPTI_CB_DOMAIN_DRIVER_API, CUPTI_DRIVER_TRACE_CBID_cuLaunchKernel);
+    }
+
+    ~CuptiEventProfiler()
+    {
+        cuptiUnsubscribe(mSubscriber);
+        cuptiEventGroupRemoveEvent(mEventGroup, mEventId);
+        cuptiEventGroupDestroy(mEventGroup);
+        if (mNumEvents)
+        {
+            printf("%u kernel events, average active cycles is %u\n", unsigned(mNumEvents),
+                   unsigned(mActiveCycles / mNumEvents));
+        }
+    }
+
+    // static trampoline: CUPTI passes back the 'this' pointer given to cuptiSubscribe
+    static void CUPTIAPI
+    eventCallback(void* profiler, CUpti_CallbackDomain domain, CUpti_CallbackId cbid, const void* cbInfo)
+    {
+        // This callback is enabled only for launch so we shouldn't see anything else.
+        NV_CLOTH_ASSERT(domain == CUPTI_CB_DOMAIN_DRIVER_API);
+        NV_CLOTH_ASSERT(cbid == CUPTI_DRIVER_TRACE_CBID_cuLaunchKernel);
+
+        reinterpret_cast<CuptiEventProfiler*>(profiler)
+            ->eventCallback(reinterpret_cast<const CUpti_CallbackData*>(cbInfo));
+    }
+
+    void eventCallback(const CUpti_CallbackData* cbInfo)
+    {
+        // on entry, enable all the event groups being collected this pass,
+        // for metrics we collect for all instances of the event
+        if (cbInfo->callbackSite == CUPTI_API_ENTER)
+        {
+            cuCtxSynchronize();
+            cuptiSetEventCollectionMode(cbInfo->context, CUPTI_EVENT_COLLECTION_MODE_KERNEL);
+            cuptiEventGroupEnable(mEventGroup);
+        }
+
+        // on exit, read and record event values
+        if (cbInfo->callbackSite == CUPTI_API_EXIT)
+        {
+            cuCtxSynchronize();
+            uint64_t activeCycles = 0;
+            size_t bytesRead = sizeof(activeCycles);
+            cuptiEventGroupReadEvent(mEventGroup, CUPTI_EVENT_READ_FLAG_NONE, mEventId, &bytesRead, &activeCycles);
+            cuptiEventGroupDisable(mEventGroup);
+            mActiveCycles += activeCycles;
+            ++mNumEvents;
+        }
+    }
+
+    CUpti_SubscriberHandle mSubscriber;
+    CUpti_EventGroup mEventGroup;
+    CUpti_EventID mEventId;
+    uint64_t mActiveCycles; // accumulated "active_cycles" over all launches
+    uint64_t mNumEvents;    // number of launches profiled
+};
+}
+#endif
+
+using namespace nv;
+using namespace physx;
+
+// Name of the CUDA kernel entry point compiled from CuSolverKernel.cu.
+// Used for cuModuleGetFunction and to match CUPTI activity records.
+const char* cloth::getKernelFunctionName()
+{
+    return "simulateCloths";
+}
+
+namespace
+{
+const char* gKernelNames[] = { cloth::getKernelFunctionName(), };
+
+// Note: gCuProfileZoneNames has a corresponding enum list (CuProfileZoneIds) in CuSolverKernel.h.
+// Additions/deletions to gCuProfileZoneNames requires a similar action to CuProfileZoneIds.
+const char* gCuProfileZoneNames[] = {
+ "cloth::CuSolverKernel::simulateKernel", "cloth::CuSolverKernel::integrateParticles",
+ "cloth::CuSolverKernel::accelerateParticles", "cloth::CuSolverKernel::applyWind",
+ "cloth::CuSolverKernel::constrainTether", "cloth::CuSolverKernel::solveFabric",
+ "cloth::CuSolverKernel::constrainMotion", "cloth::CuSolverKernel::constrainSeparation",
+ "cloth::CuSolverKernel::collideParticles", "cloth::CuSolverKernel::selfCollideParticles",
+ "cloth::CuSolverKernel::updateSleepState", "cloth::CuSolverKernel::simulateShared",
+ "cloth::CuSolverKernel::simulateStreamed", "cloth::CuSolverKernel::simulateGlobal",
+ "cloth::CuSolverKernel::solveConstraintSet", "cloth::CuCollision::buildAccleration",
+ "cloth::CuCollision::collideCapsules", "cloth::CuCollision::collideVirtualCapsules",
+ "cloth::CuCollision::collideContinuousCapsules", "cloth::CuCollision::collideConvexes",
+ "cloth::CuCollision::collideTriangles", "cloth::CuSelfCollision::buildAccleration",
+ "cloth::CuSelfCollision::collideParticles",
+};
+}
+
+namespace
+{
+// Minimal device-memory allocator over the CUDA driver API; the caller is
+// responsible for pairing allocate() with deallocate() (no RAII here).
+// NOTE(review): assumes the matching CUDA context is current when called.
+template <typename T>
+struct CuDeviceAllocator
+{
+    CuDeviceAllocator(CUcontext ctx) : mManager(ctx)
+    {
+    }
+
+    // allocates n elements of device memory; aborts via checkSuccess on failure
+    T* allocate(size_t n)
+    {
+        CUdeviceptr result;
+        checkSuccess(cuMemAlloc(&result, n * sizeof(T)));
+        return reinterpret_cast<T*>(result);
+    }
+
+    void deallocate(T* ptr)
+    {
+        checkSuccess(cuMemFree(reinterpret_cast<CUdeviceptr>(ptr)));
+    }
+
+    CUcontext mManager;
+};
+}
+
+// Creates the CUDA solver: acquires the context (via CuContextLock base),
+// creates the stream, resolves the kernel function and its static shared
+// memory footprint, and zero-initializes the device-side cloth index.
+// The context lock is released before returning.
+cloth::CuSolver::CuSolver(CuFactory& factory)
+: CuContextLock(factory)
+, mFactory(factory)
+, mClothData(mFactory.mContext)
+, mClothDataHostCopy(mFactory.mContext)
+, mClothDataDirty(false)
+, mFrameData(mFactory.mContext)
+, mIterationData(mFactory.mContext)
+, mIterationDataBegin(0)
+, mFrameDt(0.0f)
+, mSharedMemorySize(0)
+, mSharedMemoryLimit(0)
+, mStream(0)
+, mKernelFunction(0)
+, mKernelSharedMemorySize(0)
+, mClothIndex(CuDeviceAllocator<uint32_t>(mFactory.mContext).allocate(1))
+, mInterCollisionDistance(0.0f)
+, mInterCollisionStiffness(1.0f)
+, mInterCollisionIterations(1)
+, mInterCollisionFilter(nullptr)
+, mInterCollisionScratchMem(NULL)
+, mInterCollisionScratchMemSize(0)
+, mSimulateNvtxRangeId(0)
+, mProfileBuffer(0)
+, mProfileBaseId(0)
+, mCudaError(false)
+, mCuptiEventProfiler(nullptr) // fix: member was left uninitialized when CUPTI profiling is disabled
+{
+    mFactory.mSolverCount++;
+
+    NV_CLOTH_ASSERT(CuProfileZoneIds::NUMZONES == PX_ARRAY_SIZE(gCuProfileZoneNames));
+
+    if (mCudaError)
+    {
+        CuContextLock::release();
+        return;
+    }
+
+    checkSuccess(cuStreamCreate(&mStream, 0));
+    checkSuccess(cuModuleGetFunction(&mKernelFunction, mFactory.mKernelModule, getKernelFunctionName()));
+
+    // get amount of statically allocated shared memory
+    checkSuccess(cuFuncGetAttribute(&mKernelSharedMemorySize, CU_FUNC_ATTRIBUTE_SHARED_SIZE_BYTES, mKernelFunction));
+
+    // initialize cloth index
+    checkSuccess(cuMemsetD32(mClothIndex.dev(), 0, 1));
+
+#if NV_CUPTI // fix: guard previously checked PX_CUPTI, but this file defines and uses NV_CUPTI
+    // activity (measure kernel runtime in ns)
+    CUcontext context = 0;
+    cuCtxGetCurrent(&context);
+    cuptiActivityEnableContext(context, CUPTI_ACTIVITY_KIND_KERNEL);
+    cuptiActivityRegisterCallbacks(bufferRequested, bufferCompleted);
+    // event (measure kernel active cycles)
+    mCuptiEventProfiler = NV_CLOTH_NEW(CuptiEventProfiler);
+#endif
+
+    CuContextLock::release();
+
+    mSimulateProfileEventData = nullptr;
+}
+
+// Tears the solver down: all cloths must already be removed. Re-acquires the
+// CUDA context, flushes/disables CUPTI (if enabled), frees the device-side
+// cloth index, destroys the stream and releases the scratch memory.
+cloth::CuSolver::~CuSolver()
+{
+    NV_CLOTH_ASSERT(mCloths.empty());
+
+    CuContextLock::acquire();
+
+#if NV_CUPTI // fix: guard previously checked PX_CUPTI, but setup in the ctor is gated on NV_CUPTI
+    cuptiActivityFlushAll(0);
+    cuptiActivityDisable(CUPTI_ACTIVITY_KIND_KERNEL);
+    NV_CLOTH_DELETE((CuptiEventProfiler*)mCuptiEventProfiler);
+#endif
+
+    CuDeviceAllocator<uint32_t>(mFactory.mContext).deallocate(mClothIndex.get());
+
+    if (mStream)
+        checkSuccess(cuStreamDestroy(mStream));
+
+    if (mInterCollisionScratchMem)
+        NV_CLOTH_FREE(mInterCollisionScratchMem);
+
+    mFactory.mSolverCount--;
+}
+
+// Refreshes the host-side copy of the kernel argument struct with the current
+// device pointers; must run with the CUDA context acquired (see header).
+// Called whenever the backing containers may have been reallocated.
+void cloth::CuSolver::updateKernelData()
+{
+    mKernelDataHost.mClothIndex = mClothIndex.get();
+    mKernelDataHost.mClothData = mClothData.begin().get();
+    mKernelDataHost.mFrameData = getDevicePointer(mFrameData);
+
+    mKernelDataHost.mProfileBuffer = mProfileBuffer;
+    mKernelDataHost.mProfileBaseId = mProfileBaseId;
+
+}
+
+namespace
+{
+// Orders cloths by estimated simulation cost (particles x solver frequency),
+// most expensive first, so heavy cloths are scheduled onto SMs early.
+struct ClothSimCostGreater
+{
+    bool operator()(const cloth::CuCloth* left, const cloth::CuCloth* right) const
+    {
+        return left->mNumParticles * left->mSolverFrequency > right->mNumParticles * right->mSolverFrequency;
+    }
+};
+}
+
+void cloth::CuSolver::addCloth(Cloth* cloth)
+{
+ CuCloth& cuCloth = static_cast<CuClothImpl&>(*cloth).mCloth;
+
+ NV_CLOTH_ASSERT(mCloths.find(&cuCloth) == mCloths.end());
+
+ mCloths.pushBack(&cuCloth);
+ // trigger update of mClothData array
+ cuCloth.notifyChanged();
+
+ // sort cloth instances by size
+ shdfnd::sort(mCloths.begin(), mCloths.size(), ClothSimCostGreater(), NonTrackingAllocator());
+
+ CuContextLock contextLock(mFactory);
+
+ // resize containers and update kernel data
+ mClothDataHostCopy.resize(mCloths.size());
+ mClothData.resize(mCloths.size());
+ mFrameData.resize(mCloths.size());
+ updateKernelData();
+}
+
+void cloth::CuSolver::removeCloth(Cloth* cloth)
+{
+ CuCloth& cuCloth = static_cast<CuClothImpl&>(*cloth).mCloth;
+
+ ClothVector::Iterator begin = mCloths.begin(), end = mCloths.end();
+ ClothVector::Iterator it = mCloths.find(&cuCloth);
+
+ if (it == end)
+ return; // not found
+
+ uint32_t index = uint32_t(it - begin);
+
+ mCloths.remove(index);
+ mClothDataHostCopy.remove(index);
+ mClothData.resize(mCloths.size());
+ mClothDataDirty = true;
+}
+
+// Solver chunk interface: the CUDA solver always simulates in exactly one
+// chunk (one kernel launch covering all cloths).
+
+// Starts a frame; returns false (and does nothing) when no cloths are added.
+bool cloth::CuSolver::beginSimulation(float dt)
+{
+    if (mCloths.empty())
+        return false;
+    mFrameDt = dt;
+    beginFrame();
+    return true;
+}
+
+// Executes chunk 'idx'; only idx == 0 is valid for this solver.
+void cloth::CuSolver::simulateChunk(int idx)
+{
+    PX_UNUSED(idx);
+    NV_CLOTH_ASSERT(!mCloths.empty());
+    NV_CLOTH_ASSERT(idx == 0);
+    CuSolver::executeKernel();
+}
+
+// Finishes the frame started by beginSimulation().
+void cloth::CuSolver::endSimulation()
+{
+    NV_CLOTH_ASSERT(!mCloths.empty());
+    CuSolver::endFrame();
+}
+
+// The GPU path always runs as a single chunk.
+int cloth::CuSolver::getSimulationChunkCount() const
+{
+    return 1;
+}
+
+// Per-frame setup: refreshes per-cloth data, (re)tunes the kernel launch
+// configuration when cloth data changed, and builds the per-frame and
+// per-iteration argument arrays consumed by the kernel.
+void cloth::CuSolver::beginFrame()
+{
+    CuContextLock contextLock(mFactory);
+
+    mSimulateProfileEventData = NV_CLOTH_PROFILE_START_CROSSTHREAD("cloth::CuSolver::simulate", 0);
+
+    // remember old buffer base so we can detect reallocation below
+    CuIterationData* iterationDataBegin = mIterationData.empty() ? 0 : &mIterationData.front();
+
+    mFrameData.resize(0);
+    mIterationData.resize(0);
+
+    // update cloth data
+    ClothVector::Iterator cIt, cEnd = mCloths.end();
+    CuHostVector<CuClothData>::Type::Iterator dIt = mClothDataHostCopy.begin();
+    for (cIt = mCloths.begin(); cIt != cEnd; ++cIt, ++dIt)
+        mClothDataDirty |= (*cIt)->updateClothData(*dIt);
+
+    if (mClothDataDirty)
+    {
+        /* find optimal number of cloths per SM */
+
+        CUdevice device = 0;
+        checkSuccess(cuCtxGetDevice(&device));
+        int numSMs = 0;
+        checkSuccess(cuDeviceGetAttribute(&numSMs, CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT, device));
+        int sharedMemoryPerBlock = 0;
+        checkSuccess(cuDeviceGetAttribute(&sharedMemoryPerBlock, CU_DEVICE_ATTRIBUTE_SHARED_MEMORY_PER_BLOCK, device));
+
+        // at least 192 threads per block (e.g. CuCollision::buildAcceleration)
+        uint32_t maxClothsPerSM = std::min(mFactory.mMaxThreadsPerBlock / 192, uint32_t(mCloths.size() + numSMs - 1) / numSMs);
+
+        // tuning parameters: relative performance per numSharedPositions
+        float weights[3] = { 0.4f, 0.8f, 1.0f }; //TODO check if these are the newest weights (APEX has different values)
+
+        // try all possible number of cloths per SM and estimate performance
+        float maxWeightSum = 0.0f;
+        uint32_t numClothsPerSM = 0;
+        for (uint32_t i = 1; i <= maxClothsPerSM; ++i)
+        {
+            // shared memory left per cloth after the kernel's static allocation
+            uint32_t sharedMemoryLimit = (sharedMemoryPerBlock / i) - mKernelSharedMemorySize;
+
+            float weightSum = 0.0f;
+            for (cIt = mCloths.begin(); cIt != cEnd; ++cIt)
+            {
+                uint32_t sharedMemorySize = (*cIt)->mSharedMemorySize;
+                uint32_t positionsSize = (*cIt)->mNumParticles * sizeof(PxVec4);
+
+                // this configuration doesn't fit at all: reject (checked via cIt == cEnd below)
+                if (sharedMemorySize > sharedMemoryLimit)
+                    break;
+
+                // how many of cur/prev position arrays (0..2) fit in shared memory
+                uint32_t numSharedPositions = std::min(2u, (sharedMemoryLimit - sharedMemorySize) / positionsSize);
+
+                weightSum += weights[numSharedPositions] * positionsSize;
+            }
+            // tuning parameter: inverse performance for running i cloths per SM
+            weightSum *= 2.0f + i;
+
+            // keep the best feasible configuration
+            if (cIt == cEnd && weightSum > maxWeightSum)
+            {
+                maxWeightSum = weightSum;
+                numClothsPerSM = i;
+            }
+        }
+        NV_CLOTH_ASSERT(numClothsPerSM);
+
+        // update block size (rounded down to a multiple of the 32-thread warp size)
+        uint32_t numThreadsPerBlock = mFactory.mMaxThreadsPerBlock / numClothsPerSM & ~31;
+        if (mFactory.mNumThreadsPerBlock != numThreadsPerBlock)
+        {
+            checkSuccess(
+                cuFuncSetBlockShape(mKernelFunction, int(mFactory.mNumThreadsPerBlock = numThreadsPerBlock), 1, 1));
+        }
+
+        // remember num cloths per SM in terms of max shared memory per block
+        mSharedMemoryLimit = (sharedMemoryPerBlock / numClothsPerSM) - mKernelSharedMemorySize;
+    }
+
+    // build per-frame and per-iteration data for every cloth
+    uint32_t maxSharedMemorySize = 0;
+    for (cIt = mCloths.begin(); cIt != cEnd; ++cIt)
+    {
+        CuCloth& cloth = **cIt;
+
+        uint32_t sharedMemorySize = cloth.mSharedMemorySize;
+        uint32_t positionsSize = cloth.mNumParticles * sizeof(PxVec4);
+
+        uint32_t numSharedPositions = std::min(2u, (mSharedMemoryLimit - sharedMemorySize) / positionsSize);
+
+        maxSharedMemorySize = std::max(maxSharedMemorySize, sharedMemorySize + numSharedPositions * positionsSize);
+
+        IterationStateFactory factory(cloth, mFrameDt);
+        IterationState<Simd4f> state = factory.create<Simd4f>(cloth);
+
+        mFrameData.pushBack(CuFrameData(cloth, numSharedPositions, state, mIterationDataBegin + mIterationData.size()));
+
+        // one CuIterationData entry per solver iteration of this frame
+        while (state.mRemainingIterations)
+        {
+            mIterationData.pushBack(CuIterationData(state));
+            state.update();
+        }
+    }
+    mSharedMemorySize = maxSharedMemorySize;
+
+    // add dummy element because we read past the end
+    mIterationData.pushBack(CuIterationData());
+
+    if (&mIterationData.front() != iterationDataBegin)
+    {
+        // mIterationData grew, update pointers
+        iterationDataBegin = getDevicePointer(mIterationData);
+
+        // patch the device-side mIterationData pointers stored in each frame entry
+        ptrdiff_t diff = (char*)iterationDataBegin - (char*)mIterationDataBegin;
+        CuHostVector<CuFrameData>::Type::Iterator fIt = mFrameData.begin(), fEnd;
+        for (fEnd = mFrameData.end(); fIt != fEnd; ++fIt)
+            reinterpret_cast<const char*&>(fIt->mIterationData) += diff;
+
+        mIterationDataBegin = iterationDataBegin;
+    }
+}
+
+// Stream callback invoked by the driver when the solver kernel completes.
+// Currently a no-op; the task-reference bookkeeping it used to do is disabled.
+void CUDA_CB cloth::CuSolver::KernelFinished(CUstream stream, CUresult status, void *userData)
+{
+    PX_UNUSED(stream);
+    PX_UNUSED(status);
+    //static_cast<CuSolver*>(userData)->mEndSimulationTask.removeReference();
+    PX_UNUSED(userData);
+}
+
+// Uploads dirty cloth data and launches the solver kernel asynchronously on
+// mStream (one block per cloth). On launch failure the solver enters an error
+// state so cloth instances can be migrated to the software path.
+void cloth::CuSolver::executeKernel()
+{
+    CuContextLock contextLock(mFactory);
+
+/*#if PX_PROFILE //We don't have a gpu distapcher anymore
+    // Note: The profile buffer is valid only within the cuda launch context
+    void* profileBuffer = getDispatcher().getCurrentProfileBuffer();
+    if (mProfileBuffer != profileBuffer && mProfileBaseId + 1)
+    {
+        mProfileBuffer = profileBuffer;
+        updateKernelData();
+    }
+#endif*/
+
+    // upload per-cloth data if it changed since the last launch
+    if (mClothDataDirty)
+    {
+        NV_CLOTH_ASSERT(mClothDataHostCopy.size() == mClothData.size());
+        size_t numBytes = mClothData.size() * sizeof(CuClothData);
+        checkSuccess(cuMemcpyHtoDAsync(mClothData.begin().dev(), mClothDataHostCopy.begin(), numBytes, mStream));
+        mClothDataDirty = false;
+    }
+
+#if 0
+    static int frame = 0;
+    if (++frame == 100)
+        record(*this);
+#endif
+
+    void* Arguments[] = {(void*)&mKernelDataHost};
+
+    // launch kernel
+    CUresult result = cuLaunchKernel(mKernelFunction, uint32_t(mCloths.size()), 1, 1,
+        mFactory.mNumThreadsPerBlock, 1, 1, mSharedMemorySize, mStream, Arguments, 0);
+    cuStreamAddCallback(mStream, &cloth::CuSolver::KernelFinished, this, 0);
+
+
+#if PX_DEBUG
+    // in debug builds check kernel result
+    checkSuccess(result);
+    checkSuccess(cuStreamSynchronize(mStream));
+#endif
+
+    // mark the solver as being in an error state
+    // all cloth instances should be migrated to software
+    if (result != CUDA_SUCCESS)
+        mCudaError = true;
+}
+
+// Waits for the kernel to finish, then copies per-frame results back into the
+// cloth instances (constraints consumed, collision shape double-buffers
+// swapped, bounds and sleep counters updated) and runs host inter-collision.
+void cloth::CuSolver::endFrame()
+{
+    checkSuccess(cuStreamSynchronize(mStream));
+
+    CuHostVector<CuFrameData>::Type::ConstIterator fIt = mFrameData.begin();
+    ClothVector::Iterator cIt, cEnd = mCloths.end();
+    for (cIt = mCloths.begin(); cIt != cEnd; ++cIt, ++fIt)
+    {
+        CuCloth& cloth = **cIt;
+
+        cloth.mHostParticlesDirty = false;
+        cloth.mDeviceParticlesDirty = false;
+
+        // constraint targets were consumed this frame
+        cloth.mMotionConstraints.pop();
+        cloth.mMotionConstraints.mHostCopy.resize(0);
+
+        cloth.mSeparationConstraints.pop();
+        cloth.mSeparationConstraints.mHostCopy.resize(0);
+
+        // make this frame's target collision shapes next frame's start shapes
+        if (!cloth.mTargetCollisionSpheres.empty())
+        {
+            shdfnd::swap(cloth.mStartCollisionSpheres, cloth.mTargetCollisionSpheres);
+            cloth.mTargetCollisionSpheres.resize(0);
+        }
+
+        if (!cloth.mTargetCollisionPlanes.empty())
+        {
+            shdfnd::swap(cloth.mStartCollisionPlanes, cloth.mTargetCollisionPlanes);
+            cloth.mTargetCollisionPlanes.resize(0);
+        }
+
+        if (!cloth.mTargetCollisionTriangles.empty())
+        {
+            shdfnd::swap(cloth.mStartCollisionTriangles, cloth.mTargetCollisionTriangles);
+            cloth.mTargetCollisionTriangles.resize(0);
+        }
+
+        // mParticleBounds stores (upper, -lower) per axis, hence the signs below
+        for (uint32_t i = 0; i < 3; ++i)
+        {
+            float upper = fIt->mParticleBounds[i * 2 + 0];
+            float negativeLower = fIt->mParticleBounds[i * 2 + 1];
+            cloth.mParticleBoundsCenter[i] = (upper - negativeLower) * 0.5f;
+            cloth.mParticleBoundsHalfExtent[i] = (upper + negativeLower) * 0.5f;
+        }
+
+        cloth.mSleepPassCounter = fIt->mSleepPassCounter;
+        cloth.mSleepTestCounter = fIt->mSleepTestCounter;
+    }
+
+    interCollision();
+
+    NV_CLOTH_PROFILE_STOP_CROSSTHREAD(mSimulateProfileEventData, "cloth::CuSolver::simulate", 0);
+}
+
+// Runs cloth-vs-cloth collision on the CPU (SwInterCollision) using the
+// host-side particle copies; marks device particles dirty so they are
+// re-uploaded next frame. No-op unless distance, iterations and a filter
+// callback are all configured.
+void cloth::CuSolver::interCollision()
+{
+    if (!mInterCollisionIterations || mInterCollisionDistance == 0.0f)
+        return;
+    if (mInterCollisionFilter == nullptr)
+    {
+        NV_CLOTH_LOG_WARNING("Inter collision will not work unless an inter collision filter is set using Solver::setInterCollisionFilter.");
+        return;
+    }
+
+    typedef SwInterCollision<Simd4f> SwInterCollision;
+
+    // rebuild cloth instance array
+    mInterCollisionInstances.resize(0);
+    for (uint32_t i = 0, n = mCloths.size(); i < n; ++i)
+    {
+        CuCloth& cloth = *mCloths[i];
+
+        // impulse scale so the full correction is spread over the frame's iterations
+        float elasticity = 1.0f / mFrameData[i].mNumIterations;
+        NV_CLOTH_ASSERT(!cloth.mHostParticlesDirty);
+        PxVec4* particles = cloth.mParticlesHostCopy.begin();
+        // restrict to the self-collision index subset when one is present
+        uint32_t* indices = NULL, numIndices = cloth.mNumParticles;
+        if (!cloth.mSelfCollisionIndices.empty())
+        {
+            indices = cloth.mSelfCollisionIndicesHost.begin();
+            numIndices = uint32_t(cloth.mSelfCollisionIndices.size());
+        }
+
+        mInterCollisionInstances.pushBack(SwInterCollisionData(
+            particles, particles + cloth.mNumParticles, numIndices, indices, cloth.mTargetMotion,
+            cloth.mParticleBoundsCenter, cloth.mParticleBoundsHalfExtent, elasticity, cloth.mUserData));
+
+        // host particles were modified; device copy must be refreshed
+        cloth.mDeviceParticlesDirty = true;
+    }
+
+    uint32_t requiredTempMemorySize = uint32_t(SwInterCollision::estimateTemporaryMemory(
+        &mInterCollisionInstances[0], uint32_t(mInterCollisionInstances.size())));
+
+    // realloc temp memory if necessary
+    if (mInterCollisionScratchMemSize < requiredTempMemorySize)
+    {
+        if (mInterCollisionScratchMem)
+            NV_CLOTH_FREE(mInterCollisionScratchMem);
+
+        mInterCollisionScratchMem = NV_CLOTH_ALLOC(requiredTempMemorySize, "cloth::SwSolver::mInterCollisionScratchMem");
+        mInterCollisionScratchMemSize = requiredTempMemorySize;
+    }
+
+    SwKernelAllocator allocator(mInterCollisionScratchMem, mInterCollisionScratchMemSize);
+
+    // run inter-collision
+    SwInterCollision(mInterCollisionInstances.begin(), mInterCollisionInstances.size(), mInterCollisionDistance,
+                     mInterCollisionStiffness, mInterCollisionIterations, mInterCollisionFilter, allocator)();
+}
\ No newline at end of file
diff --git a/NvCloth/src/cuda/CuSolver.h b/NvCloth/src/cuda/CuSolver.h
new file mode 100644
index 0000000..8e256a7
--- /dev/null
+++ b/NvCloth/src/cuda/CuSolver.h
@@ -0,0 +1,162 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2017 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#pragma once
+
+#include "NvCloth/Solver.h"
+#include "CuClothData.h"
+#include "CuPinnedAllocator.h"
+#include "CuContextLock.h"
+#include "CuDeviceVector.h"
+#include "../SwInterCollision.h"
+#include "CuSolverKernel.h"
+
+namespace nv
+{
+
+namespace cloth
+{
+
+class CuCloth;
+class CuFabric;
+struct PhaseConfig;
+struct CuKernelData;
+
+// CUDA implementation of the cloth Solver interface. Owns the stream, kernel
+// handle and all per-cloth host/device buffers; simulates every added cloth
+// with a single kernel launch per frame. Inherits CuContextLock privately to
+// manage CUDA context acquisition.
+class CuSolver : private CuContextLock, public Solver
+{
+    CuSolver(const CuSolver&); // not implemented
+    CuSolver& operator = (const CuSolver&); // not implemented
+
+public:
+    CuSolver(CuFactory&);
+    ~CuSolver();
+
+    virtual void addCloth(Cloth*);
+    virtual void removeCloth(Cloth*);
+
+    // functions executing the simulation work.
+    virtual bool beginSimulation(float dt);
+    virtual void simulateChunk(int idx);
+    virtual void endSimulation();
+    virtual int getSimulationChunkCount() const override;
+
+    // true after a failed kernel launch; cloths should migrate to software
+    virtual bool hasError() const
+    {
+        return mCudaError;
+    }
+
+    virtual void setInterCollisionDistance(float distance)
+    {
+        mInterCollisionDistance = distance;
+    }
+    virtual float getInterCollisionDistance() const
+    {
+        return mInterCollisionDistance;
+    }
+    virtual void setInterCollisionStiffness(float stiffness)
+    {
+        mInterCollisionStiffness = stiffness;
+    }
+    virtual float getInterCollisionStiffness() const
+    {
+        return mInterCollisionStiffness;
+    }
+    virtual void setInterCollisionNbIterations(uint32_t nbIterations)
+    {
+        mInterCollisionIterations = nbIterations;
+    }
+    virtual uint32_t getInterCollisionNbIterations() const
+    {
+        return mInterCollisionIterations;
+    }
+    virtual void setInterCollisionFilter(InterCollisionFilter filter)
+    {
+        mInterCollisionFilter = filter;
+    }
+
+ private:
+    void updateKernelData(); // context needs to be acquired
+
+    // simulate helper functions
+    void beginFrame();
+    static void CUDA_CB KernelFinished(CUstream stream, CUresult status, void *userData);
+    void executeKernel();
+    void endFrame();
+
+    void interCollision();
+
+ private:
+    CuFactory& mFactory;
+
+    typedef Vector<CuCloth*>::Type ClothVector;
+    ClothVector mCloths; // sorted by estimated simulation cost (see addCloth)
+
+    // per-cloth data: device array plus host staging copy, uploaded when dirty
+    CuDeviceVector<CuClothData> mClothData;
+    CuHostVector<CuClothData, CU_MEMHOSTALLOC_WRITECOMBINED>::Type mClothDataHostCopy;
+    bool mClothDataDirty;
+
+    // per-frame data, device-mapped so the kernel reads/writes it directly
+    CuHostVector<CuFrameData, CU_MEMHOSTALLOC_DEVICEMAP>::Type mFrameData;
+
+    // one entry per solver iteration, rebuilt each frame in beginFrame()
+    CuHostVector<CuIterationData, CU_MEMHOSTALLOC_DEVICEMAP>::Type mIterationData;
+    CuIterationData* mIterationDataBegin; // corresponding device ptr
+
+    float mFrameDt; // delta time of the frame currently being simulated
+
+    uint32_t mSharedMemorySize;  // dynamic shared memory passed to the launch
+    uint32_t mSharedMemoryLimit; // per-cloth shared memory budget (see beginFrame)
+
+    CUstream mStream;
+    CUfunction mKernelFunction;
+    int mKernelSharedMemorySize; // statically allocated shared memory of the kernel
+    CuKernelData mKernelDataHost; // host copy of the kernel argument struct
+    CuDevicePointer<uint32_t> mClothIndex; // device counter for cloth scheduling
+
+    // cloth-vs-cloth collision settings (processed on the CPU, see interCollision)
+    float mInterCollisionDistance;
+    float mInterCollisionStiffness;
+    uint32_t mInterCollisionIterations;
+    InterCollisionFilter mInterCollisionFilter;
+    void* mInterCollisionScratchMem;
+    uint32_t mInterCollisionScratchMemSize;
+    Vector<SwInterCollisionData>::Type mInterCollisionInstances;
+
+    uint64_t mSimulateNvtxRangeId;
+
+    void* mProfileBuffer;
+    uint32_t mProfileBaseId;
+
+    bool mCudaError; // sticky error flag set on failed kernel launch
+
+    void* mCuptiEventProfiler; // CuptiEventProfiler*, only used when NV_CUPTI is enabled
+
+    friend void record(const CuSolver&);
+
+    void* mSimulateProfileEventData;
+};
+}
+}
diff --git a/NvCloth/src/cuda/CuSolverKernel.cu b/NvCloth/src/cuda/CuSolverKernel.cu
new file mode 100644
index 0000000..3517193
--- /dev/null
+++ b/NvCloth/src/cuda/CuSolverKernel.cu
@@ -0,0 +1,1443 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2017 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#include "CuSolverKernel.h"
+#include "CuClothData.h"
+#include "CuPhaseConfig.h"
+
+#include <new> // placement new
+
+/*
+ For detailed comments about the algorithm check SwSolverKernel.cpp (or the documentation)
+ The CPU implementation is generally easier to read, and comments are not duplicated in other implementations.
+ Only CUDA implementation specific comments are left in this implementation.
+*/
+
+#ifndef FLT_EPSILON
+#define FLT_EPSILON 1.192092896e-07F
+#endif
+#ifndef FLT_MAX
+#define FLT_MAX 3.402823466e+38F
+#endif
+
+// Converting pointers to shared/global addresses is faster than doing generic loads on SM50
+#define CONVERT_ADDRESSES (__CUDA_ARCH__ >= 500)
+
+#if !defined(_WIN64) && !defined(__x86_64__)
+#define POINTER_CONSTRAINT "r"
+#define POINTER_TYPE "u32"
+#else
+#define POINTER_CONSTRAINT "l"
+#define POINTER_TYPE "u64"
+#endif
+
+#ifndef __CUDA_ARCH__
+#define assert(x)
+#endif
+
+extern "C" {
+extern _CRTIMP __host__ __device__ int __cdecl printf(const char*, ...);
+}
+
+using namespace nv;
+
+// shared memory copy (instead of relying on constant cache)
+__shared__ cloth::CuClothData gClothData;
+__shared__ cloth::CuFrameData gFrameData;
+__shared__ cloth::CuIterationData gIterData;
+
+// Our way to create stream local variables
+__shared__ void* gProfileBuffer;
+__shared__ uint32_t gProfileBaseId;
+
+static const uint32_t gCuClothDataSize = sizeof(cloth::CuClothData) / sizeof(float);
+static const uint32_t gCuFrameDataSize = sizeof(cloth::CuFrameData) / sizeof(float);
+static const uint32_t gCuIterationDataSize = sizeof(cloth::CuIterationData) / sizeof(float);
+static const uint32_t gCuPhaseConfigSize = sizeof(cloth::CuPhaseConfig) / sizeof(float);
+
+/*
+Memory block for all temporary data in shared memory (in 'allocation' order).
+The numbers indicate the allocation slot if a stack allocator were used.
+0) simulate*()::configs (numPhases*sizeof(CuPhaseConfig))
+1) simulate*()::particles ({0,1,2}*4*numParticles floats)
+2) CuCollision::mCapsuleIndices, mCapsuleMasks, mConvexMasks (numCapsules*4+numConvexes ints)
+3) CuCollision::mPrevData (4*numSpheres+10*numCones floats)
+4) CuCollision::collideConvexes() (4*numPlanes floats)
+4) CuCollision::collideTriangles() (19*numTriangles floats)
+4) CuCollision::mCurData::Spheres (4*numSpheres floats)
+5) computeParticleBounds()::dst (192 floats written, 208 float read)
+5) computeSphereBounds()::dst (192 floats written, 208 floats read)
+5) CuCollision::mCurData::Cones (10*numCones floats)
+6) CuCollision::mShapeGrid (2*6*sGridSize=96 floats)
+4) CuSelfCollision::buildAcceleration()::buffer (34*16=544 ints)
+*/
+extern __shared__ float gSharedMemory[];
+extern __shared__ int32_t gSharedSigned[];
+extern __shared__ uint32_t gSharedUnsigned[];
+
+/***************** Pointer Wrappers **********************/
+enum AddressSpace
+{
+ Shared,
+ Global
+};
+
+template <AddressSpace, typename T>
+__device__ T load(const T* ptr);
+template <AddressSpace, typename T>
+__device__ void store(T* ptr, const T& value);
+
+#if !CONVERT_ADDRESSES
+// Generic fallback used when CONVERT_ADDRESSES is disabled: plain C++
+// dereferences; the compiler emits generic (state-space-less) loads/stores.
+template <AddressSpace, typename T>
+__device__ T load(const T* ptr)
+{
+ return *ptr;
+}
+template <AddressSpace, typename T>
+__device__ void store(T* ptr, const T& value)
+{
+ *ptr = value;
+}
+#else
+// PTX specializations: emit ld/st with an explicit state-space qualifier
+// (.shared / .global). The pointers passed here must already be state-space
+// addresses (produced by Convert<>::to / cvta.to); see the comment at
+// CONVERT_ADDRESSES — qualified accesses are faster than generic ones on SM50.
+template <>
+__device__ float load<Shared>(const float* ptr)
+{
+ float value;
+ asm("ld.shared.f32 %0, [%1];" : "=f"(value) : POINTER_CONSTRAINT(ptr));
+ return value;
+}
+template <>
+__device__ int32_t load<Shared>(const int32_t* ptr)
+{
+ int32_t value;
+ asm("ld.shared.s32 %0, [%1];" : "=r"(value) : POINTER_CONSTRAINT(ptr));
+ return value;
+}
+template <>
+__device__ uint32_t load<Shared>(const uint32_t* ptr)
+{
+ uint32_t value;
+ asm("ld.shared.u32 %0, [%1];" : "=r"(value) : POINTER_CONSTRAINT(ptr));
+ return value;
+}
+// Stores declare a "memory" clobber so the compiler does not reorder or
+// cache shared-memory accesses across them.
+template <>
+__device__ void store<Shared>(int32_t* ptr, const int32_t& value)
+{
+ asm("st.shared.s32 [%0], %1;" : : POINTER_CONSTRAINT(ptr), "r"(value) : "memory");
+}
+template <>
+__device__ void store<Shared>(float* ptr, const float& value)
+{
+ asm("st.shared.f32 [%0], %1;" : : POINTER_CONSTRAINT(ptr), "f"(value) : "memory");
+}
+template <>
+__device__ void store<Shared>(uint32_t* ptr, const uint32_t& value)
+{
+ asm("st.shared.u32 [%0], %1;" : : POINTER_CONSTRAINT(ptr), "r"(value) : "memory");
+}
+template <>
+__device__ float load<Global>(const float* ptr)
+{
+ float value;
+ asm("ld.global.f32 %0, [%1];" : "=f"(value) : POINTER_CONSTRAINT(ptr));
+ return value;
+}
+template <>
+__device__ int32_t load<Global>(const int32_t* ptr)
+{
+ int32_t value;
+ asm("ld.global.s32 %0, [%1];" : "=r"(value) : POINTER_CONSTRAINT(ptr));
+ return value;
+}
+template <>
+__device__ uint32_t load<Global>(const uint32_t* ptr)
+{
+ uint32_t value;
+ asm("ld.global.u32 %0, [%1];" : "=r"(value) : POINTER_CONSTRAINT(ptr));
+ return value;
+}
+template <>
+__device__ void store<Global>(int32_t* ptr, const int32_t& value)
+{
+ asm("st.global.s32 [%0], %1;" : : POINTER_CONSTRAINT(ptr), "r"(value) : "memory");
+}
+template <>
+__device__ void store<Global>(float* ptr, const float& value)
+{
+ asm("st.global.f32 [%0], %1;" : : POINTER_CONSTRAINT(ptr), "f"(value) : "memory");
+}
+template <>
+__device__ void store<Global>(uint32_t* ptr, const uint32_t& value)
+{
+ asm("st.global.u32 [%0], %1;" : : POINTER_CONSTRAINT(ptr), "r"(value) : "memory");
+}
+#endif
+
+template <AddressSpace, typename>
+struct Pointer;
+
+// Proxy reference to a value living in address space S. Reads and writes go
+// through the state-space qualified load<S>()/store<S>() wrappers above, so
+// an expression like `ref += x` compiles to a qualified load, an add, and a
+// qualified store. Created by Pointer<S, T>::operator*/operator[].
+template <AddressSpace S, typename T>
+struct Reference
+{
+ template <AddressSpace, typename>
+ friend struct Reference;
+ friend struct Pointer<S, T>;
+
+ __device__ Reference()
+ {
+ }
+ __device__ Reference(const Reference& other) : mPtr(other.mPtr)
+ {
+ }
+ template <typename U>
+ __device__ Reference(const Reference<S, U>& other)
+ : mPtr(other.mPtr)
+ {
+ }
+
+ // Reference-to-reference assignment copies the VALUE, not the pointer:
+ // it loads from the source and stores through this proxy.
+ __device__ Reference& operator = (const Reference& other)
+ {
+ return *this = static_cast<T>(other);
+ }
+ template <typename U>
+ __device__ Reference& operator = (const Reference<S, U>& other)
+ {
+ return *this = static_cast<U>(other);
+ }
+
+ // Compound assignments expand to load-modify-store through the proxy.
+ __device__ Reference& operator += (const T& value)
+ {
+ return *this = *this + value;
+ }
+ __device__ Reference& operator |= (const T& value)
+ {
+ return *this = *this | value;
+ }
+ __device__ Reference& operator &= (const T& value)
+ {
+ return *this = *this & value;
+ }
+ __device__ Reference& operator *= (const T& value)
+ {
+ return *this = *this * value;
+ }
+
+ // Value read: state-space qualified load.
+ __device__ operator T() const
+ {
+ return load<S>(mPtr);
+ }
+ // Value write: state-space qualified store.
+ __device__ Reference& operator = (const T& value)
+ {
+ store<S>(mPtr, value);
+ return *this;
+ }
+
+ //private:
+ // NOTE(review): when CONVERT_ADDRESSES is on, this holds a state-space
+ // address (result of cvta.to), not a generic pointer — do not dereference
+ // it directly.
+ T* mPtr;
+
+ __device__ explicit Reference(T& ref) : mPtr(&ref)
+ {
+ }
+
+ template <typename U>
+ friend __device__ void atomicAdd(Reference& ref, U value)
+ {
+ ::atomicAdd(ref.mPtr, value);
+ }
+};
+
+// Address conversion between generic and state-space pointers.
+// Primary template is the identity (used when CONVERT_ADDRESSES is off);
+// the specializations below emit cvta/cvta.to instructions.
+template <AddressSpace S, typename T>
+struct Convert
+{
+ static __device__ T* from(T* ptr)
+ {
+ return ptr;
+ }
+ static __device__ T* to(T* ptr)
+ {
+ return ptr;
+ }
+};
+
+#if CONVERT_ADDRESSES
+// cvta.shared converts a shared-window address to generic ('from'),
+// cvta.to.shared converts generic to shared ('to'); likewise for global.
+// The "+" constraint makes ptr both input and output of the instruction.
+template <typename T>
+struct Convert<Shared, T>
+{
+ static __device__ T* from(T* ptr)
+ {
+ asm("cvta.shared." POINTER_TYPE " %0, %0;" : "+" POINTER_CONSTRAINT(ptr));
+ return ptr;
+ }
+ static __device__ T* to(T* ptr)
+ {
+ asm("cvta.to.shared." POINTER_TYPE " %0, %0;" : "+" POINTER_CONSTRAINT(ptr));
+ return ptr;
+ }
+};
+template <typename T>
+struct Convert<Global, T>
+{
+ static __device__ T* from(T* ptr)
+ {
+ asm("cvta.global." POINTER_TYPE " %0, %0;" : "+" POINTER_CONSTRAINT(ptr));
+ return ptr;
+ }
+ static __device__ T* to(T* ptr)
+ {
+ asm("cvta.to.global." POINTER_TYPE " %0, %0;" : "+" POINTER_CONSTRAINT(ptr));
+ return ptr;
+ }
+};
+#endif
+
+template <AddressSpace S, typename T>
+__device__ T* generic(const Pointer<S, T>&);
+
+// pointer forced to point to shared memory (only works for sizeof(T) <= 4)
+// Pointer tagged with its address space. Internally stores a state-space
+// address (converted on construction); dereferencing yields a Reference<S, T>
+// so all accesses use qualified ld/st. Arithmetic advances by stride()
+// elements — 1 by default, 4 for global float pointers (see specializations
+// below), which hides the array-of-float4 particle layout.
+template <AddressSpace S, typename T>
+struct Pointer
+{
+ template <AddressSpace, typename> friend struct Pointer;
+ friend __device__ T* generic<S, T>(const Pointer<S, T>&);
+ friend struct GlobalParticleData;
+
+ __device__ Pointer()
+ {
+ }
+ __device__ Pointer(const Pointer& other)
+ : mPtr(other.mPtr)
+ {
+ }
+ template <typename U>
+ __device__ Pointer(const Pointer<S, U>& other)
+ : mPtr(other.mPtr)
+ {
+ }
+
+ // construct from generic pointer
+ __device__ explicit Pointer(T* ptr)
+ : mPtr(Convert<S, T>::to(ptr))
+ {
+ }
+
+ __device__ bool operator!=(const Pointer& other) const
+ {
+ return mPtr != other.mPtr;
+ }
+ __device__ bool operator<(const Pointer& other) const
+ {
+ return mPtr < other.mPtr;
+ }
+
+ __device__ Pointer operator + (ptrdiff_t i) const
+ {
+ return Pointer(*this) += i;
+ }
+ __device__ Pointer& operator += (ptrdiff_t i)
+ {
+ mPtr += i * stride();
+ return *this;
+ }
+ __device__ Pointer operator - (ptrdiff_t i) const
+ {
+ return Pointer(*this) -= i;
+ }
+ __device__ Pointer& operator -= (ptrdiff_t i)
+ {
+ mPtr -= i * stride();
+ return *this;
+ }
+
+ __device__ Pointer& operator ++ ()
+ {
+ mPtr += stride();
+ return *this;
+ }
+ __device__ Pointer& operator -- ()
+ {
+ mPtr -= stride();
+ return *this;
+ }
+
+ // Dereference returns a proxy that loads/stores with the right qualifier.
+ __device__ Reference<S, T> operator*() const
+ {
+ return Reference<S, T>(*mPtr);
+ }
+ __device__ Reference<S, T> operator[](int32_t i) const
+ {
+ return Reference<S, T>(mPtr[i * stride()]);
+ }
+
+ private:
+ // convert back to generic pointer, private for safety, use generic() instead
+ __device__ operator T*() const
+ {
+ return Convert<S, T>::from(mPtr);
+ }
+
+ __device__ static size_t stride() { return 1; }
+
+ // Offset constructor in raw (unstrided) elements; used by
+ // GlobalParticleData to address individual particle components.
+ template <typename U>
+ __device__ Pointer(const Pointer<S, U>& other, ptrdiff_t stridedOffset)
+ : mPtr(other.mPtr + stridedOffset)
+ {
+ }
+
+ T* mPtr;
+};
+
+// pointers to global memory are all referring to particle data
+// stored as array of structs, so they have a stride of 4.
+// Global float pointers step over interleaved xyzw particle data.
+template<> __device__ size_t Pointer<Global, float>::stride() { return 4; }
+template<> __device__ size_t Pointer<Global, const float>::stride() { return 4; }
+// Escape hatch: recover a generic pointer from a tagged Pointer.
+template <AddressSpace S, typename T>
+__device__ T* generic(const Pointer<S, T>& ptr)
+{
+ return ptr;
+}
+
+#if !CONVERT_ADDRESSES
+template <typename T>
+__device__ T* generic(T* ptr)
+{
+ return ptr;
+}
+#endif
+
+/***************** Particle Data **********************/
+
+// Proxy for one whole particle (x, y, z, invMass) stored transposed in
+// shared memory: each component sits in a separate array, so the four
+// Reference members point at four unrelated addresses.
+template <typename T>
+struct SharedParticleReference
+{
+ // Read position only (w/invMass not loaded).
+ __device__ operator float3() const
+ {
+ float3 result;
+ result.x = mReferences[0];
+ result.y = mReferences[1];
+ result.z = mReferences[2];
+ return result;
+ }
+
+ __device__ SharedParticleReference& operator = (const float3& vec)
+ {
+ mReferences[0] = vec.x;
+ mReferences[1] = vec.y;
+ mReferences[2] = vec.z;
+ return *this;
+ }
+
+ // Read full particle including invMass in w.
+ __device__ operator float4() const
+ {
+ float4 result;
+ result.x = mReferences[0];
+ result.y = mReferences[1];
+ result.z = mReferences[2];
+ result.w = mReferences[3];
+ return result;
+ }
+
+ __device__ SharedParticleReference& operator = (const float4& vec)
+ {
+ mReferences[0] = vec.x;
+ mReferences[1] = vec.y;
+ mReferences[2] = vec.z;
+ mReferences[3] = vec.w;
+ return *this;
+ }
+
+ Reference<Shared, T> mReferences[4];
+};
+
+// Particle storage view over shared memory, transposed layout: one pointer
+// per component (x, y, z, w) into separate arrays of mNumParticles floats.
+// operator()(i, e) addresses a single component; operator()(i) yields a
+// whole-particle proxy; operator[](e) exposes a component array.
+struct SharedParticleData
+{
+ typedef float3 VectorType;
+
+ typedef Pointer<Shared, float> PointerType;
+ typedef Pointer<Shared, const float> ConstPointerType;
+ typedef Reference<Shared, float> ReferenceType;
+ typedef Reference<Shared, const float> ConstReferenceType;
+
+ typedef SharedParticleReference<float> ParticleReferenceType;
+ typedef SharedParticleReference<const float> ParticleConstReferenceType;
+
+ __device__ ReferenceType operator()(int32_t index, int32_t element)
+ {
+ return mPointers[element][index];
+ }
+ __device__ ConstReferenceType operator()(int32_t index, int32_t element) const
+ {
+ return mPointers[element][index];
+ }
+
+ __device__ ParticleReferenceType operator()(int32_t index)
+ {
+ ParticleReferenceType result = { mPointers[0][index], mPointers[1][index],
+ mPointers[2][index], mPointers[3][index] };
+ return result;
+ }
+ __device__ ParticleConstReferenceType operator()(int32_t index) const
+ {
+ ParticleConstReferenceType result = { mPointers[0][index], mPointers[1][index],
+ mPointers[2][index], mPointers[3][index] };
+ return result;
+ }
+
+ __device__ const PointerType& operator[](int32_t element)
+ {
+ return mPointers[element];
+ }
+ __device__ ConstPointerType operator[](int32_t element) const
+ {
+ return mPointers[element];
+ }
+
+ PointerType mPointers[4];
+};
+
+// Proxy for one particle stored contiguously (float4) in global memory.
+// With CONVERT_ADDRESSES, reads/writes use vectorized ld/st.global.v4 for
+// bandwidth; otherwise plain float3/float4 reinterpretation is used.
+template <typename T>
+struct GlobalParticleReference
+{
+ __device__ GlobalParticleReference(Pointer<Global, T> ref) : mPtr(reinterpret_cast<T* const&>(ref))
+ {
+ }
+
+#if CONVERT_ADDRESSES
+ __device__ operator float4() const
+ {
+ float4 vec;
+ asm("ld.global.v4.f32 {%0, %1, %2, %3}, [%4];"
+ : "=f"(vec.x), "=f"(vec.y), "=f"(vec.z), "=f"(vec.w)
+ : POINTER_CONSTRAINT(mPtr));
+ return vec;
+ }
+
+ __device__ GlobalParticleReference& operator = (const float4& vec)
+ {
+ asm("st.global.v4.f32 [%0], {%1, %2, %3, %4};" ::POINTER_CONSTRAINT(mPtr), "f"(vec.x), "f"(vec.y), "f"(vec.z),
+ "f"(vec.w)
+ : "memory");
+ return *this;
+ }
+
+ // float3 read still loads all four components, then drops w.
+ __device__ operator float3() const
+ {
+ float4 vec = *this;
+ return make_float3(vec.x, vec.y, vec.z);
+ }
+#else
+
+ __device__ operator float4() const
+ {
+ return *reinterpret_cast<const float4*>(mPtr);
+ }
+
+ __device__ GlobalParticleReference& operator = (const float4& vec)
+ {
+ *reinterpret_cast<float4*>(mPtr) = vec;
+ return *this;
+ }
+
+ __device__ operator float3() const
+ {
+ return *reinterpret_cast<const float3*>(mPtr);
+ }
+
+ __device__ GlobalParticleReference& operator = (const float3& vec)
+ {
+ *reinterpret_cast<float3*>(mPtr) = vec;
+ return *this;
+ }
+#endif
+
+ T* mPtr; // pointer to global address
+};
+
+// Particle storage view over global memory, interleaved layout: particles
+// are consecutive float4s (x, y, z, invMass). Component access computes
+// index * 4 + element via the private unstrided Pointer constructor.
+struct GlobalParticleData
+{
+#if CONVERT_ADDRESSES
+ // ld.global.v4 saturates memory bandwidth better than 3x ld.global
+ typedef float4 VectorType;
+#else
+ // the same isn't true for ld without state space
+ typedef float3 VectorType;
+#endif
+
+ typedef Pointer<Global, float> PointerType;
+ typedef Pointer<Global, const float> ConstPointerType;
+ typedef Reference<Global, float> ReferenceType;
+ typedef Reference<Global, const float> ConstReferenceType;
+
+ typedef GlobalParticleReference<float> ParticleReferenceType;
+ typedef GlobalParticleReference<const float> ParticleConstReferenceType;
+
+ __device__ ReferenceType operator()(int32_t index, int32_t element)
+ {
+ return *PointerType(mPtr, index * 4 + element);
+ }
+ __device__ ConstReferenceType operator()(int32_t index, int32_t element) const
+ {
+ return *ConstPointerType(mPtr, index * 4 + element);
+ }
+
+ __device__ ParticleReferenceType operator()(int32_t index)
+ {
+ return PointerType(mPtr, index * 4);
+ }
+
+ __device__ ParticleConstReferenceType operator()(int32_t index) const
+ {
+ return ConstPointerType(mPtr, index * 4);
+ }
+
+ __device__ PointerType operator[](int32_t element)
+ {
+ return PointerType(mPtr, element);
+ }
+ __device__ ConstPointerType operator[](int32_t element) const
+ {
+ return ConstPointerType(mPtr, element);
+ }
+
+ PointerType mPtr;
+};
+
+/***************** Profiling **********************/
+// No-op stand-in for ProfileZone when profiling is compiled out; the
+// constructor accepts and discards the zone id so call sites are unchanged.
+struct ProfileDisabledZone
+{
+ __device__ ProfileDisabledZone(cloth::CuProfileZoneIds::Enum /* id */) {}
+};
+
+#if defined(__CUDA_ARCH__) && defined(PX_PROFILE) // profile zones enabled for profile build
+
+// code below is copied from GPUProfile.h and needs to be kept in sync.
+
+#define NUM_WARPS_PER_PROFILE_BUFFER (4 * 1024 * 1024)
+
+// One per-warp profiling record; layout must be kept in sync with
+// GPUProfile.h. Captures block/warp ids plus the SM id and SM clock at
+// construction; endTime starts equal to startTime and is patched by
+// ProfileZone's destructor when the zone closes.
+struct __align__(16) WarpProfileEvent
+{
+ __device__ WarpProfileEvent(uint16_t id)
+ : block(blockIdx.x + gridDim.x * blockIdx.y), warp(threadIdx.x >> 5), userData(0), eventId(id)
+ {
+ uint32_t smid32, warpid32;
+ // A literal '%' in an inline-asm template must be written "%%";
+ // %smid/%warpid/%clock are PTX special registers, so without the
+ // escaping the asm strings are ill-formed.
+ asm volatile("mov.u32 %0, %%smid;" : "=r"(smid32));
+ asm volatile("mov.u32 %0, %%warpid;" : "=r"(warpid32));
+ asm volatile("mov.u32 %0, %%clock;" : "=r"(startTime));
+ smid = smid32;
+ warpid = warpid32;
+ endTime = startTime;
+ }
+
+ uint16_t block;
+ uint8_t warp;
+ uint8_t smid;
+ uint8_t warpid;
+ uint8_t userData;
+ uint16_t eventId;
+ uint32_t startTime;
+ uint32_t endTime;
+};
+
+// RAII-style profile zone: the first lane of each warp claims a slot in the
+// stream-local profile buffer and stamps start/end times around the scope.
+struct ProfileZone
+{
+ __device__ ProfileZone(cloth::CuProfileZoneIds::Enum id) : mEvent(0)
+ {
+ // Only lane 0 of each warp records; bail out if no buffer was bound.
+ if (!gProfileBuffer || threadIdx.x & 0x1f)
+ return;
+
+ // +1: first entry reserved for counter
+ uint32_t index = atomicAdd(reinterpret_cast<uint32_t*>(gProfileBuffer), 1) + 1;
+
+ // Buffer full: drop the event rather than overflow.
+ if (index >= NUM_WARPS_PER_PROFILE_BUFFER)
+ return;
+
+ mEvent = reinterpret_cast<WarpProfileEvent*>(gProfileBuffer) + index;
+
+ new (mEvent) WarpProfileEvent(gProfileBaseId + id);
+ }
+
+ __device__ ~ProfileZone()
+ {
+ if (mEvent)
+ mEvent->endTime = clock();
+ }
+
+ WarpProfileEvent* mEvent;
+};
+
+#else
+typedef ProfileDisabledZone ProfileZone;
+#endif
+
+#if 1 // set to 1 to enable detailed profile zones
+typedef ProfileZone ProfileDetailZone;
+#else
+typedef ProfileDisabledZone ProfileDetailZone;
+#endif
+
+namespace
+{
+// cut down version of thrust::uninitialized
+// avoids warning about non-empty c'tor
+// Raw aligned storage for a T whose constructor must not run at declaration
+// time (e.g. __shared__ objects); construct with placement new, access via
+// get().
+template <typename T>
+struct uninitialized
+{
+ __device__ inline T& get()
+ {
+ return *reinterpret_cast<T*>(data);
+ }
+
+ // maximum alignment required by device code is 16
+ __align__(16) unsigned char data[sizeof(T)];
+};
+}
+
+#if __CUDA_ARCH__ < 320
+namespace
+{
+// Fallback for pre-SM32 targets that lack the __ldg read-only-cache
+// intrinsic: a plain load.
+template <typename T>
+__device__ T __ldg(const T* __restrict ptr)
+{
+ return *ptr;
+}
+}
+#endif
+
+#define CU_SOLVER_KERNEL_CU
+#include "CuCollision.h"
+#include "CuSelfCollision.h"
+
+namespace
+{
+// Copies the current CuIterationData into shared gIterData, one 32-bit word
+// per thread. NOTE(review): indexing mIntegrationTrafo up to the full struct
+// size deliberately runs past that array to fill the trailing members too —
+// relies on CuIterationData being composed of 32-bit words; confirm against
+// CuClothData.h.
+__device__ void loadIterData(const cloth::CuIterationData* __restrict iterData)
+{
+ if (threadIdx.x < gCuIterationDataSize)
+ {
+ gIterData.mIntegrationTrafo[threadIdx.x] = __ldg(iterData->mIntegrationTrafo + threadIdx.x);
+ }
+}
+
+// integrate particle positions and store transposed
+template <bool IsTurning, typename CurrentT, typename PreviousT>
+__device__ void integrateParticles(CurrentT& current, PreviousT& previous)
+{
+ ProfileDetailZone zone(cloth::CuProfileZoneIds::INTEGRATE);
+
+ const float* __restrict trafo = gIterData.mIntegrationTrafo;
+
+ for (int32_t i = threadIdx.x; i < gClothData.mNumParticles; i += blockDim.x)
+ {
+ float4 prev = previous(i);
+ float4 next = current(i);
+ float4 cur = { next.x, next.y, next.z, prev.w };
+
+ if (next.w == 0.0f)
+ next.w = prev.w;
+
+ if (next.w > 0.0f)
+ {
+ if (IsTurning)
+ {
+ next.x = next.x + trafo[3] + cur.x * trafo[15] + prev.x * trafo[6] + cur.y * trafo[16] +
+ prev.y * trafo[7] + cur.z * trafo[17] + prev.z * trafo[8];
+
+ next.y = next.y + trafo[4] + cur.x * trafo[18] + prev.x * trafo[9] + cur.y * trafo[19] +
+ prev.y * trafo[10] + cur.z * trafo[20] + prev.z * trafo[11];
+
+ next.z = next.z + trafo[5] + cur.x * trafo[21] + prev.x * trafo[12] + cur.y * trafo[22] +
+ prev.y * trafo[13] + cur.z * trafo[23] + prev.z * trafo[14];
+ }
+ else
+ {
+ next.x += (cur.x - prev.x) * trafo[6] + trafo[3];
+ next.y += (cur.y - prev.y) * trafo[9] + trafo[4];
+ next.z += (cur.z - prev.z) * trafo[12] + trafo[5];
+ }
+
+ cur.x += trafo[0];
+ cur.y += trafo[1];
+ cur.z += trafo[2];
+ }
+
+ current(i) = next;
+ previous(i) = cur;
+ }
+}
+
+// Dispatches to the compile-time specialization matching whether the cloth's
+// reference frame is rotating this iteration.
+template <typename CurrentT, typename PreviousT>
+__device__ void integrateParticles(CurrentT& current, PreviousT& previous)
+{
+ gIterData.mIsTurning ? integrateParticles<true>(current, previous)
+ : integrateParticles<false>(current, previous);
+}
+
+// Adds per-particle external accelerations (scaled by dt^2) to current
+// positions. Unlike most kernels here, each thread handles one float
+// component, i.e. 4 threads per particle.
+template <typename CurrentT>
+__device__ void accelerateParticles(CurrentT& current)
+{
+ // might be better to move this into integrate particles
+ const float* __restrict accelerations = gFrameData.mParticleAccelerations;
+
+ if (!accelerations)
+ return;
+
+ ProfileDetailZone zone(cloth::CuProfileZoneIds::ACCELERATE);
+
+ __syncthreads(); // looping with 4 instead of 1 thread per particle
+
+ // Lanes handling the w component (threadIdx.x % 4 == 3) get scale 0 so
+ // invMass is left untouched.
+ float sqrIterDt = ~threadIdx.x & 0x3 ? gFrameData.mIterDt * gFrameData.mIterDt : 0.0f;
+ typename CurrentT::PointerType sharedCurPos = current[threadIdx.x % 4];
+
+ for (int32_t i = threadIdx.x; i < gClothData.mNumParticles * 4; i += blockDim.x)
+ {
+ // turning this into __ldg slows kernel down even without particle accelerations (!)
+ // Skip static particles (invMass == 0).
+ if (current(i / 4, 3) > 0.0f)
+ sharedCurPos[i / 4] += accelerations[i] * sqrIterDt;
+ }
+
+ __syncthreads();
+}
+
+// Minimal component-wise float3 arithmetic (CUDA provides the type but no
+// operators). Evaluation order matches the straightforward expressions so
+// results are bit-identical.
+__device__ float3 operator + (const float3& u, const float3& v)
+{
+ float3 r;
+ r.x = u.x + v.x;
+ r.y = u.y + v.y;
+ r.z = u.z + v.z;
+ return r;
+}
+__device__ float3 operator - (const float3& u, const float3& v)
+{
+ float3 r;
+ r.x = u.x - v.x;
+ r.y = u.y - v.y;
+ r.z = u.z - v.z;
+ return r;
+}
+__device__ float3 operator*(float s, const float3& v)
+{
+ float3 r;
+ r.x = v.x * s;
+ r.y = v.y * s;
+ r.z = v.z * s;
+ return r;
+}
+// Dot product, accumulated left to right.
+__device__ float dot3(const float3& u, const float3& v)
+{
+ float r = u.x * v.x;
+ r = r + u.y * v.y;
+ r = r + u.z * v.z;
+ return r;
+}
+// Right-handed cross product.
+__device__ float3 cross3(const float3& u, const float3& v)
+{
+ float3 r;
+ r.x = u.y * v.z - u.z * v.y;
+ r.y = u.z * v.x - u.x * v.z;
+ r.z = u.x * v.y - u.y * v.x;
+ return r;
+}
+// Accumulates an impulse into a shared-memory particle, scaled by its
+// (negated) invMass stored in component 3. Uses red.shared.add so concurrent
+// triangle contributions to the same particle don't race.
+__device__ void applyImpulse(SharedParticleData::ParticleReferenceType pos, const float3& impulse)
+{
+ float scale = -pos.mReferences[3];
+ //Use this instead of atomicAdd function to work around compiler issue treating the pointer as global memory instead of shared memory
+ asm("red.shared.add.f32 [%0], %1;" :: POINTER_CONSTRAINT(pos.mReferences[0].mPtr), "f"(impulse.x * scale));
+ asm("red.shared.add.f32 [%0], %1;" :: POINTER_CONSTRAINT(pos.mReferences[1].mPtr), "f"(impulse.y * scale));
+ asm("red.shared.add.f32 [%0], %1;" :: POINTER_CONSTRAINT(pos.mReferences[2].mPtr), "f"(impulse.z * scale));
+}
+// Global-memory overload: accumulates an impulse into a particle stored as a
+// contiguous float4, scaled by its negated invMass (component 3); atomicAdd
+// guards against concurrent triangle contributions.
+__device__ void applyImpulse(GlobalParticleData::ParticleReferenceType pos, const float3& impulse)
+{
+ float* component = pos.mPtr;
+ float negInvMass = -component[3];
+ atomicAdd(component + 0, impulse.x * negInvMass);
+ atomicAdd(component + 1, impulse.y * negInvMass);
+ atomicAdd(component + 2, impulse.z * negInvMass);
+}
+
+// Applies aerodynamic drag and lift per triangle: estimates the triangle's
+// velocity relative to the wind from current/previous centroids, then
+// distributes the resulting impulse to the three vertices via applyImpulse.
+template <bool IsTurning, typename CurrentT, typename PreviousT>
+__device__ void applyWind(CurrentT& current, PreviousT& previous)
+{
+ const float dragCoefficient = gFrameData.mDragCoefficient;
+ const float liftCoefficient = gFrameData.mLiftCoefficient;
+
+ if (dragCoefficient == 0.0f && liftCoefficient == 0.0f)
+ return;
+
+ ProfileDetailZone zone(cloth::CuProfileZoneIds::WIND);
+
+ const float oneThird = 1 / 3.0f;
+ float3 wind = make_float3(gIterData.mWind[0], gIterData.mWind[1], gIterData.mWind[2]);
+
+ const uint16_t* tIt = gClothData.mTriangles;
+ for (int32_t i = threadIdx.x; i < gClothData.mNumTriangles; i += blockDim.x)
+ {
+ uint16_t i0 = tIt[i * 3 + 0];
+ uint16_t i1 = tIt[i * 3 + 1];
+ uint16_t i2 = tIt[i * 3 + 2];
+
+ float3 c0 = current(i0);
+ float3 c1 = current(i1);
+ float3 c2 = current(i2);
+
+ // float w1 = current(i0, 3);
+ // float w2 = current(i1, 3);
+ // float w3 = current(i2, 3);
+ //
+ // float wMult = w1 * w2 * w3;
+ // float invMass = wMult < FLT_EPSILON ? 0.f : w1 * w2 * w3 / (w1 * w2 + w1 * w3 + w2 * w3);
+
+ float3 p0 = previous(i0);
+ float3 p1 = previous(i1);
+ float3 p2 = previous(i2);
+
+ // Triangle centroids now and one iteration ago.
+ float3 cur = oneThird * (c0 + c1 + c2);
+ float3 prev = oneThird * (p0 + p1 + p2);
+
+ // Apparent medium velocity relative to the triangle.
+ float3 delta = cur - prev + wind;
+
+ if (IsTurning)
+ {
+ // Rotating frame: previous centroid and wind are rotated by the
+ // frame rotation before forming the relative velocity.
+ const float3* rot = reinterpret_cast<const float3*>(gFrameData.mRotation);
+ float3 d = wind - prev;
+ delta = cur + d.x * rot[0] + d.y * rot[1] + d.z * rot[2];
+ }
+
+ float3 normal = cross3(c2 - c0, c1 - c0);
+
+ // |normal| equals twice the triangle area.
+ float doubleArea = sqrtf(dot3(normal, normal));
+
+ float invSqrScale = dot3(delta, delta);
+ float scale = rsqrtf(invSqrScale);
+
+ // Angle of attack between flow direction and triangle normal.
+ float cosTheta = dot3(normal, delta) * scale / doubleArea;
+ float sinTheta = sqrtf(max(0.0f, 1.0f - cosTheta * cosTheta));
+
+ float3 liftDir = cross3(cross3(delta, normal), scale * delta);
+
+ float3 lift = liftCoefficient * cosTheta * sinTheta * liftDir;
+ float3 drag = dragCoefficient * abs(cosTheta) * doubleArea * delta;
+
+ // Guard against the near-zero-velocity case where scale blows up.
+ float3 impulse = invSqrScale < FLT_EPSILON ? make_float3(0.0f, 0.0f, 0.0f) : lift + drag;
+
+ applyImpulse(current(i0), impulse);
+ applyImpulse(current(i1), impulse);
+ applyImpulse(current(i2), impulse);
+ }
+
+ __syncthreads();
+}
+
+// Dispatches to the wind specialization matching this iteration's frame
+// motion (rotating vs. not).
+template <typename CurrentT, typename PreviousT>
+__device__ void applyWind(CurrentT& current, PreviousT& previous)
+{
+ gIterData.mIsTurning ? applyWind<true>(current, previous)
+ : applyWind<false>(current, previous);
+}
+
+// Tether (long-range attachment) constraints: pulls each particle back
+// toward its anchor(s) when it strays beyond the tether radius. Tethers are
+// packed one uint32 each: low 16 bits anchor index, high 16 bits radius
+// (scaled by mTetherConstraintScale), laid out in numParticles-strided runs.
+template <typename CurrentT>
+__device__ void constrainTether(CurrentT& current)
+{
+ if (0.0f == gFrameData.mTetherConstraintStiffness || !gClothData.mNumTethers)
+ return;
+
+ ProfileDetailZone zone(cloth::CuProfileZoneIds::TETHER);
+
+ int32_t numParticles = gClothData.mNumParticles;
+ int32_t numTethers = gClothData.mNumTethers;
+ assert(0 == numTethers % numParticles);
+
+ // Average the correction over the tethers-per-particle count.
+ float stiffness = numParticles * __fdividef(gFrameData.mTetherConstraintStiffness, numTethers);
+ float scale = gClothData.mTetherConstraintScale;
+
+ const uint32_t* __restrict tIt = reinterpret_cast<const uint32_t*>(gClothData.mTethers);
+
+ for (int32_t i = threadIdx.x; i < numParticles; i += blockDim.x)
+ {
+ float posX = current(i, 0);
+ float posY = current(i, 1);
+ float posZ = current(i, 2);
+
+ float offsetX = 0.0f;
+ float offsetY = 0.0f;
+ float offsetZ = 0.0f;
+
+ for (int32_t j = i; j < numTethers; j += gClothData.mNumParticles)
+ {
+ uint32_t tether = __ldg(tIt + j);
+
+ int32_t anchor = tether & 0xffff;
+ float deltaX = current(anchor, 0) - posX;
+ float deltaY = current(anchor, 1) - posY;
+ float deltaZ = current(anchor, 2) - posZ;
+
+ float sqrLength = FLT_EPSILON + deltaX * deltaX + deltaY * deltaY + deltaZ * deltaZ;
+
+ float radius = (tether >> 16) * scale;
+ float slack = 1.0f - radius * rsqrtf(sqrLength);
+
+ // Only correct when the tether is stretched (positive slack).
+ if (slack > 0.0f)
+ {
+ offsetX += deltaX * slack;
+ offsetY += deltaY * slack;
+ offsetZ += deltaZ * slack;
+ }
+ }
+
+ current(i, 0) = posX + offsetX * stiffness;
+ current(i, 1) = posY + offsetY * stiffness;
+ current(i, 2) = posZ + offsetZ * stiffness;
+ }
+}
+
+// Gauss-Seidel distance-constraint solve over all phases. Phase configs sit
+// at the start of shared memory (slot 0 of the layout comment above). Each
+// thread walks the phase's constraint list with a blockDim.x stride,
+// software-prefetching the next indices/restlengths one iteration ahead.
+// Constraints within a phase are independent, so no atomics are needed.
+template <typename CurrentT>
+__device__ void solveFabric(CurrentT& current)
+{
+ ProfileDetailZone zone(cloth::CuProfileZoneIds::FABRIC);
+
+ const cloth::CuPhaseConfig* __restrict cIt = (cloth::CuPhaseConfig*)gSharedMemory;
+ const cloth::CuPhaseConfig* cEnd = cIt + gClothData.mNumPhases;
+
+ for (; cIt != cEnd; ++cIt)
+ {
+ __syncthreads();
+
+ ProfileDetailZone zone(cloth::CuProfileZoneIds::CONSTRAINT_SET);
+
+ int32_t numConstraints = cIt->mNumConstraints;
+ if (threadIdx.x >= numConstraints)
+ continue;
+
+ const uint32_t* __restrict iIt = reinterpret_cast<const uint32_t*>(cIt->mIndices) + threadIdx.x;
+ const float* restvalues = cIt->mRestvalues;
+ const float* rIt = restvalues + threadIdx.x;
+ const float* rEnd = restvalues + numConstraints;
+
+ const float* stIt = cIt->mStiffnessValues + threadIdx.x;
+ bool useStiffnessPerConstraint = cIt->mStiffnessValues!=nullptr;
+
+ // Prefetch the first constraint through the read-only cache.
+ uint32_t vpijPrefetch = __ldg(iIt);
+ float rijPrefetch = __ldg(rIt);
+ float stijPrefetch;
+ if (useStiffnessPerConstraint)
+ stijPrefetch = __ldg(stIt);
+
+ float stiffness = cIt->mStiffness;
+ float stiffnessMultiplier = cIt->mStiffnessMultiplier;
+ float compressionLimit = cIt->mCompressionLimit;
+ float stretchLimit = cIt->mStretchLimit;
+
+ do
+ {
+ rIt += blockDim.x;
+ iIt += blockDim.x;
+ stIt += blockDim.x;
+
+ // Two 16-bit particle indices packed into one uint32.
+ int32_t vpi = USHRT_MAX & vpijPrefetch;
+ int32_t vpj = USHRT_MAX & vpijPrefetch >> 16;
+ float rij = rijPrefetch;
+ // Per-constraint stiffness is stored as log2; convert with the
+ // per-frame exponent, otherwise use the phase's uniform stiffness.
+ float stij = useStiffnessPerConstraint?1.0f - exp2f(stijPrefetch * gFrameData.mStiffnessExponent):stiffness;
+
+ // Prefetch the next constraint while solving this one.
+ if (rIt < rEnd)
+ {
+ vpijPrefetch = __ldg(iIt);
+ rijPrefetch = __ldg(rIt);
+ if (useStiffnessPerConstraint)
+ stijPrefetch = __ldg(stIt);
+ }
+
+ float vxi = current(vpi, 0);
+ float vyi = current(vpi, 1);
+ float vzi = current(vpi, 2);
+ float vwi = current(vpi, 3);
+
+ float vxj = current(vpj, 0);
+ float vyj = current(vpj, 1);
+ float vzj = current(vpj, 2);
+ float vwj = current(vpj, 3);
+
+ float hxij = vxj - vxi;
+ float hyij = vyj - vyi;
+ float hzij = vzj - vzi;
+
+ // Relative error vs. rest length, negated for the fma form below.
+ float e2ij = FLT_EPSILON + hxij * hxij + hyij * hyij + hzij * hzij;
+ float negErij = rij > FLT_EPSILON ? -1.0f + rij * rsqrtf(e2ij) : 0.0f;
+
+ negErij = negErij + stiffnessMultiplier * max(compressionLimit, min(-negErij, stretchLimit));
+
+ // Split the correction by inverse mass of the two endpoints.
+ float negExij = __fdividef(negErij * stij, FLT_EPSILON + vwi + vwj);
+
+ float vmi = -vwi * negExij;
+ current(vpi, 0) = vxi + vmi * hxij;
+ current(vpi, 1) = vyi + vmi * hyij;
+ current(vpi, 2) = vzi + vmi * hzij;
+
+ float vmj = +vwj * negExij;
+ current(vpj, 0) = vxj + vmj * hxij;
+ current(vpj, 1) = vyj + vmj * hyij;
+ current(vpj, 2) = vzj + vmj * hzij;
+
+ } while (rIt < rEnd);
+ }
+
+ __syncthreads();
+}
+
+// Motion constraints: each particle must stay inside a sphere interpolated
+// (by alpha) between start and target constraint spheres; violating
+// particles are pulled back with mMotionConstraintStiffness. A sphere whose
+// scaled/biased radius is <= 0 locks the particle (invMass set to 0).
+template <typename CurrentT>
+__device__ void constrainMotion(CurrentT& current, float alpha)
+{
+ if (!gFrameData.mStartMotionConstraints)
+ return;
+
+ ProfileDetailZone zone(cloth::CuProfileZoneIds::MOTION);
+
+ // negative because of fused multiply-add optimization
+ float negativeScale = -gClothData.mMotionConstraintScale;
+ float negativeBias = -gClothData.mMotionConstraintBias;
+
+ const float4* startIt = reinterpret_cast<const float4*>(gFrameData.mStartMotionConstraints);
+ const float4* targetIt = reinterpret_cast<const float4*>(gFrameData.mTargetMotionConstraints);
+
+ for (int32_t i = threadIdx.x; i < gClothData.mNumParticles; i += blockDim.x)
+ {
+ float4 startPos = __ldg(startIt + i);
+ float4 targetPos = __ldg(targetIt + i);
+
+ // Lerp the constraint sphere (center xyz, radius w) for this iteration.
+ float sphereX = startPos.x + (targetPos.x - startPos.x) * alpha;
+ float sphereY = startPos.y + (targetPos.y - startPos.y) * alpha;
+ float sphereZ = startPos.z + (targetPos.z - startPos.z) * alpha;
+ float sphereW = startPos.w + (targetPos.w - startPos.w) * alpha;
+
+ float dx = sphereX - current(i, 0);
+ float dy = sphereY - current(i, 1);
+ float dz = sphereZ - current(i, 2);
+
+ float sqrLength = FLT_EPSILON + dx * dx + dy * dy + dz * dz;
+ float negativeRadius = min(0.0f, sphereW * negativeScale + negativeBias);
+
+ // slack > 0 only when the particle is outside the sphere.
+ float slack = max(negativeRadius * rsqrtf(sqrLength) + 1.0f, 0.0f) * gFrameData.mMotionConstraintStiffness;
+
+ current(i, 0) += slack * dx;
+ current(i, 1) += slack * dy;
+ current(i, 2) += slack * dz;
+
+ // set invMass to zero if radius is zero
+ if (negativeRadius >= 0.0f)
+ current(i, 3) = 0.0f;
+ }
+}
+
+// Separation constraints: the mirror image of constrainMotion — each
+// particle must stay OUTSIDE its interpolated sphere; particles inside are
+// pushed out to the surface (slack is negative only when penetrating).
+template <typename T>
+__device__ void constrainSeparation(T& current, float alpha)
+{
+ if (!gFrameData.mStartSeparationConstraints)
+ return;
+
+ ProfileDetailZone zone(cloth::CuProfileZoneIds::SEPARATION);
+
+ const float4* startIt = reinterpret_cast<const float4*>(gFrameData.mStartSeparationConstraints);
+ const float4* targetIt = reinterpret_cast<const float4*>(gFrameData.mTargetSeparationConstraints);
+
+ for (int32_t i = threadIdx.x; i < gClothData.mNumParticles; i += blockDim.x)
+ {
+ float4 startPos = __ldg(startIt + i);
+ float4 targetPos = __ldg(targetIt + i);
+
+ // Lerp the constraint sphere (center xyz, radius w) for this iteration.
+ float sphereX = startPos.x + (targetPos.x - startPos.x) * alpha;
+ float sphereY = startPos.y + (targetPos.y - startPos.y) * alpha;
+ float sphereZ = startPos.z + (targetPos.z - startPos.z) * alpha;
+ float sphereW = startPos.w + (targetPos.w - startPos.w) * alpha;
+
+ float dx = sphereX - current(i, 0);
+ float dy = sphereY - current(i, 1);
+ float dz = sphereZ - current(i, 2);
+
+ float sqrLength = FLT_EPSILON + dx * dx + dy * dy + dz * dz;
+
+ float slack = min(0.0f, 1.0f - sphereW * rsqrtf(sqrLength));
+
+ current(i, 0) += slack * dx;
+ current(i, 1) += slack * dy;
+ current(i, 2) += slack * dz;
+ }
+}
+
+// Periodic sleep test: every mSleepTestInterval (counted in ms of iteration
+// time) measure the largest per-axis particle displacement of this iteration.
+// The cloth accumulates mSleepPassCounter for each quiet test; the kernel's
+// caller puts the cloth to sleep once the counter reaches mSleepAfterCount.
+template <typename CurrentT, typename PreviousT>
+__device__ void updateSleepState(const CurrentT& current, const PreviousT& previous)
+{
+ ProfileDetailZone zone(cloth::CuProfileZoneIds::SLEEP);
+
+ // thread 0 advances the test clock by this iteration's dt in ms (min 1)
+ if (!threadIdx.x)
+ gFrameData.mSleepTestCounter += max(1, uint32_t(gFrameData.mIterDt * 1000));
+
+ __syncthreads();
+
+ if (gFrameData.mSleepTestCounter < gClothData.mSleepTestInterval)
+ return;
+
+ // each thread reduces the max displacement over its particle subset
+ float maxDelta = 0.0f;
+ for (int32_t i = threadIdx.x; i < gClothData.mNumParticles; i += blockDim.x)
+ {
+ float4 prev = previous(i);
+ maxDelta = max(fabsf(current(i, 0) - prev.x), maxDelta);
+ maxDelta = max(fabsf(current(i, 1) - prev.y), maxDelta);
+ maxDelta = max(fabsf(current(i, 2) - prev.z), maxDelta);
+ }
+
+ // thread 0 optimistically counts this test as passed; revoked below
+ if (!threadIdx.x)
+ {
+ ++gFrameData.mSleepPassCounter;
+ gFrameData.mSleepTestCounter -= gClothData.mSleepTestInterval;
+ }
+
+ __syncthreads();
+
+ // any thread that saw too much motion resets the pass counter; multiple
+ // threads may race on this store but all write the same value (0)
+ if (maxDelta > gClothData.mSleepThreshold * gFrameData.mIterDt)
+ gFrameData.mSleepPassCounter = 0;
+}
+
+// Runs all solver iterations for one cloth. CurrentT/PreviousT abstract the
+// particle storage (shared vs. global memory) so the same solver code serves
+// simulateShared(), simulateStreamed() and simulateGlobal() below.
+template <typename CurrentT, typename PreviousT>
+__device__ void simulateCloth(CurrentT& current, PreviousT& previous)
+{
+ // apply exponent to phase configs
+ assert(blockDim.x >= gClothData.mNumPhases);
+ if (threadIdx.x < gClothData.mNumPhases)
+ {
+ // remaps the first two per-phase values in shared memory to
+ // per-iteration stiffness factors via 1 - 2^(value * exponent)
+ // NOTE(review): assumes ptr[0]/ptr[1] hold log2-encoded stiffness
+ // terms — confirm against the CuPhaseConfig layout
+ float exponent = gFrameData.mStiffnessExponent;
+ float* ptr = gSharedMemory + threadIdx.x * gCuPhaseConfigSize;
+ ptr[0] = 1.0f - exp2f(ptr[0] * exponent);
+ ptr[1] = 1.0f - exp2f(ptr[1] * exponent);
+ }
+
+ uint32_t numIterations = gFrameData.mNumIterations;
+ float invNumIterations = __fdividef(1.0f, numIterations);
+
+ const cloth::CuIterationData* iterData = gFrameData.mIterationData;
+ const cloth::CuIterationData* iterEnd = iterData + numIterations;
+
+ loadIterData(iterData);
+
+ __syncthreads();
+
+ // alpha advances from 1/n to 1 across the iterations; the loop condition
+ // post-increments iterData, so loadIterData() inside the body prefetches
+ // the *next* iteration's data. NOTE(review): on the final iteration this
+ // reads the entry at iterEnd — the host must keep that slot readable;
+ // confirm the iteration-data allocation includes it.
+ for (float alpha = invNumIterations; iterData++ != iterEnd; alpha += invNumIterations)
+ {
+ integrateParticles(current, previous);
+ accelerateParticles(current);
+ applyWind(current, previous);
+ constrainMotion(current, alpha);
+ constrainTether(current);
+ solveFabric(current);
+ loadIterData(iterData);
+ constrainSeparation(current, alpha);
+ gCollideParticles.get()(current, previous, alpha);
+ gSelfCollideParticles.get()(current);
+ updateSleepState(current, previous);
+ }
+
+ __syncthreads();
+}
+
+// Simulation path where both current and previous particles fit in shared
+// memory. Shared layout: [phase configs | current x|y|z|w planes | previous
+// x|y|z|w planes]; particles are stored as four component planes of
+// mNumParticles floats each, so lane selection is threadIdx.x % 4.
+__device__ void simulateShared()
+{
+ ProfileZone zone(cloth::CuProfileZoneIds::SIMULATE_SHARED);
+
+ __shared__ uninitialized<SharedParticleData> current;
+ __shared__ uninitialized<SharedParticleData> previous;
+
+ int32_t configDataSize = gClothData.mNumPhases * gCuPhaseConfigSize;
+ int32_t particlesDataSize = 4 * gClothData.mNumParticles;
+
+ // per-thread base pointer into the component plane for lane threadIdx.x%4
+ Pointer<Shared, float> sharedCurPos =
+ Pointer<Shared, float>(gSharedMemory + configDataSize + threadIdx.x % 4 * gClothData.mNumParticles);
+ Pointer<Shared, float> sharedPrevPos = sharedCurPos + particlesDataSize;
+
+ // threads 0..3 publish the four plane pointers (x, y, z, w)
+ if (threadIdx.x < 4)
+ {
+ current.get().mPointers[threadIdx.x] = sharedCurPos;
+ previous.get().mPointers[threadIdx.x] = sharedPrevPos;
+ }
+
+ float* globalCurPos = gClothData.mParticles;
+ float* globalPrevPos = gClothData.mParticles + particlesDataSize;
+
+ // copy particles from device memory to shared memory and transpose
+ // (xyzw-interleaved global layout -> per-component planes; assumes
+ // blockDim.x is a multiple of 4 so i % 4 stays equal to threadIdx.x % 4)
+ for (int32_t i = threadIdx.x; i < particlesDataSize; i += blockDim.x)
+ {
+ sharedCurPos[i / 4] = globalCurPos[i];
+ sharedPrevPos[i / 4] = globalPrevPos[i];
+ }
+
+ simulateCloth(current.get(), previous.get());
+
+ // copy particles from shared memory to device memory and transpose
+ for (int32_t i = threadIdx.x; i < particlesDataSize; i += blockDim.x)
+ {
+ globalCurPos[i] = sharedCurPos[i / 4];
+ globalPrevPos[i] = sharedPrevPos[i / 4];
+ }
+
+ __syncthreads();
+}
+
+// Simulation path where only the current particles fit in shared memory;
+// previous particles are streamed from global memory. Mirrors
+// simulateShared() otherwise (same transpose / plane layout).
+__device__ void simulateStreamed()
+{
+ ProfileZone zone(cloth::CuProfileZoneIds::SIMULATE_STREAMED);
+
+ __shared__ uninitialized<SharedParticleData> current;
+ __shared__ uninitialized<GlobalParticleData> previous;
+
+ int32_t configDataSize = gClothData.mNumPhases * gCuPhaseConfigSize;
+ int32_t particlesDataSize = 4 * gClothData.mNumParticles;
+
+ float* globalCurPos = gClothData.mParticles;
+ // per-thread base pointer into the component plane for lane threadIdx.x%4
+ Pointer<Shared, float> sharedCurPos =
+ Pointer<Shared, float>(gSharedMemory + configDataSize + threadIdx.x % 4 * gClothData.mNumParticles);
+
+ // threads 0..3 publish the current planes; thread 0 the previous pointer
+ if (threadIdx.x < 4)
+ current.get().mPointers[threadIdx.x] = sharedCurPos;
+ if (!threadIdx.x)
+ previous.get().mPtr = GlobalParticleData::PointerType(globalCurPos + particlesDataSize);
+
+ // copy particles from device memory to shared memory and transpose
+ for (int32_t i = threadIdx.x; i < particlesDataSize; i += blockDim.x)
+ sharedCurPos[i / 4] = globalCurPos[i];
+
+ simulateCloth(current.get(), previous.get());
+
+ // copy particles from shared memory to device memory and transpose
+ for (int32_t i = threadIdx.x; i < particlesDataSize; i += blockDim.x)
+ globalCurPos[i] = sharedCurPos[i / 4];
+
+ __syncthreads();
+}
+
+// Fallback simulation path: both current and previous particles stay in
+// global memory (cloth too large for the shared-memory variants above).
+__device__ void simulateGlobal()
+{
+ ProfileZone zone(cloth::CuProfileZoneIds::SIMULATE_GLOBAL);
+
+ __shared__ uninitialized<GlobalParticleData> current;
+ __shared__ uninitialized<GlobalParticleData> previous;
+
+ // thread 0 publishes both global-memory base pointers; previous follows
+ // current after mNumParticles float4 elements
+ if (!threadIdx.x)
+ {
+ GlobalParticleData::PointerType globalCurPos(gClothData.mParticles);
+ current.get().mPtr = globalCurPos;
+ previous.get().mPtr = globalCurPos + gClothData.mNumParticles;
+ }
+
+ simulateCloth(current.get(), previous.get());
+}
+
+} // anonymous namespace
+
+// Kernel entry point: one block simulates one cloth. Blocks pull cloth
+// indices from a shared atomic queue, stage the per-cloth and per-frame data
+// into shared memory, upload host-dirty buffers, then dispatch to one of the
+// three simulate*() storage variants based on how much fits in shared memory.
+extern "C" __global__ void
+#if __CUDA_ARCH__ >= 300
+__launch_bounds__(1024, 1)
+#else
+__launch_bounds__(512, 1)
+#endif
+ simulateCloths(cloth::CuKernelData kernelData)
+{
+ gProfileBuffer = kernelData.mProfileBuffer;
+ gProfileBaseId = kernelData.mProfileBaseId;
+
+ ProfileZone zone(cloth::CuProfileZoneIds::SIMULATE);
+
+ // check that http://nvbugs/1038473 is fixed
+ assert(gSharedMemory > (float*)&gFrameData);
+ assert(gSharedMemory > (float*)&gClothData);
+
+ // fetch cloth index from queue
+ // atomicInc wraps at gridDim.x-1, so clothIdx is always in [0, gridDim.x)
+ __shared__ uint32_t clothIdx;
+ if (!threadIdx.x)
+ clothIdx = atomicInc(kernelData.mClothIndex, gridDim.x - 1);
+ __syncthreads();
+ assert(clothIdx < gridDim.x);
+
+ // copy cloth data to shared memory
+ // (word-wise, one 32-bit word per thread; sizes are in words)
+ const uint32_t* clothData = reinterpret_cast<const uint32_t*>(kernelData.mClothData + clothIdx);
+ if (threadIdx.x < gCuClothDataSize)
+ reinterpret_cast<uint32_t*>(&gClothData)[threadIdx.x] = clothData[threadIdx.x];
+
+ // copy frame data to shared memory
+ uint32_t* frameData = reinterpret_cast<uint32_t*>(kernelData.mFrameData + clothIdx);
+ if (threadIdx.x < gCuFrameDataSize)
+ reinterpret_cast<uint32_t*>(&gFrameData)[threadIdx.x] = frameData[threadIdx.x];
+
+ __syncthreads();
+
+ if (gFrameData.mSleepPassCounter >= gClothData.mSleepAfterCount)
+ return; // cloth is sleeping, exit
+
+ // copy phase configs to shared memory
+ int32_t configDataSize = gClothData.mNumPhases * gCuPhaseConfigSize;
+ for (int32_t i = threadIdx.x; i < configDataSize; i += blockDim.x)
+ gSharedUnsigned[i] = reinterpret_cast<const uint32_t*>(gClothData.mPhaseConfigs)[i];
+
+ // scratch space starts after the configs and the 0/1/2 particle arrays
+ // that the selected simulate*() variant will place in shared memory
+ Pointer<Shared, uint32_t> scratchPtr = Pointer<Shared, uint32_t>(
+ gSharedUnsigned + configDataSize + 4 * gFrameData.mNumSharedPositions * gClothData.mNumParticles);
+
+ // initialize with placement new
+ new (gCollideParticles.data) CuCollision(scratchPtr);
+ new (gSelfCollideParticles.data) CuSelfCollision();
+
+ // copy particles and constraints to device
+ if (gFrameData.mDeviceParticlesDirty)
+ {
+ // 8 floats per particle: current + previous xyzw
+ for (int32_t i = threadIdx.x; i < gClothData.mNumParticles * 8; i += blockDim.x)
+ gClothData.mParticles[i] = gClothData.mParticlesHostCopy[i];
+ }
+ if (gFrameData.mHostMotionConstraints)
+ {
+ for (int32_t i = threadIdx.x; i < gClothData.mNumParticles * 4; i += blockDim.x)
+ gFrameData.mTargetMotionConstraints[i] = gFrameData.mHostMotionConstraints[i];
+ }
+ if (gFrameData.mHostSeparationConstraints)
+ {
+ for (int32_t i = threadIdx.x; i < gClothData.mNumParticles * 4; i += blockDim.x)
+ gFrameData.mTargetSeparationConstraints[i] = gFrameData.mHostSeparationConstraints[i];
+ }
+ if (gFrameData.mHostParticleAccelerations)
+ {
+ for (int32_t i = threadIdx.x; i < gClothData.mNumParticles * 4; i += blockDim.x)
+ gFrameData.mParticleAccelerations[i] = gFrameData.mHostParticleAccelerations[i];
+ }
+
+ // necessary to ensure phase configs are fully loaded before setup in simulateCloth()
+ __syncthreads();
+
+ // dispatch on how many particle arrays fit in shared memory (0, 1 or 2)
+ switch(gFrameData.mNumSharedPositions)
+ {
+ case 0:
+ simulateGlobal();
+ break;
+ case 1:
+ simulateStreamed();
+ break;
+ case 2:
+ simulateShared();
+ break;
+ }
+
+ // write back frame data
+ if (threadIdx.x < gCuFrameDataSize)
+ frameData[threadIdx.x] = reinterpret_cast<const uint32_t*>(&gFrameData)[threadIdx.x];
+
+ // copy particles to host
+ for (int32_t i = threadIdx.x; i < gClothData.mNumParticles * 8; i += blockDim.x)
+ gClothData.mParticlesHostCopy[i] = gClothData.mParticles[i];
+}
+
+// Returns the name of the __global__ kernel defined above; the host uses it
+// to look the entry point up in the compiled module (declared in CuSolverKernel.h).
+const char* cloth::getKernelFunctionName()
+{
+ return "simulateCloths";
+}
diff --git a/NvCloth/src/cuda/CuSolverKernel.h b/NvCloth/src/cuda/CuSolverKernel.h
new file mode 100644
index 0000000..e6086ec
--- /dev/null
+++ b/NvCloth/src/cuda/CuSolverKernel.h
@@ -0,0 +1,93 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2017 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#pragma once
+
+#include <foundation/Px.h>
+
+namespace nv
+{
+namespace cloth
+{
+// Note: CuProfileZoneIds has a corresponding enum list (gCuProfileZoneNames) in CuSolver.cpp.
+// Additions/deletions to CuProfileZoneIds requires a similar action to gCuProfileZoneNames.
+// Zone ids recorded by the device-side profiler (ProfileZone/ProfileDetailZone
+// in the solver kernel); NUMZONES is the total count and must stay last.
+struct CuProfileZoneIds
+{
+ enum Enum
+ {
+ SIMULATE,
+ INTEGRATE,
+ ACCELERATE,
+ WIND,
+ TETHER,
+ FABRIC,
+ MOTION,
+ SEPARATION,
+ COLLIDE,
+ SELFCOLLIDE,
+ SLEEP,
+ SIMULATE_SHARED,
+ SIMULATE_STREAMED,
+ SIMULATE_GLOBAL,
+ CONSTRAINT_SET,
+ COLLIDE_ACCELERATION,
+ COLLIDE_CAPSULES,
+ COLLIDE_VIRTUAL_CAPSULES,
+ COLLIDE_CONTINUOUS_CAPSULES,
+ COLLIDE_CONVEXES,
+ COLLIDE_TRIANGLES,
+ SELFCOLLIDE_ACCELERATION,
+ SELFCOLLIDE_PARTICLES,
+ NUMZONES
+ };
+};
+
+// forward declarations; layouts are defined elsewhere and shared with the kernel
+struct CuClothData;
+struct CuFrameData;
+
+// data of all cloth instances, one block per instance
+struct CuKernelData
+{
+ // pointer to atomic variable
+ // (device-side work queue: blocks atomicInc this to claim a cloth index)
+ uint32_t* mClothIndex;
+
+ // array of cloths (length determined by grid dim)
+ const CuClothData* mClothData;
+
+ // frame data per cloth
+ // (read and written back by the kernel each frame)
+ CuFrameData* mFrameData;
+
+ // device profile event buffer and id offset; may be null/0 when profiling is off
+ void* mProfileBuffer;
+ uint32_t mProfileBaseId;
+};
+
+// names used by the host to locate the kernel data symbol and the __global__
+// entry point in the compiled module (implemented in CuSolverKernel.cu)
+const char* getKernelDataName();
+const char* getKernelFunctionName();
+}
+}
diff --git a/NvCloth/src/dx/DxBatchedVector.h b/NvCloth/src/dx/DxBatchedVector.h
new file mode 100644
index 0000000..674586e
--- /dev/null
+++ b/NvCloth/src/dx/DxBatchedVector.h
@@ -0,0 +1,335 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2017 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#pragma once
+
+#include "DxDeviceVector.h"
+#include "NvCloth/Allocator.h"
+
+namespace nv
+{
+namespace cloth
+{
+
+class DxContextManagerCallback;
+
+// forward declarations
+template <typename>
+class DxBatchedVector;
+
+// array of vectors stored in one dx device buffer
+// DxBatchedStorage sub-allocates many DxBatchedVector<T> views out of a single
+// ID3D11Buffer. Views occupy contiguous [mOffset, mOffset+mCapacity) element
+// ranges; growing or removing one view slides the offsets of all views behind
+// it (see replace()/update()). Mapping is refcounted over the whole buffer.
+template <typename T>
+class DxBatchedStorage
+{
+ static const uint32_t SizeOfT = sizeof(T);
+
+ friend class DxBatchedVector<T>;
+
+ public:
+ DxBatchedStorage(DxContextManagerCallback* manager, const DxBufferFlags& flags = DxDefaultBufferPolicy())
+ : mBuffer(manager, flags), mSize(0), mMapRefCount(0), mMapPointer(0)
+ {
+ }
+
+ // all views must be destroyed (and the buffer unmapped) before the storage
+ ~DxBatchedStorage()
+ {
+ NV_CLOTH_ASSERT(mViews.empty());
+ NV_CLOTH_ASSERT(!mMapRefCount);
+ }
+
+ private: // functions used by DxBatchedVector
+ // registers a new (empty) view at the current end of the storage
+ void add(DxBatchedVector<T>* view)
+ {
+ mViews.pushBack(view);
+ view->mOffset = mSize;
+ }
+
+ // grows the view's slice in place to 'capacity' elements, shifting the
+ // offsets of all later views
+ void reserve(DxBatchedVector<T>* view, uint32_t capacity)
+ {
+ if (view->mCapacity >= capacity)
+ return;
+ uint32_t index = view->mOffset + view->mSize;
+ uint32_t delta = capacity - view->mCapacity;
+ // null begin with (nullptr + delta) end requests 'delta' uninitialized
+ // elements without uploading data
+ // NOTE(review): pointer arithmetic on a null pointer is technically UB;
+ // works on the targeted compilers but worth an explicit size overload
+ replace(index, index, nullptr, (T*)nullptr + delta);
+ update(view, delta);
+ view->mCapacity = capacity;
+ }
+
+ // replaces the view's contents with [data, data+newSize), growing its
+ // slice (and shifting later views) when newSize exceeds its capacity;
+ // data may be null to only resize
+ void assign(DxBatchedVector<T>* view, const T* data, uint32_t newSize)
+ {
+ uint32_t offset = view->mOffset;
+ uint32_t oldSize = std::min(newSize, view->mCapacity);
+ replace(offset, offset + oldSize, data, data + newSize);
+ update(view, newSize - oldSize);
+ view->mSize = newSize;
+ if (newSize > view->mCapacity)
+ view->mCapacity = newSize;
+ }
+
+ // releases the view's slice and unregisters the view
+ void remove(DxBatchedVector<T>* view)
+ {
+ uint32_t offset = view->mOffset;
+ uint32_t capacity = view->mCapacity;
+ replace(offset, offset + capacity, nullptr, nullptr);
+ update(view, -int32_t(capacity));
+ DxBatchedVector<T>** it = mViews.find(view);
+ mViews.remove(uint32_t(it - mViews.begin()));
+ }
+
+ // refcounted map of the whole underlying buffer
+ T* map(D3D11_MAP mapType = D3D11_MAP_READ_WRITE)
+ {
+ if (!mMapRefCount++)
+ mMapPointer = mBuffer.map(mapType);
+ return mMapPointer;
+ }
+
+ void unmap()
+ {
+ if (!--mMapRefCount)
+ {
+ mBuffer.unmap();
+ mMapPointer = 0;
+ }
+ }
+
+ private: // internal functions
+ // not updating mSize!
+ // splices element range [first, last) out and the [begin, end) payload in:
+ // when the total size changes, allocates a new buffer and copies the
+ // prefix/suffix on the device, then uploads the payload (if any) via
+ // UpdateSubresource (DEFAULT usage) or a mapped memcpy
+ void replace(uint32_t first, uint32_t last, const T* begin, const T* end)
+ {
+ uint32_t tail = first + uint32_t(end - begin);
+ if (uint32_t newSize = tail == last ? 0 : mSize + tail - last)
+ {
+ // Optimization: dx11.1 would allow in place copies
+ // with ID3D11DeviceContext1::CopySubresourceRegion1
+
+ DxBuffer<T> buffer = DxBuffer<T>(mBuffer.mManager, mBuffer);
+ buffer.reserve(newSize);
+
+ // copy prefix [0, first)
+ if (0 < first)
+ {
+ NV_CLOTH_ASSERT(!mMapRefCount);
+ CD3D11_BOX box(0, 0, 0, first * SizeOfT, 1, 1);
+ mBuffer.context()->CopySubresourceRegion(buffer.mBuffer, 0, 0, 0, 0, mBuffer.mBuffer, 0, &box);
+ }
+
+ // copy suffix [last, mSize) to its new position at 'tail'
+ if (last < mSize)
+ {
+ NV_CLOTH_ASSERT(!mMapRefCount);
+ CD3D11_BOX box(last * SizeOfT, 0, 0, mSize * SizeOfT, 1, 1);
+ mBuffer.context()->CopySubresourceRegion(buffer.mBuffer, 0, tail * SizeOfT, 0, 0, mBuffer.mBuffer, 0,
+ &box);
+ }
+
+ physx::shdfnd::swap(mBuffer, buffer);
+ }
+
+ // upload the new payload into [first, tail)
+ if (begin && end > begin)
+ {
+ if (mBuffer.mUsage == D3D11_USAGE_DEFAULT)
+ {
+ NV_CLOTH_ASSERT(!mMapRefCount);
+ CD3D11_BOX box(first * SizeOfT, 0, 0, tail * SizeOfT, 1, 1);
+ mBuffer.context()->UpdateSubresource(mBuffer.mBuffer, 0, &box, begin, 0, 0);
+ }
+ else
+ {
+ memcpy(map(D3D11_MAP_WRITE) + first, begin, uint32_t(end - begin) * SizeOfT);
+ unmap();
+ }
+ }
+ }
+
+ // shifts the offsets of all views at or behind 'view' by 'delta' elements
+ // and adjusts the total size
+ void update(DxBatchedVector<T>* view, int32_t delta)
+ {
+ const uint32_t offset = view->mOffset;
+ DxBatchedVector<T>** it = mViews.begin();
+ for (uint32_t i = mViews.size(); 0 < i--;)
+ {
+ if (it[i] != view && it[i]->mOffset >= offset)
+ it[i]->mOffset += delta;
+ }
+ mSize += delta;
+ }
+
+ public:
+ DxBuffer<T> mBuffer; // the single device buffer backing all views
+ uint32_t mSize; // total number of elements across all views
+ typename Vector<DxBatchedVector<T>*>::Type mViews; // registered views
+ uint32_t mMapRefCount; // nested map() depth
+ T* mMapPointer; // base pointer while mapped, null otherwise
+};
+
+// Vector-like view over a contiguous element range inside a DxBatchedStorage.
+// All element data lives in the storage's single ID3D11Buffer; this class only
+// tracks offset/size/capacity and forwards mutations to the storage, which may
+// shift this view's mOffset when sibling views grow or shrink.
+template <typename T>
+class DxBatchedVector
+{
+ friend class DxBatchedStorage<T>;
+
+ public:
+ typedef T ValueType;
+
+ // registers an empty view at the end of 'array'
+ DxBatchedVector(DxBatchedStorage<T>& array) : mStorage(array), mSize(0), mCapacity(0)
+ {
+ mStorage.add(this);
+ }
+
+ ~DxBatchedVector()
+ {
+ mStorage.remove(this);
+ }
+
+ // copy: allocates a slice in the same storage and copies the elements
+ // on the device (no CPU round-trip)
+ DxBatchedVector(const DxBatchedVector& other) : mStorage(other.mStorage), mSize(0), mCapacity(0)
+ {
+ mStorage.add(this);
+ mStorage.reserve(this, other.mCapacity);
+
+ CD3D11_BOX box(other.mOffset * sizeof(T), 0, 0, (other.mOffset + other.size()) * sizeof(T), 1, 1);
+ mStorage.mBuffer.context()->CopySubresourceRegion(buffer(), 0, mOffset * sizeof(T), 0, 0, other.buffer(), 0,
+ &box);
+
+ mSize = other.size();
+ }
+
+ // upload from a host-side array (resizes as needed)
+ template <typename Alloc>
+ DxBatchedVector& operator = (const physx::shdfnd::Array<T, Alloc>& other)
+ {
+ assign(other.begin(), other.end());
+ return *this;
+ }
+
+ // device-to-device copy; sizes must already match (no resize support here)
+ DxBatchedVector& operator = (const DxBatchedVector& other)
+ {
+ NV_CLOTH_ASSERT(mSize == other.size()); // current limitation
+ NV_CLOTH_ASSERT(!mStorage.mMapRefCount);
+
+ CD3D11_BOX box(other.mOffset * sizeof(T), 0, 0, (other.mOffset + other.size()) * sizeof(T), 1, 1);
+ mStorage.mBuffer.context()->CopySubresourceRegion(buffer(), 0, mOffset * sizeof(T), 0, 0, other.buffer(), 0,
+ &box);
+
+ return *this;
+ }
+
+ // device-to-device copy from a stand-alone device vector; sizes must match
+ DxBatchedVector& operator = (const DxDeviceVector<T>& other)
+ {
+ NV_CLOTH_ASSERT(mSize == other.size()); // current limitation
+ NV_CLOTH_ASSERT(!mStorage.mMapRefCount);
+
+ mStorage.mBuffer.context()->CopySubresourceRegion(buffer(), 0, mOffset * sizeof(T), 0, 0, other.buffer(), 0,
+ nullptr);
+
+ return *this;
+ }
+
+ uint32_t capacity() const
+ {
+ return mCapacity;
+ }
+ bool empty() const
+ {
+ return !mSize;
+ }
+ uint32_t size() const
+ {
+ return mSize;
+ }
+
+ void reserve(uint32_t capacity)
+ {
+ mStorage.reserve(this, capacity);
+ }
+
+ // resizes without uploading data (new elements are uninitialized)
+ void resize(uint32_t size)
+ {
+ mStorage.assign(this, nullptr, size);
+ }
+
+ // replaces contents with the host range [first, last)
+ void assign(const T* first, const T* last)
+ {
+ mStorage.assign(this, first, uint32_t(last - first));
+ }
+
+ // attention: data of this vector starts at mOffset
+ ID3D11Buffer* buffer() const
+ {
+ return mStorage.mBuffer.mBuffer;
+ }
+
+ // maps the shared buffer and returns a pointer to this view's first
+ // element; null when the storage has no buffer yet
+ T* map(D3D11_MAP mapType = D3D11_MAP_READ_WRITE)
+ {
+ return buffer() ? mStorage.map(mapType) + mOffset : 0;
+ }
+
+ void unmap()
+ {
+ mStorage.unmap();
+ }
+
+ // common interface with DxDeviceVector for DxVectorMap
+ DxContextManagerCallback* manager() const
+ {
+ return mStorage.mBuffer.mManager;
+ }
+
+ // O(1) swap of the two views' ranges; both must live in the same storage
+ // NOTE(review): uses PX_ASSERT while the rest of the file uses
+ // NV_CLOTH_ASSERT — consider unifying
+ void swap(DxBatchedVector<T>& other)
+ {
+ PX_ASSERT(&mStorage == &other.mStorage);
+ physx::shdfnd::swap(mOffset, other.mOffset);
+ physx::shdfnd::swap(mSize, other.mSize);
+ physx::shdfnd::swap(mCapacity, other.mCapacity);
+ // alternative to running through all elements in DxBatchedStorage::update()
+ // however, swap should be O(1) and is used more frequently than reserve/add/remove
+ // nvidia::swap(*mStorage.mViews.find(&left), *other.mStorage.mViews.find(&right));
+ }
+
+ void clear()
+ {
+ //TODO: make more efficient impl.
+ // swap with an empty temporary; the temporary's destructor releases
+ // this view's old slice from the storage
+ DxBatchedVector<T> temp(mStorage);
+ this->swap(temp);
+ }
+
+ public:
+ DxBatchedStorage<T>& mStorage; // owning storage (shared buffer)
+ uint32_t mOffset, mSize, mCapacity; // element range within the storage
+};
+
+} // namespace cloth
+
+} // namespace nv
+
+namespace physx
+{
+namespace shdfnd
+{
+// shdfnd::swap overload so generic code swapping batched vectors uses the
+// O(1) member swap instead of copy-construction through a temporary
+template <typename T>
+void swap(nv::cloth::DxBatchedVector<T>& left, nv::cloth::DxBatchedVector<T>& right)
+{
+ left.swap(right);
+}
+}
+}
diff --git a/NvCloth/src/dx/DxCheckSuccess.h b/NvCloth/src/dx/DxCheckSuccess.h
new file mode 100644
index 0000000..ce8540c
--- /dev/null
+++ b/NvCloth/src/dx/DxCheckSuccess.h
@@ -0,0 +1,63 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2017 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#pragma once
+
+#define NOMINMAX
+#pragma warning(disable : 4668 4917 4365 4061 4005)
+#if NV_XBOXONE
+#include <d3d11_x.h>
+#else
+#include <d3d11.h>
+#endif
+
+namespace nv
+{
+namespace cloth
+{
+// implemented in DxFactory.cpp
+void checkSuccessImpl(long, const char*, const int);
+}
+
+// safe dx calls
+#define checkSuccess(err) cloth::checkSuccessImpl(err, __FILE__, __LINE__)
+}
+
+/*
+#define NOMINMAX
+#include "d3dcommon.h"
+
+template<int length>
+inline void setDebugObjectName(ID3D11DeviceChild* resource, const char (&name)[length])
+{
+#if PX_DEBUG
+ resource->SetPrivateData(WKPDID_D3DDebugObjectName, length - 1, name);
+#endif
+}
+*/ \ No newline at end of file
diff --git a/NvCloth/src/dx/DxCloth.cpp b/NvCloth/src/dx/DxCloth.cpp
new file mode 100644
index 0000000..f87943e
--- /dev/null
+++ b/NvCloth/src/dx/DxCloth.cpp
@@ -0,0 +1,533 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2017 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#include "DxCloth.h"
+#include "DxFabric.h"
+#include "DxFactory.h"
+#include "DxContextLock.h"
+#include "DxClothData.h"
+#include "DxSolver.h"
+#include "../TripletScheduler.h"
+#include "../ClothBase.h"
+#include <foundation/PxMat44.h>
+#include <PsFoundation.h>
+
+#if NV_CLOTH_ENABLE_DX11
+
+using namespace physx;
+
+namespace nv
+{
+namespace cloth
+{
+PhaseConfig transform(const PhaseConfig&); // from PhaseConfig.cpp
+}
+}
+
+using namespace nv;
+
+namespace
+{
+bool isSelfCollisionEnabled(const cloth::DxCloth& cloth)
+{
+ return std::min(cloth.mSelfCollisionDistance, -cloth.mSelfCollisionLogStiffness) > 0.0f;
+}
+}
+
// Creates a cloth from a fabric and its initial particle positions.
// All per-cloth device buffers are sub-allocated from the factory's shared
// (batched) storage; particles are stored twice (current + previous frame).
cloth::DxCloth::DxCloth(DxFactory& factory, DxFabric& fabric, Range<const PxVec4> particles)
: DxContextLock(factory) // holds the D3D context for the scope of construction
, mFactory(factory)
, mFabric(fabric)
, mClothDataDirty(false)
, mClothCostDirty(true)
, mNumParticles(uint32_t(particles.size()))
, mDeviceParticlesDirty(true) // host copy is authoritative until first upload
, mHostParticlesDirty(false)
, mMotionConstraints(mFactory.mMotionConstraints)
, mSeparationConstraints(mFactory.mSeparationConstraints)
, mParticles(mFactory.mParticles)
, mParticlesHostCopy(mFactory.mParticlesHostCopy)
, mParticlesMapPointer(0)
, mParticlesMapRefCount(0)
, mPhaseConfigs(mFactory.mPhaseConfigs)
, mParticleAccelerations(mFactory.mParticleAccelerations)
, mCapsuleIndices(mFactory.mCapsuleIndices)
, mStartCollisionSpheres(mFactory.mCollisionSpheres)
, mTargetCollisionSpheres(mFactory.mCollisionSpheres)
, mConvexMasks(mFactory.mConvexMasks)
, mStartCollisionPlanes(mFactory.mCollisionPlanes)
, mTargetCollisionPlanes(mFactory.mCollisionPlanes)
, mStartCollisionTriangles(mFactory.mCollisionTriangles)
, mTargetCollisionTriangles(mFactory.mCollisionTriangles)
, mVirtualParticleSetSizes(mFactory.mContextManager)
, mVirtualParticleIndices(mFactory.mContextManager)
, mVirtualParticleWeights(mFactory.mContextManager)
, mRestPositions(mFactory.mRestPositions)
, mSelfCollisionIndices(mFactory.mSelfCollisionIndices)
, mSelfCollisionParticles(mFactory.mSelfCollisionParticles)
, mSelfCollisionData(mFactory.mSelfCollisionData)
, mInitSelfCollisionData(false)
, mSharedMemorySize(0)
, mUserData(0)
{
	NV_CLOTH_ASSERT(!particles.empty());
	NV_CLOTH_ASSERT(particles.size() == fabric.getNumParticles());

	// common initialization shared with the other cloth backends (ClothBase.h)
	initialize(*this, particles.begin(), particles.end());

	// host staging buffer holds current and previous positions back to back;
	// both start out identical (zero initial velocity)
	mParticlesHostCopy.resize(2 * mNumParticles);
	PxVec4* pIt = mParticlesHostCopy.map(D3D11_MAP_WRITE);
	memcpy(pIt, particles.begin(), mNumParticles * sizeof(PxVec4));
	memcpy(pIt + mNumParticles, particles.begin(), mNumParticles * sizeof(PxVec4));
	mParticlesHostCopy.unmap();

	mParticles.resize(2 * mNumParticles);
	mFabric.incRefCount(); // the cloth keeps its fabric alive

	DxContextLock::release();
}
+
// Copy-constructs a cloth within the same factory. Device buffers are
// copy-constructed (sharing the factory's batched storage); the remaining
// simulation state is transferred by copy() (see ClothClone.h).
cloth::DxCloth::DxCloth(DxFactory& factory, const DxCloth& cloth)
: DxContextLock(factory)
, mFactory(factory)
, mFabric(cloth.mFabric)
, mNumParticles(cloth.mNumParticles)
, mParticles(cloth.mParticles)
, mParticlesHostCopy(cloth.mParticlesHostCopy)
, mParticlesMapPointer(0) // the new cloth starts out unmapped
, mParticlesMapRefCount(0)
, mDeviceParticlesDirty(cloth.mDeviceParticlesDirty)
, mHostParticlesDirty(cloth.mHostParticlesDirty)
, mPhaseConfigs(cloth.mPhaseConfigs)
, mHostPhaseConfigs(cloth.mHostPhaseConfigs)
, mMotionConstraints(cloth.mMotionConstraints)
, mSeparationConstraints(cloth.mSeparationConstraints)
, mParticleAccelerations(cloth.mParticleAccelerations)
, mParticleAccelerationsHostCopy(cloth.mParticleAccelerationsHostCopy)
, mCapsuleIndices(cloth.mCapsuleIndices)
, mStartCollisionSpheres(cloth.mStartCollisionSpheres)
, mTargetCollisionSpheres(cloth.mTargetCollisionSpheres)
, mConvexMasks(cloth.mConvexMasks)
, mStartCollisionPlanes(cloth.mStartCollisionPlanes)
, mTargetCollisionPlanes(cloth.mTargetCollisionPlanes)
, mStartCollisionTriangles(cloth.mStartCollisionTriangles)
, mTargetCollisionTriangles(cloth.mTargetCollisionTriangles)
, mVirtualParticleSetSizes(cloth.mVirtualParticleSetSizes)
, mVirtualParticleIndices(cloth.mVirtualParticleIndices)
, mVirtualParticleWeights(cloth.mVirtualParticleWeights)
, mRestPositions(cloth.mRestPositions)
, mSelfCollisionIndices(cloth.mSelfCollisionIndices)
, mSelfCollisionParticles(cloth.mSelfCollisionParticles)
, mSelfCollisionData(cloth.mSelfCollisionData)
, mInitSelfCollisionData(cloth.mInitSelfCollisionData)
, mSharedMemorySize(cloth.mSharedMemorySize)
, mUserData(cloth.mUserData)
{
	// copies the remaining (non-buffer) simulation state from the source
	copy(*this, cloth);

	mFabric.incRefCount();

	DxContextLock::release();
}
+
// Destructor is intentionally non-virtual (see header). The device context
// must be re-acquired so the batched sub-buffers can be released safely.
cloth::DxCloth::~DxCloth()
{
	DxContextLock::acquire();

	mFabric.decRefCount();
}
+
// Marks the per-cloth GPU constant data as stale; it is rebuilt by
// updateClothData() before the next simulation step.
void cloth::DxCloth::notifyChanged()
{
	mClothDataDirty = true;
}
+
// Refreshes the DxClothData snapshot consumed by the solver kernel.
// Returns true if the data was dirty and has been rebuilt.
// Expects the device context to be acquired by the caller.
bool cloth::DxCloth::updateClothData(DxClothData& clothData)
{
	if (!mClothDataDirty)
	{
		NV_CLOTH_ASSERT(mSharedMemorySize == getSharedMemorySize());
		return false;
	}

	mSharedMemorySize = getSharedMemorySize();

	// lazily allocate the self-collision scratch buffers the first time
	// self collision becomes enabled
	if (mSelfCollisionData.empty() && isSelfCollisionEnabled(*this))
	{
		// when no explicit index subset was set, all particles self-collide
		uint32_t numSelfCollisionIndices =
		    mSelfCollisionIndices.empty() ? mNumParticles : uint32_t(mSelfCollisionIndices.size());

		uint32_t keySize = 2 * numSelfCollisionIndices; // 2x for radix buffer
		uint32_t cellStartSize = (129 + 128 * 128 + 130); // grid cell-start table

		mSelfCollisionParticles.resize(mNumParticles);
		mSelfCollisionData.resize(keySize + cellStartSize);
		// checkSuccess( cuMemsetD32((mSelfCollisionData.begin()
		// + particleSize + keySize).dev(), 0xffffffff, cellStartSize) );

		mInitSelfCollisionData = true; // tells the kernel to clear/init the buffers
	}

	clothData = DxClothData(*this);
	mClothDataDirty = false;

	return true;
}
+
// Computes the amount of GPU shared (group) memory the solver kernel needs
// for this cloth, excluding the particle data itself.
uint32_t cloth::DxCloth::getSharedMemorySize() const
{
	uint32_t numPhases = uint32_t(mPhaseConfigs.size());
	uint32_t numSpheres = uint32_t(mStartCollisionSpheres.size());
	uint32_t numCones = uint32_t(mCapsuleIndices.size());
	uint32_t numPlanes = uint32_t(mStartCollisionPlanes.size());
	uint32_t numConvexes = uint32_t(mConvexMasks.size());
	uint32_t numTriangles = uint32_t(mStartCollisionTriangles.size() / 3); // 3 vertices per triangle

	uint32_t phaseConfigSize = numPhases * sizeof(DxPhaseConfig);

	// previous-frame collision shapes are only kept when needed by
	// continuous collision or friction
	bool storePrevCollisionData = mEnableContinuousCollision || mFriction > 0.0f;
	uint32_t continuousCollisionSize = storePrevCollisionData ? 4 * numSpheres + 10 * numCones : 0;
	continuousCollisionSize += 4 * numCones + numConvexes; // capsule and convex masks
	uint32_t discreteCollisionSize = 4 * numSpheres + std::max(10 * numCones + 96, 208u);
	discreteCollisionSize = std::max(discreteCollisionSize, std::max(4 * numPlanes, 19 * numTriangles));

	// scratch memory for prefix sum and histogram
	uint32_t selfCollisionSize = isSelfCollisionEnabled(*this) ? 544 : 0;

	// see CuSolverKernel.cu::gSharedMemory comment for details
	return phaseConfigSize + sizeof(float) * (continuousCollisionSize + std::max(selfCollisionSize, discreteCollisionSize));
}
+
+void cloth::DxCloth::setPhaseConfig(Range<const PhaseConfig> configs)
+{
+ mHostPhaseConfigs.assign(configs.begin(), configs.end());
+
+ Vector<DxPhaseConfig>::Type deviceConfigs;
+ deviceConfigs.reserve(configs.size());
+ const PhaseConfig* cEnd = configs.end();
+ for (const PhaseConfig* cIt = configs.begin(); cIt != cEnd; ++cIt)
+ {
+ DxPhaseConfig config;
+
+ config.mStiffness = cIt->mStiffness;
+ config.mStiffnessMultiplier = cIt->mStiffnessMultiplier;
+ config.mCompressionLimit = cIt->mCompressionLimit;
+ config.mStretchLimit = cIt->mStretchLimit;
+
+ uint16_t phaseIndex = cIt->mPhaseIndex;
+ config.mFirstConstraint = mFabric.mFirstConstraintInPhase[phaseIndex];
+ config.mNumConstraints = mFabric.mNumConstraintsInPhase[phaseIndex];
+
+ deviceConfigs.pushBack(config);
+ }
+
+ DxContextLock contextLock(mFactory);
+
+ mPhaseConfigs.assign(deviceConfigs.begin(), deviceConfigs.begin() + deviceConfigs.size());
+}
+
// Lazily allocates the device and host buffers of a constraint set and
// returns the writable host range the caller fills with new target values.
cloth::Range<PxVec4> cloth::DxCloth::push(cloth::DxConstraints& constraints)
{
	if (!constraints.mTarget.capacity())
	{
		DxContextLock contextLock(mFactory);
		constraints.mTarget.reserve(mNumParticles);
	}
	// target is only sized while no host copy exists yet — presumably the
	// first push after creation/clear, so the swap below sees valid data
	// (NOTE(review): confirm against DxConstraints::pop() usage)
	if (constraints.mHostCopy.empty())
		constraints.mTarget.resize(mNumParticles);

	if (constraints.mStart.empty()) // initialize start first
	{
		DxContextLock contextLock(mFactory);
		constraints.mStart.swap(constraints.mTarget);
	}

	if (!constraints.mHostCopy.capacity())
	{
		DxContextLock contextLock(mFactory);
		constraints.mHostCopy.reserve(mNumParticles);
	}
	constraints.mHostCopy.resizeUninitialized(mNumParticles);

	PxVec4* data = &constraints.mHostCopy.front();
	return Range<PxVec4>(data, data + constraints.mHostCopy.size());
}
+
// Releases the device-side start/target buffers of a constraint set.
void cloth::DxCloth::clear(cloth::DxConstraints& constraints)
{
	DxContextLock contextLock(mFactory);
	constraints.mStart.clear();
	constraints.mTarget.clear();
}
+
// Maps the host staging copy of the particles, first downloading from the
// device if the host copy is stale. Mapping is reference counted so nested
// lock/unlock pairs share one map.
void cloth::DxCloth::mapParticles()
{
	if (mHostParticlesDirty)
	{
		DxContextLock contextLock(mFactory);
		mParticlesHostCopy = mParticles; // device -> host copy
		mHostParticlesDirty = false;
	}
	if (0 == mParticlesMapRefCount++)
	{
		DxContextLock contextLock(mFactory);
		mParticlesMapPointer = mParticlesHostCopy.map();
	}
}
+
// Decrements the map refcount and unmaps the host staging buffer when the
// last reference is released.
void cloth::DxCloth::unmapParticles()
{
	if (0 == --mParticlesMapRefCount)
	{
		DxContextLock contextLock(mFactory);
		mParticlesHostCopy.unmap();
		mParticlesMapPointer = 0;
	}
}
+
// Clamps the incoming triangle-vertex range so the total vertex count
// (vertices kept from the current buffer + new ones) stays within the
// solver's shared memory budget.
cloth::Range<const PxVec3> cloth::DxCloth::clampTriangleCount(Range<const PxVec3> range, uint32_t replaceSize)
{
	// clamp to 500 triangles (1500 vertices) to prevent running out of shared memory
	uint32_t removedSize = mStartCollisionTriangles.size() - replaceSize;
	const PxVec3* clamp = range.begin() + 1500 - removedSize;

	if (range.end() > clamp)
	{
		NV_CLOTH_LOG_WARNING("Too many collision triangles specified for cloth, dropping all but first 500.\n");
	}

	return Range<const PxVec3>(range.begin(), std::min(range.end(), clamp));
}
+
+#include "../ClothImpl.h"
+
+namespace nv
+{
+namespace cloth
+{
+
+// ClothImpl<DxCloth>::clone() implemented in DxClothClone.cpp
+
// Number of simulated particles (current frame only; the prev frame copy
// is not counted).
template <>
uint32_t ClothImpl<DxCloth>::getNumParticles() const
{
	return mCloth.mNumParticles;
}
+
// Maps the particle staging buffer; const_cast is needed because mapping
// mutates cloth state (refcount, map pointer) even for read-only access.
template <>
void ClothImpl<DxCloth>::lockParticles() const
{
	const_cast<DxCloth&>(mCloth).mapParticles();
}
+
// Releases one reference on the mapped particle staging buffer.
template <>
void ClothImpl<DxCloth>::unlockParticles() const
{
	const_cast<DxCloth&>(mCloth).unmapParticles();
}
+
// Writable view of the current-frame particles; the device copy is marked
// dirty because the caller may modify them through the mapped pointer.
template <>
MappedRange<PxVec4> ClothImpl<DxCloth>::getCurrentParticles()
{
	mCloth.wakeUp();
	lockParticles();
	mCloth.mDeviceParticlesDirty = true;
	return getMappedParticles(mCloth.mParticlesMapPointer);
}
+
// Read-only view of the current-frame particles (device copy stays clean).
template <>
MappedRange<const PxVec4> ClothImpl<DxCloth>::getCurrentParticles() const
{
	lockParticles();
	const PxVec4* data = mCloth.mParticlesMapPointer;
	return getMappedParticles(data);
}
+
// Writable view of the previous-frame particles, which are stored directly
// after the current-frame ones in the same buffer.
template <>
MappedRange<PxVec4> ClothImpl<DxCloth>::getPreviousParticles()
{
	mCloth.wakeUp();
	lockParticles();
	mCloth.mDeviceParticlesDirty = true;
	return getMappedParticles(mCloth.mParticlesMapPointer + mCloth.mNumParticles);
}
+
// Read-only view of the previous-frame particles.
template <>
MappedRange<const PxVec4> ClothImpl<DxCloth>::getPreviousParticles() const
{
	lockParticles();
	const PxVec4* data = (const PxVec4*)mCloth.mParticlesMapPointer;
	return getMappedParticles(data + mCloth.mNumParticles);
}
+
// Exposes the raw device buffer to the user. The begin/end "pointers" are
// not dereferenceable: the element offset of this cloth's sub-allocation is
// encoded as a fake pointer relative to null, to be interpreted against
// 'buffer' by the consumer.
template <>
GpuParticles ClothImpl<DxCloth>::getGpuParticles()
{
	ID3D11Buffer* buffer = mCloth.mParticles.buffer();
	PxVec4* offset = (PxVec4*)nullptr + mCloth.mParticles.mOffset;
	GpuParticles result = { offset, offset + mCloth.mNumParticles, buffer };
	return result;
}
+
+template <>
+void ClothImpl<DxCloth>::setPhaseConfig(Range<const PhaseConfig> configs)
+{
+ Vector<PhaseConfig>::Type transformedConfigs;
+ transformedConfigs.reserve(configs.size());
+
+ // transform phase config to use in solver
+ for (; !configs.empty(); configs.popFront())
+ if (configs.front().mStiffness > 0.0f)
+ transformedConfigs.pushBack(transform(configs.front()));
+
+ mCloth.setPhaseConfig(Range<const PhaseConfig>(transformedConfigs.begin(),
+ transformedConfigs.begin() + transformedConfigs.size()));
+ mCloth.notifyChanged();
+ mCloth.wakeUp();
+}
+
// Uploads the subset of particle indices participating in self collision
// and keeps a host mirror (used for cloning, see DxClothClone.cpp).
template <>
void ClothImpl<DxCloth>::setSelfCollisionIndices(Range<const uint32_t> indices)
{
	ContextLockType lock(mCloth.mFactory);
	mCloth.mSelfCollisionIndices.assign(indices.begin(), indices.end());
	mCloth.mSelfCollisionIndicesHost.assign(indices.begin(), indices.end());
	mCloth.notifyChanged();
	mCloth.wakeUp();
}
+
// Number of virtual-particle triplets currently uploaded.
template <>
uint32_t ClothImpl<DxCloth>::getNumVirtualParticles() const
{
	return uint32_t(mCloth.mVirtualParticleIndices.size());
}
+
// Lazily allocates the per-particle acceleration buffers (device + host)
// and returns the writable host range the caller fills in.
template <>
Range<PxVec4> ClothImpl<DxCloth>::getParticleAccelerations()
{
	if (mCloth.mParticleAccelerations.empty())
	{
		DxContextLock contextLock(mCloth.mFactory);
		mCloth.mParticleAccelerations.resize(mCloth.mNumParticles);
	}

	if (!mCloth.mParticleAccelerationsHostCopy.capacity())
	{
		DxContextLock contextLock(mCloth.mFactory);
		mCloth.mParticleAccelerationsHostCopy.reserve(mCloth.mNumParticles);
	}
	// contents are overwritten by the caller, so skip zero-initialization
	mCloth.mParticleAccelerationsHostCopy.resizeUninitialized(mCloth.mNumParticles);

	mCloth.wakeUp();

	PxVec4* data = mCloth.mParticleAccelerationsHostCopy.begin();
	return Range<PxVec4>(data, mCloth.mParticleAccelerationsHostCopy.end());
}
+
// Releases both the device buffer and the host copy of the particle
// accelerations.
template <>
void ClothImpl<DxCloth>::clearParticleAccelerations()
{
	DxContextLock contextLock(mCloth.mFactory);
	mCloth.mParticleAccelerations.clear();
	// swap with a temporary to actually free the host memory
	Vector<PxVec4>::Type().swap(mCloth.mParticleAccelerationsHostCopy);
	mCloth.wakeUp();
}
+
+namespace
+{
+uint32_t calculateNumReplays(const Vector<Vec4u>::Type& triplets, const Vector<uint32_t>::Type setSizes)
+{
+ uint32_t result = 0;
+
+ Vector<Vec4u>::Type::ConstIterator tIt = triplets.begin();
+ Vector<uint32_t>::Type::ConstIterator sIt, sEnd = setSizes.end();
+ uint32_t index = 0;
+ for (sIt = setSizes.begin(); sIt != sEnd; ++sIt, ++index)
+ {
+ Vector<Vec4u>::Type::ConstIterator tEnd = tIt + *sIt, tLast = tIt;
+ while (tLast != tEnd)
+ {
+ uint8_t numConflicts[3][32] = {};
+ uint8_t numReplays[3] = {};
+
+ for (tLast += std::min(ptrdiff_t(32), tEnd - tLast); tIt != tLast; ++tIt)
+ for (int i = 0; i < 3; ++i)
+ numReplays[i] = std::max(numReplays[i], ++numConflicts[i][(*tIt)[i] & 31]);
+
+ result += numReplays[0] + numReplays[1] + numReplays[2];
+ }
+ }
+
+ return result;
+}
+}
+
// Uploads virtual-particle triplets: indices are scheduled into independent
// sets for parallel execution, converted to 16-bit, and the barycentric
// weights are augmented with a normalization factor in w.
template <>
void ClothImpl<DxCloth>::setVirtualParticles(Range<const uint32_t[4]> indices, Range<const PxVec3> weights)
{
	// shuffle indices to form independent SIMD sets
	TripletScheduler scheduler(indices);
	scheduler.warp(mCloth.mNumParticles, 32);

	// convert to 16bit indices
	Vector<Vec4us>::Type hostIndices;
	hostIndices.reserve(indices.size());
	TripletScheduler::ConstTripletIter tIt = scheduler.mTriplets.begin();
	TripletScheduler::ConstTripletIter tEnd = scheduler.mTriplets.end();
	for (; tIt != tEnd; ++tIt)
		hostIndices.pushBack(Vec4us(*tIt));

	// printf("num sets = %u, num replays = %u\n", scheduler.mSetSizes.size(),
	// calculateNumReplays(scheduler.mTriplets, scheduler.mSetSizes));

	// add normalization weight
	Vector<PxVec4>::Type hostWeights;
	hostWeights.reserve(weights.size());
	for (; !weights.empty(); weights.popFront())
	{
		PxVec3 w = reinterpret_cast<const PxVec3&>(weights.front());
		float scale = 1.f / w.magnitudeSquared();
		hostWeights.pushBack(PxVec4(w.x, w.y, w.z, scale));
	}

	DxContextLock contextLock(mCloth.mFactory);

	// todo: 'swap' these to force reallocation?
	mCloth.mVirtualParticleIndices = hostIndices;
	mCloth.mVirtualParticleSetSizes = scheduler.mSetSizes;
	mCloth.mVirtualParticleWeights = hostWeights;

	mCloth.notifyChanged();
	mCloth.wakeUp();
}
+
+} // namespace cloth
+} // namespace nv
+
+#endif // NV_CLOTH_ENABLE_DX11
diff --git a/NvCloth/src/dx/DxCloth.h b/NvCloth/src/dx/DxCloth.h
new file mode 100644
index 0000000..bde2bd6
--- /dev/null
+++ b/NvCloth/src/dx/DxCloth.h
@@ -0,0 +1,229 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2017 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#pragma once
+
+#include "NvCloth/Range.h"
+#include "NvCloth/PhaseConfig.h"
+#include "../MovingAverage.h"
+#include "../IndexPair.h"
+#include "../Vec4T.h"
+#include "DxClothData.h"
+#include "DxContextLock.h"
+#include "DxBatchedVector.h"
+#include <foundation/PxVec4.h>
+#include <foundation/PxVec3.h>
+#include <foundation/PxTransform.h>
+
+namespace nv
+{
+namespace cloth
+{
+
+class DxFabric;
+class DxFactory;
+struct DxClothData;
+
+struct DxConstraints
+{
+ DxConstraints::DxConstraints(DxBatchedStorage<physx::PxVec4>& storage)
+ : mStart(storage), mTarget(storage)
+ {
+ }
+
+ void pop()
+ {
+ if (!mTarget.empty())
+ {
+ mStart.swap(mTarget);
+ mTarget.resize(0);
+ }
+ }
+
+ DxBatchedVector<physx::PxVec4> mStart;
+ DxBatchedVector<physx::PxVec4> mTarget;
+ Vector<physx::PxVec4>::Type mHostCopy;
+};
+
// DX11 cloth instance. It owns no D3D objects directly: all per-cloth GPU
// data lives in DxBatchedVector sub-allocations inside the factory's shared
// storage. Inherits DxContextLock so constructors/destructor can acquire
// the device context. Most members are public and accessed directly by the
// solver and the ClothImpl<DxCloth> specializations.
class DxCloth : protected DxContextLock
{
	DxCloth(const DxCloth&); // not implemented
	DxCloth& operator = (const DxCloth&); // not implemented

  public:
	typedef DxFactory FactoryType;
	typedef DxFabric FabricType;
	typedef DxContextLock ContextLockType;

	template <typename>
	struct MapTraits;

	typedef DxVectorMap<DxBatchedVector<physx::PxVec3> > MappedVec3fVectorType;
	typedef DxVectorMap<DxBatchedVector<physx::PxVec4> > MappedVec4fVectorType;
	typedef DxVectorMap<DxBatchedVector<IndexPair> > MappedIndexVectorType;
	typedef DxVectorMap<DxBatchedVector<uint32_t> > MappedMaskVectorType;

	DxCloth(DxFactory&, DxFabric&, Range<const physx::PxVec4>);
	DxCloth(DxFactory&, const DxCloth&);
	~DxCloth(); // not virtual on purpose

  public:
	// sleep state: asleep once enough consecutive tests passed
	bool isSleeping() const
	{
		return mSleepPassCounter >= mSleepAfterCount;
	}
	void wakeUp()
	{
		mSleepPassCounter = 0;
	}

	void notifyChanged();

	bool updateClothData(DxClothData&); // expects acquired context
	uint32_t getSharedMemorySize() const; // without particle data

	// expects transformed configs, doesn't call notifyChanged()
	void setPhaseConfig(Range<const PhaseConfig>);

	Range<physx::PxVec4> push(DxConstraints&);
	void clear(DxConstraints&);

	void mapParticles();
	void unmapParticles();

	Range<const physx::PxVec3> clampTriangleCount(Range<const physx::PxVec3>, uint32_t);

  public:
	DxFactory& mFactory;
	DxFabric& mFabric;

	bool mClothDataDirty; // DxClothData snapshot needs rebuilding
	bool mClothCostDirty;

	// particle data
	uint32_t mNumParticles;
	DxBatchedVector<physx::PxVec4> mParticles; // cur, prev
	DxBatchedVector<physx::PxVec4> mParticlesHostCopy;
	physx::PxVec4* mParticlesMapPointer; // non-null while the host copy is mapped
	uint32_t mParticlesMapRefCount; // nested map/unmap counter

	bool mDeviceParticlesDirty; // host copy newer than device buffer
	bool mHostParticlesDirty; // device buffer newer than host copy

	physx::PxVec3 mParticleBoundsCenter;
	physx::PxVec3 mParticleBoundsHalfExtent;

	physx::PxVec3 mGravity;
	physx::PxVec3 mLogDamping;
	physx::PxVec3 mLinearLogDrag;
	physx::PxVec3 mAngularLogDrag;
	physx::PxVec3 mLinearInertia;
	physx::PxVec3 mAngularInertia;
	physx::PxVec3 mCentrifugalInertia;
	float mSolverFrequency;
	float mStiffnessFrequency;

	physx::PxTransform mTargetMotion;
	physx::PxTransform mCurrentMotion;
	physx::PxVec3 mLinearVelocity;
	physx::PxVec3 mAngularVelocity;

	float mPrevIterDt;
	MovingAverage mIterDtAvg;

	DxBatchedVector<DxPhaseConfig> mPhaseConfigs;
	Vector<PhaseConfig>::Type mHostPhaseConfigs; // mirror used for cloning

	// tether constraints stuff
	float mTetherConstraintLogStiffness;
	float mTetherConstraintScale;

	// motion constraints stuff
	DxConstraints mMotionConstraints;
	float mMotionConstraintScale;
	float mMotionConstraintBias;
	float mMotionConstraintLogStiffness;

	// separation constraints stuff
	DxConstraints mSeparationConstraints;

	// particle acceleration stuff
	DxBatchedVector<physx::PxVec4> mParticleAccelerations;
	Vector<physx::PxVec4>::Type mParticleAccelerationsHostCopy;

	// wind
	physx::PxVec3 mWind;
	float mDragLogCoefficient;
	float mLiftLogCoefficient;

	// collision stuff
	DxBatchedVector<IndexPair> mCapsuleIndices;
	DxBatchedVector<physx::PxVec4> mStartCollisionSpheres;
	DxBatchedVector<physx::PxVec4> mTargetCollisionSpheres;
	DxBatchedVector<uint32_t> mConvexMasks;
	DxBatchedVector<physx::PxVec4> mStartCollisionPlanes;
	DxBatchedVector<physx::PxVec4> mTargetCollisionPlanes;
	DxBatchedVector<physx::PxVec3> mStartCollisionTriangles;
	DxBatchedVector<physx::PxVec3> mTargetCollisionTriangles;
	bool mEnableContinuousCollision;
	float mCollisionMassScale;
	float mFriction;

	// virtual particles
	DxDeviceVector<uint32_t> mVirtualParticleSetSizes;
	DxDeviceVector<Vec4us> mVirtualParticleIndices;
	DxDeviceVector<physx::PxVec4> mVirtualParticleWeights;

	// self collision
	float mSelfCollisionDistance;
	float mSelfCollisionLogStiffness;

	DxBatchedVector<physx::PxVec4> mRestPositions;
	DxBatchedVector<uint32_t> mSelfCollisionIndices;
	Vector<uint32_t>::Type mSelfCollisionIndicesHost; // mirror used for cloning

	DxBatchedVector<physx::PxVec4> mSelfCollisionParticles;
	// 2x(key) per particle + cellStart (8322)
	DxBatchedVector<uint32_t> mSelfCollisionData;

	bool mInitSelfCollisionData; // tells the kernel to (re)initialize the buffers above

	// sleeping
	uint32_t mSleepTestInterval; // how often to test for movement
	uint32_t mSleepAfterCount; // number of tests to pass before sleep
	float mSleepThreshold; // max movement delta to pass test
	uint32_t mSleepPassCounter; // how many tests passed
	uint32_t mSleepTestCounter; // how many iterations since tested

	uint32_t mSharedMemorySize; // cached result of getSharedMemorySize()

	void* mUserData;
};
+}
+}
diff --git a/NvCloth/src/dx/DxClothClone.cpp b/NvCloth/src/dx/DxClothClone.cpp
new file mode 100644
index 0000000..3f41cdc
--- /dev/null
+++ b/NvCloth/src/dx/DxClothClone.cpp
@@ -0,0 +1,94 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2017 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#include "ClothClone.h"
+
+#include "DxFactory.h"
+#include "DxFabric.h"
+#include "DxCloth.h"
+
+#if NV_CLOTH_ENABLE_DX11
+
+using namespace physx;
+
+namespace nv
+{
+namespace cloth
+{
// Accessor used by the generic clone/convert code in ClothClone.h.
Range<const PhaseConfig> getPhaseConfigs(const DxCloth& cloth)
{
	return makeRange(cloth.mHostPhaseConfigs);
}
// Mutator used by the generic clone/convert code in ClothClone.h.
void setPhaseConfigs(DxCloth& cloth, Range<const PhaseConfig> phaseConfigs)
{
	cloth.setPhaseConfig(phaseConfigs);
}
// Accessor used by the generic clone/convert code (reads the host mirror).
Range<const PxVec4> getParticleAccelerations(const DxCloth& cloth)
{
	return makeRange(cloth.mParticleAccelerationsHostCopy);
}
// Accessor used by the generic clone/convert code (reads the host mirror).
Range<const uint32_t> getSelfCollisionIndices(const DxCloth& cloth)
{
	return makeRange(cloth.mSelfCollisionIndicesHost);
}
+
// Clones this cloth into (possibly) a different factory. Same-factory
// clones copy-construct directly; cross-factory clones are converted per
// target platform. CUDA targets are not supported from the DX backend.
template <>
Cloth* ClothImpl<DxCloth>::clone(Factory& factory) const
{
	if (&mCloth.mFactory == &factory)
		return NV_CLOTH_NEW(ClothImpl<DxCloth>)(factory, *this); // copy construct directly

	switch(factory.getPlatform())
	{
	case Platform::CPU:
		return convertCloth(static_cast<SwFactory&>(factory), *this);
	case Platform::DX11:
		return convertCloth(static_cast<DxFactory&>(factory), *this);
	case Platform::CUDA: // not supported
	default:
		return nullptr;
	}
}
+
// Clones any cloth into this DX factory. CPU cloths are converted directly
// (subject to the shared-memory particle limit); cloths from other
// factories are delegated to their own clone() implementation.
Cloth* DxFactory::clone(const Cloth& cloth)
{
	if (cloth.getFactory().getPlatform() == Platform::CPU)
	{
		if (cloth.getNumParticles() > 2025 /* see DxSolverKernel.hlsl gCurParticles */)
			return nullptr; // can only fit 2025 particles in dx shared memory
		return convertCloth(*this, static_cast<const SwClothImpl&>(cloth));
	}

	return cloth.clone(*this);
}
+
+} // namespace cloth
+} // namespace nv
+
+#endif // NV_CLOTH_ENABLE_DX11
diff --git a/NvCloth/src/dx/DxClothData.cpp b/NvCloth/src/dx/DxClothData.cpp
new file mode 100644
index 0000000..2625eac
--- /dev/null
+++ b/NvCloth/src/dx/DxClothData.cpp
@@ -0,0 +1,177 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2017 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#include "DxClothData.h"
+#include "DxCloth.h"
+#include "DxFabric.h"
+#include "DxContextLock.h"
+#include "../IterationState.h"
+
+#if NV_CLOTH_ENABLE_DX11
+
+using namespace nv;
+
+cloth::DxTether::DxTether(uint16_t anchor, uint16_t length) : mAnchor(anchor), mLength(length)
+{
+}
+
+cloth::DxClothData::DxClothData(DxCloth& cloth)
+{
+ mNumParticles = cloth.mNumParticles;
+ mParticlesOffset = cloth.mParticles.mOffset;
+
+ mNumPhases = cloth.mPhaseConfigs.size();
+ mPhaseConfigOffset = cloth.mPhaseConfigs.mOffset;
+ mConstraintOffset = cloth.mFabric.mConstraints.mOffset;
+ mStiffnessOffset = cloth.mFabric.mStiffnessValues.empty() ? -1: cloth.mFabric.mStiffnessValues.mOffset;
+
+ mNumTethers = cloth.mFabric.mTethers.size();
+ mTetherOffset = cloth.mFabric.mTethers.mOffset;
+ mTetherConstraintScale = cloth.mTetherConstraintScale * cloth.mFabric.mTetherLengthScale;
+
+ mMotionConstraintScale = cloth.mMotionConstraintScale;
+ mMotionConstraintBias = cloth.mMotionConstraintBias;
+
+ mNumCapsules = cloth.mCapsuleIndices.size();
+ mCapsuleOffset = cloth.mCapsuleIndices.mOffset;
+ mNumSpheres = cloth.mStartCollisionSpheres.size();
+
+ mNumPlanes = cloth.mStartCollisionPlanes.size();
+ mNumConvexes = cloth.mConvexMasks.size();
+ mConvexMasksOffset = cloth.mConvexMasks.mOffset;
+
+ mNumCollisionTriangles = uint32_t(cloth.mStartCollisionTriangles.size()) / 3;
+
+ mEnableContinuousCollision = cloth.mEnableContinuousCollision;
+ mCollisionMassScale = cloth.mCollisionMassScale;
+ mFrictionScale = cloth.mFriction;
+
+ mSelfCollisionDistance = cloth.mSelfCollisionDistance;
+ mNumSelfCollisionIndices = cloth.mSelfCollisionIndices.empty() ? mNumParticles : cloth.mSelfCollisionIndices.size();
+ mSelfCollisionIndicesOffset = cloth.mSelfCollisionIndices.empty() ? uint32_t(-1) : cloth.mSelfCollisionIndices.mOffset;
+ mSelfCollisionParticlesOffset = cloth.mSelfCollisionParticles.mOffset;
+ mSelfCollisionDataOffset = cloth.mSelfCollisionData.mOffset;
+
+ mSleepTestInterval = cloth.mSleepTestInterval;
+ mSleepAfterCount = cloth.mSleepAfterCount;
+ mSleepThreshold = cloth.mSleepThreshold;
+}
+
+cloth::DxFrameData::DxFrameData(DxCloth& cloth, uint32_t numSharedPositions, const IterationState<Simd4f>& state, uint32_t firstIteration)
+{
+ mDeviceParticlesDirty = cloth.mDeviceParticlesDirty;
+
+ mNumSharedPositions = numSharedPositions;
+
+ mIterDt = state.mIterDt;
+
+ mFirstIteration = firstIteration;
+ mNumIterations = state.mRemainingIterations;
+
+ Simd4f logStiffness = simd4f(0.0f, cloth.mSelfCollisionLogStiffness, cloth.mMotionConstraintLogStiffness,
+ cloth.mTetherConstraintLogStiffness);
+ Simd4f stiffnessExponent = simd4f(cloth.mStiffnessFrequency * mIterDt);
+ Simd4f stiffness = gSimd4fOne - exp2(logStiffness * stiffnessExponent);
+
+ mTetherConstraintStiffness = array(stiffness)[3];
+ mMotionConstraintStiffness = array(stiffness)[2];
+ mSelfCollisionStiffness = array(stiffness)[1];
+
+ mStartSphereOffset = cloth.mStartCollisionSpheres.mOffset;
+ mTargetSphereOffset =
+ cloth.mTargetCollisionSpheres.empty() ? mStartSphereOffset : cloth.mTargetCollisionSpheres.mOffset;
+
+ mStartCollisionPlaneOffset = cloth.mStartCollisionPlanes.mOffset;
+ mTargetCollisionPlaneOffset =
+ cloth.mTargetCollisionPlanes.empty() ? mStartCollisionPlaneOffset : cloth.mTargetCollisionPlanes.mOffset;
+
+
+ mStartCollisionTrianglesOffset = cloth.mStartCollisionTriangles.mOffset;
+ mTargetCollisionTrianglesOffset =
+ cloth.mTargetCollisionTriangles.empty() ? mStartCollisionTrianglesOffset : cloth.mTargetCollisionTriangles.mOffset;
+
+ for (uint32_t i = 0; i < 3; ++i)
+ {
+ float c = array(cloth.mParticleBoundsCenter)[i];
+ float r = array(cloth.mParticleBoundsHalfExtent)[i];
+ mParticleBounds[i * 2 + 0] = r + c;
+ mParticleBounds[i * 2 + 1] = r - c;
+ }
+
+ mSleepPassCounter = cloth.mSleepPassCounter;
+ mSleepTestCounter = cloth.mSleepTestCounter;
+
+ mStiffnessExponent = cloth.mStiffnessFrequency * mIterDt;
+
+ mStartMotionConstrainsOffset = cloth.mMotionConstraints.mStart.empty() ? uint32_t(-1) : cloth.mMotionConstraints.mStart.mOffset;
+ mTargetMotionConstrainsOffset = cloth.mMotionConstraints.mTarget.empty() ? mStartMotionConstrainsOffset : cloth.mMotionConstraints.mTarget.mOffset;
+
+ mStartSeparationConstrainsOffset = cloth.mSeparationConstraints.mStart.empty() ? uint32_t(-1) : cloth.mSeparationConstraints.mStart.mOffset;
+ mTargetSeparationConstrainsOffset = cloth.mSeparationConstraints.mTarget.empty() ? mStartSeparationConstrainsOffset : cloth.mSeparationConstraints.mTarget.mOffset;
+
+ mParticleAccelerationsOffset = cloth.mParticleAccelerations.mOffset;
+ mRestPositionsOffset = cloth.mRestPositions.empty() ? uint32_t(-1) : cloth.mRestPositions.mOffset;
+
+ mInitSelfCollisionData = cloth.mInitSelfCollisionData;
+ cloth.mInitSelfCollisionData = false;
+}
+
+namespace
+{
+void copySquareTransposed(float* dst, const float* src)
+{
+ dst[0] = src[0];
+ dst[1] = src[4];
+ dst[2] = src[8];
+ dst[3] = src[1];
+ dst[4] = src[5];
+ dst[5] = src[9];
+ dst[6] = src[2];
+ dst[7] = src[6];
+ dst[8] = src[10];
+}
+}
+
+cloth::DxIterationData::DxIterationData(const IterationState<Simd4f>& state)
+{
+ mIntegrationTrafo[0] = array(state.mPrevBias)[0];
+ mIntegrationTrafo[1] = array(state.mPrevBias)[1];
+ mIntegrationTrafo[2] = array(state.mPrevBias)[2];
+
+ mIntegrationTrafo[3] = array(state.mCurBias)[0];
+ mIntegrationTrafo[4] = array(state.mCurBias)[1];
+ mIntegrationTrafo[5] = array(state.mCurBias)[2];
+
+ copySquareTransposed(mIntegrationTrafo + 6, array(*state.mPrevMatrix));
+ copySquareTransposed(mIntegrationTrafo + 15, array(*state.mCurMatrix));
+
+ mIsTurning = uint32_t(state.mIsTurning);
+}
+
+#endif // NV_CLOTH_ENABLE_DX11
diff --git a/NvCloth/src/dx/DxClothData.h b/NvCloth/src/dx/DxClothData.h
new file mode 100644
index 0000000..bac1a98
--- /dev/null
+++ b/NvCloth/src/dx/DxClothData.h
@@ -0,0 +1,215 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2017 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#ifdef __cplusplus
+#pragma once
+
+#include <foundation/Px.h>
+#include "simd.h"
+
+namespace nv
+{
+namespace cloth
+{
+
+class DxCloth;
+template <typename>
+struct IterationState;
+#else // __cplusplus
+typedef unsigned int uint32_t;
+typedef int int32_t;
+#endif
+
+static const uint32_t MaxParticlesInSharedMem = 1975;
+
+
+struct DxPhaseConfig
+{
+ float mStiffness;
+ float mStiffnessMultiplier;
+ float mCompressionLimit;
+ float mStretchLimit;
+
+ uint32_t mFirstConstraint;
+ uint32_t mNumConstraints;
+};
+
+struct DxConstraint
+{
+ float mRestvalue;
+#ifdef __cplusplus
+ uint16_t mFirstIndex;
+ uint16_t mSecondIndex;
+#else
+ uint32_t mIndices; // 2x uint16_t
+#endif
+};
+
+struct DxTether
+{
+#ifdef __cplusplus
+ DxTether(uint16_t, uint16_t);
+ uint16_t mAnchor;
+ uint16_t mLength;
+#else
+ uint32_t mValue; // 2x uint16_t
+#endif
+};
+
+// reference to cloth instance bulk data (POD)
+// should not need frequent updates (stored on device)
+struct DxClothData
+{
+#ifdef __cplusplus
+ DxClothData()
+ {
+ }
+ DxClothData(DxCloth&);
+#endif
+
+ uint32_t mNumParticles;
+ uint32_t mParticlesOffset;
+
+ // fabric constraints
+ uint32_t mNumPhases;
+ uint32_t mPhaseConfigOffset;
+ uint32_t mConstraintOffset;
+ uint32_t mStiffnessOffset; //Offset inside per constraint stiffness buffer
+
+ uint32_t mNumTethers;
+ uint32_t mTetherOffset;
+ float mTetherConstraintScale;
+
+ // motion constraint data
+ float mMotionConstraintScale;
+ float mMotionConstraintBias;
+
+ // collision
+ uint32_t mNumCapsules;
+ uint32_t mCapsuleOffset;
+ uint32_t mNumSpheres;
+
+ uint32_t mNumPlanes;
+ uint32_t mNumConvexes;
+ uint32_t mConvexMasksOffset;
+
+ uint32_t mNumCollisionTriangles;
+
+ uint32_t mEnableContinuousCollision; //bool stored in uint32_t for dx alignment
+ float mCollisionMassScale;
+ float mFrictionScale;
+
+ float mSelfCollisionDistance;
+
+ uint32_t mNumSelfCollisionIndices;
+ uint32_t mSelfCollisionIndicesOffset;
+ uint32_t mSelfCollisionParticlesOffset;
+ uint32_t mSelfCollisionDataOffset;
+
+ // sleep data
+ uint32_t mSleepTestInterval;
+ uint32_t mSleepAfterCount;
+ float mSleepThreshold;
+};
+
+// per-frame data (stored in pinned memory)
+struct DxFrameData
+{
+#ifdef __cplusplus
+ DxFrameData()
+ {
+	} // not zero-initializing members!
+
+ explicit DxFrameData(DxCloth&, uint32_t numSharedPositions, const IterationState<Simd4f>&, uint32_t firstIteration);
+#endif
+
+ bool mDeviceParticlesDirty;
+
+ // number of particle copies that fit in shared memory (0, 1, or 2)
+ uint32_t mNumSharedPositions;
+
+ // iteration data
+ float mIterDt;
+ uint32_t mFirstIteration;
+ uint32_t mNumIterations;
+
+ float mTetherConstraintStiffness;
+
+ // motion constraint data
+ float mMotionConstraintStiffness;
+ uint32_t mStartMotionConstrainsOffset;
+ uint32_t mTargetMotionConstrainsOffset;
+
+ // separation constraint data
+ uint32_t mStartSeparationConstrainsOffset;
+ uint32_t mTargetSeparationConstrainsOffset;
+
+ // particle acceleration data
+ uint32_t mParticleAccelerationsOffset;
+
+ uint32_t mStartSphereOffset;
+ uint32_t mTargetSphereOffset;
+
+ uint32_t mStartCollisionPlaneOffset;
+ uint32_t mTargetCollisionPlaneOffset;
+
+ uint32_t mStartCollisionTrianglesOffset;
+ uint32_t mTargetCollisionTrianglesOffset;
+
+ float mSelfCollisionStiffness;
+
+ float mParticleBounds[6]; // maxX, -minX, maxY, ...
+
+ uint32_t mSleepPassCounter;
+ uint32_t mSleepTestCounter;
+
+ float mStiffnessExponent;
+
+ uint32_t mRestPositionsOffset;
+
+ bool mInitSelfCollisionData;
+};
+
+// per-iteration data (stored in pinned memory)
+struct DxIterationData
+{
+#ifdef __cplusplus
+ DxIterationData()
+ {
+ } // not initializing!
+ explicit DxIterationData(const IterationState<Simd4f>&);
+#endif
+ float mIntegrationTrafo[24];
+ uint32_t mIsTurning;
+};
+
+#ifdef __cplusplus
+} // namespace cloth
+} // namespace nv
+#endif
diff --git a/NvCloth/src/dx/DxContextLock.cpp b/NvCloth/src/dx/DxContextLock.cpp
new file mode 100644
index 0000000..5f2fe56
--- /dev/null
+++ b/NvCloth/src/dx/DxContextLock.cpp
@@ -0,0 +1,63 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2017 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#include "NvCloth/DxContextManagerCallback.h"
+#include "DxContextLock.h"
+#include "DxFactory.h"
+
+#if NV_CLOTH_ENABLE_DX11
+
+using namespace nv;
+
+cloth::DxContextLock::DxContextLock(const DxFactory& factory) : mContextManager(factory.mContextManager)
+{
+ acquire();
+}
+
+cloth::DxContextLock::DxContextLock(DxContextManagerCallback* contextManager) : mContextManager(contextManager)
+{
+ acquire();
+}
+
+cloth::DxContextLock::~DxContextLock()
+{
+ release();
+}
+
+void cloth::DxContextLock::acquire()
+{
+ mContextManager->acquireContext();
+}
+
+void cloth::DxContextLock::release()
+{
+ mContextManager->releaseContext();
+}
+
+#endif // NV_CLOTH_ENABLE_DX11
diff --git a/NvCloth/src/dx/DxContextLock.h b/NvCloth/src/dx/DxContextLock.h
new file mode 100644
index 0000000..fd85b1b
--- /dev/null
+++ b/NvCloth/src/dx/DxContextLock.h
@@ -0,0 +1,56 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2017 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#pragma once
+
+namespace nv
+{
+namespace cloth
+{
+class DxContextManagerCallback;
+class DxFactory;
+
+// acquires the graphics (DX11) context for the lifetime of the instance
+class DxContextLock
+{
+ DxContextLock& operator = (const DxContextLock&); // not implemented
+
+ public:
+ DxContextLock(const DxFactory&);
+ DxContextLock(DxContextManagerCallback*);
+ ~DxContextLock();
+
+ void acquire();
+ void release();
+
+ private:
+ DxContextManagerCallback* mContextManager;
+};
+}
+}
diff --git a/NvCloth/src/dx/DxDeviceVector.h b/NvCloth/src/dx/DxDeviceVector.h
new file mode 100644
index 0000000..d64bd77
--- /dev/null
+++ b/NvCloth/src/dx/DxDeviceVector.h
@@ -0,0 +1,388 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2017 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#pragma once
+
+#include "NvCloth/DxContextManagerCallback.h"
+#include "DxCheckSuccess.h"
+#include "NvCloth/Allocator.h"
+
+namespace nv
+{
+namespace cloth
+{
+
+struct DxBufferFlags
+{
+ D3D11_USAGE mUsage;
+ D3D11_BIND_FLAG mBindFlag;
+ D3D11_RESOURCE_MISC_FLAG mMiscFlag;
+ D3D11_CPU_ACCESS_FLAG mCpuAccessFlag;
+};
+
+inline DxBufferFlags DxDefaultBufferPolicy()
+{
+ DxBufferFlags result = { D3D11_USAGE_DEFAULT,
+ D3D11_BIND_FLAG(D3D11_BIND_SHADER_RESOURCE | D3D11_BIND_UNORDERED_ACCESS),
+ D3D11_RESOURCE_MISC_BUFFER_STRUCTURED,
+ D3D11_CPU_ACCESS_FLAG(0) };
+ return result;
+};
+
+inline DxBufferFlags DxDynamicBufferPolicy()
+{
+ DxBufferFlags result = { D3D11_USAGE_DYNAMIC, D3D11_BIND_SHADER_RESOURCE,
+ D3D11_RESOURCE_MISC_BUFFER_STRUCTURED, D3D11_CPU_ACCESS_WRITE };
+ return result;
+};
+
+inline DxBufferFlags DxStagingBufferPolicy()
+{
+ DxBufferFlags result = { D3D11_USAGE_STAGING,
+ D3D11_BIND_FLAG(0),
+ D3D11_RESOURCE_MISC_BUFFER_STRUCTURED,
+ D3D11_CPU_ACCESS_FLAG(D3D11_CPU_ACCESS_WRITE | D3D11_CPU_ACCESS_READ) };
+ return result;
+};
+
+template <typename T>
+class DxBuffer : public DxBufferFlags
+{
+ static const uint32_t SizeOfT = sizeof(T);
+
+ public:
+ DxBuffer(DxContextManagerCallback* manager, const DxBufferFlags& flags = DxDefaultBufferPolicy())
+ : DxBufferFlags(flags), mCapacity(0), mBuffer(0), mManager(manager), mResourceView(nullptr), mAccessView(nullptr)
+ {
+ }
+
+ DxBuffer(const T* first, const T* last, DxContextManagerCallback* manager,
+ const DxBufferFlags& flags = DxDefaultBufferPolicy())
+ : DxBufferFlags(flags), mCapacity(0), mBuffer(0), mManager(manager), mResourceView(nullptr), mAccessView(nullptr)
+ {
+ D3D11_SUBRESOURCE_DATA data = { first };
+ create(uint32_t(last - first), &data);
+ }
+
+ DxBuffer(const DxBuffer& other)
+ : DxBufferFlags(other), mCapacity(0), mBuffer(0), mManager(other.mManager), mResourceView(nullptr), mAccessView(nullptr)
+ {
+ operator=(other);
+ }
+
+ ~DxBuffer()
+ {
+ if (mAccessView)
+ mAccessView->Release();
+ if (mResourceView)
+ mResourceView->Release();
+ if (mBuffer)
+ mBuffer->Release();
+ }
+
+ DxBuffer& operator = (const DxBuffer& other)
+ {
+ if (mCapacity < other.mCapacity)
+ {
+ if (mBuffer)
+ mBuffer->Release();
+
+ create(other.mCapacity);
+ }
+
+ if (other.mCapacity)
+ {
+ CD3D11_BOX box(0, 0, 0, other.mCapacity * SizeOfT, 1, 1);
+ context()->CopySubresourceRegion(mBuffer, 0, 0, 0, 0, other.mBuffer, 0, &box);
+ }
+
+ return *this;
+ }
+
+ ID3D11DeviceContext* context() const
+ {
+ return mManager->getContext();
+ }
+
+ void reserve(uint32_t n)
+ {
+ if (n <= mCapacity)
+ return;
+
+ ID3D11Buffer* oldBuffer = mBuffer;
+ CD3D11_BOX box(0, 0, 0, mCapacity * SizeOfT, 1, 1);
+
+ create(n);
+
+ if (oldBuffer)
+ {
+ context()->CopySubresourceRegion(mBuffer, 0, 0, 0, 0, oldBuffer, 0, &box);
+ oldBuffer->Release();
+ }
+ }
+
+ T* map(D3D11_MAP mapType)
+ {
+ D3D11_MAPPED_SUBRESOURCE map;
+ checkSuccess(context()->Map(mBuffer, 0, mapType, 0, &map));
+ return (T*)map.pData;
+ }
+
+ void unmap()
+ {
+ context()->Unmap(mBuffer, 0);
+ }
+
+ ID3D11ShaderResourceView* resourceView()
+ {
+ if (!mResourceView && mBuffer)
+ mManager->getDevice()->CreateShaderResourceView(mBuffer, NULL, &mResourceView);
+ return mResourceView;
+ }
+
+ ID3D11UnorderedAccessView* accessView()
+ {
+ if (!mAccessView && mBuffer)
+ mManager->getDevice()->CreateUnorderedAccessView(mBuffer, NULL, &mAccessView);
+ return mAccessView;
+ }
+
+ private:
+ void create(uint32_t capacity, D3D11_SUBRESOURCE_DATA* data = 0)
+ {
+ CD3D11_BUFFER_DESC desc(capacity * SizeOfT, mBindFlag, mUsage, mCpuAccessFlag, mMiscFlag, SizeOfT);
+ checkSuccess(mManager->getDevice()->CreateBuffer(&desc, data, &mBuffer));
+ mCapacity = capacity;
+
+ if (mResourceView)
+ mResourceView->Release();
+ mResourceView = nullptr;
+
+ if (mAccessView)
+ mAccessView->Release();
+ mAccessView = nullptr;
+ }
+
+ public:
+ uint32_t mCapacity;
+ ID3D11Buffer* mBuffer;
+ DxContextManagerCallback* mManager;
+ ID3D11ShaderResourceView* mResourceView;
+ ID3D11UnorderedAccessView* mAccessView;
+};
+
+// STL-style vector that holds POD types in DX device memory. The interface
+// is not complete, add whatever you need from the std::vector interface.
+template <typename T>
+class DxDeviceVector
+{
+ static const uint32_t SizeOfT = sizeof(T);
+
+ public:
+ DxDeviceVector(DxContextManagerCallback* manager, const DxBufferFlags& flags = DxDefaultBufferPolicy())
+ : mBuffer(manager, flags), mSize(0)
+ {
+ }
+
+ DxDeviceVector(const T* first, const T* last, DxContextManagerCallback* manager,
+ const DxBufferFlags& flags = DxDefaultBufferPolicy())
+ : mBuffer(first, last, manager, flags)
+ {
+ }
+
+ DxDeviceVector(const DxDeviceVector& other) : mBuffer(other.mBuffer), mSize(other.mSize)
+ {
+ }
+
+ template <typename Alloc>
+ DxDeviceVector& operator = (const physx::shdfnd::Array<T, Alloc>& other)
+ {
+ assign(other.begin(), other.end());
+ return *this;
+ }
+
+ uint32_t capacity() const
+ {
+ return mBuffer.mCapacity;
+ }
+ bool empty() const
+ {
+ return !mSize;
+ }
+ uint32_t size() const
+ {
+ return mSize;
+ }
+
+ void reserve(uint32_t n)
+ {
+ mBuffer.reserve(n);
+ }
+
+ void resize(uint32_t n)
+ {
+ if (mBuffer.mCapacity < n)
+ reserve(std::max(n, mBuffer.mCapacity * 2));
+ mSize = n;
+ }
+
+ void assign(const T* first, const T* last)
+ {
+ mSize = uint32_t(last - first);
+
+ if (!mSize)
+ return;
+
+ if (mSize > mBuffer.mCapacity)
+ {
+ mBuffer = DxBuffer<T>(first, last, mBuffer.mManager, mBuffer);
+ }
+ else
+ {
+ if (mBuffer.mUsage == D3D11_USAGE_DEFAULT)
+ {
+ CD3D11_BOX box(0, 0, 0, mSize * SizeOfT, 1, 1);
+ mBuffer.context()->UpdateSubresource(mBuffer.mBuffer, 0, &box, first, 0, 0);
+ }
+ else
+ {
+ memcpy(map(D3D11_MAP_WRITE_DISCARD), first, mSize * SizeOfT);
+ unmap();
+ }
+ }
+ }
+
+ void swap(DxDeviceVector& other)
+ {
+ physx::shdfnd::swap(mBuffer, other.mBuffer);
+ physx::shdfnd::swap(mSize, other.mSize);
+ }
+
+ T* map(D3D11_MAP mapType)
+ {
+ return mBuffer.map(mapType);
+ }
+
+ void unmap()
+ {
+ mBuffer.unmap();
+ }
+
+ // common interface with DxBatchedVector for DxVectorMap
+ DxContextManagerCallback* manager() const
+ {
+ return mBuffer.mManager;
+ }
+
+ public:
+ DxBuffer<T> mBuffer;
+ uint32_t mSize;
+};
+
+template <typename Vector>
+class DxVectorMap : DxContextLock
+{
+ DxVectorMap& operator = (const DxVectorMap&);
+
+ public:
+ typedef typename Vector::ValueType ValueType;
+
+ DxVectorMap(Vector& vector, D3D11_MAP mapType = D3D11_MAP_READ_WRITE)
+ : DxContextLock(vector.manager()), mVector(vector), mData(mVector.map(mapType))
+ {
+ }
+
+ ~DxVectorMap()
+ {
+ if (mData)
+ mVector.unmap();
+ }
+
+ // not actually initializing values!
+ void resize(uint32_t size, const ValueType& = ValueType())
+ {
+ NV_CLOTH_ASSERT(size <= mVector.capacity());
+ return mVector.resize(size);
+ }
+
+ uint32_t size() const
+ {
+ return mVector.size();
+ }
+
+ ValueType* begin()
+ {
+ return mData;
+ }
+
+ ValueType* end()
+ {
+ return mData + mVector.mSize;
+ }
+
+ ValueType& operator[](uint32_t i)
+ {
+ return mData[i];
+ }
+
+ void pushBack(const ValueType& value)
+ {
+ NV_CLOTH_ASSERT(mVector.mCapacity > mVector.mSize);
+ mData[mVector.mSize++] = value;
+ }
+
+ void replaceWithLast(ValueType* it)
+ {
+ *it = mData[--mVector.mSize];
+ }
+
+ private:
+ Vector& mVector;
+ ValueType* const mData;
+};
+
+} // namespace cloth
+
+} // namespace nv
+
+namespace physx
+{
+namespace shdfnd
+{
+template <typename T>
+void swap(nv::cloth::DxBuffer<T>& left, nv::cloth::DxBuffer<T>& right)
+{
+ swap(left.mCapacity, right.mCapacity);
+ swap(left.mBuffer, right.mBuffer);
+ swap(left.mManager, right.mManager);
+ swap(left.mResourceView, right.mResourceView);
+ swap(left.mAccessView, right.mAccessView);
+}
+}
+}
diff --git a/NvCloth/src/dx/DxFabric.cpp b/NvCloth/src/dx/DxFabric.cpp
new file mode 100644
index 0000000..4952e80
--- /dev/null
+++ b/NvCloth/src/dx/DxFabric.cpp
@@ -0,0 +1,208 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2017 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#include "DxFabric.h"
+#include "DxContextLock.h"
+#include "DxFactory.h"
+#include <algorithm>
+#include <PsUtilities.h>
+
+#if NV_CLOTH_ENABLE_DX11
+
+using namespace physx;
+using namespace nv;
+
+cloth::DxFabric::DxFabric(DxFactory& factory, uint32_t numParticles, Range<const uint32_t> phaseIndices,
+ Range<const uint32_t> sets, Range<const float> restvalues, Range<const float> stiffnessValues, Range<const uint32_t> indices,
+ Range<const uint32_t> anchors, Range<const float> tetherLengths,
+ Range<const uint32_t> triangles, uint32_t id)
+: DxContextLock(factory)
+, mFactory(factory)
+, mNumParticles(numParticles)
+, mPhases(phaseIndices.begin(), phaseIndices.end())
+, mSets(sets.begin(), sets.end())
+, mConstraints(mFactory.mConstraints)
+, mConstraintsHostCopy(mFactory.mConstraintsHostCopy)
+, mStiffnessValues(mFactory.mStiffnessValues)
+, mTethers(mFactory.mTethers)
+, mId(id)
+{
+ // should no longer be prefixed with 0
+ NV_CLOTH_ASSERT(sets.front() != 0);
+
+ NV_CLOTH_ASSERT(sets.back() == restvalues.size());
+ NV_CLOTH_ASSERT(restvalues.size() * 2 == indices.size());
+ NV_CLOTH_ASSERT(restvalues.size() == stiffnessValues.size() || stiffnessValues.size() == 0);
+ NV_CLOTH_ASSERT(mNumParticles > *shdfnd::maxElement(indices.begin(), indices.end()));
+
+ // manually convert uint32_t indices to uint16_t in temp memory
+ Vector<DxConstraint>::Type hostConstraints;
+ hostConstraints.resizeUninitialized(restvalues.size());
+ Vector<DxConstraint>::Type::Iterator cIt = hostConstraints.begin();
+ Vector<DxConstraint>::Type::Iterator cEnd = hostConstraints.end();
+
+ const uint32_t* iIt = indices.begin();
+ const float* rIt = restvalues.begin();
+ for (; cIt != cEnd; ++cIt)
+ {
+ cIt->mRestvalue = *rIt++;
+ cIt->mFirstIndex = uint16_t(*iIt++);
+ cIt->mSecondIndex = uint16_t(*iIt++);
+ }
+
+ // copy to device vector in one go
+#if 0
+ // Workaround for NvAPI SCG device updateSubresource size limit
+ mConstraintsHostCopy.assign(hostConstraints.begin(), hostConstraints.end());
+ mConstraints.resize(mConstraintsHostCopy.size());
+ mConstraints = mConstraintsHostCopy;
+#else
+ mConstraints.assign(hostConstraints.begin(), hostConstraints.end());
+#endif
+
+ mStiffnessValues.assign(stiffnessValues.begin(), stiffnessValues.end());
+
+ // gather data per phase
+ mFirstConstraintInPhase.reserve(phaseIndices.size());
+ mNumConstraintsInPhase.reserve(phaseIndices.size());
+ for (const uint32_t* pIt = phaseIndices.begin(); pIt != phaseIndices.end(); ++pIt)
+ {
+ uint32_t setIndex = *pIt;
+ uint32_t firstIndex = setIndex ? sets[setIndex - 1] : 0;
+ uint32_t lastIndex = sets[setIndex];
+ mFirstConstraintInPhase.pushBack(firstIndex);
+ mNumConstraintsInPhase.pushBack(lastIndex - firstIndex);
+ }
+
+ // tethers
+ NV_CLOTH_ASSERT(anchors.size() == tetherLengths.size());
+ mTetherLengthScale =
+ tetherLengths.empty() ? 1.0f : *shdfnd::maxElement(tetherLengths.begin(), tetherLengths.end()) / USHRT_MAX;
+ float inverseScale = 1 / (mTetherLengthScale + FLT_EPSILON);
+ Vector<DxTether>::Type tethers;
+ tethers.reserve(anchors.size());
+ for (; !anchors.empty(); anchors.popFront(), tetherLengths.popFront())
+ {
+ tethers.pushBack(DxTether(uint16_t(anchors.front()), uint16_t(tetherLengths.front() * inverseScale + 0.5f)));
+ }
+ mTethers.assign(tethers.begin(), tethers.end());
+
+ // triangles
+ Vector<uint16_t>::Type hostTriangles;
+ hostTriangles.resizeUninitialized(triangles.size());
+ Vector<uint16_t>::Type::Iterator tIt = hostTriangles.begin();
+
+ for (; !triangles.empty(); triangles.popFront())
+ *tIt++ = uint16_t(triangles.front());
+
+ mTriangles.assign(hostTriangles.begin(), hostTriangles.end());
+
+ DxContextLock::release();
+
+ // add to factory
+ mFactory.mFabrics.pushBack(this);
+}
+
+cloth::DxFabric::~DxFabric()
+{
+ DxContextLock::acquire();
+
+ Vector<DxFabric*>::Type::Iterator fIt = mFactory.mFabrics.find(this);
+
+ NV_CLOTH_ASSERT(fIt != mFactory.mFabrics.end());
+ mFactory.mFabrics.replaceWithLast(fIt);
+}
+
+cloth::Factory& cloth::DxFabric::getFactory() const
+{
+ return mFactory;
+}
+
+uint32_t cloth::DxFabric::getNumPhases() const
+{
+ return uint32_t(mPhases.size());
+}
+
+uint32_t cloth::DxFabric::getNumRestvalues() const
+{
+ return uint32_t(mConstraints.size());
+}
+
+uint32_t cloth::DxFabric::getNumStiffnessValues() const
+{
+ return uint32_t(mStiffnessValues.size());
+}
+
+uint32_t cloth::DxFabric::getNumSets() const
+{
+ return uint32_t(mSets.size());
+}
+
+uint32_t cloth::DxFabric::getNumIndices() const
+{
+ return uint32_t(mConstraints.size()) * 2;
+}
+
+uint32_t cloth::DxFabric::getNumParticles() const
+{
+ return mNumParticles;
+}
+
+uint32_t cloth::DxFabric::getNumTethers() const
+{
+ return uint32_t(mTethers.size());
+}
+
+uint32_t cloth::DxFabric::getNumTriangles() const
+{
+ return uint32_t(mTriangles.size()) / 3;
+}
+
+void cloth::DxFabric::scaleRestvalues(float scale)
+{
+ DxContextLock contextLock(mFactory);
+
+ Vector<DxConstraint>::Type constraints(uint32_t(mConstraints.size()));
+ mFactory.copyToHost(constraints.begin(), mConstraints.buffer(), mConstraints.mOffset * sizeof(DxConstraint),
+ constraints.size() * sizeof(DxConstraint));
+
+ Vector<DxConstraint>::Type::Iterator cIt, cEnd = constraints.end();
+ for (cIt = constraints.begin(); cIt != cEnd; ++cIt)
+ cIt->mRestvalue *= scale;
+
+ mConstraints = constraints;
+}
+
+void cloth::DxFabric::scaleTetherLengths(float scale)
+{
+ // cloth instances won't pick this up until DxClothData is dirty!
+ mTetherLengthScale *= scale;
+}
+
+#endif // NV_CLOTH_ENABLE_DX11
diff --git a/NvCloth/src/dx/DxFabric.h b/NvCloth/src/dx/DxFabric.h
new file mode 100644
index 0000000..2e64cee
--- /dev/null
+++ b/NvCloth/src/dx/DxFabric.h
@@ -0,0 +1,96 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2017 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#pragma once
+
+#include "NvCloth/Fabric.h"
+#include "NvCloth/Range.h"
+#include "DxClothData.h"
+#include "DxContextLock.h"
+#include "DxBatchedVector.h"
+
+namespace nv
+{
+
+namespace cloth
+{
+
+class DxFabric : private DxContextLock, public Fabric
+{
+ PX_NOCOPY(DxFabric);
+
+ public:
+ DxFabric(DxFactory& factory, uint32_t numParticles, Range<const uint32_t> phaseIndices, Range<const uint32_t> sets,
+ Range<const float> restvalues, Range<const float> stiffnessValues, Range<const uint32_t> indices, Range<const uint32_t> anchors,
+ Range<const float> tetherLengths, Range<const uint32_t> triangles, uint32_t id);
+
+ virtual ~DxFabric();
+
+ virtual Factory& getFactory() const;
+
+ virtual uint32_t getNumPhases() const;
+ virtual uint32_t getNumRestvalues() const;
+ virtual uint32_t getNumStiffnessValues() const;
+
+ virtual uint32_t getNumSets() const;
+ virtual uint32_t getNumIndices() const;
+
+ virtual uint32_t getNumParticles() const;
+
+ virtual uint32_t getNumTethers() const;
+
+ virtual uint32_t getNumTriangles() const;
+
+ virtual void scaleRestvalues(float);
+ virtual void scaleTetherLengths(float);
+
+public:
+ DxFactory& mFactory;
+
+ uint32_t mNumParticles;
+
+ Vector<uint32_t>::Type mPhases; // index of set to use
+ Vector<uint32_t>::Type mSets; // offset of last restvalue
+
+ DxBatchedVector<DxConstraint> mConstraints;
+ DxBatchedVector<DxConstraint> mConstraintsHostCopy;
+ DxBatchedVector<float> mStiffnessValues;
+
+ DxBatchedVector<DxTether> mTethers;
+ float mTetherLengthScale;
+
+ Vector<uint16_t>::Type mTriangles;
+
+ Vector<uint32_t>::Type mFirstConstraintInPhase;
+ Vector<uint32_t>::Type mNumConstraintsInPhase;
+
+ uint32_t mId;
+};
+}
+}
diff --git a/NvCloth/src/dx/DxFactory.cpp b/NvCloth/src/dx/DxFactory.cpp
new file mode 100644
index 0000000..0ab7129
--- /dev/null
+++ b/NvCloth/src/dx/DxFactory.cpp
@@ -0,0 +1,478 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2017 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#include "NvCloth/DxContextManagerCallback.h"
+#include "DxFactory.h"
+#include "DxFabric.h"
+#include "DxCloth.h"
+#include "DxSolver.h"
+#include "DxCheckSuccess.h"
+#include "DxContextLock.h"
+#include "../ClothImpl.h"
+#include <PsFoundation.h>
+
+#if NV_CLOTH_ENABLE_DX11
+
+#include "DxSolverKernelBlob.h"
+
+using namespace physx;
+using namespace nv;
+
+namespace nv
+{
+namespace cloth
+{
+// defined in Factory.cpp
+uint32_t getNextFabricId();
+
+typedef Vec4T<uint32_t> Vec4u;
+}
+}
+
+ void cloth::checkSuccessImpl(HRESULT err, const char* file, const int line)
+ {
+ if (err != S_OK)
+ {
+ NV_CLOTH_LOG_ERROR("direct compute error: %u at %s:%d", err, file, line);
+ }
+ }
+
+ namespace
+ {
+ // returns max threads as specified by launch bounds in DxSolverKernel.hlsl
+ uint32_t getMaxThreadsPerBlock()
+ {
+ return 1024;
+ }
+ }
+
+ cloth::DxFactory::DxFactory(DxContextManagerCallback* contextManager)
+ : mContextManager(contextManager)
+ , mStagingBuffer(0)
+ , mSolverKernelComputeShader(nullptr)
+ , mNumThreadsPerBlock(getMaxThreadsPerBlock())
+ , mMaxThreadsPerBlock(mNumThreadsPerBlock)
+ , mConstraints(mContextManager)
+ , mConstraintsHostCopy(mContextManager, DxStagingBufferPolicy())
+ , mStiffnessValues(mContextManager)
+ , mTethers(mContextManager)
+ , mParticles(mContextManager)
+ , mParticlesHostCopy(mContextManager, DxStagingBufferPolicy())
+ , mParticleAccelerations(mContextManager)
+ , mParticleAccelerationsHostCopy(mContextManager, DxStagingBufferPolicy())
+ , mPhaseConfigs(mContextManager)
+ , mCapsuleIndices(mContextManager, DxStagingBufferPolicy())
+ , mCapsuleIndicesDeviceCopy(mContextManager)
+ , mCollisionSpheres(mContextManager, DxStagingBufferPolicy())
+ , mCollisionSpheresDeviceCopy(mContextManager)
+ , mConvexMasks(mContextManager, DxStagingBufferPolicy())
+ , mConvexMasksDeviceCopy(mContextManager)
+ , mCollisionPlanes(mContextManager, DxStagingBufferPolicy())
+ , mCollisionPlanesDeviceCopy(mContextManager)
+ , mCollisionTriangles(mContextManager, DxStagingBufferPolicy())
+ , mCollisionTrianglesDeviceCopy(mContextManager)
+ , mMotionConstraints(mContextManager)
+ , mSeparationConstraints(mContextManager)
+ , mRestPositions(mContextManager, DxStagingBufferPolicy())
+ , mRestPositionsDeviceCopy(mContextManager)
+ , mSelfCollisionIndices(mContextManager)
+ , mSelfCollisionParticles(mContextManager)
+ , mSelfCollisionData(mContextManager)
+ {
+ if (mContextManager->synchronizeResources())
+ {
+ // allow particle interop with other device
+ mParticles.mBuffer.mMiscFlag =
+ D3D11_RESOURCE_MISC_FLAG(mParticles.mBuffer.mMiscFlag | D3D11_RESOURCE_MISC_SHARED_KEYEDMUTEX);
+ }
+ }
+
+ cloth::DxFactory::~DxFactory()
+ {
+ DxContextLock(*this);
+ NV_CLOTH_ASSERT(("All fabrics created by this factory need to be deleted before this factory is destroyed.",mFabrics.size() == 0));
+ if (mStagingBuffer)
+ mStagingBuffer->Release();
+
+ if (mSolverKernelComputeShader)
+ mSolverKernelComputeShader->Release();
+ }
+
+cloth::Fabric* cloth::DxFactory::createFabric(uint32_t numParticles, Range<const uint32_t> phaseIndices,
+ Range<const uint32_t> sets, Range<const float> restvalues, Range<const float> stiffnessValues,
+ Range<const uint32_t> indices, Range<const uint32_t> anchors,
+ Range<const float> tetherLengths, Range<const uint32_t> triangles)
+{
+ return NV_CLOTH_NEW(DxFabric)(*this, numParticles, phaseIndices, sets, restvalues, stiffnessValues, indices, anchors, tetherLengths, triangles,
+ getNextFabricId());
+}
+
+cloth::Cloth* cloth::DxFactory::createCloth(Range<const PxVec4> particles, Fabric& fabric)
+{
+ return NV_CLOTH_NEW(DxClothImpl)(*this, fabric, particles);
+}
+
+cloth::Solver* cloth::DxFactory::createSolver()
+{
+ CompileComputeShaders(); //Make sure our compute shaders are ready
+ DxSolver* solver = NV_CLOTH_NEW(DxSolver)(*this);
+
+ if (solver->hasError())
+ {
+ NV_CLOTH_DELETE(solver);
+ return NULL;
+ }
+
+ return solver;
+}
+
+// DxFactory::clone() implemented in DxClothClone.cpp
+
+void cloth::DxFactory::copyToHost(void* dst, ID3D11Buffer* srcBuffer, uint32_t offset, uint32_t size) const
+{
+ if (!size)
+ return;
+
+ DxContextLock contextLock(*this);
+
+ const_cast<DxFactory*>(this)->reserveStagingBuffer(size);
+ CD3D11_BOX box(offset, 0, 0, offset + size, 1, 1);
+ mContextManager->getContext()->CopySubresourceRegion(mStagingBuffer, 0, 0, 0, 0, srcBuffer, 0, &box);
+ void* mapIt = mapStagingBuffer(D3D11_MAP_READ);
+ memcpy(dst, mapIt, size);
+ unmapStagingBuffer();
+}
+
+void cloth::DxFactory::extractFabricData(const Fabric& fabric, Range<uint32_t> phaseIndices, Range<uint32_t> sets,
+ Range<float> restvalues, Range<float> stiffnessValues, Range<uint32_t> indices, Range<uint32_t> anchors,
+ Range<float> tetherLengths, Range<uint32_t> triangles) const
+{
+ DxContextLock contextLock(*this);
+
+ const DxFabric& dxFabric = static_cast<const DxFabric&>(fabric);
+
+ if (!phaseIndices.empty())
+ {
+ NV_CLOTH_ASSERT(phaseIndices.size() == dxFabric.mPhases.size());
+ memcpy(phaseIndices.begin(), dxFabric.mPhases.begin(), phaseIndices.size() * sizeof(uint32_t));
+ }
+
+ if (!restvalues.empty())
+ {
+ NV_CLOTH_ASSERT(restvalues.size() == dxFabric.mConstraints.size());
+ Vector<DxConstraint>::Type hostConstraints(restvalues.size());
+ copyToHost(hostConstraints.begin(), dxFabric.mConstraints.buffer(), dxFabric.mConstraints.mOffset * sizeof(DxConstraint),
+ uint32_t(hostConstraints.size() * sizeof(DxConstraint)));
+ for (uint32_t i = 0, n = restvalues.size(); i < n; ++i)
+ restvalues[i] = hostConstraints[i].mRestvalue;
+ }
+
+ if (!stiffnessValues.empty())
+ {
+ NV_CLOTH_ASSERT(stiffnessValues.size() == dxFabric.mStiffnessValues.size());
+ Vector<float>::Type hostStiffnessValues(stiffnessValues.size());
+ copyToHost(hostStiffnessValues.begin(), dxFabric.mStiffnessValues.buffer(), dxFabric.mStiffnessValues.mOffset * sizeof(float),
+ uint32_t(hostStiffnessValues.size() * sizeof(float)));
+ for (uint32_t i = 0, n = stiffnessValues.size(); i < n; ++i)
+ stiffnessValues[i] = hostStiffnessValues[i];
+ }
+
+ if (!sets.empty())
+ {
+ // need to skip copying the first element
+ NV_CLOTH_ASSERT(sets.size() == dxFabric.mSets.size());
+ memcpy(sets.begin(), dxFabric.mSets.begin(), sets.size() * sizeof(uint32_t));
+ }
+
+ if (!indices.empty())
+ {
+ NV_CLOTH_ASSERT(indices.size() == dxFabric.mConstraints.size()*2);
+ Vector<DxConstraint>::Type hostConstraints(dxFabric.mConstraints.size());
+ copyToHost(hostConstraints.begin(), dxFabric.mConstraints.buffer(), dxFabric.mConstraints.mOffset * sizeof(DxConstraint),
+ uint32_t(hostConstraints.size() * sizeof(DxConstraint)));
+
+ auto cIt = hostConstraints.begin(), cEnd = hostConstraints.end();
+ for (uint32_t* iIt = indices.begin(); cIt != cEnd; ++cIt)
+ {
+ *iIt++ = cIt->mFirstIndex;
+ *iIt++ = cIt->mSecondIndex;
+ }
+ }
+
+ if (!anchors.empty() || !tetherLengths.empty())
+ {
+ uint32_t numTethers = uint32_t(dxFabric.mTethers.size());
+ Vector<DxTether>::Type tethers(numTethers, DxTether(0, 0));
+ copyToHost(tethers.begin(), dxFabric.mTethers.buffer(), dxFabric.mTethers.mOffset * sizeof(DxTether),
+ uint32_t(tethers.size() * sizeof(DxTether)));
+
+ NV_CLOTH_ASSERT(anchors.empty() || anchors.size() == tethers.size());
+ for (uint32_t i = 0; !anchors.empty(); ++i, anchors.popFront())
+ anchors.front() = tethers[i].mAnchor;
+
+ NV_CLOTH_ASSERT(tetherLengths.empty() || tetherLengths.size() == tethers.size());
+ for (uint32_t i = 0; !tetherLengths.empty(); ++i, tetherLengths.popFront())
+ tetherLengths.front() = tethers[i].mLength * dxFabric.mTetherLengthScale;
+ }
+
+ if (!triangles.empty())
+ {
+ // todo triangles
+ }
+}
+
+
+ void cloth::DxFactory::extractCollisionData(const Cloth& cloth, Range<PxVec4> spheres, Range<uint32_t> capsules,
+ Range<PxVec4> planes, Range<uint32_t> convexes, Range<PxVec3> triangles) const
+ {
+ PX_ASSERT(&cloth.getFactory() == this);
+
+ const DxCloth& dxCloth = static_cast<const DxClothImpl&>(cloth).mCloth;
+
+ PX_ASSERT(spheres.empty() || spheres.size() == dxCloth.mStartCollisionSpheres.size());
+ PX_ASSERT(capsules.empty() || capsules.size() == dxCloth.mCapsuleIndices.size() * 2);
+ PX_ASSERT(planes.empty() || planes.size() == dxCloth.mStartCollisionPlanes.size());
+ PX_ASSERT(convexes.empty() || convexes.size() == dxCloth.mConvexMasks.size());
+ PX_ASSERT(triangles.empty() || triangles.size() == dxCloth.mStartCollisionTriangles.size());
+
+ // collision spheres are in pinned memory, so memcpy directly
+ if (!dxCloth.mStartCollisionSpheres.empty() && !spheres.empty())
+ {
+ memcpy(spheres.begin(),
+ DxCloth::MappedVec4fVectorType(const_cast<DxCloth&>(dxCloth).mStartCollisionSpheres).begin(),
+ spheres.size() * sizeof(PxVec4));
+ }
+
+ if (!dxCloth.mCapsuleIndices.empty() && !capsules.empty())
+ {
+ memcpy(capsules.begin(), DxCloth::MappedIndexVectorType(const_cast<DxCloth&>(dxCloth).mCapsuleIndices).begin(),
+ capsules.size() * sizeof(uint32_t));
+ }
+
+ if (!dxCloth.mStartCollisionPlanes.empty() && !planes.empty())
+ {
+ memcpy(planes.begin(), DxCloth::MappedVec4fVectorType(const_cast<DxCloth&>(dxCloth).mStartCollisionPlanes).begin(),
+ dxCloth.mStartCollisionPlanes.size() * sizeof(PxVec4));
+ }
+
+ if (!dxCloth.mConvexMasks.empty() && !convexes.empty())
+ {
+ memcpy(convexes.begin(), DxCloth::MappedMaskVectorType(const_cast<DxCloth&>(dxCloth).mConvexMasks).begin(),
+ dxCloth.mConvexMasks.size() * sizeof(uint32_t));
+ }
+
+ if (!dxCloth.mStartCollisionTriangles.empty() && !triangles.empty())
+ {
+ memcpy(triangles.begin(), DxCloth::MappedVec3fVectorType(const_cast<DxCloth&>(dxCloth).mStartCollisionTriangles).begin(),
+ dxCloth.mStartCollisionTriangles.size() * sizeof(PxVec3));
+ }
+ }
+
+ void cloth::DxFactory::extractMotionConstraints(const Cloth& cloth, Range<PxVec4> destConstraints) const
+ {
+ PX_ASSERT(&cloth.getFactory() == this);
+
+ const DxCloth& dxCloth = static_cast<const DxClothImpl&>(cloth).mCloth;
+
+ if (dxCloth.mMotionConstraints.mHostCopy.size())
+ {
+ PX_ASSERT(destConstraints.size() == dxCloth.mMotionConstraints.mHostCopy.size());
+
+ memcpy(destConstraints.begin(), dxCloth.mMotionConstraints.mHostCopy.begin(),
+ sizeof(PxVec4) * dxCloth.mMotionConstraints.mHostCopy.size());
+ }
+ else
+ {
+ DxContextLock contextLock(*this);
+
+ DxBatchedVector<PxVec4> const& srcConstraints = !dxCloth.mMotionConstraints.mTarget.empty()
+ ? dxCloth.mMotionConstraints.mTarget
+ : dxCloth.mMotionConstraints.mStart;
+
+ PX_ASSERT(destConstraints.size() == srcConstraints.size());
+ copyToHost(destConstraints.begin(), srcConstraints.buffer(), 0, destConstraints.size() * sizeof(PxVec4));
+ }
+ }
+
+ void cloth::DxFactory::extractSeparationConstraints(const Cloth& cloth, Range<PxVec4> destConstraints) const
+ {
+ PX_ASSERT(&cloth.getFactory() == this);
+
+ const DxCloth& dxCloth = static_cast<const DxClothImpl&>(cloth).mCloth;
+
+ if (dxCloth.mSeparationConstraints.mHostCopy.size())
+ {
+ PX_ASSERT(destConstraints.size() == dxCloth.mSeparationConstraints.mHostCopy.size());
+
+ memcpy(destConstraints.begin(), dxCloth.mSeparationConstraints.mHostCopy.begin(),
+ sizeof(PxVec4) * dxCloth.mSeparationConstraints.mHostCopy.size());
+ }
+ else
+ {
+ DxContextLock contextLock(*this);
+
+ DxBatchedVector<PxVec4> const& srcConstraints = !dxCloth.mSeparationConstraints.mTarget.empty()
+ ? dxCloth.mSeparationConstraints.mTarget
+ : dxCloth.mSeparationConstraints.mStart;
+
+ PX_ASSERT(destConstraints.size() == srcConstraints.size());
+
+ copyToHost(destConstraints.begin(), srcConstraints.buffer(), 0, destConstraints.size() * sizeof(PxVec4));
+ }
+ }
+
+ void cloth::DxFactory::extractParticleAccelerations(const Cloth& cloth, Range<PxVec4> destAccelerations) const
+ {
+ /*
+ PX_ASSERT(&cloth.getFactory() == this);
+ const DxCloth& dxCloth = static_cast<const DxClothImpl&>(cloth).mCloth;
+
+ if (dxCloth.mParticleAccelerationsHostCopy.size())
+ {
+ PX_ASSERT(dxCloth.mParticleAccelerationsHostCopy.size());
+
+ memcpy(destAccelerations.begin(), dxCloth.mParticleAccelerationsHostCopy.begin(),
+ sizeof(PxVec4) * dxCloth.mParticleAccelerationsHostCopy.size());
+ }
+ else
+ {
+ DxContextLock contextLock(*this);
+
+ DxBatchedVector<PxVec4> const& srcAccelerations = dxCloth.mParticleAccelerations;
+
+ PX_ASSERT(destAccelerations.size() == srcAccelerations.size());
+
+ copyToHost(destAccelerations.begin(), srcAccelerations.buffer(), 0, destAccelerations.size() * sizeof(PxVec4));
+ }
+ */
+ PX_UNUSED(&cloth);
+ PX_UNUSED(&destAccelerations);
+ PX_ASSERT(0);
+ }
+
+ void cloth::DxFactory::extractVirtualParticles(const Cloth& cloth, Range<uint32_t[4]> destIndices,
+ Range<PxVec3> destWeights) const
+ {
+ PX_ASSERT(&cloth.getFactory() == this);
+
+ DxContextLock contextLock(*this);
+
+ const DxCloth& dxCloth = static_cast<const DxClothImpl&>(cloth).mCloth;
+
+ if (destWeights.size() > 0)
+ {
+ uint32_t numWeights = cloth.getNumVirtualParticleWeights();
+
+ Vector<PxVec4>::Type hostWeights(numWeights, PxVec4(0.0f));
+ copyToHost(hostWeights.begin(), dxCloth.mVirtualParticleWeights.mBuffer.mBuffer, 0,
+ hostWeights.size() * sizeof(PxVec4));
+
+ // convert weights to Vec3f
+ PxVec3* destIt = reinterpret_cast<PxVec3*>(destWeights.begin());
+ Vector<PxVec4>::Type::ConstIterator srcIt = hostWeights.begin();
+ Vector<PxVec4>::Type::ConstIterator srcEnd = srcIt + numWeights;
+ for (; srcIt != srcEnd; ++srcIt, ++destIt)
+ *destIt = reinterpret_cast<const PxVec3&>(*srcIt);
+
+ PX_ASSERT(destIt <= destWeights.end());
+ }
+
+ if (destIndices.size() > 0)
+ {
+ uint32_t numIndices = cloth.getNumVirtualParticles();
+
+ Vector<Vec4us>::Type hostIndices(numIndices);
+ copyToHost(hostIndices.begin(), dxCloth.mVirtualParticleIndices.mBuffer.mBuffer, 0,
+ hostIndices.size() * sizeof(Vec4us));
+
+ // convert indices to 32 bit
+ Vec4u* destIt = reinterpret_cast<Vec4u*>(destIndices.begin());
+ Vector<Vec4us>::Type::ConstIterator srcIt = hostIndices.begin();
+ Vector<Vec4us>::Type::ConstIterator srcEnd = srcIt + numIndices;
+ for (; srcIt != srcEnd; ++srcIt, ++destIt)
+ *destIt = Vec4u(*srcIt);
+
+ PX_ASSERT(&array(*destIt) <= destIndices.end());
+ }
+ }
+
+ void cloth::DxFactory::extractSelfCollisionIndices(const Cloth& cloth, Range<uint32_t> destIndices) const
+ {
+ const DxCloth& dxCloth = static_cast<const DxClothImpl&>(cloth).mCloth;
+ PX_ASSERT(destIndices.size() == dxCloth.mSelfCollisionIndices.size());
+ intrinsics::memCopy(destIndices.begin(), dxCloth.mSelfCollisionIndicesHost.begin(),
+ destIndices.size() * sizeof(uint32_t));
+ }
+
+ void cloth::DxFactory::extractRestPositions(const Cloth& cloth, Range<PxVec4> destRestPositions) const
+ {
+ const DxCloth& dxCloth = static_cast<const DxClothImpl&>(cloth).mCloth;
+ PX_ASSERT(destRestPositions.size() == dxCloth.mRestPositions.size());
+ intrinsics::memCopy(destRestPositions.begin(), DxCloth::MappedVec4fVectorType(const_cast<DxCloth&>(dxCloth).mRestPositions).begin(),
+ destRestPositions.size() * sizeof(PxVec4));
+ }
+
+ void cloth::DxFactory::reserveStagingBuffer(uint32_t size)
+ {
+ if (mStagingBuffer)
+ {
+ D3D11_BUFFER_DESC desc;
+ mStagingBuffer->GetDesc(&desc);
+ if (desc.ByteWidth >= size)
+ return;
+ mStagingBuffer->Release();
+ }
+
+ CD3D11_BUFFER_DESC desc(size, 0, D3D11_USAGE_STAGING, D3D11_CPU_ACCESS_READ | D3D11_CPU_ACCESS_WRITE);
+ mContextManager->getDevice()->CreateBuffer(&desc, 0, &mStagingBuffer);
+ }
+
+ void* cloth::DxFactory::mapStagingBuffer(D3D11_MAP mapType) const
+ {
+ D3D11_MAPPED_SUBRESOURCE mapped;
+ mContextManager->getContext()->Map(mStagingBuffer, 0, mapType, 0, &mapped);
+ return mapped.pData;
+ }
+
+ void cloth::DxFactory::unmapStagingBuffer() const
+ {
+ mContextManager->getContext()->Unmap(mStagingBuffer, 0);
+ }
+
+ void cloth::DxFactory::CompileComputeShaders()
+ {
+ if (mSolverKernelComputeShader == nullptr)
+ {
+ DxContextLock(*this);
+ ID3D11Device* device = mContextManager->getDevice();
+ device->CreateComputeShader(gDxSolverKernel, sizeof(gDxSolverKernel), NULL, &mSolverKernelComputeShader);
+ }
+ }
+
+#endif // NV_CLOTH_ENABLE_DX11
diff --git a/NvCloth/src/dx/DxFactory.h b/NvCloth/src/dx/DxFactory.h
new file mode 100644
index 0000000..0fec0d8
--- /dev/null
+++ b/NvCloth/src/dx/DxFactory.h
@@ -0,0 +1,160 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2017 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#pragma once
+
+#include "NvCloth/Factory.h"
+#include "DxClothData.h"
+#include "DxBatchedVector.h"
+#include "../IndexPair.h"
+#include <foundation/PxVec4.h>
+#include <foundation/PxVec3.h>
+
+#if _MSC_VER >= 1700
+#pragma warning(disable : 4471)
+#endif
+
+struct ID3D11Device;
+struct ID3D11Buffer;
+enum D3D11_MAP;
+
+namespace nv
+{
+
+namespace cloth
+{
+class DxFabric;
+class DxCloth;
+template <typename>
+class ClothImpl;
+class DxContextManagerCallback;
+
+// DirectX 11 implementation of the NvCloth Factory interface.
+// Owns the device context callback, a shared CPU staging buffer, the solver
+// compute shader, and the batched device storage that all DxCloth instances
+// created by this factory sub-allocate from.
+class DxFactory : public Factory
+{
+protected:
+ DxFactory(const DxFactory&); // not implemented
+ DxFactory& operator = (const DxFactory&); // not implemented
+
+ public:
+ typedef DxFabric FabricType;
+ typedef ClothImpl<DxCloth> ImplType;
+
+ explicit DxFactory(DxContextManagerCallback*);
+ virtual ~DxFactory();
+
+ virtual Platform getPlatform() const { return Platform::DX11; }
+
+ // Factory interface: creation of fabrics, cloths and solvers on this backend.
+ virtual Fabric* createFabric(uint32_t numParticles, Range<const uint32_t> phaseIndices, Range<const uint32_t> sets,
+ Range<const float> restvalues, Range<const float> stiffnessValues, Range<const uint32_t> indices,
+ Range<const uint32_t> anchors, Range<const float> tetherLengths,
+ Range<const uint32_t> triangles);
+
+ virtual Cloth* createCloth(Range<const physx::PxVec4> particles, Fabric& fabric);
+
+ virtual Solver* createSolver();
+
+ virtual Cloth* clone(const Cloth& cloth);
+
+ // Factory interface: read-back of fabric/cloth data into caller-provided ranges.
+ virtual void extractFabricData(const Fabric& fabric, Range<uint32_t> phaseIndices, Range<uint32_t> sets,
+ Range<float> restvalues, Range<float> stiffnessValues, Range<uint32_t> indices, Range<uint32_t> anchors,
+ Range<float> tetherLengths, Range<uint32_t> triangles) const;
+
+ virtual void extractCollisionData(const Cloth& cloth, Range<physx::PxVec4> spheres, Range<uint32_t> capsules,
+ Range<physx::PxVec4> planes, Range<uint32_t> convexes, Range<physx::PxVec3> triangles) const;
+
+ virtual void extractMotionConstraints(const Cloth& cloth, Range<physx::PxVec4> destConstraints) const;
+
+ virtual void extractSeparationConstraints(const Cloth& cloth, Range<physx::PxVec4> destConstraints) const;
+
+ virtual void extractParticleAccelerations(const Cloth& cloth, Range<physx::PxVec4> destAccelerations) const;
+
+ virtual void extractVirtualParticles(const Cloth& cloth, Range<uint32_t[4]> destIndices,
+ Range<physx::PxVec3> destWeights) const;
+
+ virtual void extractSelfCollisionIndices(const Cloth& cloth, Range<uint32_t> destIndices) const;
+
+ virtual void extractRestPositions(const Cloth& cloth, Range<physx::PxVec4> destRestPositions) const;
+
+ public:
+ // Backend helpers (used by DxCloth/DxSolver, not part of the public Factory API).
+ void copyToHost(void* dst, ID3D11Buffer* buffer, uint32_t offset, uint32_t size) const; //size and offset in bytes (or in pixels when buffer is a texture?)
+ void CompileComputeShaders(); // this is called once to setup the shaders
+
+ // Shared staging buffer used for device<->host transfers; grows on demand.
+ void reserveStagingBuffer(uint32_t size);
+ void* mapStagingBuffer(D3D11_MAP) const;
+ void unmapStagingBuffer() const;
+
+ Vector<DxFabric*>::Type mFabrics; // fabrics created by (and registered with) this factory
+
+ DxContextManagerCallback* mContextManager; // provides device/context and locking
+ ID3D11Buffer* mStagingBuffer; // lazily created, see reserveStagingBuffer()
+
+ ID3D11ComputeShader* mSolverKernelComputeShader; // created by CompileComputeShaders()
+
+ uint32_t mNumThreadsPerBlock;
+
+ const uint32_t mMaxThreadsPerBlock;
+
+ // Batched per-cloth storage shared across all cloths of this factory.
+ // *HostCopy members mirror device data on the CPU; *DeviceCopy members are
+ // default-usage duplicates refreshed each frame by DxSolver::beginFrame().
+ DxBatchedStorage<DxConstraint> mConstraints;
+ DxBatchedStorage<DxConstraint> mConstraintsHostCopy;
+ DxBatchedStorage<float> mStiffnessValues;
+ DxBatchedStorage<DxTether> mTethers;
+ DxBatchedStorage<physx::PxVec4> mParticles;
+ DxBatchedStorage<physx::PxVec4> mParticlesHostCopy;
+ DxBatchedStorage<DxPhaseConfig> mPhaseConfigs;
+
+ DxBatchedStorage<physx::PxVec4> mParticleAccelerations;
+ DxBatchedStorage<physx::PxVec4> mParticleAccelerationsHostCopy;
+
+ DxBatchedStorage<IndexPair> mCapsuleIndices;
+ DxBuffer<IndexPair> mCapsuleIndicesDeviceCopy;
+
+ DxBatchedStorage<physx::PxVec4> mCollisionSpheres;
+ DxBuffer<physx::PxVec4> mCollisionSpheresDeviceCopy;
+
+ DxBatchedStorage<uint32_t> mConvexMasks;
+ DxBuffer<uint32_t> mConvexMasksDeviceCopy;
+
+ DxBatchedStorage<physx::PxVec4> mCollisionPlanes;
+ DxBuffer<physx::PxVec4> mCollisionPlanesDeviceCopy;
+
+ DxBatchedStorage<physx::PxVec3> mCollisionTriangles;
+ DxBuffer<physx::PxVec3> mCollisionTrianglesDeviceCopy;
+
+ DxBatchedStorage<physx::PxVec4> mMotionConstraints;
+ DxBatchedStorage<physx::PxVec4> mSeparationConstraints;
+
+ DxBatchedStorage<physx::PxVec4> mRestPositions;
+ DxBuffer<physx::PxVec4> mRestPositionsDeviceCopy;
+
+ DxBatchedStorage<uint32_t> mSelfCollisionIndices;
+ DxBatchedStorage<physx::PxVec4> mSelfCollisionParticles;
+ DxBatchedStorage<uint32_t> mSelfCollisionData;
+};
+}
+}
diff --git a/NvCloth/src/dx/DxSolver.cpp b/NvCloth/src/dx/DxSolver.cpp
new file mode 100644
index 0000000..4670bb6
--- /dev/null
+++ b/NvCloth/src/dx/DxSolver.cpp
@@ -0,0 +1,542 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2017 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#include "DxSolver.h"
+#include "DxCloth.h"
+#include "../ClothImpl.h"
+#include "DxFabric.h"
+#include "DxFactory.h"
+#include "DxContextLock.h"
+#include "../IterationState.h"
+#include <PsSort.h>
+#include <foundation/PxProfiler.h>
+#include <PsFoundation.h>
+
+#if NV_CLOTH_ENABLE_DX11
+
+#include "DxSolverKernelBlob.h"
+
+using namespace physx;
+using namespace nv;
+
+// Constructs a solver bound to the given factory's device context.
+// The DxContextLock base acquires the context for the duration of the body
+// (presumably — confirm against DxContextLock) and is explicitly released at
+// the end, before any profiling state is initialized.
+cloth::DxSolver::DxSolver(DxFactory& factory)
+: DxContextLock(factory)
+, mFactory(factory)
+, mFrameDt(0.0f)
+, mSharedMemorySize(0)
+, mSharedMemoryLimit(0)
+, mClothData(mFactory.mContextManager)
+, mFrameData(mFactory.mContextManager)
+, mFrameDataHostCopy(mFactory.mContextManager, DxStagingBufferPolicy())
+, mIterationData(mFactory.mContextManager, DxDynamicBufferPolicy())
+, mClothDataDirty(false)
+, mKernelSharedMemorySize(0)
+, mSyncQuery(0)
+, mInterCollisionDistance(0.0f)
+, mInterCollisionStiffness(1.0f)
+, mInterCollisionIterations(0)
+, mInterCollisionFilter(nullptr)
+, mInterCollisionScratchMem(NULL)
+, mInterCollisionScratchMemSize(0)
+, mComputeError(false)
+{
+ ID3D11Device* device = mFactory.mContextManager->getDevice();
+ if (device->GetFeatureLevel() < D3D_FEATURE_LEVEL_11_0)
+ {
+ // Below feature level 11 compute shaders are optional; check the
+ // DirectCompute-on-10.x capability bit and flag an error if absent.
+ D3D11_FEATURE_DATA_D3D10_X_HARDWARE_OPTIONS hwopts = { 0 };
+ device->CheckFeatureSupport(D3D11_FEATURE_D3D10_X_HARDWARE_OPTIONS, &hwopts, sizeof(hwopts));
+ if (!hwopts.ComputeShaders_Plus_RawAndStructuredBuffers_Via_Shader_4_x)
+ {
+ NV_CLOTH_LOG_WARNING("DirectCompute is not supported by this device\n");
+ mComputeError = true;
+ }
+ }
+
+ // Budget for per-cloth shared (groupshared) memory; mKernelSharedMemorySize
+ // is 0 here (set in the initializer list above), so the limit is 32 KiB.
+ mSharedMemoryLimit = 32 * 1024 - mKernelSharedMemorySize;
+
+ // Event query used by executeKernel() for CPU/GPU synchronization in debug builds.
+ D3D11_QUERY_DESC queryDesc = { D3D11_QUERY_EVENT, 0 };
+ device->CreateQuery(&queryDesc, &mSyncQuery);
+
+ DxContextLock::release();
+
+ mSimulateProfileEventData = nullptr;
+}
+
+// Destroys the solver. All cloths must have been removed beforehand.
+// The context is re-acquired (it was released at the end of the constructor);
+// presumably the DxContextLock base destructor releases it again — confirm.
+cloth::DxSolver::~DxSolver()
+{
+ NV_CLOTH_ASSERT(mCloths.empty());
+
+ DxContextLock::acquire();
+
+ if (mSyncQuery)
+ mSyncQuery->Release();
+
+ if (mInterCollisionScratchMem)
+ NV_CLOTH_FREE(mInterCollisionScratchMem);
+}
+
+namespace
+{
+// Orders cloths by estimated simulation cost (particle count times solver
+// frequency), most expensive first.
+struct ClothSimCostGreater
+{
+ bool operator()(const cloth::DxCloth* left, const cloth::DxCloth* right) const
+ {
+ const float leftCost = left->mNumParticles * left->mSolverFrequency;
+ const float rightCost = right->mNumParticles * right->mSolverFrequency;
+ return leftCost > rightCost;
+ }
+};
+}
+
+// Registers a cloth with this solver: appends it to mCloths, re-sorts the
+// list by estimated cost (most expensive first), resizes the per-cloth data
+// containers, and records a compute error if the solver shader is missing.
+// NOTE(review): the "lazy compilation" below only *checks* that the shader
+// exists; DxFactory::CompileComputeShaders() must have been called elsewhere
+// — confirm against the factory's creation path.
+void cloth::DxSolver::addCloth(Cloth* cloth)
+{
+ DxCloth& dxCloth = static_cast<DxClothImpl&>(*cloth).mCloth;
+
+ NV_CLOTH_ASSERT(mCloths.find(&dxCloth) == mCloths.end());
+
+ mCloths.pushBack(&dxCloth);
+ // trigger update of mClothData array
+ dxCloth.notifyChanged();
+
+ // sort cloth instances by size
+ shdfnd::sort(mCloths.begin(), mCloths.size(), ClothSimCostGreater(), NonTrackingAllocator());
+
+ DxContextLock contextLock(mFactory);
+
+ // resize containers and update kernel data
+ mClothDataHostCopy.resize(mCloths.size());
+ mClothData.resize(mCloths.size());
+ mFrameDataHostCopy.resize(mCloths.size());
+
+ // lazy compilation of compute shader
+ mComputeError |= mFactory.mSolverKernelComputeShader == nullptr;
+// Disabled diagnostic code: self-test of a (currently unused) GPU sort kernel.
+#if 0
+ if (!mSortComputeShader && !mComputeError)
+ {
+ ID3D11Device* device = mFactory.mContextManager->getDevice();
+ device->CreateComputeShader(gDxSortKernel, sizeof(gDxSortKernel), NULL, &mSortComputeShader);
+ mComputeError |= mSortComputeShader == NULL;
+ if (mSortComputeShader)
+ {
+ const uint32_t SortMaxN = 10000;
+ ID3D11DeviceContext* context = mFactory.mContextManager->getContext();
+
+
+ DxDeviceVector<DxSortData> _SortData(mFactory.mContextManager);
+ Vector<DxSortData>::Type _SortDataHostCopy;
+ _SortData.resize(1);
+ _SortDataHostCopy.resize(1);
+
+ DxDeviceVector<uint32_t> _SortElems(mFactory.mContextManager);
+ DxDeviceVector<uint32_t> _SortElemsHostCopy(mFactory.mContextManager, DxStagingBufferPolicy());
+ Vector<uint32_t>::Type _SortElemsRef;
+ _SortElems.reserve(SortMaxN * 2);
+ _SortElemsHostCopy.reserve(SortMaxN * 2);
+ _SortElemsRef.reserve(SortMaxN);
+
+ context->CSSetShader(mSortComputeShader, NULL, 0);
+
+ //for (uint32_t n = 1; n < SortMaxN; ++n)
+ uint32_t n = 2000;
+ {
+ _SortDataHostCopy[0].mCount = n;
+ _SortData = _SortDataHostCopy;
+
+ _SortElems.resize(n * 2);
+ _SortElemsHostCopy.resize(n * 2);
+ _SortElemsRef.resize(n);
+
+ srand(95123);
+ uint32_t* sortElems = _SortElemsHostCopy.map(D3D11_MAP_WRITE);
+ for (uint32_t i = 0; i < n; ++i)
+ {
+ _SortElemsRef[i] = ((uint32_t(rand()) ) << 16);
+ sortElems[i] = _SortElemsRef[i] | i;
+ sortElems[i + n] = 0x89abcdef;
+ }
+ _SortElemsHostCopy.unmap();
+ std::sort(_SortElemsRef.begin(), _SortElemsRef.end());
+
+ _SortElems = _SortElemsHostCopy;
+
+ ID3D11ShaderResourceView* resourceViews[1] = { _SortData.mBuffer.resourceView() };
+ context->CSSetShaderResources(0, 1, resourceViews);
+
+ ID3D11UnorderedAccessView* accessViews[1] = { _SortElems.mBuffer.accessView() };
+ context->CSSetUnorderedAccessViews(0, 1, accessViews, NULL);
+
+ context->Dispatch(1, 1, 1);
+
+ _SortElemsHostCopy = _SortElems;
+
+ sortElems = _SortElemsHostCopy.map(D3D11_MAP_READ);
+ for (uint32_t i = 0; i < n; ++i)
+ {
+ uint32_t key = sortElems[i] & ~0xffff;
+ uint32_t keyRef = _SortElemsRef[i] & ~0xffff;
+ PX_ASSERT(key == keyRef);
+ }
+ _SortElemsHostCopy.unmap();
+ }
+
+ context->CSSetShader(NULL, NULL, 0);
+ }
+ }
+#endif
+}
+
+// Detaches a cloth from this solver. Silently returns when the cloth was
+// never added. Shrinks the per-cloth data arrays and marks the device-side
+// cloth data stale so it gets re-uploaded on the next kernel run.
+void cloth::DxSolver::removeCloth(Cloth* cloth)
+{
+ DxCloth& dxCloth = static_cast<DxClothImpl&>(*cloth).mCloth;
+
+ ClothVector::Iterator it = mCloths.find(&dxCloth);
+ if (it == mCloths.end())
+ return; // this solver does not own the cloth
+
+ uint32_t index = uint32_t(it - mCloths.begin());
+ mCloths.remove(index);
+ mClothDataHostCopy.remove(index);
+ mClothData.resize(mCloths.size());
+ mClothDataDirty = true; // device copy must be refreshed in executeKernel()
+}
+
+// Starts a simulation frame with time step dt.
+// Returns false (and does nothing) when no cloths are attached.
+bool cloth::DxSolver::beginSimulation(float dt)
+{
+ if (mCloths.empty())
+ return false;
+
+ mFrameDt = dt;
+ DxSolver::beginFrame(); // qualified call: no virtual dispatch intended
+ return true;
+}
+// Runs one simulation chunk. The DX solver dispatches all cloths in a single
+// kernel launch, so the only valid chunk index is 0.
+void cloth::DxSolver::simulateChunk(int idx)
+{
+ NV_CLOTH_ASSERT(idx == 0);
+ NV_CLOTH_ASSERT(!mCloths.empty());
+ PX_UNUSED(idx); // only used by the asserts above in release builds
+ DxSolver::executeKernel();
+}
+// Finishes the frame started by beginSimulation(): reads back per-frame
+// results and runs CPU inter-collision (see endFrame()).
+void cloth::DxSolver::endSimulation()
+{
+ NV_CLOTH_ASSERT(!mCloths.empty());
+ DxSolver::endFrame();
+}
+// The DX path always simulates all cloths in a single chunk (one dispatch).
+int cloth::DxSolver::getSimulationChunkCount() const
+{
+ return 1;
+}
+
+// Per-frame setup: builds the per-cloth frame data and the flattened
+// per-iteration data on the host, uploads dirty cloth buffers, and refreshes
+// the factory's device-side copies of shared collision data.
+void cloth::DxSolver::beginFrame()
+{
+ DxContextLock contextLock(mFactory);
+
+ mSimulateProfileEventData = NV_CLOTH_PROFILE_START_CROSSTHREAD("cloth::DxSolver::simulate", 0);
+ /*
+ ID3DUserDefinedAnnotation* annotation;
+ mFactory.mContextManager->getContext()->QueryInterface(&annotation);
+ annotation->BeginEvent(L"cloth::DxSolver::simulate");
+ annotation->Release();
+ */
+
+ mIterationDataHostCopy.resize(0);
+
+ // update cloth data
+ ClothVector::Iterator cIt, cEnd = mCloths.end();
+ Vector<DxClothData>::Type::Iterator dIt = mClothDataHostCopy.begin();
+ for (cIt = mCloths.begin(); cIt != cEnd; ++cIt, ++dIt)
+ mClothDataDirty |= (*cIt)->updateClothData(*dIt);
+
+ // Build DxFrameData for every cloth while the staging buffer is mapped.
+ uint32_t maxSharedMemorySize = 0;
+ DxFrameData* frameDataIt = mFrameDataHostCopy.map(D3D11_MAP_WRITE);
+ for (cIt = mCloths.begin(); cIt != cEnd; ++cIt)
+ {
+ DxCloth& cloth = **cIt;
+
+ uint32_t sharedMemorySize = cloth.mSharedMemorySize;
+ uint32_t positionsSize = cloth.mNumParticles * sizeof(PxVec4);
+
+ // Up to two particle arrays may live in groupshared memory, budget allowing.
+ // NOTE(review): if sharedMemorySize ever exceeded mSharedMemoryLimit the
+ // unsigned subtraction would wrap — presumably prevented elsewhere; confirm.
+ uint32_t numSharedPositions = std::min(2u, (mSharedMemoryLimit - sharedMemorySize) / positionsSize);
+
+ maxSharedMemorySize = std::max(maxSharedMemorySize, sharedMemorySize + numSharedPositions * positionsSize);
+
+ IterationStateFactory factory(cloth, mFrameDt);
+ IterationState<Simd4f> state = factory.create<Simd4f>(cloth);
+
+ // Frame data records where this cloth's iterations start in the flat array.
+ *(frameDataIt++) = DxFrameData(cloth, numSharedPositions, state, mIterationDataHostCopy.size());
+
+ // Flatten all solver iterations for this cloth into one host array.
+ while (state.mRemainingIterations)
+ {
+ mIterationDataHostCopy.pushBack(DxIterationData(state));
+ state.update();
+ }
+
+ if (cloth.mDeviceParticlesDirty)
+ cloth.mParticles = cloth.mParticlesHostCopy;
+
+ // copy to device
+ cloth.mParticleAccelerations = cloth.mParticleAccelerationsHostCopy;
+ if (!cloth.mMotionConstraints.mHostCopy.empty())
+ {
+ (cloth.mMotionConstraints.mTarget.empty() ? cloth.mMotionConstraints.mStart : cloth.mMotionConstraints.mTarget) = cloth.mMotionConstraints.mHostCopy;
+ }
+ if (!cloth.mSeparationConstraints.mHostCopy.empty())
+ {
+ (cloth.mSeparationConstraints.mTarget.empty() ? cloth.mSeparationConstraints.mStart : cloth.mSeparationConstraints.mTarget) = cloth.mSeparationConstraints.mHostCopy;
+ }
+ }
+ mFrameDataHostCopy.unmap();
+ mSharedMemorySize = maxSharedMemorySize;
+
+ mFrameData = mFrameDataHostCopy;
+ mIterationData = mIterationDataHostCopy;
+
+ // Refresh default-usage device copies of the factory-wide collision buffers
+ // so the kernel reads stable snapshots.
+ mFactory.mCapsuleIndicesDeviceCopy = mFactory.mCapsuleIndices.mBuffer;
+ mFactory.mCollisionSpheresDeviceCopy = mFactory.mCollisionSpheres.mBuffer;
+ mFactory.mConvexMasksDeviceCopy = mFactory.mConvexMasks.mBuffer;
+ mFactory.mCollisionPlanesDeviceCopy = mFactory.mCollisionPlanes.mBuffer;
+ mFactory.mCollisionTrianglesDeviceCopy = mFactory.mCollisionTriangles.mBuffer;
+// mFactory.mParticleAccelerations = mFactory.mParticleAccelerationsHostCopy;
+ mFactory.mRestPositionsDeviceCopy = mFactory.mRestPositions.mBuffer;
+}
+
+// Dispatches the solver compute shader: one thread group per cloth.
+// Binds 17 SRVs (t0..t16) and 4 UAVs (u0..u3); the slot order must match
+// the register assignments in DxSolverKernel.hlsl.
+void cloth::DxSolver::executeKernel()
+{
+ DxContextLock contextLock(mFactory);
+
+ // Upload per-cloth data only when something changed since the last run.
+ if (mClothDataDirty)
+ {
+ NV_CLOTH_ASSERT(mClothDataHostCopy.size() == mClothData.size());
+ mClothData = mClothDataHostCopy;
+ mClothDataDirty = false;
+ }
+
+ ID3D11DeviceContext* context = mFactory.mContextManager->getContext();
+ {
+ context->CSSetShader(mFactory.mSolverKernelComputeShader, NULL, 0);
+
+ // Slot t1 is intentionally NULL: frame data is bound as a UAV (u3) below
+ // because the kernel writes bounds/sleep results back into it.
+ // NOTE(review): collision triangles are bound from mCollisionTriangles.mBuffer
+ // although beginFrame() refreshes mCollisionTrianglesDeviceCopy — confirm
+ // whether the device copy should be used here like the other collision buffers.
+ ID3D11ShaderResourceView* resourceViews[17] = {
+ mClothData.mBuffer.resourceView(), /*mFrameData.mBuffer.resourceView()*/NULL,
+ mIterationData.mBuffer.resourceView(), mFactory.mPhaseConfigs.mBuffer.resourceView(),
+ mFactory.mConstraints.mBuffer.resourceView(), mFactory.mTethers.mBuffer.resourceView(),
+ mFactory.mCapsuleIndicesDeviceCopy.resourceView(), mFactory.mCollisionSpheresDeviceCopy.resourceView(),
+ mFactory.mConvexMasksDeviceCopy.resourceView(), mFactory.mCollisionPlanesDeviceCopy.resourceView(),
+ mFactory.mCollisionTriangles.mBuffer.resourceView(),
+ mFactory.mMotionConstraints.mBuffer.resourceView(),
+ mFactory.mSeparationConstraints.mBuffer.resourceView(),
+ mFactory.mParticleAccelerations.mBuffer.resourceView(),
+ mFactory.mRestPositionsDeviceCopy.resourceView(),
+ mFactory.mSelfCollisionIndices.mBuffer.resourceView(),
+ mFactory.mStiffnessValues.mBuffer.resourceView()
+ };
+ context->CSSetShaderResources(0, 17, resourceViews);
+
+ ID3D11UnorderedAccessView* accessViews[4] = {
+ mFactory.mParticles.mBuffer.accessView(),
+ mFactory.mSelfCollisionParticles.mBuffer.accessView(),
+ mFactory.mSelfCollisionData.mBuffer.accessView(),
+ mFrameData.mBuffer.accessView()
+ };
+ context->CSSetUnorderedAccessViews(0, 4, accessViews, NULL);
+
+ // One thread group per cloth.
+ context->Dispatch(mCloths.size(), 1, 1);
+
+ context->CSSetShader(NULL, NULL, 0);
+
+ // Unbind everything so these buffers can be used as copy targets afterwards.
+ ID3D11ShaderResourceView* resourceViewsNULL[17] = {
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL
+ };
+ context->CSSetShaderResources(0, 17, resourceViewsNULL);
+ ID3D11UnorderedAccessView* accessViewsNULL[4] = { NULL, NULL, NULL, NULL };
+ context->CSSetUnorderedAccessViews(0, 4, accessViewsNULL, NULL);
+// Disabled diagnostic code: reads back particles and self-collision sort data
+// for the first cloth so they can be inspected in a debugger.
+#if 0
+ if (!mCloths[0]->mSelfCollisionData.empty())
+ {
+ DxDeviceVector<PxVec4> _ParticlesHostCopy(mFactory.mContextManager, DxStagingBufferPolicy());
+ DxDeviceVector<uint32_t> _SortDataHostCopy(mFactory.mContextManager, DxStagingBufferPolicy());
+ _ParticlesHostCopy.resize(mCloths[0]->mParticles.mSize);
+ _SortDataHostCopy.resize(mCloths[0]->mSelfCollisionData.mSize);
+ {
+ CD3D11_BOX box(mCloths[0]->mParticles.mOffset * sizeof(PxVec4), 0, 0, (mCloths[0]->mParticles.mOffset + mCloths[0]->mParticles.mSize) * sizeof(PxVec4), 1, 1);
+ context->CopySubresourceRegion(_ParticlesHostCopy.mBuffer.mBuffer, 0, 0, 0, 0, mCloths[0]->mParticles.buffer(), 0, &box);
+ }
+ {
+ CD3D11_BOX box(mCloths[0]->mSelfCollisionData.mOffset * sizeof(uint32_t), 0, 0, (mCloths[0]->mSelfCollisionData.mOffset + mCloths[0]->mSelfCollisionData.mSize) * sizeof(uint32_t), 1, 1);
+ context->CopySubresourceRegion(_SortDataHostCopy.mBuffer.mBuffer, 0, 0, 0, 0, mCloths[0]->mSelfCollisionData.buffer(), 0, &box);
+ }
+
+ PxVec4* particles = mCloths[0]->mParticlesHostCopy.map(D3D11_MAP_READ);
+ PX_UNUSED(particles);
+ uint32_t* sortData = _SortDataHostCopy.map(D3D11_MAP_READ);
+ PX_UNUSED(sortData);
+ _SortDataHostCopy.unmap();
+ mCloths[0]->mParticlesHostCopy.unmap();
+ }
+#endif
+ }
+
+ // copy particle data from device to host
+ for (auto it : mCloths)
+ {
+ it->mParticlesHostCopy = it->mParticles;
+ }
+
+ mFrameDataHostCopy = mFrameData;
+
+#if PX_DEBUG
+ // cpu synchronization
+ // (busy-waits on the event query until the GPU has finished the dispatch)
+ context->End(mSyncQuery);
+ while (context->GetData(mSyncQuery, nullptr, 0, 0));
+#endif
+}
+
+// Per-frame teardown: reads back the kernel's per-cloth results (bounds,
+// sleep counters), swaps start/target collision shape buffers, runs CPU
+// inter-collision, and closes the profile event.
+void cloth::DxSolver::endFrame()
+{
+ DxContextLock contextLock(mFactory);
+
+ DxFrameData* fIt = mFrameDataHostCopy.map(D3D11_MAP_READ);
+ ClothVector::Iterator cIt, cEnd = mCloths.end();
+ for (cIt = mCloths.begin(); cIt != cEnd; ++cIt, ++fIt)
+ {
+ DxCloth& cloth = **cIt;
+
+ cloth.mHostParticlesDirty = false;
+ cloth.mDeviceParticlesDirty = false;
+
+ cloth.mMotionConstraints.pop();
+ // don't clear host copy because nothing is being uploaded yet
+ // cloth.mMotionConstraints.mHostCopy.resize(0);
+
+ cloth.mSeparationConstraints.pop();
+ // don't clear host copy because nothing is being uploaded yet
+ // cloth.mSeparationConstraints.mHostCopy.resize(0);
+
+ // Interpolation targets become the new start states for the next frame.
+ if (!cloth.mTargetCollisionSpheres.empty())
+ {
+ shdfnd::swap(cloth.mStartCollisionSpheres, cloth.mTargetCollisionSpheres);
+ cloth.mTargetCollisionSpheres.resize(0);
+ }
+
+ if (!cloth.mTargetCollisionPlanes.empty())
+ {
+ shdfnd::swap(cloth.mStartCollisionPlanes, cloth.mTargetCollisionPlanes);
+ cloth.mTargetCollisionPlanes.resize(0);
+ }
+
+ if (!cloth.mTargetCollisionTriangles.empty())
+ {
+ shdfnd::swap(cloth.mStartCollisionTriangles, cloth.mTargetCollisionTriangles);
+ cloth.mTargetCollisionTriangles.resize(0);
+ }
+
+ // mParticleBounds stores, per axis, the upper bound and the NEGATED lower
+ // bound (judging by the formulas below: center = (upper+lower)/2,
+ // halfExtent = (upper-lower)/2) — confirm against the kernel's reduction.
+ for (uint32_t i = 0; i < 3; ++i)
+ {
+ float upper = fIt->mParticleBounds[i * 2 + 0];
+ float negativeLower = fIt->mParticleBounds[i * 2 + 1];
+ cloth.mParticleBoundsCenter[i] = (upper - negativeLower) * 0.5f;
+ cloth.mParticleBoundsHalfExtent[i] = (upper + negativeLower) * 0.5f;
+ }
+
+ cloth.mSleepPassCounter = fIt->mSleepPassCounter;
+ cloth.mSleepTestCounter = fIt->mSleepTestCounter;
+ }
+ mFrameDataHostCopy.unmap();
+
+ interCollision();
+
+ /*
+ ID3DUserDefinedAnnotation* annotation;
+ mFactory.mContextManager->getContext()->QueryInterface(&annotation);
+ annotation->EndEvent();
+ annotation->Release();
+ */
+ NV_CLOTH_PROFILE_STOP_CROSSTHREAD(mSimulateProfileEventData,"cloth::DxSolver::simulate", 0);
+}
+
+// Runs cloth-vs-cloth collision on the CPU using the software path
+// (SwInterCollision). Particles are mapped for CPU access for the duration;
+// device copies are marked dirty so they get re-uploaded next beginFrame().
+// No-op unless iterations, distance, and a filter callback are all set.
+void cloth::DxSolver::interCollision()
+{
+ if (!mInterCollisionIterations || mInterCollisionDistance == 0.0f)
+ return;
+ if (mInterCollisionFilter == nullptr)
+ {
+ NV_CLOTH_LOG_WARNING("Inter collision will not work unless an inter collision filter is set using Solver::setInterCollisionFilter.");
+ return;
+ }
+
+ typedef SwInterCollision<Simd4f> SwInterCollision;
+
+ // rebuild cloth instance array
+ mInterCollisionInstances.resize(0);
+ DxFrameData* frameData = mFrameDataHostCopy.map(D3D11_MAP_READ);
+ for (uint32_t i = 0, n = mCloths.size(); i < n; ++i)
+ {
+ DxCloth& cloth = *mCloths[i];
+
+ cloth.mapParticles();
+ float elasticity = 1.0f / frameData[i].mNumIterations;
+ NV_CLOTH_ASSERT(!cloth.mHostParticlesDirty);
+ PxVec4* particles = cloth.mParticlesMapPointer;
+ // By default all particles participate; a non-empty self-collision index
+ // list restricts inter-collision to that subset.
+ uint32_t* indices = NULL, numIndices = cloth.mNumParticles;
+ if (!cloth.mSelfCollisionIndices.empty())
+ {
+ indices = cloth.mSelfCollisionIndicesHost.begin();
+ numIndices = uint32_t(cloth.mSelfCollisionIndices.size());
+ }
+
+ mInterCollisionInstances.pushBack(SwInterCollisionData(
+ particles, particles + cloth.mNumParticles, numIndices, indices, cloth.mTargetMotion,
+ cloth.mParticleBoundsCenter, cloth.mParticleBoundsHalfExtent, elasticity, cloth.mUserData));
+
+ // CPU will modify particles below; device copy becomes stale.
+ cloth.mDeviceParticlesDirty = true;
+ }
+ mFrameDataHostCopy.unmap();
+
+ uint32_t requiredTempMemorySize = uint32_t(
+ SwInterCollision::estimateTemporaryMemory(&mInterCollisionInstances[0], mInterCollisionInstances.size()));
+
+ // realloc temp memory if necessary
+ if (mInterCollisionScratchMemSize < requiredTempMemorySize)
+ {
+ if (mInterCollisionScratchMem)
+ NV_CLOTH_FREE(mInterCollisionScratchMem);
+
+ mInterCollisionScratchMem = NV_CLOTH_ALLOC(requiredTempMemorySize, "cloth::SwSolver::mInterCollisionScratchMem");
+ mInterCollisionScratchMemSize = requiredTempMemorySize;
+ }
+
+ SwKernelAllocator allocator(mInterCollisionScratchMem, mInterCollisionScratchMemSize);
+
+ // run inter-collision
+ SwInterCollision(mInterCollisionInstances.begin(), mInterCollisionInstances.size(), mInterCollisionDistance,
+ mInterCollisionStiffness, mInterCollisionIterations, mInterCollisionFilter, allocator)();
+
+ for (uint32_t i = 0, n = mCloths.size(); i < n; ++i)
+ mCloths[i]->unmapParticles();
+}
+
+#endif // NV_CLOTH_ENABLE_DX11
diff --git a/NvCloth/src/dx/DxSolver.h b/NvCloth/src/dx/DxSolver.h
new file mode 100644
index 0000000..cafbfa5
--- /dev/null
+++ b/NvCloth/src/dx/DxSolver.h
@@ -0,0 +1,148 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2017 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#pragma once
+
+#include "NvCloth/Solver.h"
+#include "DxClothData.h"
+#include "DxContextLock.h"
+#include "DxDeviceVector.h"
+#include "../SwInterCollision.h"
+
+struct ID3D11ShaderResourceView;
+
+namespace nv
+{
+namespace cloth
+{
+
+class DxCloth;
+class DxFabric;
+struct PhaseConfig;
+struct DxKernelData;
+
+// DirectX 11 implementation of the NvCloth Solver interface.
+// Simulates all attached cloths in a single compute dispatch per frame
+// (see getSimulationChunkCount, which always returns 1) and runs
+// cloth-vs-cloth inter-collision on the CPU.
+class DxSolver : private DxContextLock, public Solver
+{
+ PX_NOCOPY(DxSolver);
+
+ public:
+ DxSolver(DxFactory&);
+ ~DxSolver();
+
+ virtual void addCloth(Cloth*);
+ virtual void removeCloth(Cloth*);
+
+ // functions executing the simulation work.
+ virtual bool beginSimulation(float dt);
+ virtual void simulateChunk(int idx);
+ virtual void endSimulation();
+ virtual int getSimulationChunkCount() const override;
+
+ // True when compute-shader setup failed (unsupported device or missing shader).
+ virtual bool hasError() const
+ {
+ return mComputeError;
+ }
+
+ // Inter-collision parameters (consumed by the CPU interCollision() pass).
+ virtual void setInterCollisionDistance(float distance)
+ {
+ mInterCollisionDistance = distance;
+ }
+ virtual float getInterCollisionDistance() const
+ {
+ return mInterCollisionDistance;
+ }
+ virtual void setInterCollisionStiffness(float stiffness)
+ {
+ mInterCollisionStiffness = stiffness;
+ }
+ virtual float getInterCollisionStiffness() const
+ {
+ return mInterCollisionStiffness;
+ }
+ virtual void setInterCollisionNbIterations(uint32_t nbIterations)
+ {
+ mInterCollisionIterations = nbIterations;
+ }
+ virtual uint32_t getInterCollisionNbIterations() const
+ {
+ return mInterCollisionIterations;
+ }
+ virtual void setInterCollisionFilter(InterCollisionFilter filter)
+ {
+ mInterCollisionFilter = filter;
+ }
+
+ private:
+ // simulate helper functions
+ void beginFrame();
+ void executeKernel();
+ void endFrame();
+
+ void interCollision();
+
+ private:
+ DxFactory& mFactory;
+
+ typedef Vector<DxCloth*>::Type ClothVector;
+ ClothVector mCloths; // kept sorted by estimated cost, see addCloth()
+
+ // Per-cloth kernel data; host copy is uploaded when mClothDataDirty is set.
+ DxDeviceVector<DxClothData> mClothData;
+ Vector<DxClothData>::Type mClothDataHostCopy;
+ bool mClothDataDirty;
+
+ // Per-cloth frame data; written by beginFrame(), updated by the kernel,
+ // read back in endFrame().
+ DxDeviceVector<DxFrameData> mFrameData;
+ DxDeviceVector<DxFrameData> mFrameDataHostCopy;
+
+ // Flattened per-iteration data for all cloths of the current frame.
+ DxDeviceVector<DxIterationData> mIterationData;
+ Vector<DxIterationData>::Type mIterationDataHostCopy;
+
+ float mFrameDt;
+
+ // Groupshared memory accounting (bytes), see beginFrame().
+ uint32_t mSharedMemorySize;
+ uint32_t mSharedMemoryLimit;
+
+ int mKernelSharedMemorySize;
+ ID3D11Query* mSyncQuery; // event query for CPU/GPU sync in debug builds
+
+ // CPU inter-collision state, see interCollision().
+ float mInterCollisionDistance;
+ float mInterCollisionStiffness;
+ uint32_t mInterCollisionIterations;
+ InterCollisionFilter mInterCollisionFilter;
+ void* mInterCollisionScratchMem;
+ uint32_t mInterCollisionScratchMemSize;
+ Vector<SwInterCollisionData>::Type mInterCollisionInstances;
+
+ bool mComputeError;
+
+ friend void record(const DxSolver&);
+
+ void* mSimulateProfileEventData; // cross-thread profile event handle
+};
+}
+}
diff --git a/NvCloth/src/dx/DxSolverKernel.hlsl b/NvCloth/src/dx/DxSolverKernel.hlsl
new file mode 100644
index 0000000..2d42dea
--- /dev/null
+++ b/NvCloth/src/dx/DxSolverKernel.hlsl
@@ -0,0 +1,1665 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2017 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+
+#include "DxClothData.h"
+
+#ifndef FLT_MAX
+#define FLT_MAX 3.402823466e+38F
+#endif
+
+
+// Pair of 32-bit indices into the collision-sphere buffer; each entry of
+// bCapsuleIndices names the two spheres that bound one collision capsule.
+struct IndexPair
+{
+ uint32_t first;
+ uint32_t second;
+};
+
+// --- UAVs (read/write) ---------------------------------------------------
+// Particle positions: xyz = position, w = inverse mass (see integrateParticles).
+RWStructuredBuffer<float4> bParticles : register(u0);
+RWStructuredBuffer<float4> bSelfCollisionParticles : register(u1);
+RWStructuredBuffer<uint32_t> bSelfCollisionData : register(u2);
+
+// Per-frame data; read into gFrameData and written back by the kernel.
+RWStructuredBuffer<DxFrameData> bFrameData : register(u3);
+
+// --- SRVs (read-only), one buffer per solver input -----------------------
+StructuredBuffer<DxClothData> bClothData : register(t0);
+StructuredBuffer<DxIterationData> bIterData : register(t2);
+
+StructuredBuffer<DxPhaseConfig> bPhaseConfigs : register(t3);
+StructuredBuffer<DxConstraint> bConstraints : register(t4);
+StructuredBuffer<DxTether> bTetherConstraints : register(t5);
+
+// Capsules reference two spheres each; spheres are float4(center, radius).
+StructuredBuffer<IndexPair> bCapsuleIndices : register(t6);
+StructuredBuffer<float4> bCollisionSpheres : register(t7);
+
+// Convexes are bitmasks selecting planes; planes are float4(normal, d).
+StructuredBuffer<uint32_t> bConvexMasks : register(t8);
+StructuredBuffer<float4> bCollisionPlanes : register(t9);
+
+// Three consecutive float3 entries per collision triangle.
+StructuredBuffer<float3> bCollisionTriangles : register(t10);
+
+// Motion/separation constraint spheres: float4(center, radius).
+StructuredBuffer<float4> bMotionConstraints : register(t11);
+StructuredBuffer<float4> bSeparationConstraints : register(t12);
+
+StructuredBuffer<float4> bParticleAccelerations : register(t13);
+
+StructuredBuffer<float4> bRestPositions : register(t14);
+
+StructuredBuffer<int32_t> bSelfCollisionIndices : register(t15);
+
+// Optional per-constraint stiffness values (used when mStiffnessOffset != -1).
+StructuredBuffer<float> bPerConstraintStiffness : register(t16);
+
+
+// Per-group copies of the cloth/frame/iteration state, plus the particle
+// staging area in shared memory (4 uints = one float4 per particle).
+groupshared DxClothData gClothData;
+groupshared DxFrameData gFrameData;
+groupshared DxIterationData gIterData;
+groupshared uint gCurParticles[MaxParticlesInSharedMem * 4];
+
+// Scratch area for bounds reduction -- presumably filled by code outside
+// this view (DxSortKernel.inc or later in the file); TODO confirm.
+groupshared float gBounds[192];
+
+
+// One thread group = 1024 threads = 32 warps of 32 lanes.
+static const uint32_t blockDim = 1024;
+static const uint32_t BlockSize = blockDim;
+static const uint32_t WarpsPerBlock = (BlockSize >> 5);
+
+#include "DxSortKernel.inc"
+
+#define FLT_EPSILON 1.192092896e-07F
+
+// Abstraction over particle storage so the solver routines below can run on
+// either backing store (implementations are outside this view -- presumably
+// shared-memory gCurParticles and the global bParticles buffer; confirm).
+// get/set exchange a full float4: xyz = position, w = inverse mass.
+interface IParticles
+{
+ float4 get(uint32_t index);
+ void set(uint32_t index, float4 value);
+};
+
+
+// Position integration using current and previous positions (Verlet-style).
+// The 24-float gIterData.mIntegrationTrafo encodes translation, damping and
+// (when mIsTurning) the full rotating-frame coupling terms. A particle with
+// w == 0 in the current buffer inherits w from the previous buffer; only
+// particles with resulting w > 0 are moved. On exit curParticles holds the
+// advanced position and prevParticles holds the (translated) old position.
+void integrateParticles(IParticles curParticles, IParticles prevParticles, uint32_t threadIdx)
+{
+ for (uint32_t i = threadIdx; i < gClothData.mNumParticles; i += blockDim)
+ {
+ float4 curPos = curParticles.get(i);
+
+ float nextX = curPos.x, curX = nextX;
+ float nextY = curPos.y, curY = nextY;
+ float nextZ = curPos.z, curZ = nextZ;
+ float nextW = curPos.w;
+
+ float4 prevPos = prevParticles.get(i);
+
+ // w == 0 in the current buffer means "keep previous inverse mass"
+ if (nextW == 0.0f)
+ nextW = prevPos.w;
+
+ if (nextW > 0.0f)
+ {
+ float prevX = prevPos.x;
+ float prevY = prevPos.y;
+ float prevZ = prevPos.z;
+
+ if (gIterData.mIsTurning)
+ {
+ // rotating reference frame: full 3x3 coupling of cur and prev
+ nextX = nextX + gIterData.mIntegrationTrafo[3] + curX * gIterData.mIntegrationTrafo[15] +
+ prevX * gIterData.mIntegrationTrafo[6] + curY * gIterData.mIntegrationTrafo[16] +
+ prevY * gIterData.mIntegrationTrafo[7] + curZ * gIterData.mIntegrationTrafo[17] +
+ prevZ * gIterData.mIntegrationTrafo[8];
+
+ nextY = nextY + gIterData.mIntegrationTrafo[4] + curX * gIterData.mIntegrationTrafo[18] +
+ prevX * gIterData.mIntegrationTrafo[9] + curY * gIterData.mIntegrationTrafo[19] +
+ prevY * gIterData.mIntegrationTrafo[10] + curZ * gIterData.mIntegrationTrafo[20] +
+ prevZ * gIterData.mIntegrationTrafo[11];
+
+ nextZ = nextZ + gIterData.mIntegrationTrafo[5] + curX * gIterData.mIntegrationTrafo[21] +
+ prevX * gIterData.mIntegrationTrafo[12] + curY * gIterData.mIntegrationTrafo[22] +
+ prevY * gIterData.mIntegrationTrafo[13] + curZ * gIterData.mIntegrationTrafo[23] +
+ prevZ * gIterData.mIntegrationTrafo[14];
+ }
+ else
+ {
+ // non-turning fast path: per-axis damped velocity + translation
+ nextX += (curX - prevX) * gIterData.mIntegrationTrafo[6] + gIterData.mIntegrationTrafo[3];
+ nextY += (curY - prevY) * gIterData.mIntegrationTrafo[9] + gIterData.mIntegrationTrafo[4];
+ nextZ += (curZ - prevZ) * gIterData.mIntegrationTrafo[12] + gIterData.mIntegrationTrafo[5];
+ }
+
+ // translate the old position into the new reference frame
+ curX += gIterData.mIntegrationTrafo[0];
+ curY += gIterData.mIntegrationTrafo[1];
+ curZ += gIterData.mIntegrationTrafo[2];
+ }
+
+ curPos.x = nextX;
+ curPos.y = nextY;
+ curPos.z = nextZ;
+ curPos.w = nextW;
+ curParticles.set(i, curPos);
+
+ prevPos.x = curX;
+ prevPos.y = curY;
+ prevPos.z = curZ;
+ prevParticles.set(i, prevPos);
+ }
+}
+
+// Adds user-supplied per-particle accelerations (a * iterDt^2) to current
+// positions. The loop runs four consecutive threads per particle (i counts
+// particle * 4); the lane with (threadIdx & 3) == 3 gets a zero dt.
+// NOTE(review): all four lanes read-modify-write the same float4 slot --
+// this only makes sense if the IParticles implementation stores/updates
+// per-component (the threadIdx trick would then leave w untouched);
+// confirm against the IParticles implementation, which is outside this view.
+void accelerateParticles(IParticles curParticles, uint32_t threadIdx)
+{
+ // might be better to move this into integrate particles
+ uint32_t accelerationsOffset = gFrameData.mParticleAccelerationsOffset;
+
+ GroupMemoryBarrierWithGroupSync(); // looping with 4 instead of 1 thread per particle
+
+ float sqrIterDt = ~threadIdx & 0x3 ? gFrameData.mIterDt * gFrameData.mIterDt : 0.0f;
+ for (uint32_t i = threadIdx; i < gClothData.mNumParticles * 4; i += blockDim)
+ {
+ float4 acceleration = bParticleAccelerations[accelerationsOffset + i];
+
+ float4 curPos = curParticles.get(i / 4);
+ // only particles with positive inverse mass are accelerated
+ if (curPos.w > 0.0f)
+ {
+ curPos += acceleration * sqrIterDt;
+ curParticles.set(i / 4, curPos);
+ }
+ }
+
+ GroupMemoryBarrierWithGroupSync();
+}
+
+// Applies motion constraints: each particle is pulled toward the inside of a
+// sphere interpolated (by alpha) between the start and target constraint
+// sets, scaled by the frame's motion-constraint stiffness. A constraint
+// whose scaled/biased radius is non-positive pins the particle by zeroing
+// its inverse mass. No-op when no motion constraints are bound (-1 offset).
+void constrainMotion(IParticles curParticles, uint32_t threadIdx, float alpha)
+{
+ if (gFrameData.mStartMotionConstrainsOffset == -1)
+ return;
+
+ // negative because of fused multiply-add optimization
+ float negativeScale = -gClothData.mMotionConstraintScale;
+ float negativeBias = -gClothData.mMotionConstraintBias;
+
+ uint32_t startMotionConstrainsOffset = gFrameData.mStartMotionConstrainsOffset;
+ uint32_t targetMotionConstrainsOffset = gFrameData.mTargetMotionConstrainsOffset;
+
+ // blockDim is a scalar uint32_t; drop the .x scalar swizzle the original
+ // used here so the stride matches every other solver loop in this file
+ for (uint32_t i = threadIdx; i < gClothData.mNumParticles; i += blockDim)
+ {
+ float4 startPos = bMotionConstraints[startMotionConstrainsOffset + i];
+ float4 targetPos = bMotionConstraints[targetMotionConstrainsOffset + i];
+
+ // constraint sphere interpolated between start and target of the frame
+ float4 sphere = startPos + (targetPos - startPos) * alpha;
+
+ float4 curPos = curParticles.get(i);
+
+ float3 delta = sphere.xyz - curPos.xyz;
+
+ float sqrLength = FLT_EPSILON + dot(delta, delta);
+ float negativeRadius = min(0.0f, sphere.w * negativeScale + negativeBias);
+
+ // pull the particle back toward the center when outside the radius
+ float slack = max(negativeRadius * rsqrt(sqrLength) + 1.0f, 0.0f) * gFrameData.mMotionConstraintStiffness;
+
+ curPos.xyz += slack * delta;
+
+ // set invMass to zero if radius is zero
+ if (negativeRadius >= 0.0f)
+ curPos.w = 0.0f;
+
+ curParticles.set(i, curPos);
+ }
+}
+
+// Applies long-range tether (attachment) constraints. Each tether packs the
+// anchor particle index in its low 16 bits and a quantized rest length in
+// the high 16 bits (scaled by mTetherConstraintScale). A particle may own
+// several tethers (strided by numParticles); corrections are accumulated and
+// applied once, with stiffness normalized by numParticles / numTethers.
+void constrainTether(IParticles curParticles, uint32_t threadIdx)
+{
+ if (0.0f == gFrameData.mTetherConstraintStiffness || !gClothData.mNumTethers)
+ return;
+
+ uint32_t numParticles = gClothData.mNumParticles;
+ uint32_t numTethers = gClothData.mNumTethers;
+
+ float stiffness = numParticles * gFrameData.mTetherConstraintStiffness / numTethers;
+ float scale = gClothData.mTetherConstraintScale;
+
+ for (uint32_t i = threadIdx; i < gClothData.mNumParticles; i += blockDim)
+ {
+ float4 curPos = curParticles.get(i);
+ float posX = curPos.x;
+ float posY = curPos.y;
+ float posZ = curPos.z;
+
+ float offsetX = 0.0f;
+ float offsetY = 0.0f;
+ float offsetZ = 0.0f;
+
+ // all tethers anchoring particle i, strided by particle count
+ for (uint32_t j = i; j < numTethers; j += gClothData.mNumParticles)
+ {
+ uint32_t tether = bTetherConstraints[gClothData.mTetherOffset + j].mValue;
+
+ uint32_t anchor = tether & 0xffff;
+ float4 anchorPos = curParticles.get(anchor);
+ float deltaX = anchorPos.x - posX;
+ float deltaY = anchorPos.y - posY;
+ float deltaZ = anchorPos.z - posZ;
+
+ float sqrLength = FLT_EPSILON + deltaX * deltaX + deltaY * deltaY + deltaZ * deltaZ;
+
+ float radius = (tether >> 16) * scale;
+ float slack = 1.0f - radius * rsqrt(sqrLength);
+
+ // only pull in when stretched past the tether length, never push out
+ if (slack > 0.0f)
+ {
+ offsetX += deltaX * slack;
+ offsetY += deltaY * slack;
+ offsetZ += deltaZ * slack;
+ }
+ }
+
+ curPos.x = posX + offsetX * stiffness;
+ curPos.y = posY + offsetY * stiffness;
+ curPos.z = posZ + offsetZ * stiffness;
+
+ curParticles.set(i, curPos);
+ }
+}
+
+// Solves the distance (edge) constraints phase by phase. Per-phase stiffness
+// values are converted from the stored log form via 1 - exp2(s * exponent);
+// when mStiffnessOffset != -1 a per-constraint stiffness buffer overrides
+// the phase value. Each constraint packs its two particle indices into one
+// uint (16 bits each) and corrections are mass-weighted by the particles'
+// inverse masses. NOTE(review): constraints within a phase are solved in
+// parallel with unsynchronized get/set on shared particles -- relies on
+// phases being pre-partitioned into independent sets; no sync inside the
+// inner loop, only between phases.
+void solveFabric(IParticles curParticles, uint32_t threadIdx)
+{
+ for (uint32_t i = 0; i < gClothData.mNumPhases; ++i)
+ {
+ DxPhaseConfig phaseConfig = bPhaseConfigs[i + gClothData.mPhaseConfigOffset];
+
+ float exponent = gFrameData.mStiffnessExponent;
+ phaseConfig.mStiffness = 1.0f - exp2(phaseConfig.mStiffness * exponent);
+ phaseConfig.mStiffnessMultiplier = 1.0f - exp2(phaseConfig.mStiffnessMultiplier * exponent);
+
+ uint32_t firstConstraint = gClothData.mConstraintOffset + phaseConfig.mFirstConstraint;
+ bool useStiffnessPerConstraint = gClothData.mStiffnessOffset!=-1;
+ uint32_t firstStiffnessValue = gClothData.mStiffnessOffset + phaseConfig.mFirstConstraint;
+
+ GroupMemoryBarrierWithGroupSync();
+
+ for (uint32_t j = threadIdx; j < phaseConfig.mNumConstraints; j += blockDim)
+ {
+ DxConstraint constraint = bConstraints[firstConstraint + j];
+
+ // two 16-bit particle indices packed into one uint
+ uint32_t vpi = (constraint.mIndices) & 0xffff;
+ uint32_t vpj = (constraint.mIndices >> 16) & 0xffff;
+ float rij = constraint.mRestvalue;
+
+ float4 vpiPos = curParticles.get(vpi);
+ float vxi = vpiPos.x;
+ float vyi = vpiPos.y;
+ float vzi = vpiPos.z;
+ float vwi = vpiPos.w;
+
+ float4 vpjPos = curParticles.get(vpj);
+ float vxj = vpjPos.x;
+ float vyj = vpjPos.y;
+ float vzj = vpjPos.z;
+ float vwj = vpjPos.w;
+
+ float hxij = vxj - vxi;
+ float hyij = vyj - vyi;
+ float hzij = vzj - vzi;
+
+ // negErij = -(relative edge error); zero for degenerate rest lengths
+ float e2ij = FLT_EPSILON + hxij * hxij + hyij * hyij + hzij * hzij;
+ float negErij = rij > FLT_EPSILON ? -1.0f + rij * rsqrt(e2ij) : 0.0f;
+
+ // soften the error inside the [compression, stretch] limit window
+ negErij = negErij + phaseConfig.mStiffnessMultiplier *
+ max(phaseConfig.mCompressionLimit, min(-negErij, phaseConfig.mStretchLimit));
+
+ float stiffness = useStiffnessPerConstraint?
+ 1.0f - exp2(bPerConstraintStiffness[firstStiffnessValue + j] * exponent)
+ :
+ phaseConfig.mStiffness;
+ // correction split between the two particles by inverse mass
+ float negExij = negErij * stiffness / (FLT_EPSILON + vwi + vwj);
+
+ float vmi = -vwi * negExij;
+ vpiPos.x = vxi + vmi * hxij;
+ vpiPos.y = vyi + vmi * hyij;
+ vpiPos.z = vzi + vmi * hzij;
+ curParticles.set(vpi, vpiPos);
+
+ float vmj = +vwj * negExij;
+ vpjPos.x = vxj + vmj * hxij;
+ vpjPos.y = vyj + vmj * hyij;
+ vpjPos.z = vzj + vmj * hzij;
+ curParticles.set(vpj, vpjPos);
+ }
+ }
+}
+
+// Computes a Coulomb-style friction impulse for a collided particle.
+// collisionImpulse is the accumulated pushout delta (its direction is the
+// collision normal, its magnitude the penetration); scale is 1/numCollisions.
+// The impulse opposes the tangential relative velocity and is clamped so it
+// never exceeds the tangential velocity itself (j >= -1 factor on relVelTang).
+float3 calcFrictionImpulse(float3 prevPos, float3 curPos, float3 shapeVelocity, float scale, float3 collisionImpulse)
+{
+ const float frictionScale = gClothData.mFrictionScale;
+
+ // calculate collision normal
+ float deltaSq = dot(collisionImpulse, collisionImpulse);
+
+ float rcpDelta = rsqrt(deltaSq + FLT_EPSILON);
+
+ float3 norm = collisionImpulse * rcpDelta;
+
+ // calculate relative velocity scaled by number of collision
+ float3 relVel = curPos - prevPos - shapeVelocity * scale;
+
+ // calculate relative tangential velocity
+ float3 relVelTang = relVel - dot(relVel, norm) * norm;
+
+ // calculate magnitude of vt
+ float rcpVt = rsqrt(dot(relVelTang, relVelTang) + FLT_EPSILON);
+
+ // magnitude of friction impulse (cannot be larger than -|vt|)
+ float j = max(-frictionScale * deltaSq * rcpDelta * scale * rcpVt, -1.0f);
+ return relVelTang * j;
+}
+
+// Signed distance from position to collision plane planeIndex, with the
+// plane linearly interpolated (by alpha) between this frame's start and
+// target plane sets. Outputs the interpolated plane normal in norm.
+// NOTE(review): the lerped normal is not re-normalized here.
+float calcPlaneDist(float3 position, float alpha, uint32_t planeIndex, out float3 norm)
+{
+ float4 startPlane = bCollisionPlanes[gFrameData.mStartCollisionPlaneOffset + planeIndex];
+ float4 targetPlane = bCollisionPlanes[gFrameData.mTargetCollisionPlaneOffset + planeIndex];
+
+ float4 plane = lerp(startPlane, targetPlane, alpha);
+
+ norm = plane.xyz;
+ return dot(position, norm) + plane.w;
+}
+
+// Collides a point against all convex volumes. Each convex is a bitmask over
+// the plane buffer; the point is inside when its distance to every selected
+// plane is negative. For each penetrated convex the pushout along the least-
+// penetrated plane's normal is accumulated into delta. Returns the number of
+// convexes the point was inside of.
+uint32_t collideConvexes(float3 position, float alpha, out float3 delta)
+{
+ delta.xyz = float3(0.0f, 0.0f, 0.0f);
+
+ uint32_t numCollisions = 0;
+ for (uint32_t i = 0; i < gClothData.mNumConvexes; ++i)
+ {
+ uint32_t mask = bConvexMasks[gClothData.mConvexMasksOffset + i];
+
+ float3 maxNorm;
+ float maxDist = calcPlaneDist(position, alpha, firstbitlow(mask), maxNorm);
+
+ // walk remaining planes (mask &= mask - 1 clears the lowest set bit);
+ // early-out as soon as one plane reports the point outside
+ while ((maxDist < 0.0f) && (mask &= mask - 1))
+ {
+ float3 norm;
+ float dist = calcPlaneDist(position, alpha, firstbitlow(mask), norm);
+ if (dist > maxDist)
+ maxDist = dist, maxNorm = norm;
+ }
+
+ if (maxDist < 0.0f)
+ {
+ delta.xyz -= maxNorm * maxDist;
+
+ ++numCollisions;
+ }
+ }
+ return numCollisions;
+}
+
+// Per-particle convex collision pass: resolves penetration (averaged over
+// the number of penetrated convexes) and, when friction is enabled, applies
+// the friction impulse to the previous position. Convex shapes are static
+// here (zero shape velocity passed to calcFrictionImpulse).
+void collideConvexes(IParticles curParticles, IParticles prevParticles, uint32_t threadIdx, float alpha)
+{
+ if (!gClothData.mNumConvexes)
+ return;
+
+ bool frictionEnabled = gClothData.mFrictionScale > 0.0f;
+
+ for (uint32_t j = threadIdx; j < gClothData.mNumParticles; j += blockDim)
+ {
+ float4 curPos = curParticles.get(j);
+
+ float3 delta;
+ uint32_t numCollisions = collideConvexes(curPos.xyz, alpha, delta);
+ if (numCollisions > 0)
+ {
+ float scale = 1.0f / numCollisions;
+
+ if (frictionEnabled)
+ {
+ float4 prevPos = prevParticles.get(j);
+
+ float3 frictionImpulse =
+ calcFrictionImpulse(prevPos.xyz, curPos.xyz, float3(0.0f, 0.0f, 0.0f), scale, delta);
+
+ prevPos.xyz -= frictionImpulse;
+ prevParticles.set(j, prevPos);
+ }
+
+ curPos.xyz += delta.xyz * scale;
+ curParticles.set(j, curPos);
+ }
+ }
+}
+
+
+// Precomputed per-triangle quantities for closest-point queries. Callers
+// load the three vertices into base/edge0/edge1 and then call initialize(),
+// which converts edge0/edge1 from absolute vertex positions into edge
+// vectors and derives the normal, dot products and reciprocal terms used
+// by the barycentric clamp in collideTriangles.
+struct TriangleData
+{
+ float3 base;
+ float edge0DotEdge1;
+
+ float3 edge0;
+ float edge0SqrLength;
+
+ float3 edge1;
+ float edge1SqrLength;
+
+ float3 normal;
+
+ float det;
+ float denom;
+
+ float edge0InvSqrLength;
+ float edge1InvSqrLength;
+
+ // initialize struct after vertices have been stored in first 9 members
+ void initialize()
+ {
+ // turn absolute vertex positions into edge vectors from base
+ edge0 -= base;
+ edge1 -= base;
+
+ normal = cross(edge0, edge1);
+
+ float normalInvLength = rsqrt(dot(normal, normal));
+ normal *= normalInvLength;
+
+ edge0DotEdge1 = dot(edge0, edge1);
+ edge0SqrLength = dot(edge0, edge0);
+ edge1SqrLength = dot(edge1, edge1);
+
+ // 1 / Gram determinant and 1 / denominator of the diagonal edge case
+ det = 1.0f / (edge0SqrLength * edge1SqrLength - edge0DotEdge1 * edge0DotEdge1);
+ denom = 1.0f / (edge0SqrLength + edge1SqrLength - edge0DotEdge1 - edge0DotEdge1);
+
+ edge0InvSqrLength = 1.0f / edge0SqrLength;
+ edge1InvSqrLength = 1.0f / edge1SqrLength;
+ }
+};
+
+
+// Collides particle i against every collision triangle: finds the closest
+// triangle via a clamped barycentric closest-point computation, then pushes
+// the particle out along that triangle's normal when it lies behind the
+// triangle (normal.w = deltaDotNormal < 0). Back-side distances are inflated
+// by 1.0001 so front-facing triangles win ties.
+void collideTriangles(IParticles curParticles, int32_t i)
+{
+ float4 curPos = curParticles.get(i);
+ float3 pos = curPos.xyz;
+
+ // xyz = normal of closest triangle, w = signed distance along it
+ float4 normal = float4(0.0f, 0.0f, 0.0f, 0.0f);
+ float minSqrLength = FLT_MAX;
+
+ for (uint32_t j = 0; j < gClothData.mNumCollisionTriangles; ++j)
+ {
+ TriangleData tIt;
+ tIt.base = bCollisionTriangles[gFrameData.mStartCollisionTrianglesOffset + 3 * j];
+ tIt.edge0 = bCollisionTriangles[gFrameData.mStartCollisionTrianglesOffset + 3 * j + 1];
+ tIt.edge1 = bCollisionTriangles[gFrameData.mStartCollisionTrianglesOffset + 3 * j + 2];
+
+ tIt.initialize();
+
+ float3 delta = pos - tIt.base;
+
+ float deltaDotEdge0 = dot(delta, tIt.edge0);
+ float deltaDotEdge1 = dot(delta, tIt.edge1);
+ float deltaDotNormal = dot(delta, tIt.normal);
+
+ // unnormalized barycentric coordinates of the projection
+ float s = tIt.edge1SqrLength * deltaDotEdge0 - tIt.edge0DotEdge1 * deltaDotEdge1;
+ float t = tIt.edge0SqrLength * deltaDotEdge1 - tIt.edge0DotEdge1 * deltaDotEdge0;
+
+ // fall back to per-edge projection when outside an edge region
+ s = t > 0.0f ? s * tIt.det : deltaDotEdge0 * tIt.edge0InvSqrLength;
+ t = s > 0.0f ? t * tIt.det : deltaDotEdge1 * tIt.edge1InvSqrLength;
+
+ if (s + t > 1.0f)
+ {
+ // NOTE(review): only s is recomputed on the diagonal edge; t is
+ // handled by the min(1 - s, t) clamp below -- verify against the
+ // reference closest-point-on-triangle implementation
+ s = (tIt.edge1SqrLength - tIt.edge0DotEdge1 + deltaDotEdge0 - deltaDotEdge1) * tIt.denom;
+ }
+
+ // Probably we should check NaN?
+ s = max(0.0f, min(1.0f, s));
+ t = max(0.0f, min(1.0f - s, t));
+
+ delta -= (tIt.edge0 * s + tIt.edge1 * t);
+
+ float sqrLength = dot(delta, delta);
+
+ // penalize back-side hits so front faces win ties
+ if (0.0f > deltaDotNormal)
+ sqrLength *= 1.0001f;
+
+ if (sqrLength < minSqrLength)
+ {
+ normal.xyz = tIt.normal;
+ normal.w = deltaDotNormal;
+
+ minSqrLength = sqrLength;
+ }
+ }
+
+ // push out only when behind the closest triangle
+ if (normal.w < 0.0f)
+ {
+ curPos.xyz = pos - normal.xyz * normal.w;
+ curParticles.set(i, curPos);
+ }
+}
+
+
+// Per-particle triangle collision pass. Currently collides against the
+// start-frame triangle set only; the commented-out code below sketches the
+// intended interpolation of triangle vertices by alpha into shared memory
+// (alpha is otherwise unused here).
+void collideTriangles(IParticles curParticles, uint32_t threadIdx, float alpha)
+{
+ if (!gClothData.mNumCollisionTriangles)
+ return;
+
+ bool frictionEnabled = gClothData.mFrictionScale > 0.0f;
+
+ // interpolate triangle vertices and store in shared memory
+// for (int32_t i = threadIdx.x, n = gClothData.mNumCollisionTriangles * 3; i < n; i += blockDim.x)
+// {
+// float3 start = bCollisionTriangles[gFrameData.mStartCollisionTrianglesOffset + i];
+// float3 target = bCollisionTriangles[gFrameData.mTargetCollisionTrianglesOffset + i];
+//
+// mCurData.mSphereX[offset] = start + (target - start) * alpha;
+// }
+//
+// GroupMemoryBarrierWithGroupSync();
+
+ for (uint32_t j = threadIdx; j < gClothData.mNumParticles; j += blockDim)
+ {
+ // float4 curPos = curParticles.get(j);
+
+ // float3 delta;
+ collideTriangles(curParticles, j);
+ // if (numCollisions > 0)
+ // {
+ // float scale = 1.0f / numCollisions;
+ //
+ // curPos.xyz += delta.xyz * scale;
+ // curParticles.set(j, curPos);
+ // }
+ }
+
+ GroupMemoryBarrierWithGroupSync();
+}
+
+// Discrete (non-CCD) capsule collision for a single point. Each capsule is
+// modeled as a tapered cone between two interpolated spheres (radius packed
+// in w); after the cone pass, plain sphere collisions are handled. Pushout
+// vectors are accumulated into outDelta, and when friction is enabled the
+// interpolated shape velocity (movement from prevAlpha to alpha) is
+// accumulated into outVelocity. Returns the number of collisions found.
+uint32_t collideCapsules(float3 curPos, float alpha, float prevAlpha, out float3 outDelta, out float3 outVelocity)
+{
+ outDelta = float3(0.0f, 0.0f, 0.0f);
+ outVelocity = float3(0.0f, 0.0f, 0.0f);
+ uint32_t numCollisions = 0;
+
+ uint32_t capsuleOffset = gClothData.mCapsuleOffset;
+ uint32_t startSphereOffset = gFrameData.mStartSphereOffset;
+ uint32_t targetSphereOffset = gFrameData.mTargetSphereOffset;
+
+ bool frictionEnabled = gClothData.mFrictionScale > 0.0f;
+
+ // cone collision
+ for (uint32_t i = 0; i < gClothData.mNumCapsules; ++i)
+ {
+ IndexPair indices = bCapsuleIndices[capsuleOffset + i];
+
+ // spheres interpolated between this frame's start and target states
+ float4 startSphere0 = bCollisionSpheres[startSphereOffset + indices.first];
+ float4 targetSphere0 = bCollisionSpheres[targetSphereOffset + indices.first];
+ float4 sphere0 = lerp(startSphere0, targetSphere0, alpha);
+
+ float4 startSphere1 = bCollisionSpheres[startSphereOffset + indices.second];
+ float4 targetSphere1 = bCollisionSpheres[targetSphereOffset + indices.second];
+ float4 sphere1 = lerp(startSphere1, targetSphere1, alpha);
+
+ sphere0.w = max(sphere0.w, 0.0f);
+ sphere1.w = max(sphere1.w, 0.0f);
+
+ // half-axis between the sphere centers; w = half radius difference
+ float4 axis = (sphere1 - sphere0) * 0.5f;
+
+ float sqrAxisLength = dot(axis.xyz, axis.xyz);
+ float sqrConeLength = sqrAxisLength - axis.w * axis.w;
+
+ // degenerate cone (one sphere contains the other): sphere pass covers it
+ if (sqrConeLength <= 0.0f)
+ continue;
+
+ float invAxisLength = rsqrt(sqrAxisLength);
+ float invConeLength = rsqrt(sqrConeLength);
+
+ float axisLength = sqrAxisLength * invAxisLength;
+
+ float3 coneCenter = (sphere1.xyz + sphere0.xyz) * 0.5f;
+ float coneRadius = (axis.w + sphere0.w) * invConeLength * axisLength;
+
+ float3 coneAxis = axis.xyz * invAxisLength;
+ float coneSlope = axis.w * invConeLength;
+
+ float sine = axis.w * invAxisLength;
+ float coneSqrCosine = 1.f - sine * sine;
+ float coneHalfLength = axisLength;
+
+ {
+ float3 delta = curPos - coneCenter;
+
+ // radial distance from the cone axis vs. local cone radius
+ float deltaDotAxis = dot(delta, coneAxis);
+ float radius = max(deltaDotAxis * coneSlope + coneRadius, 0.0f);
+ float sqrDistance = dot(delta, delta) - deltaDotAxis * deltaDotAxis;
+
+ if (sqrDistance > radius * radius)
+ continue;
+
+ sqrDistance = max(sqrDistance, FLT_EPSILON);
+ float invDistance = rsqrt(sqrDistance);
+
+ float base = deltaDotAxis + coneSlope * sqrDistance * invDistance;
+ float halfLength = coneHalfLength;
+
+ // only resolve against the cone's lateral surface, not the caps
+ if (abs(base) < halfLength)
+ {
+ delta = delta - base * coneAxis;
+
+ float sqrCosine = coneSqrCosine;
+ float scale = radius * invDistance * sqrCosine - sqrCosine;
+
+ outDelta += delta * scale;
+
+ if (frictionEnabled)
+ {
+ // get previous sphere pos
+ float4 prevSphere0 = lerp(startSphere0, targetSphere0, prevAlpha);
+ float4 prevSphere1 = lerp(startSphere1, targetSphere1, prevAlpha);
+
+ // interpolate velocity between the two spheres
+ float t = deltaDotAxis * 0.5f + 0.5f;
+ outVelocity += lerp(sphere0.xyz - prevSphere0.xyz, sphere1.xyz - prevSphere1.xyz, t);
+ }
+
+ ++numCollisions;
+ }
+ }
+ }
+
+ // sphere collision
+ for (uint32_t j = 0; j < gClothData.mNumSpheres; ++j)
+ {
+ float4 startSphere = bCollisionSpheres[startSphereOffset + j];
+ float4 targetSphere = bCollisionSpheres[targetSphereOffset + j];
+ float4 sphere = lerp(startSphere, targetSphere, alpha);
+ sphere.w = max(sphere.w, 0.0f);
+
+ {
+ float3 delta = curPos - sphere.xyz;
+
+ float sqrDistance = FLT_EPSILON + dot(delta, delta);
+ // relDistance > 1 means the point is inside the sphere
+ float relDistance = rsqrt(sqrDistance) * sphere.w;
+
+ if (relDistance > 1.0f)
+ {
+ float scale = relDistance - 1.0f;
+
+ outDelta += delta * scale;
+
+ if (frictionEnabled)
+ {
+ // get previous sphere pos
+ float4 prevSphere = lerp(startSphere, targetSphere, prevAlpha);
+
+ outVelocity += (sphere.xyz - prevSphere.xyz);
+ }
+
+ ++numCollisions;
+ }
+ }
+ }
+
+ return numCollisions;
+}
+
+// Per-particle capsule collision pass (discrete). Pushout is averaged over
+// the number of collisions; when friction is enabled the impulse is applied
+// to the previous position, and when collision mass scaling is enabled the
+// inverse mass is reduced proportionally to the squared pushout.
+void collideCapsules(IParticles curParticles, IParticles prevParticles, uint32_t threadIdx, float alpha, float prevAlpha)
+{
+ bool frictionEnabled = gClothData.mFrictionScale > 0.0f;
+ bool massScaleEnabled = gClothData.mCollisionMassScale > 0.0f;
+
+ for (uint32_t j = threadIdx; j < gClothData.mNumParticles; j += blockDim)
+ {
+ float4 curPos = curParticles.get(j);
+
+ float3 delta, velocity;
+ uint32_t numCollisions = collideCapsules(curPos.xyz, alpha, prevAlpha, delta, velocity);
+ if (numCollisions > 0)
+ {
+ float scale = 1.0f / numCollisions;
+
+ if (frictionEnabled)
+ {
+ float4 prevPos = prevParticles.get(j);
+
+ float3 frictionImpulse =
+ calcFrictionImpulse(prevPos.xyz, curPos.xyz, velocity, scale, delta);
+
+ prevPos.xyz -= frictionImpulse;
+ prevParticles.set(j, prevPos);
+ }
+
+ curPos.xyz += delta * scale;
+
+ //TODO: current impl. causes glitches - fix it!
+ if (massScaleEnabled)
+ {
+ float deltaLengthSq = dot(delta, delta);
+ float massScale = 1.0f + gClothData.mCollisionMassScale * deltaLengthSq;
+ curPos.w /= massScale;
+ }
+
+ curParticles.set(j, curPos);
+ }
+ }
+}
+
+static const float gSkeletonWidth = (1.f - 0.2f) * (1.f - 0.2f) - 1.f;
+
+uint32_t collideCapsules(float3 curPos, float3 prevPos, float alpha, float prevAlpha, out float3 outDelta, out float3 outVelocity)
+{
+ outDelta = float3(0.0f, 0.0f, 0.0f);
+ outVelocity = float3(0.0f, 0.0f, 0.0f);
+ uint32_t numCollisions = 0;
+
+ uint32_t capsuleOffset = gClothData.mCapsuleOffset;
+ uint32_t startSphereOffset = gFrameData.mStartSphereOffset;
+ uint32_t targetSphereOffset = gFrameData.mTargetSphereOffset;
+
+ bool frictionEnabled = gClothData.mFrictionScale > 0.0f;
+
+ // cone collision
+ for (uint32_t i = 0; i < gClothData.mNumCapsules; ++i)
+ {
+ IndexPair indices = bCapsuleIndices[capsuleOffset + i];
+
+ // current
+ float4 startSphere0 = bCollisionSpheres[startSphereOffset + indices.first];
+ float4 targetSphere0 = bCollisionSpheres[targetSphereOffset + indices.first];
+
+ float4 startSphere1 = bCollisionSpheres[startSphereOffset + indices.second];
+ float4 targetSphere1 = bCollisionSpheres[targetSphereOffset + indices.second];
+
+ // prev
+ float4 prevSphere0 = lerp(startSphere0, targetSphere0, prevAlpha);
+ float4 prevSphere1 = lerp(startSphere1, targetSphere1, prevAlpha);
+
+ prevSphere0.w = max(prevSphere0.w, 0.0f);
+ prevSphere1.w = max(prevSphere1.w, 0.0f);
+
+ float4 prevAxis = (prevSphere1 - prevSphere0) * 0.5f;
+ float3 prevConeCenter = (prevSphere1.xyz + prevSphere0.xyz) * 0.5f;
+
+ float3 prevDelta = prevPos - prevConeCenter;
+
+ float prevSqrAxisLength = dot(prevAxis.xyz, prevAxis.xyz);
+ float prevSqrConeLength = prevSqrAxisLength - prevAxis.w * prevAxis.w;
+
+ if (prevSqrAxisLength <= 0.0f)
+ continue;
+
+ float prevInvAxisLength = rsqrt(prevSqrAxisLength);
+ float prevInvConeLength = rsqrt(prevSqrConeLength);
+
+ float prevAxisLength = prevSqrAxisLength * prevInvAxisLength;
+
+ float prevConeRadius = (prevAxis.w + prevSphere0.w) * prevInvConeLength * prevAxisLength;
+
+ float3 prevConeAxis = prevAxis.xyz * prevInvAxisLength;
+ float prevConeSlope = prevAxis.w * prevInvConeLength;
+
+ float3 prevCross = cross(prevDelta, prevConeAxis);
+ float prevDot = dot(prevPos, prevConeAxis);
+
+ // current
+ float4 sphere0 = lerp(startSphere0, targetSphere0, alpha);
+ float4 sphere1 = lerp(startSphere1, targetSphere1, alpha);
+
+ sphere0.w = max(sphere0.w, 0.0f);
+ sphere1.w = max(sphere1.w, 0.0f);
+
+ float4 curAxis = (sphere1 - sphere0) * 0.5f;
+ float3 curConeCenter = (sphere1.xyz + sphere0.xyz) * 0.5f;
+
+ float3 curDelta = curPos - curConeCenter;
+
+ float sqrAxisLength = dot(curAxis.xyz, curAxis.xyz);
+ float sqrConeLength = sqrAxisLength - curAxis.w * curAxis.w;
+
+ if (sqrConeLength <= 0.0f)
+ continue;
+
+ float invAxisLength = rsqrt(sqrAxisLength);
+ float invConeLength = rsqrt(sqrConeLength);
+
+ float axisLength = sqrAxisLength * invAxisLength;
+
+ float3 coneCenter = (sphere1.xyz + sphere0.xyz) * 0.5f;
+ float coneRadius = (curAxis.w + sphere0.w) * invConeLength * axisLength;
+
+ float3 curConeAxis = curAxis.xyz * invAxisLength;
+
+ float3 curCross = cross(curDelta, curConeAxis);
+ float curDot = dot(curPos, curConeAxis);
+
+ float curSqrDistance = FLT_EPSILON + dot(curCross, curCross);
+
+ float prevRadius = max(prevDot * prevConeSlope + coneRadius, 0.0f);
+
+ float curSlope = curAxis.w * invConeLength;
+ float curRadius = max(curDot * curSlope + coneRadius, 0.0f);
+
+ float sine = curAxis.w * invAxisLength;
+ float coneSqrCosine = 1.f - sine * sine;
+ float curHalfLength = axisLength;
+
+ float dotPrevPrev = dot(prevCross, prevCross) - prevCross.x * prevCross.x - prevRadius * prevRadius;
+ float dotPrevCur = dot(prevCross, curCross) - prevRadius * curRadius;
+ float dotCurCur = curSqrDistance - curRadius * curRadius;
+
+ float discriminant = dotPrevCur * dotPrevCur - dotCurCur * dotPrevPrev;
+ float sqrtD = sqrt(discriminant);
+ float halfB = dotPrevCur - dotPrevPrev;
+ float minusA = dotPrevCur - dotCurCur + halfB;
+
+ // time of impact or 0 if prevPos inside cone
+ float toi = min(0.0f, halfB + sqrtD) / minusA;
+ bool hasCollision = toi < 1.0f && halfB < sqrtD;
+
+ // skip continuous collision if the (un-clamped) particle
+ // trajectory only touches the outer skin of the cone.
+ float rMin = prevRadius + halfB * minusA * (curRadius - prevRadius);
+ hasCollision = hasCollision && (discriminant > minusA * rMin * rMin * gSkeletonWidth);
+
+ // a is negative when one cone is contained in the other,
+ // which is already handled by discrete collision.
+ hasCollision = hasCollision && minusA < -FLT_EPSILON;
+
+ if (hasCollision)
+ {
+ float3 delta = prevPos - curPos;
+
+ // interpolate delta at toi
+ float3 pos = prevPos - delta * toi;
+
+ // float axisLength = sqrAxisLength * invAxisLength;
+
+ // float curHalfLength = axisLength; // ?
+ float3 curConeAxis = curAxis.xyz * invAxisLength;
+ float3 curScaledAxis = curAxis.xyz * curHalfLength;
+
+ float4 prevAxis = (prevSphere1 - prevSphere0) * 0.5f;
+ float3 prevConeCenter = (prevSphere1.xyz + prevSphere0.xyz) * 0.5f;
+
+ float prevSqrAxisLength = dot(prevAxis.xyz, prevAxis.xyz);
+ float prevSqrConeLength = prevSqrAxisLength - prevAxis.w * prevAxis.w;
+
+ float prevInvAxisLength = rsqrt(prevSqrAxisLength);
+ float prevInvConeLength = rsqrt(prevSqrConeLength);
+
+ float prevAxisLength = prevSqrAxisLength * prevInvAxisLength;
+ float prevConeRadius = (prevAxis.w + prevSphere0.w) * prevInvConeLength * prevAxisLength;
+
+ float3 prevConeAxis = prevAxis.xyz * prevInvAxisLength;
+
+ float prevHalfLength = prevAxisLength;
+
+ float3 deltaScaledAxis = curScaledAxis - prevAxis.xyz * prevHalfLength;
+
+ float oneMinusToi = 1.0f - toi;
+
+ // interpolate axis at toi
+ float3 axis = curScaledAxis - deltaScaledAxis * oneMinusToi;
+
+ float slope = prevConeSlope * oneMinusToi + curSlope * toi;
+
+ float sqrHalfLength = dot(axis, axis); // axisX * axisX + axisY * axisY + axisZ * axisZ;
+ float invHalfLength = rsqrt(sqrHalfLength);
+ float dotf = dot(pos, axis) * invHalfLength;
+
+ float sqrDistance = dot(pos, pos) - dotf * dotf;
+ float invDistance = sqrDistance > 0.0f ? rsqrt(sqrDistance) : 0.0f;
+
+ float base = dotf + slope * sqrDistance * invDistance;
+ float scale = base * invHalfLength;
+
+ if (abs(scale) < 1.0f)
+ {
+ delta += deltaScaledAxis * scale;
+
+ // reduce ccd impulse if (clamped) particle trajectory stays in cone skin,
+ // i.e. scale by exp2(-k) or 1/(1+k) with k = (tmin - toi) / (1 - toi)
+ float minusK = sqrtD / (minusA * oneMinusToi);
+ oneMinusToi = oneMinusToi / (1.f - minusK);
+
+ curDelta += delta * oneMinusToi;
+
+ curDot = dot(curDelta, curAxis.xyz);
+ float curConeRadius = (curAxis.w + sphere0.w) * invConeLength * axisLength;
+
+ curRadius = max(curDot * curSlope + curConeRadius, 0.0f); // Duplicate?
+ curSqrDistance = dot(curDelta, curDelta) - curDot * curDot;
+
+ curPos = coneCenter + curDelta;
+ }
+ }
+
+ {
+ float3 delta = curPos - coneCenter;
+
+ float deltaDotAxis = dot(delta, curConeAxis);
+ float radius = max(deltaDotAxis * curSlope + coneRadius, 0.0f);
+ float sqrDistance = dot(delta, delta) - deltaDotAxis * deltaDotAxis;
+
+ if (sqrDistance > radius * radius)
+ continue;
+
+ sqrDistance = max(sqrDistance, FLT_EPSILON);
+ float invDistance = rsqrt(sqrDistance);
+
+ float base = deltaDotAxis + curSlope * sqrDistance * invDistance;
+ float halfLength = axisLength;
+ // float halfLength = coneHalfLength;
+
+ if (abs(base) < halfLength)
+ {
+ delta = delta - base * curAxis;
+
+ float sqrCosine = coneSqrCosine;
+ float scale = radius * invDistance * sqrCosine - sqrCosine;
+
+ outDelta += delta * scale;
+
+ if (frictionEnabled)
+ {
+ // get previous sphere pos
+ float4 prevSphere0 = lerp(startSphere0, targetSphere0, prevAlpha);
+ float4 prevSphere1 = lerp(startSphere1, targetSphere1, prevAlpha);
+
+ // interpolate velocity between the two spheres
+ float t = deltaDotAxis * 0.5f + 0.5f;
+ outVelocity += lerp(sphere0.xyz - prevSphere0.xyz, sphere1.xyz - prevSphere1.xyz, t);
+ }
+
+ ++numCollisions;
+ }
+ }
+
+ // curPos inside cone (discrete collision)
+ bool hasContact = curRadius * curRadius > curSqrDistance;
+
+ if (!hasContact)
+ continue;
+
+ float invDistance = curSqrDistance > 0.0f ? rsqrt(curSqrDistance) : 0.0f;
+ float base = curDot + curSlope * curSqrDistance * invDistance;
+
+ // float axisLength = sqrAxisLength * invAxisLength;
+
+ // float halfLength = axisLength; // ?
+ // float halfLength = coneHalfLength;
+ /*
+ if (abs(base) < halfLength)
+ {
+ float3 delta = curPos - base * curAxis.xyz;
+
+ float sine = axis.w * invAxisLength; // Remove?
+ float sqrCosine = 1.f - sine * sine;
+
+ float scale = curRadius * invDistance * coneSqrCosine - coneSqrCosine;
+
+
+ // delta += de
+
+ outDelta += delta * scale;
+
+ if (frictionEnabled)
+ {
+ // interpolate velocity between the two spheres
+ float t = curDot * 0.5f + 0.5f;
+ outVelocity += lerp(sphere0.xyz - prevSphere0.xyz, sphere1.xyz - prevSphere1.xyz, t);
+ }
+
+ ++numCollisions;
+ }*/
+ }
+
+ // sphere collision
+ for (uint32_t j = 0; j < gClothData.mNumSpheres; ++j)
+ {
+ float4 startSphere = bCollisionSpheres[startSphereOffset + j];
+ float4 targetSphere = bCollisionSpheres[targetSphereOffset + j];
+ float4 sphere = lerp(startSphere, targetSphere, alpha);
+ sphere.w = max(sphere.w, 0.0f);
+ float curRadius = sphere.w;
+
+ // get previous sphere pos
+ float4 prevSphere = lerp(startSphere, targetSphere, prevAlpha);
+ prevSphere.w = max(sphere.w, 0.0f);
+ float prevRadius = prevSphere.w;
+
+ {
+ float3 curDelta = curPos - sphere.xyz;
+ float3 prevDelta = prevPos - prevSphere.xyz;
+
+ float sqrDistance = FLT_EPSILON + dot(curDelta, curDelta);
+
+ float dotPrevPrev = dot(prevDelta, prevDelta) - prevRadius * prevRadius;
+ float dotPrevCur = dot(prevDelta, curDelta) - prevRadius * curRadius;
+ float dotCurCur = sqrDistance - curRadius * curRadius;
+
+ float discriminant = dotPrevCur * dotPrevCur - dotCurCur * dotPrevPrev;
+ float sqrtD = sqrt(discriminant);
+ float halfB = dotPrevCur - dotPrevPrev;
+ float minusA = dotPrevCur - dotCurCur + halfB;
+
+ // time of impact or 0 if prevPos inside sphere
+ float toi = min(0.0f, halfB + sqrtD) / minusA;
+ bool hasCollision = toi < 1.0f && halfB < sqrtD;
+
+ // skip continuous collision if the (un-clamped) particle
+ // trajectory only touches the outer skin of the cone.
+ float rMin = prevRadius + halfB * minusA * (curRadius - prevRadius);
+ hasCollision = hasCollision && (discriminant > minusA * rMin * rMin * gSkeletonWidth);
+
+ // a is negative when one cone is contained in the other,
+ // which is already handled by discrete collision.
+ hasCollision = hasCollision && minusA < -FLT_EPSILON;
+
+ if (hasCollision)
+ {
+ float3 delta = prevDelta - curDelta;
+
+ float oneMinusToi = 1.0f - toi;
+
+ // reduce ccd impulse if (clamped) particle trajectory stays in cone skin,
+ // i.e. scale by exp2(-k) or 1/(1 + k) with k = (tmin - toi) / (1 - toi)
+ float minusK = sqrtD / (minusA * oneMinusToi);
+ oneMinusToi = oneMinusToi / (1.f - minusK);
+
+ curDelta += delta * oneMinusToi;
+ curPos = sphere.xyz + curDelta;
+
+ sqrDistance = FLT_EPSILON + dot(curDelta, curDelta);
+ }
+
+ float relDistance = rsqrt(sqrDistance) * sphere.w;
+
+ if (relDistance > 1.0f)
+ {
+ float scale = relDistance - 1.0f;
+
+ outDelta += curDelta * scale;
+
+ if (frictionEnabled)
+ {
+ outVelocity += (sphere.xyz - prevSphere.xyz);
+ }
+
+ ++numCollisions;
+ }
+ }
+ }
+
+ return numCollisions;
+}
+
+// Continuous (swept) capsule collision.
+// Each thread handles particles threadIdx, threadIdx+blockDim, ... ; for each
+// particle the capsule routine accumulates a collision delta and a surface
+// velocity, which are applied as an impulse averaged over the number of
+// contacts. Friction (when enabled) is applied by moving the previous
+// position, which changes the implicit velocity of the particle.
+// alpha/prevAlpha: interpolation factors for the collision shapes at the
+// current and previous solver iteration.
+void collideContinuousCapsules(IParticles curParticles, IParticles prevParticles, uint32_t threadIdx, float alpha, float prevAlpha)
+{
+	bool frictionEnabled = gClothData.mFrictionScale > 0.0f;
+	// note: only consumed by the mass-scale block below, which is currently disabled
+	bool massScaleEnabled = gClothData.mCollisionMassScale > 0.0f;
+
+	for (uint32_t j = threadIdx; j < gClothData.mNumParticles; j += blockDim)
+	{
+		float4 curPos = curParticles.get(j);
+		float4 prevPos = prevParticles.get(j);
+
+		float3 delta, velocity;
+		uint32_t numCollisions = collideCapsules(curPos.xyz, prevPos.xyz, alpha, prevAlpha, delta, velocity);
+		if (numCollisions > 0)
+		{
+			// average the accumulated impulse over all contacts
+			float scale = 1.0f / (float)numCollisions;
+
+			if (frictionEnabled)
+			{
+				float3 frictionImpulse =
+				    calcFrictionImpulse(prevPos.xyz, curPos.xyz, velocity, scale, delta);
+
+				// friction acts by adjusting the previous position (implicit velocity)
+				prevPos.xyz -= frictionImpulse;
+				prevParticles.set(j, prevPos);
+			}
+
+			curPos.xyz += delta * scale;
+
+			// TODO: current impl. causes glitches - fix it!
+			// if (massScaleEnabled)
+			// {
+			//	float deltaLengthSq = dot(delta, delta);
+			//	float massScale = 1.0f + gClothData.mCollisionMassScale * deltaLengthSq;
+			//	curPos.w /= massScale;
+			// }
+
+			curParticles.set(j, curPos);
+		}
+	}
+}
+
+
+// Run all shape collision for one solver iteration: convexes, triangles,
+// then capsules. Capsule collision is either the continuous (swept) or the
+// discrete variant, selected by the per-cloth flag.
+void collideParticles(IParticles curParticles, IParticles prevParticles, uint32_t threadIdx, float alpha, float prevAlpha)
+{
+	collideConvexes(curParticles, prevParticles, threadIdx, alpha);
+	collideTriangles(curParticles, alpha);
+
+	if (!gClothData.mEnableContinuousCollision)
+	{
+		collideCapsules(curParticles, prevParticles, threadIdx, alpha, prevAlpha);
+	}
+	else
+	{
+		collideContinuousCapsules(curParticles, prevParticles, threadIdx, alpha, prevAlpha);
+	}
+}
+
+// Separation constraints: each particle has a sphere (xyz = center, w =
+// radius) interpolated between start and target; particles that fall inside
+// the sphere are projected back onto its surface.
+void constrainSeparation(IParticles curParticles, uint32_t threadIdx, float alpha)
+{
+	// no separation constraints supplied for this frame
+	if (gFrameData.mStartSeparationConstrainsOffset == -1)
+		return;
+
+	for (uint32_t i = threadIdx; i < gClothData.mNumParticles; i += blockDim)
+	{
+		float4 start = bSeparationConstraints[i + gFrameData.mStartSeparationConstrainsOffset];
+		float4 target = bSeparationConstraints[i + gFrameData.mTargetSeparationConstrainsOffset];
+		float4 constraint = lerp(start, target, alpha);
+
+		float4 particle = curParticles.get(i);
+
+		float3 toCenter = constraint.xyz - particle.xyz;
+		float distSq = FLT_EPSILON + dot(toCenter, toCenter);
+
+		// slack is negative only when the particle is inside the radius;
+		// then the correction moves it away from the center onto the surface
+		float slack = min(0.0f, 1.0f - constraint.w * rsqrt(distSq));
+
+		particle.xyz += toCenter * slack;
+		curParticles.set(i, particle);
+	}
+	GroupMemoryBarrierWithGroupSync();
+}
+
+// Sleep bookkeeping: accumulate elapsed time in ms; once the configured test
+// interval is reached, measure the largest per-axis particle displacement of
+// this iteration. The pass counter advances when the cloth stayed below the
+// sleep threshold and is reset otherwise.
+void updateSleepState(IParticles curParticles, IParticles prevParticles, uint32_t threadIdx)
+{
+	// thread 0 advances the shared test counter by the iteration dt in ms (>= 1)
+	if (!threadIdx)
+		gFrameData.mSleepTestCounter += max(1, uint32_t(gFrameData.mIterDt * 1000));
+
+	GroupMemoryBarrierWithGroupSync();
+
+	// not yet time to run the sleep test
+	if (gFrameData.mSleepTestCounter < gClothData.mSleepTestInterval)
+		return;
+
+	// per-thread max of |cur - prev| over this thread's particle subset
+	float maxDelta = 0.0f;
+	for (uint32_t i = threadIdx; i < gClothData.mNumParticles; i += blockDim)
+	{
+		float4 curPos = curParticles.get(i);
+		float4 prevPos = prevParticles.get(i);
+
+		float3 delta = abs(curPos.xyz - prevPos.xyz);
+
+		maxDelta = max(max(max(delta.x, delta.y), delta.z), maxDelta);
+	}
+
+	if (!threadIdx)
+	{
+		++gFrameData.mSleepPassCounter;
+		gFrameData.mSleepTestCounter -= gClothData.mSleepTestInterval;
+	}
+
+	// NOTE(review): any thread that observed motion resets the shared pass
+	// counter, without a barrier after the thread-0 increment above —
+	// presumably benign because every reset writes the same value (0); confirm.
+	if (maxDelta > gClothData.mSleepThreshold * gFrameData.mIterDt)
+		gFrameData.mSleepPassCounter = 0;
+}
+
+
+#define USE_SELF_COLLISION_SORT 1
+
+// Group-shared description of the 128x128 acceleration grid used by
+// selfCollideParticles. The axes are permuted so the shortest AABB axis is
+// dropped; mPosElemId records which original position component each grid
+// slot maps to.
+struct DxSelfCollisionGrid
+{
+	float mPosBias[3];      // per-slot bias: cell scale * expanded negative lower bound
+	float mPosScale[3];     // per-slot scale: 1 / cell size (clamped to 127 cells per edge)
+	uint32_t mPosElemId[3]; // original position component index (0 = x, 1 = y, 2 = z)
+};
+groupshared DxSelfCollisionGrid gSelfCollisionGrid;
+groupshared float gExpandedEdgeLength[3]; // expanded AABB edge length per axis
+
+// Self collision: push apart particle pairs that are closer than
+// mSelfCollisionDistance. With USE_SELF_COLLISION_SORT, particles are binned
+// into a 128x128 grid over the two longest AABB axes, radix-sorted by cell
+// key, and each particle only tests 3x4 neighboring cell ranges; otherwise
+// every pair is tested (O(n^2)). Impulses are accumulated into a temporary
+// global-memory copy because the radix sort reuses the particle shared memory.
+void selfCollideParticles(IParticles curParticles, uint32_t threadIdx)
+{
+	// disabled when either the distance or the stiffness is non-positive
+	if (min(gClothData.mSelfCollisionDistance, gFrameData.mSelfCollisionStiffness) <= 0.0f)
+	{
+		return;
+	}
+
+	const int32_t numIndices = gClothData.mNumSelfCollisionIndices;
+	const int32_t numParticles = gClothData.mNumParticles;
+
+#if USE_SELF_COLLISION_SORT
+	// bounds are stored as (upper, -lower) pairs per axis (see computeParticleBounds)
+	float expandedNegativeLower = 0;
+	float expandedEdgeLength = 0;
+	if (threadIdx.x < 3)
+	{
+		float upper = gFrameData.mParticleBounds[threadIdx.x * 2];
+		float negativeLower = gFrameData.mParticleBounds[threadIdx.x * 2 + 1];
+
+		// expand bounds
+		float eps = (upper + negativeLower) * 1e-4f;
+		float expandedUpper = upper + eps;
+		expandedNegativeLower = negativeLower + eps;
+		expandedEdgeLength = expandedUpper + expandedNegativeLower;
+
+		gExpandedEdgeLength[threadIdx.x] = expandedEdgeLength;
+	}
+	GroupMemoryBarrierWithGroupSync();
+	if (threadIdx.x < 3)
+	{
+		// calculate shortest axis
+		int32_t shortestAxis = gExpandedEdgeLength[0] > gExpandedEdgeLength[1];
+		if (gExpandedEdgeLength[shortestAxis] > gExpandedEdgeLength[2])
+			shortestAxis = 2;
+
+		// remap the two remaining axes into grid slots, skipping the shortest one
+		uint32_t writeAxis = threadIdx.x - shortestAxis;
+		writeAxis += writeAxis >> 30;
+
+		// cell size: at least the collision distance, at most 1/127th of the edge
+		float maxInvCellSize = (127.0f / expandedEdgeLength);
+		float invCollisionDistance = (1.0f / gClothData.mSelfCollisionDistance);
+		float invCellSize = min(maxInvCellSize, invCollisionDistance);
+
+		gSelfCollisionGrid.mPosScale[writeAxis] = invCellSize;
+		gSelfCollisionGrid.mPosBias[writeAxis] = invCellSize * expandedNegativeLower;
+		gSelfCollisionGrid.mPosElemId[writeAxis] = threadIdx.x;
+	}
+	GroupMemoryBarrierWithGroupSync();
+
+	// the cell-start table lives behind the two (key,index) sort arrays
+	const int32_t cellStartOffset = gClothData.mSelfCollisionDataOffset + numIndices * 2;
+	// 128*128 cells plus sentinel padding on both sides
+	const int32_t cellStartSize = (129 + 128 * 128 + 130);
+	if (gFrameData.mInitSelfCollisionData)
+	{
+		// first run only: mark all cells as empty (-1)
+		for (int32_t i = threadIdx; i < cellStartSize; i += BlockSize)
+		{
+			bSelfCollisionData[cellStartOffset + i] = -1;
+		}
+	}
+
+	//build acceleration grid
+	float rowScale = gSelfCollisionGrid.mPosScale[1], rowBias = gSelfCollisionGrid.mPosBias[1];
+	float colScale = gSelfCollisionGrid.mPosScale[2], colBias = gSelfCollisionGrid.mPosBias[2];
+	int32_t rowElemId = gSelfCollisionGrid.mPosElemId[1];
+	int32_t colElemId = gSelfCollisionGrid.mPosElemId[2];
+
+	// calculate keys
+	for (int32_t i = threadIdx.x; i < numIndices; i += BlockSize)
+	{
+		// optional index list restricts which particles self-collide
+		int32_t index = gClothData.mSelfCollisionIndicesOffset != -1 ? bSelfCollisionIndices[gClothData.mSelfCollisionIndicesOffset + i] : i;
+		//assert(index < gClothData.mNumParticles);
+
+		float4 pos = curParticles.get(index);
+
+		int32_t rowIndex = int32_t(max(0.0f, min(pos[rowElemId] * rowScale + rowBias, 127.5f)));
+		int32_t colIndex = int32_t(max(0.0f, min(pos[colElemId] * colScale + colBias, 127.5f)));
+		//assert(rowIndex >= 0 && rowIndex < 128 && colIndex >= 0 && colIndex < 128);
+
+		int32_t key = (colIndex << 7 | rowIndex) + 129; // + row and column sentinel
+		//assert(key <= 0x4080);
+
+		bSelfCollisionData[gClothData.mSelfCollisionDataOffset + i] = key << 16 | index; // (key, index) pair in a single int32_t
+	}
+	GroupMemoryBarrierWithGroupSync();
+
+	// sort keys
+	// adapter for radixSort_BitCount: ping-pongs between the two
+	// (key,index) arrays at in/out offsets inside bSelfCollisionData
+	class SelfCollisionKeys : ISortElements
+	{
+		int inOffset;
+		int outOffset;
+		int get(int index)
+		{
+			return bSelfCollisionData[inOffset + index];
+		}
+		void set(int index, int value)
+		{
+			bSelfCollisionData[outOffset + index] = value;
+		}
+		void swap()
+		{
+			int temp = inOffset;
+			inOffset = outOffset;
+			outOffset = temp;
+		}
+	} sortedKeys;
+	sortedKeys.inOffset = gClothData.mSelfCollisionDataOffset;
+	sortedKeys.outOffset = gClothData.mSelfCollisionDataOffset + numIndices;
+
+
+	// scratch accessors for the sort, backed by the particle shared memory
+	// (the particles themselves are saved to global memory below)
+	class SortShared : ISortShared
+	{
+		uint4 getReduce(int index)
+		{
+			uint4 res;
+			res.x = (gCurParticles[index + BlockSize * 0]);
+			res.y = (gCurParticles[index + BlockSize * 1]);
+			res.z = (gCurParticles[index + BlockSize * 2]);
+			res.w = (gCurParticles[index + BlockSize * 3]);
+			return res;
+		}
+		void setReduce(int index, uint4 value)
+		{
+			gCurParticles[index + BlockSize * 0] = (value.x);
+			gCurParticles[index + BlockSize * 1] = (value.y);
+			gCurParticles[index + BlockSize * 2] = (value.z);
+			gCurParticles[index + BlockSize * 3] = (value.w);
+		}
+
+		uint getScan(int index)
+		{
+			return gCurParticles[index + BlockSize * 4];
+		}
+		void setScan(int index, uint value)
+		{
+			gCurParticles[index + BlockSize * 4] = value;
+		}
+	} sortShared;
+#endif
+
+	// copy current particles to temporary array (radixSort reuses the same shared memory used for particles!)
+	for (int32_t j = threadIdx; j < numParticles; j += blockDim)
+	{
+		bSelfCollisionParticles[gClothData.mSelfCollisionParticlesOffset + j] = curParticles.get(j);
+	}
+	GroupMemoryBarrierWithGroupSync();
+
+#if USE_SELF_COLLISION_SORT
+	// sort the (key,index) pairs by the 16-bit key in the high half
+	radixSort_BitCount(threadIdx, numIndices, sortedKeys, 16, 32, sortShared);
+
+	// mark cell start if keys are different between neighboring threads
+	for (int32_t k = threadIdx.x; k < numIndices; k += BlockSize)
+	{
+		int32_t key = sortedKeys.get(k) >> 16;
+		int32_t prevKey = k ? sortedKeys.get(k - 1) >> 16 : key - 1;
+		if (key != prevKey)
+		{
+			bSelfCollisionData[cellStartOffset + key] = k;
+			bSelfCollisionData[cellStartOffset + prevKey + 1] = k;
+		}
+	}
+#endif
+	//GroupMemoryBarrierWithGroupSync();
+
+#if USE_SELF_COLLISION_SORT
+	// copy only sorted (indexed) particles to shared mem
+	// NOTE(review): 'i' is not declared here; this relies on a for-init 'i'
+	// from an earlier loop remaining visible (legacy fxc scoping) — confirm.
+	for (i = threadIdx.x; i < numIndices; i += blockDim)
+	{
+		int32_t index = bSelfCollisionData[gClothData.mSelfCollisionDataOffset + i] & 0xFFFF;
+		curParticles.set(i, bSelfCollisionParticles[gClothData.mSelfCollisionParticlesOffset + index]);
+	}
+	GroupMemoryBarrierWithGroupSync();
+
+	// NOTE(review): cdist/cdistSq are referenced below in both #if branches,
+	// so the USE_SELF_COLLISION_SORT == 0 path would not compile as written.
+	const float cdist = gClothData.mSelfCollisionDistance;
+	const float cdistSq = cdist * cdist;
+
+	for (i = threadIdx; i < numIndices; i += blockDim)
+#else
+	for (i = threadIdx; i < numParticles; i += blockDim)
+#endif
+	{
+#if USE_SELF_COLLISION_SORT
+		// low 16 bits of the sorted pair = original particle index
+		const int32_t index = bSelfCollisionData[gClothData.mSelfCollisionDataOffset + i] & 0xFFFF;
+#else
+		const int32_t index = i;
+#endif
+		//assert(index < gClothData.mNumParticles);
+
+		// in sorted mode curParticles was re-ordered by sort rank above,
+		// so get(i) is particle 'index'
+		float4 iPos = curParticles.get(i);
+		// xyz accumulates the impulse, w the weight sum for averaging
+		float4 delta = float4(0.0f, 0.0f, 0.0f, FLT_EPSILON);
+
+		float4 iRestPos;
+		if (gFrameData.mRestPositionsOffset != -1)
+		{
+			iRestPos = bRestPositions[gFrameData.mRestPositionsOffset + index];
+		}
+
+#if USE_SELF_COLLISION_SORT
+		// get cell index for this particle
+		int32_t rowIndex = int32_t(max(0.0f, min(iPos[rowElemId] * rowScale + rowBias, 127.5f)));
+		int32_t colIndex = int32_t(max(0.0f, min(iPos[colElemId] * colScale + colBias, 127.5f)));
+		//assert(rowIndex >= 0 && rowIndex < 128 && colIndex >= 0 && colIndex < 128);
+
+		int32_t key = colIndex << 7 | rowIndex;
+		//assert(key <= 0x4080);
+
+		// check cells in 3 columns
+		for (int32_t keyEnd = key + 256; key <= keyEnd; key += 128)
+		{
+			// merge the start/end ranges of 4 adjacent cells in this column
+			uint32_t cellStart[4];
+			cellStart[0] = bSelfCollisionData[cellStartOffset + key + 0];
+			cellStart[1] = bSelfCollisionData[cellStartOffset + key + 1];
+			cellStart[2] = bSelfCollisionData[cellStartOffset + key + 2];
+			cellStart[3] = bSelfCollisionData[cellStartOffset + key + 3];
+
+			uint32_t startIndex = min(min(cellStart[0], cellStart[1]), cellStart[2]);
+			uint32_t endIndex = max(max(max(asint(cellStart[1]), asint(cellStart[2])), asint(cellStart[3])), 0);
+#else
+		{
+			uint32_t startIndex = 0;
+			uint32_t endIndex = numParticles;
+#endif
+			// comparison must be unsigned to skip cells with negative startIndex
+			for (int32_t j = startIndex; asuint(j) < endIndex; ++j)
+			{
+				if (j != i) // avoid same particle
+				{
+					float4 jPos = curParticles.get(j);
+					float3 diff = iPos.xyz - jPos.xyz;
+
+					float distSqr = dot(diff, diff);
+					if (distSqr > cdistSq)
+						continue;
+
+					float restScale = 1.0f;
+					if (gFrameData.mRestPositionsOffset != -1)
+					{
+#if USE_SELF_COLLISION_SORT
+						const int32_t jndex = bSelfCollisionData[gClothData.mSelfCollisionDataOffset + j] & 0xFFFF;
+#else
+						const int32_t jndex = j;
+#endif
+
+						float4 jRestPos = bRestPositions[gFrameData.mRestPositionsOffset + jndex];
+
+						// calculate distance in rest configuration
+						float3 rdiff = iRestPos.xyz - jRestPos.xyz;
+						float rdistSq = dot(rdiff, rdiff);
+						// pairs already close at rest are not separated
+						if (rdistSq <= cdistSq)
+							continue;
+
+						// ratio = rest distance / collision distance - 1.0
+						float stiffnessRatio = rsqrt(cdistSq / (rdistSq + FLT_EPSILON)) - 1.0f;
+						restScale = min(1.0, stiffnessRatio);
+					}
+					// premultiply ratio for weighted average
+					float ratio = max(0.0f, cdist * rsqrt(FLT_EPSILON + distSqr) - 1.0f);
+					float scale = (restScale * ratio * ratio) / (FLT_EPSILON + iPos.w + jPos.w);
+
+					delta.xyz += scale * diff;
+					delta.w += ratio;
+				}
+			}
+		}
+		const float stiffness = gFrameData.mSelfCollisionStiffness * iPos.w;
+		float scale = (stiffness / delta.w);
+
+		// apply collision impulse
+		float4 tmpPos = bSelfCollisionParticles[gClothData.mSelfCollisionParticlesOffset + index];
+		tmpPos.xyz += delta.xyz * scale;
+		bSelfCollisionParticles[gClothData.mSelfCollisionParticlesOffset + index] = tmpPos;
+	}
+	GroupMemoryBarrierWithGroupSync();
+
+	// copy temporary particle array back to shared mem
+	for (i = threadIdx; i < numParticles; i += blockDim)
+	{
+		curParticles.set(i, bSelfCollisionParticles[gClothData.mSelfCollisionParticlesOffset + i]);
+	}
+
+	// unmark occupied cells to empty again (faster than clearing all the cells)
+	for (i = threadIdx.x; i < numIndices; i += blockDim)
+	{
+		int32_t key = bSelfCollisionData[gClothData.mSelfCollisionDataOffset + i] >> 16;
+		bSelfCollisionData[cellStartOffset + key] = -1;
+		bSelfCollisionData[cellStartOffset + key + 1] = -1;
+	}
+	GroupMemoryBarrierWithGroupSync();
+}
+
+// Compute the particle AABB via a parallel max-reduction in shared memory.
+// 192 slots = 3 axes (threadIdx >> 6) * 2 bounds * 32 lanes; the second set
+// of 32 lanes per axis negates the values so a max-reduction yields -min.
+// Results land in gFrameData.mParticleBounds as (upper, -lower) per axis.
+void computeParticleBounds(IParticles curParticles, uint32_t threadIdx)
+{
+	if (threadIdx < 192)
+	{
+		int32_t axisIdx = threadIdx >> 6; // x, y, or z
+		float signf = (threadIdx & 32) ? -1.0f : +1.0f; // sign bit (min or max)
+
+		// each of the 32 lanes strides over the particles of its axis/sign slot
+		uint32_t curIt = min(threadIdx.x & 31, gClothData.mNumParticles - 1);
+
+		gBounds[threadIdx] = curParticles.get(curIt)[axisIdx] * signf;
+		while (curIt += 32, curIt < gClothData.mNumParticles)
+		{
+			gBounds[threadIdx] = max(gBounds[threadIdx], curParticles.get(curIt)[axisIdx] * signf);
+		}
+	}
+	GroupMemoryBarrierWithGroupSync();
+	// tree reduction over the 32 lanes of each slot with strides 16, 8, 4, 2, 1.
+	// The single guard (< 176) is sufficient for every step: the farthest
+	// read is 175 + 16 = 191, still inside gBounds.
+	if (threadIdx < 192 - 16)
+	{
+		gBounds[threadIdx] = max(gBounds[threadIdx], gBounds[threadIdx + 16]);
+	}
+	GroupMemoryBarrierWithGroupSync();
+	if (threadIdx < 192 - 16)
+	{
+		gBounds[threadIdx] = max(gBounds[threadIdx], gBounds[threadIdx + 8]);
+	}
+	GroupMemoryBarrierWithGroupSync();
+	if (threadIdx < 192 - 16)
+	{
+		gBounds[threadIdx] = max(gBounds[threadIdx], gBounds[threadIdx + 4]);
+	}
+	GroupMemoryBarrierWithGroupSync();
+	if (threadIdx < 192 - 16)
+	{
+		gBounds[threadIdx] = max(gBounds[threadIdx], gBounds[threadIdx + 2]);
+	}
+	GroupMemoryBarrierWithGroupSync();
+	if (threadIdx < 192 - 16)
+	{
+		gBounds[threadIdx] = max(gBounds[threadIdx], gBounds[threadIdx + 1]);
+	}
+	GroupMemoryBarrierWithGroupSync();
+
+	// lane 0 of each 32-lane slot writes the final value for its slot
+	if (threadIdx.x < 192 && !(threadIdx & 31))
+	{
+		gFrameData.mParticleBounds[threadIdx >> 5] = gBounds[threadIdx];
+	}
+	GroupMemoryBarrierWithGroupSync();
+}
+
+
+// Main solver loop: run mNumIterations of integration, constraints, and
+// collision. alpha interpolates time-varying inputs (constraints, collision
+// shapes) from the start (0) toward the end (1) of the frame. The order of
+// the stages and the barriers between them is load-bearing — do not reorder.
+void simulateCloth(IParticles curParticles, IParticles prevParticles, uint32_t threadIdx)
+{
+	for (uint32_t i = 0; i < gFrameData.mNumIterations; ++i)
+	{
+		const float alpha = (i + 1.0f) / gFrameData.mNumIterations;
+
+		// thread 0 loads this iteration's integration data into shared memory
+		if (!threadIdx)
+			gIterData = bIterData[gFrameData.mFirstIteration + i];
+		GroupMemoryBarrierWithGroupSync();
+
+		integrateParticles(curParticles, prevParticles, threadIdx);
+		accelerateParticles(curParticles, threadIdx);
+		constrainMotion(curParticles, threadIdx, alpha);
+		constrainTether(curParticles, threadIdx);
+		// note: GroupMemoryBarrierWithGroupSync at beginning of each fabric phase
+		solveFabric(curParticles, threadIdx);
+		GroupMemoryBarrierWithGroupSync();
+		constrainSeparation(curParticles, threadIdx, alpha);
+		computeParticleBounds(curParticles, threadIdx);
+		// collision interpolates shapes between the previous and current alpha
+		collideParticles(curParticles, prevParticles, threadIdx, alpha, float(i) / gFrameData.mNumIterations);
+		selfCollideParticles(curParticles, threadIdx);
+		updateSleepState(curParticles, prevParticles, threadIdx);
+	}
+	GroupMemoryBarrierWithGroupSync();
+}
+
+// Particle storage in group-shared memory, laid out as structure-of-arrays:
+// four consecutive banks of MaxParticlesInSharedMem uints holding the
+// bit patterns of x, y, z and w.
+class ParticlesInSharedMem : IParticles
+{
+	float4 get(uint32_t index)
+	{
+		float4 p;
+		p.x = asfloat(gCurParticles[index]);
+		p.y = asfloat(gCurParticles[index + MaxParticlesInSharedMem]);
+		p.z = asfloat(gCurParticles[index + MaxParticlesInSharedMem * 2]);
+		p.w = asfloat(gCurParticles[index + MaxParticlesInSharedMem * 3]);
+		return p;
+	}
+	void set(uint32_t index, float4 value)
+	{
+		gCurParticles[index] = asuint(value.x);
+		gCurParticles[index + MaxParticlesInSharedMem] = asuint(value.y);
+		gCurParticles[index + MaxParticlesInSharedMem * 2] = asuint(value.z);
+		gCurParticles[index + MaxParticlesInSharedMem * 3] = asuint(value.w);
+	}
+};
+
+// Particle storage backed by the global bParticles buffer, starting at
+// _offset (set by the caller before use).
+class ParticlesInGlobalMem : IParticles
+{
+	uint32_t _offset;
+
+	float4 get(uint32_t index)
+	{
+		uint32_t element = _offset + index;
+		return bParticles[element];
+	}
+	void set(uint32_t index, float4 value)
+	{
+		uint32_t element = _offset + index;
+		bParticles[element] = value;
+	}
+};
+
+
+// Kernel entry point: one thread group simulates one cloth instance.
+// Thread 0 stages the per-cloth and per-frame data into shared memory;
+// small cloths are simulated out of shared memory, large ones in place
+// in global memory.
+[numthreads(blockDim, 1, 1)] void main(uint32_t blockIdx : SV_GroupID, uint32_t threadIdx : SV_GroupThreadID)
+{
+	if (!threadIdx)
+	{
+		gClothData = bClothData[blockIdx];
+		gFrameData = bFrameData[blockIdx];
+	}
+	GroupMemoryBarrierWithGroupSync(); // wait for gClothData/gFrameData being written
+
+	// previous-iteration particles are stored directly after the current ones
+	ParticlesInGlobalMem prevParticles;
+	prevParticles._offset = gClothData.mParticlesOffset + gClothData.mNumParticles;
+
+	if (gClothData.mNumParticles > MaxParticlesInSharedMem)
+	{
+		// too many particles for shared memory: simulate in place
+		ParticlesInGlobalMem curParticles;
+		curParticles._offset = gClothData.mParticlesOffset;
+
+		simulateCloth(curParticles, prevParticles, threadIdx);
+	}
+	else
+	{
+		// stage current particles into shared memory, simulate, write back
+		ParticlesInSharedMem curParticles;
+
+		for (uint32_t src = threadIdx; src < gClothData.mNumParticles; src += blockDim)
+		{
+			curParticles.set(src, bParticles[gClothData.mParticlesOffset + src]);
+		}
+
+		simulateCloth(curParticles, prevParticles, threadIdx);
+
+		for (uint32_t dst = threadIdx; dst < gClothData.mNumParticles; dst += blockDim)
+		{
+			bParticles[gClothData.mParticlesOffset + dst] = curParticles.get(dst);
+		}
+	}
+
+	// thread 0 persists the updated frame data (sleep counters etc.)
+	if (!threadIdx)
+	{
+		bFrameData[blockIdx] = gFrameData;
+	}
+}
diff --git a/NvCloth/src/dx/DxSolverKernelBlob.h b/NvCloth/src/dx/DxSolverKernelBlob.h
new file mode 100644
index 0000000..8c2dd82
--- /dev/null
+++ b/NvCloth/src/dx/DxSolverKernelBlob.h
@@ -0,0 +1,24502 @@
+#if 0
+//
+// Generated by Microsoft (R) HLSL Shader Compiler 6.3.9600.16384
+//
+//
+// Buffer Definitions:
+//
+// Resource bind info for bClothData
+// {
+//
+// struct DxClothData
+// {
+//
+// uint mNumParticles; // Offset: 0
+// uint mParticlesOffset; // Offset: 4
+// uint mNumPhases; // Offset: 8
+// uint mPhaseConfigOffset; // Offset: 12
+// uint mConstraintOffset; // Offset: 16
+// uint mStiffnessOffset; // Offset: 20
+// uint mNumTethers; // Offset: 24
+// uint mTetherOffset; // Offset: 28
+// float mTetherConstraintScale; // Offset: 32
+// float mMotionConstraintScale; // Offset: 36
+// float mMotionConstraintBias; // Offset: 40
+// uint mNumCapsules; // Offset: 44
+// uint mCapsuleOffset; // Offset: 48
+// uint mNumSpheres; // Offset: 52
+// uint mNumPlanes; // Offset: 56
+// uint mNumConvexes; // Offset: 60
+// uint mConvexMasksOffset; // Offset: 64
+// uint mNumCollisionTriangles; // Offset: 68
+// uint mEnableContinuousCollision;// Offset: 72
+// float mCollisionMassScale; // Offset: 76
+// float mFrictionScale; // Offset: 80
+// float mSelfCollisionDistance; // Offset: 84
+// uint mNumSelfCollisionIndices; // Offset: 88
+// uint mSelfCollisionIndicesOffset;// Offset: 92
+// uint mSelfCollisionParticlesOffset;// Offset: 96
+// uint mSelfCollisionDataOffset; // Offset: 100
+// uint mSleepTestInterval; // Offset: 104
+// uint mSleepAfterCount; // Offset: 108
+// float mSleepThreshold; // Offset: 112
+//
+// } $Element; // Offset: 0 Size: 116
+//
+// }
+//
+// Resource bind info for bIterData
+// {
+//
+// struct DxIterationData
+// {
+//
+// float mIntegrationTrafo[24]; // Offset: 0
+// uint mIsTurning; // Offset: 96
+//
+// } $Element; // Offset: 0 Size: 100
+//
+// }
+//
+// Resource bind info for bPhaseConfigs
+// {
+//
+// struct DxPhaseConfig
+// {
+//
+// float mStiffness; // Offset: 0
+// float mStiffnessMultiplier; // Offset: 4
+// float mCompressionLimit; // Offset: 8
+// float mStretchLimit; // Offset: 12
+// uint mFirstConstraint; // Offset: 16
+// uint mNumConstraints; // Offset: 20
+//
+// } $Element; // Offset: 0 Size: 24
+//
+// }
+//
+// Resource bind info for bConstraints
+// {
+//
+// struct DxConstraint
+// {
+//
+// float mRestvalue; // Offset: 0
+// uint mIndices; // Offset: 4
+//
+// } $Element; // Offset: 0 Size: 8
+//
+// }
+//
+// Resource bind info for bTetherConstraints
+// {
+//
+// struct DxTether
+// {
+//
+// uint mValue; // Offset: 0
+//
+// } $Element; // Offset: 0 Size: 4
+//
+// }
+//
+// Resource bind info for bCapsuleIndices
+// {
+//
+// struct IndexPair
+// {
+//
+// uint first; // Offset: 0
+// uint second; // Offset: 4
+//
+// } $Element; // Offset: 0 Size: 8
+//
+// }
+//
+// Resource bind info for bCollisionSpheres
+// {
+//
+// float4 $Element; // Offset: 0 Size: 16
+//
+// }
+//
+// Resource bind info for bConvexMasks
+// {
+//
+// uint $Element; // Offset: 0 Size: 4
+//
+// }
+//
+// Resource bind info for bCollisionPlanes
+// {
+//
+// float4 $Element; // Offset: 0 Size: 16
+//
+// }
+//
+// Resource bind info for bCollisionTriangles
+// {
+//
+// float3 $Element; // Offset: 0 Size: 12
+//
+// }
+//
+// Resource bind info for bMotionConstraints
+// {
+//
+// float4 $Element; // Offset: 0 Size: 16
+//
+// }
+//
+// Resource bind info for bSeparationConstraints
+// {
+//
+// float4 $Element; // Offset: 0 Size: 16
+//
+// }
+//
+// Resource bind info for bParticleAccelerations
+// {
+//
+// float4 $Element; // Offset: 0 Size: 16
+//
+// }
+//
+// Resource bind info for bRestPositions
+// {
+//
+// float4 $Element; // Offset: 0 Size: 16
+//
+// }
+//
+// Resource bind info for bSelfCollisionIndices
+// {
+//
+// int $Element; // Offset: 0 Size: 4
+//
+// }
+//
+// Resource bind info for bPerConstraintStiffness
+// {
+//
+// float $Element; // Offset: 0 Size: 4
+//
+// }
+//
+// Resource bind info for bParticles
+// {
+//
+// float4 $Element; // Offset: 0 Size: 16
+//
+// }
+//
+// Resource bind info for bSelfCollisionParticles
+// {
+//
+// float4 $Element; // Offset: 0 Size: 16
+//
+// }
+//
+// Resource bind info for bSelfCollisionData
+// {
+//
+// uint $Element; // Offset: 0 Size: 4
+//
+// }
+//
+// Resource bind info for bFrameData
+// {
+//
+// struct DxFrameData
+// {
+//
+// bool mDeviceParticlesDirty; // Offset: 0
+// uint mNumSharedPositions; // Offset: 4
+// float mIterDt; // Offset: 8
+// uint mFirstIteration; // Offset: 12
+// uint mNumIterations; // Offset: 16
+// float mTetherConstraintStiffness;// Offset: 20
+// float mMotionConstraintStiffness;// Offset: 24
+// uint mStartMotionConstrainsOffset;// Offset: 28
+// uint mTargetMotionConstrainsOffset;// Offset: 32
+// uint mStartSeparationConstrainsOffset;// Offset: 36
+// uint mTargetSeparationConstrainsOffset;// Offset: 40
+// uint mParticleAccelerationsOffset;// Offset: 44
+// uint mStartSphereOffset; // Offset: 48
+// uint mTargetSphereOffset; // Offset: 52
+// uint mStartCollisionPlaneOffset;// Offset: 56
+// uint mTargetCollisionPlaneOffset;// Offset: 60
+// uint mStartCollisionTrianglesOffset;// Offset: 64
+// uint mTargetCollisionTrianglesOffset;// Offset: 68
+// float mSelfCollisionStiffness; // Offset: 72
+// float mParticleBounds[6]; // Offset: 76
+// uint mSleepPassCounter; // Offset: 100
+// uint mSleepTestCounter; // Offset: 104
+// float mStiffnessExponent; // Offset: 108
+// uint mRestPositionsOffset; // Offset: 112
+// bool mInitSelfCollisionData; // Offset: 116
+//
+// } $Element; // Offset: 0 Size: 120
+//
+// }
+//
+//
+// Resource Bindings:
+//
+// Name Type Format Dim Slot Elements
+// ------------------------------ ---------- ------- ----------- ---- --------
+// bClothData texture struct r/o 0 1
+// bIterData texture struct r/o 2 1
+// bPhaseConfigs texture struct r/o 3 1
+// bConstraints texture struct r/o 4 1
+// bTetherConstraints texture struct r/o 5 1
+// bCapsuleIndices texture struct r/o 6 1
+// bCollisionSpheres texture struct r/o 7 1
+// bConvexMasks texture struct r/o 8 1
+// bCollisionPlanes texture struct r/o 9 1
+// bCollisionTriangles texture struct r/o 10 1
+// bMotionConstraints texture struct r/o 11 1
+// bSeparationConstraints texture struct r/o 12 1
+// bParticleAccelerations texture struct r/o 13 1
+// bRestPositions texture struct r/o 14 1
+// bSelfCollisionIndices texture struct r/o 15 1
+// bPerConstraintStiffness texture struct r/o 16 1
+// bParticles UAV struct r/w 0 1
+// bSelfCollisionParticles UAV struct r/w 1 1
+// bSelfCollisionData UAV struct r/w 2 1
+// bFrameData UAV struct r/w 3 1
+//
+//
+//
+// Input signature:
+//
+// Name Index Mask Register SysValue Format Used
+// -------------------- ----- ------ -------- -------- ------- ------
+// no Input
+//
+// Output signature:
+//
+// Name Index Mask Register SysValue Format Used
+// -------------------- ----- ------ -------- -------- ------- ------
+// no Output
+cs_5_0
+dcl_globalFlags refactoringAllowed
+dcl_immediateConstantBuffer { { 1.000000, 0, 0, 0},
+ { 0, 1.000000, 0, 0},
+ { 0, 0, 1.000000, 0},
+ { 0, 0, 0, 1.000000} }
+dcl_resource_structured t0, 116
+dcl_resource_structured t2, 100
+dcl_resource_structured t3, 24
+dcl_resource_structured t4, 8
+dcl_resource_structured t5, 4
+dcl_resource_structured t6, 8
+dcl_resource_structured t7, 16
+dcl_resource_structured t8, 4
+dcl_resource_structured t9, 16
+dcl_resource_structured t10, 12
+dcl_resource_structured t11, 16
+dcl_resource_structured t12, 16
+dcl_resource_structured t13, 16
+dcl_resource_structured t14, 16
+dcl_resource_structured t15, 4
+dcl_resource_structured t16, 4
+dcl_uav_structured u0, 16
+dcl_uav_structured u1, 16
+dcl_uav_structured u2, 4
+dcl_uav_structured u3, 120
+dcl_input vThreadGroupID.x
+dcl_input vThreadIDInGroup.x
+dcl_temps 48
+dcl_tgsm_raw g0, 116
+dcl_tgsm_raw g1, 120
+dcl_tgsm_raw g2, 100
+dcl_tgsm_structured g3, 4, 7900
+dcl_tgsm_structured g4, 4, 192
+dcl_tgsm_raw g5, 36
+dcl_tgsm_structured g6, 4, 3
+dcl_thread_group 1024, 1, 1
+if_z vThreadIDInGroup.x
+ ld_structured_indexable(structured_buffer, stride=116)(mixed,mixed,mixed,mixed) r0.xyzw, vThreadGroupID.x, l(0), t0.xyzw
+ ld_structured_indexable(structured_buffer, stride=116)(mixed,mixed,mixed,mixed) r1.xyzw, vThreadGroupID.x, l(16), t0.xyzw
+ ld_structured_indexable(structured_buffer, stride=116)(mixed,mixed,mixed,mixed) r2.xyzw, vThreadGroupID.x, l(32), t0.xyzw
+ ld_structured_indexable(structured_buffer, stride=116)(mixed,mixed,mixed,mixed) r3.xy, vThreadGroupID.x, l(48), t0.xyxx
+ ld_structured_indexable(structured_buffer, stride=116)(mixed,mixed,mixed,mixed) r4.xyzw, vThreadGroupID.x, l(60), t0.xyzw
+ ld_structured_indexable(structured_buffer, stride=116)(mixed,mixed,mixed,mixed) r5.xyzw, vThreadGroupID.x, l(76), t0.xyzw
+ ld_structured_indexable(structured_buffer, stride=116)(mixed,mixed,mixed,mixed) r6.xyzw, vThreadGroupID.x, l(92), t0.xyzw
+ ld_structured_indexable(structured_buffer, stride=116)(mixed,mixed,mixed,mixed) r3.z, vThreadGroupID.x, l(112), t0.xxxx
+ store_raw g0.xyzw, l(0), r0.xyzw
+ store_raw g0.xyzw, l(16), r1.xyzw
+ store_raw g0.xyzw, l(32), r2.xyzw
+ store_raw g0.xy, l(48), r3.xyxx
+ store_raw g0.xyzw, l(60), r4.xyzw
+ store_raw g0.xyzw, l(76), r5.xyzw
+ store_raw g0.xyzw, l(92), r6.xyzw
+ store_raw g0.x, l(112), r3.z
+ ld_structured_indexable(structured_buffer, stride=120)(mixed,mixed,mixed,mixed) r0.xyzw, vThreadGroupID.x, l(0), u3.xyzw
+ ld_structured_indexable(structured_buffer, stride=120)(mixed,mixed,mixed,mixed) r1.xyzw, vThreadGroupID.x, l(16), u3.xyzw
+ ld_structured_indexable(structured_buffer, stride=120)(mixed,mixed,mixed,mixed) r2.xyzw, vThreadGroupID.x, l(32), u3.xyzw
+ ld_structured_indexable(structured_buffer, stride=120)(mixed,mixed,mixed,mixed) r3.xyzw, vThreadGroupID.x, l(48), u3.xyzw
+ ld_structured_indexable(structured_buffer, stride=120)(mixed,mixed,mixed,mixed) r4.xyzw, vThreadGroupID.x, l(64), u3.xyzw
+ ld_structured_indexable(structured_buffer, stride=120)(mixed,mixed,mixed,mixed) r5.xyzw, vThreadGroupID.x, l(80), u3.xyzw
+ ld_structured_indexable(structured_buffer, stride=120)(mixed,mixed,mixed,mixed) r6.xyzw, vThreadGroupID.x, l(96), u3.xyzw
+ ld_structured_indexable(structured_buffer, stride=120)(mixed,mixed,mixed,mixed) r7.xy, vThreadGroupID.x, l(112), u3.xyxx
+ store_raw g1.xyzw, l(0), r0.xyzw
+ store_raw g1.xyzw, l(16), r1.xyzw
+ store_raw g1.xyzw, l(32), r2.xyzw
+ store_raw g1.xyzw, l(48), r3.xyzw
+ store_raw g1.xyzw, l(64), r4.xyzw
+ store_raw g1.xyzw, l(80), r5.xyzw
+ store_raw g1.xyzw, l(96), r6.xyzw
+ store_raw g1.xy, l(112), r7.xyxx
+endif
+sync_g_t
+ld_raw r0.xy, l(0), g0.xyxx
+iadd r0.x, r0.x, r0.y
+ld_raw r0.y, l(0), g0.xxxx
+uge r0.z, l(1975), r0.y
+if_nz r0.z
+ ld_raw r0.z, l(4), g0.xxxx
+ mov r1.x, vThreadIDInGroup.x
+ loop
+ uge r0.w, r1.x, r0.y
+ breakc_nz r0.w
+ iadd r0.w, r0.z, r1.x
+ ld_structured_indexable(structured_buffer, stride=16)(mixed,mixed,mixed,mixed) r2.xyzw, r0.w, l(0), u0.xyzw
+ store_structured g3.x, r1.x, l(0), r2.x
+ iadd r1.xyzw, r1.xxxx, l(1024, 1975, 3950, 5925)
+ store_structured g3.x, r1.y, l(0), r2.y
+ store_structured g3.x, r1.z, l(0), r2.z
+ store_structured g3.x, r1.w, l(0), r2.w
+ endloop
+ not r0.y, vThreadIDInGroup.x
+ and r0.y, r0.y, l(3)
+ ult r1.xyzw, vThreadIDInGroup.xxxx, l(192, 176, 3, 512)
+ and r2.xyzw, vThreadIDInGroup.xxxx, l(32, 31, 1, 2)
+ movc r0.z, r2.x, l(-1.000000), l(1.000000)
+ iadd r3.xyzw, vThreadIDInGroup.xxxx, l(16, 8, 4, 2)
+ ieq r0.w, r2.y, l(0)
+ and r0.w, r0.w, r1.x
+ ushr r4.xy, vThreadIDInGroup.xxxx, l(6, 5, 0, 0)
+ ishl r2.x, r4.y, l(2)
+ ishl r4.z, vThreadIDInGroup.x, l(3)
+ iadd r4.z, r4.z, l(76)
+ iadd r2.xz, r2.xxzx, l(76, 0, -1, 0)
+ ine r2.w, r2.w, l(0)
+ ieq r2.w, r2.w, l(0)
+ and r5.xyz, vThreadIDInGroup.xxxx, l(4, 8, 992, 0)
+ ine r5.xy, r5.xyxx, l(0, 0, 0, 0)
+ ieq r5.xy, r5.xyxx, l(0, 0, 0, 0)
+ iadd r6.xyzw, vThreadIDInGroup.xxxx, l(1, 1040, 3088, 1032)
+ ult r7.xyzw, r2.yyyy, l(16, 8, 4, 2)
+ ult r4.w, r2.y, l(1)
+ iadd r8.xyzw, vThreadIDInGroup.xxxx, l(3080, 1028, 3076, 1026)
+ iadd r9.xyzw, r3.xyzw, l(2048, 2048, 2048, 2048)
+ iadd r10.xyz, r5.zzzz, l(1024, 2048, 3072, 0)
+ iadd r11.xy, r4.yyyy, l(1, 513, 0, 0)
+ bfi r12.xyzw, l(10, 11, 10, 5), l(0, 0, 0, 5), vThreadIDInGroup.xxxx, l(1024, 2048, 3072, 0)
+ iadd r11.xz, r11.xxyx, r12.wwww
+ iadd r13.xyzw, vThreadIDInGroup.xxxx, l(3074, 1025, 3073, 514)
+ iadd r14.xy, r6.xxxx, l(2048, 4096, 0, 0)
+ iadd r5.w, vThreadIDInGroup.x, l(4096)
+ iadd r10.w, r13.w, l(4096)
+ iadd r15.xyzw, vThreadIDInGroup.xxxx, l(4097, 512, 4610, 4093)
+ iadd r11.w, r15.y, l(4096)
+ uge r16.xyzw, vThreadIDInGroup.xxxx, l(1, 2, 4, 8)
+ iadd r17.xyzw, vThreadIDInGroup.xxxx, l(506, 4081, 482, 4033)
+ iadd r14.zw, r17.xxxz, l(0, 0, 4096, 4096)
+ uge r18.xyzw, vThreadIDInGroup.xxxx, l(16, 32, 64, 128)
+ iadd r17.xz, vThreadIDInGroup.xxxx, l(386, 0, 3841, 0)
+ iadd r12.w, r17.x, l(4096)
+ uge r13.w, vThreadIDInGroup.x, l(256)
+ iadd r11.xz, r11.xxzx, l(4096, 0, 4096, 0)
+ mov r15.y, l(0)
+ loop
+ ld_raw r17.x, l(16), g1.xxxx
+ uge r19.x, r15.y, r17.x
+ breakc_nz r19.x
+ utof r19.x, r15.y
+ add r19.y, r19.x, l(1.000000)
+ utof r17.x, r17.x
+ div r17.x, r19.y, r17.x
+ if_z vThreadIDInGroup.x
+ ld_raw r19.y, l(12), g1.xxxx
+ iadd r19.y, r15.y, r19.y
+ ld_structured_indexable(structured_buffer, stride=100)(mixed,mixed,mixed,mixed) r20.xyzw, r19.y, l(0), t2.xyzw
+ ld_structured_indexable(structured_buffer, stride=100)(mixed,mixed,mixed,mixed) r21.xyzw, r19.y, l(16), t2.xyzw
+ ld_structured_indexable(structured_buffer, stride=100)(mixed,mixed,mixed,mixed) r22.xyzw, r19.y, l(32), t2.xyzw
+ ld_structured_indexable(structured_buffer, stride=100)(mixed,mixed,mixed,mixed) r23.xyzw, r19.y, l(48), t2.xyzw
+ ld_structured_indexable(structured_buffer, stride=100)(mixed,mixed,mixed,mixed) r24.xyzw, r19.y, l(64), t2.xyzw
+ ld_structured_indexable(structured_buffer, stride=100)(mixed,mixed,mixed,mixed) r25.xyzw, r19.y, l(80), t2.xyzw
+ ld_structured_indexable(structured_buffer, stride=100)(mixed,mixed,mixed,mixed) r19.y, r19.y, l(96), t2.xxxx
+ store_raw g2.xyzw, l(0), r20.xyzw
+ store_raw g2.xyzw, l(16), r21.xyzw
+ store_raw g2.xyzw, l(32), r22.xyzw
+ store_raw g2.xyzw, l(48), r23.xyzw
+ store_raw g2.xyzw, l(64), r24.xyzw
+ store_raw g2.xyzw, l(80), r25.xyzw
+ store_raw g2.x, l(96), r19.y
+ endif
+ sync_g_t
+ ld_raw r19.y, l(0), g0.xxxx
+ ld_raw r20.xyz, l(12), g2.xyzx
+ ld_raw r21.xyzw, l(60), g2.xyzw
+ ld_raw r22.xyzw, l(24), g2.xyzw
+ ld_raw r23.xyzw, l(76), g2.xyzw
+ ld_raw r24.xyzw, l(40), g2.xyzw
+ ld_raw r19.zw, l(92), g2.xxxy
+ ld_raw r20.w, l(56), g2.xxxx
+ ld_raw r25.xyzw, l(12), g2.xyzw
+ ld_raw r26.x, l(36), g2.xxxx
+ ld_raw r26.y, l(48), g2.xxxx
+ mov r26.z, vThreadIDInGroup.x
+ loop
+ uge r26.w, r26.z, r19.y
+ breakc_nz r26.w
+ ld_structured r27.x, r26.z, l(0), g3.xxxx
+ iadd r28.xyzw, r26.zzzz, l(1975, 3950, 5925, 1024)
+ ld_structured r27.y, r28.x, l(0), g3.xxxx
+ ld_structured r27.z, r28.y, l(0), g3.xxxx
+ ld_structured r26.w, r28.z, l(0), g3.xxxx
+ iadd r27.w, r0.x, r26.z
+ ld_structured_indexable(structured_buffer, stride=16)(mixed,mixed,mixed,mixed) r29.xyzw, r27.w, l(0), u0.xyzw
+ eq r30.x, r26.w, l(0.000000)
+ movc r26.w, r30.x, r29.w, r26.w
+ lt r29.w, l(0.000000), r26.w
+ if_nz r29.w
+ if_nz r19.w
+ add r30.xy, r20.xyxx, r27.xyxx
+ mad r30.xy, r27.xxxx, r21.xwxx, r30.xyxx
+ mad r30.xy, r29.xxxx, r22.xwxx, r30.xyxx
+ mad r29.w, r27.y, r21.y, r30.x
+ mad r29.w, r29.y, r22.y, r29.w
+ mad r29.w, r27.z, r21.z, r29.w
+ mad r31.x, r29.z, r22.z, r29.w
+ mad r29.w, r27.y, r23.x, r30.y
+ mad r29.w, r29.y, r24.x, r29.w
+ mad r29.w, r27.z, r23.y, r29.w
+ mad r31.y, r29.z, r24.y, r29.w
+ add r29.w, r20.z, r27.z
+ mad r29.w, r27.x, r23.z, r29.w
+ mad r29.w, r29.x, r24.z, r29.w
+ mad r29.w, r27.y, r23.w, r29.w
+ mad r29.w, r29.y, r24.w, r29.w
+ mad r29.w, r27.z, r19.z, r29.w
+ mad r29.w, r29.z, r20.w, r29.w
+ else
+ add r29.xy, r27.xyxx, -r29.xyxx
+ mad r29.x, r29.x, r25.w, r25.x
+ add r31.x, r27.x, r29.x
+ mad r29.x, r29.y, r26.x, r25.y
+ add r31.y, r27.y, r29.x
+ add r29.x, r27.z, -r29.z
+ mad r29.x, r29.x, r26.y, r25.z
+ add r29.w, r27.z, r29.x
+ endif
+ ld_raw r29.xyz, l(0), g2.xyzx
+ add r27.xyz, r27.xyzx, r29.xyzx
+ else
+ mov r31.xy, r27.xyxx
+ mov r29.w, r27.z
+ endif
+ store_structured g3.x, r26.z, l(0), r31.x
+ store_structured g3.x, r28.x, l(0), r31.y
+ store_structured g3.x, r28.y, l(0), r29.w
+ store_structured g3.x, r28.z, l(0), r26.w
+ store_structured u0.xyz, r27.w, l(0), r27.xyzx
+ mov r26.z, r28.w
+ endloop
+ ld_raw r19.y, l(44), g1.xxxx
+ sync_g_t
+ ld_raw r19.z, l(8), g1.xxxx
+ mul r19.z, r19.z, r19.z
+ movc r19.z, r0.y, r19.z, l(0)
+ ld_raw r19.w, l(0), g0.xxxx
+ ishl r20.x, r19.w, l(2)
+ mov r20.y, vThreadIDInGroup.x
+ loop
+ uge r20.z, r20.y, r20.x
+ breakc_nz r20.z
+ ushr r20.z, r20.y, l(2)
+ ld_structured r21.x, r20.z, l(0), g3.xxxx
+ iadd r22.xyz, r20.zzzz, l(1975, 3950, 5925, 0)
+ ld_structured r21.y, r22.x, l(0), g3.xxxx
+ ld_structured r21.z, r22.y, l(0), g3.xxxx
+ ld_structured r21.w, r22.z, l(0), g3.xxxx
+ lt r20.w, l(0.000000), r21.w
+ if_nz r20.w
+ iadd r20.w, r19.y, r20.y
+ ld_structured_indexable(structured_buffer, stride=16)(mixed,mixed,mixed,mixed) r23.xyzw, r20.w, l(0), t13.xyzw
+ mad r21.xyzw, r23.xyzw, r19.zzzz, r21.xyzw
+ store_structured g3.x, r20.z, l(0), r21.x
+ store_structured g3.x, r22.x, l(0), r21.y
+ store_structured g3.x, r22.y, l(0), r21.z
+ store_structured g3.x, r22.z, l(0), r21.w
+ endif
+ iadd r20.y, r20.y, l(1024)
+ endloop
+ sync_g_t
+ ld_raw r19.y, l(28), g1.xxxx
+ ine r19.y, r19.y, l(-1)
+ if_nz r19.y
+ ld_raw r19.yz, l(36), g0.xxyx
+ ld_raw r20.xyz, l(24), g1.xyzx
+ ld_raw r19.w, l(0), g0.xxxx
+ mov r20.w, vThreadIDInGroup.x
+ loop
+ uge r21.x, r20.w, r19.w
+ breakc_nz r21.x
+ iadd r21.xy, r20.wwww, r20.yzyy
+ ld_structured_indexable(structured_buffer, stride=16)(mixed,mixed,mixed,mixed) r22.xyzw, r21.x, l(0), t11.xyzw
+ ld_structured_indexable(structured_buffer, stride=16)(mixed,mixed,mixed,mixed) r21.xyzw, r21.y, l(0), t11.xyzw
+ add r21.xyzw, -r22.xyzw, r21.xyzw
+ mad r21.xyzw, r21.xyzw, r17.xxxx, r22.xyzw
+ ld_structured r22.x, r20.w, l(0), g3.xxxx
+ iadd r23.xyzw, r20.wwww, l(1975, 3950, 5925, 1024)
+ ld_structured r22.y, r23.x, l(0), g3.xxxx
+ ld_structured r22.z, r23.y, l(0), g3.xxxx
+ ld_structured r22.w, r23.z, l(0), g3.xxxx
+ add r21.xyz, r21.xyzx, -r22.xyzx
+ dp3 r24.x, r21.xyzx, r21.xyzx
+ add r24.x, r24.x, l(0.000000)
+ mad r21.w, r21.w, -r19.y, -r19.z
+ min r21.w, r21.w, l(0.000000)
+ rsq r24.x, r24.x
+ mad r24.x, r21.w, r24.x, l(1.000000)
+ max r24.x, r24.x, l(0.000000)
+ mul r24.x, r20.x, r24.x
+ mad r21.xyz, r24.xxxx, r21.xyzx, r22.xyzx
+ ge r21.w, r21.w, l(0.000000)
+ movc r21.w, r21.w, l(0), r22.w
+ store_structured g3.x, r20.w, l(0), r21.x
+ store_structured g3.x, r23.x, l(0), r21.y
+ store_structured g3.x, r23.y, l(0), r21.z
+ store_structured g3.x, r23.z, l(0), r21.w
+ mov r20.w, r23.w
+ endloop
+ endif
+ ld_raw r19.y, l(20), g1.xxxx
+ eq r19.z, r19.y, l(0.000000)
+ ld_raw r19.w, l(24), g0.xxxx
+ ieq r20.x, r19.w, l(0)
+ or r19.z, r19.z, r20.x
+ if_z r19.z
+ ld_raw r19.z, l(0), g0.xxxx
+ utof r20.x, r19.z
+ mul r19.y, r19.y, r20.x
+ utof r20.x, r19.w
+ div r19.y, r19.y, r20.x
+ ld_raw r20.xy, l(28), g0.xyxx
+ mov r20.z, vThreadIDInGroup.x
+ loop
+ uge r20.w, r20.z, r19.z
+ breakc_nz r20.w
+ ld_structured r20.w, r20.z, l(0), g3.xxxx
+ iadd r21.xyzw, r20.zzzz, l(1975, 3950, 5925, 1024)
+ ld_structured r22.x, r21.x, l(0), g3.xxxx
+ ld_structured r22.y, r21.y, l(0), g3.xxxx
+ ld_structured r22.z, r21.z, l(0), g3.xxxx
+ mov r23.xyz, l(0,0,0,0)
+ mov r22.w, r20.z
+ loop
+ uge r23.w, r22.w, r19.w
+ breakc_nz r23.w
+ iadd r23.w, r20.x, r22.w
+ ld_structured_indexable(structured_buffer, stride=4)(mixed,mixed,mixed,mixed) r23.w, r23.w, l(0), t5.xxxx
+ and r24.x, r23.w, l(0x0000ffff)
+ ld_structured r24.y, r24.x, l(0), g3.xxxx
+ iadd r24.xz, r24.xxxx, l(1975, 0, 3950, 0)
+ ld_structured r24.x, r24.x, l(0), g3.xxxx
+ ld_structured r24.z, r24.z, l(0), g3.xxxx
+ add r24.y, -r20.w, r24.y
+ add r24.xz, -r22.xxyx, r24.xxzx
+ mad r24.w, r24.y, r24.y, l(0.000000)
+ mad r24.w, r24.x, r24.x, r24.w
+ mad r24.w, r24.z, r24.z, r24.w
+ ushr r23.w, r23.w, l(16)
+ utof r23.w, r23.w
+ mul r23.w, r20.y, r23.w
+ rsq r24.w, r24.w
+ mad r23.w, -r23.w, r24.w, l(1.000000)
+ lt r24.w, l(0.000000), r23.w
+ mad r25.x, r24.y, r23.w, r23.x
+ mad r25.y, r24.x, r23.w, r23.y
+ mad r25.z, r24.z, r23.w, r23.z
+ movc r23.xyz, r24.wwww, r25.xyzx, r23.xyzx
+ iadd r22.w, r19.z, r22.w
+ endloop
+ mad r20.w, r23.x, r19.y, r20.w
+ mad r22.x, r23.y, r19.y, r22.x
+ mad r22.y, r23.z, r19.y, r22.y
+ store_structured g3.x, r20.z, l(0), r20.w
+ store_structured g3.x, r21.x, l(0), r22.x
+ store_structured g3.x, r21.y, l(0), r22.y
+ store_structured g3.x, r21.z, l(0), r22.z
+ mov r20.z, r21.w
+ endloop
+ endif
+ mov r19.y, l(0)
+ loop
+ ld_raw r19.z, l(8), g0.xxxx
+ uge r19.z, r19.y, r19.z
+ breakc_nz r19.z
+ ld_raw r20.xyz, l(12), g0.xyzx
+ iadd r19.z, r19.y, r20.x
+ ld_structured_indexable(structured_buffer, stride=24)(mixed,mixed,mixed,mixed) r21.xyzw, r19.z, l(0), t3.xyzw
+ ld_structured_indexable(structured_buffer, stride=24)(mixed,mixed,mixed,mixed) r19.zw, r19.z, l(16), t3.xxxy
+ ld_raw r20.x, l(108), g1.xxxx
+ mul r21.xy, r20.xxxx, r21.xyxx
+ exp r21.xy, r21.xyxx
+ add r21.xy, -r21.xyxx, l(1.000000, 1.000000, 0.000000, 0.000000)
+ iadd r20.y, r19.z, r20.y
+ ine r20.z, r20.z, l(-1)
+ ld_raw r20.w, l(20), g0.xxxx
+ iadd r19.z, r19.z, r20.w
+ sync_g_t
+ mov r20.w, vThreadIDInGroup.x
+ loop
+ uge r22.x, r20.w, r19.w
+ breakc_nz r22.x
+ iadd r22.x, r20.w, r20.y
+ ld_structured_indexable(structured_buffer, stride=8)(mixed,mixed,mixed,mixed) r22.xy, r22.x, l(0), t4.xyxx
+ and r22.z, r22.y, l(0x0000ffff)
+ ushr r22.y, r22.y, l(16)
+ ld_structured r22.w, r22.z, l(0), g3.xxxx
+ iadd r24.xyz, r22.zzzz, l(1975, 3950, 5925, 0)
+ ld_structured r23.w, r24.x, l(0), g3.xxxx
+ ld_structured r24.w, r24.y, l(0), g3.xxxx
+ ld_structured r25.x, r24.z, l(0), g3.xxxx
+ ld_structured r25.y, r22.y, l(0), g3.xxxx
+ iadd r26.xyz, r22.yyyy, l(1975, 3950, 5925, 0)
+ ld_structured r25.z, r26.x, l(0), g3.xxxx
+ ld_structured r25.w, r26.y, l(0), g3.xxxx
+ ld_structured r26.w, r26.z, l(0), g3.xxxx
+ add r27.x, -r22.w, r25.y
+ add r27.y, -r23.w, r25.z
+ add r27.z, -r24.w, r25.w
+ mad r27.w, r27.x, r27.x, l(0.000000)
+ mad r27.w, r27.y, r27.y, r27.w
+ mad r27.w, r27.z, r27.z, r27.w
+ lt r28.x, l(0.000000), r22.x
+ rsq r27.w, r27.w
+ mad r22.x, r22.x, r27.w, l(-1.000000)
+ and r22.x, r22.x, r28.x
+ min r27.w, r21.w, -r22.x
+ max r27.w, r21.z, r27.w
+ mad r22.x, r21.y, r27.w, r22.x
+ iadd r27.w, r19.z, r20.w
+ ld_structured_indexable(structured_buffer, stride=4)(mixed,mixed,mixed,mixed) r27.w, r27.w, l(0), t16.xxxx
+ mul r27.w, r20.x, r27.w
+ exp r27.w, r27.w
+ add r27.w, -r27.w, l(1.000000)
+ movc r27.w, r20.z, r27.w, r21.x
+ mul r22.x, r22.x, r27.w
+ add r27.w, r25.x, l(0.000000)
+ add r27.w, r26.w, r27.w
+ div r22.x, r22.x, r27.w
+ mul r27.w, r22.x, -r25.x
+ mad r22.w, r27.w, r27.x, r22.w
+ mad r23.w, r27.w, r27.y, r23.w
+ mad r24.w, r27.w, r27.z, r24.w
+ store_structured g3.x, r22.z, l(0), r22.w
+ store_structured g3.x, r24.x, l(0), r23.w
+ store_structured g3.x, r24.y, l(0), r24.w
+ store_structured g3.x, r24.z, l(0), r25.x
+ mul r22.x, r22.x, r26.w
+ mad r22.z, r22.x, r27.x, r25.y
+ mad r22.w, r22.x, r27.y, r25.z
+ mad r22.x, r22.x, r27.z, r25.w
+ store_structured g3.x, r22.y, l(0), r22.z
+ store_structured g3.x, r26.x, l(0), r22.w
+ store_structured g3.x, r26.y, l(0), r22.x
+ store_structured g3.x, r26.z, l(0), r26.w
+ iadd r20.w, r20.w, l(1024)
+ endloop
+ iadd r19.y, r19.y, l(1)
+ endloop
+ sync_g_t
+ ld_raw r19.y, l(36), g1.xxxx
+ ine r19.y, r19.y, l(-1)
+ if_nz r19.y
+ ld_raw r19.y, l(0), g0.xxxx
+ ld_raw r19.zw, l(36), g1.xxxy
+ mov r20.x, vThreadIDInGroup.x
+ loop
+ uge r20.y, r20.x, r19.y
+ breakc_nz r20.y
+ iadd r20.yz, r19.zzwz, r20.xxxx
+ ld_structured_indexable(structured_buffer, stride=16)(mixed,mixed,mixed,mixed) r21.xyzw, r20.y, l(0), t12.xyzw
+ ld_structured_indexable(structured_buffer, stride=16)(mixed,mixed,mixed,mixed) r22.xyzw, r20.z, l(0), t12.xyzw
+ add r22.xyzw, -r21.xyzw, r22.xyzw
+ mad r21.xyzw, r22.xyzw, r17.xxxx, r21.xyzw
+ ld_structured r22.x, r20.x, l(0), g3.xxxx
+ iadd r24.xyzw, r20.xxxx, l(1975, 3950, 5925, 1024)
+ ld_structured r22.y, r24.x, l(0), g3.xxxx
+ ld_structured r22.z, r24.y, l(0), g3.xxxx
+ ld_structured r20.y, r24.z, l(0), g3.xxxx
+ add r21.xyz, r21.xyzx, -r22.xyzx
+ dp3 r20.z, r21.xyzx, r21.xyzx
+ add r20.z, r20.z, l(0.000000)
+ rsq r20.z, r20.z
+ mad r20.z, -r21.w, r20.z, l(1.000000)
+ min r20.z, r20.z, l(0.000000)
+ mad r21.xyz, r20.zzzz, r21.xyzx, r22.xyzx
+ store_structured g3.x, r20.x, l(0), r21.x
+ store_structured g3.x, r24.x, l(0), r21.y
+ store_structured g3.x, r24.y, l(0), r21.z
+ store_structured g3.x, r24.z, l(0), r20.y
+ mov r20.x, r24.w
+ endloop
+ sync_g_t
+ endif
+ if_nz r1.x
+ ld_raw r19.y, l(0), g0.xxxx
+ iadd r19.z, r19.y, l(-1)
+ umin r19.z, r2.y, r19.z
+ ld_structured r20.x, r19.z, l(0), g3.xxxx
+ iadd r21.xyz, r19.zzzz, l(1975, 3950, 5925, 0)
+ ld_structured r20.y, r21.x, l(0), g3.xxxx
+ ld_structured r20.z, r21.y, l(0), g3.xxxx
+ ld_structured r20.w, r21.z, l(0), g3.xxxx
+ dp4 r19.w, r20.xyzw, icb[r4.x + 0].xyzw
+ mul r19.w, r0.z, r19.w
+ store_structured g4.x, vThreadIDInGroup.x, l(0), r19.w
+ mov r19.w, r19.z
+ loop
+ iadd r20.x, r19.w, l(32)
+ uge r20.y, r20.x, r19.y
+ breakc_nz r20.y
+ ld_structured r20.y, vThreadIDInGroup.x, l(0), g4.xxxx
+ ld_structured r21.x, r20.x, l(0), g3.xxxx
+ iadd r22.xyz, r19.wwww, l(2007, 3982, 5957, 0)
+ ld_structured r21.y, r22.x, l(0), g3.xxxx
+ ld_structured r21.z, r22.y, l(0), g3.xxxx
+ ld_structured r21.w, r22.z, l(0), g3.xxxx
+ dp4 r20.z, r21.xyzw, icb[r4.x + 0].xyzw
+ mul r20.z, r0.z, r20.z
+ max r20.y, r20.z, r20.y
+ store_structured g4.x, vThreadIDInGroup.x, l(0), r20.y
+ mov r19.w, r20.x
+ endloop
+ endif
+ sync_g_t
+ if_nz r1.y
+ ld_structured r19.y, vThreadIDInGroup.x, l(0), g4.xxxx
+ ld_structured r19.z, r3.x, l(0), g4.xxxx
+ max r19.y, r19.z, r19.y
+ store_structured g4.x, vThreadIDInGroup.x, l(0), r19.y
+ endif
+ sync_g_t
+ if_nz r1.y
+ ld_structured r19.y, vThreadIDInGroup.x, l(0), g4.xxxx
+ ld_structured r19.z, r3.y, l(0), g4.xxxx
+ max r19.y, r19.z, r19.y
+ store_structured g4.x, vThreadIDInGroup.x, l(0), r19.y
+ endif
+ sync_g_t
+ if_nz r1.y
+ ld_structured r19.y, vThreadIDInGroup.x, l(0), g4.xxxx
+ ld_structured r19.z, r3.z, l(0), g4.xxxx
+ max r19.y, r19.z, r19.y
+ store_structured g4.x, vThreadIDInGroup.x, l(0), r19.y
+ endif
+ sync_g_t
+ if_nz r1.y
+ ld_structured r19.y, vThreadIDInGroup.x, l(0), g4.xxxx
+ ld_structured r19.z, r3.w, l(0), g4.xxxx
+ max r19.y, r19.z, r19.y
+ store_structured g4.x, vThreadIDInGroup.x, l(0), r19.y
+ endif
+ sync_g_t
+ if_nz r1.y
+ ld_structured r19.y, vThreadIDInGroup.x, l(0), g4.xxxx
+ ld_structured r19.z, r6.x, l(0), g4.xxxx
+ max r19.y, r19.z, r19.y
+ store_structured g4.x, vThreadIDInGroup.x, l(0), r19.y
+ endif
+ sync_g_t
+ if_nz r0.w
+ ld_structured r19.y, vThreadIDInGroup.x, l(0), g4.xxxx
+ store_raw g1.x, r2.x, r19.y
+ endif
+ sync_g_t
+ ld_raw r19.y, l(16), g1.xxxx
+ utof r19.y, r19.y
+ div r19.x, r19.x, r19.y
+ ld_raw r19.y, l(60), g0.xxxx
+ if_nz r19.y
+ ld_raw r19.z, l(80), g0.xxxx
+ lt r19.z, l(0.000000), r19.z
+ ld_raw r19.w, l(0), g0.xxxx
+ ld_raw r20.x, l(64), g0.xxxx
+ ld_raw r20.yz, l(56), g1.xxyx
+ mov r20.w, vThreadIDInGroup.x
+ loop
+ uge r21.x, r20.w, r19.w
+ breakc_nz r21.x
+ ld_structured r21.x, r20.w, l(0), g3.xxxx
+ iadd r22.xyzw, r20.wwww, l(1975, 3950, 5925, 1024)
+ ld_structured r21.y, r22.x, l(0), g3.xxxx
+ ld_structured r21.z, r22.y, l(0), g3.xxxx
+ ld_structured r21.w, r22.z, l(0), g3.xxxx
+ mov r24.xyzw, l(0,0,0,0)
+ mov r23.w, l(0)
+ loop
+ uge r25.x, r23.w, r19.y
+ breakc_nz r25.x
+ iadd r25.x, r20.x, r23.w
+ ld_structured_indexable(structured_buffer, stride=4)(mixed,mixed,mixed,mixed) r25.x, r25.x, l(0), t8.xxxx
+ firstbit_lo r25.y, r25.x
+ iadd r25.yz, r20.yyzy, r25.yyyy
+ ld_structured_indexable(structured_buffer, stride=16)(mixed,mixed,mixed,mixed) r26.xyzw, r25.y, l(0), t9.xyzw
+ ld_structured_indexable(structured_buffer, stride=16)(mixed,mixed,mixed,mixed) r27.xyzw, r25.z, l(0), t9.xyzw
+ add r27.xyzw, -r26.xyzw, r27.xyzw
+ mad r26.xyzw, r17.xxxx, r27.xyzw, r26.xyzw
+ dp3 r25.y, r21.xyzx, r26.xyzx
+ add r25.y, r26.w, r25.y
+ mov r27.xyz, r26.xyzx
+ mov r25.z, r25.x
+ mov r27.w, r25.y
+ loop
+ lt r25.w, r27.w, l(0.000000)
+ iadd r26.w, r25.z, l(-1)
+ and r25.z, r25.z, r26.w
+ ine r26.w, r25.z, l(0)
+ and r25.w, r25.w, r26.w
+ breakc_z r25.w
+ firstbit_lo r25.w, r25.z
+ iadd r28.xy, r20.yzyy, r25.wwww
+ ld_structured_indexable(structured_buffer, stride=16)(mixed,mixed,mixed,mixed) r29.xyzw, r28.x, l(0), t9.xyzw
+ ld_structured_indexable(structured_buffer, stride=16)(mixed,mixed,mixed,mixed) r28.xyzw, r28.y, l(0), t9.xyzw
+ add r28.xyzw, -r29.xyzw, r28.xyzw
+ mad r28.xyzw, r17.xxxx, r28.xyzw, r29.xyzw
+ dp3 r25.w, r21.xyzx, r28.xyzx
+ add r28.w, r28.w, r25.w
+ lt r25.w, r27.w, r28.w
+ movc r27.xyzw, r25.wwww, r28.xyzw, r27.xyzw
+ endloop
+ lt r25.x, r27.w, l(0.000000)
+ mad r26.xyz, -r27.xyzx, r27.wwww, r24.xyzx
+ iadd r26.w, r24.w, l(1)
+ movc r24.xyzw, r25.xxxx, r26.xyzw, r24.xyzw
+ iadd r23.w, r23.w, l(1)
+ endloop
+ if_nz r24.w
+ utof r23.w, r24.w
+ div r23.w, l(1.000000, 1.000000, 1.000000, 1.000000), r23.w
+ if_nz r19.z
+ iadd r25.x, r0.x, r20.w
+ ld_structured_indexable(structured_buffer, stride=16)(mixed,mixed,mixed,mixed) r25.yzw, r25.x, l(0), u0.xxyz
+ ld_raw r26.x, l(80), g0.xxxx
+ dp3 r26.y, r24.xyzx, r24.xyzx
+ add r26.z, r26.y, l(0.000000)
+ rsq r26.z, r26.z
+ mul r28.xyz, r24.xyzx, r26.zzzz
+ add r29.xyz, r21.xyzx, -r25.yzwy
+ dp3 r26.w, r29.xyzx, r28.xyzx
+ mad r28.xyz, -r26.wwww, r28.xyzx, r29.xyzx
+ dp3 r26.w, r28.xyzx, r28.xyzx
+ add r26.w, r26.w, l(0.000000)
+ rsq r26.w, r26.w
+ mul r26.x, r26.y, -r26.x
+ mul r26.x, r26.z, r26.x
+ mul r26.x, r23.w, r26.x
+ mul r26.x, r26.w, r26.x
+ max r26.x, r26.x, l(-1.000000)
+ mad r25.yzw, -r28.xxyz, r26.xxxx, r25.yyzw
+ store_structured u0.xyz, r25.x, l(0), r25.yzwy
+ endif
+ mad r21.xyz, r24.xyzx, r23.wwww, r21.xyzx
+ store_structured g3.x, r20.w, l(0), r21.x
+ store_structured g3.x, r22.x, l(0), r21.y
+ store_structured g3.x, r22.y, l(0), r21.z
+ store_structured g3.x, r22.z, l(0), r21.w
+ endif
+ mov r20.w, r22.w
+ endloop
+ endif
+ ftoi r19.y, r17.x
+ ld_structured r20.x, r19.y, l(0), g3.xxxx
+ iadd r21.xyz, r19.yyyy, l(1975, 3950, 5925, 0)
+ ld_structured r20.y, r21.x, l(0), g3.xxxx
+ ld_structured r20.z, r21.y, l(0), g3.xxxx
+ ld_structured r19.z, r21.z, l(0), g3.xxxx
+ ld_raw r19.w, l(68), g0.xxxx
+ ld_raw r20.w, l(64), g1.xxxx
+ mov r22.xyz, l(0,0,0,0)
+ mov r25.xy, l(0,340282346638528860000000000000000000000.000000,0,0)
+ mov r21.w, l(0)
+ loop
+ uge r22.w, r21.w, r19.w
+ breakc_nz r22.w
+ imad r22.w, l(3), r21.w, r20.w
+ ld_structured_indexable(structured_buffer, stride=12)(mixed,mixed,mixed,mixed) r26.xyz, r22.w, l(0), t10.xyzx
+ iadd r25.zw, r22.wwww, l(0, 0, 1, 2)
+ ld_structured_indexable(structured_buffer, stride=12)(mixed,mixed,mixed,mixed) r28.xyz, r25.z, l(0), t10.xyzx
+ ld_structured_indexable(structured_buffer, stride=12)(mixed,mixed,mixed,mixed) r29.xyz, r25.w, l(0), t10.xyzx
+ add r28.xyz, -r26.xyzx, r28.xyzx
+ add r29.xyz, -r26.xyzx, r29.xyzx
+ mul r30.xyz, r28.zxyz, r29.yzxy
+ mad r30.xyz, r28.yzxy, r29.zxyz, -r30.xyzx
+ dp3 r22.w, r30.xyzx, r30.xyzx
+ rsq r22.w, r22.w
+ mul r30.xyz, r22.wwww, r30.xyzx
+ dp3 r22.w, r28.xyzx, r29.xyzx
+ dp3 r23.w, r28.xyzx, r28.xyzx
+ dp3 r25.z, r29.xyzx, r29.xyzx
+ mul r25.w, r22.w, r22.w
+ mad r25.w, r23.w, r25.z, -r25.w
+ div r25.w, l(1.000000, 1.000000, 1.000000, 1.000000), r25.w
+ add r26.w, r23.w, r25.z
+ mad r26.w, r22.w, l(-2.000000), r26.w
+ div r26.w, l(1.000000, 1.000000, 1.000000, 1.000000), r26.w
+ div r28.w, l(1.000000, 1.000000, 1.000000, 1.000000), r23.w
+ div r29.w, l(1.000000, 1.000000, 1.000000, 1.000000), r25.z
+ add r26.xyz, r20.xyzx, -r26.xyzx
+ dp3 r30.w, r26.xyzx, r28.xyzx
+ dp3 r31.x, r26.xyzx, r29.xyzx
+ dp3 r32.x, r26.xyzx, r30.xyzx
+ mul r31.y, r22.w, r31.x
+ mad r31.y, r25.z, r30.w, -r31.y
+ mul r31.z, r22.w, r30.w
+ mad r23.w, r23.w, r31.x, -r31.z
+ lt r31.z, l(0.000000), r23.w
+ mul r31.y, r25.w, r31.y
+ mul r28.w, r28.w, r30.w
+ movc r28.w, r31.z, r31.y, r28.w
+ lt r31.y, l(0.000000), r28.w
+ mul r23.w, r25.w, r23.w
+ mul r25.w, r29.w, r31.x
+ movc r23.w, r31.y, r23.w, r25.w
+ add r25.w, r23.w, r28.w
+ lt r25.w, l(1.000000), r25.w
+ add r22.w, -r22.w, r25.z
+ add r22.w, r30.w, r22.w
+ add r22.w, -r31.x, r22.w
+ mul r22.w, r26.w, r22.w
+ movc_sat r22.w, r25.w, r22.w, r28.w
+ add r25.z, -r22.w, l(1.000000)
+ min r23.w, r23.w, r25.z
+ max r23.w, r23.w, l(0.000000)
+ mul r29.xyz, r23.wwww, r29.xyzx
+ mad r28.xyz, r28.xyzx, r22.wwww, r29.xyzx
+ add r26.xyz, r26.xyzx, -r28.xyzx
+ dp3 r22.w, r26.xyzx, r26.xyzx
+ lt r23.w, r32.x, l(0.000000)
+ mul r25.z, r22.w, l(1.000100)
+ movc r32.y, r23.w, r25.z, r22.w
+ lt r22.w, r32.y, r25.y
+ movc r22.xyz, r22.wwww, r30.xyzx, r22.xyzx
+ movc r25.xy, r22.wwww, r32.xyxx, r25.xyxx
+ iadd r21.w, r21.w, l(1)
+ endloop
+ lt r19.w, r25.x, l(0.000000)
+ if_nz r19.w
+ mad r20.xyz, -r22.xyzx, r25.xxxx, r20.xyzx
+ store_structured g3.x, r19.y, l(0), r20.x
+ store_structured g3.x, r21.x, l(0), r20.y
+ store_structured g3.x, r21.y, l(0), r20.z
+ store_structured g3.x, r21.z, l(0), r19.z
+ endif
+ ld_raw r19.y, l(72), g0.xxxx
+ if_nz r19.y
+ ld_raw r19.y, l(80), g0.xxxx
+ lt r19.z, l(0.000000), r19.y
+ ld_raw r19.w, l(0), g0.xxxx
+ ld_raw r20.xy, l(48), g1.xyxx
+ ld_raw r20.zw, l(44), g0.xxxy
+ mov r21.x, vThreadIDInGroup.x
+ loop
+ uge r21.y, r21.x, r19.w
+ breakc_nz r21.y
+ ld_structured r26.x, r21.x, l(0), g3.xxxx
+ iadd r28.xyzw, r21.xxxx, l(1975, 3950, 5925, 1024)
+ ld_structured r26.y, r28.x, l(0), g3.xxxx
+ ld_structured r26.z, r28.y, l(0), g3.xxxx
+ ld_structured r21.y, r28.z, l(0), g3.xxxx
+ iadd r21.z, r0.x, r21.x
+ ld_structured_indexable(structured_buffer, stride=16)(mixed,mixed,mixed,mixed) r25.yzw, r21.z, l(0), u0.xxyz
+ mov r29.xyz, r26.xyzx
+ mov r30.xyz, l(0,0,0,0)
+ mov r31.xyzw, l(0,0,0,0)
+ mov r21.w, l(0)
+ loop
+ uge r22.w, r21.w, r20.z
+ breakc_nz r22.w
+ iadd r22.w, r20.w, r21.w
+ ld_structured_indexable(structured_buffer, stride=8)(mixed,mixed,mixed,mixed) r32.xy, r22.w, l(0), t6.xyxx
+ iadd r32.xyzw, r20.xyxy, r32.xxyy
+ ld_structured_indexable(structured_buffer, stride=16)(mixed,mixed,mixed,mixed) r33.xyzw, r32.x, l(0), t7.xyzw
+ ld_structured_indexable(structured_buffer, stride=16)(mixed,mixed,mixed,mixed) r34.xyzw, r32.y, l(0), t7.xyzw
+ ld_structured_indexable(structured_buffer, stride=16)(mixed,mixed,mixed,mixed) r35.xyzw, r32.z, l(0), t7.xyzw
+ ld_structured_indexable(structured_buffer, stride=16)(mixed,mixed,mixed,mixed) r32.xyzw, r32.w, l(0), t7.xyzw
+ add r36.xyzw, -r33.xyzw, r34.xyzw
+ mad r37.xyzw, r19.xxxx, r36.xyzw, r33.xyzw
+ add r38.xyzw, -r35.xyzw, r32.xyzw
+ mad r39.xyzw, r19.xxxx, r38.xyzw, r35.xyzw
+ max r37.w, r37.w, l(0.000000)
+ max r39.w, r39.w, l(0.000000)
+ add r40.xyzw, -r37.xyzw, r39.xyzw
+ mul r40.xyzw, r40.xyzw, l(0.500000, 0.500000, 0.500000, 0.500000)
+ dp3 r22.w, r40.xyzx, r40.xyzx
+ ge r23.w, l(0.000000), r22.w
+ if_nz r23.w
+ iadd r23.w, r21.w, l(1)
+ mov r21.w, r23.w
+ continue
+ endif
+ mad r36.xyzw, r17.xxxx, r36.xyzw, r33.xyzw
+ mad r38.xyzw, r17.xxxx, r38.xyzw, r35.xyzw
+ max r36.w, r36.w, l(0.000000)
+ max r38.w, r38.w, l(0.000000)
+ add r41.xyzw, -r36.xyzw, r38.xyzw
+ mul r42.xyzw, r41.xyzw, l(0.500000, 0.500000, 0.500000, 0.500000)
+ dp3 r23.w, r42.xyzx, r42.xyzx
+ mad r26.w, -r42.w, r42.w, r23.w
+ ge r29.w, l(0.000000), r26.w
+ if_nz r29.w
+ iadd r29.w, r21.w, l(1)
+ mov r21.w, r29.w
+ continue
+ endif
+ add r41.xyz, r37.zxyz, r39.zxyz
+ mad r41.xyz, -r41.xyzx, l(0.500000, 0.500000, 0.500000, 0.000000), r25.wyzw
+ mad r29.w, -r40.w, r40.w, r22.w
+ rsq r22.w, r22.w
+ rsq r29.w, r29.w
+ mul r40.xyz, r22.wwww, r40.xyzx
+ mul r22.w, r29.w, r40.w
+ mul r43.xyz, r40.yzxy, r41.xyzx
+ mad r41.xyz, r41.zxyz, r40.zxyz, -r43.xyzx
+ dp3 r29.w, r25.yzwy, r40.xyzx
+ add r40.xyz, r36.xyzx, r38.xyzx
+ mad r43.xyz, -r40.xyzx, l(0.500000, 0.500000, 0.500000, 0.000000), r29.xyzx
+ rsq r30.w, r23.w
+ rsq r26.w, r26.w
+ mul r23.w, r23.w, r30.w
+ mad r32.w, r41.w, l(0.500000), r36.w
+ mul r32.w, r26.w, r32.w
+ mul r32.w, r23.w, r32.w
+ mul r44.xyz, r30.wwww, r42.xyzx
+ mul r45.xyz, r43.zxyz, r44.yzxy
+ mad r45.xyz, r43.yzxy, r44.zxyz, -r45.xyzx
+ dp3 r33.w, r29.xyzx, r44.xyzx
+ dp3 r34.w, r45.xyzx, r45.xyzx
+ add r46.x, r34.w, l(0.000000)
+ mad r29.w, r29.w, r22.w, r32.w
+ max r29.w, r29.w, l(0.000000)
+ mul r26.w, r26.w, r42.w
+ mad r33.w, r33.w, r26.w, r32.w
+ max r46.y, r33.w, l(0.000000)
+ dp3 r33.w, r41.xyzx, r41.xyzx
+ mad r33.w, -r41.x, r41.x, r33.w
+ mad r33.w, -r29.w, r29.w, r33.w
+ dp3 r34.w, r41.xyzx, r45.xyzx
+ mad r34.w, -r29.w, r46.y, r34.w
+ mad r35.w, -r46.y, r46.y, r46.x
+ mul r36.w, r33.w, r35.w
+ mad r36.w, r34.w, r34.w, -r36.w
+ sqrt r37.w, r36.w
+ add r33.w, -r33.w, r34.w
+ add r34.w, r34.w, -r35.w
+ add r34.w, r33.w, r34.w
+ add r35.w, r37.w, r33.w
+ min r35.w, r35.w, l(0.000000)
+ div r35.w, r35.w, r34.w
+ lt r38.w, r35.w, l(1.000000)
+ lt r39.w, r33.w, r37.w
+ and r38.w, r38.w, r39.w
+ mul r33.w, r33.w, r34.w
+ add r39.w, -r29.w, r46.y
+ mad r29.w, r33.w, r39.w, r29.w
+ mul r29.w, r29.w, r29.w
+ mul r29.w, r34.w, r29.w
+ mul r29.w, r29.w, l(-0.360000)
+ lt r29.w, r29.w, r36.w
+ and r29.w, r29.w, r38.w
+ lt r33.w, r34.w, l(-0.000000)
+ and r29.w, r29.w, r33.w
+ add r41.xyz, r25.yzwy, -r29.xyzx
+ mad r45.xyz, -r41.xyzx, r35.wwww, r25.yzwy
+ mul r47.xyz, r23.wwww, r42.xyzx
+ add r37.xyz, -r37.xyzx, r39.xyzx
+ mul r37.xyz, r37.xyzx, l(0.500000, 0.500000, 0.500000, 0.000000)
+ dp3 r33.w, r37.xyzx, r37.xyzx
+ rsq r36.w, r33.w
+ mul r33.w, r33.w, r36.w
+ mad r37.xyz, -r37.xyzx, r33.wwww, r47.xyzx
+ add r33.w, -r35.w, l(1.000000)
+ mad r39.xyz, -r37.xyzx, r33.wwww, r47.xyzx
+ mul r35.w, r26.w, r35.w
+ mad r22.w, r22.w, r33.w, r35.w
+ dp3 r35.w, r39.xyzx, r39.xyzx
+ rsq r35.w, r35.w
+ dp3 r36.w, r45.xyzx, r39.xyzx
+ mul r36.w, r35.w, r36.w
+ dp3 r38.w, r45.xyzx, r45.xyzx
+ mad r38.w, -r36.w, r36.w, r38.w
+ lt r39.x, l(0.000000), r38.w
+ rsq r39.y, r38.w
+ and r39.x, r39.y, r39.x
+ mul r22.w, r22.w, r38.w
+ mad r22.w, r22.w, r39.x, r36.w
+ mul r22.w, r35.w, r22.w
+ lt r35.w, |r22.w|, l(1.000000)
+ mad r37.xyz, r37.xyzx, r22.wwww, r41.xyzx
+ mul r22.w, r33.w, r34.w
+ div r22.w, r37.w, r22.w
+ add r22.w, -r22.w, l(1.000000)
+ div r22.w, r33.w, r22.w
+ mad r37.xyz, r37.xyzx, r22.wwww, r43.xyzx
+ mad r39.xyz, r40.xyzx, l(0.500000, 0.500000, 0.500000, 0.000000), r37.xyzx
+ movc r39.xyz, r35.wwww, r39.xyzx, r29.xyzx
+ movc r39.xyz, r29.wwww, r39.xyzx, r29.xyzx
+ mad r40.xyz, -r40.xyzx, l(0.500000, 0.500000, 0.500000, 0.000000), r39.xyzx
+ dp3 r22.w, r40.xyzx, r44.xyzx
+ mad r33.w, r22.w, r26.w, r32.w
+ max r33.w, r33.w, l(0.000000)
+ dp3 r34.w, r40.xyzx, r40.xyzx
+ mad r34.w, -r22.w, r22.w, r34.w
+ mul r36.w, r33.w, r33.w
+ lt r36.w, r36.w, r34.w
+ if_nz r36.w
+ iadd r36.w, r21.w, l(1)
+ mov r29.xyz, r39.xyzx
+ mov r21.w, r36.w
+ continue
+ endif
+ mul r30.w, r30.w, r42.w
+ mad r30.w, -r30.w, r30.w, l(1.000000)
+ max r34.w, r34.w, l(0.000000)
+ rsq r36.w, r34.w
+ mul r34.w, r26.w, r34.w
+ mad r34.w, r34.w, r36.w, r22.w
+ lt r23.w, |r34.w|, r23.w
+ mad r40.xyz, -r34.wwww, r42.xyzx, r40.xyzx
+ mul r33.w, r33.w, r36.w
+ mad r30.w, r33.w, r30.w, -r30.w
+ mad r40.xyz, r40.xyzx, r30.wwww, r30.xyzx
+ add r34.xyz, -r33.xyzx, r34.xyzx
+ mad r33.xyz, r19.xxxx, r34.xyzx, r33.xyzx
+ add r32.xyz, -r35.xyzx, r32.xyzx
+ mad r32.xyz, r19.xxxx, r32.xyzx, r35.xyzx
+ mad r22.w, r22.w, l(0.500000), l(0.500000)
+ add r33.xyz, -r33.xyzx, r36.xyzx
+ add r32.xyz, -r32.xyzx, r38.xyzx
+ add r32.xyz, -r33.xyzx, r32.xyzx
+ mad r32.xyz, r22.wwww, r32.xyzx, r33.xyzx
+ add r32.xyz, r31.xyzx, r32.xyzx
+ movc r33.xyz, r19.zzzz, r32.xyzx, r31.xyzx
+ iadd r33.w, r31.w, l(1)
+ movc r32.xyz, r23.wwww, r40.xyzx, r30.xyzx
+ movc r33.xyzw, r23.wwww, r33.xyzw, r31.xyzw
+ dp3 r22.w, r37.xyzx, r42.xyzx
+ mad r23.w, r22.w, r26.w, r32.w
+ max r34.y, r23.w, l(0.000000)
+ dp3 r23.w, r37.xyzx, r37.xyzx
+ mad r34.x, -r22.w, r22.w, r23.w
+ movc r34.xy, r35.wwww, r34.xyxx, r46.xyxx
+ movc r34.xy, r29.wwww, r34.xyxx, r46.xyxx
+ mul r22.w, r34.y, r34.y
+ ge r22.w, r34.x, r22.w
+ if_nz r22.w
+ iadd r22.w, r21.w, l(1)
+ mov r29.xyz, r39.xyzx
+ mov r30.xyz, r32.xyzx
+ mov r31.xyzw, r33.xyzw
+ mov r21.w, r22.w
+ continue
+ endif
+ mov r29.xyz, r39.xyzx
+ mov r30.xyz, r32.xyzx
+ mov r31.xyzw, r33.xyzw
+ iadd r21.w, r21.w, l(1)
+ endloop
+ ld_raw r21.w, l(52), g0.xxxx
+ mov r32.xyz, r29.xyzx
+ mov r33.xyz, r30.xyzx
+ mov r34.xyzw, r31.xyzw
+ mov r22.w, l(0)
+ loop
+ uge r23.w, r22.w, r21.w
+ breakc_nz r23.w
+ iadd r35.xy, r20.xyxx, r22.wwww
+ ld_structured_indexable(structured_buffer, stride=16)(mixed,mixed,mixed,mixed) r36.xyzw, r35.x, l(0), t7.xyzw
+ ld_structured_indexable(structured_buffer, stride=16)(mixed,mixed,mixed,mixed) r35.xyzw, r35.y, l(0), t7.xyzw
+ add r35.xyzw, -r36.xyzw, r35.xyzw
+ mad r37.xyzw, r17.xxxx, r35.xyzw, r36.xyzw
+ max r23.w, r37.w, l(0.000000)
+ mad r35.xyz, r19.xxxx, r35.xyzx, r36.xyzx
+ add r36.xyz, r32.xyzx, -r37.xyzx
+ add r38.xyz, r25.yzwy, -r35.xyzx
+ dp3 r26.w, r36.xyzx, r36.xyzx
+ add r36.w, r26.w, l(0.000000)
+ dp3 r26.w, r38.xyzx, r38.xyzx
+ mul r29.w, r23.w, r23.w
+ mad r26.w, -r23.w, r23.w, r26.w
+ dp3 r30.w, r38.xyzx, r36.xyzx
+ mad r30.w, -r23.w, r23.w, r30.w
+ mad r32.w, -r23.w, r23.w, r36.w
+ mul r33.w, r26.w, r32.w
+ mad r33.w, r30.w, r30.w, -r33.w
+ sqrt r35.w, r33.w
+ add r26.w, -r26.w, r30.w
+ add r30.w, r30.w, -r32.w
+ add r30.w, r26.w, r30.w
+ add r32.w, r35.w, r26.w
+ min r32.w, r32.w, l(0.000000)
+ div r32.w, r32.w, r30.w
+ lt r37.w, r32.w, l(1.000000)
+ lt r26.w, r26.w, r35.w
+ and r26.w, r26.w, r37.w
+ mul r29.w, r29.w, r30.w
+ mul r29.w, r29.w, l(-0.360000)
+ lt r29.w, r29.w, r33.w
+ and r26.w, r26.w, r29.w
+ lt r29.w, r30.w, l(-0.000000)
+ and r26.w, r26.w, r29.w
+ add r38.xyz, -r36.xyzx, r38.xyzx
+ add r29.w, -r32.w, l(1.000000)
+ mul r30.w, r29.w, r30.w
+ div r30.w, r35.w, r30.w
+ add r30.w, -r30.w, l(1.000000)
+ div r29.w, r29.w, r30.w
+ mad r38.xyz, r38.xyzx, r29.wwww, r36.xyzx
+ add r39.xyz, r37.xyzx, r38.xyzx
+ dp3 r29.w, r38.xyzx, r38.xyzx
+ add r38.w, r29.w, l(0.000000)
+ movc r32.xyz, r26.wwww, r39.xyzx, r32.xyzx
+ movc r36.xyzw, r26.wwww, r38.xyzw, r36.xyzw
+ rsq r26.w, r36.w
+ mul r29.w, r23.w, r26.w
+ lt r29.w, l(1.000000), r29.w
+ mad r23.w, r26.w, r23.w, l(-1.000000)
+ mad r36.xyz, r36.xyzx, r23.wwww, r33.xyzx
+ add r35.xyz, -r35.xyzx, r37.xyzx
+ add r35.xyz, r34.xyzx, r35.xyzx
+ movc r35.xyz, r19.zzzz, r35.xyzx, r34.xyzx
+ iadd r35.w, r34.w, l(1)
+ movc r33.xyz, r29.wwww, r36.xyzx, r33.xyzx
+ movc r34.xyzw, r29.wwww, r35.xyzw, r34.xyzw
+ iadd r22.w, r22.w, l(1)
+ endloop
+ if_nz r34.w
+ utof r21.w, r34.w
+ div r21.w, l(1.000000, 1.000000, 1.000000, 1.000000), r21.w
+ if_nz r19.z
+ ld_raw r22.w, l(80), g0.xxxx
+ dp3 r23.w, r33.xyzx, r33.xyzx
+ add r26.w, r23.w, l(0.000000)
+ rsq r26.w, r26.w
+ mul r32.xyz, r26.wwww, r33.xyzx
+ add r35.xyz, -r25.yzwy, r26.xyzx
+ mad r35.xyz, -r34.xyzx, r21.wwww, r35.xyzx
+ dp3 r29.w, r35.xyzx, r32.xyzx
+ mad r32.xyz, -r29.wwww, r32.xyzx, r35.xyzx
+ dp3 r29.w, r32.xyzx, r32.xyzx
+ add r29.w, r29.w, l(0.000000)
+ rsq r29.w, r29.w
+ mul r22.w, -r22.w, r23.w
+ mul r22.w, r26.w, r22.w
+ mul r22.w, r21.w, r22.w
+ mul r22.w, r29.w, r22.w
+ max r22.w, r22.w, l(-1.000000)
+ mad r25.yzw, -r32.xxyz, r22.wwww, r25.yyzw
+ store_structured u0.xyz, r21.z, l(0), r25.yzwy
+ endif
+ mad r25.yzw, r33.xxyz, r21.wwww, r26.xxyz
+ store_structured g3.x, r21.x, l(0), r25.y
+ store_structured g3.x, r28.x, l(0), r25.z
+ store_structured g3.x, r28.y, l(0), r25.w
+ store_structured g3.x, r28.z, l(0), r21.y
+ endif
+ mov r21.x, r28.w
+ endloop
+ else
+ ld_raw r19.yz, l(76), g0.xxyx
+ lt r19.yz, l(0.000000, 0.000000, 0.000000, 0.000000), r19.zzyz
+ ld_raw r19.w, l(0), g0.xxxx
+ ld_raw r20.xy, l(48), g1.xyxx
+ ld_raw r20.z, l(80), g0.xxxx
+ lt r20.w, l(0.000000), r20.z
+ ld_raw r21.xy, l(44), g0.xyxx
+ mov r21.z, vThreadIDInGroup.x
+ loop
+ uge r21.w, r21.z, r19.w
+ breakc_nz r21.w
+ ld_structured r26.x, r21.z, l(0), g3.xxxx
+ iadd r28.xyzw, r21.zzzz, l(1975, 3950, 5925, 1024)
+ ld_structured r26.y, r28.x, l(0), g3.xxxx
+ ld_structured r26.z, r28.y, l(0), g3.xxxx
+ ld_structured r21.w, r28.z, l(0), g3.xxxx
+ mov r25.yzw, l(0,0,0,0)
+ mov r32.xyzw, l(0,0,0,0)
+ mov r22.w, l(0)
+ loop
+ uge r23.w, r22.w, r21.x
+ breakc_nz r23.w
+ iadd r23.w, r21.y, r22.w
+ ld_structured_indexable(structured_buffer, stride=8)(mixed,mixed,mixed,mixed) r35.xy, r23.w, l(0), t6.xyxx
+ iadd r35.xyzw, r20.xyxy, r35.xxyy
+ ld_structured_indexable(structured_buffer, stride=16)(mixed,mixed,mixed,mixed) r36.xyzw, r35.x, l(0), t7.xyzw
+ ld_structured_indexable(structured_buffer, stride=16)(mixed,mixed,mixed,mixed) r37.xyzw, r35.y, l(0), t7.xyzw
+ add r38.xyzw, -r36.xyzw, r37.xyzw
+ mad r38.xyzw, r17.xxxx, r38.xyzw, r36.xyzw
+ ld_structured_indexable(structured_buffer, stride=16)(mixed,mixed,mixed,mixed) r39.xyzw, r35.z, l(0), t7.xyzw
+ ld_structured_indexable(structured_buffer, stride=16)(mixed,mixed,mixed,mixed) r35.xyzw, r35.w, l(0), t7.xyzw
+ add r40.xyzw, -r39.xyzw, r35.xyzw
+ mad r40.xyzw, r17.xxxx, r40.xyzw, r39.xyzw
+ max r38.w, r38.w, l(0.000000)
+ max r40.w, r40.w, l(0.000000)
+ add r41.xyzw, -r38.xyzw, r40.xyzw
+ mul r42.xyzw, r41.xyzw, l(0.500000, 0.500000, 0.500000, 0.500000)
+ dp3 r23.w, r42.xyzx, r42.xyzx
+ mad r26.w, -r42.w, r42.w, r23.w
+ ge r29.w, l(0.000000), r26.w
+ if_nz r29.w
+ iadd r29.w, r22.w, l(1)
+ mov r22.w, r29.w
+ continue
+ endif
+ rsq r29.w, r23.w
+ rsq r26.w, r26.w
+ mul r23.w, r23.w, r29.w
+ add r41.xyz, r38.xyzx, r40.xyzx
+ mad r30.w, r41.w, l(0.500000), r38.w
+ mul r30.w, r26.w, r30.w
+ mul r30.w, r23.w, r30.w
+ mul r42.xyz, r29.wwww, r42.xyzx
+ mul r26.w, r26.w, r42.w
+ mad r41.xyz, -r41.xyzx, l(0.500000, 0.500000, 0.500000, 0.000000), r26.xyzx
+ dp3 r33.w, r41.xyzx, r42.xyzx
+ mad r30.w, r33.w, r26.w, r30.w
+ max r30.w, r30.w, l(0.000000)
+ dp3 r35.w, r41.xyzx, r41.xyzx
+ mad r35.w, -r33.w, r33.w, r35.w
+ mul r36.w, r30.w, r30.w
+ lt r36.w, r36.w, r35.w
+ if_nz r36.w
+ iadd r36.w, r22.w, l(1)
+ mov r22.w, r36.w
+ continue
+ endif
+ mul r29.w, r29.w, r42.w
+ mad r29.w, -r29.w, r29.w, l(1.000000)
+ max r35.w, r35.w, l(0.000000)
+ rsq r36.w, r35.w
+ mul r26.w, r26.w, r35.w
+ mad r26.w, r26.w, r36.w, r33.w
+ lt r23.w, |r26.w|, r23.w
+ mad r41.xyz, -r26.wwww, r42.xyzx, r41.xyzx
+ mul r26.w, r30.w, r36.w
+ mad r26.w, r26.w, r29.w, -r29.w
+ mad r41.xyz, r41.xyzx, r26.wwww, r25.yzwy
+ add r37.xyz, -r36.xyzx, r37.xyzx
+ mad r36.xyz, r19.xxxx, r37.xyzx, r36.xyzx
+ add r35.xyz, -r39.xyzx, r35.xyzx
+ mad r35.xyz, r19.xxxx, r35.xyzx, r39.xyzx
+ mad r26.w, r33.w, l(0.500000), l(0.500000)
+ add r36.xyz, -r36.xyzx, r38.xyzx
+ add r35.xyz, -r35.xyzx, r40.xyzx
+ add r35.xyz, -r36.xyzx, r35.xyzx
+ mad r35.xyz, r26.wwww, r35.xyzx, r36.xyzx
+ add r35.xyz, r32.xyzx, r35.xyzx
+ movc r35.xyz, r20.wwww, r35.xyzx, r32.xyzx
+ iadd r35.w, r32.w, l(1)
+ movc r25.yzw, r23.wwww, r41.xxyz, r25.yyzw
+ movc r32.xyzw, r23.wwww, r35.xyzw, r32.xyzw
+ iadd r22.w, r22.w, l(1)
+ endloop
+ ld_raw r22.w, l(52), g0.xxxx
+ mov r35.xyz, r25.yzwy
+ mov r36.xyzw, r32.xyzw
+ mov r23.w, l(0)
+ loop
+ uge r26.w, r23.w, r22.w
+ breakc_nz r26.w
+ iadd r37.xy, r20.xyxx, r23.wwww
+ ld_structured_indexable(structured_buffer, stride=16)(mixed,mixed,mixed,mixed) r38.xyzw, r37.x, l(0), t7.xyzw
+ ld_structured_indexable(structured_buffer, stride=16)(mixed,mixed,mixed,mixed) r37.xyzw, r37.y, l(0), t7.xyzw
+ add r37.xyzw, -r38.xyzw, r37.xyzw
+ mad r39.xyzw, r17.xxxx, r37.xyzw, r38.xyzw
+ max r26.w, r39.w, l(0.000000)
+ add r40.xyz, r26.xyzx, -r39.xyzx
+ dp3 r29.w, r40.xyzx, r40.xyzx
+ add r29.w, r29.w, l(0.000000)
+ rsq r29.w, r29.w
+ mul r30.w, r26.w, r29.w
+ lt r30.w, l(1.000000), r30.w
+ mad r26.w, r29.w, r26.w, l(-1.000000)
+ mad r40.xyz, r40.xyzx, r26.wwww, r35.xyzx
+ mad r37.xyz, r19.xxxx, r37.xyzx, r38.xyzx
+ add r37.xyz, -r37.xyzx, r39.xyzx
+ add r37.xyz, r36.xyzx, r37.xyzx
+ movc r37.xyz, r20.wwww, r37.xyzx, r36.xyzx
+ iadd r37.w, r36.w, l(1)
+ movc r35.xyz, r30.wwww, r40.xyzx, r35.xyzx
+ movc r36.xyzw, r30.wwww, r37.xyzw, r36.xyzw
+ iadd r23.w, r23.w, l(1)
+ endloop
+ if_nz r36.w
+ utof r22.w, r36.w
+ div r22.w, l(1.000000, 1.000000, 1.000000, 1.000000), r22.w
+ if_nz r19.y
+ iadd r23.w, r0.x, r21.z
+ ld_structured_indexable(structured_buffer, stride=16)(mixed,mixed,mixed,mixed) r37.xyz, r23.w, l(0), u0.xyzx
+ ld_raw r26.w, l(80), g0.xxxx
+ dp3 r29.w, r35.xyzx, r35.xyzx
+ add r30.w, r29.w, l(0.000000)
+ rsq r30.w, r30.w
+ mul r38.xyz, r30.wwww, r35.xyzx
+ add r39.xyz, r26.xyzx, -r37.xyzx
+ mad r39.xyz, -r36.xyzx, r22.wwww, r39.xyzx
+ dp3 r33.w, r39.xyzx, r38.xyzx
+ mad r38.xyz, -r33.wwww, r38.xyzx, r39.xyzx
+ dp3 r33.w, r38.xyzx, r38.xyzx
+ add r33.w, r33.w, l(0.000000)
+ rsq r33.w, r33.w
+ mul r26.w, -r26.w, r29.w
+ mul r26.w, r30.w, r26.w
+ mul r26.w, r22.w, r26.w
+ mul r26.w, r33.w, r26.w
+ max r26.w, r26.w, l(-1.000000)
+ mad r37.xyz, -r38.xyzx, r26.wwww, r37.xyzx
+ store_structured u0.xyz, r23.w, l(0), r37.xyzx
+ endif
+ mad r26.xyz, r35.xyzx, r22.wwww, r26.xyzx
+ dp3 r22.w, r35.xyzx, r35.xyzx
+ ld_raw r23.w, l(76), g0.xxxx
+ mad r22.w, r23.w, r22.w, l(1.000000)
+ div r22.w, r21.w, r22.w
+ movc r21.w, r19.z, r22.w, r21.w
+ store_structured g3.x, r21.z, l(0), r26.x
+ store_structured g3.x, r28.x, l(0), r26.y
+ store_structured g3.x, r28.y, l(0), r26.z
+ store_structured g3.x, r28.z, l(0), r21.w
+ endif
+ mov r21.z, r28.w
+ endloop
+ endif
+ ld_raw r17.x, l(84), g0.xxxx
+ ld_raw r19.x, l(72), g1.xxxx
+ min r17.x, r17.x, r19.x
+ lt r17.x, l(0.000000), r17.x
+ if_nz r17.x
+ ld_raw r17.x, l(88), g0.xxxx
+ ld_raw r19.x, l(0), g0.xxxx
+ if_nz r1.z
+ ld_raw r19.yz, r4.z, g1.xxyx
+ add r19.w, r19.z, r19.y
+ mad r19.yz, r19.wwww, l(0.000000, 0.000100, 0.000100, 0.000000), r19.zzyz
+ add r19.z, r19.y, r19.z
+ store_structured g6.x, vThreadIDInGroup.x, l(0), r19.z
+ else
+ mov r19.yz, l(0,0,0,0)
+ endif
+ sync_g_t
+ if_nz r1.z
+ ld_structured r19.w, l(0), l(0), g6.xxxx
+ ld_structured r20.x, l(1), l(0), g6.xxxx
+ lt r19.w, r20.x, r19.w
+ and r20.x, r19.w, l(1)
+ ld_structured r20.x, r20.x, l(0), g6.xxxx
+ ld_structured r20.y, l(2), l(0), g6.xxxx
+ lt r20.x, r20.y, r20.x
+ movc r19.w, r20.x, l(-2), r19.w
+ iadd r19.w, r19.w, vThreadIDInGroup.x
+ ushr r20.x, r19.w, l(30)
+ iadd r19.w, r19.w, r20.x
+ div r19.z, l(127.000000), r19.z
+ ld_raw r20.x, l(84), g0.xxxx
+ div r20.x, l(1.000000, 1.000000, 1.000000, 1.000000), r20.x
+ min r19.z, r19.z, r20.x
+ ishl r19.w, r19.w, l(2)
+ iadd r20.xy, r19.wwww, l(12, 24, 0, 0)
+ store_raw g5.x, r20.x, r19.z
+ mul r19.y, r19.y, r19.z
+ store_raw g5.x, r19.w, r19.y
+ store_raw g5.x, r20.y, vThreadIDInGroup.x
+ endif
+ sync_g_t
+ ld_raw r19.y, l(100), g0.xxxx
+ ishl r19.z, r17.x, l(1)
+ iadd r19.y, r19.z, r19.y
+ ld_raw r19.z, l(116), g1.xxxx
+ if_nz r19.z
+ mov r19.z, vThreadIDInGroup.x
+ loop
+ ige r19.w, r19.z, l(0x00004103)
+ breakc_nz r19.w
+ iadd r19.w, r19.z, r19.y
+ store_structured u2.x, r19.w, l(0), l(-1)
+ iadd r19.z, r19.z, l(1024)
+ endloop
+ endif
+ ld_raw r19.zw, l(16), g5.xxxy
+ ld_raw r20.xy, l(4), g5.xyxx
+ ld_raw r20.zw, l(28), g5.xxxy
+ ld_raw r21.x, l(92), g0.xxxx
+ ine r21.y, r21.x, l(-1)
+ ld_raw r21.z, l(100), g0.xxxx
+ mov r21.w, vThreadIDInGroup.x
+ loop
+ ige r22.w, r21.w, r17.x
+ breakc_nz r22.w
+ iadd r22.w, r21.w, r21.x
+ ld_structured_indexable(structured_buffer, stride=4)(mixed,mixed,mixed,mixed) r22.w, r22.w, l(0), t15.xxxx
+ movc r22.w, r21.y, r22.w, r21.w
+ ld_structured r26.x, r22.w, l(0), g3.xxxx
+ iadd r28.xyz, r22.wwww, l(1975, 3950, 5925, 0)
+ ld_structured r26.y, r28.x, l(0), g3.xxxx
+ ld_structured r26.z, r28.y, l(0), g3.xxxx
+ ld_structured r26.w, r28.z, l(0), g3.xxxx
+ dp4 r23.w, r26.xyzw, icb[r20.z + 0].xyzw
+ mad r23.w, r23.w, r19.z, r20.x
+ min r23.w, r23.w, l(127.500000)
+ max r23.w, r23.w, l(0.000000)
+ ftoi r23.w, r23.w
+ dp4 r26.x, r26.xyzw, icb[r20.w + 0].xyzw
+ mad r26.x, r26.x, r19.w, r20.y
+ min r26.x, r26.x, l(127.500000)
+ max r26.x, r26.x, l(0.000000)
+ ftoi r26.x, r26.x
+ iadd r26.y, r21.w, r21.z
+ ishl r26.x, r26.x, l(23)
+ imad r23.w, r23.w, l(0x00010000), r26.x
+ iadd r23.w, r23.w, l(0x00810000)
+ or r22.w, r22.w, r23.w
+ store_structured u2.x, r26.y, l(0), r22.w
+ iadd r21.w, r21.w, l(1024)
+ endloop
+ sync_g_t
+ ld_raw r21.xy, l(96), g0.xyxx
+ iadd r21.z, r17.x, r21.y
+ mov r26.x, vThreadIDInGroup.x
+ loop
+ ige r21.w, r26.x, r19.x
+ breakc_nz r21.w
+ iadd r21.w, r21.x, r26.x
+ ld_structured r28.x, r26.x, l(0), g3.xxxx
+ iadd r26.xyzw, r26.xxxx, l(1024, 1975, 3950, 5925)
+ ld_structured r28.y, r26.y, l(0), g3.xxxx
+ ld_structured r28.z, r26.z, l(0), g3.xxxx
+ ld_structured r28.w, r26.w, l(0), g3.xxxx
+ store_structured u1.xyzw, r21.w, l(0), r28.xyzw
+ endloop
+ sync_g_t
+ iadd r21.x, r17.x, l(31)
+ ubfe r21.w, l(5), l(5), r21.x
+ ushr r21.x, r21.x, l(10)
+ ult r22.w, r4.y, r21.w
+ and r23.w, r22.w, l(1)
+ iadd r23.w, r21.x, r23.w
+ movc r26.x, r22.w, l(0), l(1)
+ imul null, r26.x, r21.w, r26.x
+ imad r26.x, r4.y, r23.w, r26.x
+ ishl r26.y, r26.x, l(5)
+ ishl r23.w, r23.w, l(5)
+ iadd r23.w, r23.w, r26.y
+ umin r23.w, r17.x, r23.w
+ bfi r26.x, l(27), l(5), r26.x, vThreadIDInGroup.x
+ mov r26.y, l(16)
+ mov r26.zw, r21.yyyz
+ loop
+ uge r28.x, r26.y, l(32)
+ breakc_nz r28.x
+ mov r28.xy, l(0,0,0,0)
+ mov r28.z, r26.x
+ loop
+ uge r28.w, r28.y, r21.x
+ breakc_nz r28.w
+ ult r28.w, r28.z, r23.w
+ iadd r29.w, r26.z, r28.z
+ ld_structured_indexable(structured_buffer, stride=4)(mixed,mixed,mixed,mixed) r29.w, r29.w, l(0), u2.xxxx
+ movc r28.w, r28.w, r29.w, l(-1)
+ ushr r28.w, r28.w, r26.y
+ and r37.xyzw, r28.wwww, l(1, 2, 4, 8)
+ ine r37.xyzw, r37.xyzw, l(0, 0, 0, 0)
+ bfi r37.xyzw, l(1, 1, 1, 1), vThreadIDInGroup.xxxx, r37.xyzw, l(0, 0, 0, 0)
+ store_structured g3.x, vThreadIDInGroup.x, l(0), r37.x
+ store_structured g3.x, r12.x, l(0), r37.y
+ store_structured g3.x, r12.y, l(0), r37.z
+ store_structured g3.x, r12.z, l(0), r37.w
+ sync_g_t
+ if_nz r7.x
+ ld_structured r37.x, vThreadIDInGroup.x, l(0), g3.xxxx
+ ld_structured r37.y, r12.x, l(0), g3.xxxx
+ ld_structured r37.z, r12.y, l(0), g3.xxxx
+ ld_structured r37.w, r12.z, l(0), g3.xxxx
+ ld_structured r38.x, r3.x, l(0), g3.xxxx
+ ld_structured r38.y, r6.y, l(0), g3.xxxx
+ ld_structured r38.z, r9.x, l(0), g3.xxxx
+ ld_structured r38.w, r6.z, l(0), g3.xxxx
+ or r37.xyzw, r37.xyzw, r38.xyzw
+ store_structured g3.x, vThreadIDInGroup.x, l(0), r37.x
+ store_structured g3.x, r12.x, l(0), r37.y
+ store_structured g3.x, r12.y, l(0), r37.z
+ store_structured g3.x, r12.z, l(0), r37.w
+ endif
+ sync_g_t
+ if_nz r7.y
+ ld_structured r37.x, vThreadIDInGroup.x, l(0), g3.xxxx
+ ld_structured r37.y, r12.x, l(0), g3.xxxx
+ ld_structured r37.z, r12.y, l(0), g3.xxxx
+ ld_structured r37.w, r12.z, l(0), g3.xxxx
+ ld_structured r38.x, r3.y, l(0), g3.xxxx
+ ld_structured r38.y, r6.w, l(0), g3.xxxx
+ ld_structured r38.z, r9.y, l(0), g3.xxxx
+ ld_structured r38.w, r8.x, l(0), g3.xxxx
+ or r37.xyzw, r37.xyzw, r38.xyzw
+ store_structured g3.x, vThreadIDInGroup.x, l(0), r37.x
+ store_structured g3.x, r12.x, l(0), r37.y
+ store_structured g3.x, r12.y, l(0), r37.z
+ store_structured g3.x, r12.z, l(0), r37.w
+ endif
+ sync_g_t
+ if_nz r7.z
+ ld_structured r37.x, vThreadIDInGroup.x, l(0), g3.xxxx
+ ld_structured r37.y, r12.x, l(0), g3.xxxx
+ ld_structured r37.z, r12.y, l(0), g3.xxxx
+ ld_structured r37.w, r12.z, l(0), g3.xxxx
+ ld_structured r38.x, r3.z, l(0), g3.xxxx
+ ld_structured r38.y, r8.y, l(0), g3.xxxx
+ ld_structured r38.z, r9.z, l(0), g3.xxxx
+ ld_structured r38.w, r8.z, l(0), g3.xxxx
+ or r37.xyzw, r37.xyzw, r38.xyzw
+ store_structured g3.x, vThreadIDInGroup.x, l(0), r37.x
+ store_structured g3.x, r12.x, l(0), r37.y
+ store_structured g3.x, r12.y, l(0), r37.z
+ store_structured g3.x, r12.z, l(0), r37.w
+ endif
+ sync_g_t
+ if_nz r7.w
+ ld_structured r37.x, vThreadIDInGroup.x, l(0), g3.xxxx
+ ld_structured r37.y, r12.x, l(0), g3.xxxx
+ ld_structured r37.z, r12.y, l(0), g3.xxxx
+ ld_structured r37.w, r12.z, l(0), g3.xxxx
+ ld_structured r38.x, r3.w, l(0), g3.xxxx
+ ld_structured r38.y, r8.w, l(0), g3.xxxx
+ ld_structured r38.z, r9.w, l(0), g3.xxxx
+ ld_structured r38.w, r13.x, l(0), g3.xxxx
+ or r37.xyzw, r37.xyzw, r38.xyzw
+ store_structured g3.x, vThreadIDInGroup.x, l(0), r37.x
+ store_structured g3.x, r12.x, l(0), r37.y
+ store_structured g3.x, r12.y, l(0), r37.z
+ store_structured g3.x, r12.z, l(0), r37.w
+ endif
+ sync_g_t
+ if_nz r4.w
+ ld_structured r37.x, vThreadIDInGroup.x, l(0), g3.xxxx
+ ld_structured r37.y, r12.x, l(0), g3.xxxx
+ ld_structured r37.z, r12.y, l(0), g3.xxxx
+ ld_structured r37.w, r12.z, l(0), g3.xxxx
+ ld_structured r38.x, r6.x, l(0), g3.xxxx
+ ld_structured r38.y, r13.y, l(0), g3.xxxx
+ ld_structured r38.z, r14.x, l(0), g3.xxxx
+ ld_structured r38.w, r13.z, l(0), g3.xxxx
+ or r37.xyzw, r37.xyzw, r38.xyzw
+ store_structured g3.x, vThreadIDInGroup.x, l(0), r37.x
+ store_structured g3.x, r12.x, l(0), r37.y
+ store_structured g3.x, r12.y, l(0), r37.z
+ store_structured g3.x, r12.z, l(0), r37.w
+ endif
+ sync_g_t
+ ld_structured r28.w, r5.z, l(0), g3.xxxx
+ ld_structured r29.w, r10.x, l(0), g3.xxxx
+ ld_structured r30.w, r10.y, l(0), g3.xxxx
+ ld_structured r33.w, r10.z, l(0), g3.xxxx
+ xor r28.w, r2.z, r28.w
+ xor r29.w, r2.w, r29.w
+ and r28.w, r28.w, r29.w
+ xor r29.w, r5.x, r30.w
+ and r28.w, r28.w, r29.w
+ xor r29.w, r5.y, r33.w
+ and r28.w, r28.w, r29.w
+ countbits r28.w, r28.w
+ iadd r28.x, r28.w, r28.x
+ iadd r28.yz, r28.yyzy, l(0, 1, 32, 0)
+ endloop
+ if_nz r21.w
+ iadd r28.y, r26.z, r28.z
+ ld_structured_indexable(structured_buffer, stride=4)(mixed,mixed,mixed,mixed) r28.y, r28.y, l(0), u2.xxxx
+ if_nz r22.w
+ ult r28.w, r28.z, r23.w
+ movc r28.y, r28.w, r28.y, l(-1)
+ ushr r28.y, r28.y, r26.y
+ and r37.xyzw, r28.yyyy, l(1, 2, 4, 8)
+ ine r37.xyzw, r37.xyzw, l(0, 0, 0, 0)
+ bfi r37.xyzw, l(1, 1, 1, 1), vThreadIDInGroup.xxxx, r37.xyzw, l(0, 0, 0, 0)
+ store_structured g3.x, vThreadIDInGroup.x, l(0), r37.x
+ store_structured g3.x, r12.x, l(0), r37.y
+ store_structured g3.x, r12.y, l(0), r37.z
+ store_structured g3.x, r12.z, l(0), r37.w
+ endif
+ sync_g_t
+ if_nz r22.w
+ if_nz r7.x
+ ld_structured r37.x, vThreadIDInGroup.x, l(0), g3.xxxx
+ ld_structured r37.y, r12.x, l(0), g3.xxxx
+ ld_structured r37.z, r12.y, l(0), g3.xxxx
+ ld_structured r37.w, r12.z, l(0), g3.xxxx
+ ld_structured r38.x, r3.x, l(0), g3.xxxx
+ ld_structured r38.y, r6.y, l(0), g3.xxxx
+ ld_structured r38.z, r9.x, l(0), g3.xxxx
+ ld_structured r38.w, r6.z, l(0), g3.xxxx
+ or r37.xyzw, r37.xyzw, r38.xyzw
+ store_structured g3.x, vThreadIDInGroup.x, l(0), r37.x
+ store_structured g3.x, r12.x, l(0), r37.y
+ store_structured g3.x, r12.y, l(0), r37.z
+ store_structured g3.x, r12.z, l(0), r37.w
+ endif
+ endif
+ sync_g_t
+ if_nz r22.w
+ if_nz r7.y
+ ld_structured r37.x, vThreadIDInGroup.x, l(0), g3.xxxx
+ ld_structured r37.y, r12.x, l(0), g3.xxxx
+ ld_structured r37.z, r12.y, l(0), g3.xxxx
+ ld_structured r37.w, r12.z, l(0), g3.xxxx
+ ld_structured r38.x, r3.y, l(0), g3.xxxx
+ ld_structured r38.y, r6.w, l(0), g3.xxxx
+ ld_structured r38.z, r9.y, l(0), g3.xxxx
+ ld_structured r38.w, r8.x, l(0), g3.xxxx
+ or r37.xyzw, r37.xyzw, r38.xyzw
+ store_structured g3.x, vThreadIDInGroup.x, l(0), r37.x
+ store_structured g3.x, r12.x, l(0), r37.y
+ store_structured g3.x, r12.y, l(0), r37.z
+ store_structured g3.x, r12.z, l(0), r37.w
+ endif
+ endif
+ sync_g_t
+ if_nz r22.w
+ if_nz r7.z
+ ld_structured r37.x, vThreadIDInGroup.x, l(0), g3.xxxx
+ ld_structured r37.y, r12.x, l(0), g3.xxxx
+ ld_structured r37.z, r12.y, l(0), g3.xxxx
+ ld_structured r37.w, r12.z, l(0), g3.xxxx
+ ld_structured r38.x, r3.z, l(0), g3.xxxx
+ ld_structured r38.y, r8.y, l(0), g3.xxxx
+ ld_structured r38.z, r9.z, l(0), g3.xxxx
+ ld_structured r38.w, r8.z, l(0), g3.xxxx
+ or r37.xyzw, r37.xyzw, r38.xyzw
+ store_structured g3.x, vThreadIDInGroup.x, l(0), r37.x
+ store_structured g3.x, r12.x, l(0), r37.y
+ store_structured g3.x, r12.y, l(0), r37.z
+ store_structured g3.x, r12.z, l(0), r37.w
+ endif
+ endif
+ sync_g_t
+ if_nz r22.w
+ if_nz r7.w
+ ld_structured r37.x, vThreadIDInGroup.x, l(0), g3.xxxx
+ ld_structured r37.y, r12.x, l(0), g3.xxxx
+ ld_structured r37.z, r12.y, l(0), g3.xxxx
+ ld_structured r37.w, r12.z, l(0), g3.xxxx
+ ld_structured r38.x, r3.w, l(0), g3.xxxx
+ ld_structured r38.y, r8.w, l(0), g3.xxxx
+ ld_structured r38.z, r9.w, l(0), g3.xxxx
+ ld_structured r38.w, r13.x, l(0), g3.xxxx
+ or r37.xyzw, r37.xyzw, r38.xyzw
+ store_structured g3.x, vThreadIDInGroup.x, l(0), r37.x
+ store_structured g3.x, r12.x, l(0), r37.y
+ store_structured g3.x, r12.y, l(0), r37.z
+ store_structured g3.x, r12.z, l(0), r37.w
+ endif
+ endif
+ sync_g_t
+ if_nz r22.w
+ if_nz r4.w
+ ld_structured r37.x, vThreadIDInGroup.x, l(0), g3.xxxx
+ ld_structured r37.y, r12.x, l(0), g3.xxxx
+ ld_structured r37.z, r12.y, l(0), g3.xxxx
+ ld_structured r37.w, r12.z, l(0), g3.xxxx
+ ld_structured r38.x, r6.x, l(0), g3.xxxx
+ ld_structured r38.y, r13.y, l(0), g3.xxxx
+ ld_structured r38.z, r14.x, l(0), g3.xxxx
+ ld_structured r38.w, r13.z, l(0), g3.xxxx
+ or r37.xyzw, r37.xyzw, r38.xyzw
+ store_structured g3.x, vThreadIDInGroup.x, l(0), r37.x
+ store_structured g3.x, r12.x, l(0), r37.y
+ store_structured g3.x, r12.y, l(0), r37.z
+ store_structured g3.x, r12.z, l(0), r37.w
+ endif
+ endif
+ sync_g_t
+ if_nz r22.w
+ ld_structured r37.x, r5.z, l(0), g3.xxxx
+ ld_structured r37.y, r10.x, l(0), g3.xxxx
+ ld_structured r37.z, r10.y, l(0), g3.xxxx
+ ld_structured r37.w, r10.z, l(0), g3.xxxx
+ else
+ mov r37.xyzw, l(0,0,0,0)
+ endif
+ xor r28.yw, r2.zzzw, r37.xxxy
+ and r28.y, r28.w, r28.y
+ xor r37.xy, r5.xyxx, r37.zwzz
+ and r28.y, r28.y, r37.x
+ and r28.y, r37.y, r28.y
+ countbits r28.y, r28.y
+ and r28.y, r22.w, r28.y
+ iadd r28.x, r28.y, r28.x
+ endif
+ if_nz r7.x
+ store_structured g3.x, r11.x, l(0), r28.x
+ endif
+ sync_g_t
+ if_z vThreadIDInGroup.x
+ store_structured g3.x, l(4096), l(0), l(0)
+ store_structured g3.x, l(4609), l(0), l(0)
+ endif
+ if_nz r1.w
+ if_nz r16.x
+ ld_structured r28.y, r14.y, l(0), g3.xxxx
+ ld_structured r28.w, r5.w, l(0), g3.xxxx
+ iadd r28.y, r28.w, r28.y
+ store_structured g3.x, r10.w, l(0), r28.y
+ else
+ ld_structured r28.y, l(4097), l(0), g3.xxxx
+ store_structured g3.x, l(4610), l(0), r28.y
+ endif
+ endif
+ sync_g_t
+ if_nz r1.w
+ if_nz r16.y
+ ld_structured r28.y, r10.w, l(0), g3.xxxx
+ ld_structured r28.w, r11.w, l(0), g3.xxxx
+ iadd r28.y, r28.w, r28.y
+ else
+ ld_structured r28.y, r10.w, l(0), g3.xxxx
+ endif
+ store_structured g3.x, r15.x, l(0), r28.y
+ endif
+ sync_g_t
+ if_nz r1.w
+ if_nz r16.z
+ ld_structured r28.y, r14.y, l(0), g3.xxxx
+ ld_structured r28.w, r15.w, l(0), g3.xxxx
+ iadd r28.y, r28.w, r28.y
+ else
+ ld_structured r28.y, r14.y, l(0), g3.xxxx
+ endif
+ store_structured g3.x, r15.z, l(0), r28.y
+ endif
+ sync_g_t
+ if_nz r1.w
+ if_nz r16.w
+ ld_structured r28.y, r10.w, l(0), g3.xxxx
+ ld_structured r28.w, r14.z, l(0), g3.xxxx
+ iadd r28.y, r28.w, r28.y
+ else
+ ld_structured r28.y, r10.w, l(0), g3.xxxx
+ endif
+ store_structured g3.x, r15.x, l(0), r28.y
+ endif
+ sync_g_t
+ if_nz r1.w
+ if_nz r18.x
+ ld_structured r28.y, r14.y, l(0), g3.xxxx
+ ld_structured r28.w, r17.y, l(0), g3.xxxx
+ iadd r28.y, r28.w, r28.y
+ else
+ ld_structured r28.y, r14.y, l(0), g3.xxxx
+ endif
+ store_structured g3.x, r15.z, l(0), r28.y
+ endif
+ sync_g_t
+ if_nz r1.w
+ if_nz r18.y
+ ld_structured r28.y, r10.w, l(0), g3.xxxx
+ ld_structured r28.w, r14.w, l(0), g3.xxxx
+ iadd r28.y, r28.w, r28.y
+ else
+ ld_structured r28.y, r10.w, l(0), g3.xxxx
+ endif
+ store_structured g3.x, r15.x, l(0), r28.y
+ endif
+ sync_g_t
+ if_nz r1.w
+ if_nz r18.z
+ ld_structured r28.y, r14.y, l(0), g3.xxxx
+ ld_structured r28.w, r17.w, l(0), g3.xxxx
+ iadd r28.y, r28.w, r28.y
+ else
+ ld_structured r28.y, r14.y, l(0), g3.xxxx
+ endif
+ store_structured g3.x, r15.z, l(0), r28.y
+ endif
+ sync_g_t
+ if_nz r1.w
+ if_nz r18.w
+ ld_structured r28.y, r10.w, l(0), g3.xxxx
+ ld_structured r28.w, r12.w, l(0), g3.xxxx
+ iadd r28.y, r28.w, r28.y
+ else
+ ld_structured r28.y, r10.w, l(0), g3.xxxx
+ endif
+ store_structured g3.x, r15.x, l(0), r28.y
+ endif
+ sync_g_t
+ if_nz r1.w
+ if_nz r13.w
+ ld_structured r28.y, r14.y, l(0), g3.xxxx
+ ld_structured r28.w, r17.z, l(0), g3.xxxx
+ iadd r28.y, r28.w, r28.y
+ else
+ ld_structured r28.y, r14.y, l(0), g3.xxxx
+ endif
+ store_structured g3.x, r15.z, l(0), r28.y
+ endif
+ sync_g_t
+ mov r28.y, l(0)
+ mov r28.w, r26.x
+ loop
+ uge r29.w, r28.y, r21.x
+ breakc_nz r29.w
+ ult r29.w, r28.w, r23.w
+ iadd r30.w, r26.z, r28.w
+ ld_structured_indexable(structured_buffer, stride=4)(mixed,mixed,mixed,mixed) r30.w, r30.w, l(0), u2.xxxx
+ movc r30.w, r29.w, r30.w, l(-1)
+ ushr r33.w, r30.w, r26.y
+ and r37.xyzw, r33.wwww, l(1, 2, 4, 8)
+ ine r38.xyzw, r37.xyzw, l(0, 0, 0, 0)
+ bfi r39.xyzw, l(1, 1, 1, 1), vThreadIDInGroup.xxxx, r38.xyzw, l(0, 0, 0, 0)
+ store_structured g3.x, vThreadIDInGroup.x, l(0), r39.x
+ store_structured g3.x, r12.x, l(0), r39.y
+ store_structured g3.x, r12.y, l(0), r39.z
+ store_structured g3.x, r12.z, l(0), r39.w
+ sync_g_t
+ if_nz r7.x
+ ld_structured r39.x, vThreadIDInGroup.x, l(0), g3.xxxx
+ ld_structured r39.y, r12.x, l(0), g3.xxxx
+ ld_structured r39.z, r12.y, l(0), g3.xxxx
+ ld_structured r39.w, r12.z, l(0), g3.xxxx
+ ld_structured r40.x, r3.x, l(0), g3.xxxx
+ ld_structured r40.y, r6.y, l(0), g3.xxxx
+ ld_structured r40.z, r9.x, l(0), g3.xxxx
+ ld_structured r40.w, r6.z, l(0), g3.xxxx
+ or r39.xyzw, r39.xyzw, r40.xyzw
+ store_structured g3.x, vThreadIDInGroup.x, l(0), r39.x
+ store_structured g3.x, r12.x, l(0), r39.y
+ store_structured g3.x, r12.y, l(0), r39.z
+ store_structured g3.x, r12.z, l(0), r39.w
+ endif
+ sync_g_t
+ if_nz r7.y
+ ld_structured r39.x, vThreadIDInGroup.x, l(0), g3.xxxx
+ ld_structured r39.y, r12.x, l(0), g3.xxxx
+ ld_structured r39.z, r12.y, l(0), g3.xxxx
+ ld_structured r39.w, r12.z, l(0), g3.xxxx
+ ld_structured r40.x, r3.y, l(0), g3.xxxx
+ ld_structured r40.y, r6.w, l(0), g3.xxxx
+ ld_structured r40.z, r9.y, l(0), g3.xxxx
+ ld_structured r40.w, r8.x, l(0), g3.xxxx
+ or r39.xyzw, r39.xyzw, r40.xyzw
+ store_structured g3.x, vThreadIDInGroup.x, l(0), r39.x
+ store_structured g3.x, r12.x, l(0), r39.y
+ store_structured g3.x, r12.y, l(0), r39.z
+ store_structured g3.x, r12.z, l(0), r39.w
+ endif
+ sync_g_t
+ if_nz r7.z
+ ld_structured r39.x, vThreadIDInGroup.x, l(0), g3.xxxx
+ ld_structured r39.y, r12.x, l(0), g3.xxxx
+ ld_structured r39.z, r12.y, l(0), g3.xxxx
+ ld_structured r39.w, r12.z, l(0), g3.xxxx
+ ld_structured r40.x, r3.z, l(0), g3.xxxx
+ ld_structured r40.y, r8.y, l(0), g3.xxxx
+ ld_structured r40.z, r9.z, l(0), g3.xxxx
+ ld_structured r40.w, r8.z, l(0), g3.xxxx
+ or r39.xyzw, r39.xyzw, r40.xyzw
+ store_structured g3.x, vThreadIDInGroup.x, l(0), r39.x
+ store_structured g3.x, r12.x, l(0), r39.y
+ store_structured g3.x, r12.y, l(0), r39.z
+ store_structured g3.x, r12.z, l(0), r39.w
+ endif
+ sync_g_t
+ if_nz r7.w
+ ld_structured r39.x, vThreadIDInGroup.x, l(0), g3.xxxx
+ ld_structured r39.y, r12.x, l(0), g3.xxxx
+ ld_structured r39.z, r12.y, l(0), g3.xxxx
+ ld_structured r39.w, r12.z, l(0), g3.xxxx
+ ld_structured r40.x, r3.w, l(0), g3.xxxx
+ ld_structured r40.y, r8.w, l(0), g3.xxxx
+ ld_structured r40.z, r9.w, l(0), g3.xxxx
+ ld_structured r40.w, r13.x, l(0), g3.xxxx
+ or r39.xyzw, r39.xyzw, r40.xyzw
+ store_structured g3.x, vThreadIDInGroup.x, l(0), r39.x
+ store_structured g3.x, r12.x, l(0), r39.y
+ store_structured g3.x, r12.y, l(0), r39.z
+ store_structured g3.x, r12.z, l(0), r39.w
+ endif
+ sync_g_t
+ if_nz r4.w
+ ld_structured r39.x, vThreadIDInGroup.x, l(0), g3.xxxx
+ ld_structured r39.y, r12.x, l(0), g3.xxxx
+ ld_structured r39.z, r12.y, l(0), g3.xxxx
+ ld_structured r39.w, r12.z, l(0), g3.xxxx
+ ld_structured r40.x, r6.x, l(0), g3.xxxx
+ ld_structured r40.y, r13.y, l(0), g3.xxxx
+ ld_structured r40.z, r14.x, l(0), g3.xxxx
+ ld_structured r40.w, r13.z, l(0), g3.xxxx
+ or r39.xyzw, r39.xyzw, r40.xyzw
+ store_structured g3.x, vThreadIDInGroup.x, l(0), r39.x
+ store_structured g3.x, r12.x, l(0), r39.y
+ store_structured g3.x, r12.y, l(0), r39.z
+ store_structured g3.x, r12.z, l(0), r39.w
+ endif
+ sync_g_t
+ ld_structured r35.w, r5.z, l(0), g3.xxxx
+ ld_structured r37.y, r10.x, l(0), g3.xxxx
+ ld_structured r37.z, r10.y, l(0), g3.xxxx
+ ld_structured r37.w, r10.z, l(0), g3.xxxx
+ bfi r33.w, l(4), l(5), r33.w, l(0)
+ iadd r33.w, r11.y, r33.w
+ iadd r33.w, r33.w, l(4096)
+ ld_structured r33.w, r33.w, l(0), g3.xxxx
+ if_nz r29.w
+ iadd r29.w, r37.x, l(-1)
+ xor r29.w, r35.w, r29.w
+ ieq r38.xyz, r38.yzwy, l(0, 0, 0, 0)
+ xor r37.x, r37.y, r38.x
+ and r29.w, r29.w, r37.x
+ xor r37.x, r37.z, r38.y
+ and r29.w, r29.w, r37.x
+ xor r37.x, r37.w, r38.z
+ and r29.w, r29.w, r37.x
+ ubfe r29.w, vThreadIDInGroup.x, l(0), r29.w
+ countbits r29.w, r29.w
+ iadd r29.w, r29.w, r33.w
+ iadd r29.w, r26.w, r29.w
+ store_structured u2.x, r29.w, l(0), r30.w
+ endif
+ sync_g_t
+ if_nz r7.x
+ ld_structured r29.w, r11.z, l(0), g3.xxxx
+ xor r30.w, r2.z, r35.w
+ xor r33.w, r2.w, r37.y
+ and r30.w, r30.w, r33.w
+ xor r33.w, r5.x, r37.z
+ and r30.w, r30.w, r33.w
+ xor r33.w, r5.y, r37.w
+ and r30.w, r30.w, r33.w
+ countbits r30.w, r30.w
+ iadd r29.w, r29.w, r30.w
+ store_structured g3.x, r11.z, l(0), r29.w
+ endif
+ iadd r28.yw, r28.yyyw, l(0, 1, 0, 32)
+ endloop
+ if_nz r21.w
+ ult r28.y, r28.w, r23.w
+ iadd r29.w, r26.z, r28.w
+ ld_structured_indexable(structured_buffer, stride=4)(mixed,mixed,mixed,mixed) r29.w, r29.w, l(0), u2.xxxx
+ movc r29.w, r28.y, r29.w, l(-1)
+ ushr r30.w, r29.w, r26.y
+ if_nz r22.w
+ and r37.xyzw, r30.wwww, l(1, 2, 4, 8)
+ ine r37.xyzw, r37.xyzw, l(0, 0, 0, 0)
+ bfi r37.xyzw, l(1, 1, 1, 1), vThreadIDInGroup.xxxx, r37.xyzw, l(0, 0, 0, 0)
+ store_structured g3.x, vThreadIDInGroup.x, l(0), r37.x
+ store_structured g3.x, r12.x, l(0), r37.y
+ store_structured g3.x, r12.y, l(0), r37.z
+ store_structured g3.x, r12.z, l(0), r37.w
+ endif
+ sync_g_t
+ if_nz r22.w
+ if_nz r7.x
+ ld_structured r37.x, vThreadIDInGroup.x, l(0), g3.xxxx
+ ld_structured r37.y, r12.x, l(0), g3.xxxx
+ ld_structured r37.z, r12.y, l(0), g3.xxxx
+ ld_structured r37.w, r12.z, l(0), g3.xxxx
+ ld_structured r38.x, r3.x, l(0), g3.xxxx
+ ld_structured r38.y, r6.y, l(0), g3.xxxx
+ ld_structured r38.z, r9.x, l(0), g3.xxxx
+ ld_structured r38.w, r6.z, l(0), g3.xxxx
+ or r37.xyzw, r37.xyzw, r38.xyzw
+ store_structured g3.x, vThreadIDInGroup.x, l(0), r37.x
+ store_structured g3.x, r12.x, l(0), r37.y
+ store_structured g3.x, r12.y, l(0), r37.z
+ store_structured g3.x, r12.z, l(0), r37.w
+ endif
+ endif
+ sync_g_t
+ if_nz r22.w
+ if_nz r7.y
+ ld_structured r37.x, vThreadIDInGroup.x, l(0), g3.xxxx
+ ld_structured r37.y, r12.x, l(0), g3.xxxx
+ ld_structured r37.z, r12.y, l(0), g3.xxxx
+ ld_structured r37.w, r12.z, l(0), g3.xxxx
+ ld_structured r38.x, r3.y, l(0), g3.xxxx
+ ld_structured r38.y, r6.w, l(0), g3.xxxx
+ ld_structured r38.z, r9.y, l(0), g3.xxxx
+ ld_structured r38.w, r8.x, l(0), g3.xxxx
+ or r37.xyzw, r37.xyzw, r38.xyzw
+ store_structured g3.x, vThreadIDInGroup.x, l(0), r37.x
+ store_structured g3.x, r12.x, l(0), r37.y
+ store_structured g3.x, r12.y, l(0), r37.z
+ store_structured g3.x, r12.z, l(0), r37.w
+ endif
+ endif
+ sync_g_t
+ if_nz r22.w
+ if_nz r7.z
+ ld_structured r37.x, vThreadIDInGroup.x, l(0), g3.xxxx
+ ld_structured r37.y, r12.x, l(0), g3.xxxx
+ ld_structured r37.z, r12.y, l(0), g3.xxxx
+ ld_structured r37.w, r12.z, l(0), g3.xxxx
+ ld_structured r38.x, r3.z, l(0), g3.xxxx
+ ld_structured r38.y, r8.y, l(0), g3.xxxx
+ ld_structured r38.z, r9.z, l(0), g3.xxxx
+ ld_structured r38.w, r8.z, l(0), g3.xxxx
+ or r37.xyzw, r37.xyzw, r38.xyzw
+ store_structured g3.x, vThreadIDInGroup.x, l(0), r37.x
+ store_structured g3.x, r12.x, l(0), r37.y
+ store_structured g3.x, r12.y, l(0), r37.z
+ store_structured g3.x, r12.z, l(0), r37.w
+ endif
+ endif
+ sync_g_t
+ if_nz r22.w
+ if_nz r7.w
+ ld_structured r37.x, vThreadIDInGroup.x, l(0), g3.xxxx
+ ld_structured r37.y, r12.x, l(0), g3.xxxx
+ ld_structured r37.z, r12.y, l(0), g3.xxxx
+ ld_structured r37.w, r12.z, l(0), g3.xxxx
+ ld_structured r38.x, r3.w, l(0), g3.xxxx
+ ld_structured r38.y, r8.w, l(0), g3.xxxx
+ ld_structured r38.z, r9.w, l(0), g3.xxxx
+ ld_structured r38.w, r13.x, l(0), g3.xxxx
+ or r37.xyzw, r37.xyzw, r38.xyzw
+ store_structured g3.x, vThreadIDInGroup.x, l(0), r37.x
+ store_structured g3.x, r12.x, l(0), r37.y
+ store_structured g3.x, r12.y, l(0), r37.z
+ store_structured g3.x, r12.z, l(0), r37.w
+ endif
+ endif
+ sync_g_t
+ if_nz r22.w
+ if_nz r4.w
+ ld_structured r37.x, vThreadIDInGroup.x, l(0), g3.xxxx
+ ld_structured r37.y, r12.x, l(0), g3.xxxx
+ ld_structured r37.z, r12.y, l(0), g3.xxxx
+ ld_structured r37.w, r12.z, l(0), g3.xxxx
+ ld_structured r38.x, r6.x, l(0), g3.xxxx
+ ld_structured r38.y, r13.y, l(0), g3.xxxx
+ ld_structured r38.z, r14.x, l(0), g3.xxxx
+ ld_structured r38.w, r13.z, l(0), g3.xxxx
+ or r37.xyzw, r37.xyzw, r38.xyzw
+ store_structured g3.x, vThreadIDInGroup.x, l(0), r37.x
+ store_structured g3.x, r12.x, l(0), r37.y
+ store_structured g3.x, r12.y, l(0), r37.z
+ store_structured g3.x, r12.z, l(0), r37.w
+ endif
+ endif
+ sync_g_t
+ if_nz r22.w
+ ld_structured r37.x, r5.z, l(0), g3.xxxx
+ ld_structured r37.y, r10.x, l(0), g3.xxxx
+ ld_structured r37.z, r10.y, l(0), g3.xxxx
+ ld_structured r37.w, r10.z, l(0), g3.xxxx
+ and r38.xyzw, r30.wwww, l(1, 2, 4, 8)
+ iadd r33.w, r38.x, l(-1)
+ xor r33.w, r37.x, r33.w
+ ine r38.xyz, r38.yzwy, l(0, 0, 0, 0)
+ ieq r38.xyz, r38.xyzx, l(0, 0, 0, 0)
+ xor r38.xyz, r37.yzwy, r38.xyzx
+ and r33.w, r33.w, r38.x
+ and r33.w, r38.y, r33.w
+ and r33.w, r38.z, r33.w
+ bfi r30.w, l(4), l(5), r30.w, l(0)
+ iadd r30.w, r11.y, r30.w
+ iadd r30.w, r30.w, l(4096)
+ ld_structured r30.w, r30.w, l(0), g3.xxxx
+ ubfe r33.w, vThreadIDInGroup.x, l(0), r33.w
+ countbits r33.w, r33.w
+ iadd r30.w, r30.w, r33.w
+ else
+ mov r37.xyzw, l(0,0,0,0)
+ mov r30.w, l(0)
+ endif
+ if_nz r28.y
+ iadd r28.y, r26.w, r30.w
+ store_structured u2.x, r28.y, l(0), r29.w
+ endif
+ sync_g_t
+ if_nz r22.w
+ if_nz r7.x
+ ld_structured r28.y, r11.z, l(0), g3.xxxx
+ xor r29.w, r2.z, r37.x
+ xor r30.w, r2.w, r37.y
+ and r29.w, r29.w, r30.w
+ xor r37.xy, r5.xyxx, r37.zwzz
+ and r29.w, r29.w, r37.x
+ and r29.w, r37.y, r29.w
+ countbits r29.w, r29.w
+ iadd r28.y, r28.y, r29.w
+ store_structured g3.x, r11.z, l(0), r28.y
+ endif
+ endif
+ endif
+ iadd r26.y, r26.y, l(4)
+ mov r28.y, r26.w
+ mov r26.w, r26.z
+ mov r26.z, r28.y
+ endloop
+ mov r21.x, vThreadIDInGroup.x
+ loop
+ ige r21.z, r21.x, r17.x
+ breakc_nz r21.z
+ iadd r21.z, r21.x, r21.y
+ ld_structured_indexable(structured_buffer, stride=4)(mixed,mixed,mixed,mixed) r21.w, r21.z, l(0), u2.xxxx
+ iadd r21.z, r21.z, l(-1)
+ ld_structured_indexable(structured_buffer, stride=4)(mixed,mixed,mixed,mixed) r21.z, r21.z, l(0), u2.xxxx
+ ishr r21.zw, r21.zzzw, l(0, 0, 16, 16)
+ iadd r22.w, r21.w, l(-1)
+ movc r21.z, r21.x, r21.z, r22.w
+ ine r22.w, r21.z, r21.w
+ if_nz r22.w
+ iadd r21.zw, r19.yyyy, r21.zzzw
+ store_structured u2.x, r21.w, l(0), r21.x
+ iadd r21.z, r21.z, l(1)
+ store_structured u2.x, r21.z, l(0), r21.x
+ endif
+ iadd r21.x, r21.x, l(1024)
+ endloop
+ ld_raw r21.xy, l(96), g0.xyxx
+ mov r26.x, vThreadIDInGroup.x
+ loop
+ ige r21.z, r26.x, r17.x
+ breakc_nz r21.z
+ iadd r21.z, r21.y, r26.x
+ ld_structured_indexable(structured_buffer, stride=4)(mixed,mixed,mixed,mixed) r21.z, r21.z, l(0), u2.xxxx
+ and r21.z, r21.z, l(0x0000ffff)
+ iadd r21.z, r21.z, r21.x
+ ld_structured_indexable(structured_buffer, stride=16)(mixed,mixed,mixed,mixed) r37.xyzw, r21.z, l(0), u1.xyzw
+ store_structured g3.x, r26.x, l(0), r37.x
+ iadd r26.xyzw, r26.xxxx, l(1024, 1975, 3950, 5925)
+ store_structured g3.x, r26.y, l(0), r37.y
+ store_structured g3.x, r26.z, l(0), r37.z
+ store_structured g3.x, r26.w, l(0), r37.w
+ endloop
+ sync_g_t
+ ld_raw r21.x, l(84), g0.xxxx
+ mul r21.y, r21.x, r21.x
+ ld_raw r21.z, l(100), g0.xxxx
+ ld_raw r21.w, l(112), g1.xxxx
+ mov r22.w, vThreadIDInGroup.x
+ loop
+ ige r23.w, r22.w, r17.x
+ breakc_nz r23.w
+ iadd r23.w, r21.z, r22.w
+ ld_structured_indexable(structured_buffer, stride=4)(mixed,mixed,mixed,mixed) r23.w, r23.w, l(0), u2.xxxx
+ and r23.w, r23.w, l(0x0000ffff)
+ ld_structured r26.x, r22.w, l(0), g3.xxxx
+ iadd r37.xyzw, r22.wwww, l(1975, 3950, 5925, 1024)
+ ld_structured r26.y, r37.x, l(0), g3.xxxx
+ ld_structured r26.z, r37.y, l(0), g3.xxxx
+ ld_structured r26.w, r37.z, l(0), g3.xxxx
+ iadd r28.y, r21.w, r23.w
+ ld_structured_indexable(structured_buffer, stride=16)(mixed,mixed,mixed,mixed) r37.xyz, r28.y, l(0), t14.xyzx
+ dp4 r28.y, r26.xyzw, icb[r20.z + 0].xyzw
+ mad r28.y, r28.y, r19.z, r20.x
+ min r28.y, r28.y, l(127.500000)
+ max r28.y, r28.y, l(0.000000)
+ ftoi r28.y, r28.y
+ dp4 r29.w, r26.xyzw, icb[r20.w + 0].xyzw
+ mad r29.w, r29.w, r19.w, r20.y
+ min r29.w, r29.w, l(127.500000)
+ max r29.w, r29.w, l(0.000000)
+ ftoi r29.w, r29.w
+ imad r28.y, r29.w, l(128), r28.y
+ iadd r29.w, r28.y, l(256)
+ add r30.w, r26.w, l(0.000000)
+ mov r38.xyz, l(0,0,0,0)
+ mov r33.w, l(0.000000)
+ mov r35.w, r28.y
+ loop
+ ilt r38.w, r29.w, r35.w
+ breakc_nz r38.w
+ iadd r38.w, r19.y, r35.w
+ ld_structured_indexable(structured_buffer, stride=4)(mixed,mixed,mixed,mixed) r39.x, r38.w, l(0), u2.xxxx
+ iadd r39.yzw, r38.wwww, l(0, 1, 2, 3)
+ ld_structured_indexable(structured_buffer, stride=4)(mixed,mixed,mixed,mixed) r38.w, r39.y, l(0), u2.xxxx
+ ld_structured_indexable(structured_buffer, stride=4)(mixed,mixed,mixed,mixed) r39.y, r39.z, l(0), u2.xxxx
+ ld_structured_indexable(structured_buffer, stride=4)(mixed,mixed,mixed,mixed) r39.z, r39.w, l(0), u2.xxxx
+ umin r39.x, r38.w, r39.x
+ umin r39.x, r39.y, r39.x
+ imax r38.w, r38.w, r39.y
+ imax r38.w, r39.z, r38.w
+ imax r38.w, r38.w, l(0)
+ mov r39.yzw, r38.xxyz
+ mov r40.x, r33.w
+ mov r40.y, r39.x
+ loop
+ uge r40.z, r40.y, r38.w
+ breakc_nz r40.z
+ ine r40.z, r22.w, r40.y
+ if_nz r40.z
+ ld_structured r41.x, r40.y, l(0), g3.xxxx
+ iadd r42.xyz, r40.yyyy, l(1975, 3950, 5925, 0)
+ ld_structured r41.y, r42.x, l(0), g3.xxxx
+ ld_structured r41.z, r42.y, l(0), g3.xxxx
+ ld_structured r40.z, r42.z, l(0), g3.xxxx
+ add r41.xyz, r26.xyzx, -r41.xyzx
+ dp3 r40.w, r41.xyzx, r41.xyzx
+ lt r41.w, r21.y, r40.w
+ if_nz r41.w
+ iadd r41.w, r40.y, l(1)
+ mov r40.y, r41.w
+ continue
+ endif
+ ld_raw r41.w, l(112), g1.xxxx
+ ine r42.x, r41.w, l(-1)
+ if_nz r42.x
+ ld_raw r42.x, l(100), g0.xxxx
+ iadd r42.x, r40.y, r42.x
+ ld_structured_indexable(structured_buffer, stride=4)(mixed,mixed,mixed,mixed) r42.x, r42.x, l(0), u2.xxxx
+ and r42.x, r42.x, l(0x0000ffff)
+ iadd r41.w, r41.w, r42.x
+ ld_structured_indexable(structured_buffer, stride=16)(mixed,mixed,mixed,mixed) r42.xyz, r41.w, l(0), t14.xyzx
+ add r42.xyz, r37.xyzx, -r42.xyzx
+ dp3 r41.w, r42.xyzx, r42.xyzx
+ ge r42.x, r21.y, r41.w
+ if_nz r42.x
+ iadd r42.x, r40.y, l(1)
+ mov r40.y, r42.x
+ continue
+ endif
+ add r41.w, r41.w, l(0.000000)
+ div r41.w, r21.y, r41.w
+ rsq r41.w, r41.w
+ add r41.w, r41.w, l(-1.000000)
+ min r41.w, r41.w, l(1.000000)
+ else
+ mov r41.w, l(1.000000)
+ endif
+ add r40.w, r40.w, l(0.000000)
+ rsq r40.w, r40.w
+ mad r40.w, r21.x, r40.w, l(-1.000000)
+ max r40.w, r40.w, l(0.000000)
+ mul r42.x, r40.w, r40.w
+ mul r41.w, r41.w, r42.x
+ add r40.z, r30.w, r40.z
+ div r40.z, r41.w, r40.z
+ mad r39.yzw, r40.zzzz, r41.xxyz, r39.yyzw
+ add r40.x, r40.w, r40.x
+ endif
+ iadd r40.y, r40.y, l(1)
+ endloop
+ mov r38.xyz, r39.yzwy
+ mov r33.w, r40.x
+ iadd r35.w, r35.w, l(128)
+ endloop
+ ld_raw r26.x, l(72), g1.xxxx
+ mul r26.x, r26.w, r26.x
+ div r26.x, r26.x, r33.w
+ ld_raw r26.y, l(96), g0.xxxx
+ iadd r23.w, r23.w, r26.y
+ ld_structured_indexable(structured_buffer, stride=16)(mixed,mixed,mixed,mixed) r37.xyz, r23.w, l(0), u1.xyzx
+ mad r26.xzw, r38.xxyz, r26.xxxx, r37.xxyz
+ store_structured u1.xyz, r23.w, l(0), r26.xzwx
+ mov r22.w, r37.w
+ endloop
+ sync_g_t
+ ld_raw r19.z, l(96), g0.xxxx
+ mov r20.x, vThreadIDInGroup.x
+ loop
+ ige r19.w, r20.x, r19.x
+ breakc_nz r19.w
+ iadd r19.w, r19.z, r20.x
+ ld_structured_indexable(structured_buffer, stride=16)(mixed,mixed,mixed,mixed) r21.xyzw, r19.w, l(0), u1.xyzw
+ store_structured g3.x, r20.x, l(0), r21.x
+ iadd r20.xyzw, r20.xxxx, l(1024, 1975, 3950, 5925)
+ store_structured g3.x, r20.y, l(0), r21.y
+ store_structured g3.x, r20.z, l(0), r21.z
+ store_structured g3.x, r20.w, l(0), r21.w
+ endloop
+ ld_raw r19.x, l(100), g0.xxxx
+ mov r19.z, vThreadIDInGroup.x
+ loop
+ ige r19.w, r19.z, r17.x
+ breakc_nz r19.w
+ iadd r19.w, r19.z, r19.x
+ ld_structured_indexable(structured_buffer, stride=4)(mixed,mixed,mixed,mixed) r19.w, r19.w, l(0), u2.xxxx
+ ushr r19.w, r19.w, l(16)
+ iadd r19.w, r19.w, r19.y
+ store_structured u2.x, r19.w, l(0), l(-1)
+ iadd r20.x, r19.w, l(1)
+ store_structured u2.x, r20.x, l(0), l(-1)
+ iadd r19.z, r19.z, l(1024)
+ endloop
+ endif
+ if_z vThreadIDInGroup.x
+ ld_raw r17.x, l(8), g1.xxxx
+ mul r17.x, r17.x, l(1000.000000)
+ ftou r17.x, r17.x
+ umax r17.x, r17.x, l(1)
+ ld_raw r19.x, l(104), g1.xxxx
+ iadd r17.x, r17.x, r19.x
+ store_raw g1.x, l(104), r17.x
+ endif
+ sync_g_t
+ ld_raw r17.x, l(104), g1.xxxx
+ ld_raw r19.x, l(104), g0.xxxx
+ uge r17.x, r17.x, r19.x
+ if_nz r17.x
+ ld_raw r17.x, l(0), g0.xxxx
+ mov r19.x, l(0)
+ mov r19.y, vThreadIDInGroup.x
+ loop
+ uge r19.z, r19.y, r17.x
+ breakc_nz r19.z
+ ld_structured r20.x, r19.y, l(0), g3.xxxx
+ iadd r21.xyz, r19.yyyy, l(1975, 3950, 1024, 0)
+ ld_structured r20.y, r21.x, l(0), g3.xxxx
+ ld_structured r20.z, r21.y, l(0), g3.xxxx
+ iadd r19.z, r0.x, r19.y
+ ld_structured_indexable(structured_buffer, stride=16)(mixed,mixed,mixed,mixed) r21.xyw, r19.z, l(0), u0.xyxz
+ add r20.xyz, r20.xyzx, -r21.xywx
+ max r19.z, |r20.y|, |r20.x|
+ max r19.z, |r20.z|, r19.z
+ max r19.x, r19.x, r19.z
+ mov r19.y, r21.z
+ endloop
+ if_z vThreadIDInGroup.x
+ ld_raw r17.x, l(100), g1.xxxx
+ iadd r17.x, r17.x, l(1)
+ store_raw g1.x, l(100), r17.x
+ ld_raw r17.x, l(104), g0.xxxx
+ ld_raw r19.y, l(104), g1.xxxx
+ iadd r17.x, -r17.x, r19.y
+ store_raw g1.x, l(104), r17.x
+ endif
+ ld_raw r17.x, l(112), g0.xxxx
+ ld_raw r19.y, l(8), g1.xxxx
+ mul r17.x, r17.x, r19.y
+ lt r17.x, r17.x, r19.x
+ if_nz r17.x
+ store_raw g1.x, l(100), l(0)
+ endif
+ endif
+ iadd r15.y, r15.y, l(1)
+ endloop
+ sync_g_t
+ ld_raw r0.yz, l(0), g0.xxyx
+ mov r1.x, vThreadIDInGroup.x
+ loop
+ uge r0.w, r1.x, r0.y
+ breakc_nz r0.w
+ iadd r0.w, r0.z, r1.x
+ ld_structured r2.x, r1.x, l(0), g3.xxxx
+ iadd r1.xyzw, r1.xxxx, l(1024, 1975, 3950, 5925)
+ ld_structured r2.y, r1.y, l(0), g3.xxxx
+ ld_structured r2.z, r1.z, l(0), g3.xxxx
+ ld_structured r2.w, r1.w, l(0), g3.xxxx
+ store_structured u0.xyzw, r0.w, l(0), r2.xyzw
+ endloop
+else
+ ld_raw r0.y, l(4), g0.xxxx
+ not r0.z, vThreadIDInGroup.x
+ and r0.z, r0.z, l(3)
+ ult r1.xyzw, vThreadIDInGroup.xxxx, l(192, 176, 3, 512)
+ and r2.xyzw, vThreadIDInGroup.xxxx, l(32, 31, 1, 2)
+ movc r0.w, r2.x, l(-1.000000), l(1.000000)
+ iadd r3.xyzw, vThreadIDInGroup.xxxx, l(16, 8, 4, 2)
+ ieq r2.x, r2.y, l(0)
+ and r2.x, r1.x, r2.x
+ ushr r4.xy, vThreadIDInGroup.xxxx, l(6, 5, 0, 0)
+ ishl r4.z, r4.y, l(2)
+ ishl r4.w, vThreadIDInGroup.x, l(3)
+ iadd r4.zw, r4.zzzw, l(0, 0, 76, 76)
+ iadd r2.z, r2.z, l(-1)
+ ine r2.w, r2.w, l(0)
+ ieq r2.w, r2.w, l(0)
+ and r5.xyz, vThreadIDInGroup.xxxx, l(4, 8, 992, 0)
+ ine r5.xy, r5.xyxx, l(0, 0, 0, 0)
+ ieq r5.xy, r5.xyxx, l(0, 0, 0, 0)
+ iadd r6.xyzw, vThreadIDInGroup.xxxx, l(1, 1040, 3088, 1032)
+ ult r7.xyzw, r2.yyyy, l(16, 8, 4, 2)
+ ult r5.w, r2.y, l(1)
+ iadd r8.xyzw, vThreadIDInGroup.xxxx, l(3080, 1028, 3076, 1026)
+ iadd r9.xyzw, r3.xyzw, l(2048, 2048, 2048, 2048)
+ iadd r10.xyz, r5.zzzz, l(1024, 2048, 3072, 0)
+ iadd r11.xy, r4.yyyy, l(1, 513, 0, 0)
+ bfi r12.xyzw, l(10, 11, 10, 5), l(0, 0, 0, 5), vThreadIDInGroup.xxxx, l(1024, 2048, 3072, 0)
+ iadd r11.xz, r11.xxyx, r12.wwww
+ iadd r13.xyzw, vThreadIDInGroup.xxxx, l(3074, 1025, 3073, 514)
+ iadd r14.xy, r6.xxxx, l(2048, 4096, 0, 0)
+ iadd r10.w, vThreadIDInGroup.x, l(4096)
+ iadd r11.w, r13.w, l(4096)
+ iadd r15.xyzw, vThreadIDInGroup.xxxx, l(4097, 512, 4610, 4093)
+ iadd r12.w, r15.y, l(4096)
+ uge r16.xyzw, vThreadIDInGroup.xxxx, l(1, 2, 4, 8)
+ iadd r17.xyzw, vThreadIDInGroup.xxxx, l(506, 4081, 482, 4033)
+ iadd r14.zw, r17.xxxz, l(0, 0, 4096, 4096)
+ uge r18.xyzw, vThreadIDInGroup.xxxx, l(16, 32, 64, 128)
+ iadd r17.xz, vThreadIDInGroup.xxxx, l(386, 0, 3841, 0)
+ iadd r13.w, r17.x, l(4096)
+ uge r15.y, vThreadIDInGroup.x, l(256)
+ iadd r11.xz, r11.xxzx, l(4096, 0, 4096, 0)
+ mov r17.x, l(0)
+ loop
+ ld_raw r19.x, l(16), g1.xxxx
+ uge r19.y, r17.x, r19.x
+ breakc_nz r19.y
+ utof r19.y, r17.x
+ add r19.z, r19.y, l(1.000000)
+ utof r19.x, r19.x
+ div r19.x, r19.z, r19.x
+ if_z vThreadIDInGroup.x
+ ld_raw r19.z, l(12), g1.xxxx
+ iadd r19.z, r17.x, r19.z
+ ld_structured_indexable(structured_buffer, stride=100)(mixed,mixed,mixed,mixed) r20.xyzw, r19.z, l(0), t2.xyzw
+ ld_structured_indexable(structured_buffer, stride=100)(mixed,mixed,mixed,mixed) r21.xyzw, r19.z, l(16), t2.xyzw
+ ld_structured_indexable(structured_buffer, stride=100)(mixed,mixed,mixed,mixed) r22.xyzw, r19.z, l(32), t2.xyzw
+ ld_structured_indexable(structured_buffer, stride=100)(mixed,mixed,mixed,mixed) r23.xyzw, r19.z, l(48), t2.xyzw
+ ld_structured_indexable(structured_buffer, stride=100)(mixed,mixed,mixed,mixed) r24.xyzw, r19.z, l(64), t2.xyzw
+ ld_structured_indexable(structured_buffer, stride=100)(mixed,mixed,mixed,mixed) r25.xyzw, r19.z, l(80), t2.xyzw
+ ld_structured_indexable(structured_buffer, stride=100)(mixed,mixed,mixed,mixed) r19.z, r19.z, l(96), t2.xxxx
+ store_raw g2.xyzw, l(0), r20.xyzw
+ store_raw g2.xyzw, l(16), r21.xyzw
+ store_raw g2.xyzw, l(32), r22.xyzw
+ store_raw g2.xyzw, l(48), r23.xyzw
+ store_raw g2.xyzw, l(64), r24.xyzw
+ store_raw g2.xyzw, l(80), r25.xyzw
+ store_raw g2.x, l(96), r19.z
+ endif
+ sync_g_t
+ ld_raw r19.z, l(0), g0.xxxx
+ ld_raw r20.xyz, l(12), g2.xyzx
+ ld_raw r21.xyzw, l(60), g2.xyzw
+ ld_raw r22.xyzw, l(24), g2.xyzw
+ ld_raw r23.xyzw, l(76), g2.xyzw
+ ld_raw r24.xyzw, l(40), g2.xyzw
+ ld_raw r25.xy, l(92), g2.xyxx
+ ld_raw r19.w, l(56), g2.xxxx
+ ld_raw r26.xyzw, l(12), g2.xyzw
+ ld_raw r20.w, l(36), g2.xxxx
+ ld_raw r25.z, l(48), g2.xxxx
+ mov r25.w, vThreadIDInGroup.x
+ loop
+ uge r27.x, r25.w, r19.z
+ breakc_nz r27.x
+ iadd r27.x, r0.y, r25.w
+ ld_structured_indexable(structured_buffer, stride=16)(mixed,mixed,mixed,mixed) r28.xyzw, r27.x, l(0), u0.xyzw
+ iadd r27.y, r0.x, r25.w
+ ld_structured_indexable(structured_buffer, stride=16)(mixed,mixed,mixed,mixed) r29.xyzw, r27.y, l(0), u0.xyzw
+ eq r27.z, r28.w, l(0.000000)
+ movc r28.w, r27.z, r29.w, r28.w
+ lt r27.z, l(0.000000), r28.w
+ if_nz r27.z
+ if_nz r25.y
+ add r30.xyz, r20.xyzx, r28.xyzx
+ mad r27.zw, r28.xxxx, r21.xxxw, r30.xxxy
+ mad r27.zw, r29.xxxx, r22.xxxw, r27.zzzw
+ mad r27.z, r28.y, r21.y, r27.z
+ mad r27.z, r29.y, r22.y, r27.z
+ mad r27.z, r28.z, r21.z, r27.z
+ mad r30.x, r29.z, r22.z, r27.z
+ mad r27.z, r28.y, r23.x, r27.w
+ mad r27.z, r29.y, r24.x, r27.z
+ mad r27.z, r28.z, r23.y, r27.z
+ mad r30.y, r29.z, r24.y, r27.z
+ mad r27.z, r28.x, r23.z, r30.z
+ mad r27.z, r29.x, r24.z, r27.z
+ mad r27.z, r28.y, r23.w, r27.z
+ mad r27.z, r29.y, r24.w, r27.z
+ mad r27.z, r28.z, r25.x, r27.z
+ mad r27.z, r29.z, r19.w, r27.z
+ else
+ add r31.xyz, r28.xyzx, -r29.xyzx
+ mad r27.w, r31.x, r26.w, r26.x
+ add r30.x, r27.w, r28.x
+ mad r27.w, r31.y, r20.w, r26.y
+ add r30.y, r27.w, r28.y
+ mad r27.w, r31.z, r25.z, r26.z
+ add r27.z, r27.w, r28.z
+ endif
+ ld_raw r31.xyz, l(0), g2.xyzx
+ add r29.xyz, r28.xyzx, r31.xyzx
+ mov r28.xy, r30.xyxx
+ mov r28.z, r27.z
+ else
+ mov r29.xyz, r28.xyzx
+ endif
+ store_structured u0.xyzw, r27.x, l(0), r28.xyzw
+ store_structured u0.xyzw, r27.y, l(0), r29.xyzw
+ iadd r25.w, r25.w, l(1024)
+ endloop
+ ld_raw r19.z, l(44), g1.xxxx
+ sync_g_t
+ ld_raw r19.w, l(8), g1.xxxx
+ mul r19.w, r19.w, r19.w
+ movc r19.w, r0.z, r19.w, l(0)
+ ld_raw r20.x, l(0), g0.xxxx
+ ishl r20.y, r20.x, l(2)
+ mov r20.z, vThreadIDInGroup.x
+ loop
+ uge r20.w, r20.z, r20.y
+ breakc_nz r20.w
+ ushr r20.w, r20.z, l(2)
+ iadd r20.w, r0.y, r20.w
+ ld_structured_indexable(structured_buffer, stride=16)(mixed,mixed,mixed,mixed) r21.xyzw, r20.w, l(0), u0.xyzw
+ lt r22.x, l(0.000000), r21.w
+ if_nz r22.x
+ iadd r22.x, r19.z, r20.z
+ ld_structured_indexable(structured_buffer, stride=16)(mixed,mixed,mixed,mixed) r22.xyzw, r22.x, l(0), t13.xyzw
+ mad r21.xyzw, r22.xyzw, r19.wwww, r21.xyzw
+ store_structured u0.xyzw, r20.w, l(0), r21.xyzw
+ endif
+ iadd r20.z, r20.z, l(1024)
+ endloop
+ sync_g_t
+ ld_raw r19.z, l(28), g1.xxxx
+ ine r19.z, r19.z, l(-1)
+ if_nz r19.z
+ ld_raw r19.zw, l(36), g0.xxxy
+ ld_raw r20.xyz, l(24), g1.xyzx
+ ld_raw r20.w, l(0), g0.xxxx
+ mov r21.x, vThreadIDInGroup.x
+ loop
+ uge r21.y, r21.x, r20.w
+ breakc_nz r21.y
+ iadd r21.yz, r20.yyzy, r21.xxxx
+ ld_structured_indexable(structured_buffer, stride=16)(mixed,mixed,mixed,mixed) r22.xyzw, r21.y, l(0), t11.xyzw
+ ld_structured_indexable(structured_buffer, stride=16)(mixed,mixed,mixed,mixed) r23.xyzw, r21.z, l(0), t11.xyzw
+ add r23.xyzw, -r22.xyzw, r23.xyzw
+ mad r22.xyzw, r23.xyzw, r19.xxxx, r22.xyzw
+ iadd r21.y, r0.y, r21.x
+ ld_structured_indexable(structured_buffer, stride=16)(mixed,mixed,mixed,mixed) r23.xyzw, r21.y, l(0), u0.xyzw
+ add r22.xyz, r22.xyzx, -r23.xyzx
+ dp3 r21.z, r22.xyzx, r22.xyzx
+ add r21.z, r21.z, l(0.000000)
+ mad r21.w, r22.w, -r19.z, -r19.w
+ min r21.w, r21.w, l(0.000000)
+ rsq r21.z, r21.z
+ mad r21.z, r21.w, r21.z, l(1.000000)
+ max r21.z, r21.z, l(0.000000)
+ mul r21.z, r20.x, r21.z
+ mad r22.xyz, r21.zzzz, r22.xyzx, r23.xyzx
+ ge r21.z, r21.w, l(0.000000)
+ movc r22.w, r21.z, l(0), r23.w
+ store_structured u0.xyzw, r21.y, l(0), r22.xyzw
+ iadd r21.x, r21.x, l(1024)
+ endloop
+ endif
+ ld_raw r19.z, l(20), g1.xxxx
+ eq r19.w, r19.z, l(0.000000)
+ ld_raw r20.x, l(24), g0.xxxx
+ ieq r20.y, r20.x, l(0)
+ or r19.w, r19.w, r20.y
+ if_z r19.w
+ ld_raw r19.w, l(0), g0.xxxx
+ utof r20.y, r19.w
+ mul r19.z, r19.z, r20.y
+ utof r20.y, r20.x
+ div r19.z, r19.z, r20.y
+ ld_raw r20.yz, l(28), g0.xxyx
+ mov r20.w, vThreadIDInGroup.x
+ loop
+ uge r21.x, r20.w, r19.w
+ breakc_nz r21.x
+ iadd r21.x, r0.y, r20.w
+ ld_structured_indexable(structured_buffer, stride=16)(mixed,mixed,mixed,mixed) r22.xyzw, r21.x, l(0), u0.xyzw
+ mov r21.yzw, l(0,0,0,0)
+ mov r23.x, r20.w
+ loop
+ uge r23.y, r23.x, r20.x
+ breakc_nz r23.y
+ iadd r23.y, r20.y, r23.x
+ ld_structured_indexable(structured_buffer, stride=4)(mixed,mixed,mixed,mixed) r23.y, r23.y, l(0), t5.xxxx
+ and r23.z, r23.y, l(0x0000ffff)
+ iadd r23.z, r0.y, r23.z
+ ld_structured_indexable(structured_buffer, stride=16)(mixed,mixed,mixed,mixed) r24.xyz, r23.z, l(0), u0.xyzx
+ add r24.xyz, -r22.xyzx, r24.xyzx
+ mad r23.z, r24.x, r24.x, l(0.000000)
+ mad r23.z, r24.y, r24.y, r23.z
+ mad r23.z, r24.z, r24.z, r23.z
+ ushr r23.y, r23.y, l(16)
+ utof r23.y, r23.y
+ mul r23.y, r20.z, r23.y
+ rsq r23.z, r23.z
+ mad r23.y, -r23.y, r23.z, l(1.000000)
+ lt r23.z, l(0.000000), r23.y
+ mad r24.xyz, r24.xyzx, r23.yyyy, r21.yzwy
+ movc r21.yzw, r23.zzzz, r24.xxyz, r21.yyzw
+ iadd r23.x, r19.w, r23.x
+ endloop
+ mad r22.xyz, r21.yzwy, r19.zzzz, r22.xyzx
+ store_structured u0.xyzw, r21.x, l(0), r22.xyzw
+ iadd r20.w, r20.w, l(1024)
+ endloop
+ endif
+ mov r19.z, l(0)
+ loop
+ ld_raw r19.w, l(8), g0.xxxx
+ uge r19.w, r19.z, r19.w
+ breakc_nz r19.w
+ ld_raw r20.xyz, l(12), g0.xyzx
+ iadd r19.w, r19.z, r20.x
+ ld_structured_indexable(structured_buffer, stride=24)(mixed,mixed,mixed,mixed) r22.xyzw, r19.w, l(0), t3.xyzw
+ ld_structured_indexable(structured_buffer, stride=24)(mixed,mixed,mixed,mixed) r20.xw, r19.w, l(16), t3.xxxy
+ ld_raw r19.w, l(108), g1.xxxx
+ mul r22.xy, r19.wwww, r22.xyxx
+ exp r22.xy, r22.xyxx
+ add r22.xy, -r22.xyxx, l(1.000000, 1.000000, 0.000000, 0.000000)
+ iadd r20.y, r20.x, r20.y
+ ine r20.z, r20.z, l(-1)
+ ld_raw r21.x, l(20), g0.xxxx
+ iadd r20.x, r20.x, r21.x
+ sync_g_t
+ mov r21.x, vThreadIDInGroup.x
+ loop
+ uge r23.x, r21.x, r20.w
+ breakc_nz r23.x
+ iadd r23.x, r20.y, r21.x
+ ld_structured_indexable(structured_buffer, stride=8)(mixed,mixed,mixed,mixed) r23.xy, r23.x, l(0), t4.xyxx
+ and r23.z, r23.y, l(0x0000ffff)
+ ushr r23.y, r23.y, l(16)
+ iadd r23.yz, r0.yyyy, r23.yyzy
+ ld_structured_indexable(structured_buffer, stride=16)(mixed,mixed,mixed,mixed) r24.xyzw, r23.z, l(0), u0.xyzw
+ ld_structured_indexable(structured_buffer, stride=16)(mixed,mixed,mixed,mixed) r25.xyzw, r23.y, l(0), u0.xyzw
+ add r26.xyz, -r24.xyzx, r25.xyzx
+ mad r23.w, r26.x, r26.x, l(0.000000)
+ mad r23.w, r26.y, r26.y, r23.w
+ mad r23.w, r26.z, r26.z, r23.w
+ lt r26.w, l(0.000000), r23.x
+ rsq r23.w, r23.w
+ mad r23.x, r23.x, r23.w, l(-1.000000)
+ and r23.x, r23.x, r26.w
+ min r23.w, r22.w, -r23.x
+ max r23.w, r22.z, r23.w
+ mad r23.x, r22.y, r23.w, r23.x
+ iadd r23.w, r20.x, r21.x
+ ld_structured_indexable(structured_buffer, stride=4)(mixed,mixed,mixed,mixed) r23.w, r23.w, l(0), t16.xxxx
+ mul r23.w, r19.w, r23.w
+ exp r23.w, r23.w
+ add r23.w, -r23.w, l(1.000000)
+ movc r23.w, r20.z, r23.w, r22.x
+ mul r23.x, r23.w, r23.x
+ add r23.w, r24.w, r25.w
+ add r23.w, r23.w, l(0.000000)
+ div r23.x, r23.x, r23.w
+ mul r23.w, r23.x, -r24.w
+ mad r24.xyz, r23.wwww, r26.xyzx, r24.xyzx
+ store_structured u0.xyz, r23.z, l(0), r24.xyzx
+ mul r23.x, r23.x, r25.w
+ mad r23.xzw, r23.xxxx, r26.xxyz, r25.xxyz
+ store_structured u0.xyz, r23.y, l(0), r23.xzwx
+ iadd r21.x, r21.x, l(1024)
+ endloop
+ iadd r19.z, r19.z, l(1)
+ endloop
+ sync_g_t
+ ld_raw r19.z, l(36), g1.xxxx
+ ine r19.z, r19.z, l(-1)
+ if_nz r19.z
+ ld_raw r19.z, l(0), g0.xxxx
+ ld_raw r20.xy, l(36), g1.xyxx
+ mov r19.w, vThreadIDInGroup.x
+ loop
+ uge r20.z, r19.w, r19.z
+ breakc_nz r20.z
+ iadd r20.zw, r20.xxxy, r19.wwww
+ ld_structured_indexable(structured_buffer, stride=16)(mixed,mixed,mixed,mixed) r22.xyzw, r20.z, l(0), t12.xyzw
+ ld_structured_indexable(structured_buffer, stride=16)(mixed,mixed,mixed,mixed) r23.xyzw, r20.w, l(0), t12.xyzw
+ add r23.xyzw, -r22.xyzw, r23.xyzw
+ mad r22.xyzw, r23.xyzw, r19.xxxx, r22.xyzw
+ iadd r20.z, r0.y, r19.w
+ ld_structured_indexable(structured_buffer, stride=16)(mixed,mixed,mixed,mixed) r23.xyz, r20.z, l(0), u0.xyzx
+ add r22.xyz, r22.xyzx, -r23.xyzx
+ dp3 r20.w, r22.xyzx, r22.xyzx
+ add r20.w, r20.w, l(0.000000)
+ rsq r20.w, r20.w
+ mad r20.w, -r22.w, r20.w, l(1.000000)
+ min r20.w, r20.w, l(0.000000)
+ mad r22.xyz, r20.wwww, r22.xyzx, r23.xyzx
+ store_structured u0.xyz, r20.z, l(0), r22.xyzx
+ iadd r19.w, r19.w, l(1024)
+ endloop
+ sync_g_t
+ endif
+ if_nz r1.x
+ ld_raw r19.z, l(0), g0.xxxx
+ iadd r19.w, r19.z, l(-1)
+ umin r19.w, r2.y, r19.w
+ iadd r20.x, r0.y, r19.w
+ ld_structured_indexable(structured_buffer, stride=16)(mixed,mixed,mixed,mixed) r20.xyzw, r20.x, l(0), u0.xyzw
+ dp4 r20.x, r20.xyzw, icb[r4.x + 0].xyzw
+ mul r20.x, r0.w, r20.x
+ store_structured g4.x, vThreadIDInGroup.x, l(0), r20.x
+ mov r20.x, r19.w
+ loop
+ iadd r20.x, r20.x, l(32)
+ uge r20.y, r20.x, r19.z
+ breakc_nz r20.y
+ ld_structured r20.y, vThreadIDInGroup.x, l(0), g4.xxxx
+ iadd r20.z, r0.y, r20.x
+ ld_structured_indexable(structured_buffer, stride=16)(mixed,mixed,mixed,mixed) r22.xyzw, r20.z, l(0), u0.xyzw
+ dp4 r20.z, r22.xyzw, icb[r4.x + 0].xyzw
+ mul r20.z, r0.w, r20.z
+ max r20.y, r20.z, r20.y
+ store_structured g4.x, vThreadIDInGroup.x, l(0), r20.y
+ endloop
+ endif
+ sync_g_t
+ if_nz r1.y
+ ld_structured r19.z, vThreadIDInGroup.x, l(0), g4.xxxx
+ ld_structured r19.w, r3.x, l(0), g4.xxxx
+ max r19.z, r19.w, r19.z
+ store_structured g4.x, vThreadIDInGroup.x, l(0), r19.z
+ endif
+ sync_g_t
+ if_nz r1.y
+ ld_structured r19.z, vThreadIDInGroup.x, l(0), g4.xxxx
+ ld_structured r19.w, r3.y, l(0), g4.xxxx
+ max r19.z, r19.w, r19.z
+ store_structured g4.x, vThreadIDInGroup.x, l(0), r19.z
+ endif
+ sync_g_t
+ if_nz r1.y
+ ld_structured r19.z, vThreadIDInGroup.x, l(0), g4.xxxx
+ ld_structured r19.w, r3.z, l(0), g4.xxxx
+ max r19.z, r19.w, r19.z
+ store_structured g4.x, vThreadIDInGroup.x, l(0), r19.z
+ endif
+ sync_g_t
+ if_nz r1.y
+ ld_structured r19.z, vThreadIDInGroup.x, l(0), g4.xxxx
+ ld_structured r19.w, r3.w, l(0), g4.xxxx
+ max r19.z, r19.w, r19.z
+ store_structured g4.x, vThreadIDInGroup.x, l(0), r19.z
+ endif
+ sync_g_t
+ if_nz r1.y
+ ld_structured r19.z, vThreadIDInGroup.x, l(0), g4.xxxx
+ ld_structured r19.w, r6.x, l(0), g4.xxxx
+ max r19.z, r19.w, r19.z
+ store_structured g4.x, vThreadIDInGroup.x, l(0), r19.z
+ endif
+ sync_g_t
+ if_nz r2.x
+ ld_structured r19.z, vThreadIDInGroup.x, l(0), g4.xxxx
+ store_raw g1.x, r4.z, r19.z
+ endif
+ sync_g_t
+ ld_raw r19.z, l(16), g1.xxxx
+ utof r19.z, r19.z
+ div r19.y, r19.y, r19.z
+ ld_raw r19.z, l(60), g0.xxxx
+ if_nz r19.z
+ ld_raw r19.w, l(80), g0.xxxx
+ lt r19.w, l(0.000000), r19.w
+ ld_raw r20.x, l(0), g0.xxxx
+ ld_raw r20.y, l(64), g0.xxxx
+ ld_raw r20.zw, l(56), g1.xxxy
+ mov r21.x, vThreadIDInGroup.x
+ loop
+ uge r22.x, r21.x, r20.x
+ breakc_nz r22.x
+ iadd r22.x, r0.y, r21.x
+ ld_structured_indexable(structured_buffer, stride=16)(mixed,mixed,mixed,mixed) r23.xyzw, r22.x, l(0), u0.xyzw
+ mov r24.xyzw, l(0,0,0,0)
+ mov r22.y, l(0)
+ loop
+ uge r22.z, r22.y, r19.z
+ breakc_nz r22.z
+ iadd r22.z, r20.y, r22.y
+ ld_structured_indexable(structured_buffer, stride=4)(mixed,mixed,mixed,mixed) r22.z, r22.z, l(0), t8.xxxx
+ firstbit_lo r22.w, r22.z
+ iadd r25.xy, r20.zwzz, r22.wwww
+ ld_structured_indexable(structured_buffer, stride=16)(mixed,mixed,mixed,mixed) r26.xyzw, r25.x, l(0), t9.xyzw
+ ld_structured_indexable(structured_buffer, stride=16)(mixed,mixed,mixed,mixed) r25.xyzw, r25.y, l(0), t9.xyzw
+ add r25.xyzw, -r26.xyzw, r25.xyzw
+ mad r25.xyzw, r19.xxxx, r25.xyzw, r26.xyzw
+ dp3 r22.w, r23.xyzx, r25.xyzx
+ add r22.w, r25.w, r22.w
+ mov r26.xyz, r25.xyzx
+ mov r25.w, r22.z
+ mov r26.w, r22.w
+ loop
+ lt r27.x, r26.w, l(0.000000)
+ iadd r27.y, r25.w, l(-1)
+ and r25.w, r25.w, r27.y
+ ine r27.y, r25.w, l(0)
+ and r27.x, r27.y, r27.x
+ breakc_z r27.x
+ firstbit_lo r27.x, r25.w
+ iadd r27.xy, r20.zwzz, r27.xxxx
+ ld_structured_indexable(structured_buffer, stride=16)(mixed,mixed,mixed,mixed) r28.xyzw, r27.x, l(0), t9.xyzw
+ ld_structured_indexable(structured_buffer, stride=16)(mixed,mixed,mixed,mixed) r27.xyzw, r27.y, l(0), t9.xyzw
+ add r27.xyzw, -r28.xyzw, r27.xyzw
+ mad r27.xyzw, r19.xxxx, r27.xyzw, r28.xyzw
+ dp3 r28.x, r23.xyzx, r27.xyzx
+ add r27.w, r27.w, r28.x
+ lt r28.x, r26.w, r27.w
+ movc r26.xyzw, r28.xxxx, r27.xyzw, r26.xyzw
+ endloop
+ lt r22.z, r26.w, l(0.000000)
+ mad r25.xyz, -r26.xyzx, r26.wwww, r24.xyzx
+ iadd r25.w, r24.w, l(1)
+ movc r24.xyzw, r22.zzzz, r25.xyzw, r24.xyzw
+ iadd r22.y, r22.y, l(1)
+ endloop
+ if_nz r24.w
+ utof r22.y, r24.w
+ div r22.y, l(1.000000, 1.000000, 1.000000, 1.000000), r22.y
+ if_nz r19.w
+ iadd r22.z, r0.x, r21.x
+ ld_structured_indexable(structured_buffer, stride=16)(mixed,mixed,mixed,mixed) r25.xyz, r22.z, l(0), u0.xyzx
+ ld_raw r22.w, l(80), g0.xxxx
+ dp3 r25.w, r24.xyzx, r24.xyzx
+ add r27.x, r25.w, l(0.000000)
+ rsq r27.x, r27.x
+ mul r27.yzw, r24.xxyz, r27.xxxx
+ add r28.xyz, r23.xyzx, -r25.xyzx
+ dp3 r28.w, r28.xyzx, r27.yzwy
+ mad r27.yzw, -r28.wwww, r27.yyzw, r28.xxyz
+ dp3 r28.x, r27.yzwy, r27.yzwy
+ add r28.x, r28.x, l(0.000000)
+ rsq r28.x, r28.x
+ mul r22.w, -r22.w, r25.w
+ mul r22.w, r27.x, r22.w
+ mul r22.w, r22.y, r22.w
+ mul r22.w, r28.x, r22.w
+ max r22.w, r22.w, l(-1.000000)
+ mad r25.xyz, -r27.yzwy, r22.wwww, r25.xyzx
+ store_structured u0.xyz, r22.z, l(0), r25.xyzx
+ endif
+ mad r23.xyz, r24.xyzx, r22.yyyy, r23.xyzx
+ store_structured u0.xyzw, r22.x, l(0), r23.xyzw
+ endif
+ iadd r21.x, r21.x, l(1024)
+ endloop
+ endif
+ ftoi r19.z, r19.x
+ iadd r19.z, r0.y, r19.z
+ ld_structured_indexable(structured_buffer, stride=16)(mixed,mixed,mixed,mixed) r20.xyz, r19.z, l(0), u0.xyzx
+ ld_raw r19.w, l(68), g0.xxxx
+ ld_raw r20.w, l(64), g1.xxxx
+ mov r22.xyz, l(0,0,0,0)
+ mov r23.xy, l(0,340282346638528860000000000000000000000.000000,0,0)
+ mov r21.x, l(0)
+ loop
+ uge r22.w, r21.x, r19.w
+ breakc_nz r22.w
+ imad r22.w, l(3), r21.x, r20.w
+ ld_structured_indexable(structured_buffer, stride=12)(mixed,mixed,mixed,mixed) r25.xyz, r22.w, l(0), t10.xyzx
+ iadd r23.zw, r22.wwww, l(0, 0, 1, 2)
+ ld_structured_indexable(structured_buffer, stride=12)(mixed,mixed,mixed,mixed) r27.xyz, r23.z, l(0), t10.xyzx
+ ld_structured_indexable(structured_buffer, stride=12)(mixed,mixed,mixed,mixed) r28.xyz, r23.w, l(0), t10.xyzx
+ add r27.xyz, -r25.xyzx, r27.xyzx
+ add r28.xyz, -r25.xyzx, r28.xyzx
+ mul r29.xyz, r27.zxyz, r28.yzxy
+ mad r29.xyz, r27.yzxy, r28.zxyz, -r29.xyzx
+ dp3 r22.w, r29.xyzx, r29.xyzx
+ rsq r22.w, r22.w
+ mul r29.xyz, r22.wwww, r29.xyzx
+ dp3 r22.w, r27.xyzx, r28.xyzx
+ dp3 r23.z, r27.xyzx, r27.xyzx
+ dp3 r23.w, r28.xyzx, r28.xyzx
+ mul r25.w, r22.w, r22.w
+ mad r25.w, r23.z, r23.w, -r25.w
+ div r25.w, l(1.000000, 1.000000, 1.000000, 1.000000), r25.w
+ add r27.w, r23.w, r23.z
+ mad r27.w, r22.w, l(-2.000000), r27.w
+ div r27.w, l(1.000000, 1.000000, 1.000000, 1.000000), r27.w
+ div r28.w, l(1.000000, 1.000000, 1.000000, 1.000000), r23.z
+ div r29.w, l(1.000000, 1.000000, 1.000000, 1.000000), r23.w
+ add r25.xyz, r20.xyzx, -r25.xyzx
+ dp3 r30.x, r25.xyzx, r27.xyzx
+ dp3 r30.y, r25.xyzx, r28.xyzx
+ dp3 r31.x, r25.xyzx, r29.xyzx
+ mul r30.zw, r22.wwww, r30.yyyx
+ mad r30.z, r23.w, r30.x, -r30.z
+ mad r23.z, r23.z, r30.y, -r30.w
+ lt r30.w, l(0.000000), r23.z
+ mul r30.z, r25.w, r30.z
+ mul r28.w, r28.w, r30.x
+ movc r28.w, r30.w, r30.z, r28.w
+ lt r30.z, l(0.000000), r28.w
+ mul r23.z, r25.w, r23.z
+ mul r25.w, r29.w, r30.y
+ movc r23.z, r30.z, r23.z, r25.w
+ add r25.w, r23.z, r28.w
+ lt r25.w, l(1.000000), r25.w
+ add r22.w, -r22.w, r23.w
+ add r22.w, r30.x, r22.w
+ add r22.w, -r30.y, r22.w
+ mul r22.w, r27.w, r22.w
+ movc_sat r22.w, r25.w, r22.w, r28.w
+ add r23.w, -r22.w, l(1.000000)
+ min r23.z, r23.z, r23.w
+ max r23.z, r23.z, l(0.000000)
+ mul r28.xyz, r23.zzzz, r28.xyzx
+ mad r27.xyz, r27.xyzx, r22.wwww, r28.xyzx
+ add r25.xyz, r25.xyzx, -r27.xyzx
+ dp3 r22.w, r25.xyzx, r25.xyzx
+ lt r23.z, r31.x, l(0.000000)
+ mul r23.w, r22.w, l(1.000100)
+ movc r31.y, r23.z, r23.w, r22.w
+ lt r22.w, r31.y, r23.y
+ movc r22.xyz, r22.wwww, r29.xyzx, r22.xyzx
+ movc r23.xy, r22.wwww, r31.xyxx, r23.xyxx
+ iadd r21.x, r21.x, l(1)
+ endloop
+ lt r19.w, r23.x, l(0.000000)
+ if_nz r19.w
+ mad r20.xyz, -r22.xyzx, r23.xxxx, r20.xyzx
+ store_structured u0.xyz, r19.z, l(0), r20.xyzx
+ endif
+ ld_raw r19.z, l(72), g0.xxxx
+ if_nz r19.z
+ ld_raw r19.z, l(80), g0.xxxx
+ lt r19.w, l(0.000000), r19.z
+ ld_raw r20.x, l(0), g0.xxxx
+ ld_raw r20.yz, l(48), g1.xxyx
+ ld_raw r23.yz, l(44), g0.xxyx
+ mov r20.w, vThreadIDInGroup.x
+ loop
+ uge r21.x, r20.w, r20.x
+ breakc_nz r21.x
+ iadd r21.x, r0.y, r20.w
+ ld_structured_indexable(structured_buffer, stride=16)(mixed,mixed,mixed,mixed) r25.xyzw, r21.x, l(0), u0.xyzw
+ iadd r22.w, r0.x, r20.w
+ ld_structured_indexable(structured_buffer, stride=16)(mixed,mixed,mixed,mixed) r27.xyz, r22.w, l(0), u0.xyzx
+ mov r28.xyz, r25.xyzx
+ mov r29.xyz, l(0,0,0,0)
+ mov r30.xyzw, l(0,0,0,0)
+ mov r23.w, l(0)
+ loop
+ uge r27.w, r23.w, r23.y
+ breakc_nz r27.w
+ iadd r27.w, r23.w, r23.z
+ ld_structured_indexable(structured_buffer, stride=8)(mixed,mixed,mixed,mixed) r31.xy, r27.w, l(0), t6.xyxx
+ iadd r31.xyzw, r20.yzyz, r31.xxyy
+ ld_structured_indexable(structured_buffer, stride=16)(mixed,mixed,mixed,mixed) r32.xyzw, r31.x, l(0), t7.xyzw
+ ld_structured_indexable(structured_buffer, stride=16)(mixed,mixed,mixed,mixed) r33.xyzw, r31.y, l(0), t7.xyzw
+ ld_structured_indexable(structured_buffer, stride=16)(mixed,mixed,mixed,mixed) r34.xyzw, r31.z, l(0), t7.xyzw
+ ld_structured_indexable(structured_buffer, stride=16)(mixed,mixed,mixed,mixed) r31.xyzw, r31.w, l(0), t7.xyzw
+ add r35.xyzw, -r32.xyzw, r33.xyzw
+ mad r36.xyzw, r19.yyyy, r35.xyzw, r32.xyzw
+ add r37.xyzw, -r34.xyzw, r31.xyzw
+ mad r38.xyzw, r19.yyyy, r37.xyzw, r34.xyzw
+ max r36.w, r36.w, l(0.000000)
+ max r38.w, r38.w, l(0.000000)
+ add r39.xyzw, -r36.xyzw, r38.xyzw
+ mul r39.xyzw, r39.xyzw, l(0.500000, 0.500000, 0.500000, 0.500000)
+ dp3 r27.w, r39.xyzx, r39.xyzx
+ ge r28.w, l(0.000000), r27.w
+ if_nz r28.w
+ iadd r28.w, r23.w, l(1)
+ mov r23.w, r28.w
+ continue
+ endif
+ mad r35.xyzw, r19.xxxx, r35.xyzw, r32.xyzw
+ mad r37.xyzw, r19.xxxx, r37.xyzw, r34.xyzw
+ max r35.w, r35.w, l(0.000000)
+ max r37.w, r37.w, l(0.000000)
+ add r40.xyzw, -r35.xyzw, r37.xyzw
+ mul r41.xyzw, r40.xyzw, l(0.500000, 0.500000, 0.500000, 0.500000)
+ dp3 r28.w, r41.xyzx, r41.xyzx
+ mad r29.w, -r41.w, r41.w, r28.w
+ ge r31.w, l(0.000000), r29.w
+ if_nz r31.w
+ iadd r31.w, r23.w, l(1)
+ mov r23.w, r31.w
+ continue
+ endif
+ add r40.xyz, r36.zxyz, r38.zxyz
+ mad r40.xyz, -r40.xyzx, l(0.500000, 0.500000, 0.500000, 0.000000), r27.zxyz
+ mad r31.w, -r39.w, r39.w, r27.w
+ rsq r27.w, r27.w
+ rsq r31.w, r31.w
+ mul r39.xyz, r27.wwww, r39.xyzx
+ mul r27.w, r31.w, r39.w
+ mul r42.xyz, r39.yzxy, r40.xyzx
+ mad r40.xyz, r40.zxyz, r39.zxyz, -r42.xyzx
+ dp3 r31.w, r27.xyzx, r39.xyzx
+ add r39.xyz, r35.xyzx, r37.xyzx
+ mad r42.xyz, -r39.xyzx, l(0.500000, 0.500000, 0.500000, 0.000000), r28.xyzx
+ rsq r32.w, r28.w
+ rsq r29.w, r29.w
+ mul r28.w, r28.w, r32.w
+ mad r33.w, r40.w, l(0.500000), r35.w
+ mul r33.w, r29.w, r33.w
+ mul r33.w, r28.w, r33.w
+ mul r43.xyz, r32.wwww, r41.xyzx
+ mul r44.xyz, r42.zxyz, r43.yzxy
+ mad r44.xyz, r42.yzxy, r43.zxyz, -r44.xyzx
+ dp3 r34.w, r28.xyzx, r43.xyzx
+ dp3 r35.w, r44.xyzx, r44.xyzx
+ add r45.x, r35.w, l(0.000000)
+ mad r31.w, r31.w, r27.w, r33.w
+ max r31.w, r31.w, l(0.000000)
+ mul r29.w, r29.w, r41.w
+ mad r34.w, r34.w, r29.w, r33.w
+ max r45.y, r34.w, l(0.000000)
+ dp3 r34.w, r40.xyzx, r40.xyzx
+ mad r34.w, -r40.x, r40.x, r34.w
+ mad r34.w, -r31.w, r31.w, r34.w
+ dp3 r35.w, r40.xyzx, r44.xyzx
+ mad r35.w, -r31.w, r45.y, r35.w
+ mad r36.w, -r45.y, r45.y, r45.x
+ mul r37.w, r34.w, r36.w
+ mad r37.w, r35.w, r35.w, -r37.w
+ sqrt r38.w, r37.w
+ add r34.w, -r34.w, r35.w
+ add r35.w, r35.w, -r36.w
+ add r35.w, r34.w, r35.w
+ add r36.w, r38.w, r34.w
+ min r36.w, r36.w, l(0.000000)
+ div r36.w, r36.w, r35.w
+ lt r39.w, r36.w, l(1.000000)
+ lt r40.x, r34.w, r38.w
+ and r39.w, r39.w, r40.x
+ mul r34.w, r34.w, r35.w
+ add r40.x, -r31.w, r45.y
+ mad r31.w, r34.w, r40.x, r31.w
+ mul r31.w, r31.w, r31.w
+ mul r31.w, r35.w, r31.w
+ mul r31.w, r31.w, l(-0.360000)
+ lt r31.w, r31.w, r37.w
+ and r31.w, r31.w, r39.w
+ lt r34.w, r35.w, l(-0.000000)
+ and r31.w, r31.w, r34.w
+ add r40.xyz, r27.xyzx, -r28.xyzx
+ mad r44.xyz, -r40.xyzx, r36.wwww, r27.xyzx
+ mul r46.xyz, r28.wwww, r41.xyzx
+ add r36.xyz, -r36.xyzx, r38.xyzx
+ mul r36.xyz, r36.xyzx, l(0.500000, 0.500000, 0.500000, 0.000000)
+ dp3 r34.w, r36.xyzx, r36.xyzx
+ rsq r37.w, r34.w
+ mul r34.w, r34.w, r37.w
+ mad r36.xyz, -r36.xyzx, r34.wwww, r46.xyzx
+ add r34.w, -r36.w, l(1.000000)
+ mad r38.xyz, -r36.xyzx, r34.wwww, r46.xyzx
+ mul r36.w, r29.w, r36.w
+ mad r27.w, r27.w, r34.w, r36.w
+ dp3 r36.w, r38.xyzx, r38.xyzx
+ rsq r36.w, r36.w
+ dp3 r37.w, r44.xyzx, r38.xyzx
+ mul r37.w, r36.w, r37.w
+ dp3 r38.x, r44.xyzx, r44.xyzx
+ mad r38.x, -r37.w, r37.w, r38.x
+ lt r38.y, l(0.000000), r38.x
+ rsq r38.z, r38.x
+ and r38.y, r38.z, r38.y
+ mul r27.w, r27.w, r38.x
+ mad r27.w, r27.w, r38.y, r37.w
+ mul r27.w, r36.w, r27.w
+ lt r36.w, |r27.w|, l(1.000000)
+ mad r36.xyz, r36.xyzx, r27.wwww, r40.xyzx
+ mul r27.w, r34.w, r35.w
+ div r27.w, r38.w, r27.w
+ add r27.w, -r27.w, l(1.000000)
+ div r27.w, r34.w, r27.w
+ mad r36.xyz, r36.xyzx, r27.wwww, r42.xyzx
+ mad r38.xyz, r39.xyzx, l(0.500000, 0.500000, 0.500000, 0.000000), r36.xyzx
+ movc r38.xyz, r36.wwww, r38.xyzx, r28.xyzx
+ movc r38.xyz, r31.wwww, r38.xyzx, r28.xyzx
+ mad r39.xyz, -r39.xyzx, l(0.500000, 0.500000, 0.500000, 0.000000), r38.xyzx
+ dp3 r27.w, r39.xyzx, r43.xyzx
+ mad r34.w, r27.w, r29.w, r33.w
+ max r34.w, r34.w, l(0.000000)
+ dp3 r35.w, r39.xyzx, r39.xyzx
+ mad r35.w, -r27.w, r27.w, r35.w
+ mul r37.w, r34.w, r34.w
+ lt r37.w, r37.w, r35.w
+ if_nz r37.w
+ iadd r37.w, r23.w, l(1)
+ mov r28.xyz, r38.xyzx
+ mov r23.w, r37.w
+ continue
+ endif
+ mul r32.w, r32.w, r41.w
+ mad r32.w, -r32.w, r32.w, l(1.000000)
+ max r35.w, r35.w, l(0.000000)
+ rsq r37.w, r35.w
+ mul r35.w, r29.w, r35.w
+ mad r35.w, r35.w, r37.w, r27.w
+ lt r28.w, |r35.w|, r28.w
+ mad r39.xyz, -r35.wwww, r41.xyzx, r39.xyzx
+ mul r34.w, r34.w, r37.w
+ mad r32.w, r34.w, r32.w, -r32.w
+ mad r39.xyz, r39.xyzx, r32.wwww, r29.xyzx
+ add r33.xyz, -r32.xyzx, r33.xyzx
+ mad r32.xyz, r19.yyyy, r33.xyzx, r32.xyzx
+ add r31.xyz, -r34.xyzx, r31.xyzx
+ mad r31.xyz, r19.yyyy, r31.xyzx, r34.xyzx
+ mad r27.w, r27.w, l(0.500000), l(0.500000)
+ add r32.xyz, -r32.xyzx, r35.xyzx
+ add r31.xyz, -r31.xyzx, r37.xyzx
+ add r31.xyz, -r32.xyzx, r31.xyzx
+ mad r31.xyz, r27.wwww, r31.xyzx, r32.xyzx
+ add r31.xyz, r30.xyzx, r31.xyzx
+ movc r32.xyz, r19.wwww, r31.xyzx, r30.xyzx
+ iadd r32.w, r30.w, l(1)
+ movc r31.xyz, r28.wwww, r39.xyzx, r29.xyzx
+ movc r32.xyzw, r28.wwww, r32.xyzw, r30.xyzw
+ dp3 r27.w, r36.xyzx, r41.xyzx
+ mad r28.w, r27.w, r29.w, r33.w
+ max r33.y, r28.w, l(0.000000)
+ dp3 r28.w, r36.xyzx, r36.xyzx
+ mad r33.x, -r27.w, r27.w, r28.w
+ movc r33.xy, r36.wwww, r33.xyxx, r45.xyxx
+ movc r33.xy, r31.wwww, r33.xyxx, r45.xyxx
+ mul r27.w, r33.y, r33.y
+ ge r27.w, r33.x, r27.w
+ if_nz r27.w
+ iadd r27.w, r23.w, l(1)
+ mov r28.xyz, r38.xyzx
+ mov r29.xyz, r31.xyzx
+ mov r30.xyzw, r32.xyzw
+ mov r23.w, r27.w
+ continue
+ endif
+ mov r28.xyz, r38.xyzx
+ mov r29.xyz, r31.xyzx
+ mov r30.xyzw, r32.xyzw
+ iadd r23.w, r23.w, l(1)
+ endloop
+ ld_raw r23.w, l(52), g0.xxxx
+ mov r31.xyz, r28.xyzx
+ mov r32.xyz, r29.xyzx
+ mov r33.xyzw, r30.xyzw
+ mov r27.w, l(0)
+ loop
+ uge r28.w, r27.w, r23.w
+ breakc_nz r28.w
+ iadd r34.xy, r20.yzyy, r27.wwww
+ ld_structured_indexable(structured_buffer, stride=16)(mixed,mixed,mixed,mixed) r35.xyzw, r34.x, l(0), t7.xyzw
+ ld_structured_indexable(structured_buffer, stride=16)(mixed,mixed,mixed,mixed) r34.xyzw, r34.y, l(0), t7.xyzw
+ add r34.xyzw, -r35.xyzw, r34.xyzw
+ mad r36.xyzw, r19.xxxx, r34.xyzw, r35.xyzw
+ max r28.w, r36.w, l(0.000000)
+ mad r34.xyz, r19.yyyy, r34.xyzx, r35.xyzx
+ add r35.xyz, r31.xyzx, -r36.xyzx
+ add r37.xyz, r27.xyzx, -r34.xyzx
+ dp3 r29.w, r35.xyzx, r35.xyzx
+ add r35.w, r29.w, l(0.000000)
+ dp3 r29.w, r37.xyzx, r37.xyzx
+ mul r31.w, r28.w, r28.w
+ mad r29.w, -r28.w, r28.w, r29.w
+ dp3 r32.w, r37.xyzx, r35.xyzx
+ mad r32.w, -r28.w, r28.w, r32.w
+ mad r34.w, -r28.w, r28.w, r35.w
+ mul r36.w, r29.w, r34.w
+ mad r36.w, r32.w, r32.w, -r36.w
+ sqrt r37.w, r36.w
+ add r29.w, -r29.w, r32.w
+ add r32.w, r32.w, -r34.w
+ add r32.w, r29.w, r32.w
+ add r34.w, r37.w, r29.w
+ min r34.w, r34.w, l(0.000000)
+ div r34.w, r34.w, r32.w
+ lt r38.x, r34.w, l(1.000000)
+ lt r29.w, r29.w, r37.w
+ and r29.w, r29.w, r38.x
+ mul r31.w, r31.w, r32.w
+ mul r31.w, r31.w, l(-0.360000)
+ lt r31.w, r31.w, r36.w
+ and r29.w, r29.w, r31.w
+ lt r31.w, r32.w, l(-0.000000)
+ and r29.w, r29.w, r31.w
+ add r37.xyz, -r35.xyzx, r37.xyzx
+ add r31.w, -r34.w, l(1.000000)
+ mul r32.w, r31.w, r32.w
+ div r32.w, r37.w, r32.w
+ add r32.w, -r32.w, l(1.000000)
+ div r31.w, r31.w, r32.w
+ mad r37.xyz, r37.xyzx, r31.wwww, r35.xyzx
+ add r38.xyz, r36.xyzx, r37.xyzx
+ dp3 r31.w, r37.xyzx, r37.xyzx
+ add r37.w, r31.w, l(0.000000)
+ movc r31.xyz, r29.wwww, r38.xyzx, r31.xyzx
+ movc r35.xyzw, r29.wwww, r37.xyzw, r35.xyzw
+ rsq r29.w, r35.w
+ mul r31.w, r28.w, r29.w
+ lt r31.w, l(1.000000), r31.w
+ mad r28.w, r29.w, r28.w, l(-1.000000)
+ mad r35.xyz, r35.xyzx, r28.wwww, r32.xyzx
+ add r34.xyz, -r34.xyzx, r36.xyzx
+ add r34.xyz, r33.xyzx, r34.xyzx
+ movc r34.xyz, r19.wwww, r34.xyzx, r33.xyzx
+ iadd r34.w, r33.w, l(1)
+ movc r32.xyz, r31.wwww, r35.xyzx, r32.xyzx
+ movc r33.xyzw, r31.wwww, r34.xyzw, r33.xyzw
+ iadd r27.w, r27.w, l(1)
+ endloop
+ if_nz r33.w
+ utof r23.w, r33.w
+ div r23.w, l(1.000000, 1.000000, 1.000000, 1.000000), r23.w
+ if_nz r19.w
+ ld_raw r27.w, l(80), g0.xxxx
+ dp3 r28.w, r32.xyzx, r32.xyzx
+ add r29.w, r28.w, l(0.000000)
+ rsq r29.w, r29.w
+ mul r31.xyz, r29.wwww, r32.xyzx
+ add r34.xyz, r25.xyzx, -r27.xyzx
+ mad r34.xyz, -r33.xyzx, r23.wwww, r34.xyzx
+ dp3 r31.w, r34.xyzx, r31.xyzx
+ mad r31.xyz, -r31.wwww, r31.xyzx, r34.xyzx
+ dp3 r31.w, r31.xyzx, r31.xyzx
+ add r31.w, r31.w, l(0.000000)
+ rsq r31.w, r31.w
+ mul r27.w, -r27.w, r28.w
+ mul r27.w, r29.w, r27.w
+ mul r27.w, r23.w, r27.w
+ mul r27.w, r31.w, r27.w
+ max r27.w, r27.w, l(-1.000000)
+ mad r27.xyz, -r31.xyzx, r27.wwww, r27.xyzx
+ store_structured u0.xyz, r22.w, l(0), r27.xyzx
+ endif
+ mad r25.xyz, r32.xyzx, r23.wwww, r25.xyzx
+ store_structured u0.xyzw, r21.x, l(0), r25.xyzw
+ endif
+ iadd r20.w, r20.w, l(1024)
+ endloop
+ else
+ ld_raw r19.zw, l(76), g0.xxxy
+ lt r19.zw, l(0.000000, 0.000000, 0.000000, 0.000000), r19.wwwz
+ ld_raw r20.x, l(0), g0.xxxx
+ ld_raw r20.yz, l(48), g1.xxyx
+ ld_raw r20.w, l(80), g0.xxxx
+ lt r21.x, l(0.000000), r20.w
+ ld_raw r23.yz, l(44), g0.xxyx
+ mov r22.w, vThreadIDInGroup.x
+ loop
+ uge r23.w, r22.w, r20.x
+ breakc_nz r23.w
+ iadd r23.w, r0.y, r22.w
+ ld_structured_indexable(structured_buffer, stride=16)(mixed,mixed,mixed,mixed) r25.xyzw, r23.w, l(0), u0.xyzw
+ mov r31.xyzw, l(0,0,0,0)
+ mov r27.xyzw, l(0,0,0,0)
+ loop
+ uge r28.w, r27.w, r23.y
+ breakc_nz r28.w
+ iadd r28.w, r23.z, r27.w
+ ld_structured_indexable(structured_buffer, stride=8)(mixed,mixed,mixed,mixed) r34.xy, r28.w, l(0), t6.xyxx
+ iadd r34.xyzw, r20.yzyz, r34.xxyy
+ ld_structured_indexable(structured_buffer, stride=16)(mixed,mixed,mixed,mixed) r35.xyzw, r34.x, l(0), t7.xyzw
+ ld_structured_indexable(structured_buffer, stride=16)(mixed,mixed,mixed,mixed) r36.xyzw, r34.y, l(0), t7.xyzw
+ add r37.xyzw, -r35.xyzw, r36.xyzw
+ mad r37.xyzw, r19.xxxx, r37.xyzw, r35.xyzw
+ ld_structured_indexable(structured_buffer, stride=16)(mixed,mixed,mixed,mixed) r38.xyzw, r34.z, l(0), t7.xyzw
+ ld_structured_indexable(structured_buffer, stride=16)(mixed,mixed,mixed,mixed) r34.xyzw, r34.w, l(0), t7.xyzw
+ add r39.xyzw, -r38.xyzw, r34.xyzw
+ mad r39.xyzw, r19.xxxx, r39.xyzw, r38.xyzw
+ max r37.w, r37.w, l(0.000000)
+ max r39.w, r39.w, l(0.000000)
+ add r40.xyzw, -r37.xyzw, r39.xyzw
+ mul r41.xyzw, r40.xyzw, l(0.500000, 0.500000, 0.500000, 0.500000)
+ dp3 r28.w, r41.xyzx, r41.xyzx
+ mad r29.w, -r41.w, r41.w, r28.w
+ ge r32.w, l(0.000000), r29.w
+ if_nz r32.w
+ iadd r32.w, r27.w, l(1)
+ mov r27.w, r32.w
+ continue
+ endif
+ rsq r32.w, r28.w
+ rsq r29.w, r29.w
+ mul r28.w, r28.w, r32.w
+ add r40.xyz, r37.xyzx, r39.xyzx
+ mad r34.w, r40.w, l(0.500000), r37.w
+ mul r34.w, r29.w, r34.w
+ mul r34.w, r28.w, r34.w
+ mul r41.xyz, r32.wwww, r41.xyzx
+ mul r29.w, r29.w, r41.w
+ mad r40.xyz, -r40.xyzx, l(0.500000, 0.500000, 0.500000, 0.000000), r25.xyzx
+ dp3 r35.w, r40.xyzx, r41.xyzx
+ mad r34.w, r35.w, r29.w, r34.w
+ max r34.w, r34.w, l(0.000000)
+ dp3 r36.w, r40.xyzx, r40.xyzx
+ mad r36.w, -r35.w, r35.w, r36.w
+ mul r37.w, r34.w, r34.w
+ lt r37.w, r37.w, r36.w
+ if_nz r37.w
+ iadd r37.w, r27.w, l(1)
+ mov r27.w, r37.w
+ continue
+ endif
+ mul r32.w, r32.w, r41.w
+ mad r32.w, -r32.w, r32.w, l(1.000000)
+ max r36.w, r36.w, l(0.000000)
+ rsq r37.w, r36.w
+ mul r29.w, r29.w, r36.w
+ mad r29.w, r29.w, r37.w, r35.w
+ lt r28.w, |r29.w|, r28.w
+ mad r40.xyz, -r29.wwww, r41.xyzx, r40.xyzx
+ mul r29.w, r34.w, r37.w
+ mad r29.w, r29.w, r32.w, -r32.w
+ mad r40.xyz, r40.xyzx, r29.wwww, r27.xyzx
+ add r36.xyz, -r35.xyzx, r36.xyzx
+ mad r35.xyz, r19.yyyy, r36.xyzx, r35.xyzx
+ add r34.xyz, -r38.xyzx, r34.xyzx
+ mad r34.xyz, r19.yyyy, r34.xyzx, r38.xyzx
+ mad r29.w, r35.w, l(0.500000), l(0.500000)
+ add r35.xyz, -r35.xyzx, r37.xyzx
+ add r34.xyz, -r34.xyzx, r39.xyzx
+ add r34.xyz, -r35.xyzx, r34.xyzx
+ mad r34.xyz, r29.wwww, r34.xyzx, r35.xyzx
+ add r34.xyz, r31.xyzx, r34.xyzx
+ movc r34.xyz, r21.xxxx, r34.xyzx, r31.xyzx
+ iadd r34.w, r31.w, l(1)
+ movc r27.xyz, r28.wwww, r40.xyzx, r27.xyzx
+ movc r31.xyzw, r28.wwww, r34.xyzw, r31.xyzw
+ iadd r27.w, r27.w, l(1)
+ endloop
+ ld_raw r27.w, l(52), g0.xxxx
+ mov r34.xyz, r27.xyzx
+ mov r35.xyzw, r31.xyzw
+ mov r28.w, l(0)
+ loop
+ uge r29.w, r28.w, r27.w
+ breakc_nz r29.w
+ iadd r36.xy, r20.yzyy, r28.wwww
+ ld_structured_indexable(structured_buffer, stride=16)(mixed,mixed,mixed,mixed) r37.xyzw, r36.x, l(0), t7.xyzw
+ ld_structured_indexable(structured_buffer, stride=16)(mixed,mixed,mixed,mixed) r36.xyzw, r36.y, l(0), t7.xyzw
+ add r36.xyzw, -r37.xyzw, r36.xyzw
+ mad r38.xyzw, r19.xxxx, r36.xyzw, r37.xyzw
+ max r29.w, r38.w, l(0.000000)
+ add r39.xyz, r25.xyzx, -r38.xyzx
+ dp3 r32.w, r39.xyzx, r39.xyzx
+ add r32.w, r32.w, l(0.000000)
+ rsq r32.w, r32.w
+ mul r34.w, r29.w, r32.w
+ lt r34.w, l(1.000000), r34.w
+ mad r29.w, r32.w, r29.w, l(-1.000000)
+ mad r39.xyz, r39.xyzx, r29.wwww, r34.xyzx
+ mad r36.xyz, r19.yyyy, r36.xyzx, r37.xyzx
+ add r36.xyz, -r36.xyzx, r38.xyzx
+ add r36.xyz, r35.xyzx, r36.xyzx
+ movc r36.xyz, r21.xxxx, r36.xyzx, r35.xyzx
+ iadd r36.w, r35.w, l(1)
+ movc r34.xyz, r34.wwww, r39.xyzx, r34.xyzx
+ movc r35.xyzw, r34.wwww, r36.xyzw, r35.xyzw
+ iadd r28.w, r28.w, l(1)
+ endloop
+ if_nz r35.w
+ utof r27.w, r35.w
+ div r27.w, l(1.000000, 1.000000, 1.000000, 1.000000), r27.w
+ if_nz r19.z
+ iadd r28.w, r0.x, r22.w
+ ld_structured_indexable(structured_buffer, stride=16)(mixed,mixed,mixed,mixed) r36.xyz, r28.w, l(0), u0.xyzx
+ ld_raw r29.w, l(80), g0.xxxx
+ dp3 r32.w, r34.xyzx, r34.xyzx
+ add r34.w, r32.w, l(0.000000)
+ rsq r34.w, r34.w
+ mul r37.xyz, r34.wwww, r34.xyzx
+ add r38.xyz, r25.xyzx, -r36.xyzx
+ mad r38.xyz, -r35.xyzx, r27.wwww, r38.xyzx
+ dp3 r36.w, r38.xyzx, r37.xyzx
+ mad r37.xyz, -r36.wwww, r37.xyzx, r38.xyzx
+ dp3 r36.w, r37.xyzx, r37.xyzx
+ add r36.w, r36.w, l(0.000000)
+ rsq r36.w, r36.w
+ mul r29.w, -r29.w, r32.w
+ mul r29.w, r34.w, r29.w
+ mul r29.w, r27.w, r29.w
+ mul r29.w, r36.w, r29.w
+ max r29.w, r29.w, l(-1.000000)
+ mad r36.xyz, -r37.xyzx, r29.wwww, r36.xyzx
+ store_structured u0.xyz, r28.w, l(0), r36.xyzx
+ endif
+ mad r36.xyz, r34.xyzx, r27.wwww, r25.xyzx
+ dp3 r25.x, r34.xyzx, r34.xyzx
+ ld_raw r25.y, l(76), g0.xxxx
+ mad r25.x, r25.y, r25.x, l(1.000000)
+ div r25.x, r25.w, r25.x
+ movc r36.w, r19.w, r25.x, r25.w
+ store_structured u0.xyzw, r23.w, l(0), r36.xyzw
+ endif
+ iadd r22.w, r22.w, l(1024)
+ endloop
+ endif
+ ld_raw r19.x, l(84), g0.xxxx
+ ld_raw r19.y, l(72), g1.xxxx
+ min r19.x, r19.y, r19.x
+ lt r19.x, l(0.000000), r19.x
+ if_nz r19.x
+ ld_raw r19.x, l(88), g0.xxxx
+ ld_raw r19.y, l(0), g0.xxxx
+ if_nz r1.z
+ ld_raw r19.zw, r4.w, g1.xxxy
+ add r20.x, r19.w, r19.z
+ mad r19.zw, r20.xxxx, l(0.000000, 0.000000, 0.000100, 0.000100), r19.wwwz
+ add r19.w, r19.z, r19.w
+ store_structured g6.x, vThreadIDInGroup.x, l(0), r19.w
+ else
+ mov r19.zw, l(0,0,0,0)
+ endif
+ sync_g_t
+ if_nz r1.z
+ ld_structured r20.x, l(0), l(0), g6.xxxx
+ ld_structured r20.y, l(1), l(0), g6.xxxx
+ lt r20.x, r20.y, r20.x
+ and r20.y, r20.x, l(1)
+ ld_structured r20.y, r20.y, l(0), g6.xxxx
+ ld_structured r20.z, l(2), l(0), g6.xxxx
+ lt r20.y, r20.z, r20.y
+ movc r20.x, r20.y, l(-2), r20.x
+ iadd r20.x, r20.x, vThreadIDInGroup.x
+ ushr r20.y, r20.x, l(30)
+ iadd r20.x, r20.y, r20.x
+ div r19.w, l(127.000000), r19.w
+ ld_raw r20.y, l(84), g0.xxxx
+ div r20.y, l(1.000000, 1.000000, 1.000000, 1.000000), r20.y
+ min r19.w, r19.w, r20.y
+ ishl r20.x, r20.x, l(2)
+ iadd r20.yz, r20.xxxx, l(0, 12, 24, 0)
+ store_raw g5.x, r20.y, r19.w
+ mul r19.z, r19.z, r19.w
+ store_raw g5.x, r20.x, r19.z
+ store_raw g5.x, r20.z, vThreadIDInGroup.x
+ endif
+ sync_g_t
+ ld_raw r19.z, l(100), g0.xxxx
+ ishl r19.w, r19.x, l(1)
+ iadd r19.z, r19.w, r19.z
+ ld_raw r19.w, l(116), g1.xxxx
+ if_nz r19.w
+ mov r19.w, vThreadIDInGroup.x
+ loop
+ ige r20.x, r19.w, l(0x00004103)
+ breakc_nz r20.x
+ iadd r20.x, r19.w, r19.z
+ store_structured u2.x, r20.x, l(0), l(-1)
+ iadd r19.w, r19.w, l(1024)
+ endloop
+ endif
+ ld_raw r20.xy, l(16), g5.xyxx
+ ld_raw r20.zw, l(4), g5.xxxy
+ ld_raw r23.yz, l(28), g5.xxyx
+ ld_raw r19.w, l(92), g0.xxxx
+ ine r21.x, r19.w, l(-1)
+ ld_raw r22.w, l(100), g0.xxxx
+ mov r23.w, vThreadIDInGroup.x
+ loop
+ ige r25.x, r23.w, r19.x
+ breakc_nz r25.x
+ iadd r25.x, r19.w, r23.w
+ ld_structured_indexable(structured_buffer, stride=4)(mixed,mixed,mixed,mixed) r25.x, r25.x, l(0), t15.xxxx
+ movc r25.x, r21.x, r25.x, r23.w
+ iadd r25.y, r0.y, r25.x
+ ld_structured_indexable(structured_buffer, stride=16)(mixed,mixed,mixed,mixed) r36.xyzw, r25.y, l(0), u0.xyzw
+ dp4 r25.y, r36.xyzw, icb[r23.y + 0].xyzw
+ mad r25.y, r25.y, r20.x, r20.z
+ min r25.y, r25.y, l(127.500000)
+ max r25.y, r25.y, l(0.000000)
+ dp4 r25.z, r36.xyzw, icb[r23.z + 0].xyzw
+ mad r25.z, r25.z, r20.y, r20.w
+ min r25.z, r25.z, l(127.500000)
+ max r25.z, r25.z, l(0.000000)
+ ftoi r25.yz, r25.yyzy
+ iadd r25.w, r22.w, r23.w
+ ishl r25.z, r25.z, l(23)
+ imad r25.y, r25.y, l(0x00010000), r25.z
+ iadd r25.y, r25.y, l(0x00810000)
+ or r25.x, r25.x, r25.y
+ store_structured u2.x, r25.w, l(0), r25.x
+ iadd r23.w, r23.w, l(1024)
+ endloop
+ sync_g_t
+ ld_raw r25.xy, l(96), g0.xyxx
+ iadd r19.w, r19.x, r25.y
+ mov r21.x, vThreadIDInGroup.x
+ loop
+ ige r22.w, r21.x, r19.y
+ breakc_nz r22.w
+ iadd r22.w, r21.x, r25.x
+ iadd r23.w, r0.y, r21.x
+ ld_structured_indexable(structured_buffer, stride=16)(mixed,mixed,mixed,mixed) r36.xyzw, r23.w, l(0), u0.xyzw
+ store_structured u1.xyzw, r22.w, l(0), r36.xyzw
+ iadd r21.x, r21.x, l(1024)
+ endloop
+ sync_g_t
+ iadd r21.x, r19.x, l(31)
+ ubfe r22.w, l(5), l(5), r21.x
+ ushr r21.x, r21.x, l(10)
+ ult r23.w, r4.y, r22.w
+ and r25.x, r23.w, l(1)
+ iadd r25.x, r21.x, r25.x
+ movc r25.z, r23.w, l(0), l(1)
+ imul null, r25.z, r22.w, r25.z
+ imad r25.z, r4.y, r25.x, r25.z
+ ishl r25.xw, r25.xxxz, l(5, 0, 0, 5)
+ iadd r25.x, r25.x, r25.w
+ umin r25.x, r19.x, r25.x
+ bfi r25.z, l(27), l(5), r25.z, vThreadIDInGroup.x
+ mov r25.w, l(16)
+ mov r27.w, r25.y
+ mov r28.w, r19.w
+ loop
+ uge r29.w, r25.w, l(32)
+ breakc_nz r29.w
+ mov r29.w, l(0)
+ mov r32.w, l(0)
+ mov r34.w, r25.z
+ loop
+ uge r36.x, r32.w, r21.x
+ breakc_nz r36.x
+ ult r36.x, r34.w, r25.x
+ iadd r36.y, r27.w, r34.w
+ ld_structured_indexable(structured_buffer, stride=4)(mixed,mixed,mixed,mixed) r36.y, r36.y, l(0), u2.xxxx
+ movc r36.x, r36.x, r36.y, l(-1)
+ ushr r36.x, r36.x, r25.w
+ and r36.xyzw, r36.xxxx, l(1, 2, 4, 8)
+ ine r36.xyzw, r36.xyzw, l(0, 0, 0, 0)
+ bfi r36.xyzw, l(1, 1, 1, 1), vThreadIDInGroup.xxxx, r36.xyzw, l(0, 0, 0, 0)
+ store_structured g3.x, vThreadIDInGroup.x, l(0), r36.x
+ store_structured g3.x, r12.x, l(0), r36.y
+ store_structured g3.x, r12.y, l(0), r36.z
+ store_structured g3.x, r12.z, l(0), r36.w
+ sync_g_t
+ if_nz r7.x
+ ld_structured r36.x, vThreadIDInGroup.x, l(0), g3.xxxx
+ ld_structured r36.y, r12.x, l(0), g3.xxxx
+ ld_structured r36.z, r12.y, l(0), g3.xxxx
+ ld_structured r36.w, r12.z, l(0), g3.xxxx
+ ld_structured r37.x, r3.x, l(0), g3.xxxx
+ ld_structured r37.y, r6.y, l(0), g3.xxxx
+ ld_structured r37.z, r9.x, l(0), g3.xxxx
+ ld_structured r37.w, r6.z, l(0), g3.xxxx
+ or r36.xyzw, r36.xyzw, r37.xyzw
+ store_structured g3.x, vThreadIDInGroup.x, l(0), r36.x
+ store_structured g3.x, r12.x, l(0), r36.y
+ store_structured g3.x, r12.y, l(0), r36.z
+ store_structured g3.x, r12.z, l(0), r36.w
+ endif
+ sync_g_t
+ if_nz r7.y
+ ld_structured r36.x, vThreadIDInGroup.x, l(0), g3.xxxx
+ ld_structured r36.y, r12.x, l(0), g3.xxxx
+ ld_structured r36.z, r12.y, l(0), g3.xxxx
+ ld_structured r36.w, r12.z, l(0), g3.xxxx
+ ld_structured r37.x, r3.y, l(0), g3.xxxx
+ ld_structured r37.y, r6.w, l(0), g3.xxxx
+ ld_structured r37.z, r9.y, l(0), g3.xxxx
+ ld_structured r37.w, r8.x, l(0), g3.xxxx
+ or r36.xyzw, r36.xyzw, r37.xyzw
+ store_structured g3.x, vThreadIDInGroup.x, l(0), r36.x
+ store_structured g3.x, r12.x, l(0), r36.y
+ store_structured g3.x, r12.y, l(0), r36.z
+ store_structured g3.x, r12.z, l(0), r36.w
+ endif
+ sync_g_t
+ if_nz r7.z
+ ld_structured r36.x, vThreadIDInGroup.x, l(0), g3.xxxx
+ ld_structured r36.y, r12.x, l(0), g3.xxxx
+ ld_structured r36.z, r12.y, l(0), g3.xxxx
+ ld_structured r36.w, r12.z, l(0), g3.xxxx
+ ld_structured r37.x, r3.z, l(0), g3.xxxx
+ ld_structured r37.y, r8.y, l(0), g3.xxxx
+ ld_structured r37.z, r9.z, l(0), g3.xxxx
+ ld_structured r37.w, r8.z, l(0), g3.xxxx
+ or r36.xyzw, r36.xyzw, r37.xyzw
+ store_structured g3.x, vThreadIDInGroup.x, l(0), r36.x
+ store_structured g3.x, r12.x, l(0), r36.y
+ store_structured g3.x, r12.y, l(0), r36.z
+ store_structured g3.x, r12.z, l(0), r36.w
+ endif
+ sync_g_t
+ if_nz r7.w
+ ld_structured r36.x, vThreadIDInGroup.x, l(0), g3.xxxx
+ ld_structured r36.y, r12.x, l(0), g3.xxxx
+ ld_structured r36.z, r12.y, l(0), g3.xxxx
+ ld_structured r36.w, r12.z, l(0), g3.xxxx
+ ld_structured r37.x, r3.w, l(0), g3.xxxx
+ ld_structured r37.y, r8.w, l(0), g3.xxxx
+ ld_structured r37.z, r9.w, l(0), g3.xxxx
+ ld_structured r37.w, r13.x, l(0), g3.xxxx
+ or r36.xyzw, r36.xyzw, r37.xyzw
+ store_structured g3.x, vThreadIDInGroup.x, l(0), r36.x
+ store_structured g3.x, r12.x, l(0), r36.y
+ store_structured g3.x, r12.y, l(0), r36.z
+ store_structured g3.x, r12.z, l(0), r36.w
+ endif
+ sync_g_t
+ if_nz r5.w
+ ld_structured r36.x, vThreadIDInGroup.x, l(0), g3.xxxx
+ ld_structured r36.y, r12.x, l(0), g3.xxxx
+ ld_structured r36.z, r12.y, l(0), g3.xxxx
+ ld_structured r36.w, r12.z, l(0), g3.xxxx
+ ld_structured r37.x, r6.x, l(0), g3.xxxx
+ ld_structured r37.y, r13.y, l(0), g3.xxxx
+ ld_structured r37.z, r14.x, l(0), g3.xxxx
+ ld_structured r37.w, r13.z, l(0), g3.xxxx
+ or r36.xyzw, r36.xyzw, r37.xyzw
+ store_structured g3.x, vThreadIDInGroup.x, l(0), r36.x
+ store_structured g3.x, r12.x, l(0), r36.y
+ store_structured g3.x, r12.y, l(0), r36.z
+ store_structured g3.x, r12.z, l(0), r36.w
+ endif
+ sync_g_t
+ ld_structured r36.x, r5.z, l(0), g3.xxxx
+ ld_structured r36.y, r10.x, l(0), g3.xxxx
+ ld_structured r36.z, r10.y, l(0), g3.xxxx
+ ld_structured r36.w, r10.z, l(0), g3.xxxx
+ xor r36.xy, r2.zwzz, r36.xyxx
+ and r36.x, r36.y, r36.x
+ xor r36.y, r5.x, r36.z
+ and r36.x, r36.y, r36.x
+ xor r36.y, r5.y, r36.w
+ and r36.x, r36.y, r36.x
+ countbits r36.x, r36.x
+ iadd r29.w, r29.w, r36.x
+ iadd r32.w, r32.w, l(1)
+ iadd r34.w, r34.w, l(32)
+ endloop
+ if_nz r22.w
+ iadd r32.w, r27.w, r34.w
+ ld_structured_indexable(structured_buffer, stride=4)(mixed,mixed,mixed,mixed) r32.w, r32.w, l(0), u2.xxxx
+ if_nz r23.w
+ ult r36.x, r34.w, r25.x
+ movc r32.w, r36.x, r32.w, l(-1)
+ ushr r32.w, r32.w, r25.w
+ and r36.xyzw, r32.wwww, l(1, 2, 4, 8)
+ ine r36.xyzw, r36.xyzw, l(0, 0, 0, 0)
+ bfi r36.xyzw, l(1, 1, 1, 1), vThreadIDInGroup.xxxx, r36.xyzw, l(0, 0, 0, 0)
+ store_structured g3.x, vThreadIDInGroup.x, l(0), r36.x
+ store_structured g3.x, r12.x, l(0), r36.y
+ store_structured g3.x, r12.y, l(0), r36.z
+ store_structured g3.x, r12.z, l(0), r36.w
+ endif
+ sync_g_t
+ if_nz r23.w
+ if_nz r7.x
+ ld_structured r36.x, vThreadIDInGroup.x, l(0), g3.xxxx
+ ld_structured r36.y, r12.x, l(0), g3.xxxx
+ ld_structured r36.z, r12.y, l(0), g3.xxxx
+ ld_structured r36.w, r12.z, l(0), g3.xxxx
+ ld_structured r37.x, r3.x, l(0), g3.xxxx
+ ld_structured r37.y, r6.y, l(0), g3.xxxx
+ ld_structured r37.z, r9.x, l(0), g3.xxxx
+ ld_structured r37.w, r6.z, l(0), g3.xxxx
+ or r36.xyzw, r36.xyzw, r37.xyzw
+ store_structured g3.x, vThreadIDInGroup.x, l(0), r36.x
+ store_structured g3.x, r12.x, l(0), r36.y
+ store_structured g3.x, r12.y, l(0), r36.z
+ store_structured g3.x, r12.z, l(0), r36.w
+ endif
+ endif
+ sync_g_t
+ if_nz r23.w
+ if_nz r7.y
+ ld_structured r36.x, vThreadIDInGroup.x, l(0), g3.xxxx
+ ld_structured r36.y, r12.x, l(0), g3.xxxx
+ ld_structured r36.z, r12.y, l(0), g3.xxxx
+ ld_structured r36.w, r12.z, l(0), g3.xxxx
+ ld_structured r37.x, r3.y, l(0), g3.xxxx
+ ld_structured r37.y, r6.w, l(0), g3.xxxx
+ ld_structured r37.z, r9.y, l(0), g3.xxxx
+ ld_structured r37.w, r8.x, l(0), g3.xxxx
+ or r36.xyzw, r36.xyzw, r37.xyzw
+ store_structured g3.x, vThreadIDInGroup.x, l(0), r36.x
+ store_structured g3.x, r12.x, l(0), r36.y
+ store_structured g3.x, r12.y, l(0), r36.z
+ store_structured g3.x, r12.z, l(0), r36.w
+ endif
+ endif
+ sync_g_t
+ if_nz r23.w
+ if_nz r7.z
+ ld_structured r36.x, vThreadIDInGroup.x, l(0), g3.xxxx
+ ld_structured r36.y, r12.x, l(0), g3.xxxx
+ ld_structured r36.z, r12.y, l(0), g3.xxxx
+ ld_structured r36.w, r12.z, l(0), g3.xxxx
+ ld_structured r37.x, r3.z, l(0), g3.xxxx
+ ld_structured r37.y, r8.y, l(0), g3.xxxx
+ ld_structured r37.z, r9.z, l(0), g3.xxxx
+ ld_structured r37.w, r8.z, l(0), g3.xxxx
+ or r36.xyzw, r36.xyzw, r37.xyzw
+ store_structured g3.x, vThreadIDInGroup.x, l(0), r36.x
+ store_structured g3.x, r12.x, l(0), r36.y
+ store_structured g3.x, r12.y, l(0), r36.z
+ store_structured g3.x, r12.z, l(0), r36.w
+ endif
+ endif
+ sync_g_t
+ if_nz r23.w
+ if_nz r7.w
+ ld_structured r36.x, vThreadIDInGroup.x, l(0), g3.xxxx
+ ld_structured r36.y, r12.x, l(0), g3.xxxx
+ ld_structured r36.z, r12.y, l(0), g3.xxxx
+ ld_structured r36.w, r12.z, l(0), g3.xxxx
+ ld_structured r37.x, r3.w, l(0), g3.xxxx
+ ld_structured r37.y, r8.w, l(0), g3.xxxx
+ ld_structured r37.z, r9.w, l(0), g3.xxxx
+ ld_structured r37.w, r13.x, l(0), g3.xxxx
+ or r36.xyzw, r36.xyzw, r37.xyzw
+ store_structured g3.x, vThreadIDInGroup.x, l(0), r36.x
+ store_structured g3.x, r12.x, l(0), r36.y
+ store_structured g3.x, r12.y, l(0), r36.z
+ store_structured g3.x, r12.z, l(0), r36.w
+ endif
+ endif
+ sync_g_t
+ if_nz r23.w
+ if_nz r5.w
+ ld_structured r36.x, vThreadIDInGroup.x, l(0), g3.xxxx
+ ld_structured r36.y, r12.x, l(0), g3.xxxx
+ ld_structured r36.z, r12.y, l(0), g3.xxxx
+ ld_structured r36.w, r12.z, l(0), g3.xxxx
+ ld_structured r37.x, r6.x, l(0), g3.xxxx
+ ld_structured r37.y, r13.y, l(0), g3.xxxx
+ ld_structured r37.z, r14.x, l(0), g3.xxxx
+ ld_structured r37.w, r13.z, l(0), g3.xxxx
+ or r36.xyzw, r36.xyzw, r37.xyzw
+ store_structured g3.x, vThreadIDInGroup.x, l(0), r36.x
+ store_structured g3.x, r12.x, l(0), r36.y
+ store_structured g3.x, r12.y, l(0), r36.z
+ store_structured g3.x, r12.z, l(0), r36.w
+ endif
+ endif
+ sync_g_t
+ if_nz r23.w
+ ld_structured r36.x, r5.z, l(0), g3.xxxx
+ ld_structured r36.y, r10.x, l(0), g3.xxxx
+ ld_structured r36.z, r10.y, l(0), g3.xxxx
+ ld_structured r36.w, r10.z, l(0), g3.xxxx
+ else
+ mov r36.xyzw, l(0,0,0,0)
+ endif
+ xor r32.w, r2.z, r36.x
+ xor r36.x, r2.w, r36.y
+ and r32.w, r32.w, r36.x
+ xor r36.xy, r5.xyxx, r36.zwzz
+ and r32.w, r32.w, r36.x
+ and r32.w, r36.y, r32.w
+ countbits r32.w, r32.w
+ and r32.w, r23.w, r32.w
+ iadd r29.w, r29.w, r32.w
+ endif
+ if_nz r7.x
+ store_structured g3.x, r11.x, l(0), r29.w
+ endif
+ sync_g_t
+ if_z vThreadIDInGroup.x
+ store_structured g3.x, l(4096), l(0), l(0)
+ store_structured g3.x, l(4609), l(0), l(0)
+ endif
+ if_nz r1.w
+ if_nz r16.x
+ ld_structured r32.w, r14.y, l(0), g3.xxxx
+ ld_structured r36.x, r10.w, l(0), g3.xxxx
+ iadd r32.w, r32.w, r36.x
+ store_structured g3.x, r11.w, l(0), r32.w
+ else
+ ld_structured r32.w, l(4097), l(0), g3.xxxx
+ store_structured g3.x, l(4610), l(0), r32.w
+ endif
+ endif
+ sync_g_t
+ if_nz r1.w
+ if_nz r16.y
+ ld_structured r32.w, r11.w, l(0), g3.xxxx
+ ld_structured r36.x, r12.w, l(0), g3.xxxx
+ iadd r32.w, r32.w, r36.x
+ else
+ ld_structured r32.w, r11.w, l(0), g3.xxxx
+ endif
+ store_structured g3.x, r15.x, l(0), r32.w
+ endif
+ sync_g_t
+ if_nz r1.w
+ if_nz r16.z
+ ld_structured r32.w, r14.y, l(0), g3.xxxx
+ ld_structured r36.x, r15.w, l(0), g3.xxxx
+ iadd r32.w, r32.w, r36.x
+ else
+ ld_structured r32.w, r14.y, l(0), g3.xxxx
+ endif
+ store_structured g3.x, r15.z, l(0), r32.w
+ endif
+ sync_g_t
+ if_nz r1.w
+ if_nz r16.w
+ ld_structured r32.w, r11.w, l(0), g3.xxxx
+ ld_structured r36.x, r14.z, l(0), g3.xxxx
+ iadd r32.w, r32.w, r36.x
+ else
+ ld_structured r32.w, r11.w, l(0), g3.xxxx
+ endif
+ store_structured g3.x, r15.x, l(0), r32.w
+ endif
+ sync_g_t
+ if_nz r1.w
+ if_nz r18.x
+ ld_structured r32.w, r14.y, l(0), g3.xxxx
+ ld_structured r36.x, r17.y, l(0), g3.xxxx
+ iadd r32.w, r32.w, r36.x
+ else
+ ld_structured r32.w, r14.y, l(0), g3.xxxx
+ endif
+ store_structured g3.x, r15.z, l(0), r32.w
+ endif
+ sync_g_t
+ if_nz r1.w
+ if_nz r18.y
+ ld_structured r32.w, r11.w, l(0), g3.xxxx
+ ld_structured r36.x, r14.w, l(0), g3.xxxx
+ iadd r32.w, r32.w, r36.x
+ else
+ ld_structured r32.w, r11.w, l(0), g3.xxxx
+ endif
+ store_structured g3.x, r15.x, l(0), r32.w
+ endif
+ sync_g_t
+ if_nz r1.w
+ if_nz r18.z
+ ld_structured r32.w, r14.y, l(0), g3.xxxx
+ ld_structured r36.x, r17.w, l(0), g3.xxxx
+ iadd r32.w, r32.w, r36.x
+ else
+ ld_structured r32.w, r14.y, l(0), g3.xxxx
+ endif
+ store_structured g3.x, r15.z, l(0), r32.w
+ endif
+ sync_g_t
+ if_nz r1.w
+ if_nz r18.w
+ ld_structured r32.w, r11.w, l(0), g3.xxxx
+ ld_structured r36.x, r13.w, l(0), g3.xxxx
+ iadd r32.w, r32.w, r36.x
+ else
+ ld_structured r32.w, r11.w, l(0), g3.xxxx
+ endif
+ store_structured g3.x, r15.x, l(0), r32.w
+ endif
+ sync_g_t
+ if_nz r1.w
+ if_nz r15.y
+ ld_structured r32.w, r14.y, l(0), g3.xxxx
+ ld_structured r36.x, r17.z, l(0), g3.xxxx
+ iadd r32.w, r32.w, r36.x
+ else
+ ld_structured r32.w, r14.y, l(0), g3.xxxx
+ endif
+ store_structured g3.x, r15.z, l(0), r32.w
+ endif
+ sync_g_t
+ mov r32.w, l(0)
+ mov r36.x, r25.z
+ loop
+ uge r36.y, r32.w, r21.x
+ breakc_nz r36.y
+ ult r36.y, r36.x, r25.x
+ iadd r36.z, r27.w, r36.x
+ ld_structured_indexable(structured_buffer, stride=4)(mixed,mixed,mixed,mixed) r36.z, r36.z, l(0), u2.xxxx
+ movc r36.z, r36.y, r36.z, l(-1)
+ ushr r36.w, r36.z, r25.w
+ and r37.xyzw, r36.wwww, l(1, 2, 4, 8)
+ ine r38.xyzw, r37.xyzw, l(0, 0, 0, 0)
+ bfi r39.xyzw, l(1, 1, 1, 1), vThreadIDInGroup.xxxx, r38.xyzw, l(0, 0, 0, 0)
+ store_structured g3.x, vThreadIDInGroup.x, l(0), r39.x
+ store_structured g3.x, r12.x, l(0), r39.y
+ store_structured g3.x, r12.y, l(0), r39.z
+ store_structured g3.x, r12.z, l(0), r39.w
+ sync_g_t
+ if_nz r7.x
+ ld_structured r39.x, vThreadIDInGroup.x, l(0), g3.xxxx
+ ld_structured r39.y, r12.x, l(0), g3.xxxx
+ ld_structured r39.z, r12.y, l(0), g3.xxxx
+ ld_structured r39.w, r12.z, l(0), g3.xxxx
+ ld_structured r40.x, r3.x, l(0), g3.xxxx
+ ld_structured r40.y, r6.y, l(0), g3.xxxx
+ ld_structured r40.z, r9.x, l(0), g3.xxxx
+ ld_structured r40.w, r6.z, l(0), g3.xxxx
+ or r39.xyzw, r39.xyzw, r40.xyzw
+ store_structured g3.x, vThreadIDInGroup.x, l(0), r39.x
+ store_structured g3.x, r12.x, l(0), r39.y
+ store_structured g3.x, r12.y, l(0), r39.z
+ store_structured g3.x, r12.z, l(0), r39.w
+ endif
+ sync_g_t
+ if_nz r7.y
+ ld_structured r39.x, vThreadIDInGroup.x, l(0), g3.xxxx
+ ld_structured r39.y, r12.x, l(0), g3.xxxx
+ ld_structured r39.z, r12.y, l(0), g3.xxxx
+ ld_structured r39.w, r12.z, l(0), g3.xxxx
+ ld_structured r40.x, r3.y, l(0), g3.xxxx
+ ld_structured r40.y, r6.w, l(0), g3.xxxx
+ ld_structured r40.z, r9.y, l(0), g3.xxxx
+ ld_structured r40.w, r8.x, l(0), g3.xxxx
+ or r39.xyzw, r39.xyzw, r40.xyzw
+ store_structured g3.x, vThreadIDInGroup.x, l(0), r39.x
+ store_structured g3.x, r12.x, l(0), r39.y
+ store_structured g3.x, r12.y, l(0), r39.z
+ store_structured g3.x, r12.z, l(0), r39.w
+ endif
+ sync_g_t
+ if_nz r7.z
+ ld_structured r39.x, vThreadIDInGroup.x, l(0), g3.xxxx
+ ld_structured r39.y, r12.x, l(0), g3.xxxx
+ ld_structured r39.z, r12.y, l(0), g3.xxxx
+ ld_structured r39.w, r12.z, l(0), g3.xxxx
+ ld_structured r40.x, r3.z, l(0), g3.xxxx
+ ld_structured r40.y, r8.y, l(0), g3.xxxx
+ ld_structured r40.z, r9.z, l(0), g3.xxxx
+ ld_structured r40.w, r8.z, l(0), g3.xxxx
+ or r39.xyzw, r39.xyzw, r40.xyzw
+ store_structured g3.x, vThreadIDInGroup.x, l(0), r39.x
+ store_structured g3.x, r12.x, l(0), r39.y
+ store_structured g3.x, r12.y, l(0), r39.z
+ store_structured g3.x, r12.z, l(0), r39.w
+ endif
+ sync_g_t
+ if_nz r7.w
+ ld_structured r39.x, vThreadIDInGroup.x, l(0), g3.xxxx
+ ld_structured r39.y, r12.x, l(0), g3.xxxx
+ ld_structured r39.z, r12.y, l(0), g3.xxxx
+ ld_structured r39.w, r12.z, l(0), g3.xxxx
+ ld_structured r40.x, r3.w, l(0), g3.xxxx
+ ld_structured r40.y, r8.w, l(0), g3.xxxx
+ ld_structured r40.z, r9.w, l(0), g3.xxxx
+ ld_structured r40.w, r13.x, l(0), g3.xxxx
+ or r39.xyzw, r39.xyzw, r40.xyzw
+ store_structured g3.x, vThreadIDInGroup.x, l(0), r39.x
+ store_structured g3.x, r12.x, l(0), r39.y
+ store_structured g3.x, r12.y, l(0), r39.z
+ store_structured g3.x, r12.z, l(0), r39.w
+ endif
+ sync_g_t
+ if_nz r5.w
+ ld_structured r39.x, vThreadIDInGroup.x, l(0), g3.xxxx
+ ld_structured r39.y, r12.x, l(0), g3.xxxx
+ ld_structured r39.z, r12.y, l(0), g3.xxxx
+ ld_structured r39.w, r12.z, l(0), g3.xxxx
+ ld_structured r40.x, r6.x, l(0), g3.xxxx
+ ld_structured r40.y, r13.y, l(0), g3.xxxx
+ ld_structured r40.z, r14.x, l(0), g3.xxxx
+ ld_structured r40.w, r13.z, l(0), g3.xxxx
+ or r39.xyzw, r39.xyzw, r40.xyzw
+ store_structured g3.x, vThreadIDInGroup.x, l(0), r39.x
+ store_structured g3.x, r12.x, l(0), r39.y
+ store_structured g3.x, r12.y, l(0), r39.z
+ store_structured g3.x, r12.z, l(0), r39.w
+ endif
+ sync_g_t
+ ld_structured r37.y, r5.z, l(0), g3.xxxx
+ ld_structured r37.z, r10.x, l(0), g3.xxxx
+ ld_structured r37.w, r10.y, l(0), g3.xxxx
+ ld_structured r38.x, r10.z, l(0), g3.xxxx
+ bfi r36.w, l(4), l(5), r36.w, l(0)
+ iadd r36.w, r11.y, r36.w
+ iadd r36.w, r36.w, l(4096)
+ ld_structured r36.w, r36.w, l(0), g3.xxxx
+ if_nz r36.y
+ iadd r36.y, r37.x, l(-1)
+ xor r36.y, r37.y, r36.y
+ ieq r38.yzw, r38.yyzw, l(0, 0, 0, 0)
+ xor r37.x, r37.z, r38.y
+ and r36.y, r36.y, r37.x
+ xor r37.x, r37.w, r38.z
+ and r36.y, r36.y, r37.x
+ xor r37.x, r38.x, r38.w
+ and r36.y, r36.y, r37.x
+ ubfe r36.y, vThreadIDInGroup.x, l(0), r36.y
+ countbits r36.y, r36.y
+ iadd r36.y, r36.y, r36.w
+ iadd r36.y, r28.w, r36.y
+ store_structured u2.x, r36.y, l(0), r36.z
+ endif
+ sync_g_t
+ if_nz r7.x
+ ld_structured r36.y, r11.z, l(0), g3.xxxx
+ xor r36.zw, r2.zzzw, r37.yyyz
+ and r36.z, r36.w, r36.z
+ xor r36.w, r5.x, r37.w
+ and r36.z, r36.w, r36.z
+ xor r36.w, r5.y, r38.x
+ and r36.z, r36.w, r36.z
+ countbits r36.z, r36.z
+ iadd r36.y, r36.z, r36.y
+ store_structured g3.x, r11.z, l(0), r36.y
+ endif
+ iadd r32.w, r32.w, l(1)
+ iadd r36.x, r36.x, l(32)
+ endloop
+ if_nz r22.w
+ ult r32.w, r36.x, r25.x
+ iadd r36.y, r27.w, r36.x
+ ld_structured_indexable(structured_buffer, stride=4)(mixed,mixed,mixed,mixed) r36.y, r36.y, l(0), u2.xxxx
+ movc r36.y, r32.w, r36.y, l(-1)
+ ushr r36.z, r36.y, r25.w
+ if_nz r23.w
+ and r37.xyzw, r36.zzzz, l(1, 2, 4, 8)
+ ine r37.xyzw, r37.xyzw, l(0, 0, 0, 0)
+ bfi r37.xyzw, l(1, 1, 1, 1), vThreadIDInGroup.xxxx, r37.xyzw, l(0, 0, 0, 0)
+ store_structured g3.x, vThreadIDInGroup.x, l(0), r37.x
+ store_structured g3.x, r12.x, l(0), r37.y
+ store_structured g3.x, r12.y, l(0), r37.z
+ store_structured g3.x, r12.z, l(0), r37.w
+ endif
+ sync_g_t
+ if_nz r23.w
+ if_nz r7.x
+ ld_structured r37.x, vThreadIDInGroup.x, l(0), g3.xxxx
+ ld_structured r37.y, r12.x, l(0), g3.xxxx
+ ld_structured r37.z, r12.y, l(0), g3.xxxx
+ ld_structured r37.w, r12.z, l(0), g3.xxxx
+ ld_structured r38.x, r3.x, l(0), g3.xxxx
+ ld_structured r38.y, r6.y, l(0), g3.xxxx
+ ld_structured r38.z, r9.x, l(0), g3.xxxx
+ ld_structured r38.w, r6.z, l(0), g3.xxxx
+ or r37.xyzw, r37.xyzw, r38.xyzw
+ store_structured g3.x, vThreadIDInGroup.x, l(0), r37.x
+ store_structured g3.x, r12.x, l(0), r37.y
+ store_structured g3.x, r12.y, l(0), r37.z
+ store_structured g3.x, r12.z, l(0), r37.w
+ endif
+ endif
+ sync_g_t
+ if_nz r23.w
+ if_nz r7.y
+ ld_structured r37.x, vThreadIDInGroup.x, l(0), g3.xxxx
+ ld_structured r37.y, r12.x, l(0), g3.xxxx
+ ld_structured r37.z, r12.y, l(0), g3.xxxx
+ ld_structured r37.w, r12.z, l(0), g3.xxxx
+ ld_structured r38.x, r3.y, l(0), g3.xxxx
+ ld_structured r38.y, r6.w, l(0), g3.xxxx
+ ld_structured r38.z, r9.y, l(0), g3.xxxx
+ ld_structured r38.w, r8.x, l(0), g3.xxxx
+ or r37.xyzw, r37.xyzw, r38.xyzw
+ store_structured g3.x, vThreadIDInGroup.x, l(0), r37.x
+ store_structured g3.x, r12.x, l(0), r37.y
+ store_structured g3.x, r12.y, l(0), r37.z
+ store_structured g3.x, r12.z, l(0), r37.w
+ endif
+ endif
+ sync_g_t
+ if_nz r23.w
+ if_nz r7.z
+ ld_structured r37.x, vThreadIDInGroup.x, l(0), g3.xxxx
+ ld_structured r37.y, r12.x, l(0), g3.xxxx
+ ld_structured r37.z, r12.y, l(0), g3.xxxx
+ ld_structured r37.w, r12.z, l(0), g3.xxxx
+ ld_structured r38.x, r3.z, l(0), g3.xxxx
+ ld_structured r38.y, r8.y, l(0), g3.xxxx
+ ld_structured r38.z, r9.z, l(0), g3.xxxx
+ ld_structured r38.w, r8.z, l(0), g3.xxxx
+ or r37.xyzw, r37.xyzw, r38.xyzw
+ store_structured g3.x, vThreadIDInGroup.x, l(0), r37.x
+ store_structured g3.x, r12.x, l(0), r37.y
+ store_structured g3.x, r12.y, l(0), r37.z
+ store_structured g3.x, r12.z, l(0), r37.w
+ endif
+ endif
+ sync_g_t
+ if_nz r23.w
+ if_nz r7.w
+ ld_structured r37.x, vThreadIDInGroup.x, l(0), g3.xxxx
+ ld_structured r37.y, r12.x, l(0), g3.xxxx
+ ld_structured r37.z, r12.y, l(0), g3.xxxx
+ ld_structured r37.w, r12.z, l(0), g3.xxxx
+ ld_structured r38.x, r3.w, l(0), g3.xxxx
+ ld_structured r38.y, r8.w, l(0), g3.xxxx
+ ld_structured r38.z, r9.w, l(0), g3.xxxx
+ ld_structured r38.w, r13.x, l(0), g3.xxxx
+ or r37.xyzw, r37.xyzw, r38.xyzw
+ store_structured g3.x, vThreadIDInGroup.x, l(0), r37.x
+ store_structured g3.x, r12.x, l(0), r37.y
+ store_structured g3.x, r12.y, l(0), r37.z
+ store_structured g3.x, r12.z, l(0), r37.w
+ endif
+ endif
+ sync_g_t
+ if_nz r23.w
+ if_nz r5.w
+ ld_structured r37.x, vThreadIDInGroup.x, l(0), g3.xxxx
+ ld_structured r37.y, r12.x, l(0), g3.xxxx
+ ld_structured r37.z, r12.y, l(0), g3.xxxx
+ ld_structured r37.w, r12.z, l(0), g3.xxxx
+ ld_structured r38.x, r6.x, l(0), g3.xxxx
+ ld_structured r38.y, r13.y, l(0), g3.xxxx
+ ld_structured r38.z, r14.x, l(0), g3.xxxx
+ ld_structured r38.w, r13.z, l(0), g3.xxxx
+ or r37.xyzw, r37.xyzw, r38.xyzw
+ store_structured g3.x, vThreadIDInGroup.x, l(0), r37.x
+ store_structured g3.x, r12.x, l(0), r37.y
+ store_structured g3.x, r12.y, l(0), r37.z
+ store_structured g3.x, r12.z, l(0), r37.w
+ endif
+ endif
+ sync_g_t
+ if_nz r23.w
+ ld_structured r37.x, r5.z, l(0), g3.xxxx
+ ld_structured r37.y, r10.x, l(0), g3.xxxx
+ ld_structured r37.z, r10.y, l(0), g3.xxxx
+ ld_structured r37.w, r10.z, l(0), g3.xxxx
+ and r38.xyzw, r36.zzzz, l(1, 2, 4, 8)
+ iadd r36.w, r38.x, l(-1)
+ xor r36.w, r37.x, r36.w
+ ine r38.xyz, r38.yzwy, l(0, 0, 0, 0)
+ ieq r38.xyz, r38.xyzx, l(0, 0, 0, 0)
+ xor r38.xyz, r37.yzwy, r38.xyzx
+ and r36.w, r36.w, r38.x
+ and r36.w, r38.y, r36.w
+ and r36.w, r38.z, r36.w
+ bfi r36.z, l(4), l(5), r36.z, l(0)
+ iadd r36.z, r11.y, r36.z
+ iadd r36.z, r36.z, l(4096)
+ ld_structured r36.z, r36.z, l(0), g3.xxxx
+ ubfe r36.w, vThreadIDInGroup.x, l(0), r36.w
+ countbits r36.w, r36.w
+ iadd r36.z, r36.w, r36.z
+ else
+ mov r37.xyzw, l(0,0,0,0)
+ mov r36.z, l(0)
+ endif
+ if_nz r32.w
+ iadd r32.w, r28.w, r36.z
+ store_structured u2.x, r32.w, l(0), r36.y
+ endif
+ sync_g_t
+ if_nz r23.w
+ if_nz r7.x
+ ld_structured r32.w, r11.z, l(0), g3.xxxx
+ xor r36.yz, r2.zzwz, r37.xxyx
+ and r36.y, r36.z, r36.y
+ xor r36.zw, r5.xxxy, r37.zzzw
+ and r36.y, r36.z, r36.y
+ and r36.y, r36.w, r36.y
+ countbits r36.y, r36.y
+ iadd r32.w, r32.w, r36.y
+ store_structured g3.x, r11.z, l(0), r32.w
+ endif
+ endif
+ endif
+ iadd r25.w, r25.w, l(4)
+ mov r32.w, r28.w
+ mov r28.w, r27.w
+ mov r27.w, r32.w
+ endloop
+ mov r19.w, vThreadIDInGroup.x
+ loop
+ ige r21.x, r19.w, r19.x
+ breakc_nz r21.x
+ iadd r21.x, r19.w, r25.y
+ ld_structured_indexable(structured_buffer, stride=4)(mixed,mixed,mixed,mixed) r22.w, r21.x, l(0), u2.xxxx
+ ishr r22.w, r22.w, l(16)
+ iadd r21.x, r21.x, l(-1)
+ ld_structured_indexable(structured_buffer, stride=4)(mixed,mixed,mixed,mixed) r21.x, r21.x, l(0), u2.xxxx
+ ishr r21.x, r21.x, l(16)
+ iadd r23.w, r22.w, l(-1)
+ movc r21.x, r19.w, r21.x, r23.w
+ ine r23.w, r21.x, r22.w
+ if_nz r23.w
+ iadd r22.w, r19.z, r22.w
+ store_structured u2.x, r22.w, l(0), r19.w
+ iadd r21.x, r19.z, r21.x
+ iadd r21.x, r21.x, l(1)
+ store_structured u2.x, r21.x, l(0), r19.w
+ endif
+ iadd r19.w, r19.w, l(1024)
+ endloop
+ ld_raw r25.xy, l(96), g0.xyxx
+ mov r19.w, vThreadIDInGroup.x
+ loop
+ ige r21.x, r19.w, r19.x
+ breakc_nz r21.x
+ iadd r21.x, r19.w, r25.y
+ ld_structured_indexable(structured_buffer, stride=4)(mixed,mixed,mixed,mixed) r21.x, r21.x, l(0), u2.xxxx
+ and r21.x, r21.x, l(0x0000ffff)
+ iadd r21.x, r21.x, r25.x
+ ld_structured_indexable(structured_buffer, stride=16)(mixed,mixed,mixed,mixed) r37.xyzw, r21.x, l(0), u1.xyzw
+ iadd r21.x, r0.y, r19.w
+ store_structured u0.xyzw, r21.x, l(0), r37.xyzw
+ iadd r19.w, r19.w, l(1024)
+ endloop
+ sync_g_t
+ ld_raw r19.w, l(84), g0.xxxx
+ mul r21.x, r19.w, r19.w
+ ld_raw r22.w, l(100), g0.xxxx
+ ld_raw r23.w, l(112), g1.xxxx
+ mov r25.x, vThreadIDInGroup.x
+ loop
+ ige r25.y, r25.x, r19.x
+ breakc_nz r25.y
+ iadd r25.y, r22.w, r25.x
+ ld_structured_indexable(structured_buffer, stride=4)(mixed,mixed,mixed,mixed) r25.y, r25.y, l(0), u2.xxxx
+ and r25.y, r25.y, l(0x0000ffff)
+ iadd r25.z, r0.y, r25.x
+ ld_structured_indexable(structured_buffer, stride=16)(mixed,mixed,mixed,mixed) r37.xyzw, r25.z, l(0), u0.xyzw
+ iadd r25.z, r23.w, r25.y
+ ld_structured_indexable(structured_buffer, stride=16)(mixed,mixed,mixed,mixed) r36.yzw, r25.z, l(0), t14.xxyz
+ dp4 r25.z, r37.xyzw, icb[r23.y + 0].xyzw
+ mad r25.z, r25.z, r20.x, r20.z
+ min r25.z, r25.z, l(127.500000)
+ max r25.z, r25.z, l(0.000000)
+ dp4 r25.w, r37.xyzw, icb[r23.z + 0].xyzw
+ mad r25.w, r25.w, r20.y, r20.w
+ min r25.w, r25.w, l(127.500000)
+ max r25.w, r25.w, l(0.000000)
+ ftoi r25.zw, r25.zzzw
+ imad r25.z, r25.w, l(128), r25.z
+ iadd r25.w, r25.z, l(256)
+ add r27.w, r37.w, l(0.000000)
+ mov r38.xyz, l(0,0,0,0)
+ mov r28.w, l(0.000000)
+ mov r32.w, r25.z
+ loop
+ ilt r38.w, r25.w, r32.w
+ breakc_nz r38.w
+ iadd r38.w, r19.z, r32.w
+ ld_structured_indexable(structured_buffer, stride=4)(mixed,mixed,mixed,mixed) r39.x, r38.w, l(0), u2.xxxx
+ iadd r39.yzw, r38.wwww, l(0, 1, 2, 3)
+ ld_structured_indexable(structured_buffer, stride=4)(mixed,mixed,mixed,mixed) r38.w, r39.y, l(0), u2.xxxx
+ ld_structured_indexable(structured_buffer, stride=4)(mixed,mixed,mixed,mixed) r39.y, r39.z, l(0), u2.xxxx
+ ld_structured_indexable(structured_buffer, stride=4)(mixed,mixed,mixed,mixed) r39.z, r39.w, l(0), u2.xxxx
+ umin r39.x, r38.w, r39.x
+ umin r39.x, r39.y, r39.x
+ imax r38.w, r38.w, r39.y
+ imax r38.w, r39.z, r38.w
+ imax r38.w, r38.w, l(0)
+ mov r39.yzw, r38.xxyz
+ mov r40.x, r28.w
+ mov r40.y, r39.x
+ loop
+ uge r40.z, r40.y, r38.w
+ breakc_nz r40.z
+ ine r40.z, r25.x, r40.y
+ if_nz r40.z
+ iadd r40.z, r0.y, r40.y
+ ld_structured_indexable(structured_buffer, stride=16)(mixed,mixed,mixed,mixed) r41.xyzw, r40.z, l(0), u0.xyzw
+ add r41.xyz, r37.xyzx, -r41.xyzx
+ dp3 r40.z, r41.xyzx, r41.xyzx
+ lt r40.w, r21.x, r40.z
+ if_nz r40.w
+ iadd r40.w, r40.y, l(1)
+ mov r40.y, r40.w
+ continue
+ endif
+ ld_raw r40.w, l(112), g1.xxxx
+ ine r42.x, r40.w, l(-1)
+ if_nz r42.x
+ ld_raw r42.x, l(100), g0.xxxx
+ iadd r42.x, r40.y, r42.x
+ ld_structured_indexable(structured_buffer, stride=4)(mixed,mixed,mixed,mixed) r42.x, r42.x, l(0), u2.xxxx
+ and r42.x, r42.x, l(0x0000ffff)
+ iadd r40.w, r40.w, r42.x
+ ld_structured_indexable(structured_buffer, stride=16)(mixed,mixed,mixed,mixed) r42.xyz, r40.w, l(0), t14.xyzx
+ add r42.xyz, r36.yzwy, -r42.xyzx
+ dp3 r40.w, r42.xyzx, r42.xyzx
+ ge r42.x, r21.x, r40.w
+ if_nz r42.x
+ iadd r42.x, r40.y, l(1)
+ mov r40.y, r42.x
+ continue
+ endif
+ add r40.w, r40.w, l(0.000000)
+ div r40.w, r21.x, r40.w
+ rsq r40.w, r40.w
+ add r40.w, r40.w, l(-1.000000)
+ min r40.w, r40.w, l(1.000000)
+ else
+ mov r40.w, l(1.000000)
+ endif
+ add r40.z, r40.z, l(0.000000)
+ rsq r40.z, r40.z
+ mad r40.z, r19.w, r40.z, l(-1.000000)
+ max r40.z, r40.z, l(0.000000)
+ mul r42.x, r40.z, r40.z
+ mul r40.w, r40.w, r42.x
+ add r41.w, r27.w, r41.w
+ div r40.w, r40.w, r41.w
+ mad r39.yzw, r40.wwww, r41.xxyz, r39.yyzw
+ add r40.x, r40.z, r40.x
+ endif
+ iadd r40.y, r40.y, l(1)
+ endloop
+ mov r38.xyz, r39.yzwy
+ mov r28.w, r40.x
+ iadd r32.w, r32.w, l(128)
+ endloop
+ ld_raw r25.z, l(72), g1.xxxx
+ mul r25.z, r37.w, r25.z
+ div r25.z, r25.z, r28.w
+ ld_raw r25.w, l(96), g0.xxxx
+ iadd r25.y, r25.y, r25.w
+ ld_structured_indexable(structured_buffer, stride=16)(mixed,mixed,mixed,mixed) r36.yzw, r25.y, l(0), u1.xxyz
+ mad r36.yzw, r38.xxyz, r25.zzzz, r36.yyzw
+ store_structured u1.xyz, r25.y, l(0), r36.yzwy
+ iadd r25.x, r25.x, l(1024)
+ endloop
+ sync_g_t
+ ld_raw r19.w, l(96), g0.xxxx
+ mov r20.x, vThreadIDInGroup.x
+ loop
+ ige r20.y, r20.x, r19.y
+ breakc_nz r20.y
+ iadd r20.y, r19.w, r20.x
+ ld_structured_indexable(structured_buffer, stride=16)(mixed,mixed,mixed,mixed) r25.xyzw, r20.y, l(0), u1.xyzw
+ iadd r20.y, r0.y, r20.x
+ store_structured u0.xyzw, r20.y, l(0), r25.xyzw
+ iadd r20.x, r20.x, l(1024)
+ endloop
+ ld_raw r19.y, l(100), g0.xxxx
+ mov r19.w, vThreadIDInGroup.x
+ loop
+ ige r20.x, r19.w, r19.x
+ breakc_nz r20.x
+ iadd r20.x, r19.w, r19.y
+ ld_structured_indexable(structured_buffer, stride=4)(mixed,mixed,mixed,mixed) r20.x, r20.x, l(0), u2.xxxx
+ ushr r20.x, r20.x, l(16)
+ iadd r20.x, r19.z, r20.x
+ store_structured u2.x, r20.x, l(0), l(-1)
+ iadd r20.y, r20.x, l(1)
+ store_structured u2.x, r20.y, l(0), l(-1)
+ iadd r19.w, r19.w, l(1024)
+ endloop
+ endif
+ if_z vThreadIDInGroup.x
+ ld_raw r19.x, l(8), g1.xxxx
+ mul r19.x, r19.x, l(1000.000000)
+ ftou r19.x, r19.x
+ umax r19.x, r19.x, l(1)
+ ld_raw r19.y, l(104), g1.xxxx
+ iadd r19.x, r19.x, r19.y
+ store_raw g1.x, l(104), r19.x
+ endif
+ sync_g_t
+ ld_raw r19.x, l(104), g1.xxxx
+ ld_raw r19.y, l(104), g0.xxxx
+ uge r19.x, r19.x, r19.y
+ if_nz r19.x
+ ld_raw r19.x, l(0), g0.xxxx
+ mov r19.y, l(0)
+ mov r19.z, vThreadIDInGroup.x
+ loop
+ uge r19.w, r19.z, r19.x
+ breakc_nz r19.w
+ iadd r19.w, r0.y, r19.z
+ ld_structured_indexable(structured_buffer, stride=16)(mixed,mixed,mixed,mixed) r20.xyz, r19.w, l(0), u0.xyzx
+ iadd r19.w, r0.x, r19.z
+ ld_structured_indexable(structured_buffer, stride=16)(mixed,mixed,mixed,mixed) r23.yzw, r19.w, l(0), u0.xxyz
+ add r20.xyz, r20.xyzx, -r23.yzwy
+ max r19.w, |r20.y|, |r20.x|
+ max r19.w, |r20.z|, r19.w
+ max r19.y, r19.y, r19.w
+ iadd r19.z, r19.z, l(1024)
+ endloop
+ if_z vThreadIDInGroup.x
+ ld_raw r19.x, l(100), g1.xxxx
+ iadd r19.x, r19.x, l(1)
+ store_raw g1.x, l(100), r19.x
+ ld_raw r19.x, l(104), g0.xxxx
+ ld_raw r19.z, l(104), g1.xxxx
+ iadd r19.x, -r19.x, r19.z
+ store_raw g1.x, l(104), r19.x
+ endif
+ ld_raw r19.x, l(112), g0.xxxx
+ ld_raw r19.z, l(8), g1.xxxx
+ mul r19.x, r19.z, r19.x
+ lt r19.x, r19.x, r19.y
+ if_nz r19.x
+ store_raw g1.x, l(100), l(0)
+ endif
+ endif
+ iadd r17.x, r17.x, l(1)
+ endloop
+ sync_g_t
+endif
+if_z vThreadIDInGroup.x
+ ld_raw r0.xyzw, l(0), g1.xyzw
+ ld_raw r1.xyzw, l(16), g1.xyzw
+ ld_raw r2.xyzw, l(32), g1.xyzw
+ ld_raw r3.xyzw, l(48), g1.xyzw
+ ld_raw r4.xyzw, l(64), g1.xyzw
+ ld_raw r5.xyzw, l(80), g1.xyzw
+ ld_raw r6.xyzw, l(96), g1.xyzw
+ ld_raw r7.xy, l(112), g1.xyxx
+ store_structured u3.xyzw, vThreadGroupID.x, l(0), r0.xyzw
+ store_structured u3.xyzw, vThreadGroupID.x, l(16), r1.xyzw
+ store_structured u3.xyzw, vThreadGroupID.x, l(32), r2.xyzw
+ store_structured u3.xyzw, vThreadGroupID.x, l(48), r3.xyzw
+ store_structured u3.xyzw, vThreadGroupID.x, l(64), r4.xyzw
+ store_structured u3.xyzw, vThreadGroupID.x, l(80), r5.xyzw
+ store_structured u3.xyzw, vThreadGroupID.x, l(96), r6.xyzw
+ store_structured u3.xy, vThreadGroupID.x, l(112), r7.xyxx
+endif
+ret
+// Approximately 4204 instruction slots used
+#endif
+
+const BYTE gDxSolverKernel[] =
+{
+ 68, 88, 66, 67, 15, 26,
+ 236, 198, 241, 164, 237, 240,
+ 132, 194, 14, 81, 193, 73,
+ 162, 40, 1, 0, 0, 0,
+ 32, 212, 1, 0, 5, 0,
+ 0, 0, 52, 0, 0, 0,
+ 108, 20, 0, 0, 124, 20,
+ 0, 0, 140, 20, 0, 0,
+ 132, 211, 1, 0, 82, 68,
+ 69, 70, 48, 20, 0, 0,
+ 20, 0, 0, 0, 20, 4,
+ 0, 0, 20, 0, 0, 0,
+ 60, 0, 0, 0, 0, 5,
+ 83, 67, 0, 1, 0, 0,
+ 252, 19, 0, 0, 82, 68,
+ 49, 49, 60, 0, 0, 0,
+ 24, 0, 0, 0, 32, 0,
+ 0, 0, 40, 0, 0, 0,
+ 36, 0, 0, 0, 12, 0,
+ 0, 0, 0, 0, 0, 0,
+ 188, 2, 0, 0, 5, 0,
+ 0, 0, 6, 0, 0, 0,
+ 1, 0, 0, 0, 116, 0,
+ 0, 0, 0, 0, 0, 0,
+ 1, 0, 0, 0, 1, 0,
+ 0, 0, 199, 2, 0, 0,
+ 5, 0, 0, 0, 6, 0,
+ 0, 0, 1, 0, 0, 0,
+ 100, 0, 0, 0, 2, 0,
+ 0, 0, 1, 0, 0, 0,
+ 1, 0, 0, 0, 209, 2,
+ 0, 0, 5, 0, 0, 0,
+ 6, 0, 0, 0, 1, 0,
+ 0, 0, 24, 0, 0, 0,
+ 3, 0, 0, 0, 1, 0,
+ 0, 0, 1, 0, 0, 0,
+ 223, 2, 0, 0, 5, 0,
+ 0, 0, 6, 0, 0, 0,
+ 1, 0, 0, 0, 8, 0,
+ 0, 0, 4, 0, 0, 0,
+ 1, 0, 0, 0, 1, 0,
+ 0, 0, 236, 2, 0, 0,
+ 5, 0, 0, 0, 6, 0,
+ 0, 0, 1, 0, 0, 0,
+ 4, 0, 0, 0, 5, 0,
+ 0, 0, 1, 0, 0, 0,
+ 1, 0, 0, 0, 255, 2,
+ 0, 0, 5, 0, 0, 0,
+ 6, 0, 0, 0, 1, 0,
+ 0, 0, 8, 0, 0, 0,
+ 6, 0, 0, 0, 1, 0,
+ 0, 0, 1, 0, 0, 0,
+ 15, 3, 0, 0, 5, 0,
+ 0, 0, 6, 0, 0, 0,
+ 1, 0, 0, 0, 16, 0,
+ 0, 0, 7, 0, 0, 0,
+ 1, 0, 0, 0, 1, 0,
+ 0, 0, 33, 3, 0, 0,
+ 5, 0, 0, 0, 6, 0,
+ 0, 0, 1, 0, 0, 0,
+ 4, 0, 0, 0, 8, 0,
+ 0, 0, 1, 0, 0, 0,
+ 1, 0, 0, 0, 46, 3,
+ 0, 0, 5, 0, 0, 0,
+ 6, 0, 0, 0, 1, 0,
+ 0, 0, 16, 0, 0, 0,
+ 9, 0, 0, 0, 1, 0,
+ 0, 0, 1, 0, 0, 0,
+ 63, 3, 0, 0, 5, 0,
+ 0, 0, 6, 0, 0, 0,
+ 1, 0, 0, 0, 12, 0,
+ 0, 0, 10, 0, 0, 0,
+ 1, 0, 0, 0, 1, 0,
+ 0, 0, 83, 3, 0, 0,
+ 5, 0, 0, 0, 6, 0,
+ 0, 0, 1, 0, 0, 0,
+ 16, 0, 0, 0, 11, 0,
+ 0, 0, 1, 0, 0, 0,
+ 1, 0, 0, 0, 102, 3,
+ 0, 0, 5, 0, 0, 0,
+ 6, 0, 0, 0, 1, 0,
+ 0, 0, 16, 0, 0, 0,
+ 12, 0, 0, 0, 1, 0,
+ 0, 0, 1, 0, 0, 0,
+ 125, 3, 0, 0, 5, 0,
+ 0, 0, 6, 0, 0, 0,
+ 1, 0, 0, 0, 16, 0,
+ 0, 0, 13, 0, 0, 0,
+ 1, 0, 0, 0, 1, 0,
+ 0, 0, 148, 3, 0, 0,
+ 5, 0, 0, 0, 6, 0,
+ 0, 0, 1, 0, 0, 0,
+ 16, 0, 0, 0, 14, 0,
+ 0, 0, 1, 0, 0, 0,
+ 1, 0, 0, 0, 163, 3,
+ 0, 0, 5, 0, 0, 0,
+ 6, 0, 0, 0, 1, 0,
+ 0, 0, 4, 0, 0, 0,
+ 15, 0, 0, 0, 1, 0,
+ 0, 0, 1, 0, 0, 0,
+ 185, 3, 0, 0, 5, 0,
+ 0, 0, 6, 0, 0, 0,
+ 1, 0, 0, 0, 4, 0,
+ 0, 0, 16, 0, 0, 0,
+ 1, 0, 0, 0, 1, 0,
+ 0, 0, 209, 3, 0, 0,
+ 6, 0, 0, 0, 6, 0,
+ 0, 0, 1, 0, 0, 0,
+ 16, 0, 0, 0, 0, 0,
+ 0, 0, 1, 0, 0, 0,
+ 1, 0, 0, 0, 220, 3,
+ 0, 0, 6, 0, 0, 0,
+ 6, 0, 0, 0, 1, 0,
+ 0, 0, 16, 0, 0, 0,
+ 1, 0, 0, 0, 1, 0,
+ 0, 0, 1, 0, 0, 0,
+ 244, 3, 0, 0, 6, 0,
+ 0, 0, 6, 0, 0, 0,
+ 1, 0, 0, 0, 4, 0,
+ 0, 0, 2, 0, 0, 0,
+ 1, 0, 0, 0, 1, 0,
+ 0, 0, 7, 4, 0, 0,
+ 6, 0, 0, 0, 6, 0,
+ 0, 0, 1, 0, 0, 0,
+ 120, 0, 0, 0, 3, 0,
+ 0, 0, 1, 0, 0, 0,
+ 1, 0, 0, 0, 98, 67,
+ 108, 111, 116, 104, 68, 97,
+ 116, 97, 0, 98, 73, 116,
+ 101, 114, 68, 97, 116, 97,
+ 0, 98, 80, 104, 97, 115,
+ 101, 67, 111, 110, 102, 105,
+ 103, 115, 0, 98, 67, 111,
+ 110, 115, 116, 114, 97, 105,
+ 110, 116, 115, 0, 98, 84,
+ 101, 116, 104, 101, 114, 67,
+ 111, 110, 115, 116, 114, 97,
+ 105, 110, 116, 115, 0, 98,
+ 67, 97, 112, 115, 117, 108,
+ 101, 73, 110, 100, 105, 99,
+ 101, 115, 0, 98, 67, 111,
+ 108, 108, 105, 115, 105, 111,
+ 110, 83, 112, 104, 101, 114,
+ 101, 115, 0, 98, 67, 111,
+ 110, 118, 101, 120, 77, 97,
+ 115, 107, 115, 0, 98, 67,
+ 111, 108, 108, 105, 115, 105,
+ 111, 110, 80, 108, 97, 110,
+ 101, 115, 0, 98, 67, 111,
+ 108, 108, 105, 115, 105, 111,
+ 110, 84, 114, 105, 97, 110,
+ 103, 108, 101, 115, 0, 98,
+ 77, 111, 116, 105, 111, 110,
+ 67, 111, 110, 115, 116, 114,
+ 97, 105, 110, 116, 115, 0,
+ 98, 83, 101, 112, 97, 114,
+ 97, 116, 105, 111, 110, 67,
+ 111, 110, 115, 116, 114, 97,
+ 105, 110, 116, 115, 0, 98,
+ 80, 97, 114, 116, 105, 99,
+ 108, 101, 65, 99, 99, 101,
+ 108, 101, 114, 97, 116, 105,
+ 111, 110, 115, 0, 98, 82,
+ 101, 115, 116, 80, 111, 115,
+ 105, 116, 105, 111, 110, 115,
+ 0, 98, 83, 101, 108, 102,
+ 67, 111, 108, 108, 105, 115,
+ 105, 111, 110, 73, 110, 100,
+ 105, 99, 101, 115, 0, 98,
+ 80, 101, 114, 67, 111, 110,
+ 115, 116, 114, 97, 105, 110,
+ 116, 83, 116, 105, 102, 102,
+ 110, 101, 115, 115, 0, 98,
+ 80, 97, 114, 116, 105, 99,
+ 108, 101, 115, 0, 98, 83,
+ 101, 108, 102, 67, 111, 108,
+ 108, 105, 115, 105, 111, 110,
+ 80, 97, 114, 116, 105, 99,
+ 108, 101, 115, 0, 98, 83,
+ 101, 108, 102, 67, 111, 108,
+ 108, 105, 115, 105, 111, 110,
+ 68, 97, 116, 97, 0, 98,
+ 70, 114, 97, 109, 101, 68,
+ 97, 116, 97, 0, 171, 171,
+ 188, 2, 0, 0, 1, 0,
+ 0, 0, 244, 5, 0, 0,
+ 116, 0, 0, 0, 0, 0,
+ 0, 0, 3, 0, 0, 0,
+ 199, 2, 0, 0, 1, 0,
+ 0, 0, 40, 10, 0, 0,
+ 100, 0, 0, 0, 0, 0,
+ 0, 0, 3, 0, 0, 0,
+ 209, 2, 0, 0, 1, 0,
+ 0, 0, 224, 10, 0, 0,
+ 24, 0, 0, 0, 0, 0,
+ 0, 0, 3, 0, 0, 0,
+ 223, 2, 0, 0, 1, 0,
+ 0, 0, 228, 11, 0, 0,
+ 8, 0, 0, 0, 0, 0,
+ 0, 0, 3, 0, 0, 0,
+ 236, 2, 0, 0, 1, 0,
+ 0, 0, 108, 12, 0, 0,
+ 4, 0, 0, 0, 0, 0,
+ 0, 0, 3, 0, 0, 0,
+ 255, 2, 0, 0, 1, 0,
+ 0, 0, 212, 12, 0, 0,
+ 8, 0, 0, 0, 0, 0,
+ 0, 0, 3, 0, 0, 0,
+ 15, 3, 0, 0, 1, 0,
+ 0, 0, 80, 13, 0, 0,
+ 16, 0, 0, 0, 0, 0,
+ 0, 0, 3, 0, 0, 0,
+ 33, 3, 0, 0, 1, 0,
+ 0, 0, 164, 13, 0, 0,
+ 4, 0, 0, 0, 0, 0,
+ 0, 0, 3, 0, 0, 0,
+ 46, 3, 0, 0, 1, 0,
+ 0, 0, 204, 13, 0, 0,
+ 16, 0, 0, 0, 0, 0,
+ 0, 0, 3, 0, 0, 0,
+ 63, 3, 0, 0, 1, 0,
+ 0, 0, 244, 13, 0, 0,
+ 12, 0, 0, 0, 0, 0,
+ 0, 0, 3, 0, 0, 0,
+ 83, 3, 0, 0, 1, 0,
+ 0, 0, 72, 14, 0, 0,
+ 16, 0, 0, 0, 0, 0,
+ 0, 0, 3, 0, 0, 0,
+ 102, 3, 0, 0, 1, 0,
+ 0, 0, 112, 14, 0, 0,
+ 16, 0, 0, 0, 0, 0,
+ 0, 0, 3, 0, 0, 0,
+ 125, 3, 0, 0, 1, 0,
+ 0, 0, 152, 14, 0, 0,
+ 16, 0, 0, 0, 0, 0,
+ 0, 0, 3, 0, 0, 0,
+ 148, 3, 0, 0, 1, 0,
+ 0, 0, 192, 14, 0, 0,
+ 16, 0, 0, 0, 0, 0,
+ 0, 0, 3, 0, 0, 0,
+ 163, 3, 0, 0, 1, 0,
+ 0, 0, 232, 14, 0, 0,
+ 4, 0, 0, 0, 0, 0,
+ 0, 0, 3, 0, 0, 0,
+ 185, 3, 0, 0, 1, 0,
+ 0, 0, 60, 15, 0, 0,
+ 4, 0, 0, 0, 0, 0,
+ 0, 0, 3, 0, 0, 0,
+ 209, 3, 0, 0, 1, 0,
+ 0, 0, 100, 15, 0, 0,
+ 16, 0, 0, 0, 0, 0,
+ 0, 0, 3, 0, 0, 0,
+ 220, 3, 0, 0, 1, 0,
+ 0, 0, 140, 15, 0, 0,
+ 16, 0, 0, 0, 0, 0,
+ 0, 0, 3, 0, 0, 0,
+ 244, 3, 0, 0, 1, 0,
+ 0, 0, 180, 15, 0, 0,
+ 4, 0, 0, 0, 0, 0,
+ 0, 0, 3, 0, 0, 0,
+ 7, 4, 0, 0, 1, 0,
+ 0, 0, 220, 15, 0, 0,
+ 120, 0, 0, 0, 0, 0,
+ 0, 0, 3, 0, 0, 0,
+ 28, 6, 0, 0, 0, 0,
+ 0, 0, 116, 0, 0, 0,
+ 2, 0, 0, 0, 4, 10,
+ 0, 0, 0, 0, 0, 0,
+ 255, 255, 255, 255, 0, 0,
+ 0, 0, 255, 255, 255, 255,
+ 0, 0, 0, 0, 36, 69,
+ 108, 101, 109, 101, 110, 116,
+ 0, 68, 120, 67, 108, 111,
+ 116, 104, 68, 97, 116, 97,
+ 0, 109, 78, 117, 109, 80,
+ 97, 114, 116, 105, 99, 108,
+ 101, 115, 0, 117, 105, 110,
+ 116, 51, 50, 95, 116, 0,
+ 0, 0, 19, 0, 1, 0,
+ 1, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 63, 6, 0, 0,
+ 109, 80, 97, 114, 116, 105,
+ 99, 108, 101, 115, 79, 102,
+ 102, 115, 101, 116, 0, 109,
+ 78, 117, 109, 80, 104, 97,
+ 115, 101, 115, 0, 109, 80,
+ 104, 97, 115, 101, 67, 111,
+ 110, 102, 105, 103, 79, 102,
+ 102, 115, 101, 116, 0, 109,
+ 67, 111, 110, 115, 116, 114,
+ 97, 105, 110, 116, 79, 102,
+ 102, 115, 101, 116, 0, 109,
+ 83, 116, 105, 102, 102, 110,
+ 101, 115, 115, 79, 102, 102,
+ 115, 101, 116, 0, 109, 78,
+ 117, 109, 84, 101, 116, 104,
+ 101, 114, 115, 0, 109, 84,
+ 101, 116, 104, 101, 114, 79,
+ 102, 102, 115, 101, 116, 0,
+ 109, 84, 101, 116, 104, 101,
+ 114, 67, 111, 110, 115, 116,
+ 114, 97, 105, 110, 116, 83,
+ 99, 97, 108, 101, 0, 102,
+ 108, 111, 97, 116, 0, 171,
+ 171, 171, 0, 0, 3, 0,
+ 1, 0, 1, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 239, 6,
+ 0, 0, 109, 77, 111, 116,
+ 105, 111, 110, 67, 111, 110,
+ 115, 116, 114, 97, 105, 110,
+ 116, 83, 99, 97, 108, 101,
+ 0, 109, 77, 111, 116, 105,
+ 111, 110, 67, 111, 110, 115,
+ 116, 114, 97, 105, 110, 116,
+ 66, 105, 97, 115, 0, 109,
+ 78, 117, 109, 67, 97, 112,
+ 115, 117, 108, 101, 115, 0,
+ 109, 67, 97, 112, 115, 117,
+ 108, 101, 79, 102, 102, 115,
+ 101, 116, 0, 109, 78, 117,
+ 109, 83, 112, 104, 101, 114,
+ 101, 115, 0, 109, 78, 117,
+ 109, 80, 108, 97, 110, 101,
+ 115, 0, 109, 78, 117, 109,
+ 67, 111, 110, 118, 101, 120,
+ 101, 115, 0, 109, 67, 111,
+ 110, 118, 101, 120, 77, 97,
+ 115, 107, 115, 79, 102, 102,
+ 115, 101, 116, 0, 109, 78,
+ 117, 109, 67, 111, 108, 108,
+ 105, 115, 105, 111, 110, 84,
+ 114, 105, 97, 110, 103, 108,
+ 101, 115, 0, 109, 69, 110,
+ 97, 98, 108, 101, 67, 111,
+ 110, 116, 105, 110, 117, 111,
+ 117, 115, 67, 111, 108, 108,
+ 105, 115, 105, 111, 110, 0,
+ 109, 67, 111, 108, 108, 105,
+ 115, 105, 111, 110, 77, 97,
+ 115, 115, 83, 99, 97, 108,
+ 101, 0, 109, 70, 114, 105,
+ 99, 116, 105, 111, 110, 83,
+ 99, 97, 108, 101, 0, 109,
+ 83, 101, 108, 102, 67, 111,
+ 108, 108, 105, 115, 105, 111,
+ 110, 68, 105, 115, 116, 97,
+ 110, 99, 101, 0, 109, 78,
+ 117, 109, 83, 101, 108, 102,
+ 67, 111, 108, 108, 105, 115,
+ 105, 111, 110, 73, 110, 100,
+ 105, 99, 101, 115, 0, 109,
+ 83, 101, 108, 102, 67, 111,
+ 108, 108, 105, 115, 105, 111,
+ 110, 73, 110, 100, 105, 99,
+ 101, 115, 79, 102, 102, 115,
+ 101, 116, 0, 109, 83, 101,
+ 108, 102, 67, 111, 108, 108,
+ 105, 115, 105, 111, 110, 80,
+ 97, 114, 116, 105, 99, 108,
+ 101, 115, 79, 102, 102, 115,
+ 101, 116, 0, 109, 83, 101,
+ 108, 102, 67, 111, 108, 108,
+ 105, 115, 105, 111, 110, 68,
+ 97, 116, 97, 79, 102, 102,
+ 115, 101, 116, 0, 109, 83,
+ 108, 101, 101, 112, 84, 101,
+ 115, 116, 73, 110, 116, 101,
+ 114, 118, 97, 108, 0, 109,
+ 83, 108, 101, 101, 112, 65,
+ 102, 116, 101, 114, 67, 111,
+ 117, 110, 116, 0, 109, 83,
+ 108, 101, 101, 112, 84, 104,
+ 114, 101, 115, 104, 111, 108,
+ 100, 0, 49, 6, 0, 0,
+ 72, 6, 0, 0, 0, 0,
+ 0, 0, 108, 6, 0, 0,
+ 72, 6, 0, 0, 4, 0,
+ 0, 0, 125, 6, 0, 0,
+ 72, 6, 0, 0, 8, 0,
+ 0, 0, 136, 6, 0, 0,
+ 72, 6, 0, 0, 12, 0,
+ 0, 0, 155, 6, 0, 0,
+ 72, 6, 0, 0, 16, 0,
+ 0, 0, 173, 6, 0, 0,
+ 72, 6, 0, 0, 20, 0,
+ 0, 0, 190, 6, 0, 0,
+ 72, 6, 0, 0, 24, 0,
+ 0, 0, 202, 6, 0, 0,
+ 72, 6, 0, 0, 28, 0,
+ 0, 0, 216, 6, 0, 0,
+ 248, 6, 0, 0, 32, 0,
+ 0, 0, 28, 7, 0, 0,
+ 248, 6, 0, 0, 36, 0,
+ 0, 0, 51, 7, 0, 0,
+ 248, 6, 0, 0, 40, 0,
+ 0, 0, 73, 7, 0, 0,
+ 72, 6, 0, 0, 44, 0,
+ 0, 0, 86, 7, 0, 0,
+ 72, 6, 0, 0, 48, 0,
+ 0, 0, 101, 7, 0, 0,
+ 72, 6, 0, 0, 52, 0,
+ 0, 0, 113, 7, 0, 0,
+ 72, 6, 0, 0, 56, 0,
+ 0, 0, 124, 7, 0, 0,
+ 72, 6, 0, 0, 60, 0,
+ 0, 0, 137, 7, 0, 0,
+ 72, 6, 0, 0, 64, 0,
+ 0, 0, 156, 7, 0, 0,
+ 72, 6, 0, 0, 68, 0,
+ 0, 0, 179, 7, 0, 0,
+ 72, 6, 0, 0, 72, 0,
+ 0, 0, 206, 7, 0, 0,
+ 248, 6, 0, 0, 76, 0,
+ 0, 0, 226, 7, 0, 0,
+ 248, 6, 0, 0, 80, 0,
+ 0, 0, 241, 7, 0, 0,
+ 248, 6, 0, 0, 84, 0,
+ 0, 0, 8, 8, 0, 0,
+ 72, 6, 0, 0, 88, 0,
+ 0, 0, 33, 8, 0, 0,
+ 72, 6, 0, 0, 92, 0,
+ 0, 0, 61, 8, 0, 0,
+ 72, 6, 0, 0, 96, 0,
+ 0, 0, 91, 8, 0, 0,
+ 72, 6, 0, 0, 100, 0,
+ 0, 0, 116, 8, 0, 0,
+ 72, 6, 0, 0, 104, 0,
+ 0, 0, 135, 8, 0, 0,
+ 72, 6, 0, 0, 108, 0,
+ 0, 0, 152, 8, 0, 0,
+ 248, 6, 0, 0, 112, 0,
+ 0, 0, 5, 0, 0, 0,
+ 1, 0, 29, 0, 0, 0,
+ 29, 0, 168, 8, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 37, 6,
+ 0, 0, 28, 6, 0, 0,
+ 0, 0, 0, 0, 100, 0,
+ 0, 0, 2, 0, 0, 0,
+ 188, 10, 0, 0, 0, 0,
+ 0, 0, 255, 255, 255, 255,
+ 0, 0, 0, 0, 255, 255,
+ 255, 255, 0, 0, 0, 0,
+ 68, 120, 73, 116, 101, 114,
+ 97, 116, 105, 111, 110, 68,
+ 97, 116, 97, 0, 109, 73,
+ 110, 116, 101, 103, 114, 97,
+ 116, 105, 111, 110, 84, 114,
+ 97, 102, 111, 0, 171, 171,
+ 0, 0, 3, 0, 1, 0,
+ 1, 0, 24, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 239, 6, 0, 0,
+ 109, 73, 115, 84, 117, 114,
+ 110, 105, 110, 103, 0, 171,
+ 96, 10, 0, 0, 116, 10,
+ 0, 0, 0, 0, 0, 0,
+ 152, 10, 0, 0, 72, 6,
+ 0, 0, 96, 0, 0, 0,
+ 5, 0, 0, 0, 1, 0,
+ 25, 0, 0, 0, 2, 0,
+ 164, 10, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 80, 10, 0, 0,
+ 28, 6, 0, 0, 0, 0,
+ 0, 0, 24, 0, 0, 0,
+ 2, 0, 0, 0, 192, 11,
+ 0, 0, 0, 0, 0, 0,
+ 255, 255, 255, 255, 0, 0,
+ 0, 0, 255, 255, 255, 255,
+ 0, 0, 0, 0, 68, 120,
+ 80, 104, 97, 115, 101, 67,
+ 111, 110, 102, 105, 103, 0,
+ 109, 83, 116, 105, 102, 102,
+ 110, 101, 115, 115, 0, 109,
+ 83, 116, 105, 102, 102, 110,
+ 101, 115, 115, 77, 117, 108,
+ 116, 105, 112, 108, 105, 101,
+ 114, 0, 109, 67, 111, 109,
+ 112, 114, 101, 115, 115, 105,
+ 111, 110, 76, 105, 109, 105,
+ 116, 0, 109, 83, 116, 114,
+ 101, 116, 99, 104, 76, 105,
+ 109, 105, 116, 0, 109, 70,
+ 105, 114, 115, 116, 67, 111,
+ 110, 115, 116, 114, 97, 105,
+ 110, 116, 0, 109, 78, 117,
+ 109, 67, 111, 110, 115, 116,
+ 114, 97, 105, 110, 116, 115,
+ 0, 171, 22, 11, 0, 0,
+ 248, 6, 0, 0, 0, 0,
+ 0, 0, 33, 11, 0, 0,
+ 248, 6, 0, 0, 4, 0,
+ 0, 0, 54, 11, 0, 0,
+ 248, 6, 0, 0, 8, 0,
+ 0, 0, 72, 11, 0, 0,
+ 248, 6, 0, 0, 12, 0,
+ 0, 0, 86, 11, 0, 0,
+ 72, 6, 0, 0, 16, 0,
+ 0, 0, 103, 11, 0, 0,
+ 72, 6, 0, 0, 20, 0,
+ 0, 0, 5, 0, 0, 0,
+ 1, 0, 6, 0, 0, 0,
+ 6, 0, 120, 11, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 8, 11,
+ 0, 0, 28, 6, 0, 0,
+ 0, 0, 0, 0, 8, 0,
+ 0, 0, 2, 0, 0, 0,
+ 72, 12, 0, 0, 0, 0,
+ 0, 0, 255, 255, 255, 255,
+ 0, 0, 0, 0, 255, 255,
+ 255, 255, 0, 0, 0, 0,
+ 68, 120, 67, 111, 110, 115,
+ 116, 114, 97, 105, 110, 116,
+ 0, 109, 82, 101, 115, 116,
+ 118, 97, 108, 117, 101, 0,
+ 109, 73, 110, 100, 105, 99,
+ 101, 115, 0, 171, 171, 171,
+ 25, 12, 0, 0, 248, 6,
+ 0, 0, 0, 0, 0, 0,
+ 36, 12, 0, 0, 72, 6,
+ 0, 0, 4, 0, 0, 0,
+ 5, 0, 0, 0, 1, 0,
+ 2, 0, 0, 0, 2, 0,
+ 48, 12, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 12, 12, 0, 0,
+ 28, 6, 0, 0, 0, 0,
+ 0, 0, 4, 0, 0, 0,
+ 2, 0, 0, 0, 176, 12,
+ 0, 0, 0, 0, 0, 0,
+ 255, 255, 255, 255, 0, 0,
+ 0, 0, 255, 255, 255, 255,
+ 0, 0, 0, 0, 68, 120,
+ 84, 101, 116, 104, 101, 114,
+ 0, 109, 86, 97, 108, 117,
+ 101, 0, 157, 12, 0, 0,
+ 72, 6, 0, 0, 0, 0,
+ 0, 0, 5, 0, 0, 0,
+ 1, 0, 1, 0, 0, 0,
+ 1, 0, 164, 12, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 148, 12,
+ 0, 0, 28, 6, 0, 0,
+ 0, 0, 0, 0, 8, 0,
+ 0, 0, 2, 0, 0, 0,
+ 44, 13, 0, 0, 0, 0,
+ 0, 0, 255, 255, 255, 255,
+ 0, 0, 0, 0, 255, 255,
+ 255, 255, 0, 0, 0, 0,
+ 73, 110, 100, 101, 120, 80,
+ 97, 105, 114, 0, 102, 105,
+ 114, 115, 116, 0, 115, 101,
+ 99, 111, 110, 100, 0, 171,
+ 6, 13, 0, 0, 72, 6,
+ 0, 0, 0, 0, 0, 0,
+ 12, 13, 0, 0, 72, 6,
+ 0, 0, 4, 0, 0, 0,
+ 5, 0, 0, 0, 1, 0,
+ 2, 0, 0, 0, 2, 0,
+ 20, 13, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 252, 12, 0, 0,
+ 28, 6, 0, 0, 0, 0,
+ 0, 0, 16, 0, 0, 0,
+ 2, 0, 0, 0, 128, 13,
+ 0, 0, 0, 0, 0, 0,
+ 255, 255, 255, 255, 0, 0,
+ 0, 0, 255, 255, 255, 255,
+ 0, 0, 0, 0, 102, 108,
+ 111, 97, 116, 52, 0, 171,
+ 1, 0, 3, 0, 1, 0,
+ 4, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 120, 13, 0, 0,
+ 28, 6, 0, 0, 0, 0,
+ 0, 0, 4, 0, 0, 0,
+ 2, 0, 0, 0, 72, 6,
+ 0, 0, 0, 0, 0, 0,
+ 255, 255, 255, 255, 0, 0,
+ 0, 0, 255, 255, 255, 255,
+ 0, 0, 0, 0, 28, 6,
+ 0, 0, 0, 0, 0, 0,
+ 16, 0, 0, 0, 2, 0,
+ 0, 0, 128, 13, 0, 0,
+ 0, 0, 0, 0, 255, 255,
+ 255, 255, 0, 0, 0, 0,
+ 255, 255, 255, 255, 0, 0,
+ 0, 0, 28, 6, 0, 0,
+ 0, 0, 0, 0, 12, 0,
+ 0, 0, 2, 0, 0, 0,
+ 36, 14, 0, 0, 0, 0,
+ 0, 0, 255, 255, 255, 255,
+ 0, 0, 0, 0, 255, 255,
+ 255, 255, 0, 0, 0, 0,
+ 102, 108, 111, 97, 116, 51,
+ 0, 171, 1, 0, 3, 0,
+ 1, 0, 3, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 28, 14,
+ 0, 0, 28, 6, 0, 0,
+ 0, 0, 0, 0, 16, 0,
+ 0, 0, 2, 0, 0, 0,
+ 128, 13, 0, 0, 0, 0,
+ 0, 0, 255, 255, 255, 255,
+ 0, 0, 0, 0, 255, 255,
+ 255, 255, 0, 0, 0, 0,
+ 28, 6, 0, 0, 0, 0,
+ 0, 0, 16, 0, 0, 0,
+ 2, 0, 0, 0, 128, 13,
+ 0, 0, 0, 0, 0, 0,
+ 255, 255, 255, 255, 0, 0,
+ 0, 0, 255, 255, 255, 255,
+ 0, 0, 0, 0, 28, 6,
+ 0, 0, 0, 0, 0, 0,
+ 16, 0, 0, 0, 2, 0,
+ 0, 0, 128, 13, 0, 0,
+ 0, 0, 0, 0, 255, 255,
+ 255, 255, 0, 0, 0, 0,
+ 255, 255, 255, 255, 0, 0,
+ 0, 0, 28, 6, 0, 0,
+ 0, 0, 0, 0, 16, 0,
+ 0, 0, 2, 0, 0, 0,
+ 128, 13, 0, 0, 0, 0,
+ 0, 0, 255, 255, 255, 255,
+ 0, 0, 0, 0, 255, 255,
+ 255, 255, 0, 0, 0, 0,
+ 28, 6, 0, 0, 0, 0,
+ 0, 0, 4, 0, 0, 0,
+ 2, 0, 0, 0, 24, 15,
+ 0, 0, 0, 0, 0, 0,
+ 255, 255, 255, 255, 0, 0,
+ 0, 0, 255, 255, 255, 255,
+ 0, 0, 0, 0, 105, 110,
+ 116, 51, 50, 95, 116, 0,
+ 0, 0, 2, 0, 1, 0,
+ 1, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 16, 15, 0, 0,
+ 28, 6, 0, 0, 0, 0,
+ 0, 0, 4, 0, 0, 0,
+ 2, 0, 0, 0, 248, 6,
+ 0, 0, 0, 0, 0, 0,
+ 255, 255, 255, 255, 0, 0,
+ 0, 0, 255, 255, 255, 255,
+ 0, 0, 0, 0, 28, 6,
+ 0, 0, 0, 0, 0, 0,
+ 16, 0, 0, 0, 2, 0,
+ 0, 0, 128, 13, 0, 0,
+ 0, 0, 0, 0, 255, 255,
+ 255, 255, 0, 0, 0, 0,
+ 255, 255, 255, 255, 0, 0,
+ 0, 0, 28, 6, 0, 0,
+ 0, 0, 0, 0, 16, 0,
+ 0, 0, 2, 0, 0, 0,
+ 128, 13, 0, 0, 0, 0,
+ 0, 0, 255, 255, 255, 255,
+ 0, 0, 0, 0, 255, 255,
+ 255, 255, 0, 0, 0, 0,
+ 28, 6, 0, 0, 0, 0,
+ 0, 0, 4, 0, 0, 0,
+ 2, 0, 0, 0, 72, 6,
+ 0, 0, 0, 0, 0, 0,
+ 255, 255, 255, 255, 0, 0,
+ 0, 0, 255, 255, 255, 255,
+ 0, 0, 0, 0, 28, 6,
+ 0, 0, 0, 0, 0, 0,
+ 120, 0, 0, 0, 2, 0,
+ 0, 0, 216, 19, 0, 0,
+ 0, 0, 0, 0, 255, 255,
+ 255, 255, 0, 0, 0, 0,
+ 255, 255, 255, 255, 0, 0,
+ 0, 0, 68, 120, 70, 114,
+ 97, 109, 101, 68, 97, 116,
+ 97, 0, 109, 68, 101, 118,
+ 105, 99, 101, 80, 97, 114,
+ 116, 105, 99, 108, 101, 115,
+ 68, 105, 114, 116, 121, 0,
+ 98, 111, 111, 108, 0, 171,
+ 0, 0, 1, 0, 1, 0,
+ 1, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 38, 16, 0, 0,
+ 109, 78, 117, 109, 83, 104,
+ 97, 114, 101, 100, 80, 111,
+ 115, 105, 116, 105, 111, 110,
+ 115, 0, 109, 73, 116, 101,
+ 114, 68, 116, 0, 109, 70,
+ 105, 114, 115, 116, 73, 116,
+ 101, 114, 97, 116, 105, 111,
+ 110, 0, 109, 78, 117, 109,
+ 73, 116, 101, 114, 97, 116,
+ 105, 111, 110, 115, 0, 109,
+ 84, 101, 116, 104, 101, 114,
+ 67, 111, 110, 115, 116, 114,
+ 97, 105, 110, 116, 83, 116,
+ 105, 102, 102, 110, 101, 115,
+ 115, 0, 109, 77, 111, 116,
+ 105, 111, 110, 67, 111, 110,
+ 115, 116, 114, 97, 105, 110,
+ 116, 83, 116, 105, 102, 102,
+ 110, 101, 115, 115, 0, 109,
+ 83, 116, 97, 114, 116, 77,
+ 111, 116, 105, 111, 110, 67,
+ 111, 110, 115, 116, 114, 97,
+ 105, 110, 115, 79, 102, 102,
+ 115, 101, 116, 0, 109, 84,
+ 97, 114, 103, 101, 116, 77,
+ 111, 116, 105, 111, 110, 67,
+ 111, 110, 115, 116, 114, 97,
+ 105, 110, 115, 79, 102, 102,
+ 115, 101, 116, 0, 109, 83,
+ 116, 97, 114, 116, 83, 101,
+ 112, 97, 114, 97, 116, 105,
+ 111, 110, 67, 111, 110, 115,
+ 116, 114, 97, 105, 110, 115,
+ 79, 102, 102, 115, 101, 116,
+ 0, 109, 84, 97, 114, 103,
+ 101, 116, 83, 101, 112, 97,
+ 114, 97, 116, 105, 111, 110,
+ 67, 111, 110, 115, 116, 114,
+ 97, 105, 110, 115, 79, 102,
+ 102, 115, 101, 116, 0, 109,
+ 80, 97, 114, 116, 105, 99,
+ 108, 101, 65, 99, 99, 101,
+ 108, 101, 114, 97, 116, 105,
+ 111, 110, 115, 79, 102, 102,
+ 115, 101, 116, 0, 109, 83,
+ 116, 97, 114, 116, 83, 112,
+ 104, 101, 114, 101, 79, 102,
+ 102, 115, 101, 116, 0, 109,
+ 84, 97, 114, 103, 101, 116,
+ 83, 112, 104, 101, 114, 101,
+ 79, 102, 102, 115, 101, 116,
+ 0, 109, 83, 116, 97, 114,
+ 116, 67, 111, 108, 108, 105,
+ 115, 105, 111, 110, 80, 108,
+ 97, 110, 101, 79, 102, 102,
+ 115, 101, 116, 0, 109, 84,
+ 97, 114, 103, 101, 116, 67,
+ 111, 108, 108, 105, 115, 105,
+ 111, 110, 80, 108, 97, 110,
+ 101, 79, 102, 102, 115, 101,
+ 116, 0, 109, 83, 116, 97,
+ 114, 116, 67, 111, 108, 108,
+ 105, 115, 105, 111, 110, 84,
+ 114, 105, 97, 110, 103, 108,
+ 101, 115, 79, 102, 102, 115,
+ 101, 116, 0, 109, 84, 97,
+ 114, 103, 101, 116, 67, 111,
+ 108, 108, 105, 115, 105, 111,
+ 110, 84, 114, 105, 97, 110,
+ 103, 108, 101, 115, 79, 102,
+ 102, 115, 101, 116, 0, 109,
+ 83, 101, 108, 102, 67, 111,
+ 108, 108, 105, 115, 105, 111,
+ 110, 83, 116, 105, 102, 102,
+ 110, 101, 115, 115, 0, 109,
+ 80, 97, 114, 116, 105, 99,
+ 108, 101, 66, 111, 117, 110,
+ 100, 115, 0, 171, 171, 171,
+ 0, 0, 3, 0, 1, 0,
+ 1, 0, 6, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 239, 6, 0, 0,
+ 109, 83, 108, 101, 101, 112,
+ 80, 97, 115, 115, 67, 111,
+ 117, 110, 116, 101, 114, 0,
+ 109, 83, 108, 101, 101, 112,
+ 84, 101, 115, 116, 67, 111,
+ 117, 110, 116, 101, 114, 0,
+ 109, 83, 116, 105, 102, 102,
+ 110, 101, 115, 115, 69, 120,
+ 112, 111, 110, 101, 110, 116,
+ 0, 109, 82, 101, 115, 116,
+ 80, 111, 115, 105, 116, 105,
+ 111, 110, 115, 79, 102, 102,
+ 115, 101, 116, 0, 109, 73,
+ 110, 105, 116, 83, 101, 108,
+ 102, 67, 111, 108, 108, 105,
+ 115, 105, 111, 110, 68, 97,
+ 116, 97, 0, 171, 16, 16,
+ 0, 0, 44, 16, 0, 0,
+ 0, 0, 0, 0, 80, 16,
+ 0, 0, 72, 6, 0, 0,
+ 4, 0, 0, 0, 100, 16,
+ 0, 0, 248, 6, 0, 0,
+ 8, 0, 0, 0, 108, 16,
+ 0, 0, 72, 6, 0, 0,
+ 12, 0, 0, 0, 124, 16,
+ 0, 0, 72, 6, 0, 0,
+ 16, 0, 0, 0, 139, 16,
+ 0, 0, 248, 6, 0, 0,
+ 20, 0, 0, 0, 166, 16,
+ 0, 0, 248, 6, 0, 0,
+ 24, 0, 0, 0, 193, 16,
+ 0, 0, 72, 6, 0, 0,
+ 28, 0, 0, 0, 222, 16,
+ 0, 0, 72, 6, 0, 0,
+ 32, 0, 0, 0, 252, 16,
+ 0, 0, 72, 6, 0, 0,
+ 36, 0, 0, 0, 29, 17,
+ 0, 0, 72, 6, 0, 0,
+ 40, 0, 0, 0, 63, 17,
+ 0, 0, 72, 6, 0, 0,
+ 44, 0, 0, 0, 92, 17,
+ 0, 0, 72, 6, 0, 0,
+ 48, 0, 0, 0, 111, 17,
+ 0, 0, 72, 6, 0, 0,
+ 52, 0, 0, 0, 131, 17,
+ 0, 0, 72, 6, 0, 0,
+ 56, 0, 0, 0, 158, 17,
+ 0, 0, 72, 6, 0, 0,
+ 60, 0, 0, 0, 186, 17,
+ 0, 0, 72, 6, 0, 0,
+ 64, 0, 0, 0, 217, 17,
+ 0, 0, 72, 6, 0, 0,
+ 68, 0, 0, 0, 249, 17,
+ 0, 0, 248, 6, 0, 0,
+ 72, 0, 0, 0, 17, 18,
+ 0, 0, 36, 18, 0, 0,
+ 76, 0, 0, 0, 72, 18,
+ 0, 0, 72, 6, 0, 0,
+ 100, 0, 0, 0, 90, 18,
+ 0, 0, 72, 6, 0, 0,
+ 104, 0, 0, 0, 108, 18,
+ 0, 0, 248, 6, 0, 0,
+ 108, 0, 0, 0, 127, 18,
+ 0, 0, 72, 6, 0, 0,
+ 112, 0, 0, 0, 148, 18,
+ 0, 0, 44, 16, 0, 0,
+ 116, 0, 0, 0, 5, 0,
+ 0, 0, 1, 0, 30, 0,
+ 0, 0, 25, 0, 172, 18,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 4, 16, 0, 0, 77, 105,
+ 99, 114, 111, 115, 111, 102,
+ 116, 32, 40, 82, 41, 32,
+ 72, 76, 83, 76, 32, 83,
+ 104, 97, 100, 101, 114, 32,
+ 67, 111, 109, 112, 105, 108,
+ 101, 114, 32, 54, 46, 51,
+ 46, 57, 54, 48, 48, 46,
+ 49, 54, 51, 56, 52, 0,
+ 171, 171, 73, 83, 71, 78,
+ 8, 0, 0, 0, 0, 0,
+ 0, 0, 8, 0, 0, 0,
+ 79, 83, 71, 78, 8, 0,
+ 0, 0, 0, 0, 0, 0,
+ 8, 0, 0, 0, 83, 72,
+ 69, 88, 240, 190, 1, 0,
+ 80, 0, 5, 0, 188, 111,
+ 0, 0, 106, 8, 0, 1,
+ 53, 24, 0, 0, 18, 0,
+ 0, 0, 0, 0, 128, 63,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 128, 63, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 128, 63, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 128, 63,
+ 162, 0, 0, 4, 0, 112,
+ 16, 0, 0, 0, 0, 0,
+ 116, 0, 0, 0, 162, 0,
+ 0, 4, 0, 112, 16, 0,
+ 2, 0, 0, 0, 100, 0,
+ 0, 0, 162, 0, 0, 4,
+ 0, 112, 16, 0, 3, 0,
+ 0, 0, 24, 0, 0, 0,
+ 162, 0, 0, 4, 0, 112,
+ 16, 0, 4, 0, 0, 0,
+ 8, 0, 0, 0, 162, 0,
+ 0, 4, 0, 112, 16, 0,
+ 5, 0, 0, 0, 4, 0,
+ 0, 0, 162, 0, 0, 4,
+ 0, 112, 16, 0, 6, 0,
+ 0, 0, 8, 0, 0, 0,
+ 162, 0, 0, 4, 0, 112,
+ 16, 0, 7, 0, 0, 0,
+ 16, 0, 0, 0, 162, 0,
+ 0, 4, 0, 112, 16, 0,
+ 8, 0, 0, 0, 4, 0,
+ 0, 0, 162, 0, 0, 4,
+ 0, 112, 16, 0, 9, 0,
+ 0, 0, 16, 0, 0, 0,
+ 162, 0, 0, 4, 0, 112,
+ 16, 0, 10, 0, 0, 0,
+ 12, 0, 0, 0, 162, 0,
+ 0, 4, 0, 112, 16, 0,
+ 11, 0, 0, 0, 16, 0,
+ 0, 0, 162, 0, 0, 4,
+ 0, 112, 16, 0, 12, 0,
+ 0, 0, 16, 0, 0, 0,
+ 162, 0, 0, 4, 0, 112,
+ 16, 0, 13, 0, 0, 0,
+ 16, 0, 0, 0, 162, 0,
+ 0, 4, 0, 112, 16, 0,
+ 14, 0, 0, 0, 16, 0,
+ 0, 0, 162, 0, 0, 4,
+ 0, 112, 16, 0, 15, 0,
+ 0, 0, 4, 0, 0, 0,
+ 162, 0, 0, 4, 0, 112,
+ 16, 0, 16, 0, 0, 0,
+ 4, 0, 0, 0, 158, 0,
+ 0, 4, 0, 224, 17, 0,
+ 0, 0, 0, 0, 16, 0,
+ 0, 0, 158, 0, 0, 4,
+ 0, 224, 17, 0, 1, 0,
+ 0, 0, 16, 0, 0, 0,
+ 158, 0, 0, 4, 0, 224,
+ 17, 0, 2, 0, 0, 0,
+ 4, 0, 0, 0, 158, 0,
+ 0, 4, 0, 224, 17, 0,
+ 3, 0, 0, 0, 120, 0,
+ 0, 0, 95, 0, 0, 2,
+ 18, 16, 2, 0, 95, 0,
+ 0, 2, 18, 32, 2, 0,
+ 104, 0, 0, 2, 48, 0,
+ 0, 0, 159, 0, 0, 4,
+ 0, 240, 17, 0, 0, 0,
+ 0, 0, 116, 0, 0, 0,
+ 159, 0, 0, 4, 0, 240,
+ 17, 0, 1, 0, 0, 0,
+ 120, 0, 0, 0, 159, 0,
+ 0, 4, 0, 240, 17, 0,
+ 2, 0, 0, 0, 100, 0,
+ 0, 0, 160, 0, 0, 5,
+ 0, 240, 17, 0, 3, 0,
+ 0, 0, 4, 0, 0, 0,
+ 220, 30, 0, 0, 160, 0,
+ 0, 5, 0, 240, 17, 0,
+ 4, 0, 0, 0, 4, 0,
+ 0, 0, 192, 0, 0, 0,
+ 159, 0, 0, 4, 0, 240,
+ 17, 0, 5, 0, 0, 0,
+ 36, 0, 0, 0, 160, 0,
+ 0, 5, 0, 240, 17, 0,
+ 6, 0, 0, 0, 4, 0,
+ 0, 0, 3, 0, 0, 0,
+ 155, 0, 0, 4, 0, 4,
+ 0, 0, 1, 0, 0, 0,
+ 1, 0, 0, 0, 31, 0,
+ 0, 2, 10, 32, 2, 0,
+ 167, 0, 0, 138, 2, 163,
+ 3, 128, 131, 153, 25, 0,
+ 242, 0, 16, 0, 0, 0,
+ 0, 0, 10, 16, 2, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 70, 126, 16, 0,
+ 0, 0, 0, 0, 167, 0,
+ 0, 138, 2, 163, 3, 128,
+ 131, 153, 25, 0, 242, 0,
+ 16, 0, 1, 0, 0, 0,
+ 10, 16, 2, 0, 1, 64,
+ 0, 0, 16, 0, 0, 0,
+ 70, 126, 16, 0, 0, 0,
+ 0, 0, 167, 0, 0, 138,
+ 2, 163, 3, 128, 131, 153,
+ 25, 0, 242, 0, 16, 0,
+ 2, 0, 0, 0, 10, 16,
+ 2, 0, 1, 64, 0, 0,
+ 32, 0, 0, 0, 70, 126,
+ 16, 0, 0, 0, 0, 0,
+ 167, 0, 0, 138, 2, 163,
+ 3, 128, 131, 153, 25, 0,
+ 50, 0, 16, 0, 3, 0,
+ 0, 0, 10, 16, 2, 0,
+ 1, 64, 0, 0, 48, 0,
+ 0, 0, 70, 112, 16, 0,
+ 0, 0, 0, 0, 167, 0,
+ 0, 138, 2, 163, 3, 128,
+ 131, 153, 25, 0, 242, 0,
+ 16, 0, 4, 0, 0, 0,
+ 10, 16, 2, 0, 1, 64,
+ 0, 0, 60, 0, 0, 0,
+ 70, 126, 16, 0, 0, 0,
+ 0, 0, 167, 0, 0, 138,
+ 2, 163, 3, 128, 131, 153,
+ 25, 0, 242, 0, 16, 0,
+ 5, 0, 0, 0, 10, 16,
+ 2, 0, 1, 64, 0, 0,
+ 76, 0, 0, 0, 70, 126,
+ 16, 0, 0, 0, 0, 0,
+ 167, 0, 0, 138, 2, 163,
+ 3, 128, 131, 153, 25, 0,
+ 242, 0, 16, 0, 6, 0,
+ 0, 0, 10, 16, 2, 0,
+ 1, 64, 0, 0, 92, 0,
+ 0, 0, 70, 126, 16, 0,
+ 0, 0, 0, 0, 167, 0,
+ 0, 138, 2, 163, 3, 128,
+ 131, 153, 25, 0, 66, 0,
+ 16, 0, 3, 0, 0, 0,
+ 10, 16, 2, 0, 1, 64,
+ 0, 0, 112, 0, 0, 0,
+ 6, 112, 16, 0, 0, 0,
+ 0, 0, 166, 0, 0, 7,
+ 242, 240, 17, 0, 0, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 70, 14,
+ 16, 0, 0, 0, 0, 0,
+ 166, 0, 0, 7, 242, 240,
+ 17, 0, 0, 0, 0, 0,
+ 1, 64, 0, 0, 16, 0,
+ 0, 0, 70, 14, 16, 0,
+ 1, 0, 0, 0, 166, 0,
+ 0, 7, 242, 240, 17, 0,
+ 0, 0, 0, 0, 1, 64,
+ 0, 0, 32, 0, 0, 0,
+ 70, 14, 16, 0, 2, 0,
+ 0, 0, 166, 0, 0, 7,
+ 50, 240, 17, 0, 0, 0,
+ 0, 0, 1, 64, 0, 0,
+ 48, 0, 0, 0, 70, 0,
+ 16, 0, 3, 0, 0, 0,
+ 166, 0, 0, 7, 242, 240,
+ 17, 0, 0, 0, 0, 0,
+ 1, 64, 0, 0, 60, 0,
+ 0, 0, 70, 14, 16, 0,
+ 4, 0, 0, 0, 166, 0,
+ 0, 7, 242, 240, 17, 0,
+ 0, 0, 0, 0, 1, 64,
+ 0, 0, 76, 0, 0, 0,
+ 70, 14, 16, 0, 5, 0,
+ 0, 0, 166, 0, 0, 7,
+ 242, 240, 17, 0, 0, 0,
+ 0, 0, 1, 64, 0, 0,
+ 92, 0, 0, 0, 70, 14,
+ 16, 0, 6, 0, 0, 0,
+ 166, 0, 0, 7, 18, 240,
+ 17, 0, 0, 0, 0, 0,
+ 1, 64, 0, 0, 112, 0,
+ 0, 0, 42, 0, 16, 0,
+ 3, 0, 0, 0, 167, 0,
+ 0, 138, 2, 195, 3, 128,
+ 131, 153, 25, 0, 242, 0,
+ 16, 0, 0, 0, 0, 0,
+ 10, 16, 2, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 70, 238, 17, 0, 3, 0,
+ 0, 0, 167, 0, 0, 138,
+ 2, 195, 3, 128, 131, 153,
+ 25, 0, 242, 0, 16, 0,
+ 1, 0, 0, 0, 10, 16,
+ 2, 0, 1, 64, 0, 0,
+ 16, 0, 0, 0, 70, 238,
+ 17, 0, 3, 0, 0, 0,
+ 167, 0, 0, 138, 2, 195,
+ 3, 128, 131, 153, 25, 0,
+ 242, 0, 16, 0, 2, 0,
+ 0, 0, 10, 16, 2, 0,
+ 1, 64, 0, 0, 32, 0,
+ 0, 0, 70, 238, 17, 0,
+ 3, 0, 0, 0, 167, 0,
+ 0, 138, 2, 195, 3, 128,
+ 131, 153, 25, 0, 242, 0,
+ 16, 0, 3, 0, 0, 0,
+ 10, 16, 2, 0, 1, 64,
+ 0, 0, 48, 0, 0, 0,
+ 70, 238, 17, 0, 3, 0,
+ 0, 0, 167, 0, 0, 138,
+ 2, 195, 3, 128, 131, 153,
+ 25, 0, 242, 0, 16, 0,
+ 4, 0, 0, 0, 10, 16,
+ 2, 0, 1, 64, 0, 0,
+ 64, 0, 0, 0, 70, 238,
+ 17, 0, 3, 0, 0, 0,
+ 167, 0, 0, 138, 2, 195,
+ 3, 128, 131, 153, 25, 0,
+ 242, 0, 16, 0, 5, 0,
+ 0, 0, 10, 16, 2, 0,
+ 1, 64, 0, 0, 80, 0,
+ 0, 0, 70, 238, 17, 0,
+ 3, 0, 0, 0, 167, 0,
+ 0, 138, 2, 195, 3, 128,
+ 131, 153, 25, 0, 242, 0,
+ 16, 0, 6, 0, 0, 0,
+ 10, 16, 2, 0, 1, 64,
+ 0, 0, 96, 0, 0, 0,
+ 70, 238, 17, 0, 3, 0,
+ 0, 0, 167, 0, 0, 138,
+ 2, 195, 3, 128, 131, 153,
+ 25, 0, 50, 0, 16, 0,
+ 7, 0, 0, 0, 10, 16,
+ 2, 0, 1, 64, 0, 0,
+ 112, 0, 0, 0, 70, 224,
+ 17, 0, 3, 0, 0, 0,
+ 166, 0, 0, 7, 242, 240,
+ 17, 0, 1, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 70, 14, 16, 0,
+ 0, 0, 0, 0, 166, 0,
+ 0, 7, 242, 240, 17, 0,
+ 1, 0, 0, 0, 1, 64,
+ 0, 0, 16, 0, 0, 0,
+ 70, 14, 16, 0, 1, 0,
+ 0, 0, 166, 0, 0, 7,
+ 242, 240, 17, 0, 1, 0,
+ 0, 0, 1, 64, 0, 0,
+ 32, 0, 0, 0, 70, 14,
+ 16, 0, 2, 0, 0, 0,
+ 166, 0, 0, 7, 242, 240,
+ 17, 0, 1, 0, 0, 0,
+ 1, 64, 0, 0, 48, 0,
+ 0, 0, 70, 14, 16, 0,
+ 3, 0, 0, 0, 166, 0,
+ 0, 7, 242, 240, 17, 0,
+ 1, 0, 0, 0, 1, 64,
+ 0, 0, 64, 0, 0, 0,
+ 70, 14, 16, 0, 4, 0,
+ 0, 0, 166, 0, 0, 7,
+ 242, 240, 17, 0, 1, 0,
+ 0, 0, 1, 64, 0, 0,
+ 80, 0, 0, 0, 70, 14,
+ 16, 0, 5, 0, 0, 0,
+ 166, 0, 0, 7, 242, 240,
+ 17, 0, 1, 0, 0, 0,
+ 1, 64, 0, 0, 96, 0,
+ 0, 0, 70, 14, 16, 0,
+ 6, 0, 0, 0, 166, 0,
+ 0, 7, 50, 240, 17, 0,
+ 1, 0, 0, 0, 1, 64,
+ 0, 0, 112, 0, 0, 0,
+ 70, 0, 16, 0, 7, 0,
+ 0, 0, 21, 0, 0, 1,
+ 190, 24, 0, 1, 165, 0,
+ 0, 7, 50, 0, 16, 0,
+ 0, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 70, 240, 17, 0, 0, 0,
+ 0, 0, 30, 0, 0, 7,
+ 18, 0, 16, 0, 0, 0,
+ 0, 0, 10, 0, 16, 0,
+ 0, 0, 0, 0, 26, 0,
+ 16, 0, 0, 0, 0, 0,
+ 165, 0, 0, 7, 34, 0,
+ 16, 0, 0, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 0, 0, 0, 0, 80, 0,
+ 0, 7, 66, 0, 16, 0,
+ 0, 0, 0, 0, 1, 64,
+ 0, 0, 183, 7, 0, 0,
+ 26, 0, 16, 0, 0, 0,
+ 0, 0, 31, 0, 4, 3,
+ 42, 0, 16, 0, 0, 0,
+ 0, 0, 165, 0, 0, 7,
+ 66, 0, 16, 0, 0, 0,
+ 0, 0, 1, 64, 0, 0,
+ 4, 0, 0, 0, 6, 240,
+ 17, 0, 0, 0, 0, 0,
+ 54, 0, 0, 4, 18, 0,
+ 16, 0, 1, 0, 0, 0,
+ 10, 32, 2, 0, 48, 0,
+ 0, 1, 80, 0, 0, 7,
+ 130, 0, 16, 0, 0, 0,
+ 0, 0, 10, 0, 16, 0,
+ 1, 0, 0, 0, 26, 0,
+ 16, 0, 0, 0, 0, 0,
+ 3, 0, 4, 3, 58, 0,
+ 16, 0, 0, 0, 0, 0,
+ 30, 0, 0, 7, 130, 0,
+ 16, 0, 0, 0, 0, 0,
+ 42, 0, 16, 0, 0, 0,
+ 0, 0, 10, 0, 16, 0,
+ 1, 0, 0, 0, 167, 0,
+ 0, 139, 2, 131, 0, 128,
+ 131, 153, 25, 0, 242, 0,
+ 16, 0, 2, 0, 0, 0,
+ 58, 0, 16, 0, 0, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 70, 238,
+ 17, 0, 0, 0, 0, 0,
+ 168, 0, 0, 9, 18, 240,
+ 17, 0, 3, 0, 0, 0,
+ 10, 0, 16, 0, 1, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 10, 0,
+ 16, 0, 2, 0, 0, 0,
+ 30, 0, 0, 10, 242, 0,
+ 16, 0, 1, 0, 0, 0,
+ 6, 0, 16, 0, 1, 0,
+ 0, 0, 2, 64, 0, 0,
+ 0, 4, 0, 0, 183, 7,
+ 0, 0, 110, 15, 0, 0,
+ 37, 23, 0, 0, 168, 0,
+ 0, 9, 18, 240, 17, 0,
+ 3, 0, 0, 0, 26, 0,
+ 16, 0, 1, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 26, 0, 16, 0,
+ 2, 0, 0, 0, 168, 0,
+ 0, 9, 18, 240, 17, 0,
+ 3, 0, 0, 0, 42, 0,
+ 16, 0, 1, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 42, 0, 16, 0,
+ 2, 0, 0, 0, 168, 0,
+ 0, 9, 18, 240, 17, 0,
+ 3, 0, 0, 0, 58, 0,
+ 16, 0, 1, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 58, 0, 16, 0,
+ 2, 0, 0, 0, 22, 0,
+ 0, 1, 59, 0, 0, 4,
+ 34, 0, 16, 0, 0, 0,
+ 0, 0, 10, 32, 2, 0,
+ 1, 0, 0, 7, 34, 0,
+ 16, 0, 0, 0, 0, 0,
+ 26, 0, 16, 0, 0, 0,
+ 0, 0, 1, 64, 0, 0,
+ 3, 0, 0, 0, 79, 0,
+ 0, 9, 242, 0, 16, 0,
+ 1, 0, 0, 0, 6, 32,
+ 2, 0, 2, 64, 0, 0,
+ 192, 0, 0, 0, 176, 0,
+ 0, 0, 3, 0, 0, 0,
+ 0, 2, 0, 0, 1, 0,
+ 0, 9, 242, 0, 16, 0,
+ 2, 0, 0, 0, 6, 32,
+ 2, 0, 2, 64, 0, 0,
+ 32, 0, 0, 0, 31, 0,
+ 0, 0, 1, 0, 0, 0,
+ 2, 0, 0, 0, 55, 0,
+ 0, 9, 66, 0, 16, 0,
+ 0, 0, 0, 0, 10, 0,
+ 16, 0, 2, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 128, 191, 1, 64, 0, 0,
+ 0, 0, 128, 63, 30, 0,
+ 0, 9, 242, 0, 16, 0,
+ 3, 0, 0, 0, 6, 32,
+ 2, 0, 2, 64, 0, 0,
+ 16, 0, 0, 0, 8, 0,
+ 0, 0, 4, 0, 0, 0,
+ 2, 0, 0, 0, 32, 0,
+ 0, 7, 130, 0, 16, 0,
+ 0, 0, 0, 0, 26, 0,
+ 16, 0, 2, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 1, 0, 0, 7,
+ 130, 0, 16, 0, 0, 0,
+ 0, 0, 58, 0, 16, 0,
+ 0, 0, 0, 0, 10, 0,
+ 16, 0, 1, 0, 0, 0,
+ 85, 0, 0, 9, 50, 0,
+ 16, 0, 4, 0, 0, 0,
+ 6, 32, 2, 0, 2, 64,
+ 0, 0, 6, 0, 0, 0,
+ 5, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 41, 0, 0, 7, 18, 0,
+ 16, 0, 2, 0, 0, 0,
+ 26, 0, 16, 0, 4, 0,
+ 0, 0, 1, 64, 0, 0,
+ 2, 0, 0, 0, 41, 0,
+ 0, 6, 66, 0, 16, 0,
+ 4, 0, 0, 0, 10, 32,
+ 2, 0, 1, 64, 0, 0,
+ 3, 0, 0, 0, 30, 0,
+ 0, 7, 66, 0, 16, 0,
+ 4, 0, 0, 0, 42, 0,
+ 16, 0, 4, 0, 0, 0,
+ 1, 64, 0, 0, 76, 0,
+ 0, 0, 30, 0, 0, 10,
+ 82, 0, 16, 0, 2, 0,
+ 0, 0, 6, 2, 16, 0,
+ 2, 0, 0, 0, 2, 64,
+ 0, 0, 76, 0, 0, 0,
+ 0, 0, 0, 0, 255, 255,
+ 255, 255, 0, 0, 0, 0,
+ 39, 0, 0, 7, 130, 0,
+ 16, 0, 2, 0, 0, 0,
+ 58, 0, 16, 0, 2, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 32, 0,
+ 0, 7, 130, 0, 16, 0,
+ 2, 0, 0, 0, 58, 0,
+ 16, 0, 2, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 1, 0, 0, 9,
+ 114, 0, 16, 0, 5, 0,
+ 0, 0, 6, 32, 2, 0,
+ 2, 64, 0, 0, 4, 0,
+ 0, 0, 8, 0, 0, 0,
+ 224, 3, 0, 0, 0, 0,
+ 0, 0, 39, 0, 0, 10,
+ 50, 0, 16, 0, 5, 0,
+ 0, 0, 70, 0, 16, 0,
+ 5, 0, 0, 0, 2, 64,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 32, 0, 0, 10, 50, 0,
+ 16, 0, 5, 0, 0, 0,
+ 70, 0, 16, 0, 5, 0,
+ 0, 0, 2, 64, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 30, 0,
+ 0, 9, 242, 0, 16, 0,
+ 6, 0, 0, 0, 6, 32,
+ 2, 0, 2, 64, 0, 0,
+ 1, 0, 0, 0, 16, 4,
+ 0, 0, 16, 12, 0, 0,
+ 8, 4, 0, 0, 79, 0,
+ 0, 10, 242, 0, 16, 0,
+ 7, 0, 0, 0, 86, 5,
+ 16, 0, 2, 0, 0, 0,
+ 2, 64, 0, 0, 16, 0,
+ 0, 0, 8, 0, 0, 0,
+ 4, 0, 0, 0, 2, 0,
+ 0, 0, 79, 0, 0, 7,
+ 130, 0, 16, 0, 4, 0,
+ 0, 0, 26, 0, 16, 0,
+ 2, 0, 0, 0, 1, 64,
+ 0, 0, 1, 0, 0, 0,
+ 30, 0, 0, 9, 242, 0,
+ 16, 0, 8, 0, 0, 0,
+ 6, 32, 2, 0, 2, 64,
+ 0, 0, 8, 12, 0, 0,
+ 4, 4, 0, 0, 4, 12,
+ 0, 0, 2, 4, 0, 0,
+ 30, 0, 0, 10, 242, 0,
+ 16, 0, 9, 0, 0, 0,
+ 70, 14, 16, 0, 3, 0,
+ 0, 0, 2, 64, 0, 0,
+ 0, 8, 0, 0, 0, 8,
+ 0, 0, 0, 8, 0, 0,
+ 0, 8, 0, 0, 30, 0,
+ 0, 10, 114, 0, 16, 0,
+ 10, 0, 0, 0, 166, 10,
+ 16, 0, 5, 0, 0, 0,
+ 2, 64, 0, 0, 0, 4,
+ 0, 0, 0, 8, 0, 0,
+ 0, 12, 0, 0, 0, 0,
+ 0, 0, 30, 0, 0, 10,
+ 50, 0, 16, 0, 11, 0,
+ 0, 0, 86, 5, 16, 0,
+ 4, 0, 0, 0, 2, 64,
+ 0, 0, 1, 0, 0, 0,
+ 1, 2, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 140, 0, 0, 19, 242, 0,
+ 16, 0, 12, 0, 0, 0,
+ 2, 64, 0, 0, 10, 0,
+ 0, 0, 11, 0, 0, 0,
+ 10, 0, 0, 0, 5, 0,
+ 0, 0, 2, 64, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 5, 0, 0, 0, 6, 32,
+ 2, 0, 2, 64, 0, 0,
+ 0, 4, 0, 0, 0, 8,
+ 0, 0, 0, 12, 0, 0,
+ 0, 0, 0, 0, 30, 0,
+ 0, 7, 82, 0, 16, 0,
+ 11, 0, 0, 0, 6, 1,
+ 16, 0, 11, 0, 0, 0,
+ 246, 15, 16, 0, 12, 0,
+ 0, 0, 30, 0, 0, 9,
+ 242, 0, 16, 0, 13, 0,
+ 0, 0, 6, 32, 2, 0,
+ 2, 64, 0, 0, 2, 12,
+ 0, 0, 1, 4, 0, 0,
+ 1, 12, 0, 0, 2, 2,
+ 0, 0, 30, 0, 0, 10,
+ 50, 0, 16, 0, 14, 0,
+ 0, 0, 6, 0, 16, 0,
+ 6, 0, 0, 0, 2, 64,
+ 0, 0, 0, 8, 0, 0,
+ 0, 16, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 30, 0, 0, 6, 130, 0,
+ 16, 0, 5, 0, 0, 0,
+ 10, 32, 2, 0, 1, 64,
+ 0, 0, 0, 16, 0, 0,
+ 30, 0, 0, 7, 130, 0,
+ 16, 0, 10, 0, 0, 0,
+ 58, 0, 16, 0, 13, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 16, 0, 0, 30, 0,
+ 0, 9, 242, 0, 16, 0,
+ 15, 0, 0, 0, 6, 32,
+ 2, 0, 2, 64, 0, 0,
+ 1, 16, 0, 0, 0, 2,
+ 0, 0, 2, 18, 0, 0,
+ 253, 15, 0, 0, 30, 0,
+ 0, 7, 130, 0, 16, 0,
+ 11, 0, 0, 0, 26, 0,
+ 16, 0, 15, 0, 0, 0,
+ 1, 64, 0, 0, 0, 16,
+ 0, 0, 80, 0, 0, 9,
+ 242, 0, 16, 0, 16, 0,
+ 0, 0, 6, 32, 2, 0,
+ 2, 64, 0, 0, 1, 0,
+ 0, 0, 2, 0, 0, 0,
+ 4, 0, 0, 0, 8, 0,
+ 0, 0, 30, 0, 0, 9,
+ 242, 0, 16, 0, 17, 0,
+ 0, 0, 6, 32, 2, 0,
+ 2, 64, 0, 0, 250, 1,
+ 0, 0, 241, 15, 0, 0,
+ 226, 1, 0, 0, 193, 15,
+ 0, 0, 30, 0, 0, 10,
+ 194, 0, 16, 0, 14, 0,
+ 0, 0, 6, 8, 16, 0,
+ 17, 0, 0, 0, 2, 64,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 16,
+ 0, 0, 0, 16, 0, 0,
+ 80, 0, 0, 9, 242, 0,
+ 16, 0, 18, 0, 0, 0,
+ 6, 32, 2, 0, 2, 64,
+ 0, 0, 16, 0, 0, 0,
+ 32, 0, 0, 0, 64, 0,
+ 0, 0, 128, 0, 0, 0,
+ 30, 0, 0, 9, 82, 0,
+ 16, 0, 17, 0, 0, 0,
+ 6, 32, 2, 0, 2, 64,
+ 0, 0, 130, 1, 0, 0,
+ 0, 0, 0, 0, 1, 15,
+ 0, 0, 0, 0, 0, 0,
+ 30, 0, 0, 7, 130, 0,
+ 16, 0, 12, 0, 0, 0,
+ 10, 0, 16, 0, 17, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 16, 0, 0, 80, 0,
+ 0, 6, 130, 0, 16, 0,
+ 13, 0, 0, 0, 10, 32,
+ 2, 0, 1, 64, 0, 0,
+ 0, 1, 0, 0, 30, 0,
+ 0, 10, 82, 0, 16, 0,
+ 11, 0, 0, 0, 6, 2,
+ 16, 0, 11, 0, 0, 0,
+ 2, 64, 0, 0, 0, 16,
+ 0, 0, 0, 0, 0, 0,
+ 0, 16, 0, 0, 0, 0,
+ 0, 0, 54, 0, 0, 5,
+ 34, 0, 16, 0, 15, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 48, 0,
+ 0, 1, 165, 0, 0, 7,
+ 18, 0, 16, 0, 17, 0,
+ 0, 0, 1, 64, 0, 0,
+ 16, 0, 0, 0, 6, 240,
+ 17, 0, 1, 0, 0, 0,
+ 80, 0, 0, 7, 18, 0,
+ 16, 0, 19, 0, 0, 0,
+ 26, 0, 16, 0, 15, 0,
+ 0, 0, 10, 0, 16, 0,
+ 17, 0, 0, 0, 3, 0,
+ 4, 3, 10, 0, 16, 0,
+ 19, 0, 0, 0, 86, 0,
+ 0, 5, 18, 0, 16, 0,
+ 19, 0, 0, 0, 26, 0,
+ 16, 0, 15, 0, 0, 0,
+ 0, 0, 0, 7, 34, 0,
+ 16, 0, 19, 0, 0, 0,
+ 10, 0, 16, 0, 19, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 128, 63, 86, 0,
+ 0, 5, 18, 0, 16, 0,
+ 17, 0, 0, 0, 10, 0,
+ 16, 0, 17, 0, 0, 0,
+ 14, 0, 0, 7, 18, 0,
+ 16, 0, 17, 0, 0, 0,
+ 26, 0, 16, 0, 19, 0,
+ 0, 0, 10, 0, 16, 0,
+ 17, 0, 0, 0, 31, 0,
+ 0, 2, 10, 32, 2, 0,
+ 165, 0, 0, 7, 34, 0,
+ 16, 0, 19, 0, 0, 0,
+ 1, 64, 0, 0, 12, 0,
+ 0, 0, 6, 240, 17, 0,
+ 1, 0, 0, 0, 30, 0,
+ 0, 7, 34, 0, 16, 0,
+ 19, 0, 0, 0, 26, 0,
+ 16, 0, 15, 0, 0, 0,
+ 26, 0, 16, 0, 19, 0,
+ 0, 0, 167, 0, 0, 139,
+ 2, 35, 3, 128, 131, 153,
+ 25, 0, 242, 0, 16, 0,
+ 20, 0, 0, 0, 26, 0,
+ 16, 0, 19, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 70, 126, 16, 0,
+ 2, 0, 0, 0, 167, 0,
+ 0, 139, 2, 35, 3, 128,
+ 131, 153, 25, 0, 242, 0,
+ 16, 0, 21, 0, 0, 0,
+ 26, 0, 16, 0, 19, 0,
+ 0, 0, 1, 64, 0, 0,
+ 16, 0, 0, 0, 70, 126,
+ 16, 0, 2, 0, 0, 0,
+ 167, 0, 0, 139, 2, 35,
+ 3, 128, 131, 153, 25, 0,
+ 242, 0, 16, 0, 22, 0,
+ 0, 0, 26, 0, 16, 0,
+ 19, 0, 0, 0, 1, 64,
+ 0, 0, 32, 0, 0, 0,
+ 70, 126, 16, 0, 2, 0,
+ 0, 0, 167, 0, 0, 139,
+ 2, 35, 3, 128, 131, 153,
+ 25, 0, 242, 0, 16, 0,
+ 23, 0, 0, 0, 26, 0,
+ 16, 0, 19, 0, 0, 0,
+ 1, 64, 0, 0, 48, 0,
+ 0, 0, 70, 126, 16, 0,
+ 2, 0, 0, 0, 167, 0,
+ 0, 139, 2, 35, 3, 128,
+ 131, 153, 25, 0, 242, 0,
+ 16, 0, 24, 0, 0, 0,
+ 26, 0, 16, 0, 19, 0,
+ 0, 0, 1, 64, 0, 0,
+ 64, 0, 0, 0, 70, 126,
+ 16, 0, 2, 0, 0, 0,
+ 167, 0, 0, 139, 2, 35,
+ 3, 128, 131, 153, 25, 0,
+ 242, 0, 16, 0, 25, 0,
+ 0, 0, 26, 0, 16, 0,
+ 19, 0, 0, 0, 1, 64,
+ 0, 0, 80, 0, 0, 0,
+ 70, 126, 16, 0, 2, 0,
+ 0, 0, 167, 0, 0, 139,
+ 2, 35, 3, 128, 131, 153,
+ 25, 0, 34, 0, 16, 0,
+ 19, 0, 0, 0, 26, 0,
+ 16, 0, 19, 0, 0, 0,
+ 1, 64, 0, 0, 96, 0,
+ 0, 0, 6, 112, 16, 0,
+ 2, 0, 0, 0, 166, 0,
+ 0, 7, 242, 240, 17, 0,
+ 2, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 70, 14, 16, 0, 20, 0,
+ 0, 0, 166, 0, 0, 7,
+ 242, 240, 17, 0, 2, 0,
+ 0, 0, 1, 64, 0, 0,
+ 16, 0, 0, 0, 70, 14,
+ 16, 0, 21, 0, 0, 0,
+ 166, 0, 0, 7, 242, 240,
+ 17, 0, 2, 0, 0, 0,
+ 1, 64, 0, 0, 32, 0,
+ 0, 0, 70, 14, 16, 0,
+ 22, 0, 0, 0, 166, 0,
+ 0, 7, 242, 240, 17, 0,
+ 2, 0, 0, 0, 1, 64,
+ 0, 0, 48, 0, 0, 0,
+ 70, 14, 16, 0, 23, 0,
+ 0, 0, 166, 0, 0, 7,
+ 242, 240, 17, 0, 2, 0,
+ 0, 0, 1, 64, 0, 0,
+ 64, 0, 0, 0, 70, 14,
+ 16, 0, 24, 0, 0, 0,
+ 166, 0, 0, 7, 242, 240,
+ 17, 0, 2, 0, 0, 0,
+ 1, 64, 0, 0, 80, 0,
+ 0, 0, 70, 14, 16, 0,
+ 25, 0, 0, 0, 166, 0,
+ 0, 7, 18, 240, 17, 0,
+ 2, 0, 0, 0, 1, 64,
+ 0, 0, 96, 0, 0, 0,
+ 26, 0, 16, 0, 19, 0,
+ 0, 0, 21, 0, 0, 1,
+ 190, 24, 0, 1, 165, 0,
+ 0, 7, 34, 0, 16, 0,
+ 19, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 0, 0,
+ 0, 0, 165, 0, 0, 7,
+ 114, 0, 16, 0, 20, 0,
+ 0, 0, 1, 64, 0, 0,
+ 12, 0, 0, 0, 70, 242,
+ 17, 0, 2, 0, 0, 0,
+ 165, 0, 0, 7, 242, 0,
+ 16, 0, 21, 0, 0, 0,
+ 1, 64, 0, 0, 60, 0,
+ 0, 0, 70, 254, 17, 0,
+ 2, 0, 0, 0, 165, 0,
+ 0, 7, 242, 0, 16, 0,
+ 22, 0, 0, 0, 1, 64,
+ 0, 0, 24, 0, 0, 0,
+ 70, 254, 17, 0, 2, 0,
+ 0, 0, 165, 0, 0, 7,
+ 242, 0, 16, 0, 23, 0,
+ 0, 0, 1, 64, 0, 0,
+ 76, 0, 0, 0, 70, 254,
+ 17, 0, 2, 0, 0, 0,
+ 165, 0, 0, 7, 242, 0,
+ 16, 0, 24, 0, 0, 0,
+ 1, 64, 0, 0, 40, 0,
+ 0, 0, 70, 254, 17, 0,
+ 2, 0, 0, 0, 165, 0,
+ 0, 7, 194, 0, 16, 0,
+ 19, 0, 0, 0, 1, 64,
+ 0, 0, 92, 0, 0, 0,
+ 6, 244, 17, 0, 2, 0,
+ 0, 0, 165, 0, 0, 7,
+ 130, 0, 16, 0, 20, 0,
+ 0, 0, 1, 64, 0, 0,
+ 56, 0, 0, 0, 6, 240,
+ 17, 0, 2, 0, 0, 0,
+ 165, 0, 0, 7, 242, 0,
+ 16, 0, 25, 0, 0, 0,
+ 1, 64, 0, 0, 12, 0,
+ 0, 0, 70, 254, 17, 0,
+ 2, 0, 0, 0, 165, 0,
+ 0, 7, 18, 0, 16, 0,
+ 26, 0, 0, 0, 1, 64,
+ 0, 0, 36, 0, 0, 0,
+ 6, 240, 17, 0, 2, 0,
+ 0, 0, 165, 0, 0, 7,
+ 34, 0, 16, 0, 26, 0,
+ 0, 0, 1, 64, 0, 0,
+ 48, 0, 0, 0, 6, 240,
+ 17, 0, 2, 0, 0, 0,
+ 54, 0, 0, 4, 66, 0,
+ 16, 0, 26, 0, 0, 0,
+ 10, 32, 2, 0, 48, 0,
+ 0, 1, 80, 0, 0, 7,
+ 130, 0, 16, 0, 26, 0,
+ 0, 0, 42, 0, 16, 0,
+ 26, 0, 0, 0, 26, 0,
+ 16, 0, 19, 0, 0, 0,
+ 3, 0, 4, 3, 58, 0,
+ 16, 0, 26, 0, 0, 0,
+ 167, 0, 0, 9, 18, 0,
+ 16, 0, 27, 0, 0, 0,
+ 42, 0, 16, 0, 26, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 30, 0, 0, 10, 242, 0,
+ 16, 0, 28, 0, 0, 0,
+ 166, 10, 16, 0, 26, 0,
+ 0, 0, 2, 64, 0, 0,
+ 183, 7, 0, 0, 110, 15,
+ 0, 0, 37, 23, 0, 0,
+ 0, 4, 0, 0, 167, 0,
+ 0, 9, 34, 0, 16, 0,
+ 27, 0, 0, 0, 10, 0,
+ 16, 0, 28, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 167, 0,
+ 0, 9, 66, 0, 16, 0,
+ 27, 0, 0, 0, 26, 0,
+ 16, 0, 28, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 167, 0,
+ 0, 9, 130, 0, 16, 0,
+ 26, 0, 0, 0, 42, 0,
+ 16, 0, 28, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 30, 0,
+ 0, 7, 130, 0, 16, 0,
+ 27, 0, 0, 0, 10, 0,
+ 16, 0, 0, 0, 0, 0,
+ 42, 0, 16, 0, 26, 0,
+ 0, 0, 167, 0, 0, 139,
+ 2, 131, 0, 128, 131, 153,
+ 25, 0, 242, 0, 16, 0,
+ 29, 0, 0, 0, 58, 0,
+ 16, 0, 27, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 70, 238, 17, 0,
+ 0, 0, 0, 0, 24, 0,
+ 0, 7, 18, 0, 16, 0,
+ 30, 0, 0, 0, 58, 0,
+ 16, 0, 26, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 55, 0, 0, 9,
+ 130, 0, 16, 0, 26, 0,
+ 0, 0, 10, 0, 16, 0,
+ 30, 0, 0, 0, 58, 0,
+ 16, 0, 29, 0, 0, 0,
+ 58, 0, 16, 0, 26, 0,
+ 0, 0, 49, 0, 0, 7,
+ 130, 0, 16, 0, 29, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 58, 0,
+ 16, 0, 26, 0, 0, 0,
+ 31, 0, 4, 3, 58, 0,
+ 16, 0, 29, 0, 0, 0,
+ 31, 0, 4, 3, 58, 0,
+ 16, 0, 19, 0, 0, 0,
+ 0, 0, 0, 7, 50, 0,
+ 16, 0, 30, 0, 0, 0,
+ 70, 0, 16, 0, 20, 0,
+ 0, 0, 70, 0, 16, 0,
+ 27, 0, 0, 0, 50, 0,
+ 0, 9, 50, 0, 16, 0,
+ 30, 0, 0, 0, 6, 0,
+ 16, 0, 27, 0, 0, 0,
+ 198, 0, 16, 0, 21, 0,
+ 0, 0, 70, 0, 16, 0,
+ 30, 0, 0, 0, 50, 0,
+ 0, 9, 50, 0, 16, 0,
+ 30, 0, 0, 0, 6, 0,
+ 16, 0, 29, 0, 0, 0,
+ 198, 0, 16, 0, 22, 0,
+ 0, 0, 70, 0, 16, 0,
+ 30, 0, 0, 0, 50, 0,
+ 0, 9, 130, 0, 16, 0,
+ 29, 0, 0, 0, 26, 0,
+ 16, 0, 27, 0, 0, 0,
+ 26, 0, 16, 0, 21, 0,
+ 0, 0, 10, 0, 16, 0,
+ 30, 0, 0, 0, 50, 0,
+ 0, 9, 130, 0, 16, 0,
+ 29, 0, 0, 0, 26, 0,
+ 16, 0, 29, 0, 0, 0,
+ 26, 0, 16, 0, 22, 0,
+ 0, 0, 58, 0, 16, 0,
+ 29, 0, 0, 0, 50, 0,
+ 0, 9, 130, 0, 16, 0,
+ 29, 0, 0, 0, 42, 0,
+ 16, 0, 27, 0, 0, 0,
+ 42, 0, 16, 0, 21, 0,
+ 0, 0, 58, 0, 16, 0,
+ 29, 0, 0, 0, 50, 0,
+ 0, 9, 18, 0, 16, 0,
+ 31, 0, 0, 0, 42, 0,
+ 16, 0, 29, 0, 0, 0,
+ 42, 0, 16, 0, 22, 0,
+ 0, 0, 58, 0, 16, 0,
+ 29, 0, 0, 0, 50, 0,
+ 0, 9, 130, 0, 16, 0,
+ 29, 0, 0, 0, 26, 0,
+ 16, 0, 27, 0, 0, 0,
+ 10, 0, 16, 0, 23, 0,
+ 0, 0, 26, 0, 16, 0,
+ 30, 0, 0, 0, 50, 0,
+ 0, 9, 130, 0, 16, 0,
+ 29, 0, 0, 0, 26, 0,
+ 16, 0, 29, 0, 0, 0,
+ 10, 0, 16, 0, 24, 0,
+ 0, 0, 58, 0, 16, 0,
+ 29, 0, 0, 0, 50, 0,
+ 0, 9, 130, 0, 16, 0,
+ 29, 0, 0, 0, 42, 0,
+ 16, 0, 27, 0, 0, 0,
+ 26, 0, 16, 0, 23, 0,
+ 0, 0, 58, 0, 16, 0,
+ 29, 0, 0, 0, 50, 0,
+ 0, 9, 34, 0, 16, 0,
+ 31, 0, 0, 0, 42, 0,
+ 16, 0, 29, 0, 0, 0,
+ 26, 0, 16, 0, 24, 0,
+ 0, 0, 58, 0, 16, 0,
+ 29, 0, 0, 0, 0, 0,
+ 0, 7, 130, 0, 16, 0,
+ 29, 0, 0, 0, 42, 0,
+ 16, 0, 20, 0, 0, 0,
+ 42, 0, 16, 0, 27, 0,
+ 0, 0, 50, 0, 0, 9,
+ 130, 0, 16, 0, 29, 0,
+ 0, 0, 10, 0, 16, 0,
+ 27, 0, 0, 0, 42, 0,
+ 16, 0, 23, 0, 0, 0,
+ 58, 0, 16, 0, 29, 0,
+ 0, 0, 50, 0, 0, 9,
+ 130, 0, 16, 0, 29, 0,
+ 0, 0, 10, 0, 16, 0,
+ 29, 0, 0, 0, 42, 0,
+ 16, 0, 24, 0, 0, 0,
+ 58, 0, 16, 0, 29, 0,
+ 0, 0, 50, 0, 0, 9,
+ 130, 0, 16, 0, 29, 0,
+ 0, 0, 26, 0, 16, 0,
+ 27, 0, 0, 0, 58, 0,
+ 16, 0, 23, 0, 0, 0,
+ 58, 0, 16, 0, 29, 0,
+ 0, 0, 50, 0, 0, 9,
+ 130, 0, 16, 0, 29, 0,
+ 0, 0, 26, 0, 16, 0,
+ 29, 0, 0, 0, 58, 0,
+ 16, 0, 24, 0, 0, 0,
+ 58, 0, 16, 0, 29, 0,
+ 0, 0, 50, 0, 0, 9,
+ 130, 0, 16, 0, 29, 0,
+ 0, 0, 42, 0, 16, 0,
+ 27, 0, 0, 0, 42, 0,
+ 16, 0, 19, 0, 0, 0,
+ 58, 0, 16, 0, 29, 0,
+ 0, 0, 50, 0, 0, 9,
+ 130, 0, 16, 0, 29, 0,
+ 0, 0, 42, 0, 16, 0,
+ 29, 0, 0, 0, 58, 0,
+ 16, 0, 20, 0, 0, 0,
+ 58, 0, 16, 0, 29, 0,
+ 0, 0, 18, 0, 0, 1,
+ 0, 0, 0, 8, 50, 0,
+ 16, 0, 29, 0, 0, 0,
+ 70, 0, 16, 0, 27, 0,
+ 0, 0, 70, 0, 16, 128,
+ 65, 0, 0, 0, 29, 0,
+ 0, 0, 50, 0, 0, 9,
+ 18, 0, 16, 0, 29, 0,
+ 0, 0, 10, 0, 16, 0,
+ 29, 0, 0, 0, 58, 0,
+ 16, 0, 25, 0, 0, 0,
+ 10, 0, 16, 0, 25, 0,
+ 0, 0, 0, 0, 0, 7,
+ 18, 0, 16, 0, 31, 0,
+ 0, 0, 10, 0, 16, 0,
+ 27, 0, 0, 0, 10, 0,
+ 16, 0, 29, 0, 0, 0,
+ 50, 0, 0, 9, 18, 0,
+ 16, 0, 29, 0, 0, 0,
+ 26, 0, 16, 0, 29, 0,
+ 0, 0, 10, 0, 16, 0,
+ 26, 0, 0, 0, 26, 0,
+ 16, 0, 25, 0, 0, 0,
+ 0, 0, 0, 7, 34, 0,
+ 16, 0, 31, 0, 0, 0,
+ 26, 0, 16, 0, 27, 0,
+ 0, 0, 10, 0, 16, 0,
+ 29, 0, 0, 0, 0, 0,
+ 0, 8, 18, 0, 16, 0,
+ 29, 0, 0, 0, 42, 0,
+ 16, 0, 27, 0, 0, 0,
+ 42, 0, 16, 128, 65, 0,
+ 0, 0, 29, 0, 0, 0,
+ 50, 0, 0, 9, 18, 0,
+ 16, 0, 29, 0, 0, 0,
+ 10, 0, 16, 0, 29, 0,
+ 0, 0, 26, 0, 16, 0,
+ 26, 0, 0, 0, 42, 0,
+ 16, 0, 25, 0, 0, 0,
+ 0, 0, 0, 7, 130, 0,
+ 16, 0, 29, 0, 0, 0,
+ 42, 0, 16, 0, 27, 0,
+ 0, 0, 10, 0, 16, 0,
+ 29, 0, 0, 0, 21, 0,
+ 0, 1, 165, 0, 0, 7,
+ 114, 0, 16, 0, 29, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 70, 242,
+ 17, 0, 2, 0, 0, 0,
+ 0, 0, 0, 7, 114, 0,
+ 16, 0, 27, 0, 0, 0,
+ 70, 2, 16, 0, 27, 0,
+ 0, 0, 70, 2, 16, 0,
+ 29, 0, 0, 0, 18, 0,
+ 0, 1, 54, 0, 0, 5,
+ 50, 0, 16, 0, 31, 0,
+ 0, 0, 70, 0, 16, 0,
+ 27, 0, 0, 0, 54, 0,
+ 0, 5, 130, 0, 16, 0,
+ 29, 0, 0, 0, 42, 0,
+ 16, 0, 27, 0, 0, 0,
+ 21, 0, 0, 1, 168, 0,
+ 0, 9, 18, 240, 17, 0,
+ 3, 0, 0, 0, 42, 0,
+ 16, 0, 26, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 10, 0, 16, 0,
+ 31, 0, 0, 0, 168, 0,
+ 0, 9, 18, 240, 17, 0,
+ 3, 0, 0, 0, 10, 0,
+ 16, 0, 28, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 26, 0, 16, 0,
+ 31, 0, 0, 0, 168, 0,
+ 0, 9, 18, 240, 17, 0,
+ 3, 0, 0, 0, 26, 0,
+ 16, 0, 28, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 58, 0, 16, 0,
+ 29, 0, 0, 0, 168, 0,
+ 0, 9, 18, 240, 17, 0,
+ 3, 0, 0, 0, 42, 0,
+ 16, 0, 28, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 58, 0, 16, 0,
+ 26, 0, 0, 0, 168, 0,
+ 0, 9, 114, 224, 17, 0,
+ 0, 0, 0, 0, 58, 0,
+ 16, 0, 27, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 70, 2, 16, 0,
+ 27, 0, 0, 0, 54, 0,
+ 0, 5, 66, 0, 16, 0,
+ 26, 0, 0, 0, 58, 0,
+ 16, 0, 28, 0, 0, 0,
+ 22, 0, 0, 1, 165, 0,
+ 0, 7, 34, 0, 16, 0,
+ 19, 0, 0, 0, 1, 64,
+ 0, 0, 44, 0, 0, 0,
+ 6, 240, 17, 0, 1, 0,
+ 0, 0, 190, 24, 0, 1,
+ 165, 0, 0, 7, 66, 0,
+ 16, 0, 19, 0, 0, 0,
+ 1, 64, 0, 0, 8, 0,
+ 0, 0, 6, 240, 17, 0,
+ 1, 0, 0, 0, 56, 0,
+ 0, 7, 66, 0, 16, 0,
+ 19, 0, 0, 0, 42, 0,
+ 16, 0, 19, 0, 0, 0,
+ 42, 0, 16, 0, 19, 0,
+ 0, 0, 55, 0, 0, 9,
+ 66, 0, 16, 0, 19, 0,
+ 0, 0, 26, 0, 16, 0,
+ 0, 0, 0, 0, 42, 0,
+ 16, 0, 19, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 165, 0, 0, 7,
+ 130, 0, 16, 0, 19, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 0, 0, 0, 0,
+ 41, 0, 0, 7, 18, 0,
+ 16, 0, 20, 0, 0, 0,
+ 58, 0, 16, 0, 19, 0,
+ 0, 0, 1, 64, 0, 0,
+ 2, 0, 0, 0, 54, 0,
+ 0, 4, 34, 0, 16, 0,
+ 20, 0, 0, 0, 10, 32,
+ 2, 0, 48, 0, 0, 1,
+ 80, 0, 0, 7, 66, 0,
+ 16, 0, 20, 0, 0, 0,
+ 26, 0, 16, 0, 20, 0,
+ 0, 0, 10, 0, 16, 0,
+ 20, 0, 0, 0, 3, 0,
+ 4, 3, 42, 0, 16, 0,
+ 20, 0, 0, 0, 85, 0,
+ 0, 7, 66, 0, 16, 0,
+ 20, 0, 0, 0, 26, 0,
+ 16, 0, 20, 0, 0, 0,
+ 1, 64, 0, 0, 2, 0,
+ 0, 0, 167, 0, 0, 9,
+ 18, 0, 16, 0, 21, 0,
+ 0, 0, 42, 0, 16, 0,
+ 20, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 3, 0,
+ 0, 0, 30, 0, 0, 10,
+ 114, 0, 16, 0, 22, 0,
+ 0, 0, 166, 10, 16, 0,
+ 20, 0, 0, 0, 2, 64,
+ 0, 0, 183, 7, 0, 0,
+ 110, 15, 0, 0, 37, 23,
+ 0, 0, 0, 0, 0, 0,
+ 167, 0, 0, 9, 34, 0,
+ 16, 0, 21, 0, 0, 0,
+ 10, 0, 16, 0, 22, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 167, 0, 0, 9, 66, 0,
+ 16, 0, 21, 0, 0, 0,
+ 26, 0, 16, 0, 22, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 167, 0, 0, 9, 130, 0,
+ 16, 0, 21, 0, 0, 0,
+ 42, 0, 16, 0, 22, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 49, 0, 0, 7, 130, 0,
+ 16, 0, 20, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 58, 0, 16, 0,
+ 21, 0, 0, 0, 31, 0,
+ 4, 3, 58, 0, 16, 0,
+ 20, 0, 0, 0, 30, 0,
+ 0, 7, 130, 0, 16, 0,
+ 20, 0, 0, 0, 26, 0,
+ 16, 0, 19, 0, 0, 0,
+ 26, 0, 16, 0, 20, 0,
+ 0, 0, 167, 0, 0, 139,
+ 2, 131, 0, 128, 131, 153,
+ 25, 0, 242, 0, 16, 0,
+ 23, 0, 0, 0, 58, 0,
+ 16, 0, 20, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 70, 126, 16, 0,
+ 13, 0, 0, 0, 50, 0,
+ 0, 9, 242, 0, 16, 0,
+ 21, 0, 0, 0, 70, 14,
+ 16, 0, 23, 0, 0, 0,
+ 166, 10, 16, 0, 19, 0,
+ 0, 0, 70, 14, 16, 0,
+ 21, 0, 0, 0, 168, 0,
+ 0, 9, 18, 240, 17, 0,
+ 3, 0, 0, 0, 42, 0,
+ 16, 0, 20, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 10, 0, 16, 0,
+ 21, 0, 0, 0, 168, 0,
+ 0, 9, 18, 240, 17, 0,
+ 3, 0, 0, 0, 10, 0,
+ 16, 0, 22, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 26, 0, 16, 0,
+ 21, 0, 0, 0, 168, 0,
+ 0, 9, 18, 240, 17, 0,
+ 3, 0, 0, 0, 26, 0,
+ 16, 0, 22, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 42, 0, 16, 0,
+ 21, 0, 0, 0, 168, 0,
+ 0, 9, 18, 240, 17, 0,
+ 3, 0, 0, 0, 42, 0,
+ 16, 0, 22, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 58, 0, 16, 0,
+ 21, 0, 0, 0, 21, 0,
+ 0, 1, 30, 0, 0, 7,
+ 34, 0, 16, 0, 20, 0,
+ 0, 0, 26, 0, 16, 0,
+ 20, 0, 0, 0, 1, 64,
+ 0, 0, 0, 4, 0, 0,
+ 22, 0, 0, 1, 190, 24,
+ 0, 1, 165, 0, 0, 7,
+ 34, 0, 16, 0, 19, 0,
+ 0, 0, 1, 64, 0, 0,
+ 28, 0, 0, 0, 6, 240,
+ 17, 0, 1, 0, 0, 0,
+ 39, 0, 0, 7, 34, 0,
+ 16, 0, 19, 0, 0, 0,
+ 26, 0, 16, 0, 19, 0,
+ 0, 0, 1, 64, 0, 0,
+ 255, 255, 255, 255, 31, 0,
+ 4, 3, 26, 0, 16, 0,
+ 19, 0, 0, 0, 165, 0,
+ 0, 7, 98, 0, 16, 0,
+ 19, 0, 0, 0, 1, 64,
+ 0, 0, 36, 0, 0, 0,
+ 6, 241, 17, 0, 0, 0,
+ 0, 0, 165, 0, 0, 7,
+ 114, 0, 16, 0, 20, 0,
+ 0, 0, 1, 64, 0, 0,
+ 24, 0, 0, 0, 70, 242,
+ 17, 0, 1, 0, 0, 0,
+ 165, 0, 0, 7, 130, 0,
+ 16, 0, 19, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 0, 0, 0, 0, 54, 0,
+ 0, 4, 130, 0, 16, 0,
+ 20, 0, 0, 0, 10, 32,
+ 2, 0, 48, 0, 0, 1,
+ 80, 0, 0, 7, 18, 0,
+ 16, 0, 21, 0, 0, 0,
+ 58, 0, 16, 0, 20, 0,
+ 0, 0, 58, 0, 16, 0,
+ 19, 0, 0, 0, 3, 0,
+ 4, 3, 10, 0, 16, 0,
+ 21, 0, 0, 0, 30, 0,
+ 0, 7, 50, 0, 16, 0,
+ 21, 0, 0, 0, 246, 15,
+ 16, 0, 20, 0, 0, 0,
+ 150, 5, 16, 0, 20, 0,
+ 0, 0, 167, 0, 0, 139,
+ 2, 131, 0, 128, 131, 153,
+ 25, 0, 242, 0, 16, 0,
+ 22, 0, 0, 0, 10, 0,
+ 16, 0, 21, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 70, 126, 16, 0,
+ 11, 0, 0, 0, 167, 0,
+ 0, 139, 2, 131, 0, 128,
+ 131, 153, 25, 0, 242, 0,
+ 16, 0, 21, 0, 0, 0,
+ 26, 0, 16, 0, 21, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 70, 126,
+ 16, 0, 11, 0, 0, 0,
+ 0, 0, 0, 8, 242, 0,
+ 16, 0, 21, 0, 0, 0,
+ 70, 14, 16, 128, 65, 0,
+ 0, 0, 22, 0, 0, 0,
+ 70, 14, 16, 0, 21, 0,
+ 0, 0, 50, 0, 0, 9,
+ 242, 0, 16, 0, 21, 0,
+ 0, 0, 70, 14, 16, 0,
+ 21, 0, 0, 0, 6, 0,
+ 16, 0, 17, 0, 0, 0,
+ 70, 14, 16, 0, 22, 0,
+ 0, 0, 167, 0, 0, 9,
+ 18, 0, 16, 0, 22, 0,
+ 0, 0, 58, 0, 16, 0,
+ 20, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 3, 0,
+ 0, 0, 30, 0, 0, 10,
+ 242, 0, 16, 0, 23, 0,
+ 0, 0, 246, 15, 16, 0,
+ 20, 0, 0, 0, 2, 64,
+ 0, 0, 183, 7, 0, 0,
+ 110, 15, 0, 0, 37, 23,
+ 0, 0, 0, 4, 0, 0,
+ 167, 0, 0, 9, 34, 0,
+ 16, 0, 22, 0, 0, 0,
+ 10, 0, 16, 0, 23, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 167, 0, 0, 9, 66, 0,
+ 16, 0, 22, 0, 0, 0,
+ 26, 0, 16, 0, 23, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 167, 0, 0, 9, 130, 0,
+ 16, 0, 22, 0, 0, 0,
+ 42, 0, 16, 0, 23, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 0, 0, 0, 8, 114, 0,
+ 16, 0, 21, 0, 0, 0,
+ 70, 2, 16, 0, 21, 0,
+ 0, 0, 70, 2, 16, 128,
+ 65, 0, 0, 0, 22, 0,
+ 0, 0, 16, 0, 0, 7,
+ 18, 0, 16, 0, 24, 0,
+ 0, 0, 70, 2, 16, 0,
+ 21, 0, 0, 0, 70, 2,
+ 16, 0, 21, 0, 0, 0,
+ 0, 0, 0, 7, 18, 0,
+ 16, 0, 24, 0, 0, 0,
+ 10, 0, 16, 0, 24, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 52, 50, 0,
+ 0, 11, 130, 0, 16, 0,
+ 21, 0, 0, 0, 58, 0,
+ 16, 0, 21, 0, 0, 0,
+ 26, 0, 16, 128, 65, 0,
+ 0, 0, 19, 0, 0, 0,
+ 42, 0, 16, 128, 65, 0,
+ 0, 0, 19, 0, 0, 0,
+ 51, 0, 0, 7, 130, 0,
+ 16, 0, 21, 0, 0, 0,
+ 58, 0, 16, 0, 21, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 68, 0,
+ 0, 5, 18, 0, 16, 0,
+ 24, 0, 0, 0, 10, 0,
+ 16, 0, 24, 0, 0, 0,
+ 50, 0, 0, 9, 18, 0,
+ 16, 0, 24, 0, 0, 0,
+ 58, 0, 16, 0, 21, 0,
+ 0, 0, 10, 0, 16, 0,
+ 24, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 128, 63,
+ 52, 0, 0, 7, 18, 0,
+ 16, 0, 24, 0, 0, 0,
+ 10, 0, 16, 0, 24, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 56, 0,
+ 0, 7, 18, 0, 16, 0,
+ 24, 0, 0, 0, 10, 0,
+ 16, 0, 20, 0, 0, 0,
+ 10, 0, 16, 0, 24, 0,
+ 0, 0, 50, 0, 0, 9,
+ 114, 0, 16, 0, 21, 0,
+ 0, 0, 6, 0, 16, 0,
+ 24, 0, 0, 0, 70, 2,
+ 16, 0, 21, 0, 0, 0,
+ 70, 2, 16, 0, 22, 0,
+ 0, 0, 29, 0, 0, 7,
+ 130, 0, 16, 0, 21, 0,
+ 0, 0, 58, 0, 16, 0,
+ 21, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 55, 0, 0, 9, 130, 0,
+ 16, 0, 21, 0, 0, 0,
+ 58, 0, 16, 0, 21, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 58, 0,
+ 16, 0, 22, 0, 0, 0,
+ 168, 0, 0, 9, 18, 240,
+ 17, 0, 3, 0, 0, 0,
+ 58, 0, 16, 0, 20, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 10, 0,
+ 16, 0, 21, 0, 0, 0,
+ 168, 0, 0, 9, 18, 240,
+ 17, 0, 3, 0, 0, 0,
+ 10, 0, 16, 0, 23, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 26, 0,
+ 16, 0, 21, 0, 0, 0,
+ 168, 0, 0, 9, 18, 240,
+ 17, 0, 3, 0, 0, 0,
+ 26, 0, 16, 0, 23, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 42, 0,
+ 16, 0, 21, 0, 0, 0,
+ 168, 0, 0, 9, 18, 240,
+ 17, 0, 3, 0, 0, 0,
+ 42, 0, 16, 0, 23, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 58, 0,
+ 16, 0, 21, 0, 0, 0,
+ 54, 0, 0, 5, 130, 0,
+ 16, 0, 20, 0, 0, 0,
+ 58, 0, 16, 0, 23, 0,
+ 0, 0, 22, 0, 0, 1,
+ 21, 0, 0, 1, 165, 0,
+ 0, 7, 34, 0, 16, 0,
+ 19, 0, 0, 0, 1, 64,
+ 0, 0, 20, 0, 0, 0,
+ 6, 240, 17, 0, 1, 0,
+ 0, 0, 24, 0, 0, 7,
+ 66, 0, 16, 0, 19, 0,
+ 0, 0, 26, 0, 16, 0,
+ 19, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 165, 0, 0, 7, 130, 0,
+ 16, 0, 19, 0, 0, 0,
+ 1, 64, 0, 0, 24, 0,
+ 0, 0, 6, 240, 17, 0,
+ 0, 0, 0, 0, 32, 0,
+ 0, 7, 18, 0, 16, 0,
+ 20, 0, 0, 0, 58, 0,
+ 16, 0, 19, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 60, 0, 0, 7,
+ 66, 0, 16, 0, 19, 0,
+ 0, 0, 42, 0, 16, 0,
+ 19, 0, 0, 0, 10, 0,
+ 16, 0, 20, 0, 0, 0,
+ 31, 0, 0, 3, 42, 0,
+ 16, 0, 19, 0, 0, 0,
+ 165, 0, 0, 7, 66, 0,
+ 16, 0, 19, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 0, 0, 0, 0, 86, 0,
+ 0, 5, 18, 0, 16, 0,
+ 20, 0, 0, 0, 42, 0,
+ 16, 0, 19, 0, 0, 0,
+ 56, 0, 0, 7, 34, 0,
+ 16, 0, 19, 0, 0, 0,
+ 26, 0, 16, 0, 19, 0,
+ 0, 0, 10, 0, 16, 0,
+ 20, 0, 0, 0, 86, 0,
+ 0, 5, 18, 0, 16, 0,
+ 20, 0, 0, 0, 58, 0,
+ 16, 0, 19, 0, 0, 0,
+ 14, 0, 0, 7, 34, 0,
+ 16, 0, 19, 0, 0, 0,
+ 26, 0, 16, 0, 19, 0,
+ 0, 0, 10, 0, 16, 0,
+ 20, 0, 0, 0, 165, 0,
+ 0, 7, 50, 0, 16, 0,
+ 20, 0, 0, 0, 1, 64,
+ 0, 0, 28, 0, 0, 0,
+ 70, 240, 17, 0, 0, 0,
+ 0, 0, 54, 0, 0, 4,
+ 66, 0, 16, 0, 20, 0,
+ 0, 0, 10, 32, 2, 0,
+ 48, 0, 0, 1, 80, 0,
+ 0, 7, 130, 0, 16, 0,
+ 20, 0, 0, 0, 42, 0,
+ 16, 0, 20, 0, 0, 0,
+ 42, 0, 16, 0, 19, 0,
+ 0, 0, 3, 0, 4, 3,
+ 58, 0, 16, 0, 20, 0,
+ 0, 0, 167, 0, 0, 9,
+ 130, 0, 16, 0, 20, 0,
+ 0, 0, 42, 0, 16, 0,
+ 20, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 3, 0,
+ 0, 0, 30, 0, 0, 10,
+ 242, 0, 16, 0, 21, 0,
+ 0, 0, 166, 10, 16, 0,
+ 20, 0, 0, 0, 2, 64,
+ 0, 0, 183, 7, 0, 0,
+ 110, 15, 0, 0, 37, 23,
+ 0, 0, 0, 4, 0, 0,
+ 167, 0, 0, 9, 18, 0,
+ 16, 0, 22, 0, 0, 0,
+ 10, 0, 16, 0, 21, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 167, 0, 0, 9, 34, 0,
+ 16, 0, 22, 0, 0, 0,
+ 26, 0, 16, 0, 21, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 167, 0, 0, 9, 66, 0,
+ 16, 0, 22, 0, 0, 0,
+ 42, 0, 16, 0, 21, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 54, 0, 0, 8, 114, 0,
+ 16, 0, 23, 0, 0, 0,
+ 2, 64, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 54, 0, 0, 5,
+ 130, 0, 16, 0, 22, 0,
+ 0, 0, 42, 0, 16, 0,
+ 20, 0, 0, 0, 48, 0,
+ 0, 1, 80, 0, 0, 7,
+ 130, 0, 16, 0, 23, 0,
+ 0, 0, 58, 0, 16, 0,
+ 22, 0, 0, 0, 58, 0,
+ 16, 0, 19, 0, 0, 0,
+ 3, 0, 4, 3, 58, 0,
+ 16, 0, 23, 0, 0, 0,
+ 30, 0, 0, 7, 130, 0,
+ 16, 0, 23, 0, 0, 0,
+ 10, 0, 16, 0, 20, 0,
+ 0, 0, 58, 0, 16, 0,
+ 22, 0, 0, 0, 167, 0,
+ 0, 139, 2, 35, 0, 128,
+ 131, 153, 25, 0, 130, 0,
+ 16, 0, 23, 0, 0, 0,
+ 58, 0, 16, 0, 23, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 112,
+ 16, 0, 5, 0, 0, 0,
+ 1, 0, 0, 7, 18, 0,
+ 16, 0, 24, 0, 0, 0,
+ 58, 0, 16, 0, 23, 0,
+ 0, 0, 1, 64, 0, 0,
+ 255, 255, 0, 0, 167, 0,
+ 0, 9, 34, 0, 16, 0,
+ 24, 0, 0, 0, 10, 0,
+ 16, 0, 24, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 30, 0,
+ 0, 10, 82, 0, 16, 0,
+ 24, 0, 0, 0, 6, 0,
+ 16, 0, 24, 0, 0, 0,
+ 2, 64, 0, 0, 183, 7,
+ 0, 0, 0, 0, 0, 0,
+ 110, 15, 0, 0, 0, 0,
+ 0, 0, 167, 0, 0, 9,
+ 18, 0, 16, 0, 24, 0,
+ 0, 0, 10, 0, 16, 0,
+ 24, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 3, 0,
+ 0, 0, 167, 0, 0, 9,
+ 66, 0, 16, 0, 24, 0,
+ 0, 0, 42, 0, 16, 0,
+ 24, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 3, 0,
+ 0, 0, 0, 0, 0, 8,
+ 34, 0, 16, 0, 24, 0,
+ 0, 0, 58, 0, 16, 128,
+ 65, 0, 0, 0, 20, 0,
+ 0, 0, 26, 0, 16, 0,
+ 24, 0, 0, 0, 0, 0,
+ 0, 8, 82, 0, 16, 0,
+ 24, 0, 0, 0, 6, 1,
+ 16, 128, 65, 0, 0, 0,
+ 22, 0, 0, 0, 6, 2,
+ 16, 0, 24, 0, 0, 0,
+ 50, 0, 0, 9, 130, 0,
+ 16, 0, 24, 0, 0, 0,
+ 26, 0, 16, 0, 24, 0,
+ 0, 0, 26, 0, 16, 0,
+ 24, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 52,
+ 50, 0, 0, 9, 130, 0,
+ 16, 0, 24, 0, 0, 0,
+ 10, 0, 16, 0, 24, 0,
+ 0, 0, 10, 0, 16, 0,
+ 24, 0, 0, 0, 58, 0,
+ 16, 0, 24, 0, 0, 0,
+ 50, 0, 0, 9, 130, 0,
+ 16, 0, 24, 0, 0, 0,
+ 42, 0, 16, 0, 24, 0,
+ 0, 0, 42, 0, 16, 0,
+ 24, 0, 0, 0, 58, 0,
+ 16, 0, 24, 0, 0, 0,
+ 85, 0, 0, 7, 130, 0,
+ 16, 0, 23, 0, 0, 0,
+ 58, 0, 16, 0, 23, 0,
+ 0, 0, 1, 64, 0, 0,
+ 16, 0, 0, 0, 86, 0,
+ 0, 5, 130, 0, 16, 0,
+ 23, 0, 0, 0, 58, 0,
+ 16, 0, 23, 0, 0, 0,
+ 56, 0, 0, 7, 130, 0,
+ 16, 0, 23, 0, 0, 0,
+ 26, 0, 16, 0, 20, 0,
+ 0, 0, 58, 0, 16, 0,
+ 23, 0, 0, 0, 68, 0,
+ 0, 5, 130, 0, 16, 0,
+ 24, 0, 0, 0, 58, 0,
+ 16, 0, 24, 0, 0, 0,
+ 50, 0, 0, 10, 130, 0,
+ 16, 0, 23, 0, 0, 0,
+ 58, 0, 16, 128, 65, 0,
+ 0, 0, 23, 0, 0, 0,
+ 58, 0, 16, 0, 24, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 128, 63, 49, 0,
+ 0, 7, 130, 0, 16, 0,
+ 24, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 58, 0, 16, 0, 23, 0,
+ 0, 0, 50, 0, 0, 9,
+ 18, 0, 16, 0, 25, 0,
+ 0, 0, 26, 0, 16, 0,
+ 24, 0, 0, 0, 58, 0,
+ 16, 0, 23, 0, 0, 0,
+ 10, 0, 16, 0, 23, 0,
+ 0, 0, 50, 0, 0, 9,
+ 34, 0, 16, 0, 25, 0,
+ 0, 0, 10, 0, 16, 0,
+ 24, 0, 0, 0, 58, 0,
+ 16, 0, 23, 0, 0, 0,
+ 26, 0, 16, 0, 23, 0,
+ 0, 0, 50, 0, 0, 9,
+ 66, 0, 16, 0, 25, 0,
+ 0, 0, 42, 0, 16, 0,
+ 24, 0, 0, 0, 58, 0,
+ 16, 0, 23, 0, 0, 0,
+ 42, 0, 16, 0, 23, 0,
+ 0, 0, 55, 0, 0, 9,
+ 114, 0, 16, 0, 23, 0,
+ 0, 0, 246, 15, 16, 0,
+ 24, 0, 0, 0, 70, 2,
+ 16, 0, 25, 0, 0, 0,
+ 70, 2, 16, 0, 23, 0,
+ 0, 0, 30, 0, 0, 7,
+ 130, 0, 16, 0, 22, 0,
+ 0, 0, 42, 0, 16, 0,
+ 19, 0, 0, 0, 58, 0,
+ 16, 0, 22, 0, 0, 0,
+ 22, 0, 0, 1, 50, 0,
+ 0, 9, 130, 0, 16, 0,
+ 20, 0, 0, 0, 10, 0,
+ 16, 0, 23, 0, 0, 0,
+ 26, 0, 16, 0, 19, 0,
+ 0, 0, 58, 0, 16, 0,
+ 20, 0, 0, 0, 50, 0,
+ 0, 9, 18, 0, 16, 0,
+ 22, 0, 0, 0, 26, 0,
+ 16, 0, 23, 0, 0, 0,
+ 26, 0, 16, 0, 19, 0,
+ 0, 0, 10, 0, 16, 0,
+ 22, 0, 0, 0, 50, 0,
+ 0, 9, 34, 0, 16, 0,
+ 22, 0, 0, 0, 42, 0,
+ 16, 0, 23, 0, 0, 0,
+ 26, 0, 16, 0, 19, 0,
+ 0, 0, 26, 0, 16, 0,
+ 22, 0, 0, 0, 168, 0,
+ 0, 9, 18, 240, 17, 0,
+ 3, 0, 0, 0, 42, 0,
+ 16, 0, 20, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 58, 0, 16, 0,
+ 20, 0, 0, 0, 168, 0,
+ 0, 9, 18, 240, 17, 0,
+ 3, 0, 0, 0, 10, 0,
+ 16, 0, 21, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 10, 0, 16, 0,
+ 22, 0, 0, 0, 168, 0,
+ 0, 9, 18, 240, 17, 0,
+ 3, 0, 0, 0, 26, 0,
+ 16, 0, 21, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 26, 0, 16, 0,
+ 22, 0, 0, 0, 168, 0,
+ 0, 9, 18, 240, 17, 0,
+ 3, 0, 0, 0, 42, 0,
+ 16, 0, 21, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 42, 0, 16, 0,
+ 22, 0, 0, 0, 54, 0,
+ 0, 5, 66, 0, 16, 0,
+ 20, 0, 0, 0, 58, 0,
+ 16, 0, 21, 0, 0, 0,
+ 22, 0, 0, 1, 21, 0,
+ 0, 1, 54, 0, 0, 5,
+ 34, 0, 16, 0, 19, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 48, 0,
+ 0, 1, 165, 0, 0, 7,
+ 66, 0, 16, 0, 19, 0,
+ 0, 0, 1, 64, 0, 0,
+ 8, 0, 0, 0, 6, 240,
+ 17, 0, 0, 0, 0, 0,
+ 80, 0, 0, 7, 66, 0,
+ 16, 0, 19, 0, 0, 0,
+ 26, 0, 16, 0, 19, 0,
+ 0, 0, 42, 0, 16, 0,
+ 19, 0, 0, 0, 3, 0,
+ 4, 3, 42, 0, 16, 0,
+ 19, 0, 0, 0, 165, 0,
+ 0, 7, 114, 0, 16, 0,
+ 20, 0, 0, 0, 1, 64,
+ 0, 0, 12, 0, 0, 0,
+ 70, 242, 17, 0, 0, 0,
+ 0, 0, 30, 0, 0, 7,
+ 66, 0, 16, 0, 19, 0,
+ 0, 0, 26, 0, 16, 0,
+ 19, 0, 0, 0, 10, 0,
+ 16, 0, 20, 0, 0, 0,
+ 167, 0, 0, 139, 2, 195,
+ 0, 128, 131, 153, 25, 0,
+ 242, 0, 16, 0, 21, 0,
+ 0, 0, 42, 0, 16, 0,
+ 19, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 70, 126, 16, 0, 3, 0,
+ 0, 0, 167, 0, 0, 139,
+ 2, 195, 0, 128, 131, 153,
+ 25, 0, 194, 0, 16, 0,
+ 19, 0, 0, 0, 42, 0,
+ 16, 0, 19, 0, 0, 0,
+ 1, 64, 0, 0, 16, 0,
+ 0, 0, 6, 116, 16, 0,
+ 3, 0, 0, 0, 165, 0,
+ 0, 7, 18, 0, 16, 0,
+ 20, 0, 0, 0, 1, 64,
+ 0, 0, 108, 0, 0, 0,
+ 6, 240, 17, 0, 1, 0,
+ 0, 0, 56, 0, 0, 7,
+ 50, 0, 16, 0, 21, 0,
+ 0, 0, 6, 0, 16, 0,
+ 20, 0, 0, 0, 70, 0,
+ 16, 0, 21, 0, 0, 0,
+ 25, 0, 0, 5, 50, 0,
+ 16, 0, 21, 0, 0, 0,
+ 70, 0, 16, 0, 21, 0,
+ 0, 0, 0, 0, 0, 11,
+ 50, 0, 16, 0, 21, 0,
+ 0, 0, 70, 0, 16, 128,
+ 65, 0, 0, 0, 21, 0,
+ 0, 0, 2, 64, 0, 0,
+ 0, 0, 128, 63, 0, 0,
+ 128, 63, 0, 0, 0, 0,
+ 0, 0, 0, 0, 30, 0,
+ 0, 7, 34, 0, 16, 0,
+ 20, 0, 0, 0, 42, 0,
+ 16, 0, 19, 0, 0, 0,
+ 26, 0, 16, 0, 20, 0,
+ 0, 0, 39, 0, 0, 7,
+ 66, 0, 16, 0, 20, 0,
+ 0, 0, 42, 0, 16, 0,
+ 20, 0, 0, 0, 1, 64,
+ 0, 0, 255, 255, 255, 255,
+ 165, 0, 0, 7, 130, 0,
+ 16, 0, 20, 0, 0, 0,
+ 1, 64, 0, 0, 20, 0,
+ 0, 0, 6, 240, 17, 0,
+ 0, 0, 0, 0, 30, 0,
+ 0, 7, 66, 0, 16, 0,
+ 19, 0, 0, 0, 42, 0,
+ 16, 0, 19, 0, 0, 0,
+ 58, 0, 16, 0, 20, 0,
+ 0, 0, 190, 24, 0, 1,
+ 54, 0, 0, 4, 130, 0,
+ 16, 0, 20, 0, 0, 0,
+ 10, 32, 2, 0, 48, 0,
+ 0, 1, 80, 0, 0, 7,
+ 18, 0, 16, 0, 22, 0,
+ 0, 0, 58, 0, 16, 0,
+ 20, 0, 0, 0, 58, 0,
+ 16, 0, 19, 0, 0, 0,
+ 3, 0, 4, 3, 10, 0,
+ 16, 0, 22, 0, 0, 0,
+ 30, 0, 0, 7, 18, 0,
+ 16, 0, 22, 0, 0, 0,
+ 58, 0, 16, 0, 20, 0,
+ 0, 0, 26, 0, 16, 0,
+ 20, 0, 0, 0, 167, 0,
+ 0, 139, 2, 67, 0, 128,
+ 131, 153, 25, 0, 50, 0,
+ 16, 0, 22, 0, 0, 0,
+ 10, 0, 16, 0, 22, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 70, 112,
+ 16, 0, 4, 0, 0, 0,
+ 1, 0, 0, 7, 66, 0,
+ 16, 0, 22, 0, 0, 0,
+ 26, 0, 16, 0, 22, 0,
+ 0, 0, 1, 64, 0, 0,
+ 255, 255, 0, 0, 85, 0,
+ 0, 7, 34, 0, 16, 0,
+ 22, 0, 0, 0, 26, 0,
+ 16, 0, 22, 0, 0, 0,
+ 1, 64, 0, 0, 16, 0,
+ 0, 0, 167, 0, 0, 9,
+ 130, 0, 16, 0, 22, 0,
+ 0, 0, 42, 0, 16, 0,
+ 22, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 3, 0,
+ 0, 0, 30, 0, 0, 10,
+ 114, 0, 16, 0, 24, 0,
+ 0, 0, 166, 10, 16, 0,
+ 22, 0, 0, 0, 2, 64,
+ 0, 0, 183, 7, 0, 0,
+ 110, 15, 0, 0, 37, 23,
+ 0, 0, 0, 0, 0, 0,
+ 167, 0, 0, 9, 130, 0,
+ 16, 0, 23, 0, 0, 0,
+ 10, 0, 16, 0, 24, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 167, 0, 0, 9, 130, 0,
+ 16, 0, 24, 0, 0, 0,
+ 26, 0, 16, 0, 24, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 167, 0, 0, 9, 18, 0,
+ 16, 0, 25, 0, 0, 0,
+ 42, 0, 16, 0, 24, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 167, 0, 0, 9, 34, 0,
+ 16, 0, 25, 0, 0, 0,
+ 26, 0, 16, 0, 22, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 30, 0, 0, 10, 114, 0,
+ 16, 0, 26, 0, 0, 0,
+ 86, 5, 16, 0, 22, 0,
+ 0, 0, 2, 64, 0, 0,
+ 183, 7, 0, 0, 110, 15,
+ 0, 0, 37, 23, 0, 0,
+ 0, 0, 0, 0, 167, 0,
+ 0, 9, 66, 0, 16, 0,
+ 25, 0, 0, 0, 10, 0,
+ 16, 0, 26, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 167, 0,
+ 0, 9, 130, 0, 16, 0,
+ 25, 0, 0, 0, 26, 0,
+ 16, 0, 26, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 167, 0,
+ 0, 9, 130, 0, 16, 0,
+ 26, 0, 0, 0, 42, 0,
+ 16, 0, 26, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 0, 0,
+ 0, 8, 18, 0, 16, 0,
+ 27, 0, 0, 0, 58, 0,
+ 16, 128, 65, 0, 0, 0,
+ 22, 0, 0, 0, 26, 0,
+ 16, 0, 25, 0, 0, 0,
+ 0, 0, 0, 8, 34, 0,
+ 16, 0, 27, 0, 0, 0,
+ 58, 0, 16, 128, 65, 0,
+ 0, 0, 23, 0, 0, 0,
+ 42, 0, 16, 0, 25, 0,
+ 0, 0, 0, 0, 0, 8,
+ 66, 0, 16, 0, 27, 0,
+ 0, 0, 58, 0, 16, 128,
+ 65, 0, 0, 0, 24, 0,
+ 0, 0, 58, 0, 16, 0,
+ 25, 0, 0, 0, 50, 0,
+ 0, 9, 130, 0, 16, 0,
+ 27, 0, 0, 0, 10, 0,
+ 16, 0, 27, 0, 0, 0,
+ 10, 0, 16, 0, 27, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 52, 50, 0,
+ 0, 9, 130, 0, 16, 0,
+ 27, 0, 0, 0, 26, 0,
+ 16, 0, 27, 0, 0, 0,
+ 26, 0, 16, 0, 27, 0,
+ 0, 0, 58, 0, 16, 0,
+ 27, 0, 0, 0, 50, 0,
+ 0, 9, 130, 0, 16, 0,
+ 27, 0, 0, 0, 42, 0,
+ 16, 0, 27, 0, 0, 0,
+ 42, 0, 16, 0, 27, 0,
+ 0, 0, 58, 0, 16, 0,
+ 27, 0, 0, 0, 49, 0,
+ 0, 7, 18, 0, 16, 0,
+ 28, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 52,
+ 10, 0, 16, 0, 22, 0,
+ 0, 0, 68, 0, 0, 5,
+ 130, 0, 16, 0, 27, 0,
+ 0, 0, 58, 0, 16, 0,
+ 27, 0, 0, 0, 50, 0,
+ 0, 9, 18, 0, 16, 0,
+ 22, 0, 0, 0, 10, 0,
+ 16, 0, 22, 0, 0, 0,
+ 58, 0, 16, 0, 27, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 128, 191, 1, 0,
+ 0, 7, 18, 0, 16, 0,
+ 22, 0, 0, 0, 10, 0,
+ 16, 0, 22, 0, 0, 0,
+ 10, 0, 16, 0, 28, 0,
+ 0, 0, 51, 0, 0, 8,
+ 130, 0, 16, 0, 27, 0,
+ 0, 0, 58, 0, 16, 0,
+ 21, 0, 0, 0, 10, 0,
+ 16, 128, 65, 0, 0, 0,
+ 22, 0, 0, 0, 52, 0,
+ 0, 7, 130, 0, 16, 0,
+ 27, 0, 0, 0, 42, 0,
+ 16, 0, 21, 0, 0, 0,
+ 58, 0, 16, 0, 27, 0,
+ 0, 0, 50, 0, 0, 9,
+ 18, 0, 16, 0, 22, 0,
+ 0, 0, 26, 0, 16, 0,
+ 21, 0, 0, 0, 58, 0,
+ 16, 0, 27, 0, 0, 0,
+ 10, 0, 16, 0, 22, 0,
+ 0, 0, 30, 0, 0, 7,
+ 130, 0, 16, 0, 27, 0,
+ 0, 0, 42, 0, 16, 0,
+ 19, 0, 0, 0, 58, 0,
+ 16, 0, 20, 0, 0, 0,
+ 167, 0, 0, 139, 2, 35,
+ 0, 128, 131, 153, 25, 0,
+ 130, 0, 16, 0, 27, 0,
+ 0, 0, 58, 0, 16, 0,
+ 27, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 112, 16, 0, 16, 0,
+ 0, 0, 56, 0, 0, 7,
+ 130, 0, 16, 0, 27, 0,
+ 0, 0, 10, 0, 16, 0,
+ 20, 0, 0, 0, 58, 0,
+ 16, 0, 27, 0, 0, 0,
+ 25, 0, 0, 5, 130, 0,
+ 16, 0, 27, 0, 0, 0,
+ 58, 0, 16, 0, 27, 0,
+ 0, 0, 0, 0, 0, 8,
+ 130, 0, 16, 0, 27, 0,
+ 0, 0, 58, 0, 16, 128,
+ 65, 0, 0, 0, 27, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 128, 63, 55, 0,
+ 0, 9, 130, 0, 16, 0,
+ 27, 0, 0, 0, 42, 0,
+ 16, 0, 20, 0, 0, 0,
+ 58, 0, 16, 0, 27, 0,
+ 0, 0, 10, 0, 16, 0,
+ 21, 0, 0, 0, 56, 0,
+ 0, 7, 18, 0, 16, 0,
+ 22, 0, 0, 0, 10, 0,
+ 16, 0, 22, 0, 0, 0,
+ 58, 0, 16, 0, 27, 0,
+ 0, 0, 0, 0, 0, 7,
+ 130, 0, 16, 0, 27, 0,
+ 0, 0, 10, 0, 16, 0,
+ 25, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 52,
+ 0, 0, 0, 7, 130, 0,
+ 16, 0, 27, 0, 0, 0,
+ 58, 0, 16, 0, 26, 0,
+ 0, 0, 58, 0, 16, 0,
+ 27, 0, 0, 0, 14, 0,
+ 0, 7, 18, 0, 16, 0,
+ 22, 0, 0, 0, 10, 0,
+ 16, 0, 22, 0, 0, 0,
+ 58, 0, 16, 0, 27, 0,
+ 0, 0, 56, 0, 0, 8,
+ 130, 0, 16, 0, 27, 0,
+ 0, 0, 10, 0, 16, 0,
+ 22, 0, 0, 0, 10, 0,
+ 16, 128, 65, 0, 0, 0,
+ 25, 0, 0, 0, 50, 0,
+ 0, 9, 130, 0, 16, 0,
+ 22, 0, 0, 0, 58, 0,
+ 16, 0, 27, 0, 0, 0,
+ 10, 0, 16, 0, 27, 0,
+ 0, 0, 58, 0, 16, 0,
+ 22, 0, 0, 0, 50, 0,
+ 0, 9, 130, 0, 16, 0,
+ 23, 0, 0, 0, 58, 0,
+ 16, 0, 27, 0, 0, 0,
+ 26, 0, 16, 0, 27, 0,
+ 0, 0, 58, 0, 16, 0,
+ 23, 0, 0, 0, 50, 0,
+ 0, 9, 130, 0, 16, 0,
+ 24, 0, 0, 0, 58, 0,
+ 16, 0, 27, 0, 0, 0,
+ 42, 0, 16, 0, 27, 0,
+ 0, 0, 58, 0, 16, 0,
+ 24, 0, 0, 0, 168, 0,
+ 0, 9, 18, 240, 17, 0,
+ 3, 0, 0, 0, 42, 0,
+ 16, 0, 22, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 58, 0, 16, 0,
+ 22, 0, 0, 0, 168, 0,
+ 0, 9, 18, 240, 17, 0,
+ 3, 0, 0, 0, 10, 0,
+ 16, 0, 24, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 58, 0, 16, 0,
+ 23, 0, 0, 0, 168, 0,
+ 0, 9, 18, 240, 17, 0,
+ 3, 0, 0, 0, 26, 0,
+ 16, 0, 24, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 58, 0, 16, 0,
+ 24, 0, 0, 0, 168, 0,
+ 0, 9, 18, 240, 17, 0,
+ 3, 0, 0, 0, 42, 0,
+ 16, 0, 24, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 10, 0, 16, 0,
+ 25, 0, 0, 0, 56, 0,
+ 0, 7, 18, 0, 16, 0,
+ 22, 0, 0, 0, 10, 0,
+ 16, 0, 22, 0, 0, 0,
+ 58, 0, 16, 0, 26, 0,
+ 0, 0, 50, 0, 0, 9,
+ 66, 0, 16, 0, 22, 0,
+ 0, 0, 10, 0, 16, 0,
+ 22, 0, 0, 0, 10, 0,
+ 16, 0, 27, 0, 0, 0,
+ 26, 0, 16, 0, 25, 0,
+ 0, 0, 50, 0, 0, 9,
+ 130, 0, 16, 0, 22, 0,
+ 0, 0, 10, 0, 16, 0,
+ 22, 0, 0, 0, 26, 0,
+ 16, 0, 27, 0, 0, 0,
+ 42, 0, 16, 0, 25, 0,
+ 0, 0, 50, 0, 0, 9,
+ 18, 0, 16, 0, 22, 0,
+ 0, 0, 10, 0, 16, 0,
+ 22, 0, 0, 0, 42, 0,
+ 16, 0, 27, 0, 0, 0,
+ 58, 0, 16, 0, 25, 0,
+ 0, 0, 168, 0, 0, 9,
+ 18, 240, 17, 0, 3, 0,
+ 0, 0, 26, 0, 16, 0,
+ 22, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 42, 0, 16, 0, 22, 0,
+ 0, 0, 168, 0, 0, 9,
+ 18, 240, 17, 0, 3, 0,
+ 0, 0, 10, 0, 16, 0,
+ 26, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 58, 0, 16, 0, 22, 0,
+ 0, 0, 168, 0, 0, 9,
+ 18, 240, 17, 0, 3, 0,
+ 0, 0, 26, 0, 16, 0,
+ 26, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 10, 0, 16, 0, 22, 0,
+ 0, 0, 168, 0, 0, 9,
+ 18, 240, 17, 0, 3, 0,
+ 0, 0, 42, 0, 16, 0,
+ 26, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 58, 0, 16, 0, 26, 0,
+ 0, 0, 30, 0, 0, 7,
+ 130, 0, 16, 0, 20, 0,
+ 0, 0, 58, 0, 16, 0,
+ 20, 0, 0, 0, 1, 64,
+ 0, 0, 0, 4, 0, 0,
+ 22, 0, 0, 1, 30, 0,
+ 0, 7, 34, 0, 16, 0,
+ 19, 0, 0, 0, 26, 0,
+ 16, 0, 19, 0, 0, 0,
+ 1, 64, 0, 0, 1, 0,
+ 0, 0, 22, 0, 0, 1,
+ 190, 24, 0, 1, 165, 0,
+ 0, 7, 34, 0, 16, 0,
+ 19, 0, 0, 0, 1, 64,
+ 0, 0, 36, 0, 0, 0,
+ 6, 240, 17, 0, 1, 0,
+ 0, 0, 39, 0, 0, 7,
+ 34, 0, 16, 0, 19, 0,
+ 0, 0, 26, 0, 16, 0,
+ 19, 0, 0, 0, 1, 64,
+ 0, 0, 255, 255, 255, 255,
+ 31, 0, 4, 3, 26, 0,
+ 16, 0, 19, 0, 0, 0,
+ 165, 0, 0, 7, 34, 0,
+ 16, 0, 19, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 0, 0, 0, 0, 165, 0,
+ 0, 7, 194, 0, 16, 0,
+ 19, 0, 0, 0, 1, 64,
+ 0, 0, 36, 0, 0, 0,
+ 6, 244, 17, 0, 1, 0,
+ 0, 0, 54, 0, 0, 4,
+ 18, 0, 16, 0, 20, 0,
+ 0, 0, 10, 32, 2, 0,
+ 48, 0, 0, 1, 80, 0,
+ 0, 7, 34, 0, 16, 0,
+ 20, 0, 0, 0, 10, 0,
+ 16, 0, 20, 0, 0, 0,
+ 26, 0, 16, 0, 19, 0,
+ 0, 0, 3, 0, 4, 3,
+ 26, 0, 16, 0, 20, 0,
+ 0, 0, 30, 0, 0, 7,
+ 98, 0, 16, 0, 20, 0,
+ 0, 0, 166, 11, 16, 0,
+ 19, 0, 0, 0, 6, 0,
+ 16, 0, 20, 0, 0, 0,
+ 167, 0, 0, 139, 2, 131,
+ 0, 128, 131, 153, 25, 0,
+ 242, 0, 16, 0, 21, 0,
+ 0, 0, 26, 0, 16, 0,
+ 20, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 70, 126, 16, 0, 12, 0,
+ 0, 0, 167, 0, 0, 139,
+ 2, 131, 0, 128, 131, 153,
+ 25, 0, 242, 0, 16, 0,
+ 22, 0, 0, 0, 42, 0,
+ 16, 0, 20, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 70, 126, 16, 0,
+ 12, 0, 0, 0, 0, 0,
+ 0, 8, 242, 0, 16, 0,
+ 22, 0, 0, 0, 70, 14,
+ 16, 128, 65, 0, 0, 0,
+ 21, 0, 0, 0, 70, 14,
+ 16, 0, 22, 0, 0, 0,
+ 50, 0, 0, 9, 242, 0,
+ 16, 0, 21, 0, 0, 0,
+ 70, 14, 16, 0, 22, 0,
+ 0, 0, 6, 0, 16, 0,
+ 17, 0, 0, 0, 70, 14,
+ 16, 0, 21, 0, 0, 0,
+ 167, 0, 0, 9, 18, 0,
+ 16, 0, 22, 0, 0, 0,
+ 10, 0, 16, 0, 20, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 30, 0, 0, 10, 242, 0,
+ 16, 0, 24, 0, 0, 0,
+ 6, 0, 16, 0, 20, 0,
+ 0, 0, 2, 64, 0, 0,
+ 183, 7, 0, 0, 110, 15,
+ 0, 0, 37, 23, 0, 0,
+ 0, 4, 0, 0, 167, 0,
+ 0, 9, 34, 0, 16, 0,
+ 22, 0, 0, 0, 10, 0,
+ 16, 0, 24, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 167, 0,
+ 0, 9, 66, 0, 16, 0,
+ 22, 0, 0, 0, 26, 0,
+ 16, 0, 24, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 167, 0,
+ 0, 9, 34, 0, 16, 0,
+ 20, 0, 0, 0, 42, 0,
+ 16, 0, 24, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 0, 0,
+ 0, 8, 114, 0, 16, 0,
+ 21, 0, 0, 0, 70, 2,
+ 16, 0, 21, 0, 0, 0,
+ 70, 2, 16, 128, 65, 0,
+ 0, 0, 22, 0, 0, 0,
+ 16, 0, 0, 7, 66, 0,
+ 16, 0, 20, 0, 0, 0,
+ 70, 2, 16, 0, 21, 0,
+ 0, 0, 70, 2, 16, 0,
+ 21, 0, 0, 0, 0, 0,
+ 0, 7, 66, 0, 16, 0,
+ 20, 0, 0, 0, 42, 0,
+ 16, 0, 20, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 52, 68, 0, 0, 5,
+ 66, 0, 16, 0, 20, 0,
+ 0, 0, 42, 0, 16, 0,
+ 20, 0, 0, 0, 50, 0,
+ 0, 10, 66, 0, 16, 0,
+ 20, 0, 0, 0, 58, 0,
+ 16, 128, 65, 0, 0, 0,
+ 21, 0, 0, 0, 42, 0,
+ 16, 0, 20, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 128, 63, 51, 0, 0, 7,
+ 66, 0, 16, 0, 20, 0,
+ 0, 0, 42, 0, 16, 0,
+ 20, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 50, 0, 0, 9, 114, 0,
+ 16, 0, 21, 0, 0, 0,
+ 166, 10, 16, 0, 20, 0,
+ 0, 0, 70, 2, 16, 0,
+ 21, 0, 0, 0, 70, 2,
+ 16, 0, 22, 0, 0, 0,
+ 168, 0, 0, 9, 18, 240,
+ 17, 0, 3, 0, 0, 0,
+ 10, 0, 16, 0, 20, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 10, 0,
+ 16, 0, 21, 0, 0, 0,
+ 168, 0, 0, 9, 18, 240,
+ 17, 0, 3, 0, 0, 0,
+ 10, 0, 16, 0, 24, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 26, 0,
+ 16, 0, 21, 0, 0, 0,
+ 168, 0, 0, 9, 18, 240,
+ 17, 0, 3, 0, 0, 0,
+ 26, 0, 16, 0, 24, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 42, 0,
+ 16, 0, 21, 0, 0, 0,
+ 168, 0, 0, 9, 18, 240,
+ 17, 0, 3, 0, 0, 0,
+ 42, 0, 16, 0, 24, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 26, 0,
+ 16, 0, 20, 0, 0, 0,
+ 54, 0, 0, 5, 18, 0,
+ 16, 0, 20, 0, 0, 0,
+ 58, 0, 16, 0, 24, 0,
+ 0, 0, 22, 0, 0, 1,
+ 190, 24, 0, 1, 21, 0,
+ 0, 1, 31, 0, 4, 3,
+ 10, 0, 16, 0, 1, 0,
+ 0, 0, 165, 0, 0, 7,
+ 34, 0, 16, 0, 19, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 0, 0, 0, 0,
+ 30, 0, 0, 7, 66, 0,
+ 16, 0, 19, 0, 0, 0,
+ 26, 0, 16, 0, 19, 0,
+ 0, 0, 1, 64, 0, 0,
+ 255, 255, 255, 255, 84, 0,
+ 0, 7, 66, 0, 16, 0,
+ 19, 0, 0, 0, 26, 0,
+ 16, 0, 2, 0, 0, 0,
+ 42, 0, 16, 0, 19, 0,
+ 0, 0, 167, 0, 0, 9,
+ 18, 0, 16, 0, 20, 0,
+ 0, 0, 42, 0, 16, 0,
+ 19, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 3, 0,
+ 0, 0, 30, 0, 0, 10,
+ 114, 0, 16, 0, 21, 0,
+ 0, 0, 166, 10, 16, 0,
+ 19, 0, 0, 0, 2, 64,
+ 0, 0, 183, 7, 0, 0,
+ 110, 15, 0, 0, 37, 23,
+ 0, 0, 0, 0, 0, 0,
+ 167, 0, 0, 9, 34, 0,
+ 16, 0, 20, 0, 0, 0,
+ 10, 0, 16, 0, 21, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 167, 0, 0, 9, 66, 0,
+ 16, 0, 20, 0, 0, 0,
+ 26, 0, 16, 0, 21, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 167, 0, 0, 9, 130, 0,
+ 16, 0, 20, 0, 0, 0,
+ 42, 0, 16, 0, 21, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 17, 0, 0, 8, 130, 0,
+ 16, 0, 19, 0, 0, 0,
+ 70, 14, 16, 0, 20, 0,
+ 0, 0, 70, 158, 144, 0,
+ 10, 0, 16, 0, 4, 0,
+ 0, 0, 56, 0, 0, 7,
+ 130, 0, 16, 0, 19, 0,
+ 0, 0, 42, 0, 16, 0,
+ 0, 0, 0, 0, 58, 0,
+ 16, 0, 19, 0, 0, 0,
+ 168, 0, 0, 8, 18, 240,
+ 17, 0, 4, 0, 0, 0,
+ 10, 32, 2, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 58, 0, 16, 0, 19, 0,
+ 0, 0, 54, 0, 0, 5,
+ 130, 0, 16, 0, 19, 0,
+ 0, 0, 42, 0, 16, 0,
+ 19, 0, 0, 0, 48, 0,
+ 0, 1, 30, 0, 0, 7,
+ 18, 0, 16, 0, 20, 0,
+ 0, 0, 58, 0, 16, 0,
+ 19, 0, 0, 0, 1, 64,
+ 0, 0, 32, 0, 0, 0,
+ 80, 0, 0, 7, 34, 0,
+ 16, 0, 20, 0, 0, 0,
+ 10, 0, 16, 0, 20, 0,
+ 0, 0, 26, 0, 16, 0,
+ 19, 0, 0, 0, 3, 0,
+ 4, 3, 26, 0, 16, 0,
+ 20, 0, 0, 0, 167, 0,
+ 0, 8, 34, 0, 16, 0,
+ 20, 0, 0, 0, 10, 32,
+ 2, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 4, 0, 0, 0,
+ 167, 0, 0, 9, 18, 0,
+ 16, 0, 21, 0, 0, 0,
+ 10, 0, 16, 0, 20, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 30, 0, 0, 10, 114, 0,
+ 16, 0, 22, 0, 0, 0,
+ 246, 15, 16, 0, 19, 0,
+ 0, 0, 2, 64, 0, 0,
+ 215, 7, 0, 0, 142, 15,
+ 0, 0, 69, 23, 0, 0,
+ 0, 0, 0, 0, 167, 0,
+ 0, 9, 34, 0, 16, 0,
+ 21, 0, 0, 0, 10, 0,
+ 16, 0, 22, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 167, 0,
+ 0, 9, 66, 0, 16, 0,
+ 21, 0, 0, 0, 26, 0,
+ 16, 0, 22, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 167, 0,
+ 0, 9, 130, 0, 16, 0,
+ 21, 0, 0, 0, 42, 0,
+ 16, 0, 22, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 17, 0,
+ 0, 8, 66, 0, 16, 0,
+ 20, 0, 0, 0, 70, 14,
+ 16, 0, 21, 0, 0, 0,
+ 70, 158, 144, 0, 10, 0,
+ 16, 0, 4, 0, 0, 0,
+ 56, 0, 0, 7, 66, 0,
+ 16, 0, 20, 0, 0, 0,
+ 42, 0, 16, 0, 0, 0,
+ 0, 0, 42, 0, 16, 0,
+ 20, 0, 0, 0, 52, 0,
+ 0, 7, 34, 0, 16, 0,
+ 20, 0, 0, 0, 42, 0,
+ 16, 0, 20, 0, 0, 0,
+ 26, 0, 16, 0, 20, 0,
+ 0, 0, 168, 0, 0, 8,
+ 18, 240, 17, 0, 4, 0,
+ 0, 0, 10, 32, 2, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 26, 0, 16, 0,
+ 20, 0, 0, 0, 54, 0,
+ 0, 5, 130, 0, 16, 0,
+ 19, 0, 0, 0, 10, 0,
+ 16, 0, 20, 0, 0, 0,
+ 22, 0, 0, 1, 21, 0,
+ 0, 1, 190, 24, 0, 1,
+ 31, 0, 4, 3, 26, 0,
+ 16, 0, 1, 0, 0, 0,
+ 167, 0, 0, 8, 34, 0,
+ 16, 0, 19, 0, 0, 0,
+ 10, 32, 2, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 4, 0,
+ 0, 0, 167, 0, 0, 9,
+ 66, 0, 16, 0, 19, 0,
+ 0, 0, 10, 0, 16, 0,
+ 3, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 4, 0,
+ 0, 0, 52, 0, 0, 7,
+ 34, 0, 16, 0, 19, 0,
+ 0, 0, 42, 0, 16, 0,
+ 19, 0, 0, 0, 26, 0,
+ 16, 0, 19, 0, 0, 0,
+ 168, 0, 0, 8, 18, 240,
+ 17, 0, 4, 0, 0, 0,
+ 10, 32, 2, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 26, 0, 16, 0, 19, 0,
+ 0, 0, 21, 0, 0, 1,
+ 190, 24, 0, 1, 31, 0,
+ 4, 3, 26, 0, 16, 0,
+ 1, 0, 0, 0, 167, 0,
+ 0, 8, 34, 0, 16, 0,
+ 19, 0, 0, 0, 10, 32,
+ 2, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 4, 0, 0, 0,
+ 167, 0, 0, 9, 66, 0,
+ 16, 0, 19, 0, 0, 0,
+ 26, 0, 16, 0, 3, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 4, 0, 0, 0,
+ 52, 0, 0, 7, 34, 0,
+ 16, 0, 19, 0, 0, 0,
+ 42, 0, 16, 0, 19, 0,
+ 0, 0, 26, 0, 16, 0,
+ 19, 0, 0, 0, 168, 0,
+ 0, 8, 18, 240, 17, 0,
+ 4, 0, 0, 0, 10, 32,
+ 2, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 26, 0,
+ 16, 0, 19, 0, 0, 0,
+ 21, 0, 0, 1, 190, 24,
+ 0, 1, 31, 0, 4, 3,
+ 26, 0, 16, 0, 1, 0,
+ 0, 0, 167, 0, 0, 8,
+ 34, 0, 16, 0, 19, 0,
+ 0, 0, 10, 32, 2, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 4, 0, 0, 0, 167, 0,
+ 0, 9, 66, 0, 16, 0,
+ 19, 0, 0, 0, 42, 0,
+ 16, 0, 3, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 4, 0, 0, 0, 52, 0,
+ 0, 7, 34, 0, 16, 0,
+ 19, 0, 0, 0, 42, 0,
+ 16, 0, 19, 0, 0, 0,
+ 26, 0, 16, 0, 19, 0,
+ 0, 0, 168, 0, 0, 8,
+ 18, 240, 17, 0, 4, 0,
+ 0, 0, 10, 32, 2, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 26, 0, 16, 0,
+ 19, 0, 0, 0, 21, 0,
+ 0, 1, 190, 24, 0, 1,
+ 31, 0, 4, 3, 26, 0,
+ 16, 0, 1, 0, 0, 0,
+ 167, 0, 0, 8, 34, 0,
+ 16, 0, 19, 0, 0, 0,
+ 10, 32, 2, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 4, 0,
+ 0, 0, 167, 0, 0, 9,
+ 66, 0, 16, 0, 19, 0,
+ 0, 0, 58, 0, 16, 0,
+ 3, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 4, 0,
+ 0, 0, 52, 0, 0, 7,
+ 34, 0, 16, 0, 19, 0,
+ 0, 0, 42, 0, 16, 0,
+ 19, 0, 0, 0, 26, 0,
+ 16, 0, 19, 0, 0, 0,
+ 168, 0, 0, 8, 18, 240,
+ 17, 0, 4, 0, 0, 0,
+ 10, 32, 2, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 26, 0, 16, 0, 19, 0,
+ 0, 0, 21, 0, 0, 1,
+ 190, 24, 0, 1, 31, 0,
+ 4, 3, 26, 0, 16, 0,
+ 1, 0, 0, 0, 167, 0,
+ 0, 8, 34, 0, 16, 0,
+ 19, 0, 0, 0, 10, 32,
+ 2, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 4, 0, 0, 0,
+ 167, 0, 0, 9, 66, 0,
+ 16, 0, 19, 0, 0, 0,
+ 10, 0, 16, 0, 6, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 4, 0, 0, 0,
+ 52, 0, 0, 7, 34, 0,
+ 16, 0, 19, 0, 0, 0,
+ 42, 0, 16, 0, 19, 0,
+ 0, 0, 26, 0, 16, 0,
+ 19, 0, 0, 0, 168, 0,
+ 0, 8, 18, 240, 17, 0,
+ 4, 0, 0, 0, 10, 32,
+ 2, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 26, 0,
+ 16, 0, 19, 0, 0, 0,
+ 21, 0, 0, 1, 190, 24,
+ 0, 1, 31, 0, 4, 3,
+ 58, 0, 16, 0, 0, 0,
+ 0, 0, 167, 0, 0, 8,
+ 34, 0, 16, 0, 19, 0,
+ 0, 0, 10, 32, 2, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 4, 0, 0, 0, 166, 0,
+ 0, 7, 18, 240, 17, 0,
+ 1, 0, 0, 0, 10, 0,
+ 16, 0, 2, 0, 0, 0,
+ 26, 0, 16, 0, 19, 0,
+ 0, 0, 21, 0, 0, 1,
+ 190, 24, 0, 1, 165, 0,
+ 0, 7, 34, 0, 16, 0,
+ 19, 0, 0, 0, 1, 64,
+ 0, 0, 16, 0, 0, 0,
+ 6, 240, 17, 0, 1, 0,
+ 0, 0, 86, 0, 0, 5,
+ 34, 0, 16, 0, 19, 0,
+ 0, 0, 26, 0, 16, 0,
+ 19, 0, 0, 0, 14, 0,
+ 0, 7, 18, 0, 16, 0,
+ 19, 0, 0, 0, 10, 0,
+ 16, 0, 19, 0, 0, 0,
+ 26, 0, 16, 0, 19, 0,
+ 0, 0, 165, 0, 0, 7,
+ 34, 0, 16, 0, 19, 0,
+ 0, 0, 1, 64, 0, 0,
+ 60, 0, 0, 0, 6, 240,
+ 17, 0, 0, 0, 0, 0,
+ 31, 0, 4, 3, 26, 0,
+ 16, 0, 19, 0, 0, 0,
+ 165, 0, 0, 7, 66, 0,
+ 16, 0, 19, 0, 0, 0,
+ 1, 64, 0, 0, 80, 0,
+ 0, 0, 6, 240, 17, 0,
+ 0, 0, 0, 0, 49, 0,
+ 0, 7, 66, 0, 16, 0,
+ 19, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 42, 0, 16, 0, 19, 0,
+ 0, 0, 165, 0, 0, 7,
+ 130, 0, 16, 0, 19, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 0, 0, 0, 0,
+ 165, 0, 0, 7, 18, 0,
+ 16, 0, 20, 0, 0, 0,
+ 1, 64, 0, 0, 64, 0,
+ 0, 0, 6, 240, 17, 0,
+ 0, 0, 0, 0, 165, 0,
+ 0, 7, 98, 0, 16, 0,
+ 20, 0, 0, 0, 1, 64,
+ 0, 0, 56, 0, 0, 0,
+ 6, 241, 17, 0, 1, 0,
+ 0, 0, 54, 0, 0, 4,
+ 130, 0, 16, 0, 20, 0,
+ 0, 0, 10, 32, 2, 0,
+ 48, 0, 0, 1, 80, 0,
+ 0, 7, 18, 0, 16, 0,
+ 21, 0, 0, 0, 58, 0,
+ 16, 0, 20, 0, 0, 0,
+ 58, 0, 16, 0, 19, 0,
+ 0, 0, 3, 0, 4, 3,
+ 10, 0, 16, 0, 21, 0,
+ 0, 0, 167, 0, 0, 9,
+ 18, 0, 16, 0, 21, 0,
+ 0, 0, 58, 0, 16, 0,
+ 20, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 3, 0,
+ 0, 0, 30, 0, 0, 10,
+ 242, 0, 16, 0, 22, 0,
+ 0, 0, 246, 15, 16, 0,
+ 20, 0, 0, 0, 2, 64,
+ 0, 0, 183, 7, 0, 0,
+ 110, 15, 0, 0, 37, 23,
+ 0, 0, 0, 4, 0, 0,
+ 167, 0, 0, 9, 34, 0,
+ 16, 0, 21, 0, 0, 0,
+ 10, 0, 16, 0, 22, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 167, 0, 0, 9, 66, 0,
+ 16, 0, 21, 0, 0, 0,
+ 26, 0, 16, 0, 22, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 167, 0, 0, 9, 130, 0,
+ 16, 0, 21, 0, 0, 0,
+ 42, 0, 16, 0, 22, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 54, 0, 0, 8, 242, 0,
+ 16, 0, 24, 0, 0, 0,
+ 2, 64, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 54, 0, 0, 5,
+ 130, 0, 16, 0, 23, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 48, 0,
+ 0, 1, 80, 0, 0, 7,
+ 18, 0, 16, 0, 25, 0,
+ 0, 0, 58, 0, 16, 0,
+ 23, 0, 0, 0, 26, 0,
+ 16, 0, 19, 0, 0, 0,
+ 3, 0, 4, 3, 10, 0,
+ 16, 0, 25, 0, 0, 0,
+ 30, 0, 0, 7, 18, 0,
+ 16, 0, 25, 0, 0, 0,
+ 10, 0, 16, 0, 20, 0,
+ 0, 0, 58, 0, 16, 0,
+ 23, 0, 0, 0, 167, 0,
+ 0, 139, 2, 35, 0, 128,
+ 131, 153, 25, 0, 18, 0,
+ 16, 0, 25, 0, 0, 0,
+ 10, 0, 16, 0, 25, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 112,
+ 16, 0, 8, 0, 0, 0,
+ 136, 0, 0, 5, 34, 0,
+ 16, 0, 25, 0, 0, 0,
+ 10, 0, 16, 0, 25, 0,
+ 0, 0, 30, 0, 0, 7,
+ 98, 0, 16, 0, 25, 0,
+ 0, 0, 86, 6, 16, 0,
+ 20, 0, 0, 0, 86, 5,
+ 16, 0, 25, 0, 0, 0,
+ 167, 0, 0, 139, 2, 131,
+ 0, 128, 131, 153, 25, 0,
+ 242, 0, 16, 0, 26, 0,
+ 0, 0, 26, 0, 16, 0,
+ 25, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 70, 126, 16, 0, 9, 0,
+ 0, 0, 167, 0, 0, 139,
+ 2, 131, 0, 128, 131, 153,
+ 25, 0, 242, 0, 16, 0,
+ 27, 0, 0, 0, 42, 0,
+ 16, 0, 25, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 70, 126, 16, 0,
+ 9, 0, 0, 0, 0, 0,
+ 0, 8, 242, 0, 16, 0,
+ 27, 0, 0, 0, 70, 14,
+ 16, 128, 65, 0, 0, 0,
+ 26, 0, 0, 0, 70, 14,
+ 16, 0, 27, 0, 0, 0,
+ 50, 0, 0, 9, 242, 0,
+ 16, 0, 26, 0, 0, 0,
+ 6, 0, 16, 0, 17, 0,
+ 0, 0, 70, 14, 16, 0,
+ 27, 0, 0, 0, 70, 14,
+ 16, 0, 26, 0, 0, 0,
+ 16, 0, 0, 7, 34, 0,
+ 16, 0, 25, 0, 0, 0,
+ 70, 2, 16, 0, 21, 0,
+ 0, 0, 70, 2, 16, 0,
+ 26, 0, 0, 0, 0, 0,
+ 0, 7, 34, 0, 16, 0,
+ 25, 0, 0, 0, 58, 0,
+ 16, 0, 26, 0, 0, 0,
+ 26, 0, 16, 0, 25, 0,
+ 0, 0, 54, 0, 0, 5,
+ 114, 0, 16, 0, 27, 0,
+ 0, 0, 70, 2, 16, 0,
+ 26, 0, 0, 0, 54, 0,
+ 0, 5, 66, 0, 16, 0,
+ 25, 0, 0, 0, 10, 0,
+ 16, 0, 25, 0, 0, 0,
+ 54, 0, 0, 5, 130, 0,
+ 16, 0, 27, 0, 0, 0,
+ 26, 0, 16, 0, 25, 0,
+ 0, 0, 48, 0, 0, 1,
+ 49, 0, 0, 7, 130, 0,
+ 16, 0, 25, 0, 0, 0,
+ 58, 0, 16, 0, 27, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 30, 0,
+ 0, 7, 130, 0, 16, 0,
+ 26, 0, 0, 0, 42, 0,
+ 16, 0, 25, 0, 0, 0,
+ 1, 64, 0, 0, 255, 255,
+ 255, 255, 1, 0, 0, 7,
+ 66, 0, 16, 0, 25, 0,
+ 0, 0, 42, 0, 16, 0,
+ 25, 0, 0, 0, 58, 0,
+ 16, 0, 26, 0, 0, 0,
+ 39, 0, 0, 7, 130, 0,
+ 16, 0, 26, 0, 0, 0,
+ 42, 0, 16, 0, 25, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 1, 0,
+ 0, 7, 130, 0, 16, 0,
+ 25, 0, 0, 0, 58, 0,
+ 16, 0, 25, 0, 0, 0,
+ 58, 0, 16, 0, 26, 0,
+ 0, 0, 3, 0, 0, 3,
+ 58, 0, 16, 0, 25, 0,
+ 0, 0, 136, 0, 0, 5,
+ 130, 0, 16, 0, 25, 0,
+ 0, 0, 42, 0, 16, 0,
+ 25, 0, 0, 0, 30, 0,
+ 0, 7, 50, 0, 16, 0,
+ 28, 0, 0, 0, 150, 5,
+ 16, 0, 20, 0, 0, 0,
+ 246, 15, 16, 0, 25, 0,
+ 0, 0, 167, 0, 0, 139,
+ 2, 131, 0, 128, 131, 153,
+ 25, 0, 242, 0, 16, 0,
+ 29, 0, 0, 0, 10, 0,
+ 16, 0, 28, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 70, 126, 16, 0,
+ 9, 0, 0, 0, 167, 0,
+ 0, 139, 2, 131, 0, 128,
+ 131, 153, 25, 0, 242, 0,
+ 16, 0, 28, 0, 0, 0,
+ 26, 0, 16, 0, 28, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 70, 126,
+ 16, 0, 9, 0, 0, 0,
+ 0, 0, 0, 8, 242, 0,
+ 16, 0, 28, 0, 0, 0,
+ 70, 14, 16, 128, 65, 0,
+ 0, 0, 29, 0, 0, 0,
+ 70, 14, 16, 0, 28, 0,
+ 0, 0, 50, 0, 0, 9,
+ 242, 0, 16, 0, 28, 0,
+ 0, 0, 6, 0, 16, 0,
+ 17, 0, 0, 0, 70, 14,
+ 16, 0, 28, 0, 0, 0,
+ 70, 14, 16, 0, 29, 0,
+ 0, 0, 16, 0, 0, 7,
+ 130, 0, 16, 0, 25, 0,
+ 0, 0, 70, 2, 16, 0,
+ 21, 0, 0, 0, 70, 2,
+ 16, 0, 28, 0, 0, 0,
+ 0, 0, 0, 7, 130, 0,
+ 16, 0, 28, 0, 0, 0,
+ 58, 0, 16, 0, 28, 0,
+ 0, 0, 58, 0, 16, 0,
+ 25, 0, 0, 0, 49, 0,
+ 0, 7, 130, 0, 16, 0,
+ 25, 0, 0, 0, 58, 0,
+ 16, 0, 27, 0, 0, 0,
+ 58, 0, 16, 0, 28, 0,
+ 0, 0, 55, 0, 0, 9,
+ 242, 0, 16, 0, 27, 0,
+ 0, 0, 246, 15, 16, 0,
+ 25, 0, 0, 0, 70, 14,
+ 16, 0, 28, 0, 0, 0,
+ 70, 14, 16, 0, 27, 0,
+ 0, 0, 22, 0, 0, 1,
+ 49, 0, 0, 7, 18, 0,
+ 16, 0, 25, 0, 0, 0,
+ 58, 0, 16, 0, 27, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 50, 0,
+ 0, 10, 114, 0, 16, 0,
+ 26, 0, 0, 0, 70, 2,
+ 16, 128, 65, 0, 0, 0,
+ 27, 0, 0, 0, 246, 15,
+ 16, 0, 27, 0, 0, 0,
+ 70, 2, 16, 0, 24, 0,
+ 0, 0, 30, 0, 0, 7,
+ 130, 0, 16, 0, 26, 0,
+ 0, 0, 58, 0, 16, 0,
+ 24, 0, 0, 0, 1, 64,
+ 0, 0, 1, 0, 0, 0,
+ 55, 0, 0, 9, 242, 0,
+ 16, 0, 24, 0, 0, 0,
+ 6, 0, 16, 0, 25, 0,
+ 0, 0, 70, 14, 16, 0,
+ 26, 0, 0, 0, 70, 14,
+ 16, 0, 24, 0, 0, 0,
+ 30, 0, 0, 7, 130, 0,
+ 16, 0, 23, 0, 0, 0,
+ 58, 0, 16, 0, 23, 0,
+ 0, 0, 1, 64, 0, 0,
+ 1, 0, 0, 0, 22, 0,
+ 0, 1, 31, 0, 4, 3,
+ 58, 0, 16, 0, 24, 0,
+ 0, 0, 86, 0, 0, 5,
+ 130, 0, 16, 0, 23, 0,
+ 0, 0, 58, 0, 16, 0,
+ 24, 0, 0, 0, 14, 0,
+ 0, 10, 130, 0, 16, 0,
+ 23, 0, 0, 0, 2, 64,
+ 0, 0, 0, 0, 128, 63,
+ 0, 0, 128, 63, 0, 0,
+ 128, 63, 0, 0, 128, 63,
+ 58, 0, 16, 0, 23, 0,
+ 0, 0, 31, 0, 4, 3,
+ 42, 0, 16, 0, 19, 0,
+ 0, 0, 30, 0, 0, 7,
+ 18, 0, 16, 0, 25, 0,
+ 0, 0, 10, 0, 16, 0,
+ 0, 0, 0, 0, 58, 0,
+ 16, 0, 20, 0, 0, 0,
+ 167, 0, 0, 139, 2, 131,
+ 0, 128, 131, 153, 25, 0,
+ 226, 0, 16, 0, 25, 0,
+ 0, 0, 10, 0, 16, 0,
+ 25, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 233, 17, 0, 0, 0,
+ 0, 0, 165, 0, 0, 7,
+ 18, 0, 16, 0, 26, 0,
+ 0, 0, 1, 64, 0, 0,
+ 80, 0, 0, 0, 6, 240,
+ 17, 0, 0, 0, 0, 0,
+ 16, 0, 0, 7, 34, 0,
+ 16, 0, 26, 0, 0, 0,
+ 70, 2, 16, 0, 24, 0,
+ 0, 0, 70, 2, 16, 0,
+ 24, 0, 0, 0, 0, 0,
+ 0, 7, 66, 0, 16, 0,
+ 26, 0, 0, 0, 26, 0,
+ 16, 0, 26, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 52, 68, 0, 0, 5,
+ 66, 0, 16, 0, 26, 0,
+ 0, 0, 42, 0, 16, 0,
+ 26, 0, 0, 0, 56, 0,
+ 0, 7, 114, 0, 16, 0,
+ 28, 0, 0, 0, 70, 2,
+ 16, 0, 24, 0, 0, 0,
+ 166, 10, 16, 0, 26, 0,
+ 0, 0, 0, 0, 0, 8,
+ 114, 0, 16, 0, 29, 0,
+ 0, 0, 70, 2, 16, 0,
+ 21, 0, 0, 0, 150, 7,
+ 16, 128, 65, 0, 0, 0,
+ 25, 0, 0, 0, 16, 0,
+ 0, 7, 130, 0, 16, 0,
+ 26, 0, 0, 0, 70, 2,
+ 16, 0, 29, 0, 0, 0,
+ 70, 2, 16, 0, 28, 0,
+ 0, 0, 50, 0, 0, 10,
+ 114, 0, 16, 0, 28, 0,
+ 0, 0, 246, 15, 16, 128,
+ 65, 0, 0, 0, 26, 0,
+ 0, 0, 70, 2, 16, 0,
+ 28, 0, 0, 0, 70, 2,
+ 16, 0, 29, 0, 0, 0,
+ 16, 0, 0, 7, 130, 0,
+ 16, 0, 26, 0, 0, 0,
+ 70, 2, 16, 0, 28, 0,
+ 0, 0, 70, 2, 16, 0,
+ 28, 0, 0, 0, 0, 0,
+ 0, 7, 130, 0, 16, 0,
+ 26, 0, 0, 0, 58, 0,
+ 16, 0, 26, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 52, 68, 0, 0, 5,
+ 130, 0, 16, 0, 26, 0,
+ 0, 0, 58, 0, 16, 0,
+ 26, 0, 0, 0, 56, 0,
+ 0, 8, 18, 0, 16, 0,
+ 26, 0, 0, 0, 26, 0,
+ 16, 0, 26, 0, 0, 0,
+ 10, 0, 16, 128, 65, 0,
+ 0, 0, 26, 0, 0, 0,
+ 56, 0, 0, 7, 18, 0,
+ 16, 0, 26, 0, 0, 0,
+ 42, 0, 16, 0, 26, 0,
+ 0, 0, 10, 0, 16, 0,
+ 26, 0, 0, 0, 56, 0,
+ 0, 7, 18, 0, 16, 0,
+ 26, 0, 0, 0, 58, 0,
+ 16, 0, 23, 0, 0, 0,
+ 10, 0, 16, 0, 26, 0,
+ 0, 0, 56, 0, 0, 7,
+ 18, 0, 16, 0, 26, 0,
+ 0, 0, 58, 0, 16, 0,
+ 26, 0, 0, 0, 10, 0,
+ 16, 0, 26, 0, 0, 0,
+ 52, 0, 0, 7, 18, 0,
+ 16, 0, 26, 0, 0, 0,
+ 10, 0, 16, 0, 26, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 128, 191, 50, 0,
+ 0, 10, 226, 0, 16, 0,
+ 25, 0, 0, 0, 6, 9,
+ 16, 128, 65, 0, 0, 0,
+ 28, 0, 0, 0, 6, 0,
+ 16, 0, 26, 0, 0, 0,
+ 86, 14, 16, 0, 25, 0,
+ 0, 0, 168, 0, 0, 9,
+ 114, 224, 17, 0, 0, 0,
+ 0, 0, 10, 0, 16, 0,
+ 25, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 150, 7, 16, 0, 25, 0,
+ 0, 0, 21, 0, 0, 1,
+ 50, 0, 0, 9, 114, 0,
+ 16, 0, 21, 0, 0, 0,
+ 70, 2, 16, 0, 24, 0,
+ 0, 0, 246, 15, 16, 0,
+ 23, 0, 0, 0, 70, 2,
+ 16, 0, 21, 0, 0, 0,
+ 168, 0, 0, 9, 18, 240,
+ 17, 0, 3, 0, 0, 0,
+ 58, 0, 16, 0, 20, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 10, 0,
+ 16, 0, 21, 0, 0, 0,
+ 168, 0, 0, 9, 18, 240,
+ 17, 0, 3, 0, 0, 0,
+ 10, 0, 16, 0, 22, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 26, 0,
+ 16, 0, 21, 0, 0, 0,
+ 168, 0, 0, 9, 18, 240,
+ 17, 0, 3, 0, 0, 0,
+ 26, 0, 16, 0, 22, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 42, 0,
+ 16, 0, 21, 0, 0, 0,
+ 168, 0, 0, 9, 18, 240,
+ 17, 0, 3, 0, 0, 0,
+ 42, 0, 16, 0, 22, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 58, 0,
+ 16, 0, 21, 0, 0, 0,
+ 21, 0, 0, 1, 54, 0,
+ 0, 5, 130, 0, 16, 0,
+ 20, 0, 0, 0, 58, 0,
+ 16, 0, 22, 0, 0, 0,
+ 22, 0, 0, 1, 21, 0,
+ 0, 1, 27, 0, 0, 5,
+ 34, 0, 16, 0, 19, 0,
+ 0, 0, 10, 0, 16, 0,
+ 17, 0, 0, 0, 167, 0,
+ 0, 9, 18, 0, 16, 0,
+ 20, 0, 0, 0, 26, 0,
+ 16, 0, 19, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 30, 0,
+ 0, 10, 114, 0, 16, 0,
+ 21, 0, 0, 0, 86, 5,
+ 16, 0, 19, 0, 0, 0,
+ 2, 64, 0, 0, 183, 7,
+ 0, 0, 110, 15, 0, 0,
+ 37, 23, 0, 0, 0, 0,
+ 0, 0, 167, 0, 0, 9,
+ 34, 0, 16, 0, 20, 0,
+ 0, 0, 10, 0, 16, 0,
+ 21, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 3, 0,
+ 0, 0, 167, 0, 0, 9,
+ 66, 0, 16, 0, 20, 0,
+ 0, 0, 26, 0, 16, 0,
+ 21, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 3, 0,
+ 0, 0, 167, 0, 0, 9,
+ 66, 0, 16, 0, 19, 0,
+ 0, 0, 42, 0, 16, 0,
+ 21, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 3, 0,
+ 0, 0, 165, 0, 0, 7,
+ 130, 0, 16, 0, 19, 0,
+ 0, 0, 1, 64, 0, 0,
+ 68, 0, 0, 0, 6, 240,
+ 17, 0, 0, 0, 0, 0,
+ 165, 0, 0, 7, 130, 0,
+ 16, 0, 20, 0, 0, 0,
+ 1, 64, 0, 0, 64, 0,
+ 0, 0, 6, 240, 17, 0,
+ 1, 0, 0, 0, 54, 0,
+ 0, 8, 114, 0, 16, 0,
+ 22, 0, 0, 0, 2, 64,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 54, 0, 0, 8, 50, 0,
+ 16, 0, 25, 0, 0, 0,
+ 2, 64, 0, 0, 0, 0,
+ 0, 0, 255, 255, 127, 127,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 54, 0, 0, 5,
+ 130, 0, 16, 0, 21, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 48, 0,
+ 0, 1, 80, 0, 0, 7,
+ 130, 0, 16, 0, 22, 0,
+ 0, 0, 58, 0, 16, 0,
+ 21, 0, 0, 0, 58, 0,
+ 16, 0, 19, 0, 0, 0,
+ 3, 0, 4, 3, 58, 0,
+ 16, 0, 22, 0, 0, 0,
+ 35, 0, 0, 9, 130, 0,
+ 16, 0, 22, 0, 0, 0,
+ 1, 64, 0, 0, 3, 0,
+ 0, 0, 58, 0, 16, 0,
+ 21, 0, 0, 0, 58, 0,
+ 16, 0, 20, 0, 0, 0,
+ 167, 0, 0, 139, 2, 99,
+ 0, 128, 131, 153, 25, 0,
+ 114, 0, 16, 0, 26, 0,
+ 0, 0, 58, 0, 16, 0,
+ 22, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 70, 114, 16, 0, 10, 0,
+ 0, 0, 30, 0, 0, 10,
+ 194, 0, 16, 0, 25, 0,
+ 0, 0, 246, 15, 16, 0,
+ 22, 0, 0, 0, 2, 64,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 1, 0,
+ 0, 0, 2, 0, 0, 0,
+ 167, 0, 0, 139, 2, 99,
+ 0, 128, 131, 153, 25, 0,
+ 114, 0, 16, 0, 28, 0,
+ 0, 0, 42, 0, 16, 0,
+ 25, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 70, 114, 16, 0, 10, 0,
+ 0, 0, 167, 0, 0, 139,
+ 2, 99, 0, 128, 131, 153,
+ 25, 0, 114, 0, 16, 0,
+ 29, 0, 0, 0, 58, 0,
+ 16, 0, 25, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 70, 114, 16, 0,
+ 10, 0, 0, 0, 0, 0,
+ 0, 8, 114, 0, 16, 0,
+ 28, 0, 0, 0, 70, 2,
+ 16, 128, 65, 0, 0, 0,
+ 26, 0, 0, 0, 70, 2,
+ 16, 0, 28, 0, 0, 0,
+ 0, 0, 0, 8, 114, 0,
+ 16, 0, 29, 0, 0, 0,
+ 70, 2, 16, 128, 65, 0,
+ 0, 0, 26, 0, 0, 0,
+ 70, 2, 16, 0, 29, 0,
+ 0, 0, 56, 0, 0, 7,
+ 114, 0, 16, 0, 30, 0,
+ 0, 0, 38, 9, 16, 0,
+ 28, 0, 0, 0, 150, 4,
+ 16, 0, 29, 0, 0, 0,
+ 50, 0, 0, 10, 114, 0,
+ 16, 0, 30, 0, 0, 0,
+ 150, 4, 16, 0, 28, 0,
+ 0, 0, 38, 9, 16, 0,
+ 29, 0, 0, 0, 70, 2,
+ 16, 128, 65, 0, 0, 0,
+ 30, 0, 0, 0, 16, 0,
+ 0, 7, 130, 0, 16, 0,
+ 22, 0, 0, 0, 70, 2,
+ 16, 0, 30, 0, 0, 0,
+ 70, 2, 16, 0, 30, 0,
+ 0, 0, 68, 0, 0, 5,
+ 130, 0, 16, 0, 22, 0,
+ 0, 0, 58, 0, 16, 0,
+ 22, 0, 0, 0, 56, 0,
+ 0, 7, 114, 0, 16, 0,
+ 30, 0, 0, 0, 246, 15,
+ 16, 0, 22, 0, 0, 0,
+ 70, 2, 16, 0, 30, 0,
+ 0, 0, 16, 0, 0, 7,
+ 130, 0, 16, 0, 22, 0,
+ 0, 0, 70, 2, 16, 0,
+ 28, 0, 0, 0, 70, 2,
+ 16, 0, 29, 0, 0, 0,
+ 16, 0, 0, 7, 130, 0,
+ 16, 0, 23, 0, 0, 0,
+ 70, 2, 16, 0, 28, 0,
+ 0, 0, 70, 2, 16, 0,
+ 28, 0, 0, 0, 16, 0,
+ 0, 7, 66, 0, 16, 0,
+ 25, 0, 0, 0, 70, 2,
+ 16, 0, 29, 0, 0, 0,
+ 70, 2, 16, 0, 29, 0,
+ 0, 0, 56, 0, 0, 7,
+ 130, 0, 16, 0, 25, 0,
+ 0, 0, 58, 0, 16, 0,
+ 22, 0, 0, 0, 58, 0,
+ 16, 0, 22, 0, 0, 0,
+ 50, 0, 0, 10, 130, 0,
+ 16, 0, 25, 0, 0, 0,
+ 58, 0, 16, 0, 23, 0,
+ 0, 0, 42, 0, 16, 0,
+ 25, 0, 0, 0, 58, 0,
+ 16, 128, 65, 0, 0, 0,
+ 25, 0, 0, 0, 14, 0,
+ 0, 10, 130, 0, 16, 0,
+ 25, 0, 0, 0, 2, 64,
+ 0, 0, 0, 0, 128, 63,
+ 0, 0, 128, 63, 0, 0,
+ 128, 63, 0, 0, 128, 63,
+ 58, 0, 16, 0, 25, 0,
+ 0, 0, 0, 0, 0, 7,
+ 130, 0, 16, 0, 26, 0,
+ 0, 0, 58, 0, 16, 0,
+ 23, 0, 0, 0, 42, 0,
+ 16, 0, 25, 0, 0, 0,
+ 50, 0, 0, 9, 130, 0,
+ 16, 0, 26, 0, 0, 0,
+ 58, 0, 16, 0, 22, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 192, 58, 0,
+ 16, 0, 26, 0, 0, 0,
+ 14, 0, 0, 10, 130, 0,
+ 16, 0, 26, 0, 0, 0,
+ 2, 64, 0, 0, 0, 0,
+ 128, 63, 0, 0, 128, 63,
+ 0, 0, 128, 63, 0, 0,
+ 128, 63, 58, 0, 16, 0,
+ 26, 0, 0, 0, 14, 0,
+ 0, 10, 130, 0, 16, 0,
+ 28, 0, 0, 0, 2, 64,
+ 0, 0, 0, 0, 128, 63,
+ 0, 0, 128, 63, 0, 0,
+ 128, 63, 0, 0, 128, 63,
+ 58, 0, 16, 0, 23, 0,
+ 0, 0, 14, 0, 0, 10,
+ 130, 0, 16, 0, 29, 0,
+ 0, 0, 2, 64, 0, 0,
+ 0, 0, 128, 63, 0, 0,
+ 128, 63, 0, 0, 128, 63,
+ 0, 0, 128, 63, 42, 0,
+ 16, 0, 25, 0, 0, 0,
+ 0, 0, 0, 8, 114, 0,
+ 16, 0, 26, 0, 0, 0,
+ 70, 2, 16, 0, 20, 0,
+ 0, 0, 70, 2, 16, 128,
+ 65, 0, 0, 0, 26, 0,
+ 0, 0, 16, 0, 0, 7,
+ 130, 0, 16, 0, 30, 0,
+ 0, 0, 70, 2, 16, 0,
+ 26, 0, 0, 0, 70, 2,
+ 16, 0, 28, 0, 0, 0,
+ 16, 0, 0, 7, 18, 0,
+ 16, 0, 31, 0, 0, 0,
+ 70, 2, 16, 0, 26, 0,
+ 0, 0, 70, 2, 16, 0,
+ 29, 0, 0, 0, 16, 0,
+ 0, 7, 18, 0, 16, 0,
+ 32, 0, 0, 0, 70, 2,
+ 16, 0, 26, 0, 0, 0,
+ 70, 2, 16, 0, 30, 0,
+ 0, 0, 56, 0, 0, 7,
+ 34, 0, 16, 0, 31, 0,
+ 0, 0, 58, 0, 16, 0,
+ 22, 0, 0, 0, 10, 0,
+ 16, 0, 31, 0, 0, 0,
+ 50, 0, 0, 10, 34, 0,
+ 16, 0, 31, 0, 0, 0,
+ 42, 0, 16, 0, 25, 0,
+ 0, 0, 58, 0, 16, 0,
+ 30, 0, 0, 0, 26, 0,
+ 16, 128, 65, 0, 0, 0,
+ 31, 0, 0, 0, 56, 0,
+ 0, 7, 66, 0, 16, 0,
+ 31, 0, 0, 0, 58, 0,
+ 16, 0, 22, 0, 0, 0,
+ 58, 0, 16, 0, 30, 0,
+ 0, 0, 50, 0, 0, 10,
+ 130, 0, 16, 0, 23, 0,
+ 0, 0, 58, 0, 16, 0,
+ 23, 0, 0, 0, 10, 0,
+ 16, 0, 31, 0, 0, 0,
+ 42, 0, 16, 128, 65, 0,
+ 0, 0, 31, 0, 0, 0,
+ 49, 0, 0, 7, 66, 0,
+ 16, 0, 31, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 58, 0, 16, 0,
+ 23, 0, 0, 0, 56, 0,
+ 0, 7, 34, 0, 16, 0,
+ 31, 0, 0, 0, 58, 0,
+ 16, 0, 25, 0, 0, 0,
+ 26, 0, 16, 0, 31, 0,
+ 0, 0, 56, 0, 0, 7,
+ 130, 0, 16, 0, 28, 0,
+ 0, 0, 58, 0, 16, 0,
+ 28, 0, 0, 0, 58, 0,
+ 16, 0, 30, 0, 0, 0,
+ 55, 0, 0, 9, 130, 0,
+ 16, 0, 28, 0, 0, 0,
+ 42, 0, 16, 0, 31, 0,
+ 0, 0, 26, 0, 16, 0,
+ 31, 0, 0, 0, 58, 0,
+ 16, 0, 28, 0, 0, 0,
+ 49, 0, 0, 7, 34, 0,
+ 16, 0, 31, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 58, 0, 16, 0,
+ 28, 0, 0, 0, 56, 0,
+ 0, 7, 130, 0, 16, 0,
+ 23, 0, 0, 0, 58, 0,
+ 16, 0, 25, 0, 0, 0,
+ 58, 0, 16, 0, 23, 0,
+ 0, 0, 56, 0, 0, 7,
+ 130, 0, 16, 0, 25, 0,
+ 0, 0, 58, 0, 16, 0,
+ 29, 0, 0, 0, 10, 0,
+ 16, 0, 31, 0, 0, 0,
+ 55, 0, 0, 9, 130, 0,
+ 16, 0, 23, 0, 0, 0,
+ 26, 0, 16, 0, 31, 0,
+ 0, 0, 58, 0, 16, 0,
+ 23, 0, 0, 0, 58, 0,
+ 16, 0, 25, 0, 0, 0,
+ 0, 0, 0, 7, 130, 0,
+ 16, 0, 25, 0, 0, 0,
+ 58, 0, 16, 0, 23, 0,
+ 0, 0, 58, 0, 16, 0,
+ 28, 0, 0, 0, 49, 0,
+ 0, 7, 130, 0, 16, 0,
+ 25, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 128, 63,
+ 58, 0, 16, 0, 25, 0,
+ 0, 0, 0, 0, 0, 8,
+ 130, 0, 16, 0, 22, 0,
+ 0, 0, 58, 0, 16, 128,
+ 65, 0, 0, 0, 22, 0,
+ 0, 0, 42, 0, 16, 0,
+ 25, 0, 0, 0, 0, 0,
+ 0, 7, 130, 0, 16, 0,
+ 22, 0, 0, 0, 58, 0,
+ 16, 0, 30, 0, 0, 0,
+ 58, 0, 16, 0, 22, 0,
+ 0, 0, 0, 0, 0, 8,
+ 130, 0, 16, 0, 22, 0,
+ 0, 0, 10, 0, 16, 128,
+ 65, 0, 0, 0, 31, 0,
+ 0, 0, 58, 0, 16, 0,
+ 22, 0, 0, 0, 56, 0,
+ 0, 7, 130, 0, 16, 0,
+ 22, 0, 0, 0, 58, 0,
+ 16, 0, 26, 0, 0, 0,
+ 58, 0, 16, 0, 22, 0,
+ 0, 0, 55, 32, 0, 9,
+ 130, 0, 16, 0, 22, 0,
+ 0, 0, 58, 0, 16, 0,
+ 25, 0, 0, 0, 58, 0,
+ 16, 0, 22, 0, 0, 0,
+ 58, 0, 16, 0, 28, 0,
+ 0, 0, 0, 0, 0, 8,
+ 66, 0, 16, 0, 25, 0,
+ 0, 0, 58, 0, 16, 128,
+ 65, 0, 0, 0, 22, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 128, 63, 51, 0,
+ 0, 7, 130, 0, 16, 0,
+ 23, 0, 0, 0, 58, 0,
+ 16, 0, 23, 0, 0, 0,
+ 42, 0, 16, 0, 25, 0,
+ 0, 0, 52, 0, 0, 7,
+ 130, 0, 16, 0, 23, 0,
+ 0, 0, 58, 0, 16, 0,
+ 23, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 56, 0, 0, 7, 114, 0,
+ 16, 0, 29, 0, 0, 0,
+ 246, 15, 16, 0, 23, 0,
+ 0, 0, 70, 2, 16, 0,
+ 29, 0, 0, 0, 50, 0,
+ 0, 9, 114, 0, 16, 0,
+ 28, 0, 0, 0, 70, 2,
+ 16, 0, 28, 0, 0, 0,
+ 246, 15, 16, 0, 22, 0,
+ 0, 0, 70, 2, 16, 0,
+ 29, 0, 0, 0, 0, 0,
+ 0, 8, 114, 0, 16, 0,
+ 26, 0, 0, 0, 70, 2,
+ 16, 0, 26, 0, 0, 0,
+ 70, 2, 16, 128, 65, 0,
+ 0, 0, 28, 0, 0, 0,
+ 16, 0, 0, 7, 130, 0,
+ 16, 0, 22, 0, 0, 0,
+ 70, 2, 16, 0, 26, 0,
+ 0, 0, 70, 2, 16, 0,
+ 26, 0, 0, 0, 49, 0,
+ 0, 7, 130, 0, 16, 0,
+ 23, 0, 0, 0, 10, 0,
+ 16, 0, 32, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 56, 0, 0, 7,
+ 66, 0, 16, 0, 25, 0,
+ 0, 0, 58, 0, 16, 0,
+ 22, 0, 0, 0, 1, 64,
+ 0, 0, 71, 3, 128, 63,
+ 55, 0, 0, 9, 34, 0,
+ 16, 0, 32, 0, 0, 0,
+ 58, 0, 16, 0, 23, 0,
+ 0, 0, 42, 0, 16, 0,
+ 25, 0, 0, 0, 58, 0,
+ 16, 0, 22, 0, 0, 0,
+ 49, 0, 0, 7, 130, 0,
+ 16, 0, 22, 0, 0, 0,
+ 26, 0, 16, 0, 32, 0,
+ 0, 0, 26, 0, 16, 0,
+ 25, 0, 0, 0, 55, 0,
+ 0, 9, 114, 0, 16, 0,
+ 22, 0, 0, 0, 246, 15,
+ 16, 0, 22, 0, 0, 0,
+ 70, 2, 16, 0, 30, 0,
+ 0, 0, 70, 2, 16, 0,
+ 22, 0, 0, 0, 55, 0,
+ 0, 9, 50, 0, 16, 0,
+ 25, 0, 0, 0, 246, 15,
+ 16, 0, 22, 0, 0, 0,
+ 70, 0, 16, 0, 32, 0,
+ 0, 0, 70, 0, 16, 0,
+ 25, 0, 0, 0, 30, 0,
+ 0, 7, 130, 0, 16, 0,
+ 21, 0, 0, 0, 58, 0,
+ 16, 0, 21, 0, 0, 0,
+ 1, 64, 0, 0, 1, 0,
+ 0, 0, 22, 0, 0, 1,
+ 49, 0, 0, 7, 130, 0,
+ 16, 0, 19, 0, 0, 0,
+ 10, 0, 16, 0, 25, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 31, 0,
+ 4, 3, 58, 0, 16, 0,
+ 19, 0, 0, 0, 50, 0,
+ 0, 10, 114, 0, 16, 0,
+ 20, 0, 0, 0, 70, 2,
+ 16, 128, 65, 0, 0, 0,
+ 22, 0, 0, 0, 6, 0,
+ 16, 0, 25, 0, 0, 0,
+ 70, 2, 16, 0, 20, 0,
+ 0, 0, 168, 0, 0, 9,
+ 18, 240, 17, 0, 3, 0,
+ 0, 0, 26, 0, 16, 0,
+ 19, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 10, 0, 16, 0, 20, 0,
+ 0, 0, 168, 0, 0, 9,
+ 18, 240, 17, 0, 3, 0,
+ 0, 0, 10, 0, 16, 0,
+ 21, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 26, 0, 16, 0, 20, 0,
+ 0, 0, 168, 0, 0, 9,
+ 18, 240, 17, 0, 3, 0,
+ 0, 0, 26, 0, 16, 0,
+ 21, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 42, 0, 16, 0, 20, 0,
+ 0, 0, 168, 0, 0, 9,
+ 18, 240, 17, 0, 3, 0,
+ 0, 0, 42, 0, 16, 0,
+ 21, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 42, 0, 16, 0, 19, 0,
+ 0, 0, 21, 0, 0, 1,
+ 165, 0, 0, 7, 34, 0,
+ 16, 0, 19, 0, 0, 0,
+ 1, 64, 0, 0, 72, 0,
+ 0, 0, 6, 240, 17, 0,
+ 0, 0, 0, 0, 31, 0,
+ 4, 3, 26, 0, 16, 0,
+ 19, 0, 0, 0, 165, 0,
+ 0, 7, 34, 0, 16, 0,
+ 19, 0, 0, 0, 1, 64,
+ 0, 0, 80, 0, 0, 0,
+ 6, 240, 17, 0, 0, 0,
+ 0, 0, 49, 0, 0, 7,
+ 66, 0, 16, 0, 19, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 26, 0,
+ 16, 0, 19, 0, 0, 0,
+ 165, 0, 0, 7, 130, 0,
+ 16, 0, 19, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 0, 0, 0, 0, 165, 0,
+ 0, 7, 50, 0, 16, 0,
+ 20, 0, 0, 0, 1, 64,
+ 0, 0, 48, 0, 0, 0,
+ 70, 240, 17, 0, 1, 0,
+ 0, 0, 165, 0, 0, 7,
+ 194, 0, 16, 0, 20, 0,
+ 0, 0, 1, 64, 0, 0,
+ 44, 0, 0, 0, 6, 244,
+ 17, 0, 0, 0, 0, 0,
+ 54, 0, 0, 4, 18, 0,
+ 16, 0, 21, 0, 0, 0,
+ 10, 32, 2, 0, 48, 0,
+ 0, 1, 80, 0, 0, 7,
+ 34, 0, 16, 0, 21, 0,
+ 0, 0, 10, 0, 16, 0,
+ 21, 0, 0, 0, 58, 0,
+ 16, 0, 19, 0, 0, 0,
+ 3, 0, 4, 3, 26, 0,
+ 16, 0, 21, 0, 0, 0,
+ 167, 0, 0, 9, 18, 0,
+ 16, 0, 26, 0, 0, 0,
+ 10, 0, 16, 0, 21, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 30, 0, 0, 10, 242, 0,
+ 16, 0, 28, 0, 0, 0,
+ 6, 0, 16, 0, 21, 0,
+ 0, 0, 2, 64, 0, 0,
+ 183, 7, 0, 0, 110, 15,
+ 0, 0, 37, 23, 0, 0,
+ 0, 4, 0, 0, 167, 0,
+ 0, 9, 34, 0, 16, 0,
+ 26, 0, 0, 0, 10, 0,
+ 16, 0, 28, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 167, 0,
+ 0, 9, 66, 0, 16, 0,
+ 26, 0, 0, 0, 26, 0,
+ 16, 0, 28, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 167, 0,
+ 0, 9, 34, 0, 16, 0,
+ 21, 0, 0, 0, 42, 0,
+ 16, 0, 28, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 30, 0,
+ 0, 7, 66, 0, 16, 0,
+ 21, 0, 0, 0, 10, 0,
+ 16, 0, 0, 0, 0, 0,
+ 10, 0, 16, 0, 21, 0,
+ 0, 0, 167, 0, 0, 139,
+ 2, 131, 0, 128, 131, 153,
+ 25, 0, 226, 0, 16, 0,
+ 25, 0, 0, 0, 42, 0,
+ 16, 0, 21, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 233, 17, 0,
+ 0, 0, 0, 0, 54, 0,
+ 0, 5, 114, 0, 16, 0,
+ 29, 0, 0, 0, 70, 2,
+ 16, 0, 26, 0, 0, 0,
+ 54, 0, 0, 8, 114, 0,
+ 16, 0, 30, 0, 0, 0,
+ 2, 64, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 54, 0, 0, 8,
+ 242, 0, 16, 0, 31, 0,
+ 0, 0, 2, 64, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 54, 0,
+ 0, 5, 130, 0, 16, 0,
+ 21, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 48, 0, 0, 1, 80, 0,
+ 0, 7, 130, 0, 16, 0,
+ 22, 0, 0, 0, 58, 0,
+ 16, 0, 21, 0, 0, 0,
+ 42, 0, 16, 0, 20, 0,
+ 0, 0, 3, 0, 4, 3,
+ 58, 0, 16, 0, 22, 0,
+ 0, 0, 30, 0, 0, 7,
+ 130, 0, 16, 0, 22, 0,
+ 0, 0, 58, 0, 16, 0,
+ 20, 0, 0, 0, 58, 0,
+ 16, 0, 21, 0, 0, 0,
+ 167, 0, 0, 139, 2, 67,
+ 0, 128, 131, 153, 25, 0,
+ 50, 0, 16, 0, 32, 0,
+ 0, 0, 58, 0, 16, 0,
+ 22, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 70, 112, 16, 0, 6, 0,
+ 0, 0, 30, 0, 0, 7,
+ 242, 0, 16, 0, 32, 0,
+ 0, 0, 70, 4, 16, 0,
+ 20, 0, 0, 0, 6, 5,
+ 16, 0, 32, 0, 0, 0,
+ 167, 0, 0, 139, 2, 131,
+ 0, 128, 131, 153, 25, 0,
+ 242, 0, 16, 0, 33, 0,
+ 0, 0, 10, 0, 16, 0,
+ 32, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 70, 126, 16, 0, 7, 0,
+ 0, 0, 167, 0, 0, 139,
+ 2, 131, 0, 128, 131, 153,
+ 25, 0, 242, 0, 16, 0,
+ 34, 0, 0, 0, 26, 0,
+ 16, 0, 32, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 70, 126, 16, 0,
+ 7, 0, 0, 0, 167, 0,
+ 0, 139, 2, 131, 0, 128,
+ 131, 153, 25, 0, 242, 0,
+ 16, 0, 35, 0, 0, 0,
+ 42, 0, 16, 0, 32, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 70, 126,
+ 16, 0, 7, 0, 0, 0,
+ 167, 0, 0, 139, 2, 131,
+ 0, 128, 131, 153, 25, 0,
+ 242, 0, 16, 0, 32, 0,
+ 0, 0, 58, 0, 16, 0,
+ 32, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 70, 126, 16, 0, 7, 0,
+ 0, 0, 0, 0, 0, 8,
+ 242, 0, 16, 0, 36, 0,
+ 0, 0, 70, 14, 16, 128,
+ 65, 0, 0, 0, 33, 0,
+ 0, 0, 70, 14, 16, 0,
+ 34, 0, 0, 0, 50, 0,
+ 0, 9, 242, 0, 16, 0,
+ 37, 0, 0, 0, 6, 0,
+ 16, 0, 19, 0, 0, 0,
+ 70, 14, 16, 0, 36, 0,
+ 0, 0, 70, 14, 16, 0,
+ 33, 0, 0, 0, 0, 0,
+ 0, 8, 242, 0, 16, 0,
+ 38, 0, 0, 0, 70, 14,
+ 16, 128, 65, 0, 0, 0,
+ 35, 0, 0, 0, 70, 14,
+ 16, 0, 32, 0, 0, 0,
+ 50, 0, 0, 9, 242, 0,
+ 16, 0, 39, 0, 0, 0,
+ 6, 0, 16, 0, 19, 0,
+ 0, 0, 70, 14, 16, 0,
+ 38, 0, 0, 0, 70, 14,
+ 16, 0, 35, 0, 0, 0,
+ 52, 0, 0, 7, 130, 0,
+ 16, 0, 37, 0, 0, 0,
+ 58, 0, 16, 0, 37, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 52, 0,
+ 0, 7, 130, 0, 16, 0,
+ 39, 0, 0, 0, 58, 0,
+ 16, 0, 39, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 8,
+ 242, 0, 16, 0, 40, 0,
+ 0, 0, 70, 14, 16, 128,
+ 65, 0, 0, 0, 37, 0,
+ 0, 0, 70, 14, 16, 0,
+ 39, 0, 0, 0, 56, 0,
+ 0, 10, 242, 0, 16, 0,
+ 40, 0, 0, 0, 70, 14,
+ 16, 0, 40, 0, 0, 0,
+ 2, 64, 0, 0, 0, 0,
+ 0, 63, 0, 0, 0, 63,
+ 0, 0, 0, 63, 0, 0,
+ 0, 63, 16, 0, 0, 7,
+ 130, 0, 16, 0, 22, 0,
+ 0, 0, 70, 2, 16, 0,
+ 40, 0, 0, 0, 70, 2,
+ 16, 0, 40, 0, 0, 0,
+ 29, 0, 0, 7, 130, 0,
+ 16, 0, 23, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 58, 0, 16, 0,
+ 22, 0, 0, 0, 31, 0,
+ 4, 3, 58, 0, 16, 0,
+ 23, 0, 0, 0, 30, 0,
+ 0, 7, 130, 0, 16, 0,
+ 23, 0, 0, 0, 58, 0,
+ 16, 0, 21, 0, 0, 0,
+ 1, 64, 0, 0, 1, 0,
+ 0, 0, 54, 0, 0, 5,
+ 130, 0, 16, 0, 21, 0,
+ 0, 0, 58, 0, 16, 0,
+ 23, 0, 0, 0, 7, 0,
+ 0, 1, 21, 0, 0, 1,
+ 50, 0, 0, 9, 242, 0,
+ 16, 0, 36, 0, 0, 0,
+ 6, 0, 16, 0, 17, 0,
+ 0, 0, 70, 14, 16, 0,
+ 36, 0, 0, 0, 70, 14,
+ 16, 0, 33, 0, 0, 0,
+ 50, 0, 0, 9, 242, 0,
+ 16, 0, 38, 0, 0, 0,
+ 6, 0, 16, 0, 17, 0,
+ 0, 0, 70, 14, 16, 0,
+ 38, 0, 0, 0, 70, 14,
+ 16, 0, 35, 0, 0, 0,
+ 52, 0, 0, 7, 130, 0,
+ 16, 0, 36, 0, 0, 0,
+ 58, 0, 16, 0, 36, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 52, 0,
+ 0, 7, 130, 0, 16, 0,
+ 38, 0, 0, 0, 58, 0,
+ 16, 0, 38, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 8,
+ 242, 0, 16, 0, 41, 0,
+ 0, 0, 70, 14, 16, 128,
+ 65, 0, 0, 0, 36, 0,
+ 0, 0, 70, 14, 16, 0,
+ 38, 0, 0, 0, 56, 0,
+ 0, 10, 242, 0, 16, 0,
+ 42, 0, 0, 0, 70, 14,
+ 16, 0, 41, 0, 0, 0,
+ 2, 64, 0, 0, 0, 0,
+ 0, 63, 0, 0, 0, 63,
+ 0, 0, 0, 63, 0, 0,
+ 0, 63, 16, 0, 0, 7,
+ 130, 0, 16, 0, 23, 0,
+ 0, 0, 70, 2, 16, 0,
+ 42, 0, 0, 0, 70, 2,
+ 16, 0, 42, 0, 0, 0,
+ 50, 0, 0, 10, 130, 0,
+ 16, 0, 26, 0, 0, 0,
+ 58, 0, 16, 128, 65, 0,
+ 0, 0, 42, 0, 0, 0,
+ 58, 0, 16, 0, 42, 0,
+ 0, 0, 58, 0, 16, 0,
+ 23, 0, 0, 0, 29, 0,
+ 0, 7, 130, 0, 16, 0,
+ 29, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 58, 0, 16, 0, 26, 0,
+ 0, 0, 31, 0, 4, 3,
+ 58, 0, 16, 0, 29, 0,
+ 0, 0, 30, 0, 0, 7,
+ 130, 0, 16, 0, 29, 0,
+ 0, 0, 58, 0, 16, 0,
+ 21, 0, 0, 0, 1, 64,
+ 0, 0, 1, 0, 0, 0,
+ 54, 0, 0, 5, 130, 0,
+ 16, 0, 21, 0, 0, 0,
+ 58, 0, 16, 0, 29, 0,
+ 0, 0, 7, 0, 0, 1,
+ 21, 0, 0, 1, 0, 0,
+ 0, 7, 114, 0, 16, 0,
+ 41, 0, 0, 0, 38, 9,
+ 16, 0, 37, 0, 0, 0,
+ 38, 9, 16, 0, 39, 0,
+ 0, 0, 50, 0, 0, 13,
+ 114, 0, 16, 0, 41, 0,
+ 0, 0, 70, 2, 16, 128,
+ 65, 0, 0, 0, 41, 0,
+ 0, 0, 2, 64, 0, 0,
+ 0, 0, 0, 63, 0, 0,
+ 0, 63, 0, 0, 0, 63,
+ 0, 0, 0, 0, 118, 14,
+ 16, 0, 25, 0, 0, 0,
+ 50, 0, 0, 10, 130, 0,
+ 16, 0, 29, 0, 0, 0,
+ 58, 0, 16, 128, 65, 0,
+ 0, 0, 40, 0, 0, 0,
+ 58, 0, 16, 0, 40, 0,
+ 0, 0, 58, 0, 16, 0,
+ 22, 0, 0, 0, 68, 0,
+ 0, 5, 130, 0, 16, 0,
+ 22, 0, 0, 0, 58, 0,
+ 16, 0, 22, 0, 0, 0,
+ 68, 0, 0, 5, 130, 0,
+ 16, 0, 29, 0, 0, 0,
+ 58, 0, 16, 0, 29, 0,
+ 0, 0, 56, 0, 0, 7,
+ 114, 0, 16, 0, 40, 0,
+ 0, 0, 246, 15, 16, 0,
+ 22, 0, 0, 0, 70, 2,
+ 16, 0, 40, 0, 0, 0,
+ 56, 0, 0, 7, 130, 0,
+ 16, 0, 22, 0, 0, 0,
+ 58, 0, 16, 0, 29, 0,
+ 0, 0, 58, 0, 16, 0,
+ 40, 0, 0, 0, 56, 0,
+ 0, 7, 114, 0, 16, 0,
+ 43, 0, 0, 0, 150, 4,
+ 16, 0, 40, 0, 0, 0,
+ 70, 2, 16, 0, 41, 0,
+ 0, 0, 50, 0, 0, 10,
+ 114, 0, 16, 0, 41, 0,
+ 0, 0, 38, 9, 16, 0,
+ 41, 0, 0, 0, 38, 9,
+ 16, 0, 40, 0, 0, 0,
+ 70, 2, 16, 128, 65, 0,
+ 0, 0, 43, 0, 0, 0,
+ 16, 0, 0, 7, 130, 0,
+ 16, 0, 29, 0, 0, 0,
+ 150, 7, 16, 0, 25, 0,
+ 0, 0, 70, 2, 16, 0,
+ 40, 0, 0, 0, 0, 0,
+ 0, 7, 114, 0, 16, 0,
+ 40, 0, 0, 0, 70, 2,
+ 16, 0, 36, 0, 0, 0,
+ 70, 2, 16, 0, 38, 0,
+ 0, 0, 50, 0, 0, 13,
+ 114, 0, 16, 0, 43, 0,
+ 0, 0, 70, 2, 16, 128,
+ 65, 0, 0, 0, 40, 0,
+ 0, 0, 2, 64, 0, 0,
+ 0, 0, 0, 63, 0, 0,
+ 0, 63, 0, 0, 0, 63,
+ 0, 0, 0, 0, 70, 2,
+ 16, 0, 29, 0, 0, 0,
+ 68, 0, 0, 5, 130, 0,
+ 16, 0, 30, 0, 0, 0,
+ 58, 0, 16, 0, 23, 0,
+ 0, 0, 68, 0, 0, 5,
+ 130, 0, 16, 0, 26, 0,
+ 0, 0, 58, 0, 16, 0,
+ 26, 0, 0, 0, 56, 0,
+ 0, 7, 130, 0, 16, 0,
+ 23, 0, 0, 0, 58, 0,
+ 16, 0, 23, 0, 0, 0,
+ 58, 0, 16, 0, 30, 0,
+ 0, 0, 50, 0, 0, 9,
+ 130, 0, 16, 0, 32, 0,
+ 0, 0, 58, 0, 16, 0,
+ 41, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 63,
+ 58, 0, 16, 0, 36, 0,
+ 0, 0, 56, 0, 0, 7,
+ 130, 0, 16, 0, 32, 0,
+ 0, 0, 58, 0, 16, 0,
+ 26, 0, 0, 0, 58, 0,
+ 16, 0, 32, 0, 0, 0,
+ 56, 0, 0, 7, 130, 0,
+ 16, 0, 32, 0, 0, 0,
+ 58, 0, 16, 0, 23, 0,
+ 0, 0, 58, 0, 16, 0,
+ 32, 0, 0, 0, 56, 0,
+ 0, 7, 114, 0, 16, 0,
+ 44, 0, 0, 0, 246, 15,
+ 16, 0, 30, 0, 0, 0,
+ 70, 2, 16, 0, 42, 0,
+ 0, 0, 56, 0, 0, 7,
+ 114, 0, 16, 0, 45, 0,
+ 0, 0, 38, 9, 16, 0,
+ 43, 0, 0, 0, 150, 4,
+ 16, 0, 44, 0, 0, 0,
+ 50, 0, 0, 10, 114, 0,
+ 16, 0, 45, 0, 0, 0,
+ 150, 4, 16, 0, 43, 0,
+ 0, 0, 38, 9, 16, 0,
+ 44, 0, 0, 0, 70, 2,
+ 16, 128, 65, 0, 0, 0,
+ 45, 0, 0, 0, 16, 0,
+ 0, 7, 130, 0, 16, 0,
+ 33, 0, 0, 0, 70, 2,
+ 16, 0, 29, 0, 0, 0,
+ 70, 2, 16, 0, 44, 0,
+ 0, 0, 16, 0, 0, 7,
+ 130, 0, 16, 0, 34, 0,
+ 0, 0, 70, 2, 16, 0,
+ 45, 0, 0, 0, 70, 2,
+ 16, 0, 45, 0, 0, 0,
+ 0, 0, 0, 7, 18, 0,
+ 16, 0, 46, 0, 0, 0,
+ 58, 0, 16, 0, 34, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 52, 50, 0,
+ 0, 9, 130, 0, 16, 0,
+ 29, 0, 0, 0, 58, 0,
+ 16, 0, 29, 0, 0, 0,
+ 58, 0, 16, 0, 22, 0,
+ 0, 0, 58, 0, 16, 0,
+ 32, 0, 0, 0, 52, 0,
+ 0, 7, 130, 0, 16, 0,
+ 29, 0, 0, 0, 58, 0,
+ 16, 0, 29, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 56, 0, 0, 7,
+ 130, 0, 16, 0, 26, 0,
+ 0, 0, 58, 0, 16, 0,
+ 26, 0, 0, 0, 58, 0,
+ 16, 0, 42, 0, 0, 0,
+ 50, 0, 0, 9, 130, 0,
+ 16, 0, 33, 0, 0, 0,
+ 58, 0, 16, 0, 33, 0,
+ 0, 0, 58, 0, 16, 0,
+ 26, 0, 0, 0, 58, 0,
+ 16, 0, 32, 0, 0, 0,
+ 52, 0, 0, 7, 34, 0,
+ 16, 0, 46, 0, 0, 0,
+ 58, 0, 16, 0, 33, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 16, 0,
+ 0, 7, 130, 0, 16, 0,
+ 33, 0, 0, 0, 70, 2,
+ 16, 0, 41, 0, 0, 0,
+ 70, 2, 16, 0, 41, 0,
+ 0, 0, 50, 0, 0, 10,
+ 130, 0, 16, 0, 33, 0,
+ 0, 0, 10, 0, 16, 128,
+ 65, 0, 0, 0, 41, 0,
+ 0, 0, 10, 0, 16, 0,
+ 41, 0, 0, 0, 58, 0,
+ 16, 0, 33, 0, 0, 0,
+ 50, 0, 0, 10, 130, 0,
+ 16, 0, 33, 0, 0, 0,
+ 58, 0, 16, 128, 65, 0,
+ 0, 0, 29, 0, 0, 0,
+ 58, 0, 16, 0, 29, 0,
+ 0, 0, 58, 0, 16, 0,
+ 33, 0, 0, 0, 16, 0,
+ 0, 7, 130, 0, 16, 0,
+ 34, 0, 0, 0, 70, 2,
+ 16, 0, 41, 0, 0, 0,
+ 70, 2, 16, 0, 45, 0,
+ 0, 0, 50, 0, 0, 10,
+ 130, 0, 16, 0, 34, 0,
+ 0, 0, 58, 0, 16, 128,
+ 65, 0, 0, 0, 29, 0,
+ 0, 0, 26, 0, 16, 0,
+ 46, 0, 0, 0, 58, 0,
+ 16, 0, 34, 0, 0, 0,
+ 50, 0, 0, 10, 130, 0,
+ 16, 0, 35, 0, 0, 0,
+ 26, 0, 16, 128, 65, 0,
+ 0, 0, 46, 0, 0, 0,
+ 26, 0, 16, 0, 46, 0,
+ 0, 0, 10, 0, 16, 0,
+ 46, 0, 0, 0, 56, 0,
+ 0, 7, 130, 0, 16, 0,
+ 36, 0, 0, 0, 58, 0,
+ 16, 0, 33, 0, 0, 0,
+ 58, 0, 16, 0, 35, 0,
+ 0, 0, 50, 0, 0, 10,
+ 130, 0, 16, 0, 36, 0,
+ 0, 0, 58, 0, 16, 0,
+ 34, 0, 0, 0, 58, 0,
+ 16, 0, 34, 0, 0, 0,
+ 58, 0, 16, 128, 65, 0,
+ 0, 0, 36, 0, 0, 0,
+ 75, 0, 0, 5, 130, 0,
+ 16, 0, 37, 0, 0, 0,
+ 58, 0, 16, 0, 36, 0,
+ 0, 0, 0, 0, 0, 8,
+ 130, 0, 16, 0, 33, 0,
+ 0, 0, 58, 0, 16, 128,
+ 65, 0, 0, 0, 33, 0,
+ 0, 0, 58, 0, 16, 0,
+ 34, 0, 0, 0, 0, 0,
+ 0, 8, 130, 0, 16, 0,
+ 34, 0, 0, 0, 58, 0,
+ 16, 0, 34, 0, 0, 0,
+ 58, 0, 16, 128, 65, 0,
+ 0, 0, 35, 0, 0, 0,
+ 0, 0, 0, 7, 130, 0,
+ 16, 0, 34, 0, 0, 0,
+ 58, 0, 16, 0, 33, 0,
+ 0, 0, 58, 0, 16, 0,
+ 34, 0, 0, 0, 0, 0,
+ 0, 7, 130, 0, 16, 0,
+ 35, 0, 0, 0, 58, 0,
+ 16, 0, 37, 0, 0, 0,
+ 58, 0, 16, 0, 33, 0,
+ 0, 0, 51, 0, 0, 7,
+ 130, 0, 16, 0, 35, 0,
+ 0, 0, 58, 0, 16, 0,
+ 35, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 14, 0, 0, 7, 130, 0,
+ 16, 0, 35, 0, 0, 0,
+ 58, 0, 16, 0, 35, 0,
+ 0, 0, 58, 0, 16, 0,
+ 34, 0, 0, 0, 49, 0,
+ 0, 7, 130, 0, 16, 0,
+ 38, 0, 0, 0, 58, 0,
+ 16, 0, 35, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 128, 63, 49, 0, 0, 7,
+ 130, 0, 16, 0, 39, 0,
+ 0, 0, 58, 0, 16, 0,
+ 33, 0, 0, 0, 58, 0,
+ 16, 0, 37, 0, 0, 0,
+ 1, 0, 0, 7, 130, 0,
+ 16, 0, 38, 0, 0, 0,
+ 58, 0, 16, 0, 38, 0,
+ 0, 0, 58, 0, 16, 0,
+ 39, 0, 0, 0, 56, 0,
+ 0, 7, 130, 0, 16, 0,
+ 33, 0, 0, 0, 58, 0,
+ 16, 0, 33, 0, 0, 0,
+ 58, 0, 16, 0, 34, 0,
+ 0, 0, 0, 0, 0, 8,
+ 130, 0, 16, 0, 39, 0,
+ 0, 0, 58, 0, 16, 128,
+ 65, 0, 0, 0, 29, 0,
+ 0, 0, 26, 0, 16, 0,
+ 46, 0, 0, 0, 50, 0,
+ 0, 9, 130, 0, 16, 0,
+ 29, 0, 0, 0, 58, 0,
+ 16, 0, 33, 0, 0, 0,
+ 58, 0, 16, 0, 39, 0,
+ 0, 0, 58, 0, 16, 0,
+ 29, 0, 0, 0, 56, 0,
+ 0, 7, 130, 0, 16, 0,
+ 29, 0, 0, 0, 58, 0,
+ 16, 0, 29, 0, 0, 0,
+ 58, 0, 16, 0, 29, 0,
+ 0, 0, 56, 0, 0, 7,
+ 130, 0, 16, 0, 29, 0,
+ 0, 0, 58, 0, 16, 0,
+ 34, 0, 0, 0, 58, 0,
+ 16, 0, 29, 0, 0, 0,
+ 56, 0, 0, 7, 130, 0,
+ 16, 0, 29, 0, 0, 0,
+ 58, 0, 16, 0, 29, 0,
+ 0, 0, 1, 64, 0, 0,
+ 236, 81, 184, 190, 49, 0,
+ 0, 7, 130, 0, 16, 0,
+ 29, 0, 0, 0, 58, 0,
+ 16, 0, 29, 0, 0, 0,
+ 58, 0, 16, 0, 36, 0,
+ 0, 0, 1, 0, 0, 7,
+ 130, 0, 16, 0, 29, 0,
+ 0, 0, 58, 0, 16, 0,
+ 29, 0, 0, 0, 58, 0,
+ 16, 0, 38, 0, 0, 0,
+ 49, 0, 0, 7, 130, 0,
+ 16, 0, 33, 0, 0, 0,
+ 58, 0, 16, 0, 34, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 180, 1, 0,
+ 0, 7, 130, 0, 16, 0,
+ 29, 0, 0, 0, 58, 0,
+ 16, 0, 29, 0, 0, 0,
+ 58, 0, 16, 0, 33, 0,
+ 0, 0, 0, 0, 0, 8,
+ 114, 0, 16, 0, 41, 0,
+ 0, 0, 150, 7, 16, 0,
+ 25, 0, 0, 0, 70, 2,
+ 16, 128, 65, 0, 0, 0,
+ 29, 0, 0, 0, 50, 0,
+ 0, 10, 114, 0, 16, 0,
+ 45, 0, 0, 0, 70, 2,
+ 16, 128, 65, 0, 0, 0,
+ 41, 0, 0, 0, 246, 15,
+ 16, 0, 35, 0, 0, 0,
+ 150, 7, 16, 0, 25, 0,
+ 0, 0, 56, 0, 0, 7,
+ 114, 0, 16, 0, 47, 0,
+ 0, 0, 246, 15, 16, 0,
+ 23, 0, 0, 0, 70, 2,
+ 16, 0, 42, 0, 0, 0,
+ 0, 0, 0, 8, 114, 0,
+ 16, 0, 37, 0, 0, 0,
+ 70, 2, 16, 128, 65, 0,
+ 0, 0, 37, 0, 0, 0,
+ 70, 2, 16, 0, 39, 0,
+ 0, 0, 56, 0, 0, 10,
+ 114, 0, 16, 0, 37, 0,
+ 0, 0, 70, 2, 16, 0,
+ 37, 0, 0, 0, 2, 64,
+ 0, 0, 0, 0, 0, 63,
+ 0, 0, 0, 63, 0, 0,
+ 0, 63, 0, 0, 0, 0,
+ 16, 0, 0, 7, 130, 0,
+ 16, 0, 33, 0, 0, 0,
+ 70, 2, 16, 0, 37, 0,
+ 0, 0, 70, 2, 16, 0,
+ 37, 0, 0, 0, 68, 0,
+ 0, 5, 130, 0, 16, 0,
+ 36, 0, 0, 0, 58, 0,
+ 16, 0, 33, 0, 0, 0,
+ 56, 0, 0, 7, 130, 0,
+ 16, 0, 33, 0, 0, 0,
+ 58, 0, 16, 0, 33, 0,
+ 0, 0, 58, 0, 16, 0,
+ 36, 0, 0, 0, 50, 0,
+ 0, 10, 114, 0, 16, 0,
+ 37, 0, 0, 0, 70, 2,
+ 16, 128, 65, 0, 0, 0,
+ 37, 0, 0, 0, 246, 15,
+ 16, 0, 33, 0, 0, 0,
+ 70, 2, 16, 0, 47, 0,
+ 0, 0, 0, 0, 0, 8,
+ 130, 0, 16, 0, 33, 0,
+ 0, 0, 58, 0, 16, 128,
+ 65, 0, 0, 0, 35, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 128, 63, 50, 0,
+ 0, 10, 114, 0, 16, 0,
+ 39, 0, 0, 0, 70, 2,
+ 16, 128, 65, 0, 0, 0,
+ 37, 0, 0, 0, 246, 15,
+ 16, 0, 33, 0, 0, 0,
+ 70, 2, 16, 0, 47, 0,
+ 0, 0, 56, 0, 0, 7,
+ 130, 0, 16, 0, 35, 0,
+ 0, 0, 58, 0, 16, 0,
+ 26, 0, 0, 0, 58, 0,
+ 16, 0, 35, 0, 0, 0,
+ 50, 0, 0, 9, 130, 0,
+ 16, 0, 22, 0, 0, 0,
+ 58, 0, 16, 0, 22, 0,
+ 0, 0, 58, 0, 16, 0,
+ 33, 0, 0, 0, 58, 0,
+ 16, 0, 35, 0, 0, 0,
+ 16, 0, 0, 7, 130, 0,
+ 16, 0, 35, 0, 0, 0,
+ 70, 2, 16, 0, 39, 0,
+ 0, 0, 70, 2, 16, 0,
+ 39, 0, 0, 0, 68, 0,
+ 0, 5, 130, 0, 16, 0,
+ 35, 0, 0, 0, 58, 0,
+ 16, 0, 35, 0, 0, 0,
+ 16, 0, 0, 7, 130, 0,
+ 16, 0, 36, 0, 0, 0,
+ 70, 2, 16, 0, 45, 0,
+ 0, 0, 70, 2, 16, 0,
+ 39, 0, 0, 0, 56, 0,
+ 0, 7, 130, 0, 16, 0,
+ 36, 0, 0, 0, 58, 0,
+ 16, 0, 35, 0, 0, 0,
+ 58, 0, 16, 0, 36, 0,
+ 0, 0, 16, 0, 0, 7,
+ 130, 0, 16, 0, 38, 0,
+ 0, 0, 70, 2, 16, 0,
+ 45, 0, 0, 0, 70, 2,
+ 16, 0, 45, 0, 0, 0,
+ 50, 0, 0, 10, 130, 0,
+ 16, 0, 38, 0, 0, 0,
+ 58, 0, 16, 128, 65, 0,
+ 0, 0, 36, 0, 0, 0,
+ 58, 0, 16, 0, 36, 0,
+ 0, 0, 58, 0, 16, 0,
+ 38, 0, 0, 0, 49, 0,
+ 0, 7, 18, 0, 16, 0,
+ 39, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 58, 0, 16, 0, 38, 0,
+ 0, 0, 68, 0, 0, 5,
+ 34, 0, 16, 0, 39, 0,
+ 0, 0, 58, 0, 16, 0,
+ 38, 0, 0, 0, 1, 0,
+ 0, 7, 18, 0, 16, 0,
+ 39, 0, 0, 0, 26, 0,
+ 16, 0, 39, 0, 0, 0,
+ 10, 0, 16, 0, 39, 0,
+ 0, 0, 56, 0, 0, 7,
+ 130, 0, 16, 0, 22, 0,
+ 0, 0, 58, 0, 16, 0,
+ 22, 0, 0, 0, 58, 0,
+ 16, 0, 38, 0, 0, 0,
+ 50, 0, 0, 9, 130, 0,
+ 16, 0, 22, 0, 0, 0,
+ 58, 0, 16, 0, 22, 0,
+ 0, 0, 10, 0, 16, 0,
+ 39, 0, 0, 0, 58, 0,
+ 16, 0, 36, 0, 0, 0,
+ 56, 0, 0, 7, 130, 0,
+ 16, 0, 22, 0, 0, 0,
+ 58, 0, 16, 0, 35, 0,
+ 0, 0, 58, 0, 16, 0,
+ 22, 0, 0, 0, 49, 0,
+ 0, 8, 130, 0, 16, 0,
+ 35, 0, 0, 0, 58, 0,
+ 16, 128, 129, 0, 0, 0,
+ 22, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 128, 63,
+ 50, 0, 0, 9, 114, 0,
+ 16, 0, 37, 0, 0, 0,
+ 70, 2, 16, 0, 37, 0,
+ 0, 0, 246, 15, 16, 0,
+ 22, 0, 0, 0, 70, 2,
+ 16, 0, 41, 0, 0, 0,
+ 56, 0, 0, 7, 130, 0,
+ 16, 0, 22, 0, 0, 0,
+ 58, 0, 16, 0, 33, 0,
+ 0, 0, 58, 0, 16, 0,
+ 34, 0, 0, 0, 14, 0,
+ 0, 7, 130, 0, 16, 0,
+ 22, 0, 0, 0, 58, 0,
+ 16, 0, 37, 0, 0, 0,
+ 58, 0, 16, 0, 22, 0,
+ 0, 0, 0, 0, 0, 8,
+ 130, 0, 16, 0, 22, 0,
+ 0, 0, 58, 0, 16, 128,
+ 65, 0, 0, 0, 22, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 128, 63, 14, 0,
+ 0, 7, 130, 0, 16, 0,
+ 22, 0, 0, 0, 58, 0,
+ 16, 0, 33, 0, 0, 0,
+ 58, 0, 16, 0, 22, 0,
+ 0, 0, 50, 0, 0, 9,
+ 114, 0, 16, 0, 37, 0,
+ 0, 0, 70, 2, 16, 0,
+ 37, 0, 0, 0, 246, 15,
+ 16, 0, 22, 0, 0, 0,
+ 70, 2, 16, 0, 43, 0,
+ 0, 0, 50, 0, 0, 12,
+ 114, 0, 16, 0, 39, 0,
+ 0, 0, 70, 2, 16, 0,
+ 40, 0, 0, 0, 2, 64,
+ 0, 0, 0, 0, 0, 63,
+ 0, 0, 0, 63, 0, 0,
+ 0, 63, 0, 0, 0, 0,
+ 70, 2, 16, 0, 37, 0,
+ 0, 0, 55, 0, 0, 9,
+ 114, 0, 16, 0, 39, 0,
+ 0, 0, 246, 15, 16, 0,
+ 35, 0, 0, 0, 70, 2,
+ 16, 0, 39, 0, 0, 0,
+ 70, 2, 16, 0, 29, 0,
+ 0, 0, 55, 0, 0, 9,
+ 114, 0, 16, 0, 39, 0,
+ 0, 0, 246, 15, 16, 0,
+ 29, 0, 0, 0, 70, 2,
+ 16, 0, 39, 0, 0, 0,
+ 70, 2, 16, 0, 29, 0,
+ 0, 0, 50, 0, 0, 13,
+ 114, 0, 16, 0, 40, 0,
+ 0, 0, 70, 2, 16, 128,
+ 65, 0, 0, 0, 40, 0,
+ 0, 0, 2, 64, 0, 0,
+ 0, 0, 0, 63, 0, 0,
+ 0, 63, 0, 0, 0, 63,
+ 0, 0, 0, 0, 70, 2,
+ 16, 0, 39, 0, 0, 0,
+ 16, 0, 0, 7, 130, 0,
+ 16, 0, 22, 0, 0, 0,
+ 70, 2, 16, 0, 40, 0,
+ 0, 0, 70, 2, 16, 0,
+ 44, 0, 0, 0, 50, 0,
+ 0, 9, 130, 0, 16, 0,
+ 33, 0, 0, 0, 58, 0,
+ 16, 0, 22, 0, 0, 0,
+ 58, 0, 16, 0, 26, 0,
+ 0, 0, 58, 0, 16, 0,
+ 32, 0, 0, 0, 52, 0,
+ 0, 7, 130, 0, 16, 0,
+ 33, 0, 0, 0, 58, 0,
+ 16, 0, 33, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 16, 0, 0, 7,
+ 130, 0, 16, 0, 34, 0,
+ 0, 0, 70, 2, 16, 0,
+ 40, 0, 0, 0, 70, 2,
+ 16, 0, 40, 0, 0, 0,
+ 50, 0, 0, 10, 130, 0,
+ 16, 0, 34, 0, 0, 0,
+ 58, 0, 16, 128, 65, 0,
+ 0, 0, 22, 0, 0, 0,
+ 58, 0, 16, 0, 22, 0,
+ 0, 0, 58, 0, 16, 0,
+ 34, 0, 0, 0, 56, 0,
+ 0, 7, 130, 0, 16, 0,
+ 36, 0, 0, 0, 58, 0,
+ 16, 0, 33, 0, 0, 0,
+ 58, 0, 16, 0, 33, 0,
+ 0, 0, 49, 0, 0, 7,
+ 130, 0, 16, 0, 36, 0,
+ 0, 0, 58, 0, 16, 0,
+ 36, 0, 0, 0, 58, 0,
+ 16, 0, 34, 0, 0, 0,
+ 31, 0, 4, 3, 58, 0,
+ 16, 0, 36, 0, 0, 0,
+ 30, 0, 0, 7, 130, 0,
+ 16, 0, 36, 0, 0, 0,
+ 58, 0, 16, 0, 21, 0,
+ 0, 0, 1, 64, 0, 0,
+ 1, 0, 0, 0, 54, 0,
+ 0, 5, 114, 0, 16, 0,
+ 29, 0, 0, 0, 70, 2,
+ 16, 0, 39, 0, 0, 0,
+ 54, 0, 0, 5, 130, 0,
+ 16, 0, 21, 0, 0, 0,
+ 58, 0, 16, 0, 36, 0,
+ 0, 0, 7, 0, 0, 1,
+ 21, 0, 0, 1, 56, 0,
+ 0, 7, 130, 0, 16, 0,
+ 30, 0, 0, 0, 58, 0,
+ 16, 0, 30, 0, 0, 0,
+ 58, 0, 16, 0, 42, 0,
+ 0, 0, 50, 0, 0, 10,
+ 130, 0, 16, 0, 30, 0,
+ 0, 0, 58, 0, 16, 128,
+ 65, 0, 0, 0, 30, 0,
+ 0, 0, 58, 0, 16, 0,
+ 30, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 128, 63,
+ 52, 0, 0, 7, 130, 0,
+ 16, 0, 34, 0, 0, 0,
+ 58, 0, 16, 0, 34, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 52, 68, 0,
+ 0, 5, 130, 0, 16, 0,
+ 36, 0, 0, 0, 58, 0,
+ 16, 0, 34, 0, 0, 0,
+ 56, 0, 0, 7, 130, 0,
+ 16, 0, 34, 0, 0, 0,
+ 58, 0, 16, 0, 26, 0,
+ 0, 0, 58, 0, 16, 0,
+ 34, 0, 0, 0, 50, 0,
+ 0, 9, 130, 0, 16, 0,
+ 34, 0, 0, 0, 58, 0,
+ 16, 0, 34, 0, 0, 0,
+ 58, 0, 16, 0, 36, 0,
+ 0, 0, 58, 0, 16, 0,
+ 22, 0, 0, 0, 49, 0,
+ 0, 8, 130, 0, 16, 0,
+ 23, 0, 0, 0, 58, 0,
+ 16, 128, 129, 0, 0, 0,
+ 34, 0, 0, 0, 58, 0,
+ 16, 0, 23, 0, 0, 0,
+ 50, 0, 0, 10, 114, 0,
+ 16, 0, 40, 0, 0, 0,
+ 246, 15, 16, 128, 65, 0,
+ 0, 0, 34, 0, 0, 0,
+ 70, 2, 16, 0, 42, 0,
+ 0, 0, 70, 2, 16, 0,
+ 40, 0, 0, 0, 56, 0,
+ 0, 7, 130, 0, 16, 0,
+ 33, 0, 0, 0, 58, 0,
+ 16, 0, 33, 0, 0, 0,
+ 58, 0, 16, 0, 36, 0,
+ 0, 0, 50, 0, 0, 10,
+ 130, 0, 16, 0, 30, 0,
+ 0, 0, 58, 0, 16, 0,
+ 33, 0, 0, 0, 58, 0,
+ 16, 0, 30, 0, 0, 0,
+ 58, 0, 16, 128, 65, 0,
+ 0, 0, 30, 0, 0, 0,
+ 50, 0, 0, 9, 114, 0,
+ 16, 0, 40, 0, 0, 0,
+ 70, 2, 16, 0, 40, 0,
+ 0, 0, 246, 15, 16, 0,
+ 30, 0, 0, 0, 70, 2,
+ 16, 0, 30, 0, 0, 0,
+ 0, 0, 0, 8, 114, 0,
+ 16, 0, 34, 0, 0, 0,
+ 70, 2, 16, 128, 65, 0,
+ 0, 0, 33, 0, 0, 0,
+ 70, 2, 16, 0, 34, 0,
+ 0, 0, 50, 0, 0, 9,
+ 114, 0, 16, 0, 33, 0,
+ 0, 0, 6, 0, 16, 0,
+ 19, 0, 0, 0, 70, 2,
+ 16, 0, 34, 0, 0, 0,
+ 70, 2, 16, 0, 33, 0,
+ 0, 0, 0, 0, 0, 8,
+ 114, 0, 16, 0, 32, 0,
+ 0, 0, 70, 2, 16, 128,
+ 65, 0, 0, 0, 35, 0,
+ 0, 0, 70, 2, 16, 0,
+ 32, 0, 0, 0, 50, 0,
+ 0, 9, 114, 0, 16, 0,
+ 32, 0, 0, 0, 6, 0,
+ 16, 0, 19, 0, 0, 0,
+ 70, 2, 16, 0, 32, 0,
+ 0, 0, 70, 2, 16, 0,
+ 35, 0, 0, 0, 50, 0,
+ 0, 9, 130, 0, 16, 0,
+ 22, 0, 0, 0, 58, 0,
+ 16, 0, 22, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 63, 1, 64, 0, 0,
+ 0, 0, 0, 63, 0, 0,
+ 0, 8, 114, 0, 16, 0,
+ 33, 0, 0, 0, 70, 2,
+ 16, 128, 65, 0, 0, 0,
+ 33, 0, 0, 0, 70, 2,
+ 16, 0, 36, 0, 0, 0,
+ 0, 0, 0, 8, 114, 0,
+ 16, 0, 32, 0, 0, 0,
+ 70, 2, 16, 128, 65, 0,
+ 0, 0, 32, 0, 0, 0,
+ 70, 2, 16, 0, 38, 0,
+ 0, 0, 0, 0, 0, 8,
+ 114, 0, 16, 0, 32, 0,
+ 0, 0, 70, 2, 16, 128,
+ 65, 0, 0, 0, 33, 0,
+ 0, 0, 70, 2, 16, 0,
+ 32, 0, 0, 0, 50, 0,
+ 0, 9, 114, 0, 16, 0,
+ 32, 0, 0, 0, 246, 15,
+ 16, 0, 22, 0, 0, 0,
+ 70, 2, 16, 0, 32, 0,
+ 0, 0, 70, 2, 16, 0,
+ 33, 0, 0, 0, 0, 0,
+ 0, 7, 114, 0, 16, 0,
+ 32, 0, 0, 0, 70, 2,
+ 16, 0, 31, 0, 0, 0,
+ 70, 2, 16, 0, 32, 0,
+ 0, 0, 55, 0, 0, 9,
+ 114, 0, 16, 0, 33, 0,
+ 0, 0, 166, 10, 16, 0,
+ 19, 0, 0, 0, 70, 2,
+ 16, 0, 32, 0, 0, 0,
+ 70, 2, 16, 0, 31, 0,
+ 0, 0, 30, 0, 0, 7,
+ 130, 0, 16, 0, 33, 0,
+ 0, 0, 58, 0, 16, 0,
+ 31, 0, 0, 0, 1, 64,
+ 0, 0, 1, 0, 0, 0,
+ 55, 0, 0, 9, 114, 0,
+ 16, 0, 32, 0, 0, 0,
+ 246, 15, 16, 0, 23, 0,
+ 0, 0, 70, 2, 16, 0,
+ 40, 0, 0, 0, 70, 2,
+ 16, 0, 30, 0, 0, 0,
+ 55, 0, 0, 9, 242, 0,
+ 16, 0, 33, 0, 0, 0,
+ 246, 15, 16, 0, 23, 0,
+ 0, 0, 70, 14, 16, 0,
+ 33, 0, 0, 0, 70, 14,
+ 16, 0, 31, 0, 0, 0,
+ 16, 0, 0, 7, 130, 0,
+ 16, 0, 22, 0, 0, 0,
+ 70, 2, 16, 0, 37, 0,
+ 0, 0, 70, 2, 16, 0,
+ 42, 0, 0, 0, 50, 0,
+ 0, 9, 130, 0, 16, 0,
+ 23, 0, 0, 0, 58, 0,
+ 16, 0, 22, 0, 0, 0,
+ 58, 0, 16, 0, 26, 0,
+ 0, 0, 58, 0, 16, 0,
+ 32, 0, 0, 0, 52, 0,
+ 0, 7, 34, 0, 16, 0,
+ 34, 0, 0, 0, 58, 0,
+ 16, 0, 23, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 16, 0, 0, 7,
+ 130, 0, 16, 0, 23, 0,
+ 0, 0, 70, 2, 16, 0,
+ 37, 0, 0, 0, 70, 2,
+ 16, 0, 37, 0, 0, 0,
+ 50, 0, 0, 10, 18, 0,
+ 16, 0, 34, 0, 0, 0,
+ 58, 0, 16, 128, 65, 0,
+ 0, 0, 22, 0, 0, 0,
+ 58, 0, 16, 0, 22, 0,
+ 0, 0, 58, 0, 16, 0,
+ 23, 0, 0, 0, 55, 0,
+ 0, 9, 50, 0, 16, 0,
+ 34, 0, 0, 0, 246, 15,
+ 16, 0, 35, 0, 0, 0,
+ 70, 0, 16, 0, 34, 0,
+ 0, 0, 70, 0, 16, 0,
+ 46, 0, 0, 0, 55, 0,
+ 0, 9, 50, 0, 16, 0,
+ 34, 0, 0, 0, 246, 15,
+ 16, 0, 29, 0, 0, 0,
+ 70, 0, 16, 0, 34, 0,
+ 0, 0, 70, 0, 16, 0,
+ 46, 0, 0, 0, 56, 0,
+ 0, 7, 130, 0, 16, 0,
+ 22, 0, 0, 0, 26, 0,
+ 16, 0, 34, 0, 0, 0,
+ 26, 0, 16, 0, 34, 0,
+ 0, 0, 29, 0, 0, 7,
+ 130, 0, 16, 0, 22, 0,
+ 0, 0, 10, 0, 16, 0,
+ 34, 0, 0, 0, 58, 0,
+ 16, 0, 22, 0, 0, 0,
+ 31, 0, 4, 3, 58, 0,
+ 16, 0, 22, 0, 0, 0,
+ 30, 0, 0, 7, 130, 0,
+ 16, 0, 22, 0, 0, 0,
+ 58, 0, 16, 0, 21, 0,
+ 0, 0, 1, 64, 0, 0,
+ 1, 0, 0, 0, 54, 0,
+ 0, 5, 114, 0, 16, 0,
+ 29, 0, 0, 0, 70, 2,
+ 16, 0, 39, 0, 0, 0,
+ 54, 0, 0, 5, 114, 0,
+ 16, 0, 30, 0, 0, 0,
+ 70, 2, 16, 0, 32, 0,
+ 0, 0, 54, 0, 0, 5,
+ 242, 0, 16, 0, 31, 0,
+ 0, 0, 70, 14, 16, 0,
+ 33, 0, 0, 0, 54, 0,
+ 0, 5, 130, 0, 16, 0,
+ 21, 0, 0, 0, 58, 0,
+ 16, 0, 22, 0, 0, 0,
+ 7, 0, 0, 1, 21, 0,
+ 0, 1, 54, 0, 0, 5,
+ 114, 0, 16, 0, 29, 0,
+ 0, 0, 70, 2, 16, 0,
+ 39, 0, 0, 0, 54, 0,
+ 0, 5, 114, 0, 16, 0,
+ 30, 0, 0, 0, 70, 2,
+ 16, 0, 32, 0, 0, 0,
+ 54, 0, 0, 5, 242, 0,
+ 16, 0, 31, 0, 0, 0,
+ 70, 14, 16, 0, 33, 0,
+ 0, 0, 30, 0, 0, 7,
+ 130, 0, 16, 0, 21, 0,
+ 0, 0, 58, 0, 16, 0,
+ 21, 0, 0, 0, 1, 64,
+ 0, 0, 1, 0, 0, 0,
+ 22, 0, 0, 1, 165, 0,
+ 0, 7, 130, 0, 16, 0,
+ 21, 0, 0, 0, 1, 64,
+ 0, 0, 52, 0, 0, 0,
+ 6, 240, 17, 0, 0, 0,
+ 0, 0, 54, 0, 0, 5,
+ 114, 0, 16, 0, 32, 0,
+ 0, 0, 70, 2, 16, 0,
+ 29, 0, 0, 0, 54, 0,
+ 0, 5, 114, 0, 16, 0,
+ 33, 0, 0, 0, 70, 2,
+ 16, 0, 30, 0, 0, 0,
+ 54, 0, 0, 5, 242, 0,
+ 16, 0, 34, 0, 0, 0,
+ 70, 14, 16, 0, 31, 0,
+ 0, 0, 54, 0, 0, 5,
+ 130, 0, 16, 0, 22, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 48, 0,
+ 0, 1, 80, 0, 0, 7,
+ 130, 0, 16, 0, 23, 0,
+ 0, 0, 58, 0, 16, 0,
+ 22, 0, 0, 0, 58, 0,
+ 16, 0, 21, 0, 0, 0,
+ 3, 0, 4, 3, 58, 0,
+ 16, 0, 23, 0, 0, 0,
+ 30, 0, 0, 7, 50, 0,
+ 16, 0, 35, 0, 0, 0,
+ 70, 0, 16, 0, 20, 0,
+ 0, 0, 246, 15, 16, 0,
+ 22, 0, 0, 0, 167, 0,
+ 0, 139, 2, 131, 0, 128,
+ 131, 153, 25, 0, 242, 0,
+ 16, 0, 36, 0, 0, 0,
+ 10, 0, 16, 0, 35, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 70, 126,
+ 16, 0, 7, 0, 0, 0,
+ 167, 0, 0, 139, 2, 131,
+ 0, 128, 131, 153, 25, 0,
+ 242, 0, 16, 0, 35, 0,
+ 0, 0, 26, 0, 16, 0,
+ 35, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 70, 126, 16, 0, 7, 0,
+ 0, 0, 0, 0, 0, 8,
+ 242, 0, 16, 0, 35, 0,
+ 0, 0, 70, 14, 16, 128,
+ 65, 0, 0, 0, 36, 0,
+ 0, 0, 70, 14, 16, 0,
+ 35, 0, 0, 0, 50, 0,
+ 0, 9, 242, 0, 16, 0,
+ 37, 0, 0, 0, 6, 0,
+ 16, 0, 17, 0, 0, 0,
+ 70, 14, 16, 0, 35, 0,
+ 0, 0, 70, 14, 16, 0,
+ 36, 0, 0, 0, 52, 0,
+ 0, 7, 130, 0, 16, 0,
+ 23, 0, 0, 0, 58, 0,
+ 16, 0, 37, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 50, 0, 0, 9,
+ 114, 0, 16, 0, 35, 0,
+ 0, 0, 6, 0, 16, 0,
+ 19, 0, 0, 0, 70, 2,
+ 16, 0, 35, 0, 0, 0,
+ 70, 2, 16, 0, 36, 0,
+ 0, 0, 0, 0, 0, 8,
+ 114, 0, 16, 0, 36, 0,
+ 0, 0, 70, 2, 16, 0,
+ 32, 0, 0, 0, 70, 2,
+ 16, 128, 65, 0, 0, 0,
+ 37, 0, 0, 0, 0, 0,
+ 0, 8, 114, 0, 16, 0,
+ 38, 0, 0, 0, 150, 7,
+ 16, 0, 25, 0, 0, 0,
+ 70, 2, 16, 128, 65, 0,
+ 0, 0, 35, 0, 0, 0,
+ 16, 0, 0, 7, 130, 0,
+ 16, 0, 26, 0, 0, 0,
+ 70, 2, 16, 0, 36, 0,
+ 0, 0, 70, 2, 16, 0,
+ 36, 0, 0, 0, 0, 0,
+ 0, 7, 130, 0, 16, 0,
+ 36, 0, 0, 0, 58, 0,
+ 16, 0, 26, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 52, 16, 0, 0, 7,
+ 130, 0, 16, 0, 26, 0,
+ 0, 0, 70, 2, 16, 0,
+ 38, 0, 0, 0, 70, 2,
+ 16, 0, 38, 0, 0, 0,
+ 56, 0, 0, 7, 130, 0,
+ 16, 0, 29, 0, 0, 0,
+ 58, 0, 16, 0, 23, 0,
+ 0, 0, 58, 0, 16, 0,
+ 23, 0, 0, 0, 50, 0,
+ 0, 10, 130, 0, 16, 0,
+ 26, 0, 0, 0, 58, 0,
+ 16, 128, 65, 0, 0, 0,
+ 23, 0, 0, 0, 58, 0,
+ 16, 0, 23, 0, 0, 0,
+ 58, 0, 16, 0, 26, 0,
+ 0, 0, 16, 0, 0, 7,
+ 130, 0, 16, 0, 30, 0,
+ 0, 0, 70, 2, 16, 0,
+ 38, 0, 0, 0, 70, 2,
+ 16, 0, 36, 0, 0, 0,
+ 50, 0, 0, 10, 130, 0,
+ 16, 0, 30, 0, 0, 0,
+ 58, 0, 16, 128, 65, 0,
+ 0, 0, 23, 0, 0, 0,
+ 58, 0, 16, 0, 23, 0,
+ 0, 0, 58, 0, 16, 0,
+ 30, 0, 0, 0, 50, 0,
+ 0, 10, 130, 0, 16, 0,
+ 32, 0, 0, 0, 58, 0,
+ 16, 128, 65, 0, 0, 0,
+ 23, 0, 0, 0, 58, 0,
+ 16, 0, 23, 0, 0, 0,
+ 58, 0, 16, 0, 36, 0,
+ 0, 0, 56, 0, 0, 7,
+ 130, 0, 16, 0, 33, 0,
+ 0, 0, 58, 0, 16, 0,
+ 26, 0, 0, 0, 58, 0,
+ 16, 0, 32, 0, 0, 0,
+ 50, 0, 0, 10, 130, 0,
+ 16, 0, 33, 0, 0, 0,
+ 58, 0, 16, 0, 30, 0,
+ 0, 0, 58, 0, 16, 0,
+ 30, 0, 0, 0, 58, 0,
+ 16, 128, 65, 0, 0, 0,
+ 33, 0, 0, 0, 75, 0,
+ 0, 5, 130, 0, 16, 0,
+ 35, 0, 0, 0, 58, 0,
+ 16, 0, 33, 0, 0, 0,
+ 0, 0, 0, 8, 130, 0,
+ 16, 0, 26, 0, 0, 0,
+ 58, 0, 16, 128, 65, 0,
+ 0, 0, 26, 0, 0, 0,
+ 58, 0, 16, 0, 30, 0,
+ 0, 0, 0, 0, 0, 8,
+ 130, 0, 16, 0, 30, 0,
+ 0, 0, 58, 0, 16, 0,
+ 30, 0, 0, 0, 58, 0,
+ 16, 128, 65, 0, 0, 0,
+ 32, 0, 0, 0, 0, 0,
+ 0, 7, 130, 0, 16, 0,
+ 30, 0, 0, 0, 58, 0,
+ 16, 0, 26, 0, 0, 0,
+ 58, 0, 16, 0, 30, 0,
+ 0, 0, 0, 0, 0, 7,
+ 130, 0, 16, 0, 32, 0,
+ 0, 0, 58, 0, 16, 0,
+ 35, 0, 0, 0, 58, 0,
+ 16, 0, 26, 0, 0, 0,
+ 51, 0, 0, 7, 130, 0,
+ 16, 0, 32, 0, 0, 0,
+ 58, 0, 16, 0, 32, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 14, 0,
+ 0, 7, 130, 0, 16, 0,
+ 32, 0, 0, 0, 58, 0,
+ 16, 0, 32, 0, 0, 0,
+ 58, 0, 16, 0, 30, 0,
+ 0, 0, 49, 0, 0, 7,
+ 130, 0, 16, 0, 37, 0,
+ 0, 0, 58, 0, 16, 0,
+ 32, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 128, 63,
+ 49, 0, 0, 7, 130, 0,
+ 16, 0, 26, 0, 0, 0,
+ 58, 0, 16, 0, 26, 0,
+ 0, 0, 58, 0, 16, 0,
+ 35, 0, 0, 0, 1, 0,
+ 0, 7, 130, 0, 16, 0,
+ 26, 0, 0, 0, 58, 0,
+ 16, 0, 26, 0, 0, 0,
+ 58, 0, 16, 0, 37, 0,
+ 0, 0, 56, 0, 0, 7,
+ 130, 0, 16, 0, 29, 0,
+ 0, 0, 58, 0, 16, 0,
+ 29, 0, 0, 0, 58, 0,
+ 16, 0, 30, 0, 0, 0,
+ 56, 0, 0, 7, 130, 0,
+ 16, 0, 29, 0, 0, 0,
+ 58, 0, 16, 0, 29, 0,
+ 0, 0, 1, 64, 0, 0,
+ 236, 81, 184, 190, 49, 0,
+ 0, 7, 130, 0, 16, 0,
+ 29, 0, 0, 0, 58, 0,
+ 16, 0, 29, 0, 0, 0,
+ 58, 0, 16, 0, 33, 0,
+ 0, 0, 1, 0, 0, 7,
+ 130, 0, 16, 0, 26, 0,
+ 0, 0, 58, 0, 16, 0,
+ 26, 0, 0, 0, 58, 0,
+ 16, 0, 29, 0, 0, 0,
+ 49, 0, 0, 7, 130, 0,
+ 16, 0, 29, 0, 0, 0,
+ 58, 0, 16, 0, 30, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 180, 1, 0,
+ 0, 7, 130, 0, 16, 0,
+ 26, 0, 0, 0, 58, 0,
+ 16, 0, 26, 0, 0, 0,
+ 58, 0, 16, 0, 29, 0,
+ 0, 0, 0, 0, 0, 8,
+ 114, 0, 16, 0, 38, 0,
+ 0, 0, 70, 2, 16, 128,
+ 65, 0, 0, 0, 36, 0,
+ 0, 0, 70, 2, 16, 0,
+ 38, 0, 0, 0, 0, 0,
+ 0, 8, 130, 0, 16, 0,
+ 29, 0, 0, 0, 58, 0,
+ 16, 128, 65, 0, 0, 0,
+ 32, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 128, 63,
+ 56, 0, 0, 7, 130, 0,
+ 16, 0, 30, 0, 0, 0,
+ 58, 0, 16, 0, 29, 0,
+ 0, 0, 58, 0, 16, 0,
+ 30, 0, 0, 0, 14, 0,
+ 0, 7, 130, 0, 16, 0,
+ 30, 0, 0, 0, 58, 0,
+ 16, 0, 35, 0, 0, 0,
+ 58, 0, 16, 0, 30, 0,
+ 0, 0, 0, 0, 0, 8,
+ 130, 0, 16, 0, 30, 0,
+ 0, 0, 58, 0, 16, 128,
+ 65, 0, 0, 0, 30, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 128, 63, 14, 0,
+ 0, 7, 130, 0, 16, 0,
+ 29, 0, 0, 0, 58, 0,
+ 16, 0, 29, 0, 0, 0,
+ 58, 0, 16, 0, 30, 0,
+ 0, 0, 50, 0, 0, 9,
+ 114, 0, 16, 0, 38, 0,
+ 0, 0, 70, 2, 16, 0,
+ 38, 0, 0, 0, 246, 15,
+ 16, 0, 29, 0, 0, 0,
+ 70, 2, 16, 0, 36, 0,
+ 0, 0, 0, 0, 0, 7,
+ 114, 0, 16, 0, 39, 0,
+ 0, 0, 70, 2, 16, 0,
+ 37, 0, 0, 0, 70, 2,
+ 16, 0, 38, 0, 0, 0,
+ 16, 0, 0, 7, 130, 0,
+ 16, 0, 29, 0, 0, 0,
+ 70, 2, 16, 0, 38, 0,
+ 0, 0, 70, 2, 16, 0,
+ 38, 0, 0, 0, 0, 0,
+ 0, 7, 130, 0, 16, 0,
+ 38, 0, 0, 0, 58, 0,
+ 16, 0, 29, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 52, 55, 0, 0, 9,
+ 114, 0, 16, 0, 32, 0,
+ 0, 0, 246, 15, 16, 0,
+ 26, 0, 0, 0, 70, 2,
+ 16, 0, 39, 0, 0, 0,
+ 70, 2, 16, 0, 32, 0,
+ 0, 0, 55, 0, 0, 9,
+ 242, 0, 16, 0, 36, 0,
+ 0, 0, 246, 15, 16, 0,
+ 26, 0, 0, 0, 70, 14,
+ 16, 0, 38, 0, 0, 0,
+ 70, 14, 16, 0, 36, 0,
+ 0, 0, 68, 0, 0, 5,
+ 130, 0, 16, 0, 26, 0,
+ 0, 0, 58, 0, 16, 0,
+ 36, 0, 0, 0, 56, 0,
+ 0, 7, 130, 0, 16, 0,
+ 29, 0, 0, 0, 58, 0,
+ 16, 0, 23, 0, 0, 0,
+ 58, 0, 16, 0, 26, 0,
+ 0, 0, 49, 0, 0, 7,
+ 130, 0, 16, 0, 29, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 128, 63, 58, 0,
+ 16, 0, 29, 0, 0, 0,
+ 50, 0, 0, 9, 130, 0,
+ 16, 0, 23, 0, 0, 0,
+ 58, 0, 16, 0, 26, 0,
+ 0, 0, 58, 0, 16, 0,
+ 23, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 128, 191,
+ 50, 0, 0, 9, 114, 0,
+ 16, 0, 36, 0, 0, 0,
+ 70, 2, 16, 0, 36, 0,
+ 0, 0, 246, 15, 16, 0,
+ 23, 0, 0, 0, 70, 2,
+ 16, 0, 33, 0, 0, 0,
+ 0, 0, 0, 8, 114, 0,
+ 16, 0, 35, 0, 0, 0,
+ 70, 2, 16, 128, 65, 0,
+ 0, 0, 35, 0, 0, 0,
+ 70, 2, 16, 0, 37, 0,
+ 0, 0, 0, 0, 0, 7,
+ 114, 0, 16, 0, 35, 0,
+ 0, 0, 70, 2, 16, 0,
+ 34, 0, 0, 0, 70, 2,
+ 16, 0, 35, 0, 0, 0,
+ 55, 0, 0, 9, 114, 0,
+ 16, 0, 35, 0, 0, 0,
+ 166, 10, 16, 0, 19, 0,
+ 0, 0, 70, 2, 16, 0,
+ 35, 0, 0, 0, 70, 2,
+ 16, 0, 34, 0, 0, 0,
+ 30, 0, 0, 7, 130, 0,
+ 16, 0, 35, 0, 0, 0,
+ 58, 0, 16, 0, 34, 0,
+ 0, 0, 1, 64, 0, 0,
+ 1, 0, 0, 0, 55, 0,
+ 0, 9, 114, 0, 16, 0,
+ 33, 0, 0, 0, 246, 15,
+ 16, 0, 29, 0, 0, 0,
+ 70, 2, 16, 0, 36, 0,
+ 0, 0, 70, 2, 16, 0,
+ 33, 0, 0, 0, 55, 0,
+ 0, 9, 242, 0, 16, 0,
+ 34, 0, 0, 0, 246, 15,
+ 16, 0, 29, 0, 0, 0,
+ 70, 14, 16, 0, 35, 0,
+ 0, 0, 70, 14, 16, 0,
+ 34, 0, 0, 0, 30, 0,
+ 0, 7, 130, 0, 16, 0,
+ 22, 0, 0, 0, 58, 0,
+ 16, 0, 22, 0, 0, 0,
+ 1, 64, 0, 0, 1, 0,
+ 0, 0, 22, 0, 0, 1,
+ 31, 0, 4, 3, 58, 0,
+ 16, 0, 34, 0, 0, 0,
+ 86, 0, 0, 5, 130, 0,
+ 16, 0, 21, 0, 0, 0,
+ 58, 0, 16, 0, 34, 0,
+ 0, 0, 14, 0, 0, 10,
+ 130, 0, 16, 0, 21, 0,
+ 0, 0, 2, 64, 0, 0,
+ 0, 0, 128, 63, 0, 0,
+ 128, 63, 0, 0, 128, 63,
+ 0, 0, 128, 63, 58, 0,
+ 16, 0, 21, 0, 0, 0,
+ 31, 0, 4, 3, 42, 0,
+ 16, 0, 19, 0, 0, 0,
+ 165, 0, 0, 7, 130, 0,
+ 16, 0, 22, 0, 0, 0,
+ 1, 64, 0, 0, 80, 0,
+ 0, 0, 6, 240, 17, 0,
+ 0, 0, 0, 0, 16, 0,
+ 0, 7, 130, 0, 16, 0,
+ 23, 0, 0, 0, 70, 2,
+ 16, 0, 33, 0, 0, 0,
+ 70, 2, 16, 0, 33, 0,
+ 0, 0, 0, 0, 0, 7,
+ 130, 0, 16, 0, 26, 0,
+ 0, 0, 58, 0, 16, 0,
+ 23, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 52,
+ 68, 0, 0, 5, 130, 0,
+ 16, 0, 26, 0, 0, 0,
+ 58, 0, 16, 0, 26, 0,
+ 0, 0, 56, 0, 0, 7,
+ 114, 0, 16, 0, 32, 0,
+ 0, 0, 246, 15, 16, 0,
+ 26, 0, 0, 0, 70, 2,
+ 16, 0, 33, 0, 0, 0,
+ 0, 0, 0, 8, 114, 0,
+ 16, 0, 35, 0, 0, 0,
+ 150, 7, 16, 128, 65, 0,
+ 0, 0, 25, 0, 0, 0,
+ 70, 2, 16, 0, 26, 0,
+ 0, 0, 50, 0, 0, 10,
+ 114, 0, 16, 0, 35, 0,
+ 0, 0, 70, 2, 16, 128,
+ 65, 0, 0, 0, 34, 0,
+ 0, 0, 246, 15, 16, 0,
+ 21, 0, 0, 0, 70, 2,
+ 16, 0, 35, 0, 0, 0,
+ 16, 0, 0, 7, 130, 0,
+ 16, 0, 29, 0, 0, 0,
+ 70, 2, 16, 0, 35, 0,
+ 0, 0, 70, 2, 16, 0,
+ 32, 0, 0, 0, 50, 0,
+ 0, 10, 114, 0, 16, 0,
+ 32, 0, 0, 0, 246, 15,
+ 16, 128, 65, 0, 0, 0,
+ 29, 0, 0, 0, 70, 2,
+ 16, 0, 32, 0, 0, 0,
+ 70, 2, 16, 0, 35, 0,
+ 0, 0, 16, 0, 0, 7,
+ 130, 0, 16, 0, 29, 0,
+ 0, 0, 70, 2, 16, 0,
+ 32, 0, 0, 0, 70, 2,
+ 16, 0, 32, 0, 0, 0,
+ 0, 0, 0, 7, 130, 0,
+ 16, 0, 29, 0, 0, 0,
+ 58, 0, 16, 0, 29, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 52, 68, 0,
+ 0, 5, 130, 0, 16, 0,
+ 29, 0, 0, 0, 58, 0,
+ 16, 0, 29, 0, 0, 0,
+ 56, 0, 0, 8, 130, 0,
+ 16, 0, 22, 0, 0, 0,
+ 58, 0, 16, 128, 65, 0,
+ 0, 0, 22, 0, 0, 0,
+ 58, 0, 16, 0, 23, 0,
+ 0, 0, 56, 0, 0, 7,
+ 130, 0, 16, 0, 22, 0,
+ 0, 0, 58, 0, 16, 0,
+ 26, 0, 0, 0, 58, 0,
+ 16, 0, 22, 0, 0, 0,
+ 56, 0, 0, 7, 130, 0,
+ 16, 0, 22, 0, 0, 0,
+ 58, 0, 16, 0, 21, 0,
+ 0, 0, 58, 0, 16, 0,
+ 22, 0, 0, 0, 56, 0,
+ 0, 7, 130, 0, 16, 0,
+ 22, 0, 0, 0, 58, 0,
+ 16, 0, 29, 0, 0, 0,
+ 58, 0, 16, 0, 22, 0,
+ 0, 0, 52, 0, 0, 7,
+ 130, 0, 16, 0, 22, 0,
+ 0, 0, 58, 0, 16, 0,
+ 22, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 128, 191,
+ 50, 0, 0, 10, 226, 0,
+ 16, 0, 25, 0, 0, 0,
+ 6, 9, 16, 128, 65, 0,
+ 0, 0, 32, 0, 0, 0,
+ 246, 15, 16, 0, 22, 0,
+ 0, 0, 86, 14, 16, 0,
+ 25, 0, 0, 0, 168, 0,
+ 0, 9, 114, 224, 17, 0,
+ 0, 0, 0, 0, 42, 0,
+ 16, 0, 21, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 150, 7, 16, 0,
+ 25, 0, 0, 0, 21, 0,
+ 0, 1, 50, 0, 0, 9,
+ 226, 0, 16, 0, 25, 0,
+ 0, 0, 6, 9, 16, 0,
+ 33, 0, 0, 0, 246, 15,
+ 16, 0, 21, 0, 0, 0,
+ 6, 9, 16, 0, 26, 0,
+ 0, 0, 168, 0, 0, 9,
+ 18, 240, 17, 0, 3, 0,
+ 0, 0, 10, 0, 16, 0,
+ 21, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 26, 0, 16, 0, 25, 0,
+ 0, 0, 168, 0, 0, 9,
+ 18, 240, 17, 0, 3, 0,
+ 0, 0, 10, 0, 16, 0,
+ 28, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 42, 0, 16, 0, 25, 0,
+ 0, 0, 168, 0, 0, 9,
+ 18, 240, 17, 0, 3, 0,
+ 0, 0, 26, 0, 16, 0,
+ 28, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 58, 0, 16, 0, 25, 0,
+ 0, 0, 168, 0, 0, 9,
+ 18, 240, 17, 0, 3, 0,
+ 0, 0, 42, 0, 16, 0,
+ 28, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 26, 0, 16, 0, 21, 0,
+ 0, 0, 21, 0, 0, 1,
+ 54, 0, 0, 5, 18, 0,
+ 16, 0, 21, 0, 0, 0,
+ 58, 0, 16, 0, 28, 0,
+ 0, 0, 22, 0, 0, 1,
+ 18, 0, 0, 1, 165, 0,
+ 0, 7, 98, 0, 16, 0,
+ 19, 0, 0, 0, 1, 64,
+ 0, 0, 76, 0, 0, 0,
+ 6, 241, 17, 0, 0, 0,
+ 0, 0, 49, 0, 0, 10,
+ 98, 0, 16, 0, 19, 0,
+ 0, 0, 2, 64, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 166, 9,
+ 16, 0, 19, 0, 0, 0,
+ 165, 0, 0, 7, 130, 0,
+ 16, 0, 19, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 0, 0, 0, 0, 165, 0,
+ 0, 7, 50, 0, 16, 0,
+ 20, 0, 0, 0, 1, 64,
+ 0, 0, 48, 0, 0, 0,
+ 70, 240, 17, 0, 1, 0,
+ 0, 0, 165, 0, 0, 7,
+ 66, 0, 16, 0, 20, 0,
+ 0, 0, 1, 64, 0, 0,
+ 80, 0, 0, 0, 6, 240,
+ 17, 0, 0, 0, 0, 0,
+ 49, 0, 0, 7, 130, 0,
+ 16, 0, 20, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 42, 0, 16, 0,
+ 20, 0, 0, 0, 165, 0,
+ 0, 7, 50, 0, 16, 0,
+ 21, 0, 0, 0, 1, 64,
+ 0, 0, 44, 0, 0, 0,
+ 70, 240, 17, 0, 0, 0,
+ 0, 0, 54, 0, 0, 4,
+ 66, 0, 16, 0, 21, 0,
+ 0, 0, 10, 32, 2, 0,
+ 48, 0, 0, 1, 80, 0,
+ 0, 7, 130, 0, 16, 0,
+ 21, 0, 0, 0, 42, 0,
+ 16, 0, 21, 0, 0, 0,
+ 58, 0, 16, 0, 19, 0,
+ 0, 0, 3, 0, 4, 3,
+ 58, 0, 16, 0, 21, 0,
+ 0, 0, 167, 0, 0, 9,
+ 18, 0, 16, 0, 26, 0,
+ 0, 0, 42, 0, 16, 0,
+ 21, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 3, 0,
+ 0, 0, 30, 0, 0, 10,
+ 242, 0, 16, 0, 28, 0,
+ 0, 0, 166, 10, 16, 0,
+ 21, 0, 0, 0, 2, 64,
+ 0, 0, 183, 7, 0, 0,
+ 110, 15, 0, 0, 37, 23,
+ 0, 0, 0, 4, 0, 0,
+ 167, 0, 0, 9, 34, 0,
+ 16, 0, 26, 0, 0, 0,
+ 10, 0, 16, 0, 28, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 167, 0, 0, 9, 66, 0,
+ 16, 0, 26, 0, 0, 0,
+ 26, 0, 16, 0, 28, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 167, 0, 0, 9, 130, 0,
+ 16, 0, 21, 0, 0, 0,
+ 42, 0, 16, 0, 28, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 54, 0, 0, 8, 226, 0,
+ 16, 0, 25, 0, 0, 0,
+ 2, 64, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 54, 0, 0, 8,
+ 242, 0, 16, 0, 32, 0,
+ 0, 0, 2, 64, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 54, 0,
+ 0, 5, 130, 0, 16, 0,
+ 22, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 48, 0, 0, 1, 80, 0,
+ 0, 7, 130, 0, 16, 0,
+ 23, 0, 0, 0, 58, 0,
+ 16, 0, 22, 0, 0, 0,
+ 10, 0, 16, 0, 21, 0,
+ 0, 0, 3, 0, 4, 3,
+ 58, 0, 16, 0, 23, 0,
+ 0, 0, 30, 0, 0, 7,
+ 130, 0, 16, 0, 23, 0,
+ 0, 0, 26, 0, 16, 0,
+ 21, 0, 0, 0, 58, 0,
+ 16, 0, 22, 0, 0, 0,
+ 167, 0, 0, 139, 2, 67,
+ 0, 128, 131, 153, 25, 0,
+ 50, 0, 16, 0, 35, 0,
+ 0, 0, 58, 0, 16, 0,
+ 23, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 70, 112, 16, 0, 6, 0,
+ 0, 0, 30, 0, 0, 7,
+ 242, 0, 16, 0, 35, 0,
+ 0, 0, 70, 4, 16, 0,
+ 20, 0, 0, 0, 6, 5,
+ 16, 0, 35, 0, 0, 0,
+ 167, 0, 0, 139, 2, 131,
+ 0, 128, 131, 153, 25, 0,
+ 242, 0, 16, 0, 36, 0,
+ 0, 0, 10, 0, 16, 0,
+ 35, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 70, 126, 16, 0, 7, 0,
+ 0, 0, 167, 0, 0, 139,
+ 2, 131, 0, 128, 131, 153,
+ 25, 0, 242, 0, 16, 0,
+ 37, 0, 0, 0, 26, 0,
+ 16, 0, 35, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 70, 126, 16, 0,
+ 7, 0, 0, 0, 0, 0,
+ 0, 8, 242, 0, 16, 0,
+ 38, 0, 0, 0, 70, 14,
+ 16, 128, 65, 0, 0, 0,
+ 36, 0, 0, 0, 70, 14,
+ 16, 0, 37, 0, 0, 0,
+ 50, 0, 0, 9, 242, 0,
+ 16, 0, 38, 0, 0, 0,
+ 6, 0, 16, 0, 17, 0,
+ 0, 0, 70, 14, 16, 0,
+ 38, 0, 0, 0, 70, 14,
+ 16, 0, 36, 0, 0, 0,
+ 167, 0, 0, 139, 2, 131,
+ 0, 128, 131, 153, 25, 0,
+ 242, 0, 16, 0, 39, 0,
+ 0, 0, 42, 0, 16, 0,
+ 35, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 70, 126, 16, 0, 7, 0,
+ 0, 0, 167, 0, 0, 139,
+ 2, 131, 0, 128, 131, 153,
+ 25, 0, 242, 0, 16, 0,
+ 35, 0, 0, 0, 58, 0,
+ 16, 0, 35, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 70, 126, 16, 0,
+ 7, 0, 0, 0, 0, 0,
+ 0, 8, 242, 0, 16, 0,
+ 40, 0, 0, 0, 70, 14,
+ 16, 128, 65, 0, 0, 0,
+ 39, 0, 0, 0, 70, 14,
+ 16, 0, 35, 0, 0, 0,
+ 50, 0, 0, 9, 242, 0,
+ 16, 0, 40, 0, 0, 0,
+ 6, 0, 16, 0, 17, 0,
+ 0, 0, 70, 14, 16, 0,
+ 40, 0, 0, 0, 70, 14,
+ 16, 0, 39, 0, 0, 0,
+ 52, 0, 0, 7, 130, 0,
+ 16, 0, 38, 0, 0, 0,
+ 58, 0, 16, 0, 38, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 52, 0,
+ 0, 7, 130, 0, 16, 0,
+ 40, 0, 0, 0, 58, 0,
+ 16, 0, 40, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 8,
+ 242, 0, 16, 0, 41, 0,
+ 0, 0, 70, 14, 16, 128,
+ 65, 0, 0, 0, 38, 0,
+ 0, 0, 70, 14, 16, 0,
+ 40, 0, 0, 0, 56, 0,
+ 0, 10, 242, 0, 16, 0,
+ 42, 0, 0, 0, 70, 14,
+ 16, 0, 41, 0, 0, 0,
+ 2, 64, 0, 0, 0, 0,
+ 0, 63, 0, 0, 0, 63,
+ 0, 0, 0, 63, 0, 0,
+ 0, 63, 16, 0, 0, 7,
+ 130, 0, 16, 0, 23, 0,
+ 0, 0, 70, 2, 16, 0,
+ 42, 0, 0, 0, 70, 2,
+ 16, 0, 42, 0, 0, 0,
+ 50, 0, 0, 10, 130, 0,
+ 16, 0, 26, 0, 0, 0,
+ 58, 0, 16, 128, 65, 0,
+ 0, 0, 42, 0, 0, 0,
+ 58, 0, 16, 0, 42, 0,
+ 0, 0, 58, 0, 16, 0,
+ 23, 0, 0, 0, 29, 0,
+ 0, 7, 130, 0, 16, 0,
+ 29, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 58, 0, 16, 0, 26, 0,
+ 0, 0, 31, 0, 4, 3,
+ 58, 0, 16, 0, 29, 0,
+ 0, 0, 30, 0, 0, 7,
+ 130, 0, 16, 0, 29, 0,
+ 0, 0, 58, 0, 16, 0,
+ 22, 0, 0, 0, 1, 64,
+ 0, 0, 1, 0, 0, 0,
+ 54, 0, 0, 5, 130, 0,
+ 16, 0, 22, 0, 0, 0,
+ 58, 0, 16, 0, 29, 0,
+ 0, 0, 7, 0, 0, 1,
+ 21, 0, 0, 1, 68, 0,
+ 0, 5, 130, 0, 16, 0,
+ 29, 0, 0, 0, 58, 0,
+ 16, 0, 23, 0, 0, 0,
+ 68, 0, 0, 5, 130, 0,
+ 16, 0, 26, 0, 0, 0,
+ 58, 0, 16, 0, 26, 0,
+ 0, 0, 56, 0, 0, 7,
+ 130, 0, 16, 0, 23, 0,
+ 0, 0, 58, 0, 16, 0,
+ 23, 0, 0, 0, 58, 0,
+ 16, 0, 29, 0, 0, 0,
+ 0, 0, 0, 7, 114, 0,
+ 16, 0, 41, 0, 0, 0,
+ 70, 2, 16, 0, 38, 0,
+ 0, 0, 70, 2, 16, 0,
+ 40, 0, 0, 0, 50, 0,
+ 0, 9, 130, 0, 16, 0,
+ 30, 0, 0, 0, 58, 0,
+ 16, 0, 41, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 63, 58, 0, 16, 0,
+ 38, 0, 0, 0, 56, 0,
+ 0, 7, 130, 0, 16, 0,
+ 30, 0, 0, 0, 58, 0,
+ 16, 0, 26, 0, 0, 0,
+ 58, 0, 16, 0, 30, 0,
+ 0, 0, 56, 0, 0, 7,
+ 130, 0, 16, 0, 30, 0,
+ 0, 0, 58, 0, 16, 0,
+ 23, 0, 0, 0, 58, 0,
+ 16, 0, 30, 0, 0, 0,
+ 56, 0, 0, 7, 114, 0,
+ 16, 0, 42, 0, 0, 0,
+ 246, 15, 16, 0, 29, 0,
+ 0, 0, 70, 2, 16, 0,
+ 42, 0, 0, 0, 56, 0,
+ 0, 7, 130, 0, 16, 0,
+ 26, 0, 0, 0, 58, 0,
+ 16, 0, 26, 0, 0, 0,
+ 58, 0, 16, 0, 42, 0,
+ 0, 0, 50, 0, 0, 13,
+ 114, 0, 16, 0, 41, 0,
+ 0, 0, 70, 2, 16, 128,
+ 65, 0, 0, 0, 41, 0,
+ 0, 0, 2, 64, 0, 0,
+ 0, 0, 0, 63, 0, 0,
+ 0, 63, 0, 0, 0, 63,
+ 0, 0, 0, 0, 70, 2,
+ 16, 0, 26, 0, 0, 0,
+ 16, 0, 0, 7, 130, 0,
+ 16, 0, 33, 0, 0, 0,
+ 70, 2, 16, 0, 41, 0,
+ 0, 0, 70, 2, 16, 0,
+ 42, 0, 0, 0, 50, 0,
+ 0, 9, 130, 0, 16, 0,
+ 30, 0, 0, 0, 58, 0,
+ 16, 0, 33, 0, 0, 0,
+ 58, 0, 16, 0, 26, 0,
+ 0, 0, 58, 0, 16, 0,
+ 30, 0, 0, 0, 52, 0,
+ 0, 7, 130, 0, 16, 0,
+ 30, 0, 0, 0, 58, 0,
+ 16, 0, 30, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 16, 0, 0, 7,
+ 130, 0, 16, 0, 35, 0,
+ 0, 0, 70, 2, 16, 0,
+ 41, 0, 0, 0, 70, 2,
+ 16, 0, 41, 0, 0, 0,
+ 50, 0, 0, 10, 130, 0,
+ 16, 0, 35, 0, 0, 0,
+ 58, 0, 16, 128, 65, 0,
+ 0, 0, 33, 0, 0, 0,
+ 58, 0, 16, 0, 33, 0,
+ 0, 0, 58, 0, 16, 0,
+ 35, 0, 0, 0, 56, 0,
+ 0, 7, 130, 0, 16, 0,
+ 36, 0, 0, 0, 58, 0,
+ 16, 0, 30, 0, 0, 0,
+ 58, 0, 16, 0, 30, 0,
+ 0, 0, 49, 0, 0, 7,
+ 130, 0, 16, 0, 36, 0,
+ 0, 0, 58, 0, 16, 0,
+ 36, 0, 0, 0, 58, 0,
+ 16, 0, 35, 0, 0, 0,
+ 31, 0, 4, 3, 58, 0,
+ 16, 0, 36, 0, 0, 0,
+ 30, 0, 0, 7, 130, 0,
+ 16, 0, 36, 0, 0, 0,
+ 58, 0, 16, 0, 22, 0,
+ 0, 0, 1, 64, 0, 0,
+ 1, 0, 0, 0, 54, 0,
+ 0, 5, 130, 0, 16, 0,
+ 22, 0, 0, 0, 58, 0,
+ 16, 0, 36, 0, 0, 0,
+ 7, 0, 0, 1, 21, 0,
+ 0, 1, 56, 0, 0, 7,
+ 130, 0, 16, 0, 29, 0,
+ 0, 0, 58, 0, 16, 0,
+ 29, 0, 0, 0, 58, 0,
+ 16, 0, 42, 0, 0, 0,
+ 50, 0, 0, 10, 130, 0,
+ 16, 0, 29, 0, 0, 0,
+ 58, 0, 16, 128, 65, 0,
+ 0, 0, 29, 0, 0, 0,
+ 58, 0, 16, 0, 29, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 128, 63, 52, 0,
+ 0, 7, 130, 0, 16, 0,
+ 35, 0, 0, 0, 58, 0,
+ 16, 0, 35, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 52, 68, 0, 0, 5,
+ 130, 0, 16, 0, 36, 0,
+ 0, 0, 58, 0, 16, 0,
+ 35, 0, 0, 0, 56, 0,
+ 0, 7, 130, 0, 16, 0,
+ 26, 0, 0, 0, 58, 0,
+ 16, 0, 26, 0, 0, 0,
+ 58, 0, 16, 0, 35, 0,
+ 0, 0, 50, 0, 0, 9,
+ 130, 0, 16, 0, 26, 0,
+ 0, 0, 58, 0, 16, 0,
+ 26, 0, 0, 0, 58, 0,
+ 16, 0, 36, 0, 0, 0,
+ 58, 0, 16, 0, 33, 0,
+ 0, 0, 49, 0, 0, 8,
+ 130, 0, 16, 0, 23, 0,
+ 0, 0, 58, 0, 16, 128,
+ 129, 0, 0, 0, 26, 0,
+ 0, 0, 58, 0, 16, 0,
+ 23, 0, 0, 0, 50, 0,
+ 0, 10, 114, 0, 16, 0,
+ 41, 0, 0, 0, 246, 15,
+ 16, 128, 65, 0, 0, 0,
+ 26, 0, 0, 0, 70, 2,
+ 16, 0, 42, 0, 0, 0,
+ 70, 2, 16, 0, 41, 0,
+ 0, 0, 56, 0, 0, 7,
+ 130, 0, 16, 0, 26, 0,
+ 0, 0, 58, 0, 16, 0,
+ 30, 0, 0, 0, 58, 0,
+ 16, 0, 36, 0, 0, 0,
+ 50, 0, 0, 10, 130, 0,
+ 16, 0, 26, 0, 0, 0,
+ 58, 0, 16, 0, 26, 0,
+ 0, 0, 58, 0, 16, 0,
+ 29, 0, 0, 0, 58, 0,
+ 16, 128, 65, 0, 0, 0,
+ 29, 0, 0, 0, 50, 0,
+ 0, 9, 114, 0, 16, 0,
+ 41, 0, 0, 0, 70, 2,
+ 16, 0, 41, 0, 0, 0,
+ 246, 15, 16, 0, 26, 0,
+ 0, 0, 150, 7, 16, 0,
+ 25, 0, 0, 0, 0, 0,
+ 0, 8, 114, 0, 16, 0,
+ 37, 0, 0, 0, 70, 2,
+ 16, 128, 65, 0, 0, 0,
+ 36, 0, 0, 0, 70, 2,
+ 16, 0, 37, 0, 0, 0,
+ 50, 0, 0, 9, 114, 0,
+ 16, 0, 36, 0, 0, 0,
+ 6, 0, 16, 0, 19, 0,
+ 0, 0, 70, 2, 16, 0,
+ 37, 0, 0, 0, 70, 2,
+ 16, 0, 36, 0, 0, 0,
+ 0, 0, 0, 8, 114, 0,
+ 16, 0, 35, 0, 0, 0,
+ 70, 2, 16, 128, 65, 0,
+ 0, 0, 39, 0, 0, 0,
+ 70, 2, 16, 0, 35, 0,
+ 0, 0, 50, 0, 0, 9,
+ 114, 0, 16, 0, 35, 0,
+ 0, 0, 6, 0, 16, 0,
+ 19, 0, 0, 0, 70, 2,
+ 16, 0, 35, 0, 0, 0,
+ 70, 2, 16, 0, 39, 0,
+ 0, 0, 50, 0, 0, 9,
+ 130, 0, 16, 0, 26, 0,
+ 0, 0, 58, 0, 16, 0,
+ 33, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 63,
+ 1, 64, 0, 0, 0, 0,
+ 0, 63, 0, 0, 0, 8,
+ 114, 0, 16, 0, 36, 0,
+ 0, 0, 70, 2, 16, 128,
+ 65, 0, 0, 0, 36, 0,
+ 0, 0, 70, 2, 16, 0,
+ 38, 0, 0, 0, 0, 0,
+ 0, 8, 114, 0, 16, 0,
+ 35, 0, 0, 0, 70, 2,
+ 16, 128, 65, 0, 0, 0,
+ 35, 0, 0, 0, 70, 2,
+ 16, 0, 40, 0, 0, 0,
+ 0, 0, 0, 8, 114, 0,
+ 16, 0, 35, 0, 0, 0,
+ 70, 2, 16, 128, 65, 0,
+ 0, 0, 36, 0, 0, 0,
+ 70, 2, 16, 0, 35, 0,
+ 0, 0, 50, 0, 0, 9,
+ 114, 0, 16, 0, 35, 0,
+ 0, 0, 246, 15, 16, 0,
+ 26, 0, 0, 0, 70, 2,
+ 16, 0, 35, 0, 0, 0,
+ 70, 2, 16, 0, 36, 0,
+ 0, 0, 0, 0, 0, 7,
+ 114, 0, 16, 0, 35, 0,
+ 0, 0, 70, 2, 16, 0,
+ 32, 0, 0, 0, 70, 2,
+ 16, 0, 35, 0, 0, 0,
+ 55, 0, 0, 9, 114, 0,
+ 16, 0, 35, 0, 0, 0,
+ 246, 15, 16, 0, 20, 0,
+ 0, 0, 70, 2, 16, 0,
+ 35, 0, 0, 0, 70, 2,
+ 16, 0, 32, 0, 0, 0,
+ 30, 0, 0, 7, 130, 0,
+ 16, 0, 35, 0, 0, 0,
+ 58, 0, 16, 0, 32, 0,
+ 0, 0, 1, 64, 0, 0,
+ 1, 0, 0, 0, 55, 0,
+ 0, 9, 226, 0, 16, 0,
+ 25, 0, 0, 0, 246, 15,
+ 16, 0, 23, 0, 0, 0,
+ 6, 9, 16, 0, 41, 0,
+ 0, 0, 86, 14, 16, 0,
+ 25, 0, 0, 0, 55, 0,
+ 0, 9, 242, 0, 16, 0,
+ 32, 0, 0, 0, 246, 15,
+ 16, 0, 23, 0, 0, 0,
+ 70, 14, 16, 0, 35, 0,
+ 0, 0, 70, 14, 16, 0,
+ 32, 0, 0, 0, 30, 0,
+ 0, 7, 130, 0, 16, 0,
+ 22, 0, 0, 0, 58, 0,
+ 16, 0, 22, 0, 0, 0,
+ 1, 64, 0, 0, 1, 0,
+ 0, 0, 22, 0, 0, 1,
+ 165, 0, 0, 7, 130, 0,
+ 16, 0, 22, 0, 0, 0,
+ 1, 64, 0, 0, 52, 0,
+ 0, 0, 6, 240, 17, 0,
+ 0, 0, 0, 0, 54, 0,
+ 0, 5, 114, 0, 16, 0,
+ 35, 0, 0, 0, 150, 7,
+ 16, 0, 25, 0, 0, 0,
+ 54, 0, 0, 5, 242, 0,
+ 16, 0, 36, 0, 0, 0,
+ 70, 14, 16, 0, 32, 0,
+ 0, 0, 54, 0, 0, 5,
+ 130, 0, 16, 0, 23, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 48, 0,
+ 0, 1, 80, 0, 0, 7,
+ 130, 0, 16, 0, 26, 0,
+ 0, 0, 58, 0, 16, 0,
+ 23, 0, 0, 0, 58, 0,
+ 16, 0, 22, 0, 0, 0,
+ 3, 0, 4, 3, 58, 0,
+ 16, 0, 26, 0, 0, 0,
+ 30, 0, 0, 7, 50, 0,
+ 16, 0, 37, 0, 0, 0,
+ 70, 0, 16, 0, 20, 0,
+ 0, 0, 246, 15, 16, 0,
+ 23, 0, 0, 0, 167, 0,
+ 0, 139, 2, 131, 0, 128,
+ 131, 153, 25, 0, 242, 0,
+ 16, 0, 38, 0, 0, 0,
+ 10, 0, 16, 0, 37, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 70, 126,
+ 16, 0, 7, 0, 0, 0,
+ 167, 0, 0, 139, 2, 131,
+ 0, 128, 131, 153, 25, 0,
+ 242, 0, 16, 0, 37, 0,
+ 0, 0, 26, 0, 16, 0,
+ 37, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 70, 126, 16, 0, 7, 0,
+ 0, 0, 0, 0, 0, 8,
+ 242, 0, 16, 0, 37, 0,
+ 0, 0, 70, 14, 16, 128,
+ 65, 0, 0, 0, 38, 0,
+ 0, 0, 70, 14, 16, 0,
+ 37, 0, 0, 0, 50, 0,
+ 0, 9, 242, 0, 16, 0,
+ 39, 0, 0, 0, 6, 0,
+ 16, 0, 17, 0, 0, 0,
+ 70, 14, 16, 0, 37, 0,
+ 0, 0, 70, 14, 16, 0,
+ 38, 0, 0, 0, 52, 0,
+ 0, 7, 130, 0, 16, 0,
+ 26, 0, 0, 0, 58, 0,
+ 16, 0, 39, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 8,
+ 114, 0, 16, 0, 40, 0,
+ 0, 0, 70, 2, 16, 0,
+ 26, 0, 0, 0, 70, 2,
+ 16, 128, 65, 0, 0, 0,
+ 39, 0, 0, 0, 16, 0,
+ 0, 7, 130, 0, 16, 0,
+ 29, 0, 0, 0, 70, 2,
+ 16, 0, 40, 0, 0, 0,
+ 70, 2, 16, 0, 40, 0,
+ 0, 0, 0, 0, 0, 7,
+ 130, 0, 16, 0, 29, 0,
+ 0, 0, 58, 0, 16, 0,
+ 29, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 52,
+ 68, 0, 0, 5, 130, 0,
+ 16, 0, 29, 0, 0, 0,
+ 58, 0, 16, 0, 29, 0,
+ 0, 0, 56, 0, 0, 7,
+ 130, 0, 16, 0, 30, 0,
+ 0, 0, 58, 0, 16, 0,
+ 26, 0, 0, 0, 58, 0,
+ 16, 0, 29, 0, 0, 0,
+ 49, 0, 0, 7, 130, 0,
+ 16, 0, 30, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 128, 63, 58, 0, 16, 0,
+ 30, 0, 0, 0, 50, 0,
+ 0, 9, 130, 0, 16, 0,
+ 26, 0, 0, 0, 58, 0,
+ 16, 0, 29, 0, 0, 0,
+ 58, 0, 16, 0, 26, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 128, 191, 50, 0,
+ 0, 9, 114, 0, 16, 0,
+ 40, 0, 0, 0, 70, 2,
+ 16, 0, 40, 0, 0, 0,
+ 246, 15, 16, 0, 26, 0,
+ 0, 0, 70, 2, 16, 0,
+ 35, 0, 0, 0, 50, 0,
+ 0, 9, 114, 0, 16, 0,
+ 37, 0, 0, 0, 6, 0,
+ 16, 0, 19, 0, 0, 0,
+ 70, 2, 16, 0, 37, 0,
+ 0, 0, 70, 2, 16, 0,
+ 38, 0, 0, 0, 0, 0,
+ 0, 8, 114, 0, 16, 0,
+ 37, 0, 0, 0, 70, 2,
+ 16, 128, 65, 0, 0, 0,
+ 37, 0, 0, 0, 70, 2,
+ 16, 0, 39, 0, 0, 0,
+ 0, 0, 0, 7, 114, 0,
+ 16, 0, 37, 0, 0, 0,
+ 70, 2, 16, 0, 36, 0,
+ 0, 0, 70, 2, 16, 0,
+ 37, 0, 0, 0, 55, 0,
+ 0, 9, 114, 0, 16, 0,
+ 37, 0, 0, 0, 246, 15,
+ 16, 0, 20, 0, 0, 0,
+ 70, 2, 16, 0, 37, 0,
+ 0, 0, 70, 2, 16, 0,
+ 36, 0, 0, 0, 30, 0,
+ 0, 7, 130, 0, 16, 0,
+ 37, 0, 0, 0, 58, 0,
+ 16, 0, 36, 0, 0, 0,
+ 1, 64, 0, 0, 1, 0,
+ 0, 0, 55, 0, 0, 9,
+ 114, 0, 16, 0, 35, 0,
+ 0, 0, 246, 15, 16, 0,
+ 30, 0, 0, 0, 70, 2,
+ 16, 0, 40, 0, 0, 0,
+ 70, 2, 16, 0, 35, 0,
+ 0, 0, 55, 0, 0, 9,
+ 242, 0, 16, 0, 36, 0,
+ 0, 0, 246, 15, 16, 0,
+ 30, 0, 0, 0, 70, 14,
+ 16, 0, 37, 0, 0, 0,
+ 70, 14, 16, 0, 36, 0,
+ 0, 0, 30, 0, 0, 7,
+ 130, 0, 16, 0, 23, 0,
+ 0, 0, 58, 0, 16, 0,
+ 23, 0, 0, 0, 1, 64,
+ 0, 0, 1, 0, 0, 0,
+ 22, 0, 0, 1, 31, 0,
+ 4, 3, 58, 0, 16, 0,
+ 36, 0, 0, 0, 86, 0,
+ 0, 5, 130, 0, 16, 0,
+ 22, 0, 0, 0, 58, 0,
+ 16, 0, 36, 0, 0, 0,
+ 14, 0, 0, 10, 130, 0,
+ 16, 0, 22, 0, 0, 0,
+ 2, 64, 0, 0, 0, 0,
+ 128, 63, 0, 0, 128, 63,
+ 0, 0, 128, 63, 0, 0,
+ 128, 63, 58, 0, 16, 0,
+ 22, 0, 0, 0, 31, 0,
+ 4, 3, 26, 0, 16, 0,
+ 19, 0, 0, 0, 30, 0,
+ 0, 7, 130, 0, 16, 0,
+ 23, 0, 0, 0, 10, 0,
+ 16, 0, 0, 0, 0, 0,
+ 42, 0, 16, 0, 21, 0,
+ 0, 0, 167, 0, 0, 139,
+ 2, 131, 0, 128, 131, 153,
+ 25, 0, 114, 0, 16, 0,
+ 37, 0, 0, 0, 58, 0,
+ 16, 0, 23, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 70, 226, 17, 0,
+ 0, 0, 0, 0, 165, 0,
+ 0, 7, 130, 0, 16, 0,
+ 26, 0, 0, 0, 1, 64,
+ 0, 0, 80, 0, 0, 0,
+ 6, 240, 17, 0, 0, 0,
+ 0, 0, 16, 0, 0, 7,
+ 130, 0, 16, 0, 29, 0,
+ 0, 0, 70, 2, 16, 0,
+ 35, 0, 0, 0, 70, 2,
+ 16, 0, 35, 0, 0, 0,
+ 0, 0, 0, 7, 130, 0,
+ 16, 0, 30, 0, 0, 0,
+ 58, 0, 16, 0, 29, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 52, 68, 0,
+ 0, 5, 130, 0, 16, 0,
+ 30, 0, 0, 0, 58, 0,
+ 16, 0, 30, 0, 0, 0,
+ 56, 0, 0, 7, 114, 0,
+ 16, 0, 38, 0, 0, 0,
+ 246, 15, 16, 0, 30, 0,
+ 0, 0, 70, 2, 16, 0,
+ 35, 0, 0, 0, 0, 0,
+ 0, 8, 114, 0, 16, 0,
+ 39, 0, 0, 0, 70, 2,
+ 16, 0, 26, 0, 0, 0,
+ 70, 2, 16, 128, 65, 0,
+ 0, 0, 37, 0, 0, 0,
+ 50, 0, 0, 10, 114, 0,
+ 16, 0, 39, 0, 0, 0,
+ 70, 2, 16, 128, 65, 0,
+ 0, 0, 36, 0, 0, 0,
+ 246, 15, 16, 0, 22, 0,
+ 0, 0, 70, 2, 16, 0,
+ 39, 0, 0, 0, 16, 0,
+ 0, 7, 130, 0, 16, 0,
+ 33, 0, 0, 0, 70, 2,
+ 16, 0, 39, 0, 0, 0,
+ 70, 2, 16, 0, 38, 0,
+ 0, 0, 50, 0, 0, 10,
+ 114, 0, 16, 0, 38, 0,
+ 0, 0, 246, 15, 16, 128,
+ 65, 0, 0, 0, 33, 0,
+ 0, 0, 70, 2, 16, 0,
+ 38, 0, 0, 0, 70, 2,
+ 16, 0, 39, 0, 0, 0,
+ 16, 0, 0, 7, 130, 0,
+ 16, 0, 33, 0, 0, 0,
+ 70, 2, 16, 0, 38, 0,
+ 0, 0, 70, 2, 16, 0,
+ 38, 0, 0, 0, 0, 0,
+ 0, 7, 130, 0, 16, 0,
+ 33, 0, 0, 0, 58, 0,
+ 16, 0, 33, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 52, 68, 0, 0, 5,
+ 130, 0, 16, 0, 33, 0,
+ 0, 0, 58, 0, 16, 0,
+ 33, 0, 0, 0, 56, 0,
+ 0, 8, 130, 0, 16, 0,
+ 26, 0, 0, 0, 58, 0,
+ 16, 128, 65, 0, 0, 0,
+ 26, 0, 0, 0, 58, 0,
+ 16, 0, 29, 0, 0, 0,
+ 56, 0, 0, 7, 130, 0,
+ 16, 0, 26, 0, 0, 0,
+ 58, 0, 16, 0, 30, 0,
+ 0, 0, 58, 0, 16, 0,
+ 26, 0, 0, 0, 56, 0,
+ 0, 7, 130, 0, 16, 0,
+ 26, 0, 0, 0, 58, 0,
+ 16, 0, 22, 0, 0, 0,
+ 58, 0, 16, 0, 26, 0,
+ 0, 0, 56, 0, 0, 7,
+ 130, 0, 16, 0, 26, 0,
+ 0, 0, 58, 0, 16, 0,
+ 33, 0, 0, 0, 58, 0,
+ 16, 0, 26, 0, 0, 0,
+ 52, 0, 0, 7, 130, 0,
+ 16, 0, 26, 0, 0, 0,
+ 58, 0, 16, 0, 26, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 128, 191, 50, 0,
+ 0, 10, 114, 0, 16, 0,
+ 37, 0, 0, 0, 70, 2,
+ 16, 128, 65, 0, 0, 0,
+ 38, 0, 0, 0, 246, 15,
+ 16, 0, 26, 0, 0, 0,
+ 70, 2, 16, 0, 37, 0,
+ 0, 0, 168, 0, 0, 9,
+ 114, 224, 17, 0, 0, 0,
+ 0, 0, 58, 0, 16, 0,
+ 23, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 70, 2, 16, 0, 37, 0,
+ 0, 0, 21, 0, 0, 1,
+ 50, 0, 0, 9, 114, 0,
+ 16, 0, 26, 0, 0, 0,
+ 70, 2, 16, 0, 35, 0,
+ 0, 0, 246, 15, 16, 0,
+ 22, 0, 0, 0, 70, 2,
+ 16, 0, 26, 0, 0, 0,
+ 16, 0, 0, 7, 130, 0,
+ 16, 0, 22, 0, 0, 0,
+ 70, 2, 16, 0, 35, 0,
+ 0, 0, 70, 2, 16, 0,
+ 35, 0, 0, 0, 165, 0,
+ 0, 7, 130, 0, 16, 0,
+ 23, 0, 0, 0, 1, 64,
+ 0, 0, 76, 0, 0, 0,
+ 6, 240, 17, 0, 0, 0,
+ 0, 0, 50, 0, 0, 9,
+ 130, 0, 16, 0, 22, 0,
+ 0, 0, 58, 0, 16, 0,
+ 23, 0, 0, 0, 58, 0,
+ 16, 0, 22, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 128, 63, 14, 0, 0, 7,
+ 130, 0, 16, 0, 22, 0,
+ 0, 0, 58, 0, 16, 0,
+ 21, 0, 0, 0, 58, 0,
+ 16, 0, 22, 0, 0, 0,
+ 55, 0, 0, 9, 130, 0,
+ 16, 0, 21, 0, 0, 0,
+ 42, 0, 16, 0, 19, 0,
+ 0, 0, 58, 0, 16, 0,
+ 22, 0, 0, 0, 58, 0,
+ 16, 0, 21, 0, 0, 0,
+ 168, 0, 0, 9, 18, 240,
+ 17, 0, 3, 0, 0, 0,
+ 42, 0, 16, 0, 21, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 10, 0,
+ 16, 0, 26, 0, 0, 0,
+ 168, 0, 0, 9, 18, 240,
+ 17, 0, 3, 0, 0, 0,
+ 10, 0, 16, 0, 28, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 26, 0,
+ 16, 0, 26, 0, 0, 0,
+ 168, 0, 0, 9, 18, 240,
+ 17, 0, 3, 0, 0, 0,
+ 26, 0, 16, 0, 28, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 42, 0,
+ 16, 0, 26, 0, 0, 0,
+ 168, 0, 0, 9, 18, 240,
+ 17, 0, 3, 0, 0, 0,
+ 42, 0, 16, 0, 28, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 58, 0,
+ 16, 0, 21, 0, 0, 0,
+ 21, 0, 0, 1, 54, 0,
+ 0, 5, 66, 0, 16, 0,
+ 21, 0, 0, 0, 58, 0,
+ 16, 0, 28, 0, 0, 0,
+ 22, 0, 0, 1, 21, 0,
+ 0, 1, 165, 0, 0, 7,
+ 18, 0, 16, 0, 17, 0,
+ 0, 0, 1, 64, 0, 0,
+ 84, 0, 0, 0, 6, 240,
+ 17, 0, 0, 0, 0, 0,
+ 165, 0, 0, 7, 18, 0,
+ 16, 0, 19, 0, 0, 0,
+ 1, 64, 0, 0, 72, 0,
+ 0, 0, 6, 240, 17, 0,
+ 1, 0, 0, 0, 51, 0,
+ 0, 7, 18, 0, 16, 0,
+ 17, 0, 0, 0, 10, 0,
+ 16, 0, 17, 0, 0, 0,
+ 10, 0, 16, 0, 19, 0,
+ 0, 0, 49, 0, 0, 7,
+ 18, 0, 16, 0, 17, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 10, 0,
+ 16, 0, 17, 0, 0, 0,
+ 31, 0, 4, 3, 10, 0,
+ 16, 0, 17, 0, 0, 0,
+ 165, 0, 0, 7, 18, 0,
+ 16, 0, 17, 0, 0, 0,
+ 1, 64, 0, 0, 88, 0,
+ 0, 0, 6, 240, 17, 0,
+ 0, 0, 0, 0, 165, 0,
+ 0, 7, 18, 0, 16, 0,
+ 19, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 0, 0,
+ 0, 0, 31, 0, 4, 3,
+ 42, 0, 16, 0, 1, 0,
+ 0, 0, 165, 0, 0, 7,
+ 98, 0, 16, 0, 19, 0,
+ 0, 0, 42, 0, 16, 0,
+ 4, 0, 0, 0, 6, 241,
+ 17, 0, 1, 0, 0, 0,
+ 0, 0, 0, 7, 130, 0,
+ 16, 0, 19, 0, 0, 0,
+ 42, 0, 16, 0, 19, 0,
+ 0, 0, 26, 0, 16, 0,
+ 19, 0, 0, 0, 50, 0,
+ 0, 12, 98, 0, 16, 0,
+ 19, 0, 0, 0, 246, 15,
+ 16, 0, 19, 0, 0, 0,
+ 2, 64, 0, 0, 0, 0,
+ 0, 0, 23, 183, 209, 56,
+ 23, 183, 209, 56, 0, 0,
+ 0, 0, 166, 9, 16, 0,
+ 19, 0, 0, 0, 0, 0,
+ 0, 7, 66, 0, 16, 0,
+ 19, 0, 0, 0, 26, 0,
+ 16, 0, 19, 0, 0, 0,
+ 42, 0, 16, 0, 19, 0,
+ 0, 0, 168, 0, 0, 8,
+ 18, 240, 17, 0, 6, 0,
+ 0, 0, 10, 32, 2, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 42, 0, 16, 0,
+ 19, 0, 0, 0, 18, 0,
+ 0, 1, 54, 0, 0, 8,
+ 98, 0, 16, 0, 19, 0,
+ 0, 0, 2, 64, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 21, 0,
+ 0, 1, 190, 24, 0, 1,
+ 31, 0, 4, 3, 42, 0,
+ 16, 0, 1, 0, 0, 0,
+ 167, 0, 0, 9, 130, 0,
+ 16, 0, 19, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 6, 0, 0, 0,
+ 167, 0, 0, 9, 18, 0,
+ 16, 0, 20, 0, 0, 0,
+ 1, 64, 0, 0, 1, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 6, 0, 0, 0,
+ 49, 0, 0, 7, 130, 0,
+ 16, 0, 19, 0, 0, 0,
+ 10, 0, 16, 0, 20, 0,
+ 0, 0, 58, 0, 16, 0,
+ 19, 0, 0, 0, 1, 0,
+ 0, 7, 18, 0, 16, 0,
+ 20, 0, 0, 0, 58, 0,
+ 16, 0, 19, 0, 0, 0,
+ 1, 64, 0, 0, 1, 0,
+ 0, 0, 167, 0, 0, 9,
+ 18, 0, 16, 0, 20, 0,
+ 0, 0, 10, 0, 16, 0,
+ 20, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 6, 0,
+ 0, 0, 167, 0, 0, 9,
+ 34, 0, 16, 0, 20, 0,
+ 0, 0, 1, 64, 0, 0,
+ 2, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 6, 0,
+ 0, 0, 49, 0, 0, 7,
+ 18, 0, 16, 0, 20, 0,
+ 0, 0, 26, 0, 16, 0,
+ 20, 0, 0, 0, 10, 0,
+ 16, 0, 20, 0, 0, 0,
+ 55, 0, 0, 9, 130, 0,
+ 16, 0, 19, 0, 0, 0,
+ 10, 0, 16, 0, 20, 0,
+ 0, 0, 1, 64, 0, 0,
+ 254, 255, 255, 255, 58, 0,
+ 16, 0, 19, 0, 0, 0,
+ 30, 0, 0, 6, 130, 0,
+ 16, 0, 19, 0, 0, 0,
+ 58, 0, 16, 0, 19, 0,
+ 0, 0, 10, 32, 2, 0,
+ 85, 0, 0, 7, 18, 0,
+ 16, 0, 20, 0, 0, 0,
+ 58, 0, 16, 0, 19, 0,
+ 0, 0, 1, 64, 0, 0,
+ 30, 0, 0, 0, 30, 0,
+ 0, 7, 130, 0, 16, 0,
+ 19, 0, 0, 0, 58, 0,
+ 16, 0, 19, 0, 0, 0,
+ 10, 0, 16, 0, 20, 0,
+ 0, 0, 14, 0, 0, 7,
+ 66, 0, 16, 0, 19, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 254, 66, 42, 0,
+ 16, 0, 19, 0, 0, 0,
+ 165, 0, 0, 7, 18, 0,
+ 16, 0, 20, 0, 0, 0,
+ 1, 64, 0, 0, 84, 0,
+ 0, 0, 6, 240, 17, 0,
+ 0, 0, 0, 0, 14, 0,
+ 0, 10, 18, 0, 16, 0,
+ 20, 0, 0, 0, 2, 64,
+ 0, 0, 0, 0, 128, 63,
+ 0, 0, 128, 63, 0, 0,
+ 128, 63, 0, 0, 128, 63,
+ 10, 0, 16, 0, 20, 0,
+ 0, 0, 51, 0, 0, 7,
+ 66, 0, 16, 0, 19, 0,
+ 0, 0, 42, 0, 16, 0,
+ 19, 0, 0, 0, 10, 0,
+ 16, 0, 20, 0, 0, 0,
+ 41, 0, 0, 7, 130, 0,
+ 16, 0, 19, 0, 0, 0,
+ 58, 0, 16, 0, 19, 0,
+ 0, 0, 1, 64, 0, 0,
+ 2, 0, 0, 0, 30, 0,
+ 0, 10, 50, 0, 16, 0,
+ 20, 0, 0, 0, 246, 15,
+ 16, 0, 19, 0, 0, 0,
+ 2, 64, 0, 0, 12, 0,
+ 0, 0, 24, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 166, 0, 0, 7,
+ 18, 240, 17, 0, 5, 0,
+ 0, 0, 10, 0, 16, 0,
+ 20, 0, 0, 0, 42, 0,
+ 16, 0, 19, 0, 0, 0,
+ 56, 0, 0, 7, 34, 0,
+ 16, 0, 19, 0, 0, 0,
+ 26, 0, 16, 0, 19, 0,
+ 0, 0, 42, 0, 16, 0,
+ 19, 0, 0, 0, 166, 0,
+ 0, 7, 18, 240, 17, 0,
+ 5, 0, 0, 0, 58, 0,
+ 16, 0, 19, 0, 0, 0,
+ 26, 0, 16, 0, 19, 0,
+ 0, 0, 166, 0, 0, 6,
+ 18, 240, 17, 0, 5, 0,
+ 0, 0, 26, 0, 16, 0,
+ 20, 0, 0, 0, 10, 32,
+ 2, 0, 21, 0, 0, 1,
+ 190, 24, 0, 1, 165, 0,
+ 0, 7, 34, 0, 16, 0,
+ 19, 0, 0, 0, 1, 64,
+ 0, 0, 100, 0, 0, 0,
+ 6, 240, 17, 0, 0, 0,
+ 0, 0, 41, 0, 0, 7,
+ 66, 0, 16, 0, 19, 0,
+ 0, 0, 10, 0, 16, 0,
+ 17, 0, 0, 0, 1, 64,
+ 0, 0, 1, 0, 0, 0,
+ 30, 0, 0, 7, 34, 0,
+ 16, 0, 19, 0, 0, 0,
+ 42, 0, 16, 0, 19, 0,
+ 0, 0, 26, 0, 16, 0,
+ 19, 0, 0, 0, 165, 0,
+ 0, 7, 66, 0, 16, 0,
+ 19, 0, 0, 0, 1, 64,
+ 0, 0, 116, 0, 0, 0,
+ 6, 240, 17, 0, 1, 0,
+ 0, 0, 31, 0, 4, 3,
+ 42, 0, 16, 0, 19, 0,
+ 0, 0, 54, 0, 0, 4,
+ 66, 0, 16, 0, 19, 0,
+ 0, 0, 10, 32, 2, 0,
+ 48, 0, 0, 1, 33, 0,
+ 0, 7, 130, 0, 16, 0,
+ 19, 0, 0, 0, 42, 0,
+ 16, 0, 19, 0, 0, 0,
+ 1, 64, 0, 0, 3, 65,
+ 0, 0, 3, 0, 4, 3,
+ 58, 0, 16, 0, 19, 0,
+ 0, 0, 30, 0, 0, 7,
+ 130, 0, 16, 0, 19, 0,
+ 0, 0, 42, 0, 16, 0,
+ 19, 0, 0, 0, 26, 0,
+ 16, 0, 19, 0, 0, 0,
+ 168, 0, 0, 9, 18, 224,
+ 17, 0, 2, 0, 0, 0,
+ 58, 0, 16, 0, 19, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 1, 64,
+ 0, 0, 255, 255, 255, 255,
+ 30, 0, 0, 7, 66, 0,
+ 16, 0, 19, 0, 0, 0,
+ 42, 0, 16, 0, 19, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 4, 0, 0, 22, 0,
+ 0, 1, 21, 0, 0, 1,
+ 165, 0, 0, 7, 194, 0,
+ 16, 0, 19, 0, 0, 0,
+ 1, 64, 0, 0, 16, 0,
+ 0, 0, 6, 244, 17, 0,
+ 5, 0, 0, 0, 165, 0,
+ 0, 7, 50, 0, 16, 0,
+ 20, 0, 0, 0, 1, 64,
+ 0, 0, 4, 0, 0, 0,
+ 70, 240, 17, 0, 5, 0,
+ 0, 0, 165, 0, 0, 7,
+ 194, 0, 16, 0, 20, 0,
+ 0, 0, 1, 64, 0, 0,
+ 28, 0, 0, 0, 6, 244,
+ 17, 0, 5, 0, 0, 0,
+ 165, 0, 0, 7, 18, 0,
+ 16, 0, 21, 0, 0, 0,
+ 1, 64, 0, 0, 92, 0,
+ 0, 0, 6, 240, 17, 0,
+ 0, 0, 0, 0, 39, 0,
+ 0, 7, 34, 0, 16, 0,
+ 21, 0, 0, 0, 10, 0,
+ 16, 0, 21, 0, 0, 0,
+ 1, 64, 0, 0, 255, 255,
+ 255, 255, 165, 0, 0, 7,
+ 66, 0, 16, 0, 21, 0,
+ 0, 0, 1, 64, 0, 0,
+ 100, 0, 0, 0, 6, 240,
+ 17, 0, 0, 0, 0, 0,
+ 54, 0, 0, 4, 130, 0,
+ 16, 0, 21, 0, 0, 0,
+ 10, 32, 2, 0, 48, 0,
+ 0, 1, 33, 0, 0, 7,
+ 130, 0, 16, 0, 22, 0,
+ 0, 0, 58, 0, 16, 0,
+ 21, 0, 0, 0, 10, 0,
+ 16, 0, 17, 0, 0, 0,
+ 3, 0, 4, 3, 58, 0,
+ 16, 0, 22, 0, 0, 0,
+ 30, 0, 0, 7, 130, 0,
+ 16, 0, 22, 0, 0, 0,
+ 58, 0, 16, 0, 21, 0,
+ 0, 0, 10, 0, 16, 0,
+ 21, 0, 0, 0, 167, 0,
+ 0, 139, 2, 35, 0, 128,
+ 131, 153, 25, 0, 130, 0,
+ 16, 0, 22, 0, 0, 0,
+ 58, 0, 16, 0, 22, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 112,
+ 16, 0, 15, 0, 0, 0,
+ 55, 0, 0, 9, 130, 0,
+ 16, 0, 22, 0, 0, 0,
+ 26, 0, 16, 0, 21, 0,
+ 0, 0, 58, 0, 16, 0,
+ 22, 0, 0, 0, 58, 0,
+ 16, 0, 21, 0, 0, 0,
+ 167, 0, 0, 9, 18, 0,
+ 16, 0, 26, 0, 0, 0,
+ 58, 0, 16, 0, 22, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 30, 0, 0, 10, 114, 0,
+ 16, 0, 28, 0, 0, 0,
+ 246, 15, 16, 0, 22, 0,
+ 0, 0, 2, 64, 0, 0,
+ 183, 7, 0, 0, 110, 15,
+ 0, 0, 37, 23, 0, 0,
+ 0, 0, 0, 0, 167, 0,
+ 0, 9, 34, 0, 16, 0,
+ 26, 0, 0, 0, 10, 0,
+ 16, 0, 28, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 167, 0,
+ 0, 9, 66, 0, 16, 0,
+ 26, 0, 0, 0, 26, 0,
+ 16, 0, 28, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 167, 0,
+ 0, 9, 130, 0, 16, 0,
+ 26, 0, 0, 0, 42, 0,
+ 16, 0, 28, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 17, 0,
+ 0, 8, 130, 0, 16, 0,
+ 23, 0, 0, 0, 70, 14,
+ 16, 0, 26, 0, 0, 0,
+ 70, 158, 144, 0, 42, 0,
+ 16, 0, 20, 0, 0, 0,
+ 50, 0, 0, 9, 130, 0,
+ 16, 0, 23, 0, 0, 0,
+ 58, 0, 16, 0, 23, 0,
+ 0, 0, 42, 0, 16, 0,
+ 19, 0, 0, 0, 10, 0,
+ 16, 0, 20, 0, 0, 0,
+ 51, 0, 0, 7, 130, 0,
+ 16, 0, 23, 0, 0, 0,
+ 58, 0, 16, 0, 23, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 255, 66, 52, 0,
+ 0, 7, 130, 0, 16, 0,
+ 23, 0, 0, 0, 58, 0,
+ 16, 0, 23, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 27, 0, 0, 5,
+ 130, 0, 16, 0, 23, 0,
+ 0, 0, 58, 0, 16, 0,
+ 23, 0, 0, 0, 17, 0,
+ 0, 8, 18, 0, 16, 0,
+ 26, 0, 0, 0, 70, 14,
+ 16, 0, 26, 0, 0, 0,
+ 70, 158, 144, 0, 58, 0,
+ 16, 0, 20, 0, 0, 0,
+ 50, 0, 0, 9, 18, 0,
+ 16, 0, 26, 0, 0, 0,
+ 10, 0, 16, 0, 26, 0,
+ 0, 0, 58, 0, 16, 0,
+ 19, 0, 0, 0, 26, 0,
+ 16, 0, 20, 0, 0, 0,
+ 51, 0, 0, 7, 18, 0,
+ 16, 0, 26, 0, 0, 0,
+ 10, 0, 16, 0, 26, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 255, 66, 52, 0,
+ 0, 7, 18, 0, 16, 0,
+ 26, 0, 0, 0, 10, 0,
+ 16, 0, 26, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 27, 0, 0, 5,
+ 18, 0, 16, 0, 26, 0,
+ 0, 0, 10, 0, 16, 0,
+ 26, 0, 0, 0, 30, 0,
+ 0, 7, 34, 0, 16, 0,
+ 26, 0, 0, 0, 58, 0,
+ 16, 0, 21, 0, 0, 0,
+ 42, 0, 16, 0, 21, 0,
+ 0, 0, 41, 0, 0, 7,
+ 18, 0, 16, 0, 26, 0,
+ 0, 0, 10, 0, 16, 0,
+ 26, 0, 0, 0, 1, 64,
+ 0, 0, 23, 0, 0, 0,
+ 35, 0, 0, 9, 130, 0,
+ 16, 0, 23, 0, 0, 0,
+ 58, 0, 16, 0, 23, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 1, 0, 10, 0,
+ 16, 0, 26, 0, 0, 0,
+ 30, 0, 0, 7, 130, 0,
+ 16, 0, 23, 0, 0, 0,
+ 58, 0, 16, 0, 23, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 129, 0, 60, 0,
+ 0, 7, 130, 0, 16, 0,
+ 22, 0, 0, 0, 58, 0,
+ 16, 0, 22, 0, 0, 0,
+ 58, 0, 16, 0, 23, 0,
+ 0, 0, 168, 0, 0, 9,
+ 18, 224, 17, 0, 2, 0,
+ 0, 0, 26, 0, 16, 0,
+ 26, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 58, 0, 16, 0, 22, 0,
+ 0, 0, 30, 0, 0, 7,
+ 130, 0, 16, 0, 21, 0,
+ 0, 0, 58, 0, 16, 0,
+ 21, 0, 0, 0, 1, 64,
+ 0, 0, 0, 4, 0, 0,
+ 22, 0, 0, 1, 190, 24,
+ 0, 1, 165, 0, 0, 7,
+ 50, 0, 16, 0, 21, 0,
+ 0, 0, 1, 64, 0, 0,
+ 96, 0, 0, 0, 70, 240,
+ 17, 0, 0, 0, 0, 0,
+ 30, 0, 0, 7, 66, 0,
+ 16, 0, 21, 0, 0, 0,
+ 10, 0, 16, 0, 17, 0,
+ 0, 0, 26, 0, 16, 0,
+ 21, 0, 0, 0, 54, 0,
+ 0, 4, 18, 0, 16, 0,
+ 26, 0, 0, 0, 10, 32,
+ 2, 0, 48, 0, 0, 1,
+ 33, 0, 0, 7, 130, 0,
+ 16, 0, 21, 0, 0, 0,
+ 10, 0, 16, 0, 26, 0,
+ 0, 0, 10, 0, 16, 0,
+ 19, 0, 0, 0, 3, 0,
+ 4, 3, 58, 0, 16, 0,
+ 21, 0, 0, 0, 30, 0,
+ 0, 7, 130, 0, 16, 0,
+ 21, 0, 0, 0, 10, 0,
+ 16, 0, 21, 0, 0, 0,
+ 10, 0, 16, 0, 26, 0,
+ 0, 0, 167, 0, 0, 9,
+ 18, 0, 16, 0, 28, 0,
+ 0, 0, 10, 0, 16, 0,
+ 26, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 3, 0,
+ 0, 0, 30, 0, 0, 10,
+ 242, 0, 16, 0, 26, 0,
+ 0, 0, 6, 0, 16, 0,
+ 26, 0, 0, 0, 2, 64,
+ 0, 0, 0, 4, 0, 0,
+ 183, 7, 0, 0, 110, 15,
+ 0, 0, 37, 23, 0, 0,
+ 167, 0, 0, 9, 34, 0,
+ 16, 0, 28, 0, 0, 0,
+ 26, 0, 16, 0, 26, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 167, 0, 0, 9, 66, 0,
+ 16, 0, 28, 0, 0, 0,
+ 42, 0, 16, 0, 26, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 167, 0, 0, 9, 130, 0,
+ 16, 0, 28, 0, 0, 0,
+ 58, 0, 16, 0, 26, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 168, 0, 0, 9, 242, 224,
+ 17, 0, 1, 0, 0, 0,
+ 58, 0, 16, 0, 21, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 70, 14,
+ 16, 0, 28, 0, 0, 0,
+ 22, 0, 0, 1, 190, 24,
+ 0, 1, 30, 0, 0, 7,
+ 18, 0, 16, 0, 21, 0,
+ 0, 0, 10, 0, 16, 0,
+ 17, 0, 0, 0, 1, 64,
+ 0, 0, 31, 0, 0, 0,
+ 138, 0, 0, 9, 130, 0,
+ 16, 0, 21, 0, 0, 0,
+ 1, 64, 0, 0, 5, 0,
+ 0, 0, 1, 64, 0, 0,
+ 5, 0, 0, 0, 10, 0,
+ 16, 0, 21, 0, 0, 0,
+ 85, 0, 0, 7, 18, 0,
+ 16, 0, 21, 0, 0, 0,
+ 10, 0, 16, 0, 21, 0,
+ 0, 0, 1, 64, 0, 0,
+ 10, 0, 0, 0, 79, 0,
+ 0, 7, 130, 0, 16, 0,
+ 22, 0, 0, 0, 26, 0,
+ 16, 0, 4, 0, 0, 0,
+ 58, 0, 16, 0, 21, 0,
+ 0, 0, 1, 0, 0, 7,
+ 130, 0, 16, 0, 23, 0,
+ 0, 0, 58, 0, 16, 0,
+ 22, 0, 0, 0, 1, 64,
+ 0, 0, 1, 0, 0, 0,
+ 30, 0, 0, 7, 130, 0,
+ 16, 0, 23, 0, 0, 0,
+ 10, 0, 16, 0, 21, 0,
+ 0, 0, 58, 0, 16, 0,
+ 23, 0, 0, 0, 55, 0,
+ 0, 9, 18, 0, 16, 0,
+ 26, 0, 0, 0, 58, 0,
+ 16, 0, 22, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 1, 64, 0, 0,
+ 1, 0, 0, 0, 38, 0,
+ 0, 8, 0, 208, 0, 0,
+ 18, 0, 16, 0, 26, 0,
+ 0, 0, 58, 0, 16, 0,
+ 21, 0, 0, 0, 10, 0,
+ 16, 0, 26, 0, 0, 0,
+ 35, 0, 0, 9, 18, 0,
+ 16, 0, 26, 0, 0, 0,
+ 26, 0, 16, 0, 4, 0,
+ 0, 0, 58, 0, 16, 0,
+ 23, 0, 0, 0, 10, 0,
+ 16, 0, 26, 0, 0, 0,
+ 41, 0, 0, 7, 34, 0,
+ 16, 0, 26, 0, 0, 0,
+ 10, 0, 16, 0, 26, 0,
+ 0, 0, 1, 64, 0, 0,
+ 5, 0, 0, 0, 41, 0,
+ 0, 7, 130, 0, 16, 0,
+ 23, 0, 0, 0, 58, 0,
+ 16, 0, 23, 0, 0, 0,
+ 1, 64, 0, 0, 5, 0,
+ 0, 0, 30, 0, 0, 7,
+ 130, 0, 16, 0, 23, 0,
+ 0, 0, 58, 0, 16, 0,
+ 23, 0, 0, 0, 26, 0,
+ 16, 0, 26, 0, 0, 0,
+ 84, 0, 0, 7, 130, 0,
+ 16, 0, 23, 0, 0, 0,
+ 10, 0, 16, 0, 17, 0,
+ 0, 0, 58, 0, 16, 0,
+ 23, 0, 0, 0, 140, 0,
+ 0, 10, 18, 0, 16, 0,
+ 26, 0, 0, 0, 1, 64,
+ 0, 0, 27, 0, 0, 0,
+ 1, 64, 0, 0, 5, 0,
+ 0, 0, 10, 0, 16, 0,
+ 26, 0, 0, 0, 10, 32,
+ 2, 0, 54, 0, 0, 5,
+ 34, 0, 16, 0, 26, 0,
+ 0, 0, 1, 64, 0, 0,
+ 16, 0, 0, 0, 54, 0,
+ 0, 5, 194, 0, 16, 0,
+ 26, 0, 0, 0, 86, 9,
+ 16, 0, 21, 0, 0, 0,
+ 48, 0, 0, 1, 80, 0,
+ 0, 7, 18, 0, 16, 0,
+ 28, 0, 0, 0, 26, 0,
+ 16, 0, 26, 0, 0, 0,
+ 1, 64, 0, 0, 32, 0,
+ 0, 0, 3, 0, 4, 3,
+ 10, 0, 16, 0, 28, 0,
+ 0, 0, 54, 0, 0, 8,
+ 50, 0, 16, 0, 28, 0,
+ 0, 0, 2, 64, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 54, 0,
+ 0, 5, 66, 0, 16, 0,
+ 28, 0, 0, 0, 10, 0,
+ 16, 0, 26, 0, 0, 0,
+ 48, 0, 0, 1, 80, 0,
+ 0, 7, 130, 0, 16, 0,
+ 28, 0, 0, 0, 26, 0,
+ 16, 0, 28, 0, 0, 0,
+ 10, 0, 16, 0, 21, 0,
+ 0, 0, 3, 0, 4, 3,
+ 58, 0, 16, 0, 28, 0,
+ 0, 0, 79, 0, 0, 7,
+ 130, 0, 16, 0, 28, 0,
+ 0, 0, 42, 0, 16, 0,
+ 28, 0, 0, 0, 58, 0,
+ 16, 0, 23, 0, 0, 0,
+ 30, 0, 0, 7, 130, 0,
+ 16, 0, 29, 0, 0, 0,
+ 42, 0, 16, 0, 26, 0,
+ 0, 0, 42, 0, 16, 0,
+ 28, 0, 0, 0, 167, 0,
+ 0, 139, 2, 35, 0, 128,
+ 131, 153, 25, 0, 130, 0,
+ 16, 0, 29, 0, 0, 0,
+ 58, 0, 16, 0, 29, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 224,
+ 17, 0, 2, 0, 0, 0,
+ 55, 0, 0, 9, 130, 0,
+ 16, 0, 28, 0, 0, 0,
+ 58, 0, 16, 0, 28, 0,
+ 0, 0, 58, 0, 16, 0,
+ 29, 0, 0, 0, 1, 64,
+ 0, 0, 255, 255, 255, 255,
+ 85, 0, 0, 7, 130, 0,
+ 16, 0, 28, 0, 0, 0,
+ 58, 0, 16, 0, 28, 0,
+ 0, 0, 26, 0, 16, 0,
+ 26, 0, 0, 0, 1, 0,
+ 0, 10, 242, 0, 16, 0,
+ 37, 0, 0, 0, 246, 15,
+ 16, 0, 28, 0, 0, 0,
+ 2, 64, 0, 0, 1, 0,
+ 0, 0, 2, 0, 0, 0,
+ 4, 0, 0, 0, 8, 0,
+ 0, 0, 39, 0, 0, 10,
+ 242, 0, 16, 0, 37, 0,
+ 0, 0, 70, 14, 16, 0,
+ 37, 0, 0, 0, 2, 64,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 140, 0, 0, 16, 242, 0,
+ 16, 0, 37, 0, 0, 0,
+ 2, 64, 0, 0, 1, 0,
+ 0, 0, 1, 0, 0, 0,
+ 1, 0, 0, 0, 1, 0,
+ 0, 0, 6, 32, 2, 0,
+ 70, 14, 16, 0, 37, 0,
+ 0, 0, 2, 64, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 168, 0,
+ 0, 8, 18, 240, 17, 0,
+ 3, 0, 0, 0, 10, 32,
+ 2, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 10, 0,
+ 16, 0, 37, 0, 0, 0,
+ 168, 0, 0, 9, 18, 240,
+ 17, 0, 3, 0, 0, 0,
+ 10, 0, 16, 0, 12, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 26, 0,
+ 16, 0, 37, 0, 0, 0,
+ 168, 0, 0, 9, 18, 240,
+ 17, 0, 3, 0, 0, 0,
+ 26, 0, 16, 0, 12, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 42, 0,
+ 16, 0, 37, 0, 0, 0,
+ 168, 0, 0, 9, 18, 240,
+ 17, 0, 3, 0, 0, 0,
+ 42, 0, 16, 0, 12, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 58, 0,
+ 16, 0, 37, 0, 0, 0,
+ 190, 24, 0, 1, 31, 0,
+ 4, 3, 10, 0, 16, 0,
+ 7, 0, 0, 0, 167, 0,
+ 0, 8, 18, 0, 16, 0,
+ 37, 0, 0, 0, 10, 32,
+ 2, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 167, 0, 0, 9, 34, 0,
+ 16, 0, 37, 0, 0, 0,
+ 10, 0, 16, 0, 12, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 167, 0, 0, 9, 66, 0,
+ 16, 0, 37, 0, 0, 0,
+ 26, 0, 16, 0, 12, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 167, 0, 0, 9, 130, 0,
+ 16, 0, 37, 0, 0, 0,
+ 42, 0, 16, 0, 12, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 167, 0, 0, 9, 18, 0,
+ 16, 0, 38, 0, 0, 0,
+ 10, 0, 16, 0, 3, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 167, 0, 0, 9, 34, 0,
+ 16, 0, 38, 0, 0, 0,
+ 26, 0, 16, 0, 6, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 167, 0, 0, 9, 66, 0,
+ 16, 0, 38, 0, 0, 0,
+ 10, 0, 16, 0, 9, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 167, 0, 0, 9, 130, 0,
+ 16, 0, 38, 0, 0, 0,
+ 42, 0, 16, 0, 6, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 60, 0, 0, 7, 242, 0,
+ 16, 0, 37, 0, 0, 0,
+ 70, 14, 16, 0, 37, 0,
+ 0, 0, 70, 14, 16, 0,
+ 38, 0, 0, 0, 168, 0,
+ 0, 8, 18, 240, 17, 0,
+ 3, 0, 0, 0, 10, 32,
+ 2, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 10, 0,
+ 16, 0, 37, 0, 0, 0,
+ 168, 0, 0, 9, 18, 240,
+ 17, 0, 3, 0, 0, 0,
+ 10, 0, 16, 0, 12, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 26, 0,
+ 16, 0, 37, 0, 0, 0,
+ 168, 0, 0, 9, 18, 240,
+ 17, 0, 3, 0, 0, 0,
+ 26, 0, 16, 0, 12, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 42, 0,
+ 16, 0, 37, 0, 0, 0,
+ 168, 0, 0, 9, 18, 240,
+ 17, 0, 3, 0, 0, 0,
+ 42, 0, 16, 0, 12, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 58, 0,
+ 16, 0, 37, 0, 0, 0,
+ 21, 0, 0, 1, 190, 24,
+ 0, 1, 31, 0, 4, 3,
+ 26, 0, 16, 0, 7, 0,
+ 0, 0, 167, 0, 0, 8,
+ 18, 0, 16, 0, 37, 0,
+ 0, 0, 10, 32, 2, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 167, 0,
+ 0, 9, 34, 0, 16, 0,
+ 37, 0, 0, 0, 10, 0,
+ 16, 0, 12, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 167, 0,
+ 0, 9, 66, 0, 16, 0,
+ 37, 0, 0, 0, 26, 0,
+ 16, 0, 12, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 167, 0,
+ 0, 9, 130, 0, 16, 0,
+ 37, 0, 0, 0, 42, 0,
+ 16, 0, 12, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 167, 0,
+ 0, 9, 18, 0, 16, 0,
+ 38, 0, 0, 0, 26, 0,
+ 16, 0, 3, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 167, 0,
+ 0, 9, 34, 0, 16, 0,
+ 38, 0, 0, 0, 58, 0,
+ 16, 0, 6, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 167, 0,
+ 0, 9, 66, 0, 16, 0,
+ 38, 0, 0, 0, 26, 0,
+ 16, 0, 9, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 167, 0,
+ 0, 9, 130, 0, 16, 0,
+ 38, 0, 0, 0, 10, 0,
+ 16, 0, 8, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 60, 0,
+ 0, 7, 242, 0, 16, 0,
+ 37, 0, 0, 0, 70, 14,
+ 16, 0, 37, 0, 0, 0,
+ 70, 14, 16, 0, 38, 0,
+ 0, 0, 168, 0, 0, 8,
+ 18, 240, 17, 0, 3, 0,
+ 0, 0, 10, 32, 2, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 10, 0, 16, 0,
+ 37, 0, 0, 0, 168, 0,
+ 0, 9, 18, 240, 17, 0,
+ 3, 0, 0, 0, 10, 0,
+ 16, 0, 12, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 26, 0, 16, 0,
+ 37, 0, 0, 0, 168, 0,
+ 0, 9, 18, 240, 17, 0,
+ 3, 0, 0, 0, 26, 0,
+ 16, 0, 12, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 42, 0, 16, 0,
+ 37, 0, 0, 0, 168, 0,
+ 0, 9, 18, 240, 17, 0,
+ 3, 0, 0, 0, 42, 0,
+ 16, 0, 12, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 58, 0, 16, 0,
+ 37, 0, 0, 0, 21, 0,
+ 0, 1, 190, 24, 0, 1,
+ 31, 0, 4, 3, 42, 0,
+ 16, 0, 7, 0, 0, 0,
+ 167, 0, 0, 8, 18, 0,
+ 16, 0, 37, 0, 0, 0,
+ 10, 32, 2, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 3, 0,
+ 0, 0, 167, 0, 0, 9,
+ 34, 0, 16, 0, 37, 0,
+ 0, 0, 10, 0, 16, 0,
+ 12, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 3, 0,
+ 0, 0, 167, 0, 0, 9,
+ 66, 0, 16, 0, 37, 0,
+ 0, 0, 26, 0, 16, 0,
+ 12, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 3, 0,
+ 0, 0, 167, 0, 0, 9,
+ 130, 0, 16, 0, 37, 0,
+ 0, 0, 42, 0, 16, 0,
+ 12, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 3, 0,
+ 0, 0, 167, 0, 0, 9,
+ 18, 0, 16, 0, 38, 0,
+ 0, 0, 42, 0, 16, 0,
+ 3, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 3, 0,
+ 0, 0, 167, 0, 0, 9,
+ 34, 0, 16, 0, 38, 0,
+ 0, 0, 26, 0, 16, 0,
+ 8, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 3, 0,
+ 0, 0, 167, 0, 0, 9,
+ 66, 0, 16, 0, 38, 0,
+ 0, 0, 42, 0, 16, 0,
+ 9, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 3, 0,
+ 0, 0, 167, 0, 0, 9,
+ 130, 0, 16, 0, 38, 0,
+ 0, 0, 42, 0, 16, 0,
+ 8, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 3, 0,
+ 0, 0, 60, 0, 0, 7,
+ 242, 0, 16, 0, 37, 0,
+ 0, 0, 70, 14, 16, 0,
+ 37, 0, 0, 0, 70, 14,
+ 16, 0, 38, 0, 0, 0,
+ 168, 0, 0, 8, 18, 240,
+ 17, 0, 3, 0, 0, 0,
+ 10, 32, 2, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 10, 0, 16, 0, 37, 0,
+ 0, 0, 168, 0, 0, 9,
+ 18, 240, 17, 0, 3, 0,
+ 0, 0, 10, 0, 16, 0,
+ 12, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 26, 0, 16, 0, 37, 0,
+ 0, 0, 168, 0, 0, 9,
+ 18, 240, 17, 0, 3, 0,
+ 0, 0, 26, 0, 16, 0,
+ 12, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 42, 0, 16, 0, 37, 0,
+ 0, 0, 168, 0, 0, 9,
+ 18, 240, 17, 0, 3, 0,
+ 0, 0, 42, 0, 16, 0,
+ 12, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 58, 0, 16, 0, 37, 0,
+ 0, 0, 21, 0, 0, 1,
+ 190, 24, 0, 1, 31, 0,
+ 4, 3, 58, 0, 16, 0,
+ 7, 0, 0, 0, 167, 0,
+ 0, 8, 18, 0, 16, 0,
+ 37, 0, 0, 0, 10, 32,
+ 2, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 167, 0, 0, 9, 34, 0,
+ 16, 0, 37, 0, 0, 0,
+ 10, 0, 16, 0, 12, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 167, 0, 0, 9, 66, 0,
+ 16, 0, 37, 0, 0, 0,
+ 26, 0, 16, 0, 12, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 167, 0, 0, 9, 130, 0,
+ 16, 0, 37, 0, 0, 0,
+ 42, 0, 16, 0, 12, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 167, 0, 0, 9, 18, 0,
+ 16, 0, 38, 0, 0, 0,
+ 58, 0, 16, 0, 3, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 167, 0, 0, 9, 34, 0,
+ 16, 0, 38, 0, 0, 0,
+ 58, 0, 16, 0, 8, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 167, 0, 0, 9, 66, 0,
+ 16, 0, 38, 0, 0, 0,
+ 58, 0, 16, 0, 9, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 167, 0, 0, 9, 130, 0,
+ 16, 0, 38, 0, 0, 0,
+ 10, 0, 16, 0, 13, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 60, 0, 0, 7, 242, 0,
+ 16, 0, 37, 0, 0, 0,
+ 70, 14, 16, 0, 37, 0,
+ 0, 0, 70, 14, 16, 0,
+ 38, 0, 0, 0, 168, 0,
+ 0, 8, 18, 240, 17, 0,
+ 3, 0, 0, 0, 10, 32,
+ 2, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 10, 0,
+ 16, 0, 37, 0, 0, 0,
+ 168, 0, 0, 9, 18, 240,
+ 17, 0, 3, 0, 0, 0,
+ 10, 0, 16, 0, 12, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 26, 0,
+ 16, 0, 37, 0, 0, 0,
+ 168, 0, 0, 9, 18, 240,
+ 17, 0, 3, 0, 0, 0,
+ 26, 0, 16, 0, 12, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 42, 0,
+ 16, 0, 37, 0, 0, 0,
+ 168, 0, 0, 9, 18, 240,
+ 17, 0, 3, 0, 0, 0,
+ 42, 0, 16, 0, 12, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 58, 0,
+ 16, 0, 37, 0, 0, 0,
+ 21, 0, 0, 1, 190, 24,
+ 0, 1, 31, 0, 4, 3,
+ 58, 0, 16, 0, 4, 0,
+ 0, 0, 167, 0, 0, 8,
+ 18, 0, 16, 0, 37, 0,
+ 0, 0, 10, 32, 2, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 167, 0,
+ 0, 9, 34, 0, 16, 0,
+ 37, 0, 0, 0, 10, 0,
+ 16, 0, 12, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 167, 0,
+ 0, 9, 66, 0, 16, 0,
+ 37, 0, 0, 0, 26, 0,
+ 16, 0, 12, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 167, 0,
+ 0, 9, 130, 0, 16, 0,
+ 37, 0, 0, 0, 42, 0,
+ 16, 0, 12, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 167, 0,
+ 0, 9, 18, 0, 16, 0,
+ 38, 0, 0, 0, 10, 0,
+ 16, 0, 6, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 167, 0,
+ 0, 9, 34, 0, 16, 0,
+ 38, 0, 0, 0, 26, 0,
+ 16, 0, 13, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 167, 0,
+ 0, 9, 66, 0, 16, 0,
+ 38, 0, 0, 0, 10, 0,
+ 16, 0, 14, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 167, 0,
+ 0, 9, 130, 0, 16, 0,
+ 38, 0, 0, 0, 42, 0,
+ 16, 0, 13, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 60, 0,
+ 0, 7, 242, 0, 16, 0,
+ 37, 0, 0, 0, 70, 14,
+ 16, 0, 37, 0, 0, 0,
+ 70, 14, 16, 0, 38, 0,
+ 0, 0, 168, 0, 0, 8,
+ 18, 240, 17, 0, 3, 0,
+ 0, 0, 10, 32, 2, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 10, 0, 16, 0,
+ 37, 0, 0, 0, 168, 0,
+ 0, 9, 18, 240, 17, 0,
+ 3, 0, 0, 0, 10, 0,
+ 16, 0, 12, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 26, 0, 16, 0,
+ 37, 0, 0, 0, 168, 0,
+ 0, 9, 18, 240, 17, 0,
+ 3, 0, 0, 0, 26, 0,
+ 16, 0, 12, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 42, 0, 16, 0,
+ 37, 0, 0, 0, 168, 0,
+ 0, 9, 18, 240, 17, 0,
+ 3, 0, 0, 0, 42, 0,
+ 16, 0, 12, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 58, 0, 16, 0,
+ 37, 0, 0, 0, 21, 0,
+ 0, 1, 190, 24, 0, 1,
+ 167, 0, 0, 9, 130, 0,
+ 16, 0, 28, 0, 0, 0,
+ 42, 0, 16, 0, 5, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 167, 0, 0, 9, 130, 0,
+ 16, 0, 29, 0, 0, 0,
+ 10, 0, 16, 0, 10, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 167, 0, 0, 9, 130, 0,
+ 16, 0, 30, 0, 0, 0,
+ 26, 0, 16, 0, 10, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 167, 0, 0, 9, 130, 0,
+ 16, 0, 33, 0, 0, 0,
+ 42, 0, 16, 0, 10, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 87, 0, 0, 7, 130, 0,
+ 16, 0, 28, 0, 0, 0,
+ 42, 0, 16, 0, 2, 0,
+ 0, 0, 58, 0, 16, 0,
+ 28, 0, 0, 0, 87, 0,
+ 0, 7, 130, 0, 16, 0,
+ 29, 0, 0, 0, 58, 0,
+ 16, 0, 2, 0, 0, 0,
+ 58, 0, 16, 0, 29, 0,
+ 0, 0, 1, 0, 0, 7,
+ 130, 0, 16, 0, 28, 0,
+ 0, 0, 58, 0, 16, 0,
+ 28, 0, 0, 0, 58, 0,
+ 16, 0, 29, 0, 0, 0,
+ 87, 0, 0, 7, 130, 0,
+ 16, 0, 29, 0, 0, 0,
+ 10, 0, 16, 0, 5, 0,
+ 0, 0, 58, 0, 16, 0,
+ 30, 0, 0, 0, 1, 0,
+ 0, 7, 130, 0, 16, 0,
+ 28, 0, 0, 0, 58, 0,
+ 16, 0, 28, 0, 0, 0,
+ 58, 0, 16, 0, 29, 0,
+ 0, 0, 87, 0, 0, 7,
+ 130, 0, 16, 0, 29, 0,
+ 0, 0, 26, 0, 16, 0,
+ 5, 0, 0, 0, 58, 0,
+ 16, 0, 33, 0, 0, 0,
+ 1, 0, 0, 7, 130, 0,
+ 16, 0, 28, 0, 0, 0,
+ 58, 0, 16, 0, 28, 0,
+ 0, 0, 58, 0, 16, 0,
+ 29, 0, 0, 0, 134, 0,
+ 0, 5, 130, 0, 16, 0,
+ 28, 0, 0, 0, 58, 0,
+ 16, 0, 28, 0, 0, 0,
+ 30, 0, 0, 7, 18, 0,
+ 16, 0, 28, 0, 0, 0,
+ 58, 0, 16, 0, 28, 0,
+ 0, 0, 10, 0, 16, 0,
+ 28, 0, 0, 0, 30, 0,
+ 0, 10, 98, 0, 16, 0,
+ 28, 0, 0, 0, 86, 6,
+ 16, 0, 28, 0, 0, 0,
+ 2, 64, 0, 0, 0, 0,
+ 0, 0, 1, 0, 0, 0,
+ 32, 0, 0, 0, 0, 0,
+ 0, 0, 22, 0, 0, 1,
+ 31, 0, 4, 3, 58, 0,
+ 16, 0, 21, 0, 0, 0,
+ 30, 0, 0, 7, 34, 0,
+ 16, 0, 28, 0, 0, 0,
+ 42, 0, 16, 0, 26, 0,
+ 0, 0, 42, 0, 16, 0,
+ 28, 0, 0, 0, 167, 0,
+ 0, 139, 2, 35, 0, 128,
+ 131, 153, 25, 0, 34, 0,
+ 16, 0, 28, 0, 0, 0,
+ 26, 0, 16, 0, 28, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 224,
+ 17, 0, 2, 0, 0, 0,
+ 31, 0, 4, 3, 58, 0,
+ 16, 0, 22, 0, 0, 0,
+ 79, 0, 0, 7, 130, 0,
+ 16, 0, 28, 0, 0, 0,
+ 42, 0, 16, 0, 28, 0,
+ 0, 0, 58, 0, 16, 0,
+ 23, 0, 0, 0, 55, 0,
+ 0, 9, 34, 0, 16, 0,
+ 28, 0, 0, 0, 58, 0,
+ 16, 0, 28, 0, 0, 0,
+ 26, 0, 16, 0, 28, 0,
+ 0, 0, 1, 64, 0, 0,
+ 255, 255, 255, 255, 85, 0,
+ 0, 7, 34, 0, 16, 0,
+ 28, 0, 0, 0, 26, 0,
+ 16, 0, 28, 0, 0, 0,
+ 26, 0, 16, 0, 26, 0,
+ 0, 0, 1, 0, 0, 10,
+ 242, 0, 16, 0, 37, 0,
+ 0, 0, 86, 5, 16, 0,
+ 28, 0, 0, 0, 2, 64,
+ 0, 0, 1, 0, 0, 0,
+ 2, 0, 0, 0, 4, 0,
+ 0, 0, 8, 0, 0, 0,
+ 39, 0, 0, 10, 242, 0,
+ 16, 0, 37, 0, 0, 0,
+ 70, 14, 16, 0, 37, 0,
+ 0, 0, 2, 64, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 140, 0,
+ 0, 16, 242, 0, 16, 0,
+ 37, 0, 0, 0, 2, 64,
+ 0, 0, 1, 0, 0, 0,
+ 1, 0, 0, 0, 1, 0,
+ 0, 0, 1, 0, 0, 0,
+ 6, 32, 2, 0, 70, 14,
+ 16, 0, 37, 0, 0, 0,
+ 2, 64, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 168, 0, 0, 8,
+ 18, 240, 17, 0, 3, 0,
+ 0, 0, 10, 32, 2, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 10, 0, 16, 0,
+ 37, 0, 0, 0, 168, 0,
+ 0, 9, 18, 240, 17, 0,
+ 3, 0, 0, 0, 10, 0,
+ 16, 0, 12, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 26, 0, 16, 0,
+ 37, 0, 0, 0, 168, 0,
+ 0, 9, 18, 240, 17, 0,
+ 3, 0, 0, 0, 26, 0,
+ 16, 0, 12, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 42, 0, 16, 0,
+ 37, 0, 0, 0, 168, 0,
+ 0, 9, 18, 240, 17, 0,
+ 3, 0, 0, 0, 42, 0,
+ 16, 0, 12, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 58, 0, 16, 0,
+ 37, 0, 0, 0, 21, 0,
+ 0, 1, 190, 24, 0, 1,
+ 31, 0, 4, 3, 58, 0,
+ 16, 0, 22, 0, 0, 0,
+ 31, 0, 4, 3, 10, 0,
+ 16, 0, 7, 0, 0, 0,
+ 167, 0, 0, 8, 18, 0,
+ 16, 0, 37, 0, 0, 0,
+ 10, 32, 2, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 3, 0,
+ 0, 0, 167, 0, 0, 9,
+ 34, 0, 16, 0, 37, 0,
+ 0, 0, 10, 0, 16, 0,
+ 12, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 3, 0,
+ 0, 0, 167, 0, 0, 9,
+ 66, 0, 16, 0, 37, 0,
+ 0, 0, 26, 0, 16, 0,
+ 12, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 3, 0,
+ 0, 0, 167, 0, 0, 9,
+ 130, 0, 16, 0, 37, 0,
+ 0, 0, 42, 0, 16, 0,
+ 12, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 3, 0,
+ 0, 0, 167, 0, 0, 9,
+ 18, 0, 16, 0, 38, 0,
+ 0, 0, 10, 0, 16, 0,
+ 3, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 3, 0,
+ 0, 0, 167, 0, 0, 9,
+ 34, 0, 16, 0, 38, 0,
+ 0, 0, 26, 0, 16, 0,
+ 6, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 3, 0,
+ 0, 0, 167, 0, 0, 9,
+ 66, 0, 16, 0, 38, 0,
+ 0, 0, 10, 0, 16, 0,
+ 9, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 3, 0,
+ 0, 0, 167, 0, 0, 9,
+ 130, 0, 16, 0, 38, 0,
+ 0, 0, 42, 0, 16, 0,
+ 6, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 3, 0,
+ 0, 0, 60, 0, 0, 7,
+ 242, 0, 16, 0, 37, 0,
+ 0, 0, 70, 14, 16, 0,
+ 37, 0, 0, 0, 70, 14,
+ 16, 0, 38, 0, 0, 0,
+ 168, 0, 0, 8, 18, 240,
+ 17, 0, 3, 0, 0, 0,
+ 10, 32, 2, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 10, 0, 16, 0, 37, 0,
+ 0, 0, 168, 0, 0, 9,
+ 18, 240, 17, 0, 3, 0,
+ 0, 0, 10, 0, 16, 0,
+ 12, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 26, 0, 16, 0, 37, 0,
+ 0, 0, 168, 0, 0, 9,
+ 18, 240, 17, 0, 3, 0,
+ 0, 0, 26, 0, 16, 0,
+ 12, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 42, 0, 16, 0, 37, 0,
+ 0, 0, 168, 0, 0, 9,
+ 18, 240, 17, 0, 3, 0,
+ 0, 0, 42, 0, 16, 0,
+ 12, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 58, 0, 16, 0, 37, 0,
+ 0, 0, 21, 0, 0, 1,
+ 21, 0, 0, 1, 190, 24,
+ 0, 1, 31, 0, 4, 3,
+ 58, 0, 16, 0, 22, 0,
+ 0, 0, 31, 0, 4, 3,
+ 26, 0, 16, 0, 7, 0,
+ 0, 0, 167, 0, 0, 8,
+ 18, 0, 16, 0, 37, 0,
+ 0, 0, 10, 32, 2, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 167, 0,
+ 0, 9, 34, 0, 16, 0,
+ 37, 0, 0, 0, 10, 0,
+ 16, 0, 12, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 167, 0,
+ 0, 9, 66, 0, 16, 0,
+ 37, 0, 0, 0, 26, 0,
+ 16, 0, 12, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 167, 0,
+ 0, 9, 130, 0, 16, 0,
+ 37, 0, 0, 0, 42, 0,
+ 16, 0, 12, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 167, 0,
+ 0, 9, 18, 0, 16, 0,
+ 38, 0, 0, 0, 26, 0,
+ 16, 0, 3, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 167, 0,
+ 0, 9, 34, 0, 16, 0,
+ 38, 0, 0, 0, 58, 0,
+ 16, 0, 6, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 167, 0,
+ 0, 9, 66, 0, 16, 0,
+ 38, 0, 0, 0, 26, 0,
+ 16, 0, 9, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 167, 0,
+ 0, 9, 130, 0, 16, 0,
+ 38, 0, 0, 0, 10, 0,
+ 16, 0, 8, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 60, 0,
+ 0, 7, 242, 0, 16, 0,
+ 37, 0, 0, 0, 70, 14,
+ 16, 0, 37, 0, 0, 0,
+ 70, 14, 16, 0, 38, 0,
+ 0, 0, 168, 0, 0, 8,
+ 18, 240, 17, 0, 3, 0,
+ 0, 0, 10, 32, 2, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 10, 0, 16, 0,
+ 37, 0, 0, 0, 168, 0,
+ 0, 9, 18, 240, 17, 0,
+ 3, 0, 0, 0, 10, 0,
+ 16, 0, 12, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 26, 0, 16, 0,
+ 37, 0, 0, 0, 168, 0,
+ 0, 9, 18, 240, 17, 0,
+ 3, 0, 0, 0, 26, 0,
+ 16, 0, 12, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 42, 0, 16, 0,
+ 37, 0, 0, 0, 168, 0,
+ 0, 9, 18, 240, 17, 0,
+ 3, 0, 0, 0, 42, 0,
+ 16, 0, 12, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 58, 0, 16, 0,
+ 37, 0, 0, 0, 21, 0,
+ 0, 1, 21, 0, 0, 1,
+ 190, 24, 0, 1, 31, 0,
+ 4, 3, 58, 0, 16, 0,
+ 22, 0, 0, 0, 31, 0,
+ 4, 3, 42, 0, 16, 0,
+ 7, 0, 0, 0, 167, 0,
+ 0, 8, 18, 0, 16, 0,
+ 37, 0, 0, 0, 10, 32,
+ 2, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 167, 0, 0, 9, 34, 0,
+ 16, 0, 37, 0, 0, 0,
+ 10, 0, 16, 0, 12, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 167, 0, 0, 9, 66, 0,
+ 16, 0, 37, 0, 0, 0,
+ 26, 0, 16, 0, 12, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 167, 0, 0, 9, 130, 0,
+ 16, 0, 37, 0, 0, 0,
+ 42, 0, 16, 0, 12, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 167, 0, 0, 9, 18, 0,
+ 16, 0, 38, 0, 0, 0,
+ 42, 0, 16, 0, 3, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 167, 0, 0, 9, 34, 0,
+ 16, 0, 38, 0, 0, 0,
+ 26, 0, 16, 0, 8, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 167, 0, 0, 9, 66, 0,
+ 16, 0, 38, 0, 0, 0,
+ 42, 0, 16, 0, 9, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 167, 0, 0, 9, 130, 0,
+ 16, 0, 38, 0, 0, 0,
+ 42, 0, 16, 0, 8, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 60, 0, 0, 7, 242, 0,
+ 16, 0, 37, 0, 0, 0,
+ 70, 14, 16, 0, 37, 0,
+ 0, 0, 70, 14, 16, 0,
+ 38, 0, 0, 0, 168, 0,
+ 0, 8, 18, 240, 17, 0,
+ 3, 0, 0, 0, 10, 32,
+ 2, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 10, 0,
+ 16, 0, 37, 0, 0, 0,
+ 168, 0, 0, 9, 18, 240,
+ 17, 0, 3, 0, 0, 0,
+ 10, 0, 16, 0, 12, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 26, 0,
+ 16, 0, 37, 0, 0, 0,
+ 168, 0, 0, 9, 18, 240,
+ 17, 0, 3, 0, 0, 0,
+ 26, 0, 16, 0, 12, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 42, 0,
+ 16, 0, 37, 0, 0, 0,
+ 168, 0, 0, 9, 18, 240,
+ 17, 0, 3, 0, 0, 0,
+ 42, 0, 16, 0, 12, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 58, 0,
+ 16, 0, 37, 0, 0, 0,
+ 21, 0, 0, 1, 21, 0,
+ 0, 1, 190, 24, 0, 1,
+ 31, 0, 4, 3, 58, 0,
+ 16, 0, 22, 0, 0, 0,
+ 31, 0, 4, 3, 58, 0,
+ 16, 0, 7, 0, 0, 0,
+ 167, 0, 0, 8, 18, 0,
+ 16, 0, 37, 0, 0, 0,
+ 10, 32, 2, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 3, 0,
+ 0, 0, 167, 0, 0, 9,
+ 34, 0, 16, 0, 37, 0,
+ 0, 0, 10, 0, 16, 0,
+ 12, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 3, 0,
+ 0, 0, 167, 0, 0, 9,
+ 66, 0, 16, 0, 37, 0,
+ 0, 0, 26, 0, 16, 0,
+ 12, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 3, 0,
+ 0, 0, 167, 0, 0, 9,
+ 130, 0, 16, 0, 37, 0,
+ 0, 0, 42, 0, 16, 0,
+ 12, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 3, 0,
+ 0, 0, 167, 0, 0, 9,
+ 18, 0, 16, 0, 38, 0,
+ 0, 0, 58, 0, 16, 0,
+ 3, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 3, 0,
+ 0, 0, 167, 0, 0, 9,
+ 34, 0, 16, 0, 38, 0,
+ 0, 0, 58, 0, 16, 0,
+ 8, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 3, 0,
+ 0, 0, 167, 0, 0, 9,
+ 66, 0, 16, 0, 38, 0,
+ 0, 0, 58, 0, 16, 0,
+ 9, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 3, 0,
+ 0, 0, 167, 0, 0, 9,
+ 130, 0, 16, 0, 38, 0,
+ 0, 0, 10, 0, 16, 0,
+ 13, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 3, 0,
+ 0, 0, 60, 0, 0, 7,
+ 242, 0, 16, 0, 37, 0,
+ 0, 0, 70, 14, 16, 0,
+ 37, 0, 0, 0, 70, 14,
+ 16, 0, 38, 0, 0, 0,
+ 168, 0, 0, 8, 18, 240,
+ 17, 0, 3, 0, 0, 0,
+ 10, 32, 2, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 10, 0, 16, 0, 37, 0,
+ 0, 0, 168, 0, 0, 9,
+ 18, 240, 17, 0, 3, 0,
+ 0, 0, 10, 0, 16, 0,
+ 12, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 26, 0, 16, 0, 37, 0,
+ 0, 0, 168, 0, 0, 9,
+ 18, 240, 17, 0, 3, 0,
+ 0, 0, 26, 0, 16, 0,
+ 12, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 42, 0, 16, 0, 37, 0,
+ 0, 0, 168, 0, 0, 9,
+ 18, 240, 17, 0, 3, 0,
+ 0, 0, 42, 0, 16, 0,
+ 12, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 58, 0, 16, 0, 37, 0,
+ 0, 0, 21, 0, 0, 1,
+ 21, 0, 0, 1, 190, 24,
+ 0, 1, 31, 0, 4, 3,
+ 58, 0, 16, 0, 22, 0,
+ 0, 0, 31, 0, 4, 3,
+ 58, 0, 16, 0, 4, 0,
+ 0, 0, 167, 0, 0, 8,
+ 18, 0, 16, 0, 37, 0,
+ 0, 0, 10, 32, 2, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 167, 0,
+ 0, 9, 34, 0, 16, 0,
+ 37, 0, 0, 0, 10, 0,
+ 16, 0, 12, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 167, 0,
+ 0, 9, 66, 0, 16, 0,
+ 37, 0, 0, 0, 26, 0,
+ 16, 0, 12, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 167, 0,
+ 0, 9, 130, 0, 16, 0,
+ 37, 0, 0, 0, 42, 0,
+ 16, 0, 12, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 167, 0,
+ 0, 9, 18, 0, 16, 0,
+ 38, 0, 0, 0, 10, 0,
+ 16, 0, 6, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 167, 0,
+ 0, 9, 34, 0, 16, 0,
+ 38, 0, 0, 0, 26, 0,
+ 16, 0, 13, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 167, 0,
+ 0, 9, 66, 0, 16, 0,
+ 38, 0, 0, 0, 10, 0,
+ 16, 0, 14, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 167, 0,
+ 0, 9, 130, 0, 16, 0,
+ 38, 0, 0, 0, 42, 0,
+ 16, 0, 13, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 60, 0,
+ 0, 7, 242, 0, 16, 0,
+ 37, 0, 0, 0, 70, 14,
+ 16, 0, 37, 0, 0, 0,
+ 70, 14, 16, 0, 38, 0,
+ 0, 0, 168, 0, 0, 8,
+ 18, 240, 17, 0, 3, 0,
+ 0, 0, 10, 32, 2, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 10, 0, 16, 0,
+ 37, 0, 0, 0, 168, 0,
+ 0, 9, 18, 240, 17, 0,
+ 3, 0, 0, 0, 10, 0,
+ 16, 0, 12, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 26, 0, 16, 0,
+ 37, 0, 0, 0, 168, 0,
+ 0, 9, 18, 240, 17, 0,
+ 3, 0, 0, 0, 26, 0,
+ 16, 0, 12, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 42, 0, 16, 0,
+ 37, 0, 0, 0, 168, 0,
+ 0, 9, 18, 240, 17, 0,
+ 3, 0, 0, 0, 42, 0,
+ 16, 0, 12, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 58, 0, 16, 0,
+ 37, 0, 0, 0, 21, 0,
+ 0, 1, 21, 0, 0, 1,
+ 190, 24, 0, 1, 31, 0,
+ 4, 3, 58, 0, 16, 0,
+ 22, 0, 0, 0, 167, 0,
+ 0, 9, 18, 0, 16, 0,
+ 37, 0, 0, 0, 42, 0,
+ 16, 0, 5, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 167, 0,
+ 0, 9, 34, 0, 16, 0,
+ 37, 0, 0, 0, 10, 0,
+ 16, 0, 10, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 167, 0,
+ 0, 9, 66, 0, 16, 0,
+ 37, 0, 0, 0, 26, 0,
+ 16, 0, 10, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 167, 0,
+ 0, 9, 130, 0, 16, 0,
+ 37, 0, 0, 0, 42, 0,
+ 16, 0, 10, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 18, 0,
+ 0, 1, 54, 0, 0, 8,
+ 242, 0, 16, 0, 37, 0,
+ 0, 0, 2, 64, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 21, 0,
+ 0, 1, 87, 0, 0, 7,
+ 162, 0, 16, 0, 28, 0,
+ 0, 0, 166, 14, 16, 0,
+ 2, 0, 0, 0, 6, 4,
+ 16, 0, 37, 0, 0, 0,
+ 1, 0, 0, 7, 34, 0,
+ 16, 0, 28, 0, 0, 0,
+ 58, 0, 16, 0, 28, 0,
+ 0, 0, 26, 0, 16, 0,
+ 28, 0, 0, 0, 87, 0,
+ 0, 7, 50, 0, 16, 0,
+ 37, 0, 0, 0, 70, 0,
+ 16, 0, 5, 0, 0, 0,
+ 230, 10, 16, 0, 37, 0,
+ 0, 0, 1, 0, 0, 7,
+ 34, 0, 16, 0, 28, 0,
+ 0, 0, 26, 0, 16, 0,
+ 28, 0, 0, 0, 10, 0,
+ 16, 0, 37, 0, 0, 0,
+ 1, 0, 0, 7, 34, 0,
+ 16, 0, 28, 0, 0, 0,
+ 26, 0, 16, 0, 37, 0,
+ 0, 0, 26, 0, 16, 0,
+ 28, 0, 0, 0, 134, 0,
+ 0, 5, 34, 0, 16, 0,
+ 28, 0, 0, 0, 26, 0,
+ 16, 0, 28, 0, 0, 0,
+ 1, 0, 0, 7, 34, 0,
+ 16, 0, 28, 0, 0, 0,
+ 58, 0, 16, 0, 22, 0,
+ 0, 0, 26, 0, 16, 0,
+ 28, 0, 0, 0, 30, 0,
+ 0, 7, 18, 0, 16, 0,
+ 28, 0, 0, 0, 26, 0,
+ 16, 0, 28, 0, 0, 0,
+ 10, 0, 16, 0, 28, 0,
+ 0, 0, 21, 0, 0, 1,
+ 31, 0, 4, 3, 10, 0,
+ 16, 0, 7, 0, 0, 0,
+ 168, 0, 0, 9, 18, 240,
+ 17, 0, 3, 0, 0, 0,
+ 10, 0, 16, 0, 11, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 10, 0,
+ 16, 0, 28, 0, 0, 0,
+ 21, 0, 0, 1, 190, 24,
+ 0, 1, 31, 0, 0, 2,
+ 10, 32, 2, 0, 168, 0,
+ 0, 9, 18, 240, 17, 0,
+ 3, 0, 0, 0, 1, 64,
+ 0, 0, 0, 16, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 168, 0,
+ 0, 9, 18, 240, 17, 0,
+ 3, 0, 0, 0, 1, 64,
+ 0, 0, 1, 18, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 21, 0,
+ 0, 1, 31, 0, 4, 3,
+ 58, 0, 16, 0, 1, 0,
+ 0, 0, 31, 0, 4, 3,
+ 10, 0, 16, 0, 16, 0,
+ 0, 0, 167, 0, 0, 9,
+ 34, 0, 16, 0, 28, 0,
+ 0, 0, 26, 0, 16, 0,
+ 14, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 3, 0,
+ 0, 0, 167, 0, 0, 9,
+ 130, 0, 16, 0, 28, 0,
+ 0, 0, 58, 0, 16, 0,
+ 5, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 3, 0,
+ 0, 0, 30, 0, 0, 7,
+ 34, 0, 16, 0, 28, 0,
+ 0, 0, 58, 0, 16, 0,
+ 28, 0, 0, 0, 26, 0,
+ 16, 0, 28, 0, 0, 0,
+ 168, 0, 0, 9, 18, 240,
+ 17, 0, 3, 0, 0, 0,
+ 58, 0, 16, 0, 10, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 26, 0,
+ 16, 0, 28, 0, 0, 0,
+ 18, 0, 0, 1, 167, 0,
+ 0, 9, 34, 0, 16, 0,
+ 28, 0, 0, 0, 1, 64,
+ 0, 0, 1, 16, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 168, 0,
+ 0, 9, 18, 240, 17, 0,
+ 3, 0, 0, 0, 1, 64,
+ 0, 0, 2, 18, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 26, 0, 16, 0,
+ 28, 0, 0, 0, 21, 0,
+ 0, 1, 21, 0, 0, 1,
+ 190, 24, 0, 1, 31, 0,
+ 4, 3, 58, 0, 16, 0,
+ 1, 0, 0, 0, 31, 0,
+ 4, 3, 26, 0, 16, 0,
+ 16, 0, 0, 0, 167, 0,
+ 0, 9, 34, 0, 16, 0,
+ 28, 0, 0, 0, 58, 0,
+ 16, 0, 10, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 167, 0,
+ 0, 9, 130, 0, 16, 0,
+ 28, 0, 0, 0, 58, 0,
+ 16, 0, 11, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 30, 0,
+ 0, 7, 34, 0, 16, 0,
+ 28, 0, 0, 0, 58, 0,
+ 16, 0, 28, 0, 0, 0,
+ 26, 0, 16, 0, 28, 0,
+ 0, 0, 18, 0, 0, 1,
+ 167, 0, 0, 9, 34, 0,
+ 16, 0, 28, 0, 0, 0,
+ 58, 0, 16, 0, 10, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 21, 0, 0, 1, 168, 0,
+ 0, 9, 18, 240, 17, 0,
+ 3, 0, 0, 0, 10, 0,
+ 16, 0, 15, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 26, 0, 16, 0,
+ 28, 0, 0, 0, 21, 0,
+ 0, 1, 190, 24, 0, 1,
+ 31, 0, 4, 3, 58, 0,
+ 16, 0, 1, 0, 0, 0,
+ 31, 0, 4, 3, 42, 0,
+ 16, 0, 16, 0, 0, 0,
+ 167, 0, 0, 9, 34, 0,
+ 16, 0, 28, 0, 0, 0,
+ 26, 0, 16, 0, 14, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 167, 0, 0, 9, 130, 0,
+ 16, 0, 28, 0, 0, 0,
+ 58, 0, 16, 0, 15, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 30, 0, 0, 7, 34, 0,
+ 16, 0, 28, 0, 0, 0,
+ 58, 0, 16, 0, 28, 0,
+ 0, 0, 26, 0, 16, 0,
+ 28, 0, 0, 0, 18, 0,
+ 0, 1, 167, 0, 0, 9,
+ 34, 0, 16, 0, 28, 0,
+ 0, 0, 26, 0, 16, 0,
+ 14, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 3, 0,
+ 0, 0, 21, 0, 0, 1,
+ 168, 0, 0, 9, 18, 240,
+ 17, 0, 3, 0, 0, 0,
+ 42, 0, 16, 0, 15, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 26, 0,
+ 16, 0, 28, 0, 0, 0,
+ 21, 0, 0, 1, 190, 24,
+ 0, 1, 31, 0, 4, 3,
+ 58, 0, 16, 0, 1, 0,
+ 0, 0, 31, 0, 4, 3,
+ 58, 0, 16, 0, 16, 0,
+ 0, 0, 167, 0, 0, 9,
+ 34, 0, 16, 0, 28, 0,
+ 0, 0, 58, 0, 16, 0,
+ 10, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 3, 0,
+ 0, 0, 167, 0, 0, 9,
+ 130, 0, 16, 0, 28, 0,
+ 0, 0, 42, 0, 16, 0,
+ 14, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 3, 0,
+ 0, 0, 30, 0, 0, 7,
+ 34, 0, 16, 0, 28, 0,
+ 0, 0, 58, 0, 16, 0,
+ 28, 0, 0, 0, 26, 0,
+ 16, 0, 28, 0, 0, 0,
+ 18, 0, 0, 1, 167, 0,
+ 0, 9, 34, 0, 16, 0,
+ 28, 0, 0, 0, 58, 0,
+ 16, 0, 10, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 21, 0,
+ 0, 1, 168, 0, 0, 9,
+ 18, 240, 17, 0, 3, 0,
+ 0, 0, 10, 0, 16, 0,
+ 15, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 26, 0, 16, 0, 28, 0,
+ 0, 0, 21, 0, 0, 1,
+ 190, 24, 0, 1, 31, 0,
+ 4, 3, 58, 0, 16, 0,
+ 1, 0, 0, 0, 31, 0,
+ 4, 3, 10, 0, 16, 0,
+ 18, 0, 0, 0, 167, 0,
+ 0, 9, 34, 0, 16, 0,
+ 28, 0, 0, 0, 26, 0,
+ 16, 0, 14, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 167, 0,
+ 0, 9, 130, 0, 16, 0,
+ 28, 0, 0, 0, 26, 0,
+ 16, 0, 17, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 30, 0,
+ 0, 7, 34, 0, 16, 0,
+ 28, 0, 0, 0, 58, 0,
+ 16, 0, 28, 0, 0, 0,
+ 26, 0, 16, 0, 28, 0,
+ 0, 0, 18, 0, 0, 1,
+ 167, 0, 0, 9, 34, 0,
+ 16, 0, 28, 0, 0, 0,
+ 26, 0, 16, 0, 14, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 21, 0, 0, 1, 168, 0,
+ 0, 9, 18, 240, 17, 0,
+ 3, 0, 0, 0, 42, 0,
+ 16, 0, 15, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 26, 0, 16, 0,
+ 28, 0, 0, 0, 21, 0,
+ 0, 1, 190, 24, 0, 1,
+ 31, 0, 4, 3, 58, 0,
+ 16, 0, 1, 0, 0, 0,
+ 31, 0, 4, 3, 26, 0,
+ 16, 0, 18, 0, 0, 0,
+ 167, 0, 0, 9, 34, 0,
+ 16, 0, 28, 0, 0, 0,
+ 58, 0, 16, 0, 10, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 167, 0, 0, 9, 130, 0,
+ 16, 0, 28, 0, 0, 0,
+ 58, 0, 16, 0, 14, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 30, 0, 0, 7, 34, 0,
+ 16, 0, 28, 0, 0, 0,
+ 58, 0, 16, 0, 28, 0,
+ 0, 0, 26, 0, 16, 0,
+ 28, 0, 0, 0, 18, 0,
+ 0, 1, 167, 0, 0, 9,
+ 34, 0, 16, 0, 28, 0,
+ 0, 0, 58, 0, 16, 0,
+ 10, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 3, 0,
+ 0, 0, 21, 0, 0, 1,
+ 168, 0, 0, 9, 18, 240,
+ 17, 0, 3, 0, 0, 0,
+ 10, 0, 16, 0, 15, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 26, 0,
+ 16, 0, 28, 0, 0, 0,
+ 21, 0, 0, 1, 190, 24,
+ 0, 1, 31, 0, 4, 3,
+ 58, 0, 16, 0, 1, 0,
+ 0, 0, 31, 0, 4, 3,
+ 42, 0, 16, 0, 18, 0,
+ 0, 0, 167, 0, 0, 9,
+ 34, 0, 16, 0, 28, 0,
+ 0, 0, 26, 0, 16, 0,
+ 14, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 3, 0,
+ 0, 0, 167, 0, 0, 9,
+ 130, 0, 16, 0, 28, 0,
+ 0, 0, 58, 0, 16, 0,
+ 17, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 3, 0,
+ 0, 0, 30, 0, 0, 7,
+ 34, 0, 16, 0, 28, 0,
+ 0, 0, 58, 0, 16, 0,
+ 28, 0, 0, 0, 26, 0,
+ 16, 0, 28, 0, 0, 0,
+ 18, 0, 0, 1, 167, 0,
+ 0, 9, 34, 0, 16, 0,
+ 28, 0, 0, 0, 26, 0,
+ 16, 0, 14, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 21, 0,
+ 0, 1, 168, 0, 0, 9,
+ 18, 240, 17, 0, 3, 0,
+ 0, 0, 42, 0, 16, 0,
+ 15, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 26, 0, 16, 0, 28, 0,
+ 0, 0, 21, 0, 0, 1,
+ 190, 24, 0, 1, 31, 0,
+ 4, 3, 58, 0, 16, 0,
+ 1, 0, 0, 0, 31, 0,
+ 4, 3, 58, 0, 16, 0,
+ 18, 0, 0, 0, 167, 0,
+ 0, 9, 34, 0, 16, 0,
+ 28, 0, 0, 0, 58, 0,
+ 16, 0, 10, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 167, 0,
+ 0, 9, 130, 0, 16, 0,
+ 28, 0, 0, 0, 58, 0,
+ 16, 0, 12, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 30, 0,
+ 0, 7, 34, 0, 16, 0,
+ 28, 0, 0, 0, 58, 0,
+ 16, 0, 28, 0, 0, 0,
+ 26, 0, 16, 0, 28, 0,
+ 0, 0, 18, 0, 0, 1,
+ 167, 0, 0, 9, 34, 0,
+ 16, 0, 28, 0, 0, 0,
+ 58, 0, 16, 0, 10, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 21, 0, 0, 1, 168, 0,
+ 0, 9, 18, 240, 17, 0,
+ 3, 0, 0, 0, 10, 0,
+ 16, 0, 15, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 26, 0, 16, 0,
+ 28, 0, 0, 0, 21, 0,
+ 0, 1, 190, 24, 0, 1,
+ 31, 0, 4, 3, 58, 0,
+ 16, 0, 1, 0, 0, 0,
+ 31, 0, 4, 3, 58, 0,
+ 16, 0, 13, 0, 0, 0,
+ 167, 0, 0, 9, 34, 0,
+ 16, 0, 28, 0, 0, 0,
+ 26, 0, 16, 0, 14, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 167, 0, 0, 9, 130, 0,
+ 16, 0, 28, 0, 0, 0,
+ 42, 0, 16, 0, 17, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 30, 0, 0, 7, 34, 0,
+ 16, 0, 28, 0, 0, 0,
+ 58, 0, 16, 0, 28, 0,
+ 0, 0, 26, 0, 16, 0,
+ 28, 0, 0, 0, 18, 0,
+ 0, 1, 167, 0, 0, 9,
+ 34, 0, 16, 0, 28, 0,
+ 0, 0, 26, 0, 16, 0,
+ 14, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 3, 0,
+ 0, 0, 21, 0, 0, 1,
+ 168, 0, 0, 9, 18, 240,
+ 17, 0, 3, 0, 0, 0,
+ 42, 0, 16, 0, 15, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 26, 0,
+ 16, 0, 28, 0, 0, 0,
+ 21, 0, 0, 1, 190, 24,
+ 0, 1, 54, 0, 0, 5,
+ 34, 0, 16, 0, 28, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 54, 0,
+ 0, 5, 130, 0, 16, 0,
+ 28, 0, 0, 0, 10, 0,
+ 16, 0, 26, 0, 0, 0,
+ 48, 0, 0, 1, 80, 0,
+ 0, 7, 130, 0, 16, 0,
+ 29, 0, 0, 0, 26, 0,
+ 16, 0, 28, 0, 0, 0,
+ 10, 0, 16, 0, 21, 0,
+ 0, 0, 3, 0, 4, 3,
+ 58, 0, 16, 0, 29, 0,
+ 0, 0, 79, 0, 0, 7,
+ 130, 0, 16, 0, 29, 0,
+ 0, 0, 58, 0, 16, 0,
+ 28, 0, 0, 0, 58, 0,
+ 16, 0, 23, 0, 0, 0,
+ 30, 0, 0, 7, 130, 0,
+ 16, 0, 30, 0, 0, 0,
+ 42, 0, 16, 0, 26, 0,
+ 0, 0, 58, 0, 16, 0,
+ 28, 0, 0, 0, 167, 0,
+ 0, 139, 2, 35, 0, 128,
+ 131, 153, 25, 0, 130, 0,
+ 16, 0, 30, 0, 0, 0,
+ 58, 0, 16, 0, 30, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 224,
+ 17, 0, 2, 0, 0, 0,
+ 55, 0, 0, 9, 130, 0,
+ 16, 0, 30, 0, 0, 0,
+ 58, 0, 16, 0, 29, 0,
+ 0, 0, 58, 0, 16, 0,
+ 30, 0, 0, 0, 1, 64,
+ 0, 0, 255, 255, 255, 255,
+ 85, 0, 0, 7, 130, 0,
+ 16, 0, 33, 0, 0, 0,
+ 58, 0, 16, 0, 30, 0,
+ 0, 0, 26, 0, 16, 0,
+ 26, 0, 0, 0, 1, 0,
+ 0, 10, 242, 0, 16, 0,
+ 37, 0, 0, 0, 246, 15,
+ 16, 0, 33, 0, 0, 0,
+ 2, 64, 0, 0, 1, 0,
+ 0, 0, 2, 0, 0, 0,
+ 4, 0, 0, 0, 8, 0,
+ 0, 0, 39, 0, 0, 10,
+ 242, 0, 16, 0, 38, 0,
+ 0, 0, 70, 14, 16, 0,
+ 37, 0, 0, 0, 2, 64,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 140, 0, 0, 16, 242, 0,
+ 16, 0, 39, 0, 0, 0,
+ 2, 64, 0, 0, 1, 0,
+ 0, 0, 1, 0, 0, 0,
+ 1, 0, 0, 0, 1, 0,
+ 0, 0, 6, 32, 2, 0,
+ 70, 14, 16, 0, 38, 0,
+ 0, 0, 2, 64, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 168, 0,
+ 0, 8, 18, 240, 17, 0,
+ 3, 0, 0, 0, 10, 32,
+ 2, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 10, 0,
+ 16, 0, 39, 0, 0, 0,
+ 168, 0, 0, 9, 18, 240,
+ 17, 0, 3, 0, 0, 0,
+ 10, 0, 16, 0, 12, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 26, 0,
+ 16, 0, 39, 0, 0, 0,
+ 168, 0, 0, 9, 18, 240,
+ 17, 0, 3, 0, 0, 0,
+ 26, 0, 16, 0, 12, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 42, 0,
+ 16, 0, 39, 0, 0, 0,
+ 168, 0, 0, 9, 18, 240,
+ 17, 0, 3, 0, 0, 0,
+ 42, 0, 16, 0, 12, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 58, 0,
+ 16, 0, 39, 0, 0, 0,
+ 190, 24, 0, 1, 31, 0,
+ 4, 3, 10, 0, 16, 0,
+ 7, 0, 0, 0, 167, 0,
+ 0, 8, 18, 0, 16, 0,
+ 39, 0, 0, 0, 10, 32,
+ 2, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 167, 0, 0, 9, 34, 0,
+ 16, 0, 39, 0, 0, 0,
+ 10, 0, 16, 0, 12, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 167, 0, 0, 9, 66, 0,
+ 16, 0, 39, 0, 0, 0,
+ 26, 0, 16, 0, 12, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 167, 0, 0, 9, 130, 0,
+ 16, 0, 39, 0, 0, 0,
+ 42, 0, 16, 0, 12, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 167, 0, 0, 9, 18, 0,
+ 16, 0, 40, 0, 0, 0,
+ 10, 0, 16, 0, 3, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 167, 0, 0, 9, 34, 0,
+ 16, 0, 40, 0, 0, 0,
+ 26, 0, 16, 0, 6, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 167, 0, 0, 9, 66, 0,
+ 16, 0, 40, 0, 0, 0,
+ 10, 0, 16, 0, 9, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 167, 0, 0, 9, 130, 0,
+ 16, 0, 40, 0, 0, 0,
+ 42, 0, 16, 0, 6, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 60, 0, 0, 7, 242, 0,
+ 16, 0, 39, 0, 0, 0,
+ 70, 14, 16, 0, 39, 0,
+ 0, 0, 70, 14, 16, 0,
+ 40, 0, 0, 0, 168, 0,
+ 0, 8, 18, 240, 17, 0,
+ 3, 0, 0, 0, 10, 32,
+ 2, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 10, 0,
+ 16, 0, 39, 0, 0, 0,
+ 168, 0, 0, 9, 18, 240,
+ 17, 0, 3, 0, 0, 0,
+ 10, 0, 16, 0, 12, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 26, 0,
+ 16, 0, 39, 0, 0, 0,
+ 168, 0, 0, 9, 18, 240,
+ 17, 0, 3, 0, 0, 0,
+ 26, 0, 16, 0, 12, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 42, 0,
+ 16, 0, 39, 0, 0, 0,
+ 168, 0, 0, 9, 18, 240,
+ 17, 0, 3, 0, 0, 0,
+ 42, 0, 16, 0, 12, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 58, 0,
+ 16, 0, 39, 0, 0, 0,
+ 21, 0, 0, 1, 190, 24,
+ 0, 1, 31, 0, 4, 3,
+ 26, 0, 16, 0, 7, 0,
+ 0, 0, 167, 0, 0, 8,
+ 18, 0, 16, 0, 39, 0,
+ 0, 0, 10, 32, 2, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 167, 0,
+ 0, 9, 34, 0, 16, 0,
+ 39, 0, 0, 0, 10, 0,
+ 16, 0, 12, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 167, 0,
+ 0, 9, 66, 0, 16, 0,
+ 39, 0, 0, 0, 26, 0,
+ 16, 0, 12, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 167, 0,
+ 0, 9, 130, 0, 16, 0,
+ 39, 0, 0, 0, 42, 0,
+ 16, 0, 12, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 167, 0,
+ 0, 9, 18, 0, 16, 0,
+ 40, 0, 0, 0, 26, 0,
+ 16, 0, 3, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 167, 0,
+ 0, 9, 34, 0, 16, 0,
+ 40, 0, 0, 0, 58, 0,
+ 16, 0, 6, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 167, 0,
+ 0, 9, 66, 0, 16, 0,
+ 40, 0, 0, 0, 26, 0,
+ 16, 0, 9, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 167, 0,
+ 0, 9, 130, 0, 16, 0,
+ 40, 0, 0, 0, 10, 0,
+ 16, 0, 8, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 60, 0,
+ 0, 7, 242, 0, 16, 0,
+ 39, 0, 0, 0, 70, 14,
+ 16, 0, 39, 0, 0, 0,
+ 70, 14, 16, 0, 40, 0,
+ 0, 0, 168, 0, 0, 8,
+ 18, 240, 17, 0, 3, 0,
+ 0, 0, 10, 32, 2, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 10, 0, 16, 0,
+ 39, 0, 0, 0, 168, 0,
+ 0, 9, 18, 240, 17, 0,
+ 3, 0, 0, 0, 10, 0,
+ 16, 0, 12, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 26, 0, 16, 0,
+ 39, 0, 0, 0, 168, 0,
+ 0, 9, 18, 240, 17, 0,
+ 3, 0, 0, 0, 26, 0,
+ 16, 0, 12, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 42, 0, 16, 0,
+ 39, 0, 0, 0, 168, 0,
+ 0, 9, 18, 240, 17, 0,
+ 3, 0, 0, 0, 42, 0,
+ 16, 0, 12, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 58, 0, 16, 0,
+ 39, 0, 0, 0, 21, 0,
+ 0, 1, 190, 24, 0, 1,
+ 31, 0, 4, 3, 42, 0,
+ 16, 0, 7, 0, 0, 0,
+ 167, 0, 0, 8, 18, 0,
+ 16, 0, 39, 0, 0, 0,
+ 10, 32, 2, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 3, 0,
+ 0, 0, 167, 0, 0, 9,
+ 34, 0, 16, 0, 39, 0,
+ 0, 0, 10, 0, 16, 0,
+ 12, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 3, 0,
+ 0, 0, 167, 0, 0, 9,
+ 66, 0, 16, 0, 39, 0,
+ 0, 0, 26, 0, 16, 0,
+ 12, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 3, 0,
+ 0, 0, 167, 0, 0, 9,
+ 130, 0, 16, 0, 39, 0,
+ 0, 0, 42, 0, 16, 0,
+ 12, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 3, 0,
+ 0, 0, 167, 0, 0, 9,
+ 18, 0, 16, 0, 40, 0,
+ 0, 0, 42, 0, 16, 0,
+ 3, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 3, 0,
+ 0, 0, 167, 0, 0, 9,
+ 34, 0, 16, 0, 40, 0,
+ 0, 0, 26, 0, 16, 0,
+ 8, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 3, 0,
+ 0, 0, 167, 0, 0, 9,
+ 66, 0, 16, 0, 40, 0,
+ 0, 0, 42, 0, 16, 0,
+ 9, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 3, 0,
+ 0, 0, 167, 0, 0, 9,
+ 130, 0, 16, 0, 40, 0,
+ 0, 0, 42, 0, 16, 0,
+ 8, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 3, 0,
+ 0, 0, 60, 0, 0, 7,
+ 242, 0, 16, 0, 39, 0,
+ 0, 0, 70, 14, 16, 0,
+ 39, 0, 0, 0, 70, 14,
+ 16, 0, 40, 0, 0, 0,
+ 168, 0, 0, 8, 18, 240,
+ 17, 0, 3, 0, 0, 0,
+ 10, 32, 2, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 10, 0, 16, 0, 39, 0,
+ 0, 0, 168, 0, 0, 9,
+ 18, 240, 17, 0, 3, 0,
+ 0, 0, 10, 0, 16, 0,
+ 12, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 26, 0, 16, 0, 39, 0,
+ 0, 0, 168, 0, 0, 9,
+ 18, 240, 17, 0, 3, 0,
+ 0, 0, 26, 0, 16, 0,
+ 12, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 42, 0, 16, 0, 39, 0,
+ 0, 0, 168, 0, 0, 9,
+ 18, 240, 17, 0, 3, 0,
+ 0, 0, 42, 0, 16, 0,
+ 12, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 58, 0, 16, 0, 39, 0,
+ 0, 0, 21, 0, 0, 1,
+ 190, 24, 0, 1, 31, 0,
+ 4, 3, 58, 0, 16, 0,
+ 7, 0, 0, 0, 167, 0,
+ 0, 8, 18, 0, 16, 0,
+ 39, 0, 0, 0, 10, 32,
+ 2, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 167, 0, 0, 9, 34, 0,
+ 16, 0, 39, 0, 0, 0,
+ 10, 0, 16, 0, 12, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 167, 0, 0, 9, 66, 0,
+ 16, 0, 39, 0, 0, 0,
+ 26, 0, 16, 0, 12, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 167, 0, 0, 9, 130, 0,
+ 16, 0, 39, 0, 0, 0,
+ 42, 0, 16, 0, 12, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 167, 0, 0, 9, 18, 0,
+ 16, 0, 40, 0, 0, 0,
+ 58, 0, 16, 0, 3, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 167, 0, 0, 9, 34, 0,
+ 16, 0, 40, 0, 0, 0,
+ 58, 0, 16, 0, 8, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 167, 0, 0, 9, 66, 0,
+ 16, 0, 40, 0, 0, 0,
+ 58, 0, 16, 0, 9, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 167, 0, 0, 9, 130, 0,
+ 16, 0, 40, 0, 0, 0,
+ 10, 0, 16, 0, 13, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 60, 0, 0, 7, 242, 0,
+ 16, 0, 39, 0, 0, 0,
+ 70, 14, 16, 0, 39, 0,
+ 0, 0, 70, 14, 16, 0,
+ 40, 0, 0, 0, 168, 0,
+ 0, 8, 18, 240, 17, 0,
+ 3, 0, 0, 0, 10, 32,
+ 2, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 10, 0,
+ 16, 0, 39, 0, 0, 0,
+ 168, 0, 0, 9, 18, 240,
+ 17, 0, 3, 0, 0, 0,
+ 10, 0, 16, 0, 12, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 26, 0,
+ 16, 0, 39, 0, 0, 0,
+ 168, 0, 0, 9, 18, 240,
+ 17, 0, 3, 0, 0, 0,
+ 26, 0, 16, 0, 12, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 42, 0,
+ 16, 0, 39, 0, 0, 0,
+ 168, 0, 0, 9, 18, 240,
+ 17, 0, 3, 0, 0, 0,
+ 42, 0, 16, 0, 12, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 58, 0,
+ 16, 0, 39, 0, 0, 0,
+ 21, 0, 0, 1, 190, 24,
+ 0, 1, 31, 0, 4, 3,
+ 58, 0, 16, 0, 4, 0,
+ 0, 0, 167, 0, 0, 8,
+ 18, 0, 16, 0, 39, 0,
+ 0, 0, 10, 32, 2, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 167, 0,
+ 0, 9, 34, 0, 16, 0,
+ 39, 0, 0, 0, 10, 0,
+ 16, 0, 12, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 167, 0,
+ 0, 9, 66, 0, 16, 0,
+ 39, 0, 0, 0, 26, 0,
+ 16, 0, 12, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 167, 0,
+ 0, 9, 130, 0, 16, 0,
+ 39, 0, 0, 0, 42, 0,
+ 16, 0, 12, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 167, 0,
+ 0, 9, 18, 0, 16, 0,
+ 40, 0, 0, 0, 10, 0,
+ 16, 0, 6, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 167, 0,
+ 0, 9, 34, 0, 16, 0,
+ 40, 0, 0, 0, 26, 0,
+ 16, 0, 13, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 167, 0,
+ 0, 9, 66, 0, 16, 0,
+ 40, 0, 0, 0, 10, 0,
+ 16, 0, 14, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 167, 0,
+ 0, 9, 130, 0, 16, 0,
+ 40, 0, 0, 0, 42, 0,
+ 16, 0, 13, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 60, 0,
+ 0, 7, 242, 0, 16, 0,
+ 39, 0, 0, 0, 70, 14,
+ 16, 0, 39, 0, 0, 0,
+ 70, 14, 16, 0, 40, 0,
+ 0, 0, 168, 0, 0, 8,
+ 18, 240, 17, 0, 3, 0,
+ 0, 0, 10, 32, 2, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 10, 0, 16, 0,
+ 39, 0, 0, 0, 168, 0,
+ 0, 9, 18, 240, 17, 0,
+ 3, 0, 0, 0, 10, 0,
+ 16, 0, 12, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 26, 0, 16, 0,
+ 39, 0, 0, 0, 168, 0,
+ 0, 9, 18, 240, 17, 0,
+ 3, 0, 0, 0, 26, 0,
+ 16, 0, 12, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 42, 0, 16, 0,
+ 39, 0, 0, 0, 168, 0,
+ 0, 9, 18, 240, 17, 0,
+ 3, 0, 0, 0, 42, 0,
+ 16, 0, 12, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 58, 0, 16, 0,
+ 39, 0, 0, 0, 21, 0,
+ 0, 1, 190, 24, 0, 1,
+ 167, 0, 0, 9, 130, 0,
+ 16, 0, 35, 0, 0, 0,
+ 42, 0, 16, 0, 5, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 167, 0, 0, 9, 34, 0,
+ 16, 0, 37, 0, 0, 0,
+ 10, 0, 16, 0, 10, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 167, 0, 0, 9, 66, 0,
+ 16, 0, 37, 0, 0, 0,
+ 26, 0, 16, 0, 10, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 167, 0, 0, 9, 130, 0,
+ 16, 0, 37, 0, 0, 0,
+ 42, 0, 16, 0, 10, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 140, 0, 0, 11, 130, 0,
+ 16, 0, 33, 0, 0, 0,
+ 1, 64, 0, 0, 4, 0,
+ 0, 0, 1, 64, 0, 0,
+ 5, 0, 0, 0, 58, 0,
+ 16, 0, 33, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 30, 0, 0, 7,
+ 130, 0, 16, 0, 33, 0,
+ 0, 0, 26, 0, 16, 0,
+ 11, 0, 0, 0, 58, 0,
+ 16, 0, 33, 0, 0, 0,
+ 30, 0, 0, 7, 130, 0,
+ 16, 0, 33, 0, 0, 0,
+ 58, 0, 16, 0, 33, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 16, 0, 0, 167, 0,
+ 0, 9, 130, 0, 16, 0,
+ 33, 0, 0, 0, 58, 0,
+ 16, 0, 33, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 31, 0,
+ 4, 3, 58, 0, 16, 0,
+ 29, 0, 0, 0, 30, 0,
+ 0, 7, 130, 0, 16, 0,
+ 29, 0, 0, 0, 10, 0,
+ 16, 0, 37, 0, 0, 0,
+ 1, 64, 0, 0, 255, 255,
+ 255, 255, 87, 0, 0, 7,
+ 130, 0, 16, 0, 29, 0,
+ 0, 0, 58, 0, 16, 0,
+ 35, 0, 0, 0, 58, 0,
+ 16, 0, 29, 0, 0, 0,
+ 32, 0, 0, 10, 114, 0,
+ 16, 0, 38, 0, 0, 0,
+ 150, 7, 16, 0, 38, 0,
+ 0, 0, 2, 64, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 87, 0,
+ 0, 7, 18, 0, 16, 0,
+ 37, 0, 0, 0, 26, 0,
+ 16, 0, 37, 0, 0, 0,
+ 10, 0, 16, 0, 38, 0,
+ 0, 0, 1, 0, 0, 7,
+ 130, 0, 16, 0, 29, 0,
+ 0, 0, 58, 0, 16, 0,
+ 29, 0, 0, 0, 10, 0,
+ 16, 0, 37, 0, 0, 0,
+ 87, 0, 0, 7, 18, 0,
+ 16, 0, 37, 0, 0, 0,
+ 42, 0, 16, 0, 37, 0,
+ 0, 0, 26, 0, 16, 0,
+ 38, 0, 0, 0, 1, 0,
+ 0, 7, 130, 0, 16, 0,
+ 29, 0, 0, 0, 58, 0,
+ 16, 0, 29, 0, 0, 0,
+ 10, 0, 16, 0, 37, 0,
+ 0, 0, 87, 0, 0, 7,
+ 18, 0, 16, 0, 37, 0,
+ 0, 0, 58, 0, 16, 0,
+ 37, 0, 0, 0, 42, 0,
+ 16, 0, 38, 0, 0, 0,
+ 1, 0, 0, 7, 130, 0,
+ 16, 0, 29, 0, 0, 0,
+ 58, 0, 16, 0, 29, 0,
+ 0, 0, 10, 0, 16, 0,
+ 37, 0, 0, 0, 138, 0,
+ 0, 8, 130, 0, 16, 0,
+ 29, 0, 0, 0, 10, 32,
+ 2, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 58, 0,
+ 16, 0, 29, 0, 0, 0,
+ 134, 0, 0, 5, 130, 0,
+ 16, 0, 29, 0, 0, 0,
+ 58, 0, 16, 0, 29, 0,
+ 0, 0, 30, 0, 0, 7,
+ 130, 0, 16, 0, 29, 0,
+ 0, 0, 58, 0, 16, 0,
+ 29, 0, 0, 0, 58, 0,
+ 16, 0, 33, 0, 0, 0,
+ 30, 0, 0, 7, 130, 0,
+ 16, 0, 29, 0, 0, 0,
+ 58, 0, 16, 0, 26, 0,
+ 0, 0, 58, 0, 16, 0,
+ 29, 0, 0, 0, 168, 0,
+ 0, 9, 18, 224, 17, 0,
+ 2, 0, 0, 0, 58, 0,
+ 16, 0, 29, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 58, 0, 16, 0,
+ 30, 0, 0, 0, 21, 0,
+ 0, 1, 190, 24, 0, 1,
+ 31, 0, 4, 3, 10, 0,
+ 16, 0, 7, 0, 0, 0,
+ 167, 0, 0, 9, 130, 0,
+ 16, 0, 29, 0, 0, 0,
+ 42, 0, 16, 0, 11, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 87, 0, 0, 7, 130, 0,
+ 16, 0, 30, 0, 0, 0,
+ 42, 0, 16, 0, 2, 0,
+ 0, 0, 58, 0, 16, 0,
+ 35, 0, 0, 0, 87, 0,
+ 0, 7, 130, 0, 16, 0,
+ 33, 0, 0, 0, 58, 0,
+ 16, 0, 2, 0, 0, 0,
+ 26, 0, 16, 0, 37, 0,
+ 0, 0, 1, 0, 0, 7,
+ 130, 0, 16, 0, 30, 0,
+ 0, 0, 58, 0, 16, 0,
+ 30, 0, 0, 0, 58, 0,
+ 16, 0, 33, 0, 0, 0,
+ 87, 0, 0, 7, 130, 0,
+ 16, 0, 33, 0, 0, 0,
+ 10, 0, 16, 0, 5, 0,
+ 0, 0, 42, 0, 16, 0,
+ 37, 0, 0, 0, 1, 0,
+ 0, 7, 130, 0, 16, 0,
+ 30, 0, 0, 0, 58, 0,
+ 16, 0, 30, 0, 0, 0,
+ 58, 0, 16, 0, 33, 0,
+ 0, 0, 87, 0, 0, 7,
+ 130, 0, 16, 0, 33, 0,
+ 0, 0, 26, 0, 16, 0,
+ 5, 0, 0, 0, 58, 0,
+ 16, 0, 37, 0, 0, 0,
+ 1, 0, 0, 7, 130, 0,
+ 16, 0, 30, 0, 0, 0,
+ 58, 0, 16, 0, 30, 0,
+ 0, 0, 58, 0, 16, 0,
+ 33, 0, 0, 0, 134, 0,
+ 0, 5, 130, 0, 16, 0,
+ 30, 0, 0, 0, 58, 0,
+ 16, 0, 30, 0, 0, 0,
+ 30, 0, 0, 7, 130, 0,
+ 16, 0, 29, 0, 0, 0,
+ 58, 0, 16, 0, 29, 0,
+ 0, 0, 58, 0, 16, 0,
+ 30, 0, 0, 0, 168, 0,
+ 0, 9, 18, 240, 17, 0,
+ 3, 0, 0, 0, 42, 0,
+ 16, 0, 11, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 58, 0, 16, 0,
+ 29, 0, 0, 0, 21, 0,
+ 0, 1, 30, 0, 0, 10,
+ 162, 0, 16, 0, 28, 0,
+ 0, 0, 86, 13, 16, 0,
+ 28, 0, 0, 0, 2, 64,
+ 0, 0, 0, 0, 0, 0,
+ 1, 0, 0, 0, 0, 0,
+ 0, 0, 32, 0, 0, 0,
+ 22, 0, 0, 1, 31, 0,
+ 4, 3, 58, 0, 16, 0,
+ 21, 0, 0, 0, 79, 0,
+ 0, 7, 34, 0, 16, 0,
+ 28, 0, 0, 0, 58, 0,
+ 16, 0, 28, 0, 0, 0,
+ 58, 0, 16, 0, 23, 0,
+ 0, 0, 30, 0, 0, 7,
+ 130, 0, 16, 0, 29, 0,
+ 0, 0, 42, 0, 16, 0,
+ 26, 0, 0, 0, 58, 0,
+ 16, 0, 28, 0, 0, 0,
+ 167, 0, 0, 139, 2, 35,
+ 0, 128, 131, 153, 25, 0,
+ 130, 0, 16, 0, 29, 0,
+ 0, 0, 58, 0, 16, 0,
+ 29, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 224, 17, 0, 2, 0,
+ 0, 0, 55, 0, 0, 9,
+ 130, 0, 16, 0, 29, 0,
+ 0, 0, 26, 0, 16, 0,
+ 28, 0, 0, 0, 58, 0,
+ 16, 0, 29, 0, 0, 0,
+ 1, 64, 0, 0, 255, 255,
+ 255, 255, 85, 0, 0, 7,
+ 130, 0, 16, 0, 30, 0,
+ 0, 0, 58, 0, 16, 0,
+ 29, 0, 0, 0, 26, 0,
+ 16, 0, 26, 0, 0, 0,
+ 31, 0, 4, 3, 58, 0,
+ 16, 0, 22, 0, 0, 0,
+ 1, 0, 0, 10, 242, 0,
+ 16, 0, 37, 0, 0, 0,
+ 246, 15, 16, 0, 30, 0,
+ 0, 0, 2, 64, 0, 0,
+ 1, 0, 0, 0, 2, 0,
+ 0, 0, 4, 0, 0, 0,
+ 8, 0, 0, 0, 39, 0,
+ 0, 10, 242, 0, 16, 0,
+ 37, 0, 0, 0, 70, 14,
+ 16, 0, 37, 0, 0, 0,
+ 2, 64, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 140, 0, 0, 16,
+ 242, 0, 16, 0, 37, 0,
+ 0, 0, 2, 64, 0, 0,
+ 1, 0, 0, 0, 1, 0,
+ 0, 0, 1, 0, 0, 0,
+ 1, 0, 0, 0, 6, 32,
+ 2, 0, 70, 14, 16, 0,
+ 37, 0, 0, 0, 2, 64,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 168, 0, 0, 8, 18, 240,
+ 17, 0, 3, 0, 0, 0,
+ 10, 32, 2, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 10, 0, 16, 0, 37, 0,
+ 0, 0, 168, 0, 0, 9,
+ 18, 240, 17, 0, 3, 0,
+ 0, 0, 10, 0, 16, 0,
+ 12, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 26, 0, 16, 0, 37, 0,
+ 0, 0, 168, 0, 0, 9,
+ 18, 240, 17, 0, 3, 0,
+ 0, 0, 26, 0, 16, 0,
+ 12, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 42, 0, 16, 0, 37, 0,
+ 0, 0, 168, 0, 0, 9,
+ 18, 240, 17, 0, 3, 0,
+ 0, 0, 42, 0, 16, 0,
+ 12, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 58, 0, 16, 0, 37, 0,
+ 0, 0, 21, 0, 0, 1,
+ 190, 24, 0, 1, 31, 0,
+ 4, 3, 58, 0, 16, 0,
+ 22, 0, 0, 0, 31, 0,
+ 4, 3, 10, 0, 16, 0,
+ 7, 0, 0, 0, 167, 0,
+ 0, 8, 18, 0, 16, 0,
+ 37, 0, 0, 0, 10, 32,
+ 2, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 167, 0, 0, 9, 34, 0,
+ 16, 0, 37, 0, 0, 0,
+ 10, 0, 16, 0, 12, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 167, 0, 0, 9, 66, 0,
+ 16, 0, 37, 0, 0, 0,
+ 26, 0, 16, 0, 12, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 167, 0, 0, 9, 130, 0,
+ 16, 0, 37, 0, 0, 0,
+ 42, 0, 16, 0, 12, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 167, 0, 0, 9, 18, 0,
+ 16, 0, 38, 0, 0, 0,
+ 10, 0, 16, 0, 3, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 167, 0, 0, 9, 34, 0,
+ 16, 0, 38, 0, 0, 0,
+ 26, 0, 16, 0, 6, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 167, 0, 0, 9, 66, 0,
+ 16, 0, 38, 0, 0, 0,
+ 10, 0, 16, 0, 9, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 167, 0, 0, 9, 130, 0,
+ 16, 0, 38, 0, 0, 0,
+ 42, 0, 16, 0, 6, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 60, 0, 0, 7, 242, 0,
+ 16, 0, 37, 0, 0, 0,
+ 70, 14, 16, 0, 37, 0,
+ 0, 0, 70, 14, 16, 0,
+ 38, 0, 0, 0, 168, 0,
+ 0, 8, 18, 240, 17, 0,
+ 3, 0, 0, 0, 10, 32,
+ 2, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 10, 0,
+ 16, 0, 37, 0, 0, 0,
+ 168, 0, 0, 9, 18, 240,
+ 17, 0, 3, 0, 0, 0,
+ 10, 0, 16, 0, 12, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 26, 0,
+ 16, 0, 37, 0, 0, 0,
+ 168, 0, 0, 9, 18, 240,
+ 17, 0, 3, 0, 0, 0,
+ 26, 0, 16, 0, 12, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 42, 0,
+ 16, 0, 37, 0, 0, 0,
+ 168, 0, 0, 9, 18, 240,
+ 17, 0, 3, 0, 0, 0,
+ 42, 0, 16, 0, 12, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 58, 0,
+ 16, 0, 37, 0, 0, 0,
+ 21, 0, 0, 1, 21, 0,
+ 0, 1, 190, 24, 0, 1,
+ 31, 0, 4, 3, 58, 0,
+ 16, 0, 22, 0, 0, 0,
+ 31, 0, 4, 3, 26, 0,
+ 16, 0, 7, 0, 0, 0,
+ 167, 0, 0, 8, 18, 0,
+ 16, 0, 37, 0, 0, 0,
+ 10, 32, 2, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 3, 0,
+ 0, 0, 167, 0, 0, 9,
+ 34, 0, 16, 0, 37, 0,
+ 0, 0, 10, 0, 16, 0,
+ 12, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 3, 0,
+ 0, 0, 167, 0, 0, 9,
+ 66, 0, 16, 0, 37, 0,
+ 0, 0, 26, 0, 16, 0,
+ 12, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 3, 0,
+ 0, 0, 167, 0, 0, 9,
+ 130, 0, 16, 0, 37, 0,
+ 0, 0, 42, 0, 16, 0,
+ 12, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 3, 0,
+ 0, 0, 167, 0, 0, 9,
+ 18, 0, 16, 0, 38, 0,
+ 0, 0, 26, 0, 16, 0,
+ 3, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 3, 0,
+ 0, 0, 167, 0, 0, 9,
+ 34, 0, 16, 0, 38, 0,
+ 0, 0, 58, 0, 16, 0,
+ 6, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 3, 0,
+ 0, 0, 167, 0, 0, 9,
+ 66, 0, 16, 0, 38, 0,
+ 0, 0, 26, 0, 16, 0,
+ 9, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 3, 0,
+ 0, 0, 167, 0, 0, 9,
+ 130, 0, 16, 0, 38, 0,
+ 0, 0, 10, 0, 16, 0,
+ 8, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 3, 0,
+ 0, 0, 60, 0, 0, 7,
+ 242, 0, 16, 0, 37, 0,
+ 0, 0, 70, 14, 16, 0,
+ 37, 0, 0, 0, 70, 14,
+ 16, 0, 38, 0, 0, 0,
+ 168, 0, 0, 8, 18, 240,
+ 17, 0, 3, 0, 0, 0,
+ 10, 32, 2, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 10, 0, 16, 0, 37, 0,
+ 0, 0, 168, 0, 0, 9,
+ 18, 240, 17, 0, 3, 0,
+ 0, 0, 10, 0, 16, 0,
+ 12, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 26, 0, 16, 0, 37, 0,
+ 0, 0, 168, 0, 0, 9,
+ 18, 240, 17, 0, 3, 0,
+ 0, 0, 26, 0, 16, 0,
+ 12, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 42, 0, 16, 0, 37, 0,
+ 0, 0, 168, 0, 0, 9,
+ 18, 240, 17, 0, 3, 0,
+ 0, 0, 42, 0, 16, 0,
+ 12, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 58, 0, 16, 0, 37, 0,
+ 0, 0, 21, 0, 0, 1,
+ 21, 0, 0, 1, 190, 24,
+ 0, 1, 31, 0, 4, 3,
+ 58, 0, 16, 0, 22, 0,
+ 0, 0, 31, 0, 4, 3,
+ 42, 0, 16, 0, 7, 0,
+ 0, 0, 167, 0, 0, 8,
+ 18, 0, 16, 0, 37, 0,
+ 0, 0, 10, 32, 2, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 167, 0,
+ 0, 9, 34, 0, 16, 0,
+ 37, 0, 0, 0, 10, 0,
+ 16, 0, 12, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 167, 0,
+ 0, 9, 66, 0, 16, 0,
+ 37, 0, 0, 0, 26, 0,
+ 16, 0, 12, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 167, 0,
+ 0, 9, 130, 0, 16, 0,
+ 37, 0, 0, 0, 42, 0,
+ 16, 0, 12, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 167, 0,
+ 0, 9, 18, 0, 16, 0,
+ 38, 0, 0, 0, 42, 0,
+ 16, 0, 3, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 167, 0,
+ 0, 9, 34, 0, 16, 0,
+ 38, 0, 0, 0, 26, 0,
+ 16, 0, 8, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 167, 0,
+ 0, 9, 66, 0, 16, 0,
+ 38, 0, 0, 0, 42, 0,
+ 16, 0, 9, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 167, 0,
+ 0, 9, 130, 0, 16, 0,
+ 38, 0, 0, 0, 42, 0,
+ 16, 0, 8, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 60, 0,
+ 0, 7, 242, 0, 16, 0,
+ 37, 0, 0, 0, 70, 14,
+ 16, 0, 37, 0, 0, 0,
+ 70, 14, 16, 0, 38, 0,
+ 0, 0, 168, 0, 0, 8,
+ 18, 240, 17, 0, 3, 0,
+ 0, 0, 10, 32, 2, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 10, 0, 16, 0,
+ 37, 0, 0, 0, 168, 0,
+ 0, 9, 18, 240, 17, 0,
+ 3, 0, 0, 0, 10, 0,
+ 16, 0, 12, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 26, 0, 16, 0,
+ 37, 0, 0, 0, 168, 0,
+ 0, 9, 18, 240, 17, 0,
+ 3, 0, 0, 0, 26, 0,
+ 16, 0, 12, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 42, 0, 16, 0,
+ 37, 0, 0, 0, 168, 0,
+ 0, 9, 18, 240, 17, 0,
+ 3, 0, 0, 0, 42, 0,
+ 16, 0, 12, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 58, 0, 16, 0,
+ 37, 0, 0, 0, 21, 0,
+ 0, 1, 21, 0, 0, 1,
+ 190, 24, 0, 1, 31, 0,
+ 4, 3, 58, 0, 16, 0,
+ 22, 0, 0, 0, 31, 0,
+ 4, 3, 58, 0, 16, 0,
+ 7, 0, 0, 0, 167, 0,
+ 0, 8, 18, 0, 16, 0,
+ 37, 0, 0, 0, 10, 32,
+ 2, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 167, 0, 0, 9, 34, 0,
+ 16, 0, 37, 0, 0, 0,
+ 10, 0, 16, 0, 12, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 167, 0, 0, 9, 66, 0,
+ 16, 0, 37, 0, 0, 0,
+ 26, 0, 16, 0, 12, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 167, 0, 0, 9, 130, 0,
+ 16, 0, 37, 0, 0, 0,
+ 42, 0, 16, 0, 12, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 167, 0, 0, 9, 18, 0,
+ 16, 0, 38, 0, 0, 0,
+ 58, 0, 16, 0, 3, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 167, 0, 0, 9, 34, 0,
+ 16, 0, 38, 0, 0, 0,
+ 58, 0, 16, 0, 8, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 167, 0, 0, 9, 66, 0,
+ 16, 0, 38, 0, 0, 0,
+ 58, 0, 16, 0, 9, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 167, 0, 0, 9, 130, 0,
+ 16, 0, 38, 0, 0, 0,
+ 10, 0, 16, 0, 13, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 60, 0, 0, 7, 242, 0,
+ 16, 0, 37, 0, 0, 0,
+ 70, 14, 16, 0, 37, 0,
+ 0, 0, 70, 14, 16, 0,
+ 38, 0, 0, 0, 168, 0,
+ 0, 8, 18, 240, 17, 0,
+ 3, 0, 0, 0, 10, 32,
+ 2, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 10, 0,
+ 16, 0, 37, 0, 0, 0,
+ 168, 0, 0, 9, 18, 240,
+ 17, 0, 3, 0, 0, 0,
+ 10, 0, 16, 0, 12, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 26, 0,
+ 16, 0, 37, 0, 0, 0,
+ 168, 0, 0, 9, 18, 240,
+ 17, 0, 3, 0, 0, 0,
+ 26, 0, 16, 0, 12, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 42, 0,
+ 16, 0, 37, 0, 0, 0,
+ 168, 0, 0, 9, 18, 240,
+ 17, 0, 3, 0, 0, 0,
+ 42, 0, 16, 0, 12, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 58, 0,
+ 16, 0, 37, 0, 0, 0,
+ 21, 0, 0, 1, 21, 0,
+ 0, 1, 190, 24, 0, 1,
+ 31, 0, 4, 3, 58, 0,
+ 16, 0, 22, 0, 0, 0,
+ 31, 0, 4, 3, 58, 0,
+ 16, 0, 4, 0, 0, 0,
+ 167, 0, 0, 8, 18, 0,
+ 16, 0, 37, 0, 0, 0,
+ 10, 32, 2, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 3, 0,
+ 0, 0, 167, 0, 0, 9,
+ 34, 0, 16, 0, 37, 0,
+ 0, 0, 10, 0, 16, 0,
+ 12, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 3, 0,
+ 0, 0, 167, 0, 0, 9,
+ 66, 0, 16, 0, 37, 0,
+ 0, 0, 26, 0, 16, 0,
+ 12, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 3, 0,
+ 0, 0, 167, 0, 0, 9,
+ 130, 0, 16, 0, 37, 0,
+ 0, 0, 42, 0, 16, 0,
+ 12, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 3, 0,
+ 0, 0, 167, 0, 0, 9,
+ 18, 0, 16, 0, 38, 0,
+ 0, 0, 10, 0, 16, 0,
+ 6, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 3, 0,
+ 0, 0, 167, 0, 0, 9,
+ 34, 0, 16, 0, 38, 0,
+ 0, 0, 26, 0, 16, 0,
+ 13, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 3, 0,
+ 0, 0, 167, 0, 0, 9,
+ 66, 0, 16, 0, 38, 0,
+ 0, 0, 10, 0, 16, 0,
+ 14, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 3, 0,
+ 0, 0, 167, 0, 0, 9,
+ 130, 0, 16, 0, 38, 0,
+ 0, 0, 42, 0, 16, 0,
+ 13, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 3, 0,
+ 0, 0, 60, 0, 0, 7,
+ 242, 0, 16, 0, 37, 0,
+ 0, 0, 70, 14, 16, 0,
+ 37, 0, 0, 0, 70, 14,
+ 16, 0, 38, 0, 0, 0,
+ 168, 0, 0, 8, 18, 240,
+ 17, 0, 3, 0, 0, 0,
+ 10, 32, 2, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 10, 0, 16, 0, 37, 0,
+ 0, 0, 168, 0, 0, 9,
+ 18, 240, 17, 0, 3, 0,
+ 0, 0, 10, 0, 16, 0,
+ 12, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 26, 0, 16, 0, 37, 0,
+ 0, 0, 168, 0, 0, 9,
+ 18, 240, 17, 0, 3, 0,
+ 0, 0, 26, 0, 16, 0,
+ 12, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 42, 0, 16, 0, 37, 0,
+ 0, 0, 168, 0, 0, 9,
+ 18, 240, 17, 0, 3, 0,
+ 0, 0, 42, 0, 16, 0,
+ 12, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 58, 0, 16, 0, 37, 0,
+ 0, 0, 21, 0, 0, 1,
+ 21, 0, 0, 1, 190, 24,
+ 0, 1, 31, 0, 4, 3,
+ 58, 0, 16, 0, 22, 0,
+ 0, 0, 167, 0, 0, 9,
+ 18, 0, 16, 0, 37, 0,
+ 0, 0, 42, 0, 16, 0,
+ 5, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 3, 0,
+ 0, 0, 167, 0, 0, 9,
+ 34, 0, 16, 0, 37, 0,
+ 0, 0, 10, 0, 16, 0,
+ 10, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 3, 0,
+ 0, 0, 167, 0, 0, 9,
+ 66, 0, 16, 0, 37, 0,
+ 0, 0, 26, 0, 16, 0,
+ 10, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 3, 0,
+ 0, 0, 167, 0, 0, 9,
+ 130, 0, 16, 0, 37, 0,
+ 0, 0, 42, 0, 16, 0,
+ 10, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 3, 0,
+ 0, 0, 1, 0, 0, 10,
+ 242, 0, 16, 0, 38, 0,
+ 0, 0, 246, 15, 16, 0,
+ 30, 0, 0, 0, 2, 64,
+ 0, 0, 1, 0, 0, 0,
+ 2, 0, 0, 0, 4, 0,
+ 0, 0, 8, 0, 0, 0,
+ 30, 0, 0, 7, 130, 0,
+ 16, 0, 33, 0, 0, 0,
+ 10, 0, 16, 0, 38, 0,
+ 0, 0, 1, 64, 0, 0,
+ 255, 255, 255, 255, 87, 0,
+ 0, 7, 130, 0, 16, 0,
+ 33, 0, 0, 0, 10, 0,
+ 16, 0, 37, 0, 0, 0,
+ 58, 0, 16, 0, 33, 0,
+ 0, 0, 39, 0, 0, 10,
+ 114, 0, 16, 0, 38, 0,
+ 0, 0, 150, 7, 16, 0,
+ 38, 0, 0, 0, 2, 64,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 32, 0, 0, 10, 114, 0,
+ 16, 0, 38, 0, 0, 0,
+ 70, 2, 16, 0, 38, 0,
+ 0, 0, 2, 64, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 87, 0,
+ 0, 7, 114, 0, 16, 0,
+ 38, 0, 0, 0, 150, 7,
+ 16, 0, 37, 0, 0, 0,
+ 70, 2, 16, 0, 38, 0,
+ 0, 0, 1, 0, 0, 7,
+ 130, 0, 16, 0, 33, 0,
+ 0, 0, 58, 0, 16, 0,
+ 33, 0, 0, 0, 10, 0,
+ 16, 0, 38, 0, 0, 0,
+ 1, 0, 0, 7, 130, 0,
+ 16, 0, 33, 0, 0, 0,
+ 26, 0, 16, 0, 38, 0,
+ 0, 0, 58, 0, 16, 0,
+ 33, 0, 0, 0, 1, 0,
+ 0, 7, 130, 0, 16, 0,
+ 33, 0, 0, 0, 42, 0,
+ 16, 0, 38, 0, 0, 0,
+ 58, 0, 16, 0, 33, 0,
+ 0, 0, 140, 0, 0, 11,
+ 130, 0, 16, 0, 30, 0,
+ 0, 0, 1, 64, 0, 0,
+ 4, 0, 0, 0, 1, 64,
+ 0, 0, 5, 0, 0, 0,
+ 58, 0, 16, 0, 30, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 30, 0,
+ 0, 7, 130, 0, 16, 0,
+ 30, 0, 0, 0, 26, 0,
+ 16, 0, 11, 0, 0, 0,
+ 58, 0, 16, 0, 30, 0,
+ 0, 0, 30, 0, 0, 7,
+ 130, 0, 16, 0, 30, 0,
+ 0, 0, 58, 0, 16, 0,
+ 30, 0, 0, 0, 1, 64,
+ 0, 0, 0, 16, 0, 0,
+ 167, 0, 0, 9, 130, 0,
+ 16, 0, 30, 0, 0, 0,
+ 58, 0, 16, 0, 30, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 138, 0, 0, 8, 130, 0,
+ 16, 0, 33, 0, 0, 0,
+ 10, 32, 2, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 58, 0, 16, 0, 33, 0,
+ 0, 0, 134, 0, 0, 5,
+ 130, 0, 16, 0, 33, 0,
+ 0, 0, 58, 0, 16, 0,
+ 33, 0, 0, 0, 30, 0,
+ 0, 7, 130, 0, 16, 0,
+ 30, 0, 0, 0, 58, 0,
+ 16, 0, 30, 0, 0, 0,
+ 58, 0, 16, 0, 33, 0,
+ 0, 0, 18, 0, 0, 1,
+ 54, 0, 0, 8, 242, 0,
+ 16, 0, 37, 0, 0, 0,
+ 2, 64, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 54, 0, 0, 5,
+ 130, 0, 16, 0, 30, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 21, 0,
+ 0, 1, 31, 0, 4, 3,
+ 26, 0, 16, 0, 28, 0,
+ 0, 0, 30, 0, 0, 7,
+ 34, 0, 16, 0, 28, 0,
+ 0, 0, 58, 0, 16, 0,
+ 26, 0, 0, 0, 58, 0,
+ 16, 0, 30, 0, 0, 0,
+ 168, 0, 0, 9, 18, 224,
+ 17, 0, 2, 0, 0, 0,
+ 26, 0, 16, 0, 28, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 58, 0,
+ 16, 0, 29, 0, 0, 0,
+ 21, 0, 0, 1, 190, 24,
+ 0, 1, 31, 0, 4, 3,
+ 58, 0, 16, 0, 22, 0,
+ 0, 0, 31, 0, 4, 3,
+ 10, 0, 16, 0, 7, 0,
+ 0, 0, 167, 0, 0, 9,
+ 34, 0, 16, 0, 28, 0,
+ 0, 0, 42, 0, 16, 0,
+ 11, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 3, 0,
+ 0, 0, 87, 0, 0, 7,
+ 130, 0, 16, 0, 29, 0,
+ 0, 0, 42, 0, 16, 0,
+ 2, 0, 0, 0, 10, 0,
+ 16, 0, 37, 0, 0, 0,
+ 87, 0, 0, 7, 130, 0,
+ 16, 0, 30, 0, 0, 0,
+ 58, 0, 16, 0, 2, 0,
+ 0, 0, 26, 0, 16, 0,
+ 37, 0, 0, 0, 1, 0,
+ 0, 7, 130, 0, 16, 0,
+ 29, 0, 0, 0, 58, 0,
+ 16, 0, 29, 0, 0, 0,
+ 58, 0, 16, 0, 30, 0,
+ 0, 0, 87, 0, 0, 7,
+ 50, 0, 16, 0, 37, 0,
+ 0, 0, 70, 0, 16, 0,
+ 5, 0, 0, 0, 230, 10,
+ 16, 0, 37, 0, 0, 0,
+ 1, 0, 0, 7, 130, 0,
+ 16, 0, 29, 0, 0, 0,
+ 58, 0, 16, 0, 29, 0,
+ 0, 0, 10, 0, 16, 0,
+ 37, 0, 0, 0, 1, 0,
+ 0, 7, 130, 0, 16, 0,
+ 29, 0, 0, 0, 26, 0,
+ 16, 0, 37, 0, 0, 0,
+ 58, 0, 16, 0, 29, 0,
+ 0, 0, 134, 0, 0, 5,
+ 130, 0, 16, 0, 29, 0,
+ 0, 0, 58, 0, 16, 0,
+ 29, 0, 0, 0, 30, 0,
+ 0, 7, 34, 0, 16, 0,
+ 28, 0, 0, 0, 26, 0,
+ 16, 0, 28, 0, 0, 0,
+ 58, 0, 16, 0, 29, 0,
+ 0, 0, 168, 0, 0, 9,
+ 18, 240, 17, 0, 3, 0,
+ 0, 0, 42, 0, 16, 0,
+ 11, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 26, 0, 16, 0, 28, 0,
+ 0, 0, 21, 0, 0, 1,
+ 21, 0, 0, 1, 21, 0,
+ 0, 1, 30, 0, 0, 7,
+ 34, 0, 16, 0, 26, 0,
+ 0, 0, 26, 0, 16, 0,
+ 26, 0, 0, 0, 1, 64,
+ 0, 0, 4, 0, 0, 0,
+ 54, 0, 0, 5, 34, 0,
+ 16, 0, 28, 0, 0, 0,
+ 58, 0, 16, 0, 26, 0,
+ 0, 0, 54, 0, 0, 5,
+ 130, 0, 16, 0, 26, 0,
+ 0, 0, 42, 0, 16, 0,
+ 26, 0, 0, 0, 54, 0,
+ 0, 5, 66, 0, 16, 0,
+ 26, 0, 0, 0, 26, 0,
+ 16, 0, 28, 0, 0, 0,
+ 22, 0, 0, 1, 54, 0,
+ 0, 4, 18, 0, 16, 0,
+ 21, 0, 0, 0, 10, 32,
+ 2, 0, 48, 0, 0, 1,
+ 33, 0, 0, 7, 66, 0,
+ 16, 0, 21, 0, 0, 0,
+ 10, 0, 16, 0, 21, 0,
+ 0, 0, 10, 0, 16, 0,
+ 17, 0, 0, 0, 3, 0,
+ 4, 3, 42, 0, 16, 0,
+ 21, 0, 0, 0, 30, 0,
+ 0, 7, 66, 0, 16, 0,
+ 21, 0, 0, 0, 10, 0,
+ 16, 0, 21, 0, 0, 0,
+ 26, 0, 16, 0, 21, 0,
+ 0, 0, 167, 0, 0, 139,
+ 2, 35, 0, 128, 131, 153,
+ 25, 0, 130, 0, 16, 0,
+ 21, 0, 0, 0, 42, 0,
+ 16, 0, 21, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 224, 17, 0,
+ 2, 0, 0, 0, 30, 0,
+ 0, 7, 66, 0, 16, 0,
+ 21, 0, 0, 0, 42, 0,
+ 16, 0, 21, 0, 0, 0,
+ 1, 64, 0, 0, 255, 255,
+ 255, 255, 167, 0, 0, 139,
+ 2, 35, 0, 128, 131, 153,
+ 25, 0, 66, 0, 16, 0,
+ 21, 0, 0, 0, 42, 0,
+ 16, 0, 21, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 224, 17, 0,
+ 2, 0, 0, 0, 42, 0,
+ 0, 10, 194, 0, 16, 0,
+ 21, 0, 0, 0, 166, 14,
+ 16, 0, 21, 0, 0, 0,
+ 2, 64, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 16, 0, 0, 0, 16, 0,
+ 0, 0, 30, 0, 0, 7,
+ 130, 0, 16, 0, 22, 0,
+ 0, 0, 58, 0, 16, 0,
+ 21, 0, 0, 0, 1, 64,
+ 0, 0, 255, 255, 255, 255,
+ 55, 0, 0, 9, 66, 0,
+ 16, 0, 21, 0, 0, 0,
+ 10, 0, 16, 0, 21, 0,
+ 0, 0, 42, 0, 16, 0,
+ 21, 0, 0, 0, 58, 0,
+ 16, 0, 22, 0, 0, 0,
+ 39, 0, 0, 7, 130, 0,
+ 16, 0, 22, 0, 0, 0,
+ 42, 0, 16, 0, 21, 0,
+ 0, 0, 58, 0, 16, 0,
+ 21, 0, 0, 0, 31, 0,
+ 4, 3, 58, 0, 16, 0,
+ 22, 0, 0, 0, 30, 0,
+ 0, 7, 194, 0, 16, 0,
+ 21, 0, 0, 0, 86, 5,
+ 16, 0, 19, 0, 0, 0,
+ 166, 14, 16, 0, 21, 0,
+ 0, 0, 168, 0, 0, 9,
+ 18, 224, 17, 0, 2, 0,
+ 0, 0, 58, 0, 16, 0,
+ 21, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 10, 0, 16, 0, 21, 0,
+ 0, 0, 30, 0, 0, 7,
+ 66, 0, 16, 0, 21, 0,
+ 0, 0, 42, 0, 16, 0,
+ 21, 0, 0, 0, 1, 64,
+ 0, 0, 1, 0, 0, 0,
+ 168, 0, 0, 9, 18, 224,
+ 17, 0, 2, 0, 0, 0,
+ 42, 0, 16, 0, 21, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 10, 0,
+ 16, 0, 21, 0, 0, 0,
+ 21, 0, 0, 1, 30, 0,
+ 0, 7, 18, 0, 16, 0,
+ 21, 0, 0, 0, 10, 0,
+ 16, 0, 21, 0, 0, 0,
+ 1, 64, 0, 0, 0, 4,
+ 0, 0, 22, 0, 0, 1,
+ 165, 0, 0, 7, 50, 0,
+ 16, 0, 21, 0, 0, 0,
+ 1, 64, 0, 0, 96, 0,
+ 0, 0, 70, 240, 17, 0,
+ 0, 0, 0, 0, 54, 0,
+ 0, 4, 18, 0, 16, 0,
+ 26, 0, 0, 0, 10, 32,
+ 2, 0, 48, 0, 0, 1,
+ 33, 0, 0, 7, 66, 0,
+ 16, 0, 21, 0, 0, 0,
+ 10, 0, 16, 0, 26, 0,
+ 0, 0, 10, 0, 16, 0,
+ 17, 0, 0, 0, 3, 0,
+ 4, 3, 42, 0, 16, 0,
+ 21, 0, 0, 0, 30, 0,
+ 0, 7, 66, 0, 16, 0,
+ 21, 0, 0, 0, 26, 0,
+ 16, 0, 21, 0, 0, 0,
+ 10, 0, 16, 0, 26, 0,
+ 0, 0, 167, 0, 0, 139,
+ 2, 35, 0, 128, 131, 153,
+ 25, 0, 66, 0, 16, 0,
+ 21, 0, 0, 0, 42, 0,
+ 16, 0, 21, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 224, 17, 0,
+ 2, 0, 0, 0, 1, 0,
+ 0, 7, 66, 0, 16, 0,
+ 21, 0, 0, 0, 42, 0,
+ 16, 0, 21, 0, 0, 0,
+ 1, 64, 0, 0, 255, 255,
+ 0, 0, 30, 0, 0, 7,
+ 66, 0, 16, 0, 21, 0,
+ 0, 0, 42, 0, 16, 0,
+ 21, 0, 0, 0, 10, 0,
+ 16, 0, 21, 0, 0, 0,
+ 167, 0, 0, 139, 2, 131,
+ 0, 128, 131, 153, 25, 0,
+ 242, 0, 16, 0, 37, 0,
+ 0, 0, 42, 0, 16, 0,
+ 21, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 70, 238, 17, 0, 1, 0,
+ 0, 0, 168, 0, 0, 9,
+ 18, 240, 17, 0, 3, 0,
+ 0, 0, 10, 0, 16, 0,
+ 26, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 10, 0, 16, 0, 37, 0,
+ 0, 0, 30, 0, 0, 10,
+ 242, 0, 16, 0, 26, 0,
+ 0, 0, 6, 0, 16, 0,
+ 26, 0, 0, 0, 2, 64,
+ 0, 0, 0, 4, 0, 0,
+ 183, 7, 0, 0, 110, 15,
+ 0, 0, 37, 23, 0, 0,
+ 168, 0, 0, 9, 18, 240,
+ 17, 0, 3, 0, 0, 0,
+ 26, 0, 16, 0, 26, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 26, 0,
+ 16, 0, 37, 0, 0, 0,
+ 168, 0, 0, 9, 18, 240,
+ 17, 0, 3, 0, 0, 0,
+ 42, 0, 16, 0, 26, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 42, 0,
+ 16, 0, 37, 0, 0, 0,
+ 168, 0, 0, 9, 18, 240,
+ 17, 0, 3, 0, 0, 0,
+ 58, 0, 16, 0, 26, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 58, 0,
+ 16, 0, 37, 0, 0, 0,
+ 22, 0, 0, 1, 190, 24,
+ 0, 1, 165, 0, 0, 7,
+ 18, 0, 16, 0, 21, 0,
+ 0, 0, 1, 64, 0, 0,
+ 84, 0, 0, 0, 6, 240,
+ 17, 0, 0, 0, 0, 0,
+ 56, 0, 0, 7, 34, 0,
+ 16, 0, 21, 0, 0, 0,
+ 10, 0, 16, 0, 21, 0,
+ 0, 0, 10, 0, 16, 0,
+ 21, 0, 0, 0, 165, 0,
+ 0, 7, 66, 0, 16, 0,
+ 21, 0, 0, 0, 1, 64,
+ 0, 0, 100, 0, 0, 0,
+ 6, 240, 17, 0, 0, 0,
+ 0, 0, 165, 0, 0, 7,
+ 130, 0, 16, 0, 21, 0,
+ 0, 0, 1, 64, 0, 0,
+ 112, 0, 0, 0, 6, 240,
+ 17, 0, 1, 0, 0, 0,
+ 54, 0, 0, 4, 130, 0,
+ 16, 0, 22, 0, 0, 0,
+ 10, 32, 2, 0, 48, 0,
+ 0, 1, 33, 0, 0, 7,
+ 130, 0, 16, 0, 23, 0,
+ 0, 0, 58, 0, 16, 0,
+ 22, 0, 0, 0, 10, 0,
+ 16, 0, 17, 0, 0, 0,
+ 3, 0, 4, 3, 58, 0,
+ 16, 0, 23, 0, 0, 0,
+ 30, 0, 0, 7, 130, 0,
+ 16, 0, 23, 0, 0, 0,
+ 42, 0, 16, 0, 21, 0,
+ 0, 0, 58, 0, 16, 0,
+ 22, 0, 0, 0, 167, 0,
+ 0, 139, 2, 35, 0, 128,
+ 131, 153, 25, 0, 130, 0,
+ 16, 0, 23, 0, 0, 0,
+ 58, 0, 16, 0, 23, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 224,
+ 17, 0, 2, 0, 0, 0,
+ 1, 0, 0, 7, 130, 0,
+ 16, 0, 23, 0, 0, 0,
+ 58, 0, 16, 0, 23, 0,
+ 0, 0, 1, 64, 0, 0,
+ 255, 255, 0, 0, 167, 0,
+ 0, 9, 18, 0, 16, 0,
+ 26, 0, 0, 0, 58, 0,
+ 16, 0, 22, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 30, 0,
+ 0, 10, 242, 0, 16, 0,
+ 37, 0, 0, 0, 246, 15,
+ 16, 0, 22, 0, 0, 0,
+ 2, 64, 0, 0, 183, 7,
+ 0, 0, 110, 15, 0, 0,
+ 37, 23, 0, 0, 0, 4,
+ 0, 0, 167, 0, 0, 9,
+ 34, 0, 16, 0, 26, 0,
+ 0, 0, 10, 0, 16, 0,
+ 37, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 3, 0,
+ 0, 0, 167, 0, 0, 9,
+ 66, 0, 16, 0, 26, 0,
+ 0, 0, 26, 0, 16, 0,
+ 37, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 3, 0,
+ 0, 0, 167, 0, 0, 9,
+ 130, 0, 16, 0, 26, 0,
+ 0, 0, 42, 0, 16, 0,
+ 37, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 3, 0,
+ 0, 0, 30, 0, 0, 7,
+ 34, 0, 16, 0, 28, 0,
+ 0, 0, 58, 0, 16, 0,
+ 21, 0, 0, 0, 58, 0,
+ 16, 0, 23, 0, 0, 0,
+ 167, 0, 0, 139, 2, 131,
+ 0, 128, 131, 153, 25, 0,
+ 114, 0, 16, 0, 37, 0,
+ 0, 0, 26, 0, 16, 0,
+ 28, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 70, 114, 16, 0, 14, 0,
+ 0, 0, 17, 0, 0, 8,
+ 34, 0, 16, 0, 28, 0,
+ 0, 0, 70, 14, 16, 0,
+ 26, 0, 0, 0, 70, 158,
+ 144, 0, 42, 0, 16, 0,
+ 20, 0, 0, 0, 50, 0,
+ 0, 9, 34, 0, 16, 0,
+ 28, 0, 0, 0, 26, 0,
+ 16, 0, 28, 0, 0, 0,
+ 42, 0, 16, 0, 19, 0,
+ 0, 0, 10, 0, 16, 0,
+ 20, 0, 0, 0, 51, 0,
+ 0, 7, 34, 0, 16, 0,
+ 28, 0, 0, 0, 26, 0,
+ 16, 0, 28, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 255, 66, 52, 0, 0, 7,
+ 34, 0, 16, 0, 28, 0,
+ 0, 0, 26, 0, 16, 0,
+ 28, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 27, 0, 0, 5, 34, 0,
+ 16, 0, 28, 0, 0, 0,
+ 26, 0, 16, 0, 28, 0,
+ 0, 0, 17, 0, 0, 8,
+ 130, 0, 16, 0, 29, 0,
+ 0, 0, 70, 14, 16, 0,
+ 26, 0, 0, 0, 70, 158,
+ 144, 0, 58, 0, 16, 0,
+ 20, 0, 0, 0, 50, 0,
+ 0, 9, 130, 0, 16, 0,
+ 29, 0, 0, 0, 58, 0,
+ 16, 0, 29, 0, 0, 0,
+ 58, 0, 16, 0, 19, 0,
+ 0, 0, 26, 0, 16, 0,
+ 20, 0, 0, 0, 51, 0,
+ 0, 7, 130, 0, 16, 0,
+ 29, 0, 0, 0, 58, 0,
+ 16, 0, 29, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 255, 66, 52, 0, 0, 7,
+ 130, 0, 16, 0, 29, 0,
+ 0, 0, 58, 0, 16, 0,
+ 29, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 27, 0, 0, 5, 130, 0,
+ 16, 0, 29, 0, 0, 0,
+ 58, 0, 16, 0, 29, 0,
+ 0, 0, 35, 0, 0, 9,
+ 34, 0, 16, 0, 28, 0,
+ 0, 0, 58, 0, 16, 0,
+ 29, 0, 0, 0, 1, 64,
+ 0, 0, 128, 0, 0, 0,
+ 26, 0, 16, 0, 28, 0,
+ 0, 0, 30, 0, 0, 7,
+ 130, 0, 16, 0, 29, 0,
+ 0, 0, 26, 0, 16, 0,
+ 28, 0, 0, 0, 1, 64,
+ 0, 0, 0, 1, 0, 0,
+ 0, 0, 0, 7, 130, 0,
+ 16, 0, 30, 0, 0, 0,
+ 58, 0, 16, 0, 26, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 52, 54, 0,
+ 0, 8, 114, 0, 16, 0,
+ 38, 0, 0, 0, 2, 64,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 54, 0, 0, 5, 130, 0,
+ 16, 0, 33, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 52, 54, 0, 0, 5,
+ 130, 0, 16, 0, 35, 0,
+ 0, 0, 26, 0, 16, 0,
+ 28, 0, 0, 0, 48, 0,
+ 0, 1, 34, 0, 0, 7,
+ 130, 0, 16, 0, 38, 0,
+ 0, 0, 58, 0, 16, 0,
+ 29, 0, 0, 0, 58, 0,
+ 16, 0, 35, 0, 0, 0,
+ 3, 0, 4, 3, 58, 0,
+ 16, 0, 38, 0, 0, 0,
+ 30, 0, 0, 7, 130, 0,
+ 16, 0, 38, 0, 0, 0,
+ 26, 0, 16, 0, 19, 0,
+ 0, 0, 58, 0, 16, 0,
+ 35, 0, 0, 0, 167, 0,
+ 0, 139, 2, 35, 0, 128,
+ 131, 153, 25, 0, 18, 0,
+ 16, 0, 39, 0, 0, 0,
+ 58, 0, 16, 0, 38, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 224,
+ 17, 0, 2, 0, 0, 0,
+ 30, 0, 0, 10, 226, 0,
+ 16, 0, 39, 0, 0, 0,
+ 246, 15, 16, 0, 38, 0,
+ 0, 0, 2, 64, 0, 0,
+ 0, 0, 0, 0, 1, 0,
+ 0, 0, 2, 0, 0, 0,
+ 3, 0, 0, 0, 167, 0,
+ 0, 139, 2, 35, 0, 128,
+ 131, 153, 25, 0, 130, 0,
+ 16, 0, 38, 0, 0, 0,
+ 26, 0, 16, 0, 39, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 224,
+ 17, 0, 2, 0, 0, 0,
+ 167, 0, 0, 139, 2, 35,
+ 0, 128, 131, 153, 25, 0,
+ 34, 0, 16, 0, 39, 0,
+ 0, 0, 42, 0, 16, 0,
+ 39, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 224, 17, 0, 2, 0,
+ 0, 0, 167, 0, 0, 139,
+ 2, 35, 0, 128, 131, 153,
+ 25, 0, 66, 0, 16, 0,
+ 39, 0, 0, 0, 58, 0,
+ 16, 0, 39, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 224, 17, 0,
+ 2, 0, 0, 0, 84, 0,
+ 0, 7, 18, 0, 16, 0,
+ 39, 0, 0, 0, 58, 0,
+ 16, 0, 38, 0, 0, 0,
+ 10, 0, 16, 0, 39, 0,
+ 0, 0, 84, 0, 0, 7,
+ 18, 0, 16, 0, 39, 0,
+ 0, 0, 26, 0, 16, 0,
+ 39, 0, 0, 0, 10, 0,
+ 16, 0, 39, 0, 0, 0,
+ 36, 0, 0, 7, 130, 0,
+ 16, 0, 38, 0, 0, 0,
+ 58, 0, 16, 0, 38, 0,
+ 0, 0, 26, 0, 16, 0,
+ 39, 0, 0, 0, 36, 0,
+ 0, 7, 130, 0, 16, 0,
+ 38, 0, 0, 0, 42, 0,
+ 16, 0, 39, 0, 0, 0,
+ 58, 0, 16, 0, 38, 0,
+ 0, 0, 36, 0, 0, 7,
+ 130, 0, 16, 0, 38, 0,
+ 0, 0, 58, 0, 16, 0,
+ 38, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 54, 0, 0, 5, 226, 0,
+ 16, 0, 39, 0, 0, 0,
+ 6, 9, 16, 0, 38, 0,
+ 0, 0, 54, 0, 0, 5,
+ 18, 0, 16, 0, 40, 0,
+ 0, 0, 58, 0, 16, 0,
+ 33, 0, 0, 0, 54, 0,
+ 0, 5, 34, 0, 16, 0,
+ 40, 0, 0, 0, 10, 0,
+ 16, 0, 39, 0, 0, 0,
+ 48, 0, 0, 1, 80, 0,
+ 0, 7, 66, 0, 16, 0,
+ 40, 0, 0, 0, 26, 0,
+ 16, 0, 40, 0, 0, 0,
+ 58, 0, 16, 0, 38, 0,
+ 0, 0, 3, 0, 4, 3,
+ 42, 0, 16, 0, 40, 0,
+ 0, 0, 39, 0, 0, 7,
+ 66, 0, 16, 0, 40, 0,
+ 0, 0, 58, 0, 16, 0,
+ 22, 0, 0, 0, 26, 0,
+ 16, 0, 40, 0, 0, 0,
+ 31, 0, 4, 3, 42, 0,
+ 16, 0, 40, 0, 0, 0,
+ 167, 0, 0, 9, 18, 0,
+ 16, 0, 41, 0, 0, 0,
+ 26, 0, 16, 0, 40, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 30, 0, 0, 10, 114, 0,
+ 16, 0, 42, 0, 0, 0,
+ 86, 5, 16, 0, 40, 0,
+ 0, 0, 2, 64, 0, 0,
+ 183, 7, 0, 0, 110, 15,
+ 0, 0, 37, 23, 0, 0,
+ 0, 0, 0, 0, 167, 0,
+ 0, 9, 34, 0, 16, 0,
+ 41, 0, 0, 0, 10, 0,
+ 16, 0, 42, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 167, 0,
+ 0, 9, 66, 0, 16, 0,
+ 41, 0, 0, 0, 26, 0,
+ 16, 0, 42, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 167, 0,
+ 0, 9, 66, 0, 16, 0,
+ 40, 0, 0, 0, 42, 0,
+ 16, 0, 42, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 0, 0,
+ 0, 8, 114, 0, 16, 0,
+ 41, 0, 0, 0, 70, 2,
+ 16, 0, 26, 0, 0, 0,
+ 70, 2, 16, 128, 65, 0,
+ 0, 0, 41, 0, 0, 0,
+ 16, 0, 0, 7, 130, 0,
+ 16, 0, 40, 0, 0, 0,
+ 70, 2, 16, 0, 41, 0,
+ 0, 0, 70, 2, 16, 0,
+ 41, 0, 0, 0, 49, 0,
+ 0, 7, 130, 0, 16, 0,
+ 41, 0, 0, 0, 26, 0,
+ 16, 0, 21, 0, 0, 0,
+ 58, 0, 16, 0, 40, 0,
+ 0, 0, 31, 0, 4, 3,
+ 58, 0, 16, 0, 41, 0,
+ 0, 0, 30, 0, 0, 7,
+ 130, 0, 16, 0, 41, 0,
+ 0, 0, 26, 0, 16, 0,
+ 40, 0, 0, 0, 1, 64,
+ 0, 0, 1, 0, 0, 0,
+ 54, 0, 0, 5, 34, 0,
+ 16, 0, 40, 0, 0, 0,
+ 58, 0, 16, 0, 41, 0,
+ 0, 0, 7, 0, 0, 1,
+ 21, 0, 0, 1, 165, 0,
+ 0, 7, 130, 0, 16, 0,
+ 41, 0, 0, 0, 1, 64,
+ 0, 0, 112, 0, 0, 0,
+ 6, 240, 17, 0, 1, 0,
+ 0, 0, 39, 0, 0, 7,
+ 18, 0, 16, 0, 42, 0,
+ 0, 0, 58, 0, 16, 0,
+ 41, 0, 0, 0, 1, 64,
+ 0, 0, 255, 255, 255, 255,
+ 31, 0, 4, 3, 10, 0,
+ 16, 0, 42, 0, 0, 0,
+ 165, 0, 0, 7, 18, 0,
+ 16, 0, 42, 0, 0, 0,
+ 1, 64, 0, 0, 100, 0,
+ 0, 0, 6, 240, 17, 0,
+ 0, 0, 0, 0, 30, 0,
+ 0, 7, 18, 0, 16, 0,
+ 42, 0, 0, 0, 26, 0,
+ 16, 0, 40, 0, 0, 0,
+ 10, 0, 16, 0, 42, 0,
+ 0, 0, 167, 0, 0, 139,
+ 2, 35, 0, 128, 131, 153,
+ 25, 0, 18, 0, 16, 0,
+ 42, 0, 0, 0, 10, 0,
+ 16, 0, 42, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 224, 17, 0,
+ 2, 0, 0, 0, 1, 0,
+ 0, 7, 18, 0, 16, 0,
+ 42, 0, 0, 0, 10, 0,
+ 16, 0, 42, 0, 0, 0,
+ 1, 64, 0, 0, 255, 255,
+ 0, 0, 30, 0, 0, 7,
+ 130, 0, 16, 0, 41, 0,
+ 0, 0, 58, 0, 16, 0,
+ 41, 0, 0, 0, 10, 0,
+ 16, 0, 42, 0, 0, 0,
+ 167, 0, 0, 139, 2, 131,
+ 0, 128, 131, 153, 25, 0,
+ 114, 0, 16, 0, 42, 0,
+ 0, 0, 58, 0, 16, 0,
+ 41, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 70, 114, 16, 0, 14, 0,
+ 0, 0, 0, 0, 0, 8,
+ 114, 0, 16, 0, 42, 0,
+ 0, 0, 70, 2, 16, 0,
+ 37, 0, 0, 0, 70, 2,
+ 16, 128, 65, 0, 0, 0,
+ 42, 0, 0, 0, 16, 0,
+ 0, 7, 130, 0, 16, 0,
+ 41, 0, 0, 0, 70, 2,
+ 16, 0, 42, 0, 0, 0,
+ 70, 2, 16, 0, 42, 0,
+ 0, 0, 29, 0, 0, 7,
+ 18, 0, 16, 0, 42, 0,
+ 0, 0, 26, 0, 16, 0,
+ 21, 0, 0, 0, 58, 0,
+ 16, 0, 41, 0, 0, 0,
+ 31, 0, 4, 3, 10, 0,
+ 16, 0, 42, 0, 0, 0,
+ 30, 0, 0, 7, 18, 0,
+ 16, 0, 42, 0, 0, 0,
+ 26, 0, 16, 0, 40, 0,
+ 0, 0, 1, 64, 0, 0,
+ 1, 0, 0, 0, 54, 0,
+ 0, 5, 34, 0, 16, 0,
+ 40, 0, 0, 0, 10, 0,
+ 16, 0, 42, 0, 0, 0,
+ 7, 0, 0, 1, 21, 0,
+ 0, 1, 0, 0, 0, 7,
+ 130, 0, 16, 0, 41, 0,
+ 0, 0, 58, 0, 16, 0,
+ 41, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 52,
+ 14, 0, 0, 7, 130, 0,
+ 16, 0, 41, 0, 0, 0,
+ 26, 0, 16, 0, 21, 0,
+ 0, 0, 58, 0, 16, 0,
+ 41, 0, 0, 0, 68, 0,
+ 0, 5, 130, 0, 16, 0,
+ 41, 0, 0, 0, 58, 0,
+ 16, 0, 41, 0, 0, 0,
+ 0, 0, 0, 7, 130, 0,
+ 16, 0, 41, 0, 0, 0,
+ 58, 0, 16, 0, 41, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 128, 191, 51, 0,
+ 0, 7, 130, 0, 16, 0,
+ 41, 0, 0, 0, 58, 0,
+ 16, 0, 41, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 128, 63, 18, 0, 0, 1,
+ 54, 0, 0, 5, 130, 0,
+ 16, 0, 41, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 128, 63, 21, 0, 0, 1,
+ 0, 0, 0, 7, 130, 0,
+ 16, 0, 40, 0, 0, 0,
+ 58, 0, 16, 0, 40, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 52, 68, 0,
+ 0, 5, 130, 0, 16, 0,
+ 40, 0, 0, 0, 58, 0,
+ 16, 0, 40, 0, 0, 0,
+ 50, 0, 0, 9, 130, 0,
+ 16, 0, 40, 0, 0, 0,
+ 10, 0, 16, 0, 21, 0,
+ 0, 0, 58, 0, 16, 0,
+ 40, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 128, 191,
+ 52, 0, 0, 7, 130, 0,
+ 16, 0, 40, 0, 0, 0,
+ 58, 0, 16, 0, 40, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 56, 0,
+ 0, 7, 18, 0, 16, 0,
+ 42, 0, 0, 0, 58, 0,
+ 16, 0, 40, 0, 0, 0,
+ 58, 0, 16, 0, 40, 0,
+ 0, 0, 56, 0, 0, 7,
+ 130, 0, 16, 0, 41, 0,
+ 0, 0, 58, 0, 16, 0,
+ 41, 0, 0, 0, 10, 0,
+ 16, 0, 42, 0, 0, 0,
+ 0, 0, 0, 7, 66, 0,
+ 16, 0, 40, 0, 0, 0,
+ 58, 0, 16, 0, 30, 0,
+ 0, 0, 42, 0, 16, 0,
+ 40, 0, 0, 0, 14, 0,
+ 0, 7, 66, 0, 16, 0,
+ 40, 0, 0, 0, 58, 0,
+ 16, 0, 41, 0, 0, 0,
+ 42, 0, 16, 0, 40, 0,
+ 0, 0, 50, 0, 0, 9,
+ 226, 0, 16, 0, 39, 0,
+ 0, 0, 166, 10, 16, 0,
+ 40, 0, 0, 0, 6, 9,
+ 16, 0, 41, 0, 0, 0,
+ 86, 14, 16, 0, 39, 0,
+ 0, 0, 0, 0, 0, 7,
+ 18, 0, 16, 0, 40, 0,
+ 0, 0, 58, 0, 16, 0,
+ 40, 0, 0, 0, 10, 0,
+ 16, 0, 40, 0, 0, 0,
+ 21, 0, 0, 1, 30, 0,
+ 0, 7, 34, 0, 16, 0,
+ 40, 0, 0, 0, 26, 0,
+ 16, 0, 40, 0, 0, 0,
+ 1, 64, 0, 0, 1, 0,
+ 0, 0, 22, 0, 0, 1,
+ 54, 0, 0, 5, 114, 0,
+ 16, 0, 38, 0, 0, 0,
+ 150, 7, 16, 0, 39, 0,
+ 0, 0, 54, 0, 0, 5,
+ 130, 0, 16, 0, 33, 0,
+ 0, 0, 10, 0, 16, 0,
+ 40, 0, 0, 0, 30, 0,
+ 0, 7, 130, 0, 16, 0,
+ 35, 0, 0, 0, 58, 0,
+ 16, 0, 35, 0, 0, 0,
+ 1, 64, 0, 0, 128, 0,
+ 0, 0, 22, 0, 0, 1,
+ 165, 0, 0, 7, 18, 0,
+ 16, 0, 26, 0, 0, 0,
+ 1, 64, 0, 0, 72, 0,
+ 0, 0, 6, 240, 17, 0,
+ 1, 0, 0, 0, 56, 0,
+ 0, 7, 18, 0, 16, 0,
+ 26, 0, 0, 0, 58, 0,
+ 16, 0, 26, 0, 0, 0,
+ 10, 0, 16, 0, 26, 0,
+ 0, 0, 14, 0, 0, 7,
+ 18, 0, 16, 0, 26, 0,
+ 0, 0, 10, 0, 16, 0,
+ 26, 0, 0, 0, 58, 0,
+ 16, 0, 33, 0, 0, 0,
+ 165, 0, 0, 7, 34, 0,
+ 16, 0, 26, 0, 0, 0,
+ 1, 64, 0, 0, 96, 0,
+ 0, 0, 6, 240, 17, 0,
+ 0, 0, 0, 0, 30, 0,
+ 0, 7, 130, 0, 16, 0,
+ 23, 0, 0, 0, 58, 0,
+ 16, 0, 23, 0, 0, 0,
+ 26, 0, 16, 0, 26, 0,
+ 0, 0, 167, 0, 0, 139,
+ 2, 131, 0, 128, 131, 153,
+ 25, 0, 114, 0, 16, 0,
+ 37, 0, 0, 0, 58, 0,
+ 16, 0, 23, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 70, 226, 17, 0,
+ 1, 0, 0, 0, 50, 0,
+ 0, 9, 210, 0, 16, 0,
+ 26, 0, 0, 0, 6, 9,
+ 16, 0, 38, 0, 0, 0,
+ 6, 0, 16, 0, 26, 0,
+ 0, 0, 6, 9, 16, 0,
+ 37, 0, 0, 0, 168, 0,
+ 0, 9, 114, 224, 17, 0,
+ 1, 0, 0, 0, 58, 0,
+ 16, 0, 23, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 134, 3, 16, 0,
+ 26, 0, 0, 0, 54, 0,
+ 0, 5, 130, 0, 16, 0,
+ 22, 0, 0, 0, 58, 0,
+ 16, 0, 37, 0, 0, 0,
+ 22, 0, 0, 1, 190, 24,
+ 0, 1, 165, 0, 0, 7,
+ 66, 0, 16, 0, 19, 0,
+ 0, 0, 1, 64, 0, 0,
+ 96, 0, 0, 0, 6, 240,
+ 17, 0, 0, 0, 0, 0,
+ 54, 0, 0, 4, 18, 0,
+ 16, 0, 20, 0, 0, 0,
+ 10, 32, 2, 0, 48, 0,
+ 0, 1, 33, 0, 0, 7,
+ 130, 0, 16, 0, 19, 0,
+ 0, 0, 10, 0, 16, 0,
+ 20, 0, 0, 0, 10, 0,
+ 16, 0, 19, 0, 0, 0,
+ 3, 0, 4, 3, 58, 0,
+ 16, 0, 19, 0, 0, 0,
+ 30, 0, 0, 7, 130, 0,
+ 16, 0, 19, 0, 0, 0,
+ 42, 0, 16, 0, 19, 0,
+ 0, 0, 10, 0, 16, 0,
+ 20, 0, 0, 0, 167, 0,
+ 0, 139, 2, 131, 0, 128,
+ 131, 153, 25, 0, 242, 0,
+ 16, 0, 21, 0, 0, 0,
+ 58, 0, 16, 0, 19, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 70, 238,
+ 17, 0, 1, 0, 0, 0,
+ 168, 0, 0, 9, 18, 240,
+ 17, 0, 3, 0, 0, 0,
+ 10, 0, 16, 0, 20, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 10, 0,
+ 16, 0, 21, 0, 0, 0,
+ 30, 0, 0, 10, 242, 0,
+ 16, 0, 20, 0, 0, 0,
+ 6, 0, 16, 0, 20, 0,
+ 0, 0, 2, 64, 0, 0,
+ 0, 4, 0, 0, 183, 7,
+ 0, 0, 110, 15, 0, 0,
+ 37, 23, 0, 0, 168, 0,
+ 0, 9, 18, 240, 17, 0,
+ 3, 0, 0, 0, 26, 0,
+ 16, 0, 20, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 26, 0, 16, 0,
+ 21, 0, 0, 0, 168, 0,
+ 0, 9, 18, 240, 17, 0,
+ 3, 0, 0, 0, 42, 0,
+ 16, 0, 20, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 42, 0, 16, 0,
+ 21, 0, 0, 0, 168, 0,
+ 0, 9, 18, 240, 17, 0,
+ 3, 0, 0, 0, 58, 0,
+ 16, 0, 20, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 58, 0, 16, 0,
+ 21, 0, 0, 0, 22, 0,
+ 0, 1, 165, 0, 0, 7,
+ 18, 0, 16, 0, 19, 0,
+ 0, 0, 1, 64, 0, 0,
+ 100, 0, 0, 0, 6, 240,
+ 17, 0, 0, 0, 0, 0,
+ 54, 0, 0, 4, 66, 0,
+ 16, 0, 19, 0, 0, 0,
+ 10, 32, 2, 0, 48, 0,
+ 0, 1, 33, 0, 0, 7,
+ 130, 0, 16, 0, 19, 0,
+ 0, 0, 42, 0, 16, 0,
+ 19, 0, 0, 0, 10, 0,
+ 16, 0, 17, 0, 0, 0,
+ 3, 0, 4, 3, 58, 0,
+ 16, 0, 19, 0, 0, 0,
+ 30, 0, 0, 7, 130, 0,
+ 16, 0, 19, 0, 0, 0,
+ 42, 0, 16, 0, 19, 0,
+ 0, 0, 10, 0, 16, 0,
+ 19, 0, 0, 0, 167, 0,
+ 0, 139, 2, 35, 0, 128,
+ 131, 153, 25, 0, 130, 0,
+ 16, 0, 19, 0, 0, 0,
+ 58, 0, 16, 0, 19, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 224,
+ 17, 0, 2, 0, 0, 0,
+ 85, 0, 0, 7, 130, 0,
+ 16, 0, 19, 0, 0, 0,
+ 58, 0, 16, 0, 19, 0,
+ 0, 0, 1, 64, 0, 0,
+ 16, 0, 0, 0, 30, 0,
+ 0, 7, 130, 0, 16, 0,
+ 19, 0, 0, 0, 58, 0,
+ 16, 0, 19, 0, 0, 0,
+ 26, 0, 16, 0, 19, 0,
+ 0, 0, 168, 0, 0, 9,
+ 18, 224, 17, 0, 2, 0,
+ 0, 0, 58, 0, 16, 0,
+ 19, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 1, 64, 0, 0, 255, 255,
+ 255, 255, 30, 0, 0, 7,
+ 18, 0, 16, 0, 20, 0,
+ 0, 0, 58, 0, 16, 0,
+ 19, 0, 0, 0, 1, 64,
+ 0, 0, 1, 0, 0, 0,
+ 168, 0, 0, 9, 18, 224,
+ 17, 0, 2, 0, 0, 0,
+ 10, 0, 16, 0, 20, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 1, 64,
+ 0, 0, 255, 255, 255, 255,
+ 30, 0, 0, 7, 66, 0,
+ 16, 0, 19, 0, 0, 0,
+ 42, 0, 16, 0, 19, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 4, 0, 0, 22, 0,
+ 0, 1, 21, 0, 0, 1,
+ 31, 0, 0, 2, 10, 32,
+ 2, 0, 165, 0, 0, 7,
+ 18, 0, 16, 0, 17, 0,
+ 0, 0, 1, 64, 0, 0,
+ 8, 0, 0, 0, 6, 240,
+ 17, 0, 1, 0, 0, 0,
+ 56, 0, 0, 7, 18, 0,
+ 16, 0, 17, 0, 0, 0,
+ 10, 0, 16, 0, 17, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 122, 68, 28, 0,
+ 0, 5, 18, 0, 16, 0,
+ 17, 0, 0, 0, 10, 0,
+ 16, 0, 17, 0, 0, 0,
+ 83, 0, 0, 7, 18, 0,
+ 16, 0, 17, 0, 0, 0,
+ 10, 0, 16, 0, 17, 0,
+ 0, 0, 1, 64, 0, 0,
+ 1, 0, 0, 0, 165, 0,
+ 0, 7, 18, 0, 16, 0,
+ 19, 0, 0, 0, 1, 64,
+ 0, 0, 104, 0, 0, 0,
+ 6, 240, 17, 0, 1, 0,
+ 0, 0, 30, 0, 0, 7,
+ 18, 0, 16, 0, 17, 0,
+ 0, 0, 10, 0, 16, 0,
+ 17, 0, 0, 0, 10, 0,
+ 16, 0, 19, 0, 0, 0,
+ 166, 0, 0, 7, 18, 240,
+ 17, 0, 1, 0, 0, 0,
+ 1, 64, 0, 0, 104, 0,
+ 0, 0, 10, 0, 16, 0,
+ 17, 0, 0, 0, 21, 0,
+ 0, 1, 190, 24, 0, 1,
+ 165, 0, 0, 7, 18, 0,
+ 16, 0, 17, 0, 0, 0,
+ 1, 64, 0, 0, 104, 0,
+ 0, 0, 6, 240, 17, 0,
+ 1, 0, 0, 0, 165, 0,
+ 0, 7, 18, 0, 16, 0,
+ 19, 0, 0, 0, 1, 64,
+ 0, 0, 104, 0, 0, 0,
+ 6, 240, 17, 0, 0, 0,
+ 0, 0, 80, 0, 0, 7,
+ 18, 0, 16, 0, 17, 0,
+ 0, 0, 10, 0, 16, 0,
+ 17, 0, 0, 0, 10, 0,
+ 16, 0, 19, 0, 0, 0,
+ 31, 0, 4, 3, 10, 0,
+ 16, 0, 17, 0, 0, 0,
+ 165, 0, 0, 7, 18, 0,
+ 16, 0, 17, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 0, 0, 0, 0, 54, 0,
+ 0, 5, 18, 0, 16, 0,
+ 19, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 54, 0, 0, 4, 34, 0,
+ 16, 0, 19, 0, 0, 0,
+ 10, 32, 2, 0, 48, 0,
+ 0, 1, 80, 0, 0, 7,
+ 66, 0, 16, 0, 19, 0,
+ 0, 0, 26, 0, 16, 0,
+ 19, 0, 0, 0, 10, 0,
+ 16, 0, 17, 0, 0, 0,
+ 3, 0, 4, 3, 42, 0,
+ 16, 0, 19, 0, 0, 0,
+ 167, 0, 0, 9, 18, 0,
+ 16, 0, 20, 0, 0, 0,
+ 26, 0, 16, 0, 19, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 30, 0, 0, 10, 114, 0,
+ 16, 0, 21, 0, 0, 0,
+ 86, 5, 16, 0, 19, 0,
+ 0, 0, 2, 64, 0, 0,
+ 183, 7, 0, 0, 110, 15,
+ 0, 0, 0, 4, 0, 0,
+ 0, 0, 0, 0, 167, 0,
+ 0, 9, 34, 0, 16, 0,
+ 20, 0, 0, 0, 10, 0,
+ 16, 0, 21, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 167, 0,
+ 0, 9, 66, 0, 16, 0,
+ 20, 0, 0, 0, 26, 0,
+ 16, 0, 21, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 30, 0,
+ 0, 7, 66, 0, 16, 0,
+ 19, 0, 0, 0, 10, 0,
+ 16, 0, 0, 0, 0, 0,
+ 26, 0, 16, 0, 19, 0,
+ 0, 0, 167, 0, 0, 139,
+ 2, 131, 0, 128, 131, 153,
+ 25, 0, 178, 0, 16, 0,
+ 21, 0, 0, 0, 42, 0,
+ 16, 0, 19, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 70, 232, 17, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 8, 114, 0, 16, 0,
+ 20, 0, 0, 0, 70, 2,
+ 16, 0, 20, 0, 0, 0,
+ 70, 3, 16, 128, 65, 0,
+ 0, 0, 21, 0, 0, 0,
+ 52, 0, 0, 9, 66, 0,
+ 16, 0, 19, 0, 0, 0,
+ 26, 0, 16, 128, 129, 0,
+ 0, 0, 20, 0, 0, 0,
+ 10, 0, 16, 128, 129, 0,
+ 0, 0, 20, 0, 0, 0,
+ 52, 0, 0, 8, 66, 0,
+ 16, 0, 19, 0, 0, 0,
+ 42, 0, 16, 128, 129, 0,
+ 0, 0, 20, 0, 0, 0,
+ 42, 0, 16, 0, 19, 0,
+ 0, 0, 52, 0, 0, 7,
+ 18, 0, 16, 0, 19, 0,
+ 0, 0, 10, 0, 16, 0,
+ 19, 0, 0, 0, 42, 0,
+ 16, 0, 19, 0, 0, 0,
+ 54, 0, 0, 5, 34, 0,
+ 16, 0, 19, 0, 0, 0,
+ 42, 0, 16, 0, 21, 0,
+ 0, 0, 22, 0, 0, 1,
+ 31, 0, 0, 2, 10, 32,
+ 2, 0, 165, 0, 0, 7,
+ 18, 0, 16, 0, 17, 0,
+ 0, 0, 1, 64, 0, 0,
+ 100, 0, 0, 0, 6, 240,
+ 17, 0, 1, 0, 0, 0,
+ 30, 0, 0, 7, 18, 0,
+ 16, 0, 17, 0, 0, 0,
+ 10, 0, 16, 0, 17, 0,
+ 0, 0, 1, 64, 0, 0,
+ 1, 0, 0, 0, 166, 0,
+ 0, 7, 18, 240, 17, 0,
+ 1, 0, 0, 0, 1, 64,
+ 0, 0, 100, 0, 0, 0,
+ 10, 0, 16, 0, 17, 0,
+ 0, 0, 165, 0, 0, 7,
+ 18, 0, 16, 0, 17, 0,
+ 0, 0, 1, 64, 0, 0,
+ 104, 0, 0, 0, 6, 240,
+ 17, 0, 0, 0, 0, 0,
+ 165, 0, 0, 7, 34, 0,
+ 16, 0, 19, 0, 0, 0,
+ 1, 64, 0, 0, 104, 0,
+ 0, 0, 6, 240, 17, 0,
+ 1, 0, 0, 0, 30, 0,
+ 0, 8, 18, 0, 16, 0,
+ 17, 0, 0, 0, 10, 0,
+ 16, 128, 65, 0, 0, 0,
+ 17, 0, 0, 0, 26, 0,
+ 16, 0, 19, 0, 0, 0,
+ 166, 0, 0, 7, 18, 240,
+ 17, 0, 1, 0, 0, 0,
+ 1, 64, 0, 0, 104, 0,
+ 0, 0, 10, 0, 16, 0,
+ 17, 0, 0, 0, 21, 0,
+ 0, 1, 165, 0, 0, 7,
+ 18, 0, 16, 0, 17, 0,
+ 0, 0, 1, 64, 0, 0,
+ 112, 0, 0, 0, 6, 240,
+ 17, 0, 0, 0, 0, 0,
+ 165, 0, 0, 7, 34, 0,
+ 16, 0, 19, 0, 0, 0,
+ 1, 64, 0, 0, 8, 0,
+ 0, 0, 6, 240, 17, 0,
+ 1, 0, 0, 0, 56, 0,
+ 0, 7, 18, 0, 16, 0,
+ 17, 0, 0, 0, 10, 0,
+ 16, 0, 17, 0, 0, 0,
+ 26, 0, 16, 0, 19, 0,
+ 0, 0, 49, 0, 0, 7,
+ 18, 0, 16, 0, 17, 0,
+ 0, 0, 10, 0, 16, 0,
+ 17, 0, 0, 0, 10, 0,
+ 16, 0, 19, 0, 0, 0,
+ 31, 0, 4, 3, 10, 0,
+ 16, 0, 17, 0, 0, 0,
+ 166, 0, 0, 7, 18, 240,
+ 17, 0, 1, 0, 0, 0,
+ 1, 64, 0, 0, 100, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 21, 0,
+ 0, 1, 21, 0, 0, 1,
+ 30, 0, 0, 7, 34, 0,
+ 16, 0, 15, 0, 0, 0,
+ 26, 0, 16, 0, 15, 0,
+ 0, 0, 1, 64, 0, 0,
+ 1, 0, 0, 0, 22, 0,
+ 0, 1, 190, 24, 0, 1,
+ 165, 0, 0, 7, 98, 0,
+ 16, 0, 0, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 241, 17, 0,
+ 0, 0, 0, 0, 54, 0,
+ 0, 4, 18, 0, 16, 0,
+ 1, 0, 0, 0, 10, 32,
+ 2, 0, 48, 0, 0, 1,
+ 80, 0, 0, 7, 130, 0,
+ 16, 0, 0, 0, 0, 0,
+ 10, 0, 16, 0, 1, 0,
+ 0, 0, 26, 0, 16, 0,
+ 0, 0, 0, 0, 3, 0,
+ 4, 3, 58, 0, 16, 0,
+ 0, 0, 0, 0, 30, 0,
+ 0, 7, 130, 0, 16, 0,
+ 0, 0, 0, 0, 42, 0,
+ 16, 0, 0, 0, 0, 0,
+ 10, 0, 16, 0, 1, 0,
+ 0, 0, 167, 0, 0, 9,
+ 18, 0, 16, 0, 2, 0,
+ 0, 0, 10, 0, 16, 0,
+ 1, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 3, 0,
+ 0, 0, 30, 0, 0, 10,
+ 242, 0, 16, 0, 1, 0,
+ 0, 0, 6, 0, 16, 0,
+ 1, 0, 0, 0, 2, 64,
+ 0, 0, 0, 4, 0, 0,
+ 183, 7, 0, 0, 110, 15,
+ 0, 0, 37, 23, 0, 0,
+ 167, 0, 0, 9, 34, 0,
+ 16, 0, 2, 0, 0, 0,
+ 26, 0, 16, 0, 1, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 167, 0, 0, 9, 66, 0,
+ 16, 0, 2, 0, 0, 0,
+ 42, 0, 16, 0, 1, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 167, 0, 0, 9, 130, 0,
+ 16, 0, 2, 0, 0, 0,
+ 58, 0, 16, 0, 1, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 168, 0, 0, 9, 242, 224,
+ 17, 0, 0, 0, 0, 0,
+ 58, 0, 16, 0, 0, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 70, 14,
+ 16, 0, 2, 0, 0, 0,
+ 22, 0, 0, 1, 18, 0,
+ 0, 1, 165, 0, 0, 7,
+ 34, 0, 16, 0, 0, 0,
+ 0, 0, 1, 64, 0, 0,
+ 4, 0, 0, 0, 6, 240,
+ 17, 0, 0, 0, 0, 0,
+ 59, 0, 0, 4, 66, 0,
+ 16, 0, 0, 0, 0, 0,
+ 10, 32, 2, 0, 1, 0,
+ 0, 7, 66, 0, 16, 0,
+ 0, 0, 0, 0, 42, 0,
+ 16, 0, 0, 0, 0, 0,
+ 1, 64, 0, 0, 3, 0,
+ 0, 0, 79, 0, 0, 9,
+ 242, 0, 16, 0, 1, 0,
+ 0, 0, 6, 32, 2, 0,
+ 2, 64, 0, 0, 192, 0,
+ 0, 0, 176, 0, 0, 0,
+ 3, 0, 0, 0, 0, 2,
+ 0, 0, 1, 0, 0, 9,
+ 242, 0, 16, 0, 2, 0,
+ 0, 0, 6, 32, 2, 0,
+ 2, 64, 0, 0, 32, 0,
+ 0, 0, 31, 0, 0, 0,
+ 1, 0, 0, 0, 2, 0,
+ 0, 0, 55, 0, 0, 9,
+ 130, 0, 16, 0, 0, 0,
+ 0, 0, 10, 0, 16, 0,
+ 2, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 128, 191,
+ 1, 64, 0, 0, 0, 0,
+ 128, 63, 30, 0, 0, 9,
+ 242, 0, 16, 0, 3, 0,
+ 0, 0, 6, 32, 2, 0,
+ 2, 64, 0, 0, 16, 0,
+ 0, 0, 8, 0, 0, 0,
+ 4, 0, 0, 0, 2, 0,
+ 0, 0, 32, 0, 0, 7,
+ 18, 0, 16, 0, 2, 0,
+ 0, 0, 26, 0, 16, 0,
+ 2, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 1, 0, 0, 7, 18, 0,
+ 16, 0, 2, 0, 0, 0,
+ 10, 0, 16, 0, 1, 0,
+ 0, 0, 10, 0, 16, 0,
+ 2, 0, 0, 0, 85, 0,
+ 0, 9, 50, 0, 16, 0,
+ 4, 0, 0, 0, 6, 32,
+ 2, 0, 2, 64, 0, 0,
+ 6, 0, 0, 0, 5, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 41, 0,
+ 0, 7, 66, 0, 16, 0,
+ 4, 0, 0, 0, 26, 0,
+ 16, 0, 4, 0, 0, 0,
+ 1, 64, 0, 0, 2, 0,
+ 0, 0, 41, 0, 0, 6,
+ 130, 0, 16, 0, 4, 0,
+ 0, 0, 10, 32, 2, 0,
+ 1, 64, 0, 0, 3, 0,
+ 0, 0, 30, 0, 0, 10,
+ 194, 0, 16, 0, 4, 0,
+ 0, 0, 166, 14, 16, 0,
+ 4, 0, 0, 0, 2, 64,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 76, 0,
+ 0, 0, 76, 0, 0, 0,
+ 30, 0, 0, 7, 66, 0,
+ 16, 0, 2, 0, 0, 0,
+ 42, 0, 16, 0, 2, 0,
+ 0, 0, 1, 64, 0, 0,
+ 255, 255, 255, 255, 39, 0,
+ 0, 7, 130, 0, 16, 0,
+ 2, 0, 0, 0, 58, 0,
+ 16, 0, 2, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 32, 0, 0, 7,
+ 130, 0, 16, 0, 2, 0,
+ 0, 0, 58, 0, 16, 0,
+ 2, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 1, 0, 0, 9, 114, 0,
+ 16, 0, 5, 0, 0, 0,
+ 6, 32, 2, 0, 2, 64,
+ 0, 0, 4, 0, 0, 0,
+ 8, 0, 0, 0, 224, 3,
+ 0, 0, 0, 0, 0, 0,
+ 39, 0, 0, 10, 50, 0,
+ 16, 0, 5, 0, 0, 0,
+ 70, 0, 16, 0, 5, 0,
+ 0, 0, 2, 64, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 32, 0,
+ 0, 10, 50, 0, 16, 0,
+ 5, 0, 0, 0, 70, 0,
+ 16, 0, 5, 0, 0, 0,
+ 2, 64, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 30, 0, 0, 9,
+ 242, 0, 16, 0, 6, 0,
+ 0, 0, 6, 32, 2, 0,
+ 2, 64, 0, 0, 1, 0,
+ 0, 0, 16, 4, 0, 0,
+ 16, 12, 0, 0, 8, 4,
+ 0, 0, 79, 0, 0, 10,
+ 242, 0, 16, 0, 7, 0,
+ 0, 0, 86, 5, 16, 0,
+ 2, 0, 0, 0, 2, 64,
+ 0, 0, 16, 0, 0, 0,
+ 8, 0, 0, 0, 4, 0,
+ 0, 0, 2, 0, 0, 0,
+ 79, 0, 0, 7, 130, 0,
+ 16, 0, 5, 0, 0, 0,
+ 26, 0, 16, 0, 2, 0,
+ 0, 0, 1, 64, 0, 0,
+ 1, 0, 0, 0, 30, 0,
+ 0, 9, 242, 0, 16, 0,
+ 8, 0, 0, 0, 6, 32,
+ 2, 0, 2, 64, 0, 0,
+ 8, 12, 0, 0, 4, 4,
+ 0, 0, 4, 12, 0, 0,
+ 2, 4, 0, 0, 30, 0,
+ 0, 10, 242, 0, 16, 0,
+ 9, 0, 0, 0, 70, 14,
+ 16, 0, 3, 0, 0, 0,
+ 2, 64, 0, 0, 0, 8,
+ 0, 0, 0, 8, 0, 0,
+ 0, 8, 0, 0, 0, 8,
+ 0, 0, 30, 0, 0, 10,
+ 114, 0, 16, 0, 10, 0,
+ 0, 0, 166, 10, 16, 0,
+ 5, 0, 0, 0, 2, 64,
+ 0, 0, 0, 4, 0, 0,
+ 0, 8, 0, 0, 0, 12,
+ 0, 0, 0, 0, 0, 0,
+ 30, 0, 0, 10, 50, 0,
+ 16, 0, 11, 0, 0, 0,
+ 86, 5, 16, 0, 4, 0,
+ 0, 0, 2, 64, 0, 0,
+ 1, 0, 0, 0, 1, 2,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 140, 0,
+ 0, 19, 242, 0, 16, 0,
+ 12, 0, 0, 0, 2, 64,
+ 0, 0, 10, 0, 0, 0,
+ 11, 0, 0, 0, 10, 0,
+ 0, 0, 5, 0, 0, 0,
+ 2, 64, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 5, 0,
+ 0, 0, 6, 32, 2, 0,
+ 2, 64, 0, 0, 0, 4,
+ 0, 0, 0, 8, 0, 0,
+ 0, 12, 0, 0, 0, 0,
+ 0, 0, 30, 0, 0, 7,
+ 82, 0, 16, 0, 11, 0,
+ 0, 0, 6, 1, 16, 0,
+ 11, 0, 0, 0, 246, 15,
+ 16, 0, 12, 0, 0, 0,
+ 30, 0, 0, 9, 242, 0,
+ 16, 0, 13, 0, 0, 0,
+ 6, 32, 2, 0, 2, 64,
+ 0, 0, 2, 12, 0, 0,
+ 1, 4, 0, 0, 1, 12,
+ 0, 0, 2, 2, 0, 0,
+ 30, 0, 0, 10, 50, 0,
+ 16, 0, 14, 0, 0, 0,
+ 6, 0, 16, 0, 6, 0,
+ 0, 0, 2, 64, 0, 0,
+ 0, 8, 0, 0, 0, 16,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 30, 0,
+ 0, 6, 130, 0, 16, 0,
+ 10, 0, 0, 0, 10, 32,
+ 2, 0, 1, 64, 0, 0,
+ 0, 16, 0, 0, 30, 0,
+ 0, 7, 130, 0, 16, 0,
+ 11, 0, 0, 0, 58, 0,
+ 16, 0, 13, 0, 0, 0,
+ 1, 64, 0, 0, 0, 16,
+ 0, 0, 30, 0, 0, 9,
+ 242, 0, 16, 0, 15, 0,
+ 0, 0, 6, 32, 2, 0,
+ 2, 64, 0, 0, 1, 16,
+ 0, 0, 0, 2, 0, 0,
+ 2, 18, 0, 0, 253, 15,
+ 0, 0, 30, 0, 0, 7,
+ 130, 0, 16, 0, 12, 0,
+ 0, 0, 26, 0, 16, 0,
+ 15, 0, 0, 0, 1, 64,
+ 0, 0, 0, 16, 0, 0,
+ 80, 0, 0, 9, 242, 0,
+ 16, 0, 16, 0, 0, 0,
+ 6, 32, 2, 0, 2, 64,
+ 0, 0, 1, 0, 0, 0,
+ 2, 0, 0, 0, 4, 0,
+ 0, 0, 8, 0, 0, 0,
+ 30, 0, 0, 9, 242, 0,
+ 16, 0, 17, 0, 0, 0,
+ 6, 32, 2, 0, 2, 64,
+ 0, 0, 250, 1, 0, 0,
+ 241, 15, 0, 0, 226, 1,
+ 0, 0, 193, 15, 0, 0,
+ 30, 0, 0, 10, 194, 0,
+ 16, 0, 14, 0, 0, 0,
+ 6, 8, 16, 0, 17, 0,
+ 0, 0, 2, 64, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 16, 0, 0,
+ 0, 16, 0, 0, 80, 0,
+ 0, 9, 242, 0, 16, 0,
+ 18, 0, 0, 0, 6, 32,
+ 2, 0, 2, 64, 0, 0,
+ 16, 0, 0, 0, 32, 0,
+ 0, 0, 64, 0, 0, 0,
+ 128, 0, 0, 0, 30, 0,
+ 0, 9, 82, 0, 16, 0,
+ 17, 0, 0, 0, 6, 32,
+ 2, 0, 2, 64, 0, 0,
+ 130, 1, 0, 0, 0, 0,
+ 0, 0, 1, 15, 0, 0,
+ 0, 0, 0, 0, 30, 0,
+ 0, 7, 130, 0, 16, 0,
+ 13, 0, 0, 0, 10, 0,
+ 16, 0, 17, 0, 0, 0,
+ 1, 64, 0, 0, 0, 16,
+ 0, 0, 80, 0, 0, 6,
+ 34, 0, 16, 0, 15, 0,
+ 0, 0, 10, 32, 2, 0,
+ 1, 64, 0, 0, 0, 1,
+ 0, 0, 30, 0, 0, 10,
+ 82, 0, 16, 0, 11, 0,
+ 0, 0, 6, 2, 16, 0,
+ 11, 0, 0, 0, 2, 64,
+ 0, 0, 0, 16, 0, 0,
+ 0, 0, 0, 0, 0, 16,
+ 0, 0, 0, 0, 0, 0,
+ 54, 0, 0, 5, 18, 0,
+ 16, 0, 17, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 48, 0, 0, 1,
+ 165, 0, 0, 7, 18, 0,
+ 16, 0, 19, 0, 0, 0,
+ 1, 64, 0, 0, 16, 0,
+ 0, 0, 6, 240, 17, 0,
+ 1, 0, 0, 0, 80, 0,
+ 0, 7, 34, 0, 16, 0,
+ 19, 0, 0, 0, 10, 0,
+ 16, 0, 17, 0, 0, 0,
+ 10, 0, 16, 0, 19, 0,
+ 0, 0, 3, 0, 4, 3,
+ 26, 0, 16, 0, 19, 0,
+ 0, 0, 86, 0, 0, 5,
+ 34, 0, 16, 0, 19, 0,
+ 0, 0, 10, 0, 16, 0,
+ 17, 0, 0, 0, 0, 0,
+ 0, 7, 66, 0, 16, 0,
+ 19, 0, 0, 0, 26, 0,
+ 16, 0, 19, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 128, 63, 86, 0, 0, 5,
+ 18, 0, 16, 0, 19, 0,
+ 0, 0, 10, 0, 16, 0,
+ 19, 0, 0, 0, 14, 0,
+ 0, 7, 18, 0, 16, 0,
+ 19, 0, 0, 0, 42, 0,
+ 16, 0, 19, 0, 0, 0,
+ 10, 0, 16, 0, 19, 0,
+ 0, 0, 31, 0, 0, 2,
+ 10, 32, 2, 0, 165, 0,
+ 0, 7, 66, 0, 16, 0,
+ 19, 0, 0, 0, 1, 64,
+ 0, 0, 12, 0, 0, 0,
+ 6, 240, 17, 0, 1, 0,
+ 0, 0, 30, 0, 0, 7,
+ 66, 0, 16, 0, 19, 0,
+ 0, 0, 10, 0, 16, 0,
+ 17, 0, 0, 0, 42, 0,
+ 16, 0, 19, 0, 0, 0,
+ 167, 0, 0, 139, 2, 35,
+ 3, 128, 131, 153, 25, 0,
+ 242, 0, 16, 0, 20, 0,
+ 0, 0, 42, 0, 16, 0,
+ 19, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 70, 126, 16, 0, 2, 0,
+ 0, 0, 167, 0, 0, 139,
+ 2, 35, 3, 128, 131, 153,
+ 25, 0, 242, 0, 16, 0,
+ 21, 0, 0, 0, 42, 0,
+ 16, 0, 19, 0, 0, 0,
+ 1, 64, 0, 0, 16, 0,
+ 0, 0, 70, 126, 16, 0,
+ 2, 0, 0, 0, 167, 0,
+ 0, 139, 2, 35, 3, 128,
+ 131, 153, 25, 0, 242, 0,
+ 16, 0, 22, 0, 0, 0,
+ 42, 0, 16, 0, 19, 0,
+ 0, 0, 1, 64, 0, 0,
+ 32, 0, 0, 0, 70, 126,
+ 16, 0, 2, 0, 0, 0,
+ 167, 0, 0, 139, 2, 35,
+ 3, 128, 131, 153, 25, 0,
+ 242, 0, 16, 0, 23, 0,
+ 0, 0, 42, 0, 16, 0,
+ 19, 0, 0, 0, 1, 64,
+ 0, 0, 48, 0, 0, 0,
+ 70, 126, 16, 0, 2, 0,
+ 0, 0, 167, 0, 0, 139,
+ 2, 35, 3, 128, 131, 153,
+ 25, 0, 242, 0, 16, 0,
+ 24, 0, 0, 0, 42, 0,
+ 16, 0, 19, 0, 0, 0,
+ 1, 64, 0, 0, 64, 0,
+ 0, 0, 70, 126, 16, 0,
+ 2, 0, 0, 0, 167, 0,
+ 0, 139, 2, 35, 3, 128,
+ 131, 153, 25, 0, 242, 0,
+ 16, 0, 25, 0, 0, 0,
+ 42, 0, 16, 0, 19, 0,
+ 0, 0, 1, 64, 0, 0,
+ 80, 0, 0, 0, 70, 126,
+ 16, 0, 2, 0, 0, 0,
+ 167, 0, 0, 139, 2, 35,
+ 3, 128, 131, 153, 25, 0,
+ 66, 0, 16, 0, 19, 0,
+ 0, 0, 42, 0, 16, 0,
+ 19, 0, 0, 0, 1, 64,
+ 0, 0, 96, 0, 0, 0,
+ 6, 112, 16, 0, 2, 0,
+ 0, 0, 166, 0, 0, 7,
+ 242, 240, 17, 0, 2, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 70, 14,
+ 16, 0, 20, 0, 0, 0,
+ 166, 0, 0, 7, 242, 240,
+ 17, 0, 2, 0, 0, 0,
+ 1, 64, 0, 0, 16, 0,
+ 0, 0, 70, 14, 16, 0,
+ 21, 0, 0, 0, 166, 0,
+ 0, 7, 242, 240, 17, 0,
+ 2, 0, 0, 0, 1, 64,
+ 0, 0, 32, 0, 0, 0,
+ 70, 14, 16, 0, 22, 0,
+ 0, 0, 166, 0, 0, 7,
+ 242, 240, 17, 0, 2, 0,
+ 0, 0, 1, 64, 0, 0,
+ 48, 0, 0, 0, 70, 14,
+ 16, 0, 23, 0, 0, 0,
+ 166, 0, 0, 7, 242, 240,
+ 17, 0, 2, 0, 0, 0,
+ 1, 64, 0, 0, 64, 0,
+ 0, 0, 70, 14, 16, 0,
+ 24, 0, 0, 0, 166, 0,
+ 0, 7, 242, 240, 17, 0,
+ 2, 0, 0, 0, 1, 64,
+ 0, 0, 80, 0, 0, 0,
+ 70, 14, 16, 0, 25, 0,
+ 0, 0, 166, 0, 0, 7,
+ 18, 240, 17, 0, 2, 0,
+ 0, 0, 1, 64, 0, 0,
+ 96, 0, 0, 0, 42, 0,
+ 16, 0, 19, 0, 0, 0,
+ 21, 0, 0, 1, 190, 24,
+ 0, 1, 165, 0, 0, 7,
+ 66, 0, 16, 0, 19, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 0, 0, 0, 0,
+ 165, 0, 0, 7, 114, 0,
+ 16, 0, 20, 0, 0, 0,
+ 1, 64, 0, 0, 12, 0,
+ 0, 0, 70, 242, 17, 0,
+ 2, 0, 0, 0, 165, 0,
+ 0, 7, 242, 0, 16, 0,
+ 21, 0, 0, 0, 1, 64,
+ 0, 0, 60, 0, 0, 0,
+ 70, 254, 17, 0, 2, 0,
+ 0, 0, 165, 0, 0, 7,
+ 242, 0, 16, 0, 22, 0,
+ 0, 0, 1, 64, 0, 0,
+ 24, 0, 0, 0, 70, 254,
+ 17, 0, 2, 0, 0, 0,
+ 165, 0, 0, 7, 242, 0,
+ 16, 0, 23, 0, 0, 0,
+ 1, 64, 0, 0, 76, 0,
+ 0, 0, 70, 254, 17, 0,
+ 2, 0, 0, 0, 165, 0,
+ 0, 7, 242, 0, 16, 0,
+ 24, 0, 0, 0, 1, 64,
+ 0, 0, 40, 0, 0, 0,
+ 70, 254, 17, 0, 2, 0,
+ 0, 0, 165, 0, 0, 7,
+ 50, 0, 16, 0, 25, 0,
+ 0, 0, 1, 64, 0, 0,
+ 92, 0, 0, 0, 70, 240,
+ 17, 0, 2, 0, 0, 0,
+ 165, 0, 0, 7, 130, 0,
+ 16, 0, 19, 0, 0, 0,
+ 1, 64, 0, 0, 56, 0,
+ 0, 0, 6, 240, 17, 0,
+ 2, 0, 0, 0, 165, 0,
+ 0, 7, 242, 0, 16, 0,
+ 26, 0, 0, 0, 1, 64,
+ 0, 0, 12, 0, 0, 0,
+ 70, 254, 17, 0, 2, 0,
+ 0, 0, 165, 0, 0, 7,
+ 130, 0, 16, 0, 20, 0,
+ 0, 0, 1, 64, 0, 0,
+ 36, 0, 0, 0, 6, 240,
+ 17, 0, 2, 0, 0, 0,
+ 165, 0, 0, 7, 66, 0,
+ 16, 0, 25, 0, 0, 0,
+ 1, 64, 0, 0, 48, 0,
+ 0, 0, 6, 240, 17, 0,
+ 2, 0, 0, 0, 54, 0,
+ 0, 4, 130, 0, 16, 0,
+ 25, 0, 0, 0, 10, 32,
+ 2, 0, 48, 0, 0, 1,
+ 80, 0, 0, 7, 18, 0,
+ 16, 0, 27, 0, 0, 0,
+ 58, 0, 16, 0, 25, 0,
+ 0, 0, 42, 0, 16, 0,
+ 19, 0, 0, 0, 3, 0,
+ 4, 3, 10, 0, 16, 0,
+ 27, 0, 0, 0, 30, 0,
+ 0, 7, 18, 0, 16, 0,
+ 27, 0, 0, 0, 26, 0,
+ 16, 0, 0, 0, 0, 0,
+ 58, 0, 16, 0, 25, 0,
+ 0, 0, 167, 0, 0, 139,
+ 2, 131, 0, 128, 131, 153,
+ 25, 0, 242, 0, 16, 0,
+ 28, 0, 0, 0, 10, 0,
+ 16, 0, 27, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 70, 238, 17, 0,
+ 0, 0, 0, 0, 30, 0,
+ 0, 7, 34, 0, 16, 0,
+ 27, 0, 0, 0, 10, 0,
+ 16, 0, 0, 0, 0, 0,
+ 58, 0, 16, 0, 25, 0,
+ 0, 0, 167, 0, 0, 139,
+ 2, 131, 0, 128, 131, 153,
+ 25, 0, 242, 0, 16, 0,
+ 29, 0, 0, 0, 26, 0,
+ 16, 0, 27, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 70, 238, 17, 0,
+ 0, 0, 0, 0, 24, 0,
+ 0, 7, 66, 0, 16, 0,
+ 27, 0, 0, 0, 58, 0,
+ 16, 0, 28, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 55, 0, 0, 9,
+ 130, 0, 16, 0, 28, 0,
+ 0, 0, 42, 0, 16, 0,
+ 27, 0, 0, 0, 58, 0,
+ 16, 0, 29, 0, 0, 0,
+ 58, 0, 16, 0, 28, 0,
+ 0, 0, 49, 0, 0, 7,
+ 66, 0, 16, 0, 27, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 58, 0,
+ 16, 0, 28, 0, 0, 0,
+ 31, 0, 4, 3, 42, 0,
+ 16, 0, 27, 0, 0, 0,
+ 31, 0, 4, 3, 26, 0,
+ 16, 0, 25, 0, 0, 0,
+ 0, 0, 0, 7, 114, 0,
+ 16, 0, 30, 0, 0, 0,
+ 70, 2, 16, 0, 20, 0,
+ 0, 0, 70, 2, 16, 0,
+ 28, 0, 0, 0, 50, 0,
+ 0, 9, 194, 0, 16, 0,
+ 27, 0, 0, 0, 6, 0,
+ 16, 0, 28, 0, 0, 0,
+ 6, 12, 16, 0, 21, 0,
+ 0, 0, 6, 4, 16, 0,
+ 30, 0, 0, 0, 50, 0,
+ 0, 9, 194, 0, 16, 0,
+ 27, 0, 0, 0, 6, 0,
+ 16, 0, 29, 0, 0, 0,
+ 6, 12, 16, 0, 22, 0,
+ 0, 0, 166, 14, 16, 0,
+ 27, 0, 0, 0, 50, 0,
+ 0, 9, 66, 0, 16, 0,
+ 27, 0, 0, 0, 26, 0,
+ 16, 0, 28, 0, 0, 0,
+ 26, 0, 16, 0, 21, 0,
+ 0, 0, 42, 0, 16, 0,
+ 27, 0, 0, 0, 50, 0,
+ 0, 9, 66, 0, 16, 0,
+ 27, 0, 0, 0, 26, 0,
+ 16, 0, 29, 0, 0, 0,
+ 26, 0, 16, 0, 22, 0,
+ 0, 0, 42, 0, 16, 0,
+ 27, 0, 0, 0, 50, 0,
+ 0, 9, 66, 0, 16, 0,
+ 27, 0, 0, 0, 42, 0,
+ 16, 0, 28, 0, 0, 0,
+ 42, 0, 16, 0, 21, 0,
+ 0, 0, 42, 0, 16, 0,
+ 27, 0, 0, 0, 50, 0,
+ 0, 9, 18, 0, 16, 0,
+ 30, 0, 0, 0, 42, 0,
+ 16, 0, 29, 0, 0, 0,
+ 42, 0, 16, 0, 22, 0,
+ 0, 0, 42, 0, 16, 0,
+ 27, 0, 0, 0, 50, 0,
+ 0, 9, 66, 0, 16, 0,
+ 27, 0, 0, 0, 26, 0,
+ 16, 0, 28, 0, 0, 0,
+ 10, 0, 16, 0, 23, 0,
+ 0, 0, 58, 0, 16, 0,
+ 27, 0, 0, 0, 50, 0,
+ 0, 9, 66, 0, 16, 0,
+ 27, 0, 0, 0, 26, 0,
+ 16, 0, 29, 0, 0, 0,
+ 10, 0, 16, 0, 24, 0,
+ 0, 0, 42, 0, 16, 0,
+ 27, 0, 0, 0, 50, 0,
+ 0, 9, 66, 0, 16, 0,
+ 27, 0, 0, 0, 42, 0,
+ 16, 0, 28, 0, 0, 0,
+ 26, 0, 16, 0, 23, 0,
+ 0, 0, 42, 0, 16, 0,
+ 27, 0, 0, 0, 50, 0,
+ 0, 9, 34, 0, 16, 0,
+ 30, 0, 0, 0, 42, 0,
+ 16, 0, 29, 0, 0, 0,
+ 26, 0, 16, 0, 24, 0,
+ 0, 0, 42, 0, 16, 0,
+ 27, 0, 0, 0, 50, 0,
+ 0, 9, 66, 0, 16, 0,
+ 27, 0, 0, 0, 10, 0,
+ 16, 0, 28, 0, 0, 0,
+ 42, 0, 16, 0, 23, 0,
+ 0, 0, 42, 0, 16, 0,
+ 30, 0, 0, 0, 50, 0,
+ 0, 9, 66, 0, 16, 0,
+ 27, 0, 0, 0, 10, 0,
+ 16, 0, 29, 0, 0, 0,
+ 42, 0, 16, 0, 24, 0,
+ 0, 0, 42, 0, 16, 0,
+ 27, 0, 0, 0, 50, 0,
+ 0, 9, 66, 0, 16, 0,
+ 27, 0, 0, 0, 26, 0,
+ 16, 0, 28, 0, 0, 0,
+ 58, 0, 16, 0, 23, 0,
+ 0, 0, 42, 0, 16, 0,
+ 27, 0, 0, 0, 50, 0,
+ 0, 9, 66, 0, 16, 0,
+ 27, 0, 0, 0, 26, 0,
+ 16, 0, 29, 0, 0, 0,
+ 58, 0, 16, 0, 24, 0,
+ 0, 0, 42, 0, 16, 0,
+ 27, 0, 0, 0, 50, 0,
+ 0, 9, 66, 0, 16, 0,
+ 27, 0, 0, 0, 42, 0,
+ 16, 0, 28, 0, 0, 0,
+ 10, 0, 16, 0, 25, 0,
+ 0, 0, 42, 0, 16, 0,
+ 27, 0, 0, 0, 50, 0,
+ 0, 9, 66, 0, 16, 0,
+ 27, 0, 0, 0, 42, 0,
+ 16, 0, 29, 0, 0, 0,
+ 58, 0, 16, 0, 19, 0,
+ 0, 0, 42, 0, 16, 0,
+ 27, 0, 0, 0, 18, 0,
+ 0, 1, 0, 0, 0, 8,
+ 114, 0, 16, 0, 31, 0,
+ 0, 0, 70, 2, 16, 0,
+ 28, 0, 0, 0, 70, 2,
+ 16, 128, 65, 0, 0, 0,
+ 29, 0, 0, 0, 50, 0,
+ 0, 9, 130, 0, 16, 0,
+ 27, 0, 0, 0, 10, 0,
+ 16, 0, 31, 0, 0, 0,
+ 58, 0, 16, 0, 26, 0,
+ 0, 0, 10, 0, 16, 0,
+ 26, 0, 0, 0, 0, 0,
+ 0, 7, 18, 0, 16, 0,
+ 30, 0, 0, 0, 58, 0,
+ 16, 0, 27, 0, 0, 0,
+ 10, 0, 16, 0, 28, 0,
+ 0, 0, 50, 0, 0, 9,
+ 130, 0, 16, 0, 27, 0,
+ 0, 0, 26, 0, 16, 0,
+ 31, 0, 0, 0, 58, 0,
+ 16, 0, 20, 0, 0, 0,
+ 26, 0, 16, 0, 26, 0,
+ 0, 0, 0, 0, 0, 7,
+ 34, 0, 16, 0, 30, 0,
+ 0, 0, 58, 0, 16, 0,
+ 27, 0, 0, 0, 26, 0,
+ 16, 0, 28, 0, 0, 0,
+ 50, 0, 0, 9, 130, 0,
+ 16, 0, 27, 0, 0, 0,
+ 42, 0, 16, 0, 31, 0,
+ 0, 0, 42, 0, 16, 0,
+ 25, 0, 0, 0, 42, 0,
+ 16, 0, 26, 0, 0, 0,
+ 0, 0, 0, 7, 66, 0,
+ 16, 0, 27, 0, 0, 0,
+ 58, 0, 16, 0, 27, 0,
+ 0, 0, 42, 0, 16, 0,
+ 28, 0, 0, 0, 21, 0,
+ 0, 1, 165, 0, 0, 7,
+ 114, 0, 16, 0, 31, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 70, 242,
+ 17, 0, 2, 0, 0, 0,
+ 0, 0, 0, 7, 114, 0,
+ 16, 0, 29, 0, 0, 0,
+ 70, 2, 16, 0, 28, 0,
+ 0, 0, 70, 2, 16, 0,
+ 31, 0, 0, 0, 54, 0,
+ 0, 5, 50, 0, 16, 0,
+ 28, 0, 0, 0, 70, 0,
+ 16, 0, 30, 0, 0, 0,
+ 54, 0, 0, 5, 66, 0,
+ 16, 0, 28, 0, 0, 0,
+ 42, 0, 16, 0, 27, 0,
+ 0, 0, 18, 0, 0, 1,
+ 54, 0, 0, 5, 114, 0,
+ 16, 0, 29, 0, 0, 0,
+ 70, 2, 16, 0, 28, 0,
+ 0, 0, 21, 0, 0, 1,
+ 168, 0, 0, 9, 242, 224,
+ 17, 0, 0, 0, 0, 0,
+ 10, 0, 16, 0, 27, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 70, 14,
+ 16, 0, 28, 0, 0, 0,
+ 168, 0, 0, 9, 242, 224,
+ 17, 0, 0, 0, 0, 0,
+ 26, 0, 16, 0, 27, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 70, 14,
+ 16, 0, 29, 0, 0, 0,
+ 30, 0, 0, 7, 130, 0,
+ 16, 0, 25, 0, 0, 0,
+ 58, 0, 16, 0, 25, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 4, 0, 0, 22, 0,
+ 0, 1, 165, 0, 0, 7,
+ 66, 0, 16, 0, 19, 0,
+ 0, 0, 1, 64, 0, 0,
+ 44, 0, 0, 0, 6, 240,
+ 17, 0, 1, 0, 0, 0,
+ 190, 24, 0, 1, 165, 0,
+ 0, 7, 130, 0, 16, 0,
+ 19, 0, 0, 0, 1, 64,
+ 0, 0, 8, 0, 0, 0,
+ 6, 240, 17, 0, 1, 0,
+ 0, 0, 56, 0, 0, 7,
+ 130, 0, 16, 0, 19, 0,
+ 0, 0, 58, 0, 16, 0,
+ 19, 0, 0, 0, 58, 0,
+ 16, 0, 19, 0, 0, 0,
+ 55, 0, 0, 9, 130, 0,
+ 16, 0, 19, 0, 0, 0,
+ 42, 0, 16, 0, 0, 0,
+ 0, 0, 58, 0, 16, 0,
+ 19, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 165, 0, 0, 7, 18, 0,
+ 16, 0, 20, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 0, 0, 0, 0, 41, 0,
+ 0, 7, 34, 0, 16, 0,
+ 20, 0, 0, 0, 10, 0,
+ 16, 0, 20, 0, 0, 0,
+ 1, 64, 0, 0, 2, 0,
+ 0, 0, 54, 0, 0, 4,
+ 66, 0, 16, 0, 20, 0,
+ 0, 0, 10, 32, 2, 0,
+ 48, 0, 0, 1, 80, 0,
+ 0, 7, 130, 0, 16, 0,
+ 20, 0, 0, 0, 42, 0,
+ 16, 0, 20, 0, 0, 0,
+ 26, 0, 16, 0, 20, 0,
+ 0, 0, 3, 0, 4, 3,
+ 58, 0, 16, 0, 20, 0,
+ 0, 0, 85, 0, 0, 7,
+ 130, 0, 16, 0, 20, 0,
+ 0, 0, 42, 0, 16, 0,
+ 20, 0, 0, 0, 1, 64,
+ 0, 0, 2, 0, 0, 0,
+ 30, 0, 0, 7, 130, 0,
+ 16, 0, 20, 0, 0, 0,
+ 26, 0, 16, 0, 0, 0,
+ 0, 0, 58, 0, 16, 0,
+ 20, 0, 0, 0, 167, 0,
+ 0, 139, 2, 131, 0, 128,
+ 131, 153, 25, 0, 242, 0,
+ 16, 0, 21, 0, 0, 0,
+ 58, 0, 16, 0, 20, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 70, 238,
+ 17, 0, 0, 0, 0, 0,
+ 49, 0, 0, 7, 18, 0,
+ 16, 0, 22, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 58, 0, 16, 0,
+ 21, 0, 0, 0, 31, 0,
+ 4, 3, 10, 0, 16, 0,
+ 22, 0, 0, 0, 30, 0,
+ 0, 7, 18, 0, 16, 0,
+ 22, 0, 0, 0, 42, 0,
+ 16, 0, 19, 0, 0, 0,
+ 42, 0, 16, 0, 20, 0,
+ 0, 0, 167, 0, 0, 139,
+ 2, 131, 0, 128, 131, 153,
+ 25, 0, 242, 0, 16, 0,
+ 22, 0, 0, 0, 10, 0,
+ 16, 0, 22, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 70, 126, 16, 0,
+ 13, 0, 0, 0, 50, 0,
+ 0, 9, 242, 0, 16, 0,
+ 21, 0, 0, 0, 70, 14,
+ 16, 0, 22, 0, 0, 0,
+ 246, 15, 16, 0, 19, 0,
+ 0, 0, 70, 14, 16, 0,
+ 21, 0, 0, 0, 168, 0,
+ 0, 9, 242, 224, 17, 0,
+ 0, 0, 0, 0, 58, 0,
+ 16, 0, 20, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 70, 14, 16, 0,
+ 21, 0, 0, 0, 21, 0,
+ 0, 1, 30, 0, 0, 7,
+ 66, 0, 16, 0, 20, 0,
+ 0, 0, 42, 0, 16, 0,
+ 20, 0, 0, 0, 1, 64,
+ 0, 0, 0, 4, 0, 0,
+ 22, 0, 0, 1, 190, 24,
+ 0, 1, 165, 0, 0, 7,
+ 66, 0, 16, 0, 19, 0,
+ 0, 0, 1, 64, 0, 0,
+ 28, 0, 0, 0, 6, 240,
+ 17, 0, 1, 0, 0, 0,
+ 39, 0, 0, 7, 66, 0,
+ 16, 0, 19, 0, 0, 0,
+ 42, 0, 16, 0, 19, 0,
+ 0, 0, 1, 64, 0, 0,
+ 255, 255, 255, 255, 31, 0,
+ 4, 3, 42, 0, 16, 0,
+ 19, 0, 0, 0, 165, 0,
+ 0, 7, 194, 0, 16, 0,
+ 19, 0, 0, 0, 1, 64,
+ 0, 0, 36, 0, 0, 0,
+ 6, 244, 17, 0, 0, 0,
+ 0, 0, 165, 0, 0, 7,
+ 114, 0, 16, 0, 20, 0,
+ 0, 0, 1, 64, 0, 0,
+ 24, 0, 0, 0, 70, 242,
+ 17, 0, 1, 0, 0, 0,
+ 165, 0, 0, 7, 130, 0,
+ 16, 0, 20, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 0, 0, 0, 0, 54, 0,
+ 0, 4, 18, 0, 16, 0,
+ 21, 0, 0, 0, 10, 32,
+ 2, 0, 48, 0, 0, 1,
+ 80, 0, 0, 7, 34, 0,
+ 16, 0, 21, 0, 0, 0,
+ 10, 0, 16, 0, 21, 0,
+ 0, 0, 58, 0, 16, 0,
+ 20, 0, 0, 0, 3, 0,
+ 4, 3, 26, 0, 16, 0,
+ 21, 0, 0, 0, 30, 0,
+ 0, 7, 98, 0, 16, 0,
+ 21, 0, 0, 0, 86, 6,
+ 16, 0, 20, 0, 0, 0,
+ 6, 0, 16, 0, 21, 0,
+ 0, 0, 167, 0, 0, 139,
+ 2, 131, 0, 128, 131, 153,
+ 25, 0, 242, 0, 16, 0,
+ 22, 0, 0, 0, 26, 0,
+ 16, 0, 21, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 70, 126, 16, 0,
+ 11, 0, 0, 0, 167, 0,
+ 0, 139, 2, 131, 0, 128,
+ 131, 153, 25, 0, 242, 0,
+ 16, 0, 23, 0, 0, 0,
+ 42, 0, 16, 0, 21, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 70, 126,
+ 16, 0, 11, 0, 0, 0,
+ 0, 0, 0, 8, 242, 0,
+ 16, 0, 23, 0, 0, 0,
+ 70, 14, 16, 128, 65, 0,
+ 0, 0, 22, 0, 0, 0,
+ 70, 14, 16, 0, 23, 0,
+ 0, 0, 50, 0, 0, 9,
+ 242, 0, 16, 0, 22, 0,
+ 0, 0, 70, 14, 16, 0,
+ 23, 0, 0, 0, 6, 0,
+ 16, 0, 19, 0, 0, 0,
+ 70, 14, 16, 0, 22, 0,
+ 0, 0, 30, 0, 0, 7,
+ 34, 0, 16, 0, 21, 0,
+ 0, 0, 26, 0, 16, 0,
+ 0, 0, 0, 0, 10, 0,
+ 16, 0, 21, 0, 0, 0,
+ 167, 0, 0, 139, 2, 131,
+ 0, 128, 131, 153, 25, 0,
+ 242, 0, 16, 0, 23, 0,
+ 0, 0, 26, 0, 16, 0,
+ 21, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 70, 238, 17, 0, 0, 0,
+ 0, 0, 0, 0, 0, 8,
+ 114, 0, 16, 0, 22, 0,
+ 0, 0, 70, 2, 16, 0,
+ 22, 0, 0, 0, 70, 2,
+ 16, 128, 65, 0, 0, 0,
+ 23, 0, 0, 0, 16, 0,
+ 0, 7, 66, 0, 16, 0,
+ 21, 0, 0, 0, 70, 2,
+ 16, 0, 22, 0, 0, 0,
+ 70, 2, 16, 0, 22, 0,
+ 0, 0, 0, 0, 0, 7,
+ 66, 0, 16, 0, 21, 0,
+ 0, 0, 42, 0, 16, 0,
+ 21, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 52,
+ 50, 0, 0, 11, 130, 0,
+ 16, 0, 21, 0, 0, 0,
+ 58, 0, 16, 0, 22, 0,
+ 0, 0, 42, 0, 16, 128,
+ 65, 0, 0, 0, 19, 0,
+ 0, 0, 58, 0, 16, 128,
+ 65, 0, 0, 0, 19, 0,
+ 0, 0, 51, 0, 0, 7,
+ 130, 0, 16, 0, 21, 0,
+ 0, 0, 58, 0, 16, 0,
+ 21, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 68, 0, 0, 5, 66, 0,
+ 16, 0, 21, 0, 0, 0,
+ 42, 0, 16, 0, 21, 0,
+ 0, 0, 50, 0, 0, 9,
+ 66, 0, 16, 0, 21, 0,
+ 0, 0, 58, 0, 16, 0,
+ 21, 0, 0, 0, 42, 0,
+ 16, 0, 21, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 128, 63, 52, 0, 0, 7,
+ 66, 0, 16, 0, 21, 0,
+ 0, 0, 42, 0, 16, 0,
+ 21, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 56, 0, 0, 7, 66, 0,
+ 16, 0, 21, 0, 0, 0,
+ 10, 0, 16, 0, 20, 0,
+ 0, 0, 42, 0, 16, 0,
+ 21, 0, 0, 0, 50, 0,
+ 0, 9, 114, 0, 16, 0,
+ 22, 0, 0, 0, 166, 10,
+ 16, 0, 21, 0, 0, 0,
+ 70, 2, 16, 0, 22, 0,
+ 0, 0, 70, 2, 16, 0,
+ 23, 0, 0, 0, 29, 0,
+ 0, 7, 66, 0, 16, 0,
+ 21, 0, 0, 0, 58, 0,
+ 16, 0, 21, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 55, 0, 0, 9,
+ 130, 0, 16, 0, 22, 0,
+ 0, 0, 42, 0, 16, 0,
+ 21, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 58, 0, 16, 0, 23, 0,
+ 0, 0, 168, 0, 0, 9,
+ 242, 224, 17, 0, 0, 0,
+ 0, 0, 26, 0, 16, 0,
+ 21, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 70, 14, 16, 0, 22, 0,
+ 0, 0, 30, 0, 0, 7,
+ 18, 0, 16, 0, 21, 0,
+ 0, 0, 10, 0, 16, 0,
+ 21, 0, 0, 0, 1, 64,
+ 0, 0, 0, 4, 0, 0,
+ 22, 0, 0, 1, 21, 0,
+ 0, 1, 165, 0, 0, 7,
+ 66, 0, 16, 0, 19, 0,
+ 0, 0, 1, 64, 0, 0,
+ 20, 0, 0, 0, 6, 240,
+ 17, 0, 1, 0, 0, 0,
+ 24, 0, 0, 7, 130, 0,
+ 16, 0, 19, 0, 0, 0,
+ 42, 0, 16, 0, 19, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 165, 0,
+ 0, 7, 18, 0, 16, 0,
+ 20, 0, 0, 0, 1, 64,
+ 0, 0, 24, 0, 0, 0,
+ 6, 240, 17, 0, 0, 0,
+ 0, 0, 32, 0, 0, 7,
+ 34, 0, 16, 0, 20, 0,
+ 0, 0, 10, 0, 16, 0,
+ 20, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 60, 0, 0, 7, 130, 0,
+ 16, 0, 19, 0, 0, 0,
+ 58, 0, 16, 0, 19, 0,
+ 0, 0, 26, 0, 16, 0,
+ 20, 0, 0, 0, 31, 0,
+ 0, 3, 58, 0, 16, 0,
+ 19, 0, 0, 0, 165, 0,
+ 0, 7, 130, 0, 16, 0,
+ 19, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 0, 0,
+ 0, 0, 86, 0, 0, 5,
+ 34, 0, 16, 0, 20, 0,
+ 0, 0, 58, 0, 16, 0,
+ 19, 0, 0, 0, 56, 0,
+ 0, 7, 66, 0, 16, 0,
+ 19, 0, 0, 0, 42, 0,
+ 16, 0, 19, 0, 0, 0,
+ 26, 0, 16, 0, 20, 0,
+ 0, 0, 86, 0, 0, 5,
+ 34, 0, 16, 0, 20, 0,
+ 0, 0, 10, 0, 16, 0,
+ 20, 0, 0, 0, 14, 0,
+ 0, 7, 66, 0, 16, 0,
+ 19, 0, 0, 0, 42, 0,
+ 16, 0, 19, 0, 0, 0,
+ 26, 0, 16, 0, 20, 0,
+ 0, 0, 165, 0, 0, 7,
+ 98, 0, 16, 0, 20, 0,
+ 0, 0, 1, 64, 0, 0,
+ 28, 0, 0, 0, 6, 241,
+ 17, 0, 0, 0, 0, 0,
+ 54, 0, 0, 4, 130, 0,
+ 16, 0, 20, 0, 0, 0,
+ 10, 32, 2, 0, 48, 0,
+ 0, 1, 80, 0, 0, 7,
+ 18, 0, 16, 0, 21, 0,
+ 0, 0, 58, 0, 16, 0,
+ 20, 0, 0, 0, 58, 0,
+ 16, 0, 19, 0, 0, 0,
+ 3, 0, 4, 3, 10, 0,
+ 16, 0, 21, 0, 0, 0,
+ 30, 0, 0, 7, 18, 0,
+ 16, 0, 21, 0, 0, 0,
+ 26, 0, 16, 0, 0, 0,
+ 0, 0, 58, 0, 16, 0,
+ 20, 0, 0, 0, 167, 0,
+ 0, 139, 2, 131, 0, 128,
+ 131, 153, 25, 0, 242, 0,
+ 16, 0, 22, 0, 0, 0,
+ 10, 0, 16, 0, 21, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 70, 238,
+ 17, 0, 0, 0, 0, 0,
+ 54, 0, 0, 8, 226, 0,
+ 16, 0, 21, 0, 0, 0,
+ 2, 64, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 54, 0, 0, 5,
+ 18, 0, 16, 0, 23, 0,
+ 0, 0, 58, 0, 16, 0,
+ 20, 0, 0, 0, 48, 0,
+ 0, 1, 80, 0, 0, 7,
+ 34, 0, 16, 0, 23, 0,
+ 0, 0, 10, 0, 16, 0,
+ 23, 0, 0, 0, 10, 0,
+ 16, 0, 20, 0, 0, 0,
+ 3, 0, 4, 3, 26, 0,
+ 16, 0, 23, 0, 0, 0,
+ 30, 0, 0, 7, 34, 0,
+ 16, 0, 23, 0, 0, 0,
+ 26, 0, 16, 0, 20, 0,
+ 0, 0, 10, 0, 16, 0,
+ 23, 0, 0, 0, 167, 0,
+ 0, 139, 2, 35, 0, 128,
+ 131, 153, 25, 0, 34, 0,
+ 16, 0, 23, 0, 0, 0,
+ 26, 0, 16, 0, 23, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 112,
+ 16, 0, 5, 0, 0, 0,
+ 1, 0, 0, 7, 66, 0,
+ 16, 0, 23, 0, 0, 0,
+ 26, 0, 16, 0, 23, 0,
+ 0, 0, 1, 64, 0, 0,
+ 255, 255, 0, 0, 30, 0,
+ 0, 7, 66, 0, 16, 0,
+ 23, 0, 0, 0, 26, 0,
+ 16, 0, 0, 0, 0, 0,
+ 42, 0, 16, 0, 23, 0,
+ 0, 0, 167, 0, 0, 139,
+ 2, 131, 0, 128, 131, 153,
+ 25, 0, 114, 0, 16, 0,
+ 24, 0, 0, 0, 42, 0,
+ 16, 0, 23, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 70, 226, 17, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 8, 114, 0, 16, 0,
+ 24, 0, 0, 0, 70, 2,
+ 16, 128, 65, 0, 0, 0,
+ 22, 0, 0, 0, 70, 2,
+ 16, 0, 24, 0, 0, 0,
+ 50, 0, 0, 9, 66, 0,
+ 16, 0, 23, 0, 0, 0,
+ 10, 0, 16, 0, 24, 0,
+ 0, 0, 10, 0, 16, 0,
+ 24, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 52,
+ 50, 0, 0, 9, 66, 0,
+ 16, 0, 23, 0, 0, 0,
+ 26, 0, 16, 0, 24, 0,
+ 0, 0, 26, 0, 16, 0,
+ 24, 0, 0, 0, 42, 0,
+ 16, 0, 23, 0, 0, 0,
+ 50, 0, 0, 9, 66, 0,
+ 16, 0, 23, 0, 0, 0,
+ 42, 0, 16, 0, 24, 0,
+ 0, 0, 42, 0, 16, 0,
+ 24, 0, 0, 0, 42, 0,
+ 16, 0, 23, 0, 0, 0,
+ 85, 0, 0, 7, 34, 0,
+ 16, 0, 23, 0, 0, 0,
+ 26, 0, 16, 0, 23, 0,
+ 0, 0, 1, 64, 0, 0,
+ 16, 0, 0, 0, 86, 0,
+ 0, 5, 34, 0, 16, 0,
+ 23, 0, 0, 0, 26, 0,
+ 16, 0, 23, 0, 0, 0,
+ 56, 0, 0, 7, 34, 0,
+ 16, 0, 23, 0, 0, 0,
+ 42, 0, 16, 0, 20, 0,
+ 0, 0, 26, 0, 16, 0,
+ 23, 0, 0, 0, 68, 0,
+ 0, 5, 66, 0, 16, 0,
+ 23, 0, 0, 0, 42, 0,
+ 16, 0, 23, 0, 0, 0,
+ 50, 0, 0, 10, 34, 0,
+ 16, 0, 23, 0, 0, 0,
+ 26, 0, 16, 128, 65, 0,
+ 0, 0, 23, 0, 0, 0,
+ 42, 0, 16, 0, 23, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 128, 63, 49, 0,
+ 0, 7, 66, 0, 16, 0,
+ 23, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 26, 0, 16, 0, 23, 0,
+ 0, 0, 50, 0, 0, 9,
+ 114, 0, 16, 0, 24, 0,
+ 0, 0, 70, 2, 16, 0,
+ 24, 0, 0, 0, 86, 5,
+ 16, 0, 23, 0, 0, 0,
+ 150, 7, 16, 0, 21, 0,
+ 0, 0, 55, 0, 0, 9,
+ 226, 0, 16, 0, 21, 0,
+ 0, 0, 166, 10, 16, 0,
+ 23, 0, 0, 0, 6, 9,
+ 16, 0, 24, 0, 0, 0,
+ 86, 14, 16, 0, 21, 0,
+ 0, 0, 30, 0, 0, 7,
+ 18, 0, 16, 0, 23, 0,
+ 0, 0, 58, 0, 16, 0,
+ 19, 0, 0, 0, 10, 0,
+ 16, 0, 23, 0, 0, 0,
+ 22, 0, 0, 1, 50, 0,
+ 0, 9, 114, 0, 16, 0,
+ 22, 0, 0, 0, 150, 7,
+ 16, 0, 21, 0, 0, 0,
+ 166, 10, 16, 0, 19, 0,
+ 0, 0, 70, 2, 16, 0,
+ 22, 0, 0, 0, 168, 0,
+ 0, 9, 242, 224, 17, 0,
+ 0, 0, 0, 0, 10, 0,
+ 16, 0, 21, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 70, 14, 16, 0,
+ 22, 0, 0, 0, 30, 0,
+ 0, 7, 130, 0, 16, 0,
+ 20, 0, 0, 0, 58, 0,
+ 16, 0, 20, 0, 0, 0,
+ 1, 64, 0, 0, 0, 4,
+ 0, 0, 22, 0, 0, 1,
+ 21, 0, 0, 1, 54, 0,
+ 0, 5, 66, 0, 16, 0,
+ 19, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 48, 0, 0, 1, 165, 0,
+ 0, 7, 130, 0, 16, 0,
+ 19, 0, 0, 0, 1, 64,
+ 0, 0, 8, 0, 0, 0,
+ 6, 240, 17, 0, 0, 0,
+ 0, 0, 80, 0, 0, 7,
+ 130, 0, 16, 0, 19, 0,
+ 0, 0, 42, 0, 16, 0,
+ 19, 0, 0, 0, 58, 0,
+ 16, 0, 19, 0, 0, 0,
+ 3, 0, 4, 3, 58, 0,
+ 16, 0, 19, 0, 0, 0,
+ 165, 0, 0, 7, 114, 0,
+ 16, 0, 20, 0, 0, 0,
+ 1, 64, 0, 0, 12, 0,
+ 0, 0, 70, 242, 17, 0,
+ 0, 0, 0, 0, 30, 0,
+ 0, 7, 130, 0, 16, 0,
+ 19, 0, 0, 0, 42, 0,
+ 16, 0, 19, 0, 0, 0,
+ 10, 0, 16, 0, 20, 0,
+ 0, 0, 167, 0, 0, 139,
+ 2, 195, 0, 128, 131, 153,
+ 25, 0, 242, 0, 16, 0,
+ 22, 0, 0, 0, 58, 0,
+ 16, 0, 19, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 70, 126, 16, 0,
+ 3, 0, 0, 0, 167, 0,
+ 0, 139, 2, 195, 0, 128,
+ 131, 153, 25, 0, 146, 0,
+ 16, 0, 20, 0, 0, 0,
+ 58, 0, 16, 0, 19, 0,
+ 0, 0, 1, 64, 0, 0,
+ 16, 0, 0, 0, 6, 116,
+ 16, 0, 3, 0, 0, 0,
+ 165, 0, 0, 7, 130, 0,
+ 16, 0, 19, 0, 0, 0,
+ 1, 64, 0, 0, 108, 0,
+ 0, 0, 6, 240, 17, 0,
+ 1, 0, 0, 0, 56, 0,
+ 0, 7, 50, 0, 16, 0,
+ 22, 0, 0, 0, 246, 15,
+ 16, 0, 19, 0, 0, 0,
+ 70, 0, 16, 0, 22, 0,
+ 0, 0, 25, 0, 0, 5,
+ 50, 0, 16, 0, 22, 0,
+ 0, 0, 70, 0, 16, 0,
+ 22, 0, 0, 0, 0, 0,
+ 0, 11, 50, 0, 16, 0,
+ 22, 0, 0, 0, 70, 0,
+ 16, 128, 65, 0, 0, 0,
+ 22, 0, 0, 0, 2, 64,
+ 0, 0, 0, 0, 128, 63,
+ 0, 0, 128, 63, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 30, 0, 0, 7, 34, 0,
+ 16, 0, 20, 0, 0, 0,
+ 10, 0, 16, 0, 20, 0,
+ 0, 0, 26, 0, 16, 0,
+ 20, 0, 0, 0, 39, 0,
+ 0, 7, 66, 0, 16, 0,
+ 20, 0, 0, 0, 42, 0,
+ 16, 0, 20, 0, 0, 0,
+ 1, 64, 0, 0, 255, 255,
+ 255, 255, 165, 0, 0, 7,
+ 18, 0, 16, 0, 21, 0,
+ 0, 0, 1, 64, 0, 0,
+ 20, 0, 0, 0, 6, 240,
+ 17, 0, 0, 0, 0, 0,
+ 30, 0, 0, 7, 18, 0,
+ 16, 0, 20, 0, 0, 0,
+ 10, 0, 16, 0, 20, 0,
+ 0, 0, 10, 0, 16, 0,
+ 21, 0, 0, 0, 190, 24,
+ 0, 1, 54, 0, 0, 4,
+ 18, 0, 16, 0, 21, 0,
+ 0, 0, 10, 32, 2, 0,
+ 48, 0, 0, 1, 80, 0,
+ 0, 7, 18, 0, 16, 0,
+ 23, 0, 0, 0, 10, 0,
+ 16, 0, 21, 0, 0, 0,
+ 58, 0, 16, 0, 20, 0,
+ 0, 0, 3, 0, 4, 3,
+ 10, 0, 16, 0, 23, 0,
+ 0, 0, 30, 0, 0, 7,
+ 18, 0, 16, 0, 23, 0,
+ 0, 0, 26, 0, 16, 0,
+ 20, 0, 0, 0, 10, 0,
+ 16, 0, 21, 0, 0, 0,
+ 167, 0, 0, 139, 2, 67,
+ 0, 128, 131, 153, 25, 0,
+ 50, 0, 16, 0, 23, 0,
+ 0, 0, 10, 0, 16, 0,
+ 23, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 70, 112, 16, 0, 4, 0,
+ 0, 0, 1, 0, 0, 7,
+ 66, 0, 16, 0, 23, 0,
+ 0, 0, 26, 0, 16, 0,
+ 23, 0, 0, 0, 1, 64,
+ 0, 0, 255, 255, 0, 0,
+ 85, 0, 0, 7, 34, 0,
+ 16, 0, 23, 0, 0, 0,
+ 26, 0, 16, 0, 23, 0,
+ 0, 0, 1, 64, 0, 0,
+ 16, 0, 0, 0, 30, 0,
+ 0, 7, 98, 0, 16, 0,
+ 23, 0, 0, 0, 86, 5,
+ 16, 0, 0, 0, 0, 0,
+ 86, 6, 16, 0, 23, 0,
+ 0, 0, 167, 0, 0, 139,
+ 2, 131, 0, 128, 131, 153,
+ 25, 0, 242, 0, 16, 0,
+ 24, 0, 0, 0, 42, 0,
+ 16, 0, 23, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 70, 238, 17, 0,
+ 0, 0, 0, 0, 167, 0,
+ 0, 139, 2, 131, 0, 128,
+ 131, 153, 25, 0, 242, 0,
+ 16, 0, 25, 0, 0, 0,
+ 26, 0, 16, 0, 23, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 70, 238,
+ 17, 0, 0, 0, 0, 0,
+ 0, 0, 0, 8, 114, 0,
+ 16, 0, 26, 0, 0, 0,
+ 70, 2, 16, 128, 65, 0,
+ 0, 0, 24, 0, 0, 0,
+ 70, 2, 16, 0, 25, 0,
+ 0, 0, 50, 0, 0, 9,
+ 130, 0, 16, 0, 23, 0,
+ 0, 0, 10, 0, 16, 0,
+ 26, 0, 0, 0, 10, 0,
+ 16, 0, 26, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 52, 50, 0, 0, 9,
+ 130, 0, 16, 0, 23, 0,
+ 0, 0, 26, 0, 16, 0,
+ 26, 0, 0, 0, 26, 0,
+ 16, 0, 26, 0, 0, 0,
+ 58, 0, 16, 0, 23, 0,
+ 0, 0, 50, 0, 0, 9,
+ 130, 0, 16, 0, 23, 0,
+ 0, 0, 42, 0, 16, 0,
+ 26, 0, 0, 0, 42, 0,
+ 16, 0, 26, 0, 0, 0,
+ 58, 0, 16, 0, 23, 0,
+ 0, 0, 49, 0, 0, 7,
+ 130, 0, 16, 0, 26, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 52, 10, 0,
+ 16, 0, 23, 0, 0, 0,
+ 68, 0, 0, 5, 130, 0,
+ 16, 0, 23, 0, 0, 0,
+ 58, 0, 16, 0, 23, 0,
+ 0, 0, 50, 0, 0, 9,
+ 18, 0, 16, 0, 23, 0,
+ 0, 0, 10, 0, 16, 0,
+ 23, 0, 0, 0, 58, 0,
+ 16, 0, 23, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 128, 191, 1, 0, 0, 7,
+ 18, 0, 16, 0, 23, 0,
+ 0, 0, 10, 0, 16, 0,
+ 23, 0, 0, 0, 58, 0,
+ 16, 0, 26, 0, 0, 0,
+ 51, 0, 0, 8, 130, 0,
+ 16, 0, 23, 0, 0, 0,
+ 58, 0, 16, 0, 22, 0,
+ 0, 0, 10, 0, 16, 128,
+ 65, 0, 0, 0, 23, 0,
+ 0, 0, 52, 0, 0, 7,
+ 130, 0, 16, 0, 23, 0,
+ 0, 0, 42, 0, 16, 0,
+ 22, 0, 0, 0, 58, 0,
+ 16, 0, 23, 0, 0, 0,
+ 50, 0, 0, 9, 18, 0,
+ 16, 0, 23, 0, 0, 0,
+ 26, 0, 16, 0, 22, 0,
+ 0, 0, 58, 0, 16, 0,
+ 23, 0, 0, 0, 10, 0,
+ 16, 0, 23, 0, 0, 0,
+ 30, 0, 0, 7, 130, 0,
+ 16, 0, 23, 0, 0, 0,
+ 10, 0, 16, 0, 20, 0,
+ 0, 0, 10, 0, 16, 0,
+ 21, 0, 0, 0, 167, 0,
+ 0, 139, 2, 35, 0, 128,
+ 131, 153, 25, 0, 130, 0,
+ 16, 0, 23, 0, 0, 0,
+ 58, 0, 16, 0, 23, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 112,
+ 16, 0, 16, 0, 0, 0,
+ 56, 0, 0, 7, 130, 0,
+ 16, 0, 23, 0, 0, 0,
+ 58, 0, 16, 0, 19, 0,
+ 0, 0, 58, 0, 16, 0,
+ 23, 0, 0, 0, 25, 0,
+ 0, 5, 130, 0, 16, 0,
+ 23, 0, 0, 0, 58, 0,
+ 16, 0, 23, 0, 0, 0,
+ 0, 0, 0, 8, 130, 0,
+ 16, 0, 23, 0, 0, 0,
+ 58, 0, 16, 128, 65, 0,
+ 0, 0, 23, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 128, 63, 55, 0, 0, 9,
+ 130, 0, 16, 0, 23, 0,
+ 0, 0, 42, 0, 16, 0,
+ 20, 0, 0, 0, 58, 0,
+ 16, 0, 23, 0, 0, 0,
+ 10, 0, 16, 0, 22, 0,
+ 0, 0, 56, 0, 0, 7,
+ 18, 0, 16, 0, 23, 0,
+ 0, 0, 58, 0, 16, 0,
+ 23, 0, 0, 0, 10, 0,
+ 16, 0, 23, 0, 0, 0,
+ 0, 0, 0, 7, 130, 0,
+ 16, 0, 23, 0, 0, 0,
+ 58, 0, 16, 0, 24, 0,
+ 0, 0, 58, 0, 16, 0,
+ 25, 0, 0, 0, 0, 0,
+ 0, 7, 130, 0, 16, 0,
+ 23, 0, 0, 0, 58, 0,
+ 16, 0, 23, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 52, 14, 0, 0, 7,
+ 18, 0, 16, 0, 23, 0,
+ 0, 0, 10, 0, 16, 0,
+ 23, 0, 0, 0, 58, 0,
+ 16, 0, 23, 0, 0, 0,
+ 56, 0, 0, 8, 130, 0,
+ 16, 0, 23, 0, 0, 0,
+ 10, 0, 16, 0, 23, 0,
+ 0, 0, 58, 0, 16, 128,
+ 65, 0, 0, 0, 24, 0,
+ 0, 0, 50, 0, 0, 9,
+ 114, 0, 16, 0, 24, 0,
+ 0, 0, 246, 15, 16, 0,
+ 23, 0, 0, 0, 70, 2,
+ 16, 0, 26, 0, 0, 0,
+ 70, 2, 16, 0, 24, 0,
+ 0, 0, 168, 0, 0, 9,
+ 114, 224, 17, 0, 0, 0,
+ 0, 0, 42, 0, 16, 0,
+ 23, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 70, 2, 16, 0, 24, 0,
+ 0, 0, 56, 0, 0, 7,
+ 18, 0, 16, 0, 23, 0,
+ 0, 0, 10, 0, 16, 0,
+ 23, 0, 0, 0, 58, 0,
+ 16, 0, 25, 0, 0, 0,
+ 50, 0, 0, 9, 210, 0,
+ 16, 0, 23, 0, 0, 0,
+ 6, 0, 16, 0, 23, 0,
+ 0, 0, 6, 9, 16, 0,
+ 26, 0, 0, 0, 6, 9,
+ 16, 0, 25, 0, 0, 0,
+ 168, 0, 0, 9, 114, 224,
+ 17, 0, 0, 0, 0, 0,
+ 26, 0, 16, 0, 23, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 134, 3,
+ 16, 0, 23, 0, 0, 0,
+ 30, 0, 0, 7, 18, 0,
+ 16, 0, 21, 0, 0, 0,
+ 10, 0, 16, 0, 21, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 4, 0, 0, 22, 0,
+ 0, 1, 30, 0, 0, 7,
+ 66, 0, 16, 0, 19, 0,
+ 0, 0, 42, 0, 16, 0,
+ 19, 0, 0, 0, 1, 64,
+ 0, 0, 1, 0, 0, 0,
+ 22, 0, 0, 1, 190, 24,
+ 0, 1, 165, 0, 0, 7,
+ 66, 0, 16, 0, 19, 0,
+ 0, 0, 1, 64, 0, 0,
+ 36, 0, 0, 0, 6, 240,
+ 17, 0, 1, 0, 0, 0,
+ 39, 0, 0, 7, 66, 0,
+ 16, 0, 19, 0, 0, 0,
+ 42, 0, 16, 0, 19, 0,
+ 0, 0, 1, 64, 0, 0,
+ 255, 255, 255, 255, 31, 0,
+ 4, 3, 42, 0, 16, 0,
+ 19, 0, 0, 0, 165, 0,
+ 0, 7, 66, 0, 16, 0,
+ 19, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 0, 0,
+ 0, 0, 165, 0, 0, 7,
+ 50, 0, 16, 0, 20, 0,
+ 0, 0, 1, 64, 0, 0,
+ 36, 0, 0, 0, 70, 240,
+ 17, 0, 1, 0, 0, 0,
+ 54, 0, 0, 4, 130, 0,
+ 16, 0, 19, 0, 0, 0,
+ 10, 32, 2, 0, 48, 0,
+ 0, 1, 80, 0, 0, 7,
+ 66, 0, 16, 0, 20, 0,
+ 0, 0, 58, 0, 16, 0,
+ 19, 0, 0, 0, 42, 0,
+ 16, 0, 19, 0, 0, 0,
+ 3, 0, 4, 3, 42, 0,
+ 16, 0, 20, 0, 0, 0,
+ 30, 0, 0, 7, 194, 0,
+ 16, 0, 20, 0, 0, 0,
+ 6, 4, 16, 0, 20, 0,
+ 0, 0, 246, 15, 16, 0,
+ 19, 0, 0, 0, 167, 0,
+ 0, 139, 2, 131, 0, 128,
+ 131, 153, 25, 0, 242, 0,
+ 16, 0, 22, 0, 0, 0,
+ 42, 0, 16, 0, 20, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 70, 126,
+ 16, 0, 12, 0, 0, 0,
+ 167, 0, 0, 139, 2, 131,
+ 0, 128, 131, 153, 25, 0,
+ 242, 0, 16, 0, 23, 0,
+ 0, 0, 58, 0, 16, 0,
+ 20, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 70, 126, 16, 0, 12, 0,
+ 0, 0, 0, 0, 0, 8,
+ 242, 0, 16, 0, 23, 0,
+ 0, 0, 70, 14, 16, 128,
+ 65, 0, 0, 0, 22, 0,
+ 0, 0, 70, 14, 16, 0,
+ 23, 0, 0, 0, 50, 0,
+ 0, 9, 242, 0, 16, 0,
+ 22, 0, 0, 0, 70, 14,
+ 16, 0, 23, 0, 0, 0,
+ 6, 0, 16, 0, 19, 0,
+ 0, 0, 70, 14, 16, 0,
+ 22, 0, 0, 0, 30, 0,
+ 0, 7, 66, 0, 16, 0,
+ 20, 0, 0, 0, 26, 0,
+ 16, 0, 0, 0, 0, 0,
+ 58, 0, 16, 0, 19, 0,
+ 0, 0, 167, 0, 0, 139,
+ 2, 131, 0, 128, 131, 153,
+ 25, 0, 114, 0, 16, 0,
+ 23, 0, 0, 0, 42, 0,
+ 16, 0, 20, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 70, 226, 17, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 8, 114, 0, 16, 0,
+ 22, 0, 0, 0, 70, 2,
+ 16, 0, 22, 0, 0, 0,
+ 70, 2, 16, 128, 65, 0,
+ 0, 0, 23, 0, 0, 0,
+ 16, 0, 0, 7, 130, 0,
+ 16, 0, 20, 0, 0, 0,
+ 70, 2, 16, 0, 22, 0,
+ 0, 0, 70, 2, 16, 0,
+ 22, 0, 0, 0, 0, 0,
+ 0, 7, 130, 0, 16, 0,
+ 20, 0, 0, 0, 58, 0,
+ 16, 0, 20, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 52, 68, 0, 0, 5,
+ 130, 0, 16, 0, 20, 0,
+ 0, 0, 58, 0, 16, 0,
+ 20, 0, 0, 0, 50, 0,
+ 0, 10, 130, 0, 16, 0,
+ 20, 0, 0, 0, 58, 0,
+ 16, 128, 65, 0, 0, 0,
+ 22, 0, 0, 0, 58, 0,
+ 16, 0, 20, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 128, 63, 51, 0, 0, 7,
+ 130, 0, 16, 0, 20, 0,
+ 0, 0, 58, 0, 16, 0,
+ 20, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 50, 0, 0, 9, 114, 0,
+ 16, 0, 22, 0, 0, 0,
+ 246, 15, 16, 0, 20, 0,
+ 0, 0, 70, 2, 16, 0,
+ 22, 0, 0, 0, 70, 2,
+ 16, 0, 23, 0, 0, 0,
+ 168, 0, 0, 9, 114, 224,
+ 17, 0, 0, 0, 0, 0,
+ 42, 0, 16, 0, 20, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 70, 2,
+ 16, 0, 22, 0, 0, 0,
+ 30, 0, 0, 7, 130, 0,
+ 16, 0, 19, 0, 0, 0,
+ 58, 0, 16, 0, 19, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 4, 0, 0, 22, 0,
+ 0, 1, 190, 24, 0, 1,
+ 21, 0, 0, 1, 31, 0,
+ 4, 3, 10, 0, 16, 0,
+ 1, 0, 0, 0, 165, 0,
+ 0, 7, 66, 0, 16, 0,
+ 19, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 0, 0,
+ 0, 0, 30, 0, 0, 7,
+ 130, 0, 16, 0, 19, 0,
+ 0, 0, 42, 0, 16, 0,
+ 19, 0, 0, 0, 1, 64,
+ 0, 0, 255, 255, 255, 255,
+ 84, 0, 0, 7, 130, 0,
+ 16, 0, 19, 0, 0, 0,
+ 26, 0, 16, 0, 2, 0,
+ 0, 0, 58, 0, 16, 0,
+ 19, 0, 0, 0, 30, 0,
+ 0, 7, 18, 0, 16, 0,
+ 20, 0, 0, 0, 26, 0,
+ 16, 0, 0, 0, 0, 0,
+ 58, 0, 16, 0, 19, 0,
+ 0, 0, 167, 0, 0, 139,
+ 2, 131, 0, 128, 131, 153,
+ 25, 0, 242, 0, 16, 0,
+ 20, 0, 0, 0, 10, 0,
+ 16, 0, 20, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 70, 238, 17, 0,
+ 0, 0, 0, 0, 17, 0,
+ 0, 8, 18, 0, 16, 0,
+ 20, 0, 0, 0, 70, 14,
+ 16, 0, 20, 0, 0, 0,
+ 70, 158, 144, 0, 10, 0,
+ 16, 0, 4, 0, 0, 0,
+ 56, 0, 0, 7, 18, 0,
+ 16, 0, 20, 0, 0, 0,
+ 58, 0, 16, 0, 0, 0,
+ 0, 0, 10, 0, 16, 0,
+ 20, 0, 0, 0, 168, 0,
+ 0, 8, 18, 240, 17, 0,
+ 4, 0, 0, 0, 10, 32,
+ 2, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 10, 0,
+ 16, 0, 20, 0, 0, 0,
+ 54, 0, 0, 5, 18, 0,
+ 16, 0, 20, 0, 0, 0,
+ 58, 0, 16, 0, 19, 0,
+ 0, 0, 48, 0, 0, 1,
+ 30, 0, 0, 7, 18, 0,
+ 16, 0, 20, 0, 0, 0,
+ 10, 0, 16, 0, 20, 0,
+ 0, 0, 1, 64, 0, 0,
+ 32, 0, 0, 0, 80, 0,
+ 0, 7, 34, 0, 16, 0,
+ 20, 0, 0, 0, 10, 0,
+ 16, 0, 20, 0, 0, 0,
+ 42, 0, 16, 0, 19, 0,
+ 0, 0, 3, 0, 4, 3,
+ 26, 0, 16, 0, 20, 0,
+ 0, 0, 167, 0, 0, 8,
+ 34, 0, 16, 0, 20, 0,
+ 0, 0, 10, 32, 2, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 4, 0, 0, 0, 30, 0,
+ 0, 7, 66, 0, 16, 0,
+ 20, 0, 0, 0, 26, 0,
+ 16, 0, 0, 0, 0, 0,
+ 10, 0, 16, 0, 20, 0,
+ 0, 0, 167, 0, 0, 139,
+ 2, 131, 0, 128, 131, 153,
+ 25, 0, 242, 0, 16, 0,
+ 22, 0, 0, 0, 42, 0,
+ 16, 0, 20, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 70, 238, 17, 0,
+ 0, 0, 0, 0, 17, 0,
+ 0, 8, 66, 0, 16, 0,
+ 20, 0, 0, 0, 70, 14,
+ 16, 0, 22, 0, 0, 0,
+ 70, 158, 144, 0, 10, 0,
+ 16, 0, 4, 0, 0, 0,
+ 56, 0, 0, 7, 66, 0,
+ 16, 0, 20, 0, 0, 0,
+ 58, 0, 16, 0, 0, 0,
+ 0, 0, 42, 0, 16, 0,
+ 20, 0, 0, 0, 52, 0,
+ 0, 7, 34, 0, 16, 0,
+ 20, 0, 0, 0, 42, 0,
+ 16, 0, 20, 0, 0, 0,
+ 26, 0, 16, 0, 20, 0,
+ 0, 0, 168, 0, 0, 8,
+ 18, 240, 17, 0, 4, 0,
+ 0, 0, 10, 32, 2, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 26, 0, 16, 0,
+ 20, 0, 0, 0, 22, 0,
+ 0, 1, 21, 0, 0, 1,
+ 190, 24, 0, 1, 31, 0,
+ 4, 3, 26, 0, 16, 0,
+ 1, 0, 0, 0, 167, 0,
+ 0, 8, 66, 0, 16, 0,
+ 19, 0, 0, 0, 10, 32,
+ 2, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 4, 0, 0, 0,
+ 167, 0, 0, 9, 130, 0,
+ 16, 0, 19, 0, 0, 0,
+ 10, 0, 16, 0, 3, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 4, 0, 0, 0,
+ 52, 0, 0, 7, 66, 0,
+ 16, 0, 19, 0, 0, 0,
+ 58, 0, 16, 0, 19, 0,
+ 0, 0, 42, 0, 16, 0,
+ 19, 0, 0, 0, 168, 0,
+ 0, 8, 18, 240, 17, 0,
+ 4, 0, 0, 0, 10, 32,
+ 2, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 42, 0,
+ 16, 0, 19, 0, 0, 0,
+ 21, 0, 0, 1, 190, 24,
+ 0, 1, 31, 0, 4, 3,
+ 26, 0, 16, 0, 1, 0,
+ 0, 0, 167, 0, 0, 8,
+ 66, 0, 16, 0, 19, 0,
+ 0, 0, 10, 32, 2, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 4, 0, 0, 0, 167, 0,
+ 0, 9, 130, 0, 16, 0,
+ 19, 0, 0, 0, 26, 0,
+ 16, 0, 3, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 4, 0, 0, 0, 52, 0,
+ 0, 7, 66, 0, 16, 0,
+ 19, 0, 0, 0, 58, 0,
+ 16, 0, 19, 0, 0, 0,
+ 42, 0, 16, 0, 19, 0,
+ 0, 0, 168, 0, 0, 8,
+ 18, 240, 17, 0, 4, 0,
+ 0, 0, 10, 32, 2, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 42, 0, 16, 0,
+ 19, 0, 0, 0, 21, 0,
+ 0, 1, 190, 24, 0, 1,
+ 31, 0, 4, 3, 26, 0,
+ 16, 0, 1, 0, 0, 0,
+ 167, 0, 0, 8, 66, 0,
+ 16, 0, 19, 0, 0, 0,
+ 10, 32, 2, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 4, 0,
+ 0, 0, 167, 0, 0, 9,
+ 130, 0, 16, 0, 19, 0,
+ 0, 0, 42, 0, 16, 0,
+ 3, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 4, 0,
+ 0, 0, 52, 0, 0, 7,
+ 66, 0, 16, 0, 19, 0,
+ 0, 0, 58, 0, 16, 0,
+ 19, 0, 0, 0, 42, 0,
+ 16, 0, 19, 0, 0, 0,
+ 168, 0, 0, 8, 18, 240,
+ 17, 0, 4, 0, 0, 0,
+ 10, 32, 2, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 42, 0, 16, 0, 19, 0,
+ 0, 0, 21, 0, 0, 1,
+ 190, 24, 0, 1, 31, 0,
+ 4, 3, 26, 0, 16, 0,
+ 1, 0, 0, 0, 167, 0,
+ 0, 8, 66, 0, 16, 0,
+ 19, 0, 0, 0, 10, 32,
+ 2, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 4, 0, 0, 0,
+ 167, 0, 0, 9, 130, 0,
+ 16, 0, 19, 0, 0, 0,
+ 58, 0, 16, 0, 3, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 4, 0, 0, 0,
+ 52, 0, 0, 7, 66, 0,
+ 16, 0, 19, 0, 0, 0,
+ 58, 0, 16, 0, 19, 0,
+ 0, 0, 42, 0, 16, 0,
+ 19, 0, 0, 0, 168, 0,
+ 0, 8, 18, 240, 17, 0,
+ 4, 0, 0, 0, 10, 32,
+ 2, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 42, 0,
+ 16, 0, 19, 0, 0, 0,
+ 21, 0, 0, 1, 190, 24,
+ 0, 1, 31, 0, 4, 3,
+ 26, 0, 16, 0, 1, 0,
+ 0, 0, 167, 0, 0, 8,
+ 66, 0, 16, 0, 19, 0,
+ 0, 0, 10, 32, 2, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 4, 0, 0, 0, 167, 0,
+ 0, 9, 130, 0, 16, 0,
+ 19, 0, 0, 0, 10, 0,
+ 16, 0, 6, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 4, 0, 0, 0, 52, 0,
+ 0, 7, 66, 0, 16, 0,
+ 19, 0, 0, 0, 58, 0,
+ 16, 0, 19, 0, 0, 0,
+ 42, 0, 16, 0, 19, 0,
+ 0, 0, 168, 0, 0, 8,
+ 18, 240, 17, 0, 4, 0,
+ 0, 0, 10, 32, 2, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 42, 0, 16, 0,
+ 19, 0, 0, 0, 21, 0,
+ 0, 1, 190, 24, 0, 1,
+ 31, 0, 4, 3, 10, 0,
+ 16, 0, 2, 0, 0, 0,
+ 167, 0, 0, 8, 66, 0,
+ 16, 0, 19, 0, 0, 0,
+ 10, 32, 2, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 4, 0,
+ 0, 0, 166, 0, 0, 7,
+ 18, 240, 17, 0, 1, 0,
+ 0, 0, 42, 0, 16, 0,
+ 4, 0, 0, 0, 42, 0,
+ 16, 0, 19, 0, 0, 0,
+ 21, 0, 0, 1, 190, 24,
+ 0, 1, 165, 0, 0, 7,
+ 66, 0, 16, 0, 19, 0,
+ 0, 0, 1, 64, 0, 0,
+ 16, 0, 0, 0, 6, 240,
+ 17, 0, 1, 0, 0, 0,
+ 86, 0, 0, 5, 66, 0,
+ 16, 0, 19, 0, 0, 0,
+ 42, 0, 16, 0, 19, 0,
+ 0, 0, 14, 0, 0, 7,
+ 34, 0, 16, 0, 19, 0,
+ 0, 0, 26, 0, 16, 0,
+ 19, 0, 0, 0, 42, 0,
+ 16, 0, 19, 0, 0, 0,
+ 165, 0, 0, 7, 66, 0,
+ 16, 0, 19, 0, 0, 0,
+ 1, 64, 0, 0, 60, 0,
+ 0, 0, 6, 240, 17, 0,
+ 0, 0, 0, 0, 31, 0,
+ 4, 3, 42, 0, 16, 0,
+ 19, 0, 0, 0, 165, 0,
+ 0, 7, 130, 0, 16, 0,
+ 19, 0, 0, 0, 1, 64,
+ 0, 0, 80, 0, 0, 0,
+ 6, 240, 17, 0, 0, 0,
+ 0, 0, 49, 0, 0, 7,
+ 130, 0, 16, 0, 19, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 58, 0,
+ 16, 0, 19, 0, 0, 0,
+ 165, 0, 0, 7, 18, 0,
+ 16, 0, 20, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 0, 0, 0, 0, 165, 0,
+ 0, 7, 34, 0, 16, 0,
+ 20, 0, 0, 0, 1, 64,
+ 0, 0, 64, 0, 0, 0,
+ 6, 240, 17, 0, 0, 0,
+ 0, 0, 165, 0, 0, 7,
+ 194, 0, 16, 0, 20, 0,
+ 0, 0, 1, 64, 0, 0,
+ 56, 0, 0, 0, 6, 244,
+ 17, 0, 1, 0, 0, 0,
+ 54, 0, 0, 4, 18, 0,
+ 16, 0, 21, 0, 0, 0,
+ 10, 32, 2, 0, 48, 0,
+ 0, 1, 80, 0, 0, 7,
+ 18, 0, 16, 0, 22, 0,
+ 0, 0, 10, 0, 16, 0,
+ 21, 0, 0, 0, 10, 0,
+ 16, 0, 20, 0, 0, 0,
+ 3, 0, 4, 3, 10, 0,
+ 16, 0, 22, 0, 0, 0,
+ 30, 0, 0, 7, 18, 0,
+ 16, 0, 22, 0, 0, 0,
+ 26, 0, 16, 0, 0, 0,
+ 0, 0, 10, 0, 16, 0,
+ 21, 0, 0, 0, 167, 0,
+ 0, 139, 2, 131, 0, 128,
+ 131, 153, 25, 0, 242, 0,
+ 16, 0, 23, 0, 0, 0,
+ 10, 0, 16, 0, 22, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 70, 238,
+ 17, 0, 0, 0, 0, 0,
+ 54, 0, 0, 8, 242, 0,
+ 16, 0, 24, 0, 0, 0,
+ 2, 64, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 54, 0, 0, 5,
+ 34, 0, 16, 0, 22, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 48, 0,
+ 0, 1, 80, 0, 0, 7,
+ 66, 0, 16, 0, 22, 0,
+ 0, 0, 26, 0, 16, 0,
+ 22, 0, 0, 0, 42, 0,
+ 16, 0, 19, 0, 0, 0,
+ 3, 0, 4, 3, 42, 0,
+ 16, 0, 22, 0, 0, 0,
+ 30, 0, 0, 7, 66, 0,
+ 16, 0, 22, 0, 0, 0,
+ 26, 0, 16, 0, 20, 0,
+ 0, 0, 26, 0, 16, 0,
+ 22, 0, 0, 0, 167, 0,
+ 0, 139, 2, 35, 0, 128,
+ 131, 153, 25, 0, 66, 0,
+ 16, 0, 22, 0, 0, 0,
+ 42, 0, 16, 0, 22, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 112,
+ 16, 0, 8, 0, 0, 0,
+ 136, 0, 0, 5, 130, 0,
+ 16, 0, 22, 0, 0, 0,
+ 42, 0, 16, 0, 22, 0,
+ 0, 0, 30, 0, 0, 7,
+ 50, 0, 16, 0, 25, 0,
+ 0, 0, 230, 10, 16, 0,
+ 20, 0, 0, 0, 246, 15,
+ 16, 0, 22, 0, 0, 0,
+ 167, 0, 0, 139, 2, 131,
+ 0, 128, 131, 153, 25, 0,
+ 242, 0, 16, 0, 26, 0,
+ 0, 0, 10, 0, 16, 0,
+ 25, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 70, 126, 16, 0, 9, 0,
+ 0, 0, 167, 0, 0, 139,
+ 2, 131, 0, 128, 131, 153,
+ 25, 0, 242, 0, 16, 0,
+ 25, 0, 0, 0, 26, 0,
+ 16, 0, 25, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 70, 126, 16, 0,
+ 9, 0, 0, 0, 0, 0,
+ 0, 8, 242, 0, 16, 0,
+ 25, 0, 0, 0, 70, 14,
+ 16, 128, 65, 0, 0, 0,
+ 26, 0, 0, 0, 70, 14,
+ 16, 0, 25, 0, 0, 0,
+ 50, 0, 0, 9, 242, 0,
+ 16, 0, 25, 0, 0, 0,
+ 6, 0, 16, 0, 19, 0,
+ 0, 0, 70, 14, 16, 0,
+ 25, 0, 0, 0, 70, 14,
+ 16, 0, 26, 0, 0, 0,
+ 16, 0, 0, 7, 130, 0,
+ 16, 0, 22, 0, 0, 0,
+ 70, 2, 16, 0, 23, 0,
+ 0, 0, 70, 2, 16, 0,
+ 25, 0, 0, 0, 0, 0,
+ 0, 7, 130, 0, 16, 0,
+ 22, 0, 0, 0, 58, 0,
+ 16, 0, 25, 0, 0, 0,
+ 58, 0, 16, 0, 22, 0,
+ 0, 0, 54, 0, 0, 5,
+ 114, 0, 16, 0, 26, 0,
+ 0, 0, 70, 2, 16, 0,
+ 25, 0, 0, 0, 54, 0,
+ 0, 5, 130, 0, 16, 0,
+ 25, 0, 0, 0, 42, 0,
+ 16, 0, 22, 0, 0, 0,
+ 54, 0, 0, 5, 130, 0,
+ 16, 0, 26, 0, 0, 0,
+ 58, 0, 16, 0, 22, 0,
+ 0, 0, 48, 0, 0, 1,
+ 49, 0, 0, 7, 18, 0,
+ 16, 0, 27, 0, 0, 0,
+ 58, 0, 16, 0, 26, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 30, 0,
+ 0, 7, 34, 0, 16, 0,
+ 27, 0, 0, 0, 58, 0,
+ 16, 0, 25, 0, 0, 0,
+ 1, 64, 0, 0, 255, 255,
+ 255, 255, 1, 0, 0, 7,
+ 130, 0, 16, 0, 25, 0,
+ 0, 0, 58, 0, 16, 0,
+ 25, 0, 0, 0, 26, 0,
+ 16, 0, 27, 0, 0, 0,
+ 39, 0, 0, 7, 34, 0,
+ 16, 0, 27, 0, 0, 0,
+ 58, 0, 16, 0, 25, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 1, 0,
+ 0, 7, 18, 0, 16, 0,
+ 27, 0, 0, 0, 26, 0,
+ 16, 0, 27, 0, 0, 0,
+ 10, 0, 16, 0, 27, 0,
+ 0, 0, 3, 0, 0, 3,
+ 10, 0, 16, 0, 27, 0,
+ 0, 0, 136, 0, 0, 5,
+ 18, 0, 16, 0, 27, 0,
+ 0, 0, 58, 0, 16, 0,
+ 25, 0, 0, 0, 30, 0,
+ 0, 7, 50, 0, 16, 0,
+ 27, 0, 0, 0, 230, 10,
+ 16, 0, 20, 0, 0, 0,
+ 6, 0, 16, 0, 27, 0,
+ 0, 0, 167, 0, 0, 139,
+ 2, 131, 0, 128, 131, 153,
+ 25, 0, 242, 0, 16, 0,
+ 28, 0, 0, 0, 10, 0,
+ 16, 0, 27, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 70, 126, 16, 0,
+ 9, 0, 0, 0, 167, 0,
+ 0, 139, 2, 131, 0, 128,
+ 131, 153, 25, 0, 242, 0,
+ 16, 0, 27, 0, 0, 0,
+ 26, 0, 16, 0, 27, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 70, 126,
+ 16, 0, 9, 0, 0, 0,
+ 0, 0, 0, 8, 242, 0,
+ 16, 0, 27, 0, 0, 0,
+ 70, 14, 16, 128, 65, 0,
+ 0, 0, 28, 0, 0, 0,
+ 70, 14, 16, 0, 27, 0,
+ 0, 0, 50, 0, 0, 9,
+ 242, 0, 16, 0, 27, 0,
+ 0, 0, 6, 0, 16, 0,
+ 19, 0, 0, 0, 70, 14,
+ 16, 0, 27, 0, 0, 0,
+ 70, 14, 16, 0, 28, 0,
+ 0, 0, 16, 0, 0, 7,
+ 18, 0, 16, 0, 28, 0,
+ 0, 0, 70, 2, 16, 0,
+ 23, 0, 0, 0, 70, 2,
+ 16, 0, 27, 0, 0, 0,
+ 0, 0, 0, 7, 130, 0,
+ 16, 0, 27, 0, 0, 0,
+ 58, 0, 16, 0, 27, 0,
+ 0, 0, 10, 0, 16, 0,
+ 28, 0, 0, 0, 49, 0,
+ 0, 7, 18, 0, 16, 0,
+ 28, 0, 0, 0, 58, 0,
+ 16, 0, 26, 0, 0, 0,
+ 58, 0, 16, 0, 27, 0,
+ 0, 0, 55, 0, 0, 9,
+ 242, 0, 16, 0, 26, 0,
+ 0, 0, 6, 0, 16, 0,
+ 28, 0, 0, 0, 70, 14,
+ 16, 0, 27, 0, 0, 0,
+ 70, 14, 16, 0, 26, 0,
+ 0, 0, 22, 0, 0, 1,
+ 49, 0, 0, 7, 66, 0,
+ 16, 0, 22, 0, 0, 0,
+ 58, 0, 16, 0, 26, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 50, 0,
+ 0, 10, 114, 0, 16, 0,
+ 25, 0, 0, 0, 70, 2,
+ 16, 128, 65, 0, 0, 0,
+ 26, 0, 0, 0, 246, 15,
+ 16, 0, 26, 0, 0, 0,
+ 70, 2, 16, 0, 24, 0,
+ 0, 0, 30, 0, 0, 7,
+ 130, 0, 16, 0, 25, 0,
+ 0, 0, 58, 0, 16, 0,
+ 24, 0, 0, 0, 1, 64,
+ 0, 0, 1, 0, 0, 0,
+ 55, 0, 0, 9, 242, 0,
+ 16, 0, 24, 0, 0, 0,
+ 166, 10, 16, 0, 22, 0,
+ 0, 0, 70, 14, 16, 0,
+ 25, 0, 0, 0, 70, 14,
+ 16, 0, 24, 0, 0, 0,
+ 30, 0, 0, 7, 34, 0,
+ 16, 0, 22, 0, 0, 0,
+ 26, 0, 16, 0, 22, 0,
+ 0, 0, 1, 64, 0, 0,
+ 1, 0, 0, 0, 22, 0,
+ 0, 1, 31, 0, 4, 3,
+ 58, 0, 16, 0, 24, 0,
+ 0, 0, 86, 0, 0, 5,
+ 34, 0, 16, 0, 22, 0,
+ 0, 0, 58, 0, 16, 0,
+ 24, 0, 0, 0, 14, 0,
+ 0, 10, 34, 0, 16, 0,
+ 22, 0, 0, 0, 2, 64,
+ 0, 0, 0, 0, 128, 63,
+ 0, 0, 128, 63, 0, 0,
+ 128, 63, 0, 0, 128, 63,
+ 26, 0, 16, 0, 22, 0,
+ 0, 0, 31, 0, 4, 3,
+ 58, 0, 16, 0, 19, 0,
+ 0, 0, 30, 0, 0, 7,
+ 66, 0, 16, 0, 22, 0,
+ 0, 0, 10, 0, 16, 0,
+ 0, 0, 0, 0, 10, 0,
+ 16, 0, 21, 0, 0, 0,
+ 167, 0, 0, 139, 2, 131,
+ 0, 128, 131, 153, 25, 0,
+ 114, 0, 16, 0, 25, 0,
+ 0, 0, 42, 0, 16, 0,
+ 22, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 70, 226, 17, 0, 0, 0,
+ 0, 0, 165, 0, 0, 7,
+ 130, 0, 16, 0, 22, 0,
+ 0, 0, 1, 64, 0, 0,
+ 80, 0, 0, 0, 6, 240,
+ 17, 0, 0, 0, 0, 0,
+ 16, 0, 0, 7, 130, 0,
+ 16, 0, 25, 0, 0, 0,
+ 70, 2, 16, 0, 24, 0,
+ 0, 0, 70, 2, 16, 0,
+ 24, 0, 0, 0, 0, 0,
+ 0, 7, 18, 0, 16, 0,
+ 27, 0, 0, 0, 58, 0,
+ 16, 0, 25, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 52, 68, 0, 0, 5,
+ 18, 0, 16, 0, 27, 0,
+ 0, 0, 10, 0, 16, 0,
+ 27, 0, 0, 0, 56, 0,
+ 0, 7, 226, 0, 16, 0,
+ 27, 0, 0, 0, 6, 9,
+ 16, 0, 24, 0, 0, 0,
+ 6, 0, 16, 0, 27, 0,
+ 0, 0, 0, 0, 0, 8,
+ 114, 0, 16, 0, 28, 0,
+ 0, 0, 70, 2, 16, 0,
+ 23, 0, 0, 0, 70, 2,
+ 16, 128, 65, 0, 0, 0,
+ 25, 0, 0, 0, 16, 0,
+ 0, 7, 130, 0, 16, 0,
+ 28, 0, 0, 0, 70, 2,
+ 16, 0, 28, 0, 0, 0,
+ 150, 7, 16, 0, 27, 0,
+ 0, 0, 50, 0, 0, 10,
+ 226, 0, 16, 0, 27, 0,
+ 0, 0, 246, 15, 16, 128,
+ 65, 0, 0, 0, 28, 0,
+ 0, 0, 86, 14, 16, 0,
+ 27, 0, 0, 0, 6, 9,
+ 16, 0, 28, 0, 0, 0,
+ 16, 0, 0, 7, 18, 0,
+ 16, 0, 28, 0, 0, 0,
+ 150, 7, 16, 0, 27, 0,
+ 0, 0, 150, 7, 16, 0,
+ 27, 0, 0, 0, 0, 0,
+ 0, 7, 18, 0, 16, 0,
+ 28, 0, 0, 0, 10, 0,
+ 16, 0, 28, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 52, 68, 0, 0, 5,
+ 18, 0, 16, 0, 28, 0,
+ 0, 0, 10, 0, 16, 0,
+ 28, 0, 0, 0, 56, 0,
+ 0, 8, 130, 0, 16, 0,
+ 22, 0, 0, 0, 58, 0,
+ 16, 128, 65, 0, 0, 0,
+ 22, 0, 0, 0, 58, 0,
+ 16, 0, 25, 0, 0, 0,
+ 56, 0, 0, 7, 130, 0,
+ 16, 0, 22, 0, 0, 0,
+ 10, 0, 16, 0, 27, 0,
+ 0, 0, 58, 0, 16, 0,
+ 22, 0, 0, 0, 56, 0,
+ 0, 7, 130, 0, 16, 0,
+ 22, 0, 0, 0, 26, 0,
+ 16, 0, 22, 0, 0, 0,
+ 58, 0, 16, 0, 22, 0,
+ 0, 0, 56, 0, 0, 7,
+ 130, 0, 16, 0, 22, 0,
+ 0, 0, 10, 0, 16, 0,
+ 28, 0, 0, 0, 58, 0,
+ 16, 0, 22, 0, 0, 0,
+ 52, 0, 0, 7, 130, 0,
+ 16, 0, 22, 0, 0, 0,
+ 58, 0, 16, 0, 22, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 128, 191, 50, 0,
+ 0, 10, 114, 0, 16, 0,
+ 25, 0, 0, 0, 150, 7,
+ 16, 128, 65, 0, 0, 0,
+ 27, 0, 0, 0, 246, 15,
+ 16, 0, 22, 0, 0, 0,
+ 70, 2, 16, 0, 25, 0,
+ 0, 0, 168, 0, 0, 9,
+ 114, 224, 17, 0, 0, 0,
+ 0, 0, 42, 0, 16, 0,
+ 22, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 70, 2, 16, 0, 25, 0,
+ 0, 0, 21, 0, 0, 1,
+ 50, 0, 0, 9, 114, 0,
+ 16, 0, 23, 0, 0, 0,
+ 70, 2, 16, 0, 24, 0,
+ 0, 0, 86, 5, 16, 0,
+ 22, 0, 0, 0, 70, 2,
+ 16, 0, 23, 0, 0, 0,
+ 168, 0, 0, 9, 242, 224,
+ 17, 0, 0, 0, 0, 0,
+ 10, 0, 16, 0, 22, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 70, 14,
+ 16, 0, 23, 0, 0, 0,
+ 21, 0, 0, 1, 30, 0,
+ 0, 7, 18, 0, 16, 0,
+ 21, 0, 0, 0, 10, 0,
+ 16, 0, 21, 0, 0, 0,
+ 1, 64, 0, 0, 0, 4,
+ 0, 0, 22, 0, 0, 1,
+ 21, 0, 0, 1, 27, 0,
+ 0, 5, 66, 0, 16, 0,
+ 19, 0, 0, 0, 10, 0,
+ 16, 0, 19, 0, 0, 0,
+ 30, 0, 0, 7, 66, 0,
+ 16, 0, 19, 0, 0, 0,
+ 26, 0, 16, 0, 0, 0,
+ 0, 0, 42, 0, 16, 0,
+ 19, 0, 0, 0, 167, 0,
+ 0, 139, 2, 131, 0, 128,
+ 131, 153, 25, 0, 114, 0,
+ 16, 0, 20, 0, 0, 0,
+ 42, 0, 16, 0, 19, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 70, 226,
+ 17, 0, 0, 0, 0, 0,
+ 165, 0, 0, 7, 130, 0,
+ 16, 0, 19, 0, 0, 0,
+ 1, 64, 0, 0, 68, 0,
+ 0, 0, 6, 240, 17, 0,
+ 0, 0, 0, 0, 165, 0,
+ 0, 7, 130, 0, 16, 0,
+ 20, 0, 0, 0, 1, 64,
+ 0, 0, 64, 0, 0, 0,
+ 6, 240, 17, 0, 1, 0,
+ 0, 0, 54, 0, 0, 8,
+ 114, 0, 16, 0, 22, 0,
+ 0, 0, 2, 64, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 54, 0,
+ 0, 8, 50, 0, 16, 0,
+ 23, 0, 0, 0, 2, 64,
+ 0, 0, 0, 0, 0, 0,
+ 255, 255, 127, 127, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 54, 0, 0, 5, 18, 0,
+ 16, 0, 21, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 48, 0, 0, 1,
+ 80, 0, 0, 7, 130, 0,
+ 16, 0, 22, 0, 0, 0,
+ 10, 0, 16, 0, 21, 0,
+ 0, 0, 58, 0, 16, 0,
+ 19, 0, 0, 0, 3, 0,
+ 4, 3, 58, 0, 16, 0,
+ 22, 0, 0, 0, 35, 0,
+ 0, 9, 130, 0, 16, 0,
+ 22, 0, 0, 0, 1, 64,
+ 0, 0, 3, 0, 0, 0,
+ 10, 0, 16, 0, 21, 0,
+ 0, 0, 58, 0, 16, 0,
+ 20, 0, 0, 0, 167, 0,
+ 0, 139, 2, 99, 0, 128,
+ 131, 153, 25, 0, 114, 0,
+ 16, 0, 25, 0, 0, 0,
+ 58, 0, 16, 0, 22, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 70, 114,
+ 16, 0, 10, 0, 0, 0,
+ 30, 0, 0, 10, 194, 0,
+ 16, 0, 23, 0, 0, 0,
+ 246, 15, 16, 0, 22, 0,
+ 0, 0, 2, 64, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 1, 0, 0, 0,
+ 2, 0, 0, 0, 167, 0,
+ 0, 139, 2, 99, 0, 128,
+ 131, 153, 25, 0, 114, 0,
+ 16, 0, 27, 0, 0, 0,
+ 42, 0, 16, 0, 23, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 70, 114,
+ 16, 0, 10, 0, 0, 0,
+ 167, 0, 0, 139, 2, 99,
+ 0, 128, 131, 153, 25, 0,
+ 114, 0, 16, 0, 28, 0,
+ 0, 0, 58, 0, 16, 0,
+ 23, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 70, 114, 16, 0, 10, 0,
+ 0, 0, 0, 0, 0, 8,
+ 114, 0, 16, 0, 27, 0,
+ 0, 0, 70, 2, 16, 128,
+ 65, 0, 0, 0, 25, 0,
+ 0, 0, 70, 2, 16, 0,
+ 27, 0, 0, 0, 0, 0,
+ 0, 8, 114, 0, 16, 0,
+ 28, 0, 0, 0, 70, 2,
+ 16, 128, 65, 0, 0, 0,
+ 25, 0, 0, 0, 70, 2,
+ 16, 0, 28, 0, 0, 0,
+ 56, 0, 0, 7, 114, 0,
+ 16, 0, 29, 0, 0, 0,
+ 38, 9, 16, 0, 27, 0,
+ 0, 0, 150, 4, 16, 0,
+ 28, 0, 0, 0, 50, 0,
+ 0, 10, 114, 0, 16, 0,
+ 29, 0, 0, 0, 150, 4,
+ 16, 0, 27, 0, 0, 0,
+ 38, 9, 16, 0, 28, 0,
+ 0, 0, 70, 2, 16, 128,
+ 65, 0, 0, 0, 29, 0,
+ 0, 0, 16, 0, 0, 7,
+ 130, 0, 16, 0, 22, 0,
+ 0, 0, 70, 2, 16, 0,
+ 29, 0, 0, 0, 70, 2,
+ 16, 0, 29, 0, 0, 0,
+ 68, 0, 0, 5, 130, 0,
+ 16, 0, 22, 0, 0, 0,
+ 58, 0, 16, 0, 22, 0,
+ 0, 0, 56, 0, 0, 7,
+ 114, 0, 16, 0, 29, 0,
+ 0, 0, 246, 15, 16, 0,
+ 22, 0, 0, 0, 70, 2,
+ 16, 0, 29, 0, 0, 0,
+ 16, 0, 0, 7, 130, 0,
+ 16, 0, 22, 0, 0, 0,
+ 70, 2, 16, 0, 27, 0,
+ 0, 0, 70, 2, 16, 0,
+ 28, 0, 0, 0, 16, 0,
+ 0, 7, 66, 0, 16, 0,
+ 23, 0, 0, 0, 70, 2,
+ 16, 0, 27, 0, 0, 0,
+ 70, 2, 16, 0, 27, 0,
+ 0, 0, 16, 0, 0, 7,
+ 130, 0, 16, 0, 23, 0,
+ 0, 0, 70, 2, 16, 0,
+ 28, 0, 0, 0, 70, 2,
+ 16, 0, 28, 0, 0, 0,
+ 56, 0, 0, 7, 130, 0,
+ 16, 0, 25, 0, 0, 0,
+ 58, 0, 16, 0, 22, 0,
+ 0, 0, 58, 0, 16, 0,
+ 22, 0, 0, 0, 50, 0,
+ 0, 10, 130, 0, 16, 0,
+ 25, 0, 0, 0, 42, 0,
+ 16, 0, 23, 0, 0, 0,
+ 58, 0, 16, 0, 23, 0,
+ 0, 0, 58, 0, 16, 128,
+ 65, 0, 0, 0, 25, 0,
+ 0, 0, 14, 0, 0, 10,
+ 130, 0, 16, 0, 25, 0,
+ 0, 0, 2, 64, 0, 0,
+ 0, 0, 128, 63, 0, 0,
+ 128, 63, 0, 0, 128, 63,
+ 0, 0, 128, 63, 58, 0,
+ 16, 0, 25, 0, 0, 0,
+ 0, 0, 0, 7, 130, 0,
+ 16, 0, 27, 0, 0, 0,
+ 58, 0, 16, 0, 23, 0,
+ 0, 0, 42, 0, 16, 0,
+ 23, 0, 0, 0, 50, 0,
+ 0, 9, 130, 0, 16, 0,
+ 27, 0, 0, 0, 58, 0,
+ 16, 0, 22, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 192, 58, 0, 16, 0,
+ 27, 0, 0, 0, 14, 0,
+ 0, 10, 130, 0, 16, 0,
+ 27, 0, 0, 0, 2, 64,
+ 0, 0, 0, 0, 128, 63,
+ 0, 0, 128, 63, 0, 0,
+ 128, 63, 0, 0, 128, 63,
+ 58, 0, 16, 0, 27, 0,
+ 0, 0, 14, 0, 0, 10,
+ 130, 0, 16, 0, 28, 0,
+ 0, 0, 2, 64, 0, 0,
+ 0, 0, 128, 63, 0, 0,
+ 128, 63, 0, 0, 128, 63,
+ 0, 0, 128, 63, 42, 0,
+ 16, 0, 23, 0, 0, 0,
+ 14, 0, 0, 10, 130, 0,
+ 16, 0, 29, 0, 0, 0,
+ 2, 64, 0, 0, 0, 0,
+ 128, 63, 0, 0, 128, 63,
+ 0, 0, 128, 63, 0, 0,
+ 128, 63, 58, 0, 16, 0,
+ 23, 0, 0, 0, 0, 0,
+ 0, 8, 114, 0, 16, 0,
+ 25, 0, 0, 0, 70, 2,
+ 16, 0, 20, 0, 0, 0,
+ 70, 2, 16, 128, 65, 0,
+ 0, 0, 25, 0, 0, 0,
+ 16, 0, 0, 7, 18, 0,
+ 16, 0, 30, 0, 0, 0,
+ 70, 2, 16, 0, 25, 0,
+ 0, 0, 70, 2, 16, 0,
+ 27, 0, 0, 0, 16, 0,
+ 0, 7, 34, 0, 16, 0,
+ 30, 0, 0, 0, 70, 2,
+ 16, 0, 25, 0, 0, 0,
+ 70, 2, 16, 0, 28, 0,
+ 0, 0, 16, 0, 0, 7,
+ 18, 0, 16, 0, 31, 0,
+ 0, 0, 70, 2, 16, 0,
+ 25, 0, 0, 0, 70, 2,
+ 16, 0, 29, 0, 0, 0,
+ 56, 0, 0, 7, 194, 0,
+ 16, 0, 30, 0, 0, 0,
+ 246, 15, 16, 0, 22, 0,
+ 0, 0, 86, 1, 16, 0,
+ 30, 0, 0, 0, 50, 0,
+ 0, 10, 66, 0, 16, 0,
+ 30, 0, 0, 0, 58, 0,
+ 16, 0, 23, 0, 0, 0,
+ 10, 0, 16, 0, 30, 0,
+ 0, 0, 42, 0, 16, 128,
+ 65, 0, 0, 0, 30, 0,
+ 0, 0, 50, 0, 0, 10,
+ 66, 0, 16, 0, 23, 0,
+ 0, 0, 42, 0, 16, 0,
+ 23, 0, 0, 0, 26, 0,
+ 16, 0, 30, 0, 0, 0,
+ 58, 0, 16, 128, 65, 0,
+ 0, 0, 30, 0, 0, 0,
+ 49, 0, 0, 7, 130, 0,
+ 16, 0, 30, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 42, 0, 16, 0,
+ 23, 0, 0, 0, 56, 0,
+ 0, 7, 66, 0, 16, 0,
+ 30, 0, 0, 0, 58, 0,
+ 16, 0, 25, 0, 0, 0,
+ 42, 0, 16, 0, 30, 0,
+ 0, 0, 56, 0, 0, 7,
+ 130, 0, 16, 0, 28, 0,
+ 0, 0, 58, 0, 16, 0,
+ 28, 0, 0, 0, 10, 0,
+ 16, 0, 30, 0, 0, 0,
+ 55, 0, 0, 9, 130, 0,
+ 16, 0, 28, 0, 0, 0,
+ 58, 0, 16, 0, 30, 0,
+ 0, 0, 42, 0, 16, 0,
+ 30, 0, 0, 0, 58, 0,
+ 16, 0, 28, 0, 0, 0,
+ 49, 0, 0, 7, 66, 0,
+ 16, 0, 30, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 58, 0, 16, 0,
+ 28, 0, 0, 0, 56, 0,
+ 0, 7, 66, 0, 16, 0,
+ 23, 0, 0, 0, 58, 0,
+ 16, 0, 25, 0, 0, 0,
+ 42, 0, 16, 0, 23, 0,
+ 0, 0, 56, 0, 0, 7,
+ 130, 0, 16, 0, 25, 0,
+ 0, 0, 58, 0, 16, 0,
+ 29, 0, 0, 0, 26, 0,
+ 16, 0, 30, 0, 0, 0,
+ 55, 0, 0, 9, 66, 0,
+ 16, 0, 23, 0, 0, 0,
+ 42, 0, 16, 0, 30, 0,
+ 0, 0, 42, 0, 16, 0,
+ 23, 0, 0, 0, 58, 0,
+ 16, 0, 25, 0, 0, 0,
+ 0, 0, 0, 7, 130, 0,
+ 16, 0, 25, 0, 0, 0,
+ 42, 0, 16, 0, 23, 0,
+ 0, 0, 58, 0, 16, 0,
+ 28, 0, 0, 0, 49, 0,
+ 0, 7, 130, 0, 16, 0,
+ 25, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 128, 63,
+ 58, 0, 16, 0, 25, 0,
+ 0, 0, 0, 0, 0, 8,
+ 130, 0, 16, 0, 22, 0,
+ 0, 0, 58, 0, 16, 128,
+ 65, 0, 0, 0, 22, 0,
+ 0, 0, 58, 0, 16, 0,
+ 23, 0, 0, 0, 0, 0,
+ 0, 7, 130, 0, 16, 0,
+ 22, 0, 0, 0, 10, 0,
+ 16, 0, 30, 0, 0, 0,
+ 58, 0, 16, 0, 22, 0,
+ 0, 0, 0, 0, 0, 8,
+ 130, 0, 16, 0, 22, 0,
+ 0, 0, 26, 0, 16, 128,
+ 65, 0, 0, 0, 30, 0,
+ 0, 0, 58, 0, 16, 0,
+ 22, 0, 0, 0, 56, 0,
+ 0, 7, 130, 0, 16, 0,
+ 22, 0, 0, 0, 58, 0,
+ 16, 0, 27, 0, 0, 0,
+ 58, 0, 16, 0, 22, 0,
+ 0, 0, 55, 32, 0, 9,
+ 130, 0, 16, 0, 22, 0,
+ 0, 0, 58, 0, 16, 0,
+ 25, 0, 0, 0, 58, 0,
+ 16, 0, 22, 0, 0, 0,
+ 58, 0, 16, 0, 28, 0,
+ 0, 0, 0, 0, 0, 8,
+ 130, 0, 16, 0, 23, 0,
+ 0, 0, 58, 0, 16, 128,
+ 65, 0, 0, 0, 22, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 128, 63, 51, 0,
+ 0, 7, 66, 0, 16, 0,
+ 23, 0, 0, 0, 42, 0,
+ 16, 0, 23, 0, 0, 0,
+ 58, 0, 16, 0, 23, 0,
+ 0, 0, 52, 0, 0, 7,
+ 66, 0, 16, 0, 23, 0,
+ 0, 0, 42, 0, 16, 0,
+ 23, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 56, 0, 0, 7, 114, 0,
+ 16, 0, 28, 0, 0, 0,
+ 166, 10, 16, 0, 23, 0,
+ 0, 0, 70, 2, 16, 0,
+ 28, 0, 0, 0, 50, 0,
+ 0, 9, 114, 0, 16, 0,
+ 27, 0, 0, 0, 70, 2,
+ 16, 0, 27, 0, 0, 0,
+ 246, 15, 16, 0, 22, 0,
+ 0, 0, 70, 2, 16, 0,
+ 28, 0, 0, 0, 0, 0,
+ 0, 8, 114, 0, 16, 0,
+ 25, 0, 0, 0, 70, 2,
+ 16, 0, 25, 0, 0, 0,
+ 70, 2, 16, 128, 65, 0,
+ 0, 0, 27, 0, 0, 0,
+ 16, 0, 0, 7, 130, 0,
+ 16, 0, 22, 0, 0, 0,
+ 70, 2, 16, 0, 25, 0,
+ 0, 0, 70, 2, 16, 0,
+ 25, 0, 0, 0, 49, 0,
+ 0, 7, 66, 0, 16, 0,
+ 23, 0, 0, 0, 10, 0,
+ 16, 0, 31, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 56, 0, 0, 7,
+ 130, 0, 16, 0, 23, 0,
+ 0, 0, 58, 0, 16, 0,
+ 22, 0, 0, 0, 1, 64,
+ 0, 0, 71, 3, 128, 63,
+ 55, 0, 0, 9, 34, 0,
+ 16, 0, 31, 0, 0, 0,
+ 42, 0, 16, 0, 23, 0,
+ 0, 0, 58, 0, 16, 0,
+ 23, 0, 0, 0, 58, 0,
+ 16, 0, 22, 0, 0, 0,
+ 49, 0, 0, 7, 130, 0,
+ 16, 0, 22, 0, 0, 0,
+ 26, 0, 16, 0, 31, 0,
+ 0, 0, 26, 0, 16, 0,
+ 23, 0, 0, 0, 55, 0,
+ 0, 9, 114, 0, 16, 0,
+ 22, 0, 0, 0, 246, 15,
+ 16, 0, 22, 0, 0, 0,
+ 70, 2, 16, 0, 29, 0,
+ 0, 0, 70, 2, 16, 0,
+ 22, 0, 0, 0, 55, 0,
+ 0, 9, 50, 0, 16, 0,
+ 23, 0, 0, 0, 246, 15,
+ 16, 0, 22, 0, 0, 0,
+ 70, 0, 16, 0, 31, 0,
+ 0, 0, 70, 0, 16, 0,
+ 23, 0, 0, 0, 30, 0,
+ 0, 7, 18, 0, 16, 0,
+ 21, 0, 0, 0, 10, 0,
+ 16, 0, 21, 0, 0, 0,
+ 1, 64, 0, 0, 1, 0,
+ 0, 0, 22, 0, 0, 1,
+ 49, 0, 0, 7, 130, 0,
+ 16, 0, 19, 0, 0, 0,
+ 10, 0, 16, 0, 23, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 31, 0,
+ 4, 3, 58, 0, 16, 0,
+ 19, 0, 0, 0, 50, 0,
+ 0, 10, 114, 0, 16, 0,
+ 20, 0, 0, 0, 70, 2,
+ 16, 128, 65, 0, 0, 0,
+ 22, 0, 0, 0, 6, 0,
+ 16, 0, 23, 0, 0, 0,
+ 70, 2, 16, 0, 20, 0,
+ 0, 0, 168, 0, 0, 9,
+ 114, 224, 17, 0, 0, 0,
+ 0, 0, 42, 0, 16, 0,
+ 19, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 70, 2, 16, 0, 20, 0,
+ 0, 0, 21, 0, 0, 1,
+ 165, 0, 0, 7, 66, 0,
+ 16, 0, 19, 0, 0, 0,
+ 1, 64, 0, 0, 72, 0,
+ 0, 0, 6, 240, 17, 0,
+ 0, 0, 0, 0, 31, 0,
+ 4, 3, 42, 0, 16, 0,
+ 19, 0, 0, 0, 165, 0,
+ 0, 7, 66, 0, 16, 0,
+ 19, 0, 0, 0, 1, 64,
+ 0, 0, 80, 0, 0, 0,
+ 6, 240, 17, 0, 0, 0,
+ 0, 0, 49, 0, 0, 7,
+ 130, 0, 16, 0, 19, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 42, 0,
+ 16, 0, 19, 0, 0, 0,
+ 165, 0, 0, 7, 18, 0,
+ 16, 0, 20, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 0, 0, 0, 0, 165, 0,
+ 0, 7, 98, 0, 16, 0,
+ 20, 0, 0, 0, 1, 64,
+ 0, 0, 48, 0, 0, 0,
+ 6, 241, 17, 0, 1, 0,
+ 0, 0, 165, 0, 0, 7,
+ 98, 0, 16, 0, 23, 0,
+ 0, 0, 1, 64, 0, 0,
+ 44, 0, 0, 0, 6, 241,
+ 17, 0, 0, 0, 0, 0,
+ 54, 0, 0, 4, 130, 0,
+ 16, 0, 20, 0, 0, 0,
+ 10, 32, 2, 0, 48, 0,
+ 0, 1, 80, 0, 0, 7,
+ 18, 0, 16, 0, 21, 0,
+ 0, 0, 58, 0, 16, 0,
+ 20, 0, 0, 0, 10, 0,
+ 16, 0, 20, 0, 0, 0,
+ 3, 0, 4, 3, 10, 0,
+ 16, 0, 21, 0, 0, 0,
+ 30, 0, 0, 7, 18, 0,
+ 16, 0, 21, 0, 0, 0,
+ 26, 0, 16, 0, 0, 0,
+ 0, 0, 58, 0, 16, 0,
+ 20, 0, 0, 0, 167, 0,
+ 0, 139, 2, 131, 0, 128,
+ 131, 153, 25, 0, 242, 0,
+ 16, 0, 25, 0, 0, 0,
+ 10, 0, 16, 0, 21, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 70, 238,
+ 17, 0, 0, 0, 0, 0,
+ 30, 0, 0, 7, 130, 0,
+ 16, 0, 22, 0, 0, 0,
+ 10, 0, 16, 0, 0, 0,
+ 0, 0, 58, 0, 16, 0,
+ 20, 0, 0, 0, 167, 0,
+ 0, 139, 2, 131, 0, 128,
+ 131, 153, 25, 0, 114, 0,
+ 16, 0, 27, 0, 0, 0,
+ 58, 0, 16, 0, 22, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 70, 226,
+ 17, 0, 0, 0, 0, 0,
+ 54, 0, 0, 5, 114, 0,
+ 16, 0, 28, 0, 0, 0,
+ 70, 2, 16, 0, 25, 0,
+ 0, 0, 54, 0, 0, 8,
+ 114, 0, 16, 0, 29, 0,
+ 0, 0, 2, 64, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 54, 0,
+ 0, 8, 242, 0, 16, 0,
+ 30, 0, 0, 0, 2, 64,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 54, 0, 0, 5, 130, 0,
+ 16, 0, 23, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 48, 0, 0, 1,
+ 80, 0, 0, 7, 130, 0,
+ 16, 0, 27, 0, 0, 0,
+ 58, 0, 16, 0, 23, 0,
+ 0, 0, 26, 0, 16, 0,
+ 23, 0, 0, 0, 3, 0,
+ 4, 3, 58, 0, 16, 0,
+ 27, 0, 0, 0, 30, 0,
+ 0, 7, 130, 0, 16, 0,
+ 27, 0, 0, 0, 58, 0,
+ 16, 0, 23, 0, 0, 0,
+ 42, 0, 16, 0, 23, 0,
+ 0, 0, 167, 0, 0, 139,
+ 2, 67, 0, 128, 131, 153,
+ 25, 0, 50, 0, 16, 0,
+ 31, 0, 0, 0, 58, 0,
+ 16, 0, 27, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 70, 112, 16, 0,
+ 6, 0, 0, 0, 30, 0,
+ 0, 7, 242, 0, 16, 0,
+ 31, 0, 0, 0, 150, 9,
+ 16, 0, 20, 0, 0, 0,
+ 6, 5, 16, 0, 31, 0,
+ 0, 0, 167, 0, 0, 139,
+ 2, 131, 0, 128, 131, 153,
+ 25, 0, 242, 0, 16, 0,
+ 32, 0, 0, 0, 10, 0,
+ 16, 0, 31, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 70, 126, 16, 0,
+ 7, 0, 0, 0, 167, 0,
+ 0, 139, 2, 131, 0, 128,
+ 131, 153, 25, 0, 242, 0,
+ 16, 0, 33, 0, 0, 0,
+ 26, 0, 16, 0, 31, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 70, 126,
+ 16, 0, 7, 0, 0, 0,
+ 167, 0, 0, 139, 2, 131,
+ 0, 128, 131, 153, 25, 0,
+ 242, 0, 16, 0, 34, 0,
+ 0, 0, 42, 0, 16, 0,
+ 31, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 70, 126, 16, 0, 7, 0,
+ 0, 0, 167, 0, 0, 139,
+ 2, 131, 0, 128, 131, 153,
+ 25, 0, 242, 0, 16, 0,
+ 31, 0, 0, 0, 58, 0,
+ 16, 0, 31, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 70, 126, 16, 0,
+ 7, 0, 0, 0, 0, 0,
+ 0, 8, 242, 0, 16, 0,
+ 35, 0, 0, 0, 70, 14,
+ 16, 128, 65, 0, 0, 0,
+ 32, 0, 0, 0, 70, 14,
+ 16, 0, 33, 0, 0, 0,
+ 50, 0, 0, 9, 242, 0,
+ 16, 0, 36, 0, 0, 0,
+ 86, 5, 16, 0, 19, 0,
+ 0, 0, 70, 14, 16, 0,
+ 35, 0, 0, 0, 70, 14,
+ 16, 0, 32, 0, 0, 0,
+ 0, 0, 0, 8, 242, 0,
+ 16, 0, 37, 0, 0, 0,
+ 70, 14, 16, 128, 65, 0,
+ 0, 0, 34, 0, 0, 0,
+ 70, 14, 16, 0, 31, 0,
+ 0, 0, 50, 0, 0, 9,
+ 242, 0, 16, 0, 38, 0,
+ 0, 0, 86, 5, 16, 0,
+ 19, 0, 0, 0, 70, 14,
+ 16, 0, 37, 0, 0, 0,
+ 70, 14, 16, 0, 34, 0,
+ 0, 0, 52, 0, 0, 7,
+ 130, 0, 16, 0, 36, 0,
+ 0, 0, 58, 0, 16, 0,
+ 36, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 52, 0, 0, 7, 130, 0,
+ 16, 0, 38, 0, 0, 0,
+ 58, 0, 16, 0, 38, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 8, 242, 0, 16, 0,
+ 39, 0, 0, 0, 70, 14,
+ 16, 128, 65, 0, 0, 0,
+ 36, 0, 0, 0, 70, 14,
+ 16, 0, 38, 0, 0, 0,
+ 56, 0, 0, 10, 242, 0,
+ 16, 0, 39, 0, 0, 0,
+ 70, 14, 16, 0, 39, 0,
+ 0, 0, 2, 64, 0, 0,
+ 0, 0, 0, 63, 0, 0,
+ 0, 63, 0, 0, 0, 63,
+ 0, 0, 0, 63, 16, 0,
+ 0, 7, 130, 0, 16, 0,
+ 27, 0, 0, 0, 70, 2,
+ 16, 0, 39, 0, 0, 0,
+ 70, 2, 16, 0, 39, 0,
+ 0, 0, 29, 0, 0, 7,
+ 130, 0, 16, 0, 28, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 58, 0,
+ 16, 0, 27, 0, 0, 0,
+ 31, 0, 4, 3, 58, 0,
+ 16, 0, 28, 0, 0, 0,
+ 30, 0, 0, 7, 130, 0,
+ 16, 0, 28, 0, 0, 0,
+ 58, 0, 16, 0, 23, 0,
+ 0, 0, 1, 64, 0, 0,
+ 1, 0, 0, 0, 54, 0,
+ 0, 5, 130, 0, 16, 0,
+ 23, 0, 0, 0, 58, 0,
+ 16, 0, 28, 0, 0, 0,
+ 7, 0, 0, 1, 21, 0,
+ 0, 1, 50, 0, 0, 9,
+ 242, 0, 16, 0, 35, 0,
+ 0, 0, 6, 0, 16, 0,
+ 19, 0, 0, 0, 70, 14,
+ 16, 0, 35, 0, 0, 0,
+ 70, 14, 16, 0, 32, 0,
+ 0, 0, 50, 0, 0, 9,
+ 242, 0, 16, 0, 37, 0,
+ 0, 0, 6, 0, 16, 0,
+ 19, 0, 0, 0, 70, 14,
+ 16, 0, 37, 0, 0, 0,
+ 70, 14, 16, 0, 34, 0,
+ 0, 0, 52, 0, 0, 7,
+ 130, 0, 16, 0, 35, 0,
+ 0, 0, 58, 0, 16, 0,
+ 35, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 52, 0, 0, 7, 130, 0,
+ 16, 0, 37, 0, 0, 0,
+ 58, 0, 16, 0, 37, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 8, 242, 0, 16, 0,
+ 40, 0, 0, 0, 70, 14,
+ 16, 128, 65, 0, 0, 0,
+ 35, 0, 0, 0, 70, 14,
+ 16, 0, 37, 0, 0, 0,
+ 56, 0, 0, 10, 242, 0,
+ 16, 0, 41, 0, 0, 0,
+ 70, 14, 16, 0, 40, 0,
+ 0, 0, 2, 64, 0, 0,
+ 0, 0, 0, 63, 0, 0,
+ 0, 63, 0, 0, 0, 63,
+ 0, 0, 0, 63, 16, 0,
+ 0, 7, 130, 0, 16, 0,
+ 28, 0, 0, 0, 70, 2,
+ 16, 0, 41, 0, 0, 0,
+ 70, 2, 16, 0, 41, 0,
+ 0, 0, 50, 0, 0, 10,
+ 130, 0, 16, 0, 29, 0,
+ 0, 0, 58, 0, 16, 128,
+ 65, 0, 0, 0, 41, 0,
+ 0, 0, 58, 0, 16, 0,
+ 41, 0, 0, 0, 58, 0,
+ 16, 0, 28, 0, 0, 0,
+ 29, 0, 0, 7, 130, 0,
+ 16, 0, 31, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 58, 0, 16, 0,
+ 29, 0, 0, 0, 31, 0,
+ 4, 3, 58, 0, 16, 0,
+ 31, 0, 0, 0, 30, 0,
+ 0, 7, 130, 0, 16, 0,
+ 31, 0, 0, 0, 58, 0,
+ 16, 0, 23, 0, 0, 0,
+ 1, 64, 0, 0, 1, 0,
+ 0, 0, 54, 0, 0, 5,
+ 130, 0, 16, 0, 23, 0,
+ 0, 0, 58, 0, 16, 0,
+ 31, 0, 0, 0, 7, 0,
+ 0, 1, 21, 0, 0, 1,
+ 0, 0, 0, 7, 114, 0,
+ 16, 0, 40, 0, 0, 0,
+ 38, 9, 16, 0, 36, 0,
+ 0, 0, 38, 9, 16, 0,
+ 38, 0, 0, 0, 50, 0,
+ 0, 13, 114, 0, 16, 0,
+ 40, 0, 0, 0, 70, 2,
+ 16, 128, 65, 0, 0, 0,
+ 40, 0, 0, 0, 2, 64,
+ 0, 0, 0, 0, 0, 63,
+ 0, 0, 0, 63, 0, 0,
+ 0, 63, 0, 0, 0, 0,
+ 38, 9, 16, 0, 27, 0,
+ 0, 0, 50, 0, 0, 10,
+ 130, 0, 16, 0, 31, 0,
+ 0, 0, 58, 0, 16, 128,
+ 65, 0, 0, 0, 39, 0,
+ 0, 0, 58, 0, 16, 0,
+ 39, 0, 0, 0, 58, 0,
+ 16, 0, 27, 0, 0, 0,
+ 68, 0, 0, 5, 130, 0,
+ 16, 0, 27, 0, 0, 0,
+ 58, 0, 16, 0, 27, 0,
+ 0, 0, 68, 0, 0, 5,
+ 130, 0, 16, 0, 31, 0,
+ 0, 0, 58, 0, 16, 0,
+ 31, 0, 0, 0, 56, 0,
+ 0, 7, 114, 0, 16, 0,
+ 39, 0, 0, 0, 246, 15,
+ 16, 0, 27, 0, 0, 0,
+ 70, 2, 16, 0, 39, 0,
+ 0, 0, 56, 0, 0, 7,
+ 130, 0, 16, 0, 27, 0,
+ 0, 0, 58, 0, 16, 0,
+ 31, 0, 0, 0, 58, 0,
+ 16, 0, 39, 0, 0, 0,
+ 56, 0, 0, 7, 114, 0,
+ 16, 0, 42, 0, 0, 0,
+ 150, 4, 16, 0, 39, 0,
+ 0, 0, 70, 2, 16, 0,
+ 40, 0, 0, 0, 50, 0,
+ 0, 10, 114, 0, 16, 0,
+ 40, 0, 0, 0, 38, 9,
+ 16, 0, 40, 0, 0, 0,
+ 38, 9, 16, 0, 39, 0,
+ 0, 0, 70, 2, 16, 128,
+ 65, 0, 0, 0, 42, 0,
+ 0, 0, 16, 0, 0, 7,
+ 130, 0, 16, 0, 31, 0,
+ 0, 0, 70, 2, 16, 0,
+ 27, 0, 0, 0, 70, 2,
+ 16, 0, 39, 0, 0, 0,
+ 0, 0, 0, 7, 114, 0,
+ 16, 0, 39, 0, 0, 0,
+ 70, 2, 16, 0, 35, 0,
+ 0, 0, 70, 2, 16, 0,
+ 37, 0, 0, 0, 50, 0,
+ 0, 13, 114, 0, 16, 0,
+ 42, 0, 0, 0, 70, 2,
+ 16, 128, 65, 0, 0, 0,
+ 39, 0, 0, 0, 2, 64,
+ 0, 0, 0, 0, 0, 63,
+ 0, 0, 0, 63, 0, 0,
+ 0, 63, 0, 0, 0, 0,
+ 70, 2, 16, 0, 28, 0,
+ 0, 0, 68, 0, 0, 5,
+ 130, 0, 16, 0, 32, 0,
+ 0, 0, 58, 0, 16, 0,
+ 28, 0, 0, 0, 68, 0,
+ 0, 5, 130, 0, 16, 0,
+ 29, 0, 0, 0, 58, 0,
+ 16, 0, 29, 0, 0, 0,
+ 56, 0, 0, 7, 130, 0,
+ 16, 0, 28, 0, 0, 0,
+ 58, 0, 16, 0, 28, 0,
+ 0, 0, 58, 0, 16, 0,
+ 32, 0, 0, 0, 50, 0,
+ 0, 9, 130, 0, 16, 0,
+ 33, 0, 0, 0, 58, 0,
+ 16, 0, 40, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 63, 58, 0, 16, 0,
+ 35, 0, 0, 0, 56, 0,
+ 0, 7, 130, 0, 16, 0,
+ 33, 0, 0, 0, 58, 0,
+ 16, 0, 29, 0, 0, 0,
+ 58, 0, 16, 0, 33, 0,
+ 0, 0, 56, 0, 0, 7,
+ 130, 0, 16, 0, 33, 0,
+ 0, 0, 58, 0, 16, 0,
+ 28, 0, 0, 0, 58, 0,
+ 16, 0, 33, 0, 0, 0,
+ 56, 0, 0, 7, 114, 0,
+ 16, 0, 43, 0, 0, 0,
+ 246, 15, 16, 0, 32, 0,
+ 0, 0, 70, 2, 16, 0,
+ 41, 0, 0, 0, 56, 0,
+ 0, 7, 114, 0, 16, 0,
+ 44, 0, 0, 0, 38, 9,
+ 16, 0, 42, 0, 0, 0,
+ 150, 4, 16, 0, 43, 0,
+ 0, 0, 50, 0, 0, 10,
+ 114, 0, 16, 0, 44, 0,
+ 0, 0, 150, 4, 16, 0,
+ 42, 0, 0, 0, 38, 9,
+ 16, 0, 43, 0, 0, 0,
+ 70, 2, 16, 128, 65, 0,
+ 0, 0, 44, 0, 0, 0,
+ 16, 0, 0, 7, 130, 0,
+ 16, 0, 34, 0, 0, 0,
+ 70, 2, 16, 0, 28, 0,
+ 0, 0, 70, 2, 16, 0,
+ 43, 0, 0, 0, 16, 0,
+ 0, 7, 130, 0, 16, 0,
+ 35, 0, 0, 0, 70, 2,
+ 16, 0, 44, 0, 0, 0,
+ 70, 2, 16, 0, 44, 0,
+ 0, 0, 0, 0, 0, 7,
+ 18, 0, 16, 0, 45, 0,
+ 0, 0, 58, 0, 16, 0,
+ 35, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 52,
+ 50, 0, 0, 9, 130, 0,
+ 16, 0, 31, 0, 0, 0,
+ 58, 0, 16, 0, 31, 0,
+ 0, 0, 58, 0, 16, 0,
+ 27, 0, 0, 0, 58, 0,
+ 16, 0, 33, 0, 0, 0,
+ 52, 0, 0, 7, 130, 0,
+ 16, 0, 31, 0, 0, 0,
+ 58, 0, 16, 0, 31, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 56, 0,
+ 0, 7, 130, 0, 16, 0,
+ 29, 0, 0, 0, 58, 0,
+ 16, 0, 29, 0, 0, 0,
+ 58, 0, 16, 0, 41, 0,
+ 0, 0, 50, 0, 0, 9,
+ 130, 0, 16, 0, 34, 0,
+ 0, 0, 58, 0, 16, 0,
+ 34, 0, 0, 0, 58, 0,
+ 16, 0, 29, 0, 0, 0,
+ 58, 0, 16, 0, 33, 0,
+ 0, 0, 52, 0, 0, 7,
+ 34, 0, 16, 0, 45, 0,
+ 0, 0, 58, 0, 16, 0,
+ 34, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 16, 0, 0, 7, 130, 0,
+ 16, 0, 34, 0, 0, 0,
+ 70, 2, 16, 0, 40, 0,
+ 0, 0, 70, 2, 16, 0,
+ 40, 0, 0, 0, 50, 0,
+ 0, 10, 130, 0, 16, 0,
+ 34, 0, 0, 0, 10, 0,
+ 16, 128, 65, 0, 0, 0,
+ 40, 0, 0, 0, 10, 0,
+ 16, 0, 40, 0, 0, 0,
+ 58, 0, 16, 0, 34, 0,
+ 0, 0, 50, 0, 0, 10,
+ 130, 0, 16, 0, 34, 0,
+ 0, 0, 58, 0, 16, 128,
+ 65, 0, 0, 0, 31, 0,
+ 0, 0, 58, 0, 16, 0,
+ 31, 0, 0, 0, 58, 0,
+ 16, 0, 34, 0, 0, 0,
+ 16, 0, 0, 7, 130, 0,
+ 16, 0, 35, 0, 0, 0,
+ 70, 2, 16, 0, 40, 0,
+ 0, 0, 70, 2, 16, 0,
+ 44, 0, 0, 0, 50, 0,
+ 0, 10, 130, 0, 16, 0,
+ 35, 0, 0, 0, 58, 0,
+ 16, 128, 65, 0, 0, 0,
+ 31, 0, 0, 0, 26, 0,
+ 16, 0, 45, 0, 0, 0,
+ 58, 0, 16, 0, 35, 0,
+ 0, 0, 50, 0, 0, 10,
+ 130, 0, 16, 0, 36, 0,
+ 0, 0, 26, 0, 16, 128,
+ 65, 0, 0, 0, 45, 0,
+ 0, 0, 26, 0, 16, 0,
+ 45, 0, 0, 0, 10, 0,
+ 16, 0, 45, 0, 0, 0,
+ 56, 0, 0, 7, 130, 0,
+ 16, 0, 37, 0, 0, 0,
+ 58, 0, 16, 0, 34, 0,
+ 0, 0, 58, 0, 16, 0,
+ 36, 0, 0, 0, 50, 0,
+ 0, 10, 130, 0, 16, 0,
+ 37, 0, 0, 0, 58, 0,
+ 16, 0, 35, 0, 0, 0,
+ 58, 0, 16, 0, 35, 0,
+ 0, 0, 58, 0, 16, 128,
+ 65, 0, 0, 0, 37, 0,
+ 0, 0, 75, 0, 0, 5,
+ 130, 0, 16, 0, 38, 0,
+ 0, 0, 58, 0, 16, 0,
+ 37, 0, 0, 0, 0, 0,
+ 0, 8, 130, 0, 16, 0,
+ 34, 0, 0, 0, 58, 0,
+ 16, 128, 65, 0, 0, 0,
+ 34, 0, 0, 0, 58, 0,
+ 16, 0, 35, 0, 0, 0,
+ 0, 0, 0, 8, 130, 0,
+ 16, 0, 35, 0, 0, 0,
+ 58, 0, 16, 0, 35, 0,
+ 0, 0, 58, 0, 16, 128,
+ 65, 0, 0, 0, 36, 0,
+ 0, 0, 0, 0, 0, 7,
+ 130, 0, 16, 0, 35, 0,
+ 0, 0, 58, 0, 16, 0,
+ 34, 0, 0, 0, 58, 0,
+ 16, 0, 35, 0, 0, 0,
+ 0, 0, 0, 7, 130, 0,
+ 16, 0, 36, 0, 0, 0,
+ 58, 0, 16, 0, 38, 0,
+ 0, 0, 58, 0, 16, 0,
+ 34, 0, 0, 0, 51, 0,
+ 0, 7, 130, 0, 16, 0,
+ 36, 0, 0, 0, 58, 0,
+ 16, 0, 36, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 14, 0, 0, 7,
+ 130, 0, 16, 0, 36, 0,
+ 0, 0, 58, 0, 16, 0,
+ 36, 0, 0, 0, 58, 0,
+ 16, 0, 35, 0, 0, 0,
+ 49, 0, 0, 7, 130, 0,
+ 16, 0, 39, 0, 0, 0,
+ 58, 0, 16, 0, 36, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 128, 63, 49, 0,
+ 0, 7, 18, 0, 16, 0,
+ 40, 0, 0, 0, 58, 0,
+ 16, 0, 34, 0, 0, 0,
+ 58, 0, 16, 0, 38, 0,
+ 0, 0, 1, 0, 0, 7,
+ 130, 0, 16, 0, 39, 0,
+ 0, 0, 58, 0, 16, 0,
+ 39, 0, 0, 0, 10, 0,
+ 16, 0, 40, 0, 0, 0,
+ 56, 0, 0, 7, 130, 0,
+ 16, 0, 34, 0, 0, 0,
+ 58, 0, 16, 0, 34, 0,
+ 0, 0, 58, 0, 16, 0,
+ 35, 0, 0, 0, 0, 0,
+ 0, 8, 18, 0, 16, 0,
+ 40, 0, 0, 0, 58, 0,
+ 16, 128, 65, 0, 0, 0,
+ 31, 0, 0, 0, 26, 0,
+ 16, 0, 45, 0, 0, 0,
+ 50, 0, 0, 9, 130, 0,
+ 16, 0, 31, 0, 0, 0,
+ 58, 0, 16, 0, 34, 0,
+ 0, 0, 10, 0, 16, 0,
+ 40, 0, 0, 0, 58, 0,
+ 16, 0, 31, 0, 0, 0,
+ 56, 0, 0, 7, 130, 0,
+ 16, 0, 31, 0, 0, 0,
+ 58, 0, 16, 0, 31, 0,
+ 0, 0, 58, 0, 16, 0,
+ 31, 0, 0, 0, 56, 0,
+ 0, 7, 130, 0, 16, 0,
+ 31, 0, 0, 0, 58, 0,
+ 16, 0, 35, 0, 0, 0,
+ 58, 0, 16, 0, 31, 0,
+ 0, 0, 56, 0, 0, 7,
+ 130, 0, 16, 0, 31, 0,
+ 0, 0, 58, 0, 16, 0,
+ 31, 0, 0, 0, 1, 64,
+ 0, 0, 236, 81, 184, 190,
+ 49, 0, 0, 7, 130, 0,
+ 16, 0, 31, 0, 0, 0,
+ 58, 0, 16, 0, 31, 0,
+ 0, 0, 58, 0, 16, 0,
+ 37, 0, 0, 0, 1, 0,
+ 0, 7, 130, 0, 16, 0,
+ 31, 0, 0, 0, 58, 0,
+ 16, 0, 31, 0, 0, 0,
+ 58, 0, 16, 0, 39, 0,
+ 0, 0, 49, 0, 0, 7,
+ 130, 0, 16, 0, 34, 0,
+ 0, 0, 58, 0, 16, 0,
+ 35, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 180,
+ 1, 0, 0, 7, 130, 0,
+ 16, 0, 31, 0, 0, 0,
+ 58, 0, 16, 0, 31, 0,
+ 0, 0, 58, 0, 16, 0,
+ 34, 0, 0, 0, 0, 0,
+ 0, 8, 114, 0, 16, 0,
+ 40, 0, 0, 0, 70, 2,
+ 16, 0, 27, 0, 0, 0,
+ 70, 2, 16, 128, 65, 0,
+ 0, 0, 28, 0, 0, 0,
+ 50, 0, 0, 10, 114, 0,
+ 16, 0, 44, 0, 0, 0,
+ 70, 2, 16, 128, 65, 0,
+ 0, 0, 40, 0, 0, 0,
+ 246, 15, 16, 0, 36, 0,
+ 0, 0, 70, 2, 16, 0,
+ 27, 0, 0, 0, 56, 0,
+ 0, 7, 114, 0, 16, 0,
+ 46, 0, 0, 0, 246, 15,
+ 16, 0, 28, 0, 0, 0,
+ 70, 2, 16, 0, 41, 0,
+ 0, 0, 0, 0, 0, 8,
+ 114, 0, 16, 0, 36, 0,
+ 0, 0, 70, 2, 16, 128,
+ 65, 0, 0, 0, 36, 0,
+ 0, 0, 70, 2, 16, 0,
+ 38, 0, 0, 0, 56, 0,
+ 0, 10, 114, 0, 16, 0,
+ 36, 0, 0, 0, 70, 2,
+ 16, 0, 36, 0, 0, 0,
+ 2, 64, 0, 0, 0, 0,
+ 0, 63, 0, 0, 0, 63,
+ 0, 0, 0, 63, 0, 0,
+ 0, 0, 16, 0, 0, 7,
+ 130, 0, 16, 0, 34, 0,
+ 0, 0, 70, 2, 16, 0,
+ 36, 0, 0, 0, 70, 2,
+ 16, 0, 36, 0, 0, 0,
+ 68, 0, 0, 5, 130, 0,
+ 16, 0, 37, 0, 0, 0,
+ 58, 0, 16, 0, 34, 0,
+ 0, 0, 56, 0, 0, 7,
+ 130, 0, 16, 0, 34, 0,
+ 0, 0, 58, 0, 16, 0,
+ 34, 0, 0, 0, 58, 0,
+ 16, 0, 37, 0, 0, 0,
+ 50, 0, 0, 10, 114, 0,
+ 16, 0, 36, 0, 0, 0,
+ 70, 2, 16, 128, 65, 0,
+ 0, 0, 36, 0, 0, 0,
+ 246, 15, 16, 0, 34, 0,
+ 0, 0, 70, 2, 16, 0,
+ 46, 0, 0, 0, 0, 0,
+ 0, 8, 130, 0, 16, 0,
+ 34, 0, 0, 0, 58, 0,
+ 16, 128, 65, 0, 0, 0,
+ 36, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 128, 63,
+ 50, 0, 0, 10, 114, 0,
+ 16, 0, 38, 0, 0, 0,
+ 70, 2, 16, 128, 65, 0,
+ 0, 0, 36, 0, 0, 0,
+ 246, 15, 16, 0, 34, 0,
+ 0, 0, 70, 2, 16, 0,
+ 46, 0, 0, 0, 56, 0,
+ 0, 7, 130, 0, 16, 0,
+ 36, 0, 0, 0, 58, 0,
+ 16, 0, 29, 0, 0, 0,
+ 58, 0, 16, 0, 36, 0,
+ 0, 0, 50, 0, 0, 9,
+ 130, 0, 16, 0, 27, 0,
+ 0, 0, 58, 0, 16, 0,
+ 27, 0, 0, 0, 58, 0,
+ 16, 0, 34, 0, 0, 0,
+ 58, 0, 16, 0, 36, 0,
+ 0, 0, 16, 0, 0, 7,
+ 130, 0, 16, 0, 36, 0,
+ 0, 0, 70, 2, 16, 0,
+ 38, 0, 0, 0, 70, 2,
+ 16, 0, 38, 0, 0, 0,
+ 68, 0, 0, 5, 130, 0,
+ 16, 0, 36, 0, 0, 0,
+ 58, 0, 16, 0, 36, 0,
+ 0, 0, 16, 0, 0, 7,
+ 130, 0, 16, 0, 37, 0,
+ 0, 0, 70, 2, 16, 0,
+ 44, 0, 0, 0, 70, 2,
+ 16, 0, 38, 0, 0, 0,
+ 56, 0, 0, 7, 130, 0,
+ 16, 0, 37, 0, 0, 0,
+ 58, 0, 16, 0, 36, 0,
+ 0, 0, 58, 0, 16, 0,
+ 37, 0, 0, 0, 16, 0,
+ 0, 7, 18, 0, 16, 0,
+ 38, 0, 0, 0, 70, 2,
+ 16, 0, 44, 0, 0, 0,
+ 70, 2, 16, 0, 44, 0,
+ 0, 0, 50, 0, 0, 10,
+ 18, 0, 16, 0, 38, 0,
+ 0, 0, 58, 0, 16, 128,
+ 65, 0, 0, 0, 37, 0,
+ 0, 0, 58, 0, 16, 0,
+ 37, 0, 0, 0, 10, 0,
+ 16, 0, 38, 0, 0, 0,
+ 49, 0, 0, 7, 34, 0,
+ 16, 0, 38, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 10, 0, 16, 0,
+ 38, 0, 0, 0, 68, 0,
+ 0, 5, 66, 0, 16, 0,
+ 38, 0, 0, 0, 10, 0,
+ 16, 0, 38, 0, 0, 0,
+ 1, 0, 0, 7, 34, 0,
+ 16, 0, 38, 0, 0, 0,
+ 42, 0, 16, 0, 38, 0,
+ 0, 0, 26, 0, 16, 0,
+ 38, 0, 0, 0, 56, 0,
+ 0, 7, 130, 0, 16, 0,
+ 27, 0, 0, 0, 58, 0,
+ 16, 0, 27, 0, 0, 0,
+ 10, 0, 16, 0, 38, 0,
+ 0, 0, 50, 0, 0, 9,
+ 130, 0, 16, 0, 27, 0,
+ 0, 0, 58, 0, 16, 0,
+ 27, 0, 0, 0, 26, 0,
+ 16, 0, 38, 0, 0, 0,
+ 58, 0, 16, 0, 37, 0,
+ 0, 0, 56, 0, 0, 7,
+ 130, 0, 16, 0, 27, 0,
+ 0, 0, 58, 0, 16, 0,
+ 36, 0, 0, 0, 58, 0,
+ 16, 0, 27, 0, 0, 0,
+ 49, 0, 0, 8, 130, 0,
+ 16, 0, 36, 0, 0, 0,
+ 58, 0, 16, 128, 129, 0,
+ 0, 0, 27, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 128, 63, 50, 0, 0, 9,
+ 114, 0, 16, 0, 36, 0,
+ 0, 0, 70, 2, 16, 0,
+ 36, 0, 0, 0, 246, 15,
+ 16, 0, 27, 0, 0, 0,
+ 70, 2, 16, 0, 40, 0,
+ 0, 0, 56, 0, 0, 7,
+ 130, 0, 16, 0, 27, 0,
+ 0, 0, 58, 0, 16, 0,
+ 34, 0, 0, 0, 58, 0,
+ 16, 0, 35, 0, 0, 0,
+ 14, 0, 0, 7, 130, 0,
+ 16, 0, 27, 0, 0, 0,
+ 58, 0, 16, 0, 38, 0,
+ 0, 0, 58, 0, 16, 0,
+ 27, 0, 0, 0, 0, 0,
+ 0, 8, 130, 0, 16, 0,
+ 27, 0, 0, 0, 58, 0,
+ 16, 128, 65, 0, 0, 0,
+ 27, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 128, 63,
+ 14, 0, 0, 7, 130, 0,
+ 16, 0, 27, 0, 0, 0,
+ 58, 0, 16, 0, 34, 0,
+ 0, 0, 58, 0, 16, 0,
+ 27, 0, 0, 0, 50, 0,
+ 0, 9, 114, 0, 16, 0,
+ 36, 0, 0, 0, 70, 2,
+ 16, 0, 36, 0, 0, 0,
+ 246, 15, 16, 0, 27, 0,
+ 0, 0, 70, 2, 16, 0,
+ 42, 0, 0, 0, 50, 0,
+ 0, 12, 114, 0, 16, 0,
+ 38, 0, 0, 0, 70, 2,
+ 16, 0, 39, 0, 0, 0,
+ 2, 64, 0, 0, 0, 0,
+ 0, 63, 0, 0, 0, 63,
+ 0, 0, 0, 63, 0, 0,
+ 0, 0, 70, 2, 16, 0,
+ 36, 0, 0, 0, 55, 0,
+ 0, 9, 114, 0, 16, 0,
+ 38, 0, 0, 0, 246, 15,
+ 16, 0, 36, 0, 0, 0,
+ 70, 2, 16, 0, 38, 0,
+ 0, 0, 70, 2, 16, 0,
+ 28, 0, 0, 0, 55, 0,
+ 0, 9, 114, 0, 16, 0,
+ 38, 0, 0, 0, 246, 15,
+ 16, 0, 31, 0, 0, 0,
+ 70, 2, 16, 0, 38, 0,
+ 0, 0, 70, 2, 16, 0,
+ 28, 0, 0, 0, 50, 0,
+ 0, 13, 114, 0, 16, 0,
+ 39, 0, 0, 0, 70, 2,
+ 16, 128, 65, 0, 0, 0,
+ 39, 0, 0, 0, 2, 64,
+ 0, 0, 0, 0, 0, 63,
+ 0, 0, 0, 63, 0, 0,
+ 0, 63, 0, 0, 0, 0,
+ 70, 2, 16, 0, 38, 0,
+ 0, 0, 16, 0, 0, 7,
+ 130, 0, 16, 0, 27, 0,
+ 0, 0, 70, 2, 16, 0,
+ 39, 0, 0, 0, 70, 2,
+ 16, 0, 43, 0, 0, 0,
+ 50, 0, 0, 9, 130, 0,
+ 16, 0, 34, 0, 0, 0,
+ 58, 0, 16, 0, 27, 0,
+ 0, 0, 58, 0, 16, 0,
+ 29, 0, 0, 0, 58, 0,
+ 16, 0, 33, 0, 0, 0,
+ 52, 0, 0, 7, 130, 0,
+ 16, 0, 34, 0, 0, 0,
+ 58, 0, 16, 0, 34, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 16, 0,
+ 0, 7, 130, 0, 16, 0,
+ 35, 0, 0, 0, 70, 2,
+ 16, 0, 39, 0, 0, 0,
+ 70, 2, 16, 0, 39, 0,
+ 0, 0, 50, 0, 0, 10,
+ 130, 0, 16, 0, 35, 0,
+ 0, 0, 58, 0, 16, 128,
+ 65, 0, 0, 0, 27, 0,
+ 0, 0, 58, 0, 16, 0,
+ 27, 0, 0, 0, 58, 0,
+ 16, 0, 35, 0, 0, 0,
+ 56, 0, 0, 7, 130, 0,
+ 16, 0, 37, 0, 0, 0,
+ 58, 0, 16, 0, 34, 0,
+ 0, 0, 58, 0, 16, 0,
+ 34, 0, 0, 0, 49, 0,
+ 0, 7, 130, 0, 16, 0,
+ 37, 0, 0, 0, 58, 0,
+ 16, 0, 37, 0, 0, 0,
+ 58, 0, 16, 0, 35, 0,
+ 0, 0, 31, 0, 4, 3,
+ 58, 0, 16, 0, 37, 0,
+ 0, 0, 30, 0, 0, 7,
+ 130, 0, 16, 0, 37, 0,
+ 0, 0, 58, 0, 16, 0,
+ 23, 0, 0, 0, 1, 64,
+ 0, 0, 1, 0, 0, 0,
+ 54, 0, 0, 5, 114, 0,
+ 16, 0, 28, 0, 0, 0,
+ 70, 2, 16, 0, 38, 0,
+ 0, 0, 54, 0, 0, 5,
+ 130, 0, 16, 0, 23, 0,
+ 0, 0, 58, 0, 16, 0,
+ 37, 0, 0, 0, 7, 0,
+ 0, 1, 21, 0, 0, 1,
+ 56, 0, 0, 7, 130, 0,
+ 16, 0, 32, 0, 0, 0,
+ 58, 0, 16, 0, 32, 0,
+ 0, 0, 58, 0, 16, 0,
+ 41, 0, 0, 0, 50, 0,
+ 0, 10, 130, 0, 16, 0,
+ 32, 0, 0, 0, 58, 0,
+ 16, 128, 65, 0, 0, 0,
+ 32, 0, 0, 0, 58, 0,
+ 16, 0, 32, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 128, 63, 52, 0, 0, 7,
+ 130, 0, 16, 0, 35, 0,
+ 0, 0, 58, 0, 16, 0,
+ 35, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 52,
+ 68, 0, 0, 5, 130, 0,
+ 16, 0, 37, 0, 0, 0,
+ 58, 0, 16, 0, 35, 0,
+ 0, 0, 56, 0, 0, 7,
+ 130, 0, 16, 0, 35, 0,
+ 0, 0, 58, 0, 16, 0,
+ 29, 0, 0, 0, 58, 0,
+ 16, 0, 35, 0, 0, 0,
+ 50, 0, 0, 9, 130, 0,
+ 16, 0, 35, 0, 0, 0,
+ 58, 0, 16, 0, 35, 0,
+ 0, 0, 58, 0, 16, 0,
+ 37, 0, 0, 0, 58, 0,
+ 16, 0, 27, 0, 0, 0,
+ 49, 0, 0, 8, 130, 0,
+ 16, 0, 28, 0, 0, 0,
+ 58, 0, 16, 128, 129, 0,
+ 0, 0, 35, 0, 0, 0,
+ 58, 0, 16, 0, 28, 0,
+ 0, 0, 50, 0, 0, 10,
+ 114, 0, 16, 0, 39, 0,
+ 0, 0, 246, 15, 16, 128,
+ 65, 0, 0, 0, 35, 0,
+ 0, 0, 70, 2, 16, 0,
+ 41, 0, 0, 0, 70, 2,
+ 16, 0, 39, 0, 0, 0,
+ 56, 0, 0, 7, 130, 0,
+ 16, 0, 34, 0, 0, 0,
+ 58, 0, 16, 0, 34, 0,
+ 0, 0, 58, 0, 16, 0,
+ 37, 0, 0, 0, 50, 0,
+ 0, 10, 130, 0, 16, 0,
+ 32, 0, 0, 0, 58, 0,
+ 16, 0, 34, 0, 0, 0,
+ 58, 0, 16, 0, 32, 0,
+ 0, 0, 58, 0, 16, 128,
+ 65, 0, 0, 0, 32, 0,
+ 0, 0, 50, 0, 0, 9,
+ 114, 0, 16, 0, 39, 0,
+ 0, 0, 70, 2, 16, 0,
+ 39, 0, 0, 0, 246, 15,
+ 16, 0, 32, 0, 0, 0,
+ 70, 2, 16, 0, 29, 0,
+ 0, 0, 0, 0, 0, 8,
+ 114, 0, 16, 0, 33, 0,
+ 0, 0, 70, 2, 16, 128,
+ 65, 0, 0, 0, 32, 0,
+ 0, 0, 70, 2, 16, 0,
+ 33, 0, 0, 0, 50, 0,
+ 0, 9, 114, 0, 16, 0,
+ 32, 0, 0, 0, 86, 5,
+ 16, 0, 19, 0, 0, 0,
+ 70, 2, 16, 0, 33, 0,
+ 0, 0, 70, 2, 16, 0,
+ 32, 0, 0, 0, 0, 0,
+ 0, 8, 114, 0, 16, 0,
+ 31, 0, 0, 0, 70, 2,
+ 16, 128, 65, 0, 0, 0,
+ 34, 0, 0, 0, 70, 2,
+ 16, 0, 31, 0, 0, 0,
+ 50, 0, 0, 9, 114, 0,
+ 16, 0, 31, 0, 0, 0,
+ 86, 5, 16, 0, 19, 0,
+ 0, 0, 70, 2, 16, 0,
+ 31, 0, 0, 0, 70, 2,
+ 16, 0, 34, 0, 0, 0,
+ 50, 0, 0, 9, 130, 0,
+ 16, 0, 27, 0, 0, 0,
+ 58, 0, 16, 0, 27, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 63, 1, 64,
+ 0, 0, 0, 0, 0, 63,
+ 0, 0, 0, 8, 114, 0,
+ 16, 0, 32, 0, 0, 0,
+ 70, 2, 16, 128, 65, 0,
+ 0, 0, 32, 0, 0, 0,
+ 70, 2, 16, 0, 35, 0,
+ 0, 0, 0, 0, 0, 8,
+ 114, 0, 16, 0, 31, 0,
+ 0, 0, 70, 2, 16, 128,
+ 65, 0, 0, 0, 31, 0,
+ 0, 0, 70, 2, 16, 0,
+ 37, 0, 0, 0, 0, 0,
+ 0, 8, 114, 0, 16, 0,
+ 31, 0, 0, 0, 70, 2,
+ 16, 128, 65, 0, 0, 0,
+ 32, 0, 0, 0, 70, 2,
+ 16, 0, 31, 0, 0, 0,
+ 50, 0, 0, 9, 114, 0,
+ 16, 0, 31, 0, 0, 0,
+ 246, 15, 16, 0, 27, 0,
+ 0, 0, 70, 2, 16, 0,
+ 31, 0, 0, 0, 70, 2,
+ 16, 0, 32, 0, 0, 0,
+ 0, 0, 0, 7, 114, 0,
+ 16, 0, 31, 0, 0, 0,
+ 70, 2, 16, 0, 30, 0,
+ 0, 0, 70, 2, 16, 0,
+ 31, 0, 0, 0, 55, 0,
+ 0, 9, 114, 0, 16, 0,
+ 32, 0, 0, 0, 246, 15,
+ 16, 0, 19, 0, 0, 0,
+ 70, 2, 16, 0, 31, 0,
+ 0, 0, 70, 2, 16, 0,
+ 30, 0, 0, 0, 30, 0,
+ 0, 7, 130, 0, 16, 0,
+ 32, 0, 0, 0, 58, 0,
+ 16, 0, 30, 0, 0, 0,
+ 1, 64, 0, 0, 1, 0,
+ 0, 0, 55, 0, 0, 9,
+ 114, 0, 16, 0, 31, 0,
+ 0, 0, 246, 15, 16, 0,
+ 28, 0, 0, 0, 70, 2,
+ 16, 0, 39, 0, 0, 0,
+ 70, 2, 16, 0, 29, 0,
+ 0, 0, 55, 0, 0, 9,
+ 242, 0, 16, 0, 32, 0,
+ 0, 0, 246, 15, 16, 0,
+ 28, 0, 0, 0, 70, 14,
+ 16, 0, 32, 0, 0, 0,
+ 70, 14, 16, 0, 30, 0,
+ 0, 0, 16, 0, 0, 7,
+ 130, 0, 16, 0, 27, 0,
+ 0, 0, 70, 2, 16, 0,
+ 36, 0, 0, 0, 70, 2,
+ 16, 0, 41, 0, 0, 0,
+ 50, 0, 0, 9, 130, 0,
+ 16, 0, 28, 0, 0, 0,
+ 58, 0, 16, 0, 27, 0,
+ 0, 0, 58, 0, 16, 0,
+ 29, 0, 0, 0, 58, 0,
+ 16, 0, 33, 0, 0, 0,
+ 52, 0, 0, 7, 34, 0,
+ 16, 0, 33, 0, 0, 0,
+ 58, 0, 16, 0, 28, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 16, 0,
+ 0, 7, 130, 0, 16, 0,
+ 28, 0, 0, 0, 70, 2,
+ 16, 0, 36, 0, 0, 0,
+ 70, 2, 16, 0, 36, 0,
+ 0, 0, 50, 0, 0, 10,
+ 18, 0, 16, 0, 33, 0,
+ 0, 0, 58, 0, 16, 128,
+ 65, 0, 0, 0, 27, 0,
+ 0, 0, 58, 0, 16, 0,
+ 27, 0, 0, 0, 58, 0,
+ 16, 0, 28, 0, 0, 0,
+ 55, 0, 0, 9, 50, 0,
+ 16, 0, 33, 0, 0, 0,
+ 246, 15, 16, 0, 36, 0,
+ 0, 0, 70, 0, 16, 0,
+ 33, 0, 0, 0, 70, 0,
+ 16, 0, 45, 0, 0, 0,
+ 55, 0, 0, 9, 50, 0,
+ 16, 0, 33, 0, 0, 0,
+ 246, 15, 16, 0, 31, 0,
+ 0, 0, 70, 0, 16, 0,
+ 33, 0, 0, 0, 70, 0,
+ 16, 0, 45, 0, 0, 0,
+ 56, 0, 0, 7, 130, 0,
+ 16, 0, 27, 0, 0, 0,
+ 26, 0, 16, 0, 33, 0,
+ 0, 0, 26, 0, 16, 0,
+ 33, 0, 0, 0, 29, 0,
+ 0, 7, 130, 0, 16, 0,
+ 27, 0, 0, 0, 10, 0,
+ 16, 0, 33, 0, 0, 0,
+ 58, 0, 16, 0, 27, 0,
+ 0, 0, 31, 0, 4, 3,
+ 58, 0, 16, 0, 27, 0,
+ 0, 0, 30, 0, 0, 7,
+ 130, 0, 16, 0, 27, 0,
+ 0, 0, 58, 0, 16, 0,
+ 23, 0, 0, 0, 1, 64,
+ 0, 0, 1, 0, 0, 0,
+ 54, 0, 0, 5, 114, 0,
+ 16, 0, 28, 0, 0, 0,
+ 70, 2, 16, 0, 38, 0,
+ 0, 0, 54, 0, 0, 5,
+ 114, 0, 16, 0, 29, 0,
+ 0, 0, 70, 2, 16, 0,
+ 31, 0, 0, 0, 54, 0,
+ 0, 5, 242, 0, 16, 0,
+ 30, 0, 0, 0, 70, 14,
+ 16, 0, 32, 0, 0, 0,
+ 54, 0, 0, 5, 130, 0,
+ 16, 0, 23, 0, 0, 0,
+ 58, 0, 16, 0, 27, 0,
+ 0, 0, 7, 0, 0, 1,
+ 21, 0, 0, 1, 54, 0,
+ 0, 5, 114, 0, 16, 0,
+ 28, 0, 0, 0, 70, 2,
+ 16, 0, 38, 0, 0, 0,
+ 54, 0, 0, 5, 114, 0,
+ 16, 0, 29, 0, 0, 0,
+ 70, 2, 16, 0, 31, 0,
+ 0, 0, 54, 0, 0, 5,
+ 242, 0, 16, 0, 30, 0,
+ 0, 0, 70, 14, 16, 0,
+ 32, 0, 0, 0, 30, 0,
+ 0, 7, 130, 0, 16, 0,
+ 23, 0, 0, 0, 58, 0,
+ 16, 0, 23, 0, 0, 0,
+ 1, 64, 0, 0, 1, 0,
+ 0, 0, 22, 0, 0, 1,
+ 165, 0, 0, 7, 130, 0,
+ 16, 0, 23, 0, 0, 0,
+ 1, 64, 0, 0, 52, 0,
+ 0, 0, 6, 240, 17, 0,
+ 0, 0, 0, 0, 54, 0,
+ 0, 5, 114, 0, 16, 0,
+ 31, 0, 0, 0, 70, 2,
+ 16, 0, 28, 0, 0, 0,
+ 54, 0, 0, 5, 114, 0,
+ 16, 0, 32, 0, 0, 0,
+ 70, 2, 16, 0, 29, 0,
+ 0, 0, 54, 0, 0, 5,
+ 242, 0, 16, 0, 33, 0,
+ 0, 0, 70, 14, 16, 0,
+ 30, 0, 0, 0, 54, 0,
+ 0, 5, 130, 0, 16, 0,
+ 27, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 48, 0, 0, 1, 80, 0,
+ 0, 7, 130, 0, 16, 0,
+ 28, 0, 0, 0, 58, 0,
+ 16, 0, 27, 0, 0, 0,
+ 58, 0, 16, 0, 23, 0,
+ 0, 0, 3, 0, 4, 3,
+ 58, 0, 16, 0, 28, 0,
+ 0, 0, 30, 0, 0, 7,
+ 50, 0, 16, 0, 34, 0,
+ 0, 0, 150, 5, 16, 0,
+ 20, 0, 0, 0, 246, 15,
+ 16, 0, 27, 0, 0, 0,
+ 167, 0, 0, 139, 2, 131,
+ 0, 128, 131, 153, 25, 0,
+ 242, 0, 16, 0, 35, 0,
+ 0, 0, 10, 0, 16, 0,
+ 34, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 70, 126, 16, 0, 7, 0,
+ 0, 0, 167, 0, 0, 139,
+ 2, 131, 0, 128, 131, 153,
+ 25, 0, 242, 0, 16, 0,
+ 34, 0, 0, 0, 26, 0,
+ 16, 0, 34, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 70, 126, 16, 0,
+ 7, 0, 0, 0, 0, 0,
+ 0, 8, 242, 0, 16, 0,
+ 34, 0, 0, 0, 70, 14,
+ 16, 128, 65, 0, 0, 0,
+ 35, 0, 0, 0, 70, 14,
+ 16, 0, 34, 0, 0, 0,
+ 50, 0, 0, 9, 242, 0,
+ 16, 0, 36, 0, 0, 0,
+ 6, 0, 16, 0, 19, 0,
+ 0, 0, 70, 14, 16, 0,
+ 34, 0, 0, 0, 70, 14,
+ 16, 0, 35, 0, 0, 0,
+ 52, 0, 0, 7, 130, 0,
+ 16, 0, 28, 0, 0, 0,
+ 58, 0, 16, 0, 36, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 50, 0,
+ 0, 9, 114, 0, 16, 0,
+ 34, 0, 0, 0, 86, 5,
+ 16, 0, 19, 0, 0, 0,
+ 70, 2, 16, 0, 34, 0,
+ 0, 0, 70, 2, 16, 0,
+ 35, 0, 0, 0, 0, 0,
+ 0, 8, 114, 0, 16, 0,
+ 35, 0, 0, 0, 70, 2,
+ 16, 0, 31, 0, 0, 0,
+ 70, 2, 16, 128, 65, 0,
+ 0, 0, 36, 0, 0, 0,
+ 0, 0, 0, 8, 114, 0,
+ 16, 0, 37, 0, 0, 0,
+ 70, 2, 16, 0, 27, 0,
+ 0, 0, 70, 2, 16, 128,
+ 65, 0, 0, 0, 34, 0,
+ 0, 0, 16, 0, 0, 7,
+ 130, 0, 16, 0, 29, 0,
+ 0, 0, 70, 2, 16, 0,
+ 35, 0, 0, 0, 70, 2,
+ 16, 0, 35, 0, 0, 0,
+ 0, 0, 0, 7, 130, 0,
+ 16, 0, 35, 0, 0, 0,
+ 58, 0, 16, 0, 29, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 52, 16, 0,
+ 0, 7, 130, 0, 16, 0,
+ 29, 0, 0, 0, 70, 2,
+ 16, 0, 37, 0, 0, 0,
+ 70, 2, 16, 0, 37, 0,
+ 0, 0, 56, 0, 0, 7,
+ 130, 0, 16, 0, 31, 0,
+ 0, 0, 58, 0, 16, 0,
+ 28, 0, 0, 0, 58, 0,
+ 16, 0, 28, 0, 0, 0,
+ 50, 0, 0, 10, 130, 0,
+ 16, 0, 29, 0, 0, 0,
+ 58, 0, 16, 128, 65, 0,
+ 0, 0, 28, 0, 0, 0,
+ 58, 0, 16, 0, 28, 0,
+ 0, 0, 58, 0, 16, 0,
+ 29, 0, 0, 0, 16, 0,
+ 0, 7, 130, 0, 16, 0,
+ 32, 0, 0, 0, 70, 2,
+ 16, 0, 37, 0, 0, 0,
+ 70, 2, 16, 0, 35, 0,
+ 0, 0, 50, 0, 0, 10,
+ 130, 0, 16, 0, 32, 0,
+ 0, 0, 58, 0, 16, 128,
+ 65, 0, 0, 0, 28, 0,
+ 0, 0, 58, 0, 16, 0,
+ 28, 0, 0, 0, 58, 0,
+ 16, 0, 32, 0, 0, 0,
+ 50, 0, 0, 10, 130, 0,
+ 16, 0, 34, 0, 0, 0,
+ 58, 0, 16, 128, 65, 0,
+ 0, 0, 28, 0, 0, 0,
+ 58, 0, 16, 0, 28, 0,
+ 0, 0, 58, 0, 16, 0,
+ 35, 0, 0, 0, 56, 0,
+ 0, 7, 130, 0, 16, 0,
+ 36, 0, 0, 0, 58, 0,
+ 16, 0, 29, 0, 0, 0,
+ 58, 0, 16, 0, 34, 0,
+ 0, 0, 50, 0, 0, 10,
+ 130, 0, 16, 0, 36, 0,
+ 0, 0, 58, 0, 16, 0,
+ 32, 0, 0, 0, 58, 0,
+ 16, 0, 32, 0, 0, 0,
+ 58, 0, 16, 128, 65, 0,
+ 0, 0, 36, 0, 0, 0,
+ 75, 0, 0, 5, 130, 0,
+ 16, 0, 37, 0, 0, 0,
+ 58, 0, 16, 0, 36, 0,
+ 0, 0, 0, 0, 0, 8,
+ 130, 0, 16, 0, 29, 0,
+ 0, 0, 58, 0, 16, 128,
+ 65, 0, 0, 0, 29, 0,
+ 0, 0, 58, 0, 16, 0,
+ 32, 0, 0, 0, 0, 0,
+ 0, 8, 130, 0, 16, 0,
+ 32, 0, 0, 0, 58, 0,
+ 16, 0, 32, 0, 0, 0,
+ 58, 0, 16, 128, 65, 0,
+ 0, 0, 34, 0, 0, 0,
+ 0, 0, 0, 7, 130, 0,
+ 16, 0, 32, 0, 0, 0,
+ 58, 0, 16, 0, 29, 0,
+ 0, 0, 58, 0, 16, 0,
+ 32, 0, 0, 0, 0, 0,
+ 0, 7, 130, 0, 16, 0,
+ 34, 0, 0, 0, 58, 0,
+ 16, 0, 37, 0, 0, 0,
+ 58, 0, 16, 0, 29, 0,
+ 0, 0, 51, 0, 0, 7,
+ 130, 0, 16, 0, 34, 0,
+ 0, 0, 58, 0, 16, 0,
+ 34, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 14, 0, 0, 7, 130, 0,
+ 16, 0, 34, 0, 0, 0,
+ 58, 0, 16, 0, 34, 0,
+ 0, 0, 58, 0, 16, 0,
+ 32, 0, 0, 0, 49, 0,
+ 0, 7, 18, 0, 16, 0,
+ 38, 0, 0, 0, 58, 0,
+ 16, 0, 34, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 128, 63, 49, 0, 0, 7,
+ 130, 0, 16, 0, 29, 0,
+ 0, 0, 58, 0, 16, 0,
+ 29, 0, 0, 0, 58, 0,
+ 16, 0, 37, 0, 0, 0,
+ 1, 0, 0, 7, 130, 0,
+ 16, 0, 29, 0, 0, 0,
+ 58, 0, 16, 0, 29, 0,
+ 0, 0, 10, 0, 16, 0,
+ 38, 0, 0, 0, 56, 0,
+ 0, 7, 130, 0, 16, 0,
+ 31, 0, 0, 0, 58, 0,
+ 16, 0, 31, 0, 0, 0,
+ 58, 0, 16, 0, 32, 0,
+ 0, 0, 56, 0, 0, 7,
+ 130, 0, 16, 0, 31, 0,
+ 0, 0, 58, 0, 16, 0,
+ 31, 0, 0, 0, 1, 64,
+ 0, 0, 236, 81, 184, 190,
+ 49, 0, 0, 7, 130, 0,
+ 16, 0, 31, 0, 0, 0,
+ 58, 0, 16, 0, 31, 0,
+ 0, 0, 58, 0, 16, 0,
+ 36, 0, 0, 0, 1, 0,
+ 0, 7, 130, 0, 16, 0,
+ 29, 0, 0, 0, 58, 0,
+ 16, 0, 29, 0, 0, 0,
+ 58, 0, 16, 0, 31, 0,
+ 0, 0, 49, 0, 0, 7,
+ 130, 0, 16, 0, 31, 0,
+ 0, 0, 58, 0, 16, 0,
+ 32, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 180,
+ 1, 0, 0, 7, 130, 0,
+ 16, 0, 29, 0, 0, 0,
+ 58, 0, 16, 0, 29, 0,
+ 0, 0, 58, 0, 16, 0,
+ 31, 0, 0, 0, 0, 0,
+ 0, 8, 114, 0, 16, 0,
+ 37, 0, 0, 0, 70, 2,
+ 16, 128, 65, 0, 0, 0,
+ 35, 0, 0, 0, 70, 2,
+ 16, 0, 37, 0, 0, 0,
+ 0, 0, 0, 8, 130, 0,
+ 16, 0, 31, 0, 0, 0,
+ 58, 0, 16, 128, 65, 0,
+ 0, 0, 34, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 128, 63, 56, 0, 0, 7,
+ 130, 0, 16, 0, 32, 0,
+ 0, 0, 58, 0, 16, 0,
+ 31, 0, 0, 0, 58, 0,
+ 16, 0, 32, 0, 0, 0,
+ 14, 0, 0, 7, 130, 0,
+ 16, 0, 32, 0, 0, 0,
+ 58, 0, 16, 0, 37, 0,
+ 0, 0, 58, 0, 16, 0,
+ 32, 0, 0, 0, 0, 0,
+ 0, 8, 130, 0, 16, 0,
+ 32, 0, 0, 0, 58, 0,
+ 16, 128, 65, 0, 0, 0,
+ 32, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 128, 63,
+ 14, 0, 0, 7, 130, 0,
+ 16, 0, 31, 0, 0, 0,
+ 58, 0, 16, 0, 31, 0,
+ 0, 0, 58, 0, 16, 0,
+ 32, 0, 0, 0, 50, 0,
+ 0, 9, 114, 0, 16, 0,
+ 37, 0, 0, 0, 70, 2,
+ 16, 0, 37, 0, 0, 0,
+ 246, 15, 16, 0, 31, 0,
+ 0, 0, 70, 2, 16, 0,
+ 35, 0, 0, 0, 0, 0,
+ 0, 7, 114, 0, 16, 0,
+ 38, 0, 0, 0, 70, 2,
+ 16, 0, 36, 0, 0, 0,
+ 70, 2, 16, 0, 37, 0,
+ 0, 0, 16, 0, 0, 7,
+ 130, 0, 16, 0, 31, 0,
+ 0, 0, 70, 2, 16, 0,
+ 37, 0, 0, 0, 70, 2,
+ 16, 0, 37, 0, 0, 0,
+ 0, 0, 0, 7, 130, 0,
+ 16, 0, 37, 0, 0, 0,
+ 58, 0, 16, 0, 31, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 52, 55, 0,
+ 0, 9, 114, 0, 16, 0,
+ 31, 0, 0, 0, 246, 15,
+ 16, 0, 29, 0, 0, 0,
+ 70, 2, 16, 0, 38, 0,
+ 0, 0, 70, 2, 16, 0,
+ 31, 0, 0, 0, 55, 0,
+ 0, 9, 242, 0, 16, 0,
+ 35, 0, 0, 0, 246, 15,
+ 16, 0, 29, 0, 0, 0,
+ 70, 14, 16, 0, 37, 0,
+ 0, 0, 70, 14, 16, 0,
+ 35, 0, 0, 0, 68, 0,
+ 0, 5, 130, 0, 16, 0,
+ 29, 0, 0, 0, 58, 0,
+ 16, 0, 35, 0, 0, 0,
+ 56, 0, 0, 7, 130, 0,
+ 16, 0, 31, 0, 0, 0,
+ 58, 0, 16, 0, 28, 0,
+ 0, 0, 58, 0, 16, 0,
+ 29, 0, 0, 0, 49, 0,
+ 0, 7, 130, 0, 16, 0,
+ 31, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 128, 63,
+ 58, 0, 16, 0, 31, 0,
+ 0, 0, 50, 0, 0, 9,
+ 130, 0, 16, 0, 28, 0,
+ 0, 0, 58, 0, 16, 0,
+ 29, 0, 0, 0, 58, 0,
+ 16, 0, 28, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 128, 191, 50, 0, 0, 9,
+ 114, 0, 16, 0, 35, 0,
+ 0, 0, 70, 2, 16, 0,
+ 35, 0, 0, 0, 246, 15,
+ 16, 0, 28, 0, 0, 0,
+ 70, 2, 16, 0, 32, 0,
+ 0, 0, 0, 0, 0, 8,
+ 114, 0, 16, 0, 34, 0,
+ 0, 0, 70, 2, 16, 128,
+ 65, 0, 0, 0, 34, 0,
+ 0, 0, 70, 2, 16, 0,
+ 36, 0, 0, 0, 0, 0,
+ 0, 7, 114, 0, 16, 0,
+ 34, 0, 0, 0, 70, 2,
+ 16, 0, 33, 0, 0, 0,
+ 70, 2, 16, 0, 34, 0,
+ 0, 0, 55, 0, 0, 9,
+ 114, 0, 16, 0, 34, 0,
+ 0, 0, 246, 15, 16, 0,
+ 19, 0, 0, 0, 70, 2,
+ 16, 0, 34, 0, 0, 0,
+ 70, 2, 16, 0, 33, 0,
+ 0, 0, 30, 0, 0, 7,
+ 130, 0, 16, 0, 34, 0,
+ 0, 0, 58, 0, 16, 0,
+ 33, 0, 0, 0, 1, 64,
+ 0, 0, 1, 0, 0, 0,
+ 55, 0, 0, 9, 114, 0,
+ 16, 0, 32, 0, 0, 0,
+ 246, 15, 16, 0, 31, 0,
+ 0, 0, 70, 2, 16, 0,
+ 35, 0, 0, 0, 70, 2,
+ 16, 0, 32, 0, 0, 0,
+ 55, 0, 0, 9, 242, 0,
+ 16, 0, 33, 0, 0, 0,
+ 246, 15, 16, 0, 31, 0,
+ 0, 0, 70, 14, 16, 0,
+ 34, 0, 0, 0, 70, 14,
+ 16, 0, 33, 0, 0, 0,
+ 30, 0, 0, 7, 130, 0,
+ 16, 0, 27, 0, 0, 0,
+ 58, 0, 16, 0, 27, 0,
+ 0, 0, 1, 64, 0, 0,
+ 1, 0, 0, 0, 22, 0,
+ 0, 1, 31, 0, 4, 3,
+ 58, 0, 16, 0, 33, 0,
+ 0, 0, 86, 0, 0, 5,
+ 130, 0, 16, 0, 23, 0,
+ 0, 0, 58, 0, 16, 0,
+ 33, 0, 0, 0, 14, 0,
+ 0, 10, 130, 0, 16, 0,
+ 23, 0, 0, 0, 2, 64,
+ 0, 0, 0, 0, 128, 63,
+ 0, 0, 128, 63, 0, 0,
+ 128, 63, 0, 0, 128, 63,
+ 58, 0, 16, 0, 23, 0,
+ 0, 0, 31, 0, 4, 3,
+ 58, 0, 16, 0, 19, 0,
+ 0, 0, 165, 0, 0, 7,
+ 130, 0, 16, 0, 27, 0,
+ 0, 0, 1, 64, 0, 0,
+ 80, 0, 0, 0, 6, 240,
+ 17, 0, 0, 0, 0, 0,
+ 16, 0, 0, 7, 130, 0,
+ 16, 0, 28, 0, 0, 0,
+ 70, 2, 16, 0, 32, 0,
+ 0, 0, 70, 2, 16, 0,
+ 32, 0, 0, 0, 0, 0,
+ 0, 7, 130, 0, 16, 0,
+ 29, 0, 0, 0, 58, 0,
+ 16, 0, 28, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 52, 68, 0, 0, 5,
+ 130, 0, 16, 0, 29, 0,
+ 0, 0, 58, 0, 16, 0,
+ 29, 0, 0, 0, 56, 0,
+ 0, 7, 114, 0, 16, 0,
+ 31, 0, 0, 0, 246, 15,
+ 16, 0, 29, 0, 0, 0,
+ 70, 2, 16, 0, 32, 0,
+ 0, 0, 0, 0, 0, 8,
+ 114, 0, 16, 0, 34, 0,
+ 0, 0, 70, 2, 16, 0,
+ 25, 0, 0, 0, 70, 2,
+ 16, 128, 65, 0, 0, 0,
+ 27, 0, 0, 0, 50, 0,
+ 0, 10, 114, 0, 16, 0,
+ 34, 0, 0, 0, 70, 2,
+ 16, 128, 65, 0, 0, 0,
+ 33, 0, 0, 0, 246, 15,
+ 16, 0, 23, 0, 0, 0,
+ 70, 2, 16, 0, 34, 0,
+ 0, 0, 16, 0, 0, 7,
+ 130, 0, 16, 0, 31, 0,
+ 0, 0, 70, 2, 16, 0,
+ 34, 0, 0, 0, 70, 2,
+ 16, 0, 31, 0, 0, 0,
+ 50, 0, 0, 10, 114, 0,
+ 16, 0, 31, 0, 0, 0,
+ 246, 15, 16, 128, 65, 0,
+ 0, 0, 31, 0, 0, 0,
+ 70, 2, 16, 0, 31, 0,
+ 0, 0, 70, 2, 16, 0,
+ 34, 0, 0, 0, 16, 0,
+ 0, 7, 130, 0, 16, 0,
+ 31, 0, 0, 0, 70, 2,
+ 16, 0, 31, 0, 0, 0,
+ 70, 2, 16, 0, 31, 0,
+ 0, 0, 0, 0, 0, 7,
+ 130, 0, 16, 0, 31, 0,
+ 0, 0, 58, 0, 16, 0,
+ 31, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 52,
+ 68, 0, 0, 5, 130, 0,
+ 16, 0, 31, 0, 0, 0,
+ 58, 0, 16, 0, 31, 0,
+ 0, 0, 56, 0, 0, 8,
+ 130, 0, 16, 0, 27, 0,
+ 0, 0, 58, 0, 16, 128,
+ 65, 0, 0, 0, 27, 0,
+ 0, 0, 58, 0, 16, 0,
+ 28, 0, 0, 0, 56, 0,
+ 0, 7, 130, 0, 16, 0,
+ 27, 0, 0, 0, 58, 0,
+ 16, 0, 29, 0, 0, 0,
+ 58, 0, 16, 0, 27, 0,
+ 0, 0, 56, 0, 0, 7,
+ 130, 0, 16, 0, 27, 0,
+ 0, 0, 58, 0, 16, 0,
+ 23, 0, 0, 0, 58, 0,
+ 16, 0, 27, 0, 0, 0,
+ 56, 0, 0, 7, 130, 0,
+ 16, 0, 27, 0, 0, 0,
+ 58, 0, 16, 0, 31, 0,
+ 0, 0, 58, 0, 16, 0,
+ 27, 0, 0, 0, 52, 0,
+ 0, 7, 130, 0, 16, 0,
+ 27, 0, 0, 0, 58, 0,
+ 16, 0, 27, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 128, 191, 50, 0, 0, 10,
+ 114, 0, 16, 0, 27, 0,
+ 0, 0, 70, 2, 16, 128,
+ 65, 0, 0, 0, 31, 0,
+ 0, 0, 246, 15, 16, 0,
+ 27, 0, 0, 0, 70, 2,
+ 16, 0, 27, 0, 0, 0,
+ 168, 0, 0, 9, 114, 224,
+ 17, 0, 0, 0, 0, 0,
+ 58, 0, 16, 0, 22, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 70, 2,
+ 16, 0, 27, 0, 0, 0,
+ 21, 0, 0, 1, 50, 0,
+ 0, 9, 114, 0, 16, 0,
+ 25, 0, 0, 0, 70, 2,
+ 16, 0, 32, 0, 0, 0,
+ 246, 15, 16, 0, 23, 0,
+ 0, 0, 70, 2, 16, 0,
+ 25, 0, 0, 0, 168, 0,
+ 0, 9, 242, 224, 17, 0,
+ 0, 0, 0, 0, 10, 0,
+ 16, 0, 21, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 70, 14, 16, 0,
+ 25, 0, 0, 0, 21, 0,
+ 0, 1, 30, 0, 0, 7,
+ 130, 0, 16, 0, 20, 0,
+ 0, 0, 58, 0, 16, 0,
+ 20, 0, 0, 0, 1, 64,
+ 0, 0, 0, 4, 0, 0,
+ 22, 0, 0, 1, 18, 0,
+ 0, 1, 165, 0, 0, 7,
+ 194, 0, 16, 0, 19, 0,
+ 0, 0, 1, 64, 0, 0,
+ 76, 0, 0, 0, 6, 244,
+ 17, 0, 0, 0, 0, 0,
+ 49, 0, 0, 10, 194, 0,
+ 16, 0, 19, 0, 0, 0,
+ 2, 64, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 246, 11, 16, 0,
+ 19, 0, 0, 0, 165, 0,
+ 0, 7, 18, 0, 16, 0,
+ 20, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 0, 0,
+ 0, 0, 165, 0, 0, 7,
+ 98, 0, 16, 0, 20, 0,
+ 0, 0, 1, 64, 0, 0,
+ 48, 0, 0, 0, 6, 241,
+ 17, 0, 1, 0, 0, 0,
+ 165, 0, 0, 7, 130, 0,
+ 16, 0, 20, 0, 0, 0,
+ 1, 64, 0, 0, 80, 0,
+ 0, 0, 6, 240, 17, 0,
+ 0, 0, 0, 0, 49, 0,
+ 0, 7, 18, 0, 16, 0,
+ 21, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 58, 0, 16, 0, 20, 0,
+ 0, 0, 165, 0, 0, 7,
+ 98, 0, 16, 0, 23, 0,
+ 0, 0, 1, 64, 0, 0,
+ 44, 0, 0, 0, 6, 241,
+ 17, 0, 0, 0, 0, 0,
+ 54, 0, 0, 4, 130, 0,
+ 16, 0, 22, 0, 0, 0,
+ 10, 32, 2, 0, 48, 0,
+ 0, 1, 80, 0, 0, 7,
+ 130, 0, 16, 0, 23, 0,
+ 0, 0, 58, 0, 16, 0,
+ 22, 0, 0, 0, 10, 0,
+ 16, 0, 20, 0, 0, 0,
+ 3, 0, 4, 3, 58, 0,
+ 16, 0, 23, 0, 0, 0,
+ 30, 0, 0, 7, 130, 0,
+ 16, 0, 23, 0, 0, 0,
+ 26, 0, 16, 0, 0, 0,
+ 0, 0, 58, 0, 16, 0,
+ 22, 0, 0, 0, 167, 0,
+ 0, 139, 2, 131, 0, 128,
+ 131, 153, 25, 0, 242, 0,
+ 16, 0, 25, 0, 0, 0,
+ 58, 0, 16, 0, 23, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 70, 238,
+ 17, 0, 0, 0, 0, 0,
+ 54, 0, 0, 8, 242, 0,
+ 16, 0, 31, 0, 0, 0,
+ 2, 64, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 54, 0, 0, 8,
+ 242, 0, 16, 0, 27, 0,
+ 0, 0, 2, 64, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 48, 0,
+ 0, 1, 80, 0, 0, 7,
+ 130, 0, 16, 0, 28, 0,
+ 0, 0, 58, 0, 16, 0,
+ 27, 0, 0, 0, 26, 0,
+ 16, 0, 23, 0, 0, 0,
+ 3, 0, 4, 3, 58, 0,
+ 16, 0, 28, 0, 0, 0,
+ 30, 0, 0, 7, 130, 0,
+ 16, 0, 28, 0, 0, 0,
+ 42, 0, 16, 0, 23, 0,
+ 0, 0, 58, 0, 16, 0,
+ 27, 0, 0, 0, 167, 0,
+ 0, 139, 2, 67, 0, 128,
+ 131, 153, 25, 0, 50, 0,
+ 16, 0, 34, 0, 0, 0,
+ 58, 0, 16, 0, 28, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 70, 112,
+ 16, 0, 6, 0, 0, 0,
+ 30, 0, 0, 7, 242, 0,
+ 16, 0, 34, 0, 0, 0,
+ 150, 9, 16, 0, 20, 0,
+ 0, 0, 6, 5, 16, 0,
+ 34, 0, 0, 0, 167, 0,
+ 0, 139, 2, 131, 0, 128,
+ 131, 153, 25, 0, 242, 0,
+ 16, 0, 35, 0, 0, 0,
+ 10, 0, 16, 0, 34, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 70, 126,
+ 16, 0, 7, 0, 0, 0,
+ 167, 0, 0, 139, 2, 131,
+ 0, 128, 131, 153, 25, 0,
+ 242, 0, 16, 0, 36, 0,
+ 0, 0, 26, 0, 16, 0,
+ 34, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 70, 126, 16, 0, 7, 0,
+ 0, 0, 0, 0, 0, 8,
+ 242, 0, 16, 0, 37, 0,
+ 0, 0, 70, 14, 16, 128,
+ 65, 0, 0, 0, 35, 0,
+ 0, 0, 70, 14, 16, 0,
+ 36, 0, 0, 0, 50, 0,
+ 0, 9, 242, 0, 16, 0,
+ 37, 0, 0, 0, 6, 0,
+ 16, 0, 19, 0, 0, 0,
+ 70, 14, 16, 0, 37, 0,
+ 0, 0, 70, 14, 16, 0,
+ 35, 0, 0, 0, 167, 0,
+ 0, 139, 2, 131, 0, 128,
+ 131, 153, 25, 0, 242, 0,
+ 16, 0, 38, 0, 0, 0,
+ 42, 0, 16, 0, 34, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 70, 126,
+ 16, 0, 7, 0, 0, 0,
+ 167, 0, 0, 139, 2, 131,
+ 0, 128, 131, 153, 25, 0,
+ 242, 0, 16, 0, 34, 0,
+ 0, 0, 58, 0, 16, 0,
+ 34, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 70, 126, 16, 0, 7, 0,
+ 0, 0, 0, 0, 0, 8,
+ 242, 0, 16, 0, 39, 0,
+ 0, 0, 70, 14, 16, 128,
+ 65, 0, 0, 0, 38, 0,
+ 0, 0, 70, 14, 16, 0,
+ 34, 0, 0, 0, 50, 0,
+ 0, 9, 242, 0, 16, 0,
+ 39, 0, 0, 0, 6, 0,
+ 16, 0, 19, 0, 0, 0,
+ 70, 14, 16, 0, 39, 0,
+ 0, 0, 70, 14, 16, 0,
+ 38, 0, 0, 0, 52, 0,
+ 0, 7, 130, 0, 16, 0,
+ 37, 0, 0, 0, 58, 0,
+ 16, 0, 37, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 52, 0, 0, 7,
+ 130, 0, 16, 0, 39, 0,
+ 0, 0, 58, 0, 16, 0,
+ 39, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 8, 242, 0,
+ 16, 0, 40, 0, 0, 0,
+ 70, 14, 16, 128, 65, 0,
+ 0, 0, 37, 0, 0, 0,
+ 70, 14, 16, 0, 39, 0,
+ 0, 0, 56, 0, 0, 10,
+ 242, 0, 16, 0, 41, 0,
+ 0, 0, 70, 14, 16, 0,
+ 40, 0, 0, 0, 2, 64,
+ 0, 0, 0, 0, 0, 63,
+ 0, 0, 0, 63, 0, 0,
+ 0, 63, 0, 0, 0, 63,
+ 16, 0, 0, 7, 130, 0,
+ 16, 0, 28, 0, 0, 0,
+ 70, 2, 16, 0, 41, 0,
+ 0, 0, 70, 2, 16, 0,
+ 41, 0, 0, 0, 50, 0,
+ 0, 10, 130, 0, 16, 0,
+ 29, 0, 0, 0, 58, 0,
+ 16, 128, 65, 0, 0, 0,
+ 41, 0, 0, 0, 58, 0,
+ 16, 0, 41, 0, 0, 0,
+ 58, 0, 16, 0, 28, 0,
+ 0, 0, 29, 0, 0, 7,
+ 130, 0, 16, 0, 32, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 58, 0,
+ 16, 0, 29, 0, 0, 0,
+ 31, 0, 4, 3, 58, 0,
+ 16, 0, 32, 0, 0, 0,
+ 30, 0, 0, 7, 130, 0,
+ 16, 0, 32, 0, 0, 0,
+ 58, 0, 16, 0, 27, 0,
+ 0, 0, 1, 64, 0, 0,
+ 1, 0, 0, 0, 54, 0,
+ 0, 5, 130, 0, 16, 0,
+ 27, 0, 0, 0, 58, 0,
+ 16, 0, 32, 0, 0, 0,
+ 7, 0, 0, 1, 21, 0,
+ 0, 1, 68, 0, 0, 5,
+ 130, 0, 16, 0, 32, 0,
+ 0, 0, 58, 0, 16, 0,
+ 28, 0, 0, 0, 68, 0,
+ 0, 5, 130, 0, 16, 0,
+ 29, 0, 0, 0, 58, 0,
+ 16, 0, 29, 0, 0, 0,
+ 56, 0, 0, 7, 130, 0,
+ 16, 0, 28, 0, 0, 0,
+ 58, 0, 16, 0, 28, 0,
+ 0, 0, 58, 0, 16, 0,
+ 32, 0, 0, 0, 0, 0,
+ 0, 7, 114, 0, 16, 0,
+ 40, 0, 0, 0, 70, 2,
+ 16, 0, 37, 0, 0, 0,
+ 70, 2, 16, 0, 39, 0,
+ 0, 0, 50, 0, 0, 9,
+ 130, 0, 16, 0, 34, 0,
+ 0, 0, 58, 0, 16, 0,
+ 40, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 63,
+ 58, 0, 16, 0, 37, 0,
+ 0, 0, 56, 0, 0, 7,
+ 130, 0, 16, 0, 34, 0,
+ 0, 0, 58, 0, 16, 0,
+ 29, 0, 0, 0, 58, 0,
+ 16, 0, 34, 0, 0, 0,
+ 56, 0, 0, 7, 130, 0,
+ 16, 0, 34, 0, 0, 0,
+ 58, 0, 16, 0, 28, 0,
+ 0, 0, 58, 0, 16, 0,
+ 34, 0, 0, 0, 56, 0,
+ 0, 7, 114, 0, 16, 0,
+ 41, 0, 0, 0, 246, 15,
+ 16, 0, 32, 0, 0, 0,
+ 70, 2, 16, 0, 41, 0,
+ 0, 0, 56, 0, 0, 7,
+ 130, 0, 16, 0, 29, 0,
+ 0, 0, 58, 0, 16, 0,
+ 29, 0, 0, 0, 58, 0,
+ 16, 0, 41, 0, 0, 0,
+ 50, 0, 0, 13, 114, 0,
+ 16, 0, 40, 0, 0, 0,
+ 70, 2, 16, 128, 65, 0,
+ 0, 0, 40, 0, 0, 0,
+ 2, 64, 0, 0, 0, 0,
+ 0, 63, 0, 0, 0, 63,
+ 0, 0, 0, 63, 0, 0,
+ 0, 0, 70, 2, 16, 0,
+ 25, 0, 0, 0, 16, 0,
+ 0, 7, 130, 0, 16, 0,
+ 35, 0, 0, 0, 70, 2,
+ 16, 0, 40, 0, 0, 0,
+ 70, 2, 16, 0, 41, 0,
+ 0, 0, 50, 0, 0, 9,
+ 130, 0, 16, 0, 34, 0,
+ 0, 0, 58, 0, 16, 0,
+ 35, 0, 0, 0, 58, 0,
+ 16, 0, 29, 0, 0, 0,
+ 58, 0, 16, 0, 34, 0,
+ 0, 0, 52, 0, 0, 7,
+ 130, 0, 16, 0, 34, 0,
+ 0, 0, 58, 0, 16, 0,
+ 34, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 16, 0, 0, 7, 130, 0,
+ 16, 0, 36, 0, 0, 0,
+ 70, 2, 16, 0, 40, 0,
+ 0, 0, 70, 2, 16, 0,
+ 40, 0, 0, 0, 50, 0,
+ 0, 10, 130, 0, 16, 0,
+ 36, 0, 0, 0, 58, 0,
+ 16, 128, 65, 0, 0, 0,
+ 35, 0, 0, 0, 58, 0,
+ 16, 0, 35, 0, 0, 0,
+ 58, 0, 16, 0, 36, 0,
+ 0, 0, 56, 0, 0, 7,
+ 130, 0, 16, 0, 37, 0,
+ 0, 0, 58, 0, 16, 0,
+ 34, 0, 0, 0, 58, 0,
+ 16, 0, 34, 0, 0, 0,
+ 49, 0, 0, 7, 130, 0,
+ 16, 0, 37, 0, 0, 0,
+ 58, 0, 16, 0, 37, 0,
+ 0, 0, 58, 0, 16, 0,
+ 36, 0, 0, 0, 31, 0,
+ 4, 3, 58, 0, 16, 0,
+ 37, 0, 0, 0, 30, 0,
+ 0, 7, 130, 0, 16, 0,
+ 37, 0, 0, 0, 58, 0,
+ 16, 0, 27, 0, 0, 0,
+ 1, 64, 0, 0, 1, 0,
+ 0, 0, 54, 0, 0, 5,
+ 130, 0, 16, 0, 27, 0,
+ 0, 0, 58, 0, 16, 0,
+ 37, 0, 0, 0, 7, 0,
+ 0, 1, 21, 0, 0, 1,
+ 56, 0, 0, 7, 130, 0,
+ 16, 0, 32, 0, 0, 0,
+ 58, 0, 16, 0, 32, 0,
+ 0, 0, 58, 0, 16, 0,
+ 41, 0, 0, 0, 50, 0,
+ 0, 10, 130, 0, 16, 0,
+ 32, 0, 0, 0, 58, 0,
+ 16, 128, 65, 0, 0, 0,
+ 32, 0, 0, 0, 58, 0,
+ 16, 0, 32, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 128, 63, 52, 0, 0, 7,
+ 130, 0, 16, 0, 36, 0,
+ 0, 0, 58, 0, 16, 0,
+ 36, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 52,
+ 68, 0, 0, 5, 130, 0,
+ 16, 0, 37, 0, 0, 0,
+ 58, 0, 16, 0, 36, 0,
+ 0, 0, 56, 0, 0, 7,
+ 130, 0, 16, 0, 29, 0,
+ 0, 0, 58, 0, 16, 0,
+ 29, 0, 0, 0, 58, 0,
+ 16, 0, 36, 0, 0, 0,
+ 50, 0, 0, 9, 130, 0,
+ 16, 0, 29, 0, 0, 0,
+ 58, 0, 16, 0, 29, 0,
+ 0, 0, 58, 0, 16, 0,
+ 37, 0, 0, 0, 58, 0,
+ 16, 0, 35, 0, 0, 0,
+ 49, 0, 0, 8, 130, 0,
+ 16, 0, 28, 0, 0, 0,
+ 58, 0, 16, 128, 129, 0,
+ 0, 0, 29, 0, 0, 0,
+ 58, 0, 16, 0, 28, 0,
+ 0, 0, 50, 0, 0, 10,
+ 114, 0, 16, 0, 40, 0,
+ 0, 0, 246, 15, 16, 128,
+ 65, 0, 0, 0, 29, 0,
+ 0, 0, 70, 2, 16, 0,
+ 41, 0, 0, 0, 70, 2,
+ 16, 0, 40, 0, 0, 0,
+ 56, 0, 0, 7, 130, 0,
+ 16, 0, 29, 0, 0, 0,
+ 58, 0, 16, 0, 34, 0,
+ 0, 0, 58, 0, 16, 0,
+ 37, 0, 0, 0, 50, 0,
+ 0, 10, 130, 0, 16, 0,
+ 29, 0, 0, 0, 58, 0,
+ 16, 0, 29, 0, 0, 0,
+ 58, 0, 16, 0, 32, 0,
+ 0, 0, 58, 0, 16, 128,
+ 65, 0, 0, 0, 32, 0,
+ 0, 0, 50, 0, 0, 9,
+ 114, 0, 16, 0, 40, 0,
+ 0, 0, 70, 2, 16, 0,
+ 40, 0, 0, 0, 246, 15,
+ 16, 0, 29, 0, 0, 0,
+ 70, 2, 16, 0, 27, 0,
+ 0, 0, 0, 0, 0, 8,
+ 114, 0, 16, 0, 36, 0,
+ 0, 0, 70, 2, 16, 128,
+ 65, 0, 0, 0, 35, 0,
+ 0, 0, 70, 2, 16, 0,
+ 36, 0, 0, 0, 50, 0,
+ 0, 9, 114, 0, 16, 0,
+ 35, 0, 0, 0, 86, 5,
+ 16, 0, 19, 0, 0, 0,
+ 70, 2, 16, 0, 36, 0,
+ 0, 0, 70, 2, 16, 0,
+ 35, 0, 0, 0, 0, 0,
+ 0, 8, 114, 0, 16, 0,
+ 34, 0, 0, 0, 70, 2,
+ 16, 128, 65, 0, 0, 0,
+ 38, 0, 0, 0, 70, 2,
+ 16, 0, 34, 0, 0, 0,
+ 50, 0, 0, 9, 114, 0,
+ 16, 0, 34, 0, 0, 0,
+ 86, 5, 16, 0, 19, 0,
+ 0, 0, 70, 2, 16, 0,
+ 34, 0, 0, 0, 70, 2,
+ 16, 0, 38, 0, 0, 0,
+ 50, 0, 0, 9, 130, 0,
+ 16, 0, 29, 0, 0, 0,
+ 58, 0, 16, 0, 35, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 63, 1, 64,
+ 0, 0, 0, 0, 0, 63,
+ 0, 0, 0, 8, 114, 0,
+ 16, 0, 35, 0, 0, 0,
+ 70, 2, 16, 128, 65, 0,
+ 0, 0, 35, 0, 0, 0,
+ 70, 2, 16, 0, 37, 0,
+ 0, 0, 0, 0, 0, 8,
+ 114, 0, 16, 0, 34, 0,
+ 0, 0, 70, 2, 16, 128,
+ 65, 0, 0, 0, 34, 0,
+ 0, 0, 70, 2, 16, 0,
+ 39, 0, 0, 0, 0, 0,
+ 0, 8, 114, 0, 16, 0,
+ 34, 0, 0, 0, 70, 2,
+ 16, 128, 65, 0, 0, 0,
+ 35, 0, 0, 0, 70, 2,
+ 16, 0, 34, 0, 0, 0,
+ 50, 0, 0, 9, 114, 0,
+ 16, 0, 34, 0, 0, 0,
+ 246, 15, 16, 0, 29, 0,
+ 0, 0, 70, 2, 16, 0,
+ 34, 0, 0, 0, 70, 2,
+ 16, 0, 35, 0, 0, 0,
+ 0, 0, 0, 7, 114, 0,
+ 16, 0, 34, 0, 0, 0,
+ 70, 2, 16, 0, 31, 0,
+ 0, 0, 70, 2, 16, 0,
+ 34, 0, 0, 0, 55, 0,
+ 0, 9, 114, 0, 16, 0,
+ 34, 0, 0, 0, 6, 0,
+ 16, 0, 21, 0, 0, 0,
+ 70, 2, 16, 0, 34, 0,
+ 0, 0, 70, 2, 16, 0,
+ 31, 0, 0, 0, 30, 0,
+ 0, 7, 130, 0, 16, 0,
+ 34, 0, 0, 0, 58, 0,
+ 16, 0, 31, 0, 0, 0,
+ 1, 64, 0, 0, 1, 0,
+ 0, 0, 55, 0, 0, 9,
+ 114, 0, 16, 0, 27, 0,
+ 0, 0, 246, 15, 16, 0,
+ 28, 0, 0, 0, 70, 2,
+ 16, 0, 40, 0, 0, 0,
+ 70, 2, 16, 0, 27, 0,
+ 0, 0, 55, 0, 0, 9,
+ 242, 0, 16, 0, 31, 0,
+ 0, 0, 246, 15, 16, 0,
+ 28, 0, 0, 0, 70, 14,
+ 16, 0, 34, 0, 0, 0,
+ 70, 14, 16, 0, 31, 0,
+ 0, 0, 30, 0, 0, 7,
+ 130, 0, 16, 0, 27, 0,
+ 0, 0, 58, 0, 16, 0,
+ 27, 0, 0, 0, 1, 64,
+ 0, 0, 1, 0, 0, 0,
+ 22, 0, 0, 1, 165, 0,
+ 0, 7, 130, 0, 16, 0,
+ 27, 0, 0, 0, 1, 64,
+ 0, 0, 52, 0, 0, 0,
+ 6, 240, 17, 0, 0, 0,
+ 0, 0, 54, 0, 0, 5,
+ 114, 0, 16, 0, 34, 0,
+ 0, 0, 70, 2, 16, 0,
+ 27, 0, 0, 0, 54, 0,
+ 0, 5, 242, 0, 16, 0,
+ 35, 0, 0, 0, 70, 14,
+ 16, 0, 31, 0, 0, 0,
+ 54, 0, 0, 5, 130, 0,
+ 16, 0, 28, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 48, 0, 0, 1,
+ 80, 0, 0, 7, 130, 0,
+ 16, 0, 29, 0, 0, 0,
+ 58, 0, 16, 0, 28, 0,
+ 0, 0, 58, 0, 16, 0,
+ 27, 0, 0, 0, 3, 0,
+ 4, 3, 58, 0, 16, 0,
+ 29, 0, 0, 0, 30, 0,
+ 0, 7, 50, 0, 16, 0,
+ 36, 0, 0, 0, 150, 5,
+ 16, 0, 20, 0, 0, 0,
+ 246, 15, 16, 0, 28, 0,
+ 0, 0, 167, 0, 0, 139,
+ 2, 131, 0, 128, 131, 153,
+ 25, 0, 242, 0, 16, 0,
+ 37, 0, 0, 0, 10, 0,
+ 16, 0, 36, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 70, 126, 16, 0,
+ 7, 0, 0, 0, 167, 0,
+ 0, 139, 2, 131, 0, 128,
+ 131, 153, 25, 0, 242, 0,
+ 16, 0, 36, 0, 0, 0,
+ 26, 0, 16, 0, 36, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 70, 126,
+ 16, 0, 7, 0, 0, 0,
+ 0, 0, 0, 8, 242, 0,
+ 16, 0, 36, 0, 0, 0,
+ 70, 14, 16, 128, 65, 0,
+ 0, 0, 37, 0, 0, 0,
+ 70, 14, 16, 0, 36, 0,
+ 0, 0, 50, 0, 0, 9,
+ 242, 0, 16, 0, 38, 0,
+ 0, 0, 6, 0, 16, 0,
+ 19, 0, 0, 0, 70, 14,
+ 16, 0, 36, 0, 0, 0,
+ 70, 14, 16, 0, 37, 0,
+ 0, 0, 52, 0, 0, 7,
+ 130, 0, 16, 0, 29, 0,
+ 0, 0, 58, 0, 16, 0,
+ 38, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 8, 114, 0,
+ 16, 0, 39, 0, 0, 0,
+ 70, 2, 16, 0, 25, 0,
+ 0, 0, 70, 2, 16, 128,
+ 65, 0, 0, 0, 38, 0,
+ 0, 0, 16, 0, 0, 7,
+ 130, 0, 16, 0, 32, 0,
+ 0, 0, 70, 2, 16, 0,
+ 39, 0, 0, 0, 70, 2,
+ 16, 0, 39, 0, 0, 0,
+ 0, 0, 0, 7, 130, 0,
+ 16, 0, 32, 0, 0, 0,
+ 58, 0, 16, 0, 32, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 52, 68, 0,
+ 0, 5, 130, 0, 16, 0,
+ 32, 0, 0, 0, 58, 0,
+ 16, 0, 32, 0, 0, 0,
+ 56, 0, 0, 7, 130, 0,
+ 16, 0, 34, 0, 0, 0,
+ 58, 0, 16, 0, 29, 0,
+ 0, 0, 58, 0, 16, 0,
+ 32, 0, 0, 0, 49, 0,
+ 0, 7, 130, 0, 16, 0,
+ 34, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 128, 63,
+ 58, 0, 16, 0, 34, 0,
+ 0, 0, 50, 0, 0, 9,
+ 130, 0, 16, 0, 29, 0,
+ 0, 0, 58, 0, 16, 0,
+ 32, 0, 0, 0, 58, 0,
+ 16, 0, 29, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 128, 191, 50, 0, 0, 9,
+ 114, 0, 16, 0, 39, 0,
+ 0, 0, 70, 2, 16, 0,
+ 39, 0, 0, 0, 246, 15,
+ 16, 0, 29, 0, 0, 0,
+ 70, 2, 16, 0, 34, 0,
+ 0, 0, 50, 0, 0, 9,
+ 114, 0, 16, 0, 36, 0,
+ 0, 0, 86, 5, 16, 0,
+ 19, 0, 0, 0, 70, 2,
+ 16, 0, 36, 0, 0, 0,
+ 70, 2, 16, 0, 37, 0,
+ 0, 0, 0, 0, 0, 8,
+ 114, 0, 16, 0, 36, 0,
+ 0, 0, 70, 2, 16, 128,
+ 65, 0, 0, 0, 36, 0,
+ 0, 0, 70, 2, 16, 0,
+ 38, 0, 0, 0, 0, 0,
+ 0, 7, 114, 0, 16, 0,
+ 36, 0, 0, 0, 70, 2,
+ 16, 0, 35, 0, 0, 0,
+ 70, 2, 16, 0, 36, 0,
+ 0, 0, 55, 0, 0, 9,
+ 114, 0, 16, 0, 36, 0,
+ 0, 0, 6, 0, 16, 0,
+ 21, 0, 0, 0, 70, 2,
+ 16, 0, 36, 0, 0, 0,
+ 70, 2, 16, 0, 35, 0,
+ 0, 0, 30, 0, 0, 7,
+ 130, 0, 16, 0, 36, 0,
+ 0, 0, 58, 0, 16, 0,
+ 35, 0, 0, 0, 1, 64,
+ 0, 0, 1, 0, 0, 0,
+ 55, 0, 0, 9, 114, 0,
+ 16, 0, 34, 0, 0, 0,
+ 246, 15, 16, 0, 34, 0,
+ 0, 0, 70, 2, 16, 0,
+ 39, 0, 0, 0, 70, 2,
+ 16, 0, 34, 0, 0, 0,
+ 55, 0, 0, 9, 242, 0,
+ 16, 0, 35, 0, 0, 0,
+ 246, 15, 16, 0, 34, 0,
+ 0, 0, 70, 14, 16, 0,
+ 36, 0, 0, 0, 70, 14,
+ 16, 0, 35, 0, 0, 0,
+ 30, 0, 0, 7, 130, 0,
+ 16, 0, 28, 0, 0, 0,
+ 58, 0, 16, 0, 28, 0,
+ 0, 0, 1, 64, 0, 0,
+ 1, 0, 0, 0, 22, 0,
+ 0, 1, 31, 0, 4, 3,
+ 58, 0, 16, 0, 35, 0,
+ 0, 0, 86, 0, 0, 5,
+ 130, 0, 16, 0, 27, 0,
+ 0, 0, 58, 0, 16, 0,
+ 35, 0, 0, 0, 14, 0,
+ 0, 10, 130, 0, 16, 0,
+ 27, 0, 0, 0, 2, 64,
+ 0, 0, 0, 0, 128, 63,
+ 0, 0, 128, 63, 0, 0,
+ 128, 63, 0, 0, 128, 63,
+ 58, 0, 16, 0, 27, 0,
+ 0, 0, 31, 0, 4, 3,
+ 42, 0, 16, 0, 19, 0,
+ 0, 0, 30, 0, 0, 7,
+ 130, 0, 16, 0, 28, 0,
+ 0, 0, 10, 0, 16, 0,
+ 0, 0, 0, 0, 58, 0,
+ 16, 0, 22, 0, 0, 0,
+ 167, 0, 0, 139, 2, 131,
+ 0, 128, 131, 153, 25, 0,
+ 114, 0, 16, 0, 36, 0,
+ 0, 0, 58, 0, 16, 0,
+ 28, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 70, 226, 17, 0, 0, 0,
+ 0, 0, 165, 0, 0, 7,
+ 130, 0, 16, 0, 29, 0,
+ 0, 0, 1, 64, 0, 0,
+ 80, 0, 0, 0, 6, 240,
+ 17, 0, 0, 0, 0, 0,
+ 16, 0, 0, 7, 130, 0,
+ 16, 0, 32, 0, 0, 0,
+ 70, 2, 16, 0, 34, 0,
+ 0, 0, 70, 2, 16, 0,
+ 34, 0, 0, 0, 0, 0,
+ 0, 7, 130, 0, 16, 0,
+ 34, 0, 0, 0, 58, 0,
+ 16, 0, 32, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 52, 68, 0, 0, 5,
+ 130, 0, 16, 0, 34, 0,
+ 0, 0, 58, 0, 16, 0,
+ 34, 0, 0, 0, 56, 0,
+ 0, 7, 114, 0, 16, 0,
+ 37, 0, 0, 0, 246, 15,
+ 16, 0, 34, 0, 0, 0,
+ 70, 2, 16, 0, 34, 0,
+ 0, 0, 0, 0, 0, 8,
+ 114, 0, 16, 0, 38, 0,
+ 0, 0, 70, 2, 16, 0,
+ 25, 0, 0, 0, 70, 2,
+ 16, 128, 65, 0, 0, 0,
+ 36, 0, 0, 0, 50, 0,
+ 0, 10, 114, 0, 16, 0,
+ 38, 0, 0, 0, 70, 2,
+ 16, 128, 65, 0, 0, 0,
+ 35, 0, 0, 0, 246, 15,
+ 16, 0, 27, 0, 0, 0,
+ 70, 2, 16, 0, 38, 0,
+ 0, 0, 16, 0, 0, 7,
+ 130, 0, 16, 0, 36, 0,
+ 0, 0, 70, 2, 16, 0,
+ 38, 0, 0, 0, 70, 2,
+ 16, 0, 37, 0, 0, 0,
+ 50, 0, 0, 10, 114, 0,
+ 16, 0, 37, 0, 0, 0,
+ 246, 15, 16, 128, 65, 0,
+ 0, 0, 36, 0, 0, 0,
+ 70, 2, 16, 0, 37, 0,
+ 0, 0, 70, 2, 16, 0,
+ 38, 0, 0, 0, 16, 0,
+ 0, 7, 130, 0, 16, 0,
+ 36, 0, 0, 0, 70, 2,
+ 16, 0, 37, 0, 0, 0,
+ 70, 2, 16, 0, 37, 0,
+ 0, 0, 0, 0, 0, 7,
+ 130, 0, 16, 0, 36, 0,
+ 0, 0, 58, 0, 16, 0,
+ 36, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 52,
+ 68, 0, 0, 5, 130, 0,
+ 16, 0, 36, 0, 0, 0,
+ 58, 0, 16, 0, 36, 0,
+ 0, 0, 56, 0, 0, 8,
+ 130, 0, 16, 0, 29, 0,
+ 0, 0, 58, 0, 16, 128,
+ 65, 0, 0, 0, 29, 0,
+ 0, 0, 58, 0, 16, 0,
+ 32, 0, 0, 0, 56, 0,
+ 0, 7, 130, 0, 16, 0,
+ 29, 0, 0, 0, 58, 0,
+ 16, 0, 34, 0, 0, 0,
+ 58, 0, 16, 0, 29, 0,
+ 0, 0, 56, 0, 0, 7,
+ 130, 0, 16, 0, 29, 0,
+ 0, 0, 58, 0, 16, 0,
+ 27, 0, 0, 0, 58, 0,
+ 16, 0, 29, 0, 0, 0,
+ 56, 0, 0, 7, 130, 0,
+ 16, 0, 29, 0, 0, 0,
+ 58, 0, 16, 0, 36, 0,
+ 0, 0, 58, 0, 16, 0,
+ 29, 0, 0, 0, 52, 0,
+ 0, 7, 130, 0, 16, 0,
+ 29, 0, 0, 0, 58, 0,
+ 16, 0, 29, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 128, 191, 50, 0, 0, 10,
+ 114, 0, 16, 0, 36, 0,
+ 0, 0, 70, 2, 16, 128,
+ 65, 0, 0, 0, 37, 0,
+ 0, 0, 246, 15, 16, 0,
+ 29, 0, 0, 0, 70, 2,
+ 16, 0, 36, 0, 0, 0,
+ 168, 0, 0, 9, 114, 224,
+ 17, 0, 0, 0, 0, 0,
+ 58, 0, 16, 0, 28, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 70, 2,
+ 16, 0, 36, 0, 0, 0,
+ 21, 0, 0, 1, 50, 0,
+ 0, 9, 114, 0, 16, 0,
+ 36, 0, 0, 0, 70, 2,
+ 16, 0, 34, 0, 0, 0,
+ 246, 15, 16, 0, 27, 0,
+ 0, 0, 70, 2, 16, 0,
+ 25, 0, 0, 0, 16, 0,
+ 0, 7, 18, 0, 16, 0,
+ 25, 0, 0, 0, 70, 2,
+ 16, 0, 34, 0, 0, 0,
+ 70, 2, 16, 0, 34, 0,
+ 0, 0, 165, 0, 0, 7,
+ 34, 0, 16, 0, 25, 0,
+ 0, 0, 1, 64, 0, 0,
+ 76, 0, 0, 0, 6, 240,
+ 17, 0, 0, 0, 0, 0,
+ 50, 0, 0, 9, 18, 0,
+ 16, 0, 25, 0, 0, 0,
+ 26, 0, 16, 0, 25, 0,
+ 0, 0, 10, 0, 16, 0,
+ 25, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 128, 63,
+ 14, 0, 0, 7, 18, 0,
+ 16, 0, 25, 0, 0, 0,
+ 58, 0, 16, 0, 25, 0,
+ 0, 0, 10, 0, 16, 0,
+ 25, 0, 0, 0, 55, 0,
+ 0, 9, 130, 0, 16, 0,
+ 36, 0, 0, 0, 58, 0,
+ 16, 0, 19, 0, 0, 0,
+ 10, 0, 16, 0, 25, 0,
+ 0, 0, 58, 0, 16, 0,
+ 25, 0, 0, 0, 168, 0,
+ 0, 9, 242, 224, 17, 0,
+ 0, 0, 0, 0, 58, 0,
+ 16, 0, 23, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 70, 14, 16, 0,
+ 36, 0, 0, 0, 21, 0,
+ 0, 1, 30, 0, 0, 7,
+ 130, 0, 16, 0, 22, 0,
+ 0, 0, 58, 0, 16, 0,
+ 22, 0, 0, 0, 1, 64,
+ 0, 0, 0, 4, 0, 0,
+ 22, 0, 0, 1, 21, 0,
+ 0, 1, 165, 0, 0, 7,
+ 18, 0, 16, 0, 19, 0,
+ 0, 0, 1, 64, 0, 0,
+ 84, 0, 0, 0, 6, 240,
+ 17, 0, 0, 0, 0, 0,
+ 165, 0, 0, 7, 34, 0,
+ 16, 0, 19, 0, 0, 0,
+ 1, 64, 0, 0, 72, 0,
+ 0, 0, 6, 240, 17, 0,
+ 1, 0, 0, 0, 51, 0,
+ 0, 7, 18, 0, 16, 0,
+ 19, 0, 0, 0, 26, 0,
+ 16, 0, 19, 0, 0, 0,
+ 10, 0, 16, 0, 19, 0,
+ 0, 0, 49, 0, 0, 7,
+ 18, 0, 16, 0, 19, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 10, 0,
+ 16, 0, 19, 0, 0, 0,
+ 31, 0, 4, 3, 10, 0,
+ 16, 0, 19, 0, 0, 0,
+ 165, 0, 0, 7, 18, 0,
+ 16, 0, 19, 0, 0, 0,
+ 1, 64, 0, 0, 88, 0,
+ 0, 0, 6, 240, 17, 0,
+ 0, 0, 0, 0, 165, 0,
+ 0, 7, 34, 0, 16, 0,
+ 19, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 0, 0,
+ 0, 0, 31, 0, 4, 3,
+ 42, 0, 16, 0, 1, 0,
+ 0, 0, 165, 0, 0, 7,
+ 194, 0, 16, 0, 19, 0,
+ 0, 0, 58, 0, 16, 0,
+ 4, 0, 0, 0, 6, 244,
+ 17, 0, 1, 0, 0, 0,
+ 0, 0, 0, 7, 18, 0,
+ 16, 0, 20, 0, 0, 0,
+ 58, 0, 16, 0, 19, 0,
+ 0, 0, 42, 0, 16, 0,
+ 19, 0, 0, 0, 50, 0,
+ 0, 12, 194, 0, 16, 0,
+ 19, 0, 0, 0, 6, 0,
+ 16, 0, 20, 0, 0, 0,
+ 2, 64, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 23, 183, 209, 56, 23, 183,
+ 209, 56, 246, 11, 16, 0,
+ 19, 0, 0, 0, 0, 0,
+ 0, 7, 130, 0, 16, 0,
+ 19, 0, 0, 0, 42, 0,
+ 16, 0, 19, 0, 0, 0,
+ 58, 0, 16, 0, 19, 0,
+ 0, 0, 168, 0, 0, 8,
+ 18, 240, 17, 0, 6, 0,
+ 0, 0, 10, 32, 2, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 58, 0, 16, 0,
+ 19, 0, 0, 0, 18, 0,
+ 0, 1, 54, 0, 0, 8,
+ 194, 0, 16, 0, 19, 0,
+ 0, 0, 2, 64, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 21, 0,
+ 0, 1, 190, 24, 0, 1,
+ 31, 0, 4, 3, 42, 0,
+ 16, 0, 1, 0, 0, 0,
+ 167, 0, 0, 9, 18, 0,
+ 16, 0, 20, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 6, 0, 0, 0,
+ 167, 0, 0, 9, 34, 0,
+ 16, 0, 20, 0, 0, 0,
+ 1, 64, 0, 0, 1, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 6, 0, 0, 0,
+ 49, 0, 0, 7, 18, 0,
+ 16, 0, 20, 0, 0, 0,
+ 26, 0, 16, 0, 20, 0,
+ 0, 0, 10, 0, 16, 0,
+ 20, 0, 0, 0, 1, 0,
+ 0, 7, 34, 0, 16, 0,
+ 20, 0, 0, 0, 10, 0,
+ 16, 0, 20, 0, 0, 0,
+ 1, 64, 0, 0, 1, 0,
+ 0, 0, 167, 0, 0, 9,
+ 34, 0, 16, 0, 20, 0,
+ 0, 0, 26, 0, 16, 0,
+ 20, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 6, 0,
+ 0, 0, 167, 0, 0, 9,
+ 66, 0, 16, 0, 20, 0,
+ 0, 0, 1, 64, 0, 0,
+ 2, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 6, 0,
+ 0, 0, 49, 0, 0, 7,
+ 34, 0, 16, 0, 20, 0,
+ 0, 0, 42, 0, 16, 0,
+ 20, 0, 0, 0, 26, 0,
+ 16, 0, 20, 0, 0, 0,
+ 55, 0, 0, 9, 18, 0,
+ 16, 0, 20, 0, 0, 0,
+ 26, 0, 16, 0, 20, 0,
+ 0, 0, 1, 64, 0, 0,
+ 254, 255, 255, 255, 10, 0,
+ 16, 0, 20, 0, 0, 0,
+ 30, 0, 0, 6, 18, 0,
+ 16, 0, 20, 0, 0, 0,
+ 10, 0, 16, 0, 20, 0,
+ 0, 0, 10, 32, 2, 0,
+ 85, 0, 0, 7, 34, 0,
+ 16, 0, 20, 0, 0, 0,
+ 10, 0, 16, 0, 20, 0,
+ 0, 0, 1, 64, 0, 0,
+ 30, 0, 0, 0, 30, 0,
+ 0, 7, 18, 0, 16, 0,
+ 20, 0, 0, 0, 26, 0,
+ 16, 0, 20, 0, 0, 0,
+ 10, 0, 16, 0, 20, 0,
+ 0, 0, 14, 0, 0, 7,
+ 130, 0, 16, 0, 19, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 254, 66, 58, 0,
+ 16, 0, 19, 0, 0, 0,
+ 165, 0, 0, 7, 34, 0,
+ 16, 0, 20, 0, 0, 0,
+ 1, 64, 0, 0, 84, 0,
+ 0, 0, 6, 240, 17, 0,
+ 0, 0, 0, 0, 14, 0,
+ 0, 10, 34, 0, 16, 0,
+ 20, 0, 0, 0, 2, 64,
+ 0, 0, 0, 0, 128, 63,
+ 0, 0, 128, 63, 0, 0,
+ 128, 63, 0, 0, 128, 63,
+ 26, 0, 16, 0, 20, 0,
+ 0, 0, 51, 0, 0, 7,
+ 130, 0, 16, 0, 19, 0,
+ 0, 0, 58, 0, 16, 0,
+ 19, 0, 0, 0, 26, 0,
+ 16, 0, 20, 0, 0, 0,
+ 41, 0, 0, 7, 18, 0,
+ 16, 0, 20, 0, 0, 0,
+ 10, 0, 16, 0, 20, 0,
+ 0, 0, 1, 64, 0, 0,
+ 2, 0, 0, 0, 30, 0,
+ 0, 10, 98, 0, 16, 0,
+ 20, 0, 0, 0, 6, 0,
+ 16, 0, 20, 0, 0, 0,
+ 2, 64, 0, 0, 0, 0,
+ 0, 0, 12, 0, 0, 0,
+ 24, 0, 0, 0, 0, 0,
+ 0, 0, 166, 0, 0, 7,
+ 18, 240, 17, 0, 5, 0,
+ 0, 0, 26, 0, 16, 0,
+ 20, 0, 0, 0, 58, 0,
+ 16, 0, 19, 0, 0, 0,
+ 56, 0, 0, 7, 66, 0,
+ 16, 0, 19, 0, 0, 0,
+ 42, 0, 16, 0, 19, 0,
+ 0, 0, 58, 0, 16, 0,
+ 19, 0, 0, 0, 166, 0,
+ 0, 7, 18, 240, 17, 0,
+ 5, 0, 0, 0, 10, 0,
+ 16, 0, 20, 0, 0, 0,
+ 42, 0, 16, 0, 19, 0,
+ 0, 0, 166, 0, 0, 6,
+ 18, 240, 17, 0, 5, 0,
+ 0, 0, 42, 0, 16, 0,
+ 20, 0, 0, 0, 10, 32,
+ 2, 0, 21, 0, 0, 1,
+ 190, 24, 0, 1, 165, 0,
+ 0, 7, 66, 0, 16, 0,
+ 19, 0, 0, 0, 1, 64,
+ 0, 0, 100, 0, 0, 0,
+ 6, 240, 17, 0, 0, 0,
+ 0, 0, 41, 0, 0, 7,
+ 130, 0, 16, 0, 19, 0,
+ 0, 0, 10, 0, 16, 0,
+ 19, 0, 0, 0, 1, 64,
+ 0, 0, 1, 0, 0, 0,
+ 30, 0, 0, 7, 66, 0,
+ 16, 0, 19, 0, 0, 0,
+ 58, 0, 16, 0, 19, 0,
+ 0, 0, 42, 0, 16, 0,
+ 19, 0, 0, 0, 165, 0,
+ 0, 7, 130, 0, 16, 0,
+ 19, 0, 0, 0, 1, 64,
+ 0, 0, 116, 0, 0, 0,
+ 6, 240, 17, 0, 1, 0,
+ 0, 0, 31, 0, 4, 3,
+ 58, 0, 16, 0, 19, 0,
+ 0, 0, 54, 0, 0, 4,
+ 130, 0, 16, 0, 19, 0,
+ 0, 0, 10, 32, 2, 0,
+ 48, 0, 0, 1, 33, 0,
+ 0, 7, 18, 0, 16, 0,
+ 20, 0, 0, 0, 58, 0,
+ 16, 0, 19, 0, 0, 0,
+ 1, 64, 0, 0, 3, 65,
+ 0, 0, 3, 0, 4, 3,
+ 10, 0, 16, 0, 20, 0,
+ 0, 0, 30, 0, 0, 7,
+ 18, 0, 16, 0, 20, 0,
+ 0, 0, 58, 0, 16, 0,
+ 19, 0, 0, 0, 42, 0,
+ 16, 0, 19, 0, 0, 0,
+ 168, 0, 0, 9, 18, 224,
+ 17, 0, 2, 0, 0, 0,
+ 10, 0, 16, 0, 20, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 1, 64,
+ 0, 0, 255, 255, 255, 255,
+ 30, 0, 0, 7, 130, 0,
+ 16, 0, 19, 0, 0, 0,
+ 58, 0, 16, 0, 19, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 4, 0, 0, 22, 0,
+ 0, 1, 21, 0, 0, 1,
+ 165, 0, 0, 7, 50, 0,
+ 16, 0, 20, 0, 0, 0,
+ 1, 64, 0, 0, 16, 0,
+ 0, 0, 70, 240, 17, 0,
+ 5, 0, 0, 0, 165, 0,
+ 0, 7, 194, 0, 16, 0,
+ 20, 0, 0, 0, 1, 64,
+ 0, 0, 4, 0, 0, 0,
+ 6, 244, 17, 0, 5, 0,
+ 0, 0, 165, 0, 0, 7,
+ 98, 0, 16, 0, 23, 0,
+ 0, 0, 1, 64, 0, 0,
+ 28, 0, 0, 0, 6, 241,
+ 17, 0, 5, 0, 0, 0,
+ 165, 0, 0, 7, 130, 0,
+ 16, 0, 19, 0, 0, 0,
+ 1, 64, 0, 0, 92, 0,
+ 0, 0, 6, 240, 17, 0,
+ 0, 0, 0, 0, 39, 0,
+ 0, 7, 18, 0, 16, 0,
+ 21, 0, 0, 0, 58, 0,
+ 16, 0, 19, 0, 0, 0,
+ 1, 64, 0, 0, 255, 255,
+ 255, 255, 165, 0, 0, 7,
+ 130, 0, 16, 0, 22, 0,
+ 0, 0, 1, 64, 0, 0,
+ 100, 0, 0, 0, 6, 240,
+ 17, 0, 0, 0, 0, 0,
+ 54, 0, 0, 4, 130, 0,
+ 16, 0, 23, 0, 0, 0,
+ 10, 32, 2, 0, 48, 0,
+ 0, 1, 33, 0, 0, 7,
+ 18, 0, 16, 0, 25, 0,
+ 0, 0, 58, 0, 16, 0,
+ 23, 0, 0, 0, 10, 0,
+ 16, 0, 19, 0, 0, 0,
+ 3, 0, 4, 3, 10, 0,
+ 16, 0, 25, 0, 0, 0,
+ 30, 0, 0, 7, 18, 0,
+ 16, 0, 25, 0, 0, 0,
+ 58, 0, 16, 0, 19, 0,
+ 0, 0, 58, 0, 16, 0,
+ 23, 0, 0, 0, 167, 0,
+ 0, 139, 2, 35, 0, 128,
+ 131, 153, 25, 0, 18, 0,
+ 16, 0, 25, 0, 0, 0,
+ 10, 0, 16, 0, 25, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 112,
+ 16, 0, 15, 0, 0, 0,
+ 55, 0, 0, 9, 18, 0,
+ 16, 0, 25, 0, 0, 0,
+ 10, 0, 16, 0, 21, 0,
+ 0, 0, 10, 0, 16, 0,
+ 25, 0, 0, 0, 58, 0,
+ 16, 0, 23, 0, 0, 0,
+ 30, 0, 0, 7, 34, 0,
+ 16, 0, 25, 0, 0, 0,
+ 26, 0, 16, 0, 0, 0,
+ 0, 0, 10, 0, 16, 0,
+ 25, 0, 0, 0, 167, 0,
+ 0, 139, 2, 131, 0, 128,
+ 131, 153, 25, 0, 242, 0,
+ 16, 0, 36, 0, 0, 0,
+ 26, 0, 16, 0, 25, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 70, 238,
+ 17, 0, 0, 0, 0, 0,
+ 17, 0, 0, 8, 34, 0,
+ 16, 0, 25, 0, 0, 0,
+ 70, 14, 16, 0, 36, 0,
+ 0, 0, 70, 158, 144, 0,
+ 26, 0, 16, 0, 23, 0,
+ 0, 0, 50, 0, 0, 9,
+ 34, 0, 16, 0, 25, 0,
+ 0, 0, 26, 0, 16, 0,
+ 25, 0, 0, 0, 10, 0,
+ 16, 0, 20, 0, 0, 0,
+ 42, 0, 16, 0, 20, 0,
+ 0, 0, 51, 0, 0, 7,
+ 34, 0, 16, 0, 25, 0,
+ 0, 0, 26, 0, 16, 0,
+ 25, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 255, 66,
+ 52, 0, 0, 7, 34, 0,
+ 16, 0, 25, 0, 0, 0,
+ 26, 0, 16, 0, 25, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 17, 0,
+ 0, 8, 66, 0, 16, 0,
+ 25, 0, 0, 0, 70, 14,
+ 16, 0, 36, 0, 0, 0,
+ 70, 158, 144, 0, 42, 0,
+ 16, 0, 23, 0, 0, 0,
+ 50, 0, 0, 9, 66, 0,
+ 16, 0, 25, 0, 0, 0,
+ 42, 0, 16, 0, 25, 0,
+ 0, 0, 26, 0, 16, 0,
+ 20, 0, 0, 0, 58, 0,
+ 16, 0, 20, 0, 0, 0,
+ 51, 0, 0, 7, 66, 0,
+ 16, 0, 25, 0, 0, 0,
+ 42, 0, 16, 0, 25, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 255, 66, 52, 0,
+ 0, 7, 66, 0, 16, 0,
+ 25, 0, 0, 0, 42, 0,
+ 16, 0, 25, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 27, 0, 0, 5,
+ 98, 0, 16, 0, 25, 0,
+ 0, 0, 86, 6, 16, 0,
+ 25, 0, 0, 0, 30, 0,
+ 0, 7, 130, 0, 16, 0,
+ 25, 0, 0, 0, 58, 0,
+ 16, 0, 22, 0, 0, 0,
+ 58, 0, 16, 0, 23, 0,
+ 0, 0, 41, 0, 0, 7,
+ 66, 0, 16, 0, 25, 0,
+ 0, 0, 42, 0, 16, 0,
+ 25, 0, 0, 0, 1, 64,
+ 0, 0, 23, 0, 0, 0,
+ 35, 0, 0, 9, 34, 0,
+ 16, 0, 25, 0, 0, 0,
+ 26, 0, 16, 0, 25, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 1, 0, 42, 0,
+ 16, 0, 25, 0, 0, 0,
+ 30, 0, 0, 7, 34, 0,
+ 16, 0, 25, 0, 0, 0,
+ 26, 0, 16, 0, 25, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 129, 0, 60, 0,
+ 0, 7, 18, 0, 16, 0,
+ 25, 0, 0, 0, 10, 0,
+ 16, 0, 25, 0, 0, 0,
+ 26, 0, 16, 0, 25, 0,
+ 0, 0, 168, 0, 0, 9,
+ 18, 224, 17, 0, 2, 0,
+ 0, 0, 58, 0, 16, 0,
+ 25, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 10, 0, 16, 0, 25, 0,
+ 0, 0, 30, 0, 0, 7,
+ 130, 0, 16, 0, 23, 0,
+ 0, 0, 58, 0, 16, 0,
+ 23, 0, 0, 0, 1, 64,
+ 0, 0, 0, 4, 0, 0,
+ 22, 0, 0, 1, 190, 24,
+ 0, 1, 165, 0, 0, 7,
+ 50, 0, 16, 0, 25, 0,
+ 0, 0, 1, 64, 0, 0,
+ 96, 0, 0, 0, 70, 240,
+ 17, 0, 0, 0, 0, 0,
+ 30, 0, 0, 7, 130, 0,
+ 16, 0, 19, 0, 0, 0,
+ 10, 0, 16, 0, 19, 0,
+ 0, 0, 26, 0, 16, 0,
+ 25, 0, 0, 0, 54, 0,
+ 0, 4, 18, 0, 16, 0,
+ 21, 0, 0, 0, 10, 32,
+ 2, 0, 48, 0, 0, 1,
+ 33, 0, 0, 7, 130, 0,
+ 16, 0, 22, 0, 0, 0,
+ 10, 0, 16, 0, 21, 0,
+ 0, 0, 26, 0, 16, 0,
+ 19, 0, 0, 0, 3, 0,
+ 4, 3, 58, 0, 16, 0,
+ 22, 0, 0, 0, 30, 0,
+ 0, 7, 130, 0, 16, 0,
+ 22, 0, 0, 0, 10, 0,
+ 16, 0, 21, 0, 0, 0,
+ 10, 0, 16, 0, 25, 0,
+ 0, 0, 30, 0, 0, 7,
+ 130, 0, 16, 0, 23, 0,
+ 0, 0, 26, 0, 16, 0,
+ 0, 0, 0, 0, 10, 0,
+ 16, 0, 21, 0, 0, 0,
+ 167, 0, 0, 139, 2, 131,
+ 0, 128, 131, 153, 25, 0,
+ 242, 0, 16, 0, 36, 0,
+ 0, 0, 58, 0, 16, 0,
+ 23, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 70, 238, 17, 0, 0, 0,
+ 0, 0, 168, 0, 0, 9,
+ 242, 224, 17, 0, 1, 0,
+ 0, 0, 58, 0, 16, 0,
+ 22, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 70, 14, 16, 0, 36, 0,
+ 0, 0, 30, 0, 0, 7,
+ 18, 0, 16, 0, 21, 0,
+ 0, 0, 10, 0, 16, 0,
+ 21, 0, 0, 0, 1, 64,
+ 0, 0, 0, 4, 0, 0,
+ 22, 0, 0, 1, 190, 24,
+ 0, 1, 30, 0, 0, 7,
+ 18, 0, 16, 0, 21, 0,
+ 0, 0, 10, 0, 16, 0,
+ 19, 0, 0, 0, 1, 64,
+ 0, 0, 31, 0, 0, 0,
+ 138, 0, 0, 9, 130, 0,
+ 16, 0, 22, 0, 0, 0,
+ 1, 64, 0, 0, 5, 0,
+ 0, 0, 1, 64, 0, 0,
+ 5, 0, 0, 0, 10, 0,
+ 16, 0, 21, 0, 0, 0,
+ 85, 0, 0, 7, 18, 0,
+ 16, 0, 21, 0, 0, 0,
+ 10, 0, 16, 0, 21, 0,
+ 0, 0, 1, 64, 0, 0,
+ 10, 0, 0, 0, 79, 0,
+ 0, 7, 130, 0, 16, 0,
+ 23, 0, 0, 0, 26, 0,
+ 16, 0, 4, 0, 0, 0,
+ 58, 0, 16, 0, 22, 0,
+ 0, 0, 1, 0, 0, 7,
+ 18, 0, 16, 0, 25, 0,
+ 0, 0, 58, 0, 16, 0,
+ 23, 0, 0, 0, 1, 64,
+ 0, 0, 1, 0, 0, 0,
+ 30, 0, 0, 7, 18, 0,
+ 16, 0, 25, 0, 0, 0,
+ 10, 0, 16, 0, 21, 0,
+ 0, 0, 10, 0, 16, 0,
+ 25, 0, 0, 0, 55, 0,
+ 0, 9, 66, 0, 16, 0,
+ 25, 0, 0, 0, 58, 0,
+ 16, 0, 23, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 1, 64, 0, 0,
+ 1, 0, 0, 0, 38, 0,
+ 0, 8, 0, 208, 0, 0,
+ 66, 0, 16, 0, 25, 0,
+ 0, 0, 58, 0, 16, 0,
+ 22, 0, 0, 0, 42, 0,
+ 16, 0, 25, 0, 0, 0,
+ 35, 0, 0, 9, 66, 0,
+ 16, 0, 25, 0, 0, 0,
+ 26, 0, 16, 0, 4, 0,
+ 0, 0, 10, 0, 16, 0,
+ 25, 0, 0, 0, 42, 0,
+ 16, 0, 25, 0, 0, 0,
+ 41, 0, 0, 10, 146, 0,
+ 16, 0, 25, 0, 0, 0,
+ 6, 8, 16, 0, 25, 0,
+ 0, 0, 2, 64, 0, 0,
+ 5, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 5, 0, 0, 0, 30, 0,
+ 0, 7, 18, 0, 16, 0,
+ 25, 0, 0, 0, 10, 0,
+ 16, 0, 25, 0, 0, 0,
+ 58, 0, 16, 0, 25, 0,
+ 0, 0, 84, 0, 0, 7,
+ 18, 0, 16, 0, 25, 0,
+ 0, 0, 10, 0, 16, 0,
+ 19, 0, 0, 0, 10, 0,
+ 16, 0, 25, 0, 0, 0,
+ 140, 0, 0, 10, 66, 0,
+ 16, 0, 25, 0, 0, 0,
+ 1, 64, 0, 0, 27, 0,
+ 0, 0, 1, 64, 0, 0,
+ 5, 0, 0, 0, 42, 0,
+ 16, 0, 25, 0, 0, 0,
+ 10, 32, 2, 0, 54, 0,
+ 0, 5, 130, 0, 16, 0,
+ 25, 0, 0, 0, 1, 64,
+ 0, 0, 16, 0, 0, 0,
+ 54, 0, 0, 5, 130, 0,
+ 16, 0, 27, 0, 0, 0,
+ 26, 0, 16, 0, 25, 0,
+ 0, 0, 54, 0, 0, 5,
+ 130, 0, 16, 0, 28, 0,
+ 0, 0, 58, 0, 16, 0,
+ 19, 0, 0, 0, 48, 0,
+ 0, 1, 80, 0, 0, 7,
+ 130, 0, 16, 0, 29, 0,
+ 0, 0, 58, 0, 16, 0,
+ 25, 0, 0, 0, 1, 64,
+ 0, 0, 32, 0, 0, 0,
+ 3, 0, 4, 3, 58, 0,
+ 16, 0, 29, 0, 0, 0,
+ 54, 0, 0, 5, 130, 0,
+ 16, 0, 29, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 54, 0, 0, 5,
+ 130, 0, 16, 0, 32, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 54, 0,
+ 0, 5, 130, 0, 16, 0,
+ 34, 0, 0, 0, 42, 0,
+ 16, 0, 25, 0, 0, 0,
+ 48, 0, 0, 1, 80, 0,
+ 0, 7, 18, 0, 16, 0,
+ 36, 0, 0, 0, 58, 0,
+ 16, 0, 32, 0, 0, 0,
+ 10, 0, 16, 0, 21, 0,
+ 0, 0, 3, 0, 4, 3,
+ 10, 0, 16, 0, 36, 0,
+ 0, 0, 79, 0, 0, 7,
+ 18, 0, 16, 0, 36, 0,
+ 0, 0, 58, 0, 16, 0,
+ 34, 0, 0, 0, 10, 0,
+ 16, 0, 25, 0, 0, 0,
+ 30, 0, 0, 7, 34, 0,
+ 16, 0, 36, 0, 0, 0,
+ 58, 0, 16, 0, 27, 0,
+ 0, 0, 58, 0, 16, 0,
+ 34, 0, 0, 0, 167, 0,
+ 0, 139, 2, 35, 0, 128,
+ 131, 153, 25, 0, 34, 0,
+ 16, 0, 36, 0, 0, 0,
+ 26, 0, 16, 0, 36, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 224,
+ 17, 0, 2, 0, 0, 0,
+ 55, 0, 0, 9, 18, 0,
+ 16, 0, 36, 0, 0, 0,
+ 10, 0, 16, 0, 36, 0,
+ 0, 0, 26, 0, 16, 0,
+ 36, 0, 0, 0, 1, 64,
+ 0, 0, 255, 255, 255, 255,
+ 85, 0, 0, 7, 18, 0,
+ 16, 0, 36, 0, 0, 0,
+ 10, 0, 16, 0, 36, 0,
+ 0, 0, 58, 0, 16, 0,
+ 25, 0, 0, 0, 1, 0,
+ 0, 10, 242, 0, 16, 0,
+ 36, 0, 0, 0, 6, 0,
+ 16, 0, 36, 0, 0, 0,
+ 2, 64, 0, 0, 1, 0,
+ 0, 0, 2, 0, 0, 0,
+ 4, 0, 0, 0, 8, 0,
+ 0, 0, 39, 0, 0, 10,
+ 242, 0, 16, 0, 36, 0,
+ 0, 0, 70, 14, 16, 0,
+ 36, 0, 0, 0, 2, 64,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 140, 0, 0, 16, 242, 0,
+ 16, 0, 36, 0, 0, 0,
+ 2, 64, 0, 0, 1, 0,
+ 0, 0, 1, 0, 0, 0,
+ 1, 0, 0, 0, 1, 0,
+ 0, 0, 6, 32, 2, 0,
+ 70, 14, 16, 0, 36, 0,
+ 0, 0, 2, 64, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 168, 0,
+ 0, 8, 18, 240, 17, 0,
+ 3, 0, 0, 0, 10, 32,
+ 2, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 10, 0,
+ 16, 0, 36, 0, 0, 0,
+ 168, 0, 0, 9, 18, 240,
+ 17, 0, 3, 0, 0, 0,
+ 10, 0, 16, 0, 12, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 26, 0,
+ 16, 0, 36, 0, 0, 0,
+ 168, 0, 0, 9, 18, 240,
+ 17, 0, 3, 0, 0, 0,
+ 26, 0, 16, 0, 12, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 42, 0,
+ 16, 0, 36, 0, 0, 0,
+ 168, 0, 0, 9, 18, 240,
+ 17, 0, 3, 0, 0, 0,
+ 42, 0, 16, 0, 12, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 58, 0,
+ 16, 0, 36, 0, 0, 0,
+ 190, 24, 0, 1, 31, 0,
+ 4, 3, 10, 0, 16, 0,
+ 7, 0, 0, 0, 167, 0,
+ 0, 8, 18, 0, 16, 0,
+ 36, 0, 0, 0, 10, 32,
+ 2, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 167, 0, 0, 9, 34, 0,
+ 16, 0, 36, 0, 0, 0,
+ 10, 0, 16, 0, 12, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 167, 0, 0, 9, 66, 0,
+ 16, 0, 36, 0, 0, 0,
+ 26, 0, 16, 0, 12, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 167, 0, 0, 9, 130, 0,
+ 16, 0, 36, 0, 0, 0,
+ 42, 0, 16, 0, 12, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 167, 0, 0, 9, 18, 0,
+ 16, 0, 37, 0, 0, 0,
+ 10, 0, 16, 0, 3, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 167, 0, 0, 9, 34, 0,
+ 16, 0, 37, 0, 0, 0,
+ 26, 0, 16, 0, 6, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 167, 0, 0, 9, 66, 0,
+ 16, 0, 37, 0, 0, 0,
+ 10, 0, 16, 0, 9, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 167, 0, 0, 9, 130, 0,
+ 16, 0, 37, 0, 0, 0,
+ 42, 0, 16, 0, 6, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 60, 0, 0, 7, 242, 0,
+ 16, 0, 36, 0, 0, 0,
+ 70, 14, 16, 0, 36, 0,
+ 0, 0, 70, 14, 16, 0,
+ 37, 0, 0, 0, 168, 0,
+ 0, 8, 18, 240, 17, 0,
+ 3, 0, 0, 0, 10, 32,
+ 2, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 10, 0,
+ 16, 0, 36, 0, 0, 0,
+ 168, 0, 0, 9, 18, 240,
+ 17, 0, 3, 0, 0, 0,
+ 10, 0, 16, 0, 12, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 26, 0,
+ 16, 0, 36, 0, 0, 0,
+ 168, 0, 0, 9, 18, 240,
+ 17, 0, 3, 0, 0, 0,
+ 26, 0, 16, 0, 12, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 42, 0,
+ 16, 0, 36, 0, 0, 0,
+ 168, 0, 0, 9, 18, 240,
+ 17, 0, 3, 0, 0, 0,
+ 42, 0, 16, 0, 12, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 58, 0,
+ 16, 0, 36, 0, 0, 0,
+ 21, 0, 0, 1, 190, 24,
+ 0, 1, 31, 0, 4, 3,
+ 26, 0, 16, 0, 7, 0,
+ 0, 0, 167, 0, 0, 8,
+ 18, 0, 16, 0, 36, 0,
+ 0, 0, 10, 32, 2, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 167, 0,
+ 0, 9, 34, 0, 16, 0,
+ 36, 0, 0, 0, 10, 0,
+ 16, 0, 12, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 167, 0,
+ 0, 9, 66, 0, 16, 0,
+ 36, 0, 0, 0, 26, 0,
+ 16, 0, 12, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 167, 0,
+ 0, 9, 130, 0, 16, 0,
+ 36, 0, 0, 0, 42, 0,
+ 16, 0, 12, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 167, 0,
+ 0, 9, 18, 0, 16, 0,
+ 37, 0, 0, 0, 26, 0,
+ 16, 0, 3, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 167, 0,
+ 0, 9, 34, 0, 16, 0,
+ 37, 0, 0, 0, 58, 0,
+ 16, 0, 6, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 167, 0,
+ 0, 9, 66, 0, 16, 0,
+ 37, 0, 0, 0, 26, 0,
+ 16, 0, 9, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 167, 0,
+ 0, 9, 130, 0, 16, 0,
+ 37, 0, 0, 0, 10, 0,
+ 16, 0, 8, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 60, 0,
+ 0, 7, 242, 0, 16, 0,
+ 36, 0, 0, 0, 70, 14,
+ 16, 0, 36, 0, 0, 0,
+ 70, 14, 16, 0, 37, 0,
+ 0, 0, 168, 0, 0, 8,
+ 18, 240, 17, 0, 3, 0,
+ 0, 0, 10, 32, 2, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 10, 0, 16, 0,
+ 36, 0, 0, 0, 168, 0,
+ 0, 9, 18, 240, 17, 0,
+ 3, 0, 0, 0, 10, 0,
+ 16, 0, 12, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 26, 0, 16, 0,
+ 36, 0, 0, 0, 168, 0,
+ 0, 9, 18, 240, 17, 0,
+ 3, 0, 0, 0, 26, 0,
+ 16, 0, 12, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 42, 0, 16, 0,
+ 36, 0, 0, 0, 168, 0,
+ 0, 9, 18, 240, 17, 0,
+ 3, 0, 0, 0, 42, 0,
+ 16, 0, 12, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 58, 0, 16, 0,
+ 36, 0, 0, 0, 21, 0,
+ 0, 1, 190, 24, 0, 1,
+ 31, 0, 4, 3, 42, 0,
+ 16, 0, 7, 0, 0, 0,
+ 167, 0, 0, 8, 18, 0,
+ 16, 0, 36, 0, 0, 0,
+ 10, 32, 2, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 3, 0,
+ 0, 0, 167, 0, 0, 9,
+ 34, 0, 16, 0, 36, 0,
+ 0, 0, 10, 0, 16, 0,
+ 12, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 3, 0,
+ 0, 0, 167, 0, 0, 9,
+ 66, 0, 16, 0, 36, 0,
+ 0, 0, 26, 0, 16, 0,
+ 12, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 3, 0,
+ 0, 0, 167, 0, 0, 9,
+ 130, 0, 16, 0, 36, 0,
+ 0, 0, 42, 0, 16, 0,
+ 12, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 3, 0,
+ 0, 0, 167, 0, 0, 9,
+ 18, 0, 16, 0, 37, 0,
+ 0, 0, 42, 0, 16, 0,
+ 3, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 3, 0,
+ 0, 0, 167, 0, 0, 9,
+ 34, 0, 16, 0, 37, 0,
+ 0, 0, 26, 0, 16, 0,
+ 8, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 3, 0,
+ 0, 0, 167, 0, 0, 9,
+ 66, 0, 16, 0, 37, 0,
+ 0, 0, 42, 0, 16, 0,
+ 9, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 3, 0,
+ 0, 0, 167, 0, 0, 9,
+ 130, 0, 16, 0, 37, 0,
+ 0, 0, 42, 0, 16, 0,
+ 8, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 3, 0,
+ 0, 0, 60, 0, 0, 7,
+ 242, 0, 16, 0, 36, 0,
+ 0, 0, 70, 14, 16, 0,
+ 36, 0, 0, 0, 70, 14,
+ 16, 0, 37, 0, 0, 0,
+ 168, 0, 0, 8, 18, 240,
+ 17, 0, 3, 0, 0, 0,
+ 10, 32, 2, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 10, 0, 16, 0, 36, 0,
+ 0, 0, 168, 0, 0, 9,
+ 18, 240, 17, 0, 3, 0,
+ 0, 0, 10, 0, 16, 0,
+ 12, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 26, 0, 16, 0, 36, 0,
+ 0, 0, 168, 0, 0, 9,
+ 18, 240, 17, 0, 3, 0,
+ 0, 0, 26, 0, 16, 0,
+ 12, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 42, 0, 16, 0, 36, 0,
+ 0, 0, 168, 0, 0, 9,
+ 18, 240, 17, 0, 3, 0,
+ 0, 0, 42, 0, 16, 0,
+ 12, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 58, 0, 16, 0, 36, 0,
+ 0, 0, 21, 0, 0, 1,
+ 190, 24, 0, 1, 31, 0,
+ 4, 3, 58, 0, 16, 0,
+ 7, 0, 0, 0, 167, 0,
+ 0, 8, 18, 0, 16, 0,
+ 36, 0, 0, 0, 10, 32,
+ 2, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 167, 0, 0, 9, 34, 0,
+ 16, 0, 36, 0, 0, 0,
+ 10, 0, 16, 0, 12, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 167, 0, 0, 9, 66, 0,
+ 16, 0, 36, 0, 0, 0,
+ 26, 0, 16, 0, 12, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 167, 0, 0, 9, 130, 0,
+ 16, 0, 36, 0, 0, 0,
+ 42, 0, 16, 0, 12, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 167, 0, 0, 9, 18, 0,
+ 16, 0, 37, 0, 0, 0,
+ 58, 0, 16, 0, 3, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 167, 0, 0, 9, 34, 0,
+ 16, 0, 37, 0, 0, 0,
+ 58, 0, 16, 0, 8, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 167, 0, 0, 9, 66, 0,
+ 16, 0, 37, 0, 0, 0,
+ 58, 0, 16, 0, 9, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 167, 0, 0, 9, 130, 0,
+ 16, 0, 37, 0, 0, 0,
+ 10, 0, 16, 0, 13, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 60, 0, 0, 7, 242, 0,
+ 16, 0, 36, 0, 0, 0,
+ 70, 14, 16, 0, 36, 0,
+ 0, 0, 70, 14, 16, 0,
+ 37, 0, 0, 0, 168, 0,
+ 0, 8, 18, 240, 17, 0,
+ 3, 0, 0, 0, 10, 32,
+ 2, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 10, 0,
+ 16, 0, 36, 0, 0, 0,
+ 168, 0, 0, 9, 18, 240,
+ 17, 0, 3, 0, 0, 0,
+ 10, 0, 16, 0, 12, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 26, 0,
+ 16, 0, 36, 0, 0, 0,
+ 168, 0, 0, 9, 18, 240,
+ 17, 0, 3, 0, 0, 0,
+ 26, 0, 16, 0, 12, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 42, 0,
+ 16, 0, 36, 0, 0, 0,
+ 168, 0, 0, 9, 18, 240,
+ 17, 0, 3, 0, 0, 0,
+ 42, 0, 16, 0, 12, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 58, 0,
+ 16, 0, 36, 0, 0, 0,
+ 21, 0, 0, 1, 190, 24,
+ 0, 1, 31, 0, 4, 3,
+ 58, 0, 16, 0, 5, 0,
+ 0, 0, 167, 0, 0, 8,
+ 18, 0, 16, 0, 36, 0,
+ 0, 0, 10, 32, 2, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 167, 0,
+ 0, 9, 34, 0, 16, 0,
+ 36, 0, 0, 0, 10, 0,
+ 16, 0, 12, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 167, 0,
+ 0, 9, 66, 0, 16, 0,
+ 36, 0, 0, 0, 26, 0,
+ 16, 0, 12, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 167, 0,
+ 0, 9, 130, 0, 16, 0,
+ 36, 0, 0, 0, 42, 0,
+ 16, 0, 12, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 167, 0,
+ 0, 9, 18, 0, 16, 0,
+ 37, 0, 0, 0, 10, 0,
+ 16, 0, 6, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 167, 0,
+ 0, 9, 34, 0, 16, 0,
+ 37, 0, 0, 0, 26, 0,
+ 16, 0, 13, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 167, 0,
+ 0, 9, 66, 0, 16, 0,
+ 37, 0, 0, 0, 10, 0,
+ 16, 0, 14, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 167, 0,
+ 0, 9, 130, 0, 16, 0,
+ 37, 0, 0, 0, 42, 0,
+ 16, 0, 13, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 60, 0,
+ 0, 7, 242, 0, 16, 0,
+ 36, 0, 0, 0, 70, 14,
+ 16, 0, 36, 0, 0, 0,
+ 70, 14, 16, 0, 37, 0,
+ 0, 0, 168, 0, 0, 8,
+ 18, 240, 17, 0, 3, 0,
+ 0, 0, 10, 32, 2, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 10, 0, 16, 0,
+ 36, 0, 0, 0, 168, 0,
+ 0, 9, 18, 240, 17, 0,
+ 3, 0, 0, 0, 10, 0,
+ 16, 0, 12, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 26, 0, 16, 0,
+ 36, 0, 0, 0, 168, 0,
+ 0, 9, 18, 240, 17, 0,
+ 3, 0, 0, 0, 26, 0,
+ 16, 0, 12, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 42, 0, 16, 0,
+ 36, 0, 0, 0, 168, 0,
+ 0, 9, 18, 240, 17, 0,
+ 3, 0, 0, 0, 42, 0,
+ 16, 0, 12, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 58, 0, 16, 0,
+ 36, 0, 0, 0, 21, 0,
+ 0, 1, 190, 24, 0, 1,
+ 167, 0, 0, 9, 18, 0,
+ 16, 0, 36, 0, 0, 0,
+ 42, 0, 16, 0, 5, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 167, 0, 0, 9, 34, 0,
+ 16, 0, 36, 0, 0, 0,
+ 10, 0, 16, 0, 10, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 167, 0, 0, 9, 66, 0,
+ 16, 0, 36, 0, 0, 0,
+ 26, 0, 16, 0, 10, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 167, 0, 0, 9, 130, 0,
+ 16, 0, 36, 0, 0, 0,
+ 42, 0, 16, 0, 10, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 87, 0, 0, 7, 50, 0,
+ 16, 0, 36, 0, 0, 0,
+ 230, 10, 16, 0, 2, 0,
+ 0, 0, 70, 0, 16, 0,
+ 36, 0, 0, 0, 1, 0,
+ 0, 7, 18, 0, 16, 0,
+ 36, 0, 0, 0, 26, 0,
+ 16, 0, 36, 0, 0, 0,
+ 10, 0, 16, 0, 36, 0,
+ 0, 0, 87, 0, 0, 7,
+ 34, 0, 16, 0, 36, 0,
+ 0, 0, 10, 0, 16, 0,
+ 5, 0, 0, 0, 42, 0,
+ 16, 0, 36, 0, 0, 0,
+ 1, 0, 0, 7, 18, 0,
+ 16, 0, 36, 0, 0, 0,
+ 26, 0, 16, 0, 36, 0,
+ 0, 0, 10, 0, 16, 0,
+ 36, 0, 0, 0, 87, 0,
+ 0, 7, 34, 0, 16, 0,
+ 36, 0, 0, 0, 26, 0,
+ 16, 0, 5, 0, 0, 0,
+ 58, 0, 16, 0, 36, 0,
+ 0, 0, 1, 0, 0, 7,
+ 18, 0, 16, 0, 36, 0,
+ 0, 0, 26, 0, 16, 0,
+ 36, 0, 0, 0, 10, 0,
+ 16, 0, 36, 0, 0, 0,
+ 134, 0, 0, 5, 18, 0,
+ 16, 0, 36, 0, 0, 0,
+ 10, 0, 16, 0, 36, 0,
+ 0, 0, 30, 0, 0, 7,
+ 130, 0, 16, 0, 29, 0,
+ 0, 0, 58, 0, 16, 0,
+ 29, 0, 0, 0, 10, 0,
+ 16, 0, 36, 0, 0, 0,
+ 30, 0, 0, 7, 130, 0,
+ 16, 0, 32, 0, 0, 0,
+ 58, 0, 16, 0, 32, 0,
+ 0, 0, 1, 64, 0, 0,
+ 1, 0, 0, 0, 30, 0,
+ 0, 7, 130, 0, 16, 0,
+ 34, 0, 0, 0, 58, 0,
+ 16, 0, 34, 0, 0, 0,
+ 1, 64, 0, 0, 32, 0,
+ 0, 0, 22, 0, 0, 1,
+ 31, 0, 4, 3, 58, 0,
+ 16, 0, 22, 0, 0, 0,
+ 30, 0, 0, 7, 130, 0,
+ 16, 0, 32, 0, 0, 0,
+ 58, 0, 16, 0, 27, 0,
+ 0, 0, 58, 0, 16, 0,
+ 34, 0, 0, 0, 167, 0,
+ 0, 139, 2, 35, 0, 128,
+ 131, 153, 25, 0, 130, 0,
+ 16, 0, 32, 0, 0, 0,
+ 58, 0, 16, 0, 32, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 224,
+ 17, 0, 2, 0, 0, 0,
+ 31, 0, 4, 3, 58, 0,
+ 16, 0, 23, 0, 0, 0,
+ 79, 0, 0, 7, 18, 0,
+ 16, 0, 36, 0, 0, 0,
+ 58, 0, 16, 0, 34, 0,
+ 0, 0, 10, 0, 16, 0,
+ 25, 0, 0, 0, 55, 0,
+ 0, 9, 130, 0, 16, 0,
+ 32, 0, 0, 0, 10, 0,
+ 16, 0, 36, 0, 0, 0,
+ 58, 0, 16, 0, 32, 0,
+ 0, 0, 1, 64, 0, 0,
+ 255, 255, 255, 255, 85, 0,
+ 0, 7, 130, 0, 16, 0,
+ 32, 0, 0, 0, 58, 0,
+ 16, 0, 32, 0, 0, 0,
+ 58, 0, 16, 0, 25, 0,
+ 0, 0, 1, 0, 0, 10,
+ 242, 0, 16, 0, 36, 0,
+ 0, 0, 246, 15, 16, 0,
+ 32, 0, 0, 0, 2, 64,
+ 0, 0, 1, 0, 0, 0,
+ 2, 0, 0, 0, 4, 0,
+ 0, 0, 8, 0, 0, 0,
+ 39, 0, 0, 10, 242, 0,
+ 16, 0, 36, 0, 0, 0,
+ 70, 14, 16, 0, 36, 0,
+ 0, 0, 2, 64, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 140, 0,
+ 0, 16, 242, 0, 16, 0,
+ 36, 0, 0, 0, 2, 64,
+ 0, 0, 1, 0, 0, 0,
+ 1, 0, 0, 0, 1, 0,
+ 0, 0, 1, 0, 0, 0,
+ 6, 32, 2, 0, 70, 14,
+ 16, 0, 36, 0, 0, 0,
+ 2, 64, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 168, 0, 0, 8,
+ 18, 240, 17, 0, 3, 0,
+ 0, 0, 10, 32, 2, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 10, 0, 16, 0,
+ 36, 0, 0, 0, 168, 0,
+ 0, 9, 18, 240, 17, 0,
+ 3, 0, 0, 0, 10, 0,
+ 16, 0, 12, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 26, 0, 16, 0,
+ 36, 0, 0, 0, 168, 0,
+ 0, 9, 18, 240, 17, 0,
+ 3, 0, 0, 0, 26, 0,
+ 16, 0, 12, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 42, 0, 16, 0,
+ 36, 0, 0, 0, 168, 0,
+ 0, 9, 18, 240, 17, 0,
+ 3, 0, 0, 0, 42, 0,
+ 16, 0, 12, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 58, 0, 16, 0,
+ 36, 0, 0, 0, 21, 0,
+ 0, 1, 190, 24, 0, 1,
+ 31, 0, 4, 3, 58, 0,
+ 16, 0, 23, 0, 0, 0,
+ 31, 0, 4, 3, 10, 0,
+ 16, 0, 7, 0, 0, 0,
+ 167, 0, 0, 8, 18, 0,
+ 16, 0, 36, 0, 0, 0,
+ 10, 32, 2, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 3, 0,
+ 0, 0, 167, 0, 0, 9,
+ 34, 0, 16, 0, 36, 0,
+ 0, 0, 10, 0, 16, 0,
+ 12, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 3, 0,
+ 0, 0, 167, 0, 0, 9,
+ 66, 0, 16, 0, 36, 0,
+ 0, 0, 26, 0, 16, 0,
+ 12, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 3, 0,
+ 0, 0, 167, 0, 0, 9,
+ 130, 0, 16, 0, 36, 0,
+ 0, 0, 42, 0, 16, 0,
+ 12, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 3, 0,
+ 0, 0, 167, 0, 0, 9,
+ 18, 0, 16, 0, 37, 0,
+ 0, 0, 10, 0, 16, 0,
+ 3, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 3, 0,
+ 0, 0, 167, 0, 0, 9,
+ 34, 0, 16, 0, 37, 0,
+ 0, 0, 26, 0, 16, 0,
+ 6, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 3, 0,
+ 0, 0, 167, 0, 0, 9,
+ 66, 0, 16, 0, 37, 0,
+ 0, 0, 10, 0, 16, 0,
+ 9, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 3, 0,
+ 0, 0, 167, 0, 0, 9,
+ 130, 0, 16, 0, 37, 0,
+ 0, 0, 42, 0, 16, 0,
+ 6, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 3, 0,
+ 0, 0, 60, 0, 0, 7,
+ 242, 0, 16, 0, 36, 0,
+ 0, 0, 70, 14, 16, 0,
+ 36, 0, 0, 0, 70, 14,
+ 16, 0, 37, 0, 0, 0,
+ 168, 0, 0, 8, 18, 240,
+ 17, 0, 3, 0, 0, 0,
+ 10, 32, 2, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 10, 0, 16, 0, 36, 0,
+ 0, 0, 168, 0, 0, 9,
+ 18, 240, 17, 0, 3, 0,
+ 0, 0, 10, 0, 16, 0,
+ 12, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 26, 0, 16, 0, 36, 0,
+ 0, 0, 168, 0, 0, 9,
+ 18, 240, 17, 0, 3, 0,
+ 0, 0, 26, 0, 16, 0,
+ 12, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 42, 0, 16, 0, 36, 0,
+ 0, 0, 168, 0, 0, 9,
+ 18, 240, 17, 0, 3, 0,
+ 0, 0, 42, 0, 16, 0,
+ 12, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 58, 0, 16, 0, 36, 0,
+ 0, 0, 21, 0, 0, 1,
+ 21, 0, 0, 1, 190, 24,
+ 0, 1, 31, 0, 4, 3,
+ 58, 0, 16, 0, 23, 0,
+ 0, 0, 31, 0, 4, 3,
+ 26, 0, 16, 0, 7, 0,
+ 0, 0, 167, 0, 0, 8,
+ 18, 0, 16, 0, 36, 0,
+ 0, 0, 10, 32, 2, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 167, 0,
+ 0, 9, 34, 0, 16, 0,
+ 36, 0, 0, 0, 10, 0,
+ 16, 0, 12, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 167, 0,
+ 0, 9, 66, 0, 16, 0,
+ 36, 0, 0, 0, 26, 0,
+ 16, 0, 12, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 167, 0,
+ 0, 9, 130, 0, 16, 0,
+ 36, 0, 0, 0, 42, 0,
+ 16, 0, 12, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 167, 0,
+ 0, 9, 18, 0, 16, 0,
+ 37, 0, 0, 0, 26, 0,
+ 16, 0, 3, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 167, 0,
+ 0, 9, 34, 0, 16, 0,
+ 37, 0, 0, 0, 58, 0,
+ 16, 0, 6, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 167, 0,
+ 0, 9, 66, 0, 16, 0,
+ 37, 0, 0, 0, 26, 0,
+ 16, 0, 9, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 167, 0,
+ 0, 9, 130, 0, 16, 0,
+ 37, 0, 0, 0, 10, 0,
+ 16, 0, 8, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 60, 0,
+ 0, 7, 242, 0, 16, 0,
+ 36, 0, 0, 0, 70, 14,
+ 16, 0, 36, 0, 0, 0,
+ 70, 14, 16, 0, 37, 0,
+ 0, 0, 168, 0, 0, 8,
+ 18, 240, 17, 0, 3, 0,
+ 0, 0, 10, 32, 2, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 10, 0, 16, 0,
+ 36, 0, 0, 0, 168, 0,
+ 0, 9, 18, 240, 17, 0,
+ 3, 0, 0, 0, 10, 0,
+ 16, 0, 12, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 26, 0, 16, 0,
+ 36, 0, 0, 0, 168, 0,
+ 0, 9, 18, 240, 17, 0,
+ 3, 0, 0, 0, 26, 0,
+ 16, 0, 12, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 42, 0, 16, 0,
+ 36, 0, 0, 0, 168, 0,
+ 0, 9, 18, 240, 17, 0,
+ 3, 0, 0, 0, 42, 0,
+ 16, 0, 12, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 58, 0, 16, 0,
+ 36, 0, 0, 0, 21, 0,
+ 0, 1, 21, 0, 0, 1,
+ 190, 24, 0, 1, 31, 0,
+ 4, 3, 58, 0, 16, 0,
+ 23, 0, 0, 0, 31, 0,
+ 4, 3, 42, 0, 16, 0,
+ 7, 0, 0, 0, 167, 0,
+ 0, 8, 18, 0, 16, 0,
+ 36, 0, 0, 0, 10, 32,
+ 2, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 167, 0, 0, 9, 34, 0,
+ 16, 0, 36, 0, 0, 0,
+ 10, 0, 16, 0, 12, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 167, 0, 0, 9, 66, 0,
+ 16, 0, 36, 0, 0, 0,
+ 26, 0, 16, 0, 12, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 167, 0, 0, 9, 130, 0,
+ 16, 0, 36, 0, 0, 0,
+ 42, 0, 16, 0, 12, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 167, 0, 0, 9, 18, 0,
+ 16, 0, 37, 0, 0, 0,
+ 42, 0, 16, 0, 3, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 167, 0, 0, 9, 34, 0,
+ 16, 0, 37, 0, 0, 0,
+ 26, 0, 16, 0, 8, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 167, 0, 0, 9, 66, 0,
+ 16, 0, 37, 0, 0, 0,
+ 42, 0, 16, 0, 9, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 167, 0, 0, 9, 130, 0,
+ 16, 0, 37, 0, 0, 0,
+ 42, 0, 16, 0, 8, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 60, 0, 0, 7, 242, 0,
+ 16, 0, 36, 0, 0, 0,
+ 70, 14, 16, 0, 36, 0,
+ 0, 0, 70, 14, 16, 0,
+ 37, 0, 0, 0, 168, 0,
+ 0, 8, 18, 240, 17, 0,
+ 3, 0, 0, 0, 10, 32,
+ 2, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 10, 0,
+ 16, 0, 36, 0, 0, 0,
+ 168, 0, 0, 9, 18, 240,
+ 17, 0, 3, 0, 0, 0,
+ 10, 0, 16, 0, 12, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 26, 0,
+ 16, 0, 36, 0, 0, 0,
+ 168, 0, 0, 9, 18, 240,
+ 17, 0, 3, 0, 0, 0,
+ 26, 0, 16, 0, 12, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 42, 0,
+ 16, 0, 36, 0, 0, 0,
+ 168, 0, 0, 9, 18, 240,
+ 17, 0, 3, 0, 0, 0,
+ 42, 0, 16, 0, 12, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 58, 0,
+ 16, 0, 36, 0, 0, 0,
+ 21, 0, 0, 1, 21, 0,
+ 0, 1, 190, 24, 0, 1,
+ 31, 0, 4, 3, 58, 0,
+ 16, 0, 23, 0, 0, 0,
+ 31, 0, 4, 3, 58, 0,
+ 16, 0, 7, 0, 0, 0,
+ 167, 0, 0, 8, 18, 0,
+ 16, 0, 36, 0, 0, 0,
+ 10, 32, 2, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 3, 0,
+ 0, 0, 167, 0, 0, 9,
+ 34, 0, 16, 0, 36, 0,
+ 0, 0, 10, 0, 16, 0,
+ 12, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 3, 0,
+ 0, 0, 167, 0, 0, 9,
+ 66, 0, 16, 0, 36, 0,
+ 0, 0, 26, 0, 16, 0,
+ 12, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 3, 0,
+ 0, 0, 167, 0, 0, 9,
+ 130, 0, 16, 0, 36, 0,
+ 0, 0, 42, 0, 16, 0,
+ 12, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 3, 0,
+ 0, 0, 167, 0, 0, 9,
+ 18, 0, 16, 0, 37, 0,
+ 0, 0, 58, 0, 16, 0,
+ 3, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 3, 0,
+ 0, 0, 167, 0, 0, 9,
+ 34, 0, 16, 0, 37, 0,
+ 0, 0, 58, 0, 16, 0,
+ 8, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 3, 0,
+ 0, 0, 167, 0, 0, 9,
+ 66, 0, 16, 0, 37, 0,
+ 0, 0, 58, 0, 16, 0,
+ 9, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 3, 0,
+ 0, 0, 167, 0, 0, 9,
+ 130, 0, 16, 0, 37, 0,
+ 0, 0, 10, 0, 16, 0,
+ 13, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 3, 0,
+ 0, 0, 60, 0, 0, 7,
+ 242, 0, 16, 0, 36, 0,
+ 0, 0, 70, 14, 16, 0,
+ 36, 0, 0, 0, 70, 14,
+ 16, 0, 37, 0, 0, 0,
+ 168, 0, 0, 8, 18, 240,
+ 17, 0, 3, 0, 0, 0,
+ 10, 32, 2, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 10, 0, 16, 0, 36, 0,
+ 0, 0, 168, 0, 0, 9,
+ 18, 240, 17, 0, 3, 0,
+ 0, 0, 10, 0, 16, 0,
+ 12, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 26, 0, 16, 0, 36, 0,
+ 0, 0, 168, 0, 0, 9,
+ 18, 240, 17, 0, 3, 0,
+ 0, 0, 26, 0, 16, 0,
+ 12, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 42, 0, 16, 0, 36, 0,
+ 0, 0, 168, 0, 0, 9,
+ 18, 240, 17, 0, 3, 0,
+ 0, 0, 42, 0, 16, 0,
+ 12, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 58, 0, 16, 0, 36, 0,
+ 0, 0, 21, 0, 0, 1,
+ 21, 0, 0, 1, 190, 24,
+ 0, 1, 31, 0, 4, 3,
+ 58, 0, 16, 0, 23, 0,
+ 0, 0, 31, 0, 4, 3,
+ 58, 0, 16, 0, 5, 0,
+ 0, 0, 167, 0, 0, 8,
+ 18, 0, 16, 0, 36, 0,
+ 0, 0, 10, 32, 2, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 167, 0,
+ 0, 9, 34, 0, 16, 0,
+ 36, 0, 0, 0, 10, 0,
+ 16, 0, 12, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 167, 0,
+ 0, 9, 66, 0, 16, 0,
+ 36, 0, 0, 0, 26, 0,
+ 16, 0, 12, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 167, 0,
+ 0, 9, 130, 0, 16, 0,
+ 36, 0, 0, 0, 42, 0,
+ 16, 0, 12, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 167, 0,
+ 0, 9, 18, 0, 16, 0,
+ 37, 0, 0, 0, 10, 0,
+ 16, 0, 6, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 167, 0,
+ 0, 9, 34, 0, 16, 0,
+ 37, 0, 0, 0, 26, 0,
+ 16, 0, 13, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 167, 0,
+ 0, 9, 66, 0, 16, 0,
+ 37, 0, 0, 0, 10, 0,
+ 16, 0, 14, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 167, 0,
+ 0, 9, 130, 0, 16, 0,
+ 37, 0, 0, 0, 42, 0,
+ 16, 0, 13, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 60, 0,
+ 0, 7, 242, 0, 16, 0,
+ 36, 0, 0, 0, 70, 14,
+ 16, 0, 36, 0, 0, 0,
+ 70, 14, 16, 0, 37, 0,
+ 0, 0, 168, 0, 0, 8,
+ 18, 240, 17, 0, 3, 0,
+ 0, 0, 10, 32, 2, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 10, 0, 16, 0,
+ 36, 0, 0, 0, 168, 0,
+ 0, 9, 18, 240, 17, 0,
+ 3, 0, 0, 0, 10, 0,
+ 16, 0, 12, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 26, 0, 16, 0,
+ 36, 0, 0, 0, 168, 0,
+ 0, 9, 18, 240, 17, 0,
+ 3, 0, 0, 0, 26, 0,
+ 16, 0, 12, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 42, 0, 16, 0,
+ 36, 0, 0, 0, 168, 0,
+ 0, 9, 18, 240, 17, 0,
+ 3, 0, 0, 0, 42, 0,
+ 16, 0, 12, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 58, 0, 16, 0,
+ 36, 0, 0, 0, 21, 0,
+ 0, 1, 21, 0, 0, 1,
+ 190, 24, 0, 1, 31, 0,
+ 4, 3, 58, 0, 16, 0,
+ 23, 0, 0, 0, 167, 0,
+ 0, 9, 18, 0, 16, 0,
+ 36, 0, 0, 0, 42, 0,
+ 16, 0, 5, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 167, 0,
+ 0, 9, 34, 0, 16, 0,
+ 36, 0, 0, 0, 10, 0,
+ 16, 0, 10, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 167, 0,
+ 0, 9, 66, 0, 16, 0,
+ 36, 0, 0, 0, 26, 0,
+ 16, 0, 10, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 167, 0,
+ 0, 9, 130, 0, 16, 0,
+ 36, 0, 0, 0, 42, 0,
+ 16, 0, 10, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 18, 0,
+ 0, 1, 54, 0, 0, 8,
+ 242, 0, 16, 0, 36, 0,
+ 0, 0, 2, 64, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 21, 0,
+ 0, 1, 87, 0, 0, 7,
+ 130, 0, 16, 0, 32, 0,
+ 0, 0, 42, 0, 16, 0,
+ 2, 0, 0, 0, 10, 0,
+ 16, 0, 36, 0, 0, 0,
+ 87, 0, 0, 7, 18, 0,
+ 16, 0, 36, 0, 0, 0,
+ 58, 0, 16, 0, 2, 0,
+ 0, 0, 26, 0, 16, 0,
+ 36, 0, 0, 0, 1, 0,
+ 0, 7, 130, 0, 16, 0,
+ 32, 0, 0, 0, 58, 0,
+ 16, 0, 32, 0, 0, 0,
+ 10, 0, 16, 0, 36, 0,
+ 0, 0, 87, 0, 0, 7,
+ 50, 0, 16, 0, 36, 0,
+ 0, 0, 70, 0, 16, 0,
+ 5, 0, 0, 0, 230, 10,
+ 16, 0, 36, 0, 0, 0,
+ 1, 0, 0, 7, 130, 0,
+ 16, 0, 32, 0, 0, 0,
+ 58, 0, 16, 0, 32, 0,
+ 0, 0, 10, 0, 16, 0,
+ 36, 0, 0, 0, 1, 0,
+ 0, 7, 130, 0, 16, 0,
+ 32, 0, 0, 0, 26, 0,
+ 16, 0, 36, 0, 0, 0,
+ 58, 0, 16, 0, 32, 0,
+ 0, 0, 134, 0, 0, 5,
+ 130, 0, 16, 0, 32, 0,
+ 0, 0, 58, 0, 16, 0,
+ 32, 0, 0, 0, 1, 0,
+ 0, 7, 130, 0, 16, 0,
+ 32, 0, 0, 0, 58, 0,
+ 16, 0, 23, 0, 0, 0,
+ 58, 0, 16, 0, 32, 0,
+ 0, 0, 30, 0, 0, 7,
+ 130, 0, 16, 0, 29, 0,
+ 0, 0, 58, 0, 16, 0,
+ 29, 0, 0, 0, 58, 0,
+ 16, 0, 32, 0, 0, 0,
+ 21, 0, 0, 1, 31, 0,
+ 4, 3, 10, 0, 16, 0,
+ 7, 0, 0, 0, 168, 0,
+ 0, 9, 18, 240, 17, 0,
+ 3, 0, 0, 0, 10, 0,
+ 16, 0, 11, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 58, 0, 16, 0,
+ 29, 0, 0, 0, 21, 0,
+ 0, 1, 190, 24, 0, 1,
+ 31, 0, 0, 2, 10, 32,
+ 2, 0, 168, 0, 0, 9,
+ 18, 240, 17, 0, 3, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 16, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 168, 0, 0, 9,
+ 18, 240, 17, 0, 3, 0,
+ 0, 0, 1, 64, 0, 0,
+ 1, 18, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 21, 0, 0, 1,
+ 31, 0, 4, 3, 58, 0,
+ 16, 0, 1, 0, 0, 0,
+ 31, 0, 4, 3, 10, 0,
+ 16, 0, 16, 0, 0, 0,
+ 167, 0, 0, 9, 130, 0,
+ 16, 0, 32, 0, 0, 0,
+ 26, 0, 16, 0, 14, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 167, 0, 0, 9, 18, 0,
+ 16, 0, 36, 0, 0, 0,
+ 58, 0, 16, 0, 10, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 30, 0, 0, 7, 130, 0,
+ 16, 0, 32, 0, 0, 0,
+ 58, 0, 16, 0, 32, 0,
+ 0, 0, 10, 0, 16, 0,
+ 36, 0, 0, 0, 168, 0,
+ 0, 9, 18, 240, 17, 0,
+ 3, 0, 0, 0, 58, 0,
+ 16, 0, 11, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 58, 0, 16, 0,
+ 32, 0, 0, 0, 18, 0,
+ 0, 1, 167, 0, 0, 9,
+ 130, 0, 16, 0, 32, 0,
+ 0, 0, 1, 64, 0, 0,
+ 1, 16, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 3, 0,
+ 0, 0, 168, 0, 0, 9,
+ 18, 240, 17, 0, 3, 0,
+ 0, 0, 1, 64, 0, 0,
+ 2, 18, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 58, 0, 16, 0, 32, 0,
+ 0, 0, 21, 0, 0, 1,
+ 21, 0, 0, 1, 190, 24,
+ 0, 1, 31, 0, 4, 3,
+ 58, 0, 16, 0, 1, 0,
+ 0, 0, 31, 0, 4, 3,
+ 26, 0, 16, 0, 16, 0,
+ 0, 0, 167, 0, 0, 9,
+ 130, 0, 16, 0, 32, 0,
+ 0, 0, 58, 0, 16, 0,
+ 11, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 3, 0,
+ 0, 0, 167, 0, 0, 9,
+ 18, 0, 16, 0, 36, 0,
+ 0, 0, 58, 0, 16, 0,
+ 12, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 3, 0,
+ 0, 0, 30, 0, 0, 7,
+ 130, 0, 16, 0, 32, 0,
+ 0, 0, 58, 0, 16, 0,
+ 32, 0, 0, 0, 10, 0,
+ 16, 0, 36, 0, 0, 0,
+ 18, 0, 0, 1, 167, 0,
+ 0, 9, 130, 0, 16, 0,
+ 32, 0, 0, 0, 58, 0,
+ 16, 0, 11, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 21, 0,
+ 0, 1, 168, 0, 0, 9,
+ 18, 240, 17, 0, 3, 0,
+ 0, 0, 10, 0, 16, 0,
+ 15, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 58, 0, 16, 0, 32, 0,
+ 0, 0, 21, 0, 0, 1,
+ 190, 24, 0, 1, 31, 0,
+ 4, 3, 58, 0, 16, 0,
+ 1, 0, 0, 0, 31, 0,
+ 4, 3, 42, 0, 16, 0,
+ 16, 0, 0, 0, 167, 0,
+ 0, 9, 130, 0, 16, 0,
+ 32, 0, 0, 0, 26, 0,
+ 16, 0, 14, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 167, 0,
+ 0, 9, 18, 0, 16, 0,
+ 36, 0, 0, 0, 58, 0,
+ 16, 0, 15, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 30, 0,
+ 0, 7, 130, 0, 16, 0,
+ 32, 0, 0, 0, 58, 0,
+ 16, 0, 32, 0, 0, 0,
+ 10, 0, 16, 0, 36, 0,
+ 0, 0, 18, 0, 0, 1,
+ 167, 0, 0, 9, 130, 0,
+ 16, 0, 32, 0, 0, 0,
+ 26, 0, 16, 0, 14, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 21, 0, 0, 1, 168, 0,
+ 0, 9, 18, 240, 17, 0,
+ 3, 0, 0, 0, 42, 0,
+ 16, 0, 15, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 58, 0, 16, 0,
+ 32, 0, 0, 0, 21, 0,
+ 0, 1, 190, 24, 0, 1,
+ 31, 0, 4, 3, 58, 0,
+ 16, 0, 1, 0, 0, 0,
+ 31, 0, 4, 3, 58, 0,
+ 16, 0, 16, 0, 0, 0,
+ 167, 0, 0, 9, 130, 0,
+ 16, 0, 32, 0, 0, 0,
+ 58, 0, 16, 0, 11, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 167, 0, 0, 9, 18, 0,
+ 16, 0, 36, 0, 0, 0,
+ 42, 0, 16, 0, 14, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 30, 0, 0, 7, 130, 0,
+ 16, 0, 32, 0, 0, 0,
+ 58, 0, 16, 0, 32, 0,
+ 0, 0, 10, 0, 16, 0,
+ 36, 0, 0, 0, 18, 0,
+ 0, 1, 167, 0, 0, 9,
+ 130, 0, 16, 0, 32, 0,
+ 0, 0, 58, 0, 16, 0,
+ 11, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 3, 0,
+ 0, 0, 21, 0, 0, 1,
+ 168, 0, 0, 9, 18, 240,
+ 17, 0, 3, 0, 0, 0,
+ 10, 0, 16, 0, 15, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 58, 0,
+ 16, 0, 32, 0, 0, 0,
+ 21, 0, 0, 1, 190, 24,
+ 0, 1, 31, 0, 4, 3,
+ 58, 0, 16, 0, 1, 0,
+ 0, 0, 31, 0, 4, 3,
+ 10, 0, 16, 0, 18, 0,
+ 0, 0, 167, 0, 0, 9,
+ 130, 0, 16, 0, 32, 0,
+ 0, 0, 26, 0, 16, 0,
+ 14, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 3, 0,
+ 0, 0, 167, 0, 0, 9,
+ 18, 0, 16, 0, 36, 0,
+ 0, 0, 26, 0, 16, 0,
+ 17, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 3, 0,
+ 0, 0, 30, 0, 0, 7,
+ 130, 0, 16, 0, 32, 0,
+ 0, 0, 58, 0, 16, 0,
+ 32, 0, 0, 0, 10, 0,
+ 16, 0, 36, 0, 0, 0,
+ 18, 0, 0, 1, 167, 0,
+ 0, 9, 130, 0, 16, 0,
+ 32, 0, 0, 0, 26, 0,
+ 16, 0, 14, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 21, 0,
+ 0, 1, 168, 0, 0, 9,
+ 18, 240, 17, 0, 3, 0,
+ 0, 0, 42, 0, 16, 0,
+ 15, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 58, 0, 16, 0, 32, 0,
+ 0, 0, 21, 0, 0, 1,
+ 190, 24, 0, 1, 31, 0,
+ 4, 3, 58, 0, 16, 0,
+ 1, 0, 0, 0, 31, 0,
+ 4, 3, 26, 0, 16, 0,
+ 18, 0, 0, 0, 167, 0,
+ 0, 9, 130, 0, 16, 0,
+ 32, 0, 0, 0, 58, 0,
+ 16, 0, 11, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 167, 0,
+ 0, 9, 18, 0, 16, 0,
+ 36, 0, 0, 0, 58, 0,
+ 16, 0, 14, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 30, 0,
+ 0, 7, 130, 0, 16, 0,
+ 32, 0, 0, 0, 58, 0,
+ 16, 0, 32, 0, 0, 0,
+ 10, 0, 16, 0, 36, 0,
+ 0, 0, 18, 0, 0, 1,
+ 167, 0, 0, 9, 130, 0,
+ 16, 0, 32, 0, 0, 0,
+ 58, 0, 16, 0, 11, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 21, 0, 0, 1, 168, 0,
+ 0, 9, 18, 240, 17, 0,
+ 3, 0, 0, 0, 10, 0,
+ 16, 0, 15, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 58, 0, 16, 0,
+ 32, 0, 0, 0, 21, 0,
+ 0, 1, 190, 24, 0, 1,
+ 31, 0, 4, 3, 58, 0,
+ 16, 0, 1, 0, 0, 0,
+ 31, 0, 4, 3, 42, 0,
+ 16, 0, 18, 0, 0, 0,
+ 167, 0, 0, 9, 130, 0,
+ 16, 0, 32, 0, 0, 0,
+ 26, 0, 16, 0, 14, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 167, 0, 0, 9, 18, 0,
+ 16, 0, 36, 0, 0, 0,
+ 58, 0, 16, 0, 17, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 30, 0, 0, 7, 130, 0,
+ 16, 0, 32, 0, 0, 0,
+ 58, 0, 16, 0, 32, 0,
+ 0, 0, 10, 0, 16, 0,
+ 36, 0, 0, 0, 18, 0,
+ 0, 1, 167, 0, 0, 9,
+ 130, 0, 16, 0, 32, 0,
+ 0, 0, 26, 0, 16, 0,
+ 14, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 3, 0,
+ 0, 0, 21, 0, 0, 1,
+ 168, 0, 0, 9, 18, 240,
+ 17, 0, 3, 0, 0, 0,
+ 42, 0, 16, 0, 15, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 58, 0,
+ 16, 0, 32, 0, 0, 0,
+ 21, 0, 0, 1, 190, 24,
+ 0, 1, 31, 0, 4, 3,
+ 58, 0, 16, 0, 1, 0,
+ 0, 0, 31, 0, 4, 3,
+ 58, 0, 16, 0, 18, 0,
+ 0, 0, 167, 0, 0, 9,
+ 130, 0, 16, 0, 32, 0,
+ 0, 0, 58, 0, 16, 0,
+ 11, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 3, 0,
+ 0, 0, 167, 0, 0, 9,
+ 18, 0, 16, 0, 36, 0,
+ 0, 0, 58, 0, 16, 0,
+ 13, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 3, 0,
+ 0, 0, 30, 0, 0, 7,
+ 130, 0, 16, 0, 32, 0,
+ 0, 0, 58, 0, 16, 0,
+ 32, 0, 0, 0, 10, 0,
+ 16, 0, 36, 0, 0, 0,
+ 18, 0, 0, 1, 167, 0,
+ 0, 9, 130, 0, 16, 0,
+ 32, 0, 0, 0, 58, 0,
+ 16, 0, 11, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 21, 0,
+ 0, 1, 168, 0, 0, 9,
+ 18, 240, 17, 0, 3, 0,
+ 0, 0, 10, 0, 16, 0,
+ 15, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 58, 0, 16, 0, 32, 0,
+ 0, 0, 21, 0, 0, 1,
+ 190, 24, 0, 1, 31, 0,
+ 4, 3, 58, 0, 16, 0,
+ 1, 0, 0, 0, 31, 0,
+ 4, 3, 26, 0, 16, 0,
+ 15, 0, 0, 0, 167, 0,
+ 0, 9, 130, 0, 16, 0,
+ 32, 0, 0, 0, 26, 0,
+ 16, 0, 14, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 167, 0,
+ 0, 9, 18, 0, 16, 0,
+ 36, 0, 0, 0, 42, 0,
+ 16, 0, 17, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 30, 0,
+ 0, 7, 130, 0, 16, 0,
+ 32, 0, 0, 0, 58, 0,
+ 16, 0, 32, 0, 0, 0,
+ 10, 0, 16, 0, 36, 0,
+ 0, 0, 18, 0, 0, 1,
+ 167, 0, 0, 9, 130, 0,
+ 16, 0, 32, 0, 0, 0,
+ 26, 0, 16, 0, 14, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 21, 0, 0, 1, 168, 0,
+ 0, 9, 18, 240, 17, 0,
+ 3, 0, 0, 0, 42, 0,
+ 16, 0, 15, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 58, 0, 16, 0,
+ 32, 0, 0, 0, 21, 0,
+ 0, 1, 190, 24, 0, 1,
+ 54, 0, 0, 5, 130, 0,
+ 16, 0, 32, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 54, 0, 0, 5,
+ 18, 0, 16, 0, 36, 0,
+ 0, 0, 42, 0, 16, 0,
+ 25, 0, 0, 0, 48, 0,
+ 0, 1, 80, 0, 0, 7,
+ 34, 0, 16, 0, 36, 0,
+ 0, 0, 58, 0, 16, 0,
+ 32, 0, 0, 0, 10, 0,
+ 16, 0, 21, 0, 0, 0,
+ 3, 0, 4, 3, 26, 0,
+ 16, 0, 36, 0, 0, 0,
+ 79, 0, 0, 7, 34, 0,
+ 16, 0, 36, 0, 0, 0,
+ 10, 0, 16, 0, 36, 0,
+ 0, 0, 10, 0, 16, 0,
+ 25, 0, 0, 0, 30, 0,
+ 0, 7, 66, 0, 16, 0,
+ 36, 0, 0, 0, 58, 0,
+ 16, 0, 27, 0, 0, 0,
+ 10, 0, 16, 0, 36, 0,
+ 0, 0, 167, 0, 0, 139,
+ 2, 35, 0, 128, 131, 153,
+ 25, 0, 66, 0, 16, 0,
+ 36, 0, 0, 0, 42, 0,
+ 16, 0, 36, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 224, 17, 0,
+ 2, 0, 0, 0, 55, 0,
+ 0, 9, 66, 0, 16, 0,
+ 36, 0, 0, 0, 26, 0,
+ 16, 0, 36, 0, 0, 0,
+ 42, 0, 16, 0, 36, 0,
+ 0, 0, 1, 64, 0, 0,
+ 255, 255, 255, 255, 85, 0,
+ 0, 7, 130, 0, 16, 0,
+ 36, 0, 0, 0, 42, 0,
+ 16, 0, 36, 0, 0, 0,
+ 58, 0, 16, 0, 25, 0,
+ 0, 0, 1, 0, 0, 10,
+ 242, 0, 16, 0, 37, 0,
+ 0, 0, 246, 15, 16, 0,
+ 36, 0, 0, 0, 2, 64,
+ 0, 0, 1, 0, 0, 0,
+ 2, 0, 0, 0, 4, 0,
+ 0, 0, 8, 0, 0, 0,
+ 39, 0, 0, 10, 242, 0,
+ 16, 0, 38, 0, 0, 0,
+ 70, 14, 16, 0, 37, 0,
+ 0, 0, 2, 64, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 140, 0,
+ 0, 16, 242, 0, 16, 0,
+ 39, 0, 0, 0, 2, 64,
+ 0, 0, 1, 0, 0, 0,
+ 1, 0, 0, 0, 1, 0,
+ 0, 0, 1, 0, 0, 0,
+ 6, 32, 2, 0, 70, 14,
+ 16, 0, 38, 0, 0, 0,
+ 2, 64, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 168, 0, 0, 8,
+ 18, 240, 17, 0, 3, 0,
+ 0, 0, 10, 32, 2, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 10, 0, 16, 0,
+ 39, 0, 0, 0, 168, 0,
+ 0, 9, 18, 240, 17, 0,
+ 3, 0, 0, 0, 10, 0,
+ 16, 0, 12, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 26, 0, 16, 0,
+ 39, 0, 0, 0, 168, 0,
+ 0, 9, 18, 240, 17, 0,
+ 3, 0, 0, 0, 26, 0,
+ 16, 0, 12, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 42, 0, 16, 0,
+ 39, 0, 0, 0, 168, 0,
+ 0, 9, 18, 240, 17, 0,
+ 3, 0, 0, 0, 42, 0,
+ 16, 0, 12, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 58, 0, 16, 0,
+ 39, 0, 0, 0, 190, 24,
+ 0, 1, 31, 0, 4, 3,
+ 10, 0, 16, 0, 7, 0,
+ 0, 0, 167, 0, 0, 8,
+ 18, 0, 16, 0, 39, 0,
+ 0, 0, 10, 32, 2, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 167, 0,
+ 0, 9, 34, 0, 16, 0,
+ 39, 0, 0, 0, 10, 0,
+ 16, 0, 12, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 167, 0,
+ 0, 9, 66, 0, 16, 0,
+ 39, 0, 0, 0, 26, 0,
+ 16, 0, 12, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 167, 0,
+ 0, 9, 130, 0, 16, 0,
+ 39, 0, 0, 0, 42, 0,
+ 16, 0, 12, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 167, 0,
+ 0, 9, 18, 0, 16, 0,
+ 40, 0, 0, 0, 10, 0,
+ 16, 0, 3, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 167, 0,
+ 0, 9, 34, 0, 16, 0,
+ 40, 0, 0, 0, 26, 0,
+ 16, 0, 6, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 167, 0,
+ 0, 9, 66, 0, 16, 0,
+ 40, 0, 0, 0, 10, 0,
+ 16, 0, 9, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 167, 0,
+ 0, 9, 130, 0, 16, 0,
+ 40, 0, 0, 0, 42, 0,
+ 16, 0, 6, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 60, 0,
+ 0, 7, 242, 0, 16, 0,
+ 39, 0, 0, 0, 70, 14,
+ 16, 0, 39, 0, 0, 0,
+ 70, 14, 16, 0, 40, 0,
+ 0, 0, 168, 0, 0, 8,
+ 18, 240, 17, 0, 3, 0,
+ 0, 0, 10, 32, 2, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 10, 0, 16, 0,
+ 39, 0, 0, 0, 168, 0,
+ 0, 9, 18, 240, 17, 0,
+ 3, 0, 0, 0, 10, 0,
+ 16, 0, 12, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 26, 0, 16, 0,
+ 39, 0, 0, 0, 168, 0,
+ 0, 9, 18, 240, 17, 0,
+ 3, 0, 0, 0, 26, 0,
+ 16, 0, 12, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 42, 0, 16, 0,
+ 39, 0, 0, 0, 168, 0,
+ 0, 9, 18, 240, 17, 0,
+ 3, 0, 0, 0, 42, 0,
+ 16, 0, 12, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 58, 0, 16, 0,
+ 39, 0, 0, 0, 21, 0,
+ 0, 1, 190, 24, 0, 1,
+ 31, 0, 4, 3, 26, 0,
+ 16, 0, 7, 0, 0, 0,
+ 167, 0, 0, 8, 18, 0,
+ 16, 0, 39, 0, 0, 0,
+ 10, 32, 2, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 3, 0,
+ 0, 0, 167, 0, 0, 9,
+ 34, 0, 16, 0, 39, 0,
+ 0, 0, 10, 0, 16, 0,
+ 12, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 3, 0,
+ 0, 0, 167, 0, 0, 9,
+ 66, 0, 16, 0, 39, 0,
+ 0, 0, 26, 0, 16, 0,
+ 12, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 3, 0,
+ 0, 0, 167, 0, 0, 9,
+ 130, 0, 16, 0, 39, 0,
+ 0, 0, 42, 0, 16, 0,
+ 12, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 3, 0,
+ 0, 0, 167, 0, 0, 9,
+ 18, 0, 16, 0, 40, 0,
+ 0, 0, 26, 0, 16, 0,
+ 3, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 3, 0,
+ 0, 0, 167, 0, 0, 9,
+ 34, 0, 16, 0, 40, 0,
+ 0, 0, 58, 0, 16, 0,
+ 6, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 3, 0,
+ 0, 0, 167, 0, 0, 9,
+ 66, 0, 16, 0, 40, 0,
+ 0, 0, 26, 0, 16, 0,
+ 9, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 3, 0,
+ 0, 0, 167, 0, 0, 9,
+ 130, 0, 16, 0, 40, 0,
+ 0, 0, 10, 0, 16, 0,
+ 8, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 3, 0,
+ 0, 0, 60, 0, 0, 7,
+ 242, 0, 16, 0, 39, 0,
+ 0, 0, 70, 14, 16, 0,
+ 39, 0, 0, 0, 70, 14,
+ 16, 0, 40, 0, 0, 0,
+ 168, 0, 0, 8, 18, 240,
+ 17, 0, 3, 0, 0, 0,
+ 10, 32, 2, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 10, 0, 16, 0, 39, 0,
+ 0, 0, 168, 0, 0, 9,
+ 18, 240, 17, 0, 3, 0,
+ 0, 0, 10, 0, 16, 0,
+ 12, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 26, 0, 16, 0, 39, 0,
+ 0, 0, 168, 0, 0, 9,
+ 18, 240, 17, 0, 3, 0,
+ 0, 0, 26, 0, 16, 0,
+ 12, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 42, 0, 16, 0, 39, 0,
+ 0, 0, 168, 0, 0, 9,
+ 18, 240, 17, 0, 3, 0,
+ 0, 0, 42, 0, 16, 0,
+ 12, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 58, 0, 16, 0, 39, 0,
+ 0, 0, 21, 0, 0, 1,
+ 190, 24, 0, 1, 31, 0,
+ 4, 3, 42, 0, 16, 0,
+ 7, 0, 0, 0, 167, 0,
+ 0, 8, 18, 0, 16, 0,
+ 39, 0, 0, 0, 10, 32,
+ 2, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 167, 0, 0, 9, 34, 0,
+ 16, 0, 39, 0, 0, 0,
+ 10, 0, 16, 0, 12, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 167, 0, 0, 9, 66, 0,
+ 16, 0, 39, 0, 0, 0,
+ 26, 0, 16, 0, 12, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 167, 0, 0, 9, 130, 0,
+ 16, 0, 39, 0, 0, 0,
+ 42, 0, 16, 0, 12, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 167, 0, 0, 9, 18, 0,
+ 16, 0, 40, 0, 0, 0,
+ 42, 0, 16, 0, 3, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 167, 0, 0, 9, 34, 0,
+ 16, 0, 40, 0, 0, 0,
+ 26, 0, 16, 0, 8, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 167, 0, 0, 9, 66, 0,
+ 16, 0, 40, 0, 0, 0,
+ 42, 0, 16, 0, 9, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 167, 0, 0, 9, 130, 0,
+ 16, 0, 40, 0, 0, 0,
+ 42, 0, 16, 0, 8, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 60, 0, 0, 7, 242, 0,
+ 16, 0, 39, 0, 0, 0,
+ 70, 14, 16, 0, 39, 0,
+ 0, 0, 70, 14, 16, 0,
+ 40, 0, 0, 0, 168, 0,
+ 0, 8, 18, 240, 17, 0,
+ 3, 0, 0, 0, 10, 32,
+ 2, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 10, 0,
+ 16, 0, 39, 0, 0, 0,
+ 168, 0, 0, 9, 18, 240,
+ 17, 0, 3, 0, 0, 0,
+ 10, 0, 16, 0, 12, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 26, 0,
+ 16, 0, 39, 0, 0, 0,
+ 168, 0, 0, 9, 18, 240,
+ 17, 0, 3, 0, 0, 0,
+ 26, 0, 16, 0, 12, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 42, 0,
+ 16, 0, 39, 0, 0, 0,
+ 168, 0, 0, 9, 18, 240,
+ 17, 0, 3, 0, 0, 0,
+ 42, 0, 16, 0, 12, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 58, 0,
+ 16, 0, 39, 0, 0, 0,
+ 21, 0, 0, 1, 190, 24,
+ 0, 1, 31, 0, 4, 3,
+ 58, 0, 16, 0, 7, 0,
+ 0, 0, 167, 0, 0, 8,
+ 18, 0, 16, 0, 39, 0,
+ 0, 0, 10, 32, 2, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 167, 0,
+ 0, 9, 34, 0, 16, 0,
+ 39, 0, 0, 0, 10, 0,
+ 16, 0, 12, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 167, 0,
+ 0, 9, 66, 0, 16, 0,
+ 39, 0, 0, 0, 26, 0,
+ 16, 0, 12, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 167, 0,
+ 0, 9, 130, 0, 16, 0,
+ 39, 0, 0, 0, 42, 0,
+ 16, 0, 12, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 167, 0,
+ 0, 9, 18, 0, 16, 0,
+ 40, 0, 0, 0, 58, 0,
+ 16, 0, 3, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 167, 0,
+ 0, 9, 34, 0, 16, 0,
+ 40, 0, 0, 0, 58, 0,
+ 16, 0, 8, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 167, 0,
+ 0, 9, 66, 0, 16, 0,
+ 40, 0, 0, 0, 58, 0,
+ 16, 0, 9, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 167, 0,
+ 0, 9, 130, 0, 16, 0,
+ 40, 0, 0, 0, 10, 0,
+ 16, 0, 13, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 60, 0,
+ 0, 7, 242, 0, 16, 0,
+ 39, 0, 0, 0, 70, 14,
+ 16, 0, 39, 0, 0, 0,
+ 70, 14, 16, 0, 40, 0,
+ 0, 0, 168, 0, 0, 8,
+ 18, 240, 17, 0, 3, 0,
+ 0, 0, 10, 32, 2, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 10, 0, 16, 0,
+ 39, 0, 0, 0, 168, 0,
+ 0, 9, 18, 240, 17, 0,
+ 3, 0, 0, 0, 10, 0,
+ 16, 0, 12, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 26, 0, 16, 0,
+ 39, 0, 0, 0, 168, 0,
+ 0, 9, 18, 240, 17, 0,
+ 3, 0, 0, 0, 26, 0,
+ 16, 0, 12, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 42, 0, 16, 0,
+ 39, 0, 0, 0, 168, 0,
+ 0, 9, 18, 240, 17, 0,
+ 3, 0, 0, 0, 42, 0,
+ 16, 0, 12, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 58, 0, 16, 0,
+ 39, 0, 0, 0, 21, 0,
+ 0, 1, 190, 24, 0, 1,
+ 31, 0, 4, 3, 58, 0,
+ 16, 0, 5, 0, 0, 0,
+ 167, 0, 0, 8, 18, 0,
+ 16, 0, 39, 0, 0, 0,
+ 10, 32, 2, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 3, 0,
+ 0, 0, 167, 0, 0, 9,
+ 34, 0, 16, 0, 39, 0,
+ 0, 0, 10, 0, 16, 0,
+ 12, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 3, 0,
+ 0, 0, 167, 0, 0, 9,
+ 66, 0, 16, 0, 39, 0,
+ 0, 0, 26, 0, 16, 0,
+ 12, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 3, 0,
+ 0, 0, 167, 0, 0, 9,
+ 130, 0, 16, 0, 39, 0,
+ 0, 0, 42, 0, 16, 0,
+ 12, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 3, 0,
+ 0, 0, 167, 0, 0, 9,
+ 18, 0, 16, 0, 40, 0,
+ 0, 0, 10, 0, 16, 0,
+ 6, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 3, 0,
+ 0, 0, 167, 0, 0, 9,
+ 34, 0, 16, 0, 40, 0,
+ 0, 0, 26, 0, 16, 0,
+ 13, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 3, 0,
+ 0, 0, 167, 0, 0, 9,
+ 66, 0, 16, 0, 40, 0,
+ 0, 0, 10, 0, 16, 0,
+ 14, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 3, 0,
+ 0, 0, 167, 0, 0, 9,
+ 130, 0, 16, 0, 40, 0,
+ 0, 0, 42, 0, 16, 0,
+ 13, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 3, 0,
+ 0, 0, 60, 0, 0, 7,
+ 242, 0, 16, 0, 39, 0,
+ 0, 0, 70, 14, 16, 0,
+ 39, 0, 0, 0, 70, 14,
+ 16, 0, 40, 0, 0, 0,
+ 168, 0, 0, 8, 18, 240,
+ 17, 0, 3, 0, 0, 0,
+ 10, 32, 2, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 10, 0, 16, 0, 39, 0,
+ 0, 0, 168, 0, 0, 9,
+ 18, 240, 17, 0, 3, 0,
+ 0, 0, 10, 0, 16, 0,
+ 12, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 26, 0, 16, 0, 39, 0,
+ 0, 0, 168, 0, 0, 9,
+ 18, 240, 17, 0, 3, 0,
+ 0, 0, 26, 0, 16, 0,
+ 12, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 42, 0, 16, 0, 39, 0,
+ 0, 0, 168, 0, 0, 9,
+ 18, 240, 17, 0, 3, 0,
+ 0, 0, 42, 0, 16, 0,
+ 12, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 58, 0, 16, 0, 39, 0,
+ 0, 0, 21, 0, 0, 1,
+ 190, 24, 0, 1, 167, 0,
+ 0, 9, 34, 0, 16, 0,
+ 37, 0, 0, 0, 42, 0,
+ 16, 0, 5, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 167, 0,
+ 0, 9, 66, 0, 16, 0,
+ 37, 0, 0, 0, 10, 0,
+ 16, 0, 10, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 167, 0,
+ 0, 9, 130, 0, 16, 0,
+ 37, 0, 0, 0, 26, 0,
+ 16, 0, 10, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 167, 0,
+ 0, 9, 18, 0, 16, 0,
+ 38, 0, 0, 0, 42, 0,
+ 16, 0, 10, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 140, 0,
+ 0, 11, 130, 0, 16, 0,
+ 36, 0, 0, 0, 1, 64,
+ 0, 0, 4, 0, 0, 0,
+ 1, 64, 0, 0, 5, 0,
+ 0, 0, 58, 0, 16, 0,
+ 36, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 30, 0, 0, 7, 130, 0,
+ 16, 0, 36, 0, 0, 0,
+ 26, 0, 16, 0, 11, 0,
+ 0, 0, 58, 0, 16, 0,
+ 36, 0, 0, 0, 30, 0,
+ 0, 7, 130, 0, 16, 0,
+ 36, 0, 0, 0, 58, 0,
+ 16, 0, 36, 0, 0, 0,
+ 1, 64, 0, 0, 0, 16,
+ 0, 0, 167, 0, 0, 9,
+ 130, 0, 16, 0, 36, 0,
+ 0, 0, 58, 0, 16, 0,
+ 36, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 3, 0,
+ 0, 0, 31, 0, 4, 3,
+ 26, 0, 16, 0, 36, 0,
+ 0, 0, 30, 0, 0, 7,
+ 34, 0, 16, 0, 36, 0,
+ 0, 0, 10, 0, 16, 0,
+ 37, 0, 0, 0, 1, 64,
+ 0, 0, 255, 255, 255, 255,
+ 87, 0, 0, 7, 34, 0,
+ 16, 0, 36, 0, 0, 0,
+ 26, 0, 16, 0, 37, 0,
+ 0, 0, 26, 0, 16, 0,
+ 36, 0, 0, 0, 32, 0,
+ 0, 10, 226, 0, 16, 0,
+ 38, 0, 0, 0, 86, 14,
+ 16, 0, 38, 0, 0, 0,
+ 2, 64, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 87, 0, 0, 7,
+ 18, 0, 16, 0, 37, 0,
+ 0, 0, 42, 0, 16, 0,
+ 37, 0, 0, 0, 26, 0,
+ 16, 0, 38, 0, 0, 0,
+ 1, 0, 0, 7, 34, 0,
+ 16, 0, 36, 0, 0, 0,
+ 26, 0, 16, 0, 36, 0,
+ 0, 0, 10, 0, 16, 0,
+ 37, 0, 0, 0, 87, 0,
+ 0, 7, 18, 0, 16, 0,
+ 37, 0, 0, 0, 58, 0,
+ 16, 0, 37, 0, 0, 0,
+ 42, 0, 16, 0, 38, 0,
+ 0, 0, 1, 0, 0, 7,
+ 34, 0, 16, 0, 36, 0,
+ 0, 0, 26, 0, 16, 0,
+ 36, 0, 0, 0, 10, 0,
+ 16, 0, 37, 0, 0, 0,
+ 87, 0, 0, 7, 18, 0,
+ 16, 0, 37, 0, 0, 0,
+ 10, 0, 16, 0, 38, 0,
+ 0, 0, 58, 0, 16, 0,
+ 38, 0, 0, 0, 1, 0,
+ 0, 7, 34, 0, 16, 0,
+ 36, 0, 0, 0, 26, 0,
+ 16, 0, 36, 0, 0, 0,
+ 10, 0, 16, 0, 37, 0,
+ 0, 0, 138, 0, 0, 8,
+ 34, 0, 16, 0, 36, 0,
+ 0, 0, 10, 32, 2, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 26, 0, 16, 0,
+ 36, 0, 0, 0, 134, 0,
+ 0, 5, 34, 0, 16, 0,
+ 36, 0, 0, 0, 26, 0,
+ 16, 0, 36, 0, 0, 0,
+ 30, 0, 0, 7, 34, 0,
+ 16, 0, 36, 0, 0, 0,
+ 26, 0, 16, 0, 36, 0,
+ 0, 0, 58, 0, 16, 0,
+ 36, 0, 0, 0, 30, 0,
+ 0, 7, 34, 0, 16, 0,
+ 36, 0, 0, 0, 58, 0,
+ 16, 0, 28, 0, 0, 0,
+ 26, 0, 16, 0, 36, 0,
+ 0, 0, 168, 0, 0, 9,
+ 18, 224, 17, 0, 2, 0,
+ 0, 0, 26, 0, 16, 0,
+ 36, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 42, 0, 16, 0, 36, 0,
+ 0, 0, 21, 0, 0, 1,
+ 190, 24, 0, 1, 31, 0,
+ 4, 3, 10, 0, 16, 0,
+ 7, 0, 0, 0, 167, 0,
+ 0, 9, 34, 0, 16, 0,
+ 36, 0, 0, 0, 42, 0,
+ 16, 0, 11, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 87, 0,
+ 0, 7, 194, 0, 16, 0,
+ 36, 0, 0, 0, 166, 14,
+ 16, 0, 2, 0, 0, 0,
+ 86, 9, 16, 0, 37, 0,
+ 0, 0, 1, 0, 0, 7,
+ 66, 0, 16, 0, 36, 0,
+ 0, 0, 58, 0, 16, 0,
+ 36, 0, 0, 0, 42, 0,
+ 16, 0, 36, 0, 0, 0,
+ 87, 0, 0, 7, 130, 0,
+ 16, 0, 36, 0, 0, 0,
+ 10, 0, 16, 0, 5, 0,
+ 0, 0, 58, 0, 16, 0,
+ 37, 0, 0, 0, 1, 0,
+ 0, 7, 66, 0, 16, 0,
+ 36, 0, 0, 0, 58, 0,
+ 16, 0, 36, 0, 0, 0,
+ 42, 0, 16, 0, 36, 0,
+ 0, 0, 87, 0, 0, 7,
+ 130, 0, 16, 0, 36, 0,
+ 0, 0, 26, 0, 16, 0,
+ 5, 0, 0, 0, 10, 0,
+ 16, 0, 38, 0, 0, 0,
+ 1, 0, 0, 7, 66, 0,
+ 16, 0, 36, 0, 0, 0,
+ 58, 0, 16, 0, 36, 0,
+ 0, 0, 42, 0, 16, 0,
+ 36, 0, 0, 0, 134, 0,
+ 0, 5, 66, 0, 16, 0,
+ 36, 0, 0, 0, 42, 0,
+ 16, 0, 36, 0, 0, 0,
+ 30, 0, 0, 7, 34, 0,
+ 16, 0, 36, 0, 0, 0,
+ 42, 0, 16, 0, 36, 0,
+ 0, 0, 26, 0, 16, 0,
+ 36, 0, 0, 0, 168, 0,
+ 0, 9, 18, 240, 17, 0,
+ 3, 0, 0, 0, 42, 0,
+ 16, 0, 11, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 26, 0, 16, 0,
+ 36, 0, 0, 0, 21, 0,
+ 0, 1, 30, 0, 0, 7,
+ 130, 0, 16, 0, 32, 0,
+ 0, 0, 58, 0, 16, 0,
+ 32, 0, 0, 0, 1, 64,
+ 0, 0, 1, 0, 0, 0,
+ 30, 0, 0, 7, 18, 0,
+ 16, 0, 36, 0, 0, 0,
+ 10, 0, 16, 0, 36, 0,
+ 0, 0, 1, 64, 0, 0,
+ 32, 0, 0, 0, 22, 0,
+ 0, 1, 31, 0, 4, 3,
+ 58, 0, 16, 0, 22, 0,
+ 0, 0, 79, 0, 0, 7,
+ 130, 0, 16, 0, 32, 0,
+ 0, 0, 10, 0, 16, 0,
+ 36, 0, 0, 0, 10, 0,
+ 16, 0, 25, 0, 0, 0,
+ 30, 0, 0, 7, 34, 0,
+ 16, 0, 36, 0, 0, 0,
+ 58, 0, 16, 0, 27, 0,
+ 0, 0, 10, 0, 16, 0,
+ 36, 0, 0, 0, 167, 0,
+ 0, 139, 2, 35, 0, 128,
+ 131, 153, 25, 0, 34, 0,
+ 16, 0, 36, 0, 0, 0,
+ 26, 0, 16, 0, 36, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 224,
+ 17, 0, 2, 0, 0, 0,
+ 55, 0, 0, 9, 34, 0,
+ 16, 0, 36, 0, 0, 0,
+ 58, 0, 16, 0, 32, 0,
+ 0, 0, 26, 0, 16, 0,
+ 36, 0, 0, 0, 1, 64,
+ 0, 0, 255, 255, 255, 255,
+ 85, 0, 0, 7, 66, 0,
+ 16, 0, 36, 0, 0, 0,
+ 26, 0, 16, 0, 36, 0,
+ 0, 0, 58, 0, 16, 0,
+ 25, 0, 0, 0, 31, 0,
+ 4, 3, 58, 0, 16, 0,
+ 23, 0, 0, 0, 1, 0,
+ 0, 10, 242, 0, 16, 0,
+ 37, 0, 0, 0, 166, 10,
+ 16, 0, 36, 0, 0, 0,
+ 2, 64, 0, 0, 1, 0,
+ 0, 0, 2, 0, 0, 0,
+ 4, 0, 0, 0, 8, 0,
+ 0, 0, 39, 0, 0, 10,
+ 242, 0, 16, 0, 37, 0,
+ 0, 0, 70, 14, 16, 0,
+ 37, 0, 0, 0, 2, 64,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 140, 0, 0, 16, 242, 0,
+ 16, 0, 37, 0, 0, 0,
+ 2, 64, 0, 0, 1, 0,
+ 0, 0, 1, 0, 0, 0,
+ 1, 0, 0, 0, 1, 0,
+ 0, 0, 6, 32, 2, 0,
+ 70, 14, 16, 0, 37, 0,
+ 0, 0, 2, 64, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 168, 0,
+ 0, 8, 18, 240, 17, 0,
+ 3, 0, 0, 0, 10, 32,
+ 2, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 10, 0,
+ 16, 0, 37, 0, 0, 0,
+ 168, 0, 0, 9, 18, 240,
+ 17, 0, 3, 0, 0, 0,
+ 10, 0, 16, 0, 12, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 26, 0,
+ 16, 0, 37, 0, 0, 0,
+ 168, 0, 0, 9, 18, 240,
+ 17, 0, 3, 0, 0, 0,
+ 26, 0, 16, 0, 12, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 42, 0,
+ 16, 0, 37, 0, 0, 0,
+ 168, 0, 0, 9, 18, 240,
+ 17, 0, 3, 0, 0, 0,
+ 42, 0, 16, 0, 12, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 58, 0,
+ 16, 0, 37, 0, 0, 0,
+ 21, 0, 0, 1, 190, 24,
+ 0, 1, 31, 0, 4, 3,
+ 58, 0, 16, 0, 23, 0,
+ 0, 0, 31, 0, 4, 3,
+ 10, 0, 16, 0, 7, 0,
+ 0, 0, 167, 0, 0, 8,
+ 18, 0, 16, 0, 37, 0,
+ 0, 0, 10, 32, 2, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 167, 0,
+ 0, 9, 34, 0, 16, 0,
+ 37, 0, 0, 0, 10, 0,
+ 16, 0, 12, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 167, 0,
+ 0, 9, 66, 0, 16, 0,
+ 37, 0, 0, 0, 26, 0,
+ 16, 0, 12, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 167, 0,
+ 0, 9, 130, 0, 16, 0,
+ 37, 0, 0, 0, 42, 0,
+ 16, 0, 12, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 167, 0,
+ 0, 9, 18, 0, 16, 0,
+ 38, 0, 0, 0, 10, 0,
+ 16, 0, 3, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 167, 0,
+ 0, 9, 34, 0, 16, 0,
+ 38, 0, 0, 0, 26, 0,
+ 16, 0, 6, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 167, 0,
+ 0, 9, 66, 0, 16, 0,
+ 38, 0, 0, 0, 10, 0,
+ 16, 0, 9, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 167, 0,
+ 0, 9, 130, 0, 16, 0,
+ 38, 0, 0, 0, 42, 0,
+ 16, 0, 6, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 60, 0,
+ 0, 7, 242, 0, 16, 0,
+ 37, 0, 0, 0, 70, 14,
+ 16, 0, 37, 0, 0, 0,
+ 70, 14, 16, 0, 38, 0,
+ 0, 0, 168, 0, 0, 8,
+ 18, 240, 17, 0, 3, 0,
+ 0, 0, 10, 32, 2, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 10, 0, 16, 0,
+ 37, 0, 0, 0, 168, 0,
+ 0, 9, 18, 240, 17, 0,
+ 3, 0, 0, 0, 10, 0,
+ 16, 0, 12, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 26, 0, 16, 0,
+ 37, 0, 0, 0, 168, 0,
+ 0, 9, 18, 240, 17, 0,
+ 3, 0, 0, 0, 26, 0,
+ 16, 0, 12, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 42, 0, 16, 0,
+ 37, 0, 0, 0, 168, 0,
+ 0, 9, 18, 240, 17, 0,
+ 3, 0, 0, 0, 42, 0,
+ 16, 0, 12, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 58, 0, 16, 0,
+ 37, 0, 0, 0, 21, 0,
+ 0, 1, 21, 0, 0, 1,
+ 190, 24, 0, 1, 31, 0,
+ 4, 3, 58, 0, 16, 0,
+ 23, 0, 0, 0, 31, 0,
+ 4, 3, 26, 0, 16, 0,
+ 7, 0, 0, 0, 167, 0,
+ 0, 8, 18, 0, 16, 0,
+ 37, 0, 0, 0, 10, 32,
+ 2, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 167, 0, 0, 9, 34, 0,
+ 16, 0, 37, 0, 0, 0,
+ 10, 0, 16, 0, 12, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 167, 0, 0, 9, 66, 0,
+ 16, 0, 37, 0, 0, 0,
+ 26, 0, 16, 0, 12, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 167, 0, 0, 9, 130, 0,
+ 16, 0, 37, 0, 0, 0,
+ 42, 0, 16, 0, 12, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 167, 0, 0, 9, 18, 0,
+ 16, 0, 38, 0, 0, 0,
+ 26, 0, 16, 0, 3, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 167, 0, 0, 9, 34, 0,
+ 16, 0, 38, 0, 0, 0,
+ 58, 0, 16, 0, 6, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 167, 0, 0, 9, 66, 0,
+ 16, 0, 38, 0, 0, 0,
+ 26, 0, 16, 0, 9, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 167, 0, 0, 9, 130, 0,
+ 16, 0, 38, 0, 0, 0,
+ 10, 0, 16, 0, 8, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 60, 0, 0, 7, 242, 0,
+ 16, 0, 37, 0, 0, 0,
+ 70, 14, 16, 0, 37, 0,
+ 0, 0, 70, 14, 16, 0,
+ 38, 0, 0, 0, 168, 0,
+ 0, 8, 18, 240, 17, 0,
+ 3, 0, 0, 0, 10, 32,
+ 2, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 10, 0,
+ 16, 0, 37, 0, 0, 0,
+ 168, 0, 0, 9, 18, 240,
+ 17, 0, 3, 0, 0, 0,
+ 10, 0, 16, 0, 12, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 26, 0,
+ 16, 0, 37, 0, 0, 0,
+ 168, 0, 0, 9, 18, 240,
+ 17, 0, 3, 0, 0, 0,
+ 26, 0, 16, 0, 12, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 42, 0,
+ 16, 0, 37, 0, 0, 0,
+ 168, 0, 0, 9, 18, 240,
+ 17, 0, 3, 0, 0, 0,
+ 42, 0, 16, 0, 12, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 58, 0,
+ 16, 0, 37, 0, 0, 0,
+ 21, 0, 0, 1, 21, 0,
+ 0, 1, 190, 24, 0, 1,
+ 31, 0, 4, 3, 58, 0,
+ 16, 0, 23, 0, 0, 0,
+ 31, 0, 4, 3, 42, 0,
+ 16, 0, 7, 0, 0, 0,
+ 167, 0, 0, 8, 18, 0,
+ 16, 0, 37, 0, 0, 0,
+ 10, 32, 2, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 3, 0,
+ 0, 0, 167, 0, 0, 9,
+ 34, 0, 16, 0, 37, 0,
+ 0, 0, 10, 0, 16, 0,
+ 12, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 3, 0,
+ 0, 0, 167, 0, 0, 9,
+ 66, 0, 16, 0, 37, 0,
+ 0, 0, 26, 0, 16, 0,
+ 12, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 3, 0,
+ 0, 0, 167, 0, 0, 9,
+ 130, 0, 16, 0, 37, 0,
+ 0, 0, 42, 0, 16, 0,
+ 12, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 3, 0,
+ 0, 0, 167, 0, 0, 9,
+ 18, 0, 16, 0, 38, 0,
+ 0, 0, 42, 0, 16, 0,
+ 3, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 3, 0,
+ 0, 0, 167, 0, 0, 9,
+ 34, 0, 16, 0, 38, 0,
+ 0, 0, 26, 0, 16, 0,
+ 8, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 3, 0,
+ 0, 0, 167, 0, 0, 9,
+ 66, 0, 16, 0, 38, 0,
+ 0, 0, 42, 0, 16, 0,
+ 9, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 3, 0,
+ 0, 0, 167, 0, 0, 9,
+ 130, 0, 16, 0, 38, 0,
+ 0, 0, 42, 0, 16, 0,
+ 8, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 240, 17, 0, 3, 0,
+ 0, 0, 60, 0, 0, 7,
+ 242, 0, 16, 0, 37, 0,
+ 0, 0, 70, 14, 16, 0,
+ 37, 0, 0, 0, 70, 14,
+ 16, 0, 38, 0, 0, 0,
+ 168, 0, 0, 8, 18, 240,
+ 17, 0, 3, 0, 0, 0,
+ 10, 32, 2, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 10, 0, 16, 0, 37, 0,
+ 0, 0, 168, 0, 0, 9,
+ 18, 240, 17, 0, 3, 0,
+ 0, 0, 10, 0, 16, 0,
+ 12, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 26, 0, 16, 0, 37, 0,
+ 0, 0, 168, 0, 0, 9,
+ 18, 240, 17, 0, 3, 0,
+ 0, 0, 26, 0, 16, 0,
+ 12, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 42, 0, 16, 0, 37, 0,
+ 0, 0, 168, 0, 0, 9,
+ 18, 240, 17, 0, 3, 0,
+ 0, 0, 42, 0, 16, 0,
+ 12, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 58, 0, 16, 0, 37, 0,
+ 0, 0, 21, 0, 0, 1,
+ 21, 0, 0, 1, 190, 24,
+ 0, 1, 31, 0, 4, 3,
+ 58, 0, 16, 0, 23, 0,
+ 0, 0, 31, 0, 4, 3,
+ 58, 0, 16, 0, 7, 0,
+ 0, 0, 167, 0, 0, 8,
+ 18, 0, 16, 0, 37, 0,
+ 0, 0, 10, 32, 2, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 167, 0,
+ 0, 9, 34, 0, 16, 0,
+ 37, 0, 0, 0, 10, 0,
+ 16, 0, 12, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 167, 0,
+ 0, 9, 66, 0, 16, 0,
+ 37, 0, 0, 0, 26, 0,
+ 16, 0, 12, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 167, 0,
+ 0, 9, 130, 0, 16, 0,
+ 37, 0, 0, 0, 42, 0,
+ 16, 0, 12, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 167, 0,
+ 0, 9, 18, 0, 16, 0,
+ 38, 0, 0, 0, 58, 0,
+ 16, 0, 3, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 167, 0,
+ 0, 9, 34, 0, 16, 0,
+ 38, 0, 0, 0, 58, 0,
+ 16, 0, 8, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 167, 0,
+ 0, 9, 66, 0, 16, 0,
+ 38, 0, 0, 0, 58, 0,
+ 16, 0, 9, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 167, 0,
+ 0, 9, 130, 0, 16, 0,
+ 38, 0, 0, 0, 10, 0,
+ 16, 0, 13, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 60, 0,
+ 0, 7, 242, 0, 16, 0,
+ 37, 0, 0, 0, 70, 14,
+ 16, 0, 37, 0, 0, 0,
+ 70, 14, 16, 0, 38, 0,
+ 0, 0, 168, 0, 0, 8,
+ 18, 240, 17, 0, 3, 0,
+ 0, 0, 10, 32, 2, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 10, 0, 16, 0,
+ 37, 0, 0, 0, 168, 0,
+ 0, 9, 18, 240, 17, 0,
+ 3, 0, 0, 0, 10, 0,
+ 16, 0, 12, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 26, 0, 16, 0,
+ 37, 0, 0, 0, 168, 0,
+ 0, 9, 18, 240, 17, 0,
+ 3, 0, 0, 0, 26, 0,
+ 16, 0, 12, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 42, 0, 16, 0,
+ 37, 0, 0, 0, 168, 0,
+ 0, 9, 18, 240, 17, 0,
+ 3, 0, 0, 0, 42, 0,
+ 16, 0, 12, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 58, 0, 16, 0,
+ 37, 0, 0, 0, 21, 0,
+ 0, 1, 21, 0, 0, 1,
+ 190, 24, 0, 1, 31, 0,
+ 4, 3, 58, 0, 16, 0,
+ 23, 0, 0, 0, 31, 0,
+ 4, 3, 58, 0, 16, 0,
+ 5, 0, 0, 0, 167, 0,
+ 0, 8, 18, 0, 16, 0,
+ 37, 0, 0, 0, 10, 32,
+ 2, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 167, 0, 0, 9, 34, 0,
+ 16, 0, 37, 0, 0, 0,
+ 10, 0, 16, 0, 12, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 167, 0, 0, 9, 66, 0,
+ 16, 0, 37, 0, 0, 0,
+ 26, 0, 16, 0, 12, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 167, 0, 0, 9, 130, 0,
+ 16, 0, 37, 0, 0, 0,
+ 42, 0, 16, 0, 12, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 167, 0, 0, 9, 18, 0,
+ 16, 0, 38, 0, 0, 0,
+ 10, 0, 16, 0, 6, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 167, 0, 0, 9, 34, 0,
+ 16, 0, 38, 0, 0, 0,
+ 26, 0, 16, 0, 13, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 167, 0, 0, 9, 66, 0,
+ 16, 0, 38, 0, 0, 0,
+ 10, 0, 16, 0, 14, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 167, 0, 0, 9, 130, 0,
+ 16, 0, 38, 0, 0, 0,
+ 42, 0, 16, 0, 13, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 60, 0, 0, 7, 242, 0,
+ 16, 0, 37, 0, 0, 0,
+ 70, 14, 16, 0, 37, 0,
+ 0, 0, 70, 14, 16, 0,
+ 38, 0, 0, 0, 168, 0,
+ 0, 8, 18, 240, 17, 0,
+ 3, 0, 0, 0, 10, 32,
+ 2, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 10, 0,
+ 16, 0, 37, 0, 0, 0,
+ 168, 0, 0, 9, 18, 240,
+ 17, 0, 3, 0, 0, 0,
+ 10, 0, 16, 0, 12, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 26, 0,
+ 16, 0, 37, 0, 0, 0,
+ 168, 0, 0, 9, 18, 240,
+ 17, 0, 3, 0, 0, 0,
+ 26, 0, 16, 0, 12, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 42, 0,
+ 16, 0, 37, 0, 0, 0,
+ 168, 0, 0, 9, 18, 240,
+ 17, 0, 3, 0, 0, 0,
+ 42, 0, 16, 0, 12, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 58, 0,
+ 16, 0, 37, 0, 0, 0,
+ 21, 0, 0, 1, 21, 0,
+ 0, 1, 190, 24, 0, 1,
+ 31, 0, 4, 3, 58, 0,
+ 16, 0, 23, 0, 0, 0,
+ 167, 0, 0, 9, 18, 0,
+ 16, 0, 37, 0, 0, 0,
+ 42, 0, 16, 0, 5, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 167, 0, 0, 9, 34, 0,
+ 16, 0, 37, 0, 0, 0,
+ 10, 0, 16, 0, 10, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 167, 0, 0, 9, 66, 0,
+ 16, 0, 37, 0, 0, 0,
+ 26, 0, 16, 0, 10, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 167, 0, 0, 9, 130, 0,
+ 16, 0, 37, 0, 0, 0,
+ 42, 0, 16, 0, 10, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 1, 0, 0, 10, 242, 0,
+ 16, 0, 38, 0, 0, 0,
+ 166, 10, 16, 0, 36, 0,
+ 0, 0, 2, 64, 0, 0,
+ 1, 0, 0, 0, 2, 0,
+ 0, 0, 4, 0, 0, 0,
+ 8, 0, 0, 0, 30, 0,
+ 0, 7, 130, 0, 16, 0,
+ 36, 0, 0, 0, 10, 0,
+ 16, 0, 38, 0, 0, 0,
+ 1, 64, 0, 0, 255, 255,
+ 255, 255, 87, 0, 0, 7,
+ 130, 0, 16, 0, 36, 0,
+ 0, 0, 10, 0, 16, 0,
+ 37, 0, 0, 0, 58, 0,
+ 16, 0, 36, 0, 0, 0,
+ 39, 0, 0, 10, 114, 0,
+ 16, 0, 38, 0, 0, 0,
+ 150, 7, 16, 0, 38, 0,
+ 0, 0, 2, 64, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 32, 0,
+ 0, 10, 114, 0, 16, 0,
+ 38, 0, 0, 0, 70, 2,
+ 16, 0, 38, 0, 0, 0,
+ 2, 64, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 87, 0, 0, 7,
+ 114, 0, 16, 0, 38, 0,
+ 0, 0, 150, 7, 16, 0,
+ 37, 0, 0, 0, 70, 2,
+ 16, 0, 38, 0, 0, 0,
+ 1, 0, 0, 7, 130, 0,
+ 16, 0, 36, 0, 0, 0,
+ 58, 0, 16, 0, 36, 0,
+ 0, 0, 10, 0, 16, 0,
+ 38, 0, 0, 0, 1, 0,
+ 0, 7, 130, 0, 16, 0,
+ 36, 0, 0, 0, 26, 0,
+ 16, 0, 38, 0, 0, 0,
+ 58, 0, 16, 0, 36, 0,
+ 0, 0, 1, 0, 0, 7,
+ 130, 0, 16, 0, 36, 0,
+ 0, 0, 42, 0, 16, 0,
+ 38, 0, 0, 0, 58, 0,
+ 16, 0, 36, 0, 0, 0,
+ 140, 0, 0, 11, 66, 0,
+ 16, 0, 36, 0, 0, 0,
+ 1, 64, 0, 0, 4, 0,
+ 0, 0, 1, 64, 0, 0,
+ 5, 0, 0, 0, 42, 0,
+ 16, 0, 36, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 30, 0, 0, 7,
+ 66, 0, 16, 0, 36, 0,
+ 0, 0, 26, 0, 16, 0,
+ 11, 0, 0, 0, 42, 0,
+ 16, 0, 36, 0, 0, 0,
+ 30, 0, 0, 7, 66, 0,
+ 16, 0, 36, 0, 0, 0,
+ 42, 0, 16, 0, 36, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 16, 0, 0, 167, 0,
+ 0, 9, 66, 0, 16, 0,
+ 36, 0, 0, 0, 42, 0,
+ 16, 0, 36, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 240, 17, 0,
+ 3, 0, 0, 0, 138, 0,
+ 0, 8, 130, 0, 16, 0,
+ 36, 0, 0, 0, 10, 32,
+ 2, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 58, 0,
+ 16, 0, 36, 0, 0, 0,
+ 134, 0, 0, 5, 130, 0,
+ 16, 0, 36, 0, 0, 0,
+ 58, 0, 16, 0, 36, 0,
+ 0, 0, 30, 0, 0, 7,
+ 66, 0, 16, 0, 36, 0,
+ 0, 0, 58, 0, 16, 0,
+ 36, 0, 0, 0, 42, 0,
+ 16, 0, 36, 0, 0, 0,
+ 18, 0, 0, 1, 54, 0,
+ 0, 8, 242, 0, 16, 0,
+ 37, 0, 0, 0, 2, 64,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 54, 0, 0, 5, 66, 0,
+ 16, 0, 36, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 21, 0, 0, 1,
+ 31, 0, 4, 3, 58, 0,
+ 16, 0, 32, 0, 0, 0,
+ 30, 0, 0, 7, 130, 0,
+ 16, 0, 32, 0, 0, 0,
+ 58, 0, 16, 0, 28, 0,
+ 0, 0, 42, 0, 16, 0,
+ 36, 0, 0, 0, 168, 0,
+ 0, 9, 18, 224, 17, 0,
+ 2, 0, 0, 0, 58, 0,
+ 16, 0, 32, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 26, 0, 16, 0,
+ 36, 0, 0, 0, 21, 0,
+ 0, 1, 190, 24, 0, 1,
+ 31, 0, 4, 3, 58, 0,
+ 16, 0, 23, 0, 0, 0,
+ 31, 0, 4, 3, 10, 0,
+ 16, 0, 7, 0, 0, 0,
+ 167, 0, 0, 9, 130, 0,
+ 16, 0, 32, 0, 0, 0,
+ 42, 0, 16, 0, 11, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 3, 0, 0, 0,
+ 87, 0, 0, 7, 98, 0,
+ 16, 0, 36, 0, 0, 0,
+ 166, 11, 16, 0, 2, 0,
+ 0, 0, 6, 1, 16, 0,
+ 37, 0, 0, 0, 1, 0,
+ 0, 7, 34, 0, 16, 0,
+ 36, 0, 0, 0, 42, 0,
+ 16, 0, 36, 0, 0, 0,
+ 26, 0, 16, 0, 36, 0,
+ 0, 0, 87, 0, 0, 7,
+ 194, 0, 16, 0, 36, 0,
+ 0, 0, 6, 4, 16, 0,
+ 5, 0, 0, 0, 166, 14,
+ 16, 0, 37, 0, 0, 0,
+ 1, 0, 0, 7, 34, 0,
+ 16, 0, 36, 0, 0, 0,
+ 42, 0, 16, 0, 36, 0,
+ 0, 0, 26, 0, 16, 0,
+ 36, 0, 0, 0, 1, 0,
+ 0, 7, 34, 0, 16, 0,
+ 36, 0, 0, 0, 58, 0,
+ 16, 0, 36, 0, 0, 0,
+ 26, 0, 16, 0, 36, 0,
+ 0, 0, 134, 0, 0, 5,
+ 34, 0, 16, 0, 36, 0,
+ 0, 0, 26, 0, 16, 0,
+ 36, 0, 0, 0, 30, 0,
+ 0, 7, 130, 0, 16, 0,
+ 32, 0, 0, 0, 58, 0,
+ 16, 0, 32, 0, 0, 0,
+ 26, 0, 16, 0, 36, 0,
+ 0, 0, 168, 0, 0, 9,
+ 18, 240, 17, 0, 3, 0,
+ 0, 0, 42, 0, 16, 0,
+ 11, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 58, 0, 16, 0, 32, 0,
+ 0, 0, 21, 0, 0, 1,
+ 21, 0, 0, 1, 21, 0,
+ 0, 1, 30, 0, 0, 7,
+ 130, 0, 16, 0, 25, 0,
+ 0, 0, 58, 0, 16, 0,
+ 25, 0, 0, 0, 1, 64,
+ 0, 0, 4, 0, 0, 0,
+ 54, 0, 0, 5, 130, 0,
+ 16, 0, 32, 0, 0, 0,
+ 58, 0, 16, 0, 28, 0,
+ 0, 0, 54, 0, 0, 5,
+ 130, 0, 16, 0, 28, 0,
+ 0, 0, 58, 0, 16, 0,
+ 27, 0, 0, 0, 54, 0,
+ 0, 5, 130, 0, 16, 0,
+ 27, 0, 0, 0, 58, 0,
+ 16, 0, 32, 0, 0, 0,
+ 22, 0, 0, 1, 54, 0,
+ 0, 4, 130, 0, 16, 0,
+ 19, 0, 0, 0, 10, 32,
+ 2, 0, 48, 0, 0, 1,
+ 33, 0, 0, 7, 18, 0,
+ 16, 0, 21, 0, 0, 0,
+ 58, 0, 16, 0, 19, 0,
+ 0, 0, 10, 0, 16, 0,
+ 19, 0, 0, 0, 3, 0,
+ 4, 3, 10, 0, 16, 0,
+ 21, 0, 0, 0, 30, 0,
+ 0, 7, 18, 0, 16, 0,
+ 21, 0, 0, 0, 58, 0,
+ 16, 0, 19, 0, 0, 0,
+ 26, 0, 16, 0, 25, 0,
+ 0, 0, 167, 0, 0, 139,
+ 2, 35, 0, 128, 131, 153,
+ 25, 0, 130, 0, 16, 0,
+ 22, 0, 0, 0, 10, 0,
+ 16, 0, 21, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 224, 17, 0,
+ 2, 0, 0, 0, 42, 0,
+ 0, 7, 130, 0, 16, 0,
+ 22, 0, 0, 0, 58, 0,
+ 16, 0, 22, 0, 0, 0,
+ 1, 64, 0, 0, 16, 0,
+ 0, 0, 30, 0, 0, 7,
+ 18, 0, 16, 0, 21, 0,
+ 0, 0, 10, 0, 16, 0,
+ 21, 0, 0, 0, 1, 64,
+ 0, 0, 255, 255, 255, 255,
+ 167, 0, 0, 139, 2, 35,
+ 0, 128, 131, 153, 25, 0,
+ 18, 0, 16, 0, 21, 0,
+ 0, 0, 10, 0, 16, 0,
+ 21, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 224, 17, 0, 2, 0,
+ 0, 0, 42, 0, 0, 7,
+ 18, 0, 16, 0, 21, 0,
+ 0, 0, 10, 0, 16, 0,
+ 21, 0, 0, 0, 1, 64,
+ 0, 0, 16, 0, 0, 0,
+ 30, 0, 0, 7, 130, 0,
+ 16, 0, 23, 0, 0, 0,
+ 58, 0, 16, 0, 22, 0,
+ 0, 0, 1, 64, 0, 0,
+ 255, 255, 255, 255, 55, 0,
+ 0, 9, 18, 0, 16, 0,
+ 21, 0, 0, 0, 58, 0,
+ 16, 0, 19, 0, 0, 0,
+ 10, 0, 16, 0, 21, 0,
+ 0, 0, 58, 0, 16, 0,
+ 23, 0, 0, 0, 39, 0,
+ 0, 7, 130, 0, 16, 0,
+ 23, 0, 0, 0, 10, 0,
+ 16, 0, 21, 0, 0, 0,
+ 58, 0, 16, 0, 22, 0,
+ 0, 0, 31, 0, 4, 3,
+ 58, 0, 16, 0, 23, 0,
+ 0, 0, 30, 0, 0, 7,
+ 130, 0, 16, 0, 22, 0,
+ 0, 0, 42, 0, 16, 0,
+ 19, 0, 0, 0, 58, 0,
+ 16, 0, 22, 0, 0, 0,
+ 168, 0, 0, 9, 18, 224,
+ 17, 0, 2, 0, 0, 0,
+ 58, 0, 16, 0, 22, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 58, 0,
+ 16, 0, 19, 0, 0, 0,
+ 30, 0, 0, 7, 18, 0,
+ 16, 0, 21, 0, 0, 0,
+ 42, 0, 16, 0, 19, 0,
+ 0, 0, 10, 0, 16, 0,
+ 21, 0, 0, 0, 30, 0,
+ 0, 7, 18, 0, 16, 0,
+ 21, 0, 0, 0, 10, 0,
+ 16, 0, 21, 0, 0, 0,
+ 1, 64, 0, 0, 1, 0,
+ 0, 0, 168, 0, 0, 9,
+ 18, 224, 17, 0, 2, 0,
+ 0, 0, 10, 0, 16, 0,
+ 21, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 58, 0, 16, 0, 19, 0,
+ 0, 0, 21, 0, 0, 1,
+ 30, 0, 0, 7, 130, 0,
+ 16, 0, 19, 0, 0, 0,
+ 58, 0, 16, 0, 19, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 4, 0, 0, 22, 0,
+ 0, 1, 165, 0, 0, 7,
+ 50, 0, 16, 0, 25, 0,
+ 0, 0, 1, 64, 0, 0,
+ 96, 0, 0, 0, 70, 240,
+ 17, 0, 0, 0, 0, 0,
+ 54, 0, 0, 4, 130, 0,
+ 16, 0, 19, 0, 0, 0,
+ 10, 32, 2, 0, 48, 0,
+ 0, 1, 33, 0, 0, 7,
+ 18, 0, 16, 0, 21, 0,
+ 0, 0, 58, 0, 16, 0,
+ 19, 0, 0, 0, 10, 0,
+ 16, 0, 19, 0, 0, 0,
+ 3, 0, 4, 3, 10, 0,
+ 16, 0, 21, 0, 0, 0,
+ 30, 0, 0, 7, 18, 0,
+ 16, 0, 21, 0, 0, 0,
+ 58, 0, 16, 0, 19, 0,
+ 0, 0, 26, 0, 16, 0,
+ 25, 0, 0, 0, 167, 0,
+ 0, 139, 2, 35, 0, 128,
+ 131, 153, 25, 0, 18, 0,
+ 16, 0, 21, 0, 0, 0,
+ 10, 0, 16, 0, 21, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 224,
+ 17, 0, 2, 0, 0, 0,
+ 1, 0, 0, 7, 18, 0,
+ 16, 0, 21, 0, 0, 0,
+ 10, 0, 16, 0, 21, 0,
+ 0, 0, 1, 64, 0, 0,
+ 255, 255, 0, 0, 30, 0,
+ 0, 7, 18, 0, 16, 0,
+ 21, 0, 0, 0, 10, 0,
+ 16, 0, 21, 0, 0, 0,
+ 10, 0, 16, 0, 25, 0,
+ 0, 0, 167, 0, 0, 139,
+ 2, 131, 0, 128, 131, 153,
+ 25, 0, 242, 0, 16, 0,
+ 37, 0, 0, 0, 10, 0,
+ 16, 0, 21, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 70, 238, 17, 0,
+ 1, 0, 0, 0, 30, 0,
+ 0, 7, 18, 0, 16, 0,
+ 21, 0, 0, 0, 26, 0,
+ 16, 0, 0, 0, 0, 0,
+ 58, 0, 16, 0, 19, 0,
+ 0, 0, 168, 0, 0, 9,
+ 242, 224, 17, 0, 0, 0,
+ 0, 0, 10, 0, 16, 0,
+ 21, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 70, 14, 16, 0, 37, 0,
+ 0, 0, 30, 0, 0, 7,
+ 130, 0, 16, 0, 19, 0,
+ 0, 0, 58, 0, 16, 0,
+ 19, 0, 0, 0, 1, 64,
+ 0, 0, 0, 4, 0, 0,
+ 22, 0, 0, 1, 190, 24,
+ 0, 1, 165, 0, 0, 7,
+ 130, 0, 16, 0, 19, 0,
+ 0, 0, 1, 64, 0, 0,
+ 84, 0, 0, 0, 6, 240,
+ 17, 0, 0, 0, 0, 0,
+ 56, 0, 0, 7, 18, 0,
+ 16, 0, 21, 0, 0, 0,
+ 58, 0, 16, 0, 19, 0,
+ 0, 0, 58, 0, 16, 0,
+ 19, 0, 0, 0, 165, 0,
+ 0, 7, 130, 0, 16, 0,
+ 22, 0, 0, 0, 1, 64,
+ 0, 0, 100, 0, 0, 0,
+ 6, 240, 17, 0, 0, 0,
+ 0, 0, 165, 0, 0, 7,
+ 130, 0, 16, 0, 23, 0,
+ 0, 0, 1, 64, 0, 0,
+ 112, 0, 0, 0, 6, 240,
+ 17, 0, 1, 0, 0, 0,
+ 54, 0, 0, 4, 18, 0,
+ 16, 0, 25, 0, 0, 0,
+ 10, 32, 2, 0, 48, 0,
+ 0, 1, 33, 0, 0, 7,
+ 34, 0, 16, 0, 25, 0,
+ 0, 0, 10, 0, 16, 0,
+ 25, 0, 0, 0, 10, 0,
+ 16, 0, 19, 0, 0, 0,
+ 3, 0, 4, 3, 26, 0,
+ 16, 0, 25, 0, 0, 0,
+ 30, 0, 0, 7, 34, 0,
+ 16, 0, 25, 0, 0, 0,
+ 58, 0, 16, 0, 22, 0,
+ 0, 0, 10, 0, 16, 0,
+ 25, 0, 0, 0, 167, 0,
+ 0, 139, 2, 35, 0, 128,
+ 131, 153, 25, 0, 34, 0,
+ 16, 0, 25, 0, 0, 0,
+ 26, 0, 16, 0, 25, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 224,
+ 17, 0, 2, 0, 0, 0,
+ 1, 0, 0, 7, 34, 0,
+ 16, 0, 25, 0, 0, 0,
+ 26, 0, 16, 0, 25, 0,
+ 0, 0, 1, 64, 0, 0,
+ 255, 255, 0, 0, 30, 0,
+ 0, 7, 66, 0, 16, 0,
+ 25, 0, 0, 0, 26, 0,
+ 16, 0, 0, 0, 0, 0,
+ 10, 0, 16, 0, 25, 0,
+ 0, 0, 167, 0, 0, 139,
+ 2, 131, 0, 128, 131, 153,
+ 25, 0, 242, 0, 16, 0,
+ 37, 0, 0, 0, 42, 0,
+ 16, 0, 25, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 70, 238, 17, 0,
+ 0, 0, 0, 0, 30, 0,
+ 0, 7, 66, 0, 16, 0,
+ 25, 0, 0, 0, 58, 0,
+ 16, 0, 23, 0, 0, 0,
+ 26, 0, 16, 0, 25, 0,
+ 0, 0, 167, 0, 0, 139,
+ 2, 131, 0, 128, 131, 153,
+ 25, 0, 226, 0, 16, 0,
+ 36, 0, 0, 0, 42, 0,
+ 16, 0, 25, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 121, 16, 0,
+ 14, 0, 0, 0, 17, 0,
+ 0, 8, 66, 0, 16, 0,
+ 25, 0, 0, 0, 70, 14,
+ 16, 0, 37, 0, 0, 0,
+ 70, 158, 144, 0, 26, 0,
+ 16, 0, 23, 0, 0, 0,
+ 50, 0, 0, 9, 66, 0,
+ 16, 0, 25, 0, 0, 0,
+ 42, 0, 16, 0, 25, 0,
+ 0, 0, 10, 0, 16, 0,
+ 20, 0, 0, 0, 42, 0,
+ 16, 0, 20, 0, 0, 0,
+ 51, 0, 0, 7, 66, 0,
+ 16, 0, 25, 0, 0, 0,
+ 42, 0, 16, 0, 25, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 255, 66, 52, 0,
+ 0, 7, 66, 0, 16, 0,
+ 25, 0, 0, 0, 42, 0,
+ 16, 0, 25, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 17, 0, 0, 8,
+ 130, 0, 16, 0, 25, 0,
+ 0, 0, 70, 14, 16, 0,
+ 37, 0, 0, 0, 70, 158,
+ 144, 0, 42, 0, 16, 0,
+ 23, 0, 0, 0, 50, 0,
+ 0, 9, 130, 0, 16, 0,
+ 25, 0, 0, 0, 58, 0,
+ 16, 0, 25, 0, 0, 0,
+ 26, 0, 16, 0, 20, 0,
+ 0, 0, 58, 0, 16, 0,
+ 20, 0, 0, 0, 51, 0,
+ 0, 7, 130, 0, 16, 0,
+ 25, 0, 0, 0, 58, 0,
+ 16, 0, 25, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 255, 66, 52, 0, 0, 7,
+ 130, 0, 16, 0, 25, 0,
+ 0, 0, 58, 0, 16, 0,
+ 25, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 27, 0, 0, 5, 194, 0,
+ 16, 0, 25, 0, 0, 0,
+ 166, 14, 16, 0, 25, 0,
+ 0, 0, 35, 0, 0, 9,
+ 66, 0, 16, 0, 25, 0,
+ 0, 0, 58, 0, 16, 0,
+ 25, 0, 0, 0, 1, 64,
+ 0, 0, 128, 0, 0, 0,
+ 42, 0, 16, 0, 25, 0,
+ 0, 0, 30, 0, 0, 7,
+ 130, 0, 16, 0, 25, 0,
+ 0, 0, 42, 0, 16, 0,
+ 25, 0, 0, 0, 1, 64,
+ 0, 0, 0, 1, 0, 0,
+ 0, 0, 0, 7, 130, 0,
+ 16, 0, 27, 0, 0, 0,
+ 58, 0, 16, 0, 37, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 52, 54, 0,
+ 0, 8, 114, 0, 16, 0,
+ 38, 0, 0, 0, 2, 64,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 54, 0, 0, 5, 130, 0,
+ 16, 0, 28, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 52, 54, 0, 0, 5,
+ 130, 0, 16, 0, 32, 0,
+ 0, 0, 42, 0, 16, 0,
+ 25, 0, 0, 0, 48, 0,
+ 0, 1, 34, 0, 0, 7,
+ 130, 0, 16, 0, 38, 0,
+ 0, 0, 58, 0, 16, 0,
+ 25, 0, 0, 0, 58, 0,
+ 16, 0, 32, 0, 0, 0,
+ 3, 0, 4, 3, 58, 0,
+ 16, 0, 38, 0, 0, 0,
+ 30, 0, 0, 7, 130, 0,
+ 16, 0, 38, 0, 0, 0,
+ 42, 0, 16, 0, 19, 0,
+ 0, 0, 58, 0, 16, 0,
+ 32, 0, 0, 0, 167, 0,
+ 0, 139, 2, 35, 0, 128,
+ 131, 153, 25, 0, 18, 0,
+ 16, 0, 39, 0, 0, 0,
+ 58, 0, 16, 0, 38, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 224,
+ 17, 0, 2, 0, 0, 0,
+ 30, 0, 0, 10, 226, 0,
+ 16, 0, 39, 0, 0, 0,
+ 246, 15, 16, 0, 38, 0,
+ 0, 0, 2, 64, 0, 0,
+ 0, 0, 0, 0, 1, 0,
+ 0, 0, 2, 0, 0, 0,
+ 3, 0, 0, 0, 167, 0,
+ 0, 139, 2, 35, 0, 128,
+ 131, 153, 25, 0, 130, 0,
+ 16, 0, 38, 0, 0, 0,
+ 26, 0, 16, 0, 39, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 224,
+ 17, 0, 2, 0, 0, 0,
+ 167, 0, 0, 139, 2, 35,
+ 0, 128, 131, 153, 25, 0,
+ 34, 0, 16, 0, 39, 0,
+ 0, 0, 42, 0, 16, 0,
+ 39, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 224, 17, 0, 2, 0,
+ 0, 0, 167, 0, 0, 139,
+ 2, 35, 0, 128, 131, 153,
+ 25, 0, 66, 0, 16, 0,
+ 39, 0, 0, 0, 58, 0,
+ 16, 0, 39, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 6, 224, 17, 0,
+ 2, 0, 0, 0, 84, 0,
+ 0, 7, 18, 0, 16, 0,
+ 39, 0, 0, 0, 58, 0,
+ 16, 0, 38, 0, 0, 0,
+ 10, 0, 16, 0, 39, 0,
+ 0, 0, 84, 0, 0, 7,
+ 18, 0, 16, 0, 39, 0,
+ 0, 0, 26, 0, 16, 0,
+ 39, 0, 0, 0, 10, 0,
+ 16, 0, 39, 0, 0, 0,
+ 36, 0, 0, 7, 130, 0,
+ 16, 0, 38, 0, 0, 0,
+ 58, 0, 16, 0, 38, 0,
+ 0, 0, 26, 0, 16, 0,
+ 39, 0, 0, 0, 36, 0,
+ 0, 7, 130, 0, 16, 0,
+ 38, 0, 0, 0, 42, 0,
+ 16, 0, 39, 0, 0, 0,
+ 58, 0, 16, 0, 38, 0,
+ 0, 0, 36, 0, 0, 7,
+ 130, 0, 16, 0, 38, 0,
+ 0, 0, 58, 0, 16, 0,
+ 38, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 54, 0, 0, 5, 226, 0,
+ 16, 0, 39, 0, 0, 0,
+ 6, 9, 16, 0, 38, 0,
+ 0, 0, 54, 0, 0, 5,
+ 18, 0, 16, 0, 40, 0,
+ 0, 0, 58, 0, 16, 0,
+ 28, 0, 0, 0, 54, 0,
+ 0, 5, 34, 0, 16, 0,
+ 40, 0, 0, 0, 10, 0,
+ 16, 0, 39, 0, 0, 0,
+ 48, 0, 0, 1, 80, 0,
+ 0, 7, 66, 0, 16, 0,
+ 40, 0, 0, 0, 26, 0,
+ 16, 0, 40, 0, 0, 0,
+ 58, 0, 16, 0, 38, 0,
+ 0, 0, 3, 0, 4, 3,
+ 42, 0, 16, 0, 40, 0,
+ 0, 0, 39, 0, 0, 7,
+ 66, 0, 16, 0, 40, 0,
+ 0, 0, 10, 0, 16, 0,
+ 25, 0, 0, 0, 26, 0,
+ 16, 0, 40, 0, 0, 0,
+ 31, 0, 4, 3, 42, 0,
+ 16, 0, 40, 0, 0, 0,
+ 30, 0, 0, 7, 66, 0,
+ 16, 0, 40, 0, 0, 0,
+ 26, 0, 16, 0, 0, 0,
+ 0, 0, 26, 0, 16, 0,
+ 40, 0, 0, 0, 167, 0,
+ 0, 139, 2, 131, 0, 128,
+ 131, 153, 25, 0, 242, 0,
+ 16, 0, 41, 0, 0, 0,
+ 42, 0, 16, 0, 40, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 70, 238,
+ 17, 0, 0, 0, 0, 0,
+ 0, 0, 0, 8, 114, 0,
+ 16, 0, 41, 0, 0, 0,
+ 70, 2, 16, 0, 37, 0,
+ 0, 0, 70, 2, 16, 128,
+ 65, 0, 0, 0, 41, 0,
+ 0, 0, 16, 0, 0, 7,
+ 66, 0, 16, 0, 40, 0,
+ 0, 0, 70, 2, 16, 0,
+ 41, 0, 0, 0, 70, 2,
+ 16, 0, 41, 0, 0, 0,
+ 49, 0, 0, 7, 130, 0,
+ 16, 0, 40, 0, 0, 0,
+ 10, 0, 16, 0, 21, 0,
+ 0, 0, 42, 0, 16, 0,
+ 40, 0, 0, 0, 31, 0,
+ 4, 3, 58, 0, 16, 0,
+ 40, 0, 0, 0, 30, 0,
+ 0, 7, 130, 0, 16, 0,
+ 40, 0, 0, 0, 26, 0,
+ 16, 0, 40, 0, 0, 0,
+ 1, 64, 0, 0, 1, 0,
+ 0, 0, 54, 0, 0, 5,
+ 34, 0, 16, 0, 40, 0,
+ 0, 0, 58, 0, 16, 0,
+ 40, 0, 0, 0, 7, 0,
+ 0, 1, 21, 0, 0, 1,
+ 165, 0, 0, 7, 130, 0,
+ 16, 0, 40, 0, 0, 0,
+ 1, 64, 0, 0, 112, 0,
+ 0, 0, 6, 240, 17, 0,
+ 1, 0, 0, 0, 39, 0,
+ 0, 7, 18, 0, 16, 0,
+ 42, 0, 0, 0, 58, 0,
+ 16, 0, 40, 0, 0, 0,
+ 1, 64, 0, 0, 255, 255,
+ 255, 255, 31, 0, 4, 3,
+ 10, 0, 16, 0, 42, 0,
+ 0, 0, 165, 0, 0, 7,
+ 18, 0, 16, 0, 42, 0,
+ 0, 0, 1, 64, 0, 0,
+ 100, 0, 0, 0, 6, 240,
+ 17, 0, 0, 0, 0, 0,
+ 30, 0, 0, 7, 18, 0,
+ 16, 0, 42, 0, 0, 0,
+ 26, 0, 16, 0, 40, 0,
+ 0, 0, 10, 0, 16, 0,
+ 42, 0, 0, 0, 167, 0,
+ 0, 139, 2, 35, 0, 128,
+ 131, 153, 25, 0, 18, 0,
+ 16, 0, 42, 0, 0, 0,
+ 10, 0, 16, 0, 42, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 224,
+ 17, 0, 2, 0, 0, 0,
+ 1, 0, 0, 7, 18, 0,
+ 16, 0, 42, 0, 0, 0,
+ 10, 0, 16, 0, 42, 0,
+ 0, 0, 1, 64, 0, 0,
+ 255, 255, 0, 0, 30, 0,
+ 0, 7, 130, 0, 16, 0,
+ 40, 0, 0, 0, 58, 0,
+ 16, 0, 40, 0, 0, 0,
+ 10, 0, 16, 0, 42, 0,
+ 0, 0, 167, 0, 0, 139,
+ 2, 131, 0, 128, 131, 153,
+ 25, 0, 114, 0, 16, 0,
+ 42, 0, 0, 0, 58, 0,
+ 16, 0, 40, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 70, 114, 16, 0,
+ 14, 0, 0, 0, 0, 0,
+ 0, 8, 114, 0, 16, 0,
+ 42, 0, 0, 0, 150, 7,
+ 16, 0, 36, 0, 0, 0,
+ 70, 2, 16, 128, 65, 0,
+ 0, 0, 42, 0, 0, 0,
+ 16, 0, 0, 7, 130, 0,
+ 16, 0, 40, 0, 0, 0,
+ 70, 2, 16, 0, 42, 0,
+ 0, 0, 70, 2, 16, 0,
+ 42, 0, 0, 0, 29, 0,
+ 0, 7, 18, 0, 16, 0,
+ 42, 0, 0, 0, 10, 0,
+ 16, 0, 21, 0, 0, 0,
+ 58, 0, 16, 0, 40, 0,
+ 0, 0, 31, 0, 4, 3,
+ 10, 0, 16, 0, 42, 0,
+ 0, 0, 30, 0, 0, 7,
+ 18, 0, 16, 0, 42, 0,
+ 0, 0, 26, 0, 16, 0,
+ 40, 0, 0, 0, 1, 64,
+ 0, 0, 1, 0, 0, 0,
+ 54, 0, 0, 5, 34, 0,
+ 16, 0, 40, 0, 0, 0,
+ 10, 0, 16, 0, 42, 0,
+ 0, 0, 7, 0, 0, 1,
+ 21, 0, 0, 1, 0, 0,
+ 0, 7, 130, 0, 16, 0,
+ 40, 0, 0, 0, 58, 0,
+ 16, 0, 40, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 52, 14, 0, 0, 7,
+ 130, 0, 16, 0, 40, 0,
+ 0, 0, 10, 0, 16, 0,
+ 21, 0, 0, 0, 58, 0,
+ 16, 0, 40, 0, 0, 0,
+ 68, 0, 0, 5, 130, 0,
+ 16, 0, 40, 0, 0, 0,
+ 58, 0, 16, 0, 40, 0,
+ 0, 0, 0, 0, 0, 7,
+ 130, 0, 16, 0, 40, 0,
+ 0, 0, 58, 0, 16, 0,
+ 40, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 128, 191,
+ 51, 0, 0, 7, 130, 0,
+ 16, 0, 40, 0, 0, 0,
+ 58, 0, 16, 0, 40, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 128, 63, 18, 0,
+ 0, 1, 54, 0, 0, 5,
+ 130, 0, 16, 0, 40, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 128, 63, 21, 0,
+ 0, 1, 0, 0, 0, 7,
+ 66, 0, 16, 0, 40, 0,
+ 0, 0, 42, 0, 16, 0,
+ 40, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 52,
+ 68, 0, 0, 5, 66, 0,
+ 16, 0, 40, 0, 0, 0,
+ 42, 0, 16, 0, 40, 0,
+ 0, 0, 50, 0, 0, 9,
+ 66, 0, 16, 0, 40, 0,
+ 0, 0, 58, 0, 16, 0,
+ 19, 0, 0, 0, 42, 0,
+ 16, 0, 40, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 128, 191, 52, 0, 0, 7,
+ 66, 0, 16, 0, 40, 0,
+ 0, 0, 42, 0, 16, 0,
+ 40, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 56, 0, 0, 7, 18, 0,
+ 16, 0, 42, 0, 0, 0,
+ 42, 0, 16, 0, 40, 0,
+ 0, 0, 42, 0, 16, 0,
+ 40, 0, 0, 0, 56, 0,
+ 0, 7, 130, 0, 16, 0,
+ 40, 0, 0, 0, 58, 0,
+ 16, 0, 40, 0, 0, 0,
+ 10, 0, 16, 0, 42, 0,
+ 0, 0, 0, 0, 0, 7,
+ 130, 0, 16, 0, 41, 0,
+ 0, 0, 58, 0, 16, 0,
+ 27, 0, 0, 0, 58, 0,
+ 16, 0, 41, 0, 0, 0,
+ 14, 0, 0, 7, 130, 0,
+ 16, 0, 40, 0, 0, 0,
+ 58, 0, 16, 0, 40, 0,
+ 0, 0, 58, 0, 16, 0,
+ 41, 0, 0, 0, 50, 0,
+ 0, 9, 226, 0, 16, 0,
+ 39, 0, 0, 0, 246, 15,
+ 16, 0, 40, 0, 0, 0,
+ 6, 9, 16, 0, 41, 0,
+ 0, 0, 86, 14, 16, 0,
+ 39, 0, 0, 0, 0, 0,
+ 0, 7, 18, 0, 16, 0,
+ 40, 0, 0, 0, 42, 0,
+ 16, 0, 40, 0, 0, 0,
+ 10, 0, 16, 0, 40, 0,
+ 0, 0, 21, 0, 0, 1,
+ 30, 0, 0, 7, 34, 0,
+ 16, 0, 40, 0, 0, 0,
+ 26, 0, 16, 0, 40, 0,
+ 0, 0, 1, 64, 0, 0,
+ 1, 0, 0, 0, 22, 0,
+ 0, 1, 54, 0, 0, 5,
+ 114, 0, 16, 0, 38, 0,
+ 0, 0, 150, 7, 16, 0,
+ 39, 0, 0, 0, 54, 0,
+ 0, 5, 130, 0, 16, 0,
+ 28, 0, 0, 0, 10, 0,
+ 16, 0, 40, 0, 0, 0,
+ 30, 0, 0, 7, 130, 0,
+ 16, 0, 32, 0, 0, 0,
+ 58, 0, 16, 0, 32, 0,
+ 0, 0, 1, 64, 0, 0,
+ 128, 0, 0, 0, 22, 0,
+ 0, 1, 165, 0, 0, 7,
+ 66, 0, 16, 0, 25, 0,
+ 0, 0, 1, 64, 0, 0,
+ 72, 0, 0, 0, 6, 240,
+ 17, 0, 1, 0, 0, 0,
+ 56, 0, 0, 7, 66, 0,
+ 16, 0, 25, 0, 0, 0,
+ 58, 0, 16, 0, 37, 0,
+ 0, 0, 42, 0, 16, 0,
+ 25, 0, 0, 0, 14, 0,
+ 0, 7, 66, 0, 16, 0,
+ 25, 0, 0, 0, 42, 0,
+ 16, 0, 25, 0, 0, 0,
+ 58, 0, 16, 0, 28, 0,
+ 0, 0, 165, 0, 0, 7,
+ 130, 0, 16, 0, 25, 0,
+ 0, 0, 1, 64, 0, 0,
+ 96, 0, 0, 0, 6, 240,
+ 17, 0, 0, 0, 0, 0,
+ 30, 0, 0, 7, 34, 0,
+ 16, 0, 25, 0, 0, 0,
+ 26, 0, 16, 0, 25, 0,
+ 0, 0, 58, 0, 16, 0,
+ 25, 0, 0, 0, 167, 0,
+ 0, 139, 2, 131, 0, 128,
+ 131, 153, 25, 0, 226, 0,
+ 16, 0, 36, 0, 0, 0,
+ 26, 0, 16, 0, 25, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 233,
+ 17, 0, 1, 0, 0, 0,
+ 50, 0, 0, 9, 226, 0,
+ 16, 0, 36, 0, 0, 0,
+ 6, 9, 16, 0, 38, 0,
+ 0, 0, 166, 10, 16, 0,
+ 25, 0, 0, 0, 86, 14,
+ 16, 0, 36, 0, 0, 0,
+ 168, 0, 0, 9, 114, 224,
+ 17, 0, 1, 0, 0, 0,
+ 26, 0, 16, 0, 25, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 150, 7,
+ 16, 0, 36, 0, 0, 0,
+ 30, 0, 0, 7, 18, 0,
+ 16, 0, 25, 0, 0, 0,
+ 10, 0, 16, 0, 25, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 4, 0, 0, 22, 0,
+ 0, 1, 190, 24, 0, 1,
+ 165, 0, 0, 7, 130, 0,
+ 16, 0, 19, 0, 0, 0,
+ 1, 64, 0, 0, 96, 0,
+ 0, 0, 6, 240, 17, 0,
+ 0, 0, 0, 0, 54, 0,
+ 0, 4, 18, 0, 16, 0,
+ 20, 0, 0, 0, 10, 32,
+ 2, 0, 48, 0, 0, 1,
+ 33, 0, 0, 7, 34, 0,
+ 16, 0, 20, 0, 0, 0,
+ 10, 0, 16, 0, 20, 0,
+ 0, 0, 26, 0, 16, 0,
+ 19, 0, 0, 0, 3, 0,
+ 4, 3, 26, 0, 16, 0,
+ 20, 0, 0, 0, 30, 0,
+ 0, 7, 34, 0, 16, 0,
+ 20, 0, 0, 0, 58, 0,
+ 16, 0, 19, 0, 0, 0,
+ 10, 0, 16, 0, 20, 0,
+ 0, 0, 167, 0, 0, 139,
+ 2, 131, 0, 128, 131, 153,
+ 25, 0, 242, 0, 16, 0,
+ 25, 0, 0, 0, 26, 0,
+ 16, 0, 20, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 70, 238, 17, 0,
+ 1, 0, 0, 0, 30, 0,
+ 0, 7, 34, 0, 16, 0,
+ 20, 0, 0, 0, 26, 0,
+ 16, 0, 0, 0, 0, 0,
+ 10, 0, 16, 0, 20, 0,
+ 0, 0, 168, 0, 0, 9,
+ 242, 224, 17, 0, 0, 0,
+ 0, 0, 26, 0, 16, 0,
+ 20, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 70, 14, 16, 0, 25, 0,
+ 0, 0, 30, 0, 0, 7,
+ 18, 0, 16, 0, 20, 0,
+ 0, 0, 10, 0, 16, 0,
+ 20, 0, 0, 0, 1, 64,
+ 0, 0, 0, 4, 0, 0,
+ 22, 0, 0, 1, 165, 0,
+ 0, 7, 34, 0, 16, 0,
+ 19, 0, 0, 0, 1, 64,
+ 0, 0, 100, 0, 0, 0,
+ 6, 240, 17, 0, 0, 0,
+ 0, 0, 54, 0, 0, 4,
+ 130, 0, 16, 0, 19, 0,
+ 0, 0, 10, 32, 2, 0,
+ 48, 0, 0, 1, 33, 0,
+ 0, 7, 18, 0, 16, 0,
+ 20, 0, 0, 0, 58, 0,
+ 16, 0, 19, 0, 0, 0,
+ 10, 0, 16, 0, 19, 0,
+ 0, 0, 3, 0, 4, 3,
+ 10, 0, 16, 0, 20, 0,
+ 0, 0, 30, 0, 0, 7,
+ 18, 0, 16, 0, 20, 0,
+ 0, 0, 58, 0, 16, 0,
+ 19, 0, 0, 0, 26, 0,
+ 16, 0, 19, 0, 0, 0,
+ 167, 0, 0, 139, 2, 35,
+ 0, 128, 131, 153, 25, 0,
+ 18, 0, 16, 0, 20, 0,
+ 0, 0, 10, 0, 16, 0,
+ 20, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 224, 17, 0, 2, 0,
+ 0, 0, 85, 0, 0, 7,
+ 18, 0, 16, 0, 20, 0,
+ 0, 0, 10, 0, 16, 0,
+ 20, 0, 0, 0, 1, 64,
+ 0, 0, 16, 0, 0, 0,
+ 30, 0, 0, 7, 18, 0,
+ 16, 0, 20, 0, 0, 0,
+ 42, 0, 16, 0, 19, 0,
+ 0, 0, 10, 0, 16, 0,
+ 20, 0, 0, 0, 168, 0,
+ 0, 9, 18, 224, 17, 0,
+ 2, 0, 0, 0, 10, 0,
+ 16, 0, 20, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 1, 64, 0, 0,
+ 255, 255, 255, 255, 30, 0,
+ 0, 7, 34, 0, 16, 0,
+ 20, 0, 0, 0, 10, 0,
+ 16, 0, 20, 0, 0, 0,
+ 1, 64, 0, 0, 1, 0,
+ 0, 0, 168, 0, 0, 9,
+ 18, 224, 17, 0, 2, 0,
+ 0, 0, 26, 0, 16, 0,
+ 20, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 1, 64, 0, 0, 255, 255,
+ 255, 255, 30, 0, 0, 7,
+ 130, 0, 16, 0, 19, 0,
+ 0, 0, 58, 0, 16, 0,
+ 19, 0, 0, 0, 1, 64,
+ 0, 0, 0, 4, 0, 0,
+ 22, 0, 0, 1, 21, 0,
+ 0, 1, 31, 0, 0, 2,
+ 10, 32, 2, 0, 165, 0,
+ 0, 7, 18, 0, 16, 0,
+ 19, 0, 0, 0, 1, 64,
+ 0, 0, 8, 0, 0, 0,
+ 6, 240, 17, 0, 1, 0,
+ 0, 0, 56, 0, 0, 7,
+ 18, 0, 16, 0, 19, 0,
+ 0, 0, 10, 0, 16, 0,
+ 19, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 122, 68,
+ 28, 0, 0, 5, 18, 0,
+ 16, 0, 19, 0, 0, 0,
+ 10, 0, 16, 0, 19, 0,
+ 0, 0, 83, 0, 0, 7,
+ 18, 0, 16, 0, 19, 0,
+ 0, 0, 10, 0, 16, 0,
+ 19, 0, 0, 0, 1, 64,
+ 0, 0, 1, 0, 0, 0,
+ 165, 0, 0, 7, 34, 0,
+ 16, 0, 19, 0, 0, 0,
+ 1, 64, 0, 0, 104, 0,
+ 0, 0, 6, 240, 17, 0,
+ 1, 0, 0, 0, 30, 0,
+ 0, 7, 18, 0, 16, 0,
+ 19, 0, 0, 0, 10, 0,
+ 16, 0, 19, 0, 0, 0,
+ 26, 0, 16, 0, 19, 0,
+ 0, 0, 166, 0, 0, 7,
+ 18, 240, 17, 0, 1, 0,
+ 0, 0, 1, 64, 0, 0,
+ 104, 0, 0, 0, 10, 0,
+ 16, 0, 19, 0, 0, 0,
+ 21, 0, 0, 1, 190, 24,
+ 0, 1, 165, 0, 0, 7,
+ 18, 0, 16, 0, 19, 0,
+ 0, 0, 1, 64, 0, 0,
+ 104, 0, 0, 0, 6, 240,
+ 17, 0, 1, 0, 0, 0,
+ 165, 0, 0, 7, 34, 0,
+ 16, 0, 19, 0, 0, 0,
+ 1, 64, 0, 0, 104, 0,
+ 0, 0, 6, 240, 17, 0,
+ 0, 0, 0, 0, 80, 0,
+ 0, 7, 18, 0, 16, 0,
+ 19, 0, 0, 0, 10, 0,
+ 16, 0, 19, 0, 0, 0,
+ 26, 0, 16, 0, 19, 0,
+ 0, 0, 31, 0, 4, 3,
+ 10, 0, 16, 0, 19, 0,
+ 0, 0, 165, 0, 0, 7,
+ 18, 0, 16, 0, 19, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 6, 240,
+ 17, 0, 0, 0, 0, 0,
+ 54, 0, 0, 5, 34, 0,
+ 16, 0, 19, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 54, 0, 0, 4,
+ 66, 0, 16, 0, 19, 0,
+ 0, 0, 10, 32, 2, 0,
+ 48, 0, 0, 1, 80, 0,
+ 0, 7, 130, 0, 16, 0,
+ 19, 0, 0, 0, 42, 0,
+ 16, 0, 19, 0, 0, 0,
+ 10, 0, 16, 0, 19, 0,
+ 0, 0, 3, 0, 4, 3,
+ 58, 0, 16, 0, 19, 0,
+ 0, 0, 30, 0, 0, 7,
+ 130, 0, 16, 0, 19, 0,
+ 0, 0, 26, 0, 16, 0,
+ 0, 0, 0, 0, 42, 0,
+ 16, 0, 19, 0, 0, 0,
+ 167, 0, 0, 139, 2, 131,
+ 0, 128, 131, 153, 25, 0,
+ 114, 0, 16, 0, 20, 0,
+ 0, 0, 58, 0, 16, 0,
+ 19, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 70, 226, 17, 0, 0, 0,
+ 0, 0, 30, 0, 0, 7,
+ 130, 0, 16, 0, 19, 0,
+ 0, 0, 10, 0, 16, 0,
+ 0, 0, 0, 0, 42, 0,
+ 16, 0, 19, 0, 0, 0,
+ 167, 0, 0, 139, 2, 131,
+ 0, 128, 131, 153, 25, 0,
+ 226, 0, 16, 0, 23, 0,
+ 0, 0, 58, 0, 16, 0,
+ 19, 0, 0, 0, 1, 64,
+ 0, 0, 0, 0, 0, 0,
+ 6, 233, 17, 0, 0, 0,
+ 0, 0, 0, 0, 0, 8,
+ 114, 0, 16, 0, 20, 0,
+ 0, 0, 70, 2, 16, 0,
+ 20, 0, 0, 0, 150, 7,
+ 16, 128, 65, 0, 0, 0,
+ 23, 0, 0, 0, 52, 0,
+ 0, 9, 130, 0, 16, 0,
+ 19, 0, 0, 0, 26, 0,
+ 16, 128, 129, 0, 0, 0,
+ 20, 0, 0, 0, 10, 0,
+ 16, 128, 129, 0, 0, 0,
+ 20, 0, 0, 0, 52, 0,
+ 0, 8, 130, 0, 16, 0,
+ 19, 0, 0, 0, 42, 0,
+ 16, 128, 129, 0, 0, 0,
+ 20, 0, 0, 0, 58, 0,
+ 16, 0, 19, 0, 0, 0,
+ 52, 0, 0, 7, 34, 0,
+ 16, 0, 19, 0, 0, 0,
+ 26, 0, 16, 0, 19, 0,
+ 0, 0, 58, 0, 16, 0,
+ 19, 0, 0, 0, 30, 0,
+ 0, 7, 66, 0, 16, 0,
+ 19, 0, 0, 0, 42, 0,
+ 16, 0, 19, 0, 0, 0,
+ 1, 64, 0, 0, 0, 4,
+ 0, 0, 22, 0, 0, 1,
+ 31, 0, 0, 2, 10, 32,
+ 2, 0, 165, 0, 0, 7,
+ 18, 0, 16, 0, 19, 0,
+ 0, 0, 1, 64, 0, 0,
+ 100, 0, 0, 0, 6, 240,
+ 17, 0, 1, 0, 0, 0,
+ 30, 0, 0, 7, 18, 0,
+ 16, 0, 19, 0, 0, 0,
+ 10, 0, 16, 0, 19, 0,
+ 0, 0, 1, 64, 0, 0,
+ 1, 0, 0, 0, 166, 0,
+ 0, 7, 18, 240, 17, 0,
+ 1, 0, 0, 0, 1, 64,
+ 0, 0, 100, 0, 0, 0,
+ 10, 0, 16, 0, 19, 0,
+ 0, 0, 165, 0, 0, 7,
+ 18, 0, 16, 0, 19, 0,
+ 0, 0, 1, 64, 0, 0,
+ 104, 0, 0, 0, 6, 240,
+ 17, 0, 0, 0, 0, 0,
+ 165, 0, 0, 7, 66, 0,
+ 16, 0, 19, 0, 0, 0,
+ 1, 64, 0, 0, 104, 0,
+ 0, 0, 6, 240, 17, 0,
+ 1, 0, 0, 0, 30, 0,
+ 0, 8, 18, 0, 16, 0,
+ 19, 0, 0, 0, 10, 0,
+ 16, 128, 65, 0, 0, 0,
+ 19, 0, 0, 0, 42, 0,
+ 16, 0, 19, 0, 0, 0,
+ 166, 0, 0, 7, 18, 240,
+ 17, 0, 1, 0, 0, 0,
+ 1, 64, 0, 0, 104, 0,
+ 0, 0, 10, 0, 16, 0,
+ 19, 0, 0, 0, 21, 0,
+ 0, 1, 165, 0, 0, 7,
+ 18, 0, 16, 0, 19, 0,
+ 0, 0, 1, 64, 0, 0,
+ 112, 0, 0, 0, 6, 240,
+ 17, 0, 0, 0, 0, 0,
+ 165, 0, 0, 7, 66, 0,
+ 16, 0, 19, 0, 0, 0,
+ 1, 64, 0, 0, 8, 0,
+ 0, 0, 6, 240, 17, 0,
+ 1, 0, 0, 0, 56, 0,
+ 0, 7, 18, 0, 16, 0,
+ 19, 0, 0, 0, 42, 0,
+ 16, 0, 19, 0, 0, 0,
+ 10, 0, 16, 0, 19, 0,
+ 0, 0, 49, 0, 0, 7,
+ 18, 0, 16, 0, 19, 0,
+ 0, 0, 10, 0, 16, 0,
+ 19, 0, 0, 0, 26, 0,
+ 16, 0, 19, 0, 0, 0,
+ 31, 0, 4, 3, 10, 0,
+ 16, 0, 19, 0, 0, 0,
+ 166, 0, 0, 7, 18, 240,
+ 17, 0, 1, 0, 0, 0,
+ 1, 64, 0, 0, 100, 0,
+ 0, 0, 1, 64, 0, 0,
+ 0, 0, 0, 0, 21, 0,
+ 0, 1, 21, 0, 0, 1,
+ 30, 0, 0, 7, 18, 0,
+ 16, 0, 17, 0, 0, 0,
+ 10, 0, 16, 0, 17, 0,
+ 0, 0, 1, 64, 0, 0,
+ 1, 0, 0, 0, 22, 0,
+ 0, 1, 190, 24, 0, 1,
+ 21, 0, 0, 1, 31, 0,
+ 0, 2, 10, 32, 2, 0,
+ 165, 0, 0, 7, 242, 0,
+ 16, 0, 0, 0, 0, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 70, 254, 17, 0,
+ 1, 0, 0, 0, 165, 0,
+ 0, 7, 242, 0, 16, 0,
+ 1, 0, 0, 0, 1, 64,
+ 0, 0, 16, 0, 0, 0,
+ 70, 254, 17, 0, 1, 0,
+ 0, 0, 165, 0, 0, 7,
+ 242, 0, 16, 0, 2, 0,
+ 0, 0, 1, 64, 0, 0,
+ 32, 0, 0, 0, 70, 254,
+ 17, 0, 1, 0, 0, 0,
+ 165, 0, 0, 7, 242, 0,
+ 16, 0, 3, 0, 0, 0,
+ 1, 64, 0, 0, 48, 0,
+ 0, 0, 70, 254, 17, 0,
+ 1, 0, 0, 0, 165, 0,
+ 0, 7, 242, 0, 16, 0,
+ 4, 0, 0, 0, 1, 64,
+ 0, 0, 64, 0, 0, 0,
+ 70, 254, 17, 0, 1, 0,
+ 0, 0, 165, 0, 0, 7,
+ 242, 0, 16, 0, 5, 0,
+ 0, 0, 1, 64, 0, 0,
+ 80, 0, 0, 0, 70, 254,
+ 17, 0, 1, 0, 0, 0,
+ 165, 0, 0, 7, 242, 0,
+ 16, 0, 6, 0, 0, 0,
+ 1, 64, 0, 0, 96, 0,
+ 0, 0, 70, 254, 17, 0,
+ 1, 0, 0, 0, 165, 0,
+ 0, 7, 50, 0, 16, 0,
+ 7, 0, 0, 0, 1, 64,
+ 0, 0, 112, 0, 0, 0,
+ 70, 240, 17, 0, 1, 0,
+ 0, 0, 168, 0, 0, 8,
+ 242, 224, 17, 0, 3, 0,
+ 0, 0, 10, 16, 2, 0,
+ 1, 64, 0, 0, 0, 0,
+ 0, 0, 70, 14, 16, 0,
+ 0, 0, 0, 0, 168, 0,
+ 0, 8, 242, 224, 17, 0,
+ 3, 0, 0, 0, 10, 16,
+ 2, 0, 1, 64, 0, 0,
+ 16, 0, 0, 0, 70, 14,
+ 16, 0, 1, 0, 0, 0,
+ 168, 0, 0, 8, 242, 224,
+ 17, 0, 3, 0, 0, 0,
+ 10, 16, 2, 0, 1, 64,
+ 0, 0, 32, 0, 0, 0,
+ 70, 14, 16, 0, 2, 0,
+ 0, 0, 168, 0, 0, 8,
+ 242, 224, 17, 0, 3, 0,
+ 0, 0, 10, 16, 2, 0,
+ 1, 64, 0, 0, 48, 0,
+ 0, 0, 70, 14, 16, 0,
+ 3, 0, 0, 0, 168, 0,
+ 0, 8, 242, 224, 17, 0,
+ 3, 0, 0, 0, 10, 16,
+ 2, 0, 1, 64, 0, 0,
+ 64, 0, 0, 0, 70, 14,
+ 16, 0, 4, 0, 0, 0,
+ 168, 0, 0, 8, 242, 224,
+ 17, 0, 3, 0, 0, 0,
+ 10, 16, 2, 0, 1, 64,
+ 0, 0, 80, 0, 0, 0,
+ 70, 14, 16, 0, 5, 0,
+ 0, 0, 168, 0, 0, 8,
+ 242, 224, 17, 0, 3, 0,
+ 0, 0, 10, 16, 2, 0,
+ 1, 64, 0, 0, 96, 0,
+ 0, 0, 70, 14, 16, 0,
+ 6, 0, 0, 0, 168, 0,
+ 0, 8, 50, 224, 17, 0,
+ 3, 0, 0, 0, 10, 16,
+ 2, 0, 1, 64, 0, 0,
+ 112, 0, 0, 0, 70, 0,
+ 16, 0, 7, 0, 0, 0,
+ 21, 0, 0, 1, 62, 0,
+ 0, 1, 83, 84, 65, 84,
+ 148, 0, 0, 0, 108, 16,
+ 0, 0, 48, 0, 0, 0,
+ 4, 0, 0, 0, 2, 0,
+ 0, 0, 62, 4, 0, 0,
+ 185, 1, 0, 0, 23, 1,
+ 0, 0, 50, 0, 0, 0,
+ 23, 1, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 164, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 184, 0, 0, 0,
+ 82, 0, 0, 0, 28, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 115, 0, 0, 0,
+ 0, 0, 0, 0, 50, 0,
+ 0, 0
+};
diff --git a/NvCloth/src/dx/DxSortKernel.inc b/NvCloth/src/dx/DxSortKernel.inc
new file mode 100644
index 0000000..7ebba68
--- /dev/null
+++ b/NvCloth/src/dx/DxSortKernel.inc
@@ -0,0 +1,193 @@
+interface ISortElements
+{
+ int get(int index);
+ void set(int index, int value);
+ void swap();
+};
+
+//Reduce - BlockSize * sizeof(uint4)
+//Scan - (2 + BlockSize) * sizeof(uint)
+interface ISortShared
+{
+ uint4 getReduce(int index);
+ void setReduce(int index, uint4 value);
+
+ uint getScan(int index);
+ void setScan(int index, uint value);
+};
+
+#define USE_WARP_LIMIT 1
+
+uint32_t reduceWarps(uint32_t threadIdx, uint32_t laneIdx, uint32_t warpIdx, uint32_t warpLimit, uint32_t threadPos, uint32_t threadEnd, ISortElements sortElements, uint32_t bit, uniform bool bOutput, uint32_t scanOut, ISortShared sortShared)
+{
+ const uint32_t laneMask = (1u << laneIdx) - 1;
+ const uint32_t mask1 = (threadIdx & 1) - 1;
+ const uint32_t mask2 = !!(threadIdx & 2) - 1;
+ const uint32_t mask4 = !!(threadIdx & 4) - 1;
+ const uint32_t mask8 = !!(threadIdx & 8) - 1;
+
+ uint32_t key = threadPos < threadEnd ? sortElements.get(threadPos) : 0xFFFFFFFF;
+ uint32_t keyDigit = (key >> bit) & 0x0F;
+
+#if USE_WARP_LIMIT
+ if (warpIdx < warpLimit)
+#endif
+ sortShared.setReduce(threadIdx, uint4(!!(keyDigit & 1) << laneIdx, !!(keyDigit & 2) << laneIdx, !!(keyDigit & 4) << laneIdx, !!(keyDigit & 8) << laneIdx));
+ GroupMemoryBarrierWithGroupSync();
+#if USE_WARP_LIMIT
+ if (warpIdx < warpLimit)
+#endif
+ if (laneIdx < 16) sortShared.setReduce(threadIdx, sortShared.getReduce(threadIdx) | sortShared.getReduce(threadIdx + 16));
+ GroupMemoryBarrierWithGroupSync();
+#if USE_WARP_LIMIT
+ if (warpIdx < warpLimit)
+#endif
+ if (laneIdx < 8) sortShared.setReduce(threadIdx, sortShared.getReduce(threadIdx) | sortShared.getReduce(threadIdx + 8));
+ GroupMemoryBarrierWithGroupSync();
+#if USE_WARP_LIMIT
+ if (warpIdx < warpLimit)
+#endif
+ if (laneIdx < 4) sortShared.setReduce(threadIdx, sortShared.getReduce(threadIdx) | sortShared.getReduce(threadIdx + 4));
+ GroupMemoryBarrierWithGroupSync();
+#if USE_WARP_LIMIT
+ if (warpIdx < warpLimit)
+#endif
+ if (laneIdx < 2) sortShared.setReduce(threadIdx, sortShared.getReduce(threadIdx) | sortShared.getReduce(threadIdx + 2));
+ GroupMemoryBarrierWithGroupSync();
+#if USE_WARP_LIMIT
+ if (warpIdx < warpLimit)
+#endif
+ if (laneIdx < 1) sortShared.setReduce(threadIdx, sortShared.getReduce(threadIdx) | sortShared.getReduce(threadIdx + 1));
+ GroupMemoryBarrierWithGroupSync();
+
+ uint4 ballot = 0;
+#if USE_WARP_LIMIT
+ if (warpIdx < warpLimit)
+#endif
+ ballot = sortShared.getReduce(threadIdx & ~31);
+
+ uint32_t result = 0;
+ if (bOutput)
+ {
+ uint32_t index = 0;
+#if USE_WARP_LIMIT
+ if (warpIdx < warpLimit)
+#endif
+ {
+ uint32_t bits = ((keyDigit & 1) - 1 ^ ballot[0]) & (!!(keyDigit & 2) - 1 ^ ballot[1]) & (!!(keyDigit & 4) - 1 ^ ballot[2]) & (!!(keyDigit & 8) - 1 ^ ballot[3]);
+ index = sortShared.getScan(scanOut + warpIdx + keyDigit * WarpsPerBlock) + countbits(bits & laneMask);
+ }
+ if (threadPos < threadEnd)
+ {
+ sortElements.set(index, key);
+ }
+
+ GroupMemoryBarrierWithGroupSync(); //to remove gScan read/write race condition
+#if USE_WARP_LIMIT
+ if (warpIdx < warpLimit)
+#endif
+ if (laneIdx < 16)
+ {
+ int scanIdx = scanOut + warpIdx + laneIdx * WarpsPerBlock;
+ sortShared.setScan(scanIdx, sortShared.getScan(scanIdx) + countbits((mask1 ^ ballot[0]) & (mask2 ^ ballot[1]) & (mask4 ^ ballot[2]) & (mask8 ^ ballot[3])));
+ }
+ }
+ else
+ {
+//#if USE_WARP_LIMIT
+ if (warpIdx < warpLimit)
+//#endif
+ result = countbits((mask1 ^ ballot[0]) & (mask2 ^ ballot[1]) & (mask4 ^ ballot[2]) & (mask8 ^ ballot[3]));
+ }
+ return result;
+}
+
+//TODO: check & fix shared memory bank conflicts if needed!
+void radixSort_BitCount(uint32_t threadIdx, uint32_t n, ISortElements sortElements, uint32_t startBit, uint32_t endBit, ISortShared sortShared)
+{
+ const uint32_t warpIdx = threadIdx >> 5;
+ const uint32_t laneIdx = threadIdx & 31;
+
+ const uint32_t WarpsTotal = ((n + 31) >> 5);
+ const uint32_t WarpsRemain = WarpsTotal % WarpsPerBlock;
+ const uint32_t WarpsFactor = WarpsTotal / WarpsPerBlock;
+ const uint32_t WarpsSelect = (warpIdx < WarpsRemain);
+ const uint32_t WarpsCount = WarpsFactor + WarpsSelect;
+ const uint32_t WarpsOffset = warpIdx * WarpsCount + WarpsRemain * (1 - WarpsSelect);
+ const uint32_t warpBeg = (WarpsOffset << 5);
+ const uint32_t warpEnd = min(warpBeg + (WarpsCount << 5), n);
+ const uint32_t threadBeg = warpBeg + laneIdx;
+
+ const uint32_t ScanCount = WarpsPerBlock * 16;
+
+ for (uint32_t bit = startBit; bit < endBit; bit += 4) // radix passes (4 bits each)
+ {
+ // gather bucket histograms per warp
+ uint32_t warpCount = 0;
+ uint32_t i;
+ uint32_t threadPos;
+ for (i = 0, threadPos = threadBeg; i < WarpsFactor; ++i, threadPos += 32)
+ {
+ warpCount += reduceWarps(threadIdx, laneIdx, warpIdx, WarpsPerBlock, threadPos, warpEnd, sortElements, bit, false, 0, sortShared);
+ }
+ if (WarpsRemain > 0)
+ {
+ warpCount += reduceWarps(threadIdx, laneIdx, warpIdx, WarpsRemain, threadPos, warpEnd, sortElements, bit, false, 0, sortShared);
+ }
+
+ if (laneIdx < 16)
+ {
+ sortShared.setScan(1 + warpIdx + laneIdx * WarpsPerBlock, warpCount);
+ }
+ GroupMemoryBarrierWithGroupSync();
+
+ // prefix sum of histogram buckets
+ if (threadIdx == 0)
+ {
+ sortShared.setScan(0, 0);
+ sortShared.setScan(ScanCount + 1, 0);
+ }
+
+ uint32_t scanIn = 1;
+ uint32_t scanOut = 2 + ScanCount;
+
+ //if (threadIdx < ScanCount)
+ // bSortElements[n * 2 + threadIdx] = gScan[scanIn + threadIdx];
+
+ {
+ [unroll]
+ for (uint32_t offset = 1; offset < ScanCount; offset *= 2)
+ {
+ if (threadIdx < ScanCount)
+ {
+ if (threadIdx >= offset)
+ sortShared.setScan(scanOut + threadIdx, sortShared.getScan(scanIn + threadIdx) + sortShared.getScan(scanIn + threadIdx - offset));
+ else
+ sortShared.setScan(scanOut + threadIdx, sortShared.getScan(scanIn + threadIdx));
+ }
+ // swap double buffer indices
+ uint32_t temp = scanOut;
+ scanOut = scanIn;
+ scanIn = temp;
+ GroupMemoryBarrierWithGroupSync();
+ }
+ }
+
+ //if (threadIdx < ScanCount)
+ // bSortElements[n * 2 + ScanCount + threadIdx] = gScan[scanIn + threadIdx];
+
+ scanIn -= 1; //make scan exclusive!
+ // split indices
+ for (i = 0, threadPos = threadBeg; i < WarpsFactor; ++i, threadPos += 32)
+ {
+ reduceWarps(threadIdx, laneIdx, warpIdx, WarpsPerBlock, threadPos, warpEnd, sortElements, bit, true, scanIn, sortShared);
+ }
+ if (WarpsRemain > 0)
+ {
+ reduceWarps(threadIdx, laneIdx, warpIdx, WarpsRemain, threadPos, warpEnd, sortElements, bit, true, scanIn, sortShared);
+ }
+
+ //GroupMemoryBarrierWithGroupSync();
+ sortElements.swap();
+ }
+}
diff --git a/NvCloth/src/neon/NeonCollision.cpp b/NvCloth/src/neon/NeonCollision.cpp
new file mode 100644
index 0000000..fe45778
--- /dev/null
+++ b/NvCloth/src/neon/NeonCollision.cpp
@@ -0,0 +1,34 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2017 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#ifndef __ARM_NEON__
+#error This file needs to be compiled with NEON support!
+#endif
+
+#include "SwCollision.cpp"
diff --git a/NvCloth/src/neon/NeonSelfCollision.cpp b/NvCloth/src/neon/NeonSelfCollision.cpp
new file mode 100644
index 0000000..8faa407
--- /dev/null
+++ b/NvCloth/src/neon/NeonSelfCollision.cpp
@@ -0,0 +1,34 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2017 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#ifndef __ARM_NEON__
+#error This file needs to be compiled with NEON support!
+#endif
+
+#include "SwSelfCollision.cpp"
diff --git a/NvCloth/src/neon/NeonSolverKernel.cpp b/NvCloth/src/neon/NeonSolverKernel.cpp
new file mode 100644
index 0000000..4d6de68
--- /dev/null
+++ b/NvCloth/src/neon/NeonSolverKernel.cpp
@@ -0,0 +1,49 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2017 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#ifndef __ARM_NEON__
+#error This file needs to be compiled with NEON support!
+#endif
+
+#include "SwSolverKernel.cpp"
+
+#include <cpu-features.h>
+
+namespace physx
+{
+namespace cloth
+{
+bool neonSolverKernel(SwCloth const& cloth, SwClothData& data, SwKernelAllocator& allocator,
+ IterationStateFactory& factory, PxProfileZone* profileZone)
+{
+ return ANDROID_CPU_ARM_FEATURE_NEON & android_getCpuFeatures() &&
+ (SwSolverKernel<Simd4f>(cloth, data, allocator, factory, profileZone)(), true);
+}
+}
+}
diff --git a/NvCloth/src/neon/SwCollisionHelpers.h b/NvCloth/src/neon/SwCollisionHelpers.h
new file mode 100644
index 0000000..0b9410b
--- /dev/null
+++ b/NvCloth/src/neon/SwCollisionHelpers.h
@@ -0,0 +1,87 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2017 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#pragma once
+
+#ifdef _M_ARM
+#include <arm_neon.h>
+#endif
+
+namespace physx
+{
+namespace cloth
+{
+
+uint32_t findBitSet(uint32_t mask)
+{
+#ifdef _M_ARM
+ __n64 t = { mask };
+ return 31 - (vclz_u32(t)).n64_u32[0];
+#else
+ return 31 - __builtin_clz(mask);
+#endif
+}
+
+Simd4i intFloor(const Simd4f& v)
+{
+ int32x4_t neg = vreinterpretq_s32_u32(vshrq_n_u32(v.u4, 31));
+ return vsubq_s32(vcvtq_s32_f32(v.f4), neg);
+}
+
+Simd4i horizontalOr(const Simd4i& mask)
+{
+ uint32x2_t hi = vget_high_u32(mask.u4);
+ uint32x2_t lo = vget_low_u32(mask.u4);
+ uint32x2_t tmp = vorr_u32(lo, hi);
+ uint32x2_t rev = vrev64_u32(tmp);
+ uint32x2_t res = vorr_u32(tmp, rev);
+ return vcombine_u32(res, res);
+}
+
+Gather<Simd4i>::Gather(const Simd4i& index)
+{
+ PX_ALIGN(16, uint8x8x2_t) byteIndex = reinterpret_cast<const uint8x8x2_t&>(sPack);
+ uint8x8x2_t lohiIndex = reinterpret_cast<const uint8x8x2_t&>(index);
+ byteIndex.val[0] = vtbl2_u8(lohiIndex, byteIndex.val[0]);
+ byteIndex.val[1] = vtbl2_u8(lohiIndex, byteIndex.val[1]);
+ mPermute = vshlq_n_u32(reinterpret_cast<const uint32x4_t&>(byteIndex), 2);
+ mPermute = mPermute | sOffset | vcgtq_u32(index.u4, sMask.u4);
+}
+
+Simd4i Gather<Simd4i>::operator()(const Simd4i* ptr) const
+{
+ PX_ALIGN(16, uint8x8x2_t) result = reinterpret_cast<const uint8x8x2_t&>(mPermute);
+ const uint8x8x4_t* table = reinterpret_cast<const uint8x8x4_t*>(ptr);
+ result.val[0] = vtbl4_u8(*table, result.val[0]);
+ result.val[1] = vtbl4_u8(*table, result.val[1]);
+ return reinterpret_cast<const Simd4i&>(result);
+}
+
+} // namespace cloth
+} // namespace physx
diff --git a/NvCloth/src/scalar/SwCollisionHelpers.h b/NvCloth/src/scalar/SwCollisionHelpers.h
new file mode 100644
index 0000000..0d7321f
--- /dev/null
+++ b/NvCloth/src/scalar/SwCollisionHelpers.h
@@ -0,0 +1,92 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2017 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#pragma once
+
+namespace physx
+{
+namespace cloth
+{
+
+#if !NV_SIMD_SIMD
+uint32_t findBitSet(uint32_t mask)
+{
+ uint32_t result = 0;
+ while (mask >>= 1)
+ ++result;
+ return result;
+}
+#endif
+
+inline Scalar4i intFloor(const Scalar4f& v)
+{
+ return Scalar4i(int(floor(v.f4[0])), int(floor(v.f4[1])), int(floor(v.f4[2])), int(floor(v.f4[3])));
+}
+
+inline Scalar4i horizontalOr(const Scalar4i& mask)
+{
+ return simd4i(mask.i4[0] | mask.i4[1] | mask.i4[2] | mask.i4[3]);
+}
+
+template <>
+struct Gather<Scalar4i>
+{
+ inline Gather(const Scalar4i& index);
+ inline Scalar4i operator()(const Scalar4i*) const;
+
+ Scalar4i mIndex;
+ Scalar4i mOutOfRange;
+};
+
+Gather<Scalar4i>::Gather(const Scalar4i& index)
+{
+ uint32_t mask = /* sGridSize */ 8 - 1;
+
+ mIndex.u4[0] = index.u4[0] & mask;
+ mIndex.u4[1] = index.u4[1] & mask;
+ mIndex.u4[2] = index.u4[2] & mask;
+ mIndex.u4[3] = index.u4[3] & mask;
+
+ mOutOfRange.i4[0] = index.u4[0] & ~mask ? 0 : -1;
+ mOutOfRange.i4[1] = index.u4[1] & ~mask ? 0 : -1;
+ mOutOfRange.i4[2] = index.u4[2] & ~mask ? 0 : -1;
+ mOutOfRange.i4[3] = index.u4[3] & ~mask ? 0 : -1;
+}
+
+Scalar4i Gather<Scalar4i>::operator()(const Scalar4i* ptr) const
+{
+ const int32_t* base = ptr->i4;
+ const int32_t* index = mIndex.i4;
+ const int32_t* mask = mOutOfRange.i4;
+ return Scalar4i(base[index[0]] & mask[0], base[index[1]] & mask[1], base[index[2]] & mask[2],
+ base[index[3]] & mask[3]);
+}
+
+} // namespace cloth
+} // namespace physx
diff --git a/NvCloth/src/sse2/SwCollisionHelpers.h b/NvCloth/src/sse2/SwCollisionHelpers.h
new file mode 100644
index 0000000..c80ba1d
--- /dev/null
+++ b/NvCloth/src/sse2/SwCollisionHelpers.h
@@ -0,0 +1,96 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2017 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#pragma once
+
+#if PX_GCC_FAMILY || NV_ORBIS
+#include <xmmintrin.h> // _BitScanForward
+#else
+#if __APPLE__
+#include <x86intrin.h>
+#else
+#pragma warning(push)
+#pragma warning(disable : 4668) //'symbol' is not defined as a preprocessor macro, replacing with '0' for 'directives'
+#pragma warning(disable : 4987) // nonstandard extension used: 'throw (...)'
+#pragma warning(pop)
+#include <intrin.h> // _BitScanForward
+#endif
+#endif
+
+namespace nv
+{
+namespace cloth
+{
+
// Returns the index of the LOWEST set bit in 'mask'.
// Precondition: mask != 0 (with mask == 0, _BitScanForward leaves its result
// undefined and __builtin_ffs(0) - 1 wraps to 0xffffffff).
// NOTE(review): the scalar fallback scans for the highest set bit; the two
// agree only for single-bit masks.
uint32_t findBitSet(uint32_t mask)
{
#if defined(_MSC_VER)
	unsigned long result;
	// C-style cast: 'unsigned long(mask)' is not a valid functional-style
	// cast because the type name is more than one token.
	_BitScanForward(&result, (unsigned long)mask);
	return result;
#else
	// __builtin_ffs returns the 1-based position of the lowest set bit.
	return __builtin_ffs(mask) - 1;
#endif
}
+
// Per-lane float->int floor approximation. _mm_cvttps_epi32 truncates toward
// zero; subtracting the IEEE sign bit (shifted down to 1) converts truncation
// into rounding toward negative infinity for negative inputs. simd4i(v) is
// presumably a bitwise reinterpretation of the float lanes -- confirm.
// NOTE(review): for negative inputs that are exact integers (e.g. -2.0f) this
// yields floor(v) - 1; the collision grid math presumably never feeds exact
// negative integers -- verify before reusing elsewhere.
Simd4i intFloor(const Simd4f& v)
{
	Simd4i i = _mm_cvttps_epi32(v);
	return _mm_sub_epi32(i, _mm_srli_epi32(simd4i(v), 31));
}
+
+Simd4i horizontalOr(const Simd4i& mask)
+{
+ Simd4i tmp = mask | _mm_shuffle_epi32(mask, 0xb1); // w z y x -> z w x y
+ return tmp | _mm_shuffle_epi32(tmp, 0x4e); // w z y x -> y x w z
+}
+
Gather<Simd4i>::Gather(const Simd4i& index)
{
	// Build per-lane selection masks from the low three index bits: shift the
	// wanted bit into the sign position, then arithmetic-shift it across the
	// whole lane (all-ones when set, zero otherwise).
	mSelectQ = _mm_srai_epi32(index << 29, 31); // bit 2: upper vs lower half of the 8-entry table
	mSelectD = _mm_srai_epi32(index << 30, 31); // bit 1: pair within the half
	mSelectW = _mm_srai_epi32(index << 31, 31); // bit 0: element within the pair
	// Unsigned "index > max" done with signed compares by flipping the sign
	// bit of both sides. NOTE(review): assumes sSignedMask holds the largest
	// valid index biased by sIntSignBit -- confirm against their definitions.
	mOutOfRange = (index ^ sIntSignBit) > sSignedMask;
}
+
// Emulated 8-way gather: 'ptr' points at two consecutive Simd4i forming an
// 8-entry int32 table. A three-level binary select (element within pair, pair
// within half, half) picks one entry per lane; out-of-range lanes are zeroed.
Simd4i Gather<Simd4i>::operator()(const Simd4i* ptr) const
{
	// more efficient with _mm_shuffle_epi8 (SSSE3)
	Simd4i lo = ptr[0], hi = ptr[1];
	Simd4i m01 = select(mSelectW, splat<1>(lo), splat<0>(lo)); // entry 0 or 1
	Simd4i m23 = select(mSelectW, splat<3>(lo), splat<2>(lo)); // entry 2 or 3
	Simd4i m45 = select(mSelectW, splat<1>(hi), splat<0>(hi)); // entry 4 or 5
	Simd4i m67 = select(mSelectW, splat<3>(hi), splat<2>(hi)); // entry 6 or 7
	Simd4i m0123 = select(mSelectD, m23, m01);
	Simd4i m4567 = select(mSelectD, m67, m45);
	// Final half select, then zero the lanes flagged out of range.
	return select(mSelectQ, m4567, m0123) & ~mOutOfRange;
}
+
+} // namespace cloth
+} // namespace nv
diff --git a/NvCloth/src/sse2/SwSolveConstraints.h b/NvCloth/src/sse2/SwSolveConstraints.h
new file mode 100644
index 0000000..a6e88af
--- /dev/null
+++ b/NvCloth/src/sse2/SwSolveConstraints.h
@@ -0,0 +1,134 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2017 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#include "NvSimd/sse2/NvSse2Simd4f.h"
+
+__pragma(warning(push))
+#pragma warning(disable : 4127) // conditional expression is constant
+
+template <bool useMultiplier>
+void solveConstraints(float* __restrict posIt, const float* __restrict rIt, const float* __restrict stIt, const float* __restrict rEnd,
+ const uint16_t* __restrict iIt, const Simd4f& stiffnessEtc, const Simd4f& stiffnessExponent)
+{
+ PX_UNUSED(stIt);
+ PX_UNUSED(stiffnessEtc);
+ PX_UNUSED(stiffnessExponent);
+ __m128 sOne = _mm_set1_ps(1.0f);
+
+ __m128 stretchLimit, compressionLimit, multiplier;
+ if (useMultiplier)
+ {
+ stretchLimit = _mm_shuffle_ps(stiffnessEtc, stiffnessEtc, 0xff);
+ compressionLimit = _mm_shuffle_ps(stiffnessEtc, stiffnessEtc, 0xaa);
+ multiplier = _mm_shuffle_ps(stiffnessEtc, stiffnessEtc, 0x55);
+ }
+ __m128 stiffness = _mm_shuffle_ps(stiffnessEtc, stiffnessEtc, 0x00);
+ bool useStiffnessPerConstraint = nullptr != stIt;
+
+ for (; rIt != rEnd; rIt += 4, stIt += 4, iIt += 8)
+ {
+ float* p0i = posIt + iIt[0] * 4;
+ float* p0j = posIt + iIt[1] * 4;
+ float* p1i = posIt + iIt[2] * 4;
+ float* p1j = posIt + iIt[3] * 4;
+ float* p2i = posIt + iIt[4] * 4;
+ float* p2j = posIt + iIt[5] * 4;
+ float* p3i = posIt + iIt[6] * 4;
+ float* p3j = posIt + iIt[7] * 4;
+
+ __m128 v0i = _mm_load_ps(p0i);
+ __m128 v0j = _mm_load_ps(p0j);
+ __m128 v1i = _mm_load_ps(p1i);
+ __m128 v1j = _mm_load_ps(p1j);
+ __m128 v2i = _mm_load_ps(p2i);
+ __m128 v2j = _mm_load_ps(p2j);
+ __m128 v3i = _mm_load_ps(p3i);
+ __m128 v3j = _mm_load_ps(p3j);
+
+ __m128 h0ij = _mm_add_ps(v0j, _mm_mul_ps(v0i, sMinusOneXYZOneW));
+ __m128 h1ij = _mm_add_ps(v1j, _mm_mul_ps(v1i, sMinusOneXYZOneW));
+ __m128 h2ij = _mm_add_ps(v2j, _mm_mul_ps(v2i, sMinusOneXYZOneW));
+ __m128 h3ij = _mm_add_ps(v3j, _mm_mul_ps(v3i, sMinusOneXYZOneW));
+
+ __m128 a = _mm_unpacklo_ps(h0ij, h2ij);
+ __m128 b = _mm_unpackhi_ps(h0ij, h2ij);
+ __m128 c = _mm_unpacklo_ps(h1ij, h3ij);
+ __m128 d = _mm_unpackhi_ps(h1ij, h3ij);
+
+ __m128 hxij = _mm_unpacklo_ps(a, c);
+ __m128 hyij = _mm_unpackhi_ps(a, c);
+ __m128 hzij = _mm_unpacklo_ps(b, d);
+ __m128 vwij = _mm_unpackhi_ps(b, d);
+
+ __m128 rij = _mm_load_ps(rIt);
+ __m128 e2ij = _mm_add_ps(gSimd4fEpsilon, _mm_add_ps(_mm_mul_ps(hxij, hxij),
+ _mm_add_ps(_mm_mul_ps(hyij, hyij), _mm_mul_ps(hzij, hzij))));
+
+ //Load/calculate the constraint stiffness
+ __m128 stij = useStiffnessPerConstraint ? _mm_set_ps1(1.0f) - exp2(stiffnessExponent * _mm_load_ps(stIt)) : stiffness;
+
+
+ __m128 mask = _mm_cmpnle_ps(rij, gSimd4fEpsilon);
+ __m128 erij = _mm_and_ps(_mm_sub_ps(sOne, _mm_mul_ps(rij, _mm_rsqrt_ps(e2ij))), mask);
+
+ if (useMultiplier)
+ {
+ erij = _mm_sub_ps(erij, _mm_mul_ps(multiplier, _mm_max_ps(compressionLimit, _mm_min_ps(erij, stretchLimit))));
+ }
+ __m128 exij = _mm_mul_ps(erij, _mm_mul_ps(stij, _mm_rcp_ps(_mm_add_ps(gSimd4fEpsilon, vwij))));
+
+ __m128 exlo = _mm_and_ps(sMaskXY, exij);
+ __m128 exhi = _mm_andnot_ps(sMaskXY, exij);
+
+ __m128 f0ij = _mm_mul_ps(h0ij, _mm_shuffle_ps(exlo, exlo, 0xc0));
+ __m128 f1ij = _mm_mul_ps(h1ij, _mm_shuffle_ps(exlo, exlo, 0xd5));
+ __m128 f2ij = _mm_mul_ps(h2ij, _mm_shuffle_ps(exhi, exhi, 0x2a));
+ __m128 f3ij = _mm_mul_ps(h3ij, _mm_shuffle_ps(exhi, exhi, 0x3f));
+
+ __m128 u0i = _mm_add_ps(v0i, _mm_mul_ps(f0ij, _mm_shuffle_ps(v0i, v0i, 0xff)));
+ __m128 u0j = _mm_sub_ps(v0j, _mm_mul_ps(f0ij, _mm_shuffle_ps(v0j, v0j, 0xff)));
+ __m128 u1i = _mm_add_ps(v1i, _mm_mul_ps(f1ij, _mm_shuffle_ps(v1i, v1i, 0xff)));
+ __m128 u1j = _mm_sub_ps(v1j, _mm_mul_ps(f1ij, _mm_shuffle_ps(v1j, v1j, 0xff)));
+ __m128 u2i = _mm_add_ps(v2i, _mm_mul_ps(f2ij, _mm_shuffle_ps(v2i, v2i, 0xff)));
+ __m128 u2j = _mm_sub_ps(v2j, _mm_mul_ps(f2ij, _mm_shuffle_ps(v2j, v2j, 0xff)));
+ __m128 u3i = _mm_add_ps(v3i, _mm_mul_ps(f3ij, _mm_shuffle_ps(v3i, v3i, 0xff)));
+ __m128 u3j = _mm_sub_ps(v3j, _mm_mul_ps(f3ij, _mm_shuffle_ps(v3j, v3j, 0xff)));
+
+ _mm_store_ps(p0i, u0i);
+ _mm_store_ps(p0j, u0j);
+ _mm_store_ps(p1i, u1i);
+ _mm_store_ps(p1j, u1j);
+ _mm_store_ps(p2i, u2i);
+ _mm_store_ps(p2j, u2j);
+ _mm_store_ps(p3i, u3i);
+ _mm_store_ps(p3j, u3j);
+ }
+}
+
+__pragma(warning(pop))