aboutsummaryrefslogtreecommitdiff
path: root/APEX_1.4/module/clothing/embedded/LowLevelCloth/src/scalar
diff options
context:
space:
mode:
authorgit perforce import user <a@b>2016-10-25 12:29:14 -0600
committerSheikh Dawood Abdul Ajees <Sheikh Dawood Abdul Ajees>2016-10-25 18:56:37 -0500
commit3dfe2108cfab31ba3ee5527e217d0d8e99a51162 (patch)
treefa6485c169e50d7415a651bf838f5bcd0fd3bfbd /APEX_1.4/module/clothing/embedded/LowLevelCloth/src/scalar
downloadphysx-3.4-3dfe2108cfab31ba3ee5527e217d0d8e99a51162.tar.xz
physx-3.4-3dfe2108cfab31ba3ee5527e217d0d8e99a51162.zip
Initial commit:
PhysX 3.4.0 Update @ 21294896 APEX 1.4.0 Update @ 21275617 [CL 21300167]
Diffstat (limited to 'APEX_1.4/module/clothing/embedded/LowLevelCloth/src/scalar')
-rw-r--r--APEX_1.4/module/clothing/embedded/LowLevelCloth/src/scalar/Simd4f.h410
-rw-r--r--APEX_1.4/module/clothing/embedded/LowLevelCloth/src/scalar/Simd4i.h188
-rw-r--r--APEX_1.4/module/clothing/embedded/LowLevelCloth/src/scalar/SimdTypes.h86
-rw-r--r--APEX_1.4/module/clothing/embedded/LowLevelCloth/src/scalar/SwCollisionHelpers.h76
4 files changed, 760 insertions, 0 deletions
diff --git a/APEX_1.4/module/clothing/embedded/LowLevelCloth/src/scalar/Simd4f.h b/APEX_1.4/module/clothing/embedded/LowLevelCloth/src/scalar/Simd4f.h
new file mode 100644
index 00000000..d02d5066
--- /dev/null
+++ b/APEX_1.4/module/clothing/embedded/LowLevelCloth/src/scalar/Simd4f.h
@@ -0,0 +1,410 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#pragma once
+
+// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+// factory implementation
+// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+
// Broadcast a single float into all four lanes.
template <>
inline Simd4fFactory<const float&>::operator Scalar4f() const
{
	return Scalar4f(v, v, v, v);
}

// Reinterpret a four-float tuple as a vector without per-lane copies.
inline Simd4fFactory<detail::FourTuple>::operator Scalar4f() const
{
	return reinterpret_cast<const Scalar4f&>(v);
}

// Broadcast a compile-time integer constant, converted to float.
template <int i>
inline Simd4fFactory<detail::IntType<i> >::operator Scalar4f() const
{
	float s = i;
	return Scalar4f(s, s, s, s);
}

// Sign-bit mask: every lane holds the raw bit pattern 0x80000000.
// Going through int32_t selects the integer constructor, so the bits are
// stored verbatim instead of being converted to float.
template <>
inline Simd4fFactory<detail::IntType<0x80000000u> >::operator Scalar4f() const
{
	int32_t i = 0x80000000u;
	return Scalar4f(i, i, i, i);
}

// All-ones mask (the "true" value produced by the comparison operators).
template <>
inline Simd4fFactory<detail::IntType<0xffffffff> >::operator Scalar4f() const
{
	int32_t i = 0xffffffff;
	return Scalar4f(i, i, i, i);
}

// Unaligned load of four consecutive floats.
template <>
inline Simd4fFactory<const float*>::operator Scalar4f() const
{
	return Scalar4f(v[0], v[1], v[2], v[3]);
}

// Aligned load; alignment is irrelevant on the scalar path, so this is the
// same element-wise copy as the unaligned load.
template <>
inline Simd4fFactory<detail::AlignedPointer<float> >::operator Scalar4f() const
{
	return Scalar4f(v.ptr[0], v.ptr[1], v.ptr[2], v.ptr[3]);
}

// Load four floats from a base pointer plus a byte offset.
template <>
inline Simd4fFactory<detail::OffsetPointer<float> >::operator Scalar4f() const
{
	const float* ptr = reinterpret_cast<const float*>(reinterpret_cast<const char*>(v.ptr) + v.offset);
	return Scalar4f(ptr[0], ptr[1], ptr[2], ptr[3]);
}
+
+// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+// expression template
+// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+
// Materialize a deferred complement: lane-wise bitwise NOT.
template <>
inline ComplementExpr<Scalar4f>::operator Scalar4f() const
{
	return Scalar4f(~v.u4[0], ~v.u4[1], ~v.u4[2], ~v.u4[3]);
}

// (~a) & b folded into a single and-not, without materializing ~a.
inline Scalar4f operator&(const ComplementExpr<Scalar4f>& complement, const Scalar4f& v)
{
	return Scalar4f(v.u4[0] & ~complement.v.u4[0], v.u4[1] & ~complement.v.u4[1], v.u4[2] & ~complement.v.u4[2],
	                v.u4[3] & ~complement.v.u4[3]);
}

// a & (~b) folded into a single and-not, without materializing ~b.
inline Scalar4f operator&(const Scalar4f& v, const ComplementExpr<Scalar4f>& complement)
{
	return Scalar4f(v.u4[0] & ~complement.v.u4[0], v.u4[1] & ~complement.v.u4[1], v.u4[2] & ~complement.v.u4[2],
	                v.u4[3] & ~complement.v.u4[3]);
}
+
+// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+// operator implementations
+// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+
+inline Scalar4f operator==(const Scalar4f& v0, const Scalar4f& v1)
+{
+ return Scalar4f(v0.f4[0] == v1.f4[0], v0.f4[1] == v1.f4[1], v0.f4[2] == v1.f4[2], v0.f4[3] == v1.f4[3]);
+}
+
+inline Scalar4f operator<(const Scalar4f& v0, const Scalar4f& v1)
+{
+ return Scalar4f(v0.f4[0] < v1.f4[0], v0.f4[1] < v1.f4[1], v0.f4[2] < v1.f4[2], v0.f4[3] < v1.f4[3]);
+}
+
+inline Scalar4f operator<=(const Scalar4f& v0, const Scalar4f& v1)
+{
+ return Scalar4f(v0.f4[0] <= v1.f4[0], v0.f4[1] <= v1.f4[1], v0.f4[2] <= v1.f4[2], v0.f4[3] <= v1.f4[3]);
+}
+
+inline Scalar4f operator>(const Scalar4f& v0, const Scalar4f& v1)
+{
+ return Scalar4f(v0.f4[0] > v1.f4[0], v0.f4[1] > v1.f4[1], v0.f4[2] > v1.f4[2], v0.f4[3] > v1.f4[3]);
+}
+
+inline Scalar4f operator>=(const Scalar4f& v0, const Scalar4f& v1)
+{
+ return Scalar4f(v0.f4[0] >= v1.f4[0], v0.f4[1] >= v1.f4[1], v0.f4[2] >= v1.f4[2], v0.f4[3] >= v1.f4[3]);
+}
+
+inline ComplementExpr<Scalar4f> operator~(const Scalar4f& v)
+{
+ return ComplementExpr<Scalar4f>(v);
+}
+
+inline Scalar4f operator&(const Scalar4f& v0, const Scalar4f& v1)
+{
+ return Scalar4f(v0.u4[0] & v1.u4[0], v0.u4[1] & v1.u4[1], v0.u4[2] & v1.u4[2], v0.u4[3] & v1.u4[3]);
+}
+
+inline Scalar4f operator|(const Scalar4f& v0, const Scalar4f& v1)
+{
+ return Scalar4f(v0.u4[0] | v1.u4[0], v0.u4[1] | v1.u4[1], v0.u4[2] | v1.u4[2], v0.u4[3] | v1.u4[3]);
+}
+
+inline Scalar4f operator^(const Scalar4f& v0, const Scalar4f& v1)
+{
+ return Scalar4f(v0.u4[0] ^ v1.u4[0], v0.u4[1] ^ v1.u4[1], v0.u4[2] ^ v1.u4[2], v0.u4[3] ^ v1.u4[3]);
+}
+
+inline Scalar4f operator<<(const Scalar4f& v, int shift)
+{
+ return Scalar4f(v.u4[0] << shift, v.u4[1] << shift, v.u4[2] << shift, v.u4[3] << shift);
+}
+
+inline Scalar4f operator>>(const Scalar4f& v, int shift)
+{
+ return Scalar4f(v.u4[0] >> shift, v.u4[1] >> shift, v.u4[2] >> shift, v.u4[3] >> shift);
+}
+
+inline Scalar4f operator+(const Scalar4f& v)
+{
+ return v;
+}
+
+inline Scalar4f operator+(const Scalar4f& v0, const Scalar4f& v1)
+{
+ return Scalar4f(v0.f4[0] + v1.f4[0], v0.f4[1] + v1.f4[1], v0.f4[2] + v1.f4[2], v0.f4[3] + v1.f4[3]);
+}
+
+inline Scalar4f operator-(const Scalar4f& v)
+{
+ return Scalar4f(-v.f4[0], -v.f4[1], -v.f4[2], -v.f4[3]);
+}
+
+inline Scalar4f operator-(const Scalar4f& v0, const Scalar4f& v1)
+{
+ return Scalar4f(v0.f4[0] - v1.f4[0], v0.f4[1] - v1.f4[1], v0.f4[2] - v1.f4[2], v0.f4[3] - v1.f4[3]);
+}
+
+inline Scalar4f operator*(const Scalar4f& v0, const Scalar4f& v1)
+{
+ return Scalar4f(v0.f4[0] * v1.f4[0], v0.f4[1] * v1.f4[1], v0.f4[2] * v1.f4[2], v0.f4[3] * v1.f4[3]);
+}
+
+inline Scalar4f operator/(const Scalar4f& v0, const Scalar4f& v1)
+{
+ return Scalar4f(v0.f4[0] / v1.f4[0], v0.f4[1] / v1.f4[1], v0.f4[2] / v1.f4[2], v0.f4[3] / v1.f4[3]);
+}
+
+// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+// function implementations
+// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+
// Reinterpret an integer vector as a float vector. Scalar4i is a typedef of
// Scalar4f in the scalar backend, so this is an identity conversion.
inline Scalar4f simd4f(const Scalar4i& v)
{
	return v;
}

// Expose the four float lanes as a mutable C-array reference.
inline float (&array(Scalar4f& v))[4]
{
	return v.f4;
}

// Expose the four float lanes as a read-only C-array reference.
inline const float (&array(const Scalar4f& v))[4]
{
	return v.f4;
}

// Store all four lanes to (possibly unaligned) memory.
inline void store(float* ptr, const Scalar4f& v)
{
	ptr[0] = v.f4[0];
	ptr[1] = v.f4[1];
	ptr[2] = v.f4[2];
	ptr[3] = v.f4[3];
}

// Aligned store; alignment makes no difference on the scalar path.
inline void storeAligned(float* ptr, const Scalar4f& v)
{
	store(ptr, v);
}

// Aligned store at a byte offset from ptr.
inline void storeAligned(float* ptr, unsigned int offset, const Scalar4f& v)
{
	storeAligned(reinterpret_cast<float*>(reinterpret_cast<char*>(ptr) + offset), v);
}

// Broadcast lane i of v into all four lanes.
template <size_t i>
inline Scalar4f splat(const Scalar4f& v)
{
	return Scalar4f(v.f4[i], v.f4[i], v.f4[i], v.f4[i]);
}

// Bitwise blend: result bits come from v0 where mask bits are set, from v1
// where they are clear ((v0 ^ v1) & mask) ^ v1 needs no branch per lane.
inline Scalar4f select(const Scalar4f& mask, const Scalar4f& v0, const Scalar4f& v1)
{
	return ((v0 ^ v1) & mask) ^ v1;
}
+}
+
// Lane-wise absolute value.
inline Scalar4f abs(const Scalar4f& v)
{
	return Scalar4f(::fabsf(v.f4[0]), ::fabsf(v.f4[1]), ::fabsf(v.f4[2]), ::fabsf(v.f4[3]));
}

// Lane-wise rounding toward negative infinity.
inline Scalar4f floor(const Scalar4f& v)
{
	return Scalar4f(::floorf(v.f4[0]), ::floorf(v.f4[1]), ::floorf(v.f4[2]), ::floorf(v.f4[3]));
}

// Lane-wise maximum.
inline Scalar4f max(const Scalar4f& v0, const Scalar4f& v1)
{
	return Scalar4f(std::max(v0.f4[0], v1.f4[0]), std::max(v0.f4[1], v1.f4[1]), std::max(v0.f4[2], v1.f4[2]),
	                std::max(v0.f4[3], v1.f4[3]));
}

// Lane-wise minimum.
inline Scalar4f min(const Scalar4f& v0, const Scalar4f& v1)
{
	return Scalar4f(std::min(v0.f4[0], v1.f4[0]), std::min(v0.f4[1], v1.f4[1]), std::min(v0.f4[2], v1.f4[2]),
	                std::min(v0.f4[3], v1.f4[3]));
}

// Lane-wise reciprocal (exact division; no estimate on the scalar path).
inline Scalar4f recip(const Scalar4f& v)
{
	return Scalar4f(1 / v.f4[0], 1 / v.f4[1], 1 / v.f4[2], 1 / v.f4[3]);
}

// Reciprocal with n Newton-Raphson refinement steps on SIMD backends; the
// scalar result is already exact, so n is ignored here.
template <int n>
inline Scalar4f recipT(const Scalar4f& v)
{
	return recip(v);
}

// Lane-wise square root.
inline Scalar4f sqrt(const Scalar4f& v)
{
	return Scalar4f(::sqrtf(v.f4[0]), ::sqrtf(v.f4[1]), ::sqrtf(v.f4[2]), ::sqrtf(v.f4[3]));
}

// Lane-wise reciprocal square root.
inline Scalar4f rsqrt(const Scalar4f& v)
{
	return recip(sqrt(v));
}

// Refined reciprocal square root; exact already, so n is ignored.
template <int n>
inline Scalar4f rsqrtT(const Scalar4f& v)
{
	return rsqrt(v);
}

// Lane-wise base-2 exponential, computed as e^(v * ln 2).
inline Scalar4f exp2(const Scalar4f& v)
{
	float scale = 0.69314718055994531f; // ln(2)
	return Scalar4f(::expf(v.f4[0] * scale), ::expf(v.f4[1] * scale), ::expf(v.f4[2] * scale), ::expf(v.f4[3] * scale));
}
+
namespace simdf
{
// The PSP2 compiler fails to resolve the global exp2 overload in some
// contexts; forwarding through this namespace-qualified wrapper works.
inline Scalar4f exp2(const Scalar4f& v)
{
	return ::exp2(v);
}
}
+
// Lane-wise base-2 logarithm, computed as ln(v) / ln(2).
inline Scalar4f log2(const Scalar4f& v)
{
	float scale = 1.44269504088896341f; // 1/ln(2)
	return Scalar4f(::logf(v.f4[0]) * scale, ::logf(v.f4[1]) * scale, ::logf(v.f4[2]) * scale, ::logf(v.f4[3]) * scale);
}

// 3-component dot product (the w lane is ignored), broadcast into all
// four lanes via the scalar simd4f factory.
inline Scalar4f dot3(const Scalar4f& v0, const Scalar4f& v1)
{
	return simd4f(v0.f4[0] * v1.f4[0] + v0.f4[1] * v1.f4[1] + v0.f4[2] * v1.f4[2]);
}

// 3-component cross product; the w lane of the result is set to zero.
inline Scalar4f cross3(const Scalar4f& v0, const Scalar4f& v1)
{
	return simd4f(v0.f4[1] * v1.f4[2] - v0.f4[2] * v1.f4[1], v0.f4[2] * v1.f4[0] - v0.f4[0] * v1.f4[2],
	              v0.f4[0] * v1.f4[1] - v0.f4[1] * v1.f4[0], 0.0f);
}
+
// In-place 4x4 transpose of the matrix whose rows are x, y, z, w.
// The upper-triangle elements are saved into locals first because they are
// overwritten before they are read; the remaining assignments then swap
// rows and columns. The exact statement order is what keeps this correct
// without a full 16-float temporary.
inline void transpose(Scalar4f& x, Scalar4f& y, Scalar4f& z, Scalar4f& w)
{
	// save upper-triangle elements that would otherwise be clobbered
	float x1 = x.f4[1], x2 = x.f4[2], x3 = x.f4[3];
	float y2 = y.f4[2], y3 = y.f4[3], z3 = z.f4[3];

	x.f4[1] = y.f4[0];
	x.f4[2] = z.f4[0];
	x.f4[3] = w.f4[0];
	y.f4[0] = x1;
	y.f4[2] = z.f4[1];
	y.f4[3] = w.f4[1];
	z.f4[0] = x2;
	z.f4[1] = y2;
	z.f4[3] = w.f4[2];
	w.f4[0] = x3;
	w.f4[1] = y3;
	w.f4[2] = z3;
}
+
// The all*/any* predicates return non-zero when the condition holds for all
// (respectively any) of the four lanes. The overloads taking outMask also
// write a per-lane all-ones/all-zero result mask (via the bool constructor).

inline int allEqual(const Scalar4f& v0, const Scalar4f& v1)
{
	return v0.f4[0] == v1.f4[0] && v0.f4[1] == v1.f4[1] && v0.f4[2] == v1.f4[2] && v0.f4[3] == v1.f4[3];
}

inline int allEqual(const Scalar4f& v0, const Scalar4f& v1, Scalar4f& outMask)
{
	bool b0 = v0.f4[0] == v1.f4[0], b1 = v0.f4[1] == v1.f4[1], b2 = v0.f4[2] == v1.f4[2], b3 = v0.f4[3] == v1.f4[3];
	outMask = Scalar4f(b0, b1, b2, b3);
	return b0 && b1 && b2 && b3;
}

inline int anyEqual(const Scalar4f& v0, const Scalar4f& v1)
{
	return v0.f4[0] == v1.f4[0] || v0.f4[1] == v1.f4[1] || v0.f4[2] == v1.f4[2] || v0.f4[3] == v1.f4[3];
}

inline int anyEqual(const Scalar4f& v0, const Scalar4f& v1, Scalar4f& outMask)
{
	bool b0 = v0.f4[0] == v1.f4[0], b1 = v0.f4[1] == v1.f4[1], b2 = v0.f4[2] == v1.f4[2], b3 = v0.f4[3] == v1.f4[3];
	outMask = Scalar4f(b0, b1, b2, b3);
	return b0 || b1 || b2 || b3;
}

inline int allGreater(const Scalar4f& v0, const Scalar4f& v1)
{
	return v0.f4[0] > v1.f4[0] && v0.f4[1] > v1.f4[1] && v0.f4[2] > v1.f4[2] && v0.f4[3] > v1.f4[3];
}

inline int allGreater(const Scalar4f& v0, const Scalar4f& v1, Scalar4f& outMask)
{
	bool b0 = v0.f4[0] > v1.f4[0], b1 = v0.f4[1] > v1.f4[1], b2 = v0.f4[2] > v1.f4[2], b3 = v0.f4[3] > v1.f4[3];
	outMask = Scalar4f(b0, b1, b2, b3);
	return b0 && b1 && b2 && b3;
}

inline int anyGreater(const Scalar4f& v0, const Scalar4f& v1)
{
	return v0.f4[0] > v1.f4[0] || v0.f4[1] > v1.f4[1] || v0.f4[2] > v1.f4[2] || v0.f4[3] > v1.f4[3];
}

inline int anyGreater(const Scalar4f& v0, const Scalar4f& v1, Scalar4f& outMask)
{
	bool b0 = v0.f4[0] > v1.f4[0], b1 = v0.f4[1] > v1.f4[1], b2 = v0.f4[2] > v1.f4[2], b3 = v0.f4[3] > v1.f4[3];
	outMask = Scalar4f(b0, b1, b2, b3);
	return b0 || b1 || b2 || b3;
}

inline int allGreaterEqual(const Scalar4f& v0, const Scalar4f& v1)
{
	return v0.f4[0] >= v1.f4[0] && v0.f4[1] >= v1.f4[1] && v0.f4[2] >= v1.f4[2] && v0.f4[3] >= v1.f4[3];
}

inline int allGreaterEqual(const Scalar4f& v0, const Scalar4f& v1, Scalar4f& outMask)
{
	bool b0 = v0.f4[0] >= v1.f4[0], b1 = v0.f4[1] >= v1.f4[1], b2 = v0.f4[2] >= v1.f4[2], b3 = v0.f4[3] >= v1.f4[3];
	outMask = Scalar4f(b0, b1, b2, b3);
	return b0 && b1 && b2 && b3;
}

inline int anyGreaterEqual(const Scalar4f& v0, const Scalar4f& v1)
{
	return v0.f4[0] >= v1.f4[0] || v0.f4[1] >= v1.f4[1] || v0.f4[2] >= v1.f4[2] || v0.f4[3] >= v1.f4[3];
}

inline int anyGreaterEqual(const Scalar4f& v0, const Scalar4f& v1, Scalar4f& outMask)
{
	bool b0 = v0.f4[0] >= v1.f4[0], b1 = v0.f4[1] >= v1.f4[1], b2 = v0.f4[2] >= v1.f4[2], b3 = v0.f4[3] >= v1.f4[3];
	outMask = Scalar4f(b0, b1, b2, b3);
	return b0 || b1 || b2 || b3;
}

// allTrue/anyTrue assume each lane is a comparison mask, i.e. exactly
// all-ones or all-zero; they AND/OR the raw lane bits together.
inline int allTrue(const Scalar4f& v)
{
	return v.u4[0] & v.u4[1] & v.u4[2] & v.u4[3];
}

inline int anyTrue(const Scalar4f& v)
{
	return v.u4[0] | v.u4[1] | v.u4[2] | v.u4[3];
}
diff --git a/APEX_1.4/module/clothing/embedded/LowLevelCloth/src/scalar/Simd4i.h b/APEX_1.4/module/clothing/embedded/LowLevelCloth/src/scalar/Simd4i.h
new file mode 100644
index 00000000..80ac2abd
--- /dev/null
+++ b/APEX_1.4/module/clothing/embedded/LowLevelCloth/src/scalar/Simd4i.h
@@ -0,0 +1,188 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#pragma once
+
+// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+// factory implementation
+// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+
// Broadcast a single int into all four lanes.
template <>
inline Simd4iFactory<const int&>::operator Scalar4i() const
{
	return Scalar4i(v, v, v, v);
}

// Reinterpret a four-int tuple as a vector without per-lane copies.
inline Simd4iFactory<detail::FourTuple>::operator Scalar4i() const
{
	return reinterpret_cast<const Scalar4i&>(v);
}

// Broadcast a compile-time integer constant.
template <int i>
inline Simd4iFactory<detail::IntType<i> >::operator Scalar4i() const
{
	return Scalar4i(i, i, i, i);
}

// Unaligned load of four consecutive ints.
template <>
inline Simd4iFactory<const int*>::operator Scalar4i() const
{
	return Scalar4i(v[0], v[1], v[2], v[3]);
}

// Aligned load; alignment is irrelevant on the scalar path.
template <>
inline Simd4iFactory<detail::AlignedPointer<int> >::operator Scalar4i() const
{
	return Scalar4i(v.ptr[0], v.ptr[1], v.ptr[2], v.ptr[3]);
}

// Load four ints from a base pointer plus a byte offset.
template <>
inline Simd4iFactory<detail::OffsetPointer<int> >::operator Scalar4i() const
{
	const int* ptr = reinterpret_cast<const int*>(reinterpret_cast<const char*>(v.ptr) + v.offset);
	return Scalar4i(ptr[0], ptr[1], ptr[2], ptr[3]);
}
+
+// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+// operator implementations
+// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+
// Integer operators live in namespace simdi because Scalar4i is a typedef
// of Scalar4f here, so they would otherwise clash with the float overloads.
namespace simdi
{

// Comparisons produce a per-lane mask (all-ones/all-zero via bool ctor).
inline Scalar4i operator==(const Scalar4i& v0, const Scalar4i& v1)
{
	return Scalar4i(v0.i4[0] == v1.i4[0], v0.i4[1] == v1.i4[1], v0.i4[2] == v1.i4[2], v0.i4[3] == v1.i4[3]);
}

inline Scalar4i operator<(const Scalar4i& v0, const Scalar4i& v1)
{
	return Scalar4i(v0.i4[0] < v1.i4[0], v0.i4[1] < v1.i4[1], v0.i4[2] < v1.i4[2], v0.i4[3] < v1.i4[3]);
}

inline Scalar4i operator>(const Scalar4i& v0, const Scalar4i& v1)
{
	return Scalar4i(v0.i4[0] > v1.i4[0], v0.i4[1] > v1.i4[1], v0.i4[2] > v1.i4[2], v0.i4[3] > v1.i4[3]);
}

// Lane-wise integer arithmetic.
inline Scalar4i operator+(const Scalar4i& v0, const Scalar4i& v1)
{
	return Scalar4i(v0.i4[0] + v1.i4[0], v0.i4[1] + v1.i4[1], v0.i4[2] + v1.i4[2], v0.i4[3] + v1.i4[3]);
}

inline Scalar4i operator-(const Scalar4i& v)
{
	return Scalar4i(-v.i4[0], -v.i4[1], -v.i4[2], -v.i4[3]);
}

inline Scalar4i operator-(const Scalar4i& v0, const Scalar4i& v1)
{
	return Scalar4i(v0.i4[0] - v1.i4[0], v0.i4[1] - v1.i4[1], v0.i4[2] - v1.i4[2], v0.i4[3] - v1.i4[3]);
}

} // namespace simdi
+
+// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+// function implementations
+// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+
// Reinterpret a float vector as an integer vector (identity in the scalar
// backend, where Scalar4i is a typedef of Scalar4f).
inline Scalar4i simd4i(const Scalar4f& v)
{
	return v;
}

namespace simdi
{

// Expose the four int lanes as a mutable C-array reference.
inline int (&array(Scalar4i& v))[4]
{
	return v.i4;
}

// Expose the four int lanes as a read-only C-array reference.
inline const int (&array(const Scalar4i& v))[4]
{
	return v.i4;
}

} // namespace simdi

// Store all four int lanes to (possibly unaligned) memory.
inline void store(int* ptr, const Scalar4i& v)
{
	ptr[0] = v.i4[0];
	ptr[1] = v.i4[1];
	ptr[2] = v.i4[2];
	ptr[3] = v.i4[3];
}

// Aligned store; alignment makes no difference on the scalar path.
inline void storeAligned(int* ptr, const Scalar4i& v)
{
	store(ptr, v);
}

// Aligned store at a byte offset from ptr.
inline void storeAligned(int* ptr, unsigned int offset, const Scalar4i& v)
{
	store(reinterpret_cast<int*>(reinterpret_cast<char*>(ptr) + offset), v);
}
+
+namespace simdi
+{
+
+inline int allEqual(const Scalar4i& v0, const Scalar4i& v1)
+{
+ return v0.i4[0] == v1.i4[0] && v0.i4[1] == v1.i4[1] && v0.i4[2] == v1.i4[2] && v0.i4[3] == v1.i4[3];
+}
+
+inline int allEqual(const Scalar4i& v0, const Scalar4i& v1, Scalar4i& outMask)
+{
+ bool b0 = v0.i4[0] == v1.i4[0], b1 = v0.i4[1] == v1.i4[1], b2 = v0.i4[2] == v1.i4[2], b3 = v0.i4[3] == v1.i4[3];
+ outMask = Scalar4f(b0, b1, b2, b3);
+ return b0 && b1 && b2 && b3;
+}
+
+inline int anyEqual(const Scalar4i& v0, const Scalar4i& v1)
+{
+ return v0.i4[0] == v1.i4[0] || v0.i4[1] == v1.i4[1] || v0.i4[2] == v1.i4[2] || v0.i4[3] == v1.i4[3];
+}
+
+inline int anyEqual(const Scalar4i& v0, const Scalar4i& v1, Scalar4i& outMask)
+{
+ bool b0 = v0.i4[0] == v1.i4[0], b1 = v0.i4[1] == v1.i4[1], b2 = v0.i4[2] == v1.i4[2], b3 = v0.i4[3] == v1.i4[3];
+ outMask = Scalar4f(b0, b1, b2, b3);
+ return b0 || b1 || b2 || b3;
+}
+
+inline int allGreater(const Scalar4i& v0, const Scalar4i& v1)
+{
+ return v0.i4[0] > v1.i4[0] && v0.i4[1] > v1.i4[1] && v0.i4[2] > v1.i4[2] && v0.i4[3] > v1.i4[3];
+}
+
+inline int allGreater(const Scalar4i& v0, const Scalar4i& v1, Scalar4i& outMask)
+{
+ bool b0 = v0.i4[0] > v1.i4[0], b1 = v0.i4[1] > v1.i4[1], b2 = v0.i4[2] > v1.i4[2], b3 = v0.i4[3] > v1.i4[3];
+ outMask = Scalar4f(b0, b1, b2, b3);
+ return b0 && b1 && b2 && b3;
+}
+
+inline int anyGreater(const Scalar4i& v0, const Scalar4i& v1)
+{
+ return v0.i4[0] > v1.i4[0] || v0.i4[1] > v1.i4[1] || v0.i4[2] > v1.i4[2] || v0.i4[3] > v1.i4[3];
+}
+
+inline int anyGreater(const Scalar4i& v0, const Scalar4i& v1, Scalar4i& outMask)
+{
+ bool b0 = v0.i4[0] > v1.i4[0], b1 = v0.i4[1] > v1.i4[1], b2 = v0.i4[2] > v1.i4[2], b3 = v0.i4[3] > v1.i4[3];
+ outMask = Scalar4f(b0, b1, b2, b3);
+ return b0 || b1 || b2 || b3;
+}
+
+} // namespace simd
diff --git a/APEX_1.4/module/clothing/embedded/LowLevelCloth/src/scalar/SimdTypes.h b/APEX_1.4/module/clothing/embedded/LowLevelCloth/src/scalar/SimdTypes.h
new file mode 100644
index 00000000..a287766c
--- /dev/null
+++ b/APEX_1.4/module/clothing/embedded/LowLevelCloth/src/scalar/SimdTypes.h
@@ -0,0 +1,86 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#pragma once
+
+#ifdef PX_WIIU
+#pragma ghs nowarning 193 // warning #193-D: zero used for undefined preprocessing identifier
+#endif
+
+#include <algorithm>
+
+#ifdef PX_WIIU
+#pragma ghs endnowarning
+#endif
+
// Four-lane vector for the scalar (non-SIMD) backend. The union overlays
// float, signed-int and unsigned-int views of the same 128 bits so the
// emulation code can freely mix arithmetic and bitwise operations.
union Scalar4f
{
	// Lanes are left uninitialized, mirroring a hardware SIMD register.
	Scalar4f()
	{
	}

	// Construct from four float lanes.
	Scalar4f(float x, float y, float z, float w)
	{
		const float values[4] = { x, y, z, w };
		for(int lane = 0; lane < 4; ++lane)
			f4[lane] = values[lane];
	}

	// Construct from four signed-integer lanes (raw bits, no conversion).
	Scalar4f(int32_t x, int32_t y, int32_t z, int32_t w)
	{
		const int32_t values[4] = { x, y, z, w };
		for(int lane = 0; lane < 4; ++lane)
			i4[lane] = values[lane];
	}

	// Construct from four unsigned-integer lanes (raw bits, no conversion).
	Scalar4f(uint32_t x, uint32_t y, uint32_t z, uint32_t w)
	{
		const uint32_t values[4] = { x, y, z, w };
		for(int lane = 0; lane < 4; ++lane)
			u4[lane] = values[lane];
	}

	// Construct a comparison-style mask: true lanes become all-ones,
	// false lanes become all-zero.
	Scalar4f(bool x, bool y, bool z, bool w)
	{
		const bool flags[4] = { x, y, z, w };
		for(int lane = 0; lane < 4; ++lane)
			u4[lane] = flags[lane] ? 0xffffffffu : 0u;
	}

	// Copy the raw lane bits (the unsigned view avoids any float traffic).
	Scalar4f(const Scalar4f& other)
	{
		for(int lane = 0; lane < 4; ++lane)
			u4[lane] = other.u4[lane];
	}

	// Assign the raw lane bits.
	Scalar4f& operator=(const Scalar4f& other)
	{
		for(int lane = 0; lane < 4; ++lane)
			u4[lane] = other.u4[lane];
		return *this;
	}

	float f4[4];    // float view of the four lanes
	int32_t i4[4];  // signed-integer view
	uint32_t u4[4]; // unsigned-integer view
};

// The scalar backend uses one storage type for both float and int vectors.
typedef Scalar4f Scalar4i;
diff --git a/APEX_1.4/module/clothing/embedded/LowLevelCloth/src/scalar/SwCollisionHelpers.h b/APEX_1.4/module/clothing/embedded/LowLevelCloth/src/scalar/SwCollisionHelpers.h
new file mode 100644
index 00000000..33b35f72
--- /dev/null
+++ b/APEX_1.4/module/clothing/embedded/LowLevelCloth/src/scalar/SwCollisionHelpers.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#pragma once
+
+namespace nvidia
+{
+namespace cloth
+{
+
+#if !NVMATH_SIMD
// Returns the index of the highest set bit of mask (floor(log2(mask))).
// Returns 0 for inputs 0 and 1.
// Fix: marked inline — this is a header-defined, non-template free function,
// and without inline it produces a multiple-definition (ODR) error when the
// header is included from more than one translation unit.
inline uint32_t findBitSet(uint32_t mask)
{
	uint32_t result = 0;
	while(mask >>= 1)
		++result;
	return result;
}
+#endif
+
// Lane-wise floor-then-convert to int: rounds toward negative infinity,
// unlike a bare int cast, which truncates toward zero (matters for
// negative coordinates when computing grid cells).
inline Scalar4i intFloor(const Scalar4f& v)
{
	return Scalar4i(int(floor(v.f4[0])), int(floor(v.f4[1])), int(floor(v.f4[2])), int(floor(v.f4[3])));
}
+
+inline Scalar4i horizontalOr(Scalar4i mask)
+{
+ return simd4i(mask.i4[0] | mask.i4[1] | mask.i4[2] | mask.i4[3]);
+}
+
// Scalar specialization of Gather: per-lane table lookup with wrap-around
// indexing; lanes whose index was out of range are masked to zero.
template <>
struct Gather<Scalar4i>
{
	inline Gather(const Scalar4i& index);
	inline Scalar4i operator()(const Scalar4i*) const;

	Scalar4i mIndex;      // indices wrapped into [0, sGridSize)
	Scalar4i mOutOfRange; // all-ones for in-range lanes, zero otherwise
};
+
// Wrap each lane index into [0, sGridSize) and remember which lanes were
// originally in range so the lookup can zero out-of-range results.
// Assumes sGridSize is a power of two (mask arithmetic relies on it) —
// TODO(review): confirm.
Gather<Scalar4i>::Gather(const Scalar4i& index)
{
	// NOTE(review): qualifier names physx::cloth while this header sits in
	// namespace nvidia::cloth — verify SwCollision's actual namespace.
	uint32_t mask = physx::cloth::SwCollision<Scalar4i>::sGridSize - 1;

	mIndex.u4[0] = index.u4[0] & mask;
	mIndex.u4[1] = index.u4[1] & mask;
	mIndex.u4[2] = index.u4[2] & mask;
	mIndex.u4[3] = index.u4[3] & mask;

	// -1 (all bits set) for lanes whose index had no bits above the mask,
	// 0 for lanes that overflowed the grid range
	mOutOfRange.u4[0] = index.u4[0] & ~mask ? 0 : -1;
	mOutOfRange.u4[1] = index.u4[1] & ~mask ? 0 : -1;
	mOutOfRange.u4[2] = index.u4[2] & ~mask ? 0 : -1;
	mOutOfRange.u4[3] = index.u4[3] & ~mask ? 0 : -1;
}
+
// Per-lane gather: result lane k is base[mIndex[k]], ANDed with the
// in-range mask so out-of-range lanes come back as zero.
Scalar4i Gather<Scalar4i>::operator()(const Scalar4i* ptr) const
{
	const int32_t* base = ptr->i4;
	const int32_t* index = mIndex.i4;
	const int32_t* mask = mOutOfRange.i4;
	return Scalar4i(base[index[0]] & mask[0], base[index[1]] & mask[1], base[index[2]] & mask[2],
	                base[index[3]] & mask[3]);
}
+
+} // namespace cloth
} // namespace nvidia