diff options
Diffstat (limited to 'external/NvFoundation/1.1/include')
33 files changed, 6930 insertions, 0 deletions
diff --git a/external/NvFoundation/1.1/include/Nv.h b/external/NvFoundation/1.1/include/Nv.h new file mode 100644 index 0000000..83d85de --- /dev/null +++ b/external/NvFoundation/1.1/include/Nv.h @@ -0,0 +1,89 @@ +// This code contains NVIDIA Confidential Information and is disclosed to you +// under a form of NVIDIA software license agreement provided separately to you. +// +// Notice +// NVIDIA Corporation and its licensors retain all intellectual property and +// proprietary rights in and to this software and related documentation and +// any modifications thereto. Any use, reproduction, disclosure, or +// distribution of this software and related documentation without an express +// license agreement from NVIDIA Corporation is strictly prohibited. +// +// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES +// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO +// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT, +// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE. +// +// Information and code furnished is believed to be accurate and reliable. +// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such +// information or for any infringement of patents or other rights of third parties that may +// result from its use. No license is granted by implication or otherwise under any patent +// or patent rights of NVIDIA Corporation. Details are subject to change without notice. +// This code supersedes and replaces all information previously supplied. +// NVIDIA Corporation products are not authorized for use as critical +// components in life support devices or systems without express written approval of +// NVIDIA Corporation. +// +// Copyright (c) 2008-2014 NVIDIA Corporation. All rights reserved. +// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. +// Copyright (c) 2001-2004 NovodeX AG. All rights reserved. 
+ +#ifndef NV_NVFOUNDATION_NV_H +#define NV_NVFOUNDATION_NV_H + +/** \addtogroup foundation +@{ +*/ + +#include "NvSimpleTypes.h" + +/** files to always include */ +#include <string.h> +#include <stdlib.h> + +#if !NV_DOXYGEN +namespace nvidia +{ +#endif +class NvAllocatorCallback; +class NvErrorCallback; +struct NvErrorCode; +class NvAssertHandler; + +class NvInputStream; +class NvInputData; +class NvOutputStream; + +class NvVec2; +class NvVec3; +class NvVec4; +class NvMat33; +class NvMat44; +class NvPlane; +class NvQuat; +class NvTransform; +class NvBounds3; + +/** enum for empty constructor tag*/ +enum NvEMPTY +{ + NvEmpty +}; + +/** enum for zero constructor tag for vectors and matrices */ +enum NvZERO +{ + NvZero +}; + +/** enum for identity constructor flag for quaternions, transforms, and matrices */ +enum NvIDENTITY +{ + NvIdentity +}; + +#if !NV_DOXYGEN +} // namespace nvidia +#endif + +/** @} */ +#endif // #ifndef NV_NVFOUNDATION_NV_H diff --git a/external/NvFoundation/1.1/include/NvAllocatorCallback.h b/external/NvFoundation/1.1/include/NvAllocatorCallback.h new file mode 100644 index 0000000..1d1682d --- /dev/null +++ b/external/NvFoundation/1.1/include/NvAllocatorCallback.h @@ -0,0 +1,95 @@ +// This code contains NVIDIA Confidential Information and is disclosed to you +// under a form of NVIDIA software license agreement provided separately to you. +// +// Notice +// NVIDIA Corporation and its licensors retain all intellectual property and +// proprietary rights in and to this software and related documentation and +// any modifications thereto. Any use, reproduction, disclosure, or +// distribution of this software and related documentation without an express +// license agreement from NVIDIA Corporation is strictly prohibited. +// +// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". 
NVIDIA MAKES +// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO +// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT, +// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE. +// +// Information and code furnished is believed to be accurate and reliable. +// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such +// information or for any infringement of patents or other rights of third parties that may +// result from its use. No license is granted by implication or otherwise under any patent +// or patent rights of NVIDIA Corporation. Details are subject to change without notice. +// This code supersedes and replaces all information previously supplied. +// NVIDIA Corporation products are not authorized for use as critical +// components in life support devices or systems without express written approval of +// NVIDIA Corporation. +// +// Copyright (c) 2008-2014 NVIDIA Corporation. All rights reserved. +// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. +// Copyright (c) 2001-2004 NovodeX AG. All rights reserved. + +#ifndef NV_NVFOUNDATION_NVALLOCATORCALLBACK_H +#define NV_NVFOUNDATION_NVALLOCATORCALLBACK_H + +/** \addtogroup foundation +@{ +*/ + +#include "Nv.h" +#if !NV_DOXYGEN +namespace nvidia +{ +#endif + +/** +\brief Abstract base class for an application defined memory allocator that can be used by the Nv library. + +\note The SDK state should not be modified from within any allocation/free function. + +<b>Threading:</b> All methods of this class should be thread safe as it can be called from the user thread +or the physics processing thread(s). +*/ + +class NvAllocatorCallback +{ + public: + /** + \brief destructor + */ + virtual ~NvAllocatorCallback() + { + } + + /** + \brief Allocates size bytes of memory, which must be 16-byte aligned. + + This method should never return NULL. 
If you run out of memory, then + you should terminate the app or take some other appropriate action. + + <b>Threading:</b> This function should be thread safe as it can be called in the context of the user thread + and physics processing thread(s). + + \param size Number of bytes to allocate. + \param typeName Name of the datatype that is being allocated + \param filename The source file which allocated the memory + \param line The source line which allocated the memory + \return The allocated block of memory. + */ + virtual void* allocate(size_t size, const char* typeName, const char* filename, int line) = 0; + + /** + \brief Frees memory previously allocated by allocate(). + + <b>Threading:</b> This function should be thread safe as it can be called in the context of the user thread + and physics processing thread(s). + + \param ptr Memory to free. + */ + virtual void deallocate(void* ptr) = 0; +}; + +#if !NV_DOXYGEN +} // namespace nvidia +#endif + +/** @} */ +#endif // #ifndef NV_NVFOUNDATION_NVALLOCATORCALLBACK_H diff --git a/external/NvFoundation/1.1/include/NvAssert.h b/external/NvFoundation/1.1/include/NvAssert.h new file mode 100644 index 0000000..e2ccb95 --- /dev/null +++ b/external/NvFoundation/1.1/include/NvAssert.h @@ -0,0 +1,97 @@ +// This code contains NVIDIA Confidential Information and is disclosed to you +// under a form of NVIDIA software license agreement provided separately to you. +// +// Notice +// NVIDIA Corporation and its licensors retain all intellectual property and +// proprietary rights in and to this software and related documentation and +// any modifications thereto. Any use, reproduction, disclosure, or +// distribution of this software and related documentation without an express +// license agreement from NVIDIA Corporation is strictly prohibited. +// +// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". 
NVIDIA MAKES +// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO +// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT, +// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE. +// +// Information and code furnished is believed to be accurate and reliable. +// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such +// information or for any infringement of patents or other rights of third parties that may +// result from its use. No license is granted by implication or otherwise under any patent +// or patent rights of NVIDIA Corporation. Details are subject to change without notice. +// This code supersedes and replaces all information previously supplied. +// NVIDIA Corporation products are not authorized for use as critical +// components in life support devices or systems without express written approval of +// NVIDIA Corporation. +// +// Copyright (c) 2008-2014 NVIDIA Corporation. All rights reserved. +// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. +// Copyright (c) 2001-2004 NovodeX AG. All rights reserved. 
+ +#ifndef NV_NVFOUNDATION_NVASSERT_H +#define NV_NVFOUNDATION_NVASSERT_H + +/** \addtogroup foundation +@{ */ + +#include "Nv.h" + +#if !NV_DOXYGEN +namespace nvidia +{ +#endif + +/* Base class to handle assert failures */ +class NvAssertHandler +{ + public: + virtual ~NvAssertHandler() + { + } + virtual void operator()(const char* exp, const char* file, int line, bool& ignore) = 0; +}; + +NV_FOUNDATION_API NvAssertHandler& NvGetAssertHandler(); +NV_FOUNDATION_API void NvSetAssertHandler(NvAssertHandler& handler); + +#if !NV_DOXYGEN +} // namespace nvidia +#endif + +#if !NV_ENABLE_ASSERTS +#define NV_ASSERT(exp) ((void)0) +#define NV_ALWAYS_ASSERT_MESSAGE(exp) ((void)0) +#define NV_ASSERT_WITH_MESSAGE(condition, message) ((void)0) +#elif NV_SPU +#include "ps3/NvPS3Assert.h" +#else +#if NV_VC +#define NV_CODE_ANALYSIS_ASSUME(exp) \ + __analysis_assume(!!(exp)) // This macro will be used to get rid of analysis warning messages if a NV_ASSERT is used +// to "guard" illegal mem access, for example. 
+#else +#define NV_CODE_ANALYSIS_ASSUME(exp) +#endif +#define NV_ASSERT(exp) \ + { \ + static bool _ignore = false; \ + ((void)((!!(exp)) || (!_ignore && (nvidia::NvGetAssertHandler()(#exp, __FILE__, __LINE__, _ignore), false)))); \ + NV_CODE_ANALYSIS_ASSUME(exp); \ + } +#define NV_ALWAYS_ASSERT_MESSAGE(exp) \ + { \ + static bool _ignore = false; \ + if(!_ignore) \ + nvidia::NvGetAssertHandler()(exp, __FILE__, __LINE__, _ignore); \ + } +#define NV_ASSERT_WITH_MESSAGE(exp, message) \ + { \ + static bool _ignore = false; \ + ((void)((!!(exp)) || (!_ignore && (nvidia::NvGetAssertHandler()(message, __FILE__, __LINE__, _ignore), false)))); \ + NV_CODE_ANALYSIS_ASSUME(exp); \ + } +#endif + +#define NV_ALWAYS_ASSERT() NV_ASSERT(0) + +/** @} */ +#endif // #ifndef NV_NVFOUNDATION_NVASSERT_H diff --git a/external/NvFoundation/1.1/include/NvBounds3.h b/external/NvFoundation/1.1/include/NvBounds3.h new file mode 100644 index 0000000..ea76169 --- /dev/null +++ b/external/NvFoundation/1.1/include/NvBounds3.h @@ -0,0 +1,480 @@ +// This code contains NVIDIA Confidential Information and is disclosed to you +// under a form of NVIDIA software license agreement provided separately to you. +// +// Notice +// NVIDIA Corporation and its licensors retain all intellectual property and +// proprietary rights in and to this software and related documentation and +// any modifications thereto. Any use, reproduction, disclosure, or +// distribution of this software and related documentation without an express +// license agreement from NVIDIA Corporation is strictly prohibited. +// +// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES +// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO +// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT, +// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE. +// +// Information and code furnished is believed to be accurate and reliable. 
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such +// information or for any infringement of patents or other rights of third parties that may +// result from its use. No license is granted by implication or otherwise under any patent +// or patent rights of NVIDIA Corporation. Details are subject to change without notice. +// This code supersedes and replaces all information previously supplied. +// NVIDIA Corporation products are not authorized for use as critical +// components in life support devices or systems without express written approval of +// NVIDIA Corporation. +// +// Copyright (c) 2008-2014 NVIDIA Corporation. All rights reserved. +// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. +// Copyright (c) 2001-2004 NovodeX AG. All rights reserved. + +#ifndef NV_NVFOUNDATION_NVBOUNDS3_H +#define NV_NVFOUNDATION_NVBOUNDS3_H + +/** \addtogroup foundation +@{ +*/ + +#include "NvTransform.h" +#include "NvMat33.h" + +#if !NV_DOXYGEN +namespace nvidia +{ +#endif + +// maximum extents defined such that floating point exceptions are avoided for standard use cases +#define NV_MAX_BOUNDS_EXTENTS (NV_MAX_REAL * 0.25f) + +/** +\brief Class representing 3D range or axis aligned bounding box. + +Stored as minimum and maximum extent corners. Alternate representation +would be center and dimensions. +May be empty or nonempty. For nonempty bounds, minimum <= maximum has to hold for all axes. +Empty bounds have to be represented as minimum = NV_MAX_BOUNDS_EXTENTS and maximum = -NV_MAX_BOUNDS_EXTENTS for all +axes. +All other representations are invalid and the behavior is undefined. +*/ +class NvBounds3 +{ + public: + /** + \brief Default constructor, not performing any initialization for performance reason. + \remark Use empty() function below to construct empty bounds. 
+ */ + NV_CUDA_CALLABLE NV_FORCE_INLINE NvBounds3() + { + } + + /** + \brief Construct from two bounding points + */ + NV_CUDA_CALLABLE NV_FORCE_INLINE NvBounds3(const NvVec3& minimum, const NvVec3& maximum); + + /** + \brief Return empty bounds. + */ + static NV_CUDA_CALLABLE NV_FORCE_INLINE NvBounds3 empty(); + + /** + \brief returns the AABB containing v0 and v1. + \param v0 first point included in the AABB. + \param v1 second point included in the AABB. + */ + static NV_CUDA_CALLABLE NV_FORCE_INLINE NvBounds3 boundsOfPoints(const NvVec3& v0, const NvVec3& v1); + + /** + \brief returns the AABB from center and extents vectors. + \param center Center vector + \param extent Extents vector + */ + static NV_CUDA_CALLABLE NV_FORCE_INLINE NvBounds3 centerExtents(const NvVec3& center, const NvVec3& extent); + + /** + \brief Construct from center, extent, and (not necessarily orthogonal) basis + */ + static NV_CUDA_CALLABLE NV_INLINE NvBounds3 + basisExtent(const NvVec3& center, const NvMat33& basis, const NvVec3& extent); + + /** + \brief Construct from pose and extent + */ + static NV_CUDA_CALLABLE NV_INLINE NvBounds3 poseExtent(const NvTransform& pose, const NvVec3& extent); + + /** + \brief gets the transformed bounds of the passed AABB (resulting in a bigger AABB). + + This version is safe to call for empty bounds. + + \param[in] matrix Transform to apply, can contain scaling as well + \param[in] bounds The bounds to transform. + */ + static NV_CUDA_CALLABLE NV_INLINE NvBounds3 transformSafe(const NvMat33& matrix, const NvBounds3& bounds); + + /** + \brief gets the transformed bounds of the passed AABB (resulting in a bigger AABB). + + Calling this method for empty bounds leads to undefined behavior. Use #transformSafe() instead. + + \param[in] matrix Transform to apply, can contain scaling as well + \param[in] bounds The bounds to transform. 
+ */ + static NV_CUDA_CALLABLE NV_INLINE NvBounds3 transformFast(const NvMat33& matrix, const NvBounds3& bounds); + + /** + \brief gets the transformed bounds of the passed AABB (resulting in a bigger AABB). + + This version is safe to call for empty bounds. + + \param[in] transform Transform to apply, can contain scaling as well + \param[in] bounds The bounds to transform. + */ + static NV_CUDA_CALLABLE NV_INLINE NvBounds3 transformSafe(const NvTransform& transform, const NvBounds3& bounds); + + /** + \brief gets the transformed bounds of the passed AABB (resulting in a bigger AABB). + + Calling this method for empty bounds leads to undefined behavior. Use #transformSafe() instead. + + \param[in] transform Transform to apply, can contain scaling as well + \param[in] bounds The bounds to transform. + */ + static NV_CUDA_CALLABLE NV_INLINE NvBounds3 transformFast(const NvTransform& transform, const NvBounds3& bounds); + + /** + \brief Sets empty to true + */ + NV_CUDA_CALLABLE NV_FORCE_INLINE void setEmpty(); + + /** + \brief Sets the bounds to maximum size [-NV_MAX_BOUNDS_EXTENTS, NV_MAX_BOUNDS_EXTENTS]. + */ + NV_CUDA_CALLABLE NV_FORCE_INLINE void setMaximal(); + + /** + \brief expands the volume to include v + \param v Point to expand to. + */ + NV_CUDA_CALLABLE NV_FORCE_INLINE void include(const NvVec3& v); + + /** + \brief expands the volume to include b. + \param b Bounds to perform union with. + */ + NV_CUDA_CALLABLE NV_FORCE_INLINE void include(const NvBounds3& b); + + NV_CUDA_CALLABLE NV_FORCE_INLINE bool isEmpty() const; + + /** + \brief indicates whether the intersection of this and b is empty or not. + \param b Bounds to test for intersection. + */ + NV_CUDA_CALLABLE NV_FORCE_INLINE bool intersects(const NvBounds3& b) const; + + /** + \brief computes the 1D-intersection between two AABBs, on a given axis. 
+ \param a the other AABB + \param axis the axis (0, 1, 2) + */ + NV_CUDA_CALLABLE NV_FORCE_INLINE bool intersects1D(const NvBounds3& a, uint32_t axis) const; + + /** + \brief indicates if these bounds contain v. + \param v Point to test against bounds. + */ + NV_CUDA_CALLABLE NV_FORCE_INLINE bool contains(const NvVec3& v) const; + + /** + \brief checks a box is inside another box. + \param box the other AABB + */ + NV_CUDA_CALLABLE NV_FORCE_INLINE bool isInside(const NvBounds3& box) const; + + /** + \brief returns the center of this axis aligned box. + */ + NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec3 getCenter() const; + + /** + \brief get component of the box's center along a given axis + */ + NV_CUDA_CALLABLE NV_FORCE_INLINE float getCenter(uint32_t axis) const; + + /** + \brief get component of the box's extents along a given axis + */ + NV_CUDA_CALLABLE NV_FORCE_INLINE float getExtents(uint32_t axis) const; + + /** + \brief returns the dimensions (width/height/depth) of this axis aligned box. + */ + NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec3 getDimensions() const; + + /** + \brief returns the extents, which are half of the width/height/depth. + */ + NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec3 getExtents() const; + + /** + \brief scales the AABB. + + This version is safe to call for empty bounds. + + \param scale Factor to scale AABB by. + */ + NV_CUDA_CALLABLE NV_FORCE_INLINE void scaleSafe(float scale); + + /** + \brief scales the AABB. + + Calling this method for empty bounds leads to undefined behavior. Use #scaleSafe() instead. + + \param scale Factor to scale AABB by. + */ + NV_CUDA_CALLABLE NV_FORCE_INLINE void scaleFast(float scale); + + /** + fattens the AABB in all 3 dimensions by the given distance. + + This version is safe to call for empty bounds. + */ + NV_CUDA_CALLABLE NV_FORCE_INLINE void fattenSafe(float distance); + + /** + fattens the AABB in all 3 dimensions by the given distance. + + Calling this method for empty bounds leads to undefined behavior. 
Use #fattenSafe() instead. + */ + NV_CUDA_CALLABLE NV_FORCE_INLINE void fattenFast(float distance); + + /** + checks that the AABB values are not NaN + */ + NV_CUDA_CALLABLE NV_FORCE_INLINE bool isFinite() const; + + /** + checks that the AABB values describe a valid configuration. + */ + NV_CUDA_CALLABLE NV_FORCE_INLINE bool isValid() const; + + NvVec3 minimum, maximum; +}; + +NV_CUDA_CALLABLE NV_FORCE_INLINE NvBounds3::NvBounds3(const NvVec3& minimum_, const NvVec3& maximum_) +: minimum(minimum_), maximum(maximum_) +{ +} + +NV_CUDA_CALLABLE NV_FORCE_INLINE NvBounds3 NvBounds3::empty() +{ + return NvBounds3(NvVec3(NV_MAX_BOUNDS_EXTENTS), NvVec3(-NV_MAX_BOUNDS_EXTENTS)); +} + +NV_CUDA_CALLABLE NV_FORCE_INLINE bool NvBounds3::isFinite() const +{ + return minimum.isFinite() && maximum.isFinite(); +} + +NV_CUDA_CALLABLE NV_FORCE_INLINE NvBounds3 NvBounds3::boundsOfPoints(const NvVec3& v0, const NvVec3& v1) +{ + return NvBounds3(v0.minimum(v1), v0.maximum(v1)); +} + +NV_CUDA_CALLABLE NV_FORCE_INLINE NvBounds3 NvBounds3::centerExtents(const NvVec3& center, const NvVec3& extent) +{ + return NvBounds3(center - extent, center + extent); +} + +NV_CUDA_CALLABLE NV_INLINE NvBounds3 +NvBounds3::basisExtent(const NvVec3& center, const NvMat33& basis, const NvVec3& extent) +{ + // extended basis vectors + NvVec3 c0 = basis.column0 * extent.x; + NvVec3 c1 = basis.column1 * extent.y; + NvVec3 c2 = basis.column2 * extent.z; + + NvVec3 w; + // find combination of base vectors that produces max. 
distance for each component = sum of abs() + w.x = NvAbs(c0.x) + NvAbs(c1.x) + NvAbs(c2.x); + w.y = NvAbs(c0.y) + NvAbs(c1.y) + NvAbs(c2.y); + w.z = NvAbs(c0.z) + NvAbs(c1.z) + NvAbs(c2.z); + + return NvBounds3(center - w, center + w); +} + +NV_CUDA_CALLABLE NV_INLINE NvBounds3 NvBounds3::poseExtent(const NvTransform& pose, const NvVec3& extent) +{ + return basisExtent(pose.p, NvMat33(pose.q), extent); +} + +NV_CUDA_CALLABLE NV_FORCE_INLINE void NvBounds3::setEmpty() +{ + minimum = NvVec3(NV_MAX_BOUNDS_EXTENTS); + maximum = NvVec3(-NV_MAX_BOUNDS_EXTENTS); +} + +NV_CUDA_CALLABLE NV_FORCE_INLINE void NvBounds3::setMaximal() +{ + minimum = NvVec3(-NV_MAX_BOUNDS_EXTENTS); + maximum = NvVec3(NV_MAX_BOUNDS_EXTENTS); +} + +NV_CUDA_CALLABLE NV_FORCE_INLINE void NvBounds3::include(const NvVec3& v) +{ + NV_ASSERT(isValid()); + minimum = minimum.minimum(v); + maximum = maximum.maximum(v); +} + +NV_CUDA_CALLABLE NV_FORCE_INLINE void NvBounds3::include(const NvBounds3& b) +{ + NV_ASSERT(isValid()); + minimum = minimum.minimum(b.minimum); + maximum = maximum.maximum(b.maximum); +} + +NV_CUDA_CALLABLE NV_FORCE_INLINE bool NvBounds3::isEmpty() const +{ + NV_ASSERT(isValid()); + return minimum.x > maximum.x; +} + +NV_CUDA_CALLABLE NV_FORCE_INLINE bool NvBounds3::intersects(const NvBounds3& b) const +{ + NV_ASSERT(isValid() && b.isValid()); + return !(b.minimum.x > maximum.x || minimum.x > b.maximum.x || b.minimum.y > maximum.y || minimum.y > b.maximum.y || + b.minimum.z > maximum.z || minimum.z > b.maximum.z); +} + +NV_CUDA_CALLABLE NV_FORCE_INLINE bool NvBounds3::intersects1D(const NvBounds3& a, uint32_t axis) const +{ + NV_ASSERT(isValid() && a.isValid()); + return maximum[axis] >= a.minimum[axis] && a.maximum[axis] >= minimum[axis]; +} + +NV_CUDA_CALLABLE NV_FORCE_INLINE bool NvBounds3::contains(const NvVec3& v) const +{ + NV_ASSERT(isValid()); + + return !(v.x < minimum.x || v.x > maximum.x || v.y < minimum.y || v.y > maximum.y || v.z < minimum.z || + v.z > maximum.z); +} + 
+NV_CUDA_CALLABLE NV_FORCE_INLINE bool NvBounds3::isInside(const NvBounds3& box) const +{ + NV_ASSERT(isValid() && box.isValid()); + if(box.minimum.x > minimum.x) + return false; + if(box.minimum.y > minimum.y) + return false; + if(box.minimum.z > minimum.z) + return false; + if(box.maximum.x < maximum.x) + return false; + if(box.maximum.y < maximum.y) + return false; + if(box.maximum.z < maximum.z) + return false; + return true; +} + +NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec3 NvBounds3::getCenter() const +{ + NV_ASSERT(isValid()); + return (minimum + maximum) * 0.5f; +} + +NV_CUDA_CALLABLE NV_FORCE_INLINE float NvBounds3::getCenter(uint32_t axis) const +{ + NV_ASSERT(isValid()); + return (minimum[axis] + maximum[axis]) * 0.5f; +} + +NV_CUDA_CALLABLE NV_FORCE_INLINE float NvBounds3::getExtents(uint32_t axis) const +{ + NV_ASSERT(isValid()); + return (maximum[axis] - minimum[axis]) * 0.5f; +} + +NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec3 NvBounds3::getDimensions() const +{ + NV_ASSERT(isValid()); + return maximum - minimum; +} + +NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec3 NvBounds3::getExtents() const +{ + NV_ASSERT(isValid()); + return getDimensions() * 0.5f; +} + +NV_CUDA_CALLABLE NV_FORCE_INLINE void NvBounds3::scaleSafe(float scale) +{ + NV_ASSERT(isValid()); + if(!isEmpty()) + scaleFast(scale); +} + +NV_CUDA_CALLABLE NV_FORCE_INLINE void NvBounds3::scaleFast(float scale) +{ + NV_ASSERT(isValid()); + *this = centerExtents(getCenter(), getExtents() * scale); +} + +NV_CUDA_CALLABLE NV_FORCE_INLINE void NvBounds3::fattenSafe(float distance) +{ + NV_ASSERT(isValid()); + if(!isEmpty()) + fattenFast(distance); +} + +NV_CUDA_CALLABLE NV_FORCE_INLINE void NvBounds3::fattenFast(float distance) +{ + NV_ASSERT(isValid()); + minimum.x -= distance; + minimum.y -= distance; + minimum.z -= distance; + + maximum.x += distance; + maximum.y += distance; + maximum.z += distance; +} + +NV_CUDA_CALLABLE NV_INLINE NvBounds3 NvBounds3::transformSafe(const NvMat33& matrix, const NvBounds3& 
bounds) +{ + NV_ASSERT(bounds.isValid()); + return !bounds.isEmpty() ? transformFast(matrix, bounds) : bounds; +} + +NV_CUDA_CALLABLE NV_INLINE NvBounds3 NvBounds3::transformFast(const NvMat33& matrix, const NvBounds3& bounds) +{ + NV_ASSERT(bounds.isValid()); + return NvBounds3::basisExtent(matrix * bounds.getCenter(), matrix, bounds.getExtents()); +} + +NV_CUDA_CALLABLE NV_INLINE NvBounds3 NvBounds3::transformSafe(const NvTransform& transform, const NvBounds3& bounds) +{ + NV_ASSERT(bounds.isValid()); + return !bounds.isEmpty() ? transformFast(transform, bounds) : bounds; +} + +NV_CUDA_CALLABLE NV_INLINE NvBounds3 NvBounds3::transformFast(const NvTransform& transform, const NvBounds3& bounds) +{ + NV_ASSERT(bounds.isValid()); + return NvBounds3::basisExtent(transform.transform(bounds.getCenter()), NvMat33(transform.q), bounds.getExtents()); +} + +NV_CUDA_CALLABLE NV_FORCE_INLINE bool NvBounds3::isValid() const +{ + return (isFinite() && (((minimum.x <= maximum.x) && (minimum.y <= maximum.y) && (minimum.z <= maximum.z)) || + ((minimum.x == NV_MAX_BOUNDS_EXTENTS) && (minimum.y == NV_MAX_BOUNDS_EXTENTS) && + (minimum.z == NV_MAX_BOUNDS_EXTENTS) && (maximum.x == -NV_MAX_BOUNDS_EXTENTS) && + (maximum.y == -NV_MAX_BOUNDS_EXTENTS) && (maximum.z == -NV_MAX_BOUNDS_EXTENTS)))); +} + +#if !NV_DOXYGEN +} // namespace nvidia +#endif + +/** @} */ +#endif // #ifndef NV_NVFOUNDATION_NVBOUNDS3_H diff --git a/external/NvFoundation/1.1/include/NvCTypes.h b/external/NvFoundation/1.1/include/NvCTypes.h new file mode 100644 index 0000000..fd83c18 --- /dev/null +++ b/external/NvFoundation/1.1/include/NvCTypes.h @@ -0,0 +1,124 @@ +// This code contains NVIDIA Confidential Information and is disclosed to you +// under a form of NVIDIA software license agreement provided separately to you. +// +// Notice +// NVIDIA Corporation and its licensors retain all intellectual property and +// proprietary rights in and to this software and related documentation and +// any modifications thereto. 
Any use, reproduction, disclosure, or +// distribution of this software and related documentation without an express +// license agreement from NVIDIA Corporation is strictly prohibited. +// +// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES +// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO +// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT, +// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE. +// +// Information and code furnished is believed to be accurate and reliable. +// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such +// information or for any infringement of patents or other rights of third parties that may +// result from its use. No license is granted by implication or otherwise under any patent +// or patent rights of NVIDIA Corporation. Details are subject to change without notice. +// This code supersedes and replaces all information previously supplied. +// NVIDIA Corporation products are not authorized for use as critical +// components in life support devices or systems without express written approval of +// NVIDIA Corporation. +// +// Copyright (c) 2008-2013 NVIDIA Corporation. All rights reserved. +// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. +// Copyright (c) 2001-2004 NovodeX AG. All rights reserved. 
+ +#ifndef NV_C_TYPES_H +#define NV_C_TYPES_H + +#include "NvPreprocessor.h" +#ifdef _MSC_VER +#ifndef _INTPTR +#define _INTPTR 0 +#endif +#endif +#include <stdint.h> + +/** C type for 2-float vectors */ +typedef struct +{ + float x, y; +} NvcVec2; + +/** C type for 3-float vectors */ +typedef struct +{ + float x, y, z; +} NvcVec3; + +/** C type for 4-float vectors */ +typedef struct +{ + float x, y, z, w; +} NvcVec4; + +/** C type for quaternions */ +typedef struct +{ + float x, y, z, w; +} NvcQuat; + +/** C type for transforms */ +typedef struct +{ + NvcQuat q; + NvcVec3 p; +} NvcTransform; + +/** C type for 3x3 matrices */ +typedef struct +{ + NvcVec3 column0, column1, column2, column3; +} NvcMat34; + +/** C type for 3x3 matrices */ +typedef struct +{ + NvcVec3 column0, column1, column2; +} NvcMat33; + +/** C type for 4x4 matrices */ +typedef struct +{ + NvcVec4 column0, column1, column2, column3; +} NvcMat44; + +/** C type for 3d bounding box */ +typedef struct +{ + NvcVec3 minimum; + NvcVec3 maximum; +} NvcBounds3; + +/** C type for a plane */ +typedef struct +{ + NvcVec3 n; + float d; +} NvcPlane; + +/** C type for 2-integer vectors */ +typedef struct +{ + int32_t x, y; +} NvcVec2i; + +/** C type for 3-integer vectors */ +typedef struct +{ + int32_t x, y, z; +} NvcVec3i; + +/** C type for 4-integer vectors */ +typedef struct +{ + int32_t x, y, z, w; +} NvcVec4i; + +/** @} */ + +#endif // NV_C_TYPES_H diff --git a/external/NvFoundation/1.1/include/NvErrorCallback.h b/external/NvFoundation/1.1/include/NvErrorCallback.h new file mode 100644 index 0000000..d9f23e8 --- /dev/null +++ b/external/NvFoundation/1.1/include/NvErrorCallback.h @@ -0,0 +1,73 @@ +// This code contains NVIDIA Confidential Information and is disclosed to you +// under a form of NVIDIA software license agreement provided separately to you. 
//
// Notice
// NVIDIA Corporation and its licensors retain all intellectual property and
// proprietary rights in and to this software and related documentation and
// any modifications thereto. Any use, reproduction, disclosure, or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA Corporation is strictly prohibited.
//
// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
//
// Information and code furnished is believed to be accurate and reliable.
// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
// information or for any infringement of patents or other rights of third parties that may
// result from its use. No license is granted by implication or otherwise under any patent
// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
// This code supersedes and replaces all information previously supplied.
// NVIDIA Corporation products are not authorized for use as critical
// components in life support devices or systems without express written approval of
// NVIDIA Corporation.
//
// Copyright (c) 2008-2014 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.

#ifndef NV_NVFOUNDATION_NVERRORCALLBACK_H
#define NV_NVFOUNDATION_NVERRORCALLBACK_H

/** \addtogroup foundation
@{
*/

#include "NvErrors.h"

// The nvidia namespace is hidden from Doxygen so documented names appear unqualified.
#if !NV_DOXYGEN
namespace nvidia
{
#endif

/**
\brief User defined interface class. Used by the library to emit debug information.

\note The SDK state should not be modified from within any error reporting functions.

<b>Threading:</b> The SDK sequences its calls to the output stream using a mutex, so the class need not
be implemented in a thread-safe manner if the SDK is the only client.
*/
class NvErrorCallback
{
  public:
    // Virtual destructor: implementations are deleted through this interface.
    virtual ~NvErrorCallback()
    {
    }

    /**
    \brief Reports an error code.
    \param code Error code, see #NvErrorCode
    \param message Message to display.
    \param file File the error occurred in.
    \param line Line number the error occurred on.
    */
    virtual void reportError(NvErrorCode::Enum code, const char* message, const char* file, int line) = 0;
};

#if !NV_DOXYGEN
} // namespace nvidia
#endif

/** @} */
#endif // #ifndef NV_NVFOUNDATION_NVERRORCALLBACK_H
// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
// information or for any infringement of patents or other rights of third parties that may
// result from its use. No license is granted by implication or otherwise under any patent
// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
// This code supersedes and replaces all information previously supplied.
// NVIDIA Corporation products are not authorized for use as critical
// components in life support devices or systems without express written approval of
// NVIDIA Corporation.
//
// Copyright (c) 2008-2014 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.

#ifndef NV_NVFOUNDATION_NVERRORS_H
#define NV_NVFOUNDATION_NVERRORS_H
/** \addtogroup foundation
@{
*/

#include "Nv.h"

#if !NV_DOXYGEN
namespace nvidia
{
#endif

/**
\brief Error codes

These error codes are passed to #NvErrorCallback

Each non-zero code (except eMASK_ALL) is a distinct power of two, so codes
can be OR-ed together into masks.

@see NvErrorCallback
*/

struct NvErrorCode
{
    enum Enum
    {
        //! \brief No error; the operation succeeded.
        eNO_ERROR = 0,

        //! \brief An informational message.
        eDEBUG_INFO = 1,

        //! \brief a warning message for the user to help with debugging
        eDEBUG_WARNING = 2,

        //! \brief method called with invalid parameter(s)
        eINVALID_PARAMETER = 4,

        //! \brief method was called at a time when an operation is not possible
        eINVALID_OPERATION = 8,

        //! \brief method failed to allocate some memory
        eOUT_OF_MEMORY = 16,

        /** \brief The library failed for some reason.
        Possibly you have passed invalid values like NaNs, which are not checked for.
        */
        eINTERNAL_ERROR = 32,

        //! \brief An unrecoverable error, execution should be halted and log output flushed
        eABORT = 64,

        //! \brief The SDK has determined that an operation may result in poor performance.
        ePERF_WARNING = 128,

        //! \brief A bit mask for including all errors (all bits set)
        eMASK_ALL = -1
    };
};

#if !NV_DOXYGEN
} // namespace nvidia
#endif

/** @} */
#endif // #ifndef NV_NVFOUNDATION_NVERRORS_H
//
// Copyright (c) 2008-2014 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.

#ifndef NV_NVFOUNDATION_NVFLAGS_H
#define NV_NVFOUNDATION_NVFLAGS_H

/** \addtogroup foundation
 @{
*/

#include "Nv.h"

#if !NV_DOXYGEN
namespace nvidia
{
#endif
/**
\brief Container for bitfield flag variables associated with a specific enum type.

This allows for type safe manipulation for bitfields.

<h3>Example</h3>
    // enum that defines each bit...
    struct MyEnum
    {
        enum Enum
        {
            eMAN = 1,
            eBEAR = 2,
            ePIG = 4,
        };
    };

    // implements some convenient global operators.
    NV_FLAGS_OPERATORS(MyEnum::Enum, uint8_t);

    NvFlags<MyEnum::Enum, uint8_t> myFlags;
    myFlags |= MyEnum::eMAN;
    myFlags |= MyEnum::eBEAR | MyEnum::ePIG;
    if(myFlags & MyEnum::eBEAR)
    {
        doSomething();
    }
*/

template <typename enumtype, typename storagetype = uint32_t>
class NvFlags
{
  public:
    //! Underlying integer type that stores the bits.
    typedef storagetype InternalType;

    //! Tag constructor: deliberately leaves mBits uninitialized (no zero-fill).
    NV_INLINE explicit NvFlags(const NvEMPTY)
    {
    }
    //! Default constructor: all flags cleared. (Defined out of line below.)
    NV_INLINE NvFlags(void);
    //! Construct with exactly the bits of \p e raised.
    NV_INLINE NvFlags(enumtype e);
    //! Copy constructor.
    NV_INLINE NvFlags(const NvFlags<enumtype, storagetype>& f);
    //! Construct from a raw bit pattern (explicit to avoid accidental int conversions).
    NV_INLINE explicit NvFlags(storagetype b);

    //! Returns true if all bits of \p e are present in this set.
    NV_INLINE bool isSet(enumtype e) const;
    //! Overwrites the whole bitfield with exactly \p e (assignment, not OR).
    NV_INLINE NvFlags<enumtype, storagetype>& set(enumtype e);
    // Equality/inequality against an enum value, another flag set, or a bool
    // (the bool form compares "any bit set" against b).
    NV_INLINE bool operator==(enumtype e) const;
    NV_INLINE bool operator==(const NvFlags<enumtype, storagetype>& f) const;
    NV_INLINE bool operator==(bool b) const;
    NV_INLINE bool operator!=(enumtype e) const;
    NV_INLINE bool operator!=(const NvFlags<enumtype, storagetype>& f) const;

    NV_INLINE NvFlags<enumtype, storagetype>& operator=(const NvFlags<enumtype, storagetype>& f);
    NV_INLINE NvFlags<enumtype, storagetype>& operator=(enumtype e);

    // Bitwise OR: raise flags.
    NV_INLINE NvFlags<enumtype, storagetype>& operator|=(enumtype e);
    NV_INLINE NvFlags<enumtype, storagetype>& operator|=(const NvFlags<enumtype, storagetype>& f);
    NV_INLINE NvFlags<enumtype, storagetype> operator|(enumtype e) const;
    NV_INLINE NvFlags<enumtype, storagetype> operator|(const NvFlags<enumtype, storagetype>& f) const;

    // Bitwise AND: intersect flag sets / test membership.
    NV_INLINE NvFlags<enumtype, storagetype>& operator&=(enumtype e);
    NV_INLINE NvFlags<enumtype, storagetype>& operator&=(const NvFlags<enumtype, storagetype>& f);
    NV_INLINE NvFlags<enumtype, storagetype> operator&(enumtype e) const;
    NV_INLINE NvFlags<enumtype, storagetype> operator&(const NvFlags<enumtype, storagetype>& f) const;

    // Bitwise XOR: toggle flags.
    NV_INLINE NvFlags<enumtype, storagetype>& operator^=(enumtype e);
    NV_INLINE NvFlags<enumtype, storagetype>& operator^=(const NvFlags<enumtype, storagetype>& f);
    NV_INLINE NvFlags<enumtype, storagetype> operator^(enumtype e) const;
    NV_INLINE NvFlags<enumtype, storagetype> operator^(const NvFlags<enumtype, storagetype>& f) const;

    //! Complement of the whole storage word (may raise bits not named in the enum).
    NV_INLINE NvFlags<enumtype, storagetype> operator~(void) const;

    //! True when any flag is raised.
    NV_INLINE operator bool(void) const;
    // Truncating conversions to the fixed-width unsigned types.
    NV_INLINE operator uint8_t(void) const;
    NV_INLINE operator uint16_t(void) const;
    NV_INLINE operator uint32_t(void) const;

    //! Lower (clear) the bits of \p e, leaving other flags untouched.
    NV_INLINE void clear(enumtype e);

  public:
    //! Free-function AND so (enumValue & flags) also works; returns the intersection.
    friend NV_INLINE NvFlags<enumtype, storagetype> operator&(enumtype a, NvFlags<enumtype, storagetype>& b)
    {
        NvFlags<enumtype, storagetype> out;
        out.mBits = a & b.mBits;
        return out;
    }

  private:
    storagetype mBits; // the raw bitfield
};

// Defines free operators |, & and ~ for a flag enum so expressions such as
// (eA | eB) yield an NvFlags value instead of a plain int.
#define NV_FLAGS_OPERATORS(enumtype, storagetype)                                                                      \
    NV_INLINE NvFlags<enumtype, storagetype> operator|(enumtype a, enumtype b)                                         \
    {                                                                                                                  \
        NvFlags<enumtype, storagetype> r(a);                                                                           \
        r |= b;                                                                                                        \
        return r;                                                                                                      \
    }                                                                                                                  \
    NV_INLINE NvFlags<enumtype, storagetype> operator&(enumtype a, enumtype b)                                         \
    {                                                                                                                  \
        NvFlags<enumtype, storagetype> r(a);                                                                           \
        r &= b;                                                                                                        \
        return r;                                                                                                      \
    }                                                                                                                  \
    NV_INLINE NvFlags<enumtype, storagetype> operator~(enumtype a)                                                     \
    {                                                                                                                  \
        return ~NvFlags<enumtype, storagetype>(a);                                                                     \
    }

// Declares the conventional typedef x##s (e.g. MyFlag -> MyFlags) plus its operators.
#define NV_FLAGS_TYPEDEF(x, y)                                                                                         \
    typedef NvFlags<x::Enum, y> x##s;                                                                                  \
    NV_FLAGS_OPERATORS(x::Enum, y)
+template <typename enumtype, typename storagetype> +NV_INLINE NvFlags<enumtype, storagetype>::NvFlags(void) +{ + mBits = 0; +} + +template <typename enumtype, typename storagetype> +NV_INLINE NvFlags<enumtype, storagetype>::NvFlags(enumtype e) +{ + mBits = static_cast<storagetype>(e); +} + +template <typename enumtype, typename storagetype> +NV_INLINE NvFlags<enumtype, storagetype>::NvFlags(const NvFlags<enumtype, storagetype>& f) +{ + mBits = f.mBits; +} + +template <typename enumtype, typename storagetype> +NV_INLINE NvFlags<enumtype, storagetype>::NvFlags(storagetype b) +{ + mBits = b; +} + +template <typename enumtype, typename storagetype> +NV_INLINE bool NvFlags<enumtype, storagetype>::isSet(enumtype e) const +{ + return (mBits & static_cast<storagetype>(e)) == static_cast<storagetype>(e); +} + +template <typename enumtype, typename storagetype> +NV_INLINE NvFlags<enumtype, storagetype>& NvFlags<enumtype, storagetype>::set(enumtype e) +{ + mBits = static_cast<storagetype>(e); + return *this; +} + +template <typename enumtype, typename storagetype> +NV_INLINE bool NvFlags<enumtype, storagetype>::operator==(enumtype e) const +{ + return mBits == static_cast<storagetype>(e); +} + +template <typename enumtype, typename storagetype> +NV_INLINE bool NvFlags<enumtype, storagetype>::operator==(const NvFlags<enumtype, storagetype>& f) const +{ + return mBits == f.mBits; +} + +template <typename enumtype, typename storagetype> +NV_INLINE bool NvFlags<enumtype, storagetype>::operator==(bool b) const +{ + return bool(*this) == b; +} + +template <typename enumtype, typename storagetype> +NV_INLINE bool NvFlags<enumtype, storagetype>::operator!=(enumtype e) const +{ + return mBits != static_cast<storagetype>(e); +} + +template <typename enumtype, typename storagetype> +NV_INLINE bool NvFlags<enumtype, storagetype>::operator!=(const NvFlags<enumtype, storagetype>& f) const +{ + return mBits != f.mBits; +} + +template <typename enumtype, typename storagetype> +NV_INLINE 
NvFlags<enumtype, storagetype>& NvFlags<enumtype, storagetype>::operator=(enumtype e) +{ + mBits = static_cast<storagetype>(e); + return *this; +} + +template <typename enumtype, typename storagetype> +NV_INLINE NvFlags<enumtype, storagetype>& NvFlags<enumtype, storagetype>::operator=(const NvFlags<enumtype, storagetype>& f) +{ + mBits = f.mBits; + return *this; +} + +template <typename enumtype, typename storagetype> +NV_INLINE NvFlags<enumtype, storagetype>& NvFlags<enumtype, storagetype>::operator|=(enumtype e) +{ + mBits |= static_cast<storagetype>(e); + return *this; +} + +template <typename enumtype, typename storagetype> +NV_INLINE NvFlags<enumtype, storagetype>& NvFlags<enumtype, storagetype>:: +operator|=(const NvFlags<enumtype, storagetype>& f) +{ + mBits |= f.mBits; + return *this; +} + +template <typename enumtype, typename storagetype> +NV_INLINE NvFlags<enumtype, storagetype> NvFlags<enumtype, storagetype>::operator|(enumtype e) const +{ + NvFlags<enumtype, storagetype> out(*this); + out |= e; + return out; +} + +template <typename enumtype, typename storagetype> +NV_INLINE NvFlags<enumtype, storagetype> NvFlags<enumtype, storagetype>:: +operator|(const NvFlags<enumtype, storagetype>& f) const +{ + NvFlags<enumtype, storagetype> out(*this); + out |= f; + return out; +} + +template <typename enumtype, typename storagetype> +NV_INLINE NvFlags<enumtype, storagetype>& NvFlags<enumtype, storagetype>::operator&=(enumtype e) +{ + mBits &= static_cast<storagetype>(e); + return *this; +} + +template <typename enumtype, typename storagetype> +NV_INLINE NvFlags<enumtype, storagetype>& NvFlags<enumtype, storagetype>:: +operator&=(const NvFlags<enumtype, storagetype>& f) +{ + mBits &= f.mBits; + return *this; +} + +template <typename enumtype, typename storagetype> +NV_INLINE NvFlags<enumtype, storagetype> NvFlags<enumtype, storagetype>::operator&(enumtype e) const +{ + NvFlags<enumtype, storagetype> out = *this; + out.mBits &= static_cast<storagetype>(e); + 
return out; +} + +template <typename enumtype, typename storagetype> +NV_INLINE NvFlags<enumtype, storagetype> NvFlags<enumtype, storagetype>:: +operator&(const NvFlags<enumtype, storagetype>& f) const +{ + NvFlags<enumtype, storagetype> out = *this; + out.mBits &= f.mBits; + return out; +} + +template <typename enumtype, typename storagetype> +NV_INLINE NvFlags<enumtype, storagetype>& NvFlags<enumtype, storagetype>::operator^=(enumtype e) +{ + mBits ^= static_cast<storagetype>(e); + return *this; +} + +template <typename enumtype, typename storagetype> +NV_INLINE NvFlags<enumtype, storagetype>& NvFlags<enumtype, storagetype>:: +operator^=(const NvFlags<enumtype, storagetype>& f) +{ + mBits ^= f.mBits; + return *this; +} + +template <typename enumtype, typename storagetype> +NV_INLINE NvFlags<enumtype, storagetype> NvFlags<enumtype, storagetype>::operator^(enumtype e) const +{ + NvFlags<enumtype, storagetype> out = *this; + out.mBits ^= static_cast<storagetype>(e); + return out; +} + +template <typename enumtype, typename storagetype> +NV_INLINE NvFlags<enumtype, storagetype> NvFlags<enumtype, storagetype>:: +operator^(const NvFlags<enumtype, storagetype>& f) const +{ + NvFlags<enumtype, storagetype> out = *this; + out.mBits ^= f.mBits; + return out; +} + +template <typename enumtype, typename storagetype> +NV_INLINE NvFlags<enumtype, storagetype> NvFlags<enumtype, storagetype>::operator~(void) const +{ + NvFlags<enumtype, storagetype> out; + out.mBits = storagetype(~mBits); + return out; +} + +template <typename enumtype, typename storagetype> +NV_INLINE NvFlags<enumtype, storagetype>::operator bool(void) const +{ + return mBits ? 
true : false; +} + +template <typename enumtype, typename storagetype> +NV_INLINE NvFlags<enumtype, storagetype>::operator uint8_t(void) const +{ + return static_cast<uint8_t>(mBits); +} + +template <typename enumtype, typename storagetype> +NV_INLINE NvFlags<enumtype, storagetype>::operator uint16_t(void) const +{ + return static_cast<uint16_t>(mBits); +} + +template <typename enumtype, typename storagetype> +NV_INLINE NvFlags<enumtype, storagetype>::operator uint32_t(void) const +{ + return static_cast<uint32_t>(mBits); +} + +template <typename enumtype, typename storagetype> +NV_INLINE void NvFlags<enumtype, storagetype>::clear(enumtype e) +{ + mBits &= ~static_cast<storagetype>(e); +} + +#if !NV_DOXYGEN +} // namespace nvidia +#endif + +/** @} */ +#endif // #ifndef NV_NVFOUNDATION_NVFLAGS_H diff --git a/external/NvFoundation/1.1/include/NvFoundationInterface.h b/external/NvFoundation/1.1/include/NvFoundationInterface.h new file mode 100644 index 0000000..40678fb --- /dev/null +++ b/external/NvFoundation/1.1/include/NvFoundationInterface.h @@ -0,0 +1,37 @@ +// This code contains NVIDIA Confidential Information and is disclosed to you +// under a form of NVIDIA software license agreement provided separately to you. +// +// Notice +// NVIDIA Corporation and its licensors retain all intellectual property and +// proprietary rights in and to this software and related documentation and +// any modifications thereto. Any use, reproduction, disclosure, or +// distribution of this software and related documentation without an express +// license agreement from NVIDIA Corporation is strictly prohibited. +// +// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES +// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO +// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT, +// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE. 
//
// Information and code furnished is believed to be accurate and reliable.
// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
// information or for any infringement of patents or other rights of third parties that may
// result from its use. No license is granted by implication or otherwise under any patent
// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
// This code supersedes and replaces all information previously supplied.
// NVIDIA Corporation products are not authorized for use as critical
// components in life support devices or systems without express written approval of
// NVIDIA Corporation.
//
// Copyright (c) 2008-2013 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.

#ifndef NV_FOUNDATION_INTERFACE_H
#define NV_FOUNDATION_INTERFACE_H

// Umbrella header: pulls in the user-implementable callback interfaces
// (error reporting, allocation) and the assert machinery in one include.
#include "NvErrorCallback.h"
#include "NvAllocatorCallback.h"
#include "NvAssert.h"

#endif // NV_FOUNDATION_INTERFACE_H
// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
//
// Information and code furnished is believed to be accurate and reliable.
// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
// information or for any infringement of patents or other rights of third parties that may
// result from its use. No license is granted by implication or otherwise under any patent
// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
// This code supersedes and replaces all information previously supplied.
// NVIDIA Corporation products are not authorized for use as critical
// components in life support devices or systems without express written approval of
// NVIDIA Corporation.
//
// Copyright (c) 2008-2013 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.

#ifndef NV_FOUNDATION_MATH_H
#define NV_FOUNDATION_MATH_H

// Umbrella header for the foundation math types: vectors, matrices,
// transforms, quaternions and bounding boxes.
#include "NvVec2.h"
#include "NvVec3.h"
#include "NvVec4.h"
#include "NvMat33.h"
#include "NvMat44.h"
#include "NvTransform.h"
#include "NvQuat.h"
#include "NvBounds3.h"

#endif // NV_FOUNDATION_MATH_H
//
// Notice
// NVIDIA Corporation and its licensors retain all intellectual property and
// proprietary rights in and to this software and related documentation and
// any modifications thereto. Any use, reproduction, disclosure, or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA Corporation is strictly prohibited.
//
// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
//
// Information and code furnished is believed to be accurate and reliable.
// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
// information or for any infringement of patents or other rights of third parties that may
// result from its use. No license is granted by implication or otherwise under any patent
// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
// This code supersedes and replaces all information previously supplied.
// NVIDIA Corporation products are not authorized for use as critical
// components in life support devices or systems without express written approval of
// NVIDIA Corporation.
//
// Copyright (c) 2008-2014 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.

#ifndef NV_NVFOUNDATION_NVIO_H
#define NV_NVFOUNDATION_NVIO_H

/** \addtogroup common
 @{
*/

#include "NvSimpleTypes.h"

#if !NV_DOXYGEN
namespace nvidia
{
#endif

/**
\brief Input stream class for I/O.

The user needs to supply a NvInputStream implementation to a number of methods to allow the SDK to read data.
*/

class NvInputStream
{
  public:
    /**
    \brief read from the stream. The number of bytes read may be less than the number requested.

    \param[out] dest the destination address to which the data will be read
    \param[in] count the number of bytes requested

    \return the number of bytes read from the stream.
    */

    virtual uint32_t read(void* dest, uint32_t count) = 0;

    // Virtual destructor: implementations are deleted through this interface.
    virtual ~NvInputStream()
    {
    }
};

/**
\brief Input data class for I/O which provides random read access.

The user needs to supply a NvInputData implementation to a number of methods to allow the SDK to read data.
*/

class NvInputData : public NvInputStream
{
  public:
    /**
    \brief return the length of the input data

    \return size in bytes of the input data
    */

    virtual uint32_t getLength() const = 0;

    /**
    \brief seek to the given offset from the start of the data.

    \param[in] offset the offset to seek to. If greater than the length of the data, this call is equivalent to
    seek(length);
    */

    virtual void seek(uint32_t offset) = 0;

    /**
    \brief return the current offset from the start of the data

    \return the current read offset, in bytes from the start of the data.
    */

    virtual uint32_t tell() const = 0;

    virtual ~NvInputData()
    {
    }
};

/**
\brief Output stream class for I/O.

The user needs to supply a NvOutputStream implementation to a number of methods to allow the SDK to write data.
*/

class NvOutputStream
{
  public:
    /**
    \brief write to the stream. The number of bytes written may be less than the number sent.

    \param[in] src the source address from which the data will be written
    \param[in] count the number of bytes to be written

    \return the number of bytes written to the stream by this call.
    */

    virtual uint32_t write(const void* src, uint32_t count) = 0;

    virtual ~NvOutputStream()
    {
    }
};

#if !NV_DOXYGEN
} // namespace nvidia
#endif

/** @} */
#endif // #ifndef NV_NVFOUNDATION_NVIO_H
// NVIDIA Corporation products are not authorized for use as critical
// components in life support devices or systems without express written approval of
// NVIDIA Corporation.
//
// Copyright (c) 2008-2014 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.

#ifndef NV_NVFOUNDATION_NVINTRINSICS_H
#define NV_NVFOUNDATION_NVINTRINSICS_H

#include "NvPreprocessor.h"

// Dispatch to the platform-specific intrinsics header. The NV_* platform
// selection macros are expected to come from NvPreprocessor.h (included
// above); an unrecognized platform fails the build explicitly.
#if NV_WINDOWS_FAMILY
#include "windows/NvWindowsIntrinsics.h"
#elif NV_X360
#include "xbox360/NvXbox360Intrinsics.h"
#elif(NV_LINUX || NV_ANDROID || NV_APPLE_FAMILY || NV_PS4)
#include "unix/NvUnixIntrinsics.h"
#elif NV_PS3
#include "ps3/NvPS3Intrinsics.h"
#elif NV_PSP2
#include "psp2/NvPSP2Intrinsics.h"
#elif NV_WIIU
#include "wiiu/NvWiiUIntrinsics.h"
#elif NV_XBOXONE
#include "XboxOne/NvXboxOneIntrinsics.h"
#else
#error "Platform not supported!"
#endif

#endif // #ifndef NV_NVFOUNDATION_NVINTRINSICS_H
NVIDIA MAKES +// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO +// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT, +// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE. +// +// Information and code furnished is believed to be accurate and reliable. +// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such +// information or for any infringement of patents or other rights of third parties that may +// result from its use. No license is granted by implication or otherwise under any patent +// or patent rights of NVIDIA Corporation. Details are subject to change without notice. +// This code supersedes and replaces all information previously supplied. +// NVIDIA Corporation products are not authorized for use as critical +// components in life support devices or systems without express written approval of +// NVIDIA Corporation. +// +// Copyright (c) 2008-2014 NVIDIA Corporation. All rights reserved. +// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. +// Copyright (c) 2001-2004 NovodeX AG. All rights reserved. + +#ifndef NV_NVFOUNDATION_NVMAT33_H +#define NV_NVFOUNDATION_NVMAT33_H +/** \addtogroup foundation +@{ +*/ + +#include "NvVec3.h" +#include "NvQuat.h" + +#if !NV_DOXYGEN +namespace nvidia +{ +#endif +/*! +\brief 3x3 matrix class + +Some clarifications, as there have been much confusion about matrix formats etc in the past. + +Short: +- Matrix have base vectors in columns (vectors are column matrices, 3x1 matrices). 
+- Matrix is physically stored in column major format +- Matrices are concaternated from left + +Long: +Given three base vectors a, b and c the matrix is stored as + +|a.x b.x c.x| +|a.y b.y c.y| +|a.z b.z c.z| + +Vectors are treated as columns, so the vector v is + +|x| +|y| +|z| + +And matrices are applied _before_ the vector (pre-multiplication) +v' = M*v + +|x'| |a.x b.x c.x| |x| |a.x*x + b.x*y + c.x*z| +|y'| = |a.y b.y c.y| * |y| = |a.y*x + b.y*y + c.y*z| +|z'| |a.z b.z c.z| |z| |a.z*x + b.z*y + c.z*z| + + +Physical storage and indexing: +To be compatible with popular 3d rendering APIs (read D3d and OpenGL) +the physical indexing is + +|0 3 6| +|1 4 7| +|2 5 8| + +index = column*3 + row + +which in C++ translates to M[column][row] + +The mathematical indexing is M_row,column and this is what is used for _-notation +so _12 is 1st row, second column and operator(row, column)! + +*/ +class NvMat33 +{ + public: + //! Default constructor + NV_CUDA_CALLABLE NV_FORCE_INLINE NvMat33() + { + } + + //! identity constructor + NV_CUDA_CALLABLE NV_INLINE NvMat33(NvIDENTITY r) + : column0(1.0f, 0.0f, 0.0f), column1(0.0f, 1.0f, 0.0f), column2(0.0f, 0.0f, 1.0f) + { + NV_UNUSED(r); + } + + //! zero constructor + NV_CUDA_CALLABLE NV_INLINE NvMat33(NvZERO r) : column0(0.0f), column1(0.0f), column2(0.0f) + { + NV_UNUSED(r); + } + + //! Construct from three base vectors + NV_CUDA_CALLABLE NvMat33(const NvVec3& col0, const NvVec3& col1, const NvVec3& col2) + : column0(col0), column1(col1), column2(col2) + { + } + + //! constructor from a scalar, which generates a multiple of the identity matrix + explicit NV_CUDA_CALLABLE NV_INLINE NvMat33(float r) + : column0(r, 0.0f, 0.0f), column1(0.0f, r, 0.0f), column2(0.0f, 0.0f, r) + { + } + + //! Construct from float[9] + explicit NV_CUDA_CALLABLE NV_INLINE NvMat33(float values[]) + : column0(values[0], values[1], values[2]) + , column1(values[3], values[4], values[5]) + , column2(values[6], values[7], values[8]) + { + } + + //! 
Construct from a quaternion + explicit NV_CUDA_CALLABLE NV_FORCE_INLINE NvMat33(const NvQuat& q) + { + const float x = q.x; + const float y = q.y; + const float z = q.z; + const float w = q.w; + + const float x2 = x + x; + const float y2 = y + y; + const float z2 = z + z; + + const float xx = x2 * x; + const float yy = y2 * y; + const float zz = z2 * z; + + const float xy = x2 * y; + const float xz = x2 * z; + const float xw = x2 * w; + + const float yz = y2 * z; + const float yw = y2 * w; + const float zw = z2 * w; + + column0 = NvVec3(1.0f - yy - zz, xy + zw, xz - yw); + column1 = NvVec3(xy - zw, 1.0f - xx - zz, yz + xw); + column2 = NvVec3(xz + yw, yz - xw, 1.0f - xx - yy); + } + + //! Copy constructor + NV_CUDA_CALLABLE NV_INLINE NvMat33(const NvMat33& other) + : column0(other.column0), column1(other.column1), column2(other.column2) + { + } + + //! Assignment operator + NV_CUDA_CALLABLE NV_FORCE_INLINE NvMat33& operator=(const NvMat33& other) + { + column0 = other.column0; + column1 = other.column1; + column2 = other.column2; + return *this; + } + + //! Construct from diagonal, off-diagonals are zero. + NV_CUDA_CALLABLE NV_INLINE static NvMat33 createDiagonal(const NvVec3& d) + { + return NvMat33(NvVec3(d.x, 0.0f, 0.0f), NvVec3(0.0f, d.y, 0.0f), NvVec3(0.0f, 0.0f, d.z)); + } + + /** + \brief returns true if the two matrices are exactly equal + */ + NV_CUDA_CALLABLE NV_INLINE bool operator==(const NvMat33& m) const + { + return column0 == m.column0 && column1 == m.column1 && column2 == m.column2; + } + + //! Get transposed matrix + NV_CUDA_CALLABLE NV_FORCE_INLINE NvMat33 getTranspose() const + { + const NvVec3 v0(column0.x, column1.x, column2.x); + const NvVec3 v1(column0.y, column1.y, column2.y); + const NvVec3 v2(column0.z, column1.z, column2.z); + + return NvMat33(v0, v1, v2); + } + + //! 
Get the real inverse + NV_CUDA_CALLABLE NV_INLINE NvMat33 getInverse() const + { + const float det = getDeterminant(); + NvMat33 inverse; + + if(det != 0) + { + const float invDet = 1.0f / det; + + inverse.column0.x = invDet * (column1.y * column2.z - column2.y * column1.z); + inverse.column0.y = invDet * -(column0.y * column2.z - column2.y * column0.z); + inverse.column0.z = invDet * (column0.y * column1.z - column0.z * column1.y); + + inverse.column1.x = invDet * -(column1.x * column2.z - column1.z * column2.x); + inverse.column1.y = invDet * (column0.x * column2.z - column0.z * column2.x); + inverse.column1.z = invDet * -(column0.x * column1.z - column0.z * column1.x); + + inverse.column2.x = invDet * (column1.x * column2.y - column1.y * column2.x); + inverse.column2.y = invDet * -(column0.x * column2.y - column0.y * column2.x); + inverse.column2.z = invDet * (column0.x * column1.y - column1.x * column0.y); + + return inverse; + } + else + { + return NvMat33(NvIdentity); + } + } + + //! Get determinant + NV_CUDA_CALLABLE NV_INLINE float getDeterminant() const + { + return column0.dot(column1.cross(column2)); + } + + //! Unary minus + NV_CUDA_CALLABLE NV_INLINE NvMat33 operator-() const + { + return NvMat33(-column0, -column1, -column2); + } + + //! Add + NV_CUDA_CALLABLE NV_INLINE NvMat33 operator+(const NvMat33& other) const + { + return NvMat33(column0 + other.column0, column1 + other.column1, column2 + other.column2); + } + + //! Subtract + NV_CUDA_CALLABLE NV_INLINE NvMat33 operator-(const NvMat33& other) const + { + return NvMat33(column0 - other.column0, column1 - other.column1, column2 - other.column2); + } + + //! Scalar multiplication + NV_CUDA_CALLABLE NV_INLINE NvMat33 operator*(float scalar) const + { + return NvMat33(column0 * scalar, column1 * scalar, column2 * scalar); + } + + friend NvMat33 operator*(float, const NvMat33&); + + //! 
Matrix vector multiplication (returns 'this->transform(vec)') + NV_CUDA_CALLABLE NV_INLINE NvVec3 operator*(const NvVec3& vec) const + { + return transform(vec); + } + + // a <op>= b operators + + //! Matrix multiplication + NV_CUDA_CALLABLE NV_FORCE_INLINE NvMat33 operator*(const NvMat33& other) const + { + // Rows from this <dot> columns from other + // column0 = transform(other.column0) etc + return NvMat33(transform(other.column0), transform(other.column1), transform(other.column2)); + } + + //! Equals-add + NV_CUDA_CALLABLE NV_INLINE NvMat33& operator+=(const NvMat33& other) + { + column0 += other.column0; + column1 += other.column1; + column2 += other.column2; + return *this; + } + + //! Equals-sub + NV_CUDA_CALLABLE NV_INLINE NvMat33& operator-=(const NvMat33& other) + { + column0 -= other.column0; + column1 -= other.column1; + column2 -= other.column2; + return *this; + } + + //! Equals scalar multiplication + NV_CUDA_CALLABLE NV_INLINE NvMat33& operator*=(float scalar) + { + column0 *= scalar; + column1 *= scalar; + column2 *= scalar; + return *this; + } + + //! Equals matrix multiplication + NV_CUDA_CALLABLE NV_INLINE NvMat33& operator*=(const NvMat33& other) + { + *this = *this * other; + return *this; + } + + //! Element access, mathematical way! + NV_DEPRECATED NV_CUDA_CALLABLE NV_FORCE_INLINE float operator()(unsigned int row, unsigned int col) const + { + return (*this)[col][row]; + } + + //! Element access, mathematical way! + NV_DEPRECATED NV_CUDA_CALLABLE NV_FORCE_INLINE float& operator()(unsigned int row, unsigned int col) + { + return (*this)[col][row]; + } + + // Transform etc + + //! Transform vector by matrix, equal to v' = M*v + NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec3 transform(const NvVec3& other) const + { + return column0 * other.x + column1 * other.y + column2 * other.z; + } + + //! 
Transform vector by matrix transpose, v' = M^t*v + NV_CUDA_CALLABLE NV_INLINE NvVec3 transformTranspose(const NvVec3& other) const + { + return NvVec3(column0.dot(other), column1.dot(other), column2.dot(other)); + } + + NV_CUDA_CALLABLE NV_FORCE_INLINE const float* front() const + { + return &column0.x; + } + + NV_DEPRECATED NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec3& operator[](unsigned int num) + { + return (&column0)[num]; + } + NV_DEPRECATED NV_CUDA_CALLABLE NV_FORCE_INLINE const NvVec3& operator[](unsigned int num) const + { + return (&column0)[num]; + } + + // Data, see above for format! + + NvVec3 column0, column1, column2; // the three base vectors +}; + +// implementation from NvQuat.h +NV_CUDA_CALLABLE NV_INLINE NvQuat::NvQuat(const NvMat33& m) +{ + if (m.column2.z < 0) + { + if (m.column0.x > m.column1.y) + { + float t = 1 + m.column0.x - m.column1.y - m.column2.z; + *this = NvQuat(t, m.column0.y + m.column1.x, m.column2.x + m.column0.z, m.column1.z - m.column2.y) * (0.5f / NvSqrt(t)); + } + else + { + float t = 1 - m.column0.x + m.column1.y - m.column2.z; + *this = NvQuat(m.column0.y + m.column1.x, t, m.column1.z + m.column2.y, m.column2.x - m.column0.z) * (0.5f / NvSqrt(t)); + } + } + else + { + if (m.column0.x < -m.column1.y) + { + float t = 1 - m.column0.x - m.column1.y + m.column2.z; + *this = NvQuat(m.column2.x + m.column0.z, m.column1.z + m.column2.y, t, m.column0.y - m.column1.x) * (0.5f / NvSqrt(t)); + } + else + { + float t = 1 + m.column0.x + m.column1.y + m.column2.z; + *this = NvQuat(m.column1.z - m.column2.y, m.column2.x - m.column0.z, m.column0.y - m.column1.x, t) * (0.5f / NvSqrt(t)); + } + } +} + +#if !NV_DOXYGEN +} // namespace nvidia +#endif + +/** @} */ +#endif // #ifndef NV_NVFOUNDATION_NVMAT33_H diff --git a/external/NvFoundation/1.1/include/NvMat44.h b/external/NvFoundation/1.1/include/NvMat44.h new file mode 100644 index 0000000..3519d95 --- /dev/null +++ b/external/NvFoundation/1.1/include/NvMat44.h @@ -0,0 +1,376 @@ +// This code 
contains NVIDIA Confidential Information and is disclosed to you +// under a form of NVIDIA software license agreement provided separately to you. +// +// Notice +// NVIDIA Corporation and its licensors retain all intellectual property and +// proprietary rights in and to this software and related documentation and +// any modifications thereto. Any use, reproduction, disclosure, or +// distribution of this software and related documentation without an express +// license agreement from NVIDIA Corporation is strictly prohibited. +// +// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES +// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO +// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT, +// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE. +// +// Information and code furnished is believed to be accurate and reliable. +// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such +// information or for any infringement of patents or other rights of third parties that may +// result from its use. No license is granted by implication or otherwise under any patent +// or patent rights of NVIDIA Corporation. Details are subject to change without notice. +// This code supersedes and replaces all information previously supplied. +// NVIDIA Corporation products are not authorized for use as critical +// components in life support devices or systems without express written approval of +// NVIDIA Corporation. +// +// Copyright (c) 2008-2014 NVIDIA Corporation. All rights reserved. +// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. +// Copyright (c) 2001-2004 NovodeX AG. All rights reserved. 
+ +#ifndef NV_NVFOUNDATION_NVMAT44_H +#define NV_NVFOUNDATION_NVMAT44_H +/** \addtogroup foundation +@{ +*/ + +#include "NvQuat.h" +#include "NvVec4.h" +#include "NvMat33.h" +#include "NvTransform.h" + +#if !NV_DOXYGEN +namespace nvidia +{ +#endif + +/*! +\brief 4x4 matrix class + +This class is layout-compatible with D3D and OpenGL matrices. More notes on layout are given in the NvMat33 + +@see NvMat33 NvTransform +*/ + +class NvMat44 +{ + public: + //! Default constructor + NV_CUDA_CALLABLE NV_INLINE NvMat44() + { + } + + //! identity constructor + NV_CUDA_CALLABLE NV_INLINE NvMat44(NvIDENTITY r) + : column0(1.0f, 0.0f, 0.0f, 0.0f) + , column1(0.0f, 1.0f, 0.0f, 0.0f) + , column2(0.0f, 0.0f, 1.0f, 0.0f) + , column3(0.0f, 0.0f, 0.0f, 1.0f) + { + NV_UNUSED(r); + } + + //! zero constructor + NV_CUDA_CALLABLE NV_INLINE NvMat44(NvZERO r) : column0(NvZero), column1(NvZero), column2(NvZero), column3(NvZero) + { + NV_UNUSED(r); + } + + //! Construct from four 4-vectors + NV_CUDA_CALLABLE NvMat44(const NvVec4& col0, const NvVec4& col1, const NvVec4& col2, const NvVec4& col3) + : column0(col0), column1(col1), column2(col2), column3(col3) + { + } + + //! constructor that generates a multiple of the identity matrix + explicit NV_CUDA_CALLABLE NV_INLINE NvMat44(float r) + : column0(r, 0.0f, 0.0f, 0.0f) + , column1(0.0f, r, 0.0f, 0.0f) + , column2(0.0f, 0.0f, r, 0.0f) + , column3(0.0f, 0.0f, 0.0f, r) + { + } + + //! Construct from three base vectors and a translation + NV_CUDA_CALLABLE NvMat44(const NvVec3& col0, const NvVec3& col1, const NvVec3& col2, const NvVec3& col3) + : column0(col0, 0), column1(col1, 0), column2(col2, 0), column3(col3, 1.0f) + { + } + + //! 
Construct from float[16] + explicit NV_CUDA_CALLABLE NV_INLINE NvMat44(float values[]) + : column0(values[0], values[1], values[2], values[3]) + , column1(values[4], values[5], values[6], values[7]) + , column2(values[8], values[9], values[10], values[11]) + , column3(values[12], values[13], values[14], values[15]) + { + } + + //! Construct from a quaternion + explicit NV_CUDA_CALLABLE NV_INLINE NvMat44(const NvQuat& q) + { + const float x = q.x; + const float y = q.y; + const float z = q.z; + const float w = q.w; + + const float x2 = x + x; + const float y2 = y + y; + const float z2 = z + z; + + const float xx = x2 * x; + const float yy = y2 * y; + const float zz = z2 * z; + + const float xy = x2 * y; + const float xz = x2 * z; + const float xw = x2 * w; + + const float yz = y2 * z; + const float yw = y2 * w; + const float zw = z2 * w; + + column0 = NvVec4(1.0f - yy - zz, xy + zw, xz - yw, 0.0f); + column1 = NvVec4(xy - zw, 1.0f - xx - zz, yz + xw, 0.0f); + column2 = NvVec4(xz + yw, yz - xw, 1.0f - xx - yy, 0.0f); + column3 = NvVec4(0.0f, 0.0f, 0.0f, 1.0f); + } + + //! Construct from a diagonal vector + explicit NV_CUDA_CALLABLE NV_INLINE NvMat44(const NvVec4& diagonal) + : column0(diagonal.x, 0.0f, 0.0f, 0.0f) + , column1(0.0f, diagonal.y, 0.0f, 0.0f) + , column2(0.0f, 0.0f, diagonal.z, 0.0f) + , column3(0.0f, 0.0f, 0.0f, diagonal.w) + { + } + + //! Construct from Mat33 and a translation + NV_CUDA_CALLABLE NvMat44(const NvMat33& axes, const NvVec3& position) + : column0(axes.column0, 0.0f), column1(axes.column1, 0.0f), column2(axes.column2, 0.0f), column3(position, 1.0f) + { + } + + NV_CUDA_CALLABLE NvMat44(const NvTransform& t) + { + *this = NvMat44(NvMat33(t.q), t.p); + } + + /** + \brief returns true if the two matrices are exactly equal + */ + NV_CUDA_CALLABLE NV_INLINE bool operator==(const NvMat44& m) const + { + return column0 == m.column0 && column1 == m.column1 && column2 == m.column2 && column3 == m.column3; + } + + //! 
Copy constructor + NV_CUDA_CALLABLE NV_INLINE NvMat44(const NvMat44& other) + : column0(other.column0), column1(other.column1), column2(other.column2), column3(other.column3) + { + } + + //! Assignment operator + NV_CUDA_CALLABLE NV_INLINE const NvMat44& operator=(const NvMat44& other) + { + column0 = other.column0; + column1 = other.column1; + column2 = other.column2; + column3 = other.column3; + return *this; + } + + //! Get transposed matrix + NV_CUDA_CALLABLE NV_INLINE NvMat44 getTranspose() const + { + return NvMat44( + NvVec4(column0.x, column1.x, column2.x, column3.x), NvVec4(column0.y, column1.y, column2.y, column3.y), + NvVec4(column0.z, column1.z, column2.z, column3.z), NvVec4(column0.w, column1.w, column2.w, column3.w)); + } + + //! Unary minus + NV_CUDA_CALLABLE NV_INLINE NvMat44 operator-() const + { + return NvMat44(-column0, -column1, -column2, -column3); + } + + //! Add + NV_CUDA_CALLABLE NV_INLINE NvMat44 operator+(const NvMat44& other) const + { + return NvMat44(column0 + other.column0, column1 + other.column1, column2 + other.column2, + column3 + other.column3); + } + + //! Subtract + NV_CUDA_CALLABLE NV_INLINE NvMat44 operator-(const NvMat44& other) const + { + return NvMat44(column0 - other.column0, column1 - other.column1, column2 - other.column2, + column3 - other.column3); + } + + //! Scalar multiplication + NV_CUDA_CALLABLE NV_INLINE NvMat44 operator*(float scalar) const + { + return NvMat44(column0 * scalar, column1 * scalar, column2 * scalar, column3 * scalar); + } + + friend NvMat44 operator*(float, const NvMat44&); + + //! Matrix multiplication + NV_CUDA_CALLABLE NV_INLINE NvMat44 operator*(const NvMat44& other) const + { + // Rows from this <dot> columns from other + // column0 = transform(other.column0) etc + return NvMat44(transform(other.column0), transform(other.column1), transform(other.column2), + transform(other.column3)); + } + + // a <op>= b operators + + //! 
Equals-add + NV_CUDA_CALLABLE NV_INLINE NvMat44& operator+=(const NvMat44& other) + { + column0 += other.column0; + column1 += other.column1; + column2 += other.column2; + column3 += other.column3; + return *this; + } + + //! Equals-sub + NV_CUDA_CALLABLE NV_INLINE NvMat44& operator-=(const NvMat44& other) + { + column0 -= other.column0; + column1 -= other.column1; + column2 -= other.column2; + column3 -= other.column3; + return *this; + } + + //! Equals scalar multiplication + NV_CUDA_CALLABLE NV_INLINE NvMat44& operator*=(float scalar) + { + column0 *= scalar; + column1 *= scalar; + column2 *= scalar; + column3 *= scalar; + return *this; + } + + //! Equals matrix multiplication + NV_CUDA_CALLABLE NV_INLINE NvMat44& operator*=(const NvMat44& other) + { + *this = *this * other; + return *this; + } + + //! Element access, mathematical way! + NV_DEPRECATED NV_CUDA_CALLABLE NV_FORCE_INLINE float operator()(unsigned int row, unsigned int col) const + { + return (*this)[col][row]; + } + + //! Element access, mathematical way! + NV_DEPRECATED NV_CUDA_CALLABLE NV_FORCE_INLINE float& operator()(unsigned int row, unsigned int col) + { + return (*this)[col][row]; + } + + //! Transform vector by matrix, equal to v' = M*v + NV_CUDA_CALLABLE NV_INLINE NvVec4 transform(const NvVec4& other) const + { + return column0 * other.x + column1 * other.y + column2 * other.z + column3 * other.w; + } + + //! Transform vector by matrix, equal to v' = M*v + NV_CUDA_CALLABLE NV_INLINE NvVec3 transform(const NvVec3& other) const + { + return transform(NvVec4(other, 1.0f)).getXYZ(); + } + + //! Rotate vector by matrix, equal to v' = M*v + NV_CUDA_CALLABLE NV_INLINE const NvVec4 rotate(const NvVec4& other) const + { + return column0 * other.x + column1 * other.y + column2 * other.z; // + column3*0; + } + + //! 
Rotate vector by matrix, equal to v' = M*v + NV_CUDA_CALLABLE NV_INLINE const NvVec3 rotate(const NvVec3& other) const + { + return rotate(NvVec4(other, 1.0f)).getXYZ(); + } + + NV_CUDA_CALLABLE NV_INLINE NvVec3 getBasis(int num) const + { + NV_ASSERT(num >= 0 && num < 3); + return (&column0)[num].getXYZ(); + } + + NV_CUDA_CALLABLE NV_INLINE NvVec3 getPosition() const + { + return column3.getXYZ(); + } + + NV_CUDA_CALLABLE NV_INLINE void setPosition(const NvVec3& position) + { + column3.x = position.x; + column3.y = position.y; + column3.z = position.z; + } + + NV_CUDA_CALLABLE NV_FORCE_INLINE const float* front() const + { + return &column0.x; + } + + NV_DEPRECATED NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec4& operator[](unsigned int num) + { + return (&column0)[num]; + } + NV_DEPRECATED NV_CUDA_CALLABLE NV_FORCE_INLINE const NvVec4& operator[](unsigned int num) const + { + return (&column0)[num]; + } + + NV_CUDA_CALLABLE NV_INLINE void scale(const NvVec4& p) + { + column0 *= p.x; + column1 *= p.y; + column2 *= p.z; + column3 *= p.w; + } + + NV_CUDA_CALLABLE NV_INLINE NvMat44 inverseRT(void) const + { + NvVec3 r0(column0.x, column1.x, column2.x), r1(column0.y, column1.y, column2.y), + r2(column0.z, column1.z, column2.z); + + return NvMat44(r0, r1, r2, -(r0 * column3.x + r1 * column3.y + r2 * column3.z)); + } + + NV_CUDA_CALLABLE NV_INLINE bool isFinite() const + { + return column0.isFinite() && column1.isFinite() && column2.isFinite() && column3.isFinite(); + } + + // Data, see above for format! 
+ + NvVec4 column0, column1, column2, column3; // the four base vectors +}; + +// implementation from NvTransform.h +NV_CUDA_CALLABLE NV_FORCE_INLINE NvTransform::NvTransform(const NvMat44& m) +{ + NvVec3 column0 = NvVec3(m.column0.x, m.column0.y, m.column0.z); + NvVec3 column1 = NvVec3(m.column1.x, m.column1.y, m.column1.z); + NvVec3 column2 = NvVec3(m.column2.x, m.column2.y, m.column2.z); + + q = NvQuat(NvMat33(column0, column1, column2)); + p = NvVec3(m.column3.x, m.column3.y, m.column3.z); +} + +#if !NV_DOXYGEN +} // namespace nvidia +#endif + +/** @} */ +#endif // #ifndef NV_NVFOUNDATION_NVMAT44_H diff --git a/external/NvFoundation/1.1/include/NvMath.h b/external/NvFoundation/1.1/include/NvMath.h new file mode 100644 index 0000000..2dda45a --- /dev/null +++ b/external/NvFoundation/1.1/include/NvMath.h @@ -0,0 +1,338 @@ +// This code contains NVIDIA Confidential Information and is disclosed to you +// under a form of NVIDIA software license agreement provided separately to you. +// +// Notice +// NVIDIA Corporation and its licensors retain all intellectual property and +// proprietary rights in and to this software and related documentation and +// any modifications thereto. Any use, reproduction, disclosure, or +// distribution of this software and related documentation without an express +// license agreement from NVIDIA Corporation is strictly prohibited. +// +// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES +// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO +// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT, +// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE. +// +// Information and code furnished is believed to be accurate and reliable. +// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such +// information or for any infringement of patents or other rights of third parties that may +// result from its use. 
No license is granted by implication or otherwise under any patent +// or patent rights of NVIDIA Corporation. Details are subject to change without notice. +// This code supersedes and replaces all information previously supplied. +// NVIDIA Corporation products are not authorized for use as critical +// components in life support devices or systems without express written approval of +// NVIDIA Corporation. +// +// Copyright (c) 2008-2014 NVIDIA Corporation. All rights reserved. +// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. +// Copyright (c) 2001-2004 NovodeX AG. All rights reserved. + +#ifndef NV_NVFOUNDATION_NVMATH_H +#define NV_NVFOUNDATION_NVMATH_H + +/** \addtogroup foundation +@{ +*/ + +#include "NvPreprocessor.h" + +#if NV_VC +#pragma warning(push) +#pragma warning(disable : 4985) // 'symbol name': attributes not present on previous declaration +#endif +#include <math.h> +#if NV_VC +#pragma warning(pop) +#endif + +#include <float.h> +#include "NvIntrinsics.h" +#include "NvAssert.h" + +#if !NV_DOXYGEN +namespace nvidia +{ +#endif + +// constants +static const float NvPi = float(3.141592653589793); +static const float NvHalfPi = float(1.57079632679489661923); +static const float NvTwoPi = float(6.28318530717958647692); +static const float NvInvPi = float(0.31830988618379067154); +static const float NvInvTwoPi = float(0.15915494309189533577); +static const float NvPiDivTwo = float(1.57079632679489661923); +static const float NvPiDivFour = float(0.78539816339744830962); + +/** +\brief The return value is the greater of the two specified values. +*/ +template <class T> +NV_CUDA_CALLABLE NV_FORCE_INLINE T NvMax(T a, T b) +{ + return a < b ? b : a; +} + +//! overload for float to use fsel on xbox +template <> +NV_CUDA_CALLABLE NV_FORCE_INLINE float NvMax(float a, float b) +{ + return intrinsics::selectMax(a, b); +} + +/** +\brief The return value is the lesser of the two specified values. 
+*/ +template <class T> +NV_CUDA_CALLABLE NV_FORCE_INLINE T NvMin(T a, T b) +{ + return a < b ? a : b; +} + +template <> +//! overload for float to use fsel on xbox +NV_CUDA_CALLABLE NV_FORCE_INLINE float NvMin(float a, float b) +{ + return intrinsics::selectMin(a, b); +} + +/* +Many of these are just implemented as NV_CUDA_CALLABLE NV_FORCE_INLINE calls to the C lib right now, +but later we could replace some of them with some approximations or more +clever stuff. +*/ + +/** +\brief abs returns the absolute value of its argument. +*/ +NV_CUDA_CALLABLE NV_FORCE_INLINE float NvAbs(float a) +{ + return intrinsics::abs(a); +} + +NV_CUDA_CALLABLE NV_FORCE_INLINE bool NvEquals(float a, float b, float eps) +{ + return (NvAbs(a - b) < eps); +} + +/** +\brief abs returns the absolute value of its argument. +*/ +NV_CUDA_CALLABLE NV_FORCE_INLINE double NvAbs(double a) +{ + return ::fabs(a); +} + +/** +\brief abs returns the absolute value of its argument. +*/ +NV_CUDA_CALLABLE NV_FORCE_INLINE int32_t NvAbs(int32_t a) +{ + return ::abs(a); +} + +/** +\brief Clamps v to the range [hi,lo] +*/ +template <class T> +NV_CUDA_CALLABLE NV_FORCE_INLINE T NvClamp(T v, T lo, T hi) +{ + NV_ASSERT(lo <= hi); + return NvMin(hi, NvMax(lo, v)); +} + +//! \brief Square root. +NV_CUDA_CALLABLE NV_FORCE_INLINE float NvSqrt(float a) +{ + return intrinsics::sqrt(a); +} + +//! \brief Square root. +NV_CUDA_CALLABLE NV_FORCE_INLINE double NvSqrt(double a) +{ + return ::sqrt(a); +} + +//! \brief reciprocal square root. +NV_CUDA_CALLABLE NV_FORCE_INLINE float NvRecipSqrt(float a) +{ + return intrinsics::recipSqrt(a); +} + +//! \brief reciprocal square root. +NV_CUDA_CALLABLE NV_FORCE_INLINE double NvRecipSqrt(double a) +{ + return 1 / ::sqrt(a); +} + +//! trigonometry -- all angles are in radians. + +//! \brief Sine of an angle ( <b>Unit:</b> Radians ) +NV_CUDA_CALLABLE NV_FORCE_INLINE float NvSin(float a) +{ + return intrinsics::sin(a); +} + +//! 
\brief Sine of an angle ( <b>Unit:</b> Radians ) +NV_CUDA_CALLABLE NV_FORCE_INLINE double NvSin(double a) +{ + return ::sin(a); +} + +//! \brief Cosine of an angle (<b>Unit:</b> Radians) +NV_CUDA_CALLABLE NV_FORCE_INLINE float NvCos(float a) +{ + return intrinsics::cos(a); +} + +//! \brief Cosine of an angle (<b>Unit:</b> Radians) +NV_CUDA_CALLABLE NV_FORCE_INLINE double NvCos(double a) +{ + return ::cos(a); +} + +/** +\brief Tangent of an angle. +<b>Unit:</b> Radians +*/ +NV_CUDA_CALLABLE NV_FORCE_INLINE float NvTan(float a) +{ + return ::tanf(a); +} + +/** +\brief Tangent of an angle. +<b>Unit:</b> Radians +*/ +NV_CUDA_CALLABLE NV_FORCE_INLINE double NvTan(double a) +{ + return ::tan(a); +} + +/** +\brief Arcsine. +Returns angle between -PI/2 and PI/2 in radians +<b>Unit:</b> Radians +*/ +NV_CUDA_CALLABLE NV_FORCE_INLINE float NvAsin(float f) +{ + return ::asinf(NvClamp(f, -1.0f, 1.0f)); +} + +/** +\brief Arcsine. +Returns angle between -PI/2 and PI/2 in radians +<b>Unit:</b> Radians +*/ +NV_CUDA_CALLABLE NV_FORCE_INLINE double NvAsin(double f) +{ + return ::asin(NvClamp(f, -1.0, 1.0)); +} + +/** +\brief Arccosine. +Returns angle between 0 and PI in radians +<b>Unit:</b> Radians +*/ +NV_CUDA_CALLABLE NV_FORCE_INLINE float NvAcos(float f) +{ + return ::acosf(NvClamp(f, -1.0f, 1.0f)); +} + +/** +\brief Arccosine. +Returns angle between 0 and PI in radians +<b>Unit:</b> Radians +*/ +NV_CUDA_CALLABLE NV_FORCE_INLINE double NvAcos(double f) +{ + return ::acos(NvClamp(f, -1.0, 1.0)); +} + +/** +\brief ArcTangent. +Returns angle between -PI/2 and PI/2 in radians +<b>Unit:</b> Radians +*/ +NV_CUDA_CALLABLE NV_FORCE_INLINE float NvAtan(float a) +{ + return ::atanf(a); +} + +/** +\brief ArcTangent. +Returns angle between -PI/2 and PI/2 in radians +<b>Unit:</b> Radians +*/ +NV_CUDA_CALLABLE NV_FORCE_INLINE double NvAtan(double a) +{ + return ::atan(a); +} + +/** +\brief Arctangent of (x/y) with correct sign. 
+Returns angle between -PI and PI in radians +<b>Unit:</b> Radians +*/ +NV_CUDA_CALLABLE NV_FORCE_INLINE float NvAtan2(float x, float y) +{ + return ::atan2f(x, y); +} + +/** +\brief Arctangent of (x/y) with correct sign. +Returns angle between -PI and PI in radians +<b>Unit:</b> Radians +*/ +NV_CUDA_CALLABLE NV_FORCE_INLINE double NvAtan2(double x, double y) +{ + return ::atan2(x, y); +} + +//! \brief returns true if the passed number is a finite floating point number as opposed to INF, NAN, etc. +NV_CUDA_CALLABLE NV_FORCE_INLINE bool NvIsFinite(float f) +{ + return intrinsics::isFinite(f); +} + +//! \brief returns true if the passed number is a finite floating point number as opposed to INF, NAN, etc. +NV_CUDA_CALLABLE NV_FORCE_INLINE bool NvIsFinite(double f) +{ + return intrinsics::isFinite(f); +} + +NV_CUDA_CALLABLE NV_FORCE_INLINE float NvFloor(float a) +{ + return ::floorf(a); +} + +NV_CUDA_CALLABLE NV_FORCE_INLINE float NvExp(float a) +{ + return ::expf(a); +} + +NV_CUDA_CALLABLE NV_FORCE_INLINE float NvCeil(float a) +{ + return ::ceilf(a); +} + +NV_CUDA_CALLABLE NV_FORCE_INLINE float NvSign(float a) +{ + return nvidia::intrinsics::sign(a); +} + +NV_CUDA_CALLABLE NV_FORCE_INLINE float NvPow(float x, float y) +{ + return ::powf(x, y); +} + +NV_CUDA_CALLABLE NV_FORCE_INLINE float NvLog(float x) +{ + return ::logf(x); +} + +#if !NV_DOXYGEN +} // namespace nvidia +#endif + +/** @} */ +#endif // #ifndef NV_NVFOUNDATION_NVMATH_H diff --git a/external/NvFoundation/1.1/include/NvPlane.h b/external/NvFoundation/1.1/include/NvPlane.h new file mode 100644 index 0000000..c61dfa9 --- /dev/null +++ b/external/NvFoundation/1.1/include/NvPlane.h @@ -0,0 +1,145 @@ +// This code contains NVIDIA Confidential Information and is disclosed to you +// under a form of NVIDIA software license agreement provided separately to you. 
+// +// Notice +// NVIDIA Corporation and its licensors retain all intellectual property and +// proprietary rights in and to this software and related documentation and +// any modifications thereto. Any use, reproduction, disclosure, or +// distribution of this software and related documentation without an express +// license agreement from NVIDIA Corporation is strictly prohibited. +// +// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES +// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO +// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT, +// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE. +// +// Information and code furnished is believed to be accurate and reliable. +// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such +// information or for any infringement of patents or other rights of third parties that may +// result from its use. No license is granted by implication or otherwise under any patent +// or patent rights of NVIDIA Corporation. Details are subject to change without notice. +// This code supersedes and replaces all information previously supplied. +// NVIDIA Corporation products are not authorized for use as critical +// components in life support devices or systems without express written approval of +// NVIDIA Corporation. +// +// Copyright (c) 2008-2014 NVIDIA Corporation. All rights reserved. +// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. +// Copyright (c) 2001-2004 NovodeX AG. All rights reserved. + +#ifndef NV_NVFOUNDATION_NVPLANE_H +#define NV_NVFOUNDATION_NVPLANE_H + +/** \addtogroup foundation +@{ +*/ + +#include "NvMath.h" +#include "NvVec3.h" + +#if !NV_DOXYGEN +namespace nvidia +{ +#endif + +/** +\brief Representation of a plane. 
+ + Plane equation used: n.dot(v) + d = 0 +*/ +class NvPlane +{ + public: + /** + \brief Constructor + */ + NV_CUDA_CALLABLE NV_FORCE_INLINE NvPlane() + { + } + + /** + \brief Constructor from a normal and a distance + */ + NV_CUDA_CALLABLE NV_FORCE_INLINE NvPlane(float nx, float ny, float nz, float distance) : n(nx, ny, nz), d(distance) + { + } + + /** + \brief Constructor from a normal and a distance + */ + NV_CUDA_CALLABLE NV_FORCE_INLINE NvPlane(const NvVec3& normal, float distance) : n(normal), d(distance) + { + } + + /** + \brief Constructor from a point on the plane and a normal + */ + NV_CUDA_CALLABLE NV_FORCE_INLINE NvPlane(const NvVec3& point, const NvVec3& normal) + : n(normal), d(-point.dot(n)) // p satisfies normal.dot(p) + d = 0 + { + } + + /** + \brief Constructor from three points + */ + NV_CUDA_CALLABLE NV_FORCE_INLINE NvPlane(const NvVec3& p0, const NvVec3& p1, const NvVec3& p2) + { + n = (p1 - p0).cross(p2 - p0).getNormalized(); + d = -p0.dot(n); + } + + /** + \brief returns true if the two planes are exactly equal + */ + NV_CUDA_CALLABLE NV_INLINE bool operator==(const NvPlane& p) const + { + return n == p.n && d == p.d; + } + + NV_CUDA_CALLABLE NV_FORCE_INLINE float distance(const NvVec3& p) const + { + return p.dot(n) + d; + } + + NV_CUDA_CALLABLE NV_FORCE_INLINE bool contains(const NvVec3& p) const + { + return NvAbs(distance(p)) < (1.0e-7f); + } + + /** + \brief projects p into the plane + */ + NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec3 project(const NvVec3& p) const + { + return p - n * distance(p); + } + + /** + \brief find an arbitrary point in the plane + */ + NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec3 pointInPlane() const + { + return -n * d; + } + + /** + \brief equivalent plane with unit normal + */ + + NV_CUDA_CALLABLE NV_FORCE_INLINE void normalize() + { + float denom = 1.0f / n.magnitude(); + n *= denom; + d *= denom; + } + + NvVec3 n; //!< The normal to the plane + float d; //!< The distance from the origin +}; + +#if !NV_DOXYGEN +} // 
namespace nvidia +#endif + +/** @} */ +#endif // #ifndef NV_NVFOUNDATION_NVPLANE_H diff --git a/external/NvFoundation/1.1/include/NvPreprocessor.h b/external/NvFoundation/1.1/include/NvPreprocessor.h new file mode 100644 index 0000000..8913106 --- /dev/null +++ b/external/NvFoundation/1.1/include/NvPreprocessor.h @@ -0,0 +1,540 @@ +// This code contains NVIDIA Confidential Information and is disclosed to you +// under a form of NVIDIA software license agreement provided separately to you. +// +// Notice +// NVIDIA Corporation and its licensors retain all intellectual property and +// proprietary rights in and to this software and related documentation and +// any modifications thereto. Any use, reproduction, disclosure, or +// distribution of this software and related documentation without an express +// license agreement from NVIDIA Corporation is strictly prohibited. +// +// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES +// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO +// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT, +// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE. +// +// Information and code furnished is believed to be accurate and reliable. +// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such +// information or for any infringement of patents or other rights of third parties that may +// result from its use. No license is granted by implication or otherwise under any patent +// or patent rights of NVIDIA Corporation. Details are subject to change without notice. +// This code supersedes and replaces all information previously supplied. +// NVIDIA Corporation products are not authorized for use as critical +// components in life support devices or systems without express written approval of +// NVIDIA Corporation. +// +// Copyright (c) 2008-2014 NVIDIA Corporation. All rights reserved. 
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.

#ifndef NV_NVFOUNDATION_NVPREPROCESSOR_H
#define NV_NVFOUNDATION_NVPREPROCESSOR_H

#include <stddef.h>

/** \addtogroup foundation
@{
*/

/*
The following preprocessor identifiers specify compiler, OS, and architecture.
All definitions have a value of 1 or 0, use '#if' instead of '#ifdef'.
*/

/**
Compiler defines, see http://sourceforge.net/p/predef/wiki/Compilers/
*/
#if defined(_MSC_VER)
// NV_VC holds the Visual Studio major product version (e.g. 14 == VS2015),
// derived from the compiler's _MSC_VER value.
#if _MSC_VER >= 1900
#define NV_VC 14
#elif _MSC_VER >= 1800
#define NV_VC 12
#elif _MSC_VER >= 1700
#define NV_VC 11
#elif _MSC_VER >= 1600
#define NV_VC 10
#elif _MSC_VER >= 1500
#define NV_VC 9
#else
#error "Unknown VC version"
#endif
#elif defined(__clang__)
#define NV_CLANG 1
#elif defined(__SNC__)
#define NV_SNC 1
#elif defined(__ghs__)
#define NV_GHS 1
#elif defined(__GNUC__) // note: __clang__, __SNC__, or __ghs__ imply __GNUC__
#define NV_GCC 1
#else
#error "Unknown compiler"
#endif

/**
Operating system defines, see http://sourceforge.net/p/predef/wiki/OperatingSystems/
*/
// The order of this chain matters: more specific platforms are tested before
// the generic macros they also define (see the inline notes below).
#if defined(WINAPI_FAMILY) && WINAPI_FAMILY == WINAPI_PARTITION_APP
#define NV_WINRT 1 // Windows Runtime, either on Windows RT or Windows 8
#elif defined(XBOXONE)
#define NV_XBOXONE 1
#elif defined(_WIN64) // note: XBOXONE implies _WIN64
#define NV_WIN64 1
#elif defined(_M_PPC)
#define NV_X360 1
#elif defined(_WIN32) // note: _M_PPC implies _WIN32
#define NV_WIN32 1
#elif defined(__ANDROID__)
#define NV_ANDROID 1
#elif defined(__linux__) // note: __ANDROID__ implies __linux__
#define NV_LINUX 1
#elif defined(__APPLE__) && (defined(__arm__) || defined(__arm64__))
#define NV_IOS 1
#elif defined(__APPLE__)
#define NV_OSX 1
#elif defined(__CELLOS_LV2__)
#define NV_PS3 1
#elif defined(__ORBIS__)
#define NV_PS4 1
#elif defined(__SNC__) && defined(__arm__)
#define NV_PSP2 1
#elif defined(__ghs__)
#define NV_WIIU 1
#else
#error "Unknown operating system"
#endif

/**
Architecture defines, see http://sourceforge.net/p/predef/wiki/Architectures/
*/
#if defined(__x86_64__) || defined(_M_X64) // ps4 compiler defines _M_X64 without value
#define NV_X64 1
#elif defined(__i386__) || defined(_M_IX86)
#define NV_X86 1
#elif defined(__arm64__) || defined(__aarch64__)
#define NV_A64 1
#elif defined(__arm__) || defined(_M_ARM)
#define NV_ARM 1
#elif defined(__SPU__)
#define NV_SPU 1
#elif defined(__ppc__) || defined(_M_PPC) || defined(__CELLOS_LV2__)
#define NV_PPC 1
#else
#error "Unknown architecture"
#endif

/**
SIMD defines
*/
// Note: these indicate instruction-set availability on the target, keyed off
// the architecture macros above, not off compiler feature-test macros.
#if defined(__i386__) || defined(_M_IX86) || defined(__x86_64__) || defined(_M_X64)
#define NV_SSE2 1
#endif
#if defined(_M_ARM) || defined(__ARM_NEON__)
#define NV_NEON 1
#endif
#if defined(_M_PPC) || defined(__CELLOS_LV2__)
#define NV_VMX 1
#endif

/**
define anything not defined on this platform to 0
*/
#ifndef NV_VC
#define NV_VC 0
#endif
#ifndef NV_CLANG
#define NV_CLANG 0
#endif
#ifndef NV_SNC
#define NV_SNC 0
#endif
#ifndef NV_GHS
#define NV_GHS 0
#endif
#ifndef NV_GCC
#define NV_GCC 0
#endif
#ifndef NV_WINRT
#define NV_WINRT 0
#endif
#ifndef NV_XBOXONE
#define NV_XBOXONE 0
#endif
#ifndef NV_WIN64
#define NV_WIN64 0
#endif
#ifndef NV_X360
#define NV_X360 0
#endif
#ifndef NV_WIN32
#define NV_WIN32 0
#endif
#ifndef NV_ANDROID
#define NV_ANDROID 0
#endif
#ifndef NV_LINUX
#define NV_LINUX 0
#endif
#ifndef NV_IOS
#define NV_IOS 0
#endif
#ifndef NV_OSX
#define NV_OSX 0
#endif
#ifndef NV_PS3
#define NV_PS3 0
#endif
#ifndef NV_PS4
#define NV_PS4 0
#endif
#ifndef NV_PSP2
#define NV_PSP2 0
#endif
#ifndef NV_WIIU
#define NV_WIIU 0
#endif
#ifndef NV_X64
#define NV_X64 0
#endif
#ifndef NV_X86
#define NV_X86 0
#endif
#ifndef NV_A64
#define NV_A64 0
#endif
#ifndef NV_ARM
#define NV_ARM 0
#endif
#ifndef NV_SPU
#define NV_SPU 0
#endif
#ifndef NV_PPC
#define NV_PPC 0
#endif
#ifndef NV_SSE2
#define NV_SSE2 0
#endif
#ifndef NV_NEON
#define NV_NEON 0
#endif
#ifndef NV_VMX
#define NV_VMX 0
#endif

/*
define anything not defined through the command line to 0
*/
#ifndef NV_DEBUG
#define NV_DEBUG 0
#endif
#ifndef NV_CHECKED
#define NV_CHECKED 0
#endif
#ifndef NV_PROFILE
#define NV_PROFILE 0
#endif
#ifndef NV_NVTX
#define NV_NVTX 0
#endif
#ifndef NV_DOXYGEN
#define NV_DOXYGEN 0
#endif

/**
family shortcuts
*/
// compiler
#define NV_GCC_FAMILY (NV_CLANG || NV_SNC || NV_GHS || NV_GCC)
// os
#define NV_WINDOWS_FAMILY (NV_WINRT || NV_WIN32 || NV_WIN64)
#define NV_MICROSOFT_FAMILY (NV_XBOXONE || NV_X360 || NV_WINDOWS_FAMILY)
#define NV_LINUX_FAMILY (NV_LINUX || NV_ANDROID)
#define NV_APPLE_FAMILY (NV_IOS || NV_OSX)                  // equivalent to #if __APPLE__
#define NV_UNIX_FAMILY (NV_LINUX_FAMILY || NV_APPLE_FAMILY) // shortcut for unix/posix platforms
// architecture
#define NV_INTEL_FAMILY (NV_X64 || NV_X86)
#define NV_ARM_FAMILY (NV_ARM || NV_A64)
#define NV_P64_FAMILY (NV_X64 || NV_A64) // shortcut for 64-bit architectures

// shortcut for PS3 PPU
#define NV_PPU (NV_PS3&& NV_PPC)

/**
Assert macro
*/
// Asserts default to on in debug builds, but never inside CUDA device code.
#ifndef NV_ENABLE_ASSERTS
#if NV_DEBUG && !defined(__CUDACC__)
#define NV_ENABLE_ASSERTS 1
#else
#define NV_ENABLE_ASSERTS 0
#endif
#endif

/**
DLL export macros
*/
#ifndef NV_C_EXPORT
#if NV_WINDOWS_FAMILY || NV_LINUX
#define NV_C_EXPORT extern "C"
#else
#define NV_C_EXPORT
#endif
#endif

#if NV_UNIX_FAMILY&& __GNUC__ >= 4
#define NV_UNIX_EXPORT __attribute__((visibility("default")))
#else
#define NV_UNIX_EXPORT
#endif

#if NV_WINDOWS_FAMILY
#define NV_DLL_EXPORT __declspec(dllexport)
#define NV_DLL_IMPORT __declspec(dllimport)
#else
#define NV_DLL_EXPORT NV_UNIX_EXPORT
#define NV_DLL_IMPORT
#endif

/**
Define API function declaration

NV_FOUNDATION_DLL=1 - used by the DLL library (PhysXCommon) to export the API
NV_FOUNDATION_DLL=0 - for
windows configurations where the NV_FOUNDATION_API is linked through standard static linking
no definition - this will allow DLLs and libraries to use the exported API from PhysXCommon

*/

#if NV_WINDOWS_FAMILY && !NV_ARM_FAMILY || NV_WINRT
#ifndef NV_FOUNDATION_DLL
#define NV_FOUNDATION_API NV_DLL_IMPORT
#elif NV_FOUNDATION_DLL
#define NV_FOUNDATION_API NV_DLL_EXPORT
#endif
#elif NV_UNIX_FAMILY
#ifdef NV_FOUNDATION_DLL
#define NV_FOUNDATION_API NV_UNIX_EXPORT
#endif
#endif

// Fallback: static linking, or a platform not covered above.
#ifndef NV_FOUNDATION_API
#define NV_FOUNDATION_API
#endif

/**
Calling convention
*/
#ifndef NV_CALL_CONV
#if NV_MICROSOFT_FAMILY
#define NV_CALL_CONV __cdecl
#else
#define NV_CALL_CONV
#endif
#endif

/**
Pack macros - disabled on SPU because they are not supported
*/
// Both forms pin struct packing to 8 so headers and prebuilt binaries agree
// regardless of the including application's /Zp or -fpack-struct settings.
#if NV_VC
#define NV_PUSH_PACK_DEFAULT __pragma(pack(push, 8))
#define NV_POP_PACK __pragma(pack(pop))
#elif NV_GCC_FAMILY && !NV_SPU
#define NV_PUSH_PACK_DEFAULT _Pragma("pack(push, 8)")
#define NV_POP_PACK _Pragma("pack(pop)")
#else
#define NV_PUSH_PACK_DEFAULT
#define NV_POP_PACK
#endif

/**
Inline macro
*/
#define NV_INLINE inline
#if NV_MICROSOFT_FAMILY
#pragma inline_depth(255)
#endif

/**
Force inline macro
*/
#if NV_VC
#define NV_FORCE_INLINE __forceinline
#elif NV_LINUX // Workaround; Fedora Core 3 does not agree with force inline and NvcPool
#define NV_FORCE_INLINE inline
#elif NV_GCC_FAMILY
#define NV_FORCE_INLINE inline __attribute__((always_inline))
#else
#define NV_FORCE_INLINE inline
#endif

/**
Noinline macro
*/
#if NV_MICROSOFT_FAMILY
#define NV_NOINLINE __declspec(noinline)
#elif NV_GCC_FAMILY
#define NV_NOINLINE __attribute__((noinline))
#else
#define NV_NOINLINE
#endif

/**
Restrict macro
*/
#if defined(__CUDACC__)
#define NV_RESTRICT __restrict__
#else
#define NV_RESTRICT __restrict
#endif

/**
Noalias macro
*/
#if NV_MICROSOFT_FAMILY
#define NV_NOALIAS __declspec(noalias)
#else
#define NV_NOALIAS
#endif

/**
Alignment macros

NV_ALIGN_PREFIX and NV_ALIGN_SUFFIX can be used for type alignment instead of aligning individual variables as follows:
NV_ALIGN_PREFIX(16)
struct A {
...
} NV_ALIGN_SUFFIX(16);
This declaration style is parsed correctly by Visual Assist.

*/
#ifndef NV_ALIGN
#if NV_MICROSOFT_FAMILY
// MSVC wants the alignment attribute before the declaration...
#define NV_ALIGN(alignment, decl) __declspec(align(alignment)) decl
#define NV_ALIGN_PREFIX(alignment) __declspec(align(alignment))
#define NV_ALIGN_SUFFIX(alignment)
#elif NV_GCC_FAMILY
// ...while GCC-style compilers want it after; hence the prefix/suffix pair.
#define NV_ALIGN(alignment, decl) decl __attribute__((aligned(alignment)))
#define NV_ALIGN_PREFIX(alignment)
#define NV_ALIGN_SUFFIX(alignment) __attribute__((aligned(alignment)))
#else
#define NV_ALIGN(alignment, decl)
#define NV_ALIGN_PREFIX(alignment)
#define NV_ALIGN_SUFFIX(alignment)
#endif
#endif

/**
Deprecated macro
- To deprecate a function: Place NV_DEPRECATED at the start of the function header (leftmost word).
- To deprecate a 'typedef', a 'struct' or a 'class': Place NV_DEPRECATED directly after the keywords ('typedef',
'struct', 'class').

Use these macro definitions to create warnings for deprecated functions
#define NV_DEPRECATED __declspec(deprecated) // Microsoft
#define NV_DEPRECATED __attribute__((deprecated())) // GCC
*/
#define NV_DEPRECATED

/**
General defines
*/

// static assert
// Implemented as a typedef of a char array whose size becomes -1 (ill-formed)
// when the condition is false; the GCC>=4.7 variant suppresses unused-typedef warnings.
#if defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 7))
#define NV_COMPILE_TIME_ASSERT(exp) typedef char NvCompileTimeAssert_Dummy[(exp) ? 1 : -1] __attribute__((unused))
#else
#define NV_COMPILE_TIME_ASSERT(exp) typedef char NvCompileTimeAssert_Dummy[(exp) ? 1 : -1]
#endif

#if NV_GCC_FAMILY && !NV_SNC && !NV_GHS
#define NV_OFFSET_OF(X, Y) __builtin_offsetof(X, Y)
#else
#define NV_OFFSET_OF(X, Y) offsetof(X, Y)
#endif

#define NV_OFFSETOF_BASE 0x100 // casting the null ptr takes a special-case code path, which we don't want
#define NV_OFFSET_OF_RT(Class, Member)                                                                                 \
	(reinterpret_cast<size_t>(&reinterpret_cast<Class*>(NV_OFFSETOF_BASE)->Member) - size_t(NV_OFFSETOF_BASE))

// check that exactly one of NDEBUG and _DEBUG is defined
#if !defined(NDEBUG) ^ defined(_DEBUG)
#error Exactly one of NDEBUG and _DEBUG needs to be defined!
#endif

// make sure NV_CHECKED is defined in all _DEBUG configurations as well
// NOTE(review): NV_CHECKED is unconditionally defined (to 0) earlier in this
// header, so this branch can never fire - confirm whether it is still intended.
#if !defined(NV_CHECKED) && defined(NV_DEBUG)
#error NV_CHECKED must be defined when NV_DEBUG is defined
#endif

// Expands to __host__ __device__ under nvcc so the math classes below are
// usable from both CPU and GPU code; empty for ordinary host compilers.
#ifdef __CUDACC__
#define NV_CUDA_CALLABLE __host__ __device__
#else
#define NV_CUDA_CALLABLE
#endif

// avoid unreferenced parameter warning
// preferred solution: omit the parameter's name from the declaration
template <class T>
NV_CUDA_CALLABLE NV_INLINE void NV_UNUSED(T const&)
{
}

// Ensure that the application hasn't tweaked the pack value to less than 8, which would break
// matching between the API headers and the binaries
// This assert works on win32/win64/360/ps3, but may need further specialization on other platforms.
// Some GCC compilers need the compiler flag -malign-double to be set.
// Apparently the apple-clang-llvm compiler doesn't support malign-double.
// NvPackValidation pairs a char with an 8-byte member; the compile-time assert
// below verifies the member lands at offset 8, i.e. packing is still >= 8.
#if NV_PS4 || NV_APPLE_FAMILY
struct NvPackValidation
{
	char _;
	long a; // 8 bytes on these LP64 targets
};
#elif NV_ANDROID
struct NvPackValidation
{
	char _;
	double a; // long/long long alignment differs across Android ABIs; double is reliably 8-aligned
};
#else
struct NvPackValidation
{
	char _;
	long long a;
};
#endif
#if !NV_APPLE_FAMILY
NV_COMPILE_TIME_ASSERT(NV_OFFSET_OF(NvPackValidation, a) == 8);
#endif

// use in a cpp file to suppress LNK4221
#if NV_VC
#define NV_DUMMY_SYMBOL                                                                                                \
	namespace                                                                                                          \
	{                                                                                                                  \
	char NvDummySymbol;                                                                                                \
	}
#else
#define NV_DUMMY_SYMBOL
#endif

#if NV_GCC_FAMILY && !NV_GHS
#define NV_WEAK_SYMBOL __attribute__((weak)) // this is to support SIMD constant merging in template specialization
#else
#define NV_WEAK_SYMBOL
#endif

// Macro for avoiding default assignment and copy, because doing this by inheritance can increase class size on some
// platforms.
#define NV_NOCOPY(Class)                                                                                               \
	\
protected:                                                                                                             \
	Class(const Class&);                                                                                               \
	Class& operator=(const Class&);

#define NV_STRINGIZE_HELPER(X) #X
#define NV_STRINGIZE(X) NV_STRINGIZE_HELPER(X)

#define NV_CONCAT_HELPER(X, Y) X##Y
#define NV_CONCAT(X, Y) NV_CONCAT_HELPER(X, Y)

/** @} */
#endif // #ifndef NV_NVFOUNDATION_NVPREPROCESSOR_H
diff --git a/external/NvFoundation/1.1/include/NvProfiler.h b/external/NvFoundation/1.1/include/NvProfiler.h
new file mode 100644
index 0000000..67a4c81
--- /dev/null
+++ b/external/NvFoundation/1.1/include/NvProfiler.h
@@ -0,0 +1,219 @@
// This code contains NVIDIA Confidential Information and is disclosed to you
// under a form of NVIDIA software license agreement provided separately to you.
//
// Notice
// NVIDIA Corporation and its licensors retain all intellectual property and
// proprietary rights in and to this software and related documentation and
// any modifications thereto. Any use, reproduction, disclosure, or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA Corporation is strictly prohibited.
+// +// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES +// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO +// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT, +// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE. +// +// Information and code furnished is believed to be accurate and reliable. +// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such +// information or for any infringement of patents or other rights of third parties that may +// result from its use. No license is granted by implication or otherwise under any patent +// or patent rights of NVIDIA Corporation. Details are subject to change without notice. +// This code supersedes and replaces all information previously supplied. +// NVIDIA Corporation products are not authorized for use as critical +// components in life support devices or systems without express written approval of +// NVIDIA Corporation. +// +// Copyright (c) 2008-2014 NVIDIA Corporation. All rights reserved. 

#ifndef NV_PROFILER_H
#define NV_PROFILER_H

#include <NvSimpleTypes.h>

// Global accessors for the process-wide profiler callback. A null callback
// (the default) disables all profiling; see the NV_PROFILE_* macros below.
namespace nvidia
{
	class NvProfilerCallback;
	namespace shdfnd
	{
		NV_FOUNDATION_API NvProfilerCallback *getProfilerCallback();
		NV_FOUNDATION_API void setProfilerCallback(NvProfilerCallback *profiler);
	}
}


namespace nvidia
{

struct NvProfileTypes
{
	enum Enum
	{
		eNORMAL = 0,       //!< ordinary profile zone, starts and ends in same thread
		eSTALL = 1,        //!< thread is busy but can't progress (example: spin-lock)
		eIDLE = 2,         //!< thread is idle (example: waiting for event)
		eDETACHED = 3,     //!< zone crosses thread boundary
		eLOCK = 4,         //!< thread tries to acquire a lock, reports result on zoneEnd()
		eLOCK_SUCCESS = 5, //!< locking mutex succeeded, to be passed to zoneEnd()
		eLOCK_FAILED = 6,  //!< locking mutex failed, to be passed to zoneEnd()
		eLOCK_TIMEOUT = 7  //!< locking mutex timed out, to be passed to zoneEnd()
	};
};

struct NvProfileContext
{
	enum Enum
	{
		eNONE = 0 //!< value for no specific profile context. \see NvProfilerCallback::zoneAt
	};
};


/**
\brief The pure virtual callback interface for general purpose instrumentation and profiling of GameWorks modules as well as applications
*/
class NvProfilerCallback
{
public:

	/**************************************************************************************************************************
	Instrumented profiling events
	***************************************************************************************************************************/

	/**
	\brief Mark the beginning of a nested profile block
	\param eventName Event name. Must be a persistent const char *
	\param type What type this zone is (i.e. normal, cross thread, lock, etc.). eLOCK_* should not be used here.
	\param contextId the context id of this zone. Zones with the same id belong to the same group. 0 is used for no specific group.
	\param filename The source code filename where this profile event begins
	\param lineno The source code line number where this profile event begins
	*/
	virtual void zoneStart(const char* eventName, NvProfileTypes::Enum type, uint64_t contextId, const char *filename, int lineno) = 0;

	/**
	\brief Mark the end of a nested profile block
	\param eventName The name of the zone ending, must match the corresponding name passed with 'zoneStart'. Must be a persistent const char *
	\param type What type this zone is (i.e. normal, cross thread, lock, etc.). Should match the value passed to zoneStart, except if eLOCK was passed to zoneStart. In this case, type should be one of eLOCK_*.
	\param contextId The context of this zone. Should match the value passed to zoneStart.
	\note eventName plus contextId can be used to uniquely match up start and end of a zone.
	*/
	virtual void zoneEnd(const char *eventName, NvProfileTypes::Enum type, uint64_t contextId) = 0;

	/**************************************************************************************************************************
	TimeSpan events; profiling events not associated with any specific thread.
	***************************************************************************************************************************/

	/**
	\brief Return current time
	This function is called to calibrate the start and end times passed to zoneAt().
	*/
	virtual uint64_t getTime() = 0;

	/**
	\brief Send a discrete zone of data with manually recorded time stamps
	\param start The timestamp and the start of the span
	\param end The timestamp at the end of the span
	\param name The name of this event. Must be a persistent const char *
	\param type What type this zone is (i.e. normal, cross thread, lock, etc.). eLOCK or eLOCK_* should not be used here.
	\param contextId The context id for this event
	\param filename The source code filename where this zone is being sent from
	\param linenumber The source code line number where this zone is being sent from
	*/
	virtual void zoneAt(uint64_t start, uint64_t end, const char* name, NvProfileTypes::Enum type, uint64_t contextId, const char* filename, int linenumber) = 0;

	/**************************************************************************************************************************
	Bundled CUDA warp profile events
	***************************************************************************************************************************/

	/**
	\brief Bundled profiling data relating to CUDA warps
	*/
	struct WarpProfileEvent
	{
		uint16_t block;
		uint8_t warp;
		uint8_t mpId;
		uint8_t hwWarpId;
		uint8_t userDataCfg;
		uint16_t eventId;
		uint32_t startTime;
		uint32_t endTime;
	};

	/**
	\brief Converts a string into a unique 16 bit integer value; used when sending CUDA kernel warp data.
	\param str The source string
	\return Returns the unique id associated with this string which can later be passed via the WarpProfileEvent structure
	*/
	virtual uint16_t getStringID(const char *str) = 0;

	/**
	\brief Defines the format version number for the kernel warp data struct
	*/
	static const uint32_t CurrentCUDABufferFormat = 1;

	/**
	\brief Send a CUDA profile buffer. We assume the submit time is almost exactly the end time of the batch.
	We then work backwards, using the batchRuntimeInMilliseconds in order to get the original time
	of the batch. The buffer format is described in GPUProfile.h.

	\param batchRuntimeInMilliseconds The batch runtime in milliseconds, see cuEventElapsedTime.
	\param cudaData An opaque pointer to the buffer of cuda data.
	\param eventCount number of events
	\param bufferVersion Version of the format of the cuda data.
	*/
	virtual void CUDAProfileBuffer( float batchRuntimeInMilliseconds, const WarpProfileEvent* cudaData, uint32_t eventCount, uint32_t bufferVersion = CurrentCUDABufferFormat ) = 0;

	protected:
	// Non-public, non-virtual-dispatch destruction: implementations own their lifetime.
	virtual ~NvProfilerCallback(void) {}
};

/**
\brief RAII helper: opens a zone in the constructor and closes it in the destructor.
*/
class NvProfileScoped
{
public:
	NV_FORCE_INLINE NvProfileScoped(const char *eventName, NvProfileTypes::Enum type, uint64_t contextId, const char *fileName, int lineno)
		: mCallback(nvidia::shdfnd::getProfilerCallback())
	{
		if ( mCallback )
		{
			mEventName = eventName;
			mType = type;
			mContextId = contextId;
			mCallback->zoneStart(mEventName, mType, mContextId, fileName, lineno);
		}
	}
	~NvProfileScoped(void)
	{
		// NOTE(review): mEventName/mType/mContextId stay uninitialized when no
		// callback is installed; they are only read behind this same null check.
		if ( mCallback )
		{
			mCallback->zoneEnd(mEventName, mType, mContextId);
		}
	}
	nvidia::NvProfilerCallback *mCallback;
	const char *mEventName;
	NvProfileTypes::Enum mType;
	uint64_t mContextId;
};



} // end of NVIDIA namespace



// Profiling macros compile to nothing in pure release (non-profile) builds.
#if NV_DEBUG || NV_CHECKED || NV_PROFILE

#define NV_PROFILE_ZONE(x,y) nvidia::NvProfileScoped NV_CONCAT(_scoped,__LINE__)(x,nvidia::NvProfileTypes::eNORMAL,y,__FILE__,__LINE__)
#define NV_PROFILE_START_CROSSTHREAD(x,y) if ( nvidia::shdfnd::getProfilerCallback() ) nvidia::shdfnd::getProfilerCallback()->zoneStart(x,nvidia::NvProfileTypes::eDETACHED,y,__FILE__,__LINE__)
#define NV_PROFILE_STOP_CROSSTHREAD(x,y) if ( nvidia::shdfnd::getProfilerCallback() ) nvidia::shdfnd::getProfilerCallback()->zoneEnd(x,nvidia::NvProfileTypes::eDETACHED,y)

#else

#define NV_PROFILE_ZONE(x,y)
#define NV_PROFILE_START_CROSSTHREAD(x,y)
#define NV_PROFILE_STOP_CROSSTHREAD(x,y)

#endif

#define NV_PROFILE_POINTER_TO_U64( pointer ) static_cast<uint64_t>(reinterpret_cast<size_t>(pointer))

#endif
diff --git a/external/NvFoundation/1.1/include/NvQuat.h b/external/NvFoundation/1.1/include/NvQuat.h
new file mode 100644
index 0000000..54f83cc
--- /dev/null
+++ b/external/NvFoundation/1.1/include/NvQuat.h
@@ -0,0 +1,398 @@
// This code contains NVIDIA Confidential
Information and is disclosed to you +// under a form of NVIDIA software license agreement provided separately to you. +// +// Notice +// NVIDIA Corporation and its licensors retain all intellectual property and +// proprietary rights in and to this software and related documentation and +// any modifications thereto. Any use, reproduction, disclosure, or +// distribution of this software and related documentation without an express +// license agreement from NVIDIA Corporation is strictly prohibited. +// +// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES +// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO +// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT, +// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE. +// +// Information and code furnished is believed to be accurate and reliable. +// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such +// information or for any infringement of patents or other rights of third parties that may +// result from its use. No license is granted by implication or otherwise under any patent +// or patent rights of NVIDIA Corporation. Details are subject to change without notice. +// This code supersedes and replaces all information previously supplied. +// NVIDIA Corporation products are not authorized for use as critical +// components in life support devices or systems without express written approval of +// NVIDIA Corporation. +// +// Copyright (c) 2008-2014 NVIDIA Corporation. All rights reserved. +// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. +// Copyright (c) 2001-2004 NovodeX AG. All rights reserved. + +#ifndef NV_NVFOUNDATION_NVQUAT_H +#define NV_NVFOUNDATION_NVQUAT_H + +/** \addtogroup foundation +@{ +*/ + +#include "NvVec3.h" +#if !NV_DOXYGEN +namespace nvidia +{ +#endif + +/** +\brief This is a quaternion class. 
For more information on quaternion mathematics
consult a mathematics source on complex numbers.

*/

class NvQuat
{
  public:
	/**
	\brief Default constructor, does not do any initialization.
	*/
	NV_CUDA_CALLABLE NV_FORCE_INLINE NvQuat()
	{
	}

	//! identity constructor (0, 0, 0, 1)
	NV_CUDA_CALLABLE NV_INLINE NvQuat(NvIDENTITY r) : x(0.0f), y(0.0f), z(0.0f), w(1.0f)
	{
		NV_UNUSED(r);
	}

	/**
	\brief Constructor from a scalar: sets the real part w to the scalar value, and the imaginary parts (x,y,z) to zero
	*/
	explicit NV_CUDA_CALLABLE NV_FORCE_INLINE NvQuat(float r) : x(0.0f), y(0.0f), z(0.0f), w(r)
	{
	}

	/**
	\brief Constructor. Take note of the order of the elements! (imaginary x,y,z first, real w last)
	*/
	NV_CUDA_CALLABLE NV_FORCE_INLINE NvQuat(float nx, float ny, float nz, float nw) : x(nx), y(ny), z(nz), w(nw)
	{
	}

	/**
	\brief Creates from angle-axis representation.

	Axis must be normalized!

	Angle is in radians!

	<b>Unit:</b> Radians
	*/
	NV_CUDA_CALLABLE NV_INLINE NvQuat(float angleRadians, const NvVec3& unitAxis)
	{
		NV_ASSERT(NvAbs(1.0f - unitAxis.magnitude()) < 1e-3f);
		// Half-angle form: q = (sin(a/2) * axis, cos(a/2))
		const float a = angleRadians * 0.5f;
		const float s = NvSin(a);
		w = NvCos(a);
		x = unitAxis.x * s;
		y = unitAxis.y * s;
		z = unitAxis.z * s;
	}

	/**
	\brief Copy ctor.
	*/
	NV_CUDA_CALLABLE NV_FORCE_INLINE NvQuat(const NvQuat& v) : x(v.x), y(v.y), z(v.z), w(v.w)
	{
	}

	/**
	\brief Creates from orientation matrix.

	\param[in] m Rotation matrix to extract quaternion from.
	*/
	NV_CUDA_CALLABLE NV_INLINE explicit NvQuat(const NvMat33& m); /* defined in NvMat33.h */

	/**
	\brief returns true if all elements are finite (not NAN or INF, etc.)
	*/
	NV_CUDA_CALLABLE bool isFinite() const
	{
		return NvIsFinite(x) && NvIsFinite(y) && NvIsFinite(z) && NvIsFinite(w);
	}

	/**
	\brief returns true if finite and magnitude is close to unit
	*/

	NV_CUDA_CALLABLE bool isUnit() const
	{
		const float unitTolerance = 1e-4f;
		return isFinite() && NvAbs(magnitude() - 1) < unitTolerance;
	}

	/**
	\brief returns true if finite and magnitude is reasonably close to unit to allow for some accumulation of error vs
	isValid
	*/

	NV_CUDA_CALLABLE bool isSane() const
	{
		// looser tolerance than isUnit() to absorb accumulated numeric drift
		const float unitTolerance = 1e-2f;
		return isFinite() && NvAbs(magnitude() - 1) < unitTolerance;
	}

	/**
	\brief returns true if the two quaternions are exactly equal (no tolerance)
	*/
	NV_CUDA_CALLABLE NV_INLINE bool operator==(const NvQuat& q) const
	{
		return x == q.x && y == q.y && z == q.z && w == q.w;
	}

	/**
	\brief converts this quaternion to angle-axis representation
	*/

	NV_CUDA_CALLABLE NV_INLINE void toRadiansAndUnitAxis(float& angle, NvVec3& axis) const
	{
		const float quatEpsilon = 1.0e-8f;
		const float s2 = x * x + y * y + z * z;
		if(s2 < quatEpsilon * quatEpsilon) // can't extract a sensible axis
		{
			angle = 0.0f;
			axis = NvVec3(1.0f, 0.0f, 0.0f);
		}
		else
		{
			const float s = NvRecipSqrt(s2);
			axis = NvVec3(x, y, z) * s;
			// atan2 form is numerically stable for angles near pi
			angle = NvAbs(w) < quatEpsilon ? NvPi : NvAtan2(s2 * s, w) * 2.0f;
		}
	}

	/**
	\brief Gets the angle between this quat and the identity quaternion.

	<b>Unit:</b> Radians
	*/
	NV_CUDA_CALLABLE NV_INLINE float getAngle() const
	{
		// NOTE(review): w is not clamped to [-1,1]; a slightly denormalized
		// quaternion can make NvAcos return NaN - confirm callers pass unit quats.
		return NvAcos(w) * 2.0f;
	}

	/**
	\brief Gets the angle between this quat and the argument

	<b>Unit:</b> Radians
	*/
	NV_CUDA_CALLABLE NV_INLINE float getAngle(const NvQuat& q) const
	{
		return NvAcos(dot(q)) * 2.0f;
	}

	/**
	\brief This is the squared 4D vector length, should be 1 for unit quaternions.
	*/
	NV_CUDA_CALLABLE NV_FORCE_INLINE float magnitudeSquared() const
	{
		return x * x + y * y + z * z + w * w;
	}

	/**
	\brief returns the scalar product of this and other.
	*/
	NV_CUDA_CALLABLE NV_FORCE_INLINE float dot(const NvQuat& v) const
	{
		return x * v.x + y * v.y + z * v.z + w * v.w;
	}

	/**
	\brief returns a unit-length copy of this quaternion; *this is not modified.
	*/
	NV_CUDA_CALLABLE NV_INLINE NvQuat getNormalized() const
	{
		const float s = 1.0f / magnitude();
		return NvQuat(x * s, y * s, z * s, w * s);
	}

	/**
	\brief returns the 4D vector length of this quaternion.
	*/
	NV_CUDA_CALLABLE NV_INLINE float magnitude() const
	{
		return NvSqrt(magnitudeSquared());
	}

	// modifiers:
	/**
	\brief maps to the closest unit quaternion.
	\return the magnitude prior to normalization; a zero quaternion is left unchanged.
	*/
	NV_CUDA_CALLABLE NV_INLINE float normalize() // convert this NvQuat to a unit quaternion
	{
		const float mag = magnitude();
		if(mag != 0.0f)
		{
			const float imag = 1.0f / mag;

			x *= imag;
			y *= imag;
			z *= imag;
			w *= imag;
		}
		return mag;
	}

	/*
	\brief returns the conjugate.

	\note for unit quaternions, this is the inverse.
	*/
	NV_CUDA_CALLABLE NV_INLINE NvQuat getConjugate() const
	{
		return NvQuat(-x, -y, -z, w);
	}

	/*
	\brief returns imaginary part.
	*/
	NV_CUDA_CALLABLE NV_INLINE NvVec3 getImaginaryPart() const
	{
		return NvVec3(x, y, z);
	}

	/** \brief computes rotation of x-axis */
	NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec3 getBasisVector0() const
	{
		const float x2 = x * 2.0f;
		const float w2 = w * 2.0f;
		return NvVec3((w * w2) - 1.0f + x * x2, (z * w2) + y * x2, (-y * w2) + z * x2);
	}

	/** \brief computes rotation of y-axis */
	NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec3 getBasisVector1() const
	{
		const float y2 = y * 2.0f;
		const float w2 = w * 2.0f;
		return NvVec3((-z * w2) + x * y2, (w * w2) - 1.0f + y * y2, (x * w2) + z * y2);
	}

	/** \brief computes rotation of z-axis */
	NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec3 getBasisVector2() const
	{
		const float z2 = z * 2.0f;
		const float w2 = w * 2.0f;
		return NvVec3((y * w2) + x * z2, (-x * w2) + y * z2, (w * w2) - 1.0f + z * z2);
	}

	/**
	rotates passed vec by this (assumed unitary)
	*/
	NV_CUDA_CALLABLE NV_FORCE_INLINE const NvVec3 rotate(const NvVec3& v) const
	{
		// expanded form of q * v * q^-1, valid only for unit quaternions
		const float vx = 2.0f * v.x;
		const float vy = 2.0f * v.y;
		const float vz = 2.0f * v.z;
		const float w2 = w * w - 0.5f;
		const float dot2 = (x * vx + y * vy + z * vz);
		return NvVec3((vx * w2 + (y * vz - z * vy) * w + x * dot2), (vy * w2 + (z * vx - x * vz) * w + y * dot2),
		              (vz * w2 + (x * vy - y * vx) * w + z * dot2));
	}

	/**
	inverse rotates passed vec by this (assumed unitary)
	*/
	NV_CUDA_CALLABLE NV_FORCE_INLINE const NvVec3 rotateInv(const NvVec3& v) const
	{
		// same as rotate() with the sign of the cross-product terms flipped
		const float vx = 2.0f * v.x;
		const float vy = 2.0f * v.y;
		const float vz = 2.0f * v.z;
		const float w2 = w * w - 0.5f;
		const float dot2 = (x * vx + y * vy + z * vz);
		return NvVec3((vx * w2 - (y * vz - z * vy) * w + x * dot2), (vy * w2 - (z * vx - x * vz) * w + y * dot2),
		              (vz * w2 - (x * vy - y * vx) * w + z * dot2));
	}

	/**
	\brief Assignment operator
	*/
	NV_CUDA_CALLABLE NV_FORCE_INLINE NvQuat& operator=(const NvQuat& p)
	{
		x = p.x;
		y = p.y;
		z = p.z;
		w = p.w;
		return *this;
	}

	NV_CUDA_CALLABLE NV_FORCE_INLINE NvQuat& operator*=(const NvQuat& q)
	{
		// temporaries hold the new imaginary part so the w update below
		// still reads the original x, y, z values
		const float tx = w * q.x + q.w * x + y * q.z - q.y * z;
		const float ty = w * q.y + q.w * y + z * q.x - q.z * x;
		const float tz = w * q.z + q.w * z + x * q.y - q.x * y;

		w = w * q.w - q.x * x - y * q.y - q.z * z;
		x = tx;
		y = ty;
		z = tz;

		return *this;
	}

	NV_CUDA_CALLABLE NV_FORCE_INLINE NvQuat& operator+=(const NvQuat& q)
	{
		x += q.x;
		y += q.y;
		z += q.z;
		w += q.w;
		return *this;
	}

	NV_CUDA_CALLABLE NV_FORCE_INLINE NvQuat& operator-=(const NvQuat& q)
	{
		x -= q.x;
		y -= q.y;
		z -= q.z;
		w -= q.w;
		return *this;
	}

	NV_CUDA_CALLABLE NV_FORCE_INLINE NvQuat& operator*=(const float s)
	{
		x *= s;
		y *= s;
		z *= s;
		w *= s;
		return *this;
	}

	/** quaternion multiplication */
	NV_CUDA_CALLABLE NV_INLINE NvQuat operator*(const NvQuat& q) const
	{
		return NvQuat(w * q.x + q.w * x + y * q.z - q.y * z, w * q.y + q.w * y + z * q.x - q.z * x,
		              w * q.z + q.w * z + x * q.y - q.x * y, w * q.w - x * q.x - y * q.y - z * q.z);
	}

	/** quaternion addition */
	NV_CUDA_CALLABLE NV_FORCE_INLINE NvQuat operator+(const NvQuat& q) const
	{
		return NvQuat(x + q.x, y + q.y, z + q.z, w + q.w);
	}

	/** quaternion negation (component-wise; represents the same rotation) */
	NV_CUDA_CALLABLE NV_FORCE_INLINE NvQuat operator-() const
	{
		return NvQuat(-x, -y, -z, -w);
	}

	NV_CUDA_CALLABLE NV_FORCE_INLINE NvQuat operator-(const NvQuat& q) const
	{
		return NvQuat(x - q.x, y - q.y, z - q.z, w - q.w);
	}

	NV_CUDA_CALLABLE NV_FORCE_INLINE NvQuat operator*(float r) const
	{
		return NvQuat(x * r, y * r, z * r, w * r);
	}

	/** the quaternion elements */
	float x, y, z, w;
};

#if !NV_DOXYGEN
} // namespace nvidia
#endif

/** @} */
#endif // #ifndef NV_NVFOUNDATION_NVQUAT_H
diff --git a/external/NvFoundation/1.1/include/NvSimpleTypes.h b/external/NvFoundation/1.1/include/NvSimpleTypes.h
new file mode 100644
index 0000000..8041ceb
--- /dev/null
+++ 
b/external/NvFoundation/1.1/include/NvSimpleTypes.h @@ -0,0 +1,72 @@ +// This code contains NVIDIA Confidential Information and is disclosed to you +// under a form of NVIDIA software license agreement provided separately to you. +// +// Notice +// NVIDIA Corporation and its licensors retain all intellectual property and +// proprietary rights in and to this software and related documentation and +// any modifications thereto. Any use, reproduction, disclosure, or +// distribution of this software and related documentation without an express +// license agreement from NVIDIA Corporation is strictly prohibited. +// +// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES +// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO +// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT, +// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE. +// +// Information and code furnished is believed to be accurate and reliable. +// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such +// information or for any infringement of patents or other rights of third parties that may +// result from its use. No license is granted by implication or otherwise under any patent +// or patent rights of NVIDIA Corporation. Details are subject to change without notice. +// This code supersedes and replaces all information previously supplied. +// NVIDIA Corporation products are not authorized for use as critical +// components in life support devices or systems without express written approval of +// NVIDIA Corporation. +// +// Copyright (c) 2008-2014 NVIDIA Corporation. All rights reserved. +// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. +// Copyright (c) 2001-2004 NovodeX AG. All rights reserved. 
+ +#ifndef NV_NVFOUNDATION_NVSIMPLETYPES_H +#define NV_NVFOUNDATION_NVSIMPLETYPES_H + +/** \addtogroup foundation + @{ +*/ + +// Platform specific types: +// Design note: Its OK to use int for general loop variables and temps. + +#include "NvPreprocessor.h" +#if NV_VC +#pragma warning(push) +#pragma warning(disable : 4668) // suppressing warning generated by Microsoft Visual Studio when including this standard +// header +#endif + +#if NV_LINUX +#define __STDC_LIMIT_MACROS +#endif + +#include <stdint.h> +#if NV_VC +#pragma warning(pop) +#endif +// Type ranges + +// These are here because we sometimes have non-IEEE compliant platforms to deal with. +// Removal is under consideration (issue GWSD-34) + +#define NV_MAX_F32 3.4028234663852885981170418348452e+38F +// maximum possible float value +#define NV_MAX_F64 DBL_MAX // maximum possible double value + +#define NV_EPS_F32 FLT_EPSILON // maximum relative error of float rounding +#define NV_EPS_F64 DBL_EPSILON // maximum relative error of double rounding + +#define NV_MAX_REAL NV_MAX_F32 +#define NV_EPS_REAL NV_EPS_F32 +#define NV_NORMALIZATION_EPSILON float(1e-20f) + +/** @} */ +#endif // #ifndef NV_NVFOUNDATION_NVSIMPLETYPES_H diff --git a/external/NvFoundation/1.1/include/NvTransform.h b/external/NvFoundation/1.1/include/NvTransform.h new file mode 100644 index 0000000..e15cd08 --- /dev/null +++ b/external/NvFoundation/1.1/include/NvTransform.h @@ -0,0 +1,215 @@ +// This code contains NVIDIA Confidential Information and is disclosed to you +// under a form of NVIDIA software license agreement provided separately to you. +// +// Notice +// NVIDIA Corporation and its licensors retain all intellectual property and +// proprietary rights in and to this software and related documentation and +// any modifications thereto. Any use, reproduction, disclosure, or +// distribution of this software and related documentation without an express +// license agreement from NVIDIA Corporation is strictly prohibited. 
+// +// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES +// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO +// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT, +// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE. +// +// Information and code furnished is believed to be accurate and reliable. +// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such +// information or for any infringement of patents or other rights of third parties that may +// result from its use. No license is granted by implication or otherwise under any patent +// or patent rights of NVIDIA Corporation. Details are subject to change without notice. +// This code supersedes and replaces all information previously supplied. +// NVIDIA Corporation products are not authorized for use as critical +// components in life support devices or systems without express written approval of +// NVIDIA Corporation. +// +// Copyright (c) 2008-2014 NVIDIA Corporation. All rights reserved. +// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. +// Copyright (c) 2001-2004 NovodeX AG. All rights reserved. + +#ifndef NV_NVFOUNDATION_NVTRANSFORM_H +#define NV_NVFOUNDATION_NVTRANSFORM_H +/** \addtogroup foundation + @{ +*/ + +#include "NvQuat.h" +#include "NvPlane.h" + +#if !NV_DOXYGEN +namespace nvidia +{ +#endif + +/*! 
+\brief class representing a rigid euclidean transform as a quaternion and a vector +*/ + +class NvTransform +{ + public: + NvQuat q; + NvVec3 p; + + NV_CUDA_CALLABLE NV_FORCE_INLINE NvTransform() + { + } + + NV_CUDA_CALLABLE NV_FORCE_INLINE explicit NvTransform(const NvVec3& position) : q(NvIdentity), p(position) + { + } + + NV_CUDA_CALLABLE NV_FORCE_INLINE explicit NvTransform(NvIDENTITY r) : q(NvIdentity), p(NvZero) + { + NV_UNUSED(r); + } + + NV_CUDA_CALLABLE NV_FORCE_INLINE explicit NvTransform(const NvQuat& orientation) : q(orientation), p(0) + { + NV_ASSERT(orientation.isSane()); + } + + NV_CUDA_CALLABLE NV_FORCE_INLINE NvTransform(float x, float y, float z, NvQuat aQ = NvQuat(NvIdentity)) + : q(aQ), p(x, y, z) + { + } + + NV_CUDA_CALLABLE NV_FORCE_INLINE NvTransform(const NvVec3& p0, const NvQuat& q0) : q(q0), p(p0) + { + NV_ASSERT(q0.isSane()); + } + + NV_CUDA_CALLABLE NV_FORCE_INLINE explicit NvTransform(const NvMat44& m); // defined in NvMat44.h + + /** + \brief returns true if the two transforms are exactly equal + */ + NV_CUDA_CALLABLE NV_INLINE bool operator==(const NvTransform& t) const + { + return p == t.p && q == t.q; + } + + NV_CUDA_CALLABLE NV_FORCE_INLINE NvTransform operator*(const NvTransform& x) const + { + NV_ASSERT(x.isSane()); + return transform(x); + } + + //! 
Equals matrix multiplication + NV_CUDA_CALLABLE NV_INLINE NvTransform& operator*=(NvTransform& other) + { + *this = *this * other; + return *this; + } + + NV_CUDA_CALLABLE NV_FORCE_INLINE NvTransform getInverse() const + { + NV_ASSERT(isFinite()); + return NvTransform(q.rotateInv(-p), q.getConjugate()); + } + + NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec3 transform(const NvVec3& input) const + { + NV_ASSERT(isFinite()); + return q.rotate(input) + p; + } + + NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec3 transformInv(const NvVec3& input) const + { + NV_ASSERT(isFinite()); + return q.rotateInv(input - p); + } + + NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec3 rotate(const NvVec3& input) const + { + NV_ASSERT(isFinite()); + return q.rotate(input); + } + + NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec3 rotateInv(const NvVec3& input) const + { + NV_ASSERT(isFinite()); + return q.rotateInv(input); + } + + //! Transform transform to parent (returns compound transform: first src, then *this) + NV_CUDA_CALLABLE NV_FORCE_INLINE NvTransform transform(const NvTransform& src) const + { + NV_ASSERT(src.isSane()); + NV_ASSERT(isSane()); + // src = [srct, srcr] -> [r*srct + t, r*srcr] + return NvTransform(q.rotate(src.p) + p, q * src.q); + } + + /** + \brief returns true if finite and q is a unit quaternion + */ + + NV_CUDA_CALLABLE bool isValid() const + { + return p.isFinite() && q.isFinite() && q.isUnit(); + } + + /** + \brief returns true if finite and quat magnitude is reasonably close to unit to allow for some accumulation of error + vs isValid + */ + + NV_CUDA_CALLABLE bool isSane() const + { + return isFinite() && q.isSane(); + } + + /** + \brief returns true if all elems are finite (not NAN or INF, etc.) + */ + NV_CUDA_CALLABLE NV_FORCE_INLINE bool isFinite() const + { + return p.isFinite() && q.isFinite(); + } + + //! 
Transform transform from parent (returns compound transform: first src, then this->inverse) + NV_CUDA_CALLABLE NV_FORCE_INLINE NvTransform transformInv(const NvTransform& src) const + { + NV_ASSERT(src.isSane()); + NV_ASSERT(isFinite()); + // src = [srct, srcr] -> [r^-1*(srct-t), r^-1*srcr] + NvQuat qinv = q.getConjugate(); + return NvTransform(qinv.rotate(src.p - p), qinv * src.q); + } + + /** + \brief transform plane + */ + + NV_CUDA_CALLABLE NV_FORCE_INLINE NvPlane transform(const NvPlane& plane) const + { + NvVec3 transformedNormal = rotate(plane.n); + return NvPlane(transformedNormal, plane.d - p.dot(transformedNormal)); + } + + /** + \brief inverse-transform plane + */ + + NV_CUDA_CALLABLE NV_FORCE_INLINE NvPlane inverseTransform(const NvPlane& plane) const + { + NvVec3 transformedNormal = rotateInv(plane.n); + return NvPlane(transformedNormal, plane.d + p.dot(plane.n)); + } + + /** + \brief return a normalized transform (i.e. one in which the quaternion has unit magnitude) + */ + NV_CUDA_CALLABLE NV_FORCE_INLINE NvTransform getNormalized() const + { + return NvTransform(p, q.getNormalized()); + } +}; + +#if !NV_DOXYGEN +} // namespace nvidia +#endif + +/** @} */ +#endif // #ifndef NV_NVFOUNDATION_NVTRANSFORM_H diff --git a/external/NvFoundation/1.1/include/NvVec2.h b/external/NvFoundation/1.1/include/NvVec2.h new file mode 100644 index 0000000..49d502e --- /dev/null +++ b/external/NvFoundation/1.1/include/NvVec2.h @@ -0,0 +1,347 @@ +// This code contains NVIDIA Confidential Information and is disclosed to you +// under a form of NVIDIA software license agreement provided separately to you. +// +// Notice +// NVIDIA Corporation and its licensors retain all intellectual property and +// proprietary rights in and to this software and related documentation and +// any modifications thereto. 
Any use, reproduction, disclosure, or +// distribution of this software and related documentation without an express +// license agreement from NVIDIA Corporation is strictly prohibited. +// +// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES +// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO +// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT, +// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE. +// +// Information and code furnished is believed to be accurate and reliable. +// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such +// information or for any infringement of patents or other rights of third parties that may +// result from its use. No license is granted by implication or otherwise under any patent +// or patent rights of NVIDIA Corporation. Details are subject to change without notice. +// This code supersedes and replaces all information previously supplied. +// NVIDIA Corporation products are not authorized for use as critical +// components in life support devices or systems without express written approval of +// NVIDIA Corporation. +// +// Copyright (c) 2008-2014 NVIDIA Corporation. All rights reserved. +// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. +// Copyright (c) 2001-2004 NovodeX AG. All rights reserved. + +#ifndef NV_NVFOUNDATION_NVVEC2_H +#define NV_NVFOUNDATION_NVVEC2_H + +/** \addtogroup foundation +@{ +*/ + +#include "NvMath.h" + +#if !NV_DOXYGEN +namespace nvidia +{ +#endif + +/** +\brief 2 Element vector class. + +This is a 2-dimensional vector class with public data members. +*/ +class NvVec2 +{ + public: + /** + \brief default constructor leaves data uninitialized. + */ + NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec2() + { + } + + /** + \brief zero constructor. 
+ */ + NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec2(NvZERO r) : x(0.0f), y(0.0f) + { + NV_UNUSED(r); + } + + /** + \brief Assigns scalar parameter to all elements. + + Useful to initialize to zero or one. + + \param[in] a Value to assign to elements. + */ + explicit NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec2(float a) : x(a), y(a) + { + } + + /** + \brief Initializes from 2 scalar parameters. + + \param[in] nx Value to initialize X component. + \param[in] ny Value to initialize Y component. + */ + NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec2(float nx, float ny) : x(nx), y(ny) + { + } + + /** + \brief Copy ctor. + */ + NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec2(const NvVec2& v) : x(v.x), y(v.y) + { + } + + // Operators + + /** + \brief Assignment operator + */ + NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec2& operator=(const NvVec2& p) + { + x = p.x; + y = p.y; + return *this; + } + + /** + \brief element access + */ + NV_DEPRECATED NV_CUDA_CALLABLE NV_FORCE_INLINE float& operator[](int index) + { + NV_ASSERT(index >= 0 && index <= 1); + + return reinterpret_cast<float*>(this)[index]; + } + + /** + \brief element access + */ + NV_DEPRECATED NV_CUDA_CALLABLE NV_FORCE_INLINE const float& operator[](int index) const + { + NV_ASSERT(index >= 0 && index <= 1); + + return reinterpret_cast<const float*>(this)[index]; + } + + /** + \brief returns true if the two vectors are exactly equal. + */ + NV_CUDA_CALLABLE NV_FORCE_INLINE bool operator==(const NvVec2& v) const + { + return x == v.x && y == v.y; + } + + /** + \brief returns true if the two vectors are not exactly equal. + */ + NV_CUDA_CALLABLE NV_FORCE_INLINE bool operator!=(const NvVec2& v) const + { + return x != v.x || y != v.y; + } + + /** + \brief tests for exact zero vector + */ + NV_CUDA_CALLABLE NV_FORCE_INLINE bool isZero() const + { + return x == 0.0f && y == 0.0f; + } + + /** + \brief returns true if all 2 elems of the vector are finite (not NAN or INF, etc.) 
+ */ + NV_CUDA_CALLABLE NV_INLINE bool isFinite() const + { + return NvIsFinite(x) && NvIsFinite(y); + } + + /** + \brief is normalized - used by API parameter validation + */ + NV_CUDA_CALLABLE NV_FORCE_INLINE bool isNormalized() const + { + const float unitTolerance = 1e-4f; + return isFinite() && NvAbs(magnitude() - 1) < unitTolerance; + } + + /** + \brief returns the squared magnitude + + Avoids calling NvSqrt()! + */ + NV_CUDA_CALLABLE NV_FORCE_INLINE float magnitudeSquared() const + { + return x * x + y * y; + } + + /** + \brief returns the magnitude + */ + NV_CUDA_CALLABLE NV_FORCE_INLINE float magnitude() const + { + return NvSqrt(magnitudeSquared()); + } + + /** + \brief negation + */ + NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec2 operator-() const + { + return NvVec2(-x, -y); + } + + /** + \brief vector addition + */ + NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec2 operator+(const NvVec2& v) const + { + return NvVec2(x + v.x, y + v.y); + } + + /** + \brief vector difference + */ + NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec2 operator-(const NvVec2& v) const + { + return NvVec2(x - v.x, y - v.y); + } + + /** + \brief scalar post-multiplication + */ + NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec2 operator*(float f) const + { + return NvVec2(x * f, y * f); + } + + /** + \brief scalar division + */ + NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec2 operator/(float f) const + { + f = 1.0f / f; // PT: inconsistent notation with operator /= + return NvVec2(x * f, y * f); + } + + /** + \brief vector addition + */ + NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec2& operator+=(const NvVec2& v) + { + x += v.x; + y += v.y; + return *this; + } + + /** + \brief vector difference + */ + NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec2& operator-=(const NvVec2& v) + { + x -= v.x; + y -= v.y; + return *this; + } + + /** + \brief scalar multiplication + */ + NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec2& operator*=(float f) + { + x *= f; + y *= f; + return *this; + } + /** + \brief scalar division + */ + NV_CUDA_CALLABLE 
NV_FORCE_INLINE NvVec2& operator/=(float f) + { + f = 1.0f / f; // PT: inconsistent notation with operator / + x *= f; + y *= f; + return *this; + } + + /** + \brief returns the scalar product of this and other. + */ + NV_CUDA_CALLABLE NV_FORCE_INLINE float dot(const NvVec2& v) const + { + return x * v.x + y * v.y; + } + + /** return a unit vector */ + + NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec2 getNormalized() const + { + const float m = magnitudeSquared(); + return m > 0.0f ? *this * NvRecipSqrt(m) : NvVec2(0, 0); + } + + /** + \brief normalizes the vector in place + */ + NV_CUDA_CALLABLE NV_FORCE_INLINE float normalize() + { + const float m = magnitude(); + if(m > 0.0f) + *this /= m; + return m; + } + + /** + \brief a[i] * b[i], for all i. + */ + NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec2 multiply(const NvVec2& a) const + { + return NvVec2(x * a.x, y * a.y); + } + + /** + \brief element-wise minimum + */ + NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec2 minimum(const NvVec2& v) const + { + return NvVec2(NvMin(x, v.x), NvMin(y, v.y)); + } + + /** + \brief returns MIN(x, y); + */ + NV_CUDA_CALLABLE NV_FORCE_INLINE float minElement() const + { + return NvMin(x, y); + } + + /** + \brief element-wise maximum + */ + NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec2 maximum(const NvVec2& v) const + { + return NvVec2(NvMax(x, v.x), NvMax(y, v.y)); + } + + /** + \brief returns MAX(x, y); + */ + NV_CUDA_CALLABLE NV_FORCE_INLINE float maxElement() const + { + return NvMax(x, y); + } + + float x, y; +}; + +NV_CUDA_CALLABLE static NV_FORCE_INLINE NvVec2 operator*(float f, const NvVec2& v) +{ + return NvVec2(f * v.x, f * v.y); +} + +#if !NV_DOXYGEN +} // namespace nvidia +#endif + +/** @} */ +#endif // #ifndef NV_NVFOUNDATION_NVVEC2_H diff --git a/external/NvFoundation/1.1/include/NvVec3.h b/external/NvFoundation/1.1/include/NvVec3.h new file mode 100644 index 0000000..137fb8b --- /dev/null +++ b/external/NvFoundation/1.1/include/NvVec3.h @@ -0,0 +1,393 @@ +// This code contains NVIDIA Confidential 
Information and is disclosed to you +// under a form of NVIDIA software license agreement provided separately to you. +// +// Notice +// NVIDIA Corporation and its licensors retain all intellectual property and +// proprietary rights in and to this software and related documentation and +// any modifications thereto. Any use, reproduction, disclosure, or +// distribution of this software and related documentation without an express +// license agreement from NVIDIA Corporation is strictly prohibited. +// +// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES +// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO +// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT, +// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE. +// +// Information and code furnished is believed to be accurate and reliable. +// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such +// information or for any infringement of patents or other rights of third parties that may +// result from its use. No license is granted by implication or otherwise under any patent +// or patent rights of NVIDIA Corporation. Details are subject to change without notice. +// This code supersedes and replaces all information previously supplied. +// NVIDIA Corporation products are not authorized for use as critical +// components in life support devices or systems without express written approval of +// NVIDIA Corporation. +// +// Copyright (c) 2008-2014 NVIDIA Corporation. All rights reserved. +// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. +// Copyright (c) 2001-2004 NovodeX AG. All rights reserved. + +#ifndef NV_NVFOUNDATION_NVVEC3_H +#define NV_NVFOUNDATION_NVVEC3_H + +/** \addtogroup foundation +@{ +*/ + +#include "NvMath.h" + +#if !NV_DOXYGEN +namespace nvidia +{ +#endif + +/** +\brief 3 Element vector class. 
+ +This is a 3-dimensional vector class with public data members. +*/ +class NvVec3 +{ + public: + /** + \brief default constructor leaves data uninitialized. + */ + NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec3() + { + } + + /** + \brief zero constructor. + */ + NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec3(NvZERO r) : x(0.0f), y(0.0f), z(0.0f) + { + NV_UNUSED(r); + } + + /** + \brief Assigns scalar parameter to all elements. + + Useful to initialize to zero or one. + + \param[in] a Value to assign to elements. + */ + explicit NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec3(float a) : x(a), y(a), z(a) + { + } + + /** + \brief Initializes from 3 scalar parameters. + + \param[in] nx Value to initialize X component. + \param[in] ny Value to initialize Y component. + \param[in] nz Value to initialize Z component. + */ + NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec3(float nx, float ny, float nz) : x(nx), y(ny), z(nz) + { + } + + /** + \brief Copy ctor. + */ + NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec3(const NvVec3& v) : x(v.x), y(v.y), z(v.z) + { + } + + // Operators + + /** + \brief Assignment operator + */ + NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec3& operator=(const NvVec3& p) + { + x = p.x; + y = p.y; + z = p.z; + return *this; + } + + /** + \brief element access + */ + NV_DEPRECATED NV_CUDA_CALLABLE NV_FORCE_INLINE float& operator[](unsigned int index) + { + NV_ASSERT(index <= 2); + + return reinterpret_cast<float*>(this)[index]; + } + + /** + \brief element access + */ + NV_DEPRECATED NV_CUDA_CALLABLE NV_FORCE_INLINE const float& operator[](unsigned int index) const + { + NV_ASSERT(index <= 2); + + return reinterpret_cast<const float*>(this)[index]; + } + /** + \brief returns true if the two vectors are exactly equal. + */ + NV_CUDA_CALLABLE NV_FORCE_INLINE bool operator==(const NvVec3& v) const + { + return x == v.x && y == v.y && z == v.z; + } + + /** + \brief returns true if the two vectors are not exactly equal. 
+ */ + NV_CUDA_CALLABLE NV_FORCE_INLINE bool operator!=(const NvVec3& v) const + { + return x != v.x || y != v.y || z != v.z; + } + + /** + \brief tests for exact zero vector + */ + NV_CUDA_CALLABLE NV_FORCE_INLINE bool isZero() const + { + return x == 0.0f && y == 0.0f && z == 0.0f; + } + + /** + \brief returns true if all 3 elems of the vector are finite (not NAN or INF, etc.) + */ + NV_CUDA_CALLABLE NV_INLINE bool isFinite() const + { + return NvIsFinite(x) && NvIsFinite(y) && NvIsFinite(z); + } + + /** + \brief is normalized - used by API parameter validation + */ + NV_CUDA_CALLABLE NV_FORCE_INLINE bool isNormalized() const + { + const float unitTolerance = 1e-4f; + return isFinite() && NvAbs(magnitude() - 1) < unitTolerance; + } + + /** + \brief returns the squared magnitude + + Avoids calling NvSqrt()! + */ + NV_CUDA_CALLABLE NV_FORCE_INLINE float magnitudeSquared() const + { + return x * x + y * y + z * z; + } + + /** + \brief returns the magnitude + */ + NV_CUDA_CALLABLE NV_FORCE_INLINE float magnitude() const + { + return NvSqrt(magnitudeSquared()); + } + + /** + \brief negation + */ + NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec3 operator-() const + { + return NvVec3(-x, -y, -z); + } + + /** + \brief vector addition + */ + NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec3 operator+(const NvVec3& v) const + { + return NvVec3(x + v.x, y + v.y, z + v.z); + } + + /** + \brief vector difference + */ + NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec3 operator-(const NvVec3& v) const + { + return NvVec3(x - v.x, y - v.y, z - v.z); + } + + /** + \brief scalar post-multiplication + */ + NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec3 operator*(float f) const + { + return NvVec3(x * f, y * f, z * f); + } + + /** + \brief scalar division + */ + NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec3 operator/(float f) const + { + f = 1.0f / f; + return NvVec3(x * f, y * f, z * f); + } + + /** + \brief vector addition + */ + NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec3& operator+=(const NvVec3& v) + { + x += v.x; + y += 
v.y; + z += v.z; + return *this; + } + + /** + \brief vector difference + */ + NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec3& operator-=(const NvVec3& v) + { + x -= v.x; + y -= v.y; + z -= v.z; + return *this; + } + + /** + \brief scalar multiplication + */ + NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec3& operator*=(float f) + { + x *= f; + y *= f; + z *= f; + return *this; + } + /** + \brief scalar division + */ + NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec3& operator/=(float f) + { + f = 1.0f / f; + x *= f; + y *= f; + z *= f; + return *this; + } + + /** + \brief returns the scalar product of this and other. + */ + NV_CUDA_CALLABLE NV_FORCE_INLINE float dot(const NvVec3& v) const + { + return x * v.x + y * v.y + z * v.z; + } + + /** + \brief cross product + */ + NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec3 cross(const NvVec3& v) const + { + return NvVec3(y * v.z - z * v.y, z * v.x - x * v.z, x * v.y - y * v.x); + } + + /** return a unit vector */ + + NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec3 getNormalized() const + { + const float m = magnitudeSquared(); + return m > 0.0f ? *this * NvRecipSqrt(m) : NvVec3(0, 0, 0); + } + + /** + \brief normalizes the vector in place + */ + NV_CUDA_CALLABLE NV_FORCE_INLINE float normalize() + { + const float m = magnitude(); + if(m > 0.0f) + *this /= m; + return m; + } + + /** + \brief normalizes the vector in place. Does nothing if vector magnitude is under NV_NORMALIZATION_EPSILON. + Returns vector magnitude if >= NV_NORMALIZATION_EPSILON and 0.0f otherwise. + */ + NV_CUDA_CALLABLE NV_FORCE_INLINE float normalizeSafe() + { + const float mag = magnitude(); + if(mag < NV_NORMALIZATION_EPSILON) + return 0.0f; + *this *= 1.0f / mag; + return mag; + } + + /** + \brief normalizes the vector in place. Asserts if vector magnitude is under NV_NORMALIZATION_EPSILON. + returns vector magnitude. 
+ */ + NV_CUDA_CALLABLE NV_FORCE_INLINE float normalizeFast() + { + const float mag = magnitude(); + NV_ASSERT(mag >= NV_NORMALIZATION_EPSILON); + *this *= 1.0f / mag; + return mag; + } + + /** + \brief a[i] * b[i], for all i. + */ + NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec3 multiply(const NvVec3& a) const + { + return NvVec3(x * a.x, y * a.y, z * a.z); + } + + /** + \brief element-wise minimum + */ + NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec3 minimum(const NvVec3& v) const + { + return NvVec3(NvMin(x, v.x), NvMin(y, v.y), NvMin(z, v.z)); + } + + /** + \brief returns MIN(x, y, z); + */ + NV_CUDA_CALLABLE NV_FORCE_INLINE float minElement() const + { + return NvMin(x, NvMin(y, z)); + } + + /** + \brief element-wise maximum + */ + NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec3 maximum(const NvVec3& v) const + { + return NvVec3(NvMax(x, v.x), NvMax(y, v.y), NvMax(z, v.z)); + } + + /** + \brief returns MAX(x, y, z); + */ + NV_CUDA_CALLABLE NV_FORCE_INLINE float maxElement() const + { + return NvMax(x, NvMax(y, z)); + } + + /** + \brief returns absolute values of components; + */ + NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec3 abs() const + { + return NvVec3(NvAbs(x), NvAbs(y), NvAbs(z)); + } + + float x, y, z; +}; + +NV_CUDA_CALLABLE static NV_FORCE_INLINE NvVec3 operator*(float f, const NvVec3& v) +{ + return NvVec3(f * v.x, f * v.y, f * v.z); +} + +#if !NV_DOXYGEN +} // namespace nvidia +#endif + +/** @} */ +#endif // #ifndef NV_NVFOUNDATION_NVVEC3_H diff --git a/external/NvFoundation/1.1/include/NvVec4.h b/external/NvFoundation/1.1/include/NvVec4.h new file mode 100644 index 0000000..6eaf10b --- /dev/null +++ b/external/NvFoundation/1.1/include/NvVec4.h @@ -0,0 +1,376 @@ +// This code contains NVIDIA Confidential Information and is disclosed to you +// under a form of NVIDIA software license agreement provided separately to you. 
+// +// Notice +// NVIDIA Corporation and its licensors retain all intellectual property and +// proprietary rights in and to this software and related documentation and +// any modifications thereto. Any use, reproduction, disclosure, or +// distribution of this software and related documentation without an express +// license agreement from NVIDIA Corporation is strictly prohibited. +// +// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES +// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO +// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT, +// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE. +// +// Information and code furnished is believed to be accurate and reliable. +// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such +// information or for any infringement of patents or other rights of third parties that may +// result from its use. No license is granted by implication or otherwise under any patent +// or patent rights of NVIDIA Corporation. Details are subject to change without notice. +// This code supersedes and replaces all information previously supplied. +// NVIDIA Corporation products are not authorized for use as critical +// components in life support devices or systems without express written approval of +// NVIDIA Corporation. +// +// Copyright (c) 2008-2014 NVIDIA Corporation. All rights reserved. +// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. +// Copyright (c) 2001-2004 NovodeX AG. All rights reserved. + +#ifndef NV_NVFOUNDATION_NVVEC4_H +#define NV_NVFOUNDATION_NVVEC4_H +/** \addtogroup foundation +@{ +*/ +#include "NvMath.h" +#include "NvVec3.h" +#include "NvAssert.h" + +/** +\brief 4 Element vector class. + +This is a 4-dimensional vector class with public data members. 
+*/ +#if !NV_DOXYGEN +namespace nvidia +{ +#endif + +class NvVec4 +{ + public: + /** + \brief default constructor leaves data uninitialized. + */ + NV_CUDA_CALLABLE NV_INLINE NvVec4() + { + } + + /** + \brief zero constructor. + */ + NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec4(NvZERO r) : x(0.0f), y(0.0f), z(0.0f), w(0.0f) + { + NV_UNUSED(r); + } + + /** + \brief Assigns scalar parameter to all elements. + + Useful to initialize to zero or one. + + \param[in] a Value to assign to elements. + */ + explicit NV_CUDA_CALLABLE NV_INLINE NvVec4(float a) : x(a), y(a), z(a), w(a) + { + } + + /** + \brief Initializes from 3 scalar parameters. + + \param[in] nx Value to initialize X component. + \param[in] ny Value to initialize Y component. + \param[in] nz Value to initialize Z component. + \param[in] nw Value to initialize W component. + */ + NV_CUDA_CALLABLE NV_INLINE NvVec4(float nx, float ny, float nz, float nw) : x(nx), y(ny), z(nz), w(nw) + { + } + + /** + \brief Initializes from 3 scalar parameters. + + \param[in] v Value to initialize the X, Y, and Z components. + \param[in] nw Value to initialize W component. + */ + NV_CUDA_CALLABLE NV_INLINE NvVec4(const NvVec3& v, float nw) : x(v.x), y(v.y), z(v.z), w(nw) + { + } + + /** + \brief Initializes from an array of scalar parameters. + + \param[in] v Value to initialize with. + */ + explicit NV_CUDA_CALLABLE NV_INLINE NvVec4(const float v[]) : x(v[0]), y(v[1]), z(v[2]), w(v[3]) + { + } + + /** + \brief Copy ctor. 
+ */ + NV_CUDA_CALLABLE NV_INLINE NvVec4(const NvVec4& v) : x(v.x), y(v.y), z(v.z), w(v.w) + { + } + + // Operators + + /** + \brief Assignment operator + */ + NV_CUDA_CALLABLE NV_INLINE NvVec4& operator=(const NvVec4& p) + { + x = p.x; + y = p.y; + z = p.z; + w = p.w; + return *this; + } + + /** + \brief element access + */ + NV_DEPRECATED NV_CUDA_CALLABLE NV_INLINE float& operator[](unsigned int index) + { + NV_ASSERT(index <= 3); + + return reinterpret_cast<float*>(this)[index]; + } + + /** + \brief element access + */ + NV_DEPRECATED NV_CUDA_CALLABLE NV_INLINE const float& operator[](unsigned int index) const + { + NV_ASSERT(index <= 3); + + return reinterpret_cast<const float*>(this)[index]; + } + + /** + \brief returns true if the two vectors are exactly equal. + */ + NV_CUDA_CALLABLE NV_INLINE bool operator==(const NvVec4& v) const + { + return x == v.x && y == v.y && z == v.z && w == v.w; + } + + /** + \brief returns true if the two vectors are not exactly equal. + */ + NV_CUDA_CALLABLE NV_INLINE bool operator!=(const NvVec4& v) const + { + return x != v.x || y != v.y || z != v.z || w != v.w; + } + + /** + \brief tests for exact zero vector + */ + NV_CUDA_CALLABLE NV_INLINE bool isZero() const + { + return x == 0 && y == 0 && z == 0 && w == 0; + } + + /** + \brief returns true if all 3 elems of the vector are finite (not NAN or INF, etc.) + */ + NV_CUDA_CALLABLE NV_INLINE bool isFinite() const + { + return NvIsFinite(x) && NvIsFinite(y) && NvIsFinite(z) && NvIsFinite(w); + } + + /** + \brief is normalized - used by API parameter validation + */ + NV_CUDA_CALLABLE NV_INLINE bool isNormalized() const + { + const float unitTolerance = 1e-4f; + return isFinite() && NvAbs(magnitude() - 1) < unitTolerance; + } + + /** + \brief returns the squared magnitude + + Avoids calling NvSqrt()! 
+ */ + NV_CUDA_CALLABLE NV_INLINE float magnitudeSquared() const + { + return x * x + y * y + z * z + w * w; + } + + /** + \brief returns the magnitude + */ + NV_CUDA_CALLABLE NV_INLINE float magnitude() const + { + return NvSqrt(magnitudeSquared()); + } + + /** + \brief negation + */ + NV_CUDA_CALLABLE NV_INLINE NvVec4 operator-() const + { + return NvVec4(-x, -y, -z, -w); + } + + /** + \brief vector addition + */ + NV_CUDA_CALLABLE NV_INLINE NvVec4 operator+(const NvVec4& v) const + { + return NvVec4(x + v.x, y + v.y, z + v.z, w + v.w); + } + + /** + \brief vector difference + */ + NV_CUDA_CALLABLE NV_INLINE NvVec4 operator-(const NvVec4& v) const + { + return NvVec4(x - v.x, y - v.y, z - v.z, w - v.w); + } + + /** + \brief scalar post-multiplication + */ + + NV_CUDA_CALLABLE NV_INLINE NvVec4 operator*(float f) const + { + return NvVec4(x * f, y * f, z * f, w * f); + } + + /** + \brief scalar division + */ + NV_CUDA_CALLABLE NV_INLINE NvVec4 operator/(float f) const + { + f = 1.0f / f; + return NvVec4(x * f, y * f, z * f, w * f); + } + + /** + \brief vector addition + */ + NV_CUDA_CALLABLE NV_INLINE NvVec4& operator+=(const NvVec4& v) + { + x += v.x; + y += v.y; + z += v.z; + w += v.w; + return *this; + } + + /** + \brief vector difference + */ + NV_CUDA_CALLABLE NV_INLINE NvVec4& operator-=(const NvVec4& v) + { + x -= v.x; + y -= v.y; + z -= v.z; + w -= v.w; + return *this; + } + + /** + \brief scalar multiplication + */ + NV_CUDA_CALLABLE NV_INLINE NvVec4& operator*=(float f) + { + x *= f; + y *= f; + z *= f; + w *= f; + return *this; + } + /** + \brief scalar division + */ + NV_CUDA_CALLABLE NV_INLINE NvVec4& operator/=(float f) + { + f = 1.0f / f; + x *= f; + y *= f; + z *= f; + w *= f; + return *this; + } + + /** + \brief returns the scalar product of this and other. 
+ */ + NV_CUDA_CALLABLE NV_INLINE float dot(const NvVec4& v) const + { + return x * v.x + y * v.y + z * v.z + w * v.w; + } + + /** return a unit vector */ + + NV_CUDA_CALLABLE NV_INLINE NvVec4 getNormalized() const + { + float m = magnitudeSquared(); + return m > 0.0f ? *this * NvRecipSqrt(m) : NvVec4(0, 0, 0, 0); + } + + /** + \brief normalizes the vector in place + */ + NV_CUDA_CALLABLE NV_INLINE float normalize() + { + float m = magnitude(); + if(m > 0.0f) + *this /= m; + return m; + } + + /** + \brief a[i] * b[i], for all i. + */ + NV_CUDA_CALLABLE NV_INLINE NvVec4 multiply(const NvVec4& a) const + { + return NvVec4(x * a.x, y * a.y, z * a.z, w * a.w); + } + + /** + \brief element-wise minimum + */ + NV_CUDA_CALLABLE NV_INLINE NvVec4 minimum(const NvVec4& v) const + { + return NvVec4(NvMin(x, v.x), NvMin(y, v.y), NvMin(z, v.z), NvMin(w, v.w)); + } + + /** + \brief element-wise maximum + */ + NV_CUDA_CALLABLE NV_INLINE NvVec4 maximum(const NvVec4& v) const + { + return NvVec4(NvMax(x, v.x), NvMax(y, v.y), NvMax(z, v.z), NvMax(w, v.w)); + } + + NV_CUDA_CALLABLE NV_INLINE NvVec3 getXYZ() const + { + return NvVec3(x, y, z); + } + + /** + \brief set vector elements to zero + */ + NV_CUDA_CALLABLE NV_INLINE void setZero() + { + x = y = z = w = 0.0f; + } + + float x, y, z, w; +}; + +NV_CUDA_CALLABLE static NV_INLINE NvVec4 operator*(float f, const NvVec4& v) +{ + return NvVec4(f * v.x, f * v.y, f * v.z, f * v.w); +} + +#if !NV_DOXYGEN +} // namespace nvidia +#endif + +/** @} */ +#endif // #ifndef NV_NVFOUNDATION_NVVEC4_H diff --git a/external/NvFoundation/1.1/include/ps3/NvPS3Assert.h b/external/NvFoundation/1.1/include/ps3/NvPS3Assert.h new file mode 100644 index 0000000..52240b9 --- /dev/null +++ b/external/NvFoundation/1.1/include/ps3/NvPS3Assert.h @@ -0,0 +1,55 @@ +// This code contains NVIDIA Confidential Information and is disclosed to you +// under a form of NVIDIA software license agreement provided separately to you. 
+// +// Notice +// NVIDIA Corporation and its licensors retain all intellectual property and +// proprietary rights in and to this software and related documentation and +// any modifications thereto. Any use, reproduction, disclosure, or +// distribution of this software and related documentation without an express +// license agreement from NVIDIA Corporation is strictly prohibited. +// +// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES +// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO +// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT, +// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE. +// +// Information and code furnished is believed to be accurate and reliable. +// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such +// information or for any infringement of patents or other rights of third parties that may +// result from its use. No license is granted by implication or otherwise under any patent +// or patent rights of NVIDIA Corporation. Details are subject to change without notice. +// This code supersedes and replaces all information previously supplied. +// NVIDIA Corporation products are not authorized for use as critical +// components in life support devices or systems without express written approval of +// NVIDIA Corporation. +// +// Copyright (c) 2008-2014 NVIDIA Corporation. All rights reserved. +// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. +// Copyright (c) 2001-2004 NovodeX AG. All rights reserved. +// SCE CONFIDENTIAL +// Copyright (C) Sony Computer Entertainment Inc. +// All Rights Reserved. + +#ifndef NV_PS3_NVPS3ASSERT_H +#define NV_PS3_NVPS3ASSERT_H + +#include <NvFoundation/NvPreprocessor.h> + +#ifdef NV_SPU +#include "spu_printf.h" + +namespace nvidia +{ +NV_INLINE void NvPs3Assert(const char* exp, const char* file, int line) +{ + spu_printf("SPU: Assertion failed! 
exp %s \n, line %d \n, file %s \n ", exp, line, file); + __builtin_snpause(); +} +} + +#define NV_ASSERT(exp) ((void)(!!(exp) || (nvidia::NvPs3Assert(#exp, __FILE__, __LINE__), false))) +#define NV_ALWAYS_ASSERT_MESSAGE(exp) nvidia::NvPs3Assert(exp, __FILE__, __LINE__) +#define NV_ASSERT_WITH_MESSAGE(exp, message) \ + ((void)(!!(exp) || (nvidia::NvPs3Assert(message, __FILE__, __LINE__), false))) +#endif // NV_SPU +#endif // #ifndef NV_PS3_NVPS3ASSERT_H diff --git a/external/NvFoundation/1.1/include/ps3/NvPS3Error.h b/external/NvFoundation/1.1/include/ps3/NvPS3Error.h new file mode 100644 index 0000000..6f06da3 --- /dev/null +++ b/external/NvFoundation/1.1/include/ps3/NvPS3Error.h @@ -0,0 +1,51 @@ +// This code contains NVIDIA Confidential Information and is disclosed to you +// under a form of NVIDIA software license agreement provided separately to you. +// +// Notice +// NVIDIA Corporation and its licensors retain all intellectual property and +// proprietary rights in and to this software and related documentation and +// any modifications thereto. Any use, reproduction, disclosure, or +// distribution of this software and related documentation without an express +// license agreement from NVIDIA Corporation is strictly prohibited. +// +// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES +// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO +// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT, +// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE. +// +// Information and code furnished is believed to be accurate and reliable. +// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such +// information or for any infringement of patents or other rights of third parties that may +// result from its use. No license is granted by implication or otherwise under any patent +// or patent rights of NVIDIA Corporation. 
Details are subject to change without notice. +// This code supersedes and replaces all information previously supplied. +// NVIDIA Corporation products are not authorized for use as critical +// components in life support devices or systems without express written approval of +// NVIDIA Corporation. +// +// Copyright (c) 2008-2014 NVIDIA Corporation. All rights reserved. +// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. +// Copyright (c) 2001-2004 NovodeX AG. All rights reserved. +// dsequeira: I expect these have to be inlined on SPU, elsewhere they shouldn't be. + +#ifndef NV_PS3_NVPS3ERROR_H +#define NV_PS3_NVPS3ERROR_H + +#include <NvFoundation/NvPreprocessor.h> + +#if defined(__CELLOS_LV2__) && defined(__SPU__) +#include <spu_printf.h> + +NV_FORCE_INLINE void NvcOutputDebugString(const char* str) +{ + spu_printf(str); +} + +NV_FORCE_INLINE void reportError(const char* error) +{ + spu_printf("Internal error: %s: \n", error); + NV_ASSERT(0); +} +#endif + +#endif // #ifndef NV_PS3_NVPS3ERROR_H diff --git a/external/NvFoundation/1.1/include/ps3/NvPS3Intrinsics.h b/external/NvFoundation/1.1/include/ps3/NvPS3Intrinsics.h new file mode 100644 index 0000000..dbef32a --- /dev/null +++ b/external/NvFoundation/1.1/include/ps3/NvPS3Intrinsics.h @@ -0,0 +1,219 @@ +// This code contains NVIDIA Confidential Information and is disclosed to you +// under a form of NVIDIA software license agreement provided separately to you. +// +// Notice +// NVIDIA Corporation and its licensors retain all intellectual property and +// proprietary rights in and to this software and related documentation and +// any modifications thereto. Any use, reproduction, disclosure, or +// distribution of this software and related documentation without an express +// license agreement from NVIDIA Corporation is strictly prohibited. +// +// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". 
NVIDIA MAKES +// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO +// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT, +// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE. +// +// Information and code furnished is believed to be accurate and reliable. +// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such +// information or for any infringement of patents or other rights of third parties that may +// result from its use. No license is granted by implication or otherwise under any patent +// or patent rights of NVIDIA Corporation. Details are subject to change without notice. +// This code supersedes and replaces all information previously supplied. +// NVIDIA Corporation products are not authorized for use as critical +// components in life support devices or systems without express written approval of +// NVIDIA Corporation. +// +// Copyright (c) 2008-2014 NVIDIA Corporation. All rights reserved. +// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. +// Copyright (c) 2001-2004 NovodeX AG. All rights reserved. +#ifndef NV_PS3_NVPS3INTRINSICS_H +#define NV_PS3_NVPS3INTRINSICS_H + +#include "Nv.h" +#include "NvAssert.h" + +#if !NV_PS3 +#error "This file should only be included by ps3 builds!!" +#endif + +#include <math.h> +#ifdef __SPU__ +#include "spu_intrinsics.h" +#else +#include "ppu_intrinsics.h" +#endif + +namespace nvidia +{ +namespace intrinsics +{ +//! \brief platform-specific absolute value +NV_FORCE_INLINE float abs(float a) +{ + return ::fabsf(a); +} + +//! \brief platform-specific select float +#ifdef __SPU__ +NV_FORCE_INLINE float fsel(float a, float b, float c) +{ + return (a >= 0 ? b : c); +} +#else +NV_FORCE_INLINE float fsel(float a, float b, float c) +{ + return __fsels(a, b, c); +} +#endif + +//! \brief platform-specific sign +#ifdef __SPU__ +NV_FORCE_INLINE float sign(float a) +{ + return (a >= 0 ? 
1.0f : -1.0f); +} +#else +NV_FORCE_INLINE float sign(float a) +{ + return __fsels(a, 1.0f, -1.0f); +} +#endif + +//! \brief platform-specific reciprocal +NV_FORCE_INLINE float recip(float a) +{ + return 1.0f / a; +} + +//! \brief platform-specific reciprocal estimate +#if defined(__SPU__) || !defined(_PPU_INTRINSICS_GCC_H) +NV_FORCE_INLINE float recipFast(float a) +{ + return 1.0f / a; +} +#else +NV_FORCE_INLINE float recipFast(float a) +{ + return __fres(a); +} +#endif + +//! \brief platform-specific square root +NV_FORCE_INLINE float sqrt(float a) +{ + return ::sqrtf(a); +} + +//! \brief platform-specific reciprocal square root +NV_FORCE_INLINE float recipSqrt(float a) +{ + return 1.0f / ::sqrtf(a); +} + +//! \brief platform-specific reciprocal square root estimate +#ifdef __SPU__ +NV_FORCE_INLINE float recipSqrtFast(float a) +{ + return 1.0f / ::sqrtf(a); +} +#else +NV_FORCE_INLINE float recipSqrtFast(float a) +{ + return float(__frsqrte(a)); +} +#endif + +//! \brief platform-specific sine +NV_FORCE_INLINE float sin(float a) +{ + return ::sinf(a); +} + +//! \brief platform-specific cosine +NV_FORCE_INLINE float cos(float a) +{ + return ::cosf(a); +} + +//! \brief platform-specific minimum +#ifdef __SPU__ +NV_FORCE_INLINE float selectMin(float a, float b) +{ + return (a >= b ? b : a); +} +#else +NV_FORCE_INLINE float selectMin(float a, float b) +{ + return __fsels(a - b, b, a); +} +#endif + +//! \brief platform-specific maximum +#ifdef __SPU__ +NV_FORCE_INLINE float selectMax(float a, float b) +{ + return (a >= b ? a : b); +} +#else +NV_FORCE_INLINE float selectMax(float a, float b) +{ + return __fsels(a - b, a, b); +} +#endif +//! \brief platform-specific finiteness check (not INF or NAN) +NV_FORCE_INLINE bool isFinite(float a) +{ + return !isnan(a) && !isinf(a); +} + +//! \brief platform-specific finiteness check (not INF or NAN) +NV_FORCE_INLINE bool isFinite(double a) +{ + return !isnan(a) && !isinf(a); +} + +/*! 
+Sets \c count bytes starting at \c dst to zero. +*/ +NV_FORCE_INLINE void* memZero(void* NV_RESTRICT dest, uint32_t count) +{ + return memset(dest, 0, count); +} + +/*! +Sets \c count bytes starting at \c dst to \c c. +*/ +NV_FORCE_INLINE void* memSet(void* NV_RESTRICT dest, int32_t c, uint32_t count) +{ + return memset(dest, c, count); +} + +/*! +Copies \c count bytes from \c src to \c dst. User memMove if regions overlap. +*/ +NV_FORCE_INLINE void* memCopy(void* NV_RESTRICT dest, const void* NV_RESTRICT src, uint32_t count) +{ + return memcpy(dest, src, count); +} + +/*! +Copies \c count bytes from \c src to \c dst. Supports overlapping regions. +*/ +NV_FORCE_INLINE void* memMove(void* NV_RESTRICT dest, const void* NV_RESTRICT src, uint32_t count) +{ + return memmove(dest, src, count); +} + +/*! +Set 128B to zero starting at \c dst+offset. Must be aligned. +*/ +NV_FORCE_INLINE void memZero128(void* NV_RESTRICT dest, uint32_t offset = 0) +{ + NV_ASSERT(((size_t(dest) + offset) & 0x7f) == 0); + memSet((char*)dest + offset, 0, 128); +} + +} // namespace intrinsics +} // namespace nvidia + +#endif // #ifndef NV_PS3_NVPS3INTRINSICS_H diff --git a/external/NvFoundation/1.1/include/psp2/NvPSP2Intrinsics.h b/external/NvFoundation/1.1/include/psp2/NvPSP2Intrinsics.h new file mode 100644 index 0000000..b61a800 --- /dev/null +++ b/external/NvFoundation/1.1/include/psp2/NvPSP2Intrinsics.h @@ -0,0 +1,180 @@ +// This code contains NVIDIA Confidential Information and is disclosed to you +// under a form of NVIDIA software license agreement provided separately to you. +// +// Notice +// NVIDIA Corporation and its licensors retain all intellectual property and +// proprietary rights in and to this software and related documentation and +// any modifications thereto. Any use, reproduction, disclosure, or +// distribution of this software and related documentation without an express +// license agreement from NVIDIA Corporation is strictly prohibited. 
+// +// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES +// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO +// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT, +// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE. +// +// Information and code furnished is believed to be accurate and reliable. +// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such +// information or for any infringement of patents or other rights of third parties that may +// result from its use. No license is granted by implication or otherwise under any patent +// or patent rights of NVIDIA Corporation. Details are subject to change without notice. +// This code supersedes and replaces all information previously supplied. +// NVIDIA Corporation products are not authorized for use as critical +// components in life support devices or systems without express written approval of +// NVIDIA Corporation. +// +// Copyright (c) 2008-2014 NVIDIA Corporation. All rights reserved. +// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. +// Copyright (c) 2001-2004 NovodeX AG. All rights reserved. + +#ifndef NV_PSP2_NVPSP2INTRINSICS_H +#define NV_PSP2_NVPSP2INTRINSICS_H + +#include "Nv.h" +#include "NvAssert.h" + +#if !NV_PSP2 +#error "This file should only be included by psp2 builds!!" +#endif + +#include <math.h> +#include <float.h> + +namespace nvidia +{ +namespace intrinsics +{ +//! \brief platform-specific absolute value +NV_FORCE_INLINE float abs(float a) +{ + return ::fabs(a); +} + +//! \brief platform-specific select float +NV_FORCE_INLINE float fsel(float a, float b, float c) +{ + return (a >= 0.0f) ? b : c; +} + +//! \brief platform-specific sign +NV_FORCE_INLINE float sign(float a) +{ + return (a >= 0.0f) ? 1.0f : -1.0f; +} + +//! \brief platform-specific reciprocal +NV_FORCE_INLINE float recip(float a) +{ + return 1.0f / a; +} + +//! 
\brief platform-specific reciprocal estimate +NV_FORCE_INLINE float recipFast(float a) +{ + return 1.0f / a; +} + +//! \brief platform-specific square root +NV_FORCE_INLINE float sqrt(float a) +{ + return ::sqrtf(a); +} + +//! \brief platform-specific reciprocal square root +NV_FORCE_INLINE float recipSqrt(float a) +{ + return 1.0f / ::sqrtf(a); +} + +NV_FORCE_INLINE float recipSqrtFast(float a) +{ + return 1.0f / ::sqrtf(a); +} + +//! \brief platform-specific sine +NV_FORCE_INLINE float sin(float a) +{ + return ::sinf(a); +} + +//! \brief platform-specific cosine +NV_FORCE_INLINE float cos(float a) +{ + return ::cosf(a); +} + +//! \brief platform-specific minimum +NV_FORCE_INLINE float selectMin(float a, float b) +{ + return a < b ? a : b; +} + +//! \brief platform-specific maximum +NV_FORCE_INLINE float selectMax(float a, float b) +{ + return a > b ? a : b; +} + +//! \brief platform-specific float floor +NV_FORCE_INLINE float floor(float a) +{ + return floor(a); +} + +//! \brief platform-specific finiteness check (not INF or NAN) +NV_FORCE_INLINE bool isFinite(float a) +{ + return isfinite(a); +} + +//! \brief platform-specific finiteness check (not INF or NAN) +NV_FORCE_INLINE bool isFinite(double a) +{ + return isfinite(a); +} + +/*! +Sets \c count bytes starting at \c dst to zero. +*/ +NV_FORCE_INLINE void* memZero(void* NV_RESTRICT dest, uint32_t count) +{ + return memset(dest, 0, count); +} + +/*! +Sets \c count bytes starting at \c dst to \c c. +*/ +NV_FORCE_INLINE void* memSet(void* NV_RESTRICT dest, int32_t c, uint32_t count) +{ + return memset(dest, c, count); +} + +/*! +Copies \c count bytes from \c src to \c dst. User memMove if regions overlap. +*/ +NV_FORCE_INLINE void* memCopy(void* NV_RESTRICT dest, const void* NV_RESTRICT src, uint32_t count) +{ + return memcpy(dest, src, count); +} + +/*! +Copies \c count bytes from \c src to \c dst. Supports overlapping regions. 
+*/ +NV_FORCE_INLINE void* memMove(void* NV_RESTRICT dest, const void* NV_RESTRICT src, uint32_t count) +{ + return memmove(dest, src, count); +} + +/*! +Set 128B to zero starting at \c dst+offset. Must be aligned. +*/ +NV_FORCE_INLINE void memZero128(void* NV_RESTRICT dest, uint32_t offset = 0) +{ + NV_ASSERT(((size_t(dest) + offset) & 0x7f) == 0); + memSet((char*)dest + offset, 0, 128); +} + +} // namespace intrinsics +} // namespace nvidia + +#endif // #ifndef NV_PSP2_NVPSP2INTRINSICS_H diff --git a/external/NvFoundation/1.1/include/unix/NvUnixIntrinsics.h b/external/NvFoundation/1.1/include/unix/NvUnixIntrinsics.h new file mode 100644 index 0000000..4230c83 --- /dev/null +++ b/external/NvFoundation/1.1/include/unix/NvUnixIntrinsics.h @@ -0,0 +1,174 @@ +// This code contains NVIDIA Confidential Information and is disclosed to you +// under a form of NVIDIA software license agreement provided separately to you. +// +// Notice +// NVIDIA Corporation and its licensors retain all intellectual property and +// proprietary rights in and to this software and related documentation and +// any modifications thereto. Any use, reproduction, disclosure, or +// distribution of this software and related documentation without an express +// license agreement from NVIDIA Corporation is strictly prohibited. +// +// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES +// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO +// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT, +// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE. +// +// Information and code furnished is believed to be accurate and reliable. +// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such +// information or for any infringement of patents or other rights of third parties that may +// result from its use. 
No license is granted by implication or otherwise under any patent +// or patent rights of NVIDIA Corporation. Details are subject to change without notice. +// This code supersedes and replaces all information previously supplied. +// NVIDIA Corporation products are not authorized for use as critical +// components in life support devices or systems without express written approval of +// NVIDIA Corporation. +// +// Copyright (c) 2008-2014 NVIDIA Corporation. All rights reserved. +// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. +// Copyright (c) 2001-2004 NovodeX AG. All rights reserved. + +#ifndef NV_UNIX_NVUNIXINTRINSICS_H +#define NV_UNIX_NVUNIXINTRINSICS_H + +#include "Nv.h" +#include "NvAssert.h" + +#if !(NV_LINUX || NV_ANDROID || NV_PS4 || NV_APPLE_FAMILY) +#error "This file should only be included by Unix builds!!" +#endif + +#include <math.h> +#include <float.h> + +namespace nvidia +{ +namespace intrinsics +{ +//! \brief platform-specific absolute value +NV_CUDA_CALLABLE NV_FORCE_INLINE float abs(float a) +{ + return ::fabsf(a); +} + +//! \brief platform-specific select float +NV_CUDA_CALLABLE NV_FORCE_INLINE float fsel(float a, float b, float c) +{ + return (a >= 0.0f) ? b : c; +} + +//! \brief platform-specific sign +NV_CUDA_CALLABLE NV_FORCE_INLINE float sign(float a) +{ + return (a >= 0.0f) ? 1.0f : -1.0f; +} + +//! \brief platform-specific reciprocal +NV_CUDA_CALLABLE NV_FORCE_INLINE float recip(float a) +{ + return 1.0f / a; +} + +//! \brief platform-specific reciprocal estimate +NV_CUDA_CALLABLE NV_FORCE_INLINE float recipFast(float a) +{ + return 1.0f / a; +} + +//! \brief platform-specific square root +NV_CUDA_CALLABLE NV_FORCE_INLINE float sqrt(float a) +{ + return ::sqrtf(a); +} + +//! 
\brief platform-specific reciprocal square root +NV_CUDA_CALLABLE NV_FORCE_INLINE float recipSqrt(float a) +{ + return 1.0f / ::sqrtf(a); +} + +NV_CUDA_CALLABLE NV_FORCE_INLINE float recipSqrtFast(float a) +{ + return 1.0f / ::sqrtf(a); +} + +//! \brief platform-specific sine +NV_CUDA_CALLABLE NV_FORCE_INLINE float sin(float a) +{ + return ::sinf(a); +} + +//! \brief platform-specific cosine +NV_CUDA_CALLABLE NV_FORCE_INLINE float cos(float a) +{ + return ::cosf(a); +} + +//! \brief platform-specific minimum +NV_CUDA_CALLABLE NV_FORCE_INLINE float selectMin(float a, float b) +{ + return a < b ? a : b; +} + +//! \brief platform-specific maximum +NV_CUDA_CALLABLE NV_FORCE_INLINE float selectMax(float a, float b) +{ + return a > b ? a : b; +} + +//! \brief platform-specific finiteness check (not INF or NAN) +NV_CUDA_CALLABLE NV_FORCE_INLINE bool isFinite(float a) +{ + return !!isfinite(a); +} + +//! \brief platform-specific finiteness check (not INF or NAN) +NV_CUDA_CALLABLE NV_FORCE_INLINE bool isFinite(double a) +{ + return !!isfinite(a); +} + +/*! +Sets \c count bytes starting at \c dst to zero. +*/ +NV_FORCE_INLINE void* memZero(void* NV_RESTRICT dest, uint32_t count) +{ + return memset(dest, 0, count); +} + +/*! +Sets \c count bytes starting at \c dst to \c c. +*/ +NV_FORCE_INLINE void* memSet(void* NV_RESTRICT dest, int32_t c, uint32_t count) +{ + return memset(dest, c, count); +} + +/*! +Copies \c count bytes from \c src to \c dst. User memMove if regions overlap. +*/ +NV_FORCE_INLINE void* memCopy(void* NV_RESTRICT dest, const void* NV_RESTRICT src, uint32_t count) +{ + return memcpy(dest, src, count); +} + +/*! +Copies \c count bytes from \c src to \c dst. Supports overlapping regions. +*/ +NV_FORCE_INLINE void* memMove(void* NV_RESTRICT dest, const void* NV_RESTRICT src, uint32_t count) +{ + return memmove(dest, src, count); +} + +/*! +Set 128B to zero starting at \c dst+offset. Must be aligned. 
+*/ +NV_FORCE_INLINE void memZero128(void* NV_RESTRICT dest, uint32_t offset = 0) +{ + NV_ASSERT(((size_t(dest) + offset) & 0x7f) == 0); + memSet(reinterpret_cast<char * NV_RESTRICT>(dest) + offset, 0, 128); +} + +} // namespace intrinsics +} // namespace nvidia + +#endif // #ifndef NV_UNIX_NVUNIXINTRINSICS_H diff --git a/external/NvFoundation/1.1/include/wiiu/NvWiiUIntrinsics.h b/external/NvFoundation/1.1/include/wiiu/NvWiiUIntrinsics.h new file mode 100644 index 0000000..f50213b --- /dev/null +++ b/external/NvFoundation/1.1/include/wiiu/NvWiiUIntrinsics.h @@ -0,0 +1,179 @@ +// This code contains NVIDIA Confidential Information and is disclosed to you +// under a form of NVIDIA software license agreement provided separately to you. +// +// Notice +// NVIDIA Corporation and its licensors retain all intellectual property and +// proprietary rights in and to this software and related documentation and +// any modifications thereto. Any use, reproduction, disclosure, or +// distribution of this software and related documentation without an express +// license agreement from NVIDIA Corporation is strictly prohibited. +// +// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES +// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO +// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT, +// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE. +// +// Information and code furnished is believed to be accurate and reliable. +// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such +// information or for any infringement of patents or other rights of third parties that may +// result from its use. No license is granted by implication or otherwise under any patent +// or patent rights of NVIDIA Corporation. Details are subject to change without notice. +// This code supersedes and replaces all information previously supplied. 
+// NVIDIA Corporation products are not authorized for use as critical +// components in life support devices or systems without express written approval of +// NVIDIA Corporation. +// +// Copyright (c) 2008-2014 NVIDIA Corporation. All rights reserved. +// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. +// Copyright (c) 2001-2004 NovodeX AG. All rights reserved. + +#ifndef NV_WIIU_NVWIIUINTRINSICS_H +#define NV_WIIU_NVWIIUINTRINSICS_H + +#include "Nv.h" +#include "NvAssert.h" + +#if !NV_WIIU +#error "This file should only be included by Wii U builds!!" +#endif + +#include <math.h> +#include <ppc_ghs.h> + +namespace nvidia +{ +namespace intrinsics +{ +//! \brief platform-specific absolute value +NV_FORCE_INLINE float abs(float a) +{ + return __fabsf(a); +} + +//! \brief platform-specific select float +NV_FORCE_INLINE float fsel(float a, float b, float c) +{ + return __FSELF(a, b, c); +} + +//! \brief platform-specific sign +NV_FORCE_INLINE float sign(float a) +{ + return __FSELF(a, 1.0f, -1.0f); +} + +//! \brief platform-specific reciprocal +NV_FORCE_INLINE float recip(float a) +{ + return 1.0f / a; +} + +//! \brief platform-specific reciprocal estimate +NV_FORCE_INLINE float recipFast(float a) +{ + return __FRES(a); +} // note: resolution of less than 1/4000 + +//! \brief platform-specific square root +NV_FORCE_INLINE float sqrt(float a) +{ + return ::sqrtf(a); +} + +//! \brief platform-specific reciprocal square root +NV_FORCE_INLINE float recipSqrt(float a) +{ + return 1.0f / ::sqrtf(a); +} + +NV_FORCE_INLINE float recipSqrtFast(float a) +{ + return __FRSQRTEF(a); +} // note: resolution of less than 1/4000 + +//! \brief platform-specific sine +NV_FORCE_INLINE float sin(float a) +{ + return ::sinf(a); +} + +//! \brief platform-specific cosine +NV_FORCE_INLINE float cos(float a) +{ + return ::cosf(a); +} + +//! \brief platform-specific minimum +NV_FORCE_INLINE float selectMin(float a, float b) +{ + return __FSELF(a - b, b, a); +} + +//! 
\brief platform-specific maximum +NV_FORCE_INLINE float selectMax(float a, float b) +{ + return __FSELF(a - b, a, b); +} + +//! \brief platform-specific finiteness check (not INF or NAN) +NV_FORCE_INLINE bool isFinite(float a) +{ + return isfinite(a); +} + +//! \brief platform-specific finiteness check (not INF or NAN) +NV_FORCE_INLINE bool isFinite(double a) +{ + return isfinite(a); +} + +/*! +Sets \c count bytes starting at \c dst to zero. +*/ +NV_FORCE_INLINE void* memZero(void* NV_RESTRICT dest, uint32_t count) +{ + return memset(dest, 0, count); +} + +/*! +Sets \c count bytes starting at \c dst to \c c. +*/ +NV_FORCE_INLINE void* memSet(void* NV_RESTRICT dest, int32_t c, uint32_t count) +{ + return memset(dest, c, count); +} + +/*! +Copies \c count bytes from \c src to \c dst. User memMove if regions overlap. +*/ +NV_FORCE_INLINE void* memCopy(void* NV_RESTRICT dest, const void* NV_RESTRICT src, uint32_t count) +{ + return memcpy(dest, src, count); +} + +/*! +Copies \c count bytes from \c src to \c dst. Supports overlapping regions. +*/ +NV_FORCE_INLINE void* memMove(void* NV_RESTRICT dest, const void* NV_RESTRICT src, uint32_t count) +{ + return memmove(dest, src, count); // could try OSBlockMove() in <cafe/os.h> +} + +/*! +Set 128B to zero starting at \c dst+offset. Must be aligned. 
+*/ +NV_FORCE_INLINE void memZero128(void* NV_RESTRICT dest, uint32_t offset = 0) +{ + NV_UNUSED(offset); + NV_ASSERT(((size_t(dest) + offset) & 0x7f) == 0); + + __DCBZ(dest, 0); + __DCBZ(dest, 32); + __DCBZ(dest, 64); + __DCBZ(dest, 96); +} + +} // namespace intrinsics +} // namespace nvidia + +#endif // #ifndef NV_WIIU_NVWIIUINTRINSICS_H diff --git a/external/NvFoundation/1.1/include/windows/NvWindowsIntrinsics.h b/external/NvFoundation/1.1/include/windows/NvWindowsIntrinsics.h new file mode 100644 index 0000000..e5ab4bb --- /dev/null +++ b/external/NvFoundation/1.1/include/windows/NvWindowsIntrinsics.h @@ -0,0 +1,188 @@ +// This code contains NVIDIA Confidential Information and is disclosed to you +// under a form of NVIDIA software license agreement provided separately to you. +// +// Notice +// NVIDIA Corporation and its licensors retain all intellectual property and +// proprietary rights in and to this software and related documentation and +// any modifications thereto. Any use, reproduction, disclosure, or +// distribution of this software and related documentation without an express +// license agreement from NVIDIA Corporation is strictly prohibited. +// +// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES +// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO +// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT, +// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE. +// +// Information and code furnished is believed to be accurate and reliable. +// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such +// information or for any infringement of patents or other rights of third parties that may +// result from its use. No license is granted by implication or otherwise under any patent +// or patent rights of NVIDIA Corporation. Details are subject to change without notice. 
+// This code supersedes and replaces all information previously supplied. +// NVIDIA Corporation products are not authorized for use as critical +// components in life support devices or systems without express written approval of +// NVIDIA Corporation. +// +// Copyright (c) 2008-2014 NVIDIA Corporation. All rights reserved. +// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. +// Copyright (c) 2001-2004 NovodeX AG. All rights reserved. + +#ifndef NV_WINDOWS_NVWINDOWSINTRINSICS_H +#define NV_WINDOWS_NVWINDOWSINTRINSICS_H + +#include "Nv.h" +#include "NvAssert.h" + +#if !NV_WINDOWS_FAMILY +#error "This file should only be included by Windows or WIN8ARM builds!!" +#endif + +#include <math.h> +#include <float.h> + +#if !NV_DOXYGEN +namespace nvidia +{ +namespace intrinsics +{ +#endif + +//! \brief platform-specific absolute value +NV_CUDA_CALLABLE NV_FORCE_INLINE float abs(float a) +{ + return ::fabsf(a); +} + +//! \brief platform-specific select float +NV_CUDA_CALLABLE NV_FORCE_INLINE float fsel(float a, float b, float c) +{ + return (a >= 0.0f) ? b : c; +} + +//! \brief platform-specific sign +NV_CUDA_CALLABLE NV_FORCE_INLINE float sign(float a) +{ + return (a >= 0.0f) ? 1.0f : -1.0f; +} + +//! \brief platform-specific reciprocal +NV_CUDA_CALLABLE NV_FORCE_INLINE float recip(float a) +{ + return 1.0f / a; +} + +//! \brief platform-specific reciprocal estimate +NV_CUDA_CALLABLE NV_FORCE_INLINE float recipFast(float a) +{ + return 1.0f / a; +} + +//! \brief platform-specific square root +NV_CUDA_CALLABLE NV_FORCE_INLINE float sqrt(float a) +{ + return ::sqrtf(a); +} + +//! \brief platform-specific reciprocal square root +NV_CUDA_CALLABLE NV_FORCE_INLINE float recipSqrt(float a) +{ + return 1.0f / ::sqrtf(a); +} + +//! \brief platform-specific reciprocal square root estimate +NV_CUDA_CALLABLE NV_FORCE_INLINE float recipSqrtFast(float a) +{ + return 1.0f / ::sqrtf(a); +} + +//! 
\brief platform-specific sine +NV_CUDA_CALLABLE NV_FORCE_INLINE float sin(float a) +{ + return ::sinf(a); +} + +//! \brief platform-specific cosine +NV_CUDA_CALLABLE NV_FORCE_INLINE float cos(float a) +{ + return ::cosf(a); +} + +//! \brief platform-specific minimum +NV_CUDA_CALLABLE NV_FORCE_INLINE float selectMin(float a, float b) +{ + return a < b ? a : b; +} + +//! \brief platform-specific maximum +NV_CUDA_CALLABLE NV_FORCE_INLINE float selectMax(float a, float b) +{ + return a > b ? a : b; +} + +//! \brief platform-specific finiteness check (not INF or NAN) +NV_CUDA_CALLABLE NV_FORCE_INLINE bool isFinite(float a) +{ +#ifdef __CUDACC__ + return !!isfinite(a); +#else + return (0 == ((_FPCLASS_SNAN | _FPCLASS_QNAN | _FPCLASS_NINF | _FPCLASS_PINF) & _fpclass(a))); +#endif +} + +//! \brief platform-specific finiteness check (not INF or NAN) +NV_CUDA_CALLABLE NV_FORCE_INLINE bool isFinite(double a) +{ +#ifdef __CUDACC__ + return !!isfinite(a); +#else + return (0 == ((_FPCLASS_SNAN | _FPCLASS_QNAN | _FPCLASS_NINF | _FPCLASS_PINF) & _fpclass(a))); +#endif +} + +/*! +Sets \c count bytes starting at \c dst to zero. +*/ +NV_FORCE_INLINE void* memZero(void* NV_RESTRICT dest, uint32_t count) +{ + return memset(dest, 0, count); +} + +/*! +Sets \c count bytes starting at \c dst to \c c. +*/ +NV_FORCE_INLINE void* memSet(void* NV_RESTRICT dest, int32_t c, uint32_t count) +{ + return memset(dest, c, count); +} + +/*! +Copies \c count bytes from \c src to \c dst. User memMove if regions overlap. +*/ +NV_FORCE_INLINE void* memCopy(void* NV_RESTRICT dest, const void* NV_RESTRICT src, uint32_t count) +{ + return memcpy(dest, src, count); +} + +/*! +Copies \c count bytes from \c src to \c dst. Supports overlapping regions. +*/ +NV_FORCE_INLINE void* memMove(void* NV_RESTRICT dest, const void* NV_RESTRICT src, uint32_t count) +{ + return memmove(dest, src, count); +} + +/*! +Set 128B to zero starting at \c dst+offset. Must be aligned. 
+*/ +NV_FORCE_INLINE void memZero128(void* NV_RESTRICT dest, uint32_t offset = 0) +{ + NV_ASSERT(((size_t(dest) + offset) & 0x7f) == 0); + memSet((char * NV_RESTRICT)dest + offset, 0, 128); +} + +#if !NV_DOXYGEN +} // namespace intrinsics +} // namespace nvidia +#endif + +#endif // #ifndef NV_WINDOWS_NVWINDOWSINTRINSICS_H diff --git a/external/NvFoundation/1.1/include/xbox360/NvXbox360Intrinsics.h b/external/NvFoundation/1.1/include/xbox360/NvXbox360Intrinsics.h new file mode 100644 index 0000000..8cee763 --- /dev/null +++ b/external/NvFoundation/1.1/include/xbox360/NvXbox360Intrinsics.h @@ -0,0 +1,189 @@ +// This code contains NVIDIA Confidential Information and is disclosed to you +// under a form of NVIDIA software license agreement provided separately to you. +// +// Notice +// NVIDIA Corporation and its licensors retain all intellectual property and +// proprietary rights in and to this software and related documentation and +// any modifications thereto. Any use, reproduction, disclosure, or +// distribution of this software and related documentation without an express +// license agreement from NVIDIA Corporation is strictly prohibited. +// +// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES +// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO +// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT, +// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE. +// +// Information and code furnished is believed to be accurate and reliable. +// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such +// information or for any infringement of patents or other rights of third parties that may +// result from its use. No license is granted by implication or otherwise under any patent +// or patent rights of NVIDIA Corporation. Details are subject to change without notice. +// This code supersedes and replaces all information previously supplied. 
+// NVIDIA Corporation products are not authorized for use as critical +// components in life support devices or systems without express written approval of +// NVIDIA Corporation. +// +// Copyright (c) 2008-2014 NVIDIA Corporation. All rights reserved. +// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. +// Copyright (c) 2001-2004 NovodeX AG. All rights reserved. +#ifndef NV_XBOX360_NVXBOX360INTRINSICS_H +#define NV_XBOX360_NVXBOX360INTRINSICS_H + +#include "Nv.h" +#include "NvAssert.h" + +#if !NV_X360 +#error "This file should only be included by xbox builds!!" +#endif + +#ifndef NOMINMAX +#define NOMINMAX +#endif + +#ifndef XM_NO_OPERATOR_OVERLOADS +#define XM_NO_OPERATOR_OVERLOADS +#endif + +#pragma warning(push) +// 'symbol' is not defined as a preprocessor macro, replacing with '0' for 'directives' +// enumerator 'identifier' in switch of enum 'enumeration' is not handled +#pragma warning(disable : 4061 4062 4668 4365) +#include <xtl.h> +#pragma warning(pop) + +#include <PPCIntrinsics.h> +#include <math.h> +#include <float.h> + +namespace nvidia +{ +namespace intrinsics +{ +//! \brief platform-specific absolute value +NV_FORCE_INLINE float abs(float a) +{ + return __fabs(a); +} + +//! \brief platform-specific select float +NV_FORCE_INLINE float fsel(float a, float b, float c) +{ + return __fself(a, b, c); +} + +//! \brief platform-specific sign +NV_FORCE_INLINE float sign(float a) +{ + return __fself(a, 1.0f, -1.0f); +} + +//! \brief platform-specific reciprocal +NV_FORCE_INLINE float recip(float a) +{ + return 1.0f / (a); +} + +//! \brief platform-specific reciprocal estimate +NV_FORCE_INLINE float recipFast(float a) +{ + return __fres(a); +} + +//! \brief platform-specific square root +NV_FORCE_INLINE float sqrt(float a) +{ + return __fsqrts(a); +} + +//! \brief platform-specific reciprocal square root +NV_FORCE_INLINE float recipSqrt(float a) +{ + return recip(__fsqrts(a)); +} + +//! 
\brief platform-specific reciprocal square root estimate +NV_FORCE_INLINE float recipSqrtFast(float a) +{ + return float(__frsqrte(a)); +} + +//! \brief platform-specific sine +NV_FORCE_INLINE float sin(float a) +{ + return ::sinf(a); +} + +//! \brief platform-specific cosine +NV_FORCE_INLINE float cos(float a) +{ + return ::cosf(a); +} + +//! \brief platform-specific minimum +NV_FORCE_INLINE float selectMin(float a, float b) +{ + return __fself(a - b, b, a); +} + +//! \brief platform-specific maximum +NV_FORCE_INLINE float selectMax(float a, float b) +{ + return __fself(a - b, a, b); +} + +//! \brief platform-specific finiteness check (not INF or NAN) +NV_FORCE_INLINE bool isFinite(float a) +{ + return 0 == ((_FPCLASS_SNAN | _FPCLASS_QNAN | _FPCLASS_NINF | _FPCLASS_PINF) & _fpclass(a)); +} + +//! \brief platform-specific finiteness check (not INF or NAN) +NV_FORCE_INLINE bool isFinite(double a) +{ + return 0 == ((_FPCLASS_SNAN | _FPCLASS_QNAN | _FPCLASS_NINF | _FPCLASS_PINF) & _fpclass(a)); +} + +/*! +Sets \c count bytes starting at \c dst to zero. +*/ +NV_FORCE_INLINE void* memZero(void* NV_RESTRICT dest, uint32_t count) +{ + return XMemSet(dest, 0, count); +} + +/*! +Sets \c count bytes starting at \c dst to \c c. +*/ +NV_FORCE_INLINE void* memSet(void* NV_RESTRICT dest, int32_t c, uint32_t count) +{ + return XMemSet(dest, c, count); +} + +/*! +Copies \c count bytes from \c src to \c dst. User memMove if regions overlap. +*/ +NV_FORCE_INLINE void* memCopy(void* NV_RESTRICT dest, const void* NV_RESTRICT src, uint32_t count) +{ + return XMemCpy(dest, src, count); +} + +/*! +Copies \c count bytes from \c src to \c dst. Supports overlapping regions. +*/ +NV_FORCE_INLINE void* memMove(void* NV_RESTRICT dest, const void* NV_RESTRICT src, uint32_t count) +{ + return memmove(dest, src, count); +} + +/*! +Set 128B to zero starting at \c dst+offset. Must be aligned. 
+*/ +NV_FORCE_INLINE void memZero128(void* NV_RESTRICT dest, uint32_t offset = 0) +{ + NV_ASSERT(((size_t(dest) + offset) & 0x7f) == 0); + __dcbz128((int)offset, dest); +} + +} // namespace intrinsics +} // namespace nvidia +#endif // #ifndef NV_XBOX360_NVXBOX360INTRINSICS_H diff --git a/external/NvFoundation/1.1/include/xboxone/NvXboxOneIntrinsics.h b/external/NvFoundation/1.1/include/xboxone/NvXboxOneIntrinsics.h new file mode 100644 index 0000000..ae58981 --- /dev/null +++ b/external/NvFoundation/1.1/include/xboxone/NvXboxOneIntrinsics.h @@ -0,0 +1,188 @@ +// This code contains NVIDIA Confidential Information and is disclosed to you +// under a form of NVIDIA software license agreement provided separately to you. +// +// Notice +// NVIDIA Corporation and its licensors retain all intellectual property and +// proprietary rights in and to this software and related documentation and +// any modifications thereto. Any use, reproduction, disclosure, or +// distribution of this software and related documentation without an express +// license agreement from NVIDIA Corporation is strictly prohibited. +// +// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES +// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO +// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT, +// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE. +// +// Information and code furnished is believed to be accurate and reliable. +// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such +// information or for any infringement of patents or other rights of third parties that may +// result from its use. No license is granted by implication or otherwise under any patent +// or patent rights of NVIDIA Corporation. Details are subject to change without notice. +// This code supersedes and replaces all information previously supplied. 
+// NVIDIA Corporation products are not authorized for use as critical +// components in life support devices or systems without express written approval of +// NVIDIA Corporation. +// +// Copyright (c) 2008-2014 NVIDIA Corporation. All rights reserved. +// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. +// Copyright (c) 2001-2004 NovodeX AG. All rights reserved. + +#ifndef NV_XBOXONE_NVXBOXONEINTRINSICS_H +#define NV_XBOXONE_NVXBOXONEINTRINSICS_H + +#include "Nv.h" +#include "NvAssert.h" + +#if !NV_XBOXONE +#error "This file should only be included by XboxOne builds!!" +#endif + +#include <math.h> +#include <float.h> + +#if !NV_DOXYGEN +namespace nvidia +{ +namespace intrinsics +{ +#endif + +//! \brief platform-specific absolute value +NV_FORCE_INLINE float abs(float a) +{ + return ::fabs(a); +} + +//! \brief platform-specific select float +NV_FORCE_INLINE float fsel(float a, float b, float c) +{ + return (a >= 0.0f) ? b : c; +} + +//! \brief platform-specific sign +NV_FORCE_INLINE float sign(float a) +{ + return (a >= 0.0f) ? 1.0f : -1.0f; +} + +//! \brief platform-specific reciprocal +NV_FORCE_INLINE float recip(float a) +{ + return 1.0f / a; +} + +//! \brief platform-specific reciprocal estimate +NV_FORCE_INLINE float recipFast(float a) +{ + return 1.0f / a; +} + +//! \brief platform-specific square root +NV_FORCE_INLINE float sqrt(float a) +{ + return ::sqrtf(a); +} + +//! \brief platform-specific reciprocal square root +NV_FORCE_INLINE float recipSqrt(float a) +{ + return 1.0f / ::sqrtf(a); +} + +//! \brief platform-specific reciprocal square root estimate +NV_FORCE_INLINE float recipSqrtFast(float a) +{ + return 1.0f / ::sqrtf(a); +} + +//! \brief platform-specific sine +NV_FORCE_INLINE float sin(float a) +{ + return ::sinf(a); +} + +//! \brief platform-specific cosine +NV_FORCE_INLINE float cos(float a) +{ + return ::cosf(a); +} + +//! 
\brief platform-specific minimum +NV_FORCE_INLINE float selectMin(float a, float b) +{ + return a < b ? a : b; +} + +//! \brief platform-specific maximum +NV_FORCE_INLINE float selectMax(float a, float b) +{ + return a > b ? a : b; +} + +//! \brief platform-specific finiteness check (not INF or NAN) +NV_FORCE_INLINE bool isFinite(float a) +{ +#ifdef __CUDACC__ + return isfinite(a) ? true : false; +#else + return (0 == ((_FPCLASS_SNAN | _FPCLASS_QNAN | _FPCLASS_NINF | _FPCLASS_PINF) & _fpclass(a))); +#endif +} + +//! \brief platform-specific finiteness check (not INF or NAN) +NV_FORCE_INLINE bool isFinite(double a) +{ +#ifdef __CUDACC__ + return isfinite(a) ? true : false; +#else + return (0 == ((_FPCLASS_SNAN | _FPCLASS_QNAN | _FPCLASS_NINF | _FPCLASS_PINF) & _fpclass(a))); +#endif +} + +/*! +Sets \c count bytes starting at \c dst to zero. +*/ +NV_FORCE_INLINE void* memZero(void* NV_RESTRICT dest, uint32_t count) +{ + return memset(dest, 0, count); +} + +/*! +Sets \c count bytes starting at \c dst to \c c. +*/ +NV_FORCE_INLINE void* memSet(void* NV_RESTRICT dest, int32_t c, uint32_t count) +{ + return memset(dest, c, count); +} + +/*! +Copies \c count bytes from \c src to \c dst. User memMove if regions overlap. +*/ +NV_FORCE_INLINE void* memCopy(void* NV_RESTRICT dest, const void* NV_RESTRICT src, uint32_t count) +{ + return memcpy(dest, src, count); +} + +/*! +Copies \c count bytes from \c src to \c dst. Supports overlapping regions. +*/ +NV_FORCE_INLINE void* memMove(void* NV_RESTRICT dest, const void* NV_RESTRICT src, uint32_t count) +{ + return memmove(dest, src, count); +} + +/*! +Set 128B to zero starting at \c dst+offset. Must be aligned. +*/ +NV_FORCE_INLINE void memZero128(void* NV_RESTRICT dest, uint32_t offset = 0) +{ + NV_ASSERT(((size_t(dest) + offset) & 0x7f) == 0); + memSet((char * NV_RESTRICT)dest + offset, 0, 128); +} + +#if !NV_DOXYGEN +} // namespace intrinsics +} // namespace nvidia +#endif + +#endif // #ifndef NV_XBOXONE_NVXBOXONEINTRINSICS_H |