aboutsummaryrefslogtreecommitdiff
path: root/APEX_1.4/shared/general/HACD/src
diff options
context:
space:
mode:
authorgit perforce import user <a@b>2016-10-25 12:29:14 -0600
committerSheikh Dawood Abdul Ajees <Sheikh Dawood Abdul Ajees>2016-10-25 18:56:37 -0500
commit3dfe2108cfab31ba3ee5527e217d0d8e99a51162 (patch)
treefa6485c169e50d7415a651bf838f5bcd0fd3bfbd /APEX_1.4/shared/general/HACD/src
downloadphysx-3.4-3dfe2108cfab31ba3ee5527e217d0d8e99a51162.tar.xz
physx-3.4-3dfe2108cfab31ba3ee5527e217d0d8e99a51162.zip
Initial commit:
PhysX 3.4.0 Update @ 21294896 APEX 1.4.0 Update @ 21275617 [CL 21300167]
Diffstat (limited to 'APEX_1.4/shared/general/HACD/src')
-rw-r--r--APEX_1.4/shared/general/HACD/src/AutoGeometry.cpp472
-rw-r--r--APEX_1.4/shared/general/HACD/src/ConvexDecomposition.cpp3020
-rw-r--r--APEX_1.4/shared/general/HACD/src/ConvexHull.cpp396
-rw-r--r--APEX_1.4/shared/general/HACD/src/HACD.cpp604
-rw-r--r--APEX_1.4/shared/general/HACD/src/MergeHulls.cpp533
-rw-r--r--APEX_1.4/shared/general/HACD/src/WuQuantizer.cpp1033
-rw-r--r--APEX_1.4/shared/general/HACD/src/dgAABBPolygonSoup.cpp1266
-rw-r--r--APEX_1.4/shared/general/HACD/src/dgConvexHull3d.cpp994
-rw-r--r--APEX_1.4/shared/general/HACD/src/dgGoogol.cpp421
-rw-r--r--APEX_1.4/shared/general/HACD/src/dgIntersections.cpp509
-rw-r--r--APEX_1.4/shared/general/HACD/src/dgMatrix.cpp574
-rw-r--r--APEX_1.4/shared/general/HACD/src/dgMeshEffect.cpp4752
-rw-r--r--APEX_1.4/shared/general/HACD/src/dgPolygonSoupBuilder.cpp962
-rw-r--r--APEX_1.4/shared/general/HACD/src/dgPolyhedra.cpp2433
-rw-r--r--APEX_1.4/shared/general/HACD/src/dgQuaternion.cpp184
-rw-r--r--APEX_1.4/shared/general/HACD/src/dgSmallDeterminant.cpp153
-rw-r--r--APEX_1.4/shared/general/HACD/src/dgSphere.cpp894
-rw-r--r--APEX_1.4/shared/general/HACD/src/dgTree.cpp415
-rw-r--r--APEX_1.4/shared/general/HACD/src/dgTypes.cpp426
19 files changed, 20041 insertions, 0 deletions
diff --git a/APEX_1.4/shared/general/HACD/src/AutoGeometry.cpp b/APEX_1.4/shared/general/HACD/src/AutoGeometry.cpp
new file mode 100644
index 00000000..63b7056f
--- /dev/null
+++ b/APEX_1.4/shared/general/HACD/src/AutoGeometry.cpp
@@ -0,0 +1,472 @@
+/*!
+**
+** Copyright (c) 2015 by John W. Ratcliff mailto:[email protected]
+**
+**
+** The MIT license:
+**
+** Permission is hereby granted, free of charge, to any person obtaining a copy
+** of this software and associated documentation files (the "Software"), to deal
+** in the Software without restriction, including without limitation the rights
+** to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+** copies of the Software, and to permit persons to whom the Software is furnished
+** to do so, subject to the following conditions:
+**
+** The above copyright notice and this permission notice shall be included in all
+** copies or substantial portions of the Software.
+
+** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+** WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+** CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+
+**
+** If you find this code snippet useful; you can tip me at this bitcoin address:
+**
+** BITCOIN TIP JAR: "1BT66EoaGySkbY9J6MugvQRhMMXDwPxPya"
+**
+
+
+
+*/
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "AutoGeometry.h"
+#include "ConvexHull.h"
+#include "ApexUsingNamespace.h"
+
+namespace HACD
+{
+
+ void fm_transform(const float matrix[16],const float v[3],float t[3]) // rotate and translate this point
+ {
+ if ( matrix )
+ {
+ float tx = (matrix[0*4+0] * v[0]) + (matrix[1*4+0] * v[1]) + (matrix[2*4+0] * v[2]) + matrix[3*4+0];
+ float ty = (matrix[0*4+1] * v[0]) + (matrix[1*4+1] * v[1]) + (matrix[2*4+1] * v[2]) + matrix[3*4+1];
+ float tz = (matrix[0*4+2] * v[0]) + (matrix[1*4+2] * v[1]) + (matrix[2*4+2] * v[2]) + matrix[3*4+2];
+ t[0] = tx;
+ t[1] = ty;
+ t[2] = tz;
+ }
+ else
+ {
+ t[0] = v[0];
+ t[1] = v[1];
+ t[2] = v[2];
+ }
+ }
+ inline float det(const float *p1,const float *p2,const float *p3)
+ {
+ return p1[0]*p2[1]*p3[2] + p2[0]*p3[1]*p1[2] + p3[0]*p1[1]*p2[2] -p1[0]*p3[1]*p2[2] - p2[0]*p1[1]*p3[2] - p3[0]*p2[1]*p1[2];
+ }
+
+
+ float fm_computeMeshVolume(const float *vertices,size_t tcount,const uint32_t *indices)
+ {
+ float volume = 0;
+
+ for (uint32_t i=0; i<tcount; i++,indices+=3)
+ {
+ const float *p1 = &vertices[ indices[0]*3 ];
+ const float *p2 = &vertices[ indices[1]*3 ];
+ const float *p3 = &vertices[ indices[2]*3 ];
+ volume+=det(p1,p2,p3); // compute the volume of the tetrahedran relative to the origin.
+ }
+
+ volume*=(1.0f/6.0f);
+ if ( volume < 0 )
+ volume*=-1;
+ return volume;
+ }
+
+
+
+ class Vec3
+ {
+ public:
+ Vec3(const float *pos)
+ {
+ x = pos[0];
+ y = pos[1];
+ z = pos[2];
+ }
+ float x;
+ float y;
+ float z;
+ };
+
+ typedef hacd::vector< Vec3 > Vec3Vector;
+
+ class MyHull : public HACD::SimpleHull, public UANS::UserAllocated
+ {
+ public:
+ MyHull(void)
+ {
+ mValidHull = false;
+ }
+
+ ~MyHull(void)
+ {
+ release();
+ }
+
+ void release(void)
+ {
+ HACD_FREE(mIndices);
+ HACD_FREE(mVertices);
+ mIndices = 0;
+ mVertices = 0;
+ mTriCount = 0;
+ mVertexCount = 0;
+ }
+
+ void addPos(const float *p)
+ {
+ Vec3 v(p);
+ mPoints.push_back(v);
+ }
+
+ float generateHull(void)
+ {
+ release();
+ if ( mPoints.size() >= 3 ) // must have at least 3 vertices to create a hull.
+ {
+ // now generate the convex hull.
+ HullDesc desc((uint32_t)mPoints.size(),&mPoints[0].x, sizeof(float)*3);
+ desc.mMaxVertices = 32;
+ desc.mSkinWidth = 0.001f;
+
+ HullLibrary h;
+ HullResult result;
+ HullError e = h.CreateConvexHull(desc,result);
+ if ( e == QE_OK )
+ {
+ mTriCount = result.mNumTriangles;
+ mIndices = (uint32_t *)HACD_ALLOC(sizeof(uint32_t)*mTriCount*3);
+ memcpy(mIndices,result.mIndices,sizeof(uint32_t)*mTriCount*3);
+ mVertexCount = result.mNumOutputVertices;
+ mVertices = (float *)HACD_ALLOC(sizeof(float)*mVertexCount*3);
+ memcpy(mVertices,result.mOutputVertices,sizeof(float)*mVertexCount*3);
+ mValidHull = true;
+ mMeshVolume = fm_computeMeshVolume( mVertices, mTriCount, mIndices ); // compute the volume of this mesh.
+ h.ReleaseResult(result);
+ }
+ }
+ return mMeshVolume;
+ }
+
+ void inherit(MyHull &c)
+ {
+ Vec3Vector::iterator i;
+ for (i=c.mPoints.begin(); i!=c.mPoints.end(); ++i)
+ {
+ mPoints.push_back( (*i) );
+ }
+ c.mPoints.clear();
+ c.mValidHull = false;
+ generateHull();
+ }
+
+ void setTransform(const HACD::SimpleBone &b,int32_t bone_index)
+ {
+ mBoneName = b.mBoneName;
+ mBoneIndex = bone_index;
+ mParentIndex = b.mParentIndex;
+ memcpy(mTransform,b.mTransform,sizeof(float)*16);
+ if ( mVertexCount )
+ {
+ for (uint32_t i=0; i<mVertexCount; i++)
+ {
+ float *vtx = &mVertices[i*3];
+ fm_transform(b.mInverseTransform,vtx,vtx); // inverse transform the point into bone relative object space
+ }
+ }
+
+ }
+
+ bool mValidHull;
+ Vec3Vector mPoints;
+ };
+
+ class MyAutoGeometry : public HACD::AutoGeometry, public UANS::UserAllocated
+ {
+ public:
+ MyAutoGeometry()
+ {
+ mHulls = 0;
+ mSimpleHulls = 0;
+ }
+
+ virtual ~MyAutoGeometry(void)
+ {
+ release();
+ }
+
+ void release(void)
+ {
+ delete []mHulls;
+ mHulls = 0;
+ HACD_FREE(mSimpleHulls);
+ mSimpleHulls = 0;
+ }
+
+#define MAX_BONE_COUNT 8
+
+ void addBone(uint32_t bone,uint32_t *bones,uint32_t &bcount)
+ {
+ HACD_ASSERT(bcount < MAX_BONE_COUNT);
+ if ( bcount < MAX_BONE_COUNT )
+ {
+ bool found = false;
+
+ for (uint32_t i=0; i<bcount; i++)
+ {
+ if ( bones[i] == bone )
+ {
+ found = true;
+ break;
+ }
+ }
+ if ( !found )
+ {
+ bones[bcount] = bone;
+ bcount++;
+ }
+ }
+ }
+
+ void addBones(const HACD::SimpleSkinnedVertex &v,uint32_t *bones,uint32_t &bcount, const HACD::SimpleBone *sbones)
+ {
+ if ( v.mWeight[0] >= sbones[v.mBone[0]].mBoneMinimalWeight) addBone(v.mBone[0],bones,bcount);
+ if ( v.mWeight[1] >= sbones[v.mBone[1]].mBoneMinimalWeight) addBone(v.mBone[1],bones,bcount);
+ if ( v.mWeight[2] >= sbones[v.mBone[2]].mBoneMinimalWeight) addBone(v.mBone[2],bones,bcount);
+ if ( v.mWeight[3] >= sbones[v.mBone[3]].mBoneMinimalWeight) addBone(v.mBone[3],bones,bcount);
+ }
+
+ void addTri(const HACD::SimpleSkinnedVertex &v1,const HACD::SimpleSkinnedVertex &v2,const HACD::SimpleSkinnedVertex &v3,const HACD::SimpleBone *sbones)
+ {
+ uint32_t bcount = 0;
+ uint32_t bones[MAX_BONE_COUNT];
+ addBones(v1,bones,bcount, sbones);
+ addBones(v2,bones,bcount, sbones);
+ addBones(v3,bones,bcount, sbones);
+ for (uint32_t i=0; i<bcount; i++)
+ {
+ addPos(v1.mPos, (int32_t)bones[i], sbones );
+ addPos(v2.mPos, (int32_t)bones[i], sbones );
+ addPos(v3.mPos, (int32_t)bones[i], sbones );
+ }
+ }
+
+ virtual HACD::SimpleHull ** createCollisionVolumes(float collapse_percentage,
+ uint32_t bone_count,
+ const HACD::SimpleBone *bones,
+ const HACD::SimpleSkinnedMesh *mesh,
+ uint32_t &geom_count)
+ {
+ release();
+ geom_count = 0;
+
+ mHulls = HACD_NEW(MyHull)[bone_count];
+
+ for (uint32_t i=0; i<bone_count; i++)
+ {
+ const HACD::SimpleBone &b = bones[i];
+ mHulls[i].setTransform(b,(int32_t)i);
+ }
+
+ uint32_t tcount = mesh->mVertexCount/3;
+
+ for (uint32_t i=0; i<tcount; i++)
+ {
+ const HACD::SimpleSkinnedVertex &v1 = mesh->mVertices[i*3+0];
+ const HACD::SimpleSkinnedVertex &v2 = mesh->mVertices[i*3+0];
+ const HACD::SimpleSkinnedVertex &v3 = mesh->mVertices[i*3+0];
+ addTri(v1,v2,v3,bones);
+ }
+
+ float totalVolume = 0;
+ for (uint32_t i=0; i<bone_count; i++)
+ {
+ totalVolume+=mHulls[i].generateHull();
+ }
+
+ // ok.. now do auto-collapse of hulls...
+#if 1
+ if ( collapse_percentage > 0 )
+ {
+ float ratio = collapse_percentage / 100.0f;
+ for (int32_t i=(int32_t)(bone_count-1); i>=0; i--)
+ {
+ MyHull &h = mHulls[i];
+ const HACD::SimpleBone &b = bones[i];
+ if ( b.mParentIndex >= 0 )
+ {
+ MyHull &parent_hull = mHulls[b.mParentIndex];
+ if ( h.mValidHull && parent_hull.mValidHull )
+ {
+ if ( h.mMeshVolume < (parent_hull.mMeshVolume*ratio) ) // if we are less than 1/3 the volume of our parent, copy our vertices to the parent..
+ {
+ parent_hull.inherit(h);
+ }
+ }
+ }
+ }
+ }
+#endif
+ for (int32_t i=0; i<(int32_t)bone_count; i++)
+ {
+ MyHull &h = mHulls[i];
+ if ( h.mValidHull )
+ geom_count++;
+ }
+
+ if ( geom_count )
+ {
+ mSimpleHulls = (HACD::SimpleHull **)HACD_ALLOC(sizeof(HACD::SimpleHull *)*geom_count);
+ int32_t index = 0;
+ for (int32_t i=0; i<(int32_t)bone_count; i++)
+ {
+ MyHull *hull = &mHulls[i];
+ if ( hull->mValidHull )
+ {
+ const HACD::SimpleBone &b = bones[i];
+ hull->setTransform(b,i);
+ mSimpleHulls[index] = hull;
+ index++;
+ }
+ }
+ }
+
+ return mSimpleHulls;
+ }
+
+ void addPos(const float *p,int32_t bone,const HACD::SimpleBone *bones)
+ {
+ switch ( bones[bone].mOption )
+ {
+ case HACD::BO_INCLUDE:
+ mHulls[bone].addPos(p);
+ break;
+ case HACD::BO_EXCLUDE:
+ break;
+ case HACD::BO_COLLAPSE:
+ {
+ while ( bone >= 0 )
+ {
+ bone = bones[bone].mParentIndex;
+ if ( bones[bone].mOption == HACD::BO_INCLUDE )
+ break;
+ else if ( bones[bone].mOption == HACD::BO_EXCLUDE )
+ {
+ bone = -1;
+ break;
+ }
+ }
+ if ( bone >= 0 )
+ {
+ mHulls[bone].addPos(p); // collapse into the parent
+ }
+ }
+ break;
+ }
+ }
+
+ virtual HACD::SimpleHull ** createCollisionVolumes(float collapse_percentage,uint32_t &geom_count)
+ {
+ HACD::SimpleHull **ret = 0;
+
+ if ( !mVertices.empty() && !mBones.empty() )
+ {
+ HACD::SimpleSkinnedMesh mesh;
+ mesh.mVertexCount = (uint32_t)mVertices.size();
+ mesh.mVertices = &mVertices[0];
+ ret = createCollisionVolumes(collapse_percentage,(uint32_t)mBones.size(),&mBones[0],&mesh,geom_count);
+ mVertices.clear();
+ mBones.clear();
+ }
+
+ return ret;
+ }
+
+ virtual void addSimpleSkinnedTriangle(const HACD::SimpleSkinnedVertex &v1,const HACD::SimpleSkinnedVertex &v2,const HACD::SimpleSkinnedVertex &v3)
+ {
+ mVertices.push_back(v1);
+ mVertices.push_back(v2);
+ mVertices.push_back(v3);
+ }
+
+ virtual void addSimpleBone(const HACD::SimpleBone &_b)
+ {
+ HACD::SimpleBone b = _b;
+ mBones.push_back(b);
+ }
+
+ void mystrlwr(char *str)
+ {
+ while ( *str )
+ {
+ char c = *str;
+ if ( c >= 'A' && c <= 'Z' )
+ {
+ c+=32;
+ *str = c;
+ }
+ str++;
+ }
+ }
+
+ virtual const char * stristr(const char *str,const char *key) // case insensitive ststr
+ {
+ HACD_ASSERT( strlen(str) < 2048 );
+ HACD_ASSERT( strlen(key) < 2048 );
+
+ char istr[2048];
+ char ikey[2048];
+
+ strncpy(istr,str,2048);
+ strncpy(ikey,key,2048);
+ mystrlwr(istr);
+ mystrlwr(ikey);
+
+ char *foo = strstr(istr,ikey);
+ if ( foo )
+ {
+ uint32_t loc = (uint32_t)(foo - istr);
+ foo = (char *)str+loc;
+ }
+
+ return foo;
+ }
+
+ private:
+ typedef hacd::vector< HACD::SimpleBone > SimpleBoneVector;
+ typedef hacd::vector< HACD::SimpleSkinnedVertex > SimpleSkinnedVertexVector;
+ SimpleBoneVector mBones;
+ SimpleSkinnedVertexVector mVertices;
+
+ MyHull *mHulls;
+ HACD::SimpleHull **mSimpleHulls;
+ };
+
+
+ HACD::AutoGeometry * createAutoGeometry()
+ {
+ HACD::MyAutoGeometry *g = HACD_NEW(HACD::MyAutoGeometry);
+ return static_cast< HACD::AutoGeometry *>(g);
+ }
+
+ void releaseAutoGeometry(HACD::AutoGeometry *g)
+ {
+ HACD::MyAutoGeometry * m = static_cast<HACD::MyAutoGeometry *>(g);
+ delete m;
+ }
+
+}; // end of namespace
diff --git a/APEX_1.4/shared/general/HACD/src/ConvexDecomposition.cpp b/APEX_1.4/shared/general/HACD/src/ConvexDecomposition.cpp
new file mode 100644
index 00000000..167e8e24
--- /dev/null
+++ b/APEX_1.4/shared/general/HACD/src/ConvexDecomposition.cpp
@@ -0,0 +1,3020 @@
+/*!
+**
+** Copyright (c) 2015 by John W. Ratcliff mailto:[email protected]
+**
+**
+** The MIT license:
+**
+** Permission is hereby granted, free of charge, to any person obtaining a copy
+** of this software and associated documentation files (the "Software"), to deal
+** in the Software without restriction, including without limitation the rights
+** to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+** copies of the Software, and to permit persons to whom the Software is furnished
+** to do so, subject to the following conditions:
+**
+** The above copyright notice and this permission notice shall be included in all
+** copies or substantial portions of the Software.
+
+** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+** WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+** CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+
+**
+** If you find this code snippet useful; you can tip me at this bitcoin address:
+**
+** BITCOIN TIP JAR: "1BT66EoaGySkbY9J6MugvQRhMMXDwPxPya"
+**
+
+
+
+*/
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <assert.h>
+#include <float.h>
+#include <math.h>
+
+#include "ConvexDecomposition.h"
+#include "ConvexHull.h"
+#include "dgConvexHull3d.h"
+
+const float FM_PI = 3.1415926535897932384626433832795028841971693993751f;
+const float FM_DEG_TO_RAD = ((2.0f * FM_PI) / 360.0f);
+
+
+typedef hacd::vector< uint32_t > UintVector;
+
+namespace CONVEX_DECOMPOSITION
+{
+class ConvexDecompInterface
+{
+public:
+ virtual void ConvexDecompResult(const ConvexResult &result) = 0;
+};
+
+
+
+class Cdesc
+{
+public:
+ ConvexDecompInterface *mCallback;
+ float mMeshVolumePercent;
+ float mMasterVolume;
+ uint32_t mMaxDepth;
+ float mConcavePercent;
+ uint32_t mOutputCount;
+ uint32_t mOutputPow2;
+ hacd::ICallback *mICallback;
+};
+
// Plane through triangle (A,B,C): writes the unit normal into n and returns
// the plane 'd' coefficient, so n[0]*X + n[1]*Y + n[2]*Z + d == 0 on the
// plane. A degenerate (near zero area) triangle produces a zero normal.
static float fm_computePlane(const float *A,const float *B,const float *C,float *n) // returns D
{
	// two edge vectors of the triangle
	const float vx = (B[0] - C[0]);
	const float vy = (B[1] - C[1]);
	const float vz = (B[2] - C[2]);

	const float wx = (A[0] - B[0]);
	const float wy = (A[1] - B[1]);
	const float wz = (A[2] - B[2]);

	// v x w gives the (unnormalized) plane normal
	const float vw_x = vy * wz - vz * wy;
	const float vw_y = vz * wx - vx * wz;
	const float vw_z = vx * wy - vy * wx;

	float mag = ::sqrtf((vw_x * vw_x) + (vw_y * vw_y) + (vw_z * vw_z));

	// guard against a degenerate triangle: zero the normal instead of dividing
	mag = ( mag < 0.000001f ) ? 0 : 1.0f/mag;

	const float x = vw_x * mag;
	const float y = vw_y * mag;
	const float z = vw_z * mag;

	n[0] = x;
	n[1] = y;
	n[2] = z;

	return 0.0f - ((x*A[0])+(y*A[1])+(z*A[2]));
}
+
// 3d dot product.
static float fm_dot(const float *p1,const float *p2)
{
	float sum = p1[0]*p2[0];
	sum += p1[1]*p2[1];
	sum += p1[2]*p2[2];
	return sum;
}
+
+
+
// True when two plane equations are identical within tolerance: the d
// coefficients must differ by less than dEpsilon and the normals' dot product
// must sit within normalEpsilon of 1. With doubleSided set, an opposed
// normal (dot near -1) also matches.
static bool fm_samePlane(const float p1[4],const float p2[4],float normalEpsilon,float dEpsilon,bool doubleSided)
{
	if ( (float) fabs(p1[3]-p2[3]) >= dEpsilon )
		return false; // -d coefficients differ by more than the tolerance

	// dot product of the normals (fm_dot inlined)
	float dot = p1[0]*p2[0]+p1[1]*p2[1]+p1[2]*p2[2];
	if ( doubleSided )
		dot = (float)fabs(dot);

	const float dmin = 1 - normalEpsilon;
	const float dmax = 1 + normalEpsilon;
	return ( dot >= dmin && dot <= dmax );
}
+
+static bool fm_isMeshCoplanar(uint32_t tcount,const uint32_t *indices,const float *vertices,bool doubleSided) // returns true if this collection of indexed triangles are co-planar!
+{
+ bool ret = true;
+
+ if ( tcount > 0 )
+ {
+ uint32_t i1 = indices[0];
+ uint32_t i2 = indices[1];
+ uint32_t i3 = indices[2];
+ const float *p1 = &vertices[i1*3];
+ const float *p2 = &vertices[i2*3];
+ const float *p3 = &vertices[i3*3];
+ float plane[4];
+ plane[3] = fm_computePlane(p1,p2,p3,plane);
+ const uint32_t *scan = &indices[3];
+ for (uint32_t i=1; i<tcount; i++)
+ {
+ i1 = *scan++;
+ i2 = *scan++;
+ i3 = *scan++;
+ p1 = &vertices[i1*3];
+ p2 = &vertices[i2*3];
+ p3 = &vertices[i3*3];
+ float _plane[4];
+ _plane[3] = fm_computePlane(p1,p2,p3,_plane);
+ if ( !fm_samePlane(plane,_plane,0.01f,0.001f,doubleSided) )
+ {
+ ret = false;
+ break;
+ }
+ }
+ }
+ return ret;
+}
+
+
// Signed distance of point p from the plane (positive on the normal's side).
static float fm_distToPlane(const float *plane,const float *p) // computes the distance of this point from the plane.
{
	float d = p[0]*plane[0];
	d += p[1]*plane[1];
	d += p[2]*plane[2];
	return d + plane[3];
}
+
// cross = a x b (cross must not alias a or b, since a/b are read after the
// first component of cross is written).
static void fm_cross(float *cross,const float *a,const float *b)
{
	const float cx = a[1]*b[2] - a[2]*b[1];
	const float cy = a[2]*b[0] - a[0]*b[2];
	const float cz = a[0]*b[1] - a[1]*b[0];
	cross[0] = cx;
	cross[1] = cy;
	cross[2] = cz;
}
+
+/* a = b - c */
+#define vector(a,b,c) \
+ (a)[0] = (b)[0] - (c)[0]; \
+ (a)[1] = (b)[1] - (c)[1]; \
+ (a)[2] = (b)[2] - (c)[2];
+
+
+
+#define innerProduct(v,q) \
+ ((v)[0] * (q)[0] + \
+ (v)[1] * (q)[1] + \
+ (v)[2] * (q)[2])
+
+#define crossProduct(a,b,c) \
+ (a)[0] = (b)[1] * (c)[2] - (c)[1] * (b)[2]; \
+ (a)[1] = (b)[2] * (c)[0] - (c)[2] * (b)[0]; \
+ (a)[2] = (b)[0] * (c)[1] - (c)[0] * (b)[1];
+
+
// Scalar triple product p1 . (p2 x p3): six times the signed volume of the
// tetrahedron spanned by the three points and the origin.
inline float det(const float *p1,const float *p2,const float *p3)
{
	const float positive = p1[0]*p2[1]*p3[2] + p2[0]*p3[1]*p1[2] + p3[0]*p1[1]*p2[2];
	const float negative = p1[0]*p3[1]*p2[2] + p2[0]*p1[1]*p3[2] + p3[0]*p2[1]*p1[2];
	return positive - negative;
}
+
+
+static float fm_computeMeshVolume(const float *vertices,uint32_t tcount,const uint32_t *indices)
+{
+ float volume = 0;
+
+ for (uint32_t i=0; i<tcount; i++,indices+=3)
+ {
+ const float *p1 = &vertices[ indices[0]*3 ];
+ const float *p2 = &vertices[ indices[1]*3 ];
+ const float *p3 = &vertices[ indices[2]*3 ];
+ volume+=det(p1,p2,p3); // compute the volume of the tetrahedran relative to the origin.
+ }
+
+ volume*=(1.0f/6.0f);
+ if ( volume < 0 )
+ volume*=-1;
+ return volume;
+}
+
+
+
+// assumes that the points are on opposite sides of the plane!
// Intersects the segment p1..p2 with the plane and writes the hit point into
// 'split'. Callers guarantee the endpoints straddle the plane, so the
// direction/normal dot product below is non-zero.
static void fm_intersectPointPlane(const float *p1,const float *p2,float *split,const float *plane)
{
	// signed distance of p1 from the plane (fm_distToPlane inlined)
	const float dp1 = p1[0]*plane[0]+p1[1]*plane[1]+p1[2]*plane[2]+plane[3];

	float dir[3];
	dir[0] = p2[0] - p1[0];
	dir[1] = p2[1] - p1[1];
	dir[2] = p2[2] - p1[2];

	const float dot1 = dir[0]*plane[0] + dir[1]*plane[1] + dir[2]*plane[2];
	const float dot2 = dp1 - plane[3];

	// algebraically t == -dp1/dot1; kept in this form to reproduce the
	// legacy floating point result bit-for-bit
	const float t = -(plane[3] + dot2 ) / dot1;

	split[0] = (dir[0]*t)+p1[0];
	split[1] = (dir[1]*t)+p1[1];
	split[2] = (dir[2]*t)+p1[2];
}
+
+
// Transform a 3d point by a column-major 4x4 matrix (rotation plus
// translation). A null matrix pointer is treated as identity (plain copy).
static void fm_transform(const float matrix[16],const float v[3],float t[3]) // rotate and translate this point
{
	if ( !matrix )
	{
		t[0] = v[0];
		t[1] = v[1];
		t[2] = v[2];
		return;
	}
	// compute into locals first so v and t may alias safely
	const float rx = (matrix[0] * v[0]) + (matrix[4] * v[1]) + (matrix[8]  * v[2]) + matrix[12];
	const float ry = (matrix[1] * v[0]) + (matrix[5] * v[1]) + (matrix[9]  * v[2]) + matrix[13];
	const float rz = (matrix[2] * v[0]) + (matrix[6] * v[1]) + (matrix[10] * v[2]) + matrix[14];
	t[0] = rx;
	t[1] = ry;
	t[2] = rz;
}
+
// Rotate a 3d vector by the 3x3 part of a column-major 4x4 matrix (the
// translation row is ignored). A null matrix pointer acts as identity.
static void fm_rotate(const float matrix[16],const float v[3],float t[3]) // rotate and translate this point
{
	if ( !matrix )
	{
		t[0] = v[0];
		t[1] = v[1];
		t[2] = v[2];
		return;
	}
	// compute into locals first so v and t may alias safely
	const float rx = (matrix[0] * v[0]) + (matrix[4] * v[1]) + (matrix[8]  * v[2]);
	const float ry = (matrix[1] * v[0]) + (matrix[5] * v[1]) + (matrix[9]  * v[2]);
	const float rz = (matrix[2] * v[0]) + (matrix[6] * v[1]) + (matrix[10] * v[2]);
	t[0] = rx;
	t[1] = ry;
	t[2] = rz;
}
+
+
+
+
// Applies the inverse of a rigid transform to 'pos': subtract the matrix
// translation, then multiply by the transposed rotation (valid because the
// inverse of a pure rotation is its transpose).
static void fm_inverseRT(const float matrix[16],const float pos[3],float t[3]) // inverse rotate translate the point.
{
	const float dx = pos[0] - matrix[12];
	const float dy = pos[1] - matrix[13];
	const float dz = pos[2] - matrix[14];

	t[0] = (matrix[0] * dx) + (matrix[1] * dy) + (matrix[2]  * dz);
	t[1] = (matrix[4] * dx) + (matrix[5] * dy) + (matrix[6]  * dz);
	t[2] = (matrix[8] * dx) + (matrix[9] * dy) + (matrix[10] * dz);
}
+
// Symmetric 3x3 eigen-solver (Householder-style tridiagonalization followed
// by QL iteration). Fill mElement with the symmetric input matrix, call
// DecrSortEigenStuff(), then read the eigenvalues from m_afDiag (largest
// first) and the matching eigenvectors from the COLUMNS of mElement.
template <class Type> class Eigen
{
public:


	// Full pipeline: tridiagonalize, iterate to convergence, sort the
	// eigenvalues in decreasing order, and fix the basis handedness.
	void DecrSortEigenStuff(void)
	{
		Tridiagonal(); //diagonalize the matrix.
		QLAlgorithm(); // QL iteration: converge eigenvalues/eigenvectors
		DecreasingSort();
		GuaranteeRotation();
	}

	// Reduces the symmetric 3x3 to tridiagonal form: the diagonal goes to
	// m_afDiag, the off-diagonal to m_afSubd, and mElement is replaced by the
	// accumulated orthogonal transform.
	// NOTE(review): the branch tests only mElement[0][2]; textbook reductions
	// usually test both off-diagonal terms -- preserved as-is.
	void Tridiagonal(void)
	{
		Type fM00 = mElement[0][0];
		Type fM01 = mElement[0][1];
		Type fM02 = mElement[0][2];
		Type fM11 = mElement[1][1];
		Type fM12 = mElement[1][2];
		Type fM22 = mElement[2][2];

		m_afDiag[0] = fM00;
		m_afSubd[2] = 0;
		if (fM02 != (Type)0.0)
		{
			Type fLength = static_cast<Type>(::sqrt(fM01*fM01+fM02*fM02));
			Type fInvLength = ((Type)1.0)/fLength;
			fM01 *= fInvLength;
			fM02 *= fInvLength;
			Type fQ = ((Type)2.0)*fM01*fM12+fM02*(fM22-fM11);
			m_afDiag[1] = fM11+fM02*fQ;
			m_afDiag[2] = fM22-fM02*fQ;
			m_afSubd[0] = fLength;
			m_afSubd[1] = fM12-fM01*fQ;
			mElement[0][0] = (Type)1.0;
			mElement[0][1] = (Type)0.0;
			mElement[0][2] = (Type)0.0;
			mElement[1][0] = (Type)0.0;
			mElement[1][1] = fM01;
			mElement[1][2] = fM02;
			mElement[2][0] = (Type)0.0;
			mElement[2][1] = fM02;
			mElement[2][2] = -fM01;
			m_bIsRotation = false;
		}
		else
		{
			// already tridiagonal: keep the matrix, transform is identity
			m_afDiag[1] = fM11;
			m_afDiag[2] = fM22;
			m_afSubd[0] = fM01;
			m_afSubd[1] = fM12;
			mElement[0][0] = (Type)1.0;
			mElement[0][1] = (Type)0.0;
			mElement[0][2] = (Type)0.0;
			mElement[1][0] = (Type)0.0;
			mElement[1][1] = (Type)1.0;
			mElement[1][2] = (Type)0.0;
			mElement[2][0] = (Type)0.0;
			mElement[2][1] = (Type)0.0;
			mElement[2][2] = (Type)1.0;
			m_bIsRotation = true;
		}
	}

	// QL iteration with implicit shifts on the tridiagonal form. Returns
	// false if any eigenvalue fails to converge within 32 iterations.
	bool QLAlgorithm(void)
	{
		const int32_t iMaxIter = 32;

		for (int32_t i0 = 0; i0 <3; i0++)
		{
			int32_t i1;
			for (i1 = 0; i1 < iMaxIter; i1++)
			{
				// find the first negligible sub-diagonal element at or after i0
				int32_t i2;
				for (i2 = i0; i2 <= (3-2); i2++)
				{
					Type fTmp = static_cast<Type>(fabs(m_afDiag[i2]) + fabs(m_afDiag[i2+1]));
					// subdiagonal negligible relative to its neighbors: split here
					if ( fabs(m_afSubd[i2]) + fTmp == fTmp )
						break;
				}
				if (i2 == i0)
				{
					break; // eigenvalue i0 has converged
				}

				// compute the implicit shift
				Type fG = (m_afDiag[i0+1] - m_afDiag[i0])/(((Type)2.0) * m_afSubd[i0]);
				Type fR = static_cast<Type>(::sqrt(fG*fG+(Type)1.0));
				if (fG < (Type)0.0)
				{
					fG = m_afDiag[i2]-m_afDiag[i0]+m_afSubd[i0]/(fG-fR);
				}
				else
				{
					fG = m_afDiag[i2]-m_afDiag[i0]+m_afSubd[i0]/(fG+fR);
				}
				// chase the bulge with a sequence of Givens rotations
				Type fSin = (Type)1.0, fCos = (Type)1.0, fP = (Type)0.0;
				for (int32_t i3 = i2-1; i3 >= i0; i3--)
				{
					Type fF = fSin*m_afSubd[i3];
					Type fB = fCos*m_afSubd[i3];
					if (fabs(fF) >= fabs(fG))
					{
						fCos = fG/fF;
						fR = static_cast<Type>(::sqrt(fCos*fCos+(Type)1.0));
						m_afSubd[i3+1] = fF*fR;
						fSin = ((Type)1.0)/fR;
						fCos *= fSin;
					}
					else
					{
						fSin = fF/fG;
						fR = static_cast<Type>(::sqrt(fSin*fSin+(Type)1.0));
						m_afSubd[i3+1] = fG*fR;
						fCos = ((Type)1.0)/fR;
						fSin *= fCos;
					}
					fG = m_afDiag[i3+1]-fP;
					fR = (m_afDiag[i3]-fG)*fSin+((Type)2.0)*fB*fCos;
					fP = fSin*fR;
					m_afDiag[i3+1] = fG+fP;
					fG = fCos*fR-fB;
					// accumulate the rotation into the eigenvector columns
					for (int32_t i4 = 0; i4 < 3; i4++)
					{
						fF = mElement[i4][i3+1];
						mElement[i4][i3+1] = fSin*mElement[i4][i3]+fCos*fF;
						mElement[i4][i3] = fCos*mElement[i4][i3]-fSin*fF;
					}
				}
				m_afDiag[i0] -= fP;
				m_afSubd[i0] = fG;
				m_afSubd[i2] = (Type)0.0;
			}
			if (i1 == iMaxIter)
			{
				return false; // no convergence within the iteration budget
			}
		}
		return true;
	}

	void DecreasingSort(void)
	{
		//sort eigenvalues in decreasing order, e[0] >= ... >= e[iSize-1]
		// (selection sort; eigenvector columns are swapped in step with the
		// eigenvalues, and each swap flips the tracked handedness)
		for (int32_t i0 = 0, i1; i0 <= 3-2; i0++)
		{
			// locate maximum eigenvalue
			i1 = i0;
			Type fMax = m_afDiag[i1];
			int32_t i2;
			for (i2 = i0+1; i2 < 3; i2++)
			{
				if (m_afDiag[i2] > fMax)
				{
					i1 = i2;
					fMax = m_afDiag[i1];
				}
			}

			if (i1 != i0)
			{
				// swap eigenvalues
				m_afDiag[i1] = m_afDiag[i0];
				m_afDiag[i0] = fMax;
				// swap eigenvectors
				for (i2 = 0; i2 < 3; i2++)
				{
					Type fTmp = mElement[i2][i0];
					mElement[i2][i0] = mElement[i2][i1];
					mElement[i2][i1] = fTmp;
					m_bIsRotation = !m_bIsRotation;
				}
			}
		}
	}


	// If the accumulated eigenvector basis ended up left-handed, negate the
	// first column so the result is a proper rotation.
	void GuaranteeRotation(void)
	{
		if (!m_bIsRotation)
		{
			// change sign on the first column
			for (int32_t iRow = 0; iRow <3; iRow++)
			{
				mElement[iRow][0] = -mElement[iRow][0];
			}
		}
	}

	Type mElement[3][3]; // in: symmetric matrix; out: eigenvectors in columns
	Type m_afDiag[3];    // out: eigenvalues (decreasing after DecreasingSort)
	Type m_afSubd[3];    // working storage: sub-diagonal terms
	bool m_bIsRotation;  // tracks the handedness of the accumulated basis
};
+
+
// Least-squares plane fit to a (optionally weighted) point cloud. Pass 1
// computes the weighted centroid; pass 2 builds the weighted covariance
// matrix about it; the eigenvector of the smallest eigenvalue becomes the
// plane normal. 'vstride'/'wstride' are BYTE strides (the buffers are walked
// as char*). 'weights' may be NULL (all weights 1). Always returns true.
// NOTE(review): divides by the total weight -- vcount==0 or all-zero weights
// divide by zero; callers must supply real data.
static bool fm_computeBestFitPlane(uint32_t vcount,
					 const float *points,
					 uint32_t vstride,
					 const float *weights,
					 uint32_t wstride,
					 float *plane)
{
	bool ret = false;

	float kOrigin[3] = { 0, 0, 0 };

	float wtotal = 0;

	// pass 1: weighted centroid of the cloud
	{
		const char *source = (const char *) points;
		const char *wsource = (const char *) weights;

		for (uint32_t i=0; i<vcount; i++)
		{

			const float *p = (const float *) source;

			float w = 1;

			if ( wsource )
			{
				const float *ws = (const float *) wsource;
				w = *ws; // per-point weight
				wsource+=wstride;
			}

			kOrigin[0]+=p[0]*w;
			kOrigin[1]+=p[1]*w;
			kOrigin[2]+=p[2]*w;

			wtotal+=w;

			source+=vstride;
		}
	}

	float recip = 1.0f / wtotal; // reciprocol of total weighting

	kOrigin[0]*=recip;
	kOrigin[1]*=recip;
	kOrigin[2]*=recip;

	// pass 2: weighted covariance sums about the centroid
	float fSumXX=0;
	float fSumXY=0;
	float fSumXZ=0;

	float fSumYY=0;
	float fSumYZ=0;
	float fSumZZ=0;


	{
		const char *source = (const char *) points;
		const char *wsource = (const char *) weights;

		for (uint32_t i=0; i<vcount; i++)
		{

			const float *p = (const float *) source;

			float w = 1;

			if ( wsource )
			{
				const float *ws = (const float *) wsource;
				w = *ws; // per-point weight
				wsource+=wstride;
			}

			float kDiff[3];

			kDiff[0] = w*(p[0] - kOrigin[0]); // apply vertex weighting!
			kDiff[1] = w*(p[1] - kOrigin[1]);
			kDiff[2] = w*(p[2] - kOrigin[2]);

			fSumXX+= kDiff[0] * kDiff[0]; // sum of the squares of the differences.
			fSumXY+= kDiff[0] * kDiff[1]; // sum of the cross products of the differences.
			fSumXZ+= kDiff[0] * kDiff[2];

			fSumYY+= kDiff[1] * kDiff[1];
			fSumYZ+= kDiff[1] * kDiff[2];
			fSumZZ+= kDiff[2] * kDiff[2];


			source+=vstride;
		}
	}

	fSumXX *= recip;
	fSumXY *= recip;
	fSumXZ *= recip;
	fSumYY *= recip;
	fSumYZ *= recip;
	fSumZZ *= recip;

	// setup the eigensolver with the symmetric covariance matrix
	Eigen<float> kES;

	kES.mElement[0][0] = fSumXX;
	kES.mElement[0][1] = fSumXY;
	kES.mElement[0][2] = fSumXZ;

	kES.mElement[1][0] = fSumXY;
	kES.mElement[1][1] = fSumYY;
	kES.mElement[1][2] = fSumYZ;

	kES.mElement[2][0] = fSumXZ;
	kES.mElement[2][1] = fSumYZ;
	kES.mElement[2][2] = fSumZZ;

	// compute eigenstuff, smallest eigenvalue is in last position
	kES.DecrSortEigenStuff();

	// eigenvector of the smallest eigenvalue = direction of least variance
	// = plane normal (column 2 after the decreasing sort)
	float kNormal[3];

	kNormal[0] = kES.mElement[0][2];
	kNormal[1] = kES.mElement[1][2];
	kNormal[2] = kES.mElement[2][2];

	// the minimum energy
	plane[0] = kNormal[0];
	plane[1] = kNormal[1];
	plane[2] = kNormal[2];

	// d coefficient places the plane through the weighted centroid
	plane[3] = 0 - fm_dot(kNormal,kOrigin);

	ret = true;

	return ret;
}
+
+
+
+
+// computes the OBB for this set of points relative to this transform matrix.
+void computeOBB(uint32_t vcount,const float *points,uint32_t pstride,float *sides,float *matrix)
+{
+ const char *src = (const char *) points;
+
+ float bmin[3] = { 1e9, 1e9, 1e9 };
+ float bmax[3] = { -1e9, -1e9, -1e9 };
+
+ for (uint32_t i=0; i<vcount; i++)
+ {
+ const float *p = (const float *) src;
+ float t[3];
+
+ fm_inverseRT(matrix, p, t ); // inverse rotate translate
+
+ if ( t[0] < bmin[0] ) bmin[0] = t[0];
+ if ( t[1] < bmin[1] ) bmin[1] = t[1];
+ if ( t[2] < bmin[2] ) bmin[2] = t[2];
+
+ if ( t[0] > bmax[0] ) bmax[0] = t[0];
+ if ( t[1] > bmax[1] ) bmax[1] = t[1];
+ if ( t[2] > bmax[2] ) bmax[2] = t[2];
+
+ src+=pstride;
+ }
+
+ float center[3];
+
+ sides[0] = bmax[0]-bmin[0];
+ sides[1] = bmax[1]-bmin[1];
+ sides[2] = bmax[2]-bmin[2];
+
+ center[0] = sides[0]*0.5f+bmin[0];
+ center[1] = sides[1]*0.5f+bmin[1];
+ center[2] = sides[2]*0.5f+bmin[2];
+
+ float ocenter[3];
+
+ fm_rotate(matrix,center,ocenter);
+
+ matrix[12]+=ocenter[0];
+ matrix[13]+=ocenter[1];
+ matrix[14]+=ocenter[2];
+
+}
+
+// Reference, from Stan Melax in Game Gems I
+// Quaternion q;
+// vector3 c = CrossProduct(v0,v1);
+// float d = DotProduct(v0,v1);
+// float s = (float)::sqrt((1+d)*2);
+// q.x = c.x / s;
+// q.y = c.y / s;
+// q.z = c.z / s;
+// q.w = s /2.0f;
+// return q;
// Quaternion rotating unit vector v0 onto unit vector v1 (Stan Melax's
// shortest-arc construction). Exactly opposed vectors (dot <= -1) fall back
// to a 180 degree rotation about the X axis.
static void fm_rotationArc(const float *v0,const float *v1,float *quat)
{
	// cross and dot products inlined (fm_cross / fm_dot equivalents)
	float cross[3];
	cross[0] = v0[1]*v1[2] - v0[2]*v1[1];
	cross[1] = v0[2]*v1[0] - v0[0]*v1[2];
	cross[2] = v0[0]*v1[1] - v0[1]*v1[0];
	const float d = v0[0]*v1[0]+v0[1]*v1[1]+v0[2]*v1[2];

	if(d<=-1.0f) // 180 about x axis
	{
		quat[0] = 1.0f;
		quat[1] = quat[2] = quat[3] = 0.0f;
		return;
	}

	const float s = ::sqrtf((1+d)*2);
	const float recip = 1.0f / s;

	quat[0] = cross[0] * recip;
	quat[1] = cross[1] * recip;
	quat[2] = cross[2] * recip;
	quat[3] = s * 0.5f;
}
+
// Writes the xyz translation into the translation row of a column-major
// 4x4 matrix; the rotation part is untouched.
static void fm_setTranslation(const float *translation,float *matrix)
{
	for (int axis=0; axis<3; axis++)
	{
		matrix[12+axis] = translation[axis];
	}
}
+
+
// Converts euler angles (radians) to a quaternion stored as (x,y,z,w).
static void fm_eulerToQuat(float roll,float pitch,float yaw,float *quat) // convert euler angles to quaternion.
{
	// half-angle trig terms
	roll *= 0.5f;
	pitch *= 0.5f;
	yaw *= 0.5f;

	const float cr = ::cosf(roll);
	const float cp = ::cosf(pitch);
	const float cy = ::cosf(yaw);

	const float sr = ::sinf(roll);
	const float sp = ::sinf(pitch);
	const float sy = ::sinf(yaw);

	const float cpcy = cp * cy;
	const float spsy = sp * sy;
	const float spcy = sp * cy;
	const float cpsy = cp * sy;

	quat[0] = ( sr * cpcy - cr * spsy); // x
	quat[1] = ( cr * spcy + sr * cpsy); // y
	quat[2] = ( cr * cpsy - sr * spcy); // z
	quat[3] = cr * cpcy + sr * spsy;    // w
}
+
+
// Expands an (x,y,z,w) quaternion into a column-major 4x4 rotation matrix;
// the translation row/column are zeroed and matrix[15] is set to 1.
static void fm_quatToMatrix(const float *quat,float *matrix) // convert quaterinion rotation to matrix, zeros out the translation component.
{
	// products of quaternion components
	const float xx = quat[0]*quat[0];
	const float yy = quat[1]*quat[1];
	const float zz = quat[2]*quat[2];
	const float xy = quat[0]*quat[1];
	const float xz = quat[0]*quat[2];
	const float yz = quat[1]*quat[2];
	const float wx = quat[3]*quat[0];
	const float wy = quat[3]*quat[1];
	const float wz = quat[3]*quat[2];

	// column 0
	matrix[0]  = 1 - 2 * ( yy + zz );
	matrix[1]  = 2 * ( xy + wz );
	matrix[2]  = 2 * ( xz - wy );

	// column 1
	matrix[4]  = 2 * ( xy - wz );
	matrix[5]  = 1 - 2 * ( xx + zz );
	matrix[6]  = 2 * ( yz + wx );

	// column 2
	matrix[8]  = 2 * ( xz + wy );
	matrix[9]  = 2 * ( yz - wx );
	matrix[10] = 1 - 2 * ( xx + yy );

	// zero translation, unit homogeneous coordinate
	matrix[12] = matrix[13] = matrix[14] = 0.0f;
	matrix[3]  = matrix[7]  = matrix[11] = 0.0f;
	matrix[15] = 1.0f;
}
+
// 4x4 matrix product pM = pA * pB (column-major layout, same row-times-column
// sums as the legacy unrolled version). Results are staged in a local buffer
// first so pM may alias pA or pB.
static void fm_matrixMultiply(const float *pA,const float *pB,float *pM)
{
	float out[16];

	for (int r = 0; r < 4; r++)
	{
		for (int c = 0; c < 4; c++)
		{
			out[r*4+c] = pA[r*4+0] * pB[0*4+c]
			           + pA[r*4+1] * pB[1*4+c]
			           + pA[r*4+2] * pB[2*4+c]
			           + pA[r*4+3] * pB[3*4+c];
		}
	}

	memcpy(pM,out,sizeof(out));
}
+
+
+
+
static void fm_planeToMatrix(const float *plane,float *matrix) // convert a plane equation to a 4x4 rotation matrix
{
	// Build the rotation that carries the +Y reference axis onto the plane
	// normal (plane[0..2]), then convert that quaternion to a matrix.
	float ref[3] = { 0, 1, 0 };
	float quat[4];
	fm_rotationArc(ref,plane,quat);
	fm_quatToMatrix(quat,matrix);
	// Place the frame's origin on the plane: offset -plane[3] along the
	// pre-rotation Y axis, transformed into world space.
	float origin[3] = { 0, -plane[3], 0 };
	float center[3];
	fm_transform(matrix,origin,center);
	fm_setTranslation(center,matrix);
}
+
+
// Compute an approximate best-fit oriented bounding box for a point cloud.
// Seeds the OBB orientation from the best-fit plane; when 'bruteForce' is set,
// additionally tries rotations about the box's local Y axis in 10-degree steps
// and keeps whichever orientation yields the smallest volume.
// 'sides' receives the box extents, 'matrix' the 4x4 box-to-world transform.
static void fm_computeBestFitOBB(uint32_t vcount,
								 const float *points,
								 uint32_t pstride,
								 float *sides,
								 float *matrix,
								 bool bruteForce)
{
	float plane[4];
	fm_computeBestFitPlane(vcount,points,pstride,0,0,plane);
	fm_planeToMatrix(plane,matrix);
	computeOBB( vcount, points, pstride, sides, matrix );

	// Keep the initial orientation as the reference for the rotation sweep.
	float refmatrix[16];
	memcpy(refmatrix,matrix,16*sizeof(float));

	float volume = sides[0]*sides[1]*sides[2];
	if ( bruteForce )
	{
		for (float a=10; a<180; a+=10) // 180..360 are mirror-symmetric, so not tested
		{
			float quat[4];
			fm_eulerToQuat(0,a*FM_DEG_TO_RAD,0,quat);
			float temp[16];
			float pmatrix[16];
			fm_quatToMatrix(quat,temp);
			fm_matrixMultiply(temp,refmatrix,pmatrix);
			float psides[3];
			computeOBB( vcount, points, pstride, psides, pmatrix );
			float v = psides[0]*psides[1]*psides[2];
			if ( v < volume ) // smaller volume == tighter fit; keep it
			{
				volume = v;
				memcpy(matrix,pmatrix,sizeof(float)*16);
				sides[0] = psides[0];
				sides[1] = psides[1];
				sides[2] = psides[2];
			}
		}
	}
}
+
// Minimal axis-aligned 3d box: just a min corner and a max corner.
template <class T> class Rect3d
{
public:
	Rect3d(void) { }; // corners intentionally left uninitialized

	// Construct directly from explicit min/max corner arrays.
	Rect3d(const T *bmin,const T *bmax)
	{
		SetMin(bmin);
		SetMax(bmax);
	}

	void SetMin(const T *bmin)
	{
		for (int i = 0; i < 3; ++i)
		{
			mMin[i] = bmin[i];
		}
	}

	void SetMax(const T *bmax)
	{
		for (int i = 0; i < 3; ++i)
		{
			mMax[i] = bmax[i];
		}
	}

	void SetMin(T x,T y,T z)
	{
		mMin[0] = x;
		mMin[1] = y;
		mMin[2] = z;
	}

	void SetMax(T x,T y,T z)
	{
		mMax[0] = x;
		mMax[1] = y;
		mMax[2] = z;
	}

	T mMin[3];
	T mMax[3];
};
+
+void splitRect(uint32_t axis,
+ const Rect3d<float> &source,
+ Rect3d<float> &b1,
+ Rect3d<float> &b2,
+ const float *midpoint)
+{
+ switch ( axis )
+ {
+ case 0:
+ b1.SetMin(source.mMin);
+ b1.SetMax( midpoint[0], source.mMax[1], source.mMax[2] );
+
+ b2.SetMin( midpoint[0], source.mMin[1], source.mMin[2] );
+ b2.SetMax(source.mMax);
+
+ break;
+ case 1:
+ b1.SetMin(source.mMin);
+ b1.SetMax( source.mMax[0], midpoint[1], source.mMax[2] );
+
+ b2.SetMin( source.mMin[0], midpoint[1], source.mMin[2] );
+ b2.SetMax(source.mMax);
+
+ break;
+ case 2:
+ b1.SetMin(source.mMin);
+ b1.SetMax( source.mMax[0], source.mMax[1], midpoint[2] );
+
+ b2.SetMin( source.mMin[0], source.mMin[1], midpoint[2] );
+ b2.SetMax(source.mMax);
+
+ break;
+ }
+}
+
+
+
// Compute a plane that splits the point cloud roughly in half: fit an OBB,
// pick its longest local axis, and build the plane through the box center
// perpendicular to that axis (expressed in world space via the OBB matrix).
// tcount/indices are accepted for interface compatibility but unused.
// Always returns true.
static bool fm_computeSplitPlane(uint32_t vcount,
								 const float *vertices,
								 uint32_t /* tcount */,
								 const uint32_t * /* indices */,
								 float *plane)
{

	float sides[3];
	float matrix[16];

	fm_computeBestFitOBB( vcount, vertices, sizeof(float)*3, sides, matrix, false );

	// Box bounds in the OBB's local frame, centered at the origin.
	float bmax[3];
	float bmin[3];

	bmax[0] = sides[0]*0.5f;
	bmax[1] = sides[1]*0.5f;
	bmax[2] = sides[2]*0.5f;

	bmin[0] = -bmax[0];
	bmin[1] = -bmax[1];
	bmin[2] = -bmax[2];


	float dx = sides[0];
	float dy = sides[1];
	float dz = sides[2];


	// Split across the longest extent.
	uint32_t axis = 0;

	if ( dy > dx )
	{
		axis = 1;
	}

	if ( dz > dx && dz > dy )
	{
		axis = 2;
	}

	// Three non-collinear points defining the split plane, starting at the
	// box center.
	float p1[3];
	float p2[3];
	float p3[3];

	p3[0] = p2[0] = p1[0] = bmin[0] + dx*0.5f;
	p3[1] = p2[1] = p1[1] = bmin[1] + dy*0.5f;
	p3[2] = p2[2] = p1[2] = bmin[2] + dz*0.5f;

	Rect3d<float> b(bmin,bmax);

	Rect3d<float> b1,b2;

	splitRect(axis,b,b1,b2,p1);


	// Move p2/p3 away from the center within the plane perpendicular to
	// 'axis', choosing the longer of the two remaining extents for p3.
	switch ( axis )
	{
		case 0:
			p2[1] = bmin[1];
			p2[2] = bmin[2];

			if ( dz > dy )
			{
				p3[1] = bmax[1];
				p3[2] = bmin[2];
			}
			else
			{
				p3[1] = bmin[1];
				p3[2] = bmax[2];
			}

			break;
		case 1:
			p2[0] = bmin[0];
			p2[2] = bmin[2];

			if ( dx > dz )
			{
				p3[0] = bmax[0];
				p3[2] = bmin[2];
			}
			else
			{
				p3[0] = bmin[0];
				p3[2] = bmax[2];
			}

			break;
		case 2:
			p2[0] = bmin[0];
			p2[1] = bmin[1];

			if ( dx > dy )
			{
				p3[0] = bmax[0];
				p3[1] = bmin[1];
			}
			else
			{
				p3[0] = bmin[0];
				p3[1] = bmax[1];
			}

			break;
	}

	// Bring the three plane points from OBB-local space back into world space
	// and derive the plane equation from them.
	float tp1[3];
	float tp2[3];
	float tp3[3];

	fm_transform(matrix,p1,tp1);
	fm_transform(matrix,p2,tp2);
	fm_transform(matrix,p3,tp3);

	plane[3] = fm_computePlane(tp1,tp2,tp3,plane);

	return true;

}
+
+
+
#define FM_DEFAULT_GRANULARITY 0.001f // 1 millimeter is the default granularity

// Abstract vertex-welding interface: maps raw float/double positions to
// stable indices, merging positions that fall within the welding granularity.
class fm_VertexIndex
{
public:
	virtual uint32_t getIndex(const float pos[3],bool &newPos) = 0;  // get welded index for this float vector[3]
	virtual uint32_t getIndex(const double pos[3],bool &newPos) = 0; // get welded index for this double vector[3]
	virtual const float * getVerticesFloat(void) const = 0;
	virtual const double * getVerticesDouble(void) const = 0;
	virtual const float * getVertexFloat(uint32_t index) const = 0;
	virtual const double * getVertexDouble(uint32_t index) const = 0;
	virtual uint32_t getVcount(void) const = 0;
	virtual bool isDouble(void) const = 0;
	virtual bool saveAsObj(const char *fname,uint32_t tcount,uint32_t *indices) = 0;
};

//static fm_VertexIndex * fm_createVertexIndex(double granularity,bool snapToGrid); // create an indexed vertex system for doubles
static void fm_releaseVertexIndex(fm_VertexIndex *vindex);
+
+
+
class KdTreeNode;

typedef hacd::vector< KdTreeNode * > KdTreeNodeVector;

// Axis labels used to cycle X -> Y -> Z through the kd-tree levels.
enum Axes
{
	X_AXIS = 0,
	Y_AXIS = 1,
	Z_AXIS = 2
};

// One search result: the node found and its squared distance from the query.
class KdTreeFindNode
{
public:
	KdTreeFindNode(void)
	{
		mNode = 0;
		mDistance = 0;
	}
	KdTreeNode *mNode;
	double mDistance; // squared distance to the query point
};

// Position lookup callback: lets nodes store only an index and resolve the
// actual coordinates through the owning container.
class KdTreeInterface
{
public:
	virtual const double * getPositionDouble(uint32_t index) const = 0;
	virtual const float * getPositionFloat(uint32_t index) const = 0;
};
+
// A single kd-tree node. Nodes hold only a vertex index; coordinates are
// resolved through KdTreeInterface. The splitting axis is implicit in the
// tree depth (cycling X -> Y -> Z), so it is passed down every call rather
// than stored. Float and double code paths are duplicated deliberately.
class KdTreeNode
{
public:
	KdTreeNode(void)
	{
		mIndex = 0;
		mLeft = 0;
		mRight = 0;
	}

	KdTreeNode(uint32_t index)
	{
		mIndex = index;
		mLeft = 0;
		mRight = 0;
	};

	~KdTreeNode(void)
	{
	}


	// Insert 'node' below this one, comparing on 'dim' and recursing with the
	// next axis in the X/Y/Z cycle. Ties (<=) go to the left subtree.
	void addDouble(KdTreeNode *node,Axes dim,const KdTreeInterface *iface)
	{
		const double *nodePosition = iface->getPositionDouble( node->mIndex );
		const double *position = iface->getPositionDouble( mIndex );
		switch ( dim )
		{
			case X_AXIS:
				if ( nodePosition[0] <= position[0] )
				{
					if ( mLeft )
						mLeft->addDouble(node,Y_AXIS,iface);
					else
						mLeft = node;
				}
				else
				{
					if ( mRight )
						mRight->addDouble(node,Y_AXIS,iface);
					else
						mRight = node;
				}
				break;
			case Y_AXIS:
				if ( nodePosition[1] <= position[1] )
				{
					if ( mLeft )
						mLeft->addDouble(node,Z_AXIS,iface);
					else
						mLeft = node;
				}
				else
				{
					if ( mRight )
						mRight->addDouble(node,Z_AXIS,iface);
					else
						mRight = node;
				}
				break;
			case Z_AXIS:
				if ( nodePosition[2] <= position[2] )
				{
					if ( mLeft )
						mLeft->addDouble(node,X_AXIS,iface);
					else
						mLeft = node;
				}
				else
				{
					if ( mRight )
						mRight->addDouble(node,X_AXIS,iface);
					else
						mRight = node;
				}
				break;
		}

	}


	// Float twin of addDouble; identical structure.
	void addFloat(KdTreeNode *node,Axes dim,const KdTreeInterface *iface)
	{
		const float *nodePosition = iface->getPositionFloat( node->mIndex );
		const float *position = iface->getPositionFloat( mIndex );
		switch ( dim )
		{
			case X_AXIS:
				if ( nodePosition[0] <= position[0] )
				{
					if ( mLeft )
						mLeft->addFloat(node,Y_AXIS,iface);
					else
						mLeft = node;
				}
				else
				{
					if ( mRight )
						mRight->addFloat(node,Y_AXIS,iface);
					else
						mRight = node;
				}
				break;
			case Y_AXIS:
				if ( nodePosition[1] <= position[1] )
				{
					if ( mLeft )
						mLeft->addFloat(node,Z_AXIS,iface);
					else
						mLeft = node;
				}
				else
				{
					if ( mRight )
						mRight->addFloat(node,Z_AXIS,iface);
					else
						mRight = node;
				}
				break;
			case Z_AXIS:
				if ( nodePosition[2] <= position[2] )
				{
					if ( mLeft )
						mLeft->addFloat(node,X_AXIS,iface);
					else
						mLeft = node;
				}
				else
				{
					if ( mRight )
						mRight->addFloat(node,X_AXIS,iface);
					else
						mRight = node;
				}
				break;
			}

	}


	uint32_t getIndex(void) const { return mIndex; };

	// Radius search (double): collects up to 'maxObjects' nodes within
	// 'radius' of 'pos' into 'found', kept sorted by ascending squared
	// distance. 'count' is both the running result count and the in/out
	// cursor for the caller.
	void search(Axes axis,const double *pos,double radius,uint32_t &count,uint32_t maxObjects,KdTreeFindNode *found,const KdTreeInterface *iface)
	{

		const double *position = iface->getPositionDouble(mIndex);

		double dx = pos[0] - position[0];
		double dy = pos[1] - position[1];
		double dz = pos[2] - position[2];

		KdTreeNode *search1 = 0;
		KdTreeNode *search2 = 0;

		// Pick which child contains the query (search1) and whether the
		// opposite child could still hold hits within the radius (search2).
		switch ( axis )
		{
			case X_AXIS:
				if ( dx <= 0 )	   // JWR  if we are to the left
				{
					search1 = mLeft; // JWR  then search to the left
					if ( -dx < radius )	// JWR  if distance to the right is less than our search radius, continue on the right as well.
						search2 = mRight;
				}
				else
				{
					search1 = mRight; // JWR  ok, we go down the left tree
					if ( dx < radius ) // JWR  if the distance from the right is less than our search radius
						search2 = mLeft;
				}
				axis = Y_AXIS;
				break;
			case Y_AXIS:
				if ( dy <= 0 )
				{
					search1 = mLeft;
					if ( -dy < radius )
						search2 = mRight;
				}
				else
				{
					search1 = mRight;
					if ( dy < radius )
						search2 = mLeft;
				}
				axis = Z_AXIS;
				break;
			case Z_AXIS:
				if ( dz <= 0 )
				{
					search1 = mLeft;
					if ( -dz < radius )
						search2 = mRight;
				}
				else
				{
					search1 = mRight;
					if ( dz < radius )
						search2 = mLeft;
				}
				axis = X_AXIS;
				break;
		}

		double r2 = radius*radius;
		double m  = dx*dx+dy*dy+dz*dz; // squared distance from query to this node

		if ( m < r2 )
		{
			// Insert this node into 'found', keeping it sorted by distance.
			// count == 0 and count == 1 are special-cased; larger counts use
			// an insertion-sort shift.
			switch ( count )
			{
				case 0:
					found[count].mNode = this;
					found[count].mDistance = m;
					break;
				case 1:
					if ( m < found[0].mDistance )
					{
						if ( maxObjects == 1 )
						{
							found[0].mNode = this;
							found[0].mDistance = m;
						}
						else
						{
							found[1] = found[0];
							found[0].mNode = this;
							found[0].mDistance = m;
						}
					}
					else if ( maxObjects > 1)
					{
						found[1].mNode = this;
						found[1].mDistance = m;
					}
					break;
				default:
					{
						bool inserted = false;

						for (uint32_t i=0; i<count; i++)
						{
							if ( m < found[i].mDistance ) // if this one is closer than a pre-existing one...
							{
								// insertion sort...
								uint32_t scan = count;
								if ( scan >= maxObjects ) scan=maxObjects-1;
								for (uint32_t j=scan; j>i; j--)
								{
									found[j] = found[j-1];
								}
								found[i].mNode = this;
								found[i].mDistance = m;
								inserted = true;
								break;
							}
						}

						if ( !inserted && count < maxObjects )
						{
							found[count].mNode = this;
							found[count].mDistance = m;
						}
					}
					break;
			}
			count++;
			if ( count > maxObjects )
			{
				count = maxObjects; // clamp: list is full, farthest hit dropped
			}
		}


		if ( search1 )
			search1->search( axis, pos,radius, count, maxObjects, found, iface);

		if ( search2 )
			search2->search( axis, pos,radius, count, maxObjects, found, iface);

	}

	// Float twin of the double search above; identical structure.
	void search(Axes axis,const float *pos,float radius,uint32_t &count,uint32_t maxObjects,KdTreeFindNode *found,const KdTreeInterface *iface)
	{

		const float *position = iface->getPositionFloat(mIndex);

		float dx = pos[0] - position[0];
		float dy = pos[1] - position[1];
		float dz = pos[2] - position[2];

		KdTreeNode *search1 = 0;
		KdTreeNode *search2 = 0;

		switch ( axis )
		{
			case X_AXIS:
				if ( dx <= 0 )	   // JWR  if we are to the left
				{
					search1 = mLeft; // JWR  then search to the left
					if ( -dx < radius )	// JWR  if distance to the right is less than our search radius, continue on the right as well.
						search2 = mRight;
				}
				else
				{
					search1 = mRight; // JWR  ok, we go down the left tree
					if ( dx < radius ) // JWR  if the distance from the right is less than our search radius
						search2 = mLeft;
				}
				axis = Y_AXIS;
				break;
			case Y_AXIS:
				if ( dy <= 0 )
				{
					search1 = mLeft;
					if ( -dy < radius )
						search2 = mRight;
				}
				else
				{
					search1 = mRight;
					if ( dy < radius )
						search2 = mLeft;
				}
				axis = Z_AXIS;
				break;
			case Z_AXIS:
				if ( dz <= 0 )
				{
					search1 = mLeft;
					if ( -dz < radius )
						search2 = mRight;
				}
				else
				{
					search1 = mRight;
					if ( dz < radius )
						search2 = mLeft;
				}
				axis = X_AXIS;
				break;
		}

		float r2 = radius*radius;
		float m  = dx*dx+dy*dy+dz*dz;

		if ( m < r2 )
		{
			switch ( count )
			{
				case 0:
					found[count].mNode = this;
					found[count].mDistance = m;
					break;
				case 1:
					if ( m < found[0].mDistance )
					{
						if ( maxObjects == 1 )
						{
							found[0].mNode = this;
							found[0].mDistance = m;
						}
						else
						{
							found[1] = found[0];
							found[0].mNode = this;
							found[0].mDistance = m;
						}
					}
					else if ( maxObjects > 1)
					{
						found[1].mNode = this;
						found[1].mDistance = m;
					}
					break;
				default:
					{
						bool inserted = false;

						for (uint32_t i=0; i<count; i++)
						{
							if ( m < found[i].mDistance ) // if this one is closer than a pre-existing one...
							{
								// insertion sort...
								uint32_t scan = count;
								if ( scan >= maxObjects ) scan=maxObjects-1;
								for (uint32_t j=scan; j>i; j--)
								{
									found[j] = found[j-1];
								}
								found[i].mNode = this;
								found[i].mDistance = m;
								inserted = true;
								break;
							}
						}

						if ( !inserted && count < maxObjects )
						{
							found[count].mNode = this;
							found[count].mDistance = m;
						}
					}
					break;
			}
			count++;
			if ( count > maxObjects )
			{
				count = maxObjects;
			}
		}


		if ( search1 )
			search1->search( axis, pos,radius, count, maxObjects, found, iface);

		if ( search2 )
			search2->search( axis, pos,radius, count, maxObjects, found, iface);

	}

private:

	void setLeft(KdTreeNode *left) { mLeft = left; };
	void setRight(KdTreeNode *right) { mRight = right; };

	KdTreeNode *getLeft(void)         { return mLeft; }
	KdTreeNode *getRight(void)        { return mRight; }

	uint32_t	mIndex;
	KdTreeNode *mLeft;
	KdTreeNode *mRight;
};
+
+
#define MAX_BUNDLE_SIZE 1024  // 1024 nodes at a time, to minimize memory allocation and guarentee that pointers are persistent.

// Slab allocator for KdTreeNode: nodes are handed out from a fixed in-place
// array, so node pointers never move; bundles form a singly-linked list.
class KdTreeNodeBundle : public UANS::UserAllocated
{
public:

	KdTreeNodeBundle(void)
	{
		mNext = 0;
		mIndex = 0;
	}

	bool isFull(void) const
	{
		return (bool)( mIndex == MAX_BUNDLE_SIZE );
	}

	// Returns the next unused slot; caller placement-constructs into it.
	KdTreeNode * getNextNode(void)
	{
		assert(mIndex<MAX_BUNDLE_SIZE);
		KdTreeNode *ret = &mNodes[mIndex];
		mIndex++;
		return ret;
	}

	KdTreeNodeBundle  *mNext;  // next slab in the chain
	uint32_t		   mIndex; // number of slots already handed out
	KdTreeNode		   mNodes[MAX_BUNDLE_SIZE];
};
+
+
typedef hacd::vector< double > DoubleVector;
typedef hacd::vector< float >  FloatVector;

// Kd-tree over a growing point set. Operates in either float or double mode
// (set once via setUseDouble); the unused precision's methods assert.
// Vertex storage lives here; nodes only hold indices into it.
class KdTree : public KdTreeInterface, public UANS::UserAllocated
{
public:
	KdTree(void)
	{
		mRoot = 0;
		mBundle = 0;
		mVcount = 0;
		mUseDouble = false;
	}

	virtual ~KdTree(void)
	{
		reset();
	}

	const double * getPositionDouble(uint32_t index) const
	{
		assert( mUseDouble );
		assert ( index < mVcount );
		return  &mVerticesDouble[index*3];
	}

	const float * getPositionFloat(uint32_t index) const
	{
		assert( !mUseDouble );
		assert ( index < mVcount );
		return  &mVerticesFloat[index*3];
	}

	// Collect up to maxObjects points within 'radius' of 'pos'; returns the
	// number written into 'found' (sorted nearest-first).
	uint32_t search(const double *pos,double radius,uint32_t maxObjects,KdTreeFindNode *found) const
	{
		assert( mUseDouble );
		if ( !mRoot )	return 0;
		uint32_t count = 0;
		mRoot->search(X_AXIS,pos,radius,count,maxObjects,found,this);
		return count;
	}

	uint32_t search(const float *pos,float radius,uint32_t maxObjects,KdTreeFindNode *found) const
	{
		assert( !mUseDouble );
		if ( !mRoot )	return 0;
		uint32_t count = 0;
		mRoot->search(X_AXIS,pos,radius,count,maxObjects,found,this);
		return count;
	}

	// Drop all vertices and free every node bundle. Nodes themselves need no
	// destruction; they live inside the bundles.
	void reset(void)
	{
		mRoot = 0;
		mVerticesDouble.clear();
		mVerticesFloat.clear();
		KdTreeNodeBundle *bundle = mBundle;
		while ( bundle )
		{
			KdTreeNodeBundle *next = bundle->mNext;
			delete bundle;
			bundle = next;
		}
		mBundle = 0;
		mVcount = 0;
	}

	// Append a point and insert its node; returns the new point's index.
	uint32_t add(double x,double y,double z)
	{
		assert(mUseDouble);
		uint32_t ret = mVcount;
		mVerticesDouble.push_back(x);
		mVerticesDouble.push_back(y);
		mVerticesDouble.push_back(z);
		mVcount++;
		KdTreeNode *node = getNewNode(ret);
		if ( mRoot )
		{
			mRoot->addDouble(node,X_AXIS,this);
		}
		else
		{
			mRoot = node;
		}
		return ret;
	}

	uint32_t add(float x,float y,float z)
	{
		assert(!mUseDouble);
		uint32_t ret = mVcount;
		mVerticesFloat.push_back(x);
		mVerticesFloat.push_back(y);
		mVerticesFloat.push_back(z);
		mVcount++;
		KdTreeNode *node = getNewNode(ret);
		if ( mRoot )
		{
			mRoot->addFloat(node,X_AXIS,this);
		}
		else
		{
			mRoot = node;
		}
		return ret;
	}

	// Placement-construct a node in the current bundle, chaining a new bundle
	// when the current one fills up.
	KdTreeNode * getNewNode(uint32_t index)
	{
		if ( mBundle == 0 )
		{
			mBundle = HACD_NEW(KdTreeNodeBundle);
		}
		if ( mBundle->isFull() )
		{
			KdTreeNodeBundle *bundle = HACD_NEW(KdTreeNodeBundle);
			mBundle->mNext = bundle;
			mBundle = bundle;
		}
		KdTreeNode *node = mBundle->getNextNode();
		new ( node ) KdTreeNode(index);
		return node;
	}

	uint32_t getNearest(const double *pos,double radius,bool &_found) const // returns the nearest possible neighbor's index.
	{
		assert( mUseDouble );
		uint32_t ret = 0;

		_found = false;
		KdTreeFindNode found[1];
		uint32_t count = search(pos,radius,1,found);
		if ( count )
		{
			KdTreeNode *node = found[0].mNode;
			ret = node->getIndex();
			_found = true;
		}
		return ret;
	}

	uint32_t getNearest(const float *pos,float radius,bool &_found) const // returns the nearest possible neighbor's index.
	{
		assert( !mUseDouble );
		uint32_t ret = 0;

		_found = false;
		KdTreeFindNode found[1];
		uint32_t count = search(pos,radius,1,found);
		if ( count )
		{
			KdTreeNode *node = found[0].mNode;
			ret = node->getIndex();
			_found = true;
		}
		return ret;
	}

	const double * getVerticesDouble(void) const
	{
		assert( mUseDouble );
		const double *ret = 0;
		if ( !mVerticesDouble.empty() )
		{
			ret = &mVerticesDouble[0];
		}
		return ret;
	}

	const float * getVerticesFloat(void) const
	{
		assert( !mUseDouble );
		const float * ret = 0;
		if ( !mVerticesFloat.empty() )
		{
			ret = &mVerticesFloat[0];
		}
		return ret;
	}

	uint32_t getVcount(void) const { return mVcount; };

	// Must be called before any add/search; selects the active precision.
	void setUseDouble(bool useDouble)
	{
		mUseDouble = useDouble;
	}

private:
	bool					mUseDouble;
	KdTreeNode			   *mRoot;    // root of the tree; 0 when empty
	KdTreeNodeBundle	   *mBundle;  // current (tail) slab of the node allocator
	uint32_t				mVcount;
	DoubleVector			mVerticesDouble; // xyz triples, double mode
	FloatVector				mVerticesFloat;  // xyz triples, float mode
};
+
// Concrete fm_VertexIndex: welds vertices by nearest-neighbor lookup in a
// kd-tree. Positions closer than 'granularity' are merged to one index;
// optional snap-to-grid quantizes inputs to granularity multiples first.
class MyVertexIndex : public fm_VertexIndex, public UANS::UserAllocated
{
public:
	// Double-precision mode.
	MyVertexIndex(double granularity,bool snapToGrid)
	{
		mDoubleGranularity = granularity;
		mFloatGranularity  = (float)granularity;
		mSnapToGrid        = snapToGrid;
		mUseDouble         = true;
		mKdTree.setUseDouble(true);
	}

	// Single-precision mode.
	MyVertexIndex(float granularity,bool snapToGrid)
	{
		mDoubleGranularity = granularity;
		mFloatGranularity  = (float)granularity;
		mSnapToGrid        = snapToGrid;
		mUseDouble         = false;
		mKdTree.setUseDouble(false);
	}

	virtual ~MyVertexIndex(void)
	{

	}


	// Quantize a coordinate down to a multiple of the granularity.
	double snapToGrid(double p)
	{
		double m = fmod(p,mDoubleGranularity);
		p-=m;
		return p;
	}

	float snapToGrid(float p)
	{
		float m = fmodf(p,mFloatGranularity);
		p-=m;
		return p;
	}

	// Weld lookup: returns the index of an existing vertex within the
	// granularity radius, or adds the point and sets newPos = true.
	uint32_t    getIndex(const float *_p,bool &newPos)  // get index for a vector float
	{
		uint32_t ret;

		if ( mUseDouble ) // widen and delegate when built in double mode
		{
			double p[3];
			p[0] = _p[0];
			p[1] = _p[1];
			p[2] = _p[2];
			return getIndex(p,newPos);
		}

		newPos = false;

		float p[3];

		if ( mSnapToGrid )
		{
			p[0] = snapToGrid(_p[0]);
			p[1] = snapToGrid(_p[1]);
			p[2] = snapToGrid(_p[2]);
		}
		else
		{
			p[0] = _p[0];
			p[1] = _p[1];
			p[2] = _p[2];
		}

		bool found;
		ret = mKdTree.getNearest(p,mFloatGranularity,found);
		if ( !found )
		{
			newPos = true;
			ret = mKdTree.add(p[0],p[1],p[2]);
		}


		return ret;
	}

	uint32_t    getIndex(const double *_p,bool &newPos) // get index for a vector double
	{
		uint32_t ret;

		if ( !mUseDouble ) // narrow and delegate when built in float mode
		{
			float p[3];
			p[0] = (float)_p[0];
			p[1] = (float)_p[1];
			p[2] = (float)_p[2];
			return getIndex(p,newPos);
		}

		newPos = false;

		double p[3];

		if ( mSnapToGrid )
		{
			p[0] = snapToGrid(_p[0]);
			p[1] = snapToGrid(_p[1]);
			p[2] = snapToGrid(_p[2]);
		}
		else
		{
			p[0] = _p[0];
			p[1] = _p[1];
			p[2] = _p[2];
		}

		bool found;
		ret = mKdTree.getNearest(p,mDoubleGranularity,found);
		if ( !found )
		{
			newPos = true;
			ret = mKdTree.add(p[0],p[1],p[2]);
		}


		return ret;
	}

	const float *   getVerticesFloat(void) const
	{
		const float * ret = 0;

		assert( !mUseDouble );

		ret = mKdTree.getVerticesFloat();

		return ret;
	}

	const double *  getVerticesDouble(void) const
	{
		const double * ret = 0;

		assert( mUseDouble );

		ret = mKdTree.getVerticesDouble();

		return ret;
	}

	const float *   getVertexFloat(uint32_t index) const
	{
		const float * ret  = 0;
		assert( !mUseDouble );
#ifdef _DEBUG
		uint32_t vcount = mKdTree.getVcount();
		assert( index < vcount );
#endif
		ret =  mKdTree.getVerticesFloat();
		ret = &ret[index*3];
		return ret;
	}

	const double *  getVertexDouble(uint32_t index) const
	{
		const double * ret = 0;
		assert( mUseDouble );
#ifdef _DEBUG
		uint32_t vcount = mKdTree.getVcount();
		assert( index < vcount );
#endif
		ret = mKdTree.getVerticesDouble();
		ret = &ret[index*3];

		return ret;
	}

	uint32_t    getVcount(void) const
	{
		return mKdTree.getVcount();
	}

	bool isDouble(void) const
	{
		return mUseDouble;
	}


	// Dump the welded vertex pool plus the supplied triangle list as a
	// Wavefront OBJ file (1-based indices). Returns false if the file could
	// not be opened.
	bool saveAsObj(const char *fname,uint32_t tcount,uint32_t *indices)
	{
		bool ret = false;


		FILE *fph = fopen(fname,"wb");
		if ( fph )
		{
			ret = true;

			uint32_t vcount    = getVcount();
			if ( mUseDouble )
			{
				const double *v  = getVerticesDouble();
				for (uint32_t i=0; i<vcount; i++)
				{
					fprintf(fph,"v %0.9f %0.9f %0.9f\r\n", (float)v[0], (float)v[1], (float)v[2] );
					v+=3;
				}
			}
			else
			{
				const float *v  = getVerticesFloat();
				for (uint32_t i=0; i<vcount; i++)
				{
					fprintf(fph,"v %0.9f %0.9f %0.9f\r\n", v[0], v[1], v[2] );
					v+=3;
				}
			}

			for (uint32_t i=0; i<tcount; i++)
			{
				uint32_t i1 = *indices++;
				uint32_t i2 = *indices++;
				uint32_t i3 = *indices++;
				// NOTE(review): "%d" with uint32_t arguments relies on int and
				// uint32_t sharing a representation; "%u" would be the exact
				// format specifier — confirm before changing output code.
				fprintf(fph,"f %d %d %d\r\n", i1+1, i2+1, i3+1 );
			}
			fclose(fph);
		}

		return ret;
	}

private:
	bool    mUseDouble:1;   // which precision this instance was built for
	bool    mSnapToGrid:1;  // quantize inputs before welding
	double  mDoubleGranularity;
	float   mFloatGranularity;
	KdTree  mKdTree;        // spatial index over the welded vertex pool
};
+
+static fm_VertexIndex * fm_createVertexIndex(float granularity,bool snapToGrid) // create an indexed vertext system for floats
+{
+ MyVertexIndex *ret = HACD_NEW(MyVertexIndex)(granularity,snapToGrid);
+ return static_cast< fm_VertexIndex *>(ret);
+}
+
+static void fm_releaseVertexIndex(fm_VertexIndex *vindex)
+{
+ MyVertexIndex *m = static_cast< MyVertexIndex *>(vindex);
+ delete m;
+}
+
+
+//
+
+// Split Mesh
+
// Plain owned triangle mesh: a flat xyz vertex array plus a flat index array
// (three indices per triangle). Copies its inputs; frees them on release.
class SimpleMesh
{
public:
	SimpleMesh(void)
	{
		mVcount = 0;
		mTcount = 0;
		mVertices = NULL;
		mIndices = NULL;
	}
	SimpleMesh(uint32_t vcount,uint32_t tcount,const float *vertices,const uint32_t *indices)
	{
		mVcount = 0;
		mTcount = 0;
		mVertices = NULL;
		mIndices = NULL;
		set(vcount,tcount,vertices,indices);
	}

	// Replace the mesh contents with deep copies of the supplied arrays.
	void set(uint32_t vcount,uint32_t tcount,const float *vertices,const uint32_t *indices)
	{
		release();
		mVcount = vcount;
		mTcount = tcount;
		mVertices = (float *)HACD_ALLOC(sizeof(float)*3*mVcount);
		memcpy(mVertices,vertices,sizeof(float)*3*mVcount);
		mIndices = (uint32_t *)HACD_ALLOC(sizeof(uint32_t)*3*mTcount);
		memcpy(mIndices,indices,sizeof(uint32_t)*3*mTcount);
	}


	~SimpleMesh(void)
	{
		release();
	}

	// Free owned buffers and return to the empty state.
	void release(void)
	{
		HACD_FREE(mVertices);
		HACD_FREE(mIndices);
		mVertices = NULL;
		mIndices = NULL;
		mVcount = 0;
		mTcount = 0;
	}


	uint32_t	 mVcount;   // number of vertices (xyz triples in mVertices)
	uint32_t	 mTcount;   // number of triangles (index triples in mIndices)
	float		*mVertices;
	uint32_t	*mIndices;
};
+
+
// Forward declaration; a splitMesh implementation appears later in this file.
void splitMesh(const float *planeEquation,const SimpleMesh &input,SimpleMesh &left,SimpleMesh &right,bool closedMesh);

// Weld the three corner positions through the vertex index and append the
// resulting triangle (three indices) to 'indices'.
static void addTri(const float *p1,
				   const float *p2,
				   const float *p3,
				   hacd::vector< uint32_t > &indices,
				   fm_VertexIndex *vertices)
{
	bool newPos;

	uint32_t i1 = vertices->getIndex(p1,newPos);
	uint32_t i2 = vertices->getIndex(p2,newPos);
	uint32_t i3 = vertices->getIndex(p3,newPos);

	indices.push_back(i1);
	indices.push_back(i2);
	indices.push_back(i3);
}
+
// Classification of a point or triangle relative to a splitting plane.
enum PlaneTriResult
{
	PTR_ON_PLANE, // within +/- epsilon of the plane surface
	PTR_FRONT,    // on the positive (normal) side
	PTR_BACK,     // on the negative side
	PTR_SPLIT,    // triangle straddles the plane
};
+
+static PlaneTriResult fm_getSidePlane(const float *p,const float *plane,float epsilon)
+{
+ PlaneTriResult ret = PTR_ON_PLANE;
+
+ float d = fm_distToPlane(plane,p);
+
+ if ( d < -epsilon || d > epsilon )
+ {
+ if ( d > 0 )
+ ret = PTR_FRONT; // it is 'in front' within the provided epsilon value.
+ else
+ ret = PTR_BACK;
+ }
+
+ return ret;
+}
+
+
+
+
// Forward declaration; defined further below in this file.
static PlaneTriResult fm_planeTriIntersection(const float plane[4],    // the plane equation in Ax+By+Cz+D format
									const float *triangle, // the source triangle.
									uint32_t tstride,  // stride in bytes of the input and output *vertices*
									float        epsilon,  // the co-planer epsilon value.
									float       *front,    // the triangle in front of the
									uint32_t &fcount,  // number of vertices in the 'front' triangle
									float       *back,     // the triangle in back of the plane
									uint32_t &bcount); // the number of vertices in the 'back' triangle.

// Append point 'p' as vertex number 'pcount' of a clipped polygon stored in
// 'dest' with a byte stride of 'tstride'; increments pcount. The assert
// reflects the callers' maximum of 4 vertices per clipped side.
static inline void add(const float *p,float *dest,uint32_t tstride,uint32_t &pcount)
{
	char *d = (char *) dest;
	d = d + pcount*tstride;
	dest = (float *) d;
	dest[0] = p[0];
	dest[1] = p[1];
	dest[2] = p[2];
	pcount++;
	HACD_ASSERT( pcount <= 4 );
}

#define MAXPTS 256
+
// Minimal xyz point used by the polygon splitter below.
template <class Type> class point
{
public:

	// Copy an xyz triple out of a raw coordinate array.
	void set(const Type *p)
	{
		x = p[0];
		y = p[1];
		z = p[2];
	}

	Type x;
	Type y;
	Type z;
};
+
// Plane in Ax+By+Cz+D form, built from a 4-element equation array.
template <class Type> class plane
{
public:
	plane(const Type *p)
	{
		normal.x = p[0];
		normal.y = p[1];
		normal.z = p[2];
		D = p[3];
	}

	// Signed distance-like value: positive in front of the plane, negative
	// behind, zero on the plane.
	Type Classify_Point(const point<Type> &p)
	{
		return p.x*normal.x + p.y*normal.y + p.z*normal.z + D;
	}

	point<Type> normal;
	Type  D;
};
+
// Fixed-capacity polygon plus a plane-splitting routine (classic
// clip-against-plane walk over consecutive edges).
template <class Type> class polygon
{
public:
	polygon(void)
	{
		mVcount = 0;
	}

	polygon(const Type *p1,const Type *p2,const Type *p3)
	{
		mVcount = 3;
		mVertices[0].set(p1);
		mVertices[1].set(p2);
		mVertices[2].set(p3);
	}


	int32_t NumVertices(void) const { return mVcount; };

	// Negative indices wrap from the end (index -1 == last vertex).
	const point<Type>& Vertex(int32_t index)
	{
		if ( index < 0 ) index+=mVcount;
		return mVertices[index];
	};


	void set(const point<Type> *pts,int32_t count)
	{
		for (int32_t i=0; i<count; i++)
		{
			mVertices[i] = pts[i];
		}
		mVcount = count;
	}


	// Split 'poly' by 'part': vertices on the positive side go to 'front',
	// negative side to 'back'; crossing edges get an interpolated vertex
	// (via fm_intersectPointPlane) added to both outputs, and on-plane
	// vertices are emitted to both sides.
	void Split_Polygon(polygon<Type> *poly,plane<Type> *part, polygon<Type> &front, polygon<Type> &back)
	{
		int32_t count = poly->NumVertices ();
		int32_t out_c = 0, in_c = 0;
		point<Type> ptA, ptB,outpts[MAXPTS],inpts[MAXPTS];
		Type sideA, sideB;
		ptA = poly->Vertex (count - 1); // start from the closing edge (last -> first)
		sideA = part->Classify_Point (ptA);
		for (int32_t i = -1; ++i < count;)
		{
			ptB = poly->Vertex(i);
			sideB = part->Classify_Point(ptB);
			if (sideB > 0)
			{
				if (sideA < 0)
				{
					// Edge crosses from back to front: emit the intersection.
					point<Type> v;
					fm_intersectPointPlane(&ptB.x, &ptA.x, &v.x, &part->normal.x );
					outpts[out_c++] = inpts[in_c++] = v;
				}
				outpts[out_c++] = ptB;
			}
			else if (sideB < 0)
			{
				if (sideA > 0)
				{
					// Edge crosses from front to back: emit the intersection.
					point<Type> v;
					fm_intersectPointPlane(&ptB.x, &ptA.x, &v.x, &part->normal.x );
					outpts[out_c++] = inpts[in_c++] = v;
				}
				inpts[in_c++] = ptB;
			}
			else
				outpts[out_c++] = inpts[in_c++] = ptB; // on-plane vertex: both sides
			ptA = ptB;
			sideA = sideB;
		}

		front.set(&outpts[0], out_c);
		back.set(&inpts[0], in_c);
	}

	int32_t mVcount;
	point<Type> mVertices[MAXPTS];
};
+/* a = b - c */
+#define vector(a,b,c) \
+ (a)[0] = (b)[0] - (c)[0]; \
+ (a)[1] = (b)[1] - (c)[1]; \
+ (a)[2] = (b)[2] - (c)[2];
+
+
+
+#define innerProduct(v,q) \
+ ((v)[0] * (q)[0] + \
+ (v)[1] * (q)[1] + \
+ (v)[2] * (q)[2])
+
+#define crossProduct(a,b,c) \
+ (a)[0] = (b)[1] * (c)[2] - (c)[1] * (b)[2]; \
+ (a)[1] = (b)[2] * (c)[0] - (c)[2] * (b)[0]; \
+ (a)[2] = (b)[0] * (c)[1] - (c)[0] * (b)[1];
+
+
// Moller-Trumbore ray/triangle intersection. Returns true only for hits in
// front of the ray origin (t > 0); on a reject before the final step, 't'
// is left unchanged. Written with explicit arithmetic instead of the
// vector/crossProduct/innerProduct macros.
static bool fm_rayIntersectsTriangle(const float *p,const float *d,const float *v0,const float *v1,const float *v2,float &t)
{
	// Triangle edge vectors from v0.
	float e1[3] = { v1[0]-v0[0], v1[1]-v0[1], v1[2]-v0[2] };
	float e2[3] = { v2[0]-v0[0], v2[1]-v0[1], v2[2]-v0[2] };

	// h = d x e2
	float h[3] = { d[1]*e2[2] - e2[1]*d[2],
				   d[2]*e2[0] - e2[2]*d[0],
				   d[0]*e2[1] - e2[0]*d[1] };

	float det = e1[0]*h[0] + e1[1]*h[1] + e1[2]*h[2];
	if (det > -0.00001 && det < 0.00001)
		return(false); // ray is (nearly) parallel to the triangle plane

	float inv = 1/det;
	float s[3] = { p[0]-v0[0], p[1]-v0[1], p[2]-v0[2] };

	// First barycentric coordinate.
	float u = inv * (s[0]*h[0] + s[1]*h[1] + s[2]*h[2]);
	if (u < 0.0 || u > 1.0)
		return(false);

	// q = s x e1
	float q[3] = { s[1]*e1[2] - e1[1]*s[2],
				   s[2]*e1[0] - e1[2]*s[0],
				   s[0]*e1[1] - e1[0]*s[1] };

	// Second barycentric coordinate.
	float v = inv * (d[0]*q[0] + d[1]*q[1] + d[2]*q[2]);
	if (v < 0.0 || u + v > 1.0)
		return(false);

	// At this stage we can compute t to find where the intersection point
	// lies on the line; only a positive t counts as a ray hit.
	t = inv * (e2[0]*q[0] + e2[1]*q[1] + e2[2]*q[2]);
	if (t > 0)
		return(true);
	// Line intersects behind the ray origin: not a ray intersection.
	return (false);
}
+
+
+
// Clip one triangle against a plane. Outputs the polygon on each side
// (3 or 4 vertices after a split) into 'front'/'back' using the same byte
// stride as the input, and returns which side(s) received geometry.
// Fully co-planar triangles are emitted to 'front'.
static PlaneTriResult fm_planeTriIntersection(const float *_plane,    // the plane equation in Ax+By+Cz+D format
									const float *triangle, // the source triangle.
									uint32_t tstride,  // stride in bytes of the input and output *vertices*
									float        epsilon,  // the co-planar epsilon value.
									float       *front,    // the triangle in front of the
									uint32_t &fcount,  // number of vertices in the 'front' triangle
									float       *back,     // the triangle in back of the plane
									uint32_t &bcount) // the number of vertices in the 'back' triangle.
{

	fcount = 0;
	bcount = 0;

	const char *tsource = (const char *) triangle;

	// get the three vertices of the triangle.
	const float *p1     = (const float *) (tsource);
	const float *p2     = (const float *) (tsource+tstride);
	const float *p3     = (const float *) (tsource+tstride*2);


	PlaneTriResult r1   = fm_getSidePlane(p1,_plane,epsilon); // compute the side of the plane each vertex is on
	PlaneTriResult r2   = fm_getSidePlane(p2,_plane,epsilon);
	PlaneTriResult r3   = fm_getSidePlane(p3,_plane,epsilon);

	// If any of the points lay right *on* the plane....
	if ( r1 == PTR_ON_PLANE || r2 == PTR_ON_PLANE || r3 == PTR_ON_PLANE )
	{
		// If the triangle is completely co-planar, then just treat it as 'front' and return!
		if ( r1 == PTR_ON_PLANE && r2 == PTR_ON_PLANE && r3 == PTR_ON_PLANE )
		{
			add(p1,front,tstride,fcount);
			add(p2,front,tstride,fcount);
			add(p3,front,tstride,fcount);
			return PTR_FRONT;
		}
		// Decide to place the co-planar points on the same side as the co-planar point.
		// (i.e. adopt the classification of the first vertex that is NOT on the plane.)
		PlaneTriResult r= PTR_ON_PLANE;
		if ( r1 != PTR_ON_PLANE )
			r = r1;
		else if ( r2 != PTR_ON_PLANE )
			r = r2;
		else if ( r3 != PTR_ON_PLANE )
			r = r3;

		if ( r1 == PTR_ON_PLANE ) r1 = r;
		if ( r2 == PTR_ON_PLANE ) r2 = r;
		if ( r3 == PTR_ON_PLANE ) r3 = r;

	}

	if ( r1 == r2 && r1 == r3 ) // if all three vertices are on the same side of the plane.
	{
		if ( r1 == PTR_FRONT ) // if all three are in front of the plane, then copy to the 'front' output triangle.
		{
			add(p1,front,tstride,fcount);
			add(p2,front,tstride,fcount);
			add(p3,front,tstride,fcount);
		}
		else
		{
			add(p1,back,tstride,bcount); // if all three are in 'back' then copy to the 'back' output triangle.
			add(p2,back,tstride,bcount);
			add(p3,back,tstride,bcount);
		}
		return r1; // if all three points are on the same side of the plane return result
	}


	// Mixed sides: split the triangle with the polygon clipper.
	polygon<float> pi(p1,p2,p3);
	polygon<float>  pfront,pback;

	plane<float>    part(_plane);

	pi.Split_Polygon(&pi,&part,pfront,pback);

	for (int32_t i=0; i<pfront.mVcount; i++)
	{
		add( &pfront.mVertices[i].x, front, tstride, fcount );
	}

	for (int32_t i=0; i<pback.mVcount; i++)
	{
		add( &pback.mVertices[i].x, back, tstride, bcount );
	}

	PlaneTriResult ret = PTR_SPLIT;

	// Degenerate sides (fewer than 3 vertices) are discarded.
	if ( fcount < 3 ) fcount = 0;
	if ( bcount < 3 ) bcount = 0;

	if ( fcount == 0 && bcount )
		ret = PTR_BACK;

	if ( bcount == 0 && fcount )
		ret = PTR_FRONT;


	return ret;
}
+
+
+
// Splits 'input' along the plane equation into 'leftMesh' (geometry in front
// of the plane) and 'rightMesh' (geometry behind it).  Triangles straddling
// the plane are clipped; a clipped quad is fanned into two triangles.  Output
// vertices are welded through fm_VertexIndex with a small tolerance.
void splitMesh(const float *planeEquation,const SimpleMesh &input,SimpleMesh &leftMesh,SimpleMesh &rightMesh)
{
	hacd::vector< uint32_t > leftIndices;
	hacd::vector< uint32_t > rightIndices;

	// welded vertex pools (1e-5 weld tolerance) for the two output meshes
	fm_VertexIndex *leftVertices = fm_createVertexIndex(0.00001f,false);
	fm_VertexIndex *rightVertices = fm_createVertexIndex(0.00001f,false);

	{
		for (uint32_t i=0; i<input.mTcount; i++)
		{
			uint32_t i1 = input.mIndices[i*3+0];
			uint32_t i2 = input.mIndices[i*3+1];
			uint32_t i3 = input.mIndices[i*3+2];

			float *p1 = &input.mVertices[i1*3];
			float *p2 = &input.mVertices[i2*3];
			float *p3 = &input.mVertices[i3*3];

			// gather the triangle into a contiguous 3x3 array for clipping
			float tri[3*3];

			tri[0] = p1[0];
			tri[1] = p1[1];
			tri[2] = p1[2];

			tri[3] = p2[0];
			tri[4] = p2[1];
			tri[5] = p2[2];

			tri[6] = p3[0];
			tri[7] = p3[1];
			tri[8] = p3[2];

			// clipping a triangle against a plane yields at most 5 vertices
			float front[3*5];
			float back[3*5];

			uint32_t fcount,bcount;

			PlaneTriResult result = fm_planeTriIntersection(planeEquation,tri,sizeof(float)*3,0.00001f,front,fcount,back,bcount);

			switch ( result )
			{
				case PTR_FRONT:
					addTri(p1,p2,p3,leftIndices,leftVertices);
					break;
				case PTR_BACK:
					addTri(p1,p2,p3,rightIndices,rightVertices);
					break;
				case PTR_SPLIT:
					// each clipped piece is a triangle (3 verts) or a quad
					// (4 verts); a quad is fanned into two triangles
					if ( fcount )
					{
						addTri(&front[0],&front[3],&front[6],leftIndices,leftVertices);
						if ( fcount == 4 )
						{
							addTri(&front[0],&front[6],&front[9],leftIndices,leftVertices);
						}
					}
					if ( bcount )
					{
						addTri(&back[0],&back[3],&back[6],rightIndices,rightVertices);
						if ( bcount == 4 )
						{
							addTri(&back[0],&back[6],&back[9],rightIndices,rightVertices);
						}
					}
					break;
				case PTR_ON_PLANE: // Make compiler happy
					break;
			}
		}
	}

	if ( !leftIndices.empty() )
	{
		leftMesh.set(leftVertices->getVcount(),leftIndices.size()/3,leftVertices->getVerticesFloat(),&leftIndices[0]);
	}

	if ( !rightIndices.empty() )
	{
		rightMesh.set(rightVertices->getVcount(),rightIndices.size()/3,rightVertices->getVerticesFloat(),&rightIndices[0]);
	}
	fm_releaseVertexIndex(leftVertices);
	fm_releaseVertexIndex(rightVertices);
}
+
+
+//
+
// Euclidean distance between the 3D points P0=(x0,y0,z0) and P1=(x1,y1,z1).
// (Originally ENORM0_3D by John Burkardt, 18 April 1999.)
static float enorm0_3d ( float x0, float y0, float z0, float x1, float y1, float z1 )
{
	const float dx = x1 - x0;
	const float dy = y1 - y0;
	const float dz = z1 - z0;
	return ::sqrtf( dx * dx + dy * dy + dz * dz );
}
+
+
+static float triangle_area_3d ( float x1, float y1, float z1, float x2,float y2, float z2, float x3, float y3, float z3 )
+
+ /**********************************************************************/
+
+ /*
+ Purpose:
+
+ TRIANGLE_AREA_3D computes the area of a triangle in 3D.
+
+ Modified:
+
+ 22 April 1999
+
+ Author:
+
+ John Burkardt
+
+ Parameters:
+
+ Input, float X1, Y1, Z1, X2, Y2, Z2, X3, Y3, Z3, the (X,Y,Z)
+ coordinates of the corners of the triangle.
+
+ Output, float TRIANGLE_AREA_3D, the area of the triangle.
+ */
+{
+ float a;
+ float alpha;
+ float area;
+ float b;
+ float base;
+ float c;
+ float dot;
+ float height;
+ /*
+ Find the projection of (P3-P1) onto (P2-P1).
+ */
+ dot =
+ ( x2 - x1 ) * ( x3 - x1 ) +
+ ( y2 - y1 ) * ( y3 - y1 ) +
+ ( z2 - z1 ) * ( z3 - z1 );
+
+ base = enorm0_3d ( x1, y1, z1, x2, y2, z2 );
+ /*
+ The height of the triangle is the length of (P3-P1) after its
+ projection onto (P2-P1) has been subtracted.
+ */
+ if ( base == 0.0 ) {
+
+ height = 0.0;
+
+ }
+ else {
+
+ alpha = dot / ( base * base );
+
+ a = x3 - x1 - alpha * ( x2 - x1 );
+ b = y3 - y1 - alpha * ( y2 - y1 );
+ c = z3 - z1 - alpha * ( z2 - z1 );
+
+ height = ::sqrtf ( a * a + b * b + c * c );
+
+ }
+
+ area = 0.5f * base * height;
+
+ return area;
+}
+
+
+float fm_computeArea(const float *p1,const float *p2,const float *p3)
+{
+ float ret = 0;
+
+ ret = triangle_area_3d(p1[0],p1[1],p1[2],p2[0],p2[1],p2[2],p3[0],p3[1],p3[2]);
+
+ return ret;
+}
+
// Debug sanity check: asserts that a single coordinate lies in the range
// [-1000, 1000).  HACD_UNUSED silences the unused-parameter warning in
// builds where HACD_ASSERT compiles away.
void validate(float v)
{
	HACD_UNUSED(v);
	HACD_ASSERT( v >= -1000 && v < 1000 );
}
+
+void validate(const float *v)
+{
+ validate(v[0]);
+ validate(v[1]);
+ validate(v[2]);
+}
+
+
+void addVertex(const float *p,float *dest,uint32_t index)
+{
+ dest[index*3+0] = p[0];
+ dest[index*3+1] = p[1];
+ dest[index*3+2] = p[2];
+
+ validate( &dest[index*3]);
+
+}
+
// Stores the triangle (i1,i2,i3) into slot 'index' of the packed index array.
void addTriangle(uint32_t *indices,uint32_t i1,uint32_t i2,uint32_t i3,uint32_t index)
{
	uint32_t *dest = &indices[index*3];
	dest[0] = i1;
	dest[1] = i2;
	dest[2] = i3;
}
+
+bool projectRay(const float *p,const float *n,float *t,const HACD::HullResult &hull)
+{
+ bool ret = false;
+
+ t[0] = p[0];
+ t[1] = p[1];
+ t[2] = p[2];
+ validate(p);
+ validate(n);
+
+ for (uint32_t i=0; i<hull.mNumTriangles; i++)
+ {
+ uint32_t i1 = hull.mIndices[i*3+0];
+ uint32_t i2 = hull.mIndices[i*3+1];
+ uint32_t i3 = hull.mIndices[i*3+2];
+
+ const float *p1 = &hull.mOutputVertices[i1*3];
+ const float *p2 = &hull.mOutputVertices[i2*3];
+ const float *p3 = &hull.mOutputVertices[i3*3];
+
+ float tm;
+ if ( fm_rayIntersectsTriangle(p,n,p1,p2,p3,tm))
+ {
+ if ( tm > 100 )
+ {
+ fm_rayIntersectsTriangle(p,n,p1,p2,p3,tm);
+ }
+ t[0] = p[0]+n[0]*tm;
+ t[1] = p[1]+n[1]*tm;
+ t[2] = p[2]+n[2]*tm;
+ ret = true;
+ break;
+ }
+ }
+
+ if ( ret )
+ {
+ validate(t);
+ }
+
+ return ret;
+}
+
// Computes the volume trapped between the triangle (p1,p2,p3) and the convex
// hull 'hull': each corner is projected along the triangle's plane normal
// (computed with reversed winding so it points towards the hull) onto the
// hull surface, and the volume of the resulting 6-vertex prism is returned.
// Returns 0 for degenerate triangles or when no projection ray hits the hull.
float computeProjectedVolume(const float *p1,const float *p2,const float *p3,const HACD::HullResult &hull)
{
	float ret = 0;

	float area = fm_computeArea(p1,p2,p3);
	if ( area <= 0 )
	{
		return 0; // degenerate triangle contributes no volume
	}

	// plane through (p3,p2,p1): reversed winding flips the normal direction
	float normal[3];
	fm_computePlane(p3,p2,p1,normal);

	float t1[3];
	float t2[3];
	float t3[3];

	// project each corner onto the hull; a corner that misses keeps its
	// original position (projectRay initializes its output to the source)
	bool hit1 = projectRay(p1,normal,t1,hull);
	bool hit2 = projectRay(p2,normal,t2,hull);
	bool hit3 = projectRay(p3,normal,t3,hull);

	if ( hit1 || hit2 || hit3 )
	{
		// now we build the little triangle mesh piece...
		// 6 vertices: the source triangle (0..2) and its projection (3..5);
		// 8 triangles: two caps plus two per side wall.
		uint32_t indices[8*3];

		float vertices[6*3];
		addVertex(p1,vertices,0);
		addVertex(p2,vertices,1);
		addVertex(p3,vertices,2);
		addVertex(t1,vertices,3);
		addVertex(t2,vertices,4);
		addVertex(t3,vertices,5);

		addTriangle(indices,2,1,0,0);
		addTriangle(indices,3,4,5,1);

		addTriangle(indices,0,3,4,2);
		addTriangle(indices,0,4,1,3);

		addTriangle(indices,2,5,3,4);
		addTriangle(indices,2,3,0,5);

		addTriangle(indices,1,4,5,6);
		addTriangle(indices,1,5,2,7);

		ret = fm_computeMeshVolume(vertices,8,indices);

#if 0
		// debug aid: dump the prism to a wavefront OBJ file
		static FILE *fph = fopen("project.obj", "wb" );
		static int baseVertex = 1;
		for (int i=0; i<6; i++)
		{
			fprintf(fph,"v %0.9f %0.9f %0.9f\r\n", vertices[i*3+0], vertices[i*3+1], vertices[i*3+2] );
		}
		for (int i=0; i<8; i++)
		{
			fprintf(fph,"f %d %d %d\r\n", indices[i*3+0]+baseVertex, indices[i*3+1]+baseVertex, indices[i*3+2]+baseVertex );
		}
		fflush(fph);
		baseVertex+=6;
#endif
	}

	return ret;
}
+float computeConcavityVolume(uint32_t /*vcount*/,
+ const float *vertices,
+ uint32_t tcount,
+ const uint32_t *indices,
+ const HACD::HullResult &result)
+{
+ float ret = 0;
+
+ for (uint32_t i=0; i<tcount; i++)
+ {
+ uint32_t i1 = indices[i*3+0];
+ uint32_t i2 = indices[i*3+1];
+ uint32_t i3 = indices[i*3+2];
+
+ const float *p1 = &vertices[i1*3];
+ const float *p2 = &vertices[i2*3];
+ const float *p3 = &vertices[i3*3];
+
+ ret+=computeProjectedVolume(p1,p2,p3,result);
+
+ }
+
+ return ret;
+}
+
+//
+
+
+typedef hacd::vector< ConvexResult > ConvexResultVector;
+
+class ConvexBuilder : public ConvexDecompInterface, public ConvexDecomposition, public UANS::UserAllocated
+{
+public:
+ ConvexBuilder(void)
+ {
+ };
+
+ virtual ~ConvexBuilder(void)
+ {
+ for (uint32_t i=0; i<mResults.size(); i++)
+ {
+ ConvexResult &r = mResults[i];
+ HACD_FREE(r.mHullIndices);
+ HACD_FREE(r.mHullVertices);
+ }
+ }
+
+ virtual void ConvexDecompResult(const ConvexResult &result)
+ {
+ ConvexResult r;
+ r.mHullTcount = result.mHullTcount;
+ r.mHullVcount = result.mHullVcount;
+ r.mHullIndices = (uint32_t *)HACD_ALLOC(sizeof(uint32_t)*r.mHullTcount*3);
+ memcpy(r.mHullIndices,result.mHullIndices,sizeof(uint32_t)*r.mHullTcount*3);
+ r.mHullVertices = (float *)HACD_ALLOC(sizeof(float)*r.mHullVcount*3);
+ memcpy(r.mHullVertices,result.mHullVertices,sizeof(float)*r.mHullVcount*3);
+ mResults.push_back(r);
+ }
+
+void doConvexDecomposition(uint32_t vcount,
+ const float *vertices,
+ uint32_t tcount,
+ const uint32_t *indices,
+ Cdesc &cdesc,
+ uint32_t depth)
+{
+
+ // first see if the input mesh is co-planar.
+ // If it is, then we return because we can't do anything with a co-planer mesh
+ bool isCoplanar = fm_isMeshCoplanar(tcount,indices,vertices,true);
+ if ( isCoplanar ) return;
+
+
+ // Next build a convex hull for the input vertices for this mesh fragment
+ HACD::HullResult result;
+ HACD::HullLibrary hl;
+ HACD::HullDesc desc;
+ desc.mVcount = vcount;
+ desc.mVertices = vertices;
+ desc.mVertexStride = sizeof(float)*3;
+ HACD::HullError ret = hl.CreateConvexHull(desc,result);
+ if ( ret != HACD::QE_OK )
+ {
+ return; // unable to build a hull for this remaining piece of mesh; so return.
+ }
+
+ bool split = false;
+ if ( depth < cdesc.mMaxDepth ) // if have not reached the maximum depth
+ {
+ // compute the volume of the convex hull prior to the plist.
+ float hullVolume = fm_computeMeshVolume(result.mOutputVertices,result.mNumTriangles,result.mIndices);
+ if (depth == 0 )
+ {
+ cdesc.mMasterVolume = hullVolume;
+ }
+ float percent = (hullVolume*100)/cdesc.mMasterVolume;
+ // if this convex hull is still considered significant enough in size to keep splitting...
+ if ( percent > cdesc.mMeshVolumePercent ) // if not too small of a feature...
+ {
+ // find the split plane by computing the OBB and slicing in half
+ float plane[4];
+ split = fm_computeSplitPlane(result.mNumOutputVertices,result.mOutputVertices,result.mNumTriangles,result.mIndices,plane);
+ if ( split )
+ {
+ {
+ float concaveVolume = computeConcavityVolume(vcount,vertices,tcount,indices,result);
+ float percentVolume = concaveVolume*100 / hullVolume;
+
+ if ( percentVolume < cdesc.mConcavePercent )
+ {
+ split = false;
+ }
+ }
+
+ SimpleMesh mesh(vcount, tcount, vertices, indices);
+ SimpleMesh leftMesh;
+ SimpleMesh rightMesh;
+ splitMesh(plane,mesh,leftMesh,rightMesh);
+
+ if ( split )
+ {
+
+ if ( leftMesh.mTcount )
+ {
+ doConvexDecomposition(leftMesh.mVcount, leftMesh.mVertices, leftMesh.mTcount,leftMesh.mIndices,cdesc,depth+1);
+ }
+ if ( rightMesh.mTcount )
+ {
+ doConvexDecomposition(rightMesh.mVcount, rightMesh.mVertices, rightMesh.mTcount,rightMesh.mIndices, cdesc, depth+1);
+ }
+ }
+ }
+ }
+ }
+ if ( !split )
+ {
+ ConvexResult r;
+ r.mHullIndices = result.mIndices;
+ r.mHullVertices = result.mOutputVertices;
+ r.mHullTcount = result.mNumTriangles;
+ r.mHullVcount = result.mNumOutputVertices;
+ cdesc.mCallback->ConvexDecompResult(r);
+ hl.ReleaseResult(result); // do not release the result!
+
+ if ( cdesc.mICallback )
+ {
+ float progress = (float)cdesc.mOutputCount / (float)cdesc.mOutputPow2;
+ cdesc.mOutputCount++;
+ cdesc.mICallback->ReportProgress("SplittingMesh", progress );
+ }
+
+
+ }
+}
+
+ virtual uint32_t performConvexDecomposition(const DecompDesc &desc) // returns the number of hulls produced.
+ {
+ Cdesc cdesc;
+ cdesc.mMaxDepth = desc.mDepth;
+ cdesc.mConcavePercent = desc.mCpercent;
+ cdesc.mMeshVolumePercent= desc.mMeshVolumePercent;
+ cdesc.mCallback = this;
+ cdesc.mICallback = desc.mCallback;
+ cdesc.mOutputCount = 0;
+ uint32_t p2[17] = { 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768, 65536 };
+ if ( cdesc.mMaxDepth > 10 )
+ cdesc.mMaxDepth = 10;
+ cdesc.mOutputPow2 = p2[ cdesc.mMaxDepth];
+ doConvexDecomposition(desc.mVcount, desc.mVertices, desc.mTcount, desc.mIndices, cdesc, 0);
+ return mResults.size();
+ }
+
+ virtual void release(void)
+ {
+ delete this;
+ }
+
+ virtual ConvexResult * getConvexResult(uint32_t index,bool takeMemoryOwnership)
+ {
+ ConvexResult *ret = NULL;
+ if ( index < mResults.size() )
+ {
+ ret = &mResults[index];
+ if ( takeMemoryOwnership )
+ {
+ mTempResult = *ret;
+ ret->mHullIndices = NULL;
+ ret->mHullVertices = NULL;
+ ret = &mTempResult;
+ }
+ }
+ return ret;
+ }
+
+ ConvexResult mTempResult;
+ ConvexResultVector mResults;
+};
+
+ConvexDecomposition * createConvexDecomposition(void)
+{
+ ConvexBuilder *m = HACD_NEW(ConvexBuilder);
+ return static_cast<ConvexDecomposition *>(m);
+}
+
+
+}; // end of namespace
+
diff --git a/APEX_1.4/shared/general/HACD/src/ConvexHull.cpp b/APEX_1.4/shared/general/HACD/src/ConvexHull.cpp
new file mode 100644
index 00000000..f73265f0
--- /dev/null
+++ b/APEX_1.4/shared/general/HACD/src/ConvexHull.cpp
@@ -0,0 +1,396 @@
+
+#include "ConvexHull.h"
+#include "dgConvexHull3d.h"
+#include "WuQuantizer.h"
+#include "ApexUsingNamespace.h"
+
+/*!
+**
+** Copyright (c) 2015 by John W. Ratcliff mailto:[email protected]
+**
+**
+** The MIT license:
+**
+** Permission is hereby granted, free of charge, to any person obtaining a copy
+** of this software and associated documentation files (the "Software"), to deal
+** in the Software without restriction, including without limitation the rights
+** to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+** copies of the Software, and to permit persons to whom the Software is furnished
+** to do so, subject to the following conditions:
+**
+** The above copyright notice and this permission notice shall be included in all
+** copies or substantial portions of the Software.
+
+** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+** WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+** CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+
+**
+** If you find this code snippet useful; you can tip me at this bitcoin address:
+**
+** BITCOIN TIP JAR: "1BT66EoaGySkbY9J6MugvQRhMMXDwPxPya"
+**
+
+
+*/
+
+#include <math.h>
+#include <float.h>
+#include <string.h>
+
+
+using namespace hacd;
+
+namespace HACD
+{
+
// Builds a convex hull for the point cloud described by 'desc'.
// The input is first quantized/normalized (NormalizeAndCleanupVertices), the
// hull is computed in double precision via dgConvexHull3d, and the result is
// denormalized and compacted before being returned in 'result' as a plain
// triangle list.  Returns QE_OK on success, QE_FAIL otherwise.
HullError HullLibrary::CreateConvexHull(const HullDesc &desc, // describes the input request
										HullResult &result) // contains the results
{
	HullError ret = QE_FAIL;

	uint32_t vcount = desc.mVcount;
	if ( vcount < 8 ) vcount = 8; // allocation floor so tiny inputs have scratch room

	float *vsource = (float *) HACD_ALLOC( sizeof(float)*vcount*3 );
	float scale[3];
	float center[3];

	uint32_t ovcount;
	bool ok = NormalizeAndCleanupVertices(desc.mVcount,desc.mVertices, desc.mVertexStride, ovcount, vsource, desc.mNormalEpsilon, scale, center, desc.mMaxVertices*2, desc.mUseWuQuantizer ); // normalize point cloud, remove duplicates!
	if ( ok )
	{
		// dgConvexHull3d works in double precision; widen the input
		double *bigVertices = (double *)HACD_ALLOC(sizeof(double)*3*ovcount);
		for (uint32_t i=0; i<3*ovcount; i++)
		{
			bigVertices[i] = vsource[i];
		}

		dgConvexHull3d convexHull(bigVertices,sizeof(double)*3,(int32_t)ovcount,0.0001f,(int32_t)desc.mMaxVertices);

		if ( convexHull.GetCount() )
		{
			float *hullVertices = (float *)HACD_ALLOC( sizeof(float)*3*convexHull.GetVertexCount() );

			// denormalize the hull vertices back into the caller's space
			float *dest = hullVertices;
			for (int32_t i=0; i<convexHull.GetVertexCount(); i++)
			{
				const dgBigVector &v = convexHull.GetVertex(i);
				dest[0] = (float)v.m_x*scale[0]+center[0];
				dest[1] = (float)v.m_y*scale[1]+center[1];
				dest[2] = (float)v.m_z*scale[2]+center[2];
				dest+=3;
			}

			// flatten the hull's face list into a plain triangle index array
			uint32_t triangleCount = (uint32_t)convexHull.GetCount();
			uint32_t *indices = (uint32_t*)HACD_ALLOC(triangleCount*sizeof(uint32_t)*3);
			uint32_t *destIndices = indices;
			dgList<dgConvexHull3DFace>::Iterator iter(convexHull);
			uint32_t outCount = 0;
			for (iter.Begin(); iter; iter++)
			{
				dgConvexHull3DFace &face = (*iter);
				destIndices[0] = (uint32_t)face.m_index[0];
				destIndices[1] = (uint32_t)face.m_index[1];
				destIndices[2] = (uint32_t)face.m_index[2];
				destIndices+=3;
				outCount++;
			}
			HACD_ASSERT( outCount == triangleCount );

			// re-index triangle mesh so it refers to only used vertices, rebuild a new vertex table.
			float *vscratch = (float *) HACD_ALLOC( sizeof(float)*convexHull.GetVertexCount()*3 );
			BringOutYourDead(hullVertices,(uint32_t)convexHull.GetVertexCount(),vscratch, ovcount, indices, triangleCount*3 );

			ret = QE_OK;

			// copy the compacted mesh into freshly sized result buffers
			result.mNumOutputVertices = ovcount;
			result.mOutputVertices = (float *)HACD_ALLOC( sizeof(float)*ovcount*3);
			result.mNumTriangles = triangleCount;
			result.mIndices = (uint32_t *) HACD_ALLOC( sizeof(uint32_t)*triangleCount*3);
			memcpy(result.mOutputVertices, vscratch, sizeof(float)*3*ovcount );
			memcpy(result.mIndices, indices, sizeof(uint32_t)*triangleCount*3);

			HACD_FREE(indices);
			HACD_FREE(vscratch);
			HACD_FREE(hullVertices);
		}

		HACD_FREE(bigVertices);
	}

	HACD_FREE(vsource);


	return ret;
}
+
+
+
+HullError HullLibrary::ReleaseResult(HullResult &result) // release memory allocated for this result, we are done with it.
+{
+ if ( result.mOutputVertices )
+ {
+ HACD_FREE(result.mOutputVertices);
+ result.mOutputVertices = 0;
+ }
+ if ( result.mIndices )
+ {
+ HACD_FREE(result.mIndices);
+ result.mIndices = 0;
+ }
+ return QE_OK;
+}
+
+
+bool HullLibrary::NormalizeAndCleanupVertices(uint32_t svcount,
+ const float *svertices,
+ uint32_t /*stride*/,
+ uint32_t &vcount, // output number of vertices
+ float *vertices, // location to store the results.
+ float /*normalepsilon*/,
+ float *scale,
+ float *center,
+ uint32_t maxVertices,
+ bool useWuQuantizer)
+{
+ bool ret = false;
+
+ WuQuantizer *wq = createWuQuantizer();
+ if ( wq )
+ {
+ const float *quantizedVertices;
+ if ( useWuQuantizer )
+ {
+ quantizedVertices = wq->wuQuantize3D(svcount,svertices,false,maxVertices,vcount);
+ }
+ else
+ {
+ quantizedVertices = wq->kmeansQuantize3D(svcount,svertices,false,maxVertices,vcount);
+ }
+ if ( quantizedVertices )
+ {
+ memcpy(vertices,quantizedVertices,sizeof(float)*3*vcount);
+ const float *_scale = wq->getDenormalizeScale();
+ scale[0] = _scale[0];
+ scale[1] = _scale[1];
+ scale[2] = _scale[2];
+ const float *_center = wq->getDenormalizeCenter();
+ center[0] = _center[0];
+ center[1] = _center[1];
+ center[2] = _center[2];
+ ret = true;
+ }
+ wq->release();
+ }
+ return ret;
+}
+
+void HullLibrary::BringOutYourDead(const float *verts,uint32_t vcount, float *overts,uint32_t &ocount,uint32_t *indices,uint32_t indexcount)
+{
+ uint32_t *used = (uint32_t *)HACD_ALLOC(sizeof(uint32_t)*vcount);
+ memset(used,0,sizeof(uint32_t)*vcount);
+
+ ocount = 0;
+
+ for (uint32_t i=0; i<indexcount; i++)
+ {
+ uint32_t v = indices[i]; // original array index
+
+ HACD_ASSERT( v < vcount );
+
+ if ( used[v] ) // if already remapped
+ {
+ indices[i] = used[v]-1; // index to new array
+ }
+ else
+ {
+
+ indices[i] = ocount; // new index mapping
+
+ overts[ocount*3+0] = verts[v*3+0]; // copy old vert to new vert array
+ overts[ocount*3+1] = verts[v*3+1];
+ overts[ocount*3+2] = verts[v*3+2];
+
+ ocount++; // increment output vert count
+
+ HACD_ASSERT( ocount <= vcount );
+
+ used[v] = ocount; // assign new index remapping
+ }
+ }
+
+ HACD_FREE(used);
+}
+
+//==================================================================================
// Emits the mesh in 'answer' as triangles through the supplied callback
// interface, fanning polygons with more than three vertices.
// NOTE(review): this routine reads answer.mIndices as COUNT-PREFIXED polygons
// (a vertex count followed by that many indices).  CreateConvexHull in this
// file produces plain triangle triplets with no count prefix — confirm which
// producer this consumer is actually paired with before relying on it.
HullError HullLibrary::CreateTriangleMesh(HullResult &answer,ConvexHullTriangleInterface *iface)
{
	HullError ret = QE_FAIL;


	const float *p = answer.mOutputVertices;
	const uint32_t *idx = answer.mIndices;
	uint32_t fcount = answer.mNumTriangles;

	if ( p && idx && fcount )
	{
		ret = QE_OK;

		for (uint32_t i=0; i<fcount; i++)
		{
			uint32_t pcount = *idx++; // vertex count of this polygon

			uint32_t i1 = *idx++;
			uint32_t i2 = *idx++;
			uint32_t i3 = *idx++;

			const float *p1 = &p[i1*3];
			const float *p2 = &p[i2*3];
			const float *p3 = &p[i3*3];

			AddConvexTriangle(iface,p1,p2,p3);

			// fan any remaining polygon vertices into additional triangles
			pcount-=3;
			while ( pcount )
			{
				i3 = *idx++;
				p2 = p3;
				p3 = &p[i3*3];

				AddConvexTriangle(iface,p1,p2,p3);
				pcount--;
			}

		}
	}

	return ret;
}
+
+//==================================================================================
//==================================================================================
// Builds three ConvexHullVertex records for triangle (p1,p2,p3) — position,
// a flat (per-face) normal, and planar-projected texture coordinates — and
// emits them through the callback in reversed order (v3,v2,v1).
void HullLibrary::AddConvexTriangle(ConvexHullTriangleInterface *callback,const float *p1,const float *p2,const float *p3)
{
	ConvexHullVertex v1,v2,v3;

	#define TSCALE1 (1.0f/4.0f) // texel scale: 1 texture repeat per 4 world units

	v1.mPos[0] = p1[0];
	v1.mPos[1] = p1[1];
	v1.mPos[2] = p1[2];

	v2.mPos[0] = p2[0];
	v2.mPos[1] = p2[1];
	v2.mPos[2] = p2[2];

	v3.mPos[0] = p3[0];
	v3.mPos[1] = p3[1];
	v3.mPos[2] = p3[2];

	// flat-shaded: the same face normal is assigned to all three vertices
	float n[3];
	ComputeNormal(n,p1,p2,p3);

	v1.mNormal[0] = n[0];
	v1.mNormal[1] = n[1];
	v1.mNormal[2] = n[2];

	v2.mNormal[0] = n[0];
	v2.mNormal[1] = n[1];
	v2.mNormal[2] = n[2];

	v3.mNormal[0] = n[0];
	v3.mNormal[1] = n[1];
	v3.mNormal[2] = n[2];

	const float *tp1 = p1;
	const float *tp2 = p2;
	const float *tp3 = p3;

	// Choose the two coordinate axes with the smallest normal components
	// (i1 = smallest, i2 = second smallest) as the texture projection plane.
	int32_t i1 = 0;
	int32_t i2 = 0;

	float nx = fabsf(n[0]);
	float ny = fabsf(n[1]);
	float nz = fabsf(n[2]);

	if ( nx <= ny && nx <= nz )
		i1 = 0;
	if ( ny <= nx && ny <= nz )
		i1 = 1;
	if ( nz <= nx && nz <= ny )
		i1 = 2;

	switch ( i1 )
	{
		case 0:
			if ( ny < nz )
				i2 = 1;
			else
				i2 = 2;
			break;
		case 1:
			if ( nx < nz )
				i2 = 0;
			else
				i2 = 2;
			break;
		case 2:
			if ( nx < ny )
				i2 = 0;
			else
				i2 = 1;
			break;
	}

	v1.mTexel[0] = tp1[i1]*TSCALE1;
	v1.mTexel[1] = tp1[i2]*TSCALE1;

	v2.mTexel[0] = tp2[i1]*TSCALE1;
	v2.mTexel[1] = tp2[i2]*TSCALE1;

	v3.mTexel[0] = tp3[i1]*TSCALE1;
	v3.mTexel[1] = tp3[i2]*TSCALE1;

	// note the reversed winding order on emission
	callback->ConvexHullTriangle(v3,v2,v1);
}
+
+//==================================================================================
+float HullLibrary::ComputeNormal(float *n,const float *A,const float *B,const float *C)
+{
+ float vx,vy,vz,wx,wy,wz,vw_x,vw_y,vw_z,mag;
+
+ vx = (B[0] - C[0]);
+ vy = (B[1] - C[1]);
+ vz = (B[2] - C[2]);
+
+ wx = (A[0] - B[0]);
+ wy = (A[1] - B[1]);
+ wz = (A[2] - B[2]);
+
+ vw_x = vy * wz - vz * wy;
+ vw_y = vz * wx - vx * wz;
+ vw_z = vx * wy - vy * wx;
+
+ mag = sqrtf((vw_x * vw_x) + (vw_y * vw_y) + (vw_z * vw_z));
+
+ if ( mag < 0.000001f )
+ {
+ mag = 0;
+ }
+ else
+ {
+ mag = 1.0f/mag;
+ }
+
+ n[0] = vw_x * mag;
+ n[1] = vw_y * mag;
+ n[2] = vw_z * mag;
+
+ return mag;
+}
+
+}; // End of namespace HACD
diff --git a/APEX_1.4/shared/general/HACD/src/HACD.cpp b/APEX_1.4/shared/general/HACD/src/HACD.cpp
new file mode 100644
index 00000000..1a4a39cc
--- /dev/null
+++ b/APEX_1.4/shared/general/HACD/src/HACD.cpp
@@ -0,0 +1,604 @@
+/*!
+**
+** Copyright (c) 2015 by John W. Ratcliff mailto:[email protected]
+**
+**
+** The MIT license:
+**
+** Permission is hereby granted, free of charge, to any person obtaining a copy
+** of this software and associated documentation files (the "Software"), to deal
+** in the Software without restriction, including without limitation the rights
+** to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+** copies of the Software, and to permit persons to whom the Software is furnished
+** to do so, subject to the following conditions:
+**
+** The above copyright notice and this permission notice shall be included in all
+** copies or substantial portions of the Software.
+
+** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+** WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+** CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+
+**
+** If you find this code snippet useful; you can tip me at this bitcoin address:
+**
+** BITCOIN TIP JAR: "1BT66EoaGySkbY9J6MugvQRhMMXDwPxPya"
+**
+
+
+
+*/
+#include "HACD.h"
+#include <stdlib.h>
+#include <string.h>
+#include "PlatformConfigHACD.h"
+
+#include "dgMeshEffect.h"
+#include "dgConvexHull3d.h"
+#include "MergeHulls.h"
+#include "ConvexDecomposition.h"
+#include "WuQuantizer.h"
+
+#ifdef _DEBUG
+#define DEBUG_WAVEFRONT 0 // if true, will save the input convex decomposition to disk as a wavefront OBJ file for debugging purposes.
+#endif
+
+using namespace hacd;
+
+namespace HACD
+{
+
// 3x3 determinant of the matrix whose rows are p1,p2,p3; equals six times
// the signed volume of the tetrahedron (origin,p1,p2,p3).
inline float det(const float *p1,const float *p2,const float *p3)
{
	float result = p1[0]*p2[1]*p3[2];
	result += p2[0]*p3[1]*p1[2];
	result += p3[0]*p1[1]*p2[2];
	result -= p1[0]*p3[1]*p2[2];
	result -= p2[0]*p1[1]*p3[2];
	result -= p3[0]*p2[1]*p1[2];
	return result;
}
+
+
+static float fm_computeMeshVolume(const float *vertices,uint32_t tcount,const uint32_t *indices)
+{
+ float volume = 0;
+
+ for (uint32_t i=0; i<tcount; i++,indices+=3)
+ {
+ const float *p1 = &vertices[ indices[0]*3 ];
+ const float *p2 = &vertices[ indices[1]*3 ];
+ const float *p3 = &vertices[ indices[2]*3 ];
+ volume+=det(p1,p2,p3); // compute the volume of the tetrahedran relative to the origin.
+ }
+
+ volume*=(1.0f/6.0f);
+ if ( volume < 0 )
+ volume*=-1;
+ return volume;
+}
+
+
+class MyHACD_API : public HACD_API, public UANS::UserAllocated
+{
+public:
	// Minimal 3D vector used for bounds/normalization bookkeeping.
	class Vec3
	{
	public:
		Vec3(void)
		{
			// members intentionally left uninitialized; set before use
		}
		Vec3(float _x,float _y,float _z)
		{
			x = _x;
			y = _y;
			z = _z;
		}
		float x;
		float y;
		float z;
	};
+
	MyHACD_API(void)
	{

	}
	// Releases any decomposition results still held by this instance.
	virtual ~MyHACD_API(void)
	{
		releaseHACD();
	}
+
	// Normalizes the input mesh into a unit-ish cube: computes the bounding
	// box of desc.mVertices, then replaces desc.mVertices with a NEWLY
	// ALLOCATED copy where each coordinate is centered and divided by the
	// box extent.  The caller must free the copy via
	// releaseNormalizedInputMesh.  On return, 'inputCenter' holds the box
	// center and 'inputScale' holds the box EXTENTS (dx,dy,dz) — i.e. the
	// denormalization scale, not the 1/extent factor used internally.
	void normalizeInputMesh(Desc &desc,Vec3 &inputScale,Vec3 &inputCenter)
	{
		const float *source = desc.mVertices;

		Vec3 bmin(0,0,0),bmax(0,0,0);

		// compute the axis-aligned bounding box of the input vertices
		for (uint32_t i=0; i<desc.mVertexCount; i++)
		{
			const Vec3 &v = *(const Vec3 *)source;
			if ( i == 0 )
			{
				bmin = v;
				bmax = v;
			}
			else
			{
				if ( v.x < bmin.x ) bmin.x = v.x;
				if ( v.y < bmin.y ) bmin.y = v.y;
				if ( v.z < bmin.z ) bmin.z = v.z;

				if ( v.x > bmax.x ) bmax.x = v.x;
				if ( v.y > bmax.y ) bmax.y = v.y;
				if ( v.z > bmax.z ) bmax.z = v.z;

			}
			source+=3;
		}
		inputCenter.x = (bmin.x+bmax.x)*0.5f;
		inputCenter.y = (bmin.y+bmax.y)*0.5f;
		inputCenter.z = (bmin.z+bmax.z)*0.5f;

		float dx = bmax.x - bmin.x;
		float dy = bmax.y - bmin.y;
		float dz = bmax.z - bmin.z;

		// 1/extent per axis; a zero extent (flat axis) maps to scale 1 to
		// avoid dividing by zero
		if ( dx > 0 )
		{
			inputScale.x = 1.0f / dx;
		}
		else
		{
			inputScale.x = 1;
		}

		if ( dy > 0 )
		{
			inputScale.y = 1.0f / dy;
		}
		else
		{
			inputScale.y = 1;
		}

		if ( dz > 0 )
		{
			inputScale.z = 1.0f / dz;
		}
		else
		{
			inputScale.z = 1;
		}

		// write the centered+scaled copy into a fresh allocation and point
		// desc.mVertices at it (original pointer is NOT freed here)
		source = desc.mVertices;
		desc.mVertices = (const float *)HACD_ALLOC( sizeof(float)*3*desc.mVertexCount );
		float *dest = (float *)desc.mVertices;
		for (uint32_t i=0; i<desc.mVertexCount; i++)
		{
			dest[0] = (source[0]-inputCenter.x)*inputScale.x;
			dest[1] = (source[1]-inputCenter.y)*inputScale.y;
			dest[2] = (source[2]-inputCenter.z)*inputScale.z;
			dest+=3;
			source+=3;
		}
		// overwrite inputScale with the extents so the caller can denormalize
		inputScale.x = dx;
		inputScale.y = dy;
		inputScale.z = dz;
	}
+
	// Frees the vertex copy allocated by normalizeInputMesh.
	void releaseNormalizedInputMesh(Desc &desc)
	{
		HACD_FREE( (void *)desc.mVertices );
	}
+
+ virtual uint32_t performHACD(const Desc &_desc)
+ {
+ uint32_t ret = 0;
+
+ if ( _desc.mCallback )
+ {
+ _desc.mCallback->ReportProgress("Starting HACD",1);
+ }
+
+#if DEBUG_WAVEFRONT
+ {
+ static uint32_t saveCount=0;
+ saveCount++;
+ char scratch[512];
+ sprintf_s(scratch,512,"HACD_DEBUG_%d.obj", saveCount );
+ FILE *fph = fopen(scratch,"wb");
+ if ( fph )
+ {
+ fprintf(fph,"# NormalizeInputMesh: %s\r\n", _desc.mNormalizeInputMesh ? "true" : "false");
+ fprintf(fph,"# UseFastVersion: %s\r\n", _desc.mUseFastVersion ? "true" : "false" );
+ fprintf(fph,"# TriangleCount: %d\r\n", _desc.mTriangleCount);
+ fprintf(fph,"# VertexCount: %d\r\n", _desc.mVertexCount);
+ fprintf(fph,"# MaxHullCount: %d\r\n", _desc.mMaxHullCount);
+ fprintf(fph,"# MaxMergeHullCount: %d\r\n", _desc.mMaxMergeHullCount);
+ fprintf(fph,"# MaxHullVertices: %d\r\n", _desc.mMaxHullVertices);
+ fprintf(fph,"# Concavity: %0.4f\r\n", _desc.mConcavity);
+ fprintf(fph,"# SmallClusterThreshold: %0.4f\r\n", _desc.mSmallClusterThreshold);
+ fprintf(fph,"# BackFaceDistanceFactor: %0.4f\r\n", _desc.mBackFaceDistanceFactor);
+ fprintf(fph,"# DecompositionDepth: %d\r\n", _desc.mDecompositionDepth);
+ fprintf(fph,"# JobSwarmContext: %s\r\n", _desc.mJobSwarmContext ? "true" : "false");
+ fprintf(fph,"# Callback: %s\r\n", _desc.mCallback ? "true" : "false");
+
+ for (uint32_t i=0; i<_desc.mVertexCount; i++)
+ {
+ const float *p = &_desc.mVertices[i*3];
+ fprintf(fph,"v %0.9f %0.9f %0.9f\r\n", p[0], p[1], p[2] );
+ }
+ for (uint32_t i=0; i<_desc.mTriangleCount; i++)
+ {
+ uint32_t i1 = _desc.mIndices[i*3+0];
+ uint32_t i2 = _desc.mIndices[i*3+1];
+ uint32_t i3 = _desc.mIndices[i*3+2];
+ fprintf(fph,"f %d %d %d\r\n", i1+1, i2+1, i3+1 );
+ }
+ fclose(fph);
+ }
+ }
+#endif
+
+ releaseHACD();
+
+ Desc desc = _desc;
+
+ float *tempPositions = NULL; // temp memory holding remapped vertex positions
+ uint32_t *tempIndices = NULL; // temp memory holding remapped triangle indices
+ // This method scans the input mesh for duplicate vertices.
+ if ( desc.mRemoveDuplicateVertices )
+ {
+ if ( desc.mCallback )
+ {
+ desc.mCallback->ReportProgress("Removing duplicate vertices",1);
+ }
+
+ tempPositions = (float *)HACD_ALLOC(sizeof(float)*desc.mVertexCount*3); // room to hold all of the input vertex positions
+ tempIndices = (uint32_t *)HACD_ALLOC(sizeof(uint32_t)*desc.mTriangleCount*3); // room to hold all of the triangle indices
+
+ desc.mVertices = tempPositions; // the remapped vertex position data
+ desc.mIndices = tempIndices; // the remapped triangle indices
+
+ uint32_t removeCount = 0;
+
+ desc.mVertexCount = 0;
+ uint32_t *remapPositions = (uint32_t *)HACD_ALLOC(sizeof(uint32_t)*_desc.mVertexCount);
+
+ // Scan each input position and see if it duplicates an already defined vertex position
+ for (uint32_t i=0; i<_desc.mVertexCount; i++)
+ {
+
+ const float *p1 = &_desc.mVertices[i*3]; // see if this position is already represented in out vertex list.
+ // Iterate through all positions we have already defined
+
+ bool found = false;
+ for (uint32_t j=0; j<desc.mVertexCount; j++)
+ {
+ const float *p2 = &desc.mVertices[j*3]; // an existing psotion
+
+ float dx = p1[0] - p2[0];
+ float dy = p1[1] - p2[1];
+ float dz = p1[2] - p2[2];
+
+ float dist = dx*dx+dy*dy+dz*dz; // Compute teh squared distance between this position and a previously defined position
+
+ if ( dist < (0.001f*0.001f)) // if the position is essentially identical; less than 1mm different location then we do not add it.
+ {
+ found = true;
+ remapPositions[i] = j; // remap the original source position I to the new index position J
+ removeCount++; // increment the counter indicating the number of duplicates we have fou8nd
+ }
+ }
+ if ( !found ) // if no duplicate was found; then this is a unique input position and we add it to the output.
+ {
+ remapPositions[i] = desc.mVertexCount; // This input position 'I' remaps to the current output position location desc.mVertexCount
+ float *p2 = &tempPositions[desc.mVertexCount*3]; // This is the destination for the unique input position.
+ p2[0] = p1[0]; // copy X
+ p2[1] = p1[1]; // copy Y
+ p2[2] = p1[2]; // copy Z
+ desc.mVertexCount++; // increment the number of vertices in the new output
+ }
+ }
+ // now we need to build the remapped index table.
+ for (uint32_t i=0; i<desc.mTriangleCount*3; i++)
+ {
+ tempIndices[i] = remapPositions[ _desc.mIndices[i] ];
+ }
+ HACD_FREE(remapPositions);
+ if ( desc.mCallback )
+ {
+ char scratch[512];
+ HACD_SPRINTF_S(scratch,512,"Removed %d duplicate vertices.", removeCount );
+ desc.mCallback->ReportProgress(scratch,1);
+ }
+ }
+ if ( desc.mVertexCount )
+ {
+
+ if ( desc.mDecompositionDepth ) // if using legacy ACD
+ {
+ CONVEX_DECOMPOSITION::ConvexDecomposition *cd = CONVEX_DECOMPOSITION::createConvexDecomposition();
+ CONVEX_DECOMPOSITION::DecompDesc dcompDesc;
+ dcompDesc.mIndices = desc.mIndices;
+ dcompDesc.mVertices = desc.mVertices;
+ dcompDesc.mTcount = desc.mTriangleCount;
+ dcompDesc.mVcount = desc.mVertexCount;
+ dcompDesc.mMaxVertices = desc.mMaxHullVertices;
+ dcompDesc.mDepth = desc.mDecompositionDepth;
+ dcompDesc.mCpercent = desc.mConcavity*10;
+ dcompDesc.mMeshVolumePercent = desc.mSmallClusterThreshold;
+ dcompDesc.mCallback = desc.mCallback;
+
+ if ( desc.mMaxMergeHullCount == 1 ) // if we only want a single hull output then set the decomposition depth to zero!
+ {
+ dcompDesc.mDepth = 0;
+ }
+
+ ret = cd->performConvexDecomposition(dcompDesc);
+
+ for (uint32_t i=0; i<ret; i++)
+ {
+ CONVEX_DECOMPOSITION::ConvexResult *result =cd->getConvexResult(i,true);
+ Hull h;
+ h.mVertices = result->mHullVertices;
+ h.mIndices = result->mHullIndices;
+ h.mTriangleCount = result->mHullTcount;
+ h.mVertexCount = result->mHullVcount;
+ h.mVolume = fm_computeMeshVolume(h.mVertices,h.mTriangleCount,h.mIndices);
+ mHulls.push_back(h);
+ }
+ }
+ else
+ {
+ Vec3 inputScale(1,1,1);
+ Vec3 inputCenter(0,0,0);
+
+ if ( desc.mNormalizeInputMesh )
+ {
+ if ( desc.mCallback )
+ {
+ desc.mCallback->ReportProgress("Normalizing Input Mesh",1);
+ }
+ normalizeInputMesh(desc,inputScale,inputCenter);
+ }
+
+ {
+ dgMeshEffect mesh(true);
+
+ float normal[3] = { 0,1,0 };
+ float uv[2] = { 0,0 };
+
+ int32_t *faceIndexCount = (int32_t *)HACD_ALLOC(sizeof(int32_t)*desc.mTriangleCount);
+ int32_t *dummyIndex = (int32_t *)HACD_ALLOC(sizeof(int32_t)*desc.mTriangleCount*3);
+
+ for (uint32_t i=0; i<desc.mTriangleCount; i++)
+ {
+ faceIndexCount[i] = 3;
+ dummyIndex[i*3+0] = 0;
+ dummyIndex[i*3+1] = 0;
+ dummyIndex[i*3+2] = 0;
+ }
+
+ if ( desc.mCallback )
+ {
+ desc.mCallback->ReportProgress("Building Mesh from Vertex Index List",1);
+ }
+ mesh.BuildFromVertexListIndexList((int32_t)desc.mTriangleCount,faceIndexCount,dummyIndex,
+ desc.mVertices,(int32_t)sizeof(float)*3,(const int32_t *const)desc.mIndices,
+ normal,(int32_t)sizeof(float)*3,dummyIndex,
+ uv,(int32_t)sizeof(float)*2,dummyIndex,
+ uv,(int32_t)sizeof(float)*2,dummyIndex);
+
+ dgMeshEffect *result;
+ {
+ if ( desc.mCallback )
+ {
+ desc.mCallback->ReportProgress("Begin HACD",1);
+ }
+ if ( desc.mUseFastVersion )
+ {
+ result = mesh.CreateConvexApproximationFast(desc.mConcavity,(int32_t)desc.mMaxHullCount,desc.mCallback);
+ }
+ else
+ {
+ result = mesh.CreateConvexApproximation(desc.mConcavity,desc.mBackFaceDistanceFactor,(int32_t)desc.mMaxHullCount,(int32_t)desc.mMaxHullVertices,desc.mCallback);
+ }
+ }
+
+ if ( result )
+ {
+ // now we build hulls for each connected surface...
+ if ( desc.mCallback )
+ {
+ desc.mCallback->ReportProgress("Getting connected surfaces",1);
+ }
+ dgPolyhedra segment;
+ result->BeginConectedSurface();
+
+ if ( result->GetConectedSurface(segment))
+ {
+ dgMeshEffect *solid = HACD_NEW(dgMeshEffect)(segment,*result);
+ while ( solid )
+ {
+ dgConvexHull3d *hull = solid->CreateConvexHull(0.00001,(int32_t)desc.mMaxHullVertices);
+ if ( hull )
+ {
+ Hull h;
+ h.mVertexCount = (uint32_t)hull->GetVertexCount();
+ h.mVertices = (float *)HACD_ALLOC( sizeof(float)*3*h.mVertexCount);
+ for (uint32_t i=0; i<h.mVertexCount; i++)
+ {
+ float *dest = (float *)&h.mVertices[i*3];
+ const dgBigVector &source = hull->GetVertex((int32_t)i);
+ dest[0] = (float)source.m_x*inputScale.x+inputCenter.x;
+ dest[1] = (float)source.m_y*inputScale.y+inputCenter.y;
+ dest[2] = (float)source.m_z*inputScale.z+inputCenter.z;
+ }
+
+ h.mTriangleCount = (uint32_t)hull->GetCount();
+ uint32_t *destIndices = (uint32_t *)HACD_ALLOC(sizeof(uint32_t)*3*h.mTriangleCount);
+ h.mIndices = destIndices;
+
+ dgList<dgConvexHull3DFace>::Iterator iter(*hull);
+ for (iter.Begin(); iter; iter++)
+ {
+ dgConvexHull3DFace &face = (*iter);
+ destIndices[0] = (uint32_t)face.m_index[0];
+ destIndices[1] = (uint32_t)face.m_index[1];
+ destIndices[2] = (uint32_t)face.m_index[2];
+ destIndices+=3;
+ }
+
+ h.mVolume = fm_computeMeshVolume(h.mVertices,h.mTriangleCount,h.mIndices);
+
+ mHulls.push_back(h);
+
+ // save it!
+ delete hull;
+ }
+
+ delete solid;
+ solid = NULL;
+ dgPolyhedra nextSegment;
+ int32_t moreSegments = result->GetConectedSurface(nextSegment);
+ if ( moreSegments )
+ {
+ solid = HACD_NEW(dgMeshEffect)(nextSegment,*result);
+ }
+ else
+ {
+ result->EndConectedSurface();
+ }
+ }
+ }
+
+ delete result;
+ }
+ }
+ ret= (uint32_t)mHulls.size();
+ }
+
+ if ( desc.mNormalizeInputMesh && desc.mDecompositionDepth == 0 )
+ {
+ releaseNormalizedInputMesh(desc);
+ }
+ }
+
+ if ( ret && ((ret > desc.mMaxMergeHullCount) ||
+ (desc.mSmallClusterThreshold != 0.0f)) )
+ {
+ MergeHullsInterface *mhi = createMergeHullsInterface();
+ if ( mhi )
+ {
+ if ( desc.mCallback )
+ {
+ desc.mCallback->ReportProgress("Gathering Input Hulls",1);
+ }
+ MergeHullVector inputHulls;
+ MergeHullVector outputHulls;
+ for (uint32_t i=0; i<ret; i++)
+ {
+ Hull &h = mHulls[i];
+ MergeHull mh;
+ mh.mTriangleCount = h.mTriangleCount;
+ mh.mVertexCount = h.mVertexCount;
+ mh.mVertices = h.mVertices;
+ mh.mIndices = h.mIndices;
+ inputHulls.push_back(mh);
+ }
+
+ {
+ ret = mhi->mergeHulls(inputHulls,outputHulls,desc.mMaxMergeHullCount, desc.mSmallClusterThreshold + FLT_EPSILON, desc.mMaxHullVertices, desc.mCallback);
+ }
+
+ for (uint32_t i=0; i<ret; i++)
+ {
+ Hull &h = mHulls[i];
+ releaseHull(h);
+ }
+ mHulls.clear();
+
+ if ( desc.mCallback )
+ {
+ desc.mCallback->ReportProgress("Gathering Merged Hulls",1);
+ }
+ for (uint32_t i=0; i<outputHulls.size(); i++)
+ {
+ Hull h;
+ const MergeHull &mh = outputHulls[i];
+ h.mTriangleCount = mh.mTriangleCount;
+ h.mVertexCount = mh.mVertexCount;
+ h.mIndices = (uint32_t *)HACD_ALLOC(sizeof(uint32_t)*3*h.mTriangleCount);
+ h.mVertices = (float *)HACD_ALLOC(sizeof(float)*3*h.mVertexCount);
+ memcpy((uint32_t *)h.mIndices,mh.mIndices,sizeof(uint32_t)*3*h.mTriangleCount);
+ memcpy((float *)h.mVertices,mh.mVertices,sizeof(float)*3*h.mVertexCount);
+
+ h.mVolume = fm_computeMeshVolume(h.mVertices,h.mTriangleCount,h.mIndices);
+
+ mHulls.push_back(h);
+ }
+
+ ret = (uint32_t)mHulls.size();
+
+ mhi->release();
+ }
+ HACD_FREE(tempIndices);
+ HACD_FREE(tempPositions);
+ }
+
+ return ret;
+ }
+
+ void releaseHull(Hull &h)
+ {
+ HACD_FREE((void *)h.mIndices);
+ HACD_FREE((void *)h.mVertices);
+ h.mIndices = NULL;
+ h.mVertices = NULL;
+ }
+
+ virtual const Hull *getHull(uint32_t index) const
+ {
+ const Hull *ret = NULL;
+ if ( index < mHulls.size() )
+ {
+ ret = &mHulls[index];
+ }
+ return ret;
+ }
+
+ virtual void releaseHACD(void) // release memory associated with the last HACD request
+ {
+ for (uint32_t i=0; i<mHulls.size(); i++)
+ {
+ releaseHull(mHulls[i]);
+ }
+ mHulls.clear();
+ }
+
+
	virtual void release(void) // release the HACD_API interface
	{
		// Self-delete; the object must have been heap-allocated via createHACD_API().
		delete this;
	}
+
+ virtual uint32_t getHullCount(void)
+ {
+ return (uint32_t) mHulls.size();
+ }
+
+private:
+ hacd::vector< Hull > mHulls;
+};
+
+HACD_API * createHACD_API(void)
+{
+ MyHACD_API *m = HACD_NEW(MyHACD_API);
+ return static_cast<HACD_API *>(m);
+}
+
+
+};
+
+
+
diff --git a/APEX_1.4/shared/general/HACD/src/MergeHulls.cpp b/APEX_1.4/shared/general/HACD/src/MergeHulls.cpp
new file mode 100644
index 00000000..5468845b
--- /dev/null
+++ b/APEX_1.4/shared/general/HACD/src/MergeHulls.cpp
@@ -0,0 +1,533 @@
+#include "MergeHulls.h"
+#include "ConvexHull.h"
+#include "SparseArray.h"
+#include <string.h>
+#include <math.h>
+
+/*!
+**
+** Copyright (c) 2015 by John W. Ratcliff mailto:[email protected]
+**
+**
+** The MIT license:
+**
+** Permission is hereby granted, free of charge, to any person obtaining a copy
+** of this software and associated documentation files (the "Software"), to deal
+** in the Software without restriction, including without limitation the rights
+** to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+** copies of the Software, and to permit persons to whom the Software is furnished
+** to do so, subject to the following conditions:
+**
+** The above copyright notice and this permission notice shall be included in all
+** copies or substantial portions of the Software.
+
+** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+** WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+** CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+
+**
+** If you find this code snippet useful; you can tip me at this bitcoin address:
+**
+** BITCOIN TIP JAR: "1BT66EoaGySkbY9J6MugvQRhMMXDwPxPya"
+**
+
+*/
+
+using namespace hacd;
+
+namespace HACD
+{
+
+typedef SparseArray< float > TestedMap;
+
+static int gCombineCount=0;
+
+static float fm_computeBestFitAABB(uint32_t vcount,const float *points,uint32_t pstride,float *bmin,float *bmax) // returns the diagonal distance
+{
+
+ const uint8_t *source = (const uint8_t *) points;
+
+ bmin[0] = points[0];
+ bmin[1] = points[1];
+ bmin[2] = points[2];
+
+ bmax[0] = points[0];
+ bmax[1] = points[1];
+ bmax[2] = points[2];
+
+
+ for (uint32_t i=1; i<vcount; i++)
+ {
+ source+=pstride;
+ const float *p = (const float *) source;
+
+ if ( p[0] < bmin[0] ) bmin[0] = p[0];
+ if ( p[1] < bmin[1] ) bmin[1] = p[1];
+ if ( p[2] < bmin[2] ) bmin[2] = p[2];
+
+ if ( p[0] > bmax[0] ) bmax[0] = p[0];
+ if ( p[1] > bmax[1] ) bmax[1] = p[1];
+ if ( p[2] > bmax[2] ) bmax[2] = p[2];
+
+ }
+
+ float dx = bmax[0] - bmin[0];
+ float dy = bmax[1] - bmin[1];
+ float dz = bmax[2] - bmin[2];
+
+ return (float) ::sqrtf( dx*dx + dy*dy + dz*dz );
+
+}
+
+
+
+
// Returns true when the two axis-aligned boxes intersect (touching counts as
// intersecting).  Classic per-axis separating-interval test.
static bool fm_intersectAABB(const float *bmin1,const float *bmax1,const float *bmin2,const float *bmax2)
{
	for (int axis = 0; axis < 3; axis++)
	{
		if (bmin1[axis] > bmax2[axis]) return false;
		if (bmin2[axis] > bmax1[axis]) return false;
	}
	return true;
}
+
+
+static HACD_INLINE float det(const float *p1,const float *p2,const float *p3)
+{
+ return p1[0]*p2[1]*p3[2] + p2[0]*p3[1]*p1[2] + p3[0]*p1[1]*p2[2] -p1[0]*p3[1]*p2[2] - p2[0]*p1[1]*p3[2] - p3[0]*p2[1]*p1[2];
+}
+
+
+static float fm_computeMeshVolume(const float *vertices,uint32_t tcount,const uint32_t *indices)
+{
+ float volume = 0;
+ for (uint32_t i=0; i<tcount; i++,indices+=3)
+ {
+ const float *p1 = &vertices[ indices[0]*3 ];
+ const float *p2 = &vertices[ indices[1]*3 ];
+ const float *p3 = &vertices[ indices[2]*3 ];
+ volume+=det(p1,p2,p3); // compute the volume of the tetrahedran relative to the origin.
+ }
+
+ volume*=(1.0f/6.0f);
+ if ( volume < 0 )
+ volume*=-1;
+ return volume;
+}
+
+
+
// Owning copy of a convex hull (vertices + triangle indices) plus its cached
// volume and a slightly inflated AABB used for cheap overlap rejection.
class CHull : public UANS::UserAllocated
	{
	public:
		// Deep-copies the mesh data and precomputes the volume and bounds.
		CHull(uint32_t vcount,const float *vertices,uint32_t tcount,const uint32_t *indices,uint32_t guid)
		{
			mGuid = guid;
			mVertexCount = vcount;
			mTriangleCount = tcount;
			mVertices = (float *)HACD_ALLOC(sizeof(float)*3*vcount);
			memcpy(mVertices,vertices,sizeof(float)*3*vcount);
			mIndices = (uint32_t *)HACD_ALLOC(sizeof(uint32_t)*3*tcount);
			memcpy(mIndices,indices,sizeof(uint32_t)*3*tcount);
			mVolume = fm_computeMeshVolume( mVertices, mTriangleCount, mIndices);
			mDiagonal = fm_computeBestFitAABB( mVertexCount, mVertices, sizeof(float)*3, mMin, mMax );
			// Inflate the AABB by 10% per axis so nearly-touching hulls still
			// register as overlapping in overlap().
			float dx = mMax[0] - mMin[0];
			float dy = mMax[1] - mMin[1];
			float dz = mMax[2] - mMin[2];
			dx*=0.1f; // inflate 1/10th on each edge
			dy*=0.1f; // inflate 1/10th on each edge
			dz*=0.1f; // inflate 1/10th on each edge
			mMin[0]-=dx;
			mMin[1]-=dy;
			mMin[2]-=dz;
			mMax[0]+=dx;
			mMax[1]+=dy;
			mMax[2]+=dz;
		}

		~CHull(void)
		{
			HACD_FREE(mVertices);
			HACD_FREE(mIndices);
		}

		// True when the (inflated) bounding boxes of the two hulls intersect.
		bool overlap(const CHull &h) const
		{
			return fm_intersectAABB(mMin,mMax, h.mMin, h.mMax );
		}

		uint32_t mGuid;           // unique id; keys the pair-test cache in the merger
		float mMin[3];            // inflated AABB minimum corner
		float mMax[3];            // inflated AABB maximum corner
		float mVolume;            // mesh volume of this hull
		float mDiagonal;          // AABB diagonal length (long edge..)
		uint32_t mVertexCount;
		uint32_t mTriangleCount;
		float *mVertices;         // owned; 3 floats per vertex
		uint32_t *mIndices;       // owned; 3 indices per triangle
	};
+
+ // Usage: std::sort( list.begin(), list.end(), StringSortRef() );
+ class CHullSort
+ {
+ public:
+
+ bool operator()(const CHull *a,const CHull *b) const
+ {
+ return a->mVolume < b->mVolume;
+ }
+ };
+
+
+
+typedef hacd::vector< CHull * > CHullVector;
+
// Greedy pairwise hull merger.  Each combineHulls() pass finds the pair of
// hulls whose merged convex hull has the smallest volume and replaces the pair
// with that merged hull, until the target hull count / small-cluster threshold
// is satisfied.
class MyMergeHullsInterface : public MergeHullsInterface, public UANS::UserAllocated
{
public:
	MyMergeHullsInterface(void)
	{
		mHasBeenTested = NULL;
	}

	virtual ~MyMergeHullsInterface(void)
	{
		// NOTE(review): the CHull objects remaining in mChulls after
		// mergeHulls() are not deleted here; callers copy the output arrays
		// before release(), but the CHull allocations themselves appear to
		// leak -- confirm the intended ownership before changing.
	}

	// Merge these input hulls.
	// Returns the number of hulls written to 'outputHulls'.  The output
	// vertex/index pointers alias memory owned by this object's internal hull
	// list, so the caller must copy the data before calling release().
	virtual uint32_t mergeHulls(const MergeHullVector &inputHulls,
								MergeHullVector &outputHulls,
								uint32_t	mergeHullCount,
								float smallClusterThreshold,
								uint32_t maxHullVertices,
								hacd::ICallback *callback)
	{
		mGuid = 0;

		uint32_t count = (uint32_t)inputHulls.size();
		// Pair cache sized to hold an entry for every possible hull pair.
		mHasBeenTested = HACD_NEW(TestedMap)(count*count);
		mSmallClusterThreshold = smallClusterThreshold;
		mMaxHullVertices = maxHullVertices;
		mMergeNumHulls = mergeHullCount;

		mTotalVolume = 0;
		for (uint32_t i=0; i<inputHulls.size(); i++)
		{
			const MergeHull &h = inputHulls[i];
			CHull *ch = HACD_NEW(CHull)(h.mVertexCount,h.mVertices,h.mTriangleCount,h.mIndices,mGuid++);
			mChulls.push_back(ch);
			mTotalVolume+=ch->mVolume;
			if ( callback )
			{
				float fraction = (float)i / (float)inputHulls.size();
				callback->ReportProgress("Gathering Hulls To Merge", fraction );
			}
		}

		//
		// Each successful combineHulls() pass reduces the hull count by one.
		uint32_t mergeCount = count - mergeHullCount;
		uint32_t mergeIndex = 0;

		for(;;)
		{
			if ( callback )
			{
				float fraction = (float)mergeIndex / (float)mergeCount;
				callback->ReportProgress("Merging", fraction );
			}
			bool combined = combineHulls(); // merge smallest hulls first, up to the max merge count.
			if ( !combined ) break;
			mergeIndex++;
		}

		// return results..
		for (uint32_t i=0; i<mChulls.size(); i++)
		{
			CHull *ch = mChulls[i];
			MergeHull mh;
			mh.mVertexCount = ch->mVertexCount;
			mh.mTriangleCount = ch->mTriangleCount;
			mh.mIndices = ch->mIndices;
			mh.mVertices = ch->mVertices;
			outputHulls.push_back(mh);
			if ( callback )
			{
				float fraction = (float)i / (float)mChulls.size();
				callback->ReportProgress("Gathering Merged Hulls Output", fraction );
			}

		}
		delete mHasBeenTested;
		return (uint32_t)outputHulls.size();
	}

	// Callback-style receiver: wraps a raw hull in a CHull and keeps it only
	// when it has non-degenerate volume.
	virtual void ConvexDecompResult(uint32_t hvcount,const float *hvertices,uint32_t htcount,const uint32_t *hindices)
	{
		CHull *ch = HACD_NEW(CHull)(hvcount,hvertices,htcount,hindices,mGuid++);
		if ( ch->mVolume > 0.00001f )
		{
			mChulls.push_back(ch);
		}
		else
		{
			delete ch;
		}
	}


	virtual void release(void)
	{
		delete this;
	}

	// Returns the volume of the convex hull of (a union b), or zero when their
	// inflated AABBs do not even overlap (zero doubles as "cannot merge").
	static float canMerge(CHull *a,CHull *b)
	{
		if ( !a->overlap(*b) ) return 0; // if their AABB's (with a little slop) don't overlap, then return.

		// ok..we are going to combine both meshes into a single mesh
		// and then we are going to compute the concavity...
		float ret = 0;

		// Concatenate both vertex clouds and take the convex hull of the result.
		uint32_t combinedVertexCount = a->mVertexCount + b->mVertexCount;
		float *combinedVertices = (float *)HACD_ALLOC(combinedVertexCount*sizeof(float)*3);
		float *dest = combinedVertices;
		memcpy(dest,a->mVertices, sizeof(float)*3*a->mVertexCount);
		dest+=a->mVertexCount*3;
		memcpy(dest,b->mVertices,sizeof(float)*3*b->mVertexCount);

		HullResult hresult;
		HullLibrary hl;
		HullDesc   desc;
		desc.mVcount       = combinedVertexCount;
		desc.mVertices     = combinedVertices;
		desc.mVertexStride = sizeof(float)*3;
		desc.mUseWuQuantizer = true;
		HullError hret = hl.CreateConvexHull(desc,hresult);
		HACD_ASSERT( hret == QE_OK );
		if ( hret == QE_OK )
		{
			ret = fm_computeMeshVolume( hresult.mOutputVertices, hresult.mNumTriangles, hresult.mIndices );
		}
		HACD_FREE(combinedVertices);
		hl.ReleaseResult(hresult);
		return ret;
	}


	// Builds and returns the merged hull of a and b (vertex count capped at
	// mMaxHullVertices), or NULL when hull construction fails.
	CHull * doMerge(CHull *a,CHull *b)
	{
		CHull *ret = 0;
		uint32_t combinedVertexCount = a->mVertexCount + b->mVertexCount;
		float *combinedVertices = (float *)HACD_ALLOC(combinedVertexCount*sizeof(float)*3);
		float *dest = combinedVertices;
		memcpy(dest,a->mVertices, sizeof(float)*3*a->mVertexCount);
		dest+=a->mVertexCount*3;
		memcpy(dest,b->mVertices,sizeof(float)*3*b->mVertexCount);
		HullResult hresult;
		HullLibrary hl;
		HullDesc   desc;
		desc.mVcount       = combinedVertexCount;
		desc.mVertices     = combinedVertices;
		desc.mVertexStride = sizeof(float)*3;
		desc.mMaxVertices = mMaxHullVertices;
		desc.mUseWuQuantizer = true;
		HullError hret = hl.CreateConvexHull(desc,hresult);
		HACD_ASSERT( hret == QE_OK );
		if ( hret == QE_OK )
		{
			ret = HACD_NEW(CHull)(hresult.mNumOutputVertices, hresult.mOutputVertices, hresult.mNumTriangles, hresult.mIndices,mGuid++);
		}
		HACD_FREE(combinedVertices);
		hl.ReleaseResult(hresult);
		return ret;
	}

	// One pairwise combined-volume evaluation.  Shaped like an async job, but
	// startJob() currently runs it synchronously on the calling thread.
	class CombineVolumeJob
	{
	public:
		CombineVolumeJob(CHull *hullA,CHull *hullB,uint32_t hashIndex)
		{
			mHullA		= hullA;
			mHullB		= hullB;
			mHashIndex	= hashIndex;
			mCombinedVolume = 0;
		}

		virtual ~CombineVolumeJob() {}

		void startJob(void)
		{
			job_process(NULL,0);
		}

		virtual void job_process(void * /*userData*/,int32_t /*userId*/) // RUNS IN ANOTHER THREAD!! MUST BE THREAD SAFE!
		{
			mCombinedVolume = canMerge(mHullA,mHullB);
		}

		virtual void job_onFinish(void * /*userData*/,int32_t /*userId*/)  // runs in primary thread of the context
		{
			gCombineCount--;
		}

		virtual void job_onCancel(void * /*userData*/,int32_t /*userId*/)  // runs in primary thread of the context
		{

		}

	//private:
		uint32_t	mHashIndex;
		CHull		*mHullA;
		CHull		*mHullB;
		float		mCombinedVolume;
	};

	// Performs one merge step: evaluate all untested pairs, pick the pair with
	// the smallest combined volume, and merge it when either the small-cluster
	// threshold applies or we are still above the target hull count.
	// Returns true while further merging remains possible.
	bool combineHulls(void)
	{
		bool combine = false;
		// each new convex hull is given a unique guid.
		// A hash map is used to make sure that no hulls are tested twice.
		CHullVector output;
		uint32_t count = (uint32_t)mChulls.size();

		// Early out to save walking all the hulls. Hulls are combined based on
		// a target number or on a number of generated hulls.
		bool mergeTargetMet = (uint32_t)mChulls.size() <= mMergeNumHulls;
		if (mergeTargetMet && (mSmallClusterThreshold == 0.0f))
			return false;		

		hacd::vector< CombineVolumeJob > jobs;

		// First, see if there are any pairs of hulls who's combined volume we have not yet calculated.
		// If there are, then we add them to the jobs list
		{
			for (uint32_t i=0; i<count; i++)
			{
				CHull *cr = mChulls[i];
				for (uint32_t j=i+1; j<count; j++)
				{
					CHull *match = mChulls[j];
					// Order-independent pair key: (smaller guid << 16) | larger guid.
					// NOTE(review): assumes guids stay below 65536 -- confirm for
					// very large hull counts.
					uint32_t hashIndex;
					if ( match->mGuid < cr->mGuid )
					{
						hashIndex = (match->mGuid << 16) | cr->mGuid;
					}
					else
					{
						hashIndex = (cr->mGuid << 16 ) | match->mGuid;
					}

					float *v = mHasBeenTested->find(hashIndex);

					if ( v == NULL )
					{
						CombineVolumeJob job(cr,match,hashIndex);
						jobs.push_back(job);
						(*mHasBeenTested)[hashIndex] = 0.0f;  // assign it to some value so we don't try to create more than one job for it.
					}
				}
			}
		}

		// ok..we have posted all of the jobs, let's let's solve them in parallel
		for (uint32_t i=0; i<jobs.size(); i++)
		{
			jobs[i].startJob();
		}


		// once we have the answers, now put the results into the hash table.
		for (uint32_t i=0; i<jobs.size(); i++)
		{
			CombineVolumeJob &job = jobs[i];
			(*mHasBeenTested)[job.mHashIndex] = job.mCombinedVolume;
		}

		float bestVolume = 1e9;
		CHull *mergeA = NULL;
		CHull *mergeB = NULL;
		// now find the two hulls which merged produce the smallest combined volume.
		{
			for (uint32_t i=0; i<count; i++)
			{
				CHull *cr = mChulls[i];
				for (uint32_t j=i+1; j<count; j++)
				{
					CHull *match = mChulls[j];
					uint32_t hashIndex;
					if ( match->mGuid < cr->mGuid )
					{
						hashIndex = (match->mGuid << 16) | cr->mGuid;
					}
					else
					{
						hashIndex = (cr->mGuid << 16 ) | match->mGuid;
					}
					float *v = mHasBeenTested->find(hashIndex);
					HACD_ASSERT(v);
					// A cached volume of zero means the pair was untestable
					// (disjoint AABBs) -- skip those.
					if ( v && *v != 0 && *v < bestVolume )
					{
						bestVolume = *v;
						mergeA = cr;
						mergeB = match;
					}
				}
			}
		}

		// If we found a merge pair, and we are below the merge threshold or we haven't reduced to the target
		// do the merge.
		bool thresholdBelow = ((bestVolume / mTotalVolume) * 100.0f) < mSmallClusterThreshold;
		if ( mergeA && (thresholdBelow || !mergeTargetMet))
		{
			CHull *merge = doMerge(mergeA,mergeB);

			float volumeA = mergeA->mVolume;
			float volumeB = mergeB->mVolume;

			if ( merge )
			{
				combine = true;
				output.push_back(merge);
				// Carry over every hull except the two that were just merged.
				for (CHullVector::iterator j=mChulls.begin(); j!=mChulls.end(); ++j)
				{
					CHull *h = (*j);
					if ( h !=mergeA && h != mergeB )
					{
						output.push_back(h);
					}
				}
				delete mergeA;
				delete mergeB;
				// Remove the old volumes and add the new one.
				mTotalVolume -= (volumeA + volumeB);
				mTotalVolume += merge->mVolume;
			}
			mChulls = output;
		}

		return combine;
	}

private:
	TestedMap			*mHasBeenTested;        // pair-key -> combined hull volume (0 == untestable)
	uint32_t			mGuid;                  // next unique id handed to a CHull
	float				mTotalVolume;           // running sum of all current hull volumes
	float				mSmallClusterThreshold; // merge-regardless threshold, percent of total volume
	uint32_t			mMergeNumHulls;         // target hull count
	uint32_t			mMaxHullVertices;       // vertex cap for merged hulls
	CHullVector			mChulls;                // current working set of hulls (owned)
};
+
+MergeHullsInterface * createMergeHullsInterface(void)
+{
+ MyMergeHullsInterface *m = HACD_NEW(MyMergeHullsInterface);
+ return static_cast< MergeHullsInterface *>(m);
+}
+
+
+};
diff --git a/APEX_1.4/shared/general/HACD/src/WuQuantizer.cpp b/APEX_1.4/shared/general/HACD/src/WuQuantizer.cpp
new file mode 100644
index 00000000..83f16ba5
--- /dev/null
+++ b/APEX_1.4/shared/general/HACD/src/WuQuantizer.cpp
@@ -0,0 +1,1033 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <math.h>
+#include <float.h>
+
+/*!
+**
+** Copyright (c) 2015 by John W. Ratcliff mailto:[email protected]
+**
+**
+** The MIT license:
+**
+** Permission is hereby granted, free of charge, to any person obtaining a copy
+** of this software and associated documentation files (the "Software"), to deal
+** in the Software without restriction, including without limitation the rights
+** to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+** copies of the Software, and to permit persons to whom the Software is furnished
+** to do so, subject to the following conditions:
+**
+** The above copyright notice and this permission notice shall be included in all
+** copies or substantial portions of the Software.
+
+** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+** WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+** CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+
+**
+** If you find this code snippet useful; you can tip me at this bitcoin address:
+**
+** BITCOIN TIP JAR: "1BT66EoaGySkbY9J6MugvQRhMMXDwPxPya"
+**
+
+*/
+
+
+#include "WuQuantizer.h"
+#include "SparseArray.h"
+
+///////////////////////////////////////////////////////////////////////
+// C Implementation of Wu's Color Quantizer (v. 2)
+// (see Graphics Gems vol. II, pp. 126-133)
+//
+// Author: Xiaolin Wu
+// Dept. of Computer Science
+// Univ. of Western Ontario
+// London, Ontario N6A 5B7
+//
+// Algorithm: Greedy orthogonal bipartition of RGB space for variance
+// minimization aided by inclusion-exclusion tricks.
+// For speed no nearest neighbor search is done. Slightly
+// better performance can be expected by more sophisticated
+// but more expensive versions.
+//
+// The author thanks Tom Lane at [email protected] for much of
+// additional documentation and a cure to a previous bug.
+//
+// Free to distribute, comments and suggestions are appreciated.
+///////////////////////////////////////////////////////////////////////
+
+///////////////////////////////////////////////////////////////////////
+// History
+// -------
+// July 2000: C++ Implementation of Wu's Color Quantizer
+// and adaptation for the FreeImage 2 Library
+// Author: Herve Drolon ([email protected])
+// March 2004: Adaptation for the FreeImage 3 library (port to big endian processors)
+// Author: Herve Drolon ([email protected])
+///////////////////////////////////////////////////////////////////////
+
+
+///////////////////////////////////////////////////////////////////////
+
+
+
+using namespace hacd;
+
+namespace HACD
+{
+
+#define FI_RGBA_RED 2
+#define FI_RGBA_GREEN 1
+#define FI_RGBA_BLUE 0
+#define FI_RGBA_ALPHA 3
+#define FI_RGBA_RED_MASK 0x00FF0000
+#define FI_RGBA_GREEN_MASK 0x0000FF00
+#define FI_RGBA_BLUE_MASK 0x000000FF
+#define FI_RGBA_ALPHA_MASK 0xFF000000
+#define FI_RGBA_RED_SHIFT 16
+#define FI_RGBA_GREEN_SHIFT 8
+#define FI_RGBA_BLUE_SHIFT 0
+#define FI_RGBA_ALPHA_SHIFT 24
+
+////////////////////////////////////////////////////////////////
// Minimal 3-component float vector used to return quantized points.
class Vec3
{
public:

	Vec3(float _x,float _y,float _z)
	{
		x = _x;
		y = _y;
		z = _z;
	}

	// Default-construct to the origin rather than leaving the members
	// indeterminate (reading uninitialized floats is undefined behavior).
	Vec3(void)
	{
		x = 0;
		y = 0;
		z = 0;
	}

	// Construct from a packed float triple (x,y,z).
	Vec3(const float *p)
	{
		x = p[0];
		y = p[1];
		z = p[2];
	}

	float x;
	float y;
	float z;
};
+
+ typedef hacd::vector< Vec3 > Vec3Vector;
+
+
+/**
+ Xiaolin Wu color quantization algorithm
+*/
// Implements Wu's greedy orthogonal-bipartition quantizer over a 33^3 moment
// histogram.  Used here to reduce a 3D point cloud to a representative set of
// points (the "palette") rather than image colors.
class WuColorQuantizer
{
public:
	// Constructor - Input parameter: DIB 24-bit to be quantized
	WuColorQuantizer(void);
	// Destructor
	~WuColorQuantizer();
	// Quantizer - Return value: quantized 8-bit (color palette) DIB
	// Emits up to 'PaletteSize' representative points into 'outputVertices'.
	void Quantize(int32_t PaletteSize,Vec3Vector &outputVertices);

	// Accumulates one normalized point into the moment histogram.
	void addColor(float x,float y,float z);


// Axis-aligned sub-box of the 32x32x32 histogram being partitioned.
typedef struct tagBox
{
    int32_t r0;			 // min value, exclusive
    int32_t r1;			 // max value, inclusive
    int32_t g0;
    int32_t g1;
    int32_t b0;
    int32_t b1;
    int32_t vol;         // number of histogram cells in the box
} Box;

private:
	int32_t table[256];  // i*i lookup used for the squared-moment sum
	float *mSumSquared;  // cumulative c^2 moments
	int32_t *mWeight;    // cumulative point counts
	int32_t *mSumX;      // cumulative first moments, per axis
	int32_t *mSumY;
	int32_t *mSumZ;

	// Converts the raw histograms into cumulative (prefix-sum) moments.
	void M3D(int32_t *vwt, int32_t *vmr, int32_t *vmg, int32_t *vmb, float *m2);
	// Inclusion-exclusion sum of a statistic over a box.
	int32_t Vol(Box *cube, int32_t *mmt);
	int32_t Bottom(Box *cube, uint8_t dir, int32_t *mmt);
	int32_t Top(Box *cube, uint8_t dir, int32_t pos, int32_t *mmt);
	// Weighted variance of a box.
	float Var(Box *cube);
	float Maximize(Box *cube, uint8_t dir, int32_t first, int32_t last , int32_t *cut,
				   int32_t whole_r, int32_t whole_g, int32_t whole_b, int32_t whole_w);
	bool Cut(Box *set1, Box *set2);
	void Mark(Box *cube, int32_t label, uint8_t *tag);

};
+
+
+
+// Size of a 3D array : 33 x 33 x 33
+#define SIZE_3D 35937
+
+// 3D array indexation
+#define INDEX(r, g, b) ((r << 10) + (r << 6) + r + (g << 5) + g + b)
+
+#define MAXCOLOR 1024
+
+// Constructor / Destructor
+
+WuColorQuantizer::WuColorQuantizer(void)
+{
+ // Allocate 3D arrays
+ mSumSquared = (float*)malloc(SIZE_3D * sizeof(float));
+ mWeight = (int32_t*)malloc(SIZE_3D * sizeof(int32_t));
+ mSumX = (int32_t*)malloc(SIZE_3D * sizeof(int32_t));
+ mSumY = (int32_t*)malloc(SIZE_3D * sizeof(int32_t));
+ mSumZ = (int32_t*)malloc(SIZE_3D * sizeof(int32_t));
+
+ memset(mSumSquared, 0, SIZE_3D * sizeof(float));
+ memset(mWeight, 0, SIZE_3D * sizeof(int32_t));
+ memset(mSumX, 0, SIZE_3D * sizeof(int32_t));
+ memset(mSumY, 0, SIZE_3D * sizeof(int32_t));
+ memset(mSumZ, 0, SIZE_3D * sizeof(int32_t));
+
+ for(int32_t i = 0; i < 256; i++)
+ table[i] = i * i;
+
+}
+
+WuColorQuantizer::~WuColorQuantizer()
+{
+ if(mSumSquared) free(mSumSquared);
+ if(mWeight) free(mWeight);
+ if(mSumX) free(mSumX);
+ if(mSumY) free(mSumY);
+ if(mSumZ) free(mSumZ);
+}
+
+
+// Histogram is in elements 1..HISTSIZE along each axis,
+// element 0 is for base or marginal value
+// NB: these must start out 0!
+
+// Build 3-D color histogram of counts, r/g/b, c^2
+
// Accumulate one point into the 33x33x33 moment histogram.
// Assumes x,y,z are normalized to roughly [-1,1]; each coordinate is mapped to
// an 8-bit value, then down-shifted to a 32-bucket axis offset by one so that
// index 0 can serve as the marginal/base row for the cumulative moments.
void WuColorQuantizer::addColor(float x,float y,float z)
{
	uint32_t red = (uint32_t)(x*128+128);
	uint32_t green = (uint32_t)(y*128+128);
	uint32_t blue = (uint32_t)(z*128+128);
	// NOTE(review): an input of exactly 1.0 maps to 256 and would trip these
	// asserts -- callers are expected to keep inputs strictly inside range.
	HACD_ASSERT( red < 256 );
	HACD_ASSERT( green < 256 );
	HACD_ASSERT( blue < 256 );

	uint32_t inr = (red >> 3) + 1;   // 0..255 -> histogram cell 1..32
	uint32_t ing = (green >> 3) + 1;
	uint32_t inb = (blue >> 3) + 1;
	uint32_t ind = INDEX(inr, ing, inb);

	// Zeroth, first, and second order moment contributions for this cell.
	mWeight[ind]++;
	mSumX[ind] += red;
	mSumY[ind] += green;
	mSumZ[ind] += blue;
	mSumSquared[ind] += table[red] + table[green] + table[blue];
}
+
+
+// At conclusion of the histogram step, we can interpret
+// mWeight[r][g][b] = sum over voxel of P(c)
+// mSumX[r][g][b] = sum over voxel of r*P(c) , similarly for mSumY, mSumZ
+// m2[r][g][b] = sum over voxel of c^2*P(c)
+// Actually each of these should be divided by 'ImageSize' to give the usual
+// interpretation of P() as ranging from 0 to 1, but we needn't do that here.
+
+
+// We now convert histogram into moments so that we can rapidly calculate
+// the sums of the above quantities over any desired box.
+
+// Compute cumulative moments
// Compute cumulative moments
// Converts each raw histogram (counts, per-axis sums, squared sums) in place
// into 3D prefix sums, so any box total can later be read with eight lookups
// via inclusion-exclusion (see Vol()).  Row/plane 0 stays zero as the base.
void WuColorQuantizer::M3D(int32_t *vwt, int32_t *vmr, int32_t *vmg, int32_t *vmb, float *m2)
{
	uint32_t ind1, ind2;
	uint8_t i, r, g, b;
	int32_t line, line_r, line_g, line_b;
	// Running totals for the current r-plane, indexed by b.
	int32_t area[33], area_r[33], area_g[33], area_b[33];
	float line2, area2[33];

	for(r = 1; r <= 32; r++)
	{
		for(i = 0; i <= 32; i++)
		{
			area2[i] = 0;
			area[i] = area_r[i] = area_g[i] = area_b[i] = 0;
		}
		for(g = 1; g <= 32; g++)
		{
			line2 = 0;
			line = line_r = line_g = line_b = 0;
			for(b = 1; b <= 32; b++)
			{
				ind1 = (uint32_t)INDEX(r, g, b); // [r][g][b]
				// Accumulate along b (line), then fold into the g-running
				// area totals for this r-plane.
				line += vwt[ind1];
				line_r += vmr[ind1];
				line_g += vmg[ind1];
				line_b += vmb[ind1];
				line2 += m2[ind1];
				area[b] += line;
				area_r[b] += line_r;
				area_g[b] += line_g;
				area_b[b] += line_b;
				area2[b] += line2;
				// 1089 == 33*33: step back one full r-plane.
				ind2 = ind1 - 1089; // [r-1][g][b]
				vwt[ind1] = vwt[ind2] + area[b];
				vmr[ind1] = vmr[ind2] + area_r[b];
				vmg[ind1] = vmg[ind2] + area_g[b];
				vmb[ind1] = vmb[ind2] + area_b[b];
				m2[ind1] = m2[ind2] + area2[b];
			}
		}
	}
}
+
+ // Compute sum over a box of any given statistic
	// Sum of the statistic 'mmt' over the half-open box (r0,r1] x (g0,g1] x
	// (b0,b1], read from the cumulative moments with 3D inclusion-exclusion.
	int32_t
	WuColorQuantizer::Vol( Box *cube, int32_t *mmt ) {
		return( mmt[INDEX(cube->r1, cube->g1, cube->b1)]
			  - mmt[INDEX(cube->r1, cube->g1, cube->b0)]
			  - mmt[INDEX(cube->r1, cube->g0, cube->b1)]
			  + mmt[INDEX(cube->r1, cube->g0, cube->b0)]
			  - mmt[INDEX(cube->r0, cube->g1, cube->b1)]
			  + mmt[INDEX(cube->r0, cube->g1, cube->b0)]
			  + mmt[INDEX(cube->r0, cube->g0, cube->b1)]
			  - mmt[INDEX(cube->r0, cube->g0, cube->b0)] );
	}
+
+ // The next two routines allow a slightly more efficient calculation
+ // of Vol() for a proposed subbox of a given box. The sum of Top()
+ // and Bottom() is the Vol() of a subbox split in the given direction
+ // and with the specified new upper bound.
+
+
+ // Compute part of Vol(cube, mmt) that doesn't depend on r1, g1, or b1
+ // (depending on dir)
+
	// The terms of Vol() that do NOT involve the box's upper bound along
	// 'dir'; added to Top() this reconstitutes Vol() for a proposed sub-box.
	int32_t
	WuColorQuantizer::Bottom(Box *cube, uint8_t dir, int32_t *mmt)
	{
		switch(dir)
		{
			case FI_RGBA_RED:
				return( - mmt[INDEX(cube->r0, cube->g1, cube->b1)]
						+ mmt[INDEX(cube->r0, cube->g1, cube->b0)]
						+ mmt[INDEX(cube->r0, cube->g0, cube->b1)]
						- mmt[INDEX(cube->r0, cube->g0, cube->b0)] );
			case FI_RGBA_GREEN:
				return( - mmt[INDEX(cube->r1, cube->g0, cube->b1)]
						+ mmt[INDEX(cube->r1, cube->g0, cube->b0)]
						+ mmt[INDEX(cube->r0, cube->g0, cube->b1)]
						- mmt[INDEX(cube->r0, cube->g0, cube->b0)] );
			case FI_RGBA_BLUE:
				return( - mmt[INDEX(cube->r1, cube->g1, cube->b0)]
						+ mmt[INDEX(cube->r1, cube->g0, cube->b0)]
						+ mmt[INDEX(cube->r0, cube->g1, cube->b0)]
						- mmt[INDEX(cube->r0, cube->g0, cube->b0)] );
		}

		// Unknown direction: contribute nothing.
		return 0;
	}
+
+
+ // Compute remainder of Vol(cube, mmt), substituting pos for
+ // r1, g1, or b1 (depending on dir)
+
+ int32_t WuColorQuantizer::Top(Box *cube, uint8_t dir, int32_t pos, int32_t *mmt)
+ {
+ switch(dir)
+ {
+ case FI_RGBA_RED:
+ return( mmt[INDEX(pos, cube->g1, cube->b1)]
+ -mmt[INDEX(pos, cube->g1, cube->b0)]
+ -mmt[INDEX(pos, cube->g0, cube->b1)]
+ +mmt[INDEX(pos, cube->g0, cube->b0)] );
+ case FI_RGBA_GREEN:
+ return( mmt[INDEX(cube->r1, pos, cube->b1)]
+ -mmt[INDEX(cube->r1, pos, cube->b0)]
+ -mmt[INDEX(cube->r0, pos, cube->b1)]
+ +mmt[INDEX(cube->r0, pos, cube->b0)] );
+ case FI_RGBA_BLUE:
+ return( mmt[INDEX(cube->r1, cube->g1, pos)]
+ -mmt[INDEX(cube->r1, cube->g0, pos)]
+ -mmt[INDEX(cube->r0, cube->g1, pos)]
+ +mmt[INDEX(cube->r0, cube->g0, pos)] );
+ }
+
+ return 0;
+ }
+
// Compute the weighted variance of a box
// NB: as with the raw statistics, this is really the variance * ImageSize
// NOTE(review): divides by Vol(cube, mWeight); Quantize() only calls this
// on boxes with vol > 1, which presumably guarantees a non-zero weight —
// confirm that assumption holds for all callers.

float WuColorQuantizer::Var(Box *cube)
{
	// First moments (per-channel sums) over the box.
	float dr = (float) Vol(cube, mSumX);
	float dg = (float) Vol(cube, mSumY);
	float db = (float) Vol(cube, mSumZ);
	// Sum of squares over the box via 3D inclusion-exclusion.
	float xx = mSumSquared[INDEX(cube->r1, cube->g1, cube->b1)]
			-mSumSquared[INDEX(cube->r1, cube->g1, cube->b0)]
			-mSumSquared[INDEX(cube->r1, cube->g0, cube->b1)]
			+mSumSquared[INDEX(cube->r1, cube->g0, cube->b0)]
			-mSumSquared[INDEX(cube->r0, cube->g1, cube->b1)]
			+mSumSquared[INDEX(cube->r0, cube->g1, cube->b0)]
			+mSumSquared[INDEX(cube->r0, cube->g0, cube->b1)]
			-mSumSquared[INDEX(cube->r0, cube->g0, cube->b0)];

	// var * N = sum(c^2) - (sum(c))^2 / N
	return (xx - (dr*dr+dg*dg+db*db)/(float)Vol(cube,mWeight));
}
+
// We want to minimize the sum of the variances of two subboxes.
// The sum(c^2) terms can be ignored since their sum over both subboxes
// is the same (the sum for the whole box) no matter where we split.
// The remaining terms have a minus sign in the variance formula,
// so we drop the minus sign and MAXIMIZE the sum of the two terms.

// Scan every candidate cut position in [first, last) along 'dir' and return
// the best (largest) score; *cut receives the winning position, or -1 when
// no cut yields two non-empty subboxes. whole_* are the moments of the
// entire box (Vol over mSumX/mSumY/mSumZ/mWeight).
float WuColorQuantizer::Maximize(Box *cube, uint8_t dir, int32_t first, int32_t last , int32_t *cut, int32_t whole_r, int32_t whole_g, int32_t whole_b, int32_t whole_w)
{
	int32_t half_r, half_g, half_b, half_w;
	int32_t i;
	float temp;

	// Moments of the portion of the box below any cut along 'dir'
	// (independent of the cut position).
	int32_t base_r = Bottom(cube, dir, mSumX);
	int32_t base_g = Bottom(cube, dir, mSumY);
	int32_t base_b = Bottom(cube, dir, mSumZ);
	int32_t base_w = Bottom(cube, dir, mWeight);

	float max = 0.0;

	*cut = -1;

	for (i = first; i < last; i++) {
		// Moments of the lower subbox if we split at position i.
		half_r = base_r + Top(cube, dir, i, mSumX);
		half_g = base_g + Top(cube, dir, i, mSumY);
		half_b = base_b + Top(cube, dir, i, mSumZ);
		half_w = base_w + Top(cube, dir, i, mWeight);

		// now half_x is sum over lower half of box, if split at i

		if (half_w == 0) {		// subbox could be empty of pixels!
			continue;			// never split into an empty box
		} else {
			temp = ((float)half_r*half_r + (float)half_g*half_g + (float)half_b*half_b)/half_w;
		}

		// Moments of the complementary (upper) subbox.
		half_r = whole_r - half_r;
		half_g = whole_g - half_g;
		half_b = whole_b - half_b;
		half_w = whole_w - half_w;

		if (half_w == 0) {		// subbox could be empty of pixels!
			continue;			// never split into an empty box
		} else {
			temp += ((float)half_r*half_r + (float)half_g*half_g + (float)half_b*half_b)/half_w;
		}

		if (temp > max) {
			max=temp;
			*cut=i;
		}
	}

	return max;
}
+
// Split box 'set1' along the axis with the best Maximize() score.
// On success, set1 keeps the lower part and set2 receives the upper part
// (both with their 'vol' fields refreshed); returns false when set1
// cannot be split.
bool
WuColorQuantizer::Cut(Box *set1, Box *set2) {
	uint8_t dir;
	int32_t cutr, cutg, cutb;

	// Moments of the whole box, shared by the three axis scans.
	int32_t whole_r = Vol(set1, mSumX);
	int32_t whole_g = Vol(set1, mSumY);
	int32_t whole_b = Vol(set1, mSumZ);
	int32_t whole_w = Vol(set1, mWeight);

	// Best score and cut position along each axis.
	float maxr = Maximize(set1, FI_RGBA_RED, set1->r0+1, set1->r1, &cutr, whole_r, whole_g, whole_b, whole_w);
	float maxg = Maximize(set1, FI_RGBA_GREEN, set1->g0+1, set1->g1, &cutg, whole_r, whole_g, whole_b, whole_w);
	float maxb = Maximize(set1, FI_RGBA_BLUE, set1->b0+1, set1->b1, &cutb, whole_r, whole_g, whole_b, whole_w);

	// NOTE(review): only the red branch checks for a failed cut (cut < 0).
	// Presumably the green/blue branches can only be reached with a strictly
	// positive score (a zero score keeps maxr >= maxg/maxb), which implies a
	// valid cut position — confirm against the upstream FreeImage code.
	if ((maxr >= maxg) && (maxr >= maxb)) {
		dir = FI_RGBA_RED;

		if (cutr < 0) {
			return false; // can't split the box
		}
	} else if ((maxg >= maxr) && (maxg>=maxb)) {
		dir = FI_RGBA_GREEN;
	} else {
		dir = FI_RGBA_BLUE;
	}

	// The upper bounds of the new (upper) box match the original box.
	set2->r1 = set1->r1;
	set2->g1 = set1->g1;
	set2->b1 = set1->b1;

	// Lower the split axis of set1 and raise the split axis floor of set2.
	switch (dir) {
	case FI_RGBA_RED:
		set2->r0 = set1->r1 = cutr;
		set2->g0 = set1->g0;
		set2->b0 = set1->b0;
		break;

	case FI_RGBA_GREEN:
		set2->g0 = set1->g1 = cutg;
		set2->r0 = set1->r0;
		set2->b0 = set1->b0;
		break;

	case FI_RGBA_BLUE:
		set2->b0 = set1->b1 = cutb;
		set2->r0 = set1->r0;
		set2->g0 = set1->g0;
		break;
	}

	// Cache cell counts; Quantize() uses vol > 1 as "still splittable".
	set1->vol = (set1->r1-set1->r0)*(set1->g1-set1->g0)*(set1->b1-set1->b0);
	set2->vol = (set2->r1-set2->r0)*(set2->g1-set2->g0)*(set2->b1-set2->b0);

	return true;
}
+
+
+ void
+ WuColorQuantizer::Mark(Box *cube, int32_t label, uint8_t *tag) {
+ for (int32_t r = cube->r0 + 1; r <= cube->r1; r++) {
+ for (int32_t g = cube->g0 + 1; g <= cube->g1; g++) {
+ for (int32_t b = cube->b0 + 1; b <= cube->b1; b++) {
+ tag[INDEX(r, g, b)] = (uint8_t)label;
+ }
+ }
+ }
+ }
+
+// Wu Quantization algorithm
+void WuColorQuantizer::Quantize(int32_t PaletteSize,Vec3Vector &outputVertices)
+{
+ uint8_t *tag = NULL;
+
+ if ( PaletteSize > MAXCOLOR )
+ {
+ PaletteSize = MAXCOLOR;
+ }
+ Box cube[MAXCOLOR];
+ int32_t next;
+ int32_t i, weight;
+ int32_t k;
+ float vv[MAXCOLOR], temp;
+
+ // Compute moments
+ M3D(mWeight, mSumX, mSumY, mSumZ, mSumSquared);
+
+ cube[0].r0 = cube[0].g0 = cube[0].b0 = 0;
+ cube[0].r1 = cube[0].g1 = cube[0].b1 = 32;
+ next = 0;
+
+ for (i = 1; i < PaletteSize; i++)
+ {
+ if(Cut(&cube[next], &cube[i]))
+ {
+ // volume test ensures we won't try to cut one-cell box
+ vv[next] = (cube[next].vol > 1) ? Var(&cube[next]) : 0;
+ vv[i] = (cube[i].vol > 1) ? Var(&cube[i]) : 0;
+ }
+ else
+ {
+ vv[next] = 0.0; // don't try to split this box again
+ i--; // didn't create box i
+ }
+
+ next = 0; temp = vv[0];
+
+ for (k = 1; k <= i; k++)
+ {
+ if (vv[k] > temp)
+ {
+ temp = vv[k]; next = k;
+ }
+ }
+
+ if (temp <= 0.0)
+ {
+ PaletteSize = i + 1;
+ // Error: "Only got 'PaletteSize' boxes"
+ break;
+ }
+ }
+
+ // Partition done
+ // the space for array mSumSquared can be freed now
+ free(mSumSquared);
+ mSumSquared = NULL;
+
+ // create an optimized palette
+ tag = (uint8_t*) malloc(SIZE_3D * sizeof(uint8_t));
+ memset(tag, 0, SIZE_3D * sizeof(uint8_t));
+
+ for (k = 0; k < PaletteSize ; k++)
+ {
+ Mark(&cube[k], k, tag);
+ weight = Vol(&cube[k], mWeight);
+
+ if (weight)
+ {
+ int32_t red = (int32_t)(((float)Vol(&cube[k], mSumX) / (float)weight) + 0.5f);
+ int32_t green = (int32_t)(((float)Vol(&cube[k], mSumY) / (float)weight) + 0.5f);
+ int32_t blue = (int32_t)(((float)Vol(&cube[k], mSumZ) / (float)weight) + 0.5f);
+ HACD_ASSERT( red >= 0 && red < 256 );
+ HACD_ASSERT( green >= 0 && green < 256 );
+ HACD_ASSERT( blue >= 0 && blue < 256 );
+ Vec3 v;
+ v.x = (red-128.0f)/128.0f;
+ v.y = (green-128.0f)/128.0f;
+ v.z = (blue-128.0f)/128.0f;
+ outputVertices.push_back(v);
+ }
+ else
+ {
+ }
+ }
+}
+
+
// Forward declaration; defined below after the generic kmeans_cluster template.
uint32_t kmeans_cluster3d(const float *input,             // an array of input 3d data points.
						 uint32_t inputSize,              // the number of input data points.
						 uint32_t clumpCount,             // the number of clumps you wish to produce.
						 float *outputClusters,           // The output array of clumps 3d vectors, should be at least 'clumpCount' in size.
						 uint32_t *outputIndices,         // A set of indices which remaps the input vertices to clumps; should be at least 'inputSize'
						 float errorThreshold=0.01f,      // The error threshold to converge towards before giving up.
						 float collapseDistance=0.01f);   // distance so small it is not worth bothering to create a new clump.
+
// Minimal POD 3-vector used by the k-means clustering code.
// Layout matters: exactly three contiguous components {x, y, z}, so a raw
// float triple can be viewed as a Vec3d<float>.
template <class Type> struct Vec3d
{
	// Squared Euclidean distance to 'a'.
	inline Type distanceSquared(const Vec3d &a) const
	{
		const Type ex = x - a.x;
		const Type ey = y - a.y;
		const Type ez = z - a.z;
		return ex * ex + ey * ey + ez * ez;
	}

	// Component-wise accumulation.
	inline void operator+=(const Vec3d &v)
	{
		x = x + v.x;
		y = y + v.y;
		z = z + v.z;
	}

	// Uniform scale by 'v'.
	inline void operator*=(const Type v)
	{
		x = x * v;
		y = y * v;
		z = z * v;
	}

	// Reset to the origin.
	inline void zero(void)
	{
		x = 0;
		y = 0;
		z = 0;
	}

	Type x;
	Type y;
	Type z;
};
+
// Generic Lloyd's k-means over 'Vec' points.
// Returns the number of clusters actually produced after pruning: empty
// clumps are dropped, and clumps closer than 'collapseDistance' are merged.
// 'clusters' receives the cluster centers; 'outputIndices' (optional in the
// trivial path) maps each input point to its cluster.
// NOTE(review): when inputCount > clumpCount the convergence loop writes
// outputIndices[i] unconditionally — callers presumably always pass a
// non-NULL outputIndices in that case; confirm.
template <class Vec,class Type >
uint32_t kmeans_cluster(const Vec *input,
						uint32_t inputCount,
						uint32_t clumpCount,
						Vec *clusters,
						uint32_t *outputIndices,
						Type threshold,        // controls how long it works to converge towards a least errors solution.
						Type collapseDistance) // distance between clumps to consider them to be essentially equal.
{
	uint32_t convergeCount = 64; // maximum number of iterations attempting to converge to a solution..
	uint32_t *counts = (uint32_t *)HACD_ALLOC(sizeof(uint32_t)*clumpCount);
	Type error=0;
	if ( inputCount <= clumpCount ) // if the number of input points is less than our clumping size, just return the input points.
	{
		clumpCount = inputCount;
		for (uint32_t i=0; i<inputCount; i++)
		{
			if ( outputIndices )
			{
				outputIndices[i] = i;
			}
			clusters[i] = input[i];
			counts[i] = 1;
		}
	}
	else
	{
		Vec *centroids = (Vec *)HACD_ALLOC(sizeof(Vec)*clumpCount);

		// Take a sampling of the input points as initial centroid estimates.
		for (uint32_t i=0; i<clumpCount; i++)
		{
			uint32_t index = (i*inputCount)/clumpCount;
			HACD_ASSERT( index < inputCount );
			clusters[i] = input[index];
		}

		// Here is the main convergence loop
		Type old_error = FLT_MAX;	// old and initial error estimates are max Type
		error = FLT_MAX;
		do
		{
			old_error = error;	// preserve the old error
			// reset the counts and centroids to current cluster location
			for (uint32_t i=0; i<clumpCount; i++)
			{
				counts[i] = 0;
				centroids[i].zero();
			}
			error = 0;
			// For each input data point, figure out which cluster it is closest too and add it to that cluster.
			for (uint32_t i=0; i<inputCount; i++)
			{
				Type min_distance = FLT_MAX;
				// find the nearest clump to this point.
				for (uint32_t j=0; j<clumpCount; j++)
				{
					Type distance = input[i].distanceSquared( clusters[j] );
					if ( distance < min_distance )
					{
						min_distance = distance;
						outputIndices[i] = j; // save which clump this point indexes
					}
				}
				uint32_t index = outputIndices[i]; // which clump was nearest to this point.
				centroids[index]+=input[i];
				counts[index]++;	// increment the counter indicating how many points are in this clump.
				error+=min_distance; // save the error accumulation
			}
			// Now, for each clump, compute the mean and store the result.
			for (uint32_t i=0; i<clumpCount; i++)
			{
				if ( counts[i] ) // if this clump got any points added to it...
				{
					Type recip = 1.0f / (Type)counts[i];	// compute the average (center of those points)
					centroids[i]*=recip;	// compute the average center of the points in this clump.
					clusters[i] = centroids[i]; // store it as the new cluster.
				}
			}
			// decrement the convergence counter and bail if it is taking too long to converge to a solution.
			convergeCount--;
			if (convergeCount == 0 )
			{
				break;
			}
			if ( error < threshold ) // early exit if our first guess is already good enough (if all input points are the same)
				break;
		} while ( fabs(error - old_error) > threshold ); // keep going until the error is reduced by this threshold amount.

		HACD_FREE(centroids);
	}

	// ok..now we prune the clumps if necessary.
	// The rules are; first, if a clump has no 'counts' then we prune it as it's unused.
	// The second, is if the centroid of this clump is essentially the same (based on the distance tolerance)
	// as an existing clump, then it is pruned and all indices which used to point to it, now point to the one
	// it is closest too.
	uint32_t outCount = 0; // number of clumps output after pruning performed.
	Type d2 = collapseDistance*collapseDistance; // squared collapse distance.
	for (uint32_t i=0; i<clumpCount; i++)
	{
		if ( counts[i] == 0 ) // if no points ended up in this clump, eliminate it.
			continue;
		// see if this clump is too close to any already accepted clump.
		bool add = true;
		uint32_t remapIndex = outCount; // by default this clump will be remapped to its current index.
		for (uint32_t j=0; j<outCount; j++)
		{
			Type distance = clusters[i].distanceSquared(clusters[j]);
			if ( distance < d2 )
			{
				remapIndex = j;
				add = false; // we do not add this clump
				break;
			}
		}
		// If we have fewer output clumps than input clumps so far, then we need to remap the old indices to the new ones.
		if ( outputIndices )
		{
			if ( outCount != i || !add ) // we need to remap indices! everything that was index 'i' now needs to be remapped to 'outCount'
			{
				for (uint32_t j=0; j<inputCount; j++)
				{
					if ( outputIndices[j] == i )
					{
						outputIndices[j] = remapIndex; //
					}
				}
			}
		}
		if ( add )
		{
			clusters[outCount] = clusters[i];
			outCount++;
		}
	}
	HACD_FREE(counts);
	clumpCount = outCount;
	return clumpCount;
};
+
// Thin 3D adapter over the generic kmeans_cluster template.
uint32_t kmeans_cluster3d(const float *input,       // an array of input 3d data points.
						 uint32_t inputSize,        // the number of input data points.
						 uint32_t clumpCount,       // the number of clumps you wish to produce
						 float *outputClusters,     // The output array of clumps 3d vectors, should be at least 'clumpCount' in size.
						 uint32_t *outputIndices,   // A set of indices which remaps the input vertices to clumps; should be at least 'inputSize'
						 float errorThreshold,      // The error threshold to converge towards before giving up.
						 float collapseDistance)    // distance so small it is not worth bothering to create a new clump.
{
	// View the raw float triples as Vec3d<float>; this relies on
	// Vec3d<float> being exactly three contiguous floats {x, y, z}.
	const Vec3d< float > *_input = (const Vec3d<float> *)input;
	Vec3d<float> *_output = (Vec3d<float> *)outputClusters;
	return kmeans_cluster< Vec3d<float>, float >(_input,inputSize,clumpCount,_output,outputIndices,errorThreshold,collapseDistance);
}
+
+class MyWuQuantizer : public WuQuantizer, public UANS::UserAllocated
+{
+public:
+ MyWuQuantizer(void)
+ {
+ mScale = Vec3(1,1,1);
+ mCenter = Vec3(0,0,0);
+ }
+
+ // use the Wu quantizer with 10 bits of resolution on each axis. Precision down to 0.0009765625
+ // All input data is normalized to a unit cube.
+
+ virtual const float * wuQuantize3D(uint32_t vcount,
+ const float *vertices,
+ bool denormalizeResults,
+ uint32_t maxVertices,
+ uint32_t &outputCount)
+ {
+ const float *ret = NULL;
+ outputCount = 0;
+
+ normalizeInput(vcount,vertices);
+
+ WuColorQuantizer wcq;
+
+ for (uint32_t i=0; i<vcount; i++)
+ {
+ const Vec3 &v = mNormalizedInput[i];
+ wcq.addColor(v.x,v.y,v.z);
+ }
+
+ wcq.Quantize((int32_t)maxVertices,mQuantizedOutput);
+
+ outputCount = (uint32_t)mQuantizedOutput.size();
+
+ if ( outputCount > 0 )
+ {
+ if ( denormalizeResults )
+ {
+ for (uint32_t i=0; i<outputCount; i++)
+ {
+ Vec3 &v = mQuantizedOutput[i];
+ v.x = v.x*mScale.x + mCenter.x;
+ v.y = v.x*mScale.y + mCenter.y;
+ v.z = v.x*mScale.z + mCenter.z;
+ mQuantizedOutput.push_back(v);
+ }
+ }
+ ret = &mQuantizedOutput[0].x;
+ }
+
+
+ return ret;
+ }
+
+ // Use the kemans quantizer, similar results, but much slower.
+ virtual const float * kmeansQuantize3D(uint32_t vcount,
+ const float *vertices,
+ bool denormalizeResults,
+ uint32_t maxVertices,
+ uint32_t &outputCount)
+ {
+ const float *ret = NULL;
+ outputCount = 0;
+ mNormalizedInput.clear();
+ mQuantizedOutput.clear();
+
+ if ( vcount > 0 )
+ {
+ normalizeInput(vcount,vertices);
+ float *quantizedOutput = (float *)HACD_ALLOC( sizeof(float)*3*vcount);
+ uint32_t *quantizedIndices = (uint32_t *)HACD_ALLOC( sizeof(uint32_t)*vcount );
+ outputCount = kmeans_cluster3d(&mNormalizedInput[0].x, vcount, maxVertices, quantizedOutput, quantizedIndices, 0.01f, 0.0001f );
+ if ( outputCount > 0 )
+ {
+ if ( denormalizeResults )
+ {
+ for (uint32_t i=0; i<outputCount; i++)
+ {
+ Vec3 v( &quantizedOutput[i*3] );
+ v.x = v.x*mScale.x + mCenter.x;
+ v.y = v.x*mScale.y + mCenter.y;
+ v.z = v.x*mScale.z + mCenter.z;
+ mQuantizedOutput.push_back(v);
+ }
+ }
+ else
+ {
+ for (uint32_t i=0; i<outputCount; i++)
+ {
+ Vec3 v( &quantizedOutput[i*3] );
+ mQuantizedOutput.push_back(v);
+ }
+ }
+ ret = &mQuantizedOutput[0].x;
+ }
+ HACD_FREE(quantizedOutput);
+ HACD_FREE(quantizedIndices);
+ }
+
+ return ret;
+ }
+
+ virtual void release(void)
+ {
+ delete this;
+ }
+
+ virtual const float * getDenormalizeScale(void) const
+ {
+ return &mScale.x;
+ }
+
+ virtual const float * getDenormalizeCenter(void) const
+ {
+ return &mCenter.x;
+ }
+
+
+
+private:
+
+ void normalizeInput(uint32_t vcount,const float *vertices)
+ {
+ mNormalizedInput.clear();
+ mQuantizedOutput.clear();
+ Vec3 bmin(vertices);
+ Vec3 bmax(vertices);
+ for (uint32_t i=1; i<vcount; i++)
+ {
+ Vec3 v(&vertices[i*3]);
+
+ if ( v.x < bmin.x )
+ {
+ bmin.x = v.x;
+ }
+ else if ( v.x > bmax.x )
+ {
+ bmax.x = v.x;
+ }
+
+ if ( v.y < bmin.y )
+ {
+ bmin.y = v.y;
+ }
+ else if ( v.y > bmax.y )
+ {
+ bmax.y = v.y;
+ }
+
+ if ( v.z < bmin.z )
+ {
+ bmin.z = v.z;
+ }
+ else if ( v.z > bmax.z )
+ {
+ bmax.z = v.z;
+ }
+ }
+
+ mCenter.x = (bmin.x+bmax.x)*0.5f;
+ mCenter.y = (bmin.y+bmax.y)*0.5f;
+ mCenter.z = (bmin.z+bmax.z)*0.5f;
+
+ float dx = bmax.x-bmin.x;
+ float dy = bmax.y-bmin.y;
+ float dz = bmax.z-bmin.z;
+
+ if ( dx == 0 )
+ {
+ mScale.x = 1;
+ }
+ else
+ {
+ dx = dx*1.001f;
+ mScale.x = dx*0.5f;
+ }
+ if ( dy == 0 )
+ {
+ mScale.y = 1;
+ }
+ else
+ {
+ dy = dy*1.001f;
+ mScale.y = dy*0.5f;
+ }
+ if ( dz == 0 )
+ {
+ mScale.z = 1;
+ }
+ else
+ {
+ dz = dz*1.001f;
+ mScale.z = dz*0.5f;
+ }
+
+ Vec3 recip;
+ recip.x = 1.0f / mScale.x;
+ recip.y = 1.0f / mScale.y;
+ recip.z = 1.0f / mScale.z;
+
+ for (uint32_t i=0; i<vcount; i++)
+ {
+ Vec3 v(&vertices[i*3]);
+
+ v.x = (v.x-mCenter.x)*recip.x;
+ v.y = (v.y-mCenter.y)*recip.y;
+ v.z = (v.z-mCenter.z)*recip.z;
+
+ HACD_ASSERT( v.x >= -1 && v.x <= 1 );
+ HACD_ASSERT( v.y >= -1 && v.y <= 1 );
+ HACD_ASSERT( v.z >= -1 && v.z <= 1 );
+ mNormalizedInput.push_back(v);
+ }
+ }
+
+ virtual ~MyWuQuantizer(void)
+ {
+
+ }
+
+ Vec3 mScale;
+ Vec3 mCenter;
+ Vec3Vector mNormalizedInput;
+ Vec3Vector mQuantizedOutput;
+};
+
// Factory for the WuQuantizer interface; destroy the instance by calling
// its release() method.
WuQuantizer * createWuQuantizer(void)
{
	MyWuQuantizer *m = HACD_NEW(MyWuQuantizer);
	return static_cast< WuQuantizer *>(m);
}
+
+
+}; // end of HACD namespace
diff --git a/APEX_1.4/shared/general/HACD/src/dgAABBPolygonSoup.cpp b/APEX_1.4/shared/general/HACD/src/dgAABBPolygonSoup.cpp
new file mode 100644
index 00000000..d7c7278b
--- /dev/null
+++ b/APEX_1.4/shared/general/HACD/src/dgAABBPolygonSoup.cpp
@@ -0,0 +1,1266 @@
+/* Copyright (c) <2003-2011> <Julio Jerez, Newton Game Dynamics>
+*
+* This software is provided 'as-is', without any express or implied
+* warranty. In no event will the authors be held liable for any damages
+* arising from the use of this software.
+*
+* Permission is granted to anyone to use this software for any purpose,
+* including commercial applications, and to alter it and redistribute it
+* freely, subject to the following restrictions:
+*
+* 1. The origin of this software must not be misrepresented; you must not
+* claim that you wrote the original software. If you use this software
+* in a product, an acknowledgment in the product documentation would be
+* appreciated but is not required.
+*
+* 2. Altered source versions must be plainly marked as such, and must not be
+* misrepresented as being the original software.
+*
+* 3. This notice may not be removed or altered from any source distribution.
+*/
+
+/****************************************************************************
+*
+* File Name : Bitmap.C
+* Visual C++ 4.0 base by Julio Jerez
+*
+****************************************************************************/
+#include "dgHeap.h"
+#include "dgStack.h"
+#include "dgList.h"
+#include "dgMatrix.h"
+#include "dgPolygonSoupBuilder.h"
+#include "dgAABBPolygonSoup.h"
+#include "dgIntersections.h"
+
+#define DG_STACK_DEPTH 63
+
+class dgAABBTree
+{
	// A 32-bit packed reference to a tree node. Bit 31 is the leaf flag:
	//  - internal node: bits 0..30 hold the index of the child dgAABBTree;
	//  - leaf: the top DG_INDEX_COUNT_BITS below the flag hold the face-index
	//    count and the remaining low bits hold the start offset into the
	//    face-index array.
	class TreeNode
	{
		#define DG_INDEX_COUNT_BITS 6

		public:
		inline TreeNode ()
		{
			// Default construction is not a valid state.
			HACD_ALWAYS_ASSERT();
		}

		// Construct an internal-node reference from a child node index.
		inline TreeNode (uint32_t node)
		{
			m_node = node;
			HACD_ASSERT (!IsLeaf());
		}

		// Non-zero when bit 31 (the leaf flag) is set.
		inline uint32_t IsLeaf () const
		{
			return m_node & 0x80000000;
		}

		// Face-index count stored in a leaf (bits just below the flag).
		inline uint32_t GetCount() const
		{
			HACD_ASSERT (IsLeaf());
			return (m_node & (~0x80000000)) >> (32 - DG_INDEX_COUNT_BITS - 1);
		}

		// Start offset of a leaf's face indices (the low bits).
		inline uint32_t GetIndex() const
		{
			HACD_ASSERT (IsLeaf());
			return m_node & (~(-(1 << (32 - DG_INDEX_COUNT_BITS - 1))));
		}


		// Construct a leaf reference packing the flag, count and start offset.
		inline TreeNode (uint32_t faceIndexCount, uint32_t faceIndexStart)
		{
			HACD_ASSERT (faceIndexCount < (1<<DG_INDEX_COUNT_BITS));
			m_node = 0x80000000 | (faceIndexCount << (32 - DG_INDEX_COUNT_BITS - 1)) | faceIndexStart;
		}

		// Resolve an internal-node reference against the tree's base address.
		inline dgAABBTree* GetNode (const void* root) const
		{
			return ((dgAABBTree*) root) + m_node;
		}

		union {
			uint32_t m_node;
		};
	};
+
	// A pair of node indices; used as payload when pairing nodes on a heap.
	class dgHeapNodePair
	{
		public:
		int32_t m_nodeA;
		int32_t m_nodeB;
	};
+
+ class dgConstructionTree : public UANS::UserAllocated
+ {
+ public:
+ dgConstructionTree ()
+ {
+ }
+
+ ~dgConstructionTree ()
+ {
+ if (m_back) {
+ delete m_back;
+ }
+ if (m_front) {
+ delete m_front;
+ }
+ }
+
+ dgVector m_p0;
+ dgVector m_p1;
+ int32_t m_boxIndex;
+ float m_surfaceArea;
+ dgConstructionTree* m_back;
+ dgConstructionTree* m_front;
+ dgConstructionTree* m_parent;
+ };
+
+
+
+ public:
	// Compute the AABB of the vertices referenced through indexArray and
	// store it (inflated by 1.0e-3 on every side) into the two sentinel
	// vertices at this node's m_minIndex/m_maxIndex slots.
	// NOTE(review): the loop starts at i = 1, so indexArray[0] is never
	// read here — presumably that slot is not a vertex index; confirm
	// against the caller's index-array layout.
	void CalcExtends (dgTriplex* const vertex, int32_t indexCount, const int32_t* const indexArray)
	{
		dgVector minP ( float (1.0e15f), float (1.0e15f), float (1.0e15f), float (0.0f));
		dgVector maxP (-float (1.0e15f), -float (1.0e15f), -float (1.0e15f), float (0.0f));
		for (int32_t i = 1; i < indexCount; i ++) {
			int32_t index = indexArray[i];
			dgVector p (&vertex[index].m_x);

			minP.m_x = GetMin (p.m_x, minP.m_x);
			minP.m_y = GetMin (p.m_y, minP.m_y);
			minP.m_z = GetMin (p.m_z, minP.m_z);

			maxP.m_x = GetMax (p.m_x, maxP.m_x);
			maxP.m_y = GetMax (p.m_y, maxP.m_y);
			maxP.m_z = GetMax (p.m_z, maxP.m_z);
		}

		// Write the padded box corners into the sentinel vertices.
		vertex[m_minIndex].m_x = minP.m_x - float (1.0e-3f);
		vertex[m_minIndex].m_y = minP.m_y - float (1.0e-3f);
		vertex[m_minIndex].m_z = minP.m_z - float (1.0e-3f);
		vertex[m_maxIndex].m_x = maxP.m_x + float (1.0e-3f);
		vertex[m_maxIndex].m_y = maxP.m_y + float (1.0e-3f);
		vertex[m_maxIndex].m_z = maxP.m_z + float (1.0e-3f);
	}
+
	// Flatten the construction tree into the boxArray node array using an
	// explicit 128-entry stack. Leaf construction nodes reuse the matching
	// pre-built box from boxCopy; internal nodes emit a fresh box whose
	// min/max corners are appended to vertexArrayOut (growing treeVCount
	// by two per internal node). Returns the number of nodes written.
	int32_t BuildTree (dgConstructionTree* const root, dgAABBTree* const boxArray, dgAABBTree* const boxCopy, dgTriplex* const vertexArrayOut, int32_t &treeVCount)
	{
		TreeNode* parent[128];
		dgConstructionTree* pool[128];

		int32_t index = 0;
		int32_t stack = 1;
		parent[0] = NULL;
		pool[0] = root;
		while (stack) {
			stack --;

			dgConstructionTree* const node = pool[stack];
			TreeNode* const parentNode = parent[stack];
			if (node->m_boxIndex != -1) {
				// Leaf: link the parent slot to the pre-built box's packed
				// node, or (single-box tree) copy the box out as the root.
				if (parentNode) {
					*parentNode = boxCopy[node->m_boxIndex].m_back;
				} else {

					//HACD_ASSERT (boxCount == 1);
					dgAABBTree* const newNode = &boxArray[index];
					*newNode = boxCopy[node->m_boxIndex];
					index ++;
				}

			} else {
				// Internal node: emit a new box with fresh sentinel vertices.
				dgAABBTree* const newNode = &boxArray[index];

				newNode->m_minIndex = treeVCount;
				vertexArrayOut[treeVCount].m_x = node->m_p0.m_x;
				vertexArrayOut[treeVCount].m_y = node->m_p0.m_y;
				vertexArrayOut[treeVCount].m_z = node->m_p0.m_z;

				newNode->m_maxIndex = treeVCount + 1;
				vertexArrayOut[treeVCount + 1].m_x = node->m_p1.m_x;
				vertexArrayOut[treeVCount + 1].m_y = node->m_p1.m_y;
				vertexArrayOut[treeVCount + 1].m_z = node->m_p1.m_z;
				treeVCount += 2;

				if (parentNode) {
					*parentNode = TreeNode (uint32_t(index));
				}
				index ++;

				// Defer both children; their TreeNode slots will be patched
				// when they are popped.
				pool[stack] = node->m_front;
				parent[stack] = &newNode->m_front;
				stack ++;
				pool[stack] = node->m_back;
				parent[stack] = &newNode->m_back;
				stack ++;
			}
		}

		// this object is not to be deleted when using stack allocation
		//delete root;
		return index;
	}
+
	// Collect every internal node (m_boxIndex == -1) into 'list' in
	// post-order (back subtree, front subtree, then the node itself).
	// Callers depend on this order: ImproveTotalFitness starts from the
	// last list entry to locate the root.
	void PushNodes (dgConstructionTree* const root, dgList<dgConstructionTree*>& list) const
	{
		if (root->m_back) {
			PushNodes (root->m_back, list);
		}
		if (root->m_front) {
			PushNodes (root->m_front, list);
		}
		if (root->m_boxIndex == -1) {
			list.Append(root);
		}
	}
+
	// Depth (number of levels) of the construction tree, computed
	// iteratively with explicit 128-entry stacks.
	int32_t CalculateMaximunDepth (dgConstructionTree* tree) const
	{
		int32_t depthPool[128];
		dgConstructionTree* pool[128];

		depthPool[0] = 0;
		pool[0] = tree;
		int32_t stack = 1;

		int32_t maxDepth = -1;
		while (stack) {
			stack --;

			int32_t depth = depthPool[stack];
			dgConstructionTree* const node = pool[stack];
			maxDepth = GetMax(maxDepth, depth);

			// Only internal nodes have children to descend into.
			if (node->m_boxIndex == -1) {
				HACD_ASSERT (node->m_back);
				HACD_ASSERT (node->m_front);

				depth ++;
				depthPool[stack] = depth;
				pool[stack] = node->m_back;
				stack ++;

				depthPool[stack] = depth;
				pool[stack] = node->m_front;
				stack ++;
			}
		}

		// +1 converts a zero-based depth into a level count.
		return maxDepth + 1;
	}
+
	// Surface-area metric of the union AABB of two nodes. With extents
	// s = p1 - p0, this evaluates s.x*s.y + s.y*s.z + s.z*s.x (the '%'
	// operator is presumably the dgVector dot product — confirm in
	// dgMatrix.h), i.e. half the box surface area.
	float CalculateArea (dgConstructionTree* const node0, dgConstructionTree* const node1) const
	{
		dgVector p0 (GetMin (node0->m_p0.m_x, node1->m_p0.m_x), GetMin (node0->m_p0.m_y, node1->m_p0.m_y), GetMin (node0->m_p0.m_z, node1->m_p0.m_z), float (0.0f));
		dgVector p1 (GetMax (node0->m_p1.m_x, node1->m_p1.m_x), GetMax (node0->m_p1.m_y, node1->m_p1.m_y), GetMax (node0->m_p1.m_z, node1->m_p1.m_z), float (0.0f));
		dgVector side0 (p1 - p0);
		dgVector side1 (side0.m_y, side0.m_z, side0.m_x, float (0.0f));
		return side0 % side1;
	}
+
	// Refresh a node's box as the union of its children's boxes and cache
	// the resulting surface-area metric in m_surfaceArea.
	void SetBox (dgConstructionTree* const node) const
	{
		node->m_p0.m_x = GetMin (node->m_back->m_p0.m_x, node->m_front->m_p0.m_x);
		node->m_p0.m_y = GetMin (node->m_back->m_p0.m_y, node->m_front->m_p0.m_y);
		node->m_p0.m_z = GetMin (node->m_back->m_p0.m_z, node->m_front->m_p0.m_z);
		node->m_p1.m_x = GetMax (node->m_back->m_p1.m_x, node->m_front->m_p1.m_x);
		node->m_p1.m_y = GetMax (node->m_back->m_p1.m_y, node->m_front->m_p1.m_y);
		node->m_p1.m_z = GetMax (node->m_back->m_p1.m_z, node->m_front->m_p1.m_z);
		dgVector side0 (node->m_p1 - node->m_p0);
		dgVector side1 (side0.m_y, side0.m_z, side0.m_x, float (0.0f));
		node->m_surfaceArea = side0 % side1;
	}
+
	// One local tree-rotation step: if rotating this internal node above its
	// parent reduces the surface-area cost, perform the rotation. The node
	// inherits the parent's box/area and the displaced child is re-attached
	// to the (now demoted) parent. Two symmetric cases depending on whether
	// this node is the back or front child of its parent.
	void ImproveNodeFitness (dgConstructionTree* const node) const
	{
		HACD_ASSERT (node->m_back);
		HACD_ASSERT (node->m_front);

		if (node->m_parent) {
			if (node->m_parent->m_back == node) {
				// node is the back child of its parent.
				float cost0 = node->m_surfaceArea;
				float cost1 = CalculateArea (node->m_front, node->m_parent->m_front);
				float cost2 = CalculateArea (node->m_back, node->m_parent->m_front);
				if ((cost1 <= cost0) && (cost1 <= cost2)) {
					// Rotate: node takes the parent's place; node->m_front
					// becomes the parent's back child.
					dgConstructionTree* const parent = node->m_parent;
					node->m_p0 = parent->m_p0;
					node->m_p1 = parent->m_p1;
					node->m_surfaceArea = parent->m_surfaceArea;
					if (parent->m_parent) {
						if (parent->m_parent->m_back == parent) {
							parent->m_parent->m_back = node;
						} else {
							HACD_ASSERT (parent->m_parent->m_front == parent);
							parent->m_parent->m_front = node;
						}
					}
					node->m_parent = parent->m_parent;
					parent->m_parent = node;
					node->m_front->m_parent = parent;
					parent->m_back = node->m_front;
					node->m_front = parent;
					SetBox (parent);
				} else if ((cost2 <= cost0) && (cost2 <= cost1)) {
					// Rotate: node takes the parent's place; node->m_back
					// becomes the parent's back child.
					dgConstructionTree* const parent = node->m_parent;
					node->m_p0 = parent->m_p0;
					node->m_p1 = parent->m_p1;
					node->m_surfaceArea = parent->m_surfaceArea;
					if (parent->m_parent) {
						if (parent->m_parent->m_back == parent) {
							parent->m_parent->m_back = node;
						} else {
							HACD_ASSERT (parent->m_parent->m_front == parent);
							parent->m_parent->m_front = node;
						}
					}
					node->m_parent = parent->m_parent;
					parent->m_parent = node;
					node->m_back->m_parent = parent;
					parent->m_back = node->m_back;
					node->m_back = parent;
					SetBox (parent);
				}

			} else {

				// node is the front child of its parent (mirror case).
				float cost0 = node->m_surfaceArea;
				float cost1 = CalculateArea (node->m_back, node->m_parent->m_back);
				float cost2 = CalculateArea (node->m_front, node->m_parent->m_back);
				if ((cost1 <= cost0) && (cost1 <= cost2)) {
					dgConstructionTree* const parent = node->m_parent;
					node->m_p0 = parent->m_p0;
					node->m_p1 = parent->m_p1;
					node->m_surfaceArea = parent->m_surfaceArea;
					if (parent->m_parent) {
						if (parent->m_parent->m_back == parent) {
							parent->m_parent->m_back = node;
						} else {
							HACD_ASSERT (parent->m_parent->m_front == parent);
							parent->m_parent->m_front = node;
						}
					}
					node->m_parent = parent->m_parent;
					parent->m_parent = node;
					node->m_back->m_parent = parent;
					parent->m_front = node->m_back;
					node->m_back = parent;
					SetBox (parent);
				} else if ((cost2 <= cost0) && (cost2 <= cost1)) {
					dgConstructionTree* const parent = node->m_parent;
					node->m_p0 = parent->m_p0;
					node->m_p1 = parent->m_p1;
					node->m_surfaceArea = parent->m_surfaceArea;
					if (parent->m_parent) {
						if (parent->m_parent->m_back == parent) {
							parent->m_parent->m_back = node;
						} else {
							HACD_ASSERT (parent->m_parent->m_front == parent);
							parent->m_parent->m_front = node;
						}
					}
					node->m_parent = parent->m_parent;
					parent->m_parent = node;
					node->m_front->m_parent = parent;
					parent->m_front = node->m_front;
					node->m_front = parent;
					SetBox (parent);
				}
			}
		}
	}
+
+ double TotalFitness (dgList<dgConstructionTree*>& nodeList) const
+ {
+ double cost = float (0.0f);
+ for (dgList<dgConstructionTree*>::dgListNode* node = nodeList.GetFirst(); node; node = node->GetNext()) {
+ dgConstructionTree* const box = node->GetInfo();
+ cost += box->m_surfaceArea;
+ }
+ return cost;
+ }
+
	// Repeatedly apply ImproveNodeFitness to every internal node until the
	// total surface-area cost stops decreasing (or a pass budget of
	// 2 * tree depth is exhausted). Rotations can change which node is the
	// root, so the new root is recovered by walking parents up from the last
	// collected node. Returns the (possibly new) root.
	dgConstructionTree* ImproveTotalFitness (dgConstructionTree* const root, dgAABBTree* const boxArray)
	{
		// boxArray is unused here; macro silences the compiler warning.
		HACD_FORCE_PARAMETER_REFERENCE(boxArray);
		dgList<dgConstructionTree*> nodesList;

		dgConstructionTree* newRoot = root;
		PushNodes (root, nodesList);
		if (nodesList.GetCount()) {
			int32_t maxPasses = CalculateMaximunDepth (root) * 2;
			double newCost = TotalFitness (nodesList);
			double prevCost = newCost;
			do {
				prevCost = newCost;
				for (dgList<dgConstructionTree*>::dgListNode* node = nodesList.GetFirst(); node; node = node->GetNext()) {
					dgConstructionTree* const box = node->GetInfo();
					ImproveNodeFitness (box);
				}

				newCost = TotalFitness (nodesList);
				maxPasses --;
			} while (maxPasses && (newCost < prevCost));

			newRoot = nodesList.GetLast()->GetInfo();
			while (newRoot->m_parent) {
				newRoot = newRoot->m_parent;
			}
		}

		return newRoot;
	}
+
+ // Recursively builds a construction BVH over boxArray[firstBox..lastBox] by
+ // top-down median splitting along the axis of greatest positional variance.
+ // Returns a freshly allocated dgConstructionTree node; caller owns the tree.
+ dgConstructionTree* BuildTree (int32_t firstBox, int32_t lastBox, dgAABBTree* const boxArray, const dgTriplex* const vertexArray, dgConstructionTree* parent)
+ {
+ dgConstructionTree* const tree = HACD_NEW(dgConstructionTree);
+ HACD_ASSERT (firstBox >= 0);
+ HACD_ASSERT (lastBox >= 0);
+
+ tree->m_parent = parent;
+
+ if (lastBox == firstBox) {
+ // Leaf: the node's AABB is that of the single referenced box.
+ int32_t j0 = boxArray[firstBox].m_minIndex;
+ int32_t j1 = boxArray[firstBox].m_maxIndex;
+ tree->m_p0 = dgVector (vertexArray[j0].m_x, vertexArray[j0].m_y, vertexArray[j0].m_z, float (0.0f));
+ tree->m_p1 = dgVector (vertexArray[j1].m_x, vertexArray[j1].m_y, vertexArray[j1].m_z, float (0.0f));
+
+ tree->m_boxIndex = firstBox;
+ tree->m_back = NULL;
+ tree->m_front = NULL;
+ } else {
+
+ // Helper that computes the combined AABB of a box range and, as a side
+ // effect of its constructor, partitions boxArray in place around the
+ // median of the highest-variance axis. m_axis is the resulting split
+ // count (number of boxes in the "back" partition), not an axis id.
+ struct dgSpliteInfo
+ {
+ dgSpliteInfo (dgAABBTree* const boxArray, int32_t boxCount, const dgTriplex* const vertexArray, const dgConstructionTree* const /*tree*/)
+ {
+
+ dgVector minP ( float (1.0e15f), float (1.0e15f), float (1.0e15f), float (0.0f));
+ dgVector maxP (-float (1.0e15f), -float (1.0e15f), -float (1.0e15f), float (0.0f));
+
+ if (boxCount == 2) {
+ // Trivial split: one box on each side.
+ m_axis = 1;
+
+ for (int32_t i = 0; i < boxCount; i ++) {
+ int32_t j0 = boxArray[i].m_minIndex;
+ int32_t j1 = boxArray[i].m_maxIndex;
+
+ dgVector p0 (vertexArray[j0].m_x, vertexArray[j0].m_y, vertexArray[j0].m_z, float (0.0f));
+ dgVector p1 (vertexArray[j1].m_x, vertexArray[j1].m_y, vertexArray[j1].m_z, float (0.0f));
+
+ minP.m_x = GetMin (p0.m_x, minP.m_x);
+ minP.m_y = GetMin (p0.m_y, minP.m_y);
+ minP.m_z = GetMin (p0.m_z, minP.m_z);
+
+ maxP.m_x = GetMax (p1.m_x, maxP.m_x);
+ maxP.m_y = GetMax (p1.m_y, maxP.m_y);
+ maxP.m_z = GetMax (p1.m_z, maxP.m_z);
+ }
+
+ } else {
+ // Accumulate mean and second moment of the box centers to pick
+ // the axis with the largest variance as the split axis.
+ dgVector median (float (0.0f), float (0.0f), float (0.0f), float (0.0f));
+ dgVector varian (float (0.0f), float (0.0f), float (0.0f), float (0.0f));
+ for (int32_t i = 0; i < boxCount; i ++) {
+
+ int32_t j0 = boxArray[i].m_minIndex;
+ int32_t j1 = boxArray[i].m_maxIndex;
+
+ dgVector p0 (vertexArray[j0].m_x, vertexArray[j0].m_y, vertexArray[j0].m_z, float (0.0f));
+ dgVector p1 (vertexArray[j1].m_x, vertexArray[j1].m_y, vertexArray[j1].m_z, float (0.0f));
+
+ minP.m_x = GetMin (p0.m_x, minP.m_x);
+ minP.m_y = GetMin (p0.m_y, minP.m_y);
+ minP.m_z = GetMin (p0.m_z, minP.m_z);
+
+ maxP.m_x = GetMax (p1.m_x, maxP.m_x);
+ maxP.m_y = GetMax (p1.m_y, maxP.m_y);
+ maxP.m_z = GetMax (p1.m_z, maxP.m_z);
+
+ dgVector p ((p0 + p1).Scale (0.5f));
+
+ median += p;
+ varian += p.CompProduct (p);
+ }
+
+ // n*E[p^2] - (E[p]*n)^2 / n, scaled; only relative magnitude matters.
+ varian = varian.Scale (float (boxCount)) - median.CompProduct(median);
+
+
+ int32_t index = 0;
+ float maxVarian = float (-1.0e10f);
+ for (int32_t i = 0; i < 3; i ++) {
+ if (varian[i] > maxVarian) {
+ index = i;
+ maxVarian = varian[i];
+ }
+ }
+
+ dgVector center = median.Scale (float (1.0f) / float (boxCount));
+
+ float test = center[index];
+
+ // Hoare-style in-place partition of the boxes around the mean
+ // center coordinate on the chosen axis.
+ int32_t i0 = 0;
+ int32_t i1 = boxCount - 1;
+ int32_t step = sizeof (dgTriplex) / sizeof (float);
+ const float* const points = &vertexArray[0].m_x;
+ do {
+ for (; i0 <= i1; i0 ++) {
+ int32_t j0 = boxArray[i0].m_minIndex;
+ int32_t j1 = boxArray[i0].m_maxIndex;
+
+ float val = (points[j0 * step + index] + points[j1 * step + index]) * float (0.5f);
+ if (val > test) {
+ break;
+ }
+ }
+
+ for (; i1 >= i0; i1 --) {
+ int32_t j0 = boxArray[i1].m_minIndex;
+ int32_t j1 = boxArray[i1].m_maxIndex;
+
+ float val = (points[j0 * step + index] + points[j1 * step + index]) * float (0.5f);
+ if (val < test) {
+ break;
+ }
+ }
+
+ if (i0 < i1) {
+ Swap(boxArray[i0], boxArray[i1]);
+ i0++;
+ i1--;
+ }
+ } while (i0 <= i1);
+
+ // Clamp so both partitions are non-empty even for degenerate data.
+ if (i0 > 0){
+ i0 --;
+ }
+ if ((i0 + 1) >= boxCount) {
+ i0 = boxCount - 2;
+ }
+
+ m_axis = i0 + 1;
+ }
+
+ HACD_ASSERT (maxP.m_x - minP.m_x >= float (0.0f));
+ HACD_ASSERT (maxP.m_y - minP.m_y >= float (0.0f));
+ HACD_ASSERT (maxP.m_z - minP.m_z >= float (0.0f));
+ m_p0 = minP;
+ m_p1 = maxP;
+ }
+
+ int32_t m_axis;
+ dgVector m_p0;
+ dgVector m_p1;
+ };
+
+ dgSpliteInfo info (&boxArray[firstBox], lastBox - firstBox + 1, vertexArray, tree);
+
+ tree->m_boxIndex = -1;
+ tree->m_p0 = info.m_p0;
+ tree->m_p1 = info.m_p1;
+
+ // Recurse on the two partitions produced by dgSpliteInfo.
+ tree->m_front = BuildTree (firstBox + info.m_axis, lastBox, boxArray, vertexArray, tree);
+ tree->m_back = BuildTree (firstBox, firstBox + info.m_axis - 1, boxArray, vertexArray, tree);
+ }
+
+ // Surface-area metric: dot of extents with their cyclic permutation gives
+ // xy + yz + zx, i.e. half the box surface area (used as the SAH cost).
+ dgVector side0 (tree->m_p1 - tree->m_p0);
+ dgVector side1 (side0.m_y, side0.m_z, side0.m_x, float (0.0f));
+ tree->m_surfaceArea = side0 % side1;
+
+ return tree;
+ }
+
+
+ // Builds the final flattened AABB tree over boxCount leaf boxes: constructs a
+ // temporary dgConstructionTree top-down, optionally improves its total SAH
+ // fitness, then flattens it into boxArray. Returns the node count; treeVCount
+ // is advanced by the vertices appended for the internal-node bounds.
+ int32_t BuildTopDown (int32_t boxCount, dgAABBTree* const boxArray, dgTriplex* const vertexArrayOut, int32_t &treeVCount, bool optimizedBuild)
+ {
+ dgStack <dgAABBTree> boxCopy (boxCount);
+ memcpy (&boxCopy[0], boxArray, boxCount * sizeof (dgAABBTree));
+
+ dgConstructionTree* tree = BuildTree (0, boxCount - 1, &boxCopy[0], vertexArrayOut, NULL);
+
+ // NOTE(review): the caller's optimizedBuild flag is deliberately overridden
+ // here, so the fitness-improvement pass always runs.
+ optimizedBuild = true;
+ if (optimizedBuild) {
+ tree = ImproveTotalFitness (tree, &boxCopy[0]);
+ }
+
+ int32_t count = BuildTree (tree, boxArray, &boxCopy[0], vertexArrayOut, treeVCount);
+ delete tree;
+ return count;
+ }
+
+ // Tests this node's AABB (looked up through m_minIndex/m_maxIndex in
+ // vertexArray) against [min, max]. Returns >= 0 on overlap, -1 otherwise
+ // (dgOverlapTest result shifted by one).
+ HACD_FORCE_INLINE int32_t BoxIntersect (const dgTriplex* const vertexArray, const dgVector& min, const dgVector& max) const
+ {
+ dgVector boxP0 (vertexArray[m_minIndex].m_x, vertexArray[m_minIndex].m_y, vertexArray[m_minIndex].m_z, float (0.0f));
+ dgVector boxP1 (vertexArray[m_maxIndex].m_x, vertexArray[m_maxIndex].m_y, vertexArray[m_maxIndex].m_z, float (0.0f));
+ return dgOverlapTest (boxP0, boxP1, min, max) - 1;
+ }
+
+
+ // Slab-method ray vs. node-AABB test over the parametric range [0, 1].
+ // Returns 0xffffffff (nonzero, used as a boolean) when the ray segment can
+ // touch the box, 0 when it provably misses; the entry/exit parameters are
+ // not reported to the caller.
+ HACD_FORCE_INLINE int32_t RayTest (const dgFastRayTest& ray, const dgTriplex* const vertexArray) const
+ {
+ float tmin = 0.0f;
+ float tmax = 1.0f;
+
+ dgVector minBox (&vertexArray[m_minIndex].m_x);
+ dgVector maxBox (&vertexArray[m_maxIndex].m_x);
+
+ for (int32_t i = 0; i < 3; i++) {
+ if (ray.m_isParallel[i]) {
+ // Ray parallel to this slab: reject if the origin lies outside it.
+ if (ray.m_p0[i] < minBox[i] || ray.m_p0[i] > maxBox[i]) {
+ return 0;
+ }
+ } else {
+ float t1 = (minBox[i] - ray.m_p0[i]) * ray.m_dpInv[i];
+ float t2 = (maxBox[i] - ray.m_p0[i]) * ray.m_dpInv[i];
+
+ if (t1 > t2) {
+ Swap(t1, t2);
+ }
+ if (t1 > tmin) {
+ tmin = t1;
+ }
+ if (t2 < tmax) {
+ tmax = t2;
+ }
+ if (tmin > tmax) {
+ // Slab intervals no longer overlap: miss.
+ return 0;
+ }
+ }
+ }
+
+ return 0xffffffff;
+ }
+
+
+ // Iterative (explicit-stack) traversal of the tree, invoking callback for
+ // every leaf whose face AABB overlaps [min, max]. Traversal stops early if
+ // the callback returns t_StopSearh. Leaf index/count are packed in the
+ // TreeNode; vCount excludes the per-face extra entries appended at build time.
+ void ForAllSectors (const int32_t* const indexArray, const float* const vertexArray, const dgVector& min, const dgVector& max, dgAABBIntersectCallback callback, void* const context) const
+ {
+ int32_t stack;
+ const dgAABBTree *stackPool[DG_STACK_DEPTH];
+
+ stack = 1;
+ stackPool[0] = this;
+ while (stack) {
+ stack --;
+ const dgAABBTree* const me = stackPool[stack];
+
+ if (me->BoxIntersect ((dgTriplex*) vertexArray, min, max) >= 0) {
+
+ if (me->m_back.IsLeaf()) {
+ int32_t index = int32_t (me->m_back.GetIndex());
+ // Packed count stores 2x the raw index count; recover face vertex count.
+ int32_t vCount = int32_t ((me->m_back.GetCount() >> 1) - 1);
+ if ((vCount > 0) && callback(context, vertexArray, sizeof (dgTriplex), &indexArray[index + 1], vCount) == t_StopSearh) {
+ return;
+ }
+
+ } else {
+ HACD_ASSERT (stack < DG_STACK_DEPTH);
+ stackPool[stack] = me->m_back.GetNode(this);
+ stack++;
+ }
+
+ if (me->m_front.IsLeaf()) {
+ int32_t index = int32_t (me->m_front.GetIndex());
+ int32_t vCount = int32_t ((me->m_front.GetCount() >> 1) - 1);
+ if ((vCount > 0) && callback(context, vertexArray, sizeof (dgTriplex), &indexArray[index + 1], vCount) == t_StopSearh) {
+ return;
+ }
+
+ } else {
+ HACD_ASSERT (stack < DG_STACK_DEPTH);
+ stackPool[stack] = me->m_front.GetNode(this);
+ stack ++;
+ }
+ }
+ }
+ }
+
+
+ // Ray query over the tree. For each leaf whose AABB passes RayTest, calls
+ // callback, which returns the hit parameter in [0, 1]. The working ray is
+ // progressively shortened (ray.Reset) to the closest hit so far, pruning
+ // farther subtrees; a parameter of exactly 0 terminates immediately.
+ void ForAllSectorsRayHit (const dgFastRayTest& raySrc, const int32_t* indexArray, const float* vertexArray, dgRayIntersectCallback callback, void* const context) const
+ {
+ const dgAABBTree *stackPool[DG_STACK_DEPTH];
+ dgFastRayTest ray (raySrc);
+
+ int32_t stack = 1;
+ float maxParam = float (1.0f);
+
+ stackPool[0] = this;
+ while (stack) {
+ stack --;
+ const dgAABBTree *const me = stackPool[stack];
+ if (me->RayTest (ray, (dgTriplex*) vertexArray)) {
+
+ if (me->m_back.IsLeaf()) {
+ int32_t vCount = int32_t ((me->m_back.GetCount() >> 1) - 1);
+ if (vCount > 0) {
+ int32_t index = int32_t (me->m_back.GetIndex());
+ float param = callback(context, vertexArray, sizeof (dgTriplex), &indexArray[index + 1], vCount);
+ HACD_ASSERT (param >= float (0.0f));
+ if (param < maxParam) {
+ maxParam = param;
+ if (maxParam == float (0.0f)) {
+ break;
+ }
+ ray.Reset (maxParam) ;
+ }
+ }
+
+ } else {
+ HACD_ASSERT (stack < DG_STACK_DEPTH);
+ stackPool[stack] = me->m_back.GetNode(this);
+ stack++;
+ }
+
+ if (me->m_front.IsLeaf()) {
+ int32_t vCount = int32_t ((me->m_front.GetCount() >> 1) - 1);
+ if (vCount > 0) {
+ int32_t index = int32_t (me->m_front.GetIndex());
+ float param = callback(context, vertexArray, sizeof (dgTriplex), &indexArray[index + 1], vCount);
+ HACD_ASSERT (param >= float (0.0f));
+ if (param < maxParam) {
+ maxParam = param;
+ if (maxParam == float (0.0f)) {
+ break;
+ }
+ ray.Reset (maxParam);
+ }
+ }
+
+ } else {
+ HACD_ASSERT (stack < DG_STACK_DEPTH);
+ stackPool[stack] = me->m_front.GetNode(this);
+ stack ++;
+ }
+ }
+ }
+ }
+
+
+ // Finds the vertex of the soup with the maximum projection onto dir (the
+ // support point). Internal nodes are pruned with a conservative AABB-corner
+ // projection bound (aabbProjection); the child with the larger bound is
+ // pushed last so it is visited first (best-first traversal).
+ dgVector ForAllSectorsSupportVertex (const dgVector& dir, const int32_t* const indexArray, const float* const vertexArray) const
+ {
+ float aabbProjection[DG_STACK_DEPTH];
+ const dgAABBTree *stackPool[DG_STACK_DEPTH];
+
+ int32_t stack = 1;
+ stackPool[0] = this;
+ aabbProjection[0] = float (1.0e10f);
+ const dgTriplex* const boxArray = (dgTriplex*)vertexArray;
+ dgVector supportVertex (float (0.0f), float (0.0f), float (0.0f), float (0.0f));
+
+ float maxProj = float (-1.0e20f);
+ // Per-axis selector: which box corner (min or max) maximizes dir projection.
+ int32_t ix = (dir[0] > float (0.0f)) ? 1 : 0;
+ int32_t iy = (dir[1] > float (0.0f)) ? 1 : 0;
+ int32_t iz = (dir[2] > float (0.0f)) ? 1 : 0;
+
+ while (stack) {
+ float boxSupportValue;
+
+ stack--;
+ boxSupportValue = aabbProjection[stack];
+ // Skip subtrees whose AABB bound cannot beat the best vertex found so far.
+ if (boxSupportValue > maxProj) {
+ float backSupportDist = float (0.0f);
+ float frontSupportDist = float (0.0f);
+ const dgAABBTree* const me = stackPool[stack];
+ if (me->m_back.IsLeaf()) {
+ // Leaf: scan the face vertices for the largest projection.
+ backSupportDist = float (-1.0e20f);
+ int32_t index = int32_t (me->m_back.GetIndex());
+ int32_t vCount = int32_t ((me->m_back.GetCount() >> 1) - 1);
+
+ dgVector vertex (float (0.0f), float (0.0f), float (0.0f), float (0.0f));
+ for (int32_t j = 0; j < vCount; j ++) {
+ int32_t i0 = indexArray[index + j + 1] * int32_t (sizeof (dgTriplex) / sizeof (float));
+ dgVector p (&vertexArray[i0]);
+ float dist = p % dir;
+ if (dist > backSupportDist) {
+ backSupportDist = dist;
+ vertex = p;
+ }
+ }
+
+ if (backSupportDist > maxProj) {
+ maxProj = backSupportDist;
+ supportVertex = vertex;
+ }
+
+ } else {
+ // Internal node: upper bound is the projection of the farthest AABB corner.
+ dgVector box[2];
+ const dgAABBTree* const node = me->m_back.GetNode(this);
+ box[0].m_x = boxArray[node->m_minIndex].m_x;
+ box[0].m_y = boxArray[node->m_minIndex].m_y;
+ box[0].m_z = boxArray[node->m_minIndex].m_z;
+ box[1].m_x = boxArray[node->m_maxIndex].m_x;
+ box[1].m_y = boxArray[node->m_maxIndex].m_y;
+ box[1].m_z = boxArray[node->m_maxIndex].m_z;
+
+ dgVector supportPoint (box[ix].m_x, box[iy].m_y, box[iz].m_z, float (0.0));
+ backSupportDist = supportPoint % dir;
+ }
+
+ if (me->m_front.IsLeaf()) {
+ frontSupportDist = float (-1.0e20f);
+ int32_t index = int32_t (me->m_front.GetIndex());
+ int32_t vCount = int32_t ((me->m_front.GetCount() >> 1) - 1);
+
+ dgVector vertex (float (0.0f), float (0.0f), float (0.0f), float (0.0f));
+ for (int32_t j = 0; j < vCount; j ++) {
+ int32_t i0 = indexArray[index + j + 1] * int32_t (sizeof (dgTriplex) / sizeof (float));
+ dgVector p (&vertexArray[i0]);
+ float dist = p % dir;
+ if (dist > frontSupportDist) {
+ frontSupportDist = dist;
+ vertex = p;
+ }
+ }
+ if (frontSupportDist > maxProj) {
+ maxProj = frontSupportDist;
+ supportVertex = vertex;
+ }
+
+ } else {
+ dgVector box[2];
+ const dgAABBTree* const node = me->m_front.GetNode(this);
+ box[0].m_x = boxArray[node->m_minIndex].m_x;
+ box[0].m_y = boxArray[node->m_minIndex].m_y;
+ box[0].m_z = boxArray[node->m_minIndex].m_z;
+ box[1].m_x = boxArray[node->m_maxIndex].m_x;
+ box[1].m_y = boxArray[node->m_maxIndex].m_y;
+ box[1].m_z = boxArray[node->m_maxIndex].m_z;
+
+ dgVector supportPoint (box[ix].m_x, box[iy].m_y, box[iz].m_z, float (0.0));
+ frontSupportDist = supportPoint % dir;
+ }
+
+ // Push the more promising child last so it is popped first.
+ if (frontSupportDist >= backSupportDist) {
+ if (!me->m_back.IsLeaf()) {
+ aabbProjection[stack] = backSupportDist;
+ stackPool[stack] = me->m_back.GetNode(this);
+ stack++;
+ }
+
+ if (!me->m_front.IsLeaf()) {
+ aabbProjection[stack] = frontSupportDist;
+ stackPool[stack] = me->m_front.GetNode(this);
+ stack++;
+ }
+
+ } else {
+
+ if (!me->m_front.IsLeaf()) {
+ aabbProjection[stack] = frontSupportDist;
+ stackPool[stack] = me->m_front.GetNode(this);
+ stack++;
+ }
+
+ if (!me->m_back.IsLeaf()) {
+ aabbProjection[stack] = backSupportDist;
+ stackPool[stack] = me->m_back.GetNode(this);
+ stack++;
+ }
+ }
+ }
+ }
+
+ return supportVertex;
+ }
+
+
+ private:
+
+ // Returns the coordinate axis (0, 1 or 2) with the largest variance over the
+ // corner points (m_p0 and m_p1) of the given construction-tree nodes.
+ int32_t GetAxis (dgConstructionTree** boxArray, int32_t boxCount) const
+ {
+ int32_t axis;
+ float maxVal;
+ dgVector median (float (0.0f), float (0.0f), float (0.0f), float (0.0f));
+ dgVector varian (float (0.0f), float (0.0f), float (0.0f), float (0.0f));
+ for (int32_t i = 0; i < boxCount; i ++) {
+
+ median += boxArray[i]->m_p0;
+ median += boxArray[i]->m_p1;
+
+ varian += boxArray[i]->m_p0.CompProduct(boxArray[i]->m_p0);
+ varian += boxArray[i]->m_p1.CompProduct(boxArray[i]->m_p1);
+ }
+
+ // Two corner samples were accumulated per box.
+ boxCount *= 2;
+ varian.m_x = boxCount * varian.m_x - median.m_x * median.m_x;
+ varian.m_y = boxCount * varian.m_y - median.m_y * median.m_y;
+ varian.m_z = boxCount * varian.m_z - median.m_z * median.m_z;
+
+ axis = 0;
+ maxVal = varian[0];
+ for (int32_t i = 1; i < 3; i ++) {
+ if (varian[i] > maxVal) {
+ axis = i;
+ maxVal = varian[i];
+ }
+ }
+
+ return axis;
+ }
+
+/*
+ class DG_AABB_CMPBOX
+ {
+ public:
+ int32_t m_axis;
+ const dgTriplex* m_points;
+ };
+ static inline int32_t CompareBox (const dgAABBTree* const boxA, const dgAABBTree* const boxB, void* const context)
+ {
+ DG_AABB_CMPBOX& info = *((DG_AABB_CMPBOX*)context);
+
+ int32_t axis = info.m_axis;
+ const float* p0 = &info.m_points[boxA->m_minIndex].m_x;
+ const float* p1 = &info.m_points[boxB->m_minIndex].m_x;
+
+ if (p0[axis] < p1[axis]) {
+ return -1;
+ } else if (p0[axis] > p1[axis]) {
+ return 1;
+ }
+ return 0;
+ }
+*/
+
+ // Sort comparator for construction-tree nodes: orders by the m_p0 coordinate
+ // on the axis passed through context (pointer to int32_t).
+ static inline int32_t CompareBox (const dgConstructionTree* const boxA, const dgConstructionTree* const boxB, void* const context)
+ {
+ int32_t axis;
+
+ axis = *((int32_t*) context);
+
+ if (boxA->m_p0[axis] < boxB->m_p0[axis]) {
+ return -1;
+ } else if (boxA->m_p0[axis] > boxB->m_p0[axis]) {
+ return 1;
+ }
+ return 0;
+ }
+
+
+
+
+ int32_t m_minIndex; // index (into the vertex array) of this node's AABB min corner
+ int32_t m_maxIndex; // index of this node's AABB max corner
+ TreeNode m_back; // packed child: either a leaf (index/count) or a node offset
+ TreeNode m_front; // packed child, same encoding as m_back
+ friend class dgAABBPolygonSoup;
+};
+
+
+
+// Constructs an empty polygon soup; tree and index storage are allocated in Create().
+dgAABBPolygonSoup::dgAABBPolygonSoup ()
+ :dgPolygonSoupDatabase()
+{
+ m_aabb = NULL;
+ m_indices = NULL;
+ m_indexCount = 0;
+ m_nodesCount = 0;
+}
+
+// Releases the tree and index buffers allocated by Create(). m_aabb and
+// m_indices are allocated together, so m_aabb acts as the validity flag.
+dgAABBPolygonSoup::~dgAABBPolygonSoup ()
+{
+ if (m_aabb) {
+ HACD_FREE (m_aabb);
+ HACD_FREE (m_indices);
+ }
+}
+
+
+// Returns the root of the flattened AABB tree (opaque pointer, NULL if empty).
+void* dgAABBPolygonSoup::GetRootNode() const
+{
+ return m_aabb;
+}
+
+// Returns the back child of root, or NULL when the back slot is a leaf.
+void* dgAABBPolygonSoup::GetBackNode(const void* const root) const
+{
+ dgAABBTree* const node = (dgAABBTree*) root;
+ return node->m_back.IsLeaf() ? NULL : node->m_back.GetNode(m_aabb);
+}
+
+// Returns the front child of root, or NULL when the front slot is a leaf.
+void* dgAABBPolygonSoup::GetFrontNode(const void* const root) const
+{
+ dgAABBTree* const node = (dgAABBTree*) root;
+ return node->m_front.IsLeaf() ? NULL : node->m_front.GetNode(m_aabb);
+}
+
+
+// Writes the AABB of the given tree node into p0 (min) and p1 (max), reading
+// the corner coordinates from the shared local vertex array.
+void dgAABBPolygonSoup::GetNodeAABB(const void* const root, dgVector& p0, dgVector& p1) const
+{
+ dgAABBTree* const node = (dgAABBTree*) root;
+ const dgTriplex* const vertex = (dgTriplex*) m_localVertex;
+
+ p0 = dgVector (vertex[node->m_minIndex].m_x, vertex[node->m_minIndex].m_y, vertex[node->m_minIndex].m_z, float (0.0f));
+ p1 = dgVector (vertex[node->m_maxIndex].m_x, vertex[node->m_maxIndex].m_y, vertex[node->m_maxIndex].m_z, float (0.0f));
+}
+
+
+// Forwards a box query to the tree root; no-op when the soup is empty.
+void dgAABBPolygonSoup::ForAllSectors (
+ const dgVector& min,
+ const dgVector& max,
+ dgAABBIntersectCallback callback,
+ void* const context) const
+{
+ dgAABBTree* tree;
+
+ if (m_aabb) {
+ tree = (dgAABBTree*) m_aabb;
+ tree->ForAllSectors (m_indices, m_localVertex, min, max, callback, context);
+ }
+}
+
+// Returns the support vertex of the soup in direction dir, or the zero vector
+// when empty. (Name keeps the historical "Vectex" spelling — part of the
+// public interface, so it cannot be corrected here.)
+dgVector dgAABBPolygonSoup::ForAllSectorsSupportVectex (const dgVector& dir) const
+{
+ dgAABBTree* tree;
+
+ if (m_aabb) {
+ tree = (dgAABBTree*) m_aabb;
+ return tree->ForAllSectorsSupportVertex (dir, m_indices, m_localVertex);
+ } else {
+ return dgVector (0, 0, 0, 0);
+ }
+}
+
+
+// Returns the whole-soup bounding box (root node's corners); zero box if empty.
+void dgAABBPolygonSoup::GetAABB (dgVector& p0, dgVector& p1) const
+{
+ if (m_aabb) {
+ dgAABBTree* tree;
+ tree = (dgAABBTree*) m_aabb;
+ p0 = dgVector (&m_localVertex[tree->m_minIndex * 3]);
+ p1 = dgVector (&m_localVertex[tree->m_maxIndex * 3]);
+ } else {
+ p0 = dgVector (float (0.0f), float (0.0f), float (0.0f), float (0.0f));
+ p1 = dgVector (float (0.0f), float (0.0f), float (0.0f), float (0.0f));
+ }
+}
+
+
+// Forwards a ray query to the tree root; no-op when the soup is empty.
+void dgAABBPolygonSoup::ForAllSectorsRayHit (const dgFastRayTest& ray, dgRayIntersectCallback callback, void* const context) const
+{
+ dgAABBTree* tree;
+
+ if (m_aabb) {
+ tree = (dgAABBTree*) m_aabb;
+ tree->ForAllSectorsRayHit (ray, m_indices, m_localVertex, callback, context);
+ }
+}
+
+// Callback invoked for every face overlapping the face described in context
+// (an AdjacentdFaces). Matches shared edges by a 64-bit (i0,i1) key; when the
+// neighbor face is nearly coplanar with the context face (max plane distance
+// below 1e-4) its normal index is recorded in the context face's adjacency
+// slots. Always continues the search.
+dgIntersectStatus dgAABBPolygonSoup::CalculateThisFaceEdgeNormals (void *context, const float* const polygon, int32_t strideInBytes, const int32_t* const indexArray, int32_t indexCount)
+{
+ AdjacentdFaces& adjacentFaces = *((AdjacentdFaces*)context);
+
+ int32_t count = adjacentFaces.m_count;
+ int32_t stride = int32_t (strideInBytes / sizeof (float));
+
+ int32_t j0 = indexArray[indexCount - 1];
+ for (int32_t j = 0; j < indexCount; j ++) {
+ int32_t j1 = indexArray[j];
+ // Directed edge key; matches the reversed edge stored by the context face.
+ int64_t key = (int64_t (j0) << 32) + j1;
+ for (int32_t i = 0; i < count; i ++) {
+ if (adjacentFaces.m_edgeMap[i] == key) {
+ float maxDist = float (0.0f);
+ for (int32_t k = 0; k < indexCount; k ++) {
+ dgVector r (&polygon[indexArray[k] * stride]);
+ float dist = adjacentFaces.m_normal.Evalue(r);
+ if (dgAbsf (dist) > dgAbsf (maxDist)) {
+ maxDist = dist;
+ }
+ }
+ if (maxDist < float (1.0e-4f)) {
+ // indexArray[indexCount] holds this face's normal index (layout
+ // written by Create): store it as the edge's adjacent normal.
+ adjacentFaces.m_index[i + count + 1] = indexArray[indexCount];
+ }
+ break;
+ }
+ }
+
+ j0 = j1;
+ }
+
+ return t_ContinueSearh;
+}
+
+
+// Callback invoked once per face of the soup. Builds the face's plane and its
+// directed-edge key map, computes a slightly inflated face AABB, then queries
+// the soup for overlapping faces via CalculateThisFaceEdgeNormals to fill in
+// edge adjacency. Always continues the search.
+dgIntersectStatus dgAABBPolygonSoup::CalculateAllFaceEdgeNormals (void *context, const float* const polygon, int32_t strideInBytes, const int32_t* const indexArray, int32_t indexCount)
+{
+ dgAABBPolygonSoup* const me = (dgAABBPolygonSoup*) context;
+
+ int32_t stride = int32_t (strideInBytes / sizeof (float));
+
+ AdjacentdFaces adjacentFaces;
+ adjacentFaces.m_count = indexCount;
+ adjacentFaces.m_index = (int32_t*) indexArray;
+ // indexArray[indexCount] is the face normal's vertex index (see Create layout).
+ dgVector n (&polygon[indexArray[indexCount] * stride]);
+ dgVector p (&polygon[indexArray[0] * stride]);
+ adjacentFaces.m_normal = dgPlane (n, - (n % p));
+
+ HACD_ASSERT (indexCount < int32_t (sizeof (adjacentFaces.m_edgeMap) / sizeof (adjacentFaces.m_edgeMap[0])));
+
+ int32_t edgeIndex = indexCount - 1;
+ int32_t i0 = indexArray[indexCount - 1];
+ dgVector p0 ( float (1.0e15f), float (1.0e15f), float (1.0e15f), float (0.0f));
+ dgVector p1 (-float (1.0e15f), -float (1.0e15f), -float (1.0e15f), float (0.0f));
+ for (int32_t i = 0; i < indexCount; i ++) {
+ int32_t i1 = indexArray[i];
+ int32_t index = i1 * stride;
+ dgVector p (&polygon[index]);
+
+ p0.m_x = GetMin (p.m_x, p0.m_x);
+ p0.m_y = GetMin (p.m_y, p0.m_y);
+ p0.m_z = GetMin (p.m_z, p0.m_z);
+
+ p1.m_x = GetMax (p.m_x, p1.m_x);
+ p1.m_y = GetMax (p.m_y, p1.m_y);
+ p1.m_z = GetMax (p.m_z, p1.m_z);
+
+ // Store reversed directed edge (i1 -> i0) so neighbors' forward edges match.
+ adjacentFaces.m_edgeMap[edgeIndex] = (int64_t (i1) << 32) + i0;
+ edgeIndex = i;
+ i0 = i1;
+ }
+
+ // Inflate the query box to tolerate numerical slack when finding neighbors.
+ p0.m_x -= float (0.5f);
+ p0.m_y -= float (0.5f);
+ p0.m_z -= float (0.5f);
+ p1.m_x += float (0.5f);
+ p1.m_y += float (0.5f);
+ p1.m_z += float (0.5f);
+
+ me->ForAllSectors (p0, p1, CalculateThisFaceEdgeNormals, &adjacentFaces);
+
+ return t_ContinueSearh;
+}
+
+// Computes the maximum extent of a face: for each edge direction, projects all
+// face vertices onto the normalized edge and takes the projection span; the
+// largest span over all edges is returned.
+float dgAABBPolygonSoup::CalculateFaceMaxSize (dgTriplex* const vertex, int32_t indexCount, const int32_t* const indexArray) const
+{
+ float maxSize = float (0.0f);
+ int32_t index = indexArray[indexCount - 1];
+ dgVector p0 (vertex[index].m_x, vertex[index].m_y, vertex[index].m_z, float (0.0f));
+ for (int32_t i = 0; i < indexCount; i ++) {
+ int32_t index = indexArray[i];
+ dgVector p1 (vertex[index].m_x, vertex[index].m_y, vertex[index].m_z, float (0.0f));
+
+ dgVector dir (p1 - p0);
+ dir = dir.Scale (dgRsqrt (dir % dir));
+
+ float maxVal = float (-1.0e10f);
+ float minVal = float ( 1.0e10f);
+ for (int32_t j = 0; j < indexCount; j ++) {
+ int32_t index = indexArray[j];
+ dgVector q (vertex[index].m_x, vertex[index].m_y, vertex[index].m_z, float (0.0f));
+ float val = dir % q;
+ minVal = GetMin(minVal, val);
+ maxVal = GetMax(maxVal, val);
+ }
+
+ float size = maxVal - minVal;
+ maxSize = GetMax(maxSize, size);
+ p0 = p1;
+ }
+
+ return maxSize;
+}
+
+// Builds edge adjacency for every face by querying the whole soup's AABB with
+// the per-face adjacency callback.
+void dgAABBPolygonSoup::CalculateAdjacendy ()
+{
+ dgVector p0;
+ dgVector p1;
+ GetAABB (p0, p1);
+ ForAllSectors (p0, p1, CalculateAllFaceEdgeNormals, this);
+}
+
+// Builds the searchable soup from a polygon-soup builder: copies vertices and
+// normals, lays out the per-face index records, builds the AABB tree, then
+// welds the extra per-node bounding vertices. Per-face index layout written
+// here: [v0..vn-1][normalIndex][-1 x (n-1)][faceMaxSize].
+void dgAABBPolygonSoup::Create (const dgPolygonSoupDatabaseBuilder& builder, bool optimizedBuild)
+{
+ if (builder.m_faceCount == 0) {
+ return;
+ }
+
+ m_strideInBytes = sizeof (dgTriplex);
+ // Two index slots per original index plus one size slot per face.
+ m_indexCount = builder.m_indexCount * 2 + builder.m_faceCount;
+ m_indices = (int32_t*) HACD_ALLOC (sizeof (int32_t) * m_indexCount);
+ m_aabb = (dgAABBTree*) HACD_ALLOC (sizeof (dgAABBTree) * builder.m_faceCount);
+ // Room for vertices + normals + up to 4 extra bound vertices per face.
+ m_localVertex = (float*) HACD_ALLOC (sizeof (dgTriplex) * (builder.m_vertexCount + builder.m_normalCount + builder.m_faceCount * 4));
+
+ dgAABBTree* const tree = (dgAABBTree*) m_aabb;
+ dgTriplex* const tmpVertexArray = (dgTriplex*)m_localVertex;
+
+ for (int32_t i = 0; i < builder.m_vertexCount; i ++) {
+ tmpVertexArray[i].m_x = float (builder.m_vertexPoints[i].m_x);
+ tmpVertexArray[i].m_y = float (builder.m_vertexPoints[i].m_y);
+ tmpVertexArray[i].m_z = float (builder.m_vertexPoints[i].m_z);
+ }
+
+ // Normals are appended after the position vertices in the same array.
+ for (int32_t i = 0; i < builder.m_normalCount; i ++) {
+ tmpVertexArray[i + builder.m_vertexCount].m_x = float (builder.m_normalPoints[i].m_x);
+ tmpVertexArray[i + builder.m_vertexCount].m_y = float (builder.m_normalPoints[i].m_y);
+ tmpVertexArray[i + builder.m_vertexCount].m_z = float (builder.m_normalPoints[i].m_z);
+ }
+
+ int32_t polygonIndex = 0;
+ int32_t* indexMap = m_indices;
+ const int32_t* const indices = &builder.m_vertexIndex[0];
+ for (int32_t i = 0; i < builder.m_faceCount; i ++) {
+
+ int32_t indexCount = builder.m_faceVertexCount[i];
+ dgAABBTree& box = tree[i];
+
+ // Each leaf box owns two bound vertices appended after vertices+normals.
+ box.m_minIndex = builder.m_normalCount + builder.m_vertexCount + i * 2;
+ box.m_maxIndex = builder.m_normalCount + builder.m_vertexCount + i * 2 + 1;
+
+ box.m_front = dgAABBTree::TreeNode(0, 0);
+ // Leaf encoding: count is doubled, index points into m_indices.
+ box.m_back = dgAABBTree::TreeNode (uint32_t (indexCount * 2), uint32_t (indexMap - m_indices));
+ box.CalcExtends (&tmpVertexArray[0], indexCount, &indices[polygonIndex]);
+
+ for (int32_t j = 0; j < indexCount; j ++) {
+ indexMap[0] = indices[polygonIndex + j];
+ indexMap ++;
+ }
+
+ // Face normal index, then adjacency slots initialized to -1.
+ indexMap[0] = builder.m_vertexCount + builder.m_normalIndex[i];
+ indexMap ++;
+ for (int32_t j = 1; j < indexCount; j ++) {
+ indexMap[0] = -1;
+ indexMap ++;
+ }
+
+ // Face size is quantized to an int and stored in the final slot.
+ float faceSize = CalculateFaceMaxSize (&tmpVertexArray[0], indexCount - 1, &indices[polygonIndex + 1]);
+ indexMap[0] = int32_t (faceSize);
+ indexMap ++;
+ polygonIndex += indexCount;
+ }
+
+ int32_t extraVertexCount = builder.m_normalCount + builder.m_vertexCount + builder.m_faceCount * 2;
+// if (optimizedBuild) {
+// m_nodesCount = tree->BuildBottomUp (builder.m_allocator, builder.m_faceCount, tree, &tmpVertexArray[0], extraVertexCount);
+// } else {
+// m_nodesCount = tree->BuildTopDown (builder.m_allocator, builder.m_faceCount, tree, &tmpVertexArray[0], extraVertexCount);
+// }
+
+// m_nodesCount = tree->BuildBottomUp (builder.m_allocator, builder.m_faceCount, tree, &tmpVertexArray[0], extraVertexCount, optimizedBuild);
+ m_nodesCount = tree->BuildTopDown (builder.m_faceCount, tree, &tmpVertexArray[0], extraVertexCount, optimizedBuild);
+
+ // Inflate the root bounds slightly to absorb floating-point slack.
+ m_localVertex[tree->m_minIndex * 3 + 0] -= float (0.1f);
+ m_localVertex[tree->m_minIndex * 3 + 1] -= float (0.1f);
+ m_localVertex[tree->m_minIndex * 3 + 2] -= float (0.1f);
+ m_localVertex[tree->m_maxIndex * 3 + 0] += float (0.1f);
+ m_localVertex[tree->m_maxIndex * 3 + 1] += float (0.1f);
+ m_localVertex[tree->m_maxIndex * 3 + 2] += float (0.1f);
+
+
+ extraVertexCount -= (builder.m_normalCount + builder.m_vertexCount);
+
+ // Weld the appended node-bound vertices and remap each node's corner indices.
+ dgStack<int32_t> indexArray (extraVertexCount);
+ extraVertexCount = dgVertexListToIndexList (&tmpVertexArray[builder.m_normalCount + builder.m_vertexCount].m_x, sizeof (dgTriplex), sizeof (dgTriplex), 0, extraVertexCount, &indexArray[0], float (1.0e-6f));
+
+ for (int32_t i = 0; i < m_nodesCount; i ++) {
+ dgAABBTree& box = tree[i];
+
+ int32_t j = box.m_minIndex - builder.m_normalCount - builder.m_vertexCount;
+ box.m_minIndex = indexArray[j] + builder.m_normalCount + builder.m_vertexCount;
+ j = box.m_maxIndex - builder.m_normalCount - builder.m_vertexCount;
+ box.m_maxIndex = indexArray[j] + builder.m_normalCount + builder.m_vertexCount;
+ }
+
+ m_vertexCount = extraVertexCount + builder.m_normalCount + builder.m_vertexCount;
+}
+
diff --git a/APEX_1.4/shared/general/HACD/src/dgConvexHull3d.cpp b/APEX_1.4/shared/general/HACD/src/dgConvexHull3d.cpp
new file mode 100644
index 00000000..e08cdb3f
--- /dev/null
+++ b/APEX_1.4/shared/general/HACD/src/dgConvexHull3d.cpp
@@ -0,0 +1,994 @@
+/* Copyright (c) <2003-2011> <Julio Jerez, Newton Game Dynamics>
+*
+* This software is provided 'as-is', without any express or implied
+* warranty. In no event will the authors be held liable for any damages
+* arising from the use of this software.
+*
+* Permission is granted to anyone to use this software for any purpose,
+* including commercial applications, and to alter it and redistribute it
+* freely, subject to the following restrictions:
+*
+* 1. The origin of this software must not be misrepresented; you must not
+* claim that you wrote the original software. If you use this software
+* in a product, an acknowledgment in the product documentation would be
+* appreciated but is not required.
+*
+* 2. Altered source versions must be plainly marked as such, and must not be
+* misrepresented as being the original software.
+*
+* 3. This notice may not be removed or altered from any source distribution.
+*/
+
+#include "dgStack.h"
+#include "dgTree.h"
+#include "dgGoogol.h"
+#include "dgConvexHull3d.h"
+#include "dgSmallDeterminant.h"
+
+#define DG_VERTEX_CLUMP_SIZE_3D 8
+
+// Node of the point-partitioning AABB tree used to accelerate support-point
+// queries during convex-hull construction. Nodes are placement-allocated from
+// a caller-supplied pool (see dgConvexHull3d::BuildTree).
+class dgAABBPointTree3d
+{
+ public:
+#ifdef _DEBUG
+ // Debug-only sequential id to make nodes distinguishable in a debugger.
+ dgAABBPointTree3d()
+ {
+ static int32_t id = 0;
+ m_id = id;
+ id ++;
+ }
+ int32_t m_id;
+#endif
+
+ dgBigVector m_box[2]; // [0] = min corner, [1] = max corner
+ dgAABBPointTree3d* m_left;
+ dgAABBPointTree3d* m_right;
+ dgAABBPointTree3d* m_parent;
+};
+
+// Input point augmented with its original index so hull faces can refer back
+// to the caller's vertex ordering after sorting/deduplication.
+class dgHullVertex: public dgBigVector
+{
+ public:
+ int32_t m_index;
+};
+
+// Leaf of the point tree: a small fixed-capacity bucket of point indices.
+class dgAABBPointTree3dClump: public dgAABBPointTree3d
+{
+ public:
+ int32_t m_count;
+ int32_t m_indices[DG_VERTEX_CLUMP_SIZE_3D];
+};
+
+
+// Initializes an unmarked triangle face with no twin (adjacent) faces yet.
+dgConvexHull3DFace::dgConvexHull3DFace()
+{
+ m_mark = 0;
+ m_twin[0] = NULL;
+ m_twin[1] = NULL;
+ m_twin[2] = NULL;
+}
+
+// Signed-volume orientation test of `point` against this face's plane
+// (determinant of the three edge vectors). Uses fast double-precision first;
+// when the result is within the computed rounding-error bound it is redone in
+// exact arbitrary-precision (dgGoogol) arithmetic so the sign is reliable.
+double dgConvexHull3DFace::Evalue (const dgBigVector* const pointArray, const dgBigVector& point) const
+{
+ const dgBigVector& p0 = pointArray[m_index[0]];
+ const dgBigVector& p1 = pointArray[m_index[1]];
+ const dgBigVector& p2 = pointArray[m_index[2]];
+
+ double matrix[3][3];
+ for (int32_t i = 0; i < 3; i ++) {
+ matrix[0][i] = p2[i] - p0[i];
+ matrix[1][i] = p1[i] - p0[i];
+ matrix[2][i] = point[i] - p0[i];
+ }
+
+ double error;
+ double det = Determinant3x3 (matrix, &error);
+ // Error bound scaled by one double-precision ulp step (2^-24 here).
+ double precision = double (1.0f) / double (1<<24);
+ double errbound = error * precision;
+ if (fabs(det) > errbound) {
+ return det;
+ }
+
+ // Ambiguous sign: recompute exactly.
+ dgGoogol exactMatrix[3][3];
+ for (int32_t i = 0; i < 3; i ++) {
+ exactMatrix[0][i] = dgGoogol(p2[i]) - dgGoogol(p0[i]);
+ exactMatrix[1][i] = dgGoogol(p1[i]) - dgGoogol(p0[i]);
+ exactMatrix[2][i] = dgGoogol(point[i]) - dgGoogol(p0[i]);
+ }
+
+ dgGoogol exactDet (Determinant3x3(exactMatrix));
+ det = exactDet.GetAproximateValue();
+ return det;
+}
+
+// Returns this face's plane, normalized so the normal has unit length.
+dgBigPlane dgConvexHull3DFace::GetPlaneEquation (const dgBigVector* const pointArray) const
+{
+ const dgBigVector& p0 = pointArray[m_index[0]];
+ const dgBigVector& p1 = pointArray[m_index[1]];
+ const dgBigVector& p2 = pointArray[m_index[2]];
+ dgBigPlane plane (p0, p1, p2);
+ plane = plane.Scale (1.0f / sqrt (plane % plane));
+ return plane;
+}
+
+
+// Constructs an empty hull; points array is pre-sized but the hull is not built.
+dgConvexHull3d::dgConvexHull3d ()
+ :dgList<dgConvexHull3DFace>()
+ ,m_count (0)
+ ,m_diag()
+ ,m_aabbP0(dgBigVector (double (0.0), double (0.0), double (0.0), double (0.0)))
+ ,m_aabbP1(dgBigVector (double (0.0), double (0.0), double (0.0), double (0.0)))
+ ,m_points(1024)
+{
+}
+
+// Deep-copies a hull: points, faces, and the twin (adjacency) links between
+// faces, using a node map to translate source face nodes to new ones.
+dgConvexHull3d::dgConvexHull3d(const dgConvexHull3d& source)
+ :dgList<dgConvexHull3DFace>()
+ ,m_count (source.m_count)
+ ,m_diag(source.m_diag)
+ ,m_aabbP0 (source.m_aabbP0)
+ ,m_aabbP1 (source.m_aabbP1)
+ ,m_points(source.m_count)
+{
+ // Touch the last slot first — presumably forces the array to grow to
+ // m_count before the element-wise copy (TODO confirm dgArray semantics).
+ m_points[m_count-1].m_w = double (0.0f);
+ for (int i = 0; i < m_count; i ++) {
+ m_points[i] = source.m_points[i];
+ }
+ // First pass: create one new face node per source node and remember the pairing.
+ dgTree<dgListNode*, dgListNode*> map;
+ for(dgListNode* sourceNode = source.GetFirst(); sourceNode; sourceNode = sourceNode->GetNext() ) {
+ dgListNode* const node = Append();
+ map.Insert(node, sourceNode);
+ }
+
+ // Second pass: copy indices and remap twin pointers through the node map.
+ for(dgListNode* sourceNode = source.GetFirst(); sourceNode; sourceNode = sourceNode->GetNext() ) {
+ dgListNode* const node = map.Find(sourceNode)->GetInfo();
+
+ dgConvexHull3DFace& face = node->GetInfo();
+ dgConvexHull3DFace& srcFace = sourceNode->GetInfo();
+
+ face.m_mark = 0;
+ for (int32_t i = 0; i < 3; i ++) {
+ face.m_index[i] = srcFace.m_index[i];
+ face.m_twin[i] = map.Find (srcFace.m_twin[i])->GetInfo();
+ }
+ }
+}
+
+// Builds the hull directly from a raw point cloud. strideInBytes is the byte
+// distance between consecutive points; distTol merges near-coincident points;
+// maxVertexCount caps the hull size.
+dgConvexHull3d::dgConvexHull3d(const double* const vertexCloud, int32_t strideInBytes, int32_t count, double distTol, int32_t maxVertexCount)
+ :m_count (0)
+ ,m_diag()
+ ,m_aabbP0 (dgBigVector (double (0.0), double (0.0), double (0.0), double (0.0)))
+ ,m_aabbP1 (dgBigVector (double (0.0), double (0.0), double (0.0), double (0.0)))
+ ,m_points(count)
+{
+ BuildHull (vertexCloud, strideInBytes, count, distTol, maxVertexCount);
+}
+
+dgConvexHull3d::~dgConvexHull3d(void)
+{
+}
+
+// Top-level hull construction: sizes a pool of tree clumps, normalizes and
+// deduplicates the input points (InitVertexArray), then runs the incremental
+// hull algorithm if at least 4 non-degenerate points remain (m_count is set
+// by InitVertexArray).
+void dgConvexHull3d::BuildHull (const double* const vertexCloud, int32_t strideInBytes, int32_t count, double distTol, int32_t maxVertexCount)
+{
+ int32_t treeCount = count / (DG_VERTEX_CLUMP_SIZE_3D>>1);
+ if (treeCount < 4) {
+ treeCount = 4;
+ }
+ treeCount *= 2;
+
+ dgStack<dgHullVertex> points (count);
+ dgStack<dgAABBPointTree3dClump> treePool (treeCount + 256);
+ count = InitVertexArray(&points[0], vertexCloud, strideInBytes, count, &treePool[0], treePool.GetSizeInBytes());
+
+ if (m_count >= 4) {
+ CalculateConvexHull (&treePool[0], &points[0], count, distTol, maxVertexCount);
+ }
+}
+
+// Lexicographic (x, y, z) comparator for sorting hull vertices; returns
+// -1/0/1. Used by dgSort and for duplicate detection.
+int32_t dgConvexHull3d::ConvexCompareVertex(const dgHullVertex* const A, const dgHullVertex* const B, void* const context)
+{
+ HACD_FORCE_PARAMETER_REFERENCE(context);
+ for (int32_t i = 0; i < 3; i ++) {
+ if ((*A)[i] < (*B)[i]) {
+ return -1;
+ } else if ((*A)[i] > (*B)[i]) {
+ return 1;
+ }
+ }
+ return 0;
+}
+
+
+
+// Recursively builds the point-partitioning tree over points[0..count).
+// Small ranges become fixed-size clumps; larger ranges are split in place
+// around the mean coordinate of the highest-variance axis. Nodes are
+// placement-new'd from *memoryPool, which is advanced (and maxMemSize
+// decremented) as nodes are consumed.
+dgAABBPointTree3d* dgConvexHull3d::BuildTree (dgAABBPointTree3d* const parent, dgHullVertex* const points, int32_t count, int32_t baseIndex, int8_t** memoryPool, int32_t& maxMemSize) const
+{
+ dgAABBPointTree3d* tree = NULL;
+
+ HACD_ASSERT (count);
+ dgBigVector minP ( float (1.0e15f), float (1.0e15f), float (1.0e15f), float (0.0f));
+ dgBigVector maxP (-float (1.0e15f), -float (1.0e15f), -float (1.0e15f), float (0.0f));
+ if (count <= DG_VERTEX_CLUMP_SIZE_3D) {
+
+ // Leaf clump: record global point indices and the bucket's AABB.
+ dgAABBPointTree3dClump* const clump = new (*memoryPool) dgAABBPointTree3dClump;
+ *memoryPool += sizeof (dgAABBPointTree3dClump);
+ maxMemSize -= sizeof (dgAABBPointTree3dClump);
+ HACD_ASSERT (maxMemSize >= 0);
+
+
+ clump->m_count = count;
+ for (int32_t i = 0; i < count; i ++) {
+ clump->m_indices[i] = i + baseIndex;
+
+ const dgBigVector& p = points[i];
+ minP.m_x = GetMin (p.m_x, minP.m_x);
+ minP.m_y = GetMin (p.m_y, minP.m_y);
+ minP.m_z = GetMin (p.m_z, minP.m_z);
+
+ maxP.m_x = GetMax (p.m_x, maxP.m_x);
+ maxP.m_y = GetMax (p.m_y, maxP.m_y);
+ maxP.m_z = GetMax (p.m_z, maxP.m_z);
+ }
+
+ clump->m_left = NULL;
+ clump->m_right = NULL;
+ tree = clump;
+
+ } else {
+ // Pick the split axis as the one with the largest coordinate variance.
+ dgBigVector median (float (0.0f), float (0.0f), float (0.0f), float (0.0f));
+ dgBigVector varian (float (0.0f), float (0.0f), float (0.0f), float (0.0f));
+ for (int32_t i = 0; i < count; i ++) {
+
+ const dgBigVector& p = points[i];
+ minP.m_x = GetMin (p.m_x, minP.m_x);
+ minP.m_y = GetMin (p.m_y, minP.m_y);
+ minP.m_z = GetMin (p.m_z, minP.m_z);
+
+ maxP.m_x = GetMax (p.m_x, maxP.m_x);
+ maxP.m_y = GetMax (p.m_y, maxP.m_y);
+ maxP.m_z = GetMax (p.m_z, maxP.m_z);
+
+ median += p;
+ varian += p.CompProduct (p);
+ }
+
+ varian = varian.Scale (float (count)) - median.CompProduct(median);
+ int32_t index = 0;
+ double maxVarian = double (-1.0e10f);
+ for (int32_t i = 0; i < 3; i ++) {
+ if (varian[i] > maxVarian) {
+ index = i;
+ maxVarian = varian[i];
+ }
+ }
+ dgBigVector center = median.Scale (double (1.0f) / double (count));
+
+ double test = center[index];
+
+ // Hoare-style partition of points around the mean on the chosen axis.
+ int32_t i0 = 0;
+ int32_t i1 = count - 1;
+ do {
+ for (; i0 <= i1; i0 ++) {
+ double val = points[i0][index];
+ if (val > test) {
+ break;
+ }
+ }
+
+ for (; i1 >= i0; i1 --) {
+ double val = points[i1][index];
+ if (val < test) {
+ break;
+ }
+ }
+
+ if (i0 < i1) {
+ Swap(points[i0], points[i1]);
+ i0++;
+ i1--;
+ }
+ } while (i0 <= i1);
+
+ // Guard against a degenerate (empty) side: fall back to a middle split.
+ if (i0 == 0){
+ i0 = count / 2;
+ }
+ if (i0 == (count - 1)){
+ i0 = count / 2;
+ }
+
+ tree = new (*memoryPool) dgAABBPointTree3d;
+ *memoryPool += sizeof (dgAABBPointTree3d);
+ maxMemSize -= sizeof (dgAABBPointTree3d);
+ HACD_ASSERT (maxMemSize >= 0);
+
+ HACD_ASSERT (i0);
+ HACD_ASSERT (count - i0);
+
+ tree->m_left = BuildTree (tree, points, i0, baseIndex, memoryPool, maxMemSize);
+ tree->m_right = BuildTree (tree, &points[i0], count - i0, i0 + baseIndex, memoryPool, maxMemSize);
+ }
+
+ HACD_ASSERT (tree);
+ tree->m_parent = parent;
+ // Inflate the stored box slightly so boundary points test as inside.
+ tree->m_box[0] = minP - dgBigVector (double (1.0e-3f), double (1.0e-3f), double (1.0e-3f), double (1.0f));
+ tree->m_box[1] = maxP + dgBigVector (double (1.0e-3f), double (1.0e-3f), double (1.0e-3f), double (1.0f));
+ return tree;
+}
+
+
+
+
+
+// Prepares the input cloud for hull construction: copies the vertices into
+// the working array, removes duplicates, builds the AABB point tree used by
+// SupportVertex(), and seeds the hull with an initial tetrahedron oriented
+// so its signed volume is negative.
+// Returns the number of unique input points; on degenerate input (fewer than
+// 4 unique points, or all points coplanar/collinear) m_count is set to 0.
+int32_t dgConvexHull3d::InitVertexArray(dgHullVertex* const points, const double* const vertexCloud, int32_t strideInBytes, int32_t count, void* const memoryPool, int32_t maxMemSize)
+{
+	// Copy the strided cloud into dgBigVector form; m_index == 0 marks a
+	// point as not yet part of the hull.
+	int32_t stride = int32_t (strideInBytes / sizeof (double));
+	if (stride >= 4) {
+		for (int32_t i = 0; i < count; i ++) {
+			int32_t index = i * stride;
+			dgBigVector& vertex = points[i];
+			vertex = dgBigVector (vertexCloud[index], vertexCloud[index + 1], vertexCloud[index + 2], vertexCloud[index + 3]);
+			HACD_ASSERT (dgCheckVector(vertex));
+			points[i].m_index = 0;
+		}
+	} else {
+		for (int32_t i = 0; i < count; i ++) {
+			int32_t index = i * stride;
+			dgBigVector& vertex = points[i];
+			vertex = dgBigVector (vertexCloud[index], vertexCloud[index + 1], vertexCloud[index + 2], double (0.0f));
+			HACD_ASSERT (dgCheckVector(vertex));
+			points[i].m_index = 0;
+		}
+	}
+
+	// Sort lexicographically, then compact: the inner loop skips over runs of
+	// equal vertices, keeping only the first of each run.
+	dgSort(points, count, ConvexCompareVertex);
+
+	int32_t indexCount = 0;
+	for (int i = 1; i < count; i ++) {
+		for (; i < count; i ++) {
+			if (ConvexCompareVertex (&points[indexCount], &points[i], NULL)) {
+				indexCount ++;
+				points[indexCount] = points[i];
+				break;
+			}
+		}
+	}
+	count = indexCount + 1;
+	if (count < 4) {
+		m_count = 0;
+		return count;
+	}
+
+	dgAABBPointTree3d* tree = BuildTree (NULL, points, count, 0, (int8_t**) &memoryPool, maxMemSize);
+
+	m_aabbP0 = tree->m_box[0];
+	m_aabbP1 = tree->m_box[1];
+
+	// m_diag is the bounding-box diagonal; it scales all degeneracy tolerances below.
+	dgBigVector boxSize (tree->m_box[1] - tree->m_box[0]);
+	m_diag = float (sqrt (boxSize % boxSize));
+
+	dgStack<dgBigVector> normalArrayPool (256);
+	dgBigVector* const normalArray = &normalArrayPool[0];
+	int32_t normalCount = BuildNormalList (&normalArray[0]);
+
+	// First vertex: extreme point along the first sampled direction.
+	int32_t index = SupportVertex (&tree, points, normalArray[0]);
+	m_points[0] = points[index];
+	points[index].m_index = 1;
+
+	// Second vertex: first support point far enough from m_points[0] to give
+	// a non-degenerate edge e1.
+	bool validTetrahedrum = false;
+	dgBigVector e1 (double (0.0f), double (0.0f), double (0.0f), double (0.0f)) ;
+	for (int32_t i = 1; i < normalCount; i ++) {
+		int32_t index = SupportVertex (&tree, points, normalArray[i]);
+		HACD_ASSERT (index >= 0);
+
+		e1 = points[index] - m_points[0];
+		double error2 = e1 % e1;
+		if (error2 > (float (1.0e-4f) * m_diag * m_diag)) {
+			m_points[1] = points[index];
+			points[index].m_index = 1;
+			validTetrahedrum = true;
+			break;
+		}
+	}
+	if (!validTetrahedrum) {
+		m_count = 0;
+		HACD_ASSERT (0);
+		return count;
+	}
+
+	// Third vertex: support point whose edge e2 spans a non-degenerate
+	// triangle with e1 (cross-product magnitude above tolerance).
+	validTetrahedrum = false;
+	dgBigVector e2(float (0.0f), float (0.0f), float (0.0f), float (0.0f));;
+	dgBigVector normal (float (0.0f), float (0.0f), float (0.0f), float (0.0f));
+	for (int32_t i = 2; i < normalCount; i ++) {
+		int32_t index = SupportVertex (&tree, points, normalArray[i]);
+		HACD_ASSERT (index >= 0);
+		e2 = points[index] - m_points[0];
+		normal = e1 * e2;
+		double error2 = sqrt (normal % normal);
+		if (error2 > (float (1.0e-4f) * m_diag * m_diag)) {
+			m_points[2] = points[index];
+			points[index].m_index = 1;
+			validTetrahedrum = true;
+			break;
+		}
+	}
+
+	if (!validTetrahedrum) {
+		m_count = 0;
+		HACD_ASSERT (0);
+		return count;
+	}
+
+	// Fourth vertex: a point off the plane of the first triangle, tried
+	// first along the triangle normal, then the opposite direction, then all
+	// remaining sampled directions.
+	validTetrahedrum = false;
+	dgBigVector e3(float (0.0f), float (0.0f), float (0.0f), float (0.0f));
+
+	index = SupportVertex (&tree, points, normal);
+	e3 = points[index] - m_points[0];
+	double error2 = normal % e3;
+	if (fabs (error2) > (double (1.0e-6f) * m_diag * m_diag)) {
+		// found a valid tetrahedron; the hull is built by adding the remaining points
+		m_points[3] = points[index];
+		points[index].m_index = 1;
+		validTetrahedrum = true;
+	}
+	if (!validTetrahedrum) {
+		dgVector n (normal.Scale(double (-1.0f)));
+		int32_t index = SupportVertex (&tree, points, n);
+		e3 = points[index] - m_points[0];
+		double error2 = normal % e3;
+		if (fabs (error2) > (double (1.0e-6f) * m_diag * m_diag)) {
+			// found a valid tetrahedron; the hull is built by adding the remaining points
+			m_points[3] = points[index];
+			points[index].m_index = 1;
+			validTetrahedrum = true;
+		}
+	}
+	if (!validTetrahedrum) {
+		for (int32_t i = 3; i < normalCount; i ++) {
+			int32_t index = SupportVertex (&tree, points, normalArray[i]);
+			HACD_ASSERT (index >= 0);
+
+			// make sure the volume of the first tetrahedron is not degenerate
+			e3 = points[index] - m_points[0];
+			double error2 = normal % e3;
+			if (fabs (error2) > (double (1.0e-6f) * m_diag * m_diag)) {
+				// found a valid tetrahedron; the hull is built by adding the remaining points
+				m_points[3] = points[index];
+				points[index].m_index = 1;
+				validTetrahedrum = true;
+				break;
+			}
+		}
+	}
+	if (!validTetrahedrum) {
+		// the points do not form a convex hull
+		m_count = 0;
+		//HACD_ASSERT (0);
+		return count;
+	}
+
+	// Enforce the orientation convention: the seed tetrahedron must have
+	// negative signed volume (faces wound consistently for the hull walk).
+	m_count = 4;
+	double volume = TetrahedrumVolume (m_points[0], m_points[1], m_points[2], m_points[3]);
+	if (volume > double (0.0f)) {
+		Swap(m_points[2], m_points[3]);
+	}
+	HACD_ASSERT (TetrahedrumVolume(m_points[0], m_points[1], m_points[2], m_points[3]) < double(0.0f));
+
+	return count;
+}
+
+// Signed volume (scaled by 6) of the tetrahedron (p0, p1, p2, p3):
+// ((p1-p0) x (p2-p0)) . (p3-p0). The sign encodes orientation; callers only
+// compare against zero, so the missing 1/6 factor is irrelevant.
+double dgConvexHull3d::TetrahedrumVolume (const dgBigVector& p0, const dgBigVector& p1, const dgBigVector& p2, const dgBigVector& p3) const
+{
+	dgBigVector p1p0 (p1 - p0);
+	dgBigVector p2p0 (p2 - p0);
+	dgBigVector p3p0 (p3 - p0);
+	return (p1p0 * p2p0) % p3p0;
+}
+
+
+// Recursively subdivides a unit-sphere triangle (p0, p1, p2) `level` times,
+// emitting one unit normal per leaf triangle into `ouput`. Leaf normals are
+// written at index `start` which advances by 8, so the 8 calls from
+// BuildNormalList (one per octant, starting at offsets 0..7) interleave
+// their outputs across the array.
+void dgConvexHull3d::TessellateTriangle (int32_t level, const dgVector& p0, const dgVector& p1, const dgVector& p2, int32_t& count, dgBigVector* const ouput, int32_t& start) const
+{
+	if (level) {
+		HACD_ASSERT (dgAbsf (p0 % p0 - float (1.0f)) < float (1.0e-4f));
+		HACD_ASSERT (dgAbsf (p1 % p1 - float (1.0f)) < float (1.0e-4f));
+		HACD_ASSERT (dgAbsf (p2 % p2 - float (1.0f)) < float (1.0e-4f));
+		// Split each edge at its midpoint and re-project onto the unit sphere.
+		dgVector p01 (p0 + p1);
+		dgVector p12 (p1 + p2);
+		dgVector p20 (p2 + p0);
+
+		p01 = p01.Scale (float (1.0f) / dgSqrt(p01 % p01));
+		p12 = p12.Scale (float (1.0f) / dgSqrt(p12 % p12));
+		p20 = p20.Scale (float (1.0f) / dgSqrt(p20 % p20));
+
+		HACD_ASSERT (dgAbsf (p01 % p01 - float (1.0f)) < float (1.0e-4f));
+		HACD_ASSERT (dgAbsf (p12 % p12 - float (1.0f)) < float (1.0e-4f));
+		HACD_ASSERT (dgAbsf (p20 % p20 - float (1.0f)) < float (1.0e-4f));
+
+		// Recurse into the four sub-triangles (corner x3 plus center).
+		TessellateTriangle (level - 1, p0, p01, p20, count, ouput, start);
+		TessellateTriangle (level - 1, p1, p12, p01, count, ouput, start);
+		TessellateTriangle (level - 1, p2, p20, p12, count, ouput, start);
+		TessellateTriangle (level - 1, p01, p12, p20, count, ouput, start);
+
+	} else {
+		// Leaf: store the normalized plane normal of this triangle.
+		dgBigPlane n (p0, p1, p2);
+		n = n.Scale (double(1.0f) / sqrt (n % n));
+		n.m_w = double(0.0f);
+		ouput[start] = n;
+		start += 8;
+		count ++;
+	}
+}
+
+
+// Returns the index of the unused point (m_index == 0) most extreme in
+// direction `dir`, using a best-first branch-and-bound descent of the AABB
+// point tree instead of a linear scan. As a side effect, points already on
+// the hull are pruned from their clump, and empty clumps are unlinked from
+// the tree (which is why the tree root is passed by pointer).
+int32_t dgConvexHull3d::SupportVertex (dgAABBPointTree3d** const treePointer, const dgHullVertex* const points, const dgBigVector& dir) const
+{
+/*
+	double dist = float (-1.0e10f);
+	int32_t index = -1;
+	for (int32_t i = 0; i < count; i ++) {
+		//double dist1 = dir.DotProduct4 (points[i]);
+		double dist1 = dir % points[i];
+		if (dist1 > dist) {
+			dist = dist1;
+			index = i;
+		}
+	}
+	HACD_ASSERT (index != -1);
+	return index;
+*/
+
+	#define DG_STACK_DEPTH_3D 64
+	double aabbProjection[DG_STACK_DEPTH_3D];
+	const dgAABBPointTree3d *stackPool[DG_STACK_DEPTH_3D];
+
+	int32_t index = -1;
+	int32_t stack = 1;
+	stackPool[0] = *treePointer;
+	aabbProjection[0] = float (1.0e20f);
+	double maxProj = double (-1.0e20f);
+	// Per-axis selector of the AABB corner that maximizes the projection on dir.
+	int32_t ix = (dir[0] > double (0.0f)) ? 1 : 0;
+	int32_t iy = (dir[1] > double (0.0f)) ? 1 : 0;
+	int32_t iz = (dir[2] > double (0.0f)) ? 1 : 0;
+	while (stack) {
+		stack--;
+		double boxSupportValue = aabbProjection[stack];
+		// Prune: a node whose box cannot beat the best projection found so far.
+		if (boxSupportValue > maxProj) {
+			const dgAABBPointTree3d* const me = stackPool[stack];
+
+			if (me->m_left && me->m_right) {
+				dgBigVector leftSupportPoint (me->m_left->m_box[ix].m_x, me->m_left->m_box[iy].m_y, me->m_left->m_box[iz].m_z, float (0.0));
+				double leftSupportDist = leftSupportPoint % dir;
+
+				dgBigVector rightSupportPoint (me->m_right->m_box[ix].m_x, me->m_right->m_box[iy].m_y, me->m_right->m_box[iz].m_z, float (0.0));
+				double rightSupportDist = rightSupportPoint % dir;
+
+
+				// Push the more promising child last so it is popped first.
+				if (rightSupportDist >= leftSupportDist) {
+					aabbProjection[stack] = leftSupportDist;
+					stackPool[stack] = me->m_left;
+					stack++;
+					HACD_ASSERT (stack < DG_STACK_DEPTH_3D);
+					aabbProjection[stack] = rightSupportDist;
+					stackPool[stack] = me->m_right;
+					stack++;
+					HACD_ASSERT (stack < DG_STACK_DEPTH_3D);
+				} else {
+					aabbProjection[stack] = rightSupportDist;
+					stackPool[stack] = me->m_right;
+					stack++;
+					HACD_ASSERT (stack < DG_STACK_DEPTH_3D);
+					aabbProjection[stack] = leftSupportDist;
+					stackPool[stack] = me->m_left;
+					stack++;
+					HACD_ASSERT (stack < DG_STACK_DEPTH_3D);
+				}
+
+			} else {
+				// Leaf clump: scan its points, lazily removing ones already used.
+				dgAABBPointTree3dClump* const clump = (dgAABBPointTree3dClump*) me;
+				for (int32_t i = 0; i < clump->m_count; i ++) {
+					const dgHullVertex& p = points[clump->m_indices[i]];
+					HACD_ASSERT (p.m_x >= clump->m_box[0].m_x);
+					HACD_ASSERT (p.m_x <= clump->m_box[1].m_x);
+					HACD_ASSERT (p.m_y >= clump->m_box[0].m_y);
+					HACD_ASSERT (p.m_y <= clump->m_box[1].m_y);
+					HACD_ASSERT (p.m_z >= clump->m_box[0].m_z);
+					HACD_ASSERT (p.m_z <= clump->m_box[1].m_z);
+					if (!p.m_index) {
+						double dist = p % dir;
+						if (dist > maxProj) {
+							maxProj = dist;
+							index = clump->m_indices[i];
+						}
+					} else {
+						// Swap-remove a used point and revisit slot i.
+						clump->m_indices[i] = clump->m_indices[clump->m_count - 1];
+						clump->m_count = clump->m_count - 1;
+						i --;
+					}
+				}
+
+				// If the clump emptied out, splice it (and its parent) out of
+				// the tree by promoting the sibling.
+				if (clump->m_count == 0) {
+					dgAABBPointTree3d* const parent = clump->m_parent;
+					if (parent) {
+						dgAABBPointTree3d* const sibling = (parent->m_left != clump) ? parent->m_left : parent->m_right;
+						HACD_ASSERT (sibling != clump);
+						dgAABBPointTree3d* const grandParent = parent->m_parent;
+						if (grandParent) {
+							sibling->m_parent = grandParent;
+							if (grandParent->m_right == parent) {
+								grandParent->m_right = sibling;
+							} else {
+								grandParent->m_left = sibling;
+							}
+						} else {
+							// Parent was the root: the sibling becomes the new root.
+							sibling->m_parent = NULL;
+							*treePointer = sibling;
+						}
+					}
+				}
+			}
+		}
+	}
+
+	HACD_ASSERT (index != -1);
+	return index;
+}
+
+
+// Fills `normalArray` with a set of well-distributed unit direction vectors
+// by tessellating the 8 faces of an octahedron (one subdivision level, so
+// 8 * 4 = 32 directions). Each call writes every 8th slot starting at a
+// different offset, matching TessellateTriangle's `start += 8` stride.
+// Returns the number of directions generated.
+int32_t dgConvexHull3d::BuildNormalList (dgBigVector* const normalArray) const
+{
+	dgVector p0 ( float (1.0f), float (0.0f), float (0.0f), float (0.0f));
+	dgVector p1 (-float (1.0f), float (0.0f), float (0.0f), float (0.0f));
+	dgVector p2 ( float (0.0f), float (1.0f), float (0.0f), float (0.0f));
+	dgVector p3 ( float (0.0f),-float (1.0f), float (0.0f), float (0.0f));
+	dgVector p4 ( float (0.0f), float (0.0f), float (1.0f), float (0.0f));
+	dgVector p5 ( float (0.0f), float (0.0f),-float (1.0f), float (0.0f));
+
+	int32_t count = 0;
+	int32_t subdivitions = 1;
+
+	int32_t start = 0;
+	TessellateTriangle (subdivitions, p4, p0, p2, count, normalArray, start);
+	start = 1;
+	TessellateTriangle (subdivitions, p5, p3, p1, count, normalArray, start);
+	start = 2;
+	TessellateTriangle (subdivitions, p5, p1, p2, count, normalArray, start);
+	start = 3;
+	TessellateTriangle (subdivitions, p4, p3, p0, count, normalArray, start);
+	start = 4;
+	TessellateTriangle (subdivitions, p4, p2, p1, count, normalArray, start);
+	start = 5;
+	TessellateTriangle (subdivitions, p5, p0, p3, count, normalArray, start);
+	start = 6;
+	TessellateTriangle (subdivitions, p5, p2, p0, count, normalArray, start);
+	start = 7;
+	TessellateTriangle (subdivitions, p4, p1, p3, count, normalArray, start);
+	return count;
+}
+
+// Appends a new triangular face (vertex indices i0, i1, i2 into m_points)
+// to the face list and returns its list node. Twin links are left for the
+// caller to wire up.
+dgConvexHull3d::dgListNode* dgConvexHull3d::AddFace (int32_t i0, int32_t i1, int32_t i2)
+{
+	dgListNode* const node = Append();
+	dgConvexHull3DFace& face = node->GetInfo();
+
+	face.m_index[0] = i0;
+	face.m_index[1] = i1;
+	face.m_index[2] = i2;
+	return node;
+}
+
+// Removes a face node from the hull's face list (counterpart of AddFace).
+void dgConvexHull3d::DeleteFace (dgListNode* const node)
+{
+	Remove (node);
+}
+
+// Debug-only topology check: would verify that every face's twin links are
+// reciprocal (each twin points back exactly once). The body is compiled out,
+// so this currently always returns true; it is kept so the HACD_ASSERT
+// call sites in CalculateConvexHull remain in place.
+bool dgConvexHull3d::Sanity() const
+{
+/*
+	for (dgListNode* node = GetFirst(); node; node = node->GetNext()) {
+		dgConvexHull3DFace* const face = &node->GetInfo();
+		for (int32_t i = 0; i < 3; i ++) {
+			dgListNode* const twinNode = face->m_twin[i];
+			if (!twinNode) {
+				return false;
+			}
+
+			int32_t count = 0;
+			dgListNode* me = NULL;
+			dgConvexHull3DFace* const twinFace = &twinNode->GetInfo();
+			for (int32_t j = 0; j < 3; j ++) {
+				if (twinFace->m_twin[j] == node) {
+					count ++;
+					me = twinFace->m_twin[j];
+				}
+			}
+			if (count != 1) {
+				return false;
+			}
+			if (me != node) {
+				return false;
+			}
+		}
+	}
+*/
+	return true;
+}
+
+// Incremental convex hull construction (Clarkson-style conflict walk): for
+// each boundary face, find the farthest unused point beyond it, delete all
+// faces visible from that point, and stitch a cone of new faces around the
+// resulting horizon. Stops when no face has a point farther than distTol,
+// or when maxVertexCount hull vertices have been added.
+// Fix: coneList previously aliased stackPool (`&stackPool[0]` typo) while
+// coneListPool was allocated and bounds-checked but never used; it now
+// points at its own pool as intended.
+void dgConvexHull3d::CalculateConvexHull (dgAABBPointTree3d* vertexTree, dgHullVertex* const points, int32_t count, double distTol, int32_t maxVertexCount)
+{
+	distTol = fabs (distTol) * m_diag;
+	// Seed faces of the initial tetrahedron (vertices 0..3 of m_points).
+	dgListNode* const f0Node = AddFace (0, 1, 2);
+	dgListNode* const f1Node = AddFace (0, 2, 3);
+	dgListNode* const f2Node = AddFace (2, 1, 3);
+	dgListNode* const f3Node = AddFace (1, 0, 3);
+
+	dgConvexHull3DFace* const f0 = &f0Node->GetInfo();
+	dgConvexHull3DFace* const f1 = &f1Node->GetInfo();
+	dgConvexHull3DFace* const f2 = &f2Node->GetInfo();
+	dgConvexHull3DFace* const f3 = &f3Node->GetInfo();
+
+	// Wire the twin (adjacency) links of the tetrahedron by hand.
+	f0->m_twin[0] = (dgList<dgConvexHull3DFace>::dgListNode*)f3Node;
+	f0->m_twin[1] = (dgList<dgConvexHull3DFace>::dgListNode*)f2Node;
+	f0->m_twin[2] = (dgList<dgConvexHull3DFace>::dgListNode*)f1Node;
+
+	f1->m_twin[0] = (dgList<dgConvexHull3DFace>::dgListNode*)f0Node;
+	f1->m_twin[1] = (dgList<dgConvexHull3DFace>::dgListNode*)f2Node;
+	f1->m_twin[2] = (dgList<dgConvexHull3DFace>::dgListNode*)f3Node;
+
+	f2->m_twin[0] = (dgList<dgConvexHull3DFace>::dgListNode*)f0Node;
+	f2->m_twin[1] = (dgList<dgConvexHull3DFace>::dgListNode*)f3Node;
+	f2->m_twin[2] = (dgList<dgConvexHull3DFace>::dgListNode*)f1Node;
+
+	f3->m_twin[0] = (dgList<dgConvexHull3DFace>::dgListNode*)f0Node;
+	f3->m_twin[1] = (dgList<dgConvexHull3DFace>::dgListNode*)f1Node;
+	f3->m_twin[2] = (dgList<dgConvexHull3DFace>::dgListNode*)f2Node;
+
+	dgList<dgListNode*> boundaryFaces;
+
+	boundaryFaces.Append(f0Node);
+	boundaryFaces.Append(f1Node);
+	boundaryFaces.Append(f2Node);
+	boundaryFaces.Append(f3Node);
+
+	// Scratch pools: visibility-walk stack, the cone of newly created faces,
+	// and the list of faces to delete.
+	dgStack<dgListNode*> stackPool(1024 + m_count);
+	dgStack<dgListNode*> coneListPool(1024 + m_count);
+	dgStack<dgListNode*> deleteListPool(1024 + m_count);
+
+	dgListNode** const stack = &stackPool[0];
+	dgListNode** const coneList = &coneListPool[0];
+	dgListNode** const deleteList = &deleteListPool[0];
+
+	count -= 4;
+	maxVertexCount -= 4;
+	int32_t currentIndex = 4;
+
+	while (boundaryFaces.GetCount() && count && (maxVertexCount > 0)) {
+		// my definition of the optimal convex hull of a given vertex count,
+		// is the convex hull formed by a subset of vertex from the input array
+		// that minimized the volume difference between the perfect hull form those vertex, and the hull of the sub set of vertex.
+		// Only using a priority heap we can be sure that it will generate the best hull selecting the best points from the vertex array.
+		// Since all our tools do not have a limit on the point count of a hull I can use either a list of a queue.
+		// a stack maximize construction speed, a Queue tend to maximize the volume of the generated Hull. for now we use a queue.
+		// For perfect hulls it does not make a difference if we use a stack, queue, or a priority heap,
+		// this only apply for when build hull of a limited vertex count.
+		//
+		// Also when building Hulls of a limited vertex count, this function runs in constant time.
+		// yes that is correct, it does not makes a difference if you build a N point hull from 100 vertex
+		// or from 100000 vertex input array.
+
+		#if 0
+		// using stack (faster)
+		dgListNode* const faceNode = boundaryFaces.GetFirst()->GetInfo();
+		#else
+		// using a queue (some what slower by better hull for reduce vertex count)
+		dgListNode* const faceNode = boundaryFaces.GetLast()->GetInfo();
+		#endif
+
+		dgConvexHull3DFace* const face = &faceNode->GetInfo();
+		dgBigPlane planeEquation (face->GetPlaneEquation (&m_points[0]));
+
+		// Farthest unused point in the direction of this face's plane normal.
+		int32_t index = SupportVertex (&vertexTree, points, planeEquation);
+		const dgBigVector& p = points[index];
+		double dist = planeEquation.Evalue(p);
+
+		if ((dist >= distTol) && (face->Evalue(&m_points[0], p) > double(0.0f))) {
+			// The point is beyond this face: flood-fill all faces visible
+			// from p through the twin links and mark them for deletion.
+			HACD_ASSERT (Sanity());
+
+			HACD_ASSERT (faceNode);
+			stack[0] = faceNode;
+
+			int32_t stackIndex = 1;
+			int32_t deletedCount = 0;
+
+			while (stackIndex) {
+				stackIndex --;
+				dgListNode* const node = stack[stackIndex];
+				dgConvexHull3DFace* const face = &node->GetInfo();
+
+				if (!face->m_mark && (face->Evalue(&m_points[0], p) > double(0.0f))) {
+					#ifdef _DEBUG
+					for (int32_t i = 0; i < deletedCount; i ++) {
+						HACD_ASSERT (deleteList[i] != node);
+					}
+					#endif
+
+					deleteList[deletedCount] = node;
+					deletedCount ++;
+					HACD_ASSERT (deletedCount < int32_t (deleteListPool.GetElementsCount()));
+					face->m_mark = 1;
+					for (int32_t i = 0; i < 3; i ++) {
+						dgListNode* const twinNode = (dgListNode*)face->m_twin[i];
+						HACD_ASSERT (twinNode);
+						dgConvexHull3DFace* const twinFace = &twinNode->GetInfo();
+						if (!twinFace->m_mark) {
+							stack[stackIndex] = twinNode;
+							stackIndex ++;
+							HACD_ASSERT (stackIndex < int32_t (stackPool.GetElementsCount()));
+						}
+					}
+				}
+			}
+
+//			Swap (hullVertexArray[index], hullVertexArray[currentIndex]);
+			m_points[currentIndex] = points[index];
+			points[index].m_index = 1;
+
+			// Build the cone of new faces: one per horizon edge (an edge
+			// between a deleted face and a surviving twin).
+			int32_t newCount = 0;
+			for (int32_t i = 0; i < deletedCount; i ++) {
+				dgListNode* const node = deleteList[i];
+				dgConvexHull3DFace* const face = &node->GetInfo();
+				HACD_ASSERT (face->m_mark == 1);
+				for (int32_t j0 = 0; j0 < 3; j0 ++) {
+					dgListNode* const twinNode = face->m_twin[j0];
+					dgConvexHull3DFace* const twinFace = &twinNode->GetInfo();
+					if (!twinFace->m_mark) {
+						int32_t j1 = (j0 == 2) ? 0 : j0 + 1;
+						dgListNode* const newNode = AddFace (currentIndex, face->m_index[j0], face->m_index[j1]);
+						boundaryFaces.Addtop(newNode);
+
+						// Edge 1 borders the surviving twin; edges 0 and 2 are
+						// stitched to sibling cone faces below.
+						dgConvexHull3DFace* const newFace = &newNode->GetInfo();
+						newFace->m_twin[1] = twinNode;
+						for (int32_t k = 0; k < 3; k ++) {
+							if (twinFace->m_twin[k] == node) {
+								twinFace->m_twin[k] = newNode;
+							}
+						}
+						coneList[newCount] = newNode;
+						newCount ++;
+						HACD_ASSERT (newCount < int32_t (coneListPool.GetElementsCount()));
+					}
+				}
+			}
+
+			// Wire the cone faces to each other along their shared apex edges.
+			for (int32_t i = 0; i < newCount - 1; i ++) {
+				dgListNode* const nodeA = coneList[i];
+				dgConvexHull3DFace* const faceA = &nodeA->GetInfo();
+				HACD_ASSERT (faceA->m_mark == 0);
+				for (int32_t j = i + 1; j < newCount; j ++) {
+					dgListNode* const nodeB = coneList[j];
+					dgConvexHull3DFace* const faceB = &nodeB->GetInfo();
+					HACD_ASSERT (faceB->m_mark == 0);
+					if (faceA->m_index[2] == faceB->m_index[1]) {
+						faceA->m_twin[2] = nodeB;
+						faceB->m_twin[0] = nodeA;
+						break;
+					}
+				}
+
+				for (int32_t j = i + 1; j < newCount; j ++) {
+					dgListNode* const nodeB = coneList[j];
+					dgConvexHull3DFace* const faceB = &nodeB->GetInfo();
+					HACD_ASSERT (faceB->m_mark == 0);
+					if (faceA->m_index[1] == faceB->m_index[2]) {
+						faceA->m_twin[0] = nodeB;
+						faceB->m_twin[2] = nodeA;
+						break;
+					}
+				}
+			}
+
+			for (int32_t i = 0; i < deletedCount; i ++) {
+				dgListNode* const node = deleteList[i];
+				boundaryFaces.Remove (node);
+				DeleteFace (node);
+			}
+
+			maxVertexCount --;
+			currentIndex ++;
+			count --;
+		} else {
+			// No point beyond this face: it is final; drop it from the boundary.
+			boundaryFaces.Remove (faceNode);
+		}
+	}
+	m_count = currentIndex;
+}
+
+
+// Computes the hull's volume and surface area by accumulating, per face,
+// the cross-product magnitude (twice the triangle area) and the scalar
+// triple product (p0 x p1) . p2 (six times the signed tetrahedron volume
+// against the origin); the 1/2 and 1/6 factors are applied at the end.
+void dgConvexHull3d::CalculateVolumeAndSurfaceArea (double& volume, double& surfaceArea) const
+{
+	double areaAcc = float (0.0f);
+	double volumeAcc = float (0.0f);
+	for (dgListNode* node = GetFirst(); node; node = node->GetNext()) {
+		const dgConvexHull3DFace* const face = &node->GetInfo();
+		int32_t i0 = face->m_index[0];
+		int32_t i1 = face->m_index[1];
+		int32_t i2 = face->m_index[2];
+		const dgBigVector& p0 = m_points[i0];
+		const dgBigVector& p1 = m_points[i1];
+		const dgBigVector& p2 = m_points[i2];
+		dgBigVector normal ((p1 - p0) * (p2 - p0));
+		double area = sqrt (normal % normal);
+		areaAcc += area;
+		volumeAcc += (p0 * p1) % p2;
+	}
+	HACD_ASSERT (volumeAcc >= double (0.0f));
+	volume = volumeAcc * double (1.0f/6.0f);
+	surfaceArea = areaAcc * double (0.5f);
+}
+
+// this code has linear time complexity on the number of faces
+double dgConvexHull3d::RayCast (const dgBigVector& localP0, const dgBigVector& localP1) const
+{
+ double interset = float (1.2f);
+ double tE = double (0.0f); // for the maximum entering segment parameter;
+ double tL = double (1.0f); // for the minimum leaving segment parameter;
+ dgBigVector dS (localP1 - localP0); // is the segment direction vector;
+ int32_t hasHit = 0;
+
+ for (dgListNode* node = GetFirst(); node; node = node->GetNext())
+ {
+ const dgConvexHull3DFace* const face = &node->GetInfo();
+
+ int32_t i0 = face->m_index[0];
+ int32_t i1 = face->m_index[1];
+ int32_t i2 = face->m_index[2];
+
+ const dgBigVector& p0 = m_points[i0];
+ dgBigVector normal ((m_points[i1] - p0) * (m_points[i2] - p0));
+
+ double N = -((localP0 - p0) % normal);
+ double D = dS % normal;
+
+ if (fabs(D) < double (1.0e-12f))
+ { //
+ if (N < double (0.0f))
+ {
+ return double (1.2f);
+ }
+ else
+ {
+ continue;
+ }
+ }
+
+ double t = N / D;
+ if (D < double (0.0f))
+ {
+ if (t > tE)
+ {
+ tE = t;
+ hasHit = 1;
+ }
+ if (tE > tL)
+ {
+ return double (1.2f);
+ }
+ }
+ else
+ {
+ HACD_ASSERT (D >= double (0.0f));
+ tL = GetMin (tL, t);
+ if (tL < tE)
+ {
+ return double (1.2f);
+ }
+ }
+ }
+
+ if (hasHit)
+ {
+ interset = tE;
+ }
+
+ return interset;
+}
+
+
diff --git a/APEX_1.4/shared/general/HACD/src/dgGoogol.cpp b/APEX_1.4/shared/general/HACD/src/dgGoogol.cpp
new file mode 100644
index 00000000..666ed0ec
--- /dev/null
+++ b/APEX_1.4/shared/general/HACD/src/dgGoogol.cpp
@@ -0,0 +1,421 @@
+/* Copyright (c) <2003-2011> <Julio Jerez, Newton Game Dynamics>
+*
+* This software is provided 'as-is', without any express or implied
+* warranty. In no event will the authors be held liable for any damages
+* arising from the use of this software.
+*
+* Permission is granted to anyone to use this software for any purpose,
+* including commercial applications, and to alter it and redistribute it
+* freely, subject to the following restrictions:
+*
+* 1. The origin of this software must not be misrepresented; you must not
+* claim that you wrote the original software. If you use this software
+* in a product, an acknowledgment in the product documentation would be
+* appreciated but is not required.
+*
+* 2. Altered source versions must be plainly marked as such, and must not be
+* misrepresented as being the original software.
+*
+* 3. This notice may not be removed or altered from any source distribution.
+*/
+
+
+#include "dgGoogol.h"
+#include <string.h>
+
+
+// Default constructor: builds the value zero (positive sign, zero exponent,
+// all-zero mantissa limbs).
+dgGoogol::dgGoogol(void)
+	:m_sign(0)
+	,m_exponent(0)
+{
+	memset (m_mantissa, 0, sizeof (m_mantissa));
+}
+
+// Constructs an arbitrary-precision value from a double: frexp() splits the
+// input into a mantissa in [0.5, 1) and a power-of-two exponent; the mantissa
+// is scaled into the top limb as a 62-bit fixed-point value (the two high
+// bits are kept clear as guard/sign bits), and the sign is stored separately.
+dgGoogol::dgGoogol(double value)
+	:m_sign(0)
+	,m_exponent(0)
+{
+	int32_t exp;
+	double mantissa = fabs (frexp(value, &exp));
+
+	m_exponent = int16_t (exp);
+	m_sign = (value >= 0) ? 0 : 1;
+
+	memset (m_mantissa, 0, sizeof (m_mantissa));
+	m_mantissa[0] = (uint64_t (double (uint64_t(1)<<62) * mantissa));
+
+	// it looks like GCC have problems with this
+//	HACD_ASSERT (m_mantissa[0] >= 0);
+}
+
+
+// Destructor: nothing to release (the mantissa is an inline array).
+dgGoogol::~dgGoogol(void)
+{
+}
+
+// Converts back to a double, using only the most significant limb (so the
+// result is an approximation), re-applying the exponent and the sign.
+double dgGoogol::GetAproximateValue() const
+{
+	double mantissa = (double(1.0f) / double (uint64_t(1)<<62)) * double (m_mantissa[0]);
+	mantissa = ldexp(mantissa, m_exponent) * (m_sign ?  double (-1.0f) : double (1.0f));
+	return mantissa;
+}
+
+// Two's-complement negation of the multi-limb mantissa: invert every limb
+// and add one, propagating the carry from the least significant limb
+// (index DG_GOOGOL_SIZE-1) upward until a limb becomes nonzero.
+void dgGoogol::NegateMantissa (uint64_t* const mantissa) const
+{
+	uint64_t carrier = 1;
+	for (int32_t i = DG_GOOGOL_SIZE - 1; i >= 0; i --) {
+		uint64_t a = ~mantissa[i] + carrier;
+		if (a) {
+			carrier = 0;
+		}
+		mantissa[i] = a;
+	}
+}
+
+// Copies the mantissa into `mantissa`, negating it (two's complement) when
+// the value is negative, producing a signed fixed-point representation
+// suitable for limb-wise addition in operator+.
+void dgGoogol::CopySignedMantissa (uint64_t* const mantissa) const
+{
+	memcpy (mantissa, m_mantissa, sizeof (m_mantissa));
+	if (m_sign) {
+		NegateMantissa (mantissa);
+	}
+}
+
+// Arithmetic right shift of the multi-limb mantissa by `bits`: whole-limb
+// shifts first, then a sub-limb shift, filling vacated high bits with the
+// sign (all-ones when the top limb is negative in two's complement).
+void dgGoogol::ShiftRightMantissa (uint64_t* const mantissa, int32_t bits) const
+{
+	uint64_t carrier = 0;
+	if (int64_t (mantissa[0]) < int64_t (0)) {
+		carrier = uint64_t (-1);
+	}
+	
+	while (bits >= 64) {
+		for (int32_t i = DG_GOOGOL_SIZE - 2; i >= 0; i --) {
+			mantissa[i + 1] = mantissa[i];
+		}
+		mantissa[0] = carrier;
+		bits -= 64;
+	}
+
+	if (bits > 0) {
+		carrier <<= (64 - bits);
+		for (int32_t i = 0; i < DG_GOOGOL_SIZE; i ++) {
+			uint64_t a = mantissa[i];
+			mantissa[i] = (a >> bits) | carrier;
+			carrier = a << (64 - bits);
+		}
+	}
+}
+
+// Counts the leading zero bits of `a` by binary search: each dgCOUNTBIT
+// step tests the upper half of the remaining bit range, adding to the count
+// and narrowing `a` accordingly. Note: for a == 0 the result is 63, not 64;
+// callers only use this on nonzero limbs.
+int32_t dgGoogol::LeadinZeros (uint64_t a) const
+{
+	#define dgCOUNTBIT(mask,add)		\
+	{									\
+		uint64_t test = a & mask;		\
+		n += test ? 0 : add;			\
+		a = test ? test : (a & ~mask);	\
+	}
+
+	int32_t n = 0;
+	dgCOUNTBIT (0xffffffff00000000LL, 32);
+	dgCOUNTBIT (0xffff0000ffff0000LL, 16);
+	dgCOUNTBIT (0xff00ff00ff00ff00LL,  8);
+	dgCOUNTBIT (0xf0f0f0f0f0f0f0f0LL,  4);
+	dgCOUNTBIT (0xccccccccccccccccLL,  2);
+	dgCOUNTBIT (0xaaaaaaaaaaaaaaaaLL,  1);
+
+	return n;
+}
+
+// Normalizes the mantissa so its most significant set bit sits just below
+// the two guard bits of the top limb, returning the exponent adjustment:
+// positive (+1) when the value overflowed into the guard area and was
+// shifted right, negative when leading zero limbs/bits were shifted out.
+// A return of <= -64*DG_GOOGOL_SIZE means the mantissa was all zero.
+int32_t dgGoogol::NormalizeMantissa (uint64_t* const mantissa) const
+{
+//	HACD_ASSERT((mantissa[0] & 0x8000000000000000ULL) == 0);
+
+	int32_t bits = 0;
+	if(int64_t (mantissa[0] * 2) < 0) {
+		// guard bit set: shift right one and bump the exponent
+		bits = 1;
+		ShiftRightMantissa (mantissa, 1);
+	} else {
+		// drop leading all-zero limbs (64 bits at a time)
+		while (!mantissa[0] && bits > (-64 * DG_GOOGOL_SIZE)) {
+			bits -= 64;
+			for (int32_t i = 1; i < DG_GOOGOL_SIZE; i ++) {
+				mantissa[i - 1] = mantissa[i];
+			}
+			mantissa[DG_GOOGOL_SIZE - 1] = 0;
+		}
+
+		if (bits > (-64 * DG_GOOGOL_SIZE)) {
+			// left-shift so exactly 2 guard bits remain above the MSB
+			int32_t n = LeadinZeros (mantissa[0]) - 2;
+			uint64_t carrier = 0;
+			for (int32_t i = DG_GOOGOL_SIZE-1; i >= 0; i --) {
+				uint64_t a = mantissa[i];
+				mantissa[i] = (a << n) | carrier;
+				carrier = a >> (64 - n);
+			}
+			bits -= n;
+		}
+	}
+	return bits;
+}
+
+// Returns 1 when the unsigned addition a + b would overflow 64 bits
+// (i.e. produces a carry), 0 otherwise.
+uint64_t dgGoogol::CheckCarrier (uint64_t a, uint64_t b) const
+{
+	return (uint64_t)(((uint64_t (-1) - b) < a) ? 1 : 0);
+}
+
+// Arbitrary-precision addition: align the two signed mantissas to the larger
+// exponent (shifting the smaller one right), add limb-wise with carry, take
+// the absolute value, and renormalize. A zero top limb is treated as the
+// value zero, so either operand being zero short-circuits to the other.
+dgGoogol dgGoogol::operator+ (const dgGoogol &A) const
+{
+	dgGoogol tmp;
+	if (m_mantissa[0] && A.m_mantissa[0]) {
+		uint64_t mantissa0[DG_GOOGOL_SIZE];
+		uint64_t mantissa1[DG_GOOGOL_SIZE];
+		uint64_t mantissa[DG_GOOGOL_SIZE];
+
+		CopySignedMantissa (mantissa0);
+		A.CopySignedMantissa (mantissa1);
+
+		// align to the larger exponent
+		int32_t exponetDiff = m_exponent - A.m_exponent;
+		int32_t exponent = m_exponent;
+		if (exponetDiff > 0) {
+			ShiftRightMantissa (mantissa1, exponetDiff);
+		} else if (exponetDiff < 0) {
+			exponent = A.m_exponent;
+			ShiftRightMantissa (mantissa0, -exponetDiff);
+		}
+
+		// limb-wise add, least significant limb first, propagating the carry
+		uint64_t carrier = 0;
+		for (int32_t i = DG_GOOGOL_SIZE - 1; i >= 0; i --) {
+			uint64_t m0 = mantissa0[i];
+			uint64_t m1 = mantissa1[i];
+			mantissa[i] = m0 + m1 + carrier;
+			carrier = CheckCarrier (m0, m1) | CheckCarrier (m0 + m1, carrier);
+		}
+
+		// negative result: record the sign and take the absolute value
+		int8_t sign = 0;
+		if (int64_t (mantissa[0]) < 0) {
+			sign = 1;
+			NegateMantissa (mantissa);
+		}
+
+		int32_t bits = NormalizeMantissa (mantissa);
+		if (bits <= (-64 * DG_GOOGOL_SIZE)) {
+			// exact cancellation: the result is zero
+			tmp.m_sign = 0;
+			tmp.m_exponent = 0;
+		} else {
+			tmp.m_sign = sign;
+			tmp.m_exponent = int16_t (exponent + bits);
+		}
+
+		memcpy (tmp.m_mantissa, mantissa, sizeof (m_mantissa));
+
+
+	} else if (A.m_mantissa[0]) {
+		tmp = A;
+	} else {
+		tmp = *this;
+	}
+
+	return tmp;
+}
+
+
+// Subtraction implemented as addition of the sign-flipped operand.
+dgGoogol dgGoogol::operator- (const dgGoogol &A) const
+{
+	dgGoogol tmp (A);
+	tmp.m_sign = !tmp.m_sign;
+	return *this + tmp;
+}
+
+
+// Full 64x64 -> 128-bit unsigned multiply via four 32x32 partial products:
+// low = aLow*bLow, two cross terms summed into `m`, high = aHigh*bHigh plus
+// all carries. The result is returned in the high/low out-parameters.
+void dgGoogol::ExtendeMultiply (uint64_t a, uint64_t b, uint64_t& high, uint64_t& low) const
+{
+	uint64_t bLow = b & 0xffffffff; 
+	uint64_t bHigh = b >> 32; 
+	uint64_t aLow = a & 0xffffffff; 
+	uint64_t aHigh = a >> 32; 
+
+	uint64_t l = bLow * aLow;
+
+	uint64_t c1 = bHigh * aLow;
+	uint64_t c2 = bLow * aHigh;
+	uint64_t m = c1 + c2;
+	uint64_t carrier = CheckCarrier (c1, c2) << 32;
+
+	uint64_t h = bHigh * aHigh + carrier;
+
+	uint64_t ml = m << 32;	
+	uint64_t ll = l + ml;
+	uint64_t mh = (m >> 32) + CheckCarrier (l, ml);	
+	HACD_ASSERT ((mh & ~0xffffffff) == 0);
+
+	uint64_t hh = h + mh;
+
+	low = ll;
+	high = hh;
+}
+
+// Multiplies this value's mantissa by the single limb `scale`, writing the
+// (DG_GOOGOL_SIZE+1)-limb product into dst: each 128-bit partial product's
+// low half lands in dst[i+1] and its high half carries into the next limb;
+// the final carry ends up in dst[0].
+void dgGoogol::ScaleMantissa (uint64_t* const dst, uint64_t scale) const
+{
+	uint64_t carrier = 0;
+	for (int32_t i = DG_GOOGOL_SIZE - 1; i >= 0; i --) {
+		if (m_mantissa[i]) {
+			uint64_t low;
+			uint64_t high;
+			ExtendeMultiply (scale, m_mantissa[i], high, low);
+			uint64_t acc = low + carrier;
+			carrier = CheckCarrier (low, carrier);	
+			HACD_ASSERT (CheckCarrier (carrier, high) == 0);
+			carrier += high;
+			dst[i + 1] = acc;
+		} else {
+			dst[i + 1] = carrier;
+			carrier = 0;
+		}
+
+	}
+	dst[0] = carrier;
+}
+
+// Arbitrary-precision multiplication: schoolbook limb-by-limb products
+// accumulated into a double-width (2 * DG_GOOGOL_SIZE limb) buffer, followed
+// by a left normalization shift that keeps two guard bits, keeping only the
+// top DG_GOOGOL_SIZE limbs. Sign is the XOR of the operand signs; either
+// operand being zero (top limb 0) yields exactly zero.
+// NOTE(review): the shift assumes mantissaAcc[0] has at least 2 leading
+// zeros (bits >= 0), which the guard-bit invariant asserted on entry is
+// meant to guarantee — confirm `bits` can never be negative here.
+dgGoogol dgGoogol::operator* (const dgGoogol &A) const
+{
+	HACD_ASSERT((m_mantissa[0] & 0x8000000000000000ULL) == 0);
+	HACD_ASSERT((A.m_mantissa[0] & 0x8000000000000000ULL) == 0);
+
+	if (m_mantissa[0] && A.m_mantissa[0]) {
+		uint64_t mantissaAcc[DG_GOOGOL_SIZE * 2];
+		memset (mantissaAcc, 0, sizeof (mantissaAcc));
+		for (int32_t i = DG_GOOGOL_SIZE - 1; i >= 0; i --) {
+			uint64_t a = m_mantissa[i];
+			if (a) {
+				// partial product of limb i, shifted into place at offset i
+				uint64_t mantissaScale[2 * DG_GOOGOL_SIZE];
+				memset (mantissaScale, 0, sizeof (mantissaScale));
+				A.ScaleMantissa (&mantissaScale[i], a);
+
+				// accumulate with carry propagation
+				uint64_t carrier = 0;
+				for (int32_t j = 2 * DG_GOOGOL_SIZE - 1; j >= 0; j --) {
+					uint64_t m0 = mantissaAcc[j];
+					uint64_t m1 = mantissaScale[j];
+					mantissaAcc[j] = m0 + m1 + carrier;
+					carrier = CheckCarrier (m0, m1) | CheckCarrier (m0 + m1, carrier);
+				}
+			}
+		}
+
+		// normalize: shift left so two guard bits remain above the MSB
+		uint64_t carrier = 0;
+		int32_t bits = LeadinZeros (mantissaAcc[0]) - 2;
+		for (int32_t i = 2 * DG_GOOGOL_SIZE - 1; i >= 0; i --) {
+			uint64_t a = mantissaAcc[i];
+			mantissaAcc[i] = (a << bits) | carrier;
+			carrier = a >> (64 - bits);
+		}
+
+		int32_t exp = m_exponent + A.m_exponent - (bits - 2);
+
+		dgGoogol tmp;
+		tmp.m_sign = m_sign ^ A.m_sign;
+		tmp.m_exponent = int16_t (exp);
+		memcpy (tmp.m_mantissa, mantissaAcc, sizeof (m_mantissa));
+
+		return tmp;
+	} 
+	return dgGoogol(0.0);
+}
+
+// Division via Newton-Raphson reciprocal: the reciprocal of A is seeded from
+// its double approximation and refined with x <- x * (2 - A*x), which roughly
+// doubles the correct bits per pass; iteration stops when the mantissa stops
+// changing, and the result is (*this) * (1/A).
+// Fix: the loop condition was `!test || (passes > (2 * DG_GOOGOL_SIZE))`,
+// which loops FOREVER once the pass limit is exceeded (the second clause
+// keeps it spinning instead of bailing out). The assert below shows the
+// intent: iterate until converged, but give up after the pass limit.
+dgGoogol dgGoogol::operator/ (const dgGoogol &A) const
+{
+	dgGoogol tmp (1.0 / A.GetAproximateValue());
+	dgGoogol two (2.0);
+
+	// two warm-up refinements before checking for convergence
+	tmp = tmp * (two - A * tmp);
+	tmp = tmp * (two - A * tmp);
+
+	uint64_t copy[DG_GOOGOL_SIZE];
+
+	bool test = true;
+	int passes = 0;
+	do {
+		passes ++;
+		memcpy (copy, tmp.m_mantissa, sizeof (tmp.m_mantissa));
+		tmp = tmp * (two - A * tmp);
+		test = true;
+		for (int32_t i = 0; test && (i < DG_GOOGOL_SIZE); i ++) {
+			test = (copy[i] == tmp.m_mantissa[i]);
+		}
+	} while (!test && (passes < (2 * DG_GOOGOL_SIZE)));
+	HACD_ASSERT (passes <= (2 * DG_GOOGOL_SIZE));
+	return (*this) * tmp;
+}
+
+
+// Compound addition, delegating to operator+. Note: returns a copy rather
+// than a reference, matching the class's existing signature.
+dgGoogol dgGoogol::operator+= (const dgGoogol &A)
+{
+	*this = *this + A;
+	return *this;
+}
+
+// Compound subtraction, delegating to operator-. Note: returns a copy rather
+// than a reference, matching the class's existing signature.
+dgGoogol dgGoogol::operator-= (const dgGoogol &A)
+{
+	*this = *this - A;
+	return *this;
+}
+
+// Truncates the fractional part by masking off all mantissa bits below the
+// binary point (located m_exponent + 2 bits into the mantissa, accounting
+// for the two guard bits). Values with exponent < 1 truncate to zero.
+// Only implemented for non-negative values: for negatives this would be
+// truncation toward zero rather than a true floor, hence the assert.
+dgGoogol dgGoogol::Floor () const
+{
+	if (m_exponent < 1) {
+		return dgGoogol (0.0);
+	} 
+	int32_t bits = m_exponent + 2;
+	int32_t start = 0;
+	while (bits >= 64) {
+		bits -= 64;
+		start ++;
+	}
+	
+	dgGoogol tmp (*this);
+	// zero all limbs entirely below the integer part
+	for (int32_t i = DG_GOOGOL_SIZE - 1; i > start; i --) {
+		tmp.m_mantissa[i] = 0;
+	}
+	// then mask the partial limb that straddles the binary point
+	uint64_t mask = uint64_t(-1) << (64 - bits);
+	tmp.m_mantissa[start] &= mask;
+	if (m_sign) {
+		HACD_ASSERT (0);
+	}
+
+	return tmp;
+}
+
+#ifdef _DEBUG
+// Debug helper: writes a decimal digit string of the value into `string`.
+// First divides by 10 until the value drops below 1, then repeatedly
+// multiplies by 10 and peels off the integer part as the next digit.
+// NOTE(review): caller must supply a buffer large enough for all digits
+// plus the terminator — no bounds are checked.
+void dgGoogol::ToString (char* const string) const
+{
+	dgGoogol tmp (*this);
+	dgGoogol base (10.0);
+
+//char aaaa[256];
+double a = tmp.GetAproximateValue();
+
+	// scale into [0, 1) so digits can be extracted most-significant first
+	while (tmp.GetAproximateValue() > 1.0) {
+		tmp = tmp/base;
+	}
+a = tmp.GetAproximateValue();
+
+	int32_t index = 0;
+//	tmp.m_exponent = 1;
+	while (tmp.m_mantissa[0]) {
+
+//double xxx = tmp.GetAproximateValue();
+//double xxx1 = digit.GetAproximateValue();
+//double m = floor (a);
+//a = a - m;
+//a = a * 10;
+//aaaa[index] = char (m) + '0';
+
+		// next digit = integer part of (value * 10)
+		tmp = tmp * base;
+		dgGoogol digit (tmp.Floor());
+		tmp -= digit;
+		double val = digit.GetAproximateValue();
+		string[index] = char (val) + '0';
+		index ++;
+	}
+	string[index] = 0;
+}
+
+
+#endif \ No newline at end of file
diff --git a/APEX_1.4/shared/general/HACD/src/dgIntersections.cpp b/APEX_1.4/shared/general/HACD/src/dgIntersections.cpp
new file mode 100644
index 00000000..b65448c9
--- /dev/null
+++ b/APEX_1.4/shared/general/HACD/src/dgIntersections.cpp
@@ -0,0 +1,509 @@
+/* Copyright (c) <2003-2011> <Julio Jerez, Newton Game Dynamics>
+*
+* This software is provided 'as-is', without any express or implied
+* warranty. In no event will the authors be held liable for any damages
+* arising from the use of this software.
+*
+* Permission is granted to anyone to use this software for any purpose,
+* including commercial applications, and to alter it and redistribute it
+* freely, subject to the following restrictions:
+*
+* 1. The origin of this software must not be misrepresented; you must not
+* claim that you wrote the original software. If you use this software
+* in a product, an acknowledgment in the product documentation would be
+* appreciated but is not required.
+*
+* 2. Altered source versions must be plainly marked as such, and must not be
+* misrepresented as being the original software.
+*
+* 3. This notice may not be removed or altered from any source distribution.
+*/
+
+#include "dgGoogol.h"
+#include "dgIntersections.h"
+
+#define USE_FLOAT_VERSION
+
+//#define DG_RAY_TOL_ERROR (float (-1.0e-5f))
+#define DG_RAY_TOL_ERROR (float (-1.0e-3f))
+//#define DG_RAY_TOL_ERROR (float (-1.0e-2f))
+//#define DG_RAY_TOL_ERROR (float (-1.0e-1f))
+
+// Precomputes per-ray data for repeated intersection queries against the
+// segment l0 -> l1: the segment direction, per-axis reciprocals, parallel-axis
+// flags, and an angular tolerance used by PolygonIntersect.
+dgFastRayTest::dgFastRayTest(const dgVector& l0, const dgVector& l1)
+	:m_p0 (l0), m_p1(l1), m_diff (l1 - l0)
+	,m_minT(float (0.0f), float (0.0f), float (0.0f), float (0.0f))
+	,m_maxT(float (1.0f), float (1.0f), float (1.0f), float (1.0f))
+	,m_tolerance (DG_RAY_TOL_ERROR, DG_RAY_TOL_ERROR, DG_RAY_TOL_ERROR, DG_RAY_TOL_ERROR)
+	,m_zero(float (0.0f), float (0.0f), float (0.0f), float (0.0f))
+{
+	m_diff.m_w = float (0.0f);
+	// an axis is flagged "parallel" when the segment barely advances along it;
+	// BoxTest then falls back to a point-in-slab check for that axis
+	m_isParallel[0] = (dgAbsf (m_diff.m_x) > float (1.0e-8f)) ? 0 : int32_t (0xffffffff);
+	m_isParallel[1] = (dgAbsf (m_diff.m_y) > float (1.0e-8f)) ? 0 : int32_t (0xffffffff);
+	m_isParallel[2] = (dgAbsf (m_diff.m_z) > float (1.0e-8f)) ? 0 : int32_t (0xffffffff);
+	m_isParallel[3] = 0;
+
+	// reciprocal direction for slab tests; a huge sentinel stands in for the
+	// (never used) reciprocal of a near-zero component
+	m_dpInv.m_x = (!m_isParallel[0]) ? (float (1.0f) / m_diff.m_x) : float (1.0e20f);
+	m_dpInv.m_y = (!m_isParallel[1]) ? (float (1.0f) / m_diff.m_y) : float (1.0e20f);
+	m_dpInv.m_z = (!m_isParallel[2]) ? (float (1.0f) / m_diff.m_z) : float (1.0e20f);
+	m_dpInv.m_w = float (0.0f);
+	m_dpBaseInv = m_dpInv;
+
+	// angular rejection threshold scaled by the segment length (0.0175 ~ sin 1 degree)
+	m_dirError = -float (0.0175f) * dgSqrt (m_diff % m_diff);
+
+//	tollerance = simd_set (DG_RAY_TOL_ERROR, DG_RAY_TOL_ERROR, DG_RAY_TOL_ERROR, DG_RAY_TOL_ERROR);
+//	m_tolerance = dgVector (DG_RAY_TOL_ERROR, DG_RAY_TOL_ERROR, DG_RAY_TOL_ERROR, float (0.0f));
+
+}
+
+// Conservative segment-vs-AABB slab test over the parametric range [0, 1].
+// Returns 1 when the segment may overlap the box, 0 when it provably misses.
+int32_t dgFastRayTest::BoxTest (const dgVector& minBox, const dgVector& maxBox) const
+{
+	float tEnter = 0.0f;
+	float tExit = 1.0f;
+
+	for (int32_t axis = 0; axis < 3; axis ++) {
+		if (m_isParallel[axis]) {
+			// the segment does not advance along this axis; its origin must
+			// lie strictly inside the slab or the box cannot be hit
+			if ((m_p0[axis] <= minBox[axis]) || (m_p0[axis] >= maxBox[axis])) {
+				return 0;
+			}
+		} else {
+			// parametric entry/exit of the slab on this axis
+			float tLow = (minBox[axis] - m_p0[axis]) * m_dpInv[axis];
+			float tHigh = (maxBox[axis] - m_p0[axis]) * m_dpInv[axis];
+			if (tLow > tHigh) {
+				Swap (tLow, tHigh);
+			}
+			tEnter = (tLow > tEnter) ? tLow : tEnter;
+			tExit = (tHigh < tExit) ? tHigh : tExit;
+			// slabs no longer overlap: the segment misses the box
+			if (tEnter > tExit) {
+				return 0;
+			}
+		}
+	}
+	return 0x1;
+}
+
+
+// Intersects the precomputed segment with a single-sided convex polygon.
+// Returns the parametric hit distance in [0, 1] along the segment, or 1.2f
+// (an out-of-range sentinel) when there is no front-facing hit. 'polygon' is
+// a strided vertex array indexed by indexArray[0..indexCount-1].
+float dgFastRayTest::PolygonIntersect (const dgVector& normal, const float* const polygon, int32_t strideInBytes, const int32_t* const indexArray, int32_t indexCount) const
+{
+	HACD_ASSERT (m_p0.m_w == m_p1.m_w);
+
+
+	#ifndef __USE_DOUBLE_PRECISION__
+	float unrealible = float (1.0e10f);
+	#endif
+
+	// reject back faces and near-grazing segments (m_dirError is negative)
+	float dist = normal % m_diff;
+	if (dist < m_dirError) {
+
+		int32_t stride = int32_t (strideInBytes / sizeof (float));
+
+		dgVector v0 (&polygon[indexArray[indexCount - 1] * stride]);
+		dgVector p0v0 (v0 - m_p0);
+		float tOut = normal % p0v0;
+		// this only work for convex polygons and for single side faces
+		// walk the polygon around the edges and calculate the volume
+		if ((tOut < float (0.0f)) && (tOut > dist)) {
+			for (int32_t i = 0; i < indexCount; i ++) {
+				int32_t i2 = indexArray[i] * stride;
+				dgVector v1 (&polygon[i2]);
+				dgVector p0v1 (v1 - m_p0);
+				// calculate the volume formed by the line and the edge of the polygon
+				float alpha = (m_diff * p0v1) % p0v0;
+				// if a least one volume is negative it mean the line cross the polygon outside this edge and do not hit the face
+				if (alpha < DG_RAY_TOL_ERROR) {
+					#ifdef __USE_DOUBLE_PRECISION__
+					return 1.2f;
+					#else
+					unrealible = alpha;
+					break;
+					#endif
+				}
+				p0v0 = p0v1;
+			}
+
+			#ifndef __USE_DOUBLE_PRECISION__
+			// a marginally negative volume: the float result is not reliable,
+			// so redo the whole edge walk in double precision
+			if ((unrealible < float (0.0f)) && (unrealible > (DG_RAY_TOL_ERROR * float (10.0f)))) {
+				// the edge is too close to an edge float is not reliable, do the calculation with double
+				dgBigVector v0_ (v0);
+				dgBigVector m_p0_ (m_p0);
+				//dgBigVector m_p1_ (m_p1);
+				dgBigVector p0v0_ (v0_ - m_p0_);
+				dgBigVector normal_ (normal);
+				dgBigVector diff_ (m_diff);
+				double tOut_ = normal_ % p0v0_;
+				//double dist_ = normal_ % diff_;
+				// NOTE(review): this guard tests the float tOut/dist rather
+				// than the freshly computed tOut_/dist_ (dist_ is commented
+				// out above) — looks unintentional; confirm against upstream
+				if ((tOut < double (0.0f)) && (tOut > dist)) {
+					for (int32_t i = 0; i < indexCount; i ++) {
+						int32_t i2 = indexArray[i] * stride;
+						dgBigVector v1 (&polygon[i2]);
+						dgBigVector p0v1_ (v1 - m_p0_);
+						// calculate the volume formed by the line and the edge of the polygon
+						double alpha = (diff_ * p0v1_) % p0v0_;
+						// if a least one volume is negative it mean the line cross the polygon outside this edge and do not hit the face
+						if (alpha < DG_RAY_TOL_ERROR) {
+							return 1.2f;
+						}
+						p0v0_ = p0v1_;
+					}
+
+					tOut = float (tOut_);
+				}
+			}
+			#endif
+
+			//the line is to the left of all the polygon edges,
+			//then the intersection is the point we the line intersect the plane of the polygon
+			tOut = tOut / dist;
+			HACD_ASSERT (tOut >= float (0.0f));
+			HACD_ASSERT (tOut <= float (1.0f));
+			return tOut;
+		}
+	}
+	return float (1.2f);
+
+}
+
+
+
+
+// Clips the segment [p0, p1] in place against the axis aligned box
+// [boxP0, boxP1]. Returns true when a portion of the segment remains,
+// false when the segment lies entirely outside the box.
+bool dgRayBoxClip (dgVector& p0, dgVector& p1, const dgVector& boxP0, const dgVector& boxP1)
+{
+	for (int axis = 0; axis < 3; axis ++) {
+		float distP0;
+		float distP1;
+
+		// clip against the upper face of the box on this axis
+		distP0 = boxP1[axis] - p0[axis];
+		if (distP0 > float (0.0f)) {
+			distP1 = boxP1[axis] - p1[axis];
+			if (distP1 < float (0.0f)) {
+				p1 = p0 + (p1 - p0).Scale (distP0 / (p1[axis] - p0[axis]));
+				p1[axis] = boxP1[axis];
+			}
+		} else {
+			distP1 = boxP1[axis] - p1[axis];
+			if (distP1 > float (0.0f)) {
+				p0 += (p1 - p0).Scale (distP0 / (p1[axis] - p0[axis]));
+				p0[axis] = boxP1[axis];
+			} else {
+				return false;
+			}
+		}
+
+		// clip against the lower face of the box on this axis
+		distP0 = boxP0[axis] - p0[axis];
+		if (distP0 < float (0.0f)) {
+			distP1 = boxP0[axis] - p1[axis];
+			if (distP1 > float (0.0f)) {
+				p1 = p0 + (p1 - p0).Scale (distP0 / (p1[axis] - p0[axis]));
+				p1[axis] = boxP0[axis];
+			}
+		} else {
+			distP1 = boxP0[axis] - p1[axis];
+			if (distP1 < float (0.0f)) {
+				p0 += (p1 - p0).Scale (distP0 / (p1[axis] - p0[axis]));
+				p0[axis] = boxP0[axis];
+			} else {
+				return false;
+			}
+		}
+	}
+	return true;
+}
+
+
+// Returns the point on the segment [ray_p0, ray_p1] closest to 'point'.
+dgVector dgPointToRayDistance (const dgVector& point, const dgVector& ray_p0, const dgVector& ray_p1)
+{
+	dgVector dir (ray_p1 - ray_p0);
+	// project the point onto the ray, then clamp the parameter to the segment
+	float param = ((point - ray_p0) % dir) / (dir % dir);
+	param = ClampValue (param, float(float (0.0f)), float(float (1.0f)));
+	return ray_p0 + dir.Scale (param);
+}
+
+// Computes the pair of closest points between two segments: pOut on
+// [ray_p0, ray_p1] and qOut on [ray_q0, ray_q1]. Classic segment-segment
+// closest-point computation: solve on the infinite lines, then clamp the
+// parameters to [0, 1] edge by edge.
+void dgRayToRayDistance (const dgVector& ray_p0, const dgVector& ray_p1, const dgVector& ray_q0, const dgVector& ray_q1, dgVector& pOut, dgVector& qOut)
+{
+	float sN;
+	float tN;
+
+	dgVector u (ray_p1 - ray_p0);
+	dgVector v (ray_q1 - ray_q0);
+	dgVector w (ray_p0 - ray_q0);
+
+	float a = u % u;        // always >= 0
+	float b = u % v;
+	float c = v % v;        // always >= 0
+	float d = u % w;
+	float e = v % w;
+	float D = a*c - b*b;   // always >= 0
+	float sD = D;			// sc = sN / sD, default sD = D >= 0
+	float tD = D;			// tc = tN / tD, default tD = D >= 0
+
+	// compute the line parameters of the two closest points
+	if (D < float (1.0e-8f)) { // the lines are almost parallel
+		sN = float (0.0f);        // force using point P0 on segment S1
+		sD = float (1.0f);        // to prevent possible division by 0.0 later
+		tN = e;
+		tD = c;
+	} else {                // get the closest points on the infinite lines
+		sN = (b*e - c*d);
+		tN = (a*e - b*d);
+		if (sN < float (0.0f)) {       // sc < 0 => the s=0 edge is visible
+			sN = float (0.0f);
+			tN = e;
+			tD = c;
+		}
+		else if (sN > sD) {  // sc > 1 => the s=1 edge is visible
+			sN = sD;
+			tN = e + b;
+			tD = c;
+		}
+	}
+
+
+	if (tN < float (0.0f)) {           // tc < 0 => the t=0 edge is visible
+		tN = float (0.0f);
+		// recompute sc for this edge
+		if (-d < float (0.0f))
+			sN = float (0.0f);
+		else if (-d > a)
+			sN = sD;
+		else {
+			sN = -d;
+			sD = a;
+		}
+	}
+	else if (tN > tD) {      // tc > 1 => the t=1 edge is visible
+		tN = tD;
+		// recompute sc for this edge
+		if ((-d + b) < float (0.0f))
+			sN = float (0.0f);
+		else if ((-d + b) > a)
+			sN = sD;
+		else {
+			sN = (-d + b);
+			sD = a;
+		}
+	}
+
+	// finally do the division to get sc and tc
+	// (the epsilon guard avoids 0/0 when a numerator collapsed to zero)
+	float sc = (dgAbsf(sN) < float(1.0e-8f) ? float (0.0f) : sN / sD);
+	float tc = (dgAbsf(tN) < float(1.0e-8f) ? float (0.0f) : tN / tD);
+
+	pOut = ray_p0 + u.Scale (sc);
+	qOut = ray_q0 + v.Scale (tc);
+}
+
+
+
+
+// Returns the point of triangle (p0, p1, p2) closest to 'point'.
+// Classifies the query point into one of the triangle's Voronoi regions
+// (three vertices, three edges, or the interior) using barycentric signs,
+// and returns the projection for that region.
+dgVector dgPointToTriangleDistance (const dgVector& point, const dgVector& p0, const dgVector& p1, const dgVector& p2)
+{
+	//	const dgVector p (float (0.0f), float (0.0f), float (0.0f));
+	const dgVector p10 (p1 - p0);
+	const dgVector p20 (p2 - p0);
+	const dgVector p_p0 (point - p0);
+
+	float alpha1 = p10 % p_p0;
+	float alpha2 = p20 % p_p0;
+	// vertex region p0
+	if ((alpha1 <= float (0.0f)) && (alpha2 <= float (0.0f))) {
+		return p0;
+	}
+
+	dgVector p_p1 (point - p1);
+	float alpha3 = p10 % p_p1;
+	float alpha4 = p20 % p_p1;
+	// vertex region p1
+	if ((alpha3 >= float (0.0f)) && (alpha4 <= alpha3)) {
+		return p1;
+	}
+
+	// edge region p0-p1: project onto the edge
+	float vc = alpha1 * alpha4 - alpha3 * alpha2;
+	if ((vc <= float (0.0f)) && (alpha1 >= float (0.0f)) && (alpha3 <= float (0.0f))) {
+		float t = alpha1 / (alpha1 - alpha3);
+		HACD_ASSERT (t >= float (0.0f));
+		HACD_ASSERT (t <= float (1.0f));
+		return p0 + p10.Scale (t);
+	}
+
+
+	dgVector p_p2 (point - p2);
+	float alpha5 = p10 % p_p2;
+	float alpha6 = p20 % p_p2;
+	// vertex region p2
+	if ((alpha6 >= float (0.0f)) && (alpha5 <= alpha6)) {
+		return p2;
+	}
+
+
+	// edge region p0-p2: project onto the edge
+	float vb = alpha5 * alpha2 - alpha1 * alpha6;
+	if ((vb <= float (0.0f)) && (alpha2 >= float (0.0f)) && (alpha6 <= float (0.0f))) {
+		float t = alpha2 / (alpha2 - alpha6);
+		HACD_ASSERT (t >= float (0.0f));
+		HACD_ASSERT (t <= float (1.0f));
+		return p0 + p20.Scale (t);
+	}
+
+
+	// edge region p1-p2: project onto the edge
+	float va = alpha3 * alpha6 - alpha5 * alpha4;
+	if ((va <= float (0.0f)) && ((alpha4 - alpha3) >= float (0.0f)) && ((alpha5 - alpha6) >= float (0.0f))) {
+		float t = (alpha4 - alpha3) / ((alpha4 - alpha3) + (alpha5 - alpha6));
+		HACD_ASSERT (t >= float (0.0f));
+		HACD_ASSERT (t <= float (1.0f));
+		return p1 + (p2 - p1).Scale (t);
+	}
+
+	// interior region: interpolate with normalized barycentric coordinates
+	float den = float(float (1.0f)) / (va + vb + vc);
+	float t = vb * den;
+	float s = vc * den;
+	HACD_ASSERT (t >= float (0.0f));
+	HACD_ASSERT (s >= float (0.0f));
+	HACD_ASSERT (t <= float (1.0f));
+	HACD_ASSERT (s <= float (1.0f));
+	return p0 + p10.Scale (t) + p20.Scale (s);
+}
+
+// Double precision overload of dgPointToTriangleDistance: returns the point
+// of triangle (p0, p1, p2) closest to 'point', classifying the query into
+// the triangle's Voronoi regions (vertices, edges, interior).
+dgBigVector dgPointToTriangleDistance (const dgBigVector& point, const dgBigVector& p0, const dgBigVector& p1, const dgBigVector& p2)
+{
+	//	const dgBigVector p (double (0.0f), double (0.0f), double (0.0f));
+	const dgBigVector p10 (p1 - p0);
+	const dgBigVector p20 (p2 - p0);
+	const dgBigVector p_p0 (point - p0);
+
+	double alpha1 = p10 % p_p0;
+	double alpha2 = p20 % p_p0;
+	// vertex region p0
+	if ((alpha1 <= double (0.0f)) && (alpha2 <= double (0.0f))) {
+		return p0;
+	}
+
+	dgBigVector p_p1 (point - p1);
+	double alpha3 = p10 % p_p1;
+	double alpha4 = p20 % p_p1;
+	// vertex region p1
+	if ((alpha3 >= double (0.0f)) && (alpha4 <= alpha3)) {
+		return p1;
+	}
+
+	// edge region p0-p1: project onto the edge
+	double vc = alpha1 * alpha4 - alpha3 * alpha2;
+	if ((vc <= double (0.0f)) && (alpha1 >= double (0.0f)) && (alpha3 <= double (0.0f))) {
+		double t = alpha1 / (alpha1 - alpha3);
+		HACD_ASSERT (t >= double (0.0f));
+		HACD_ASSERT (t <= double (1.0f));
+		return p0 + p10.Scale (t);
+	}
+
+
+	dgBigVector p_p2 (point - p2);
+	double alpha5 = p10 % p_p2;
+	double alpha6 = p20 % p_p2;
+	// vertex region p2
+	if ((alpha6 >= double (0.0f)) && (alpha5 <= alpha6)) {
+		return p2;
+	}
+
+
+	// edge region p0-p2: project onto the edge
+	double vb = alpha5 * alpha2 - alpha1 * alpha6;
+	if ((vb <= double (0.0f)) && (alpha2 >= double (0.0f)) && (alpha6 <= double (0.0f))) {
+		double t = alpha2 / (alpha2 - alpha6);
+		HACD_ASSERT (t >= double (0.0f));
+		HACD_ASSERT (t <= double (1.0f));
+		return p0 + p20.Scale (t);
+	}
+
+
+	// edge region p1-p2: project onto the edge
+	double va = alpha3 * alpha6 - alpha5 * alpha4;
+	if ((va <= double (0.0f)) && ((alpha4 - alpha3) >= double (0.0f)) && ((alpha5 - alpha6) >= double (0.0f))) {
+		double t = (alpha4 - alpha3) / ((alpha4 - alpha3) + (alpha5 - alpha6));
+		HACD_ASSERT (t >= double (0.0f));
+		HACD_ASSERT (t <= double (1.0f));
+		return p1 + (p2 - p1).Scale (t);
+	}
+
+	// interior region: interpolate with normalized barycentric coordinates.
+	// Fix: the original wrote float(double(1.0f)) / (...), truncating the
+	// reciprocal to float precision in this double-precision overload
+	// (a copy-paste from the float version); keep the division in double.
+	double den = double (1.0f) / (va + vb + vc);
+	double t = vb * den;
+	double s = vc * den;
+	HACD_ASSERT (t >= double (0.0f));
+	HACD_ASSERT (s >= double (0.0f));
+	HACD_ASSERT (t <= double (1.0f));
+	HACD_ASSERT (s <= double (1.0f));
+	return p0 + p10.Scale (t) + p20.Scale (s);
+}
+
+
+// Finds the closest point on a (fanned) polygon to p by testing the point
+// against each triangle of the fan (v0, v[i-1], v[i]). Writes the result to
+// 'out' and returns true only when its squared distance is within
+// bailDistance squared.
+// NOTE(review): the function begins with HACD_ALWAYS_ASSERT(), so in builds
+// where that macro fires unconditionally this routine is effectively
+// disabled; presumably it is dead/unported code — confirm before relying on it.
+bool dgPointToPolygonDistance (const dgVector& p, const float* const polygon,  int32_t strideInBytes,
+							   const int32_t* const indexArray, int32_t indexCount, float bailDistance, dgVector& out)
+{
+	HACD_ALWAYS_ASSERT();
+	int32_t stride = int32_t (strideInBytes / sizeof (float));
+
+	int32_t i0 = indexArray[0] * stride;
+	int32_t i1 = indexArray[1] * stride;
+
+	const dgVector v0 (&polygon[i0]);
+	dgVector v1 (&polygon[i1]);
+	dgVector closestPoint (float (0.0f), float (0.0f), float (0.0f), float (0.0f));
+	float minDist = float (1.0e20f);
+	// walk the triangle fan anchored at v0, keeping the nearest candidate
+	for (int32_t i = 2; i < indexCount; i ++) {
+		int32_t i2 = indexArray[i] * stride;
+		const dgVector v2 (&polygon[i2]);
+		const dgVector q (dgPointToTriangleDistance (p, v0, v1, v2));
+		const dgVector error (q - p);
+		float dist = error % error;
+		if (dist < minDist) {
+			minDist = dist;
+			closestPoint = q;
+		}
+		v1 = v2;
+	}
+
+	// reject results beyond the caller's bail-out radius
+	if (minDist > (bailDistance * bailDistance)) {
+		return false;
+	}
+
+	out = closestPoint;
+	return true;
+}
+
+
+
+// Computes the barycentric coordinates (with respect to triangle ABC) of the
+// crossing between segment p0-p1 and the triangle, using exact arbitrary
+// precision (dgGoogol) arithmetic. Returns w = -1.0 to signal a miss.
+dgBigVector LineTriangleIntersection (const dgBigVector& p0, const dgBigVector& p1, const dgBigVector& A, const dgBigVector& B, const dgBigVector& C)
+{
+	dgHugeVector ph0 (p0);
+	dgHugeVector ph1 (p1);
+	dgHugeVector Ah (A);
+	dgHugeVector Bh (B);
+	dgHugeVector Ch (C);
+
+	dgHugeVector lineDir (ph1 - ph0);
+	dgHugeVector toA (Ah - ph0);
+	dgHugeVector toB (Bh - ph0);
+	dgHugeVector toC (Ch - ph0);
+
+	// signed volume of the tetrahedron opposite each vertex; a negative
+	// volume means the line passes outside the corresponding edge
+	dgGoogol volA ((toB * toC) % lineDir);
+	double wA = volA.GetAproximateValue();
+	if (wA < double (0.0f)) {
+		return dgBigVector (float (0.0f), float (0.0f), float (0.0f), float (-1.0f));
+	}
+
+	dgGoogol volB ((toC * toA) % lineDir);
+	double wB = volB.GetAproximateValue();
+	if (wB < double (0.0f)) {
+		return dgBigVector (float (0.0f), float (0.0f), float (0.0f), float (-1.0f));
+	}
+
+	dgGoogol volC ((toA * toB) % lineDir);
+	double wC = volC.GetAproximateValue();
+	if (wC < double (0.0f)) {
+		return dgBigVector (float (0.0f), float (0.0f), float (0.0f), float (-1.0f));
+	}
+
+	// all three volumes are non negative: normalize into barycentric weights
+	dgGoogol total = volA + volB + volC;
+	double den = total.GetAproximateValue();
+
+	return dgBigVector (wA / den, wB / den, wC / den, float (0.0f));
+}
+
+
+
+
+
+
+
+
diff --git a/APEX_1.4/shared/general/HACD/src/dgMatrix.cpp b/APEX_1.4/shared/general/HACD/src/dgMatrix.cpp
new file mode 100644
index 00000000..77a5faf0
--- /dev/null
+++ b/APEX_1.4/shared/general/HACD/src/dgMatrix.cpp
@@ -0,0 +1,574 @@
+/* Copyright (c) <2003-2011> <Julio Jerez, Newton Game Dynamics>
+*
+* This software is provided 'as-is', without any express or implied
+* warranty. In no event will the authors be held liable for any damages
+* arising from the use of this software.
+*
+* Permission is granted to anyone to use this software for any purpose,
+* including commercial applications, and to alter it and redistribute it
+* freely, subject to the following restrictions:
+*
+* 1. The origin of this software must not be misrepresented; you must not
+* claim that you wrote the original software. If you use this software
+* in a product, an acknowledgment in the product documentation would be
+* appreciated but is not required.
+*
+* 2. Altered source versions must be plainly marked as such, and must not be
+* misrepresented as being the original software.
+*
+* 3. This notice may not be removed or altered from any source distribution.
+*/
+
+#include "dgTypes.h"
+#include "dgMatrix.h"
+#include "dgQuaternion.h"
+
+
+// File-scope singletons for the all-zero and identity 4x4 matrices,
+// exposed read-only through the accessors below.
+static dgMatrix zeroMatrix (dgVector (float(0.0f), float(0.0f), float(0.0f), float(0.0f)),
+	dgVector (float(0.0f), float(0.0f), float(0.0f), float(0.0f)),
+	dgVector (float(0.0f), float(0.0f), float(0.0f), float(0.0f)),
+	dgVector (float(0.0f), float(0.0f), float(0.0f), float(0.0f)));
+
+static dgMatrix identityMatrix (dgVector (float(1.0f), float(0.0f), float(0.0f), float(0.0f)),
+	dgVector (float(0.0f), float(1.0f), float(0.0f), float(0.0f)),
+	dgVector (float(0.0f), float(0.0f), float(1.0f), float(0.0f)),
+	dgVector (float(0.0f), float(0.0f), float(0.0f), float(1.0f)));
+
+// Returns the shared identity matrix singleton.
+const dgMatrix& dgGetIdentityMatrix()
+{
+	return identityMatrix;
+}
+
+// Returns the shared zero matrix singleton.
+const dgMatrix& dgGetZeroMatrix ()
+{
+	return zeroMatrix;
+}
+
+
+// Builds a rigid transform from a rotation quaternion (assumed unit length)
+// and a translation. Standard quaternion-to-rotation-matrix conversion.
+dgMatrix::dgMatrix (const dgQuaternion &rotation, const dgVector &position)
+{
+	// twice the pairwise products of the quaternion components
+	float qxx = float (2.0f) * rotation.m_q1 * rotation.m_q1;
+	float qyy = float (2.0f) * rotation.m_q2 * rotation.m_q2;
+	float qzz = float (2.0f) * rotation.m_q3 * rotation.m_q3;
+
+	float qxy = float (2.0f) * rotation.m_q1 * rotation.m_q2;
+	float qxz = float (2.0f) * rotation.m_q1 * rotation.m_q3;
+	float qxw = float (2.0f) * rotation.m_q1 * rotation.m_q0;
+	float qyz = float (2.0f) * rotation.m_q2 * rotation.m_q3;
+	float qyw = float (2.0f) * rotation.m_q2 * rotation.m_q0;
+	float qzw = float (2.0f) * rotation.m_q3 * rotation.m_q0;
+
+	// the three rotation rows, w component cleared
+	m_front = dgVector (float(1.0f) - qyy - qzz, qxy + qzw, qxz - qyw, float(0.0f));
+	m_up    = dgVector (qxy - qzw, float(1.0f) - qxx - qzz, qyz + qxw, float(0.0f));
+	m_right = dgVector (qxz + qyw, qyz - qxw, float(1.0f) - qxx - qyy, float(0.0f));
+
+	// translation row with homogeneous w = 1
+	m_posit.m_x = position.m_x;
+	m_posit.m_y = position.m_y;
+	m_posit.m_z = position.m_z;
+	m_posit.m_w = float(1.0f);
+}
+
+// Full 4x4 matrix product (this * B), written out explicitly with every
+// row/column dot product unrolled.
+dgMatrix dgMatrix::operator* (const dgMatrix &B) const
+{
+	const dgMatrix& A = *this;
+	return dgMatrix (dgVector (A[0][0] * B[0][0] + A[0][1] * B[1][0] + A[0][2] * B[2][0] + A[0][3] * B[3][0],
+							   A[0][0] * B[0][1] + A[0][1] * B[1][1] + A[0][2] * B[2][1] + A[0][3] * B[3][1],
+							   A[0][0] * B[0][2] + A[0][1] * B[1][2] + A[0][2] * B[2][2] + A[0][3] * B[3][2],
+							   A[0][0] * B[0][3] + A[0][1] * B[1][3] + A[0][2] * B[2][3] + A[0][3] * B[3][3]),
+					 dgVector (A[1][0] * B[0][0] + A[1][1] * B[1][0] + A[1][2] * B[2][0] + A[1][3] * B[3][0],
+							   A[1][0] * B[0][1] + A[1][1] * B[1][1] + A[1][2] * B[2][1] + A[1][3] * B[3][1],
+							   A[1][0] * B[0][2] + A[1][1] * B[1][2] + A[1][2] * B[2][2] + A[1][3] * B[3][2],
+							   A[1][0] * B[0][3] + A[1][1] * B[1][3] + A[1][2] * B[2][3] + A[1][3] * B[3][3]),
+					 dgVector (A[2][0] * B[0][0] + A[2][1] * B[1][0] + A[2][2] * B[2][0] + A[2][3] * B[3][0],
+							   A[2][0] * B[0][1] + A[2][1] * B[1][1] + A[2][2] * B[2][1] + A[2][3] * B[3][1],
+							   A[2][0] * B[0][2] + A[2][1] * B[1][2] + A[2][2] * B[2][2] + A[2][3] * B[3][2],
+							   A[2][0] * B[0][3] + A[2][1] * B[1][3] + A[2][2] * B[2][3] + A[2][3] * B[3][3]),
+					 dgVector (A[3][0] * B[0][0] + A[3][1] * B[1][0] + A[3][2] * B[2][0] + A[3][3] * B[3][0],
+							   A[3][0] * B[0][1] + A[3][1] * B[1][1] + A[3][2] * B[2][1] + A[3][3] * B[3][1],
+							   A[3][0] * B[0][2] + A[3][1] * B[1][2] + A[3][2] * B[2][2] + A[3][3] * B[3][2],
+							   A[3][0] * B[0][3] + A[3][1] * B[1][3] + A[3][2] * B[2][3] + A[3][3] * B[3][3]));
+}
+
+
+
+// Applies the full affine transform (rotation + translation) to 'count' xyz
+// triplets, reading from src and writing to dst with byte strides.
+// Reads complete before the matching write, so src and dst may alias.
+void dgMatrix::TransformTriplex (float* const dst, int32_t dstStrideInBytes, const float* const src, int32_t srcStrideInBytes, int32_t count) const
+{
+	int32_t dstStep = dstStrideInBytes / (int32_t)sizeof (float);
+	int32_t srcStep = srcStrideInBytes / (int32_t)sizeof (float);
+
+	const float* srcPtr = src;
+	float* dstPtr = dst;
+	for (int32_t i = 0; i < count; i ++) {
+		float x = srcPtr[0];
+		float y = srcPtr[1];
+		float z = srcPtr[2];
+		dstPtr[0] = x * m_front.m_x + y * m_up.m_x + z * m_right.m_x + m_posit.m_x;
+		dstPtr[1] = x * m_front.m_y + y * m_up.m_y + z * m_right.m_y + m_posit.m_y;
+		dstPtr[2] = x * m_front.m_z + y * m_up.m_z + z * m_right.m_z + m_posit.m_z;
+		srcPtr += srcStep;
+		dstPtr += dstStep;
+	}
+}
+
+// Double precision overload: applies the full affine transform to 'count'
+// xyz triplets, reading from src and writing to dst with byte strides.
+// Reads complete before the matching write, so src and dst may alias.
+void dgMatrix::TransformTriplex (double* const dst, int32_t dstStrideInBytes, const double* const src, int32_t srcStrideInBytes, int32_t count) const
+{
+	int32_t dstStep = dstStrideInBytes / (int32_t)sizeof (double);
+	int32_t srcStep = srcStrideInBytes / (int32_t)sizeof (double);
+
+	const double* srcPtr = src;
+	double* dstPtr = dst;
+	for (int32_t i = 0; i < count; i ++) {
+		double x = srcPtr[0];
+		double y = srcPtr[1];
+		double z = srcPtr[2];
+		dstPtr[0] = x * m_front.m_x + y * m_up.m_x + z * m_right.m_x + m_posit.m_x;
+		dstPtr[1] = x * m_front.m_y + y * m_up.m_y + z * m_right.m_y + m_posit.m_y;
+		dstPtr[2] = x * m_front.m_z + y * m_up.m_z + z * m_right.m_z + m_posit.m_z;
+		srcPtr += srcStep;
+		dstPtr += dstStep;
+	}
+}
+
+
+// Mixed precision overload: reads float xyz triplets, transforms them in
+// double precision, and writes double results. Reads complete before the
+// matching write, so src and dst may alias.
+void dgMatrix::TransformTriplex (double* const dst, int32_t dstStrideInBytes, const float* const src, int32_t srcStrideInBytes, int32_t count) const
+{
+	int32_t dstStep = dstStrideInBytes / (int32_t)sizeof (double);
+	int32_t srcStep = srcStrideInBytes / (int32_t)sizeof (float);
+
+	const float* srcPtr = src;
+	double* dstPtr = dst;
+	for (int32_t i = 0; i < count; i ++) {
+		double x = srcPtr[0];
+		double y = srcPtr[1];
+		double z = srcPtr[2];
+		dstPtr[0] = x * m_front.m_x + y * m_up.m_x + z * m_right.m_x + m_posit.m_x;
+		dstPtr[1] = x * m_front.m_y + y * m_up.m_y + z * m_right.m_y + m_posit.m_y;
+		dstPtr[2] = x * m_front.m_z + y * m_up.m_z + z * m_right.m_z + m_posit.m_z;
+		srcPtr += srcStep;
+		dstPtr += dstStep;
+	}
+}
+
+
+// Transforms the local axis aligned box [p0local, p1local] by this matrix
+// and returns the axis aligned bounding box [p0, p1] of the transformed
+// corners.
+void dgMatrix::TransformBBox (const dgVector& p0local, const dgVector& p1local, dgVector& p0, dgVector& p1) const
+{
+	dgVector box[8];
+
+	// Enumerate the eight corners of the local box: bit 2, 1, 0 of 'i'
+	// select between min and max on the x, y, z axis respectively.
+	// Fix: the original unrolled code assigned box[1][3] twice and left
+	// box[5][3] uninitialized; the loop gives every corner w = 1.
+	for (int32_t i = 0; i < 8; i ++) {
+		box[i][0] = (i & 4) ? p1local[0] : p0local[0];
+		box[i][1] = (i & 2) ? p1local[1] : p0local[1];
+		box[i][2] = (i & 1) ? p1local[2] : p0local[2];
+		box[i][3] = float(1.0f);
+	}
+
+	// transform all eight corners in place
+	TransformTriplex (&box[0].m_x, sizeof (dgVector), &box[0].m_x, sizeof (dgVector), 8);
+
+	// rebuild the axis aligned bounds from the transformed corners
+	p0 = box[0];
+	p1 = box[0];
+	for (int32_t i = 1; i < 8; i ++) {
+		p0.m_x = GetMin (p0.m_x, box[i].m_x);
+		p0.m_y = GetMin (p0.m_y, box[i].m_y);
+		p0.m_z = GetMin (p0.m_z, box[i].m_z);
+
+		p1.m_x = GetMax (p1.m_x, box[i].m_x);
+		p1.m_y = GetMax (p1.m_y, box[i].m_y);
+		p1.m_z = GetMax (p1.m_z, box[i].m_z);
+	}
+}
+
+
+
+// Inverts the upper-left 3x3 block, assuming this matrix is symmetric
+// (m[i][j] == m[j][i]); the result is returned with an identity fourth row.
+// The determinant is accumulated in double to reduce cancellation error.
+dgMatrix dgMatrix::Symetric3by3Inverse () const
+{
+	const dgMatrix& mat = *this;
+	// determinant of a symmetric 3x3: a00 a11 a22 + 2 a01 a12 a02
+	// - a02^2 a11 - a01^2 a22 - a00 a12^2
+	double det = mat[0][0] * mat[1][1] * mat[2][2] +
+				 mat[0][1] * mat[1][2] * mat[0][2] * float (2.0f) -
+				 mat[0][2] * mat[1][1] * mat[0][2] -
+				 mat[0][1] * mat[0][1] * mat[2][2] -
+				 mat[0][0] * mat[1][2] * mat[1][2];
+
+	det = float (1.0f) / det;
+
+	// cofactors scaled by the reciprocal determinant (adjugate / det)
+	float x11 = (float)(det * (mat[1][1] * mat[2][2] - mat[1][2] * mat[1][2]));
+	float x22 = (float)(det * (mat[0][0] * mat[2][2] - mat[0][2] * mat[0][2]));
+	float x33 = (float)(det * (mat[0][0] * mat[1][1] - mat[0][1] * mat[0][1]));
+
+	float x12 = (float)(det * (mat[1][2] * mat[2][0] - mat[1][0] * mat[2][2]));
+	float x13 = (float)(det * (mat[1][0] * mat[2][1] - mat[1][1] * mat[2][0]));
+	float x23 = (float)(det * (mat[0][1] * mat[2][0] - mat[0][0] * mat[2][1]));
+
+
+#ifdef _DEBUG
+	// sanity check: the product with the original must be close to identity
+	dgMatrix matInv (dgVector (x11, x12, x13, float(0.0f)),
+					 dgVector (x12, x22, x23, float(0.0f)),
+					 dgVector (x13, x23, x33, float(0.0f)),
+					 dgVector (float(0.0f), float(0.0f), float(0.0f), float(1.0f)));
+
+	dgMatrix test (matInv * mat);
+	HACD_ASSERT (dgAbsf (test[0][0] - float(1.0f)) < float(0.01f));
+	HACD_ASSERT (dgAbsf (test[1][1] - float(1.0f)) < float(0.01f));
+	HACD_ASSERT (dgAbsf (test[2][2] - float(1.0f)) < float(0.01f));
+#endif
+
+	return dgMatrix (dgVector (x11, x12, x13, float(0.0f)),
+					 dgVector (x12, x22, x23, float(0.0f)),
+					 dgVector (x13, x23, x33, float(0.0f)),
+					 dgVector (float(0.0f), float(0.0f), float(0.0f), float(1.0f)));
+}
+
+
+
+
+
+// Extracts Euler angles (pitch, yaw, roll) from this rotation matrix,
+// returned as a dgVector with w = 0. Near yaw = +/-90 degrees (gimbal
+// lock) roll is fixed at zero and only pitch is recovered.
+dgVector dgMatrix::CalcPitchYawRoll () const
+{
+	// |sin(yaw)| above this threshold is treated as gimbal lock
+	const float minSin = float(0.99995f);
+
+	const dgMatrix& matrix = *this;
+
+	float roll = float(0.0f);
+	float pitch  = float(0.0f);
+	// clamp keeps dgAsin's argument inside its domain despite rounding
+	float yaw = dgAsin (-ClampValue (matrix[0][2], float(-0.999999f), float(0.999999f)));
+
+	HACD_ASSERT (HACD_ISFINITE (yaw));
+	if (matrix[0][2] < minSin) {
+		if (matrix[0][2] > (-minSin)) {
+			// general case: recover roll and pitch from the other entries
+			roll = dgAtan2 (matrix[0][1], matrix[0][0]);
+			pitch = dgAtan2 (matrix[1][2], matrix[2][2]);
+		} else {
+			// gimbal lock, yaw ~ +90 degrees
+			pitch = dgAtan2 (matrix[1][0], matrix[1][1]);
+		}
+	} else {
+		// gimbal lock, yaw ~ -90 degrees
+		pitch = -dgAtan2 (matrix[1][0], matrix[1][1]);
+	}
+
+	return dgVector (pitch, yaw, roll, float(0.0f));
+}
+
+
+/*
+static inline void ROT(dgMatrix &a, int32_t i, int32_t j, int32_t k, int32_t l, float s, float tau)
+{
+ float g = a[i][j];
+ float h = a[k][l];
+ a[i][j] = g - s * (h + g * tau);
+ a[k][l] = h + s * (g - h * tau);
+}
+
+// from numerical recipes in c
+// Jacobian method for computing the eigenvectors of a symmetric matrix
+void dgMatrix::EigenVectors (dgVector &eigenValues, const dgMatrix& initialGuess)
+{
+ float b[3];
+ float z[3];
+ float d[3];
+
+ // dgMatrix eigenVectors (initialGuess.Transpose4X4());
+ // dgMatrix &mat = *this;
+
+ dgMatrix& mat = *this;
+ dgMatrix eigenVectors (initialGuess.Transpose4X4());
+ mat = initialGuess * mat * eigenVectors;
+
+
+ b[0] = mat[0][0];
+ b[1] = mat[1][1];
+ b[2] = mat[2][2];
+
+ d[0] = mat[0][0];
+ d[1] = mat[1][1];
+ d[2] = mat[2][2];
+
+ z[0] = float (0.0f);
+ z[1] = float (0.0f);
+ z[2] = float (0.0f);
+
+ int32_t nrot = 0;
+ for (int32_t i = 0; i < 50; i++) {
+ float sm = dgAbsf(mat[0][1]) + dgAbsf(mat[0][2]) + dgAbsf(mat[1][2]);
+
+ if (sm < float (1.0e-6f)) {
+ HACD_ASSERT (dgAbsf((eigenVectors.m_front % eigenVectors.m_front) - float(1.0f)) < dgEPSILON);
+ HACD_ASSERT (dgAbsf((eigenVectors.m_up % eigenVectors.m_up) - float(1.0f)) < dgEPSILON);
+ HACD_ASSERT (dgAbsf((eigenVectors.m_right % eigenVectors.m_right) - float(1.0f)) < dgEPSILON);
+
+ // order the eigenvalue vectors
+ dgVector tmp (eigenVectors.m_front * eigenVectors.m_up);
+ if (tmp % eigenVectors.m_right < float(0.0f)) {
+ eigenVectors.m_right = eigenVectors.m_right.Scale (-float(1.0f));
+ }
+
+ eigenValues = dgVector (d[0], d[1], d[2], float (0.0f));
+ *this = eigenVectors.Inverse();
+ return;
+ }
+
+ float thresh = float (0.0f);
+ if (i < 3) {
+ thresh = (float)(0.2f / 9.0f) * sm;
+ }
+
+
+ // First row
+ float g = float (100.0f) * dgAbsf(mat[0][1]);
+ if ((i > 3) && (dgAbsf(d[0]) + g == dgAbsf(d[0])) && (dgAbsf(d[1]) + g == dgAbsf(d[1]))) {
+ mat[0][1] = float (0.0f);
+ } else if (dgAbsf(mat[0][1]) > thresh) {
+ float h = d[1] - d[0];
+ float t;
+ if (dgAbsf(h) + g == dgAbsf(h)) {
+ t = mat[0][1] / h;
+ } else {
+ float theta = float (0.5f) * h / mat[0][1];
+ t = float(1.0f) / (dgAbsf(theta) + dgSqrt(float(1.0f) + theta * theta));
+ if (theta < float (0.0f)) {
+ t = -t;
+ }
+ }
+ float c = float(1.0f) / dgSqrt (float (1.0f) + t * t);
+ float s = t * c;
+ float tau = s / (float(1.0f) + c);
+ h = t * mat[0][1];
+ z[0] -= h;
+ z[1] += h;
+ d[0] -= h;
+ d[1] += h;
+ mat[0][1] = float(0.0f);
+ ROT (mat, 0, 2, 1, 2, s, tau);
+ ROT (eigenVectors, 0, 0, 0, 1, s, tau);
+ ROT (eigenVectors, 1, 0, 1, 1, s, tau);
+ ROT (eigenVectors, 2, 0, 2, 1, s, tau);
+
+ nrot++;
+ }
+
+
+ // second row
+ g = float (100.0f) * dgAbsf(mat[0][2]);
+ if ((i > 3) && (dgAbsf(d[0]) + g == dgAbsf(d[0])) && (dgAbsf(d[2]) + g == dgAbsf(d[2]))) {
+ mat[0][2] = float (0.0f);
+ } else if (dgAbsf(mat[0][2]) > thresh) {
+ float h = d[2] - d[0];
+ float t;
+ if (dgAbsf(h) + g == dgAbsf(h)) {
+ t = (mat[0][2]) / h;
+ } else {
+ float theta = float (0.5f) * h / mat[0][2];
+ t = float(1.0f) / (dgAbsf(theta) + dgSqrt(float(1.0f) + theta * theta));
+ if (theta < float (0.0f)) {
+ t = -t;
+ }
+ }
+ float c = float(1.0f) / dgSqrt(float (1.0f) + t * t);
+ float s = t * c;
+ float tau = s / (float(1.0f) + c);
+ h = t * mat[0][2];
+ z[0] -= h;
+ z[2] += h;
+ d[0] -= h;
+ d[2] += h;
+ mat[0][2]=float (0.0f);
+ ROT (mat, 0, 1, 1, 2, s, tau);
+ ROT (eigenVectors, 0, 0, 0, 2, s, tau);
+ ROT (eigenVectors, 1, 0, 1, 2, s, tau);
+ ROT (eigenVectors, 2, 0, 2, 2, s, tau);
+ }
+
+ // third row
+ g = float (100.0f) * dgAbsf(mat[1][2]);
+ if ((i > 3) && (dgAbsf(d[1]) + g == dgAbsf(d[1])) && (dgAbsf(d[2]) + g == dgAbsf(d[2]))) {
+ mat[1][2] = float (0.0f);
+ } else if (dgAbsf(mat[1][2]) > thresh) {
+ float h = d[2] - d[1];
+ float t;
+ if (dgAbsf(h) + g == dgAbsf(h)) {
+ t = mat[1][2] / h;
+ } else {
+ float theta = float (0.5f) * h / mat[1][2];
+ t = float(1.0f) / (dgAbsf(theta) + dgSqrt(float(1.0f) + theta * theta));
+ if (theta < float (0.0f)) {
+ t = -t;
+ }
+ }
+ float c = float(1.0f) / dgSqrt(float (1.0f) + t*t);
+ float s = t * c;
+ float tau = s / (float(1.0f) + c);
+
+ h = t * mat[1][2];
+ z[1] -= h;
+ z[2] += h;
+ d[1] -= h;
+ d[2] += h;
+ mat[1][2] = float (0.0f);
+ ROT (mat, 0, 1, 0, 2, s, tau);
+ ROT (eigenVectors, 0, 1, 0, 2, s, tau);
+ ROT (eigenVectors, 1, 1, 1, 2, s, tau);
+ ROT (eigenVectors, 2, 1, 2, 2, s, tau);
+ nrot++;
+ }
+
+ b[0] += z[0]; d[0] = b[0]; z[0] = float (0.0f);
+ b[1] += z[1]; d[1] = b[1]; z[1] = float (0.0f);
+ b[2] += z[2]; d[2] = b[2]; z[2] = float (0.0f);
+ }
+
+ eigenValues = dgVector (d[0], d[1], d[2], float (0.0f));
+ *this = dgGetIdentityMatrix();
+}
+*/
+
+// Jacobi eigenvalue iteration for a symmetric 3x3 matrix (Numerical Recipes
+// style): repeatedly applies plane rotations to annihilate the off-diagonal
+// entries. On convergence, 'eigenValues' receives the diagonal and *this is
+// replaced by the inverse of the accumulated rotation (the eigenvector
+// basis); if 50 sweeps do not converge, *this degrades to identity.
+void dgMatrix::EigenVectors (dgVector &eigenValues, const dgMatrix& initialGuess)
+{
+	float b[3];
+	float z[3];
+	float d[3];
+
+	// pre-rotate by the caller's guess to speed convergence
+	dgMatrix& mat = *this;
+	dgMatrix eigenVectors (initialGuess.Transpose4X4());
+	mat = initialGuess * mat * eigenVectors;
+
+	// b/d track the diagonal; z accumulates the per-sweep corrections
+	b[0] = mat[0][0];
+	b[1] = mat[1][1];
+	b[2] = mat[2][2];
+
+	d[0] = mat[0][0];
+	d[1] = mat[1][1];
+	d[2] = mat[2][2];
+
+	z[0] = float (0.0f);
+	z[1] = float (0.0f);
+	z[2] = float (0.0f);
+
+	for (int32_t i = 0; i < 50; i++) {
+		// sum of off-diagonal magnitudes: convergence measure
+		float sm = dgAbsf(mat[0][1]) + dgAbsf(mat[0][2]) + dgAbsf(mat[1][2]);
+
+		if (sm < float (1.0e-6f)) {
+			HACD_ASSERT (dgAbsf((eigenVectors.m_front % eigenVectors.m_front) - float(1.0f)) < dgEPSILON);
+			HACD_ASSERT (dgAbsf((eigenVectors.m_up % eigenVectors.m_up) - float(1.0f)) < dgEPSILON);
+			HACD_ASSERT (dgAbsf((eigenVectors.m_right % eigenVectors.m_right) - float(1.0f)) < dgEPSILON);
+
+			// order the eigenvalue vectors
+			dgVector tmp (eigenVectors.m_front * eigenVectors.m_up);
+			if (tmp % eigenVectors.m_right < float(0.0f)) {
+				eigenVectors.m_right = eigenVectors.m_right.Scale (-float(1.0f));
+			}
+
+			eigenValues = dgVector (d[0], d[1], d[2], float (0.0f));
+			*this = eigenVectors.Inverse();
+			return;
+		}
+
+		// on the first sweeps rotate even small entries; later only large ones
+		float thresh = float (0.0f);
+		if (i < 3) {
+			thresh = (float)(0.2f / 9.0f) * sm;
+		}
+
+		// sweep every off-diagonal element (ip, iq), ip < iq
+		for (int32_t ip = 0; ip < 2; ip ++) {
+			for (int32_t iq = ip + 1; iq < 3; iq ++) {
+				float g = float (100.0f) * dgAbsf(mat[ip][iq]);
+				//if ((i > 3) && (dgAbsf(d[0]) + g == dgAbsf(d[ip])) && (dgAbsf(d[1]) + g == dgAbsf(d[1]))) {
+				// element negligible relative to the diagonal: zero it directly
+				if ((i > 3) && ((dgAbsf(d[ip]) + g) == dgAbsf(d[ip])) && ((dgAbsf(d[iq]) + g) == dgAbsf(d[iq]))) {
+					mat[ip][iq] = float (0.0f);
+				} else if (dgAbsf(mat[ip][iq]) > thresh) {
+
+					// compute the rotation (c = cos, s = sin) that zeroes mat[ip][iq]
+					float t;
+					float h = d[iq] - d[ip];
+					if (dgAbsf(h) + g == dgAbsf(h)) {
+						t = mat[ip][iq] / h;
+					} else {
+						float theta = float (0.5f) * h / mat[ip][iq];
+						t = float(1.0f) / (dgAbsf(theta) + dgSqrt(float(1.0f) + theta * theta));
+						if (theta < float (0.0f)) {
+							t = -t;
+						}
+					}
+					float c = float(1.0f) / dgSqrt (float (1.0f) + t * t);
+					float s = t * c;
+					float tau = s / (float(1.0f) + c);
+					h = t * mat[ip][iq];
+					z[ip] -= h;
+					z[iq] += h;
+					d[ip] -= h;
+					d[iq] += h;
+					mat[ip][iq] = float(0.0f);
+
+					// apply the rotation to the remaining matrix entries...
+					for (int32_t j = 0; j <= ip - 1; j ++) {
+						//ROT (mat, j, ip, j, iq, s, tau);
+						//ROT(dgMatrix &a, int32_t i, int32_t j, int32_t k, int32_t l, float s, float tau)
+						float g = mat[j][ip];
+						float h = mat[j][iq];
+						mat[j][ip] = g - s * (h + g * tau);
+						mat[j][iq] = h + s * (g - h * tau);
+
+					}
+					for (int32_t j = ip + 1; j <= iq - 1; j ++) {
+						//ROT (mat, ip, j, j, iq, s, tau);
+						//ROT(dgMatrix &a, int32_t i, int32_t j, int32_t k, int32_t l, float s, float tau)
+						float g = mat[ip][j];
+						float h = mat[j][iq];
+						mat[ip][j] = g - s * (h + g * tau);
+						mat[j][iq] = h + s * (g - h * tau);
+					}
+					for (int32_t j = iq + 1; j < 3; j ++) {
+						//ROT (mat, ip, j, iq, j, s, tau);
+						//ROT(dgMatrix &a, int32_t i, int32_t j, int32_t k, int32_t l, float s, float tau)
+						float g = mat[ip][j];
+						float h = mat[iq][j];
+						mat[ip][j] = g - s * (h + g * tau);
+						mat[iq][j] = h + s * (g - h * tau);
+					}
+
+					// ...and accumulate it into the eigenvector matrix
+					for (int32_t j = 0; j < 3; j ++) {
+						//ROT (eigenVectors, j, ip, j, iq, s, tau);
+						//ROT(dgMatrix &a, int32_t i, int32_t j, int32_t k, int32_t l, float s, float tau)
+						float g = eigenVectors[j][ip];
+						float h = eigenVectors[j][iq];
+						eigenVectors[j][ip] = g - s * (h + g * tau);
+						eigenVectors[j][iq] = h + s * (g - h * tau);
+					}
+				}
+			}
+		}
+		// fold this sweep's corrections into the running diagonal
+		b[0] += z[0]; d[0] = b[0]; z[0] = float (0.0f);
+		b[1] += z[1]; d[1] = b[1]; z[1] = float (0.0f);
+		b[2] += z[2]; d[2] = b[2]; z[2] = float (0.0f);
+	}
+
+	// no convergence after 50 sweeps: return the diagonal and identity basis
+	eigenValues = dgVector (d[0], d[1], d[2], float (0.0f));
+	*this = dgGetIdentityMatrix();
+}
+
diff --git a/APEX_1.4/shared/general/HACD/src/dgMeshEffect.cpp b/APEX_1.4/shared/general/HACD/src/dgMeshEffect.cpp
new file mode 100644
index 00000000..fce2314e
--- /dev/null
+++ b/APEX_1.4/shared/general/HACD/src/dgMeshEffect.cpp
@@ -0,0 +1,4752 @@
+/* Copyright (c) <2003-2011> <Julio Jerez, Newton Game Dynamics>
+*
+* This software is provided 'as-is', without any express or implied
+* warranty. In no event will the authors be held liable for any damages
+* arising from the use of this software.
+*
+* Permission is granted to anyone to use this software for any purpose,
+* including commercial applications, and to alter it and redistribute it
+* freely, subject to the following restrictions:
+*
+* 1. The origin of this software must not be misrepresented; you must not
+* claim that you wrote the original software. If you use this software
+* in a product, an acknowledgment in the product documentation would be
+* appreciated but is not required.
+*
+* 2. Altered source versions must be plainly marked as such, and must not be
+* misrepresented as being the original software.
+*
+* 3. This notice may not be removed or altered from any source distribution.
+*/
+
+#include "dgMeshEffect.h"
+#include "dgConvexHull3d.h"
+#include "dgStack.h"
+#include "dgSphere.h"
+#include "dgGraph.h"
+#include "dgAABBPolygonSoup.h"
+#include "dgPolygonSoupBuilder.h"
+#include "SparseArray.h"
+
+#include <string.h>
+
+#if PX_X86
+#define PTR_TO_UINT64(x) ((uint64_t)(uint32_t)(x))
+#else
+#define PTR_TO_UINT64(x) ((uint64_t)(x))
+#endif
+
+
+// Scratch record used by the flat-clip edge pass: for one edge/twin pair it
+// keeps the vertex indices produced on each side of the clip plane plus the
+// attribute indices for both half-edges.  Plain POD — member order defines
+// the in-memory layout, so do not reorder fields.
+class dgFlatClipEdgeAttr
+{
+	public:
+	int32_t m_rightIndex;     // vertex index on the right side of the clip
+	int32_t m_leftIndex;      // vertex index on the left side of the clip
+	int32_t m_leftEdgeAttr;   // attribute index for the left edge
+	int32_t m_leftTwinAttr;   // attribute index for the left twin
+	int32_t m_rightEdgeAttr;  // attribute index for the right edge
+	int32_t m_rightTwinAttr;  // attribute index for the right twin
+	dgEdge* m_edge;           // the half-edge being clipped
+	dgEdge* m_twin;           // its twin half-edge
+};
+
+// Default construction: delegate all state setup (counters and optional
+// vertex/attribute buffer preallocation) to Init().
+dgMeshEffect::dgMeshEffect(bool preAllocaBuffers)
+{
+	Init(preAllocaBuffers);
+}
+
+// Build a mesh consisting of a single rectangular quad lying in the plane
+// described by planeMatrix (the quad spans the matrix's Y/Z axes; its normal
+// is the matrix front vector).  Two independent texture matrices generate the
+// uv0 and uv1 channels.  Note: "witdth" is a historical typo for "width".
+// The direct writes to m_points/m_attib rely on Init(true) preallocating
+// at least DG_MESH_EFFECT_INITIAL_VERTEX_SIZE entries.
+dgMeshEffect::dgMeshEffect (const dgMatrix& planeMatrix, float witdth, float breadth, int32_t material, const dgMatrix& textureMatrix0, const dgMatrix& textureMatrix1)
+{
+	int32_t index[4];
+	int64_t attrIndex[4];
+	dgBigVector face[4];
+
+	Init(true);
+
+	// Quad corners in plane-local space (x == 0 plane).
+	face[0] = dgBigVector (float (0.0f), -witdth, -breadth, float (0.0f));
+	face[1] = dgBigVector (float (0.0f),  witdth, -breadth, float (0.0f));
+	face[2] = dgBigVector (float (0.0f),  witdth,  breadth, float (0.0f));
+	face[3] = dgBigVector (float (0.0f), -witdth,  breadth, float (0.0f));
+
+	for (int32_t i = 0; i < 4; i ++) {
+		// uv coordinates come from the local-space corner, not the
+		// transformed world-space point.
+		dgBigVector uv0 (textureMatrix0.TransformVector(face[i]));
+		dgBigVector uv1 (textureMatrix1.TransformVector(face[i]));
+
+		m_points[i] = planeMatrix.TransformVector(face[i]);
+
+		m_attib[i].m_vertex.m_x = m_points[i].m_x;
+		m_attib[i].m_vertex.m_y = m_points[i].m_y;
+		m_attib[i].m_vertex.m_z = m_points[i].m_z;
+		m_attib[i].m_vertex.m_w = double (0.0f);
+
+		// Face normal is the plane's front axis for all four corners.
+		m_attib[i].m_normal_x = planeMatrix.m_front.m_x;
+		m_attib[i].m_normal_y = planeMatrix.m_front.m_y;
+		m_attib[i].m_normal_z = planeMatrix.m_front.m_z;
+
+		m_attib[i].m_u0 = uv0.m_y;
+		m_attib[i].m_v0 = uv0.m_z;
+
+		m_attib[i].m_u1 = uv1.m_y;
+		m_attib[i].m_v1 = uv1.m_z;
+
+		m_attib[i].m_material = material;
+
+		index[i] = i;
+		attrIndex[i] = i;
+	}
+
+	m_pointCount = 4;
+	m_atribCount = 4;
+	BeginFace();
+	AddFace (4, index, attrIndex);
+	EndFace();
+}
+
+
+// Construct from an existing polyhedra topology while deep-copying the
+// vertex and attribute arrays of `source`.  The topology comes from `mesh`;
+// the geometric data comes from `source`.
+dgMeshEffect::dgMeshEffect(dgPolyhedra& mesh, const dgMeshEffect& source)
+	:dgPolyhedra (mesh)
+{
+	m_pointCount = source.m_pointCount;
+	m_maxPointCount = source.m_maxPointCount;
+	m_points = (dgBigVector*) HACD_ALLOC(size_t (m_maxPointCount * sizeof(dgBigVector)));
+	memcpy (m_points, source.m_points, m_pointCount * sizeof(dgBigVector));
+
+	m_atribCount = source.m_atribCount;
+	m_maxAtribCount = source.m_maxAtribCount;
+	m_attib = (dgVertexAtribute*) HACD_ALLOC(size_t (m_maxAtribCount * sizeof(dgVertexAtribute)));
+	memcpy (m_attib, source.m_attib, m_atribCount * sizeof(dgVertexAtribute));
+}
+
+
+// Copy constructor: clone the topology via the dgPolyhedra base, then
+// deep-copy the vertex and attribute arrays (allocated at the source's
+// full capacity, only the used prefix is copied).
+dgMeshEffect::dgMeshEffect(const dgMeshEffect& source)
+	:dgPolyhedra (source)
+{
+	m_pointCount = source.m_pointCount;
+	m_maxPointCount = source.m_maxPointCount;
+	m_points = (dgBigVector*) HACD_ALLOC(size_t (m_maxPointCount * sizeof(dgBigVector)));
+	memcpy (m_points, source.m_points, m_pointCount * sizeof(dgBigVector));
+
+	m_atribCount = source.m_atribCount;
+	m_maxAtribCount = source.m_maxAtribCount;
+	m_attib = (dgVertexAtribute*) HACD_ALLOC(size_t (m_maxAtribCount * sizeof(dgVertexAtribute)));
+	memcpy (m_attib, source.m_attib, m_atribCount * sizeof(dgVertexAtribute));
+}
+
+
+// Release the vertex and attribute arrays; topology is cleaned up by the
+// dgPolyhedra base destructor.
+dgMeshEffect::~dgMeshEffect(void)
+{
+	HACD_FREE (m_points);
+	HACD_FREE (m_attib);
+}
+
+// Shared constructor body: zero the element counters, set both capacities to
+// DG_MESH_EFFECT_INITIAL_VERTEX_SIZE, and optionally preallocate the vertex
+// and attribute arrays (callers that build geometry immediately pass true).
+void dgMeshEffect::Init(bool preAllocaBuffers)
+{
+	m_pointCount = 0;
+	m_atribCount = 0;
+	m_maxPointCount = DG_MESH_EFFECT_INITIAL_VERTEX_SIZE;
+	m_maxAtribCount = DG_MESH_EFFECT_INITIAL_VERTEX_SIZE;
+
+	m_points = NULL;
+	m_attib = NULL;
+	if (preAllocaBuffers) {
+		m_points = (dgBigVector*) HACD_ALLOC(size_t (m_maxPointCount * sizeof(dgBigVector)));
+		m_attib = (dgVertexAtribute*) HACD_ALLOC(size_t (m_maxAtribCount * sizeof(dgVertexAtribute)));
+	}
+}
+
+
+// Triangulate every face of the mesh in place.  Strategy: copy the faces
+// into a scratch polyhedra keyed by attribute index (stashing the original
+// vertex index in each attribute's m_vertex.m_w), triangulate the scratch
+// copy, then rebuild this mesh from the resulting triangles and restore
+// the m_w channel from the point array.
+void dgMeshEffect::Triangulate ()
+{
+	dgPolyhedra polygon;
+
+	int32_t mark = IncLRU();
+	polygon.BeginFace();
+	dgPolyhedra::Iterator iter (*this);
+	for (iter.Begin(); iter; iter ++){
+		dgEdge* const face = &(*iter);
+
+		// Visit each interior face exactly once (mark == visited).
+		if ((face->m_mark != mark) && (face->m_incidentFace > 0)) {
+			int32_t	index[DG_MESH_EFFECT_POINT_SPLITED];
+
+			dgEdge* ptr = face;
+			int32_t indexCount = 0;
+			do {
+				int32_t attribIndex = int32_t (ptr->m_userData);
+				// Stash the vertex index so it survives triangulation.
+				m_attib[attribIndex].m_vertex.m_w = double (ptr->m_incidentVertex);
+				ptr->m_mark = mark;
+				index[indexCount] = attribIndex;
+				indexCount ++;
+				ptr = ptr->m_next;
+			} while (ptr != face);
+			polygon.AddFace(indexCount, index);
+		}
+	}
+	polygon.EndFace();
+
+
+	dgPolyhedra leftOversOut;
+	polygon.Triangulate(&m_attib[0].m_vertex.m_x, sizeof (dgVertexAtribute), &leftOversOut);
+	// All faces are expected to triangulate cleanly.
+	HACD_ASSERT (leftOversOut.GetCount() == 0);
+
+
+	// Rebuild this mesh's topology from the triangulated scratch copy.
+	RemoveAll();
+	SetLRU (0);
+
+	mark = polygon.IncLRU();
+	BeginFace();
+	dgPolyhedra::Iterator iter1 (polygon);
+	for (iter1.Begin(); iter1; iter1 ++){
+		dgEdge* const face = &(*iter1);
+		if ((face->m_mark != mark) && (face->m_incidentFace > 0)) {
+			int32_t	index[DG_MESH_EFFECT_POINT_SPLITED];
+			int64_t	userData[DG_MESH_EFFECT_POINT_SPLITED];
+
+			dgEdge* ptr = face;
+			int32_t indexCount = 0;
+			do {
+				ptr->m_mark = mark;
+				// Recover the vertex index stashed in m_w; the scratch
+				// mesh's incident vertex is an attribute index.
+				index[indexCount] = int32_t (m_attib[ptr->m_incidentVertex].m_vertex.m_w);
+
+				userData[indexCount] = ptr->m_incidentVertex;
+				indexCount ++;
+				ptr = ptr->m_next;
+			} while (ptr != face);
+			AddFace(indexCount, index, userData);
+		}
+	}
+	EndFace();
+
+	// Restore the m_w channel that was hijacked above.
+	for (iter.Begin(); iter; iter ++){
+		dgEdge* const face = &(*iter);
+		if (face->m_incidentFace > 0) {
+			int32_t attribIndex = int32_t (face->m_userData);
+			m_attib[attribIndex].m_vertex.m_w = m_points[face->m_incidentVertex].m_w;
+		}
+	}
+
+}
+
+// Merge triangles into convex polygons.  Mirrors Triangulate(): copy faces
+// to a scratch polyhedra keyed by attribute index (stashing vertex indices
+// in m_vertex.m_w), run ConvexPartition on the copy, rebuild this mesh from
+// the result, restore m_w, then stitch T-joints.
+void dgMeshEffect::ConvertToPolygons ()
+{
+	dgPolyhedra polygon;
+
+	int32_t mark = IncLRU();
+	polygon.BeginFace();
+	dgPolyhedra::Iterator iter (*this);
+	for (iter.Begin(); iter; iter ++){
+		dgEdge* const face = &(*iter);
+		// Visit each interior face exactly once.
+		if ((face->m_mark != mark) && (face->m_incidentFace > 0)) {
+			int32_t	index[DG_MESH_EFFECT_POINT_SPLITED];
+
+			dgEdge* ptr = face;
+			int32_t indexCount = 0;
+			do {
+				int32_t attribIndex = int32_t (ptr->m_userData);
+
+				// Stash the vertex index so it survives the partition.
+				m_attib[attribIndex].m_vertex.m_w = float (ptr->m_incidentVertex);
+				ptr->m_mark = mark;
+				index[indexCount] = attribIndex;
+				indexCount ++;
+				ptr = ptr->m_next;
+			} while (ptr != face);
+			polygon.AddFace(indexCount, index);
+		}
+	}
+	polygon.EndFace();
+
+	dgPolyhedra leftOversOut;
+	polygon.ConvexPartition (&m_attib[0].m_vertex.m_x, sizeof (dgVertexAtribute), &leftOversOut);
+	HACD_ASSERT (leftOversOut.GetCount() == 0);
+
+	// Rebuild this mesh's topology from the partitioned scratch copy.
+	RemoveAll();
+	SetLRU (0);
+
+	mark = polygon.IncLRU();
+	BeginFace();
+	dgPolyhedra::Iterator iter1 (polygon);
+	for (iter1.Begin(); iter1; iter1 ++){
+		dgEdge* const face = &(*iter1);
+		if ((face->m_mark != mark) && (face->m_incidentFace > 0)) {
+			int32_t	index[DG_MESH_EFFECT_POINT_SPLITED];
+			int64_t	userData[DG_MESH_EFFECT_POINT_SPLITED];
+
+			dgEdge* ptr = face;
+			int32_t indexCount = 0;
+			do {
+				ptr->m_mark = mark;
+				// Recover the vertex index stashed in m_w.
+				index[indexCount] = int32_t (m_attib[ptr->m_incidentVertex].m_vertex.m_w);
+
+				userData[indexCount] = ptr->m_incidentVertex;
+				indexCount ++;
+				ptr = ptr->m_next;
+			} while (ptr != face);
+			AddFace(indexCount, index, userData);
+		}
+	}
+	EndFace();
+
+	// Restore the m_w channel that was hijacked above.
+	for (iter.Begin(); iter; iter ++){
+		dgEdge* const face = &(*iter);
+		if (face->m_incidentFace > 0) {
+			int32_t attribIndex = int32_t (face->m_userData);
+			m_attib[attribIndex].m_vertex.m_w = m_points[face->m_incidentVertex].m_w;
+		}
+	}
+
+	RepairTJoints (false);
+}
+
+// Compact the vertex and attribute arrays so only entries referenced by an
+// interior face remain, then rebuild the topology with the remapped indices.
+// `vertexMap` (caller-supplied, sized m_pointCount) receives old->new vertex
+// indices, -1 for unused vertices.  Finishes by shrinking the arrays to fit.
+void dgMeshEffect::RemoveUnusedVertices(int32_t* const vertexMap)
+{
+	dgPolyhedra polygon;
+	dgStack<int32_t>attrbMap(m_atribCount);
+
+	memset(&vertexMap[0], -1, m_pointCount * sizeof (int));
+	memset(&attrbMap[0], -1, m_atribCount * sizeof (int));
+
+	int attribCount = 0;
+	int vertexCount = 0;
+
+	dgStack<dgBigVector>points (m_pointCount);
+	dgStack<dgVertexAtribute>atributes (m_atribCount);
+
+	int32_t mark = IncLRU();
+	polygon.BeginFace();
+	dgPolyhedra::Iterator iter (*this);
+	for (iter.Begin(); iter; iter ++){
+		dgEdge* const face = &(*iter);
+		if ((face->m_mark != mark) && (face->m_incidentFace > 0)) {
+			int32_t	vertex[DG_MESH_EFFECT_POINT_SPLITED];
+			int64_t	userData[DG_MESH_EFFECT_POINT_SPLITED];
+			int indexCount = 0;
+			dgEdge* ptr = face;
+			do {
+				ptr->m_mark = mark;
+
+				// Assign a compacted index the first time a vertex is seen.
+				int index = ptr->m_incidentVertex;
+				if (vertexMap[index] == -1) {
+					vertexMap[index] = vertexCount;
+					points[vertexCount] = m_points[index];
+					vertexCount ++;
+				}
+				vertex[indexCount] = vertexMap[index];
+
+				// Same first-seen compaction for the attribute index.
+				index = int (ptr->m_userData);
+				if (attrbMap[index] == -1) {
+					attrbMap[index] = attribCount;
+					atributes[attribCount] = m_attib[index];
+					attribCount ++;
+				}
+				userData[indexCount] = attrbMap[index];
+				indexCount ++;
+
+				ptr = ptr->m_next;
+			} while (ptr != face);
+			polygon.AddFace(indexCount, vertex, userData);
+		}
+	}
+	polygon.EndFace();
+
+	// Overwrite the arrays with their compacted contents.
+	m_pointCount = vertexCount;
+	memcpy (&m_points[0].m_x, &points[0].m_x, m_pointCount * sizeof (dgBigVector));
+
+	m_atribCount = attribCount;
+	memcpy (&m_attib[0].m_vertex.m_x, &atributes[0].m_vertex.m_x, m_atribCount * sizeof (dgVertexAtribute));
+
+
+	// Rebuild the topology from the remapped scratch copy.
+	RemoveAll();
+	SetLRU (0);
+
+	BeginFace();
+	dgPolyhedra::Iterator iter1 (polygon);
+	for (iter1.Begin(); iter1; iter1 ++){
+		dgEdge* const face = &(*iter1);
+		if ((face->m_mark != mark) && (face->m_incidentFace > 0)) {
+			int32_t	index[DG_MESH_EFFECT_POINT_SPLITED];
+			int64_t	userData[DG_MESH_EFFECT_POINT_SPLITED];
+
+			dgEdge* ptr = face;
+			int32_t indexCount = 0;
+			do {
+				ptr->m_mark = mark;
+				index[indexCount] = ptr->m_incidentVertex;
+				userData[indexCount] = int64_t (ptr->m_userData);
+				indexCount ++;
+				ptr = ptr->m_next;
+			} while (ptr != face);
+			AddFace(indexCount, index, userData);
+		}
+	}
+	EndFace();
+	PackVertexArrays ();
+}
+
+// Compute an object-oriented bounding box from the mesh's best-fit sphere,
+// permuting the axes until the X extent is the largest; returns the box
+// frame as a matrix and its half-extents through `size`.
+// NOTE(review): the while loop assumes the cyclic permutation eventually
+// makes X dominant; if two extents are equal it still terminates because the
+// comparisons are strict, but confirm CalculateSphere never returns NaNs.
+dgMatrix dgMeshEffect::CalculateOOBB (dgBigVector& size) const
+{
+	dgSphere sphere (CalculateSphere (&m_points[0].m_x, sizeof (dgBigVector), NULL));
+	size = sphere.m_size;
+
+	// Cyclic axis permutation: x<-y, y<-z, z<-x.
+	dgMatrix permuation (dgGetIdentityMatrix());
+	permuation[0][0] = float (0.0f);
+	permuation[0][1] = float (1.0f);
+	permuation[1][1] = float (0.0f);
+	permuation[1][2] = float (1.0f);
+	permuation[2][2] = float (0.0f);
+	permuation[2][0] = float (1.0f);
+
+	// Rotate axes until the X extent dominates the other two.
+	while ((size.m_x < size.m_y) || (size.m_x < size.m_z)) {
+		sphere = permuation * sphere;
+		size = permuation.UnrotateVector(size);
+	}
+
+	return sphere;
+}
+
+// Compute the axis-aligned bounding box of every vertex that is referenced
+// by at least one half-edge of the mesh.  Results are returned through
+// minBox / maxBox; an empty mesh leaves the sentinel extremes in place.
+void dgMeshEffect::CalculateAABB (dgBigVector& minBox, dgBigVector& maxBox) const
+{
+	// Start from inverted sentinel extremes so any real point shrinks them.
+	dgBigVector boxMin ( double (1.0e15f),  double (1.0e15f),  double (1.0e15f), double (0.0f));
+	dgBigVector boxMax (-double (1.0e15f), -double (1.0e15f), -double (1.0e15f), double (0.0f));
+
+	const dgBigVector* const vertexArray = &m_points[0];
+	dgPolyhedra::Iterator edgeIter (*this);
+	for (edgeIter.Begin(); edgeIter; edgeIter ++){
+		dgEdge* const edge = &(*edgeIter);
+		const dgBigVector& point (vertexArray[edge->m_incidentVertex]);
+
+		// Fold this vertex into the running extremes, one axis at a time.
+		boxMin.m_x = GetMin (point.m_x, boxMin.m_x);
+		boxMax.m_x = GetMax (point.m_x, boxMax.m_x);
+
+		boxMin.m_y = GetMin (point.m_y, boxMin.m_y);
+		boxMax.m_y = GetMax (point.m_y, boxMax.m_y);
+
+		boxMin.m_z = GetMin (point.m_z, boxMin.m_z);
+		boxMax.m_z = GetMax (point.m_z, boxMax.m_z);
+	}
+
+	minBox = boxMin;
+	maxBox = boxMax;
+}
+
+// Expand the per-edge attributes into a flat array (one entry per edge, in
+// iteration order) and rewrite each edge's m_userData to its slot index.
+// Returns the number of entries written; `attib` must hold GetCount() slots.
+int32_t dgMeshEffect::EnumerateAttributeArray (dgVertexAtribute* const attib)
+{
+	int32_t index = 0;
+	dgPolyhedra::Iterator iter (*this);
+	for(iter.Begin(); iter; iter ++){
+		dgEdge* const edge = &(*iter);
+		HACD_ASSERT (index < GetCount());
+		attib[index] = m_attib[int32_t (edge->m_userData)];
+		edge->m_userData = uint64_t (index);
+		index ++;
+	}
+	return index;
+}
+
+// Re-index a flat attribute array (as produced by EnumerateAttributeArray)
+// back into the mesh: weld near-identical attributes into a compact m_attib
+// array and point every interior edge's m_userData at its welded entry.
+void dgMeshEffect::ApplyAttributeArray (dgVertexAtribute* const attib, int32_t maxCount)
+{
+	dgStack<int32_t>indexMap (GetCount());
+
+	// Weld duplicate attributes within DG_VERTEXLIST_INDEXLIST_TOL.
+	m_atribCount = dgVertexListToIndexList (&attib[0].m_vertex.m_x, sizeof (dgVertexAtribute), sizeof (dgVertexAtribute) / sizeof(double), maxCount, &indexMap[0], DG_VERTEXLIST_INDEXLIST_TOL);
+	m_maxAtribCount = m_atribCount;
+
+	// Replace the attribute array with the welded copy.
+	HACD_FREE (m_attib);
+	m_attib = (dgVertexAtribute*)HACD_ALLOC(size_t (m_atribCount * sizeof(dgVertexAtribute)));
+	memcpy (m_attib, attib, m_atribCount * sizeof(dgVertexAtribute));
+
+	dgPolyhedra::Iterator iter (*this);
+	for(iter.Begin(); iter; iter ++){
+		dgEdge* const edge = &(*iter);
+		if (edge->m_incidentFace > 0) {
+			int32_t index = indexMap[int32_t (edge->m_userData)];
+			HACD_ASSERT (index >= 0);
+			HACD_ASSERT (index < m_atribCount);
+			edge->m_userData = uint64_t (index);
+		}
+	}
+}
+
+
+
+// Return the centroid (average position) of all mesh vertices.
+// An empty mesh returns the zero vector; the original code divided by
+// m_pointCount unconditionally, which is a division by zero when the
+// mesh has no points.
+dgBigVector dgMeshEffect::GetOrigin ()const
+{
+	dgBigVector origin (double (0.0f), double (0.0f), double (0.0f), double (0.0f));
+	if (!m_pointCount) {
+		return origin;
+	}
+	for (int32_t i = 0; i < m_pointCount; i ++) {
+		origin += m_points[i];
+	}
+	return origin.Scale (double (1.0f) / m_pointCount);
+}
+
+
+// Repair the u-seam of a cylindrical/spherical uv mapping: where two edge
+// endpoints differ by more than 0.6 in u0 (i.e. the edge crosses the wrap
+// seam), bump the smaller u by one full period and mirror it into u1.
+// The identical pass runs twice — presumably so adjustments made in the
+// first pass propagate to neighboring edges; confirm before merging them.
+void dgMeshEffect::FixCylindricalMapping (dgVertexAtribute* attribArray) const
+{
+	dgPolyhedra::Iterator iter (*this);
+	for(iter.Begin(); iter; iter ++){
+		dgEdge* const edge = &(*iter);
+		dgVertexAtribute& attrib0 = attribArray[int32_t (edge->m_userData)];
+		dgVertexAtribute& attrib1 = attribArray[int32_t (edge->m_next->m_userData)];
+
+		double error = fabs (attrib0.m_u0 - attrib1.m_u0);
+		if (error > float (0.6f)) {
+			// Edge crosses the seam: unwrap the smaller u coordinate.
+			if (attrib0.m_u0 < attrib1.m_u0) {
+				attrib0.m_u0 += float (1.0f);
+				attrib0.m_u1 = attrib0.m_u0;
+			} else {
+				attrib1.m_u0 += float (1.0f);
+				attrib1.m_u1 = attrib1.m_u0;
+			}
+
+		}
+	}
+
+	// Second, identical pass (see note above).
+	for(iter.Begin(); iter; iter ++){
+		dgEdge* const edge = &(*iter);
+		dgVertexAtribute& attrib0 = attribArray[int32_t (edge->m_userData)];
+		dgVertexAtribute& attrib1 = attribArray[int32_t (edge->m_next->m_userData)];
+
+		double error = fabs (attrib0.m_u0 - attrib1.m_u0);
+		if (error > float (0.6f)) {
+			if (attrib0.m_u0 < attrib1.m_u0) {
+				attrib0.m_u0 += float (1.0f);
+				attrib0.m_u1 = attrib0.m_u0;
+			} else {
+				attrib1.m_u0 += float (1.0f);
+				attrib1.m_u1 = attrib1.m_u0;
+			}
+		}
+	}
+}
+
+
+// Assign spherical uv coordinates: project each vertex onto the unit sphere
+// around the mesh centroid, map latitude (asin y) to v and longitude
+// (atan2 x,z) to u, write the same uv into both channels, set `material`,
+// then repair the wrap seam and weld the attributes back into the mesh.
+void dgMeshEffect::SphericalMapping (int32_t material)
+{
+	dgBigVector origin (GetOrigin());
+
+	dgStack<dgBigVector>sphere (m_pointCount);
+	for (int32_t i = 0; i < m_pointCount; i ++) {
+		// Normalize the centroid-relative position onto the unit sphere.
+		dgBigVector point (m_points[i] - origin);
+		point = point.Scale (1.0f / dgSqrt (point % point));
+
+		double u = dgAsin (point.m_y);
+		double v = dgAtan2 (point.m_x, point.m_z);
+
+		// Remap angles into [0, 1].
+		u = (double (3.1416f/2.0f) - u) / double (3.1416f);
+		v = (double (3.1416f) - v) / double (2.0f * 3.1416f);
+		sphere[i].m_x = v;
+		sphere[i].m_y = u;
+	}
+
+
+	dgStack<dgVertexAtribute>attribArray (GetCount());
+	int32_t count = EnumerateAttributeArray (&attribArray[0]);
+
+	dgPolyhedra::Iterator iter (*this);
+	for(iter.Begin(); iter; iter ++){
+		dgEdge* const edge = &(*iter);
+		dgVertexAtribute& attrib = attribArray[int32_t (edge->m_userData)];
+		attrib.m_u0 = sphere[edge->m_incidentVertex].m_x;
+		attrib.m_v0 = sphere[edge->m_incidentVertex].m_y;
+		attrib.m_u1 = sphere[edge->m_incidentVertex].m_x;
+		attrib.m_v1 = sphere[edge->m_incidentVertex].m_y;
+		attrib.m_material = material;
+	}
+
+	FixCylindricalMapping (&attribArray[0]);
+	ApplyAttributeArray (&attribArray[0],count);
+}
+
+// Assign cylindrical uv coordinates around the mesh's X axis: u comes from
+// the angle atan2(y, z) of the normalized centroid-relative position, v from
+// the normalized X extent.  Faces whose normal is dominated by X (the caps,
+// detected with n.x^2 > 0.99*|n|^2) are re-mapped planarly in Y/Z and get
+// `capMaterial`; all other faces get `cylinderMaterial`.
+void dgMeshEffect::CylindricalMapping (int32_t cylinderMaterial, int32_t capMaterial)
+{
+// Legacy single-precision implementation, kept for reference.
+/*
+	dgVector origin (GetOrigin());
+
+	dgStack<dgVector>cylinder (m_pointCount);
+
+	float xMax;
+	float xMin;
+
+	xMin= float (1.0e10f);
+	xMax= float (-1.0e10f);
+	for (int32_t i = 0; i < m_pointCount; i ++) {
+		cylinder[i] = m_points[i] - origin;
+		xMin = GetMin (xMin, cylinder[i].m_x);
+		xMax = GetMax (xMax, cylinder[i].m_x);
+	}
+
+	float xscale = float (1.0f)/ (xMax - xMin);
+	for (int32_t i = 0; i < m_pointCount; i ++) {
+		float u;
+		float v;
+		float y;
+		float z;
+		y = cylinder[i].m_y;
+		z = cylinder[i].m_z;
+		u = dgAtan2 (z, y);
+		if (u < float (0.0f)) {
+			u += float (3.141592f * 2.0f);
+		}
+		v = (cylinder[i].m_x - xMin) * xscale;
+
+		cylinder[i].m_x = float (1.0f) - u * float (1.0f / (2.0f * 3.141592f));
+		cylinder[i].m_y = v;
+	}
+
+	dgStack<dgVertexAtribute>attribArray (GetCount());
+	EnumerateAttributeArray (&attribArray[0]);
+
+	dgPolyhedra::Iterator iter (*this);
+	for(iter.Begin(); iter; iter ++){
+		dgEdge* const edge;
+		edge = &(*iter);
+		dgVertexAtribute& attrib = attribArray[int32_t (edge->m_userData)];
+		attrib.m_u0 = cylinder[edge->m_incidentVertex].m_x;
+		attrib.m_v0 = cylinder[edge->m_incidentVertex].m_y;
+		attrib.m_u1 = cylinder[edge->m_incidentVertex].m_x;
+		attrib.m_v1 = cylinder[edge->m_incidentVertex].m_y;
+		attrib.m_material = cylinderMaterial;
+	}
+
+	FixCylindricalMapping (&attribArray[0]);
+
+	int32_t mark;
+	mark = IncLRU();
+	for(iter.Begin(); iter; iter ++){
+		dgEdge* const edge;
+		edge = &(*iter);
+		if (edge->m_mark < mark){
+			const dgVector& p0 = m_points[edge->m_incidentVertex];
+			const dgVector& p1 = m_points[edge->m_next->m_incidentVertex];
+			const dgVector& p2 = m_points[edge->m_prev->m_incidentVertex];
+
+			edge->m_mark = mark;
+			edge->m_next->m_mark = mark;
+			edge->m_prev->m_mark = mark;
+
+			dgVector e0 (p1 - p0);
+			dgVector e1 (p2 - p0);
+			dgVector n (e0 * e1);
+			if ((n.m_x * n.m_x) > (float (0.99f) * (n % n))) {
+				dgEdge* ptr;
+
+				ptr = edge;
+				do {
+					dgVertexAtribute& attrib = attribArray[int32_t (ptr->m_userData)];
+					dgVector p (m_points[ptr->m_incidentVertex] - origin);
+					p.m_x = float (0.0f);
+					p = p.Scale (float (dgRsqrt(p % p)));
+					attrib.m_u0 = float (0.5f) + p.m_y * float (0.5f);
+					attrib.m_v0 = float (0.5f) + p.m_z * float (0.5f);
+					attrib.m_u1 = float (0.5f) + p.m_y * float (0.5f);
+					attrib.m_v1 = float (0.5f) + p.m_z * float (0.5f);
+					attrib.m_material = capMaterial;
+
+					ptr = ptr->m_next;
+				}while (ptr != edge);
+			}
+		}
+	}
+
+	ApplyAttributeArray (&attribArray[0]);
+*/
+
+	dgBigVector origin (GetOrigin());
+	dgStack<dgBigVector>cylinder (m_pointCount);
+
+	// Find the centroid-relative bounding box (only the X extent is used
+	// for v; Y/Z extents are used later for the cap mapping).
+	dgBigVector pMin (double (1.0e10f), double (1.0e10f), double (1.0e10f), double (0.0f));
+	dgBigVector pMax (double (-1.0e10f), double (-1.0e10f), double (-1.0e10f), double (0.0f));
+	for (int32_t i = 0; i < m_pointCount; i ++) {
+		dgBigVector tmp (m_points[i] - origin);
+		pMin.m_x = GetMin (pMin.m_x, tmp.m_x);
+		pMax.m_x = GetMax (pMax.m_x, tmp.m_x);
+		pMin.m_y = GetMin (pMin.m_y, tmp.m_y);
+		pMax.m_y = GetMax (pMax.m_y, tmp.m_y);
+		pMin.m_z = GetMin (pMin.m_z, tmp.m_z);
+		pMax.m_z = GetMax (pMax.m_z, tmp.m_z);
+	}
+
+	// NOTE(review): all three scale components use the X extent — the Y/Z
+	// components look like they should use (pMax.m_y - pMin.m_y) and
+	// (pMax.m_z - pMin.m_z) for the cap mapping below; confirm intent.
+	dgBigVector scale (double (1.0f)/ (pMax.m_x - pMin.m_x), double (1.0f)/ (pMax.m_x - pMin.m_x), double (1.0f)/ (pMax.m_x - pMin.m_x), double (0.0f));
+	for (int32_t i = 0; i < m_pointCount; i ++) {
+		dgBigVector point (m_points[i] - origin);
+		double u = (point.m_x - pMin.m_x) * scale.m_x;
+
+		point = point.Scale (1.0f / dgSqrt (point % point));
+		double v = dgAtan2 (point.m_y, point.m_z);
+
+		//u = (double (3.1416f/2.0f) - u) / double (3.1416f);
+		v = (double (3.1416f) - v) / double (2.0f * 3.1416f);
+		cylinder[i].m_x = v;
+		cylinder[i].m_y = u;
+	}
+
+
+	dgStack<dgVertexAtribute>attribArray (GetCount());
+	int32_t count = EnumerateAttributeArray (&attribArray[0]);
+
+	dgPolyhedra::Iterator iter (*this);
+	for(iter.Begin(); iter; iter ++){
+		dgEdge* const edge = &(*iter);
+		dgVertexAtribute& attrib = attribArray[int32_t (edge->m_userData)];
+		attrib.m_u0 = cylinder[edge->m_incidentVertex].m_x;
+		attrib.m_v0 = cylinder[edge->m_incidentVertex].m_y;
+		attrib.m_u1 = cylinder[edge->m_incidentVertex].m_x;
+		attrib.m_v1 = cylinder[edge->m_incidentVertex].m_y;
+		attrib.m_material = cylinderMaterial;
+	}
+
+	FixCylindricalMapping (&attribArray[0]);
+
+	// apply cap mapping
+	int32_t mark = IncLRU();
+	for(iter.Begin(); iter; iter ++){
+		dgEdge* const edge = &(*iter);
+		if (edge->m_mark < mark){
+			const dgVector& p0 = m_points[edge->m_incidentVertex];
+			const dgVector& p1 = m_points[edge->m_next->m_incidentVertex];
+			const dgVector& p2 = m_points[edge->m_prev->m_incidentVertex];
+
+			edge->m_mark = mark;
+			edge->m_next->m_mark = mark;
+			edge->m_prev->m_mark = mark;
+
+			dgVector e0 (p1 - p0);
+			dgVector e1 (p2 - p0);
+			dgVector n (e0 * e1);
+			// X-dominated normal => this face is a cap.
+			if ((n.m_x * n.m_x) > (float (0.99f) * (n % n))) {
+				dgEdge* ptr = edge;
+				do {
+					dgVertexAtribute& attrib = attribArray[int32_t (ptr->m_userData)];
+					dgVector p (m_points[ptr->m_incidentVertex] - origin);
+					double u = (p.m_y - pMin.m_y) * scale.m_y;
+					double v = (p.m_z - pMin.m_z) * scale.m_z;
+					attrib.m_u0 = u;
+					attrib.m_v0 = v;
+					attrib.m_u1 = u;
+					attrib.m_v1 = v;
+					attrib.m_material = capMaterial;
+
+					ptr = ptr->m_next;
+				}while (ptr != edge);
+			}
+		}
+	}
+
+	ApplyAttributeArray (&attribArray[0],count);
+}
+
+// Assign planar box-projected uv coordinates: for each face, pick the axis
+// with the largest normal component, project the vertices onto the other
+// two axes (normalized by the mesh AABB) and assign the material matching
+// the dominant axis (front/side/top).  The uv1 channel is zeroed.
+void dgMeshEffect::BoxMapping (int32_t front, int32_t side, int32_t top)
+{
+	dgBigVector minVal;
+	dgBigVector maxVal;
+	int32_t materialArray[3];
+
+	GetMinMax (minVal, maxVal, &m_points[0][0], m_pointCount, sizeof (dgBigVector));
+	dgBigVector dist (maxVal - minVal);
+	dgBigVector scale (double (1.0f)/ dist[0], double (1.0f)/ dist[1], double (1.0f)/ dist[2], double (0.0f));
+
+	dgStack<dgVertexAtribute>attribArray (GetCount());
+	int32_t count = EnumerateAttributeArray (&attribArray[0]);
+
+	materialArray[0] = front;
+	materialArray[1] = side;
+	materialArray[2] = top;
+
+	int32_t mark = IncLRU();
+	dgPolyhedra::Iterator iter (*this);
+	for(iter.Begin(); iter; iter ++){
+		dgEdge* const edge = &(*iter);
+		if (edge->m_mark < mark){
+			const dgBigVector& p0 = m_points[edge->m_incidentVertex];
+			const dgBigVector& p1 = m_points[edge->m_next->m_incidentVertex];
+			const dgBigVector& p2 = m_points[edge->m_prev->m_incidentVertex];
+
+			edge->m_mark = mark;
+			edge->m_next->m_mark = mark;
+			edge->m_prev->m_mark = mark;
+
+			dgBigVector e0 (p1 - p0);
+			dgBigVector e1 (p2 - p0);
+			dgBigVector n (e0 * e1);
+
+			// Pick the axis with the largest absolute normal component.
+			int32_t index = 0;
+			double maxProjection = float (0.0f);
+
+			for (int32_t i = 0; i < 3; i ++) {
+				double proj = fabs (n[i]);
+				if (proj > maxProjection) {
+					index = i;
+					maxProjection = proj;
+				}
+			}
+
+			// Project onto the two remaining axes; swap them for the Y
+			// axis so the uv orientation stays consistent.
+			int32_t u = (index + 1) % 3;
+			int32_t v = (u + 1) % 3;
+			if (index == 1) {
+				Swap (u, v);
+			}
+			dgEdge* ptr = edge;
+			do {
+				dgVertexAtribute& attrib = attribArray[int32_t (ptr->m_userData)];
+				dgBigVector p (scale.CompProduct(m_points[ptr->m_incidentVertex] - minVal));
+				attrib.m_u0 = p[u];
+				attrib.m_v0 = p[v];
+				attrib.m_u1 = double(0.0f);
+				attrib.m_v1 = double(0.0f);
+				attrib.m_material = materialArray[index];
+
+				ptr = ptr->m_next;
+			}while (ptr != edge);
+		}
+	}
+
+	ApplyAttributeArray (&attribArray[0],count);
+}
+
+// Tri-planar uv mapping with a single material: three passes rotate the
+// mesh so each world axis becomes Z in turn; faces whose rotated normal is
+// Z-dominant get uv from the X/Y of the texture-matrix-transformed point.
+// Edges are marked across passes, so each face is mapped by exactly one pass.
+void dgMeshEffect::UniformBoxMapping (int32_t material, const dgMatrix& textureMatrix)
+{
+	dgStack<dgVertexAtribute>attribArray (GetCount());
+	int32_t count = EnumerateAttributeArray (&attribArray[0]);
+
+	int32_t mark = IncLRU();
+	for (int32_t i = 0; i < 3; i ++) {
+		// Pass 0: identity; pass 1: yaw 90 (X->Z); pass 2: pitch 90 (Y->Z).
+		dgMatrix rotationMatrix (dgGetIdentityMatrix());
+		if (i == 1) {
+			rotationMatrix = dgYawMatrix(float (90.0f * 3.1416f / 180.0f));
+		} else if (i == 2) {
+			rotationMatrix = dgPitchMatrix(float (90.0f * 3.1416f / 180.0f));
+		}
+
+		dgPolyhedra::Iterator iter (*this);
+
+		for(iter.Begin(); iter; iter ++){
+			dgEdge* const edge = &(*iter);
+			if (edge->m_mark < mark){
+				dgBigVector n (FaceNormal(edge, &m_points[0].m_x, sizeof (dgBigVector)));
+				dgVector normal (rotationMatrix.RotateVector(dgVector (n.Scale (double (1.0f) / sqrt (n % n)))));
+				normal.m_x = dgAbsf (normal.m_x);
+				normal.m_y = dgAbsf (normal.m_y);
+				normal.m_z = dgAbsf (normal.m_z);
+				// Map this face only if Z dominates (with a small slack).
+				if ((normal.m_z >= (normal.m_x - float (1.0e-4f))) && (normal.m_z >= (normal.m_y - float (1.0e-4f)))) {
+					dgEdge* ptr = edge;
+					do {
+						ptr->m_mark = mark;
+						dgVertexAtribute& attrib = attribArray[int32_t (ptr->m_userData)];
+						dgVector p (textureMatrix.TransformVector(rotationMatrix.RotateVector(m_points[ptr->m_incidentVertex])));
+						attrib.m_u0 = p.m_x;
+						attrib.m_v0 = p.m_y;
+						attrib.m_u1 = float (0.0f);
+						attrib.m_v1 = float (0.0f);
+						attrib.m_material = material;
+						ptr = ptr->m_next;
+					}while (ptr != edge);
+				}
+			}
+		}
+	}
+
+	ApplyAttributeArray (&attribArray[0],count);
+}
+
+// Compute smoothed vertex normals: first tag every interior face with a
+// unique index and store its unit face normal, then for each edge walk the
+// fan of faces around its vertex in both directions, accumulating normals
+// as long as adjacent face normals stay within the crease angle
+// (dot >= cos(angleInRadians)).  Results are welded via ApplyAttributeArray.
+void dgMeshEffect::CalculateNormals (double angleInRadians)
+{
+	dgStack<dgBigVector>faceNormal (GetCount());
+	dgStack<dgVertexAtribute>attribArray (GetCount());
+	int32_t count = EnumerateAttributeArray (&attribArray[0]);
+
+	// Pass 1: give each face a 1-based index (stored in m_incidentFace)
+	// and cache its normalized face normal.
+	int32_t faceIndex = 1;
+	int32_t mark = IncLRU();
+	dgPolyhedra::Iterator iter (*this);
+	for(iter.Begin(); iter; iter ++){
+		dgEdge* const edge = &(*iter);
+		if ((edge->m_mark < mark) && (edge->m_incidentFace > 0)) {
+			dgEdge* ptr = edge;
+			do {
+				ptr->m_incidentFace = faceIndex;
+				ptr->m_mark = mark;
+				ptr = ptr->m_next;
+			} while (ptr != edge);
+
+			dgBigVector normal (FaceNormal (edge, &m_points[0].m_x, sizeof (m_points[0])));
+			// Epsilon guards against zero-area faces.
+			normal = normal.Scale (float (1.0f) / (sqrt(normal % normal) + float(1.0e-16f)));
+			faceNormal[faceIndex] = normal;
+			faceIndex ++;
+		}
+	}
+
+
+	float smoothValue = dgCos (angleInRadians);
+//smoothValue = -1;
+	// Pass 2: per edge, blend the normals of the smooth fan around the
+	// incident vertex.
+	for(iter.Begin(); iter; iter ++){
+		dgEdge* const edge = &(*iter);
+		if (edge->m_incidentFace > 0) {
+			dgBigVector normal0 (faceNormal[edge->m_incidentFace]);
+
+			// Rewind to the first face of the smooth fan (walk clockwise
+			// until a crease or a border is hit).
+			dgEdge* startEdge = edge;
+			for (dgEdge* ptr = edge->m_prev->m_twin ; (ptr != edge) && (ptr->m_incidentFace > 0); ptr = ptr->m_prev->m_twin) {
+				const dgBigVector& normal1 (faceNormal[ptr->m_incidentFace]);
+				double dot = normal0 % normal1;
+				if (dot < smoothValue) {
+					break;
+				}
+				startEdge = ptr;
+				normal0 = normal1;
+			}
+
+			// Accumulate normals counterclockwise across the fan until the
+			// next crease/border.
+			dgBigVector normal (faceNormal[startEdge->m_incidentFace]);
+			normal0 = normal;
+			for (dgEdge* ptr = startEdge->m_twin->m_next; (ptr != startEdge) && (ptr->m_incidentFace > 0); ptr = ptr->m_twin->m_next) {
+
+				const dgBigVector& normal1 (faceNormal[ptr->m_incidentFace]);
+				double dot = normal0 % normal1;
+				if (dot < smoothValue) {
+					break;
+				}
+				normal += normal1;
+				normal0 = normal1;
+			}
+
+			normal = normal.Scale (float (1.0f) / (sqrt(normal % normal) + float(1.0e-16f)));
+			int32_t edgeIndex = int32_t (edge->m_userData);
+			dgVertexAtribute& attrib = attribArray[edgeIndex];
+			attrib.m_normal_x = normal.m_x;
+			attrib.m_normal_y = normal.m_y;
+			attrib.m_normal_z = normal.m_z;
+		}
+	}
+
+	ApplyAttributeArray (&attribArray[0],count);
+}
+
+
+// Start a new polygon soup: reset vertex/attribute counters, wipe the
+// topology, and open a face-construction session (closed by EndPolygon).
+void dgMeshEffect::BeginPolygon ()
+{
+	m_pointCount = 0;
+	m_atribCount = 0;
+	RemoveAll();
+	BeginFace();
+}
+
+
+// Append one attribute record, doubling the array capacity when full.
+// The vertex position inside the attribute is snapped to the quantization
+// grid so it welds consistently with AddVertex.
+void dgMeshEffect::AddAtribute (const dgVertexAtribute& attib)
+{
+	if (m_atribCount >= m_maxAtribCount) {
+		// Grow by doubling: allocate, copy, free the old array.
+		m_maxAtribCount *= 2;
+		dgVertexAtribute* const attibArray = (dgVertexAtribute*) HACD_ALLOC(size_t (m_maxAtribCount * sizeof(dgVertexAtribute)));
+		memcpy (attibArray, m_attib, m_atribCount * sizeof(dgVertexAtribute));
+		HACD_FREE(m_attib);
+		m_attib = attibArray;
+	}
+
+	m_attib[m_atribCount] = attib;
+	m_attib[m_atribCount].m_vertex.m_x = QuantizeCordinade(m_attib[m_atribCount].m_vertex.m_x);
+	m_attib[m_atribCount].m_vertex.m_y = QuantizeCordinade(m_attib[m_atribCount].m_vertex.m_y);
+	m_attib[m_atribCount].m_vertex.m_z = QuantizeCordinade(m_attib[m_atribCount].m_vertex.m_z);
+	m_atribCount ++;
+}
+
+// Append one vertex, doubling the array capacity when full.  X/Y/Z are
+// snapped to the quantization grid; the w component is stored unchanged.
+void dgMeshEffect::AddVertex(const dgBigVector& vertex)
+{
+	if (m_pointCount >= m_maxPointCount) {
+		// Grow by doubling: allocate, copy, free the old array.
+		m_maxPointCount *= 2;
+		dgBigVector* const points = (dgBigVector*) HACD_ALLOC(size_t (m_maxPointCount * sizeof(dgBigVector)));
+		memcpy (points, m_points, m_pointCount * sizeof(dgBigVector));
+		HACD_FREE(m_points);
+		m_points = points;
+	}
+
+	m_points[m_pointCount].m_x = QuantizeCordinade(vertex[0]);
+	m_points[m_pointCount].m_y = QuantizeCordinade(vertex[1]);
+	m_points[m_pointCount].m_z = QuantizeCordinade(vertex[2]);
+	m_points[m_pointCount].m_w = vertex.m_w;
+	m_pointCount ++;
+}
+
+
+// Append one fat vertex from an 11-double record laid out as:
+// [0..3] position xyzw, [4..6] normal, [7..8] uv0, [9..10] uv1.
+// The position is added (quantized) via AddVertex and the matching
+// attribute, built from the quantized position, via AddAtribute.
+void dgMeshEffect::AddPoint(const double* vertex, int32_t material)
+{
+	dgVertexAtribute attib;
+	AddVertex(dgBigVector (vertex[0], vertex[1], vertex[2], vertex[3]));
+
+	// Use the quantized coordinates just stored, not the raw input.
+	attib.m_vertex.m_x = m_points[m_pointCount - 1].m_x;
+	attib.m_vertex.m_y = m_points[m_pointCount - 1].m_y;
+	attib.m_vertex.m_z = m_points[m_pointCount - 1].m_z;
+	attib.m_vertex.m_w = m_points[m_pointCount - 1].m_w;
+
+	attib.m_normal_x = vertex[4];
+	attib.m_normal_y = vertex[5];
+	attib.m_normal_z = vertex[6];
+	attib.m_u0 = vertex[7];
+	attib.m_v0 = vertex[8];
+	attib.m_u1 = vertex[9];
+	attib.m_v1 = vertex[10];
+	attib.m_material = material;
+
+	AddAtribute (attib);
+}
+
+// Shrink the vertex and attribute arrays to their exact used sizes,
+// releasing any spare capacity left by the growth-doubling in
+// AddVertex/AddAtribute.  No-op when an array is already tight.
+// (Also removes the stray semicolon that followed the closing brace in the
+// original — an empty declaration that warns under -pedantic.)
+void dgMeshEffect::PackVertexArrays ()
+{
+	if (m_maxPointCount > m_pointCount) {
+		// Reallocate the point array at exactly m_pointCount entries.
+		dgBigVector* const points = (dgBigVector*) HACD_ALLOC(size_t (m_pointCount * sizeof(dgBigVector)));
+		memcpy (points, m_points, m_pointCount * sizeof(dgBigVector));
+		HACD_FREE(m_points);
+		m_points = points;
+		m_maxPointCount = m_pointCount;
+	}
+
+
+	if (m_maxAtribCount > m_atribCount) {
+		// Reallocate the attribute array at exactly m_atribCount entries.
+		dgVertexAtribute* const attibArray = (dgVertexAtribute*) HACD_ALLOC(size_t (m_atribCount * sizeof(dgVertexAtribute)));
+		memcpy (attibArray, m_attib, m_atribCount * sizeof(dgVertexAtribute));
+		HACD_FREE(m_attib);
+		m_attib = attibArray;
+		m_maxAtribCount = m_atribCount;
+	}
+}
+
+
+// Append a polygon to the soup being built between BeginPolygon/EndPolygon.
+// Each vertex is an 11-double record (see AddPoint).  Polygons with more
+// than three vertices are triangulated through a scratch dgPolyhedra and
+// each resulting triangle is appended; plain triangles are appended
+// directly, then discarded if degenerate (near-zero area).
+void dgMeshEffect::AddPolygon (int32_t count, const double* const vertexList, int32_t strideIndBytes, int32_t material)
+{
+	int32_t stride = int32_t (strideIndBytes / sizeof (double));
+
+	if (count > 3) {
+		// General polygon: triangulate in a scratch polyhedra first.
+		dgPolyhedra polygon;
+		int32_t indexList[256];
+		HACD_ASSERT (count < int32_t (sizeof (indexList)/sizeof(indexList[0])));
+		for (int32_t i = 0; i < count; i ++) {
+			indexList[i] = i;
+		}
+
+		polygon.BeginFace();
+		polygon.AddFace(count, indexList, NULL);
+		polygon.EndFace();
+		polygon.Triangulate(vertexList, strideIndBytes, NULL);
+
+		int32_t mark = polygon.IncLRU();
+		dgPolyhedra::Iterator iter (polygon);
+		for (iter.Begin(); iter; iter ++) {
+			dgEdge* const edge = &iter.GetNode()->GetInfo();
+			if ((edge->m_incidentFace > 0) && (edge->m_mark < mark)) {
+				int32_t i0 = edge->m_incidentVertex;
+				int32_t i1 = edge->m_next->m_incidentVertex;
+				int32_t i2 = edge->m_next->m_next->m_incidentVertex;
+				edge->m_mark = mark;
+				edge->m_next->m_mark = mark;
+				edge->m_next->m_next->m_mark = mark;
+
+// #ifdef _DEBUG
+// 			dgBigVector p0_ (&vertexList[i0 * stride]);
+// 			dgBigVector p1_ (&vertexList[i1 * stride]);
+// 			dgBigVector p2_ (&vertexList[i2 * stride]);
+// 			dgBigVector e1_ (p1_ - p0_);
+// 			dgBigVector e2_ (p2_ - p0_);
+// 			dgBigVector n_ (e1_ * e2_);
+// 			double mag2_ = n_ % n_;
+// 			HACD_ASSERT (mag2_ > float (DG_MESH_EFFECT_PRECISION_SCALE_INV * DG_MESH_EFFECT_PRECISION_SCALE_INV));
+// #endif
+
+				AddPoint(vertexList + i0 * stride, material);
+				AddPoint(vertexList + i1 * stride, material);
+				AddPoint(vertexList + i2 * stride, material);
+			}
+		}
+
+	} else {
+
+		AddPoint(vertexList, material);
+		AddPoint(vertexList + stride, material);
+		AddPoint(vertexList + stride + stride, material);
+
+		// Reject a degenerate triangle by popping the three points just
+		// added (positions were quantized, so compare against the
+		// quantization-scale area threshold).
+		const dgBigVector& p0 = m_points[m_pointCount - 3];
+		const dgBigVector& p1 = m_points[m_pointCount - 2];
+		const dgBigVector& p2 = m_points[m_pointCount - 1];
+		dgBigVector e1 (p1 - p0);
+		dgBigVector e2 (p2 - p0);
+		dgBigVector n (e1 * e2);
+		double mag3 = n % n;
+		if (mag3 < double (DG_MESH_EFFECT_PRECISION_SCALE_INV * DG_MESH_EFFECT_PRECISION_SCALE_INV)) {
+			m_pointCount -= 3;
+			m_atribCount -= 3;
+		}
+	}
+}
+
+
+// Single-precision entry point: widen each 11-float vertex record
+// (position xyzw, normal, uv0, uv1) into a dgVertexAtribute and forward
+// to the double-precision AddPolygon overload.
+void dgMeshEffect::AddPolygon (int32_t count, const float* const vertexList, int32_t strideIndBytes, int32_t material)
+{
+	dgVertexAtribute points[256];
+	HACD_ASSERT (count < int32_t (sizeof (points)/sizeof (points[0])));
+
+	uint32_t stride = (uint32_t)strideIndBytes / sizeof (float);
+	for (uint32_t i = 0; i < (uint32_t)count; i ++)
+	{
+		points[i].m_vertex.m_x = vertexList[i * stride + 0];
+		points[i].m_vertex.m_y = vertexList[i * stride + 1];
+		points[i].m_vertex.m_z = vertexList[i * stride + 2];
+		points[i].m_vertex.m_w = vertexList[i * stride + 3];
+
+		points[i].m_normal_x = vertexList[i * stride + 4];
+		points[i].m_normal_y = vertexList[i * stride + 5];
+		points[i].m_normal_z = vertexList[i * stride + 6];
+
+		points[i].m_u0 = vertexList[i * stride + 7];
+		points[i].m_v0 = vertexList[i * stride + 8];
+
+		points[i].m_u1 = vertexList[i * stride + 9];
+		// Bug fix: the original assigned m_u1 twice (stride+9 then
+		// stride+10), leaving m_v1 uninitialized; the second component
+		// of the uv1 channel belongs in m_v1.
+		points[i].m_v1 = vertexList[i * stride + 10];
+	}
+
+	AddPolygon (count, &points[0].m_vertex.m_x, sizeof (dgVertexAtribute), material);
+}
+
void dgMeshEffect::EndPolygon (double tol)
{
	// Close a BeginPolygon()/AddPolygon() sequence: weld duplicate points and
	// attributes within 'tol', insert one face per accumulated triangle
	// (duplicating vertices when a face cannot be added without making the
	// mesh non-manifold), then finish the polyhedra and repair T-joints.
	dgStack<int32_t>indexMap(m_pointCount);
	dgStack<int32_t>attrIndexMap(m_atribCount);

	// Points were accumulated three per triangle by AddPolygon.
	int32_t triangCount = m_pointCount / 3;
	// Weld coincident entries; the maps translate old flat triangle-list
	// indices into the compacted point/attribute arrays.
	m_pointCount = dgVertexListToIndexList (&m_points[0].m_x, sizeof (dgBigVector), sizeof (dgBigVector)/sizeof (double), m_pointCount, &indexMap[0], tol);
	m_atribCount = dgVertexListToIndexList (&m_attib[0].m_vertex.m_x, sizeof (dgVertexAtribute), sizeof (dgVertexAtribute)/sizeof (double), m_atribCount, &attrIndexMap[0], tol);

	for (int32_t i = 0; i < triangCount; i ++) {
		int32_t index[3];
		int64_t userdata[3];

		index[0] = indexMap[i * 3 + 0];
		index[1] = indexMap[i * 3 + 1];
		index[2] = indexMap[i * 3 + 2];


		dgBigVector e1 (m_points[index[1]] - m_points[index[0]]);
		dgBigVector e2 (m_points[index[2]] - m_points[index[0]]);

		// Skip triangles whose area (cross-product magnitude) is near zero.
		dgBigVector n (e1 * e2);
		double mag2 = n % n;
		if (mag2 > double (1.0e-12f)) {
			userdata[0] = attrIndexMap[i * 3 + 0];
			userdata[1] = attrIndexMap[i * 3 + 1];
			userdata[2] = attrIndexMap[i * 3 + 2];
			dgEdge* const edge = AddFace (3, index, userdata);
			if (!edge) {
				// Insertion failed (topological conflict): clone the three
				// points so this face gets private vertices and retry.
				HACD_ASSERT ((m_pointCount + 3) <= m_maxPointCount);

				m_points[m_pointCount + 0] = m_points[index[0]];
				m_points[m_pointCount + 1] = m_points[index[1]];
				m_points[m_pointCount + 2] = m_points[index[2]];

				index[0] = m_pointCount + 0;
				index[1] = m_pointCount + 1;
				index[2] = m_pointCount + 2;

				m_pointCount += 3;

				AddFace (3, index, userdata);
			}
		}
	}
	EndFace();

	RepairTJoints (true);


}
+
+
// Build the mesh from fully indexed streams: per-face index counts and
// materials plus separately indexed position/normal/uv0/uv1 channels.
// Faces that cannot be inserted into the current "layer" (topological
// conflicts) are retried on additional layers, distinguished by the w
// component of duplicated vertices.
void dgMeshEffect::BuildFromVertexListIndexList(
	int32_t faceCount, const int32_t* const faceIndexCount, const int32_t* const faceMaterialIndex,
	const float* const vertex, int32_t vertexStrideInBytes, const int32_t* const vertexIndex,
	const float* const normal, int32_t normalStrideInBytes, const int32_t* const normalIndex,
	const float* const uv0, int32_t uv0StrideInBytes, const int32_t* const uv0Index,
	const float* const uv1, int32_t uv1StrideInBytes, const int32_t* const uv1Index)
{
	BeginPolygon ();

	// calculate vertex Count (highest referenced vertex index + 1)
	int32_t acc = 0;
	int32_t vertexCount = 0;
	for (int32_t j = 0; j < faceCount; j ++) {
		int count = faceIndexCount[j];
		for (int i = 0; i < count; i ++) {
			vertexCount = GetMax(vertexCount, vertexIndex[acc + i] + 1);
		}
		acc += count;
	}

	int32_t layerCountBase = 0;
	int32_t vertexStride = int32_t (vertexStrideInBytes / sizeof (float));
	for (int i = 0; i < vertexCount; i ++) {
		int index = i * vertexStride;
		AddVertex (dgBigVector (vertex[index + 0], vertex[index + 1], vertex[index + 2], vertex[index + 3]));
		// The w channel encodes a layer id; this bumps the running base by 1
		// whenever w exceeds it (bool promoted to int). NOTE(review): assumes
		// layer ids are small consecutive integers — confirm against callers.
		layerCountBase += (vertex[index + 3]) > float(layerCountBase);
	}


	// Build one attribute record per face corner from the indexed channels.
	acc = 0;
	int32_t normalStride = int32_t (normalStrideInBytes / sizeof (float));
	int32_t uv0Stride = int32_t (uv0StrideInBytes / sizeof (float));
	int32_t uv1Stride = int32_t (uv1StrideInBytes / sizeof (float));
	for (int32_t j = 0; j < faceCount; j ++) {
		int32_t indexCount = faceIndexCount[j];
		int32_t materialIndex = faceMaterialIndex[j];
		for (int32_t i = 0; i < indexCount; i ++) {
			dgVertexAtribute point;
			int32_t index = vertexIndex[acc + i];
			point.m_vertex = m_points[index];

			index = normalIndex[(acc + i)] * normalStride;
			point.m_normal_x = normal[index + 0];
			point.m_normal_y = normal[index + 1];
			point.m_normal_z = normal[index + 2];

			index = uv0Index[(acc + i)] * uv0Stride;
			point.m_u0 = uv0[index + 0];
			point.m_v0 = uv0[index + 1];

			index = uv1Index[(acc + i)] * uv1Stride;
			point.m_u1 = uv1[index + 0];
			point.m_v1 = uv1[index + 1];

			point.m_material = materialIndex;
			AddAtribute(point);
		}
		acc += indexCount;
	}


	// Weld duplicate attribute records; attrIndexMap remaps corner -> record.
	dgStack<int32_t>attrIndexMap(m_atribCount);
	m_atribCount = dgVertexListToIndexList (&m_attib[0].m_vertex.m_x, sizeof (dgVertexAtribute), sizeof (dgVertexAtribute) / sizeof (double), m_atribCount, &attrIndexMap[0], DG_VERTEXLIST_INDEXLIST_TOL);

	bool hasFaces = true;
	dgStack<int8_t> faceMark (faceCount);
	memset (&faceMark[0], 1, size_t (faceMark.GetSizeInBytes()));

	// Insert faces layer by layer: faces that conflict with already-inserted
	// topology are deferred to the next layer, which gets a duplicated set of
	// vertices (offset by vertexBank, tagged via the w component).
	int32_t layerCount = 0;
	while (hasFaces) {
		acc = 0;
		hasFaces = false;
		int32_t vertexBank = layerCount * vertexCount;
		for (int32_t j = 0; j < faceCount; j ++) {
			int32_t index[256];
			int64_t userdata[256];

			int indexCount = faceIndexCount[j];
			HACD_ASSERT (indexCount < int32_t (sizeof (index) / sizeof (index[0])));

			if (faceMark[j]) {
				for (int i = 0; i < indexCount; i ++) {
					index[i] = vertexIndex[acc + i] + vertexBank;
					userdata[i] = attrIndexMap[acc + i];
				}
				dgEdge* const edge = AddFace (indexCount, index, userdata);
				if (edge) {
					faceMark[j] = 0;
				} else {
					// check if the face is not degenerated
					bool degeneratedFace = false;
					for (int i = 0; i < indexCount - 1; i ++) {
						for (int k = i + 1; k < indexCount; k ++) {
							if (index[i] == index[k]) {
								degeneratedFace = true;
							}
						}
					}
					if (degeneratedFace) {
						// Repeated vertex in the face: drop it for good.
						faceMark[j] = 0;
					} else {
						// Valid face that conflicted: retry on a new layer.
						hasFaces = true;
					}
				}
			}
			acc += indexCount;
		}
		if (hasFaces) {
			layerCount ++;
			for (int i = 0; i < vertexCount; i ++) {
				int index = i * vertexStride;
				AddVertex (dgBigVector (vertex[index + 0], vertex[index + 1], vertex[index + 2], double (layerCount + layerCountBase)));
			}
		}
	}

	EndFace();
	PackVertexArrays ();
//	RepairTJoints (true);
}
+
+
// Number of faces in the mesh; thin forwarder to the polyhedra face count.
int32_t dgMeshEffect::GetTotalFaceCount() const
{
	return GetFaceCount();
}
+
+int32_t dgMeshEffect::GetTotalIndexCount() const
+{
+ Iterator iter (*this);
+ int32_t count = 0;
+ int32_t mark = IncLRU();
+ for (iter.Begin(); iter; iter ++) {
+ dgEdge* const edge = &(*iter);
+ if (edge->m_mark == mark) {
+ continue;
+ }
+
+ if (edge->m_incidentFace < 0) {
+ continue;
+ }
+
+ dgEdge* ptr = edge;
+ do {
+ count ++;
+ ptr->m_mark = mark;
+ ptr = ptr->m_next;
+ } while (ptr != edge);
+ }
+ return count;
+}
+
+void dgMeshEffect::GetFaces (int32_t* const facesIndex, int32_t* const materials, void** const faceNodeList) const
+{
+ Iterator iter (*this);
+
+ int32_t faces = 0;
+ int32_t indexCount = 0;
+ int32_t mark = IncLRU();
+ for (iter.Begin(); iter; iter ++) {
+ dgEdge* const edge = &(*iter);
+ if (edge->m_mark == mark) {
+ continue;
+ }
+
+ if (edge->m_incidentFace < 0) {
+ continue;
+ }
+
+ int32_t faceCount = 0;
+ dgEdge* ptr = edge;
+ do {
+// indexList[indexCount] = int32_t (ptr->m_userData);
+ faceNodeList[indexCount] = GetNodeFromInfo (*ptr);
+ indexCount ++;
+ faceCount ++;
+ ptr->m_mark = mark;
+ ptr = ptr->m_next;
+ } while (ptr != edge);
+
+ facesIndex[faces] = faceCount;
+ materials[faces] = dgFastInt(m_attib[int32_t (edge->m_userData)].m_material);
+ faces ++;
+ }
+}
+
+void* dgMeshEffect::GetFirstVertex ()
+{
+ Iterator iter (*this);
+ iter.Begin();
+
+ dgTreeNode* node = NULL;
+ if (iter) {
+ int32_t mark = IncLRU();
+ node = iter.GetNode();
+
+ dgEdge* const edge = &node->GetInfo();
+ dgEdge* ptr = edge;
+ do {
+ ptr->m_mark = mark;
+ ptr = ptr->m_twin->m_next;
+ } while (ptr != edge);
+ }
+ return node;
+}
+
+void* dgMeshEffect::GetNextVertex (const void* const vertex)
+{
+ dgTreeNode* node = (dgTreeNode*) vertex;
+ int32_t mark = node->GetInfo().m_mark;
+
+ Iterator iter (*this);
+ iter.Set (node);
+ for (iter ++; iter; iter ++) {
+ dgTreeNode* node = iter.GetNode();
+ if (node->GetInfo().m_mark != mark) {
+ dgEdge* const edge = &node->GetInfo();
+ dgEdge* ptr = edge;
+ do {
+ ptr->m_mark = mark;
+ ptr = ptr->m_twin->m_next;
+ } while (ptr != edge);
+ return node;
+ }
+ }
+ return NULL;
+}
+
+int32_t dgMeshEffect::GetVertexIndex (const void* const vertex) const
+{
+ dgTreeNode* const node = (dgTreeNode*) vertex;
+ dgEdge* const edge = &node->GetInfo();
+ return edge->m_incidentVertex;
+}
+
+
+void* dgMeshEffect::GetFirstPoint ()
+{
+ Iterator iter (*this);
+ for (iter.Begin(); iter; iter ++) {
+ dgTreeNode* const node = iter.GetNode();
+ dgEdge* const edge = &node->GetInfo();
+ if (edge->m_incidentFace > 0) {
+ return node;
+ }
+ }
+ return NULL;
+}
+
+void* dgMeshEffect::GetNextPoint (const void* const point)
+{
+ Iterator iter (*this);
+ iter.Set ((dgTreeNode*) point);
+ for (iter ++; iter; iter ++) {
+ dgTreeNode* const node = iter.GetNode();
+ dgEdge* const edge = &node->GetInfo();
+ if (edge->m_incidentFace > 0) {
+ return node;
+ }
+ }
+ return NULL;
+}
+
+int32_t dgMeshEffect::GetPointIndex (const void* const point) const
+{
+ dgTreeNode* const node = (dgTreeNode*) point;
+ dgEdge* const edge = &node->GetInfo();
+ return int (edge->m_userData);
+}
+
// Map a point handle to its geometric vertex index; thin forwarder.
int32_t dgMeshEffect::GetVertexIndexFromPoint (const void* const point) const
{
	return GetVertexIndex (point);
}
+
+
// Connect the origins of e0 and e1 with a new edge/twin pair, splitting the
// region they share into two loops. Returns the new edge running from e1's
// origin to e0's origin, or NULL if the half-edges already exist.
// NOTE(review): assumes e0 and e1 lie on the same face loop — confirm at
// call sites; the relinking below is order-sensitive.
dgEdge* dgMeshEffect::ConectVertex (dgEdge* const e0, dgEdge* const e1)
{
	dgEdge* const edge = AddHalfEdge(e1->m_incidentVertex, e0->m_incidentVertex);
	dgEdge* const twin = AddHalfEdge(e0->m_incidentVertex, e1->m_incidentVertex);
	// Either both half-edges were created or neither was.
	HACD_ASSERT ((edge && twin) || !(edge || twin));
	if (edge) {
		edge->m_twin = twin;
		twin->m_twin = edge;

		// Each new half-edge inherits face and attribute from the loop side
		// it is spliced into.
		edge->m_incidentFace = e0->m_incidentFace;
		twin->m_incidentFace = e1->m_incidentFace;

		edge->m_userData = e1->m_userData;
		twin->m_userData = e0->m_userData;

		// Splice: edge goes ... e1->m_prev -> edge -> e0 ...,
		//         twin goes ... e0->m_prev -> twin -> e1 ...
		edge->m_next = e0;
		edge->m_prev = e1->m_prev;

		twin->m_next = e1;
		twin->m_prev = e0->m_prev;

		e0->m_prev->m_next = twin;
		e0->m_prev = edge;

		e1->m_prev->m_next = edge;
		e1->m_prev = twin;
	}

	return edge;
}
+
+
+//int32_t dgMeshEffect::GetVertexAttributeIndex (const void* vertex) const
+//{
+// dgTreeNode* node = (dgTreeNode*) vertex;
+// dgEdge* const edge = &node->GetInfo();
+// return int (edge->m_userData);
+//}
+
+
+void* dgMeshEffect::GetFirstEdge ()
+{
+ Iterator iter (*this);
+ iter.Begin();
+
+ dgTreeNode* node = NULL;
+ if (iter) {
+ int32_t mark = IncLRU();
+
+ node = iter.GetNode();
+
+ dgEdge* const edge = &node->GetInfo();
+ edge->m_mark = mark;
+ edge->m_twin->m_mark = mark;
+ }
+ return node;
+}
+
+void* dgMeshEffect::GetNextEdge (const void* const edge)
+{
+ dgTreeNode* node = (dgTreeNode*) edge;
+ int32_t mark = node->GetInfo().m_mark;
+
+ Iterator iter (*this);
+ iter.Set (node);
+ for (iter ++; iter; iter ++) {
+ dgTreeNode* node = iter.GetNode();
+ if (node->GetInfo().m_mark != mark) {
+ node->GetInfo().m_mark = mark;
+ node->GetInfo().m_twin->m_mark = mark;
+ return node;
+ }
+ }
+ return NULL;
+}
+
+void dgMeshEffect::GetEdgeIndex (const void* const edge, int32_t& v0, int32_t& v1) const
+{
+ dgTreeNode* node = (dgTreeNode*) edge;
+ v0 = node->GetInfo().m_incidentVertex;
+ v1 = node->GetInfo().m_twin->m_incidentVertex;
+}
+
+//void dgMeshEffect::GetEdgeAttributeIndex (const void* edge, int32_t& v0, int32_t& v1) const
+//{
+// dgTreeNode* node = (dgTreeNode*) edge;
+// v0 = int (node->GetInfo().m_userData);
+// v1 = int (node->GetInfo().m_twin->m_userData);
+//}
+
+
+void* dgMeshEffect::GetFirstFace ()
+{
+ Iterator iter (*this);
+ iter.Begin();
+
+ dgTreeNode* node = NULL;
+ if (iter) {
+ int32_t mark = IncLRU();
+ node = iter.GetNode();
+
+ dgEdge* const edge = &node->GetInfo();
+ dgEdge* ptr = edge;
+ do {
+ ptr->m_mark = mark;
+ ptr = ptr->m_next;
+ } while (ptr != edge);
+ }
+
+ return node;
+}
+
+void* dgMeshEffect::GetNextFace (const void* const face)
+{
+ dgTreeNode* node = (dgTreeNode*) face;
+ int32_t mark = node->GetInfo().m_mark;
+
+ Iterator iter (*this);
+ iter.Set (node);
+ for (iter ++; iter; iter ++) {
+ dgTreeNode* node = iter.GetNode();
+ if (node->GetInfo().m_mark != mark) {
+ dgEdge* const edge = &node->GetInfo();
+ dgEdge* ptr = edge;
+ do {
+ ptr->m_mark = mark;
+ ptr = ptr->m_next;
+ } while (ptr != edge);
+ return node;
+ }
+ }
+ return NULL;
+}
+
+
+int32_t dgMeshEffect::IsFaceOpen (const void* const face) const
+{
+ dgTreeNode* node = (dgTreeNode*) face;
+ dgEdge* const edge = &node->GetInfo();
+ return (edge->m_incidentFace > 0) ? 0 : 1;
+}
+
+int32_t dgMeshEffect::GetFaceMaterial (const void* const face) const
+{
+ dgTreeNode* const node = (dgTreeNode*) face;
+ dgEdge* const edge = &node->GetInfo();
+ return int32_t (m_attib[edge->m_userData].m_material);
+}
+
+int32_t dgMeshEffect::GetFaceIndexCount (const void* const face) const
+{
+ int count = 0;
+ dgTreeNode* node = (dgTreeNode*) face;
+ dgEdge* const edge = &node->GetInfo();
+ dgEdge* ptr = edge;
+ do {
+ count ++;
+ ptr = ptr->m_next;
+ } while (ptr != edge);
+ return count;
+}
+
+void dgMeshEffect::GetFaceIndex (const void* const face, int* const indices) const
+{
+ int count = 0;
+ dgTreeNode* node = (dgTreeNode*) face;
+ dgEdge* const edge = &node->GetInfo();
+ dgEdge* ptr = edge;
+ do {
+ indices[count] = ptr->m_incidentVertex;
+ count ++;
+ ptr = ptr->m_next;
+ } while (ptr != edge);
+}
+
+void dgMeshEffect::GetFaceAttributeIndex (const void* const face, int* const indices) const
+{
+ int count = 0;
+ dgTreeNode* node = (dgTreeNode*) face;
+ dgEdge* const edge = &node->GetInfo();
+ dgEdge* ptr = edge;
+ do {
+ indices[count] = int (ptr->m_userData);
+ count ++;
+ ptr = ptr->m_next;
+ } while (ptr != edge);
+}
+
+
+
+
+/*
+int32_t GetTotalFaceCount() const;
+{
+ int32_t mark;
+ int32_t count;
+ int32_t materialCount;
+ int32_t materials[256];
+ int32_t streamIndexMap[256];
+ dgIndexArray* array;
+
+ count = 0;
+ materialCount = 0;
+
+ array = (dgIndexArray*) HACD_ALLOC (4 * sizeof (int32_t) * GetCount() + sizeof (dgIndexArray) + 2048);
+ array->m_indexList = (int32_t*)&array[1];
+
+ mark = IncLRU();
+ dgPolyhedra::Iterator iter (*this);
+ memset(streamIndexMap, 0, sizeof (streamIndexMap));
+ for(iter.Begin(); iter; iter ++){
+
+ dgEdge* const edge;
+ edge = &(*iter);
+ if ((edge->m_incidentFace >= 0) && (edge->m_mark != mark)) {
+ dgEdge* ptr;
+ int32_t hashValue;
+ int32_t index0;
+ int32_t index1;
+
+ ptr = edge;
+ ptr->m_mark = mark;
+ index0 = int32_t (ptr->m_userData);
+
+ ptr = ptr->m_next;
+ ptr->m_mark = mark;
+ index1 = int32_t (ptr->m_userData);
+
+ ptr = ptr->m_next;
+ do {
+ ptr->m_mark = mark;
+
+ array->m_indexList[count * 4 + 0] = index0;
+ array->m_indexList[count * 4 + 1] = index1;
+ array->m_indexList[count * 4 + 2] = int32_t (ptr->m_userData);
+ array->m_indexList[count * 4 + 3] = m_attib[int32_t (edge->m_userData)].m_material;
+ index1 = int32_t (ptr->m_userData);
+
+ hashValue = array->m_indexList[count * 4 + 3] & 0xff;
+ streamIndexMap[hashValue] ++;
+ materials[hashValue] = array->m_indexList[count * 4 + 3];
+ count ++;
+
+ ptr = ptr->m_next;
+ } while (ptr != edge);
+ }
+ }
+*/
+
+
+
+
+void dgMeshEffect::GetVertexStreams (int32_t vetexStrideInByte, float* const vertex,
+ int32_t normalStrideInByte, float* const normal,
+ int32_t uvStrideInByte0, float* const uv0,
+ int32_t uvStrideInByte1, float* const uv1)
+{
+ uvStrideInByte0 /= sizeof (float);
+ uvStrideInByte1 /= sizeof (float);
+ vetexStrideInByte /= sizeof (float);
+ normalStrideInByte /= sizeof (float);
+ for (int32_t i = 0; i < m_atribCount; i ++) {
+ int32_t j = i * vetexStrideInByte;
+ vertex[j + 0] = float (m_attib[i].m_vertex.m_x);
+ vertex[j + 1] = float (m_attib[i].m_vertex.m_y);
+ vertex[j + 2] = float (m_attib[i].m_vertex.m_z);
+
+ j = i * normalStrideInByte;
+ normal[j + 0] = float (m_attib[i].m_normal_x);
+ normal[j + 1] = float (m_attib[i].m_normal_y);
+ normal[j + 2] = float (m_attib[i].m_normal_z);
+
+ j = i * uvStrideInByte1;
+ uv1[j + 0] = float (m_attib[i].m_u1);
+ uv1[j + 1] = float (m_attib[i].m_v1);
+
+ j = i * uvStrideInByte0;
+ uv0[j + 0] = float (m_attib[i].m_u0);
+ uv0[j + 1] = float (m_attib[i].m_v0);
+ }
+}
+
+
// Unimplemented stub: intended to produce independently indexed streams for
// position/normal/uv0/uv1. The legacy implementation is preserved in the
// comment block below for reference; all parameters are deliberately
// ignored until it is ported.
void dgMeshEffect::GetIndirectVertexStreams(
	int32_t vetexStrideInByte,
	double* const vertex,
	int32_t* const vertexIndices,
	int32_t* const vertexCount,
	int32_t normalStrideInByte,
	double* const normal,
	int32_t* const normalIndices,
	int32_t* const normalCount,
	int32_t uvStrideInByte0,
	double* const uv0,
	int32_t* const uvIndices0,
	int32_t* const uvCount0,
	int32_t uvStrideInByte1,
	double* const uv1,
	int32_t* const uvIndices1,
	int32_t* const uvCount1)
{
	HACD_UNUSED(vetexStrideInByte);
	HACD_UNUSED(vertex);
	HACD_UNUSED(vertexIndices);
	HACD_UNUSED(vertexCount);
	HACD_UNUSED(normalStrideInByte);
	HACD_UNUSED(normal);
	HACD_UNUSED(normalIndices);
	HACD_UNUSED(normalCount);
	HACD_UNUSED(uvStrideInByte0);
	HACD_UNUSED(uv0);
	HACD_UNUSED(uvIndices0);
	HACD_UNUSED(uvCount0);
	HACD_UNUSED(uvStrideInByte1);
	HACD_UNUSED(uv1);
	HACD_UNUSED(uvIndices1);
	HACD_UNUSED(uvCount1);
/*
	GetVertexStreams (vetexStrideInByte, vertex, normalStrideInByte, normal, uvStrideInByte0, uv0, uvStrideInByte1, uv1);

	*vertexCount = dgVertexListToIndexList(vertex, vetexStrideInByte, vetexStrideInByte, 0, m_atribCount, vertexIndices, float (0.0f));
	*normalCount = dgVertexListToIndexList(normal, normalStrideInByte, normalStrideInByte, 0, m_atribCount, normalIndices, float (0.0f));

	dgTriplex* const tmpUV = (dgTriplex*) HACD_ALLOC (int32_t (sizeof (dgTriplex) * m_atribCount));
	int32_t stride = int32_t (uvStrideInByte1 /sizeof (float));
	for (int32_t i = 0; i < m_atribCount; i ++){
		tmpUV[i].m_x = uv1[i * stride + 0];
		tmpUV[i].m_y = uv1[i * stride + 1];
		tmpUV[i].m_z = float (0.0f);
	}

	int32_t count = dgVertexListToIndexList(&tmpUV[0].m_x, sizeof (dgTriplex), sizeof (dgTriplex), 0, m_atribCount, uvIndices1, float (0.0f));
	for (int32_t i = 0; i < count; i ++){
		uv1[i * stride + 0] = tmpUV[i].m_x;
		uv1[i * stride + 1] = tmpUV[i].m_y;
	}
	*uvCount1 = count;

	stride = int32_t (uvStrideInByte0 /sizeof (float));
	for (int32_t i = 0; i < m_atribCount; i ++){
		tmpUV[i].m_x = uv0[i * stride + 0];
		tmpUV[i].m_y = uv0[i * stride + 1];
		tmpUV[i].m_z = float (0.0f);
	}
	count = dgVertexListToIndexList(&tmpUV[0].m_x, sizeof (dgTriplex), sizeof (dgTriplex), 0, m_atribCount, uvIndices0, float (0.0f));
	for (int32_t i = 0; i < count; i ++){
		uv0[i * stride + 0] = tmpUV[i].m_x;
		uv0[i * stride + 1] = tmpUV[i].m_y;
	}
	*uvCount0 = count;

	HACD_FREE (tmpUV);
*/
}
+
// Triangulate every closed face into a flat list of (i0, i1, i2, material)
// quadruples (fan triangulation around the face's first corner), bucketed
// by material id. The returned handle is a single allocation that must be
// released with MaterialGeomteryEnd.
dgMeshEffect::dgIndexArray* dgMeshEffect::MaterialGeometryBegin()
{
	// Materials are bucketed by the low 8 bits of their id; ids that collide
	// modulo 256 share a bucket (the last writer wins in 'materials').
	int32_t materials[256];
	int32_t streamIndexMap[256];

	int32_t count = 0;
	int32_t materialCount = 0;

	// One allocation holds the header followed by the triangle index list.
	dgIndexArray* const array = (dgIndexArray*) HACD_ALLOC (size_t (4 * sizeof (int32_t) * GetCount() + sizeof (dgIndexArray) + 2048));
	array->m_indexList = (int32_t*)&array[1];

	int32_t mark = IncLRU();
	dgPolyhedra::Iterator iter (*this);
	memset(streamIndexMap, 0, sizeof (streamIndexMap));
	for(iter.Begin(); iter; iter ++){
		dgEdge* const edge = &(*iter);
		if ((edge->m_incidentFace >= 0) && (edge->m_mark != mark)) {
			// Fan-triangulate: first corner stays fixed, second slides along
			// the face loop.
			dgEdge* ptr = edge;
			ptr->m_mark = mark;
			int32_t index0 = int32_t (ptr->m_userData);

			ptr = ptr->m_next;
			ptr->m_mark = mark;
			int32_t index1 = int32_t (ptr->m_userData);

			ptr = ptr->m_next;
			do {
				ptr->m_mark = mark;

				array->m_indexList[count * 4 + 0] = index0;
				array->m_indexList[count * 4 + 1] = index1;
				array->m_indexList[count * 4 + 2] = int32_t (ptr->m_userData);
				array->m_indexList[count * 4 + 3] = int32_t (m_attib[int32_t (edge->m_userData)].m_material);
				index1 = int32_t (ptr->m_userData);

				int32_t hashValue = array->m_indexList[count * 4 + 3] & 0xff;
				streamIndexMap[hashValue] ++;
				materials[hashValue] = array->m_indexList[count * 4 + 3];
				count ++;

				ptr = ptr->m_next;
			} while (ptr != edge);
		}
	}

	array->m_indexCount = count;
	// Provisional value; overwritten with the real bucket count below.
	array->m_materialCount = materialCount;

	// Compact the non-empty material buckets.
	count = 0;
	for (int32_t i = 0; i < 256;i ++) {
		if (streamIndexMap[i]) {
			array->m_materials[count] = materials[i];
			array->m_materialsIndexCount[count] = streamIndexMap[i] * 3;
			count ++;
		}
	}

	array->m_materialCount = count;

	return array;
}
+
// Release the handle returned by MaterialGeometryBegin (single allocation).
// Name keeps the historical "Geomtery" spelling for API compatibility.
void dgMeshEffect::MaterialGeomteryEnd(dgIndexArray* const handle)
{
	HACD_FREE (handle);
}
+
+
// First material slot of the handle, or -1 when the handle has none.
int32_t dgMeshEffect::GetFirstMaterial (dgIndexArray* const handle)
{
	return GetNextMaterial (handle, -1);
}
+
+int32_t dgMeshEffect::GetNextMaterial (dgIndexArray* const handle, int32_t materialId)
+{
+ materialId ++;
+ if(materialId >= handle->m_materialCount) {
+ materialId = -1;
+ }
+ return materialId;
+}
+
+void dgMeshEffect::GetMaterialGetIndexStream (dgIndexArray* const handle, int32_t materialHandle, int32_t* const indexArray)
+{
+ int32_t index;
+ int32_t textureID;
+
+ index = 0;
+ textureID = handle->m_materials[materialHandle];
+ for (int32_t j = 0; j < handle->m_indexCount; j ++) {
+ if (handle->m_indexList[j * 4 + 3] == textureID) {
+ indexArray[index + 0] = handle->m_indexList[j * 4 + 0];
+ indexArray[index + 1] = handle->m_indexList[j * 4 + 1];
+ indexArray[index + 2] = handle->m_indexList[j * 4 + 2];
+
+ index += 3;
+ }
+ }
+}
+
+void dgMeshEffect::GetMaterialGetIndexStreamShort (dgIndexArray* const handle, int32_t materialHandle, int16_t* const indexArray)
+{
+ int32_t index;
+ int32_t textureID;
+
+ index = 0;
+ textureID = handle->m_materials[materialHandle];
+ for (int32_t j = 0; j < handle->m_indexCount; j ++) {
+ if (handle->m_indexList[j * 4 + 3] == textureID) {
+ indexArray[index + 0] = (int16_t)handle->m_indexList[j * 4 + 0];
+ indexArray[index + 1] = (int16_t)handle->m_indexList[j * 4 + 1];
+ indexArray[index + 2] = (int16_t)handle->m_indexList[j * 4 + 2];
+ index += 3;
+ }
+ }
+}
+
+/*
+int32_t dgMeshEffect::GetEffectiveVertexCount() const
+{
+ int32_t mark;
+ int32_t count;
+
+ count = 0;
+ mark = IncLRU();
+ dgPolyhedra::Iterator iter (*this);
+ for (iter.Begin(); iter; iter ++){
+ dgEdge* vertex;
+
+ vertex = &(*iter);
+ if (vertex->m_mark != mark) {
+ dgEdge* ptr;
+
+ ptr = vertex;
+ do {
+ ptr->m_mark = mark;
+ ptr = ptr->m_twin->m_next;
+ } while (ptr != vertex);
+ count ++;
+ }
+ }
+ return count;
+}
+*/
+
// Build a convex hull over the unique vertices of this mesh. Each vertex is
// visited once via the LRU mark on its edge ring. Caller owns the returned
// hull object.
dgConvexHull3d * dgMeshEffect::CreateConvexHull(double tolerance,int32_t maxVertexCount) const
{
	dgConvexHull3d *ret = NULL;

	// Over-sized scratch pool; the capacity guard below keeps writes in range.
	dgStack<dgBigVector> poolPtr(m_pointCount * 2);
	dgBigVector* const pool = &poolPtr[0];

	int32_t count = 0;
	int32_t mark = IncLRU();
	dgPolyhedra::Iterator iter (*this);
	for (iter.Begin(); iter; iter ++)
	{
		dgEdge* const vertex = &(*iter);
		if (vertex->m_mark != mark)
		{
			// Mark the entire edge ring so this vertex is collected once.
			dgEdge* ptr = vertex;
			do {
				ptr->m_mark = mark;
				ptr = ptr->m_twin->m_next;
			} while (ptr != vertex);

			if (count < int32_t (poolPtr.GetElementsCount()))
			{
				const dgBigVector p = m_points[vertex->m_incidentVertex];
				pool[count] = p;
				count++;
			}
		}
	}

	ret = HACD_NEW(dgConvexHull3d)((const double *)pool,sizeof(dgBigVector),count,tolerance,maxVertexCount);

	return ret;
}
+
+
// Transform all point and attribute positions by 'matrix'; normals are
// transformed with the translation zeroed out. NOTE(review): normals use the
// matrix directly, not the inverse transpose — presumably transforms here
// are rigid (no non-uniform scale); confirm at call sites.
void dgMeshEffect::TransformMesh (const dgMatrix& matrix)
{
	dgMatrix normalMatrix (matrix);
	normalMatrix.m_posit = dgVector (float (0.0f), float (0.0f), float (0.0f), float (1.0f));

	matrix.TransformTriplex (&m_points->m_x, sizeof (dgBigVector), &m_points->m_x, sizeof (dgBigVector), m_pointCount);
	matrix.TransformTriplex (&m_attib[0].m_vertex.m_x, sizeof (dgVertexAtribute), &m_attib[0].m_vertex.m_x, sizeof (dgVertexAtribute), m_atribCount);
	normalMatrix.TransformTriplex (&m_attib[0].m_normal_x, sizeof (dgVertexAtribute), &m_attib[0].m_normal_x, sizeof (dgVertexAtribute), m_atribCount);
}
+
+
+dgMeshEffect::dgVertexAtribute dgMeshEffect::InterpolateEdge (dgEdge* const edge, double param) const
+{
+ dgVertexAtribute attrEdge;
+ double t1 = param;
+ double t0 = double (1.0f) - t1;
+ HACD_ASSERT (t1 >= double(0.0f));
+ HACD_ASSERT (t1 <= double(1.0f));
+
+ const dgVertexAtribute& attrEdge0 = m_attib[edge->m_userData];
+ const dgVertexAtribute& attrEdge1 = m_attib[edge->m_next->m_userData];
+
+ attrEdge.m_vertex.m_x = attrEdge0.m_vertex.m_x * t0 + attrEdge1.m_vertex.m_x * t1;
+ attrEdge.m_vertex.m_y = attrEdge0.m_vertex.m_y * t0 + attrEdge1.m_vertex.m_y * t1;
+ attrEdge.m_vertex.m_z = attrEdge0.m_vertex.m_z * t0 + attrEdge1.m_vertex.m_z * t1;
+ attrEdge.m_vertex.m_w = float(0.0f);
+ attrEdge.m_normal_x = attrEdge0.m_normal_x * t0 + attrEdge1.m_normal_x * t1;
+ attrEdge.m_normal_y = attrEdge0.m_normal_y * t0 + attrEdge1.m_normal_y * t1;
+ attrEdge.m_normal_z = attrEdge0.m_normal_z * t0 + attrEdge1.m_normal_z * t1;
+ attrEdge.m_u0 = attrEdge0.m_u0 * t0 + attrEdge1.m_u0 * t1;
+ attrEdge.m_v0 = attrEdge0.m_v0 * t0 + attrEdge1.m_v0 * t1;
+ attrEdge.m_u1 = attrEdge0.m_u1 * t0 + attrEdge1.m_u1 * t1;
+ attrEdge.m_v1 = attrEdge0.m_v1 * t0 + attrEdge1.m_v1 * t1;
+ attrEdge.m_material = attrEdge0.m_material;
+ return attrEdge;
+}
+
// Disabled consistency check: asserts immediately and returns false. The
// intended implementation (verifying each corner attribute position matches
// its incident vertex) is preserved in the comment block below.
bool dgMeshEffect::Sanity () const
{
	HACD_ASSERT (0);
return false;
/*
	Iterator iter (*this);
	for (iter.Begin(); iter; iter ++) {
		const dgEdge* const edge = &iter.GetNode()->GetInfo();
		if (edge->m_incidentFace > 0) {
			const dgVertexAtribute& attrEdge0 = m_attib[edge->m_userData];
			dgVector p0 (m_points[edge->m_incidentVertex]);
			dgVector q0 (attrEdge0.m_vertex);
			dgVector delta0 (p0 - q0);
			float error0 = delta0 % delta0;
			if (error0 > float (1.0e-15f)) {
				return false;
			}

			const dgVertexAtribute& attrEdge1 = m_attib[edge->m_next->m_userData];
			dgVector p1 (m_points[edge->m_next->m_incidentVertex]);
			dgVector q1 (attrEdge1.m_vertex);
			dgVector delta1 (p1 - q1);
			float error1 = delta1 % delta1;
			if (error1 > float (1.0e-15f)) {
				return false;
			}
		}
	}
	return true;
*/
}
+
+
// Split 'edge' (and its twin) at parametric position 'param', adding the new
// point plus two interpolated attribute records (one for each side of the
// edge). Returns the half-edge from the original origin to the new vertex.
dgEdge* dgMeshEffect::InsertEdgeVertex (dgEdge* const edge, double param)
{

	dgEdge* const twin = edge->m_twin;
	dgVertexAtribute attrEdge (InterpolateEdge (edge, param));
	dgVertexAtribute attrTwin (InterpolateEdge (twin, float (1.0f) - param));

	// Both sides must reference the exact same geometric position.
	attrTwin.m_vertex = attrEdge.m_vertex;
	// NOTE(review): AddPoint appends the new point (and, presumably, the
	// edge-side attribute); AddAtribute appends the twin-side attribute, so
	// the two newest attributes are m_atribCount-2 / m_atribCount-1 — used
	// below; confirm against AddPoint's definition.
	AddPoint(&attrEdge.m_vertex.m_x, dgFastInt (attrEdge.m_material));
	AddAtribute (attrTwin);

	int32_t edgeAttrV0 = int32_t (edge->m_userData);
	int32_t twinAttrV0 = int32_t (twin->m_userData);

	// Capture the neighbors before the split rewires the loops.
	dgEdge* const faceA0 = edge->m_next;
	dgEdge* const faceA1 = edge->m_prev;
	dgEdge* const faceB0 = twin->m_next;
	dgEdge* const faceB1 = twin->m_prev;

//	SpliteEdgeAndTriangulate (m_pointCount - 1, edge);
	SpliteEdge (m_pointCount - 1, edge);

	// Re-assign attribute indices on the four half-edges created/changed by
	// the split: new vertex corners get the fresh attributes, original
	// origins keep their old ones.
	faceA0->m_prev->m_userData = uint64_t (m_atribCount - 2);
	faceA1->m_next->m_userData = uint64_t (edgeAttrV0);

	faceB0->m_prev->m_userData = uint64_t (m_atribCount - 1);
	faceB1->m_next->m_userData = uint64_t (twinAttrV0);

	return faceA1->m_next;
}
+
+
+
// Interpolate an attribute record for 'srcPoint', which must lie on (or very
// near) the polygon 'face'. The face is fan-triangulated and each triangle
// is tested with barycentric coordinates; the tolerance is doubled up to
// four times for robustness on sliver faces.
dgMeshEffect::dgVertexAtribute dgMeshEffect::InterpolateVertex (const dgBigVector& srcPoint, dgEdge* const face) const
{
	//this should use Googol extended precision floats, because some face coming from Voronoi decomposition and booleans
	//clipping has extreme aspect ratios, for now just use float64
	const dgBigVector point (srcPoint);

	dgVertexAtribute attribute;
	memset (&attribute, 0, sizeof (attribute));
	double tol = float (1.0e-4f);
	for (int32_t i = 0; i < 4; i ++) {
		// Fan over the face: edge0 is fixed at the face's first corner,
		// (edge1, edge2) slide along the loop.
		dgEdge* ptr = face;
		dgEdge* const edge0 = ptr;
		dgBigVector q0 (m_points[ptr->m_incidentVertex]);

		ptr = ptr->m_next;
		const dgEdge* edge1 = ptr;
		dgBigVector q1 (m_points[ptr->m_incidentVertex]);

		ptr = ptr->m_next;
		const dgEdge* edge2 = ptr;
		do {
			const dgBigVector q2 (m_points[ptr->m_incidentVertex]);

			// Barycentric coordinates of 'point' in triangle (q0, q1, q2)
			// via the standard dot-product formulation.
			dgBigVector p10 (q1 - q0);
			dgBigVector p20 (q2 - q0);
			dgBigVector p_p0 (point - q0);
			dgBigVector p_p1 (point - q1);
			dgBigVector p_p2 (point - q2);

			double alpha1 = p10 % p_p0;
			double alpha2 = p20 % p_p0;
			double alpha3 = p10 % p_p1;
			double alpha4 = p20 % p_p1;
			double alpha5 = p10 % p_p2;
			double alpha6 = p20 % p_p2;

			double vc = alpha1 * alpha4 - alpha3 * alpha2;
			double vb = alpha5 * alpha2 - alpha1 * alpha6;
			double va = alpha3 * alpha6 - alpha5 * alpha4;
			double den = va + vb + vc;
			double minError = den * (-tol);
			double maxError = den * (float (1.0f) + tol);
			// Accept when all three barycentric weights lie in [-tol, 1+tol].
			if ((va > minError) && (vb > minError) && (vc > minError) && (va < maxError) && (vb < maxError) && (vc < maxError)) {
				edge2 = ptr;

				den = double (1.0f) / (va + vb + vc);

				// Normalized weights; note these intentionally shadow the
				// outer alpha1/alpha2 dot products above.
				double alpha0 = float (va * den);
				double alpha1 = float (vb * den);
				double alpha2 = float (vc * den);

				const dgVertexAtribute& attr0 = m_attib[edge0->m_userData];
				const dgVertexAtribute& attr1 = m_attib[edge1->m_userData];
				const dgVertexAtribute& attr2 = m_attib[edge2->m_userData];
				dgBigVector normal (attr0.m_normal_x * alpha0 + attr1.m_normal_x * alpha1 + attr2.m_normal_x * alpha2,
									attr0.m_normal_y * alpha0 + attr1.m_normal_y * alpha1 + attr2.m_normal_y * alpha2,
									attr0.m_normal_z * alpha0 + attr1.m_normal_z * alpha1 + attr2.m_normal_z * alpha2, float (0.0f));
				normal = normal.Scale (double (1.0f) / sqrt (normal % normal));

				#ifdef _DEBUG
					dgBigVector testPoint (attr0.m_vertex.m_x * alpha0 + attr1.m_vertex.m_x * alpha1 + attr2.m_vertex.m_x * alpha2,
										   attr0.m_vertex.m_y * alpha0 + attr1.m_vertex.m_y * alpha1 + attr2.m_vertex.m_y * alpha2,
										   attr0.m_vertex.m_z * alpha0 + attr1.m_vertex.m_z * alpha1 + attr2.m_vertex.m_z * alpha2, float (0.0f));
					HACD_ASSERT (fabs (testPoint.m_x - point.m_x) < float (1.0e-2f));
					HACD_ASSERT (fabs (testPoint.m_y - point.m_y) < float (1.0e-2f));
					HACD_ASSERT (fabs (testPoint.m_z - point.m_z) < float (1.0e-2f));
				#endif


				attribute.m_vertex.m_x = point.m_x;
				attribute.m_vertex.m_y = point.m_y;
				attribute.m_vertex.m_z = point.m_z;
				attribute.m_vertex.m_w = point.m_w;
				attribute.m_normal_x = normal.m_x;
				attribute.m_normal_y = normal.m_y;
				attribute.m_normal_z = normal.m_z;
				attribute.m_u0 = attr0.m_u0 * alpha0 + attr1.m_u0 * alpha1 + attr2.m_u0 * alpha2;
				attribute.m_v0 = attr0.m_v0 * alpha0 + attr1.m_v0 * alpha1 + attr2.m_v0 * alpha2;
				attribute.m_u1 = attr0.m_u1 * alpha0 + attr1.m_u1 * alpha1 + attr2.m_u1 * alpha2;
				attribute.m_v1 = attr0.m_v1 * alpha0 + attr1.m_v1 * alpha1 + attr2.m_v1 * alpha2;

				attribute.m_material = attr0.m_material;
				HACD_ASSERT (attr0.m_material == attr1.m_material);
				HACD_ASSERT (attr0.m_material == attr2.m_material);
				return attribute;
			}

			q1 = q2;
			edge1 = ptr;

			ptr = ptr->m_next;
		} while (ptr != face);
		// No triangle accepted the point; relax the tolerance and retry.
		tol *= double (2.0f);
	}
	// this should never happens
	HACD_ASSERT (0);
	return attribute;
}
+
+
+
+
// Append every closed face of 'source' to this mesh, preserving winding and
// copying the per-corner attribute records. Must be called inside a
// BeginPolygon()/EndPolygon() pair (uses AddPolygon).
void dgMeshEffect::MergeFaces (const dgMeshEffect* const source)
{
	int32_t mark = source->IncLRU();
	dgPolyhedra::Iterator iter (*source);
	for(iter.Begin(); iter; iter ++){
		dgEdge* const edge = &(*iter);
		if ((edge->m_incidentFace > 0) && (edge->m_mark < mark)) {
			dgVertexAtribute face[DG_MESH_EFFECT_POINT_SPLITED];

			int32_t count = 0;
			dgEdge* ptr = edge;
			do {
				ptr->m_mark = mark;
				face[count] = source->m_attib[ptr->m_userData];
				count ++;
				HACD_ASSERT (count < int32_t (sizeof (face) / sizeof (face[0])));
				ptr = ptr->m_next;
			} while (ptr != edge);
			AddPolygon(count, &face[0].m_vertex.m_x, sizeof (dgVertexAtribute), dgFastInt (face[0].m_material));
		}
	}
}
+
+
+void dgMeshEffect::ReverseMergeFaces (dgMeshEffect* const source)
+{
+ int32_t mark = source->IncLRU();
+ dgPolyhedra::Iterator iter (*source);
+ for(iter.Begin(); iter; iter ++){
+ dgEdge* const edge = &(*iter);
+ if ((edge->m_incidentFace > 0) && (edge->m_mark < mark)) {
+ dgVertexAtribute face[DG_MESH_EFFECT_POINT_SPLITED];
+
+ int32_t count = 0;
+ dgEdge* ptr = edge;
+ do {
+ ptr->m_mark = mark;
+ face[count] = source->m_attib[ptr->m_userData];
+ face[count].m_normal_x *= float (-1.0f);
+ face[count].m_normal_y *= float (-1.0f);
+ face[count].m_normal_z *= float (-1.0f);
+ count ++;
+ HACD_ASSERT (count < int32_t (sizeof (face) / sizeof (face[0])));
+ ptr = ptr->m_prev;
+ } while (ptr != edge);
+ AddPolygon(count, &face[0].m_vertex.m_x, sizeof (dgVertexAtribute), dgFastInt (face[0].m_material));
+ }
+ }
+}
+
+
+
// Delete every face of this mesh that is coplanar (within tolerance) with a
// same-facing face of 'coplanarFaces'. 'sign' flips this mesh's normals
// before the facing comparison.
void dgMeshEffect::FilterCoplanarFaces (const dgMeshEffect* const coplanarFaces, float sign)
{
	const double tol = double (1.0e-5f);
	const double tol2 = tol * tol;

	int32_t mark = IncLRU();
	Iterator iter (*this);
	// The iterator is advanced before the face is possibly deleted, and is
	// restarted from Begin() after any DeleteFace (deletion invalidates it).
	for (iter.Begin(); iter; ) {
		dgEdge* const face = &(*iter);
		iter ++;
		if ((face->m_mark != mark) && (face->m_incidentFace > 0)) {
			// Mark this face's loop so it is only considered once.
			dgEdge* ptr = face;
			do {
				ptr->m_mark = mark;
				ptr = ptr->m_next;
			} while (ptr != face);

			dgBigVector normal (FaceNormal(face, &m_points[0].m_x, sizeof (dgBigVector)));
			normal = normal.Scale (sign);
			dgBigVector origin (m_points[face->m_incidentVertex]);

			// Squared plane-distance threshold scaled by the (unnormalized)
			// normal's squared length.
			double error2 = (normal % normal) * tol2;
			int32_t capMark = coplanarFaces->IncLRU();

			// Compare against every face of the cap mesh.
			Iterator capIter (*coplanarFaces);
			for (capIter.Begin (); capIter; capIter ++) {
				dgEdge* const capFace = &(*capIter);
				if ((capFace->m_mark != capMark) && (capFace->m_incidentFace > 0)) {
					dgEdge* ptr = capFace;
					do {
						ptr->m_mark = capMark;
						ptr = ptr->m_next;
					} while (ptr != capFace);

					dgBigVector capNormal (coplanarFaces->FaceNormal(capFace, &coplanarFaces->m_points[0].m_x, sizeof (dgBigVector)));

					// Same facing and (near) zero plane distance: drop the face.
					if ((capNormal % normal) > double (0.0f)) {
						dgBigVector capOrigin (coplanarFaces->m_points[capFace->m_incidentVertex]);
						double dist = normal % (capOrigin - origin);
						if ((dist * dist) < error2) {
							DeleteFace(face);
							iter.Begin();
							break;
						}
					}
				}
			}
		}
	}
}
+
+
+bool dgMeshEffect::HasOpenEdges () const
+{
+ dgPolyhedra::Iterator iter (*this);
+ for (iter.Begin(); iter; iter ++){
+ dgEdge* const face = &(*iter);
+ if (face->m_incidentFace < 0){
+ return true;
+ }
+ }
+ return false;
+}
+
+
+
+bool dgMeshEffect::SeparateDuplicateLoops (dgEdge* const face)
+{
+ for (dgEdge* ptr0 = face; ptr0 != face->m_prev; ptr0 = ptr0->m_next) {
+ int32_t index = ptr0->m_incidentVertex;
+
+ dgEdge* ptr1 = ptr0->m_next;
+ do {
+ if (ptr1->m_incidentVertex == index) {
+ dgEdge* const ptr00 = ptr0->m_prev;
+ dgEdge* const ptr11 = ptr1->m_prev;
+
+ ptr00->m_next = ptr1;
+ ptr1->m_prev = ptr00;
+
+ ptr11->m_next = ptr0;
+ ptr0->m_prev = ptr11;
+
+ return true;
+ }
+
+ ptr1 = ptr1->m_next;
+ } while (ptr1 != face);
+ }
+
+ return false;
+}
+
+
// Extract the next unvisited "layer" of the mesh as a new dgMeshEffect.
// The layer id of a face is taken from the w component of its first
// vertex; all not-yet-marked faces with that same id are copied out and
// stamped with 'mark'. Returns NULL when no unmarked face remains.
dgMeshEffect* dgMeshEffect::GetNextLayer (int32_t mark)
{
	Iterator iter(*this);
	dgEdge* edge = NULL;
	// find any face that has not been claimed by a previous layer
	for (iter.Begin (); iter; iter ++) {
		edge = &(*iter);
		if ((edge->m_mark < mark) && (edge->m_incidentFace > 0)) {
			break;
		}
	}

	if (!edge) {
		return NULL;
	}

	// the layer id is stored in the w component of the vertex position
	int32_t layer = int32_t (m_points[edge->m_incidentVertex].m_w);
	dgPolyhedra polyhedra;

	polyhedra.BeginFace ();
	// collect every unmarked face belonging to the same layer
	for (iter.Begin (); iter; iter ++) {
		dgEdge* const edge = &(*iter);
		if ((edge->m_mark < mark) && (edge->m_incidentFace > 0)) {
			int32_t thislayer = int32_t (m_points[edge->m_incidentVertex].m_w);
			if (thislayer == layer) {
				dgEdge* ptr = edge;
				uint32_t count = 0;
				int32_t faceIndex[256];
				int64_t faceDataIndex[256];
				do {
					ptr->m_mark = mark;
					faceIndex[count] = ptr->m_incidentVertex;
					// per-vertex attribute index travels as the face user data
					faceDataIndex[count] = (int64_t)ptr->m_userData;
					count ++;
					HACD_ASSERT (count < int32_t (sizeof (faceIndex)/ sizeof(faceIndex[0])));
					ptr = ptr->m_next;
				} while (ptr != edge);
				polyhedra.AddFace ((int32_t)count, &faceIndex[0], &faceDataIndex[0]);
			}
		}
	}
	polyhedra.EndFace ();

	dgMeshEffect* solid = NULL;
	if (polyhedra.GetCount()) {
		// build the layer mesh sharing this mesh's vertex/attribute pools
		solid = HACD_NEW(dgMeshEffect)(polyhedra, *this);
		solid->SetLRU(mark);
	}
	return solid;
}
+
+
// Repair T-junctions along open (border) loops: a border whose interior
// vertices are all (within tolerance) collinear with its two extreme
// vertices is collapsed by repeatedly splitting the neighboring interior
// faces at the interpolated points, then optionally re-triangulating the
// touched faces. Finishes with a degenerate-face cleanup pass.
void dgMeshEffect::RepairTJoints (bool triangulate)
{
	int32_t mark = IncLRU();
	dgPolyhedra::Iterator iter (*this);
#ifdef _DEBUG
	// debug sweep only: flag nearly coincident vertices on the same border loop
	for (iter.Begin(); iter; iter ++) {
		dgEdge* const face = &(*iter);
		if ((face->m_incidentFace < 0) && (face->m_mark != mark)) {
			for (dgEdge* ptr = face; ptr != face->m_prev; ptr = ptr->m_next) {
				dgBigVector p0 (m_points[ptr->m_incidentVertex]);
				for (dgEdge* ptr1 = ptr->m_next; ptr1 != face; ptr1 = ptr1->m_next) {
					if (ptr->m_incidentVertex != ptr1->m_incidentVertex) {
						dgBigVector p1 (m_points[ptr1->m_incidentVertex]);
						dgBigVector dp (p1 - p0);
						double err2 (dp % dp);
						if (err2 < double (1.0e-16f)) {
//							HACD_ASSERT (0);
						}
					}
				}
			}
		}
	}
	mark = IncLRU();
#endif



	for (iter.Begin(); iter; ) {
		dgEdge* const face = &(*iter);
		// advance before any edge surgery below can invalidate the node
		iter ++;


		if ((face->m_incidentFace < 0) && (face->m_mark != mark)) {
			// vertices project

			// first split the border if it visits any vertex twice
			while (SeparateDuplicateLoops (face));

			// pick the longest edge of the loop as the projection direction
			dgBigVector dir (double (0.0f), double (0.0f), double (0.0f), double (0.0f));
			double lengh2 = double (0.0f);
			dgEdge* ptr = face;
			do {
				dgBigVector dir1 (m_points[ptr->m_next->m_incidentVertex] - m_points[ptr->m_incidentVertex]);
				double val = dir1 % dir1;
				if (val > lengh2) {
					lengh2 = val;
					dir = dir1;
				}
				ptr = ptr->m_next;
			} while (ptr != face);

			HACD_ASSERT (lengh2 > float (0.0f));

			// find the loop's extreme vertices along 'dir' and mark the loop visited
			dgEdge* lastEdge = NULL;
			dgEdge* firstEdge = NULL;
			double minVal = double (-1.0e10f);
			double maxVal = double (-1.0e10f);
			ptr = face;
			do {
				const dgBigVector& p = m_points[ptr->m_incidentVertex];
				double val = p % dir;
				if (val > maxVal) {
					maxVal = val;
					lastEdge = ptr;
				}
				val *= double (-1.0f);
				if (val > minVal) {
					minVal = val;
					firstEdge = ptr;
				}

				ptr->m_mark = mark;
				ptr = ptr->m_next;
			} while (ptr != face);

			HACD_ASSERT (firstEdge);
			HACD_ASSERT (lastEdge);

			// the loop is a T-joint only if every vertex lies (within tolerance)
			// on the segment between the two extreme vertices
			bool isTJoint = true;
			dgBigVector p0 (m_points[firstEdge->m_incidentVertex]);
			dgBigVector p1 (m_points[lastEdge->m_incidentVertex]);
			dgBigVector p1p0 (p1 - p0);
			double den = p1p0 % p1p0;
			ptr = firstEdge->m_next;
			do {
				dgBigVector p2 (m_points[ptr->m_incidentVertex]);
				double num = (p2 - p0) % p1p0;
				dgBigVector q (p0 + p1p0.Scale (num / den));
				dgBigVector dist (p2 - q);
				double err2 = dist % dist;
				isTJoint &= (err2 < (double (1.0e-4f) * double (1.0e-4f)));
				ptr = ptr->m_next;
			} while (isTJoint && (ptr != firstEdge));

			if (isTJoint) {
				// collapse the joint one triangle at a time, walking 'firstEdge'
				// along the border until nothing is left to process
				do {
					dgEdge* next = NULL;

					const dgBigVector p0 = m_points[firstEdge->m_incidentVertex];
					const dgBigVector p1 = m_points[firstEdge->m_next->m_incidentVertex];
					const dgBigVector p2 = m_points[firstEdge->m_prev->m_incidentVertex];

					dgBigVector p1p0 (p1 - p0);
					dgBigVector p2p0 (p2 - p0);
					double dist10 = p1p0 % p1p0;
					double dist20 = p2p0 % p2p0;

					dgEdge* begin = NULL;
					dgEdge* last = NULL;
					if (dist20 > dist10) {
						// p1 lies between p0 and p2: split on the p0-p2 side
						double t = (p1p0 % p2p0) / dist20;
						HACD_ASSERT (t > float (0.0f));
						HACD_ASSERT (t < float (1.0f));

						// reduce the border face to a triangle before collapsing
						if (firstEdge->m_next->m_next->m_next != firstEdge) {
							ConectVertex (firstEdge->m_prev, firstEdge->m_next);
							next = firstEdge->m_next->m_twin->m_next;
						}
						HACD_ASSERT (firstEdge->m_next->m_next->m_next == firstEdge);

#ifdef _DEBUG
						dgEdge* tmp = firstEdge->m_twin;
						do {
							HACD_ASSERT (tmp->m_incidentFace > 0);
							tmp = tmp->m_next;
						} while (tmp != firstEdge->m_twin);
#endif

						begin = firstEdge->m_next;
						last = firstEdge;
						// adopt the neighboring interior face's attributes and add
						// an interpolated attribute for the split point
						firstEdge->m_userData = firstEdge->m_prev->m_twin->m_userData;
						firstEdge->m_incidentFace = firstEdge->m_prev->m_twin->m_incidentFace;
						dgVertexAtribute attrib (InterpolateEdge (firstEdge->m_prev->m_twin, t));
						attrib.m_vertex = m_points[firstEdge->m_next->m_incidentVertex];
						AddAtribute(attrib);
						firstEdge->m_next->m_incidentFace = firstEdge->m_prev->m_twin->m_incidentFace;
						firstEdge->m_next->m_userData = uint64_t (m_atribCount - 1);

						// if the iterator currently points at the edge being deleted,
						// the traversal must restart after the deletion
						bool restart = false;
						if ((firstEdge->m_prev == &(*iter)) || (firstEdge->m_prev->m_twin == &(*iter))) {
							restart = true;
						}
						DeleteEdge(firstEdge->m_prev);
						if (restart) {
							iter.Begin();
						}

					} else {

						// mirror case: p2 lies between p0 and p1
						HACD_ASSERT (dist20 < dist10);

						double t = (p1p0 % p2p0) / dist10;
						HACD_ASSERT (t > float (0.0f));
						HACD_ASSERT (t < float (1.0f));

						if (firstEdge->m_next->m_next->m_next != firstEdge) {
							ConectVertex (firstEdge->m_next, firstEdge->m_prev);
							next = firstEdge->m_next->m_twin;
						}
						HACD_ASSERT (firstEdge->m_next->m_next->m_next == firstEdge);

#ifdef _DEBUG
						dgEdge* tmp = firstEdge->m_twin;
						do {
							HACD_ASSERT (tmp->m_incidentFace > 0);
							tmp = tmp->m_next;
						} while (tmp != firstEdge->m_twin);
#endif

						begin = firstEdge->m_prev;
						last = firstEdge->m_next;
						firstEdge->m_next->m_userData = firstEdge->m_twin->m_userData;
						firstEdge->m_next->m_incidentFace = firstEdge->m_twin->m_incidentFace;
						dgVertexAtribute attrib (InterpolateEdge (firstEdge->m_twin, double (1.0f) - t));
						attrib.m_vertex = m_points[firstEdge->m_prev->m_incidentVertex];
						AddAtribute(attrib);
						firstEdge->m_prev->m_incidentFace = firstEdge->m_twin->m_incidentFace;
						firstEdge->m_prev->m_userData = uint64_t (m_atribCount - 1);

						bool restart = false;
						if ((firstEdge == &(*iter)) || (firstEdge->m_twin == &(*iter))) {
							restart = true;
						}
						DeleteEdge(firstEdge);
						if (restart) {
							iter.Begin();
						}
					}

					if (triangulate) {
						HACD_ASSERT (begin);
						HACD_ASSERT (last);
						// fan-triangulate the repaired face by stitching new
						// half-edge pairs from 'begin' to each remaining vertex
						for (dgEdge* ptr = begin->m_next->m_next; ptr != last; ptr = ptr->m_next) {
							dgEdge* const e = AddHalfEdge (begin->m_incidentVertex, ptr->m_incidentVertex);
							dgEdge* const t = AddHalfEdge (ptr->m_incidentVertex, begin->m_incidentVertex);
							if (e && t) {
								HACD_ASSERT (e);
								HACD_ASSERT (t);
								e->m_twin = t;
								t->m_twin = e;

								e->m_incidentFace = ptr->m_incidentFace;
								t->m_incidentFace = ptr->m_incidentFace;

								e->m_userData = last->m_next->m_userData;
								t->m_userData = ptr->m_userData;

								t->m_prev = ptr->m_prev;
								ptr->m_prev->m_next = t;
								e->m_next = ptr;
								ptr->m_prev = e;
								t->m_next = last->m_next;
								e->m_prev = last;
								last->m_next->m_prev = t;
								last->m_next = e;
							}
						}
					}

					firstEdge = next;
				} while (firstEdge);
			}
		}
	}

	// the collapses above can leave zero-area faces behind
	DeleteDegenerateFaces(&m_points[0].m_x, sizeof (m_points[0]), double (1.0e-7f));
}
+
+// create a convex hull
+dgMeshEffect::dgMeshEffect (const double* const vertexCloud, int32_t count, int32_t strideInByte, double distTol)
+:dgPolyhedra()
+{
+ Init(true);
+ if (count >= 4) {
+ dgConvexHull3d convexHull (vertexCloud, strideInByte, count, distTol);
+ if (convexHull.GetCount()) {
+
+ int32_t vertexCount = convexHull.GetVertexCount();
+ dgStack<dgVector> pointsPool (convexHull.GetVertexCount());
+ dgVector* const points = &pointsPool[0];
+ for (int32_t i = 0; i < vertexCount; i ++) {
+ points[i] = convexHull.GetVertex(i);
+ }
+ dgVector uv(float (0.0f), float (0.0f), float (0.0f), float (0.0f));
+ dgVector normal (float (0.0f), float (1.0f), float (0.0f), float (0.0f));
+
+ int32_t triangleCount = convexHull.GetCount();
+ dgStack<int32_t> faceCountPool (triangleCount);
+ dgStack<int32_t> materialsPool (triangleCount);
+ dgStack<int32_t> vertexIndexListPool (triangleCount * 3);
+ dgStack<int32_t> normalIndexListPool (triangleCount * 3);
+
+
+ memset (&materialsPool[0], 0, triangleCount * sizeof (int32_t));
+ memset (&normalIndexListPool[0], 0, 3 * triangleCount * sizeof (int32_t));
+
+ int32_t index = 0;
+ int32_t* const faceCount = &faceCountPool[0];
+ int32_t* const vertexIndexList = &vertexIndexListPool[0];
+ for (dgConvexHull3d::dgListNode* faceNode = convexHull.GetFirst(); faceNode; faceNode = faceNode->GetNext()) {
+ dgConvexHull3DFace& face = faceNode->GetInfo();
+ faceCount[index] = 3;
+ vertexIndexList[index * 3 + 0] = face.m_index[0];
+ vertexIndexList[index * 3 + 1] = face.m_index[1];
+ vertexIndexList[index * 3 + 2] = face.m_index[2];
+ index ++;
+ }
+
+ BuildFromVertexListIndexList(triangleCount, faceCount, &materialsPool[0],
+ &points[0].m_x, sizeof (dgVector), vertexIndexList,
+ &normal.m_x, sizeof (dgVector), &normalIndexListPool[0],
+ &uv.m_x, sizeof (dgVector), &normalIndexListPool[0],
+ &uv.m_x, sizeof (dgVector), &normalIndexListPool[0]);
+ }
+ }
+}
+
//**** Note: from this point on, the code is a copy of 'MeshEffect3.cpp' from the Newton Physics Engine
+
+
+dgMeshEffect* dgMeshEffect::CreateSimplification(int32_t maxVertexCount,hacd::ICallback* /*reportProgressCallback*/) const
+{
+ if (GetVertexCount() <= maxVertexCount)
+ {
+ return HACD_NEW(dgMeshEffect)(*this);
+ }
+ return HACD_NEW(dgMeshEffect)(*this);
+}
+
// based on the paper "Hierarchical Approximate Convex Decomposition" by Khaled Mamou
// available at http://sourceforge.net/projects/hacd/
// for details see http://kmamou.blogspot.com/
// adapted, with his permission, to be more efficient,
// with some additions to his heuristic for better convex cluster selection
+
+
+#define DG_BUILD_HIERACHICAL_HACD
+
+#define DG_CONCAVITY_SCALE double (100.0f)
+
+
// Per-link payload of the cluster adjacency graph used by the HACD
// decomposition: carries a visit stamp, a back-pointer into the merge
// proxy list, and a cost handicap for links created by back-face casts.
class dgHACDEdge
{
	public:
	dgHACDEdge ()
		:m_mark(0)
		,m_proxyListNode(NULL)
		,m_backFaceHandicap(double (1.0))
	{
	}
	~dgHACDEdge ()
	{
	}

	int32_t m_mark;              // traversal/visit stamp
	void* m_proxyListNode;       // type-erased node in the pair-proxy list (NULL when no pending merge)
	double m_backFaceHandicap;   // concavity multiplier; presumably < 1.0 for back-face links — confirm at the call sites
};
+
// One source-mesh face as stored inside a cluster: its half edge plus the
// cached area and unit normal computed at graph construction time.
class dgHACDClusterFace
{
	public:
	dgHACDClusterFace()
		:m_edge(NULL)
		,m_area(double(0.0f))
	{
	}
	~dgHACDClusterFace()
	{
	}

	dgEdge* m_edge;          // an edge of the face loop in the source mesh
	double m_area;           // face area
	dgBigVector m_normal;    // unit face normal (set after construction by the graph builder)
};
+
+
// A cluster of mesh faces being grown during the hierarchical approximate
// convex decomposition; the list holds the member faces.
class dgHACDCluster: public dgList<dgHACDClusterFace>
{
	public:
	dgHACDCluster ()
		:m_color(0)
		,m_hierachicalClusterIndex(0)
		,m_area(double (0.0f))
		,m_concavity(double (0.0f))
	{
	}

	// True when every vertex of every face in the cluster lies within
	// 'tolerance' of the given plane.
	bool IsCoplanar(const dgBigPlane& plane, const dgMeshEffect& mesh, double tolerance) const
	{
		const dgBigVector* const points = (dgBigVector*) mesh.GetVertexPool();
		for (dgListNode* node = GetFirst(); node; node = node->GetNext()) {
			const dgHACDClusterFace& info = node->GetInfo();
			dgEdge* ptr = info.m_edge;
			do {
				const dgBigVector& p = points[ptr->m_incidentVertex];
				double dist = fabs(plane.Evalue(p));
				if (dist > tolerance) {
					return false;
				}
				ptr = ptr->m_next;
			} while (ptr != info.m_edge);
		}
		return true;
	}


	int32_t m_color;                      // unique cluster id (also used as face color in the source mesh)
	int32_t m_hierachicalClusterIndex;    // slot of this cluster's tree in the concavity tree array
	double m_area;                        // accumulated area of all member faces
	double m_concavity;                   // current concavity metric of the cluster
};
+
+
+class dgHACDClusterGraph :public dgGraph<dgHACDCluster, dgHACDEdge> ,public dgAABBPolygonSoup
+{
+ public:
+
	// Convex hull specialized for concavity measurement: adds a marked,
	// heap-driven ray cast over the hull faces so that concavity probes
	// (rays shot from face samples toward the hull) are fast.
	class dgHACDConveHull: public dgConvexHull3d
	{
		// heap payload for the priority-driven ray walk in RayCast
		class dgConvexHullRayCastData
		{
			public:
			double m_normalProjection;          // dot of the ray direction with the face normal
			const dgConvexHull3DFace* m_face;
		};

		public:
		dgHACDConveHull (const dgHACDConveHull& hull)
			:dgConvexHull3d(hull)
			,m_mark(1)
		{
		}

		dgHACDConveHull (const dgBigVector* const points, int32_t count)
			:dgConvexHull3d(&points[0].m_x, sizeof (dgBigVector),count, double (0.0f))
			,m_mark(1)
		{

		}

		// Measure the concavity contribution of triangle (i0,i1,i2): sample
		// the triangle (recursively subdividing it into 4 children, up to a
		// small budget), cast a ray from above each sample along 'normal'
		// toward the hull, and return the largest hull-to-surface distance.
		double CalculateTriangleConcavity(const dgBigVector& normal,
			int32_t i0,
			int32_t i1,
			int32_t i2,
			const dgBigVector* const points)
		{
			// ring-buffer work queue of sub-triangles
			uint32_t head = 1;
			uint32_t tail = 0;
			dgBigVector pool[1<<8][3];

			pool[0][0] = points[i0];
			pool[0][1] = points[i1];
			pool[0][2] = points[i2];

			// ray start offset: well outside the hull along the face normal
			const dgBigVector step(normal.Scale(double(4.0f) * GetDiagonal()));

			double concavity = float(0.0f);
			double minArea = float(0.125f);
			double minArea2 = minArea * minArea * 0.5f;

			// hard cap on the number of sub-triangles actually processed
			int32_t maxCount = 4;
			uint32_t mask = (sizeof (pool) / (3 * sizeof (pool[0][0]))) - 1;

			const dgConvexHull3DFace* firstGuess = NULL;
			while ((tail != head) && (maxCount >= 0))
			{
				maxCount --;
				dgBigVector p0(pool[tail][0]);
				dgBigVector p1(pool[tail][1]);
				dgBigVector p2(pool[tail][2]);
				tail = (tail + 1) & mask;

				// cast from above the triangle centroid down to the centroid
				dgBigVector q1((p0 + p1 + p2).Scale(double(1.0f / 3.0f)));
				dgBigVector q0(q1 + step);

				double param = FastRayCast(q0, q1, &firstGuess);
				if (param > double(1.0f))
				{
					param = double(1.0f);
				}
				// distance from the hull hit point to the surface sample
				dgBigVector dq(step.Scale(float(1.0f) - param));
				double lenght2 = sqrt (dq % dq);
				if (lenght2 > concavity)
				{
					concavity = lenght2;
				}

				// subdivide while the queue has room and the triangle is large enough
				if (((head + 1) & mask) != tail)
				{
					dgBigVector edge10(p1 - p0);
					dgBigVector edge20(p2 - p0);
					dgBigVector n(edge10 * edge20);
					double area2 = n % n;
					if (area2 > minArea2)
					{
						dgBigVector p01((p0 + p1).Scale(double(0.5f)));
						dgBigVector p12((p1 + p2).Scale(double(0.5f)));
						dgBigVector p20((p2 + p0).Scale(double(0.5f)));

						pool[head][0] = p0;
						pool[head][1] = p01;
						pool[head][2] = p20;
						head = (head + 1) & mask;

						if (((head + 1) & mask) != tail)
						{
							pool[head][0] = p1;
							pool[head][1] = p12;
							pool[head][2] = p01;
							head = (head + 1) & mask;

							if (((head + 1) & mask) != tail)
							{
								pool[head][0] = p2;
								pool[head][1] = p20;
								pool[head][2] = p12;
								head = (head + 1) & mask;
							}
						}
					}
				}
			}
			return concavity;
		}

		// Parametric intersection of the ray origin + t*dist with the plane of
		// 'face'; 'normalProjection' receives dist·normal. When the ray is
		// parallel to the plane, +/-1e30 encodes origin in front of / behind it.
		double FaceRayCast (const dgConvexHull3DFace* const face, const dgBigVector& origin, const dgBigVector& dist, double& normalProjection) const
		{
			int32_t i0 = face->m_index[0];
			int32_t i1 = face->m_index[1];
			int32_t i2 = face->m_index[2];

			const dgBigVector& p0 = m_points[i0];
			dgBigVector normal ((m_points[i1] - p0) * (m_points[i2] - p0));

			double N = (origin - p0) % normal;
			double D = dist % normal;

			if (fabs(D) < double (1.0e-16f)) { //
				normalProjection = float (0.0);
				if (N > double (0.0f)) {
					return float (-1.0e30);
				} else {

					return float (1.0e30);
				}
			}
			normalProjection = D;
			return - N / D;
		}

		// Greedy heap-guided descent over the hull surface toward the face
		// with a vertex closest to 'point'.
		dgConvexHull3DFace* ClosestFaceVertexToPoint (const dgBigVector& point)
		{
			// note, for this function to be effective point should be an already close point to the Hull.
			// for example casting the point to the OBB or the AABB of the full is a good first guess.
			dgConvexHull3DFace* closestFace = &GetFirst()->GetInfo();
			int8_t pool[256 * (sizeof (dgConvexHull3DFace*) + sizeof (double))];
			dgUpHeap<dgConvexHull3DFace*,double> heap (pool, sizeof (pool));

			for (int32_t i = 0; i < 3; i ++) {
				dgBigVector dist (m_points[closestFace->m_index[i]] - point);
				heap.Push(closestFace, dist % dist);
			}

			// m_mark stamps visited faces for this query
			m_mark ++;
			double minDist = heap.Value();
			while (heap.GetCount()) {
				dgConvexHull3DFace* const face = heap[0];
				if (heap.Value() < minDist) {
					minDist = heap.Value();
					closestFace = face;
				}
				heap.Pop();
				//face->m_mark = m_mark;
				face->SetMark(m_mark);
				for (int32_t i = 0; i < 3; i ++) {
					//const dgConvexHull3DFace* twin = &face->m_twin[i]->GetInfo();
					dgConvexHull3DFace* twin = &face->GetTwin(i)->GetInfo();
					//if (twin->m_mark != m_mark) {
					if (twin->GetMark() != m_mark) {
						dgBigVector dist (m_points[twin->m_index[i]] - point);
						// use hysteresis to prevent stops at a local minimal, but at the same time fast descend
						double dist2 = dist % dist;
						if (dist2 < (minDist * double (1.001f))) {
							heap.Push(twin, dist2);
						}
					}
				}
			}

			return closestFace;
		}

		// this version have input sensitive complexity (approximately log2)
		// when casting parallel rays and using the last face as initial guess this version has const time complexity
		// Returns the entering parameter t in [0,1], or 1.2 when the segment
		// misses the hull; '*firstFaceGuess' is updated with the hit face.
		double RayCast (const dgBigVector& localP0, const dgBigVector& localP1,const dgConvexHull3DFace** firstFaceGuess)
		{
			const dgConvexHull3DFace* face = &GetFirst()->GetInfo();
			if (firstFaceGuess && *firstFaceGuess)
			{
				face = *firstFaceGuess;
			}
			else
			{
				// no guess: for large hulls, start near the segment origin
				if (GetCount() > 32)
				{
					dgVector q0 (localP0);
					dgVector q1 (localP1);
					if (dgRayBoxClip (q0, q1, m_aabbP0, m_aabbP1))
					{
						face = ClosestFaceVertexToPoint (q0);
					}
				}
			}

			// set of already-visited faces, keyed by the face pointer value
			SparseArrayFixed< hacd::HaSizeT, 32, 2048 > tested;
			hacd::HaSizeT index = (hacd::HaSizeT)face;
			tested[index] = index;

			int8_t pool[256 * (sizeof (dgConvexHullRayCastData) + sizeof (double))];
			dgDownHeap<dgConvexHullRayCastData,double> heap (pool, sizeof (pool));

			double t0 = double (-1.0e20);	//for the maximum entering segment parameter;
			double t1 = double ( 1.0e20);	//for the minimum leaving segment parameter;
			dgBigVector dS (localP1 - localP0);	// is the segment direction vector;
			dgConvexHullRayCastData data;
			data.m_face = face;
			double t = FaceRayCast (face, localP0, dS, data.m_normalProjection);
			if (data.m_normalProjection >= float (0.0))
			{
				// back-facing plane: deprioritize it in the max-heap
				t = double (-1.0e30);
			}

			heap.Push (data, t);
			while (heap.GetCount())
			{
				dgConvexHullRayCastData data (heap[0]);
				double t = heap.Value();
				const dgConvexHull3DFace* face = data.m_face;
				double normalDistProjection = data.m_normalProjection;
				heap.Pop();
				bool foundThisBestFace = true;
				if (normalDistProjection < double (0.0f))
				{
					// front face: tighten the entering parameter
					if (t > t0)
					{
						t0 = t;
					}
					if (t0 > t1)
					{
						// empty clip interval: the segment misses the hull
						return double (1.2f);
					}
				}
				else
				{
					foundThisBestFace = false;
				}

				// expand to the three neighboring faces not yet visited
				for (int32_t i = 0; i < 3; i ++)
				{
					dgConvexHull3DFace* const face1 = &face->GetTwin(i)->GetInfo();

					hacd::HaSizeT index = (hacd::HaSizeT)face1;
					if ( !tested.find(index) )
					{
						tested[index] = index;
						dgConvexHullRayCastData data;
						data.m_face = face1;
						double t = FaceRayCast (face1, localP0, dS, data.m_normalProjection);
						if (data.m_normalProjection >= float (0.0))
						{
							t = double (-1.0e30);
						}
						else if (t > t0)
						{
							foundThisBestFace = false;
						}
						else if (fabs (t - t0) < double (1.0e-10f))
						{
							// ambiguous tie: fall back to the exact base-class cast
							return dgConvexHull3d::RayCast (localP0, localP1);
						}
						if ((heap.GetCount() + 2)>= heap.GetMaxCount())
						{
							// remove t values that are old and way outside interval [0.0, 1.0]
							for (int32_t i = heap.GetCount() - 1; i >= 0; i--)
							{
								double val = heap.Value(i);
								if ((val < double (-100.0f)) || (val > double (100.0f)))
								{
									heap.Remove(i);
								}
							}
						}
						heap.Push (data, t);
					}
				}
				if (foundThisBestFace)
				{
					// t0 is the entering intersection; accept only if on the segment
					if ((t0 >= double (0.0f)) && (t0 <= double (1.0f)))
					{
						if (firstFaceGuess)
						{
							*firstFaceGuess = face;
						}
						return t0;
					}
					break;
				}
			}

			// no intersection found within the segment
			return double (1.2f);

		}

		double FastRayCast (const dgBigVector& localP0, const dgBigVector& localP1,const dgConvexHull3DFace** guess)
		{
			return RayCast (localP0, localP1, guess);
		}

		int32_t m_mark;    // visit stamp for ClosestFaceVertexToPoint
	};
+
	// Binary merge-history tree used to "look ahead" during clustering:
	// each node owns the face list of its subtree and the concavity of
	// merging them; the tree can later be cut by count or by concavity to
	// produce the final approximation.
	class dgHACDConvacityLookAheadTree : public UANS::UserAllocated
	{
		public:
		// leaf: a single face with its own concavity
		dgHACDConvacityLookAheadTree (dgEdge* const face, double concavity)
			:m_concavity(concavity)
			,m_faceList ()
			,m_left (NULL)
			,m_right (NULL)
		{
			m_faceList.Append(face);
		}


		// interior node: the merge of two subtrees; takes ownership of both
		// children and may collapse them into itself (see below)
		dgHACDConvacityLookAheadTree (dgHACDConvacityLookAheadTree* const leftChild, dgHACDConvacityLookAheadTree* const rightChild, double concavity)
			:m_concavity(concavity)
			,m_faceList ()
			,m_left (leftChild)
			,m_right (rightChild)
		{
			HACD_ASSERT (leftChild);
			HACD_ASSERT (rightChild);

			double concavityTest = m_concavity - double (1.0e-5f);
			//if ((m_left->m_faceList.GetCount() == 1) || (m_right->m_faceList.GetCount() == 1)) {
			if ((((m_left->m_faceList.GetCount() == 1) || (m_right->m_faceList.GetCount() == 1))) ||
				((concavityTest <= m_left->m_concavity) && (concavityTest <= m_right->m_concavity))) {
				//The the parent has lower concavity this mean that the two do no add more detail,
				//the can be deleted and replaced the parent node
				// for example the two children can be two convex strips that are part of a larger convex piece
				// but each part has a non zero concavity, while the convex part has a lower concavity
				m_faceList.Merge (m_left->m_faceList);
				m_faceList.Merge (m_right->m_faceList);

				delete m_left;
				delete m_right;
				m_left = NULL;
				m_right = NULL;
			} else {
				// keep the children; this node's face list is the union of theirs
				for (dgList<dgEdge*>::dgListNode* node = m_left->m_faceList.GetFirst(); node; node = node->GetNext()) {
					m_faceList.Append(node->GetInfo());
				}
				for (dgList<dgEdge*>::dgListNode* node = m_right->m_faceList.GetFirst(); node; node = node->GetNext()) {
					m_faceList.Append(node->GetInfo());
				}
			}
		}

		~dgHACDConvacityLookAheadTree ()
		{
			// children are either both present or both NULL
			if (m_left) {
				HACD_ASSERT (m_right);
				delete m_left;
				delete m_right;
			}
		}

		// Count all nodes in the subtree (iterative, explicit stack).
		int32_t GetNodesCount () const
		{
			int32_t count = 0;
			int32_t stack = 1;
			const dgHACDConvacityLookAheadTree* pool[1024];
			pool[0] = this;
			while (stack) {
				stack --;
				count ++;
				const dgHACDConvacityLookAheadTree* const root = pool[stack];
				if (root->m_left) {
					HACD_ASSERT (root->m_right);
					pool[stack] = root->m_left;
					stack ++;
					HACD_ASSERT ((uint32_t)stack < sizeof (pool)/sizeof (pool[0]));
					pool[stack] = root->m_right;
					stack ++;
					HACD_ASSERT ((uint32_t)stack < sizeof (pool)/sizeof (pool[0]));
				}
			}
			return count;
		}

		// Cut the tree into at most 'count' pieces: repeatedly split the node
		// with the worst concavity until the budget is reached. Leaves are
		// re-pushed with key -1 so they stop being split.
		void ReduceByCount (int32_t count, dgDownHeap<dgHACDConvacityLookAheadTree*, double>& approximation)
		{
			if (count < 1) {
				count = 1;
			}
//			int32_t nodesCount = GetNodesCount();

			approximation.Flush();
			dgHACDConvacityLookAheadTree* tmp = this;
			approximation.Push(tmp, m_concavity);
//			nodesCount --;
			//while (nodesCount && (approximation.GetCount() < count) && (approximation.Value() >= float (0.0f))) {
			while ((approximation.GetCount() < count) && (approximation.Value() >= float (0.0f))) {
				dgHACDConvacityLookAheadTree* worseCluster = approximation[0];
				if (!worseCluster->m_left && approximation.Value() >= float (0.0f)) {
					approximation.Pop();
					approximation.Push(worseCluster, float (-1.0f));
				} else {
					HACD_ASSERT (worseCluster->m_left);
					HACD_ASSERT (worseCluster->m_right);
					approximation.Pop();
					approximation.Push(worseCluster->m_left, worseCluster->m_left->m_concavity);
					approximation.Push(worseCluster->m_right, worseCluster->m_right->m_concavity);
//					nodesCount -= 2;
				}
			}
		}


		// Cut the tree so that every remaining piece has concavity at most
		// 'concavity' (same splitting scheme as ReduceByCount).
		void ReduceByConcavity (double concavity, dgDownHeap<dgHACDConvacityLookAheadTree*, double>& approximation)
		{
			approximation.Flush();
			dgHACDConvacityLookAheadTree* tmp = this;

			approximation.Push(tmp, m_concavity);
			while (approximation.Value() > concavity) {
				dgHACDConvacityLookAheadTree* worseCluster = approximation[0];
				if (!worseCluster->m_left && approximation.Value() >= float (0.0f)) {
					approximation.Pop();
					approximation.Push(worseCluster, float (-1.0f));
				} else {
					HACD_ASSERT (worseCluster->m_left);
					HACD_ASSERT (worseCluster->m_right);
					approximation.Pop();
					approximation.Push(worseCluster->m_left, worseCluster->m_left->m_concavity);
					approximation.Push(worseCluster->m_right, worseCluster->m_right->m_concavity);
				}
			}
		}

		double m_concavity;                      // concavity of the whole subtree's face set
		dgList<dgEdge*> m_faceList;              // faces covered by this subtree
		dgHACDConvacityLookAheadTree* m_left;    // child subtrees (both NULL for a leaf)
		dgHACDConvacityLookAheadTree* m_right;
	};
+
	// Candidate merge between two cluster graph nodes, with the cached area
	// and concavity cost used to rank the merge in the priority heap.
	class dgPairProxy
	{
		public:
		dgPairProxy()
			:m_nodeA(NULL)
			,m_nodeB(NULL)
			,m_hierachicalClusterIndexA(0)
			,m_hierachicalClusterIndexB(0)
			,m_area(double(0.0f))
		{
		}

		~dgPairProxy()
		{
		}

		dgListNode* m_nodeA;                  // the two graph nodes this proxy would merge
		dgListNode* m_nodeB;
		int32_t m_hierachicalClusterIndexA;   // concavity-tree slots of the two clusters
		int32_t m_hierachicalClusterIndexB;
		double m_area;                        // combined area of the merged cluster
		// NOTE(review): m_distanceConcavity is not initialized by the
		// constructor; callers appear to assign it before use — confirm.
		double m_distanceConcavity;
	};
+
	// Ray-cast context for the back-face visibility queries: carries the
	// casting cluster's color and receives the color and parameter of the
	// hit (filled in by the RayHit callback, defined elsewhere).
	class dgHACDRayCasterContext: public dgFastRayTest
	{
		public:
		dgHACDRayCasterContext (const dgVector& l0, const dgVector& l1, dgHACDClusterGraph* const me, int32_t mycolor)
			:dgFastRayTest (l0, l1)
			,m_myColor(mycolor)
			,m_colorHit(-1)
			,m_param (1.0f)
			,m_me (me)
		{
		}

		int32_t m_myColor;           // color (cluster id) of the cluster casting the ray
		int32_t m_colorHit;          // color of the hit face; -1 when nothing was hit
		float m_param;               // ray parameter of the hit
		dgHACDClusterGraph* m_me;    // owning graph, for use by the callback
	};
+
+
	// Build the initial cluster graph: one cluster per mesh face, graph
	// links between adjacent faces, and additional links between mutually
	// visible back faces (found by ray casting against an AABB soup of all
	// faces, within backFaceDistanceFactor * mesh diagonal).
	dgHACDClusterGraph(dgMeshEffect& mesh, float backFaceDistanceFactor,hacd::ICallback* reportProgressCallback)
		:dgGraph<dgHACDCluster, dgHACDEdge> ()
		,dgAABBPolygonSoup()
		,m_mark(0)
		,m_faceCount(0)
		,m_vertexMark(0)
		,m_progress(0)
		,m_cancavityTreeIndex(0)
		,m_vertexMarks(NULL)
		,m_invFaceCount(float (1.0f))
		,m_diagonal(double(1.0f))
		,m_vertexPool(NULL)
		,m_proxyList()
		,m_concavityTreeArray(NULL)
		,m_convexProximation()
		,m_priorityHeap (mesh.GetCount() + 2048)
		,m_reportProgressCallback (reportProgressCallback)
//		,m_parallerConcavityCalculator()
	{

//		m_parallerConcavityCalculator.SetThreadsCount(DG_CONCAVITY_MAX_THREADS);

		// precondition the mesh for better approximation
		mesh.ConvertToPolygons();

		m_faceCount = mesh.GetTotalFaceCount();

		m_invFaceCount = float (1.0f) / (m_faceCount);

		// init some auxiliary structures
		int32_t vertexCount = mesh.GetVertexCount();
		m_vertexMarks = (int32_t*) HACD_ALLOC(vertexCount * sizeof(int32_t));
		m_vertexPool = (dgBigVector*) HACD_ALLOC(vertexCount * sizeof(dgBigVector));
		memset(m_vertexMarks, 0, vertexCount * sizeof(int32_t));

		// tree slots: one leaf per face plus room for every possible merge node
		m_cancavityTreeIndex = m_faceCount + 1;
		m_concavityTreeArray = (dgHACDConvacityLookAheadTree**) HACD_ALLOC(2 * m_cancavityTreeIndex * sizeof(dgHACDConvacityLookAheadTree*));
		memset(m_concavityTreeArray, 0, 2 * m_cancavityTreeIndex * sizeof(dgHACDConvacityLookAheadTree*));

		// scan the mesh and and add a node for each face
		int32_t color = 1;
		dgMeshEffect::Iterator iter(mesh);

		int32_t meshMask = mesh.IncLRU();
		const dgBigVector* const points = (dgBigVector*) mesh.GetVertexPool();
		for (iter.Begin(); iter; iter++) {
			dgEdge* const edge = &(*iter);
			if ((edge->m_mark != meshMask) && (edge->m_incidentFace > 0)) {

				// call the progress callback
				//ReportProgress();

				dgListNode* const clusterNode = AddNode ();
				dgHACDCluster& cluster = clusterNode->GetInfo().m_nodeData;

				// walk the face loop: accumulate its perimeter, color every
				// edge with the cluster id, and point its user data back at
				// the graph node
				double perimeter = double(0.0f);
				dgEdge* ptr = edge;
				do {
					dgBigVector p1p0(points[ptr->m_incidentVertex] - points[ptr->m_prev->m_incidentVertex]);
					perimeter += sqrt(p1p0 % p1p0);
					ptr->m_incidentFace = color;
					ptr->m_userData = PTR_TO_UINT64(clusterNode);
					ptr->m_mark = meshMask;
					ptr = ptr->m_next;
				} while (ptr != edge);

				dgBigVector normal = mesh.FaceNormal(edge, &points[0][0], sizeof(dgBigVector));
				double mag = sqrt(normal % normal);

				cluster.m_color = color;
				cluster.m_hierachicalClusterIndex = color;
				cluster.m_area = double(0.5f) * mag;
				cluster.m_concavity = CalculateConcavityMetric (double (0.0f), cluster.m_area, perimeter, 1, 0);

				dgHACDClusterFace& face = cluster.Append()->GetInfo();
				face.m_edge = edge;
				face.m_area = double(0.5f) * mag;
				face.m_normal = normal.Scale(double(1.0f) / mag);

				// leaf of the merge-history tree for this face
				m_concavityTreeArray[color] = HACD_NEW(dgHACDConvacityLookAheadTree)(edge, double (0.0f));

				color ++;
			}
		}

		// add all link adjacent faces links
		for (dgListNode* clusterNode = GetFirst(); clusterNode; clusterNode = clusterNode->GetNext()) {

			// call the progress callback
			//ReportProgress();

			dgHACDCluster& cluster = clusterNode->GetInfo().m_nodeData;
			dgHACDClusterFace& face = cluster.GetFirst()->GetInfo();
			dgEdge* const edge = face.m_edge;
			dgEdge* ptr = edge;
			do {
				if (ptr->m_twin->m_incidentFace > 0) {
					HACD_ASSERT (ptr->m_twin->m_userData);
					dgListNode* const twinClusterNode = (dgListNode*) ptr->m_twin->m_userData;
					HACD_ASSERT (twinClusterNode);

					// avoid inserting the same graph edge twice
					bool doubleEdge = false;
					for (dgGraphNode<dgHACDCluster, dgHACDEdge>::dgListNode* edgeNode = clusterNode->GetInfo().GetFirst(); edgeNode; edgeNode = edgeNode->GetNext()) {
						if (edgeNode->GetInfo().m_node == twinClusterNode) {
							doubleEdge = true;
							break;
						}
					}
					if (!doubleEdge) {
						clusterNode->GetInfo().AddEdge (twinClusterNode);
					}
				}
				ptr = ptr->m_next;
			} while (ptr != edge);
		}

		Trace();

		// add links to back faces
		dgPolygonSoupDatabaseBuilder builder;
		dgVector polygon[64];
		int32_t indexList[64];

		dgMatrix matrix (dgGetIdentityMatrix());
		for (uint32_t i = 0; i < sizeof (polygon) / sizeof (polygon[0]); i ++) {
			indexList[i] = (int32_t)i;
		}

		// mesh diagonal sets the scale for back-face ray lengths
		dgBigVector minAABB;
		dgBigVector maxAABB;
		mesh.CalculateAABB (minAABB, maxAABB);
		maxAABB -= minAABB;
		float rayDiagonalLength = float (sqrt (maxAABB % maxAABB));
		m_diagonal = rayDiagonalLength;

		// build an AABB soup over all faces (reverse winding), keyed by color
		builder.Begin();
		dgTree<dgListNode*,int32_t> clusterMap;
		for (dgListNode* clusterNode = GetFirst(); clusterNode; clusterNode = clusterNode->GetNext()) {

			// call the progress callback
			//ReportProgress();

			dgHACDCluster& cluster = clusterNode->GetInfo().m_nodeData;
			clusterMap.Insert(clusterNode, cluster.m_color);
			dgHACDClusterFace& face = cluster.GetFirst()->GetInfo();
			dgEdge* const edge = face.m_edge;
			int32_t count = 0;
			dgEdge* ptr = edge;
			do {
				polygon[count] = points[ptr->m_incidentVertex];
				count ++;
				ptr = ptr->m_prev;
			} while (ptr != edge);

			builder.AddMesh(&polygon[0].m_x, count, sizeof (dgVector), 1, &count, indexList, &cluster.m_color, matrix);
		}
		builder.End(false);
		Create (builder, false);


		// for each face, cast short rays opposite the normal from four
		// sample points per sub-triangle; nearby back-face hits become
		// extra graph links (handled inside CastBackFace)
		float distanceThreshold = rayDiagonalLength * backFaceDistanceFactor;
		for (dgListNode* clusterNodeA = GetFirst(); clusterNodeA; clusterNodeA = clusterNodeA->GetNext()) {

			// call the progress callback
			//ReportProgress();
			dgHACDCluster& clusterA = clusterNodeA->GetInfo().m_nodeData;
			dgHACDClusterFace& faceA = clusterA.GetFirst()->GetInfo();
			dgEdge* const edgeA = faceA.m_edge;
			dgEdge* ptr = edgeA;

			// fan the polygon into triangles (p0, p1, p2) and sample each
			dgVector p0 (points[ptr->m_incidentVertex]);
			dgVector p1 (points[ptr->m_next->m_incidentVertex]);
			ptr = ptr->m_next->m_next;
			do {
				dgVector p2 (points[ptr->m_incidentVertex]);
				dgVector p01 ((p0 + p1).Scale (float (0.5f)));
				dgVector p12 ((p1 + p2).Scale (float (0.5f)));
				dgVector p20 ((p2 + p0).Scale (float (0.5f)));

				CastBackFace (clusterNodeA, p0, p01, p20, distanceThreshold, clusterMap);
				CastBackFace (clusterNodeA, p1, p12, p01, distanceThreshold, clusterMap);
				CastBackFace (clusterNodeA, p2, p20, p12, distanceThreshold, clusterMap);
				CastBackFace (clusterNodeA, p01, p12, p20, distanceThreshold, clusterMap);

				p1 = p2;
				ptr = ptr->m_next;
			} while (ptr != edgeA);
		}

		Trace();
	}
+
+ ~dgHACDClusterGraph ()
+ {
+ for (int32_t i = 0; i < m_faceCount * 2; i ++) {
+ if (m_concavityTreeArray[i]) {
+ delete m_concavityTreeArray[i];
+ }
+ }
+
+ HACD_FREE(m_concavityTreeArray);
+ HACD_FREE(m_vertexPool);
+ HACD_FREE(m_vertexMarks);
+ }
+
+
	// Casts a ray from the centroid of triangle (p0, p1, p2) backwards along
	// cluster A's face normal.  If it hits a face of a different cluster within
	// 'distanceThreshold', and that cluster is neither mesh-adjacent nor already
	// linked in the graph, the two clusters are connected with a pair of graph
	// edges carrying a reduced (0.5) back-face handicap so thin double-sided
	// regions can still be merged.
	void CastBackFace (
		dgListNode* const clusterNodeA,
		const dgVector& p0,
		const dgVector& p1,
		const dgVector& p2,
		float distanceThreshold,
		dgTree<dgListNode*,int32_t>& clusterMap)
	{
		dgVector origin ((p0 + p1 + p2).Scale (float (1.0f/3.0f)));

		// the ray is twice the threshold long; hits past the threshold are rejected below
		float rayDistance = distanceThreshold * float (2.0f);

		dgHACDCluster& clusterA = clusterNodeA->GetInfo().m_nodeData;
		dgHACDClusterFace& faceA = clusterA.GetFirst()->GetInfo();
		dgVector end (origin - dgVector (faceA.m_normal).Scale (rayDistance));

		// RayHit records the closest face whose color differs from clusterA's own
		dgHACDRayCasterContext ray (origin, end, this, clusterA.m_color);
		ForAllSectorsRayHit(ray, RayHit, &ray);

		if (ray.m_colorHit != -1) {
			HACD_ASSERT (ray.m_colorHit != ray.m_myColor);
			float distance = rayDistance * ray.m_param;

			if (distance < distanceThreshold) {

				HACD_ASSERT (ray.m_colorHit != clusterA.m_color);
				HACD_ASSERT (clusterMap.Find(ray.m_colorHit));
				dgListNode* const clusterNodeB = clusterMap.Find(ray.m_colorHit)->GetInfo();
				dgHACDCluster& clusterB = clusterNodeB->GetInfo().m_nodeData;

				dgHACDClusterFace& faceB = clusterB.GetFirst()->GetInfo();
				dgEdge* const edgeB = faceB.m_edge;

				// reject clusters that already share a mesh edge (twin half-edges)
				bool isAdjacent = false;
				dgEdge* ptrA = faceA.m_edge;
				do {
					dgEdge* ptrB = edgeB;
					do {
						if (ptrB->m_twin == ptrA) {
							// force the outer loop to terminate on its next step
							ptrA = faceA.m_edge->m_prev;
							isAdjacent = true;
							break;
						}
						ptrB = ptrB->m_next;
					} while (ptrB != edgeB);

					ptrA = ptrA->m_next;
				} while (ptrA != faceA.m_edge);

				if (!isAdjacent) {

					// reject clusters that are already linked in the connectivity graph
					isAdjacent = false;
					for (dgGraphNode<dgHACDCluster, dgHACDEdge>::dgListNode* edgeNode = clusterNodeA->GetInfo().GetFirst(); edgeNode; edgeNode = edgeNode->GetNext()) {
						if (edgeNode->GetInfo().m_node == clusterNodeB) {
							isAdjacent = true;
							break;
						}
					}

					if (!isAdjacent) {

						// create the symmetric pair of graph edges with a back-face handicap
						dgGraphNode<dgHACDCluster, dgHACDEdge>::dgListNode* const edgeNodeAB = clusterNodeA->GetInfo().AddEdge (clusterNodeB);
						dgGraphNode<dgHACDCluster, dgHACDEdge>::dgListNode* const edgeNodeBA = clusterNodeB->GetInfo().AddEdge (clusterNodeA);

						dgHACDEdge& edgeAB = edgeNodeAB->GetInfo().m_edgeData;
						dgHACDEdge& edgeBA = edgeNodeBA->GetInfo().m_edgeData;
						edgeAB.m_backFaceHandicap = double (0.5f);
						edgeBA.m_backFaceHandicap = double (0.5f);
					}
				}
			}
		}
	}
+
+
	// Debug hook; intentionally empty (no-op) in this build.
	void Trace() const
	{
	}
+
+
+	// you can insert a callback here to print the progress as it collapses clusters
+ void ReportProgress ()
+ {
+ m_progress ++;
+ if (m_reportProgressCallback)
+ {
+ float progress = float(m_progress) * m_invFaceCount;
+ m_reportProgressCallback->ReportProgress("Performing HACD",progress);
+ }
+ }
+
	// Builds the final partition mesh: one convex hull (capped at
	// maxVertexPerHull vertices) per selected cluster, all appended into a
	// single dgMeshEffect.  Each hull's vertices carry a distinct integer in
	// their w component ("layer") so the hulls can be separated later.
	// NOTE: "Patition" is a historical typo kept for API compatibility.
	dgMeshEffect* CreatePatitionMesh (dgMeshEffect& mesh, int32_t maxVertexPerHull)
	{
		dgMeshEffect* const convexPartionMesh = HACD_NEW(dgMeshEffect)(true);

		dgMeshEffect::dgVertexAtribute polygon[256];
		memset(polygon, 0, sizeof(polygon));
		dgArray<dgBigVector> convexVertexBuffer(mesh.GetCount());
		const dgBigVector* const points = (dgBigVector*) mesh.GetVertexPool();

		convexPartionMesh->BeginPolygon();
		double layer = double (0.0f);
		for (dgList<dgHACDConvacityLookAheadTree*>::dgListNode* clusterNode = m_convexProximation.GetFirst(); clusterNode; clusterNode = clusterNode->GetNext()) {
			dgHACDConvacityLookAheadTree* const cluster = clusterNode->GetInfo();

			// gather every (possibly duplicated) vertex of the cluster's faces;
			// duplicates are harmless, the hull builder discards them
			int32_t vertexCount = 0;
			for (dgList<dgEdge*>::dgListNode* faceNode = cluster->m_faceList.GetFirst(); faceNode; faceNode = faceNode->GetNext()) {
				dgEdge* const edge = faceNode->GetInfo();
				dgEdge* ptr = edge;
				do {
					int32_t index = ptr->m_incidentVertex;
					convexVertexBuffer[vertexCount] = points[index];
					vertexCount++;
					ptr = ptr->m_next;
				} while (ptr != edge);
			}
			dgConvexHull3d convexHull(&convexVertexBuffer[0].m_x, sizeof(dgBigVector), vertexCount, 0.0, maxVertexPerHull);
			if (convexHull.GetCount()) {
				// emit one triangle per hull face, tagged with the current layer
				const dgBigVector* const vertex = convexHull.GetVertexPool();
				for (dgConvexHull3d::dgListNode* node = convexHull.GetFirst(); node; node = node->GetNext()) {
					const dgConvexHull3DFace* const face = &node->GetInfo();

					int32_t i0 = face->m_index[0];
					int32_t i1 = face->m_index[1];
					int32_t i2 = face->m_index[2];

					polygon[0].m_vertex = vertex[i0];
					polygon[0].m_vertex.m_w = layer;

					polygon[1].m_vertex = vertex[i1];
					polygon[1].m_vertex.m_w = layer;

					polygon[2].m_vertex = vertex[i2];
					polygon[2].m_vertex.m_w = layer;

					convexPartionMesh->AddPolygon(3, &polygon[0].m_vertex.m_x, sizeof(dgMeshEffect::dgVertexAtribute), 0);
				}
				layer += double (1.0f);
			}
		}
		convexPartionMesh->EndPolygon(1.0e-5f);

		// force the progress counter to its final value before the last report
		m_progress = m_faceCount - 1;
		ReportProgress();

		return convexPartionMesh;
	}
+
+
+
	// Ray-cast filter passed to ForAllSectorsRayHit.  Keeps the closest
	// intersection whose face color (cluster id) differs from the caster's own
	// cluster, and returns the hit parameter so the broad phase can clip the ray.
	static float RayHit (void *context, const float* const polygon, int32_t strideInBytes, const int32_t* const indexArray, int32_t indexCount)
	{
		dgHACDRayCasterContext& me = *((dgHACDRayCasterContext*) context);
		// the face normal is stored after the vertex indices in the index array
		dgVector normal (&polygon[indexArray[indexCount] * (strideInBytes / sizeof (float))]);
		float t = me.PolygonIntersect (normal, polygon, strideInBytes, indexArray, indexCount);
		if (t < me.m_param) {
			int32_t faceColor = (int32_t)me.m_me->GetTagId(indexArray);
			if (faceColor != me.m_myColor) {
				// record the new closest hit and which cluster it belongs to
				me.m_param = t;
				me.m_colorHit = faceColor;
			}
		}
		return t;
	}
+
+
+ double ConcavityByFaceMedian (int32_t faceCountA, int32_t faceCountB) const
+ {
+ double faceCountCost = DG_CONCAVITY_SCALE * double (0.1f) * (faceCountA + faceCountB) * m_invFaceCount;
+ //faceCountCost *= 0;
+ return faceCountCost;
+ }
+
+ double CalculateConcavityMetric (double convexConcavity, double area, double perimeter, int32_t faceCountA, int32_t faceCountB) const
+ {
+ double edgeCost = perimeter * perimeter / (double(4.0f * 3.141592f) * area);
+ return convexConcavity * DG_CONCAVITY_SCALE + edgeCost + ConcavityByFaceMedian (faceCountA, faceCountB);
+ }
+
	// Seeds the priority heap: submits a merge candidate exactly once per pair
	// of adjacent clusters (the m_mark stamp prevents processing the symmetric
	// A-B / B-A directed edges twice) and stores the resulting proxy node on
	// both directed edges so either side can invalidate it later.
	void SubmitInitialEdgeCosts (dgMeshEffect& mesh)
	{
		m_mark ++;
		for (dgListNode* clusterNodeA = GetFirst(); clusterNodeA; clusterNodeA = clusterNodeA->GetNext())
		{
			for (dgGraphNode<dgHACDCluster, dgHACDEdge>::dgListNode* edgeNodeAB = clusterNodeA->GetInfo().GetFirst(); edgeNodeAB; edgeNodeAB = edgeNodeAB->GetNext())
			{
				dgHACDEdge& edgeAB = edgeNodeAB->GetInfo().m_edgeData;
				double weight = edgeAB.m_backFaceHandicap;
				if (edgeAB.m_mark != m_mark)
				{
					edgeAB.m_mark = m_mark;
					dgListNode* const clusterNodeB = edgeNodeAB->GetInfo().m_node;
					// find the matching B->A edge so both directions share one proxy
					for (dgGraphNode<dgHACDCluster, dgHACDEdge>::dgListNode* edgeNodeBA = clusterNodeB->GetInfo().GetFirst(); edgeNodeBA; edgeNodeBA = edgeNodeBA->GetNext())
					{
						dgListNode* const clusterNode = edgeNodeBA->GetInfo().m_node;
						if (clusterNode == clusterNodeA)
						{
							dgHACDEdge& edgeBA = edgeNodeBA->GetInfo().m_edgeData;
							edgeBA.m_mark = m_mark;
							HACD_ASSERT (!edgeAB.m_proxyListNode);
							HACD_ASSERT (!edgeBA.m_proxyListNode);
							// the handicap of both directions multiplies into the perimeter weight
							dgList<dgPairProxy>::dgListNode* const proxyNode = SubmitEdgeCost (mesh, clusterNodeA, clusterNodeB, weight * edgeBA.m_backFaceHandicap);
							edgeAB.m_proxyListNode = proxyNode;
							edgeBA.m_proxyListNode = proxyNode;
							break;
						}
					}
				}
			}
		}
	}
+
+ int32_t CopyVertexToPool(const dgMeshEffect& mesh, const dgHACDCluster& cluster, int32_t start)
+ {
+ int32_t count = start;
+
+ const dgBigVector* const points = (dgBigVector*) mesh.GetVertexPool();
+ for (dgList<dgHACDClusterFace>::dgListNode* node = cluster.GetFirst(); node; node = node->GetNext()) {
+ const dgHACDClusterFace& clusterFace = node->GetInfo();
+ dgEdge* edge = clusterFace.m_edge;
+ do {
+ int32_t index = edge->m_incidentVertex;
+ if (m_vertexMarks[index] != m_vertexMark) {
+ m_vertexMarks[index] = m_vertexMark;
+ m_vertexPool[count] = points[index];
+ count++;
+ }
+ edge = edge->m_next;
+ } while (edge != clusterFace.m_edge);
+ }
+ return count;
+ }
+
+
+ void MarkInteriorClusterEdges (dgMeshEffect& /*mesh*/, int32_t mark, const dgHACDCluster& cluster, int32_t colorA, int32_t colorB) const
+ {
+ HACD_ASSERT (colorA != colorB);
+ for (dgList<dgHACDClusterFace>::dgListNode* node = cluster.GetFirst(); node; node = node->GetNext()) {
+ dgHACDClusterFace& clusterFace = node->GetInfo();
+ dgEdge* edge = clusterFace.m_edge;
+ do {
+ if ((edge->m_twin->m_incidentFace == colorA) || (edge->m_twin->m_incidentFace == colorB)) {
+ edge->m_mark = mark;
+ edge->m_twin->m_mark = mark;
+ }
+ edge = edge->m_next;
+ } while (edge != clusterFace.m_edge);
+ }
+ }
+
+ double CalculateClusterPerimeter (dgMeshEffect& mesh, int32_t /*mark*/, const dgHACDCluster& cluster, int32_t colorA, int32_t colorB) const
+ {
+ HACD_ASSERT (colorA != colorB);
+ double perimeter = double (0.0f);
+ const dgBigVector* const points = (dgBigVector*) mesh.GetVertexPool();
+ for (dgList<dgHACDClusterFace>::dgListNode* node = cluster.GetFirst(); node; node = node->GetNext()) {
+ dgHACDClusterFace& clusterFace = node->GetInfo();
+ dgEdge* edge = clusterFace.m_edge;
+ do {
+ if (!((edge->m_twin->m_incidentFace == colorA) || (edge->m_twin->m_incidentFace == colorB))) {
+ dgBigVector p1p0(points[edge->m_twin->m_incidentVertex] - points[edge->m_incidentVertex]);
+ perimeter += sqrt(p1p0 % p1p0);
+ }
+ edge = edge->m_next;
+ } while (edge != clusterFace.m_edge);
+ }
+
+ return perimeter;
+ }
+
+ void HeapCollectGarbage ()
+ {
+ if ((m_priorityHeap.GetCount() + 20) > m_priorityHeap.GetMaxCount()) {
+ for (int32_t i = m_priorityHeap.GetCount() - 1; i >= 0; i--) {
+ dgList<dgPairProxy>::dgListNode* const emptyNode = m_priorityHeap[i];
+ dgPairProxy& emptyPair = emptyNode->GetInfo();
+ if ((emptyPair.m_nodeA == NULL) && (emptyPair.m_nodeB == NULL)) {
+ m_priorityHeap.Remove(i);
+ }
+ }
+ }
+ }
+
+
+
+ double CalculateConcavity(dgHACDConveHull& hull,
+ const dgMeshEffect& mesh,
+ const dgHACDCluster& cluster,
+ double &concavity)
+
+ {
+ const dgBigVector* const points = (dgBigVector*) mesh.GetVertexPool();
+ for (dgList<dgHACDClusterFace>::dgListNode* node = cluster.GetFirst(); node; node = node->GetNext())
+ {
+ dgHACDClusterFace& clusterFace = node->GetInfo();
+ dgEdge* edge = clusterFace.m_edge;
+ int32_t i0 = edge->m_incidentVertex;
+ int32_t i1 = edge->m_next->m_incidentVertex;
+ for (dgEdge* ptr = edge->m_next->m_next; ptr != edge; ptr = ptr->m_next)
+ {
+ int32_t i2 = ptr->m_incidentVertex;
+ double val = hull.CalculateTriangleConcavity(clusterFace.m_normal, i0, i1, i2, points);
+ if (val > concavity)
+ {
+ concavity = val;
+ }
+ i1 = i2;
+ }
+ }
+ return concavity;
+ }
+
+ double CalculateConcavitySingleThread (dgHACDConveHull& hull, dgMeshEffect& mesh, dgHACDCluster& clusterA, dgHACDCluster& clusterB)
+ {
+ double c1=0;
+ double c2=0;
+
+ // gather all of the work to be done to compute the concavity for both clusters in parallel
+ CalculateConcavity(hull, mesh, clusterA, c1);
+ CalculateConcavity(hull, mesh, clusterB, c2);
+ return GetMax(c1,c2);
+ }
+
+ dgList<dgPairProxy>::dgListNode* SubmitEdgeCost (dgMeshEffect& mesh, dgListNode* const clusterNodeA, dgListNode* const clusterNodeB, double perimeterHandicap)
+ {
+ dgHACDCluster& clusterA = clusterNodeA->GetInfo().m_nodeData;
+ dgHACDCluster& clusterB = clusterNodeB->GetInfo().m_nodeData;
+ const dgBigVector* const points = (dgBigVector*) mesh.GetVertexPool();
+
+ bool flatStrip = true;
+ double tol = double (1.0e-5f) * m_diagonal;
+ dgHACDClusterFace& clusterFaceA = clusterA.GetFirst()->GetInfo();
+ dgBigPlane plane(clusterFaceA.m_normal, -(points[clusterFaceA.m_edge->m_incidentVertex] % clusterFaceA.m_normal));
+
+ if (clusterA.GetCount() > 1)
+ {
+ flatStrip = clusterA.IsCoplanar(plane, mesh, tol);
+ }
+
+ if (flatStrip)
+ {
+ flatStrip = clusterB.IsCoplanar(plane, mesh, tol);
+ }
+
+ dgList<dgPairProxy>::dgListNode* pairNode = NULL;
+
+ if (!flatStrip)
+ {
+ m_vertexMark ++;
+ int32_t vertexCount = CopyVertexToPool(mesh, clusterA, 0);
+ vertexCount = CopyVertexToPool(mesh, clusterB, vertexCount);
+
+ dgHACDConveHull convexHull(m_vertexPool, vertexCount);
+
+ if (convexHull.GetVertexCount())
+ {
+ int32_t mark = mesh.IncLRU();
+ MarkInteriorClusterEdges (mesh, mark, clusterA, clusterA.m_color, clusterB.m_color);
+ MarkInteriorClusterEdges (mesh, mark, clusterB, clusterA.m_color, clusterB.m_color);
+
+ double area = clusterA.m_area + clusterB.m_area;
+ double perimeter = CalculateClusterPerimeter (mesh, mark, clusterA, clusterA.m_color, clusterB.m_color) +
+ CalculateClusterPerimeter (mesh, mark, clusterB, clusterA.m_color, clusterB.m_color);
+
+
+ double concavity = double (0.0f);
+ {
+ concavity = CalculateConcavitySingleThread (convexHull, mesh, clusterA, clusterB);
+ }
+
+ if (concavity < double(1.0e-3f))
+ {
+ concavity = double(0.0f);
+ }
+
+ // see if the heap will overflow
+ HeapCollectGarbage ();
+
+ // add a new pair to the heap
+ dgList<dgPairProxy>::dgListNode* pairNode = m_proxyList.Append();
+ dgPairProxy& pair = pairNode->GetInfo();
+ pair.m_nodeA = clusterNodeA;
+ pair.m_nodeB = clusterNodeB;
+ pair.m_distanceConcavity = concavity;
+ pair.m_hierachicalClusterIndexA = clusterA.m_hierachicalClusterIndex;
+ pair.m_hierachicalClusterIndexB = clusterB.m_hierachicalClusterIndex;
+
+ pair.m_area = area;
+ double cost = CalculateConcavityMetric (concavity, area, perimeter * perimeterHandicap, clusterA.GetCount(), clusterB.GetCount());
+ m_priorityHeap.Push(pairNode, cost);
+
+ return pairNode;
+ }
+ }
+ return pairNode;
+ }
+
+
	// Merges the cluster pair referenced by 'pairNode': records the merge in
	// the concavity look-ahead tree, moves all faces of B into A (recoloring
	// them), invalidates every heap proxy that referenced either cluster,
	// re-links B's neighbors to A, deletes B from the graph, and finally
	// resubmits fresh merge costs for every edge around the enlarged A.
	void CollapseEdge (dgList<dgPairProxy>::dgListNode* const pairNode, dgMeshEffect& mesh, double concavity)
	{
		dgListNode* adjacentNodes[1024];
		dgPairProxy& pair = pairNode->GetInfo();

		// a proxy with both nodes nulled was invalidated by an earlier merge
		HACD_ASSERT((pair.m_nodeA && pair.m_nodeB) || (!pair.m_nodeA && !pair.m_nodeB));
		if (pair.m_nodeA && pair.m_nodeB)
		{
			// call the progress callback
			ReportProgress();

			dgListNode* const clusterNodeA = pair.m_nodeA;
			dgListNode* const clusterNodeB = pair.m_nodeB;
			HACD_ASSERT (clusterNodeA != clusterNodeB);

			dgHACDCluster& clusterA = clusterNodeA->GetInfo().m_nodeData;
			dgHACDCluster& clusterB = clusterNodeB->GetInfo().m_nodeData;

			HACD_ASSERT (&clusterA != &clusterB);
			HACD_ASSERT(clusterA.m_color != clusterB.m_color);

			// record the merge as a new parent node in the binary look-ahead tree
			dgHACDConvacityLookAheadTree* const leftTree = m_concavityTreeArray[pair.m_hierachicalClusterIndexA];
			dgHACDConvacityLookAheadTree* const rightTree = m_concavityTreeArray[pair.m_hierachicalClusterIndexB];
			HACD_ASSERT (leftTree);
			HACD_ASSERT (rightTree);
			m_concavityTreeArray[pair.m_hierachicalClusterIndexA] = NULL;
			m_concavityTreeArray[pair.m_hierachicalClusterIndexB] = NULL;
			HACD_ASSERT (m_cancavityTreeIndex < (2 * (m_faceCount + 1)));

			double treeConcavity = pair.m_distanceConcavity;
//			HACD_ASSERT (treeConcavity < 0.1);
			m_concavityTreeArray[m_cancavityTreeIndex] = HACD_NEW(dgHACDConvacityLookAheadTree)(leftTree, rightTree, treeConcavity);
			clusterA.m_hierachicalClusterIndex = m_cancavityTreeIndex;
			clusterB.m_hierachicalClusterIndex = m_cancavityTreeIndex;
			m_cancavityTreeIndex ++;

			// merge two clusters
			while (clusterB.GetCount()) {

				dgHACDCluster::dgListNode* const nodeB = clusterB.GetFirst();
				clusterB.Unlink(nodeB);

				// now color code all faces of the merged cluster
				dgHACDClusterFace& faceB = nodeB->GetInfo();
				dgEdge* ptr = faceB.m_edge;
				do {
					ptr->m_incidentFace = clusterA.m_color;
					ptr = ptr->m_next;
				} while (ptr != faceB.m_edge);
				clusterA.Append(nodeB);
			}
			clusterA.m_area = pair.m_area;
			clusterA.m_concavity = concavity;

			// invalidate all proxies that are still in the heap
			int32_t adjacentCount = 1;
			adjacentNodes[0] = clusterNodeA;
			for (dgGraphNode<dgHACDCluster, dgHACDEdge>::dgListNode* edgeNodeAB = clusterNodeA->GetInfo().GetFirst(); edgeNodeAB; edgeNodeAB = edgeNodeAB->GetNext()) {
				dgHACDEdge& edgeAB = edgeNodeAB->GetInfo().m_edgeData;
				dgList<dgPairProxy>::dgListNode* const proxyNode = (dgList<dgPairProxy>::dgListNode*) edgeAB.m_proxyListNode;
				if (proxyNode) {
					dgPairProxy& pairProxy = proxyNode->GetInfo();
					HACD_ASSERT ((edgeNodeAB->GetInfo().m_node == pairProxy.m_nodeA) || (edgeNodeAB->GetInfo().m_node == pairProxy.m_nodeB));
					pairProxy.m_nodeA = NULL;
					pairProxy.m_nodeB = NULL;
					edgeAB.m_proxyListNode = NULL;
				}

				// remember A's current neighborhood to avoid duplicate links below
				adjacentNodes[adjacentCount] = edgeNodeAB->GetInfo().m_node;
				adjacentCount ++;
				HACD_ASSERT ((uint32_t)adjacentCount < sizeof (adjacentNodes)/ sizeof (adjacentNodes[0]));
			}

			// invalidate B's proxies and transfer B's neighbors to A
			for (dgGraphNode<dgHACDCluster, dgHACDEdge>::dgListNode* edgeNodeBA = clusterNodeB->GetInfo().GetFirst(); edgeNodeBA; edgeNodeBA = edgeNodeBA->GetNext()) {
				dgHACDEdge& edgeBA = edgeNodeBA->GetInfo().m_edgeData;
				dgList<dgPairProxy>::dgListNode* const proxyNode = (dgList<dgPairProxy>::dgListNode*) edgeBA.m_proxyListNode;
				if (proxyNode) {
					dgPairProxy& pairProxy = proxyNode->GetInfo();
					pairProxy.m_nodeA = NULL;
					pairProxy.m_nodeB = NULL;
					edgeBA.m_proxyListNode = NULL;
				}

				bool alreadyLinked = false;
				dgListNode* const node = edgeNodeBA->GetInfo().m_node;
				for (int32_t i = 0; i < adjacentCount; i ++) {
					if (node == adjacentNodes[i]) {
						alreadyLinked = true;
						break;
					}
				}
				if (!alreadyLinked) {
					clusterNodeA->GetInfo().AddEdge (node);
					node->GetInfo().AddEdge (clusterNodeA);
				}
			}
			DeleteNode (clusterNodeB);

			// submit all new costs for each edge connecting this new node to any other node
			for (dgGraphNode<dgHACDCluster, dgHACDEdge>::dgListNode* edgeNodeAB = clusterNodeA->GetInfo().GetFirst(); edgeNodeAB; edgeNodeAB = edgeNodeAB->GetNext())
			{
				dgHACDEdge& edgeAB = edgeNodeAB->GetInfo().m_edgeData;
				dgListNode* const clusterNodeB = edgeNodeAB->GetInfo().m_node;
				double weigh = edgeAB.m_backFaceHandicap;
				// locate the matching B->A edge so both directions share one proxy
				for (dgGraphNode<dgHACDCluster, dgHACDEdge>::dgListNode* edgeNodeBA = clusterNodeB->GetInfo().GetFirst(); edgeNodeBA; edgeNodeBA = edgeNodeBA->GetNext())
				{
					dgListNode* const clusterNode = edgeNodeBA->GetInfo().m_node;
					if (clusterNode == clusterNodeA)
					{
						dgHACDEdge& edgeBA = edgeNodeBA->GetInfo().m_edgeData;
						dgList<dgPairProxy>::dgListNode* const proxyNode = SubmitEdgeCost (mesh, clusterNodeA, clusterNodeB, weigh * edgeBA.m_backFaceHandicap);
						if (proxyNode)
						{
							edgeBA.m_proxyListNode = proxyNode;
							edgeAB.m_proxyListNode = proxyNode;
						}
						break;
					}
				}
			}
		}
		m_proxyList.Remove(pairNode);
	}
+
+#ifdef DG_BUILD_HIERACHICAL_HACD
	// Hierarchical collapse: merges every pair in the heap (building the full
	// merge-history tree), compacts the surviving trees, joins any disjoint
	// pieces under artificial high-concavity parents, then selects the best
	// 'maxClustesCount' subtrees as the final approximation.
	// NOTE: in this build the scaled maxConcavity is only consumed by the
	// commented-out ReduceByConcavity call below.
	void CollapseClusters (dgMeshEffect& mesh, double maxConcavity, int32_t maxClustesCount)
	{

		maxConcavity *= (m_diagonal * DG_CONCAVITY_SCALE);
		while (m_priorityHeap.GetCount()) {
			double concavity = m_priorityHeap.Value();
			dgList<dgPairProxy>::dgListNode* const pairNode = m_priorityHeap[0];
			m_priorityHeap.Pop();
			CollapseEdge (pairNode, mesh, concavity);

//if (m_progress == 24)
//break;

		}


		// compact the surviving trees to the front of the array
		int32_t treeCounts = 0;
		for (int32_t i = 0; i < m_cancavityTreeIndex; i ++) {
			if (m_concavityTreeArray[i]) {
				m_concavityTreeArray[treeCounts] = m_concavityTreeArray[i];
				m_concavityTreeArray[i] = NULL;
				treeCounts ++;
			}
		}

		if (treeCounts > 1) {

			// drop degenerate single-face trees
			for (int32_t i = 0; i < treeCounts; i ++) {
				if (m_concavityTreeArray[i]->m_faceList.GetCount()==1) {
					delete m_concavityTreeArray[i];
					m_concavityTreeArray[i] = m_concavityTreeArray[treeCounts-1];
					m_concavityTreeArray[treeCounts-1]= NULL;
					treeCounts --;
					i--;
				}
			}

			// join disjoint islands under artificial parents; the doubling
			// concavity makes them the first candidates to be split back apart
			float C = 10000;
			while (treeCounts > 1) {
				dgHACDConvacityLookAheadTree* const leftTree = m_concavityTreeArray[treeCounts-1];
				dgHACDConvacityLookAheadTree* const rightTree = m_concavityTreeArray[treeCounts-2];
				m_concavityTreeArray[treeCounts-1] = NULL;
				m_concavityTreeArray[treeCounts-2] = HACD_NEW(dgHACDConvacityLookAheadTree)(leftTree, rightTree, C);
				C *= 2;
				treeCounts --;
			}

		}

		// NOTE(review): assumes at least one tree survived; m_concavityTreeArray[0]
		// would be NULL for an empty mesh — verify against callers.
		dgHACDConvacityLookAheadTree* const tree = m_concavityTreeArray[0];
		dgDownHeap<dgHACDConvacityLookAheadTree*, double> approximation(maxClustesCount * 2);

		tree->ReduceByCount (maxClustesCount, approximation);
		//	tree->ReduceByConcavity (maxConcavity, approximation);

		while (approximation.GetCount()) {
			m_convexProximation.Append(approximation[0]);
			approximation.Pop();
		}
	}
+#else
+ void CollapseClusters (dgMeshEffect& mesh, double maxConcavity, int32_t maxClustesCount)
+ {
+ maxConcavity *= (m_diagonal * DG_CONCAVITY_SCALE);
+
+ bool terminate = false;
+ while (m_priorityHeap.GetCount() && !terminate) {
+ double concavity = m_priorityHeap.Value();
+ dgList<dgPairProxy>::dgListNode* const pairNode = m_priorityHeap[0];
+ if ((concavity < maxConcavity) && (GetCount() < maxClustesCount)) {
+ terminate = true;
+ } else {
+ m_priorityHeap.Pop();
+ CollapseEdge (pairNode, mesh, concavity);
+ }
+ }
+ }
+#endif
+
	int32_t m_mark;                    // generation stamp used to visit each graph edge pair once
	int32_t m_faceCount;               // number of faces in the source mesh
	int32_t m_vertexMark;              // generation counter for m_vertexMarks de-duplication
	int32_t m_progress;                // number of collapsed edges, for progress reporting
	int32_t m_cancavityTreeIndex;      // next free slot in m_concavityTreeArray (sic: "concavity")
	int32_t* m_vertexMarks;            // per-vertex mark buffer compared against m_vertexMark
	float m_invFaceCount;              // reciprocal of the face count, for progress/cost scaling
	double m_diagonal;                 // length of the mesh AABB diagonal
	dgBigVector* m_vertexPool;         // scratch vertex buffer for convex hull construction
	dgList<dgPairProxy> m_proxyList;   // live merge candidates referenced by the heap
	dgHACDConvacityLookAheadTree** m_concavityTreeArray;        // binary merge-history trees
	dgList<dgHACDConvacityLookAheadTree*> m_convexProximation;  // final cluster selection
	dgUpHeap<dgList<dgPairProxy>::dgListNode*, double> m_priorityHeap; // pairs ordered by merge cost
	hacd::ICallback* m_reportProgressCallback;                  // optional user progress callback
};
+
+
+dgMeshEffect* dgMeshEffect::CreateConvexApproximation(
+ float maxConcavity,
+ float backFaceDistanceFactor,
+ int32_t maxHullsCount,
+ int32_t maxVertexPerHull,
+ hacd::ICallback* reportProgressCallback) const
+{
+ // dgMeshEffect triangleMesh(*this);
+ if (maxHullsCount <= 1)
+ {
+ maxHullsCount = 1;
+ }
+ if (maxConcavity <= float (1.0e-5f))
+ {
+ maxConcavity = float (1.0e-5f);
+ }
+
+ if (maxVertexPerHull < 4)
+ {
+ maxVertexPerHull = 4;
+ }
+ ClampValue(backFaceDistanceFactor, float (0.01f), float (1.0f));
+
+ if ( reportProgressCallback )
+ {
+ reportProgressCallback->ReportProgress("Making a copy of the input mesh",0);
+ }
+ // make a copy of the mesh
+ dgMeshEffect mesh(*this);
+ mesh.ClearAttributeArray();
+
+ // create a general connectivity graph
+ if ( reportProgressCallback )
+ {
+ reportProgressCallback->ReportProgress("Creating Connectivity Graph",0);
+ }
+ // make a copy of the mesh
+ dgHACDClusterGraph graph (mesh, backFaceDistanceFactor, reportProgressCallback);
+
+ if ( reportProgressCallback )
+ {
+ reportProgressCallback->ReportProgress("Submit Initial Edge Costs",0);
+ }
+
+ // calculate initial edge costs
+ graph.SubmitInitialEdgeCosts(mesh);
+
+ // collapse the graph
+ if ( reportProgressCallback )
+ {
+ reportProgressCallback->ReportProgress("Collapse the Graph",0);
+ }
+
+ if ( reportProgressCallback )
+ {
+ reportProgressCallback->ReportProgress("Collapse Clusters",0);
+ }
+
+ graph.CollapseClusters (mesh, maxConcavity, maxHullsCount);
+
+ if ( reportProgressCallback )
+ {
+ reportProgressCallback->ReportProgress("Creating Partition Mesh",0);
+ }
+
+ // Create Partition Mesh
+ return graph.CreatePatitionMesh (mesh, maxVertexPerHull);
+}
+
+
// Resets the attribute array to a default state: one zeroed attribute per
// point holding only the vertex position, with every half-edge's m_userData
// pointing at its incident vertex index.
void dgMeshEffect::ClearAttributeArray ()
{
	dgStack<dgVertexAtribute>attribArray (m_pointCount);

	memset (&attribArray[0], 0, m_pointCount * sizeof (dgVertexAtribute));
	int32_t mark = IncLRU();
	dgPolyhedra::Iterator iter (*this);
	for(iter.Begin(); iter; iter ++)
	{
		dgEdge* const edge = &(*iter);
		if (edge->m_mark < mark)
		{
			dgEdge* ptr = edge;

			int32_t index = ptr->m_incidentVertex;
			dgVertexAtribute& attrib = attribArray[index];
			attrib.m_vertex = m_points[index];
			// walk the whole ring of edges around this vertex once
			do
			{
				ptr->m_mark = mark;
				ptr->m_userData = (uint64_t)index;
				ptr = ptr->m_twin->m_next;
			} while (ptr != edge);
		}
	}
	ApplyAttributeArray (&attribArray[0], m_pointCount);
}
+
+
+//*******************************************
+// Original 'fast' implementation from Julio
+//*******************************************
+
// A single mesh face as seen by the original 'fast' clustering path: its
// entry half-edge plus cached area, perimeter and normal.
class dgClusterFace
{
public:
	dgClusterFace()
	{
	}
	~dgClusterFace()
	{
	}

	dgEdge* m_edge;        // one half-edge of the face (entry into the edge ring)
	double m_area;         // face area
	double m_perimeter;    // length of the face boundary
	dgBigVector m_normal;  // face normal
};
+
// A merge candidate between two adjacent clusters in the 'fast' path,
// identified by the shared half-edge pair; both edges nulled marks an
// invalidated candidate still sitting in the heap.
class dgPairProxi
{
public:
	dgPairProxi()
		:m_edgeA(NULL)
		,m_edgeB(NULL)
		,m_area(double(0.0f))
		,m_perimeter(double(0.0f))
	{
	}

	~dgPairProxi()
	{
	}

	dgEdge* m_edgeA;      // half-edge on one side of the shared border
	dgEdge* m_edgeB;      // its twin, on the other cluster's side
	double m_area;        // combined area of the would-be merged cluster
	double m_perimeter;   // combined perimeter of the would-be merged cluster
};
+
+class dgClusterList: public dgList<dgClusterFace>
+{
+public:
+ dgClusterList()
+ : dgList<dgClusterFace>()
+ ,m_area (float (0.0f))
+ ,m_perimeter (float (0.0f))
+ {
+ }
+
+ ~dgClusterList()
+ {
+ }
+
+ int32_t AddVertexToPool(const dgMeshEffect& mesh, dgBigVector* const vertexPool, int32_t* const vertexMarks, int32_t vertexMark)
+ {
+ int32_t count = 0;
+
+ const dgBigVector* const points = (dgBigVector*) mesh.GetVertexPool();
+ for (dgListNode* node = GetFirst(); node; node = node->GetNext()) {
+ dgClusterFace& face = node->GetInfo();
+
+ dgEdge* edge = face.m_edge;
+ do {
+ int32_t index = edge->m_incidentVertex;
+ if (vertexMarks[index] != vertexMark)
+ {
+ vertexMarks[index] = vertexMark;
+ vertexPool[count] = points[index];
+ count++;
+ }
+ edge = edge->m_next;
+ } while (edge != face.m_edge);
+ }
+ return count;
+ }
+
+ double CalculateTriangleConcavity2(const dgConvexHull3d& convexHull, dgClusterFace& info, int32_t i0, int32_t i1, int32_t i2, const dgBigVector* const points) const
+ {
+ uint32_t head = 1;
+ uint32_t tail = 0;
+ dgBigVector pool[1<<8][3];
+
+ pool[0][0] = points[i0];
+ pool[0][1] = points[i1];
+ pool[0][2] = points[i2];
+
+ const dgBigVector step(info.m_normal.Scale(double(4.0f) * convexHull.GetDiagonal()));
+
+ double concavity = float(0.0f);
+ double minArea = float(0.125f);
+ double minArea2 = minArea * minArea * 0.5f;
+
+ // weight the area by the area of the face
+ //dgBigVector edge10(pool[0][1] - pool[0][0]);
+ //dgBigVector edge20(pool[0][2] - pool[0][0]);
+ //dgBigVector triangleArea = edge10 * edge20;
+ //double triangleArea2 = triangleArea % triangleArea;
+ //if ((triangleArea2 / minArea2)> float (64.0f)) {
+ // minArea2 = triangleArea2 / float (64.0f);
+ //}
+
+ int32_t maxCount = 4;
+ uint32_t mask = (sizeof (pool) / (3 * sizeof (pool[0][0]))) - 1;
+ while ((tail != head) && (maxCount >= 0)) {
+ //stack--;
+ maxCount --;
+ dgBigVector p0(pool[tail][0]);
+ dgBigVector p1(pool[tail][1]);
+ dgBigVector p2(pool[tail][2]);
+ tail = (tail + 1) & mask;
+
+ dgBigVector q1((p0 + p1 + p2).Scale(double(1.0f / 3.0f)));
+ dgBigVector q0(q1 + step);
+
+ double param = convexHull.RayCast(q0, q1);
+ if (param > double(1.0f)) {
+ param = double(1.0f);
+ }
+ dgBigVector dq(step.Scale(float(1.0f) - param));
+ double lenght2 = dq % dq;
+ if (lenght2 > concavity) {
+ concavity = lenght2;
+ }
+
+ if (((head + 1) & mask) != tail) {
+ dgBigVector edge10(p1 - p0);
+ dgBigVector edge20(p2 - p0);
+ dgBigVector n(edge10 * edge20);
+ double area2 = n % n;
+ if (area2 > minArea2) {
+ dgBigVector p01((p0 + p1).Scale(double(0.5f)));
+ dgBigVector p12((p1 + p2).Scale(double(0.5f)));
+ dgBigVector p20((p2 + p0).Scale(double(0.5f)));
+
+ pool[head][0] = p0;
+ pool[head][1] = p01;
+ pool[head][2] = p20;
+ head = (head + 1) & mask;
+
+ if (((head + 1) & mask) != tail) {
+ pool[head][0] = p1;
+ pool[head][1] = p12;
+ pool[head][2] = p01;
+ head = (head + 1) & mask;
+
+ if (((head + 1) & mask) != tail) {
+ pool[head][0] = p2;
+ pool[head][1] = p20;
+ pool[head][2] = p12;
+ head = (head + 1) & mask;
+ }
+ }
+ }
+ }
+ }
+ return concavity;
+ }
+
+ double CalculateConcavity2(const dgConvexHull3d& convexHull, const dgMeshEffect& mesh)
+ {
+ double concavity = float(0.0f);
+
+ const dgBigVector* const points = (dgBigVector*) mesh.GetVertexPool();
+
+ for (dgListNode* node = GetFirst(); node; node = node->GetNext()) {
+ dgClusterFace& info = node->GetInfo();
+ int32_t i0 = info.m_edge->m_incidentVertex;
+ int32_t i1 = info.m_edge->m_next->m_incidentVertex;
+ for (dgEdge* edge = info.m_edge->m_next->m_next; edge != info.m_edge; edge = edge->m_next) {
+ int32_t i2 = edge->m_incidentVertex;
+ double val = CalculateTriangleConcavity2(convexHull, info, i0, i1, i2, points);
+ if (val > concavity) {
+ concavity = val;
+ }
+ i1 = i2;
+ }
+ }
+
+ return concavity;
+ }
+
+ bool IsClusterCoplanar(const dgBigPlane& plane,
+ const dgMeshEffect& mesh) const
+ {
+ const dgBigVector* const points = (dgBigVector*) mesh.GetVertexPool();
+ for (dgListNode* node = GetFirst(); node; node = node->GetNext()) {
+ dgClusterFace& info = node->GetInfo();
+
+ dgEdge* ptr = info.m_edge;
+ do {
+ const dgBigVector& p = points[ptr->m_incidentVertex];
+ double dist = fabs(plane.Evalue(p));
+ if (dist > double(1.0e-5f)) {
+ return false;
+ }
+ ptr = ptr->m_next;
+ } while (ptr != info.m_edge);
+ }
+
+ return true;
+ }
+
+ bool IsEdgeConvex(const dgBigPlane& plane, const dgMeshEffect& mesh,
+ dgEdge* const edge) const
+ {
+ const dgBigVector* const points = (dgBigVector*) mesh.GetVertexPool();
+ dgEdge* const edge0 = edge->m_next;
+ dgEdge* ptr = edge0->m_twin->m_next;
+ do {
+ if (ptr->m_twin->m_incidentFace == edge->m_twin->m_incidentFace) {
+ HACD_ASSERT(edge0->m_incidentVertex == ptr->m_incidentVertex);
+ dgBigVector e0(points[edge0->m_twin->m_incidentVertex] - points[edge0->m_incidentVertex]);
+ dgBigVector e1(points[ptr->m_twin->m_incidentVertex] - points[edge0->m_incidentVertex]);
+ dgBigVector normal(e0 * e1);
+ return (normal % plane) > double(0.0f);
+ }
+ ptr = ptr->m_twin->m_next;
+ } while (ptr != edge->m_twin);
+
+ HACD_ASSERT(0);
+ return true;
+ }
+
+ // calculate the convex hull of a conched group of faces,
+ // and measure the concavity, according to Khaled convexity criteria, which is basically
+ // has two components,
+ // the first is ratio between the the perimeter of the group of faces
+ // and the second the largest distance from any of the face to the surface of the hull
+	// Cost of merging this cluster with each adjacent cluster.
+	// When the faces are a strip of a convex hull, the perimeter-ratio component is 1.0 and the distance to the hull is zero;
+	// this is the ideal concavity.
+	// When the faces are not part of the hull, the worst distance to the hull dominates the metric.
+	// This metric is used to place all possible combinations of this cluster with any adjacent cluster into a priority heap and determine
+	// which pair of two adjacent clusters is the best selection for combining them into a larger cluster.
+	void CalculateNodeCost(dgMeshEffect& mesh, int32_t meshMask,
+		dgBigVector* const vertexPool, int32_t* const vertexMarks,
+		int32_t& vertexMark, dgClusterList* const clusters, double diagonalInv,
+		double aspectRatioCoeficent, dgList<dgPairProxi>& proxyList,
+		dgUpHeap<dgList<dgPairProxi>::dgListNode*, double>& heap)
+	{
+		// every face in this cluster carries this cluster index as its incident face
+		int32_t faceIndex = GetFirst()->GetInfo().m_edge->m_incidentFace;
+
+		const dgBigVector* const points = (dgBigVector*) mesh.GetVertexPool();
+
+		// a single-face cluster is trivially flat; larger clusters must be tested against the first face's plane
+		bool flatStrip = true;
+		dgBigPlane plane(GetFirst()->GetInfo().m_normal, -(points[GetFirst()->GetInfo().m_edge->m_incidentVertex] % GetFirst()->GetInfo().m_normal));
+		if (GetCount() > 1) {
+			flatStrip = IsClusterCoplanar(plane, mesh);
+		}
+
+		// gather this cluster's vertices once; each candidate neighbor appends its own vertices after them
+		vertexMark++;
+		int32_t vertexCount = AddVertexToPool(mesh, vertexPool, vertexMarks, vertexMark);
+		for (dgListNode* node = GetFirst(); node; node = node->GetNext()) {
+			//dgClusterFace& clusterFaceA = GetFirst()->GetInfo();
+			dgClusterFace& clusterFaceA = node->GetInfo();
+
+			dgEdge* edge = clusterFaceA.m_edge;
+			do {
+				// consider each boundary edge leading into a distinct, valid, not-yet-visited neighbor cluster
+				int32_t twinFaceIndex = edge->m_twin->m_incidentFace;
+				if ((edge->m_mark != meshMask) && (twinFaceIndex != faceIndex) && (twinFaceIndex > 0)) {
+
+					dgClusterList& clusterListB = clusters[twinFaceIndex];
+
+					vertexMark++;
+					int32_t extraCount = clusterListB.AddVertexToPool(mesh, &vertexPool[vertexCount], &vertexMarks[0], vertexMark);
+
+					// hull of the union of both clusters' vertices
+					int32_t count = vertexCount + extraCount;
+					dgConvexHull3d convexHull(&vertexPool[0].m_x, sizeof(dgBigVector), count, 0.0);
+
+					// concavity: worst distance from either cluster's faces to the merged hull
+					double concavity = double(0.0f);
+					if (convexHull.GetVertexCount()) {
+						concavity = sqrt(GetMax(CalculateConcavity2(convexHull, mesh), clusterListB.CalculateConcavity2(convexHull, mesh)));
+						if (concavity < double(1.0e-3f)) {
+							concavity = double(0.0f);
+						}
+					}
+
+					// merging two coplanar clusters across a concave edge is heavily penalized
+					if ((concavity == double(0.0f)) && flatStrip) {
+						if (clusterListB.IsClusterCoplanar(plane, mesh)) {
+							bool concaveEdge = !(IsEdgeConvex(plane, mesh, edge) && IsEdgeConvex(plane, mesh, edge->m_twin));
+							if (concaveEdge) {
+								concavity += 1000.0f;
+							}
+						}
+					}
+
+					// the shared edge contributes once to each cluster's perimeter, so subtract twice its length
+					dgBigVector p1p0(points[edge->m_twin->m_incidentVertex] - points[edge->m_incidentVertex]);
+					double edgeLength = double(2.0f) * sqrt(p1p0 % p1p0);
+
+					double area = m_area + clusterListB.m_area;
+					double perimeter = m_perimeter + clusterListB.m_perimeter - edgeLength;
+					// inverse isoperimetric quotient: perimeter^2 / (4*pi*area) equals 1.0 for a disc
+					double edgeCost = perimeter * perimeter / (double(4.0f * 3.141592f) * area);
+					double cost = diagonalInv * (concavity + edgeCost * aspectRatioCoeficent);
+
+					// heap nearly full: purge entries whose pair was already invalidated (both edges NULL)
+					if ((heap.GetCount() + 20) > heap.GetMaxCount()) {
+						for (int32_t i = heap.GetCount() - 1; i >= 0; i--) {
+							dgList<dgPairProxi>::dgListNode* emptyNode = heap[i];
+							dgPairProxi& emptyPair = emptyNode->GetInfo();
+							if ((emptyPair.m_edgeA == NULL) && (emptyPair.m_edgeB == NULL)) {
+								heap.Remove(i);
+							}
+						}
+					}
+
+					// record the candidate pair; both half-edges point back at it so a later merge can invalidate it
+					dgList<dgPairProxi>::dgListNode* pairNode = proxyList.Append();
+					dgPairProxi& pair = pairNode->GetInfo();
+					pair.m_edgeA = edge;
+					pair.m_edgeB = edge->m_twin;
+					pair.m_area = area;
+					pair.m_perimeter = perimeter;
+					edge->m_userData = PTR_TO_UINT64(pairNode);
+					edge->m_twin->m_userData = PTR_TO_UINT64(pairNode);
+					heap.Push(pairNode, cost);
+				}
+
+				edge->m_mark = meshMask;
+				edge->m_twin->m_mark = meshMask;
+				edge = edge->m_next;
+			} while (edge != clusterFaceA.m_edge);
+		}
+	}
+
+
+	double m_area;		// summed area of all faces in this cluster
+	double m_perimeter;	// summed perimeter of all faces, shared edges subtracted
+};
+
+dgMeshEffect::dgMeshEffect(const dgMeshEffect& source, float absoluteconcavity, int32_t maxCount,hacd::ICallback *callback,bool /*legacyVersion*/)
+	:dgPolyhedra()
+{
+	// Convex-approximation constructor: every face of 'source' starts as its own
+	// cluster; adjacent clusters are merged greedily (cheapest heap pair first)
+	// until the concavity bound is exceeded and at most 'maxCount' clusters
+	// remain; one convex hull per surviving cluster is then emitted as the
+	// polygons of this mesh, one "layer" (w coordinate) per hull.
+	if ( callback )
+	{
+		callback->ReportProgress("Initializing",0);
+	}
+	Init(true);
+
+	dgMeshEffect mesh(source);
+	int32_t faceCount = mesh.GetTotalFaceCount() + 1;	// cluster 0 is unused; clusters are numbered from 1
+	dgStack<dgClusterList> clusterPool(faceCount);
+	dgClusterList* const clusters = &clusterPool[0];
+
+	for (int32_t i = 0; i < faceCount; i++)
+	{
+		clusters[i] = dgClusterList();
+	}
+
+	int32_t meshMask = mesh.IncLRU();
+	const dgBigVector* const points = mesh.m_points;
+
+	// enumerate all faces, and initialize cluster pool
+	dgMeshEffect::Iterator iter(mesh);
+
+	int32_t clusterIndex = 1;
+	for (iter.Begin(); iter; iter++)
+	{
+		dgEdge* const edge = &(*iter);
+		edge->m_userData = uint64_t (-1);
+		if ((edge->m_mark != meshMask) && (edge->m_incidentFace > 0))
+		{
+			// accumulate the face perimeter and retag every edge of the face with its cluster index
+			double perimeter = double(0.0f);
+			dgEdge* ptr = edge;
+			do
+			{
+				dgBigVector p1p0(points[ptr->m_incidentVertex] - points[ptr->m_prev->m_incidentVertex]);
+				perimeter += sqrt(p1p0 % p1p0);
+				ptr->m_incidentFace = clusterIndex;
+
+				ptr->m_mark = meshMask;
+				ptr = ptr->m_next;
+			} while (ptr != edge);
+
+			dgBigVector normal = mesh.FaceNormal(edge, &points[0][0], sizeof(dgBigVector));
+			double mag = sqrt(normal % normal);
+
+			dgClusterFace& faceInfo = clusters[clusterIndex].Append()->GetInfo();
+
+			faceInfo.m_edge = edge;
+			faceInfo.m_perimeter = perimeter;
+			faceInfo.m_area = double(0.5f) * mag;	// |n| is twice the face area
+			faceInfo.m_normal = normal.Scale(double(1.0f) / mag);
+
+			clusters[clusterIndex].m_perimeter = perimeter;
+			clusters[clusterIndex].m_area = faceInfo.m_area;
+
+			clusterIndex++;
+		}
+	}
+
+	HACD_ASSERT(faceCount == clusterIndex);
+
+	// recalculate all edge cost
+	dgStack<int32_t> vertexMarksArray(mesh.GetVertexCount());
+	dgStack<dgBigVector> vertexArray(mesh.GetVertexCount() * 2);
+
+	dgBigVector* const vertexPool = &vertexArray[0];
+	int32_t* const vertexMarks = &vertexMarksArray[0];
+	memset(&vertexMarks[0], 0, (size_t)vertexMarksArray.GetSizeInBytes());
+
+	dgList<dgPairProxi> proxyList;
+	dgUpHeap<dgList<dgPairProxi>::dgListNode*, double> heap(mesh.GetCount() + 1000);
+
+	int32_t vertexMark = 0;
+
+	double diagonalInv = float(1.0f);
+	double aspectRatioCoeficent = absoluteconcavity / float(10.0f);
+	meshMask = mesh.IncLRU();
+
+	// calculate the initial cost of all clusters, which at this time are each a single face
+	for (int32_t faceIndex = 1; faceIndex < faceCount; faceIndex++)
+	{
+		vertexMark++;
+		dgClusterList& clusterList = clusters[faceIndex];
+		HACD_ASSERT(clusterList.GetFirst()->GetInfo().m_edge->m_incidentFace == faceIndex);
+		clusterList.CalculateNodeCost(mesh, meshMask, &vertexPool[0], &vertexMarks[0], vertexMark, &clusters[0], diagonalInv, aspectRatioCoeficent, proxyList, heap);
+	}
+
+	if ( callback )
+	{
+		callback->ReportProgress("Calculating Convex Clusters",0);
+	}
+
+
+
+	// calculate all essential convex clusters by merging all possible clusters
+	// whose combined concavity is lower than the max absolute concavity;
+	// select the pair with the smallest concavity and fuse them into a larger cluster
+	int32_t essencialClustersCount = faceCount - 1;
+	while (heap.GetCount() && ((heap.Value() < absoluteconcavity) || (essencialClustersCount > maxCount)))
+	{
+		dgList<dgPairProxi>::dgListNode* const pairNode = heap[0];
+		heap.Pop();
+		dgPairProxi& pair = pairNode->GetInfo();
+
+		// both edges are non-NULL for a live pair, or both NULL for an invalidated one
+		HACD_ASSERT((pair.m_edgeA && pair.m_edgeB) || (!pair.m_edgeA && !pair.m_edgeB));
+		if (pair.m_edgeA && pair.m_edgeB)
+		{
+
+			HACD_ASSERT(pair.m_edgeA->m_incidentFace != pair.m_edgeB->m_incidentFace);
+
+			// merge two clusters
+			int32_t faceIndexA = pair.m_edgeA->m_incidentFace;
+			int32_t faceIndexB = pair.m_edgeB->m_incidentFace;
+			dgClusterList* listA = &clusters[faceIndexA];
+			dgClusterList* listB = &clusters[faceIndexB];
+			if (pair.m_edgeA->m_incidentFace > pair.m_edgeB->m_incidentFace)
+			{
+				// always fold the higher-numbered cluster into the lower-numbered one
+				Swap(faceIndexA, faceIndexB);
+				Swap(listA, listB);
+			}
+
+			while (listB->GetFirst())
+			{
+				dgClusterList::dgListNode* const nodeB = listB->GetFirst();
+				listB->Unlink(nodeB);
+				dgClusterFace& faceB = nodeB->GetInfo();
+
+				dgEdge* ptr = faceB.m_edge;
+				do {
+					ptr->m_incidentFace = faceIndexA;
+					ptr = ptr->m_next;
+				} while (ptr != faceB.m_edge);
+				listA->Append(nodeB);
+			}
+			essencialClustersCount --;
+
+			listB->m_area = float (0.0f);
+			listB->m_perimeter = float (0.0f);
+			listA->m_area = pair.m_area;
+			listA->m_perimeter = pair.m_perimeter;
+
+			// recalculate the metric for the new cluster, and tag the used heap entries as invalid, so that
+			// other potential selections do not try to merge with this one, producing hulls that reuse a face more than once
+			int32_t mark = mesh.IncLRU();
+			for (dgClusterList::dgListNode* node = listA->GetFirst(); node; node = node->GetNext()) {
+				dgClusterFace& face = node->GetInfo();
+				dgEdge* ptr = face.m_edge;
+				do {
+					// invalidate any pending pair that still references this edge
+					if (ptr->m_userData != uint64_t (-1)) {
+						dgList<dgPairProxi>::dgListNode* const pairNode = (dgList<dgPairProxi>::dgListNode*) ptr->m_userData;
+						dgPairProxi& pairProxy = pairNode->GetInfo();
+						pairProxy.m_edgeA = NULL;
+						pairProxy.m_edgeB = NULL;
+					}
+					ptr->m_userData = uint64_t (-1);
+					ptr->m_twin->m_userData = uint64_t (-1);
+
+					// interior and open-boundary edges are marked as processed
+					if ((ptr->m_twin->m_incidentFace == faceIndexA) || (ptr->m_twin->m_incidentFace < 0)) {
+						ptr->m_mark = mark;
+						ptr->m_twin->m_mark = mark;
+					}
+
+					// boundary edge into a neighbor: mark the neighbor's edges facing this cluster, but leave this edge visible for re-costing
+					if (ptr->m_mark != mark) {
+						dgClusterList& adjacentList = clusters[ptr->m_twin->m_incidentFace];
+						for (dgClusterList::dgListNode* adjacentNode = adjacentList.GetFirst(); adjacentNode; adjacentNode = adjacentNode->GetNext()) {
+							dgClusterFace& adjacentFace = adjacentNode->GetInfo();
+							dgEdge* adjacentEdge = adjacentFace.m_edge;
+							do {
+								if (adjacentEdge->m_twin->m_incidentFace == faceIndexA) {
+									adjacentEdge->m_twin->m_mark = mark;
+								}
+								adjacentEdge = adjacentEdge->m_next;
+							} while (adjacentEdge != adjacentFace.m_edge);
+						}
+						ptr->m_mark = mark - 1;
+					}
+					ptr = ptr->m_next;
+				} while (ptr != face.m_edge);
+			}
+
+			// regenerate the cost of merging this new cluster with all its adjacent clusters that are still alive
+			vertexMark++;
+			listA->CalculateNodeCost(mesh, mark, &vertexPool[0], &vertexMarks[0], vertexMark, &clusters[0], diagonalInv, aspectRatioCoeficent, proxyList, heap);
+		}
+
+		proxyList.Remove(pairNode);
+	}
+
+	if ( callback )
+	{
+		callback->ReportProgress("Computing Concavity",0);
+	}
+
+
+
+	// emit one convex hull per surviving cluster
+	BeginPolygon();
+	float layer = float(0.0f);
+
+	dgVertexAtribute polygon[256];
+	memset(polygon, 0, sizeof(polygon));
+	dgArray<dgBigVector> convexVertexBuffer(1024);
+	for (int32_t i = 0; i < faceCount; i++) {
+		dgClusterList& clusterList = clusters[i];
+
+		if (clusterList.GetCount()) {
+			// collect every vertex of every face in the cluster
+			int32_t count = 0;
+			for (dgClusterList::dgListNode* node = clusterList.GetFirst(); node; node = node->GetNext()) {
+				dgClusterFace& face = node->GetInfo();
+				dgEdge* edge = face.m_edge;
+
+				// NOTE(review): sourceEdge is advanced in lockstep but never read;
+				// FindEdge returning NULL would crash on m_next below — verify
+				dgEdge* sourceEdge = source.FindEdge(edge->m_incidentVertex, edge->m_twin->m_incidentVertex);
+				do {
+					int32_t index = edge->m_incidentVertex;
+					convexVertexBuffer[count] = points[index];
+
+					count++;
+					sourceEdge = sourceEdge->m_next;
+					edge = edge->m_next;
+				} while (edge != face.m_edge);
+			}
+
+			dgConvexHull3d convexHull(&convexVertexBuffer[0].m_x, sizeof(dgBigVector), count, 0.0);
+
+			if (convexHull.GetCount()) {
+				// triangulate the hull; the w coordinate carries the hull (layer) id
+				const dgBigVector* const vertex = convexHull.GetVertexPool();
+				for (dgConvexHull3d::dgListNode* node = convexHull.GetFirst(); node; node = node->GetNext()) {
+					const dgConvexHull3DFace* const face = &node->GetInfo();
+
+					int32_t i0 = face->m_index[0];
+					int32_t i1 = face->m_index[1];
+					int32_t i2 = face->m_index[2];
+
+					polygon[0].m_vertex = vertex[i0];
+					polygon[0].m_vertex.m_w = layer;
+
+					polygon[1].m_vertex = vertex[i1];
+					polygon[1].m_vertex.m_w = layer;
+
+					polygon[2].m_vertex = vertex[i2];
+					polygon[2].m_vertex.m_w = layer;
+
+					AddPolygon(3, &polygon[0].m_vertex.m_x, sizeof(dgVertexAtribute), 0);
+				}
+
+				layer += float(1.0f);
+				//break;
+			}
+		}
+	}
+	EndPolygon(1.0e-5f);
+
+	for (int32_t i = 0; i < faceCount; i++) {
+		clusters[i].RemoveAll();
+	}
+}
+
+
+dgMeshEffect* dgMeshEffect::CreateConvexApproximationFast(float maxConcavity, int32_t maxCount,hacd::ICallback *callback) const
+{
+	// Build a convex approximation of this mesh: clamp the parameters to
+	// sane minimums, then run the cluster-merge partitioner on a copy.
+	dgMeshEffect workingMesh(*this);
+
+	// at least one hull, and a strictly positive concavity threshold
+	maxCount = GetMax(maxCount, 1);
+	maxConcavity = GetMax(maxConcavity, float (1.0e-5f));
+
+	return HACD_NEW(dgMeshEffect)(workingMesh, maxConcavity, maxCount, callback, true);
+}
diff --git a/APEX_1.4/shared/general/HACD/src/dgPolygonSoupBuilder.cpp b/APEX_1.4/shared/general/HACD/src/dgPolygonSoupBuilder.cpp
new file mode 100644
index 00000000..2fe00fc0
--- /dev/null
+++ b/APEX_1.4/shared/general/HACD/src/dgPolygonSoupBuilder.cpp
@@ -0,0 +1,962 @@
+/* Copyright (c) <2003-2011> <Julio Jerez, Newton Game Dynamics>
+*
+* This software is provided 'as-is', without any express or implied
+* warranty. In no event will the authors be held liable for any damages
+* arising from the use of this software.
+*
+* Permission is granted to anyone to use this software for any purpose,
+* including commercial applications, and to alter it and redistribute it
+* freely, subject to the following restrictions:
+*
+* 1. The origin of this software must not be misrepresented; you must not
+* claim that you wrote the original software. If you use this software
+* in a product, an acknowledgment in the product documentation would be
+* appreciated but is not required.
+*
+* 2. Altered source versions must be plainly marked as such, and must not be
+* misrepresented as being the original software.
+*
+* 3. This notice may not be removed or altered from any source distribution.
+*/
+
+/****************************************************************************
+*
+* Visual C++ 6.0 created by: Julio Jerez
+*
+****************************************************************************/
+#include "dgStack.h"
+#include "dgMatrix.h"
+#include "dgPolyhedra.h"
+#include "dgPolygonSoupBuilder.h"
+
+#define DG_POINTS_RUN (512 * 1024)
+
+// Scratch polyhedra used to validate and clean a single polygon before it is
+// accepted into the soup.
+class dgPolySoupFilterAllocator: public dgPolyhedra
+{
+	public:
+	dgPolySoupFilterAllocator (void)
+		:dgPolyhedra ()
+	{
+	}
+
+	~dgPolySoupFilterAllocator ()
+	{
+	}
+
+	// Try to add the polygon described by 'pool' as a face; whenever AddFace
+	// rejects it, remove one duplicated vertex index from 'pool' (compacting
+	// in place) and retry.  Returns the cleaned vertex count on success, 0
+	// when the face could not be made valid.
+	int32_t AddFilterFace (uint32_t count, int32_t* const pool)
+	{
+		BeginFace();
+		HACD_ASSERT (count);
+		bool reduction = true;
+		while (reduction && !AddFace (int32_t (count), pool)) {
+			reduction = false;
+			if (count >3) {
+				for (uint32_t i = 0; i < count; i ++) {
+					for (uint32_t j = i + 1; j < count; j ++) {
+						if (pool[j] == pool[i]) {
+							// drop pool[j] by shifting the tail down one slot;
+							// note: the outer index i is reused as the shift
+							// cursor and then set to count to exit the scan
+							for (i = j; i < count - 1; i ++) {
+								pool[i] = pool[i + 1];
+							}
+							count --;
+							i = count;
+							reduction = true;
+							break;
+						}
+					}
+				}
+			}
+		}
+		EndFace();
+
+		HACD_ASSERT (reduction);
+		return reduction ? int32_t (count) : 0;
+	}
+};
+
+
+dgPolygonSoupDatabaseBuilder::dgPolygonSoupDatabaseBuilder (void)
+	:m_faceVertexCount(), m_vertexIndex(), m_normalIndex(), m_vertexPoints(), m_normalPoints()
+{
+	// Start with an empty soup; Begin() performs exactly this reset
+	// (all counters to zero, repack countdown to DG_POINTS_RUN).
+	Begin();
+}
+
+
+dgPolygonSoupDatabaseBuilder::~dgPolygonSoupDatabaseBuilder ()
+{
+	// nothing to release explicitly; the container members clean up after themselves
+}
+
+
+void dgPolygonSoupDatabaseBuilder::Begin()
+{
+	// Reset the builder to an empty soup so AddMesh can start fresh.
+	m_faceCount = 0;
+	m_indexCount = 0;
+	m_vertexCount = 0;
+	m_normalCount = 0;
+	// countdown of vertices until the next PackArray compaction
+	m_run = DG_POINTS_RUN;
+}
+
+
+// Append a mesh to the soup: transform its vertices by 'worldMatrix', validate
+// triangles for degeneracy, convex-decompose larger polygons via AddConvexFace,
+// and store each resulting face as the record [faceTag, v0, v1, ...] in
+// m_vertexIndex with its slot count (vertices + 1) in m_faceVertexCount.
+void dgPolygonSoupDatabaseBuilder::AddMesh (const float* const vertex, int32_t vertexCount, int32_t strideInBytes, int32_t faceCount,
+	const int32_t* const faceArray, const int32_t* const indexArray, const int32_t* const faceTagsData, const dgMatrix& worldMatrix)
+{
+	// NOTE(review): fixed-size scratch — assumes at most 256 sub-faces and
+	// 2048 indices per input face; verify against callers
+	int32_t faces[256];
+	int32_t pool[2048];
+
+
+	// touching the last slot first presumably grows the underlying array once — verify container semantics
+	m_vertexPoints[m_vertexCount + vertexCount].m_x = double (0.0f);
+	dgBigVector* const vertexPool = &m_vertexPoints[m_vertexCount];
+
+	worldMatrix.TransformTriplex (&vertexPool[0].m_x, sizeof (dgBigVector), vertex, strideInBytes, vertexCount);
+	for (int32_t i = 0; i < vertexCount; i ++) {
+		vertexPool[i].m_w = double (0.0f);
+	}
+
+	// one extra slot per face holds the face tag
+	int32_t totalIndexCount = faceCount;
+	for (int32_t i = 0; i < faceCount; i ++) {
+		totalIndexCount += faceArray[i];
+	}
+
+	m_vertexIndex[m_indexCount + totalIndexCount] = 0;
+	m_faceVertexCount[m_faceCount + faceCount] = 0;
+
+	int32_t k = 0;
+	for (int32_t i = 0; i < faceCount; i ++) {
+		int32_t count = faceArray[i];
+		for (int32_t j = 0; j < count; j ++) {
+			int32_t index = indexArray[k];
+			pool[j] = index + m_vertexCount;	// rebase into the soup's vertex pool
+			k ++;
+		}
+
+		int32_t convexFaces = 0;
+		if (count == 3) {
+			// triangle: reject degenerate edges and near-zero area
+			convexFaces = 1;
+			dgBigVector p0 (m_vertexPoints[pool[2]]);
+			for (int32_t i = 0; i < 3; i ++) {	// (this i shadows the outer face index)
+				dgBigVector p1 (m_vertexPoints[pool[i]]);
+				dgBigVector edge (p1 - p0);
+				double mag2 = edge % edge;
+				if (mag2 < float (1.0e-6f)) {
+					convexFaces = 0;
+				}
+				p0 = p1;
+			}
+
+			if (convexFaces) {
+				dgBigVector edge0 (m_vertexPoints[pool[2]] - m_vertexPoints[pool[0]]);
+				dgBigVector edge1 (m_vertexPoints[pool[1]] - m_vertexPoints[pool[0]]);
+				dgBigVector normal (edge0 * edge1);
+				double mag2 = normal % normal;
+				if (mag2 < float (1.0e-8f)) {
+					convexFaces = 0;
+				}
+			}
+
+			if (convexFaces) {
+				faces[0] = 3;
+			}
+
+		} else {
+			// general polygon: split into convex sub-faces
+			convexFaces = AddConvexFace (count, pool, faces);
+		}
+
+		// store each sub-face as [tag, indices...]
+		int32_t index = 0;
+		for (int32_t k = 0; k < convexFaces; k ++) {	// (this k shadows the outer index cursor)
+			int32_t count = faces[k];
+			m_vertexIndex[m_indexCount] = faceTagsData[i];
+			m_indexCount ++;
+			for (int32_t j = 0; j < count; j ++) {
+				m_vertexIndex[m_indexCount] = pool[index];
+				index ++;
+				m_indexCount ++;
+			}
+			m_faceVertexCount[m_faceCount] = count + 1;
+			m_faceCount ++;
+		}
+	}
+	m_vertexCount += vertexCount;
+	m_run -= vertexCount;
+	if (m_run <= 0) {
+		// periodically weld duplicate vertices to keep the pool compact
+		PackArray();
+	}
+}
+
+void dgPolygonSoupDatabaseBuilder::PackArray()
+{
+	// Weld duplicate vertices and remap every face index to the compacted pool.
+	dgStack<int32_t> remapPool (m_vertexCount);
+	int32_t* const remap = &remapPool[0];
+	m_vertexCount = dgVertexListToIndexList (&m_vertexPoints[0].m_x, sizeof (dgBigVector), 3, m_vertexCount, &remap[0], float (1.0e-6f));
+
+	// Each face record is [attribute, v0, v1, ...]; only the vertex slots
+	// are remapped, the attribute slot is skipped.
+	int32_t cursor = 0;
+	for (int32_t face = 0; face < m_faceCount; face ++) {
+		const int32_t slots = m_faceVertexCount[face];
+		cursor ++;
+		for (int32_t slot = 1; slot < slots; slot ++) {
+			m_vertexIndex[cursor] = remap[m_vertexIndex[cursor]];
+			cursor ++;
+		}
+	}
+
+	// restart the repack countdown
+	m_run = DG_POINTS_RUN;
+}
+
+void dgPolygonSoupDatabaseBuilder::SingleFaceFixup()
+{
+	// When the soup holds exactly one face, append a byte-for-byte duplicate
+	// of it (attribute slot included) — presumably required by a downstream
+	// consumer that expects at least two faces; verify against callers.
+	if (m_faceCount == 1) {
+		const int32_t count = m_faceVertexCount[0];
+		for (int32_t src = 0; src < count; src ++) {
+			m_vertexIndex[m_indexCount] = m_vertexIndex[src];
+			m_indexCount ++;
+		}
+		m_faceVertexCount[m_faceCount] = count;
+		m_faceCount ++;
+	}
+}
+
+// Final compaction pass: weld vertices (looser tolerance than PackArray),
+// remap all face indices, then filter each face; when 'optimize' is set also
+// regroup faces by attribute id.
+void dgPolygonSoupDatabaseBuilder::EndAndOptimize(bool optimize)
+{
+	if (m_faceCount) {
+		dgStack<int32_t> indexMapPool (m_indexCount + m_vertexCount);
+
+		int32_t* const indexMap = &indexMapPool[0];
+		m_vertexCount = dgVertexListToIndexList (&m_vertexPoints[0].m_x, sizeof (dgBigVector), 3, m_vertexCount, &indexMap[0], float (1.0e-4f));
+
+		// remap the vertex slots of every face record; slot 0 is the attribute and is skipped
+		int32_t k = 0;
+		for (int32_t i = 0; i < m_faceCount; i ++) {
+			k ++;
+			int32_t count = m_faceVertexCount[i];
+			for (int32_t j = 1; j < count; j ++) {
+				int32_t index = m_vertexIndex[k];
+				index = indexMap[index];
+				m_vertexIndex[k] = index;
+				k ++;
+			}
+		}
+
+		OptimizeByIndividualFaces();
+		if (optimize) {
+			OptimizeByGroupID();
+			OptimizeByIndividualFaces();
+		}
+	}
+}
+
+
+// Regroup the soup so that faces sharing the same attribute (group id) are
+// processed together: for each distinct attribute, extract and re-add its
+// faces via the two-builder helper, re-feeding any leftover faces (up to 64
+// passes or until no progress), then copy the rebuilt soup back into *this.
+void dgPolygonSoupDatabaseBuilder::OptimizeByGroupID()
+{
+	dgTree<int, int> attribFilter;
+	dgPolygonSoupDatabaseBuilder builder;
+	dgPolygonSoupDatabaseBuilder builderAux;
+	dgPolygonSoupDatabaseBuilder builderLeftOver;
+
+	builder.Begin();
+	int32_t polygonIndex = 0;
+	for (int32_t i = 0; i < m_faceCount; i ++) {
+		// slot 0 of each face record is its attribute; process each attribute once
+		int32_t attribute = m_vertexIndex[polygonIndex];
+		if (!attribFilter.Find(attribute)) {
+			attribFilter.Insert (attribute, attribute);
+			builder.OptimizeByGroupID (*this, i, polygonIndex, builderLeftOver);
+			for (int32_t j = 0; builderLeftOver.m_faceCount && (j < 64); j ++) {
+				// snapshot the leftovers into builderAux (touch last slot first to size the arrays),
+				// clear the leftover builder, and retry until it stops shrinking
+				builderAux.m_faceVertexCount[builderLeftOver.m_faceCount] = 0;
+				builderAux.m_vertexIndex[builderLeftOver.m_indexCount] = 0;
+				builderAux.m_vertexPoints[builderLeftOver.m_vertexCount].m_x = float (0.0f);
+
+				memcpy (&builderAux.m_faceVertexCount[0], &builderLeftOver.m_faceVertexCount[0], sizeof (int32_t) * builderLeftOver.m_faceCount);
+				memcpy (&builderAux.m_vertexIndex[0], &builderLeftOver.m_vertexIndex[0], sizeof (int32_t) * builderLeftOver.m_indexCount);
+				memcpy (&builderAux.m_vertexPoints[0], &builderLeftOver.m_vertexPoints[0], sizeof (dgBigVector) * builderLeftOver.m_vertexCount);
+
+				builderAux.m_faceCount = builderLeftOver.m_faceCount;
+				builderAux.m_indexCount = builderLeftOver.m_indexCount;
+				builderAux.m_vertexCount = builderLeftOver.m_vertexCount;
+
+				int32_t prevFaceCount = builderLeftOver.m_faceCount;
+				builderLeftOver.m_faceCount = 0;
+				builderLeftOver.m_indexCount = 0;
+				builderLeftOver.m_vertexCount = 0;
+
+				builder.OptimizeByGroupID (builderAux, 0, 0, builderLeftOver);
+				if (prevFaceCount == builderLeftOver.m_faceCount) {
+					break;
+				}
+			}
+			HACD_ASSERT (builderLeftOver.m_faceCount == 0);
+		}
+		polygonIndex += m_faceVertexCount[i];
+	}
+//	builder.End();
+	builder.Optimize(false);
+
+	// copy the rebuilt soup back into this builder (touch last slot first to size the arrays)
+	m_faceVertexCount[builder.m_faceCount] = 0;
+	m_vertexIndex[builder.m_indexCount] = 0;
+	m_vertexPoints[builder.m_vertexCount].m_x = float (0.0f);
+
+	memcpy (&m_faceVertexCount[0], &builder.m_faceVertexCount[0], sizeof (int32_t) * builder.m_faceCount);
+	memcpy (&m_vertexIndex[0], &builder.m_vertexIndex[0], sizeof (int32_t) * builder.m_indexCount);
+	memcpy (&m_vertexPoints[0], &builder.m_vertexPoints[0], sizeof (dgBigVector) * builder.m_vertexCount);
+
+	m_faceCount = builder.m_faceCount;
+	m_indexCount = builder.m_indexCount;
+	m_vertexCount = builder.m_vertexCount;
+	m_normalCount = builder.m_normalCount;
+}
+
+
+// Worker for OptimizeByGroupID(): starting at 'faceNumber'/'faceIndexNumber'
+// in 'source', collect every face whose attribute matches into a polyhedra;
+// faces the polyhedra rejects are diverted to 'leftOver'.  The accepted faces
+// are convex-partitioned and both the partitioned faces and the remainder are
+// re-added to this builder as single-face meshes.
+void dgPolygonSoupDatabaseBuilder::OptimizeByGroupID (dgPolygonSoupDatabaseBuilder& source, int32_t faceNumber, int32_t faceIndexNumber, dgPolygonSoupDatabaseBuilder& leftOver)
+{
+	int32_t indexPool[1024 * 1];
+	int32_t atributeData[1024 * 1];
+	dgVector vertexPool[1024 * 1];
+	dgPolyhedra polyhedra;
+
+	// every re-added face reuses the identity index mapping and this attribute
+	int32_t attribute = source.m_vertexIndex[faceIndexNumber];
+	for (int32_t i = 0; i < int32_t (sizeof(atributeData) / sizeof (int32_t)); i ++) {
+		indexPool[i] = i;
+		atributeData[i] = attribute;
+	}
+
+	leftOver.Begin();
+	polyhedra.BeginFace ();
+	for (int32_t i = faceNumber; i < source.m_faceCount; i ++) {
+		int32_t indexCount;
+		indexCount = source.m_faceVertexCount[i];
+		HACD_ASSERT (indexCount < 1024);
+
+		// slot 0 is the face attribute; only matching faces are collected
+		if (source.m_vertexIndex[faceIndexNumber] == attribute) {
+			dgEdge* const face = polyhedra.AddFace(indexCount - 1, &source.m_vertexIndex[faceIndexNumber + 1]);
+			if (!face) {
+				// the polyhedra rejected the face (would create a non-manifold edge): defer it
+				int32_t faceArray;
+				for (int32_t j = 0; j < indexCount - 1; j ++) {
+					int32_t index;
+					index = source.m_vertexIndex[faceIndexNumber + j + 1];
+					vertexPool[j] = source.m_vertexPoints[index];
+				}
+				faceArray = indexCount - 1;
+				leftOver.AddMesh (&vertexPool[0].m_x, indexCount - 1, sizeof (dgVector), 1, &faceArray, indexPool, atributeData, dgGetIdentityMatrix());
+			} else {
+				// set the attribute
+				dgEdge* ptr = face;
+				do {
+					ptr->m_userData = uint64_t (attribute);
+					ptr = ptr->m_next;
+				} while (ptr != face);
+			}
+		}
+		faceIndexNumber += indexCount;
+	}
+
+	leftOver.Optimize(false);
+	polyhedra.EndFace();
+
+
+	// split the collected faces into convex pieces; the remainder goes to facesLeft
+	dgPolyhedra facesLeft;
+	facesLeft.BeginFace();
+	polyhedra.ConvexPartition (&source.m_vertexPoints[0].m_x, sizeof (dgBigVector), &facesLeft);
+	facesLeft.EndFace();
+
+
+	// re-add every partitioned face to this builder
+	int32_t mark = polyhedra.IncLRU();
+	dgPolyhedra::Iterator iter (polyhedra);
+	for (iter.Begin(); iter; iter ++) {
+		dgEdge* const edge = &(*iter);
+		if (edge->m_incidentFace < 0) {
+			continue;
+		}
+		if (edge->m_mark == mark) {
+			continue;
+		}
+
+		dgEdge* ptr = edge;
+		int32_t indexCount = 0;
+		do {
+			ptr->m_mark = mark;
+			vertexPool[indexCount] = source.m_vertexPoints[ptr->m_incidentVertex];
+			indexCount ++;
+			ptr = ptr->m_next;
+		} while (ptr != edge);
+
+		if (indexCount >= 3) {
+			AddMesh (&vertexPool[0].m_x, indexCount, sizeof (dgVector), 1, &indexCount, indexPool, atributeData, dgGetIdentityMatrix());
+		}
+	}
+
+
+	// and the faces left over by the convex partition
+	mark = facesLeft.IncLRU();
+	dgPolyhedra::Iterator iter1 (facesLeft);
+	for (iter1.Begin(); iter1; iter1 ++) {
+		dgEdge* const edge = &(*iter1);
+		if (edge->m_incidentFace < 0) {
+			continue;
+		}
+		if (edge->m_mark == mark) {
+			continue;
+		}
+
+		dgEdge* ptr = edge;
+		int32_t indexCount = 0;
+		do {
+			ptr->m_mark = mark;
+			vertexPool[indexCount] = source.m_vertexPoints[ptr->m_incidentVertex];
+			indexCount ++;
+			ptr = ptr->m_next;
+		} while (ptr != edge);
+		if (indexCount >= 3) {
+			AddMesh (&vertexPool[0].m_x, indexCount, sizeof (dgVector), 1, &indexCount, indexPool, atributeData, dgGetIdentityMatrix());
+		}
+	}
+}
+
+
+
+
+// Run FilterFace over every face and compact the soup in place, dropping
+// faces that filter down to fewer than 3 vertices.
+void dgPolygonSoupDatabaseBuilder::OptimizeByIndividualFaces()
+{
+	// NOTE: faceArray/indexArray alias oldFaceArray/oldIndexArray — this is a
+	// deliberate in-place compaction; the write cursors (newFaceCount,
+	// newIndexCount) never overtake the read cursors (i, polygonIndex).
+	int32_t* const faceArray = &m_faceVertexCount[0];
+	int32_t* const indexArray = &m_vertexIndex[0];
+
+	int32_t* const oldFaceArray = &m_faceVertexCount[0];
+	int32_t* const oldIndexArray = &m_vertexIndex[0];
+
+	int32_t polygonIndex = 0;
+	int32_t newFaceCount = 0;
+	int32_t newIndexCount = 0;
+	for (int32_t i = 0; i < m_faceCount; i ++) {
+		int32_t oldCount = oldFaceArray[i];
+		// filter the vertex slots (skipping the attribute at slot 0); FilterFace rewrites them in place
+		int32_t count = FilterFace (oldCount - 1, &oldIndexArray[polygonIndex + 1]);
+		if (count) {
+			faceArray[newFaceCount] = count + 1;
+			for (int32_t j = 0; j < count + 1; j ++) {
+				indexArray[newIndexCount + j] = oldIndexArray[polygonIndex + j];
+			}
+			newFaceCount ++;
+			newIndexCount += (count + 1);
+		}
+		polygonIndex += oldCount;
+	}
+	HACD_ASSERT (polygonIndex == m_indexCount);
+	m_faceCount = newFaceCount;
+	m_indexCount = newIndexCount;
+}
+
+
+
+
+// Finish building the soup: run the optimization pass, then compute one unit
+// normal per face (fan of cross products about vertex 0) and compress the
+// normals into an indexed list.
+void dgPolygonSoupDatabaseBuilder::End(bool optimize)
+{
+	Optimize(optimize);
+
+	// build the normal array and adjacency array
+	// calculate all face the normals
+	int32_t indexCount = 0;
+	m_normalPoints[m_faceCount].m_x = double (0.0f);
+	for (int32_t i = 0; i < m_faceCount; i ++) {
+		int32_t faceIndexCount = m_faceVertexCount[i];
+
+		// skip the attribute slot; the face has faceIndexCount - 1 vertices
+		int32_t* const ptr = &m_vertexIndex[indexCount + 1];
+		dgBigVector v0 (&m_vertexPoints[ptr[0]].m_x);
+		dgBigVector v1 (&m_vertexPoints[ptr[1]].m_x);
+		dgBigVector e0 (v1 - v0);
+		dgBigVector normal (float (0.0f), float (0.0f), float (0.0f), float (0.0f));
+		// sum the cross products of the triangle fan anchored at v0
+		for (int32_t j = 2; j < faceIndexCount - 1; j ++) {
+			dgBigVector v2 (&m_vertexPoints[ptr[j]].m_x);
+			dgBigVector e1 (v2 - v0);
+			normal += e0 * e1;
+			e0 = e1;
+		}
+		normal = normal.Scale (dgRsqrt (normal % normal));
+
+		m_normalPoints[i].m_x = normal.m_x;
+		m_normalPoints[i].m_y = normal.m_y;
+		m_normalPoints[i].m_z = normal.m_z;
+		indexCount += faceIndexCount;
+	}
+	// compress normals array
+	m_normalIndex[m_faceCount] = 0;
+	m_normalCount = dgVertexListToIndexList(&m_normalPoints[0].m_x, sizeof (dgBigVector), 3, m_faceCount, &m_normalIndex[0], float (1.0e-4f));
+}
+
+// Optimization driver: small soups go straight to EndAndOptimize; large ones
+// are split in two about the mean face-AABB center along the axis of highest
+// variance, each half is optimized recursively, and the halves are re-merged
+// before the final pass.
+void dgPolygonSoupDatabaseBuilder::Optimize(bool optimize)
+{
+	#define DG_PATITION_SIZE (1024 * 4)
+	if (optimize && (m_faceCount > DG_PATITION_SIZE)) {
+
+		dgBigVector median (float (0.0f), float (0.0f), float (0.0f), float (0.0f));
+		dgBigVector varian (float (0.0f), float (0.0f), float (0.0f), float (0.0f));
+
+		dgStack<dgVector> pool (1024 * 2);
+		dgStack<int32_t> indexArray (1024 * 2);
+		int32_t polygonIndex = 0;
+		for (int32_t i = 0; i < m_faceCount; i ++) {
+
+			// AABB of this face (vertex slots start at 1; slot 0 is the attribute)
+			dgBigVector p0 (float ( 1.0e10f), float ( 1.0e10f), float ( 1.0e10f), float (0.0f));
+			dgBigVector p1 (float (-1.0e10f), float (-1.0e10f), float (-1.0e10f), float (0.0f));
+			int32_t count = m_faceVertexCount[i];
+
+			for (int32_t j = 1; j < count; j ++) {
+				int32_t k = m_vertexIndex[polygonIndex + j];
+				p0.m_x = GetMin (p0.m_x, double (m_vertexPoints[k].m_x));
+				p0.m_y = GetMin (p0.m_y, double (m_vertexPoints[k].m_y));
+				p0.m_z = GetMin (p0.m_z, double (m_vertexPoints[k].m_z));
+				p1.m_x = GetMax (p1.m_x, double (m_vertexPoints[k].m_x));
+				p1.m_y = GetMax (p1.m_y, double (m_vertexPoints[k].m_y));
+				p1.m_z = GetMax (p1.m_z, double (m_vertexPoints[k].m_z));
+			}
+
+			// accumulate sums of the AABB centers for mean and variance
+			dgBigVector p ((p0 + p1).Scale (0.5f));
+			median += p;
+			varian += p.CompProduct (p);
+			polygonIndex += count;
+		}
+
+		// n*Sum(p^2) - (Sum(p))^2 is proportional to the per-axis variance
+		varian = varian.Scale (float (m_faceCount)) - median.CompProduct(median);
+
+		int32_t axis = 0;
+		float maxVarian = float (-1.0e10f);
+		for (int32_t i = 0; i < 3; i ++) {
+			if (varian[i] > maxVarian) {
+				axis = i;
+				maxVarian = float (varian[i]);
+			}
+		}
+		dgBigVector center = median.Scale (float (1.0f) / float (m_faceCount));
+		double axisVal = center[axis];
+
+		dgPolygonSoupDatabaseBuilder left;
+		dgPolygonSoupDatabaseBuilder right;
+
+		// route each face to the left or right half: any vertex beyond the split plane sends it right
+		left.Begin();
+		right.Begin();
+		polygonIndex = 0;
+		for (int32_t i = 0; i < m_faceCount; i ++) {
+			int32_t side = 0;
+			int32_t count = m_faceVertexCount[i];
+			for (int32_t j = 1; j < count; j ++) {
+				int32_t k;
+				k = m_vertexIndex[polygonIndex + j];
+				dgVector p (&m_vertexPoints[k].m_x);
+				if (p[axis] > axisVal) {
+					side = 1;
+					break;
+				}
+			}
+
+			int32_t faceArray = count - 1;
+			int32_t faceTagsData = m_vertexIndex[polygonIndex];
+			for (int32_t j = 1; j < count; j ++) {
+				int32_t k = m_vertexIndex[polygonIndex + j];
+				pool[j - 1] = m_vertexPoints[k];
+				indexArray[j - 1] = j - 1;
+			}
+
+			if (!side) {
+				left.AddMesh (&pool[0].m_x, count - 1, sizeof (dgVector), 1, &faceArray, &indexArray[0], &faceTagsData, dgGetIdentityMatrix());
+			} else {
+				right.AddMesh (&pool[0].m_x, count - 1, sizeof (dgVector), 1, &faceArray, &indexArray[0], &faceTagsData, dgGetIdentityMatrix());
+			}
+			polygonIndex += count;
+		}
+
+		left.Optimize(optimize);
+		right.Optimize(optimize);
+
+		// rebuild this soup from the two optimized halves
+		m_faceCount = 0;
+		m_indexCount = 0;
+		m_vertexCount = 0;
+		m_normalCount = 0;
+		polygonIndex = 0;
+		for (int32_t i = 0; i < left.m_faceCount; i ++) {
+			int32_t count = left.m_faceVertexCount[i];
+			int32_t faceArray = count - 1;
+			int32_t faceTagsData = left.m_vertexIndex[polygonIndex];
+			for (int32_t j = 1; j < count; j ++) {
+				int32_t k = left.m_vertexIndex[polygonIndex + j];
+				pool[j - 1] = left.m_vertexPoints[k];
+				indexArray[j - 1] = j - 1;
+			}
+			AddMesh (&pool[0].m_x, count - 1, sizeof (dgVector), 1, &faceArray, &indexArray[0], &faceTagsData, dgGetIdentityMatrix());
+			polygonIndex += count;
+		}
+
+		polygonIndex = 0;
+		for (int32_t i = 0; i < right.m_faceCount; i ++) {
+			int32_t count = right.m_faceVertexCount[i];
+			int32_t faceArray = count - 1;
+			int32_t faceTagsData = right.m_vertexIndex[polygonIndex];
+			for (int32_t j = 1; j < count; j ++) {
+				int32_t k = right.m_vertexIndex[polygonIndex + j];
+				pool[j - 1] = right.m_vertexPoints[k];
+				indexArray[j - 1] = j - 1;
+			}
+			AddMesh (&pool[0].m_x, count - 1, sizeof (dgVector), 1, &faceArray, &indexArray[0], &faceTagsData, dgGetIdentityMatrix());
+			polygonIndex += count;
+		}
+
+		// only run the full regrouping pass once the soup is small enough
+		if (m_faceCount < DG_PATITION_SIZE) {
+			EndAndOptimize(optimize);
+		} else {
+			EndAndOptimize(false);
+		}
+
+	} else {
+		EndAndOptimize(optimize);
+	}
+}
+
+
+
+// Validate and clean one face, rewriting 'pool' in place.  Triangles are
+// rejected outright when they have a degenerate edge or near-zero area.
+// Larger polygons are de-duplicated via AddFilterFace, stripped of short
+// edges and of near-collinear or concave vertices, then re-emitted starting
+// at the sharpest corner.  Returns the cleaned vertex count, or 0 when fewer
+// than 3 vertices survive.
+int32_t dgPolygonSoupDatabaseBuilder::FilterFace (int32_t count, int32_t* const pool)
+{
+	if (count == 3) {
+		// triangle: reject edges shorter than ~1e-3 ...
+		dgBigVector p0 (m_vertexPoints[pool[2]]);
+		for (int32_t i = 0; i < 3; i ++) {
+			dgBigVector p1 (m_vertexPoints[pool[i]]);
+			dgBigVector edge (p1 - p0);
+			double mag2 = edge % edge;
+			if (mag2 < float (1.0e-6f)) {
+				count = 0;
+			}
+			p0 = p1;
+		}
+
+		// ... and near-zero area
+		if (count == 3) {
+			dgBigVector edge0 (m_vertexPoints[pool[2]] - m_vertexPoints[pool[0]]);
+			dgBigVector edge1 (m_vertexPoints[pool[1]] - m_vertexPoints[pool[0]]);
+			dgBigVector normal (edge0 * edge1);
+			double mag2 = normal % normal;
+			if (mag2 < float (1.0e-8f)) {
+				count = 0;
+			}
+		}
+	} else {
+		dgPolySoupFilterAllocator polyhedra;
+
+		// remove duplicated indices and build a half-edge representation
+		count = polyhedra.AddFilterFace (uint32_t (count), pool);
+
+		if (!count) {
+			return 0;
+		}
+
+		dgEdge* edge = &polyhedra.GetRoot()->GetInfo();
+		if (edge->m_incidentFace < 0) {
+			edge = edge->m_twin;
+		}
+
+		// pass 1: unlink vertices whose incoming edge is degenerately short
+		bool flag = true;
+		while (flag) {
+			flag = false;
+			if (count >= 3) {
+				dgEdge* ptr = edge;
+
+				dgBigVector p0 (&m_vertexPoints[ptr->m_incidentVertex].m_x);
+				do {
+					dgBigVector p1 (&m_vertexPoints[ptr->m_next->m_incidentVertex].m_x);
+					dgBigVector e0 (p1 - p0);
+					double mag2 = e0 % e0;
+					if (mag2 < float (1.0e-6f)) {
+						// splice the vertex out of both the face and its twin loop
+						count --;
+						flag = true;
+						edge = ptr->m_next;
+						ptr->m_prev->m_next = ptr->m_next;
+						ptr->m_next->m_prev = ptr->m_prev;
+						ptr->m_twin->m_next->m_prev = ptr->m_twin->m_prev;
+						ptr->m_twin->m_prev->m_next = ptr->m_twin->m_next;
+						break;
+					}
+					p0 = p1;
+					ptr = ptr->m_next;
+				} while (ptr != edge);
+			}
+		}
+		// pass 2: unlink near-collinear and concave vertices
+		if (count >= 3) {
+			flag = true;
+			dgBigVector normal (polyhedra.FaceNormal (edge, &m_vertexPoints[0].m_x, sizeof (dgBigVector)));
+
+			HACD_ASSERT ((normal % normal) > float (1.0e-10f));
+			normal = normal.Scale (dgRsqrt (normal % normal + float (1.0e-20f)));
+
+			while (flag) {
+				flag = false;
+				if (count >= 3) {
+					dgEdge* ptr = edge;
+
+					dgBigVector p0 (&m_vertexPoints[ptr->m_prev->m_incidentVertex].m_x);
+					dgBigVector p1 (&m_vertexPoints[ptr->m_incidentVertex].m_x);
+					dgBigVector e0 (p1 - p0);
+					e0 = e0.Scale (dgRsqrt (e0 % e0 + float(1.0e-10f)));
+					do {
+						dgBigVector p2 (&m_vertexPoints[ptr->m_next->m_incidentVertex].m_x);
+						dgBigVector e1 (p2 - p1);
+
+						e1 = e1.Scale (dgRsqrt (e1 % e1 + float(1.0e-10f)));
+						// nearly collinear: the two unit edges point the same way
+						double mag2 = e1 % e0;
+						if (mag2 > float (0.9999f)) {
+							count --;
+							flag = true;
+							edge = ptr->m_next;
+							ptr->m_prev->m_next = ptr->m_next;
+							ptr->m_next->m_prev = ptr->m_prev;
+							ptr->m_twin->m_next->m_prev = ptr->m_twin->m_prev;
+							ptr->m_twin->m_prev->m_next = ptr->m_twin->m_next;
+							break;
+						}
+
+						// concave (or flat) corner: the local cross product opposes the face normal
+						dgBigVector n (e0 * e1);
+						mag2 = n % normal;
+						if (mag2 < float (1.0e-5f)) {
+							count --;
+							flag = true;
+							edge = ptr->m_next;
+							ptr->m_prev->m_next = ptr->m_next;
+							ptr->m_next->m_prev = ptr->m_prev;
+							ptr->m_twin->m_next->m_prev = ptr->m_twin->m_prev;
+							ptr->m_twin->m_prev->m_next = ptr->m_twin->m_next;
+							break;
+						}
+
+						e0 = e1;
+						p1 = p2;
+						ptr = ptr->m_next;
+					} while (ptr != edge);
+				}
+			}
+		}
+
+		// emit the surviving loop, starting at the sharpest corner (smallest |cos| between edges)
+		dgEdge* first = edge;
+		if (count >= 3) {
+			double best = float (2.0f);
+			dgEdge* ptr = edge;
+
+			dgBigVector p0 (&m_vertexPoints[ptr->m_incidentVertex].m_x);
+			dgBigVector p1 (&m_vertexPoints[ptr->m_next->m_incidentVertex].m_x);
+			dgBigVector e0 (p1 - p0);
+			e0 = e0.Scale (dgRsqrt (e0 % e0 + float(1.0e-10f)));
+			do {
+				dgBigVector p2 (&m_vertexPoints[ptr->m_next->m_next->m_incidentVertex].m_x);
+				dgBigVector e1 (p2 - p1);
+
+				e1 = e1.Scale (dgRsqrt (e1 % e1 + float(1.0e-10f)));
+				double mag2 = fabs (e1 % e0);
+				if (mag2 < best) {
+					best = mag2;
+					first = ptr;
+				}
+
+				e0 = e1;
+				p1 = p2;
+				ptr = ptr->m_next;
+			} while (ptr != edge);
+
+			count = 0;
+			ptr = first;
+			do {
+				pool[count] = ptr->m_incidentVertex;
+				count ++;
+				ptr = ptr->m_next;
+			} while (ptr != first);
+		}
+
+#ifdef _DEBUG
+		// debug-only check: the emitted polygon must be convex with respect to its normal
+		if (count >= 3) {
+			int32_t j0 = count - 2;
+			int32_t j1 = count - 1;
+			dgBigVector normal (polyhedra.FaceNormal (edge, &m_vertexPoints[0].m_x, sizeof (dgBigVector)));
+			for (int32_t j2 = 0; j2 < count; j2 ++) {
+				dgBigVector p0 (&m_vertexPoints[pool[j0]].m_x);
+				dgBigVector p1 (&m_vertexPoints[pool[j1]].m_x);
+				dgBigVector p2 (&m_vertexPoints[pool[j2]].m_x);
+				dgBigVector e0 ((p0 - p1));
+				dgBigVector e1 ((p2 - p1));
+
+				dgBigVector n (e1 * e0);
+				HACD_ASSERT ((n % normal) > float (0.0f));
+				j0 = j1;
+				j1 = j2;
+			}
+		}
+#endif
+	}
+
+	return (count >= 3) ? count : 0;
+}
+
+
+// Cleans up one polygon (vertex indices in 'pool') and emits it as one or more
+// convex faces.  Degenerate edges and collinear vertices are removed first; if
+// the remaining polygon is convex it is emitted as a single face, otherwise it
+// is convex-partitioned.  Face vertex counts are written to 'facesArray' and
+// the (re-packed) vertex indices back into 'pool'.  Returns the number of
+// faces produced (0 if the polygon degenerates away or has a null normal).
+int32_t dgPolygonSoupDatabaseBuilder::AddConvexFace (int32_t count, int32_t* const pool, int32_t* const facesArray)
+{
+ dgPolySoupFilterAllocator polyhedra;
+
+ count = polyhedra.AddFilterFace(uint32_t (count), pool);
+
+ dgEdge* edge = &polyhedra.GetRoot()->GetInfo();
+ if (edge->m_incidentFace < 0) {
+ edge = edge->m_twin;
+ }
+
+
+ int32_t isconvex = 1;
+ int32_t facesCount = 0;
+
+ // Pass 1: unlink edges whose squared length is below 1.0e-6 (near-zero edges).
+ int32_t flag = 1;
+ while (flag) {
+ flag = 0;
+ if (count >= 3) {
+ dgEdge* ptr = edge;
+
+ dgBigVector p0 (&m_vertexPoints[ptr->m_incidentVertex].m_x);
+ do {
+ dgBigVector p1 (&m_vertexPoints[ptr->m_next->m_incidentVertex].m_x);
+ dgBigVector e0 (p1 - p0);
+ double mag2 = e0 % e0;
+ if (mag2 < float (1.0e-6f)) {
+ count --;
+ flag = 1;
+ edge = ptr->m_next;
+ ptr->m_prev->m_next = ptr->m_next;
+ ptr->m_next->m_prev = ptr->m_prev;
+ ptr->m_twin->m_next->m_prev = ptr->m_twin->m_prev;
+ ptr->m_twin->m_prev->m_next = ptr->m_twin->m_next;
+ break;
+ }
+ p0 = p1;
+ ptr = ptr->m_next;
+ } while (ptr != edge);
+ }
+ }
+ if (count >= 3) {
+ flag = 1;
+
+ // Pass 2: unlink vertices whose adjacent edge directions are nearly
+ // parallel (dot of unit edges > 0.9999, i.e. collinear vertices).
+ while (flag) {
+ flag = 0;
+ if (count >= 3) {
+ dgEdge* ptr = edge;
+
+ dgBigVector p0 (&m_vertexPoints[ptr->m_prev->m_incidentVertex].m_x);
+ dgBigVector p1 (&m_vertexPoints[ptr->m_incidentVertex].m_x);
+ dgBigVector e0 (p1 - p0);
+ e0 = e0.Scale (dgRsqrt (e0 % e0 + float(1.0e-10f)));
+ do {
+ dgBigVector p2 (&m_vertexPoints[ptr->m_next->m_incidentVertex].m_x);
+ dgBigVector e1 (p2 - p1);
+
+ e1 = e1.Scale (dgRsqrt (e1 % e1 + float(1.0e-10f)));
+ double mag2 = e1 % e0;
+ if (mag2 > float (0.9999f)) {
+ count --;
+ flag = 1;
+ edge = ptr->m_next;
+ ptr->m_prev->m_next = ptr->m_next;
+ ptr->m_next->m_prev = ptr->m_prev;
+ ptr->m_twin->m_next->m_prev = ptr->m_twin->m_prev;
+ ptr->m_twin->m_prev->m_next = ptr->m_twin->m_next;
+ break;
+ }
+
+ e0 = e1;
+ p1 = p2;
+ ptr = ptr->m_next;
+ } while (ptr != edge);
+ }
+ }
+
+ // Reject polygons whose area (squared normal length) is effectively zero.
+ dgBigVector normal (polyhedra.FaceNormal (edge, &m_vertexPoints[0].m_x, sizeof (dgBigVector)));
+ double mag2 = normal % normal;
+ if (mag2 < float (1.0e-8f)) {
+ return 0;
+ }
+ normal = normal.Scale (dgRsqrt (mag2));
+
+
+ // Convexity test: every consecutive edge pair must turn the same way as
+ // the face normal (cross product projected on the normal stays positive).
+ if (count >= 3) {
+ dgEdge* ptr = edge;
+ dgBigVector p0 (&m_vertexPoints[ptr->m_prev->m_incidentVertex].m_x);
+ dgBigVector p1 (&m_vertexPoints[ptr->m_incidentVertex].m_x);
+ dgBigVector e0 (p1 - p0);
+ e0 = e0.Scale (dgRsqrt (e0 % e0 + float(1.0e-10f)));
+ do {
+ dgBigVector p2 (&m_vertexPoints[ptr->m_next->m_incidentVertex].m_x);
+ dgBigVector e1 (p2 - p1);
+
+ e1 = e1.Scale (dgRsqrt (e1 % e1 + float(1.0e-10f)));
+
+ dgBigVector n (e0 * e1);
+ double mag2 = n % normal;
+ if (mag2 < float (1.0e-5f)) {
+ isconvex = 0;
+ break;
+ }
+
+ e0 = e1;
+ p1 = p2;
+ ptr = ptr->m_next;
+ } while (ptr != edge);
+ }
+ }
+
+ if (isconvex) {
+ // Already convex: emit the cleaned ring as a single face.
+ dgEdge* const first = edge;
+ if (count >= 3) {
+ count = 0;
+ dgEdge* ptr = first;
+ do {
+ pool[count] = ptr->m_incidentVertex;
+ count ++;
+ ptr = ptr->m_next;
+ } while (ptr != first);
+ facesArray[facesCount] = count;
+ facesCount = 1;
+ }
+ } else {
+ // Concave: rebuild the polygon in a scratch polyhedra and let
+ // ConvexPartition split it, then emit every resulting interior face.
+ dgPolyhedra leftOver;
+ dgPolyhedra polyhedra2;
+ dgEdge* ptr = edge;
+ count = 0;
+ do {
+ pool[count] = ptr->m_incidentVertex;
+ count ++;
+ ptr = ptr->m_next;
+ } while (ptr != edge);
+
+
+ polyhedra2.BeginFace();
+ polyhedra2.AddFace (count, pool);
+ polyhedra2.EndFace();
+ leftOver.BeginFace();
+ polyhedra2.ConvexPartition (&m_vertexPoints[0].m_x, sizeof (dgTriplex), &leftOver);
+ leftOver.EndFace();
+
+ // NOTE(review): faces captured in 'leftOver' are not emitted here —
+ // presumably intentional, but worth confirming against callers.
+ int32_t mark = polyhedra2.IncLRU();
+ int32_t index = 0;
+ dgPolyhedra::Iterator iter (polyhedra2);
+ for (iter.Begin(); iter; iter ++) {
+ dgEdge* const edge = &(*iter);
+ if (edge->m_incidentFace < 0) {
+ continue;
+ }
+ if (edge->m_mark == mark) {
+ continue;
+ }
+
+ ptr = edge;
+ count = 0;
+ do {
+ ptr->m_mark = mark;
+ pool[index] = ptr->m_incidentVertex;
+ index ++;
+ count ++;
+ ptr = ptr->m_next;
+ } while (ptr != edge);
+
+ facesArray[facesCount] = count;
+ facesCount ++;
+ }
+ }
+
+ return facesCount;
+}
diff --git a/APEX_1.4/shared/general/HACD/src/dgPolyhedra.cpp b/APEX_1.4/shared/general/HACD/src/dgPolyhedra.cpp
new file mode 100644
index 00000000..5e43af37
--- /dev/null
+++ b/APEX_1.4/shared/general/HACD/src/dgPolyhedra.cpp
@@ -0,0 +1,2433 @@
+/* Copyright (c) <2003-2011> <Julio Jerez, Newton Game Dynamics>
+*
+* This software is provided 'as-is', without any express or implied
+* warranty. In no event will the authors be held liable for any damages
+* arising from the use of this software.
+*
+* Permission is granted to anyone to use this software for any purpose,
+* including commercial applications, and to alter it and redistribute it
+* freely, subject to the following restrictions:
+*
+* 1. The origin of this software must not be misrepresented; you must not
+* claim that you wrote the original software. If you use this software
+* in a product, an acknowledgment in the product documentation would be
+* appreciated but is not required.
+*
+* 2. Altered source versions must be plainly marked as such, and must not be
+* misrepresented as being the original software.
+*
+* 3. This notice may not be removed or altered from any source distribution.
+*/
+
+#include "dgTypes.h"
+#include "dgHeap.h"
+#include "dgStack.h"
+#include "dgSphere.h"
+#include "dgPolyhedra.h"
+#include "dgConvexHull3d.h"
+#include "dgSmallDeterminant.h"
+#include <string.h>
+
+
+//#define DG_MIN_EDGE_ASPECT_RATIO double (0.02f)
+
+// Lightweight value type recording an edge by the vertex indices of its two
+// endpoints (edge origin and twin origin), decoupled from dgEdge pointers so
+// it survives topology changes.
+class dgDiagonalEdge
+{
+ public:
+ dgDiagonalEdge (dgEdge* const edge)
+ :m_i0(edge->m_incidentVertex), m_i1(edge->m_twin->m_incidentVertex)
+ {
+ }
+ int32_t m_i0; // origin vertex index of the edge
+ int32_t m_i1; // origin vertex index of the edge's twin (i.e. the other endpoint)
+};
+
+
+// Back-pointer handle that tracks a dgEdge across edge-collapse operations.
+// The edge's m_userData stores a pointer to the handle currently "owning" it;
+// copy-constructing a handle steals that ownership (the previous handle's
+// m_edge is nulled), and the destructor clears m_userData only if this handle
+// is still the owner.  Handles with m_inList == false (the initial,
+// non-copied form) never clear m_userData on destruction.
+struct dgEdgeCollapseEdgeHandle
+{
+ dgEdgeCollapseEdgeHandle (dgEdge* const newEdge)
+ :m_inList(false), m_edge(newEdge)
+ {
+ }
+
+ dgEdgeCollapseEdgeHandle (const dgEdgeCollapseEdgeHandle &dataHandle)
+ :m_inList(true), m_edge(dataHandle.m_edge)
+ {
+ // Steal ownership: invalidate whatever handle was registered before.
+ dgEdgeCollapseEdgeHandle* const handle = (dgEdgeCollapseEdgeHandle *)IntToPointer (m_edge->m_userData);
+ if (handle) {
+ HACD_ASSERT (handle != this);
+ handle->m_edge = NULL;
+ }
+ m_edge->m_userData = uint64_t (PointerToInt(this));
+ }
+
+ ~dgEdgeCollapseEdgeHandle ()
+ {
+ if (m_inList) {
+ if (m_edge) {
+ // Only unregister if the edge still points back at this handle.
+ dgEdgeCollapseEdgeHandle* const handle = (dgEdgeCollapseEdgeHandle *)IntToPointer (m_edge->m_userData);
+ if (handle == this) {
+ m_edge->m_userData = PointerToInt (NULL);
+ }
+ }
+ }
+ m_edge = NULL;
+ }
+
+ uint32_t m_inList; // true when this handle was copied into a list and owns the edge's userData slot
+ dgEdge* m_edge;    // tracked edge; NULL once superseded or the edge was removed
+};
+
+
+// Per-vertex error metric used by mesh simplification (edge collapse).
+// Stores the 10 unique coefficients of the symmetric quadric form built from
+// plane equations (cf. quadric error metrics); Evalue() returns the absolute
+// quadric error of a candidate point against the accumulated planes.
+class dgVertexCollapseVertexMetric
+{
+ public:
+ // elem[0..3]: squared plane coefficients (x*x, y*y, z*z, w*w);
+ // elem[4..9]: doubled cross terms (xy, xz, xw, yz, yw, zw).
+ double elem[10];
+
+ dgVertexCollapseVertexMetric (const dgBigPlane &plane)
+ {
+ elem[0] = plane.m_x * plane.m_x;
+ elem[1] = plane.m_y * plane.m_y;
+ elem[2] = plane.m_z * plane.m_z;
+ elem[3] = plane.m_w * plane.m_w;
+ elem[4] = double (2.0) * plane.m_x * plane.m_y;
+ elem[5] = double (2.0) * plane.m_x * plane.m_z;
+ elem[6] = double (2.0) * plane.m_x * plane.m_w;
+ elem[7] = double (2.0) * plane.m_y * plane.m_z;
+ elem[8] = double (2.0) * plane.m_y * plane.m_w;
+ elem[9] = double (2.0) * plane.m_z * plane.m_w;
+ }
+
+ // Reset all coefficients to zero.
+ void Clear ()
+ {
+ memset (elem, 0, 10 * sizeof (double));
+ }
+
+ // Add another vertex metric term by term.
+ void Accumulate (const dgVertexCollapseVertexMetric& p)
+ {
+ elem[0] += p.elem[0];
+ elem[1] += p.elem[1];
+ elem[2] += p.elem[2];
+ elem[3] += p.elem[3];
+ elem[4] += p.elem[4];
+ elem[5] += p.elem[5];
+ elem[6] += p.elem[6];
+ elem[7] += p.elem[7];
+ elem[8] += p.elem[8];
+ elem[9] += p.elem[9];
+ }
+
+ // Add the quadric of a single plane directly (same terms as the ctor).
+ void Accumulate (const dgBigPlane& plane)
+ {
+ elem[0] += plane.m_x * plane.m_x;
+ elem[1] += plane.m_y * plane.m_y;
+ elem[2] += plane.m_z * plane.m_z;
+ elem[3] += plane.m_w * plane.m_w;
+
+ elem[4] += double (2.0f) * plane.m_x * plane.m_y;
+ elem[5] += double (2.0f) * plane.m_x * plane.m_z;
+ elem[7] += double (2.0f) * plane.m_y * plane.m_z;
+
+ elem[6] += double (2.0f) * plane.m_x * plane.m_w;
+ elem[8] += double (2.0f) * plane.m_y * plane.m_w;
+ elem[9] += double (2.0f) * plane.m_z * plane.m_w;
+ }
+
+
+ // Evaluate |p^T Q p| for point p (w assumed 1 via the linear/constant terms).
+ double Evalue (const dgVector &p) const
+ {
+ double acc = elem[0] * p.m_x * p.m_x + elem[1] * p.m_y * p.m_y + elem[2] * p.m_z * p.m_z +
+ elem[4] * p.m_x * p.m_y + elem[5] * p.m_x * p.m_z + elem[7] * p.m_y * p.m_z +
+ elem[6] * p.m_x + elem[8] * p.m_y + elem[9] * p.m_z + elem[3];
+ return fabs (acc);
+ }
+};
+
+
+
+// Default constructor: empty edge tree, marks and face sequence start at zero.
+dgPolyhedra::dgPolyhedra (void)
+ :dgTree <dgEdge, int64_t>()
+ ,m_baseMark(0)
+ ,m_edgeMark(0)
+ ,m_faceSecuence(0)
+{
+}
+
+// Copy constructor: rebuilds the half-edge structure by re-adding every
+// interior face of the source (skipping boundary faces, m_incidentFace < 0)
+// and preserving per-edge user data and face ids.
+// NOTE(review): the scratch pools hold 16K indices, so faces with more than
+// 16K vertices would overflow them — presumably never the case; confirm.
+dgPolyhedra::dgPolyhedra (const dgPolyhedra &polyhedra)
+ :dgTree <dgEdge, int64_t>()
+ ,m_baseMark(0)
+ ,m_edgeMark(0)
+ ,m_faceSecuence(0)
+{
+ dgStack<int32_t> indexPool (1024 * 16);
+ dgStack<uint64_t> userPool (1024 * 16);
+ int32_t* const index = &indexPool[0];
+ uint64_t* const user = &userPool[0];
+
+ BeginFace ();
+ Iterator iter(polyhedra);
+ for (iter.Begin(); iter; iter ++) {
+ dgEdge* const edge = &(*iter);
+ if (edge->m_incidentFace < 0) {
+ continue;
+ }
+
+ // Add each face only once: skip it if one of its edges already exists.
+ if (!FindEdge(edge->m_incidentVertex, edge->m_twin->m_incidentVertex)) {
+ int32_t indexCount = 0;
+ dgEdge* ptr = edge;
+ do {
+ user[indexCount] = ptr->m_userData;
+ index[indexCount] = ptr->m_incidentVertex;
+ indexCount ++;
+ ptr = ptr->m_next;
+ } while (ptr != edge);
+
+ dgEdge* const face = AddFace (indexCount, index, (int64_t*) user);
+ ptr = face;
+ do {
+ ptr->m_incidentFace = edge->m_incidentFace;
+ ptr = ptr->m_next;
+ } while (ptr != face);
+ }
+ }
+ EndFace();
+
+ m_faceSecuence = polyhedra.m_faceSecuence;
+
+#ifdef __ENABLE_SANITY_CHECK
+ HACD_ASSERT (SanityCheck());
+#endif
+}
+
+// Destructor: nothing to do beyond the base dgTree cleanup.
+dgPolyhedra::~dgPolyhedra ()
+{
+}
+
+
+// Counts interior faces (m_incidentFace > 0) by walking every edge ring once,
+// using a fresh LRU mark to avoid revisiting edges of an already-counted face.
+int32_t dgPolyhedra::GetFaceCount() const
+{
+ Iterator iter (*this);
+ int32_t count = 0;
+ int32_t mark = IncLRU();
+ for (iter.Begin(); iter; iter ++) {
+ dgEdge* const edge = &(*iter);
+ if (edge->m_mark == mark) {
+ continue;
+ }
+
+ if (edge->m_incidentFace < 0) {
+ continue;
+ }
+
+ count ++;
+ dgEdge* ptr = edge;
+ do {
+ ptr->m_mark = mark;
+ ptr = ptr->m_next;
+ } while (ptr != edge);
+ }
+ return count;
+}
+
+
+
+// Inserts a new face given 'count' vertex indices (and optional per-vertex
+// user data).  Rejects self-intersecting input: repeated directed edges,
+// degenerate edges (i0 == i1), and edges that already exist in the tree all
+// return NULL.  Twins are left unlinked; EndFace() connects them later.
+// Returns an edge of the new face (the one after the first inserted edge).
+dgEdge* dgPolyhedra::AddFace ( int32_t count, const int32_t* const index, const int64_t* const userdata)
+{
+ // Small linear-scan set used to detect duplicate edge keys within this face.
+ // NOTE(review): capacity is fixed at 2048 entries with no bounds check —
+ // assumes faces contribute at most 1024 edges (2 keys each); confirm.
+ class IntersectionFilter
+ {
+ public:
+ IntersectionFilter ()
+ {
+ m_count = 0;
+ }
+
+ // Returns false if 'value' was already inserted, true after storing it.
+ bool Insert (int32_t /*dummy*/, int64_t value)
+ {
+ int32_t i;
+ for (i = 0 ; i < m_count; i ++) {
+ if (m_array[i] == value) {
+ return false;
+ }
+ }
+ m_array[i] = value;
+ m_count ++;
+ return true;
+ }
+
+ int32_t m_count;
+ int64_t m_array[2048];
+ };
+
+ IntersectionFilter selfIntersectingFaceFilter;
+
+ // Validation pass: both orientations of every edge must be unique.
+ int32_t dummyValues = 0;
+ int32_t i0 = index[count-1];
+ for (int32_t i = 0; i < count; i ++) {
+ int32_t i1 = index[i];
+ dgPairKey code0 (i0, i1);
+
+ if (!selfIntersectingFaceFilter.Insert (dummyValues, code0.GetVal())) {
+ return NULL;
+ }
+
+ dgPairKey code1 (i1, i0);
+ if (!selfIntersectingFaceFilter.Insert (dummyValues, code1.GetVal())) {
+ return NULL;
+ }
+
+
+ if (i0 == i1) {
+ return NULL;
+ }
+ if (FindEdge (i0, i1)) {
+ return NULL;
+ }
+ i0 = i1;
+ }
+
+ // Each face gets a fresh sequence id, stored as its m_incidentFace.
+ m_faceSecuence ++;
+
+ i0 = index[count-1];
+ int32_t i1 = index[0];
+ uint64_t udata0 = 0;
+ uint64_t udata1 = 0;
+ if (userdata) {
+ udata0 = uint64_t (userdata[count-1]);
+ udata1 = uint64_t (userdata[0]);
+ }
+
+ bool state;
+ dgPairKey code (i0, i1);
+ dgEdge tmpEdge (i0, m_faceSecuence, udata0);
+ dgTreeNode* node = Insert (tmpEdge, code.GetVal(), state);
+ HACD_ASSERT (!state);
+ dgEdge* edge0 = &node->GetInfo();
+ dgEdge* const first = edge0;
+
+ // Insert the remaining edges and chain them with next/prev links.
+ for (int32_t i = 1; i < count; i ++) {
+ i0 = i1;
+ i1 = index[i];
+ udata0 = udata1;
+ udata1 = uint64_t (userdata ? userdata[i] : 0);
+
+ dgPairKey code (i0, i1);
+ dgEdge tmpEdge (i0, m_faceSecuence, udata0);
+ node = Insert (tmpEdge, code.GetVal(), state);
+ HACD_ASSERT (!state);
+
+ dgEdge* const edge1 = &node->GetInfo();
+ edge0->m_next = edge1;
+ edge1->m_prev = edge0;
+ edge0 = edge1;
+ }
+
+ first->m_prev = edge0;
+ edge0->m_next = first;
+
+ return first->m_next;
+}
+
+
+// Finalizes the mesh after a sequence of AddFace calls: pairs every edge with
+// its reverse-direction twin, then creates boundary twin edges
+// (m_incidentFace == -1) for edges that have no interior twin and links those
+// boundary edges into loops around each hole.
+void dgPolyhedra::EndFace ()
+{
+ dgPolyhedra::Iterator iter (*this);
+
+ // Connect all twin edge
+ for (iter.Begin(); iter; iter ++) {
+ dgEdge* const edge = &(*iter);
+ if (!edge->m_twin) {
+ edge->m_twin = FindEdge (edge->m_next->m_incidentVertex, edge->m_incidentVertex);
+ if (edge->m_twin) {
+ edge->m_twin->m_twin = edge;
+ }
+ }
+ }
+
+#ifdef __ENABLE_SANITY_CHECK
+ HACD_ASSERT (SanityCheck());
+#endif
+ dgStack<dgEdge*> edgeArrayPool(GetCount() * 2 + 256);
+
+ // Create the missing boundary twins and remember them for linking below.
+ int32_t edgeCount = 0;
+ dgEdge** const edgeArray = &edgeArrayPool[0];
+ for (iter.Begin(); iter; iter ++) {
+ dgEdge* const edge = &(*iter);
+ if (!edge->m_twin) {
+ bool state;
+ dgPolyhedra::dgPairKey code (edge->m_next->m_incidentVertex, edge->m_incidentVertex);
+ dgEdge tmpEdge (edge->m_next->m_incidentVertex, -1);
+ tmpEdge.m_incidentFace = -1;
+ dgPolyhedra::dgTreeNode* const node = Insert (tmpEdge, code.GetVal(), state);
+ HACD_ASSERT (!state);
+ edge->m_twin = &node->GetInfo();
+ edge->m_twin->m_twin = edge;
+ edgeArray[edgeCount] = edge->m_twin;
+ edgeCount ++;
+ }
+ }
+
+ // Link each boundary edge to the next boundary edge around its vertex by
+ // walking twin->next chains until an unlinked slot is found.
+ for (int32_t i = 0; i < edgeCount; i ++) {
+ dgEdge* const edge = edgeArray[i];
+ HACD_ASSERT (!edge->m_prev);
+ dgEdge *ptr = edge->m_twin;
+ for (; ptr->m_next; ptr = ptr->m_next->m_twin){}
+ ptr->m_next = edge;
+ edge->m_prev = ptr;
+ }
+
+#ifdef __ENABLE_SANITY_CHECK
+ HACD_ASSERT (SanityCheck ());
+#endif
+}
+
+
+// Deletes an interior face: marks its ring as boundary (m_incidentFace = -1)
+// and removes every edge whose twin is also a boundary edge, so shared edges
+// with live neighbor faces are kept.
+// NOTE(review): the local edge list is a fixed 16K-entry stack array with no
+// bounds check — assumes faces never exceed that size; confirm.
+void dgPolyhedra::DeleteFace(dgEdge* const face)
+{
+ dgEdge* edgeList[1024 * 16];
+
+ if (face->m_incidentFace > 0) {
+ int32_t count = 0;
+ dgEdge* ptr = face;
+ do {
+ ptr->m_incidentFace = -1;
+ // Collect each physical edge once (either half counts).
+ int32_t i = 0;
+ for (; i < count; i ++) {
+ if ((edgeList[i] == ptr) || (edgeList[i]->m_twin == ptr)) {
+ break;
+ }
+ }
+ if (i == count) {
+ edgeList[count] = ptr;
+ count ++;
+ }
+ ptr = ptr->m_next;
+ } while (ptr != face);
+
+
+ for (int32_t i = 0; i < count; i ++) {
+ dgEdge* const ptr = edgeList[i];
+ if (ptr->m_twin->m_incidentFace < 0) {
+ DeleteEdge (ptr);
+ }
+ }
+ }
+}
+
+
+
+// Computes the (non-normalized) face normal by summing cross products of the
+// triangle fan anchored at the face's first vertex; works for any convex or
+// planar polygon.  'pool' is the vertex array, 'strideInBytes' the per-vertex
+// stride.  The returned vector's length is twice the face area.
+dgBigVector dgPolyhedra::FaceNormal (dgEdge* const face, const double* const pool, int32_t strideInBytes) const
+{
+ int32_t stride = strideInBytes / (int32_t)sizeof (double);
+ dgEdge* edge = face;
+ dgBigVector p0 (&pool[edge->m_incidentVertex * stride]);
+ edge = edge->m_next;
+ dgBigVector p1 (&pool[edge->m_incidentVertex * stride]);
+ dgBigVector e1 (p1 - p0);
+
+ dgBigVector normal (float (0.0f), float (0.0f), float (0.0f), float (0.0f));
+ for (edge = edge->m_next; edge != face; edge = edge->m_next) {
+ dgBigVector p2 (&pool[edge->m_incidentVertex * stride]);
+ dgBigVector e2 (p2 - p0);
+ normal += e1 * e2;
+ e1 = e2;
+ }
+ return normal;
+}
+
+
+// Inserts a single unlinked half edge from vertex v0 to v1 with no face.
+// Returns NULL for degenerate input (v0 == v1) or if the key already exists.
+dgEdge* dgPolyhedra::AddHalfEdge (int32_t v0, int32_t v1)
+{
+ if (v0 != v1) {
+ dgPairKey pairKey (v0, v1);
+ dgEdge tmpEdge (v0, -1);
+
+ dgTreeNode* node = Insert (tmpEdge, pairKey.GetVal());
+ return node ? &node->GetInfo() : NULL;
+ } else {
+ return NULL;
+ }
+}
+
+
+// Removes an edge and its twin from the mesh, splicing the surrounding
+// next/prev rings so the two incident faces merge into one loop.
+void dgPolyhedra::DeleteEdge (dgEdge* const edge)
+{
+ dgEdge *const twin = edge->m_twin;
+
+ edge->m_prev->m_next = twin->m_next;
+ twin->m_next->m_prev = edge->m_prev;
+ edge->m_next->m_prev = twin->m_prev;
+ twin->m_prev->m_next = edge->m_next;
+
+ dgTreeNode *const nodeA = GetNodeFromInfo (*edge);
+ dgTreeNode *const nodeB = GetNodeFromInfo (*twin);
+
+ HACD_ASSERT (&nodeA->GetInfo() == edge);
+ HACD_ASSERT (&nodeB->GetInfo() == twin);
+
+ Remove (nodeA);
+ Remove (nodeB);
+}
+
+
+// Splits an edge by inserting vertex 'newIndex' in its middle: the original
+// edge/twin pair is deleted and replaced by two edge/twin pairs that keep the
+// incident face ids of the originals.  Returns the first new edge (the half
+// leaving the original edge's origin).  (Name "Splite" is historical.)
+dgEdge* dgPolyhedra::SpliteEdge (int32_t newIndex, dgEdge* const edge)
+{
+ // Remember the four neighbors and attributes before deleting the edge.
+ dgEdge* const edge00 = edge->m_prev;
+ dgEdge* const edge01 = edge->m_next;
+ dgEdge* const twin00 = edge->m_twin->m_next;
+ dgEdge* const twin01 = edge->m_twin->m_prev;
+
+ int32_t i0 = edge->m_incidentVertex;
+ int32_t i1 = edge->m_twin->m_incidentVertex;
+
+ int32_t f0 = edge->m_incidentFace;
+ int32_t f1 = edge->m_twin->m_incidentFace;
+
+ DeleteEdge (edge);
+
+ dgEdge* const edge0 = AddHalfEdge (i0, newIndex);
+ dgEdge* const edge1 = AddHalfEdge (newIndex, i1);
+
+ dgEdge* const twin0 = AddHalfEdge (newIndex, i0);
+ dgEdge* const twin1 = AddHalfEdge (i1, newIndex);
+ HACD_ASSERT (edge0);
+ HACD_ASSERT (edge1);
+ HACD_ASSERT (twin0);
+ HACD_ASSERT (twin1);
+
+ edge0->m_twin = twin0;
+ twin0->m_twin = edge0;
+
+ edge1->m_twin = twin1;
+ twin1->m_twin = edge1;
+
+ edge0->m_next = edge1;
+ edge1->m_prev = edge0;
+
+ twin1->m_next = twin0;
+ twin0->m_prev = twin1;
+
+ // Re-attach the new halves to the remembered neighbors.
+ edge0->m_prev = edge00;
+ edge00 ->m_next = edge0;
+
+ edge1->m_next = edge01;
+ edge01->m_prev = edge1;
+
+ twin0->m_next = twin00;
+ twin00->m_prev = twin0;
+
+ twin1->m_prev = twin01;
+ twin01->m_next = twin1;
+
+ edge0->m_incidentFace = f0;
+ edge1->m_incidentFace = f0;
+
+ twin0->m_incidentFace = f1;
+ twin1->m_incidentFace = f1;
+
+#ifdef __ENABLE_SANITY_CHECK
+ // HACD_ASSERT (SanityCheck ());
+#endif
+
+ return edge0;
+}
+
+
+
+// Flips the diagonal of the quad formed by two adjacent triangles: the edge
+// is re-keyed to connect the two opposite vertices instead.  Returns false
+// (no change) unless both incident faces are triangles and the flipped edge
+// does not already exist.
+bool dgPolyhedra::FlipEdge (dgEdge* const edge)
+{
+ // dgTreeNode *node;
+ if (edge->m_next->m_next->m_next != edge) {
+ return false;
+ }
+
+ if (edge->m_twin->m_next->m_next->m_next != edge->m_twin) {
+ return false;
+ }
+
+ if (FindEdge(edge->m_prev->m_incidentVertex, edge->m_twin->m_prev->m_incidentVertex)) {
+ return false;
+ }
+
+ dgEdge *const prevEdge = edge->m_prev;
+ dgEdge *const prevTwin = edge->m_twin->m_prev;
+
+ // Re-key the edge and its twin to the new vertex pair in the search tree.
+ dgPairKey edgeKey (prevTwin->m_incidentVertex, prevEdge->m_incidentVertex);
+ dgPairKey twinKey (prevEdge->m_incidentVertex, prevTwin->m_incidentVertex);
+
+ ReplaceKey (GetNodeFromInfo (*edge), edgeKey.GetVal());
+ // HACD_ASSERT (node);
+
+ ReplaceKey (GetNodeFromInfo (*edge->m_twin), twinKey.GetVal());
+ // HACD_ASSERT (node);
+
+ edge->m_incidentVertex = prevTwin->m_incidentVertex;
+ edge->m_twin->m_incidentVertex = prevEdge->m_incidentVertex;
+
+ edge->m_userData = prevTwin->m_userData;
+ edge->m_twin->m_userData = prevEdge->m_userData;
+
+ // Rewire the two triangle rings around the flipped diagonal.
+ prevEdge->m_next = edge->m_twin->m_next;
+ prevTwin->m_prev->m_prev = edge->m_prev;
+
+ prevTwin->m_next = edge->m_next;
+ prevEdge->m_prev->m_prev = edge->m_twin->m_prev;
+
+ edge->m_prev = prevTwin->m_prev;
+ edge->m_next = prevEdge;
+
+ edge->m_twin->m_prev = prevEdge->m_prev;
+ edge->m_twin->m_next = prevTwin;
+
+ prevTwin->m_prev->m_next = edge;
+ prevTwin->m_prev = edge->m_twin;
+
+ prevEdge->m_prev->m_next = edge->m_twin;
+ prevEdge->m_prev = edge;
+
+ // Propagate face ids so each new triangle is consistently labeled.
+ edge->m_next->m_incidentFace = edge->m_incidentFace;
+ edge->m_prev->m_incidentFace = edge->m_incidentFace;
+
+ edge->m_twin->m_next->m_incidentFace = edge->m_twin->m_incidentFace;
+ edge->m_twin->m_prev->m_incidentFace = edge->m_twin->m_incidentFace;
+
+
+#ifdef __ENABLE_SANITY_CHECK
+ HACD_ASSERT (SanityCheck ());
+#endif
+
+ return true;
+}
+
+
+
+// Extracts one connected patch of faces into 'polyhedra' using a depth-first
+// flood fill across twins, starting from the first interior edge not yet
+// visited (m_mark < m_baseMark).  Returns false when no unvisited surface
+// remains, so repeated calls enumerate all connected components.
+// NOTE(review): faces are limited to 4096 vertices by the local index arrays.
+bool dgPolyhedra::GetConectedSurface (dgPolyhedra &polyhedra) const
+{
+ if (!GetCount()) {
+ return false;
+ }
+
+ // Find a seed edge belonging to a not-yet-extracted interior face.
+ dgEdge* edge = NULL;
+ Iterator iter(*this);
+ for (iter.Begin (); iter; iter ++) {
+ edge = &(*iter);
+ if ((edge->m_mark < m_baseMark) && (edge->m_incidentFace > 0)) {
+ break;
+ }
+ }
+
+ if (!iter) {
+ return false;
+ }
+
+ int32_t faceIndex[4096];
+ int64_t faceDataIndex[4096];
+ dgStack<dgEdge*> stackPool (GetCount());
+ dgEdge** const stack = &stackPool[0];
+
+ int32_t mark = IncLRU();
+
+ polyhedra.BeginFace ();
+ stack[0] = edge;
+ int32_t index = 1;
+ while (index) {
+ index --;
+ dgEdge* const edge = stack[index];
+
+ if (edge->m_mark == mark) {
+ continue;
+ }
+
+ // Copy this face and push unvisited neighbor faces via the twins.
+ int32_t count = 0;
+ dgEdge* ptr = edge;
+ do {
+ ptr->m_mark = mark;
+ faceIndex[count] = ptr->m_incidentVertex;
+ faceDataIndex[count] = int64_t (ptr->m_userData);
+ count ++;
+ HACD_ASSERT (count < int32_t ((sizeof (faceIndex)/sizeof(faceIndex[0]))));
+
+ if ((ptr->m_twin->m_incidentFace > 0) && (ptr->m_twin->m_mark != mark)) {
+ stack[index] = ptr->m_twin;
+ index ++;
+ HACD_ASSERT (index < GetCount());
+ }
+
+ ptr = ptr->m_next;
+ } while (ptr != edge);
+
+ polyhedra.AddFace (count, &faceIndex[0], &faceDataIndex[0]);
+ }
+
+ polyhedra.EndFace ();
+
+ return true;
+}
+
+
+// Re-labels the origin vertex of every edge leaving the same vertex as
+// 'edge' to 'newIndex', updating both each edge's and its twin's tree keys
+// to keep the (origin, destination) lookup consistent.
+void dgPolyhedra::ChangeEdgeIncidentVertex (dgEdge* const edge, int32_t newIndex)
+{
+ dgEdge* ptr = edge;
+ do {
+ dgTreeNode* node = GetNodeFromInfo(*ptr);
+ dgPairKey Key0 (newIndex, ptr->m_twin->m_incidentVertex);
+ ReplaceKey (node, Key0.GetVal());
+
+ node = GetNodeFromInfo(*ptr->m_twin);
+ dgPairKey Key1 (ptr->m_twin->m_incidentVertex, newIndex);
+ ReplaceKey (node, Key1.GetVal());
+
+ ptr->m_incidentVertex = newIndex;
+
+ // twin->next advances around the vertex fan to the next outgoing edge.
+ ptr = ptr->m_twin->m_next;
+ } while (ptr != edge);
+}
+
+
+// Deletes every interior face whose area falls below 'area'.  The squared
+// face-normal length equals (2*area)^2, hence the comparison against
+// 4*area^2.  Faces are collected first so deletion does not disturb the
+// iteration.
+void dgPolyhedra::DeleteDegenerateFaces (const double* const pool, int32_t strideInBytes, double area)
+{
+ if (!GetCount()) {
+ return;
+ }
+
+#ifdef __ENABLE_SANITY_CHECK
+ HACD_ASSERT (SanityCheck ());
+#endif
+ dgStack <dgPolyhedra::dgTreeNode*> faceArrayPool(GetCount() / 2 + 100);
+
+ // Collect one representative node per interior face.
+ int32_t count = 0;
+ dgPolyhedra::dgTreeNode** const faceArray = &faceArrayPool[0];
+ int32_t mark = IncLRU();
+ Iterator iter (*this);
+ for (iter.Begin(); iter; iter ++) {
+ dgEdge* const edge = &(*iter);
+
+ if ((edge->m_mark != mark) && (edge->m_incidentFace > 0)) {
+ faceArray[count] = iter.GetNode();
+ count ++;
+ dgEdge* ptr = edge;
+ do {
+ ptr->m_mark = mark;
+ ptr = ptr->m_next;
+ } while (ptr != edge);
+ }
+ }
+
+ // |FaceNormal|^2 == (2 * area)^2, so compare against 4 * area^2.
+ double area2 = area * area;
+ area2 *= double (4.0f);
+
+ for (int32_t i = 0; i < count; i ++) {
+ dgPolyhedra::dgTreeNode* const faceNode = faceArray[i];
+ dgEdge* const edge = &faceNode->GetInfo();
+
+ dgBigVector normal (FaceNormal (edge, pool, strideInBytes));
+
+ double faceArea = normal % normal;
+ if (faceArea < area2) {
+ DeleteFace (edge);
+ }
+ }
+
+#ifdef __ENABLE_SANITY_CHECK
+ mark = IncLRU();
+ for (iter.Begin(); iter; iter ++) {
+ dgEdge* const edge = &(*iter);
+ if ((edge->m_mark != mark) && (edge->m_incidentFace > 0)) {
+ //HACD_ASSERT (edge->m_next->m_next->m_next == edge);
+ dgEdge* ptr = edge;
+ do {
+ ptr->m_mark = mark;
+ ptr = ptr->m_next;
+ } while (ptr != edge);
+
+ dgBigVector normal (FaceNormal (edge, pool, strideInBytes));
+
+ double faceArea = normal % normal;
+ HACD_ASSERT (faceArea >= area2);
+ }
+ }
+ HACD_ASSERT (SanityCheck ());
+#endif
+}
+
+
+// Copies 'count' strided xyz triples from 'src' into dgBigVector 'dst',
+// zeroing w.  Despite the name, no re-centering is done any more — the
+// centering code is commented out below and kept for reference.
+static void NormalizeVertex (int32_t count, dgBigVector* const dst, const double* const src, int32_t stride)
+{
+// dgBigVector min;
+// dgBigVector max;
+// GetMinMax (min, max, src, count, int32_t (stride * sizeof (double)));
+// dgBigVector centre (float (0.0f), float (0.0f), float (0.0f), float (0.0f));
+ for (int32_t i = 0; i < count; i ++) {
+// dst[i].m_x = centre.m_x + src[i * stride + 0];
+// dst[i].m_y = centre.m_y + src[i * stride + 1];
+// dst[i].m_z = centre.m_z + src[i * stride + 2];
+ dst[i].m_x = src[i * stride + 0];
+ dst[i].m_y = src[i * stride + 1];
+ dst[i].m_z = src[i * stride + 2];
+ dst[i].m_w= double (0.0f);
+ }
+}
+
+// Builds the normalized plane through the three pool vertices i0, i1, i2.
+// The magnitude is clamped to 1.0e-12 to avoid division by zero for
+// degenerate (collinear) triangles.
+static dgBigPlane EdgePlane (int32_t i0, int32_t i1, int32_t i2, const dgBigVector* const pool)
+{
+ const dgBigVector& p0 = pool[i0];
+ const dgBigVector& p1 = pool[i1];
+ const dgBigVector& p2 = pool[i2];
+
+ dgBigPlane plane (p0, p1, p2);
+ double mag = sqrt (plane % plane);
+ if (mag < double (1.0e-12f)) {
+ mag = double (1.0e-12f);
+ }
+ mag = double (1.0f) / mag;
+
+ plane.m_x *= mag;
+ plane.m_y *= mag;
+ plane.m_z *= mag;
+ plane.m_w *= mag;
+
+ return plane;
+}
+
+
+// Builds a constraint plane for a boundary (open) edge loop: the plane
+// contains edge p0-p1 and is perpendicular to the triangle p0,p1,p2
+// (normal = (E0 x E1) x E0).  It is scaled to length 10 rather than 1 —
+// presumably to weight boundary constraints more heavily than interior
+// planes in the collapse metric; confirm against EdgePlane usage.
+static dgBigPlane UnboundedLoopPlane (int32_t i0, int32_t i1, int32_t i2, const dgBigVector* const pool)
+{
+ const dgBigVector p0 = pool[i0];
+ const dgBigVector p1 = pool[i1];
+ const dgBigVector p2 = pool[i2];
+ dgBigVector E0 (p1 - p0);
+ dgBigVector E1 (p2 - p0);
+
+ dgBigVector N ((E0 * E1) * E0);
+ double dist = - (N % p0);
+ dgBigPlane plane (N, dist);
+
+ double mag = sqrt (plane % plane);
+ if (mag < double (1.0e-12f)) {
+ mag = double (1.0e-12f);
+ }
+ mag = double (10.0f) / mag;
+
+ plane.m_x *= mag;
+ plane.m_y *= mag;
+ plane.m_z *= mag;
+ plane.m_w *= mag;
+
+ return plane;
+}
+
+
+// Initializes the per-vertex collapse metric table: for every interior face
+// its plane quadric is accumulated into each of the face's vertices; for
+// every boundary edge an unbounded-loop constraint plane is accumulated into
+// both of its endpoints.
+static void CalculateAllMetrics (const dgPolyhedra* const polyhedra, dgVertexCollapseVertexMetric* const table, const dgBigVector* const pool)
+{
+ int32_t edgeMark = polyhedra->IncLRU();
+ dgPolyhedra::Iterator iter (*polyhedra);
+ for (iter.Begin(); iter; iter ++) {
+ dgEdge* const edge = &(*iter);
+
+ HACD_ASSERT (edge);
+ if (edge->m_mark != edgeMark) {
+
+ if (edge->m_incidentFace > 0) {
+ // Interior face: accumulate its plane into all ring vertices.
+ int32_t i0 = edge->m_incidentVertex;
+ int32_t i1 = edge->m_next->m_incidentVertex;
+ int32_t i2 = edge->m_prev->m_incidentVertex;
+
+ dgBigPlane constrainPlane (EdgePlane (i0, i1, i2, pool));
+ dgVertexCollapseVertexMetric tmp (constrainPlane);
+
+ dgEdge* ptr = edge;
+ do {
+ ptr->m_mark = edgeMark;
+ i0 = ptr->m_incidentVertex;
+ table[i0].Accumulate(tmp);
+
+ ptr = ptr->m_next;
+ } while (ptr != edge);
+
+ } else {
+ // Boundary edge: constrain both endpoints with the boundary plane
+ // derived from the twin's (interior) triangle.
+ HACD_ASSERT (edge->m_twin->m_incidentFace > 0);
+ int32_t i0 = edge->m_twin->m_incidentVertex;
+ int32_t i1 = edge->m_twin->m_next->m_incidentVertex;
+ int32_t i2 = edge->m_twin->m_prev->m_incidentVertex;
+
+ edge->m_mark = edgeMark;
+ dgBigPlane constrainPlane (UnboundedLoopPlane (i0, i1, i2, pool));
+ dgVertexCollapseVertexMetric tmp (constrainPlane);
+
+ i0 = edge->m_incidentVertex;
+ table[i0].Accumulate(tmp);
+
+ i0 = edge->m_twin->m_incidentVertex;
+ table[i0].Accumulate(tmp);
+ }
+ }
+ }
+}
+
+
+// Scores how safe it is to collapse 'edge' (moving its origin onto its twin's
+// vertex).  Returns -1 when the collapse must be rejected: near-zero edge
+// length, near-degenerate incident faces, folded (anti-parallel) face pair,
+// boundary conflicts, or a neighbor triangle that would flip its normal.
+// Otherwise returns the square root of the worst min/max squared-edge-length
+// ratio among the surviving triangles (1 = perfectly shaped, near 0 = sliver).
+double dgPolyhedra::EdgePenalty (const dgBigVector* const pool, dgEdge* const edge) const
+{
+ int32_t i0 = edge->m_incidentVertex;
+ int32_t i1 = edge->m_next->m_incidentVertex;
+
+ // Reject edges that are already essentially zero length.
+ const dgBigVector& p0 = pool[i0];
+ const dgBigVector& p1 = pool[i1];
+ dgBigVector dp (p1 - p0);
+
+ double dot = dp % dp;
+ if (dot < double(1.0e-6f)) {
+ return double (-1.0f);
+ }
+
+ if ((edge->m_incidentFace > 0) && (edge->m_twin->m_incidentFace > 0)) {
+ dgBigVector edgeNormal (FaceNormal (edge, &pool[0].m_x, sizeof (dgBigVector)));
+ dgBigVector twinNormal (FaceNormal (edge->m_twin, &pool[0].m_x, sizeof (dgBigVector)));
+
+ double mag0 = edgeNormal % edgeNormal;
+ double mag1 = twinNormal % twinNormal;
+ if ((mag0 < double (1.0e-24f)) || (mag1 < double (1.0e-24f))) {
+ return double (-1.0f);
+ }
+
+ edgeNormal = edgeNormal.Scale (double (1.0f) / sqrt(mag0));
+ twinNormal = twinNormal.Scale (double (1.0f) / sqrt(mag1));
+
+ // Reject folded geometry (the two incident faces nearly anti-parallel).
+ dot = edgeNormal % twinNormal;
+ if (dot < double (-0.9f)) {
+ return float (-1.0f);
+ }
+
+ // If either endpoint touches a boundary, reject when the other fan
+ // also touches a boundary (collapsing would pinch the border).
+ dgEdge* ptr = edge;
+ do {
+ if ((ptr->m_incidentFace <= 0) || (ptr->m_twin->m_incidentFace <= 0)){
+ dgEdge* const adj = edge->m_twin;
+ ptr = edge;
+ do {
+ if ((ptr->m_incidentFace <= 0) || (ptr->m_twin->m_incidentFace <= 0)){
+ return float (-1.0);
+ }
+ ptr = ptr->m_twin->m_next;
+ } while (ptr != adj);
+ }
+ ptr = ptr->m_twin->m_next;
+ } while (ptr != edge);
+ }
+
+ int32_t faceA = edge->m_incidentFace;
+ int32_t faceB = edge->m_twin->m_incidentFace;
+
+ // The collapse target: the twin's vertex position.
+ i0 = edge->m_twin->m_incidentVertex;
+ dgBigVector p (pool[i0].m_x, pool[i0].m_y, pool[i0].m_z, float (0.0f));
+
+ // Normal-flip test: for every triangle around the collapsing vertex
+ // (other than the two that disappear), the normal with the vertex moved
+ // to 'p' must not reverse direction.
+ bool penalty = false;
+ dgEdge* ptr = edge;
+ do {
+ dgEdge* const adj = ptr->m_twin;
+
+ int32_t face = adj->m_incidentFace;
+ if ((face != faceB) && (face != faceA) && (face >= 0) && (adj->m_next->m_incidentFace == face) && (adj->m_prev->m_incidentFace == face)){
+
+ int32_t i0 = adj->m_next->m_incidentVertex;
+ const dgBigVector& p0 = pool[i0];
+
+ int32_t i1 = adj->m_incidentVertex;
+ const dgBigVector& p1 = pool[i1];
+
+ int32_t i2 = adj->m_prev->m_incidentVertex;
+ const dgBigVector& p2 = pool[i2];
+
+ dgBigVector n0 ((p1 - p0) * (p2 - p0));
+ dgBigVector n1 ((p1 - p) * (p2 - p));
+
+// double mag0 = n0 % n0;
+// HACD_ASSERT (mag0 > double(1.0e-16f));
+// mag0 = sqrt (mag0);
+
+// double mag1 = n1 % n1;
+// mag1 = sqrt (mag1);
+
+ double dot = n0 % n1;
+ if (dot < double (0.0f)) {
+// if (dot <= (mag0 * mag1 * float (0.707f)) || (mag0 > (double(16.0f) * mag1))) {
+ penalty = true;
+ break;
+ }
+ }
+
+ ptr = ptr->m_twin->m_next;
+ } while (ptr != edge);
+
+ // Quality score: worst triangle aspect ratio after the collapse.
+ double aspect = float (-1.0f);
+ if (!penalty) {
+ int32_t i0 = edge->m_twin->m_incidentVertex;
+ dgBigVector p0 (pool[i0]);
+
+ aspect = float (1.0f);
+ for (dgEdge* ptr = edge->m_twin->m_next->m_twin->m_next; ptr != edge; ptr = ptr->m_twin->m_next) {
+ if (ptr->m_incidentFace > 0) {
+ int32_t i0 = ptr->m_next->m_incidentVertex;
+ const dgBigVector& p1 = pool[i0];
+
+ int32_t i1 = ptr->m_prev->m_incidentVertex;
+ const dgBigVector& p2 = pool[i1];
+
+ dgBigVector e0 (p1 - p0);
+ dgBigVector e1 (p2 - p1);
+ dgBigVector e2 (p0 - p2);
+
+ double mag0 = e0 % e0;
+ double mag1 = e1 % e1;
+ double mag2 = e2 % e2;
+ double maxMag = GetMax (mag0, mag1, mag2);
+ double minMag = GetMin (mag0, mag1, mag2);
+ double ratio = minMag / maxMag;
+
+ if (ratio < aspect) {
+ aspect = ratio;
+ }
+ }
+ }
+ aspect = sqrt (aspect);
+ //aspect = 1.0f;
+ }
+
+ return aspect;
+}
+
+// Recomputes from scratch the collapse metric of a single vertex (the origin
+// of 'edge') by walking its edge fan: interior faces contribute their face
+// plane; boundary edges contribute two unbounded-loop constraint planes.
+static void CalculateVertexMetrics (dgVertexCollapseVertexMetric table[], const dgBigVector* const pool, dgEdge* const edge)
+{
+ int32_t i0 = edge->m_incidentVertex;
+
+// const dgBigVector& p0 = pool[i0];
+ table[i0].Clear ();
+ dgEdge* ptr = edge;
+ do {
+
+ if (ptr->m_incidentFace > 0) {
+ int32_t i1 = ptr->m_next->m_incidentVertex;
+ int32_t i2 = ptr->m_prev->m_incidentVertex;
+ dgBigPlane constrainPlane (EdgePlane (i0, i1, i2, pool));
+ table[i0].Accumulate (constrainPlane);
+
+ } else {
+ // Boundary: constrain along both boundary edges meeting at i0.
+ int32_t i1 = ptr->m_twin->m_incidentVertex;
+ int32_t i2 = ptr->m_twin->m_prev->m_incidentVertex;
+ dgBigPlane constrainPlane (UnboundedLoopPlane (i0, i1, i2, pool));
+ table[i0].Accumulate (constrainPlane);
+
+ i1 = ptr->m_prev->m_incidentVertex;
+ i2 = ptr->m_prev->m_twin->m_prev->m_incidentVertex;
+ constrainPlane = UnboundedLoopPlane (i0, i1, i2, pool);
+ table[i0].Accumulate (constrainPlane);
+ }
+
+ ptr = ptr->m_twin->m_next;
+ } while (ptr != edge);
+}
+
+// Removes a single half edge from the tree, first nulling any collapse
+// handle registered in its m_userData so stale handles do not dangle.
+static void RemoveHalfEdge (dgPolyhedra* const polyhedra, dgEdge* const edge)
+{
+ dgEdgeCollapseEdgeHandle* const handle = (dgEdgeCollapseEdgeHandle *) IntToPointer (edge->m_userData);
+ if (handle) {
+ handle->m_edge = NULL;
+ }
+
+ dgPolyhedra::dgTreeNode* const node = polyhedra->GetNodeFromInfo(*edge);
+ HACD_ASSERT (node);
+ polyhedra->Remove (node);
+}
+
+
+// Collapses 'edge', merging its origin vertex v0 into its destination v1.
+// Returns NULL (no change) when the collapse would create a non-manifold
+// configuration (too few surviving edges, or a duplicate edge between v1 and
+// a fan neighbor); otherwise removes the edge/twin (and any triangles they
+// degenerate), re-keys v0's remaining fan onto v1, and returns a surviving
+// edge near the collapse site.
+static dgEdge* CollapseEdge(dgPolyhedra* const polyhedra, dgEdge* const edge)
+{
+ int32_t v0 = edge->m_incidentVertex;
+ int32_t v1 = edge->m_twin->m_incidentVertex;
+
+#ifdef __ENABLE_SANITY_CHECK
+ dgPolyhedra::dgPairKey TwinKey (v1, v0);
+ dgPolyhedra::dgTreeNode* const node = polyhedra->Find (TwinKey.GetVal());
+ dgEdge* const twin1 = node ? &node->GetInfo() : NULL;
+ HACD_ASSERT (twin1);
+ HACD_ASSERT (edge->m_twin == twin1);
+ HACD_ASSERT (twin1->m_twin == edge);
+ HACD_ASSERT (edge->m_incidentFace != 0);
+ HACD_ASSERT (twin1->m_incidentFace != 0);
+#endif
+
+
+ // Choose a return edge that will survive the collapse; bail out on tiny
+ // fans where no suitable survivor exists.
+ dgEdge* retEdge = edge->m_twin->m_prev->m_twin;
+ if (retEdge == edge->m_twin->m_next) {
+ return NULL;
+ }
+ if (retEdge == edge->m_twin) {
+ return NULL;
+ }
+ if (retEdge == edge->m_next) {
+ retEdge = edge->m_prev->m_twin;
+ if (retEdge == edge->m_twin->m_next) {
+ return NULL;
+ }
+ if (retEdge == edge->m_twin) {
+ return NULL;
+ }
+ }
+
+ // Determine the span of v0's fan that will be re-attached to v1.
+ dgEdge* lastEdge = NULL;
+ dgEdge* firstEdge = NULL;
+ if ((edge->m_incidentFace >= 0) && (edge->m_twin->m_incidentFace >= 0)) {
+ lastEdge = edge->m_prev->m_twin;
+ firstEdge = edge->m_twin->m_next->m_twin->m_next;
+ } else if (edge->m_twin->m_incidentFace >= 0) {
+ firstEdge = edge->m_twin->m_next->m_twin->m_next;
+ lastEdge = edge;
+ } else {
+ lastEdge = edge->m_prev->m_twin;
+ firstEdge = edge->m_twin->m_next;
+ }
+
+ // Manifold check: v1 must not already be connected to any fan neighbor,
+ // or the collapse would create a duplicate edge.
+ for (dgEdge* ptr = firstEdge; ptr != lastEdge; ptr = ptr->m_twin->m_next) {
+ dgEdge* badEdge = polyhedra->FindEdge (edge->m_twin->m_incidentVertex, ptr->m_twin->m_incidentVertex);
+ if (badEdge) {
+ return NULL;
+ }
+ }
+
+ // Remove the twin's triangle if it degenerates (only two other edges),
+ // otherwise just unlink the twin from its ring.
+ dgEdge* const twin = edge->m_twin;
+ if (twin->m_next == twin->m_prev->m_prev) {
+ twin->m_prev->m_twin->m_twin = twin->m_next->m_twin;
+ twin->m_next->m_twin->m_twin = twin->m_prev->m_twin;
+
+ RemoveHalfEdge (polyhedra, twin->m_prev);
+ RemoveHalfEdge (polyhedra, twin->m_next);
+ } else {
+ twin->m_next->m_prev = twin->m_prev;
+ twin->m_prev->m_next = twin->m_next;
+ }
+
+ // Same for the edge's own triangle.
+ if (edge->m_next == edge->m_prev->m_prev) {
+ edge->m_next->m_twin->m_twin = edge->m_prev->m_twin;
+ edge->m_prev->m_twin->m_twin = edge->m_next->m_twin;
+ RemoveHalfEdge (polyhedra, edge->m_next);
+ RemoveHalfEdge (polyhedra, edge->m_prev);
+ } else {
+ edge->m_next->m_prev = edge->m_prev;
+ edge->m_prev->m_next = edge->m_next;
+ }
+
+ HACD_ASSERT (twin->m_twin->m_incidentVertex == v0);
+ HACD_ASSERT (edge->m_twin->m_incidentVertex == v1);
+ RemoveHalfEdge (polyhedra, twin);
+ RemoveHalfEdge (polyhedra, edge);
+
+ // Re-key every surviving edge of v0's fan so it originates at v1.
+ dgEdge* ptr = retEdge;
+ do {
+ dgPolyhedra::dgPairKey pairKey (v0, ptr->m_twin->m_incidentVertex);
+
+ dgPolyhedra::dgTreeNode* node = polyhedra->Find (pairKey.GetVal());
+ if (node) {
+ if (&node->GetInfo() == ptr) {
+ dgPolyhedra::dgPairKey key (v1, ptr->m_twin->m_incidentVertex);
+ ptr->m_incidentVertex = v1;
+ node = polyhedra->ReplaceKey (node, key.GetVal());
+ HACD_ASSERT (node);
+ }
+ }
+
+ dgPolyhedra::dgPairKey TwinKey (ptr->m_twin->m_incidentVertex, v0);
+ node = polyhedra->Find (TwinKey.GetVal());
+ if (node) {
+ if (&node->GetInfo() == ptr->m_twin) {
+ dgPolyhedra::dgPairKey key (ptr->m_twin->m_incidentVertex, v1);
+ node = polyhedra->ReplaceKey (node, key.GetVal());
+ HACD_ASSERT (node);
+ }
+ }
+
+ ptr = ptr->m_twin->m_next;
+ } while (ptr != retEdge);
+
+ return retEdge;
+}
+
+
+
// Simplify the mesh by iterative edge collapse driven by per-vertex error
// metrics (quadric-style; see dgVertexCollapseVertexMetric — accumulated
// from incident face planes elsewhere in this file).
//   array         - vertex positions
//   strideInBytes - byte stride between consecutive vertices in 'array'
//   tol           - maximum allowed geometric error per collapse
// Candidate edges whose metric cost is below tol^2 (and which pass the
// topological/geometric EdgePenalty test) are kept in a max-heap keyed by
// cost; edges are collapsed one at a time and the costs of the surrounding
// one- and two-ring edges are recomputed after each collapse.
void dgPolyhedra::Optimize (const double* const array, int32_t strideInBytes, double tol)
{
	dgList <dgEdgeCollapseEdgeHandle>::dgListNode *handleNodePtr;

	int32_t stride = int32_t (strideInBytes / sizeof (double));

#ifdef __ENABLE_SANITY_CHECK
	HACD_ASSERT (SanityCheck ());
#endif

	// Generously over-sized scratch budget: the heap is never compacted, so
	// re-pushed edges accumulate until the work queue drains.
	int32_t edgeCount = GetEdgeCount() * 4 + 1024 * 16;
	int32_t maxVertexIndex = GetLastVertexIndex();

	dgStack<dgBigVector> vertexPool (maxVertexIndex);
	dgStack<dgVertexCollapseVertexMetric> vertexMetrics (maxVertexIndex + 512);

	dgList <dgEdgeCollapseEdgeHandle> edgeHandleList;
	dgStack<char> heapPool (2 * edgeCount * int32_t (sizeof (double) + sizeof (dgEdgeCollapseEdgeHandle*) + sizeof (int32_t)));
	dgDownHeap<dgList <dgEdgeCollapseEdgeHandle>::dgListNode* , double> bigHeapArray(&heapPool[0], heapPool.GetSizeInBytes());

	NormalizeVertex (maxVertexIndex, &vertexPool[0], array, stride);
	memset (&vertexMetrics[0], 0, maxVertexIndex * sizeof (dgVertexCollapseVertexMetric));
	CalculateAllMetrics (this, &vertexMetrics[0], &vertexPool[0]);


	// Seed the heap with every edge whose collapse cost is within tolerance.
	// m_userData doubles as a back pointer to the heap handle (0 == none).
	double tol2 = tol * tol;
	Iterator iter (*this);
	for (iter.Begin(); iter; iter ++) {
		dgEdge* const edge = &(*iter);

		edge->m_userData = 0;
		int32_t index0 = edge->m_incidentVertex;
		int32_t index1 = edge->m_twin->m_incidentVertex;

		dgVertexCollapseVertexMetric &metric = vertexMetrics[index0];
		dgVector p (&vertexPool[index1].m_x);
		double cost = metric.Evalue (p);
		if (cost < tol2) {
			cost = EdgePenalty (&vertexPool[0], edge);

			if (cost > double (0.0f)) {
				dgEdgeCollapseEdgeHandle handle (edge);
				handleNodePtr = edgeHandleList.Addtop (handle);
				bigHeapArray.Push (handleNodePtr, cost);
			}
		}
	}


	// Main loop: pop the cheapest candidate; a NULL m_edge means the edge was
	// deleted by an earlier collapse and the entry is simply discarded.
	while (bigHeapArray.GetCount()) {
		handleNodePtr = bigHeapArray[0];

		dgEdge* edge = handleNodePtr->GetInfo().m_edge;
		bigHeapArray.Pop();
		edgeHandleList.Remove (handleNodePtr);

		if (edge) {
			// Re-validate the cost against up-to-date metrics before
			// committing to the collapse (metrics drift as the mesh changes).
			CalculateVertexMetrics (&vertexMetrics[0], &vertexPool[0], edge);

			int32_t index0 = edge->m_incidentVertex;
			int32_t index1 = edge->m_twin->m_incidentVertex;
			dgVertexCollapseVertexMetric &metric = vertexMetrics[index0];
			dgBigVector p (vertexPool[index1]);

			if ((metric.Evalue (p) < tol2) && (EdgePenalty (&vertexPool[0], edge) > double (0.0f))) {

#ifdef __ENABLE_SANITY_CHECK
				HACD_ASSERT (SanityCheck ());
#endif

				edge = CollapseEdge(this, edge);

#ifdef __ENABLE_SANITY_CHECK
				HACD_ASSERT (SanityCheck ());
#endif
				if (edge) {
					// Update vertex metrics
					CalculateVertexMetrics (&vertexMetrics[0], &vertexPool[0], edge);

					// Update metrics for all surrounding vertex
					dgEdge* ptr = edge;
					do {
						CalculateVertexMetrics (&vertexMetrics[0], &vertexPool[0], ptr->m_twin);
						ptr = ptr->m_twin->m_next;
					} while (ptr != edge);

					// calculate edge cost of all incident edges
					int32_t mark = IncLRU();
					ptr = edge;
					do {
						HACD_ASSERT (ptr->m_mark != mark);
						ptr->m_mark = mark;

						index0 = ptr->m_incidentVertex;
						index1 = ptr->m_twin->m_incidentVertex;

						dgVertexCollapseVertexMetric &metric = vertexMetrics[index0];
						dgBigVector p (vertexPool[index1]);

						double cost = float (-1.0f);
						if (metric.Evalue (p) < tol2) {
							cost = EdgePenalty (&vertexPool[0], ptr);
						}

						if (cost > double (0.0f)) {
							dgEdgeCollapseEdgeHandle handle (ptr);
							handleNodePtr = edgeHandleList.Addtop (handle);
							bigHeapArray.Push (handleNodePtr, cost);
						} else {
							// Edge is no longer collapsible: detach any stale
							// heap handle so the old entry is ignored.
							dgEdgeCollapseEdgeHandle* const handle = (dgEdgeCollapseEdgeHandle*)IntToPointer (ptr->m_userData);
							if (handle) {
								handle->m_edge = NULL;
							}
							ptr->m_userData = uint32_t (NULL);

						}

						ptr = ptr->m_twin->m_next;
					} while (ptr != edge);


					// calculate edge cost of all incident edges to a surrounding vertex
					ptr = edge;
					do {
						dgEdge* const incidentEdge = ptr->m_twin;

						dgEdge* ptr1 = incidentEdge;
						do {
							index0 = ptr1->m_incidentVertex;
							index1 = ptr1->m_twin->m_incidentVertex;

							if (ptr1->m_mark != mark) {
								ptr1->m_mark = mark;
								dgVertexCollapseVertexMetric &metric = vertexMetrics[index0];
								dgBigVector p (vertexPool[index1]);

								double cost = float (-1.0f);
								if (metric.Evalue (p) < tol2) {
									cost = EdgePenalty (&vertexPool[0], ptr1);
								}

								if (cost > double (0.0f)) {
									HACD_ASSERT (cost > double(0.0f));
									dgEdgeCollapseEdgeHandle handle (ptr1);
									handleNodePtr = edgeHandleList.Addtop (handle);
									bigHeapArray.Push (handleNodePtr, cost);
								} else {
									dgEdgeCollapseEdgeHandle *handle;
									handle = (dgEdgeCollapseEdgeHandle*)IntToPointer (ptr1->m_userData);
									if (handle) {
										handle->m_edge = NULL;
									}
									ptr1->m_userData = uint32_t (NULL);

								}
							}

							// Also refresh the opposite orientation of the
							// same edge (cost is direction-dependent).
							if (ptr1->m_twin->m_mark != mark) {
								ptr1->m_twin->m_mark = mark;
								dgVertexCollapseVertexMetric &metric = vertexMetrics[index1];
								dgBigVector p (vertexPool[index0]);

								double cost = float (-1.0f);
								if (metric.Evalue (p) < tol2) {
									cost = EdgePenalty (&vertexPool[0], ptr1->m_twin);
								}

								if (cost > double (0.0f)) {
									HACD_ASSERT (cost > double(0.0f));
									dgEdgeCollapseEdgeHandle handle (ptr1->m_twin);
									handleNodePtr = edgeHandleList.Addtop (handle);
									bigHeapArray.Push (handleNodePtr, cost);
								} else {
									dgEdgeCollapseEdgeHandle *handle;
									handle = (dgEdgeCollapseEdgeHandle*) IntToPointer (ptr1->m_twin->m_userData);
									if (handle) {
										handle->m_edge = NULL;
									}
									ptr1->m_twin->m_userData = uint32_t (NULL);

								}
							}

							ptr1 = ptr1->m_twin->m_next;
						} while (ptr1 != incidentEdge);

						ptr = ptr->m_twin->m_next;
					} while (ptr != edge);
				}
			}
		}
	}
}
+
+
+dgEdge* dgPolyhedra::FindEarTip (dgEdge* const face, const double* const pool, int32_t stride, dgDownHeap<dgEdge*, double>& heap, const dgBigVector &normal) const
+{
+ dgEdge* ptr = face;
+ dgBigVector p0 (&pool[ptr->m_prev->m_incidentVertex * stride]);
+ dgBigVector p1 (&pool[ptr->m_incidentVertex * stride]);
+ dgBigVector d0 (p1 - p0);
+ double f = sqrt (d0 % d0);
+ if (f < double (1.0e-10f)) {
+ f = double (1.0e-10f);
+ }
+ d0 = d0.Scale (double (1.0f) / f);
+
+ double minAngle = float (10.0f);
+ do {
+ dgBigVector p2 (&pool [ptr->m_next->m_incidentVertex * stride]);
+ dgBigVector d1 (p2 - p1);
+ float f = dgSqrt (d1 % d1);
+ if (f < float (1.0e-10f)) {
+ f = float (1.0e-10f);
+ }
+ d1 = d1.Scale (float (1.0f) / f);
+ dgBigVector n (d0 * d1);
+
+ double angle = normal % n;
+ if (angle >= double (0.0f)) {
+ heap.Push (ptr, angle);
+ }
+
+ if (angle < minAngle) {
+ minAngle = angle;
+ }
+
+ d0 = d1;
+ p1 = p2;
+ ptr = ptr->m_next;
+ } while (ptr != face);
+
+ if (minAngle > float (0.1f)) {
+ return heap[0];
+ }
+
+ dgEdge* ear = NULL;
+ while (heap.GetCount()) {
+ ear = heap[0];
+ heap.Pop();
+
+ if (FindEdge (ear->m_prev->m_incidentVertex, ear->m_next->m_incidentVertex)) {
+ continue;
+ }
+
+ dgBigVector p0 (&pool [ear->m_prev->m_incidentVertex * stride]);
+ dgBigVector p1 (&pool [ear->m_incidentVertex * stride]);
+ dgBigVector p2 (&pool [ear->m_next->m_incidentVertex * stride]);
+
+ dgBigVector p10 (p1 - p0);
+ dgBigVector p21 (p2 - p1);
+ dgBigVector p02 (p0 - p2);
+
+ for (ptr = ear->m_next->m_next; ptr != ear->m_prev; ptr = ptr->m_next) {
+ dgBigVector p (&pool [ptr->m_incidentVertex * stride]);
+
+ double side = ((p - p0) * p10) % normal;
+ if (side < double (0.05f)) {
+ side = ((p - p1) * p21) % normal;
+ if (side < double (0.05f)) {
+ side = ((p - p2) * p02) % normal;
+ if (side < float (0.05f)) {
+ break;
+ }
+ }
+ }
+ }
+
+ if (ptr == ear->m_prev) {
+ break;
+ }
+ }
+
+ return ear;
+}
+
+
+
+
+
+//dgEdge* TriangulateFace (dgPolyhedra& polyhedra, dgEdge* face, const float* const pool, int32_t stride, dgDownHeap<dgEdge*, float>& heap, dgVector* const faceNormalOut)
// Triangulate a single (possibly concave) planar face by ear clipping.
// Each iteration finds an ear via FindEarTip and splices a new edge/twin
// pair across it, cutting one triangle off the face loop.
// Returns NULL on success; on failure (no valid ear, edge insertion failed,
// or degenerate normal) returns the remaining untriangulated face.
// If 'faceNormalOut' is non-NULL it receives the unit face normal (zero
// vector when the face is degenerate).
dgEdge* dgPolyhedra::TriangulateFace (dgEdge* face, const double* const pool, int32_t stride, dgDownHeap<dgEdge*, double>& heap, dgBigVector* const faceNormalOut)
{
#ifdef _DEBUG
	// Debug-only: snapshot the perimeter (also validates the loop closes).
	dgEdge* perimeter [1024 * 16];
	dgEdge* ptr = face;
	int32_t perimeterCount = 0;
	do {
		perimeter[perimeterCount] = ptr;
		perimeterCount ++;
		HACD_ASSERT (perimeterCount < int32_t (sizeof (perimeter) / sizeof (perimeter[0])));
		ptr = ptr->m_next;
	} while (ptr != face);
	perimeter[perimeterCount] = face;
	HACD_ASSERT ((perimeterCount + 1) < int32_t (sizeof (perimeter) / sizeof (perimeter[0])));
#endif
	dgBigVector normal (FaceNormal (face, pool, int32_t (stride * sizeof (double))));

	// Degenerate face (zero-area): nothing to clip, report a zero normal.
	double dot = normal % normal;
	if (dot < double (1.0e-12f)) {
		if (faceNormalOut) {
			*faceNormalOut = dgBigVector (float (0.0f), float (0.0f), float (0.0f), float (0.0f));
		}
		return face;
	}
	normal = normal.Scale (double (1.0f) / sqrt (dot));
	if (faceNormalOut) {
		*faceNormalOut = normal;
	}


	// Clip one ear per iteration until only a triangle remains.
	while (face->m_next->m_next->m_next != face) {
		dgEdge* const ear = FindEarTip (face, pool, stride, heap, normal);
		if (!ear) {
			return face;
		}
		// Keep 'face' pointing into the loop that survives the clip.
		if ((face == ear) || (face == ear->m_prev)) {
			face = ear->m_prev->m_prev;
		}
		dgEdge* const edge = AddHalfEdge (ear->m_next->m_incidentVertex, ear->m_prev->m_incidentVertex);
		if (!edge) {
			return face;
		}
		dgEdge* const twin = AddHalfEdge (ear->m_prev->m_incidentVertex, ear->m_next->m_incidentVertex);
		if (!twin) {
			return face;
		}
		HACD_ASSERT (twin);


		// Inherit attributes from the corner being clipped.
		edge->m_mark = ear->m_mark;
		edge->m_userData = ear->m_next->m_userData;
		edge->m_incidentFace = ear->m_incidentFace;

		twin->m_mark = ear->m_mark;
		twin->m_userData = ear->m_prev->m_userData;
		twin->m_incidentFace = ear->m_incidentFace;

		edge->m_twin = twin;
		twin->m_twin = edge;

		// Splice 'twin' into the big loop (closing it without the ear) and
		// 'edge' into the new triangle (ear->m_prev, ear, edge).
		twin->m_prev = ear->m_prev->m_prev;
		twin->m_next = ear->m_next;
		ear->m_prev->m_prev->m_next = twin;
		ear->m_next->m_prev = twin;

		edge->m_next = ear->m_prev;
		edge->m_prev = ear;
		ear->m_prev->m_prev = edge;
		ear->m_next = edge;

		// Corner angles are stale after the clip; rebuild the heap next pass.
		heap.Flush ();
	}
	return NULL;
}
+
+
// Flood-fill from 'face' over neighboring faces that lie (nearly) in the
// same plane.  Each accepted face is copied into 'polyhedraOut' and queued
// in deleteEdge[] for removal from this mesh at the end.  The reference
// plane is the running average of the accepted faces' normals and sample
// points, which makes the coplanarity test tolerant of slight curvature.
void dgPolyhedra::MarkAdjacentCoplanarFaces (dgPolyhedra& polyhedraOut, dgEdge* const face, const double* const pool, int32_t strideInBytes)
{
	// Acceptance thresholds: minimum normal alignment and maximum distance
	// of a sample vertex from the running average plane.
	const double normalDeviation = double (0.9999f);
	const double distanceFromPlane = double (1.0f / 128.0f);

	int32_t faceIndex[1024 * 4];
	dgEdge* stack[1024 * 4];
	dgEdge* deleteEdge[1024 * 4];

	int32_t deleteCount = 1;
	deleteEdge[0] = face;
	int32_t stride = int32_t (strideInBytes / sizeof (double));

	HACD_ASSERT (face->m_incidentFace > 0);

	dgBigVector normalAverage (FaceNormal (face, pool, strideInBytes));
	double dot = normalAverage % normalAverage;
	// Only proceed when the seed face has a usable (non-degenerate) normal;
	// otherwise only the seed face itself is deleted below.
	if (dot > double (1.0e-12f)) {
		int32_t testPointsCount = 1;
		dot = double (1.0f) / sqrt (dot);
		dgBigVector normal (normalAverage.Scale (dot));

		dgBigVector averageTestPoint (&pool[face->m_incidentVertex * stride]);
		dgBigPlane testPlane(normal, - (averageTestPoint % normal));

		polyhedraOut.BeginFace();

		IncLRU();
		int32_t faceMark = IncLRU();

		// Copy the seed face into the output mesh and mark it visited.
		int32_t faceIndexCount = 0;
		dgEdge* ptr = face;
		do {
			ptr->m_mark = faceMark;
			faceIndex[faceIndexCount] = ptr->m_incidentVertex;
			faceIndexCount ++;
			HACD_ASSERT (faceIndexCount < int32_t (sizeof (faceIndex) / sizeof (faceIndex[0])));
			ptr = ptr ->m_next;
		} while (ptr != face);
		polyhedraOut.AddFace(faceIndexCount, faceIndex);

		// Depth-first flood fill over twin faces.
		int32_t index = 1;
		deleteCount = 0;
		stack[0] = face;
		while (index)
		{
			index --;
			dgEdge* const face = stack[index];
			deleteEdge[deleteCount] = face;
			deleteCount ++;
			HACD_ASSERT (deleteCount < int32_t (sizeof (deleteEdge) / sizeof (deleteEdge[0])));
// TODO:JWR Temporarily commented out...			HACD_ASSERT (face->m_next->m_next->m_next == face);

			dgEdge* edge = face;
			do
			{
				dgEdge* const ptr = edge->m_twin;
				if (ptr->m_incidentFace > 0)
				{
					if (ptr->m_mark != faceMark)
					{
						// Gather (and mark) the neighbor face's vertices.
						dgEdge* ptr1 = ptr;
						faceIndexCount = 0;
						do
						{
							ptr1->m_mark = faceMark;
							faceIndex[faceIndexCount] = ptr1->m_incidentVertex;
							HACD_ASSERT (faceIndexCount < int32_t (sizeof (faceIndex) / sizeof (faceIndex[0])));
							faceIndexCount ++;
							ptr1 = ptr1 ->m_next;
						} while (ptr1 != ptr);

						dgBigVector normal1 (FaceNormal (ptr, pool, strideInBytes));
						dot = normal1 % normal1;
						if (dot < double (1.0e-12f)) {
							// Degenerate neighbor: delete it but do not copy
							// it to the output nor continue the fill from it.
							deleteEdge[deleteCount] = ptr;
							deleteCount ++;
							HACD_ASSERT (deleteCount < int32_t (sizeof (deleteEdge) / sizeof (deleteEdge[0])));
						} else {
							//normal1 = normal1.Scale (double (1.0f) / sqrt (dot));
							dgBigVector testNormal (normal1.Scale (double (1.0f) / sqrt (dot)));
							dot = normal % testNormal;
							if (dot >= normalDeviation) {
								dgBigVector testPoint (&pool[ptr->m_prev->m_incidentVertex * stride]);
								double dist = fabs (testPlane.Evalue (testPoint));
								if (dist < distanceFromPlane) {
									// Accept: fold this face into the running
									// average plane and keep flood-filling.
									testPointsCount ++;

									averageTestPoint += testPoint;
									testPoint = averageTestPoint.Scale (double (1.0f) / double(testPointsCount));

									normalAverage += normal1;
									testNormal = normalAverage.Scale (double (1.0f) / sqrt (normalAverage % normalAverage));
									testPlane = dgBigPlane (testNormal, - (testPoint % testNormal));

									polyhedraOut.AddFace(faceIndexCount, faceIndex);;
									stack[index] = ptr;
									index ++;
									HACD_ASSERT (index < int32_t (sizeof (stack) / sizeof (stack[0])));
								}
							}
						}
					}
				}

				edge = edge->m_next;
			} while (edge != face);
		}
		polyhedraOut.EndFace();
	}

	// Remove every collected face from this mesh.
	for (int32_t index = 0; index < deleteCount; index ++) {
		DeleteFace (deleteEdge[index]);
	}
}
+
+
// Delaunay-style refinement of a triangulated flat patch.  All interior
// diagonals that are not perimeter edges are queued; each is tested with the
// classic in-circle 3x3 determinant in the plane of the patch and flipped
// when the opposite vertex lies inside the circumcircle.  After a flip the
// four neighboring diagonals are re-queued.  'maxCount' (queue size squared)
// caps the total work to guard against infinite flip cycles.
void dgPolyhedra::RefineTriangulation (const double* const vertex, int32_t stride, dgBigVector* const normal, int32_t perimeterCount, dgEdge** const perimeter)
{
	dgList<dgDiagonalEdge> dignonals;

	// Collect every interior edge reachable between consecutive perimeter
	// edges, de-duplicating by vertex pair (either orientation).
	for (int32_t i = 1; i <= perimeterCount; i ++) {
		dgEdge* const last = perimeter[i - 1];
		for (dgEdge* ptr = perimeter[i]->m_prev; ptr != last; ptr = ptr->m_twin->m_prev) {
			dgList<dgDiagonalEdge>::dgListNode* node = dignonals.GetFirst();
			for (; node; node = node->GetNext()) {
				const dgDiagonalEdge& key = node->GetInfo();
				if (((key.m_i0 == ptr->m_incidentVertex) && (key.m_i1 == ptr->m_twin->m_incidentVertex)) ||
					((key.m_i1 == ptr->m_incidentVertex) && (key.m_i0 == ptr->m_twin->m_incidentVertex))) {
						break;
				}
			}
			if (!node) {
				dgDiagonalEdge key (ptr);
				dignonals.Append(key);
			}
		}
	}

	// Build a frame in the plane of the patch: origin at p0, m_front along
	// the first non-degenerate perimeter direction, m_right along the normal.
	dgEdge* const face = perimeter[0];
	int32_t i0 = face->m_incidentVertex * stride;
	int32_t i1 = face->m_next->m_incidentVertex * stride;
	dgBigVector p0 (vertex[i0], vertex[i0 + 1], vertex[i0 + 2], float (0.0f));
	dgBigVector p1 (vertex[i1], vertex[i1 + 1], vertex[i1 + 2], float (0.0f));

	dgBigVector p1p0 (p1 - p0);
	double mag2 = p1p0 % p1p0;
	for (dgEdge* ptr = face->m_next->m_next; mag2 < float (1.0e-12f); ptr = ptr->m_next) {
		int32_t i1 = ptr->m_incidentVertex * stride;
		dgBigVector p1 (vertex[i1], vertex[i1 + 1], vertex[i1 + 2], float (0.0f));
		p1p0 = p1 - p0;
		mag2 = p1p0 % p1p0;
	}

	// Inverse of the frame maps patch vertices into 2D (x,y in-plane).
	dgMatrix matrix (dgGetIdentityMatrix());
	matrix.m_posit = p0;
	matrix.m_front = dgVector (p1p0.Scale (double (1.0f) / sqrt (mag2)));
	matrix.m_right = dgVector (normal->Scale (double (1.0f) / sqrt (*normal % *normal)));
	matrix.m_up = matrix.m_right * matrix.m_front;
	matrix = matrix.Inverse();
	matrix.m_posit.m_w = float (1.0f);

	int32_t maxCount = dignonals.GetCount() * dignonals.GetCount();
	while (dignonals.GetCount() && maxCount) {
		maxCount --;
		dgList<dgDiagonalEdge>::dgListNode* const node = dignonals.GetFirst();
		dgDiagonalEdge key (node->GetInfo());
		dignonals.Remove(node);
		// The edge may have been destroyed by an earlier flip; re-find it.
		dgEdge* const edge = FindEdge(key.m_i0, key.m_i1);
		if (edge) {
			int32_t i0 = edge->m_incidentVertex * stride;
			int32_t i1 = edge->m_next->m_incidentVertex * stride;
			int32_t i2 = edge->m_next->m_next->m_incidentVertex * stride;
			int32_t i3 = edge->m_twin->m_prev->m_incidentVertex * stride;

			dgBigVector p0 (vertex[i0], vertex[i0 + 1], vertex[i0 + 2], double (0.0f));
			dgBigVector p1 (vertex[i1], vertex[i1 + 1], vertex[i1 + 2], double (0.0f));
			dgBigVector p2 (vertex[i2], vertex[i2 + 1], vertex[i2 + 2], double (0.0f));
			dgBigVector p3 (vertex[i3], vertex[i3 + 1], vertex[i3 + 2], double (0.0f));

			p0 = matrix.TransformVector(p0);
			p1 = matrix.TransformVector(p1);
			p2 = matrix.TransformVector(p2);
			p3 = matrix.TransformVector(p3);

			// In-circle test: determinant sign tells whether p3 lies inside
			// the circumcircle of triangle (p0, p1, p2) in the 2D plane.
			double circleTest[3][3];
			circleTest[0][0] = p0[0] - p3[0];
			circleTest[0][1] = p0[1] - p3[1];
			circleTest[0][2] = circleTest[0][0] * circleTest[0][0] + circleTest[0][1] * circleTest[0][1];

			circleTest[1][0] = p1[0] - p3[0];
			circleTest[1][1] = p1[1] - p3[1];
			circleTest[1][2] = circleTest[1][0] * circleTest[1][0] + circleTest[1][1] * circleTest[1][1];

			circleTest[2][0] = p2[0] - p3[0];
			circleTest[2][1] = p2[1] - p3[1];
			circleTest[2][2] = circleTest[2][0] * circleTest[2][0] + circleTest[2][1] * circleTest[2][1];

			double error;
			double det = Determinant3x3 (circleTest, &error);
			if (det < float (0.0f)) {
				// Non-Delaunay: flip the diagonal, then re-queue the four
				// surrounding interior edges (skipping perimeter edges).
				dgEdge* frontFace0 = edge->m_prev;
				dgEdge* backFace0 = edge->m_twin->m_prev;

				FlipEdge(edge);

				if (perimeterCount > 4) {
					dgEdge* backFace1 = backFace0->m_next;
					dgEdge* frontFace1 = frontFace0->m_next;
					for (int32_t i = 0; i < perimeterCount; i ++) {
						if (frontFace0 == perimeter[i]) {
							frontFace0 = NULL;
						}
						if (frontFace1 == perimeter[i]) {
							frontFace1 = NULL;
						}

						if (backFace0 == perimeter[i]) {
							backFace0 = NULL;
						}
						if (backFace1 == perimeter[i]) {
							backFace1 = NULL;
						}
					}

					if (backFace0 && (backFace0->m_incidentFace > 0) && (backFace0->m_twin->m_incidentFace > 0)) {
						dgDiagonalEdge key (backFace0);
						dignonals.Append(key);
					}
					if (backFace1 && (backFace1->m_incidentFace > 0) && (backFace1->m_twin->m_incidentFace > 0)) {
						dgDiagonalEdge key (backFace1);
						dignonals.Append(key);
					}

					if (frontFace0 && (frontFace0->m_incidentFace > 0) && (frontFace0->m_twin->m_incidentFace > 0)) {
						dgDiagonalEdge key (frontFace0);
						dignonals.Append(key);
					}

					if (frontFace1 && (frontFace1->m_incidentFace > 0) && (frontFace1->m_twin->m_incidentFace > 0)) {
						dgDiagonalEdge key (frontFace1);
						dignonals.Append(key);
					}
				}
			}
		}
	}
}
+
+
+void dgPolyhedra::RefineTriangulation (const double* const vertex, int32_t stride)
+{
+ dgEdge* edgePerimeters[1024 * 16];
+ int32_t perimeterCount = 0;
+
+ dgPolyhedra::Iterator iter (*this);
+ for (iter.Begin(); iter; iter ++) {
+ dgEdge* const edge = &(*iter);
+ if (edge->m_incidentFace < 0) {
+ dgEdge* ptr = edge;
+ do {
+ edgePerimeters[perimeterCount] = ptr->m_twin;
+ perimeterCount ++;
+ HACD_ASSERT (perimeterCount < int32_t (sizeof (edgePerimeters) / sizeof (edgePerimeters[0])));
+ ptr = ptr->m_prev;
+ } while (ptr != edge);
+ break;
+ }
+ }
+ HACD_ASSERT (perimeterCount);
+ HACD_ASSERT (perimeterCount < int32_t (sizeof (edgePerimeters) / sizeof (edgePerimeters[0])));
+ edgePerimeters[perimeterCount] = edgePerimeters[0];
+
+ dgBigVector normal (FaceNormal(edgePerimeters[0], vertex, int32_t (stride * sizeof (double))));
+ if ((normal % normal) > float (1.0e-12f)) {
+ RefineTriangulation (vertex, stride, &normal, perimeterCount, edgePerimeters);
+ }
+}
+
+
// Re-triangulate the mesh one coplanar patch at a time: each patch is
// extracted with MarkAdjacentCoplanarFaces (which also removes it from this
// mesh), Delaunay-refined, and its faces are added to 'buildConvex', whose
// contents finally replace this polyhedra.
void dgPolyhedra::OptimizeTriangulation (const double* const vertex, int32_t strideInBytes)
{
	int32_t polygon[1024 * 8];
	int32_t stride = int32_t (strideInBytes / sizeof (double));

	dgPolyhedra leftOver;
	dgPolyhedra buildConvex;

	buildConvex.BeginFace();
	dgPolyhedra::Iterator iter (*this);

	for (iter.Begin(); iter; ) {
		dgEdge* const edge = &(*iter);
		iter++;

		if (edge->m_incidentFace > 0) {
			// Pull out the whole coplanar patch containing 'edge'; this
			// deletes its faces from *this, invalidating the iterator.
			dgPolyhedra flatFace;
			MarkAdjacentCoplanarFaces (flatFace, edge, vertex, strideInBytes);
			//HACD_ASSERT (flatFace.GetCount());

			if (flatFace.GetCount()) {
				//flatFace.Triangulate (vertex, strideInBytes, &leftOver);
				//HACD_ASSERT (!leftOver.GetCount());
				flatFace.RefineTriangulation (vertex, stride);

				// Copy every refined face of the patch into the output mesh.
				int32_t mark = flatFace.IncLRU();
				dgPolyhedra::Iterator iter (flatFace);
				for (iter.Begin(); iter; iter ++) {
					dgEdge* const edge = &(*iter);
					if (edge->m_mark != mark) {
						if (edge->m_incidentFace > 0) {
							dgEdge* ptr = edge;
							int32_t vertexCount = 0;
							do {
								polygon[vertexCount] = ptr->m_incidentVertex;
								vertexCount ++;
								HACD_ASSERT (vertexCount < int32_t (sizeof (polygon) / sizeof (polygon[0])));
								ptr->m_mark = mark;
								ptr = ptr->m_next;
							} while (ptr != edge);
							if (vertexCount >= 3) {
								buildConvex.AddFace (vertexCount, polygon);
							}
						}
					}
				}
			}
			// Restart the outer iteration: the tree was mutated above.
			iter.Begin();
		}
	}
	buildConvex.EndFace();
	HACD_ASSERT (GetCount() == 0);
	SwapInfo(buildConvex);
}
+
+
// Triangulate all faces of the mesh, moving untriangulatable faces into
// 'leftOver'.  NOTE(review): the entire implementation is currently disabled
// with '#if 0' — as it stands the function only allocates its scratch heap
// and returns, leaving the mesh unchanged (hence the commented-out parameter
// names).  The disabled code is kept below for reference.
void dgPolyhedra::Triangulate (const double* const /*vertex*/, int32_t /*strideInBytes*/, dgPolyhedra* const /*leftOver*/)
{
	int32_t count = GetCount() / 2;
	dgStack<char> memPool (int32_t ((count + 512) * (2 * sizeof (double))));
	dgDownHeap<dgEdge*, double> heap(&memPool[0], memPool.GetSizeInBytes());
#if 0
	int32_t stride = int32_t (strideInBytes / sizeof (double));
	int32_t mark = IncLRU();
	Iterator iter (*this);
	for (iter.Begin(); iter; )
	{
		dgEdge* const thisEdge = &(*iter);
		iter ++;

		if (thisEdge->m_mark == mark)
		{
			continue;
		}
		if (thisEdge->m_incidentFace < 0)
		{
			continue;
		}

		count = 0;
		dgEdge* ptr = thisEdge;
		do
		{
			count ++;
			ptr->m_mark = mark;
			ptr = ptr->m_next;
		} while (ptr != thisEdge);

		if (count > 3)
		{
			dgEdge* const edge = TriangulateFace (thisEdge, vertex, stride, heap, NULL);
			heap.Flush ();

			if (edge)
			{
				HACD_ASSERT (edge->m_incidentFace > 0);

				if (leftOver)
				{
					int32_t* const index = (int32_t *) &heap[0];
					int64_t* const data = (int64_t *)&index[count];
					int32_t i = 0;
					dgEdge* ptr = edge;
					do {
						index[i] = ptr->m_incidentVertex;
						data[i] = int64_t (ptr->m_userData);
						i ++;
						ptr = ptr->m_next;
					} while (ptr != edge);
					leftOver->AddFace(i, index, data);
				}
				DeleteFace (edge);
				iter.Begin();
			}
		}
	}
	OptimizeTriangulation (vertex, strideInBytes);
	mark = IncLRU();
	m_faceSecuence = 1;
	for (iter.Begin(); iter; iter ++)
	{
		dgEdge* edge = &(*iter);
		if (edge->m_mark == mark)
		{
			continue;
		}
		if (edge->m_incidentFace < 0)
		{
			continue;
		}
		HACD_ASSERT (edge == edge->m_next->m_next->m_next);

		for (int32_t i = 0; i < 3; i ++)
		{
			edge->m_incidentFace = m_faceSecuence;
			edge->m_mark = mark;
			edge = edge->m_next;
		}
		m_faceSecuence ++;
	}
#endif
}
+
+
// Fuse near-colinear consecutive border edges of a flat triangulated patch.
// For every perimeter loop (edges with m_incidentFace < 0), pairs of
// consecutive border edges whose unit directions agree within 0.9999 are
// merged: the interior fan attached to the shared vertex is deleted, the two
// border half-edges are spliced into one, and the twin's incident vertex is
// redirected past the removed vertex.
static void RemoveColinearVertices (dgPolyhedra& flatFace, const double* const vertex, int32_t stride)
{
	dgEdge* edgePerimeters[1024];

	// Collect one representative edge per border loop.
	int32_t perimeterCount = 0;
	int32_t mark = flatFace.IncLRU();
	dgPolyhedra::Iterator iter (flatFace);
	for (iter.Begin(); iter; iter ++) {
		dgEdge* const edge = &(*iter);
		if ((edge->m_incidentFace < 0) && (edge->m_mark != mark)) {
			dgEdge* ptr = edge;
			do {
				ptr->m_mark = mark;
				ptr = ptr->m_next;
			} while (ptr != edge);
			edgePerimeters[perimeterCount] = edge;
			perimeterCount ++;
			HACD_ASSERT (perimeterCount < int32_t (sizeof (edgePerimeters) / sizeof (edgePerimeters[0])));
		}
	}

	for (int32_t i = 0; i < perimeterCount; i ++) {
		dgEdge* edge = edgePerimeters[i];
		dgEdge* ptr = edge;
		dgVector p0 (&vertex[ptr->m_incidentVertex * stride]);
		dgVector p1 (&vertex[ptr->m_next->m_incidentVertex * stride]);
		dgVector e0 (p1 - p0) ;
		e0 = e0.Scale (float (1.0f) / (dgSqrt (e0 % e0) + float (1.0e-12f)));
		// ignoreTest forces one more pass over the loop after a fuse, since
		// the stop edge may itself have been modified.
		int32_t ignoreTest = 1;
		do {
			ignoreTest = 0;
			dgVector p2 (&vertex[ptr->m_next->m_next->m_incidentVertex * stride]);
			dgVector e1 (p2 - p1);
			e1 = e1.Scale (float (1.0f) / (dgSqrt (e1 % e1) + float (1.0e-12f)));
			float dot = e1 % e0;
			if (dot > float (float (0.9999f))) {

				// Delete the interior fan hanging off the shared vertex so
				// only the two border half-edge pairs remain around it.
				for (dgEdge* interiorEdge = ptr->m_next->m_twin->m_next; interiorEdge != ptr->m_twin; interiorEdge = ptr->m_next->m_twin->m_next) {
					flatFace.DeleteEdge (interiorEdge);
				}

				// Collapse a leftover interior triangle sliver, if any.
				if (ptr->m_twin->m_next->m_next->m_next == ptr->m_twin) {
					HACD_ASSERT (ptr->m_twin->m_next->m_incidentFace > 0);
					flatFace.DeleteEdge (ptr->m_twin->m_next);
				}

				HACD_ASSERT (ptr->m_next->m_twin->m_next->m_twin == ptr);
				edge = ptr->m_next;

				// Only fuse when doing so would not duplicate an edge that
				// already exists between the surviving end vertices.
				if (!flatFace.FindEdge (ptr->m_incidentVertex, edge->m_twin->m_incidentVertex) &&
					!flatFace.FindEdge (edge->m_twin->m_incidentVertex, ptr->m_incidentVertex)) {
					// Unlink 'edge' from both loops, isolate it with its
					// twin, delete it, and re-key ptr's twin past the vertex.
					ptr->m_twin->m_prev = edge->m_twin->m_prev;
					edge->m_twin->m_prev->m_next = ptr->m_twin;

					edge->m_next->m_prev = ptr;
					ptr->m_next = edge->m_next;

					edge->m_next = edge->m_twin;
					edge->m_prev = edge->m_twin;
					edge->m_twin->m_next = edge;
					edge->m_twin->m_prev = edge;
					flatFace.DeleteEdge (edge);
					flatFace.ChangeEdgeIncidentVertex (ptr->m_twin, ptr->m_next->m_incidentVertex);

					e1 = e0;
					p1 = p2;
					edge = ptr;
					ignoreTest = 1;
					continue;
				}
			}

			e0 = e1;
			p1 = p2;
			ptr = ptr->m_next;
		} while ((ptr != edge) || ignoreTest);
	}
}
+
+
+static int32_t GetInteriorDiagonals (dgPolyhedra& polyhedra, dgEdge** const diagonals, int32_t maxCount)
+{
+ int32_t count = 0;
+ int32_t mark = polyhedra.IncLRU();
+ dgPolyhedra::Iterator iter (polyhedra);
+ for (iter.Begin(); iter; iter++) {
+ dgEdge* const edge = &(*iter);
+ if (edge->m_mark != mark) {
+ if (edge->m_incidentFace > 0) {
+ if (edge->m_twin->m_incidentFace > 0) {
+ edge->m_twin->m_mark = mark;
+ if (count < maxCount){
+ diagonals[count] = edge;
+ count ++;
+ }
+ HACD_ASSERT (count <= maxCount);
+ }
+ }
+ }
+ edge->m_mark = mark;
+ }
+
+ return count;
+}
+
+static bool IsEssensialPointDiagonal (dgEdge* const diagonal, const dgBigVector& normal, const double* const pool, int32_t stride)
+{
+ double dot;
+ dgBigVector p0 (&pool[diagonal->m_incidentVertex * stride]);
+ dgBigVector p1 (&pool[diagonal->m_twin->m_next->m_twin->m_incidentVertex * stride]);
+ dgBigVector p2 (&pool[diagonal->m_prev->m_incidentVertex * stride]);
+
+ dgBigVector e1 (p1 - p0);
+ dot = e1 % e1;
+ if (dot < double (1.0e-12f)) {
+ return false;
+ }
+ e1 = e1.Scale (double (1.0f) / sqrt(dot));
+
+ dgBigVector e2 (p2 - p0);
+ dot = e2 % e2;
+ if (dot < double (1.0e-12f)) {
+ return false;
+ }
+ e2 = e2.Scale (double (1.0f) / sqrt(dot));
+
+ dgBigVector n1 (e1 * e2);
+
+ dot = normal % n1;
+ //if (dot > double (float (0.1f)f)) {
+ //if (dot >= double (-1.0e-6f)) {
+ if (dot >= double (0.0f)) {
+ return false;
+ }
+ return true;
+}
+
+
+static bool IsEssensialDiagonal (dgEdge* const diagonal, const dgBigVector& normal, const double* const pool, int32_t stride)
+{
+ return IsEssensialPointDiagonal (diagonal, normal, pool, stride) || IsEssensialPointDiagonal (diagonal->m_twin, normal, pool, stride);
+}
+
+
// Partition the mesh into convex polygonal faces.  The mesh is first
// triangulated (note: Triangulate's body is currently disabled elsewhere in
// this file), cleaned of degenerate faces and simplified; then coplanar face
// groups are extracted one at a time, Delaunay-refined, stripped of colinear
// border vertices, and merged back into larger polygons by deleting interior
// diagonals — all of them when the merged outline is convex, otherwise only
// the non-essential ones.  The resulting faces accumulate in 'buildConvex',
// which finally replaces this polyhedra.  'leftOversOut' is forwarded to
// Triangulate for faces it cannot process.
void dgPolyhedra::ConvexPartition (const double* const vertex, int32_t strideInBytes, dgPolyhedra* const leftOversOut)
{
	if (GetCount()) {
		Triangulate (vertex, strideInBytes, leftOversOut);
		DeleteDegenerateFaces (vertex, strideInBytes, float (1.0e-5f));
		Optimize (vertex, strideInBytes, float (1.0e-4f));
		DeleteDegenerateFaces (vertex, strideInBytes, float (1.0e-5f));

		if (GetCount()) {
			int32_t removeCount = 0;
			int32_t stride = int32_t (strideInBytes / sizeof (double));

			int32_t polygon[1024 * 8];
			dgEdge* diagonalsPool[1024 * 8];
			dgPolyhedra buildConvex;

			buildConvex.BeginFace();
			dgPolyhedra::Iterator iter (*this);
			for (iter.Begin(); iter; ) {
				dgEdge* edge = &(*iter);
				iter++;
				if (edge->m_incidentFace > 0) {

					// Extract the coplanar patch containing 'edge' (removes
					// its faces from *this).
					dgPolyhedra flatFace;
					MarkAdjacentCoplanarFaces (flatFace, edge, vertex, strideInBytes);

					if (flatFace.GetCount()) {
						flatFace.RefineTriangulation (vertex, stride);
						RemoveColinearVertices (flatFace, vertex, stride);

						int32_t diagonalCount = GetInteriorDiagonals (flatFace, diagonalsPool, sizeof (diagonalsPool) / sizeof (diagonalsPool[0]));
						if (diagonalCount) {
							// Compute the patch normal from any real face.
							edge = &flatFace.GetRoot()->GetInfo();
							if (edge->m_incidentFace < 0) {
								edge = edge->m_twin;
							}
							HACD_ASSERT (edge->m_incidentFace > 0);

							dgBigVector normal (FaceNormal (edge, vertex, strideInBytes));
							normal = normal.Scale (double (1.0f) / sqrt (normal % normal));

							// Find a border edge of the patch to walk the
							// outline from.
							edge = NULL;
							dgPolyhedra::Iterator iter (flatFace);
							for (iter.Begin(); iter; iter ++) {
								edge = &(*iter);
								if (edge->m_incidentFace < 0) {
									break;
								}
							}
							HACD_ASSERT (edge);

							// Convexity test 1: no outline corner may turn
							// against the patch normal (with small slack).
							int32_t isConvex = 1;
							dgEdge* ptr = edge;
							int32_t mark = flatFace.IncLRU();

							dgBigVector normal2 (normal);
							dgBigVector p0 (&vertex[ptr->m_prev->m_incidentVertex * stride]);
							dgBigVector p1 (&vertex[ptr->m_incidentVertex * stride]);
							dgBigVector e0 (p1 - p0);
							e0 = e0.Scale (float (1.0f) / (dgSqrt (e0 % e0) + float (1.0e-14f)));
							do {
								dgBigVector p2 (&vertex[ptr->m_next->m_incidentVertex * stride]);
								dgBigVector e1 (p2 - p1);
								e1 = e1.Scale (float (1.0f) / (sqrt (e1 % e1) + float (1.0e-14f)));
								double dot = (e0 * e1) % normal2;
								//if (dot > float (0.0f)) {
								if (dot > float (5.0e-3f)) {
									isConvex = 0;
									break;
								}
								ptr->m_mark = mark;
								e0 = e1;
								p1 = p2;
								ptr = ptr->m_next;
							} while (ptr != edge);

							// Convexity test 2: the patch must have a single
							// border loop (every border edge was marked above).
							if (isConvex) {
								dgPolyhedra::Iterator iter (flatFace);
								for (iter.Begin(); iter; iter ++) {
									ptr = &(*iter);
									if (ptr->m_incidentFace < 0) {
										if (ptr->m_mark < mark) {
											isConvex = 0;
											break;
										}
									}
								}
							}

							// Convexity test 3: the outline must not revisit
							// any vertex (no pinch points).
							if (isConvex) {
								if (diagonalCount > 2) {
									int32_t count = 0;
									ptr = edge;
									do {
										polygon[count] = ptr->m_incidentVertex;
										count ++;
										HACD_ASSERT (count < int32_t (sizeof (polygon) / sizeof (polygon[0])));
										ptr = ptr->m_next;
									} while (ptr != edge);

									for (int32_t i = 0; i < count - 1; i ++) {
										for (int32_t j = i + 1; j < count; j ++) {
											if (polygon[i] == polygon[j]) {
												i = count;
												isConvex = 0;
												break ;
											}
										}
									}
								}
							}

							// Merge: drop every diagonal when convex,
							// otherwise keep only the essential ones.
							if (isConvex) {
								for (int32_t j = 0; j < diagonalCount; j ++) {
									dgEdge* const diagonal = diagonalsPool[j];
									removeCount ++;
									flatFace.DeleteEdge (diagonal);
								}
							} else {
								for (int32_t j = 0; j < diagonalCount; j ++) {
									dgEdge* const diagonal = diagonalsPool[j];
									if (!IsEssensialDiagonal(diagonal, normal, vertex, stride)) {
										removeCount ++;
										flatFace.DeleteEdge (diagonal);
									}
								}
							}
						}

						// Copy the patch's surviving faces to the output.
						int32_t mark = flatFace.IncLRU();
						dgPolyhedra::Iterator iter (flatFace);
						for (iter.Begin(); iter; iter ++) {
							dgEdge* const edge = &(*iter);
							if (edge->m_mark != mark) {
								if (edge->m_incidentFace > 0) {
									dgEdge* ptr = edge;
									int32_t diagonalCount = 0;
									do {
										polygon[diagonalCount] = ptr->m_incidentVertex;
										diagonalCount ++;
										HACD_ASSERT (diagonalCount < int32_t (sizeof (polygon) / sizeof (polygon[0])));
										ptr->m_mark = mark;
										ptr = ptr->m_next;
									} while (ptr != edge);
									if (diagonalCount >= 3) {
										buildConvex.AddFace (diagonalCount, polygon);
									}
								}
							}
						}
					}
					// Restart iteration: the edge tree was mutated above.
					iter.Begin();
				}
			}

			buildConvex.EndFace();
			HACD_ASSERT (GetCount() == 0);
			SwapInfo(buildConvex);
		}
	}
}
+
+
// Computes an oriented bounding volume (dgSphere: an oriented box with
// m_posit / m_size) enclosing every vertex referenced by this polyhedra.
// vertex: packed array of doubles; strideInBytes: byte distance between two
// consecutive vertices. The basis argument is currently unused.
dgSphere dgPolyhedra::CalculateSphere (const double* const vertex, int32_t strideInBytes, const dgMatrix* const /*basis*/) const
{
/*
	// this is a degenerate mesh of a flat plane calculate OOBB by discrete rotations
	dgStack<int32_t> pool (GetCount() * 3 + 6);
	int32_t* const indexList = &pool[0];

	dgMatrix axis (dgGetIdentityMatrix());
	dgBigVector p0 (float ( 1.0e10f), float ( 1.0e10f), float ( 1.0e10f), float (0.0f));
	dgBigVector p1 (float (-1.0e10f), float (-1.0e10f), float (-1.0e10f), float (0.0f));

	int32_t stride = int32_t (strideInBytes / sizeof (double));
	int32_t indexCount = 0;
	int32_t mark = IncLRU();
	dgPolyhedra::Iterator iter(*this);
	for (iter.Begin(); iter; iter ++) {
		dgEdge* const edge = &(*iter);
		if (edge->m_mark != mark) {
			dgEdge *ptr = edge;
			do {
				ptr->m_mark = mark;
				ptr = ptr->m_twin->m_next;
			} while (ptr != edge);
			int32_t index = edge->m_incidentVertex;
			indexList[indexCount + 6] = edge->m_incidentVertex;
			dgBigVector point (vertex[index * stride + 0], vertex[index * stride + 1], vertex[index * stride + 2], float (0.0f));
			for (int32_t i = 0; i < 3; i ++) {
				if (point[i] < p0[i]) {
					p0[i] = point[i];
					indexList[i * 2 + 0] = index;
				}
				if (point[i] > p1[i]) {
					p1[i] = point[i];
					indexList[i * 2 + 1] = index;
				}
			}
			indexCount ++;
		}
	}
	indexCount += 6;


	dgBigVector size (p1 - p0);
	double volume = size.m_x * size.m_y * size.m_z;


	for (float pitch = float (0.0f); pitch < float (90.0f); pitch += float (10.0f)) {
		dgMatrix pitchMatrix (dgPitchMatrix(pitch * float (3.1416f) / float (180.0f)));
		for (float yaw = float (0.0f); yaw < float (90.0f); yaw += float (10.0f)) {
			dgMatrix yawMatrix (dgYawMatrix(yaw * float (3.1416f) / float (180.0f)));
			for (float roll = float (0.0f); roll < float (90.0f); roll += float (10.0f)) {
				int32_t tmpIndex[6];
				dgMatrix rollMatrix (dgRollMatrix(roll * float (3.1416f) / float (180.0f)));
				dgMatrix tmp (pitchMatrix * yawMatrix * rollMatrix);
				dgBigVector q0 (float ( 1.0e10f), float ( 1.0e10f), float ( 1.0e10f), float (0.0f));
				dgBigVector q1 (float (-1.0e10f), float (-1.0e10f), float (-1.0e10f), float (0.0f));

				float volume1 = float (1.0e10f);
				for (int32_t i = 0; i < indexCount; i ++) {
					int32_t index = indexList[i];
					dgBigVector point (vertex[index * stride + 0], vertex[index * stride + 1], vertex[index * stride + 2], float (0.0f));
					point = tmp.UnrotateVector(point);

					for (int32_t j = 0; j < 3; j ++) {
						if (point[j] < q0[j]) {
							q0[j] = point[j];
							tmpIndex[j * 2 + 0] = index;
						}
						if (point[j] > q1[j]) {
							q1[j] = point[j];
							tmpIndex[j * 2 + 1] = index;
						}
					}


					dgVector size1 (q1 - q0);
					volume1 = size1.m_x * size1.m_y * size1.m_z;
					if (volume1 >= volume) {
						break;
					}
				}

				if (volume1 < volume) {
					p0 = q0;
					p1 = q1;
					axis = tmp;
					volume = volume1;
					memcpy (indexList, tmpIndex, sizeof (tmpIndex));
				}
			}
		}
	}

	HACD_ASSERT (0);
	dgSphere sphere (axis);
	dgVector q0 (p0);
	dgVector q1 (p1);
	sphere.m_posit = axis.RotateVector((q1 + q0).Scale (float (0.5f)));
	sphere.m_size = (q1 - q0).Scale (float (0.5f));
	return sphere;
*/

	int32_t stride = int32_t (strideInBytes / sizeof (double));

	// First traversal: count unique vertices. Each vertex ring is walked once
	// via twin->next and stamped with the current LRU mark so it is not
	// revisited.
	int32_t vertexCount = 0;
	int32_t mark = IncLRU();
	dgPolyhedra::Iterator iter(*this);
	for (iter.Begin(); iter; iter ++) {
		dgEdge* const edge = &(*iter);
		if (edge->m_mark != mark) {
			dgEdge* ptr = edge;
			do {
				ptr->m_mark = mark;
				ptr = ptr->m_twin->m_next;
			} while (ptr != edge);
			vertexCount ++;
		}
	}
	HACD_ASSERT (vertexCount);

	// Second traversal: gather one position per unique vertex into pool.
	mark = IncLRU();
	int32_t vertexCountIndex = 0;
	dgStack<dgBigVector> pool (vertexCount);
	for (iter.Begin(); iter; iter ++) {
		dgEdge* const edge = &(*iter);
		if (edge->m_mark != mark) {
			dgEdge* ptr = edge;
			do {
				ptr->m_mark = mark;
				ptr = ptr->m_twin->m_next;
			} while (ptr != edge);
			int32_t incidentVertex = edge->m_incidentVertex * stride;
			pool[vertexCountIndex] = dgBigVector (vertex[incidentVertex + 0], vertex[incidentVertex + 1], vertex[incidentVertex + 2], float (0.0f));
			vertexCountIndex ++;
		}
	}
	HACD_ASSERT (vertexCountIndex <= vertexCount);

	dgMatrix axis (dgGetIdentityMatrix());
	dgSphere sphere (axis);
	// Prefer fitting the box to the convex hull of the points: fewer triangles
	// than the full mesh, same extents.
	dgConvexHull3d convexHull (&pool[0].m_x, sizeof (dgBigVector), vertexCountIndex, 0.0f);
	if (convexHull.GetCount()) {
		dgStack<int32_t> triangleList (convexHull.GetCount() * 3);
		int32_t trianglesCount = 0;
		for (dgConvexHull3d::dgListNode* node = convexHull.GetFirst(); node; node = node->GetNext()) {
			dgConvexHull3DFace* const face = &node->GetInfo();
			triangleList[trianglesCount * 3 + 0] = face->m_index[0];
			triangleList[trianglesCount * 3 + 1] = face->m_index[1];
			triangleList[trianglesCount * 3 + 2] = face->m_index[2];
			trianglesCount ++;
			HACD_ASSERT ((trianglesCount * 3) <= triangleList.GetElementsCount());
		}

		// Repack the hull vertices in place as single-precision dgVectors
		// (reusing the pool storage) before handing them to SetDimensions.
		dgVector* const dst = (dgVector*) &pool[0].m_x;
		for (int32_t i = 0; i < convexHull.GetVertexCount(); i ++) {
			dst[i] = convexHull.GetVertex(i);
		}
		sphere.SetDimensions (&dst[0].m_x, sizeof (dgVector), &triangleList[0], trianglesCount * 3, NULL);

	} else if (vertexCountIndex >= 3) {
		// Hull construction failed (degenerate, e.g. flat, mesh): fan-triangulate
		// each vertex ring and feed the triangles to SetDimensions directly.
		dgStack<int32_t> triangleList (GetCount() * 3 * 2);
		// NOTE(review): this inner 'mark' shadows the outer 'mark' declared above.
		int32_t mark = IncLRU();
		int32_t trianglesCount = 0;
		for (iter.Begin(); iter; iter ++) {
			dgEdge* const edge = &(*iter);
			if (edge->m_mark != mark) {
				dgEdge* ptr = edge;
				do {
					ptr->m_mark = mark;
					ptr = ptr->m_twin->m_next;
				} while (ptr != edge);

				ptr = edge->m_next->m_next;
				do {
					triangleList[trianglesCount * 3 + 0] = edge->m_incidentVertex;
					triangleList[trianglesCount * 3 + 1] = ptr->m_prev->m_incidentVertex;
					triangleList[trianglesCount * 3 + 2] = ptr->m_incidentVertex;
					trianglesCount ++;
					HACD_ASSERT ((trianglesCount * 3) <= triangleList.GetElementsCount());
					ptr = ptr->m_twin->m_next;
				} while (ptr != edge);

				// NOTE(review): SetDimensions is re-run here for every vertex ring
				// processed; presumably only the final (full triangle list) call
				// matters -- confirm before hoisting it out of the loop.
				dgVector* const dst = (dgVector*) &pool[0].m_x;
				for (int32_t i = 0; i < vertexCountIndex; i ++) {
					dst[i] = pool[i];
				}
				sphere.SetDimensions (&dst[0].m_x, sizeof (dgVector), &triangleList[0], trianglesCount * 3, NULL);
			}
		}
	}
	return sphere;

}
diff --git a/APEX_1.4/shared/general/HACD/src/dgQuaternion.cpp b/APEX_1.4/shared/general/HACD/src/dgQuaternion.cpp
new file mode 100644
index 00000000..01b7275a
--- /dev/null
+++ b/APEX_1.4/shared/general/HACD/src/dgQuaternion.cpp
@@ -0,0 +1,184 @@
+/* Copyright (c) <2003-2011> <Julio Jerez, Newton Game Dynamics>
+*
+* This software is provided 'as-is', without any express or implied
+* warranty. In no event will the authors be held liable for any damages
+* arising from the use of this software.
+*
+* Permission is granted to anyone to use this software for any purpose,
+* including commercial applications, and to alter it and redistribute it
+* freely, subject to the following restrictions:
+*
+* 1. The origin of this software must not be misrepresented; you must not
+* claim that you wrote the original software. If you use this software
+* in a product, an acknowledgment in the product documentation would be
+* appreciated but is not required.
+*
+* 2. Altered source versions must be plainly marked as such, and must not be
+* misrepresented as being the original software.
+*
+* 3. This notice may not be removed or altered from any source distribution.
+*/
+
+#include "dgVector.h"
+#include "dgMatrix.h"
+#include "dgQuaternion.h"
+
// Builds a unit quaternion from a rotation matrix using the classic two-branch
// conversion: when the trace is positive it is used directly; otherwise the
// largest diagonal element is chosen as the pivot for numerical stability.
dgQuaternion::dgQuaternion (const dgMatrix &matrix)
{
	// Cyclic successor table: QIndex[X] = Y, QIndex[Y] = Z, QIndex[Z] = X.
	enum QUAT_INDEX
	{
		X_INDEX=0,
		Y_INDEX=1,
		Z_INDEX=2
	};
	static QUAT_INDEX QIndex [] = {Y_INDEX, Z_INDEX, X_INDEX};

	float *ptr;
	float trace;
	QUAT_INDEX i;
	QUAT_INDEX j;
	QUAT_INDEX k;

	trace = matrix[0][0] + matrix[1][1] + matrix[2][2];
	if (trace > float(0.0f)) {
		// Stable branch: w (m_q0) dominates.
		trace = dgSqrt (trace + float(1.0f));
		m_q0 = float (0.5f) * trace;
		trace = float (0.5f) / trace;
		m_q1 = (matrix[1][2] - matrix[2][1]) * trace;
		m_q2 = (matrix[2][0] - matrix[0][2]) * trace;
		m_q3 = (matrix[0][1] - matrix[1][0]) * trace;

	} else {
		// Pivot on the largest diagonal entry; i, j, k are a cyclic permutation.
		i = X_INDEX;
		if (matrix[Y_INDEX][Y_INDEX] > matrix[X_INDEX][X_INDEX]) {
			i = Y_INDEX;
		}
		if (matrix[Z_INDEX][Z_INDEX] > matrix[i][i]) {
			i = Z_INDEX;
		}
		j = QIndex [i];
		k = QIndex [j];

		trace = float(1.0f) + matrix[i][i] - matrix[j][j] - matrix[k][k];
		trace = dgSqrt (trace);

		// Index the vector part through a pointer; assumes m_q1, m_q2, m_q3 are
		// laid out contiguously in memory.
		ptr = &m_q1;
		ptr[i] = float (0.5f) * trace;
		trace = float (0.5f) / trace;
		m_q0 = (matrix[j][k] - matrix[k][j]) * trace;
		ptr[j] = (matrix[i][j] + matrix[j][i]) * trace;
		ptr[k] = (matrix[i][k] + matrix[k][i]) * trace;
	}

}
+
+
+dgQuaternion::dgQuaternion (const dgVector &unitAxis, float Angle)
+{
+ float sinAng;
+
+ Angle *= float (0.5f);
+ m_q0 = dgCos (Angle);
+ sinAng = dgSin (Angle);
+
+ m_q1 = unitAxis.m_x * sinAng;
+ m_q2 = unitAxis.m_y * sinAng;
+ m_q3 = unitAxis.m_z * sinAng;
+
+}
+
+
// Angular velocity that rotates this quaternion into QB over time step dt.
// Stubbed out in this HACD port: asserts and returns the zero vector.
// The original Newton implementation is preserved in the comment below.
dgVector dgQuaternion::CalcAverageOmega (const dgQuaternion &/*QB*/, float /*dt*/) const
{
HACD_ASSERT (0);
return dgVector (0, 0, 0, 0);
/*
	float dirMag;
	float dirMag2;
	float omegaMag;
	float dirMagInv;

	HACD_ASSERT (0);
	dgQuaternion dq (Inverse() * QB);
//	dgQuaternion dq (QB * Inverse());
	dgVector omegaDir (dq.m_q1, dq.m_q2, dq.m_q3, float (0.0f));

	dirMag2 = omegaDir % omegaDir;
	if (dirMag2	< float(float (1.0e-5f) * float (1.0e-5f))) {
		return dgVector (float(0.0f), float(0.0f), float(0.0f), float(0.0f));
	}

	dirMagInv = dgRsqrt (dirMag2);
	dirMag = dirMag2 * dirMagInv;

	omegaMag = float(2.0f) * dgAtan2 (dirMag, dq.m_q0) / dt;

	return omegaDir.Scale (dirMagInv * omegaMag);
*/
}
+
+
// Spherical linear interpolation between this quaternion and QB at parameter t.
// Stubbed out in this HACD port: asserts and returns a default quaternion.
// The original Newton implementation is preserved in the comment below.
dgQuaternion dgQuaternion::Slerp (const dgQuaternion &/*QB*/, float /*t*/) const
{
HACD_ASSERT (0);
return dgQuaternion();
/*
	float dot;
	float ang;
	float Sclp;
	float Sclq;
	float den;
	float sinAng;
	dgQuaternion Q;

	dot = DotProduct (QB);

	if ((dot + float(1.0f)) > dgEPSILON) {
		if (dot < (float(1.0f) - dgEPSILON) ) {
			ang = dgAcos (dot);

			sinAng = dgSin (ang);
			den = float(1.0f) / sinAng;

			Sclp = dgSin ((float(1.0f) - t ) * ang) * den;
			Sclq = dgSin (t * ang) * den;

		} else  {
			Sclp = float(1.0f) - t;
			Sclq = t;
		}

		Q.m_q0 = m_q0 * Sclp + QB.m_q0 * Sclq;
		Q.m_q1 = m_q1 * Sclp + QB.m_q1 * Sclq;
		Q.m_q2 = m_q2 * Sclp + QB.m_q2 * Sclq;
		Q.m_q3 = m_q3 * Sclp + QB.m_q3 * Sclq;

	} else {
		Q.m_q0 = m_q3;
		Q.m_q1 = -m_q2;
		Q.m_q2 = m_q1;
		Q.m_q3 = m_q0;

		Sclp = dgSin ((float(1.0f) - t) * dgPI * float (0.5f));
		Sclq = dgSin (t * dgPI * float (0.5f));

		Q.m_q0 = m_q0 * Sclp + Q.m_q0 * Sclq;
		Q.m_q1 = m_q1 * Sclp + Q.m_q1 * Sclq;
		Q.m_q2 = m_q2 * Sclp + Q.m_q2 * Sclq;
		Q.m_q3 = m_q3 * Sclp + Q.m_q3 * Sclq;
	}

	dot = Q.DotProduct (Q);
	if ((dot) < float(1.0f - dgEPSILON * 10.0f) ) {
		//dot = float(1.0f) / dgSqrt (dot);
		dot = dgRsqrt (dot);
		Q.m_q0 *= dot;
		Q.m_q1 *= dot;
		Q.m_q2 *= dot;
		Q.m_q3 *= dot;
	}
	return Q;
*/
}
+
+
diff --git a/APEX_1.4/shared/general/HACD/src/dgSmallDeterminant.cpp b/APEX_1.4/shared/general/HACD/src/dgSmallDeterminant.cpp
new file mode 100644
index 00000000..e0e9e52b
--- /dev/null
+++ b/APEX_1.4/shared/general/HACD/src/dgSmallDeterminant.cpp
@@ -0,0 +1,153 @@
+/* Copyright (c) <2003-2011> <Julio Jerez, Newton Game Dynamics>
+*
+* This software is provided 'as-is', without any express or implied
+* warranty. In no event will the authors be held liable for any damages
+* arising from the use of this software.
+*
+* Permission is granted to anyone to use this software for any purpose,
+* including commercial applications, and to alter it and redistribute it
+* freely, subject to the following restrictions:
+*
+* 1. The origin of this software must not be misrepresented; you must not
+* claim that you wrote the original software. If you use this software
+* in a product, an acknowledgment in the product documentation would be
+* appreciated but is not required.
+*
+* 2. Altered source versions must be plainly marked as such, and must not be
+* misrepresented as being the original software.
+*
+* 3. This notice may not be removed or altered from any source distribution.
+*/
+
+#include "dgTypes.h"
+#include "dgGoogol.h"
+#include "dgSmallDeterminant.h"
+
+#define Absolute(a) ((a) >= 0.0 ? (a) : -(a))
+
// Determinant of a 2x2 matrix. *error receives a round-off bound: the sum of
// the magnitudes of the two products that are subtracted from each other.
double Determinant2x2 (const double matrix[2][2], double* const error)
{
	const double diag = matrix[0][0] * matrix[1][1];
	const double antiDiag = matrix[0][1] * matrix[1][0];
	*error = (diag >= 0.0 ? diag : -diag) + (antiDiag >= 0.0 ? antiDiag : -antiDiag);
	return diag - antiDiag;
}
+
+dgGoogol Determinant2x2 (const dgGoogol matrix[2][2])
+{
+ dgGoogol a00xa11 (matrix[0][0] * matrix[1][1]);
+ dgGoogol a01xa10 (matrix[0][1] * matrix[1][0]);
+ return a00xa11 - a01xa10;
+}
+
+
+
// Cofactor expansion of a 3x3 determinant along the last row, accumulating a
// round-off error bound in *error.
// NOTE: 'sign' starts at -1, so this returns the NEGATED determinant of the
// matrix (e.g. it yields -1 for the identity). Determinant4x4 below compensates
// by starting its own expansion with sign = +1; callers rely on this pairing,
// so do not "fix" the sign here in isolation.
double Determinant3x3 (const double matrix[3][3], double* const error)
{
	double sign = double (-1.0f);
	double det = double (0.0f);
	double accError = double (0.0f);
	for (int32_t i = 0; i < 3; i ++)  {
		// cofactor = rows 0..1 with column i deleted.
		double cofactor[2][2];
		for (int32_t j = 0; j < 2; j ++) {
			int32_t k0 = 0;
			for (int32_t k = 0; k < 3; k ++) {
				if (k != i) {
					cofactor[j][k0] = matrix[j][k];
					k0 ++;
				}
			}
		}

		double parcialError;
		double minorDet = Determinant2x2 (cofactor, &parcialError);
		// Error bound scales with the magnitude of the multiplier from row 2.
		accError += parcialError * Absolute (matrix[2][i]);
		det += sign * minorDet * matrix[2][i];
		sign *= double (-1.0f);
	}

	*error = accError;
	return det;
}
+
// Exact (dgGoogol) cofactor expansion of a 3x3 determinant along the last row.
// NOTE: like the double version above, 'sign' starts at -1, so the returned
// value is the NEGATED determinant; the dgGoogol Determinant4x4 compensates.
dgGoogol Determinant3x3 (const dgGoogol matrix[3][3])
{
	dgGoogol negOne (double (-1.0f));
	dgGoogol sign (double (-1.0f));
	dgGoogol det = double (0.0f);
	for (int32_t i = 0; i < 3; i ++)  {
		// cofactor = rows 0..1 with column i deleted.
		dgGoogol cofactor[2][2];

		for (int32_t j = 0; j < 2; j ++) {
			int32_t k0 = 0;
			for (int32_t k = 0; k < 3; k ++) {
				if (k != i) {
					cofactor[j][k0] = matrix[j][k];
					k0 ++;
				}
			}
		}

		dgGoogol minorDet (Determinant2x2 (cofactor));
		det = det + sign * minorDet * matrix[2][i];
		sign = sign * negOne;
	}
	return det;
}
+
+
// Cofactor expansion of a 4x4 determinant along the last row, accumulating a
// round-off error bound in *error. 'sign' starts at +1, which compensates for
// the sign-flipped value returned by Determinant3x3 above (whose expansion
// starts at -1), so this returns the true determinant.
double Determinant4x4 (const double matrix[4][4], double* const error)
{
	double sign = double (1.0f);
	double det = double (0.0f);
	double accError = double (0.0f);
	for (int32_t i = 0; i < 4; i ++)  {
		// cofactor = rows 0..2 with column i deleted.
		double cofactor[3][3];
		for (int32_t j = 0; j < 3; j ++) {
			int32_t k0 = 0;
			for (int32_t k = 0; k < 4; k ++) {
				if (k != i) {
					cofactor[j][k0] = matrix[j][k];
					k0 ++;
				}
			}
		}

		double parcialError;
		double minorDet = Determinant3x3 (cofactor, &parcialError);
		// Error bound scales with the magnitude of the multiplier from row 3.
		accError += parcialError * Absolute (matrix[3][i]);
		det += sign * minorDet * matrix[3][i];
		sign *= double (-1.0f);
	}

	*error = accError;
	return det;
}
+
+
+dgGoogol Determinant4x4 (const dgGoogol matrix[4][4])
+{
+ dgGoogol sign = double (1.0f);
+ dgGoogol det = double (0.0f);
+ dgGoogol negOne (double (-1.0f));
+ dgGoogol accError = double (0.0f);
+ for (int32_t i = 0; i < 4; i ++) {
+ dgGoogol cofactor[3][3];
+ for (int32_t j = 0; j < 3; j ++) {
+ int32_t k0 = 0;
+ for (int32_t k = 0; k < 4; k ++) {
+ if (k != i) {
+ cofactor[j][k0] = matrix[j][k];
+ k0 ++;
+ }
+ }
+ }
+
+ dgGoogol minorDet = Determinant3x3 (cofactor);
+ det = det + sign * minorDet * matrix[3][i];
+ sign = sign * negOne;
+ }
+ return det;
+}
+
+
diff --git a/APEX_1.4/shared/general/HACD/src/dgSphere.cpp b/APEX_1.4/shared/general/HACD/src/dgSphere.cpp
new file mode 100644
index 00000000..e6c8879e
--- /dev/null
+++ b/APEX_1.4/shared/general/HACD/src/dgSphere.cpp
@@ -0,0 +1,894 @@
+/* Copyright (c) <2003-2011> <Julio Jerez, Newton Game Dynamics>
+*
+* This software is provided 'as-is', without any express or implied
+* warranty. In no event will the authors be held liable for any damages
+* arising from the use of this software.
+*
+* Permission is granted to anyone to use this software for any purpose,
+* including commercial applications, and to alter it and redistribute it
+* freely, subject to the following restrictions:
+*
+* 1. The origin of this software must not be misrepresented; you must not
+* claim that you wrote the original software. If you use this software
+* in a product, an acknowledgment in the product documentation would be
+* appreciated but is not required.
+*
+* 2. Altered source versions must be plainly marked as such, and must not be
+* misrepresented as being the original software.
+*
+* 3. This notice may not be removed or altered from any source distribution.
+*/
+
+#include "dgTypes.h"
+#include "dgPlane.h"
+#include "dgSphere.h"
+#include "dgMatrix.h"
+
// Shared identity sphere (identity orientation, zero half-extents), returned
// by reference so callers get a stable default instance.
static dgSphere identitySphere;
const dgSphere& GetIdentitySphere()
{
	return identitySphere;
}
+
+
+
namespace InternalSphere
{
	// Padding added to the half-extents of boxes fitted from raw point clouds
	// (see the point-cloud SetDimensions overload).
	const float SPHERE_TOL = 0.002f;

	// Returns |smaller| / |larger| of the two magnitudes, in [0, 1]; a value
	// near 1 means the inputs have nearly equal magnitude. The denominator is
	// clamped to avoid division by zero.
	static float AspectRatio (float x, float y)
	{
		float tmp;
		x = dgAbsf (x);
		y = dgAbsf (y);

		if (y < x) {
			tmp = y;
			y = x;
			x = tmp;
		}
		if (y < 1.0e-12) {
			y = 1.0e-12f;
		}
		return x / y;
	}


	// Axis aligned bounding box, expressed in the local frame of 'matrix'
	// (points are un-rotated into it), of the vertices referenced by 'index'.
	static void BoundingBox (const dgMatrix &matrix, const float vertex[], int32_t stride, const int32_t index[], int32_t indexCount, dgVector &min, dgVector &max)
	{
		float xmin = float (1.0e10f);
		float ymin = float (1.0e10f);
		float zmin = float (1.0e10f);

		float xmax = float (-1.0e10f);
		float ymax = float (-1.0e10f);
		float zmax = float (-1.0e10f);

		const float* const ptr = vertex;
		for (int32_t j = 0 ; j < indexCount; j ++ ) {
			int32_t i = index[j] * stride;
			dgVector tmp (ptr[i + 0], ptr[i + 1], ptr[i + 2], float (0.0f));
			tmp = matrix.UnrotateVector (tmp);
			if (tmp.m_x < xmin) xmin = tmp.m_x;
			if (tmp.m_y < ymin) ymin = tmp.m_y;
			if (tmp.m_z < zmin) zmin = tmp.m_z;
			if (tmp.m_x > xmax) xmax = tmp.m_x;
			if (tmp.m_y > ymax) ymax = tmp.m_y;
			if (tmp.m_z > zmax) zmax = tmp.m_z;
		}

		min = dgVector (xmin, ymin, zmin, float (0.0f));
		max = dgVector (xmax, ymax, zmax, float (0.0f));
	}

	// Compute axis aligned box (in the local frame of 'Mat') of a packed
	// vertex array of 'vertexCount' points.
	static void BoundingBox (const dgMatrix &Mat, const float vertex[], int32_t vertexCount, int32_t stride, dgVector &min, dgVector &max)
	{
		float xmin = float (1.0e10f);
		float ymin = float (1.0e10f);
		float zmin = float (1.0e10f);

		float xmax = float (-1.0e10f);
		float ymax = float (-1.0e10f);
		float zmax = float (-1.0e10f);

		const float* ptr = vertex;
		for (int32_t i = 0 ; i < vertexCount; i ++ ) {
			dgVector tmp (ptr[0], ptr[1], ptr[2], float (0.0f));
			ptr += stride;
			tmp = Mat.UnrotateVector (tmp);
			if (tmp.m_x < xmin) xmin = tmp.m_x;
			if (tmp.m_y < ymin) ymin = tmp.m_y;
			if (tmp.m_z < zmin) zmin = tmp.m_z;
			if (tmp.m_x > xmax) xmax = tmp.m_x;
			if (tmp.m_y > ymax) ymax = tmp.m_y;
			if (tmp.m_z > zmax) zmax = tmp.m_z;
		}

		min = dgVector (xmin, ymin, zmin, float (0.0f));
		max = dgVector (xmax, ymax, zmax, float (0.0f));
	}


	// Builds the area-weighted covariance (inertia-like) matrix of the triangle
	// surface described by 'faceIndex' (3 indices per triangle), stores its rows
	// in the sphere's rotation part, and diagonalizes it via EigenVectors so the
	// eigenvectors become candidate box axes. 'scaleVector' lets the caller
	// stretch the point set to break ties between near-equal eigenvalues.
	static void Statistics (dgSphere &sphere, dgVector &eigenValues, dgVector &scaleVector, const float vertex[], const int32_t faceIndex[], int32_t indexCount, int32_t stride)
	{
		dgVector var (float (0.0f), float (0.0f), float (0.0f), float (0.0f));
		dgVector cov (float (0.0f), float (0.0f), float (0.0f), float (0.0f));
		dgVector centre (float (0.0f), float (0.0f), float (0.0f), float (0.0f));
		dgVector massCenter (float (0.0f), float (0.0f), float (0.0f), float (0.0f));

		double totalArea = float (0.0f);
		const float* const ptr = vertex;
		for (int32_t i = 0; i < indexCount; i += 3) {
			int32_t index = faceIndex[i] * stride;
			dgVector p0 (&ptr[index]);
			p0 = p0.CompProduct (scaleVector);

			index = faceIndex[i + 1] * stride;;
			dgVector p1 (&ptr[index]);
			p1 = p1.CompProduct (scaleVector);

			index = faceIndex[i + 2] * stride;;
			dgVector p2 (&ptr[index]);
			p2 = p2.CompProduct (scaleVector);

			dgVector normal ((p1 - p0) * (p2 - p0));

			// Triangle area = half the cross-product magnitude.
			double area = float (0.5f) * sqrt (normal % normal);

			centre = p0 + p1 + p2;
			centre = centre.Scale (float (1.0f / 3.0f));

			// Inertia of each point in the triangle
			double Ixx = p0.m_x * p0.m_x + p1.m_x * p1.m_x + p2.m_x * p2.m_x;
			double Iyy = p0.m_y * p0.m_y + p1.m_y * p1.m_y + p2.m_y * p2.m_y;
			double Izz = p0.m_z * p0.m_z + p1.m_z * p1.m_z + p2.m_z * p2.m_z;

			double Ixy = p0.m_x * p0.m_y + p1.m_x * p1.m_y + p2.m_x * p2.m_y;
			double Iyz = p0.m_y * p0.m_z + p1.m_y * p1.m_z + p2.m_y * p2.m_z;
			double Ixz = p0.m_x * p0.m_z + p1.m_x * p1.m_z + p2.m_x * p2.m_z;

			// Skip degenerate (near zero area) triangles.
			if (area > dgEPSILON * 10.0) {
				double K = area / double (12.0);
				//Coriolis theorem for Inertia of a triangle in an arbitrary orientation
				Ixx = K * (Ixx + 9.0 * centre.m_x * centre.m_x);
				Iyy = K * (Iyy + 9.0 * centre.m_y * centre.m_y);
				Izz = K * (Izz + 9.0 * centre.m_z * centre.m_z);

				Ixy = K * (Ixy + 9.0 * centre.m_x * centre.m_y);
				Ixz = K * (Ixz + 9.0 * centre.m_x * centre.m_z);
				Iyz = K * (Iyz + 9.0 * centre.m_y * centre.m_z);
				centre = centre.Scale ((float)area);
			}

			totalArea += area;
			massCenter += centre;
			var += dgVector ((float)Ixx, (float)Iyy, (float)Izz, float (0.0f));
			cov += dgVector ((float)Ixy, (float)Ixz, (float)Iyz, float (0.0f));
		}

		// Normalize by the total surface area (guarded against a degenerate,
		// zero-area surface).
		if (totalArea > dgEPSILON * 10.0) {
			double K = double (1.0) / totalArea;
			var = var.Scale ((float)K);
			cov = cov.Scale ((float)K);
			massCenter = massCenter.Scale ((float)K);
		}

		// Central second moments: subtract the mean outer product.
		double Ixx = var.m_x - massCenter.m_x * massCenter.m_x;
		double Iyy = var.m_y - massCenter.m_y * massCenter.m_y;
		double Izz = var.m_z - massCenter.m_z * massCenter.m_z;

		double Ixy = cov.m_x - massCenter.m_x * massCenter.m_y;
		double Ixz = cov.m_y - massCenter.m_x * massCenter.m_z;
		double Iyz = cov.m_z - massCenter.m_y * massCenter.m_z;

		sphere.m_front = dgVector ((float)Ixx, (float)Ixy, (float)Ixz, float (0.0f));
		sphere.m_up    = dgVector ((float)Ixy, (float)Iyy, (float)Iyz, float (0.0f));
		sphere.m_right = dgVector ((float)Ixz, (float)Iyz, (float)Izz, float (0.0f));
		sphere.EigenVectors(eigenValues);
	}


	// Point-cloud variant: covariance matrix / principal axes of a packed array
	// of 'vertexCount' points, optionally stretched by 'scaleVector'.
	static void Statistics (dgSphere &sphere, dgVector &eigenValues, dgVector &scaleVector, const float vertex[], int32_t vertexCount, int32_t stride)
	{
		dgBigVector var (float (0.0f), float (0.0f), float (0.0f), float (0.0f));
		dgBigVector cov (float (0.0f), float (0.0f), float (0.0f), float (0.0f));
		dgBigVector massCenter (float (0.0f), float (0.0f), float (0.0f), float (0.0f));

		const float* ptr = vertex;
		for (int32_t i = 0; i < vertexCount; i ++) {
			float x = ptr[0] * scaleVector.m_x;
			float y = ptr[1] * scaleVector.m_y;
			float z = ptr[2] * scaleVector.m_z;
			ptr += stride;
			massCenter += dgBigVector (x, y, z, float (0.0f));
			var += dgBigVector (x * x, y * y, z * z, float (0.0f));
			cov += dgBigVector (x * y, x * z, y * z, float (0.0f));
		}

		double k = double (1.0) / vertexCount;
		var = var.Scale (k);
		cov = cov.Scale (k);
		massCenter = massCenter.Scale (k);

		// Central second moments: subtract the mean outer product.
		double Ixx = var.m_x - massCenter.m_x * massCenter.m_x;
		double Iyy = var.m_y - massCenter.m_y * massCenter.m_y;
		double Izz = var.m_z - massCenter.m_z * massCenter.m_z;

		double Ixy = cov.m_x - massCenter.m_x * massCenter.m_y;
		double Ixz = cov.m_y - massCenter.m_x * massCenter.m_z;
		double Iyz = cov.m_z - massCenter.m_y * massCenter.m_z;

		sphere.m_front = dgVector (float(Ixx), float(Ixy), float(Ixz), float (0.0f));
		sphere.m_up    = dgVector (float(Ixy), float(Iyy), float(Iyz), float (0.0f));
		sphere.m_right = dgVector (float(Ixz), float(Iyz), float(Izz), float (0.0f));
		sphere.EigenVectors (eigenValues);
	}

/*
	static void Statistics (
		dgSphere &sphere,
		dgVector &eigenValues,
		const dgVector &scaleVector,
		const float vertex[],
		int32_t stride,
		const dgFace face[],
		int32_t faceCount)
	{
		HACD_ASSERT (0);

		int32_t i;
		int32_t index;
		const float *ptr;
		double K;
		double Ixx;
		double Iyy;
		double Izz;
		double Ixy;
		double Ixz;
		double Iyz;
		double area;
		double totalArea;
		const dgFace *Face;

		dgVector var (float (0.0f), float (0.0f), float (0.0f));
		dgVector cov (float (0.0f), float (0.0f), float (0.0f));
		dgVector centre (float (0.0f), float (0.0f), float (0.0f));
		dgVector massCenter (float (0.0f), float (0.0f), float (0.0f));

		totalArea = 0.0;
		ptr = vertex;
		for (i = 0; i < faceCount; i ++) {
			Face = &face[i];

			index = Face->m_wireFrame[0] * stride;
			dgVector p0 (&ptr[index]);
			p0 = p0.CompProduct (scaleVector);

			index = Face->m_wireFrame[1] * stride;
			dgVector p1 (&ptr[index]);
			p1 = p1.CompProduct (scaleVector);

			index = Face->m_wireFrame[2] * stride;
			dgVector p2 (&ptr[index]);
			p2 = p2.CompProduct (scaleVector);

			dgVector normal ((p1 - p0) * (p2 - p0));

			area = 0.5 * sqrt (normal % normal);

			centre = p0 + p1 + p2;
			centre = centre.Scale (1.0f / 3.0f);

			// Inercia of each point in the triangle
			Ixx = p0.m_x * p0.m_x + p1.m_x * p1.m_x + p2.m_x * p2.m_x;
			Iyy = p0.m_y * p0.m_y + p1.m_y * p1.m_y + p2.m_y * p2.m_y;
			Izz = p0.m_z * p0.m_z + p1.m_z * p1.m_z + p2.m_z * p2.m_z;

			Ixy = p0.m_x * p0.m_y + p1.m_x * p1.m_y + p2.m_x * p2.m_y;
			Iyz = p0.m_y * p0.m_z + p1.m_y * p1.m_z + p2.m_y * p2.m_z;
			Ixz = p0.m_x * p0.m_z + p1.m_x * p1.m_z + p2.m_x * p2.m_z;

			if (area > dgEPSILON * 10.0) {
				K = area / 12.0;
				//Coriollis teorem for Inercia of a triangle in an arbitrary orientation
				Ixx = K * (Ixx + 9.0 * centre.m_x * centre.m_x);
				Iyy = K * (Iyy + 9.0 * centre.m_y * centre.m_y);
				Izz = K * (Izz + 9.0 * centre.m_z * centre.m_z);

				Ixy = K * (Ixy + 9.0 * centre.m_x * centre.m_y);
				Ixz = K * (Ixz + 9.0 * centre.m_x * centre.m_z);
				Iyz = K * (Iyz + 9.0 * centre.m_y * centre.m_z);
				centre = centre.Scale ((float)area);
			}

			totalArea += area;
			massCenter += centre;
			var += dgVector ((float)Ixx, (float)Iyy, (float)Izz);
			cov += dgVector ((float)Ixy, (float)Ixz, (float)Iyz);
		}

		if (totalArea > dgEPSILON * 10.0) {
			K = 1.0 / totalArea;
			var = var.Scale ((float)K);
			cov = cov.Scale ((float)K);
			massCenter = massCenter.Scale ((float)K);
		}

		Ixx = var.m_x - massCenter.m_x * massCenter.m_x;
		Iyy = var.m_y - massCenter.m_y * massCenter.m_y;
		Izz = var.m_z - massCenter.m_z * massCenter.m_z;

		Ixy = cov.m_x - massCenter.m_x * massCenter.m_y;
		Ixz = cov.m_y - massCenter.m_x * massCenter.m_z;
		Iyz = cov.m_z - massCenter.m_y * massCenter.m_z;

		sphere.m_front = dgVector ((float)Ixx, (float)Ixy, (float)Ixz);
		sphere.m_up    = dgVector ((float)Ixy, (float)Iyy, (float)Iyz);
		sphere.m_right = dgVector ((float)Ixz, (float)Iyz, (float)Izz);
		sphere.EigenVectors(eigenValues);
	}
*/
}
+
+
// Default constructor: identity orientation with zero half-extents.
dgSphere::dgSphere ()
	:dgMatrix(dgGetIdentityMatrix()), m_size (0, 0, 0, 0)
{
//	HACD_ASSERT (0);
//	planeTest = FrontTest;
}
+
// Construct from an orientation quaternion, a position, and half-extents.
// NOTE(review): the unconditional HACD_ASSERT (0) marks this constructor as
// unexercised in this port -- confirm before relying on it.
dgSphere::dgSphere (const dgQuaternion &quat, const dgVector &position, const dgVector& dim)
	:dgMatrix(quat, position)
{
	SetDimensions (dim.m_x, dim.m_y, dim.m_z);
	HACD_ASSERT (0);
//	planeTest = FrontTest;
}
+
// Construct from an orientation/position matrix and half-extents.
dgSphere::dgSphere(const dgMatrix &matrix, const dgVector& dim)
	:dgMatrix(matrix)
{
	SetDimensions (dim.m_x, dim.m_y, dim.m_z);
//	HACD_ASSERT (0);
//	planeTest = FrontTest;
}
+
+
+
// Fits this oriented box to the triangle surface given by 'triangles'
// (indexCount indices, 3 per triangle) over the packed 'vertex' array.
// When 'basis' is supplied it is used as the box orientation verbatim;
// otherwise the axes come from the surface covariance (InternalSphere::
// Statistics), with the point set progressively stretched to break
// near-symmetric eigenvalues that would leave the axes ill-defined.
void dgSphere::SetDimensions (const float vertex[], int32_t strideInBytes, const int32_t triangles[], int32_t indexCount, const dgMatrix *basis)
{
	dgVector eigen;
	dgVector scaleVector (float (1.0f), float (1.0f), float (1.0f), float (0.0f));

	// A surface needs at least one triangle.
	if (indexCount < 3) {
		return;
	}

	int32_t stride = int32_t (strideInBytes / sizeof (float));
	if (!basis) {

		InternalSphere::Statistics (*this, eigen, scaleVector, vertex, triangles, indexCount, stride);

		int32_t k = 0;
		for (int32_t i = 0; i < 3; i ++) {
			// Give up after six refinement passes.
			if (k >= 6) {
				break;
			}
			for (int32_t j = i + 1; j < 3; j ++) {
				float aspect = InternalSphere::AspectRatio (eigen[i], eigen[j]);
				if (aspect > float (0.9f)) {
					// Two eigenvalues have nearly equal magnitude: stretch that
					// axis, recompute the statistics, and restart the scan
					// (i = -1 so the outer loop re-begins at 0).
					scaleVector[i] *= float (2.0f);
					InternalSphere::Statistics (*this, eigen, scaleVector, vertex, triangles, indexCount, stride);
					k ++;
					i = -1;
					break;
				}
			}
		}
	} else {
		*this = *basis;
	}

	// Extents of the vertices in the chosen frame; the box center is the
	// midpoint of the local AABB transformed back to world space.
	dgVector min;
	dgVector max;
	InternalSphere::BoundingBox (*this, vertex, stride, triangles, indexCount, min, max);

	dgVector massCenter (max + min);
	massCenter = massCenter.Scale (float (0.5f));
	m_posit = TransformVector (massCenter);

	dgVector dim (max - min);
	dim = dim.Scale (float(0.5f));
	SetDimensions (dim.m_x, dim.m_y, dim.m_z);
}
+
+
// Point-cloud variant: fits this oriented box to 'count' packed vertices.
// Same axis-selection strategy as the triangle variant above, but the
// covariance comes from the raw points, and -- unlike the triangle variant --
// each final half-extent is padded by InternalSphere::SPHERE_TOL.
void dgSphere::SetDimensions (const float vertex[], int32_t strideInBytes, int32_t count, const dgMatrix *basis)
{
	dgVector eigen;
	dgVector scaleVector (float(1.0f), float(1.0f), float(1.0f), float (0.0f));

	int32_t stride = int32_t (strideInBytes / sizeof (float));
	if (!basis) {
		InternalSphere::Statistics (*this, eigen, scaleVector, vertex, count, stride);

		int32_t k = 0;
		for (int32_t i = 0; i < 3; i ++) {
			// Give up after six refinement passes.
			if (k >= 6) {
				break;
			}
			for (int32_t j = i + 1; j < 3; j ++) {
				float aspect = InternalSphere::AspectRatio (eigen[i], eigen[j]);
				if (aspect > float (0.9f)) {
					// Near-equal eigenvalues: stretch this axis, recompute, and
					// restart the scan (i = -1 so the outer loop re-begins at 0).
					scaleVector[i] *= float (2.0f);
					InternalSphere::Statistics (*this, eigen, scaleVector, vertex, count, stride);
					k ++;
					i = -1;
					break;
				}
			}
		}
	} else {
		*this = *basis;
	}

	// Extents of the points in the chosen frame; the box center is the
	// midpoint of the local AABB transformed back to world space.
	dgVector min;
	dgVector max;
	InternalSphere::BoundingBox (*this, vertex, count, stride, min, max);

	dgVector massCenter (max + min);
	massCenter = massCenter.Scale (0.5);
	m_posit = TransformVector (massCenter);

	dgVector dim (max - min);
	dim = dim.Scale (float(0.5f));
	// Pad the half-extents so boundary points end strictly inside the box.
	SetDimensions (dim.m_x + InternalSphere::SPHERE_TOL,
				   dim.m_y + InternalSphere::SPHERE_TOL,
				   dim.m_z + InternalSphere::SPHERE_TOL);

}
+
+/*
+void dgSphere::SetDimensions (
+ const float vertex[],
+ int32_t strideInBytes,
+ const int32_t index[],
+ int32_t indexCount,
+ const dgMatrix *basis)
+{
+ int32_t i;
+ int32_t j;
+ int32_t k;
+ int32_t stride;
+ float aspect;
+ dgVector eigen;
+ dgVector scaleVector (float(1.0f), float(1.0f), float(1.0f), float (0.0f));
+
+ stride = strideInBytes / sizeof (float);
+ if (!basis) {
+ InternalSphere::Statistics (*this, eigen, scaleVector, vertex, index, indexCount, stride);
+
+ k = 0;
+ for (i = 0; i < 3; i ++) {
+ if (k >= 6) {
+ break;
+ }
+ for (j = i + 1; j < 3; j ++) {
+ aspect = InternalSphere::AspectRatio (eigen[i], eigen[j]);
+ if (aspect > float (0.9f)) {
+ scaleVector[i] *= float (2.0f);
+ InternalSphere::Statistics (*this, eigen, scaleVector, vertex, index, indexCount, stride);
+ i = -1;
+ k ++;
+ break;
+ }
+ }
+ }
+ } else {
+ *this = *basis;
+ }
+
+ dgVector min;
+ dgVector max;
+ InternalSphere::BoundingBox (*this, vertex, stride, index, indexCount, min, max);
+
+ dgVector massCenter (max + min);
+ massCenter = massCenter.Scale (float(0.5f));
+ m_posit = TransformVector (massCenter);
+
+ dgVector dim (max - min);
+ dim = dim.Scale (float(0.5f));
+ SetDimensions (dim.m_x + InternalSphere::SPHERE_TOL,
+ dim.m_y + InternalSphere::SPHERE_TOL,
+ dim.m_z + InternalSphere::SPHERE_TOL);
+}
+*/
+
+
+
+/*
+dgSphere::dgSphere (
+ const dgSphere &dgSphere,
+ const dgVector &Dir)
+{
+ if ((Dir % Dir) < EPSILON * 0.01f) {
+ *this = dgSphere;
+ return;
+ }
+
+ front = Dir;
+ front.Fast_Normalize();
+
+ if (dgAbsf (front % dgSphere.right) < 0.995) {
+ up = front * dgSphere.right;
+ up.Fast_Normalize();
+ } else {
+ up = dgSphere.up;
+ }
+ right = up * front;
+
+ dgVector Step (Dir.Scale(0.5));
+ size.m_x = (float)(dgSphere.size.m_x * dgAbsf (right % dgSphere.right) +
+ dgSphere.size.m_y * dgAbsf (right % dgSphere.up) +
+ dgSphere.size.m_z * dgAbsf (right % dgSphere.front));
+
+ size.m_y = (float)(dgSphere.size.m_x * dgAbsf (up % dgSphere.right) +
+ dgSphere.size.m_y * dgAbsf (up % dgSphere.up) +
+ dgSphere.size.m_z * dgAbsf (up % dgSphere.front));
+
+ size.m_z = (float)(sqrt (Step % Step) +
+ dgSphere.size.m_x * dgAbsf (front % dgSphere.right) +
+ dgSphere.size.m_y * dgAbsf (front % dgSphere.up) +
+ dgSphere.size.m_z * dgAbsf (front % dgSphere.front));
+ posit = dgSphere.posit + Step;
+
+}
+
+
+
+bool dgSphere::dgSphere_Overlap_Test (const dgSphere &dgSphere)
+{
+ double R;
+ dgVector Dir (dgSphere.posit - posit);
+
+ R = size.m_x * dgAbsf (right % Dir) + dgSphere.size.m_x * dgAbsf (dgSphere.right % Dir) +
+ size.m_y * dgAbsf (up % Dir) + dgSphere.size.m_y * dgAbsf (dgSphere.up % Dir) +
+ size.m_z * dgAbsf (front %Dir) + dgSphere.size.m_z * dgAbsf (dgSphere.front % Dir);
+ if (R < (Dir % Dir)) {
+ return false;
+ }
+
+ R = size.m_x * dgAbsf (right % dgSphere.right) +
+ size.m_y * dgAbsf (up % dgSphere.right) +
+ size.m_z * dgAbsf (front % dgSphere.right) + dgSphere.size.m_x;
+ if (R < dgAbsf (Dir % dgSphere.right)) {
+ return false;
+ }
+
+ R = size.m_x * dgAbsf (right % dgSphere.up) +
+ size.m_y * dgAbsf (up % dgSphere.up) +
+ size.m_z * dgAbsf (front % dgSphere.up) + dgSphere.size.m_y;
+ if (R < dgAbsf (Dir % dgSphere.up)) {
+ return false;
+ }
+
+ R = size.m_x * dgAbsf (right % dgSphere.front) +
+ size.m_y * dgAbsf (up % dgSphere.front) +
+ size.m_z * dgAbsf (front % dgSphere.front) + dgSphere.size.m_z;
+ if (R < dgAbsf (Dir % dgSphere.front)) {
+ return false;
+ }
+
+ R = dgSphere.size.m_x * dgAbsf (dgSphere.right % right) +
+ dgSphere.size.m_y * dgAbsf (dgSphere.up % right) +
+ dgSphere.size.m_z * dgAbsf (dgSphere.front % right) + size.m_x;
+ if (R < dgAbsf (Dir % right)) {
+ return false;
+ }
+
+ R = dgSphere.size.m_x * dgAbsf (dgSphere.right % up) +
+ dgSphere.size.m_y * dgAbsf (dgSphere.up % up) +
+ dgSphere.size.m_z * dgAbsf (dgSphere.front % up) + size.m_y;
+ if (R < dgAbsf (Dir % up)) {
+ return false;
+ }
+
+ R = dgSphere.size.m_x * dgAbsf (dgSphere.right % front) +
+ dgSphere.size.m_y * dgAbsf (dgSphere.up % front) +
+ dgSphere.size.m_z * dgAbsf (dgSphere.front % front) + size.m_z;
+ if (R < dgAbsf (Dir % front)) {
+ return false;
+ }
+
+ return true;
+}
+
+
+void dgSphere::Swept_Volume (
+ dgVector &min,
+ dgVector &max)
+{
+ float w;
+ float h;
+ float b;
+
+ w = (float)(size.m_x * dgAbsf(right.m_x) + size.m_y * dgAbsf(up.m_x) + size.m_z * dgAbsf(front.m_x));
+ h = (float)(size.m_x * dgAbsf(right.m_y) + size.m_y * dgAbsf(up.m_y) + size.m_z * dgAbsf(front.m_y));
+ b = (float)(size.m_x * dgAbsf(right.m_z) + size.m_y * dgAbsf(up.m_z) + size.m_z * dgAbsf(front.m_z));
+
+ min.m_x = posit.m_x - w;
+ min.m_y = posit.m_y - h;
+ min.m_z = posit.m_z - b;
+
+ max.m_x = posit.m_x + w;
+ max.m_y = posit.m_y + h;
+ max.m_z = posit.m_z + b;
+}
+*/
+
+
+/*
+int32_t dgSphere::FrontTest (
+ const dgMatrix &matrix,
+ const dgPlane* plane) const
+{
+ float R;
+ float dR;
+ InternalSphere::dgFloatSign flag0;
+ InternalSphere::dgFloatSign flag1;
+
+ dR = m_size.m_x * dgAbsf (matrix.m_front.m_x) + m_size.m_y * dgAbsf (matrix.m_up.m_x) + m_size.m_z * dgAbsf (matrix.m_right.m_x);
+ R = plane[5].m_x * matrix.m_posit.m_x + plane[5].m_w;
+
+ flag0.f = R + dR;
+ flag1.f = R - dR;
+ flag0.i = flag0.i >> 30 & 2;
+ flag1.i = flag1.i >> 31 & 1;
+ return InternalSphere::CodeTbl[flag0.i | flag1.i];
+}
+
+int32_t dgSphere::RearTest (const dgMatrix &matrix, const dgPlane* plane) const
+{
+ float R;
+ float dR;
+ InternalSphere::dgFloatSign flag0;
+ InternalSphere::dgFloatSign flag1;
+
+ dR = m_size.m_x * dgAbsf (matrix.m_front.m_x) + m_size.m_y * dgAbsf (matrix.m_up.m_x) + m_size.m_z * dgAbsf (matrix.m_right.m_x);
+ R = plane[4].m_x * matrix.m_posit.m_x + plane[4].m_w;
+
+ flag0.f = R + dR;
+ flag1.f = R - dR;
+ flag0.i = flag0.i >> 30 & 2;
+ flag1.i = flag1.i >> 31 & 1;
+ return InternalSphere::CodeTbl[flag0.i | flag1.i];
+}
+
+
+int32_t dgSphere::LeftTest (const dgMatrix &matrix, const dgPlane* plane) const
+{
+ float R;
+ float dR;
+ InternalSphere::dgFloatSign flag0;
+ InternalSphere::dgFloatSign flag1;
+
+ dR = m_size.m_x * dgAbsf (matrix.m_front.m_x * plane[0].m_x + matrix.m_front.m_z * plane[0].m_z) +
+ m_size.m_y * dgAbsf (matrix.m_up.m_x * plane[0].m_x + matrix.m_up.m_z * plane[0].m_z) +
+ m_size.m_z * dgAbsf (matrix.m_right.m_x * plane[0].m_x + matrix.m_right.m_z * plane[0].m_z);
+ R = plane[0].m_x * matrix.m_posit.m_x + plane[0].m_z * matrix.m_posit.m_z;
+
+ flag0.f = R + dR;
+ flag1.f = R - dR;
+ flag0.i = (flag0.i >> 30) & 2;
+ flag1.i = (flag1.i >> 31) & 1;
+ return InternalSphere::CodeTbl[flag0.i | flag1.i];
+}
+
+int32_t dgSphere::RightTest (const dgMatrix &matrix, const dgPlane* plane) const
+{
+ float R;
+ float dR;
+ InternalSphere::dgFloatSign flag0;
+ InternalSphere::dgFloatSign flag1;
+
+ dR = m_size.m_x * dgAbsf (matrix.m_front.m_x * plane[1].m_x + matrix.m_front.m_z * plane[1].m_z) +
+ m_size.m_y * dgAbsf (matrix.m_up.m_x * plane[1].m_x + matrix.m_up.m_z * plane[1].m_z) +
+ m_size.m_z * dgAbsf (matrix.m_right.m_x * plane[1].m_x + matrix.m_right.m_z * plane[1].m_z);
+ R = plane[1].m_x * matrix.m_posit.m_x + plane[1].m_z * matrix.m_posit.m_z;
+
+ flag0.f = R + dR;
+ flag1.f = R - dR;
+ flag0.i = (flag0.i >> 30) & 2;
+ flag1.i = (flag1.i >> 31) & 1;
+ return InternalSphere::CodeTbl[flag0.i | flag1.i];
+}
+
+int32_t dgSphere::BottomTest (const dgMatrix &matrix, const dgPlane* plane) const
+{
+ float R;
+ float dR;
+ InternalSphere::dgFloatSign flag0;
+ InternalSphere::dgFloatSign flag1;
+
+ dR = m_size.m_x * dgAbsf (matrix.m_front.m_x * plane[2].m_x + matrix.m_front.m_y * plane[2].m_y) +
+ m_size.m_y * dgAbsf (matrix.m_up.m_x * plane[2].m_x + matrix.m_up.m_y * plane[2].m_y) +
+ m_size.m_z * dgAbsf (matrix.m_right.m_x * plane[2].m_x + matrix.m_right.m_y * plane[2].m_y);
+
+ R = plane[2].m_x * matrix.m_posit.m_x + plane[2].m_y * matrix.m_posit.m_y;
+
+ flag0.f = R + dR;
+ flag1.f = R - dR;
+ flag0.i = (flag0.i >> 30) & 2;
+ flag1.i = (flag1.i >> 31) & 1;
+
+ return InternalSphere::CodeTbl[flag0.i | flag1.i];
+}
+
+int32_t dgSphere::TopTest (const dgMatrix &matrix, const dgPlane* plane) const
+{
+ float R;
+ float dR;
+ InternalSphere::dgFloatSign flag0;
+ InternalSphere::dgFloatSign flag1;
+
+ dR = m_size.m_x * dgAbsf (matrix.m_front.m_x * plane[3].m_x + matrix.m_front.m_y * plane[3].m_y) +
+ m_size.m_y * dgAbsf (matrix.m_up.m_x * plane[3].m_x + matrix.m_up.m_y * plane[3].m_y) +
+ m_size.m_z * dgAbsf (matrix.m_right.m_x * plane[3].m_x + matrix.m_right.m_y * plane[3].m_y);
+
+ R = plane[3].m_x * matrix.m_posit.m_x + plane[3].m_y * matrix.m_posit.m_y;
+
+ flag0.f = R + dR;
+ flag1.f = R - dR;
+ flag0.i = (flag0.i >> 30) & 2;
+ flag1.i = (flag1.i >> 31) & 1;
+ return InternalSphere::CodeTbl[flag0.i | flag1.i];
+}
+
+
+
+int32_t dgSphere::VisibilityTestLow (
+ const dgCamera* camera,
+ const dgMatrix& matrix) const
+{
+ int32_t i;
+ int32_t code;
+ const dgPlane* planes;
+ const dgPlane* guardPlanes;
+
+ planes = camera->GetViewVolume();
+
+ code = (this->*planeTest) (matrix, planes);
+ if (code != -1) {
+ for (i = 0; i < 6; i ++) {
+ code |= (this->*planeTestArray[i]) (matrix, planes);
+ if (code == -1) {
+ planeTest = planeTestArray[i];
+ return -1;
+ }
+ }
+
+ if (code) {
+ guardPlanes = camera->GetGuardViewVolume();
+ if (guardPlanes) {
+ code = 0;
+ for (i = 0; i < 6; i ++) {
+ code |= (this->*planeTestArray[i]) (matrix, guardPlanes);
+ HACD_ASSERT (code >= 0);
+ if (code) {
+ return code;
+ }
+ }
+ }
+ }
+ }
+
+ return code;
+}
+
+
+int32_t dgSphere::VisibilityTest (const dgCamera* camera) const
+{
+ dgMatrix viewMatrix (*this * camera->GetViewMatrix());
+ return VisibilityTestLow (camera, viewMatrix);
+}
+
+int32_t dgSphere::VisibilityTest (const dgCamera* camera, const dgMatrix &worldMatrix) const
+{
+ dgMatrix viewMatrix (*this * worldMatrix * camera->GetViewMatrix());
+ return VisibilityTestLow (camera, viewMatrix);
+}
+
+void dgSphere::Render (
+ const dgCamera* camera,
+ const dgMatrix &worldMatrix,
+ uint32_t rgb) const
+{
+ int32_t i;
+ struct ColorVertex
+ {
+ float m_x;
+ float m_y;
+ float m_z;
+ dgColor m_color;
+ };
+
+ uint32_t index [][2] = {
+ {0, 4}, {1, 5}, {2, 6}, {3, 7},
+ {0, 1}, {4, 5}, {7, 6}, {3, 2},
+ {1, 2}, {5, 6}, {4, 7}, {0, 3},
+ };
+
+ ColorVertex* ptr;
+ ColorVertex box[8];
+
+ box[0].m_x = -size.m_x;
+ box[0].m_y = -size.m_y;
+ box[0].m_z = -size.m_z;
+ box[0].m_color.m_val = rgb;
+
+ box[1].m_x = size.m_x;
+ box[1].m_y = -size.m_y;
+ box[1].m_z = -size.m_z;
+ box[1].m_color.m_val = rgb;
+
+ box[2].m_x = size.m_x;
+ box[2].m_y = -size.m_y;
+ box[2].m_z = size.m_z;
+ box[2].m_color.m_val = rgb;
+
+ box[3].m_x = -size.m_x;
+ box[3].m_y = -size.m_y;
+ box[3].m_z = size.m_z;
+ box[3].m_color.m_val = rgb;
+
+ box[4].m_x = -size.m_x;
+ box[4].m_y = size.m_y;
+ box[4].m_z = -size.m_z;
+ box[4].m_color.m_val = rgb;
+
+ box[5].m_x = size.m_x;
+ box[5].m_y = size.m_y;
+ box[5].m_z = -size.m_z;
+ box[5].m_color.m_val = rgb;
+
+ box[6].m_x = size.m_x;
+ box[6].m_y = size.m_y;
+ box[6].m_z = size.m_z;
+ box[6].m_color.m_val = rgb;
+
+ box[7].m_x = -size.m_x;
+ box[7].m_y = size.m_y;
+ box[7].m_z = size.m_z;
+ box[7].m_color.m_val = rgb;
+
+ dgRenderDescriptorParams param;
+ param.m_indexCount = 0;
+ param.m_vertexCount = sizeof (index) / sizeof (int32_t);
+ param.m_descType = dgDynamicVertex;
+ param.m_primitiveType = RENDER_LINELIST;
+ param.m_vertexFlags = VERTEX_ENABLE_XYZ | COLOR_ENABLE;
+
+ dgRenderDescriptor desc (param);
+
+ dgMatrix tmpMat (*this * worldMatrix);
+ camera->SetWorldMatrix (&tmpMat);
+
+ desc.m_material = dgMaterial::UseDebugMaterial();
+
+ dgVertexRecord vertexRecord (desc.LockVertex());
+ ptr = (ColorVertex*) vertexRecord.vertex.ptr;
+ for (i = 0; i < (sizeof (index) / (2 * sizeof (uint32_t))); i ++) {
+ ptr[0] = box[index[i][0]];
+ ptr[1] = box[index[i][1]];
+ ptr += 2;
+ }
+ desc.UnlockVertex();
+
+ camera->Render (desc);
+
+ desc.m_material->Release();
+}
+*/
+
diff --git a/APEX_1.4/shared/general/HACD/src/dgTree.cpp b/APEX_1.4/shared/general/HACD/src/dgTree.cpp
new file mode 100644
index 00000000..b1cf4776
--- /dev/null
+++ b/APEX_1.4/shared/general/HACD/src/dgTree.cpp
@@ -0,0 +1,415 @@
+/* Copyright (c) <2003-2011> <Julio Jerez, Newton Game Dynamics>
+*
+* This software is provided 'as-is', without any express or implied
+* warranty. In no event will the authors be held liable for any damages
+* arising from the use of this software.
+*
+* Permission is granted to anyone to use this software for any purpose,
+* including commercial applications, and to alter it and redistribute it
+* freely, subject to the following restrictions:
+*
+* 1. The origin of this software must not be misrepresented; you must not
+* claim that you wrote the original software. If you use this software
+* in a product, an acknowledgment in the product documentation would be
+* appreciated but is not required.
+*
+* 2. Altered source versions must be plainly marked as such, and must not be
+* misrepresented as being the original software.
+*
+* 3. This notice may not be removed or altered from any source distribution.
+*/
+
+#include "dgTypes.h"
+#include "dgTree.h"
+
+
+dgRedBackNode *dgRedBackNode::Minimum () const
+{
+ dgRedBackNode* ptr = (dgRedBackNode *)this;
+ for (; ptr->m_left; ptr = ptr->m_left){}
+ return ptr;
+}
+
+dgRedBackNode *dgRedBackNode::Maximum () const
+{
+ dgRedBackNode* ptr = (dgRedBackNode *)this;
+ for (; ptr->m_right; ptr = ptr->m_right){}
+ return ptr;
+}
+
+
+dgRedBackNode *dgRedBackNode::Prev () const
+{
+ if (m_left) {
+ return m_left->Maximum ();
+ }
+
+ dgRedBackNode* node = (dgRedBackNode *)this;
+ dgRedBackNode* ptr = m_parent;
+ for (; ptr && node == ptr->m_left; ptr = ptr->m_parent) {
+ node = ptr;
+ }
+ return ptr;
+
+}
+
+dgRedBackNode *dgRedBackNode::Next () const
+{
+
+ if (m_right) {
+ return m_right->Minimum ();
+ }
+
+ dgRedBackNode* node = (dgRedBackNode *)this;
+ dgRedBackNode* ptr = m_parent;
+ for (; ptr && node == ptr->m_right; ptr = ptr->m_parent) {
+ node = ptr;
+ }
+ return ptr;
+}
+
+// rotate node me to left
+void dgRedBackNode::RotateLeft(dgRedBackNode** const head)
+{
+ dgRedBackNode* const me = this;
+ dgRedBackNode* const child = me->m_right;
+
+ //establish me->m_right link
+ me->m_right = child->m_left;
+ if (child->m_left != NULL) {
+ child->m_left->m_parent = me;
+ }
+
+ // establish child->m_parent link
+ if (child != NULL) {
+ child->m_parent = me->m_parent;
+ }
+ if (me->m_parent) {
+ if (me == me->m_parent->m_left) {
+ me->m_parent->m_left = child;
+ } else {
+ me->m_parent->m_right = child;
+ }
+ } else {
+ *head = child;
+ }
+
+ // link child and me
+ child->m_left = me;
+ if (me != NULL) {
+ me->m_parent = child;
+ }
+}
+
+
+// rotate node me to right *
+void dgRedBackNode::RotateRight(dgRedBackNode ** const head)
+{
+ dgRedBackNode* const me = this;
+ dgRedBackNode* const child = me->m_left;
+
+ // establish me->m_left link
+ me->m_left = child->m_right;
+ if (child->m_right != NULL) {
+ child->m_right->m_parent = me;
+ }
+
+ // establish child->m_parent link
+ if (child != NULL) {
+ child->m_parent = me->m_parent;
+ }
+ if (me->m_parent) {
+ if (me == me->m_parent->m_right) {
+ me->m_parent->m_right = child;
+ } else {
+ me->m_parent->m_left = child;
+ }
+ } else {
+ *head = child;
+ }
+
+ // link me and child
+ child->m_right = me;
+ if (me != NULL) {
+ me->m_parent = child;
+ }
+}
+
+
// maintain Red-Black tree balance after inserting node ptr
// Standard bottom-up insertion fixup: while the current node's parent is RED
// (a red-red violation), either recolor (red uncle) and continue from the
// grandparent, or rotate (black uncle) and stop. The root is forced BLACK
// at the end.
void dgRedBackNode::InsertFixup(dgRedBackNode ** const head)
{
	dgRedBackNode* ptr = this;
	// check Red-Black properties
	while ((ptr != *head) && (ptr->m_parent->GetColor() == RED)) {
		// we have a violation
		if (ptr->m_parent == ptr->m_parent->m_parent->m_left) {
			// parent is a left child; the "uncle" is the grandparent's right child
			dgRedBackNode* const tmp = ptr->m_parent->m_parent->m_right;
			if (tmp && (tmp->GetColor() == RED)) {
				// uncle is RED: push the blackness down and retry from grandparent
				ptr->m_parent->SetColor(BLACK);
				tmp->SetColor(BLACK) ;
				ptr->m_parent->m_parent->SetColor(RED) ;
				ptr = ptr->m_parent->m_parent;
			} else {
				// uncle is BLACK
				if (ptr == ptr->m_parent->m_right) {
					// make ptr a left child
					ptr = ptr->m_parent;
					ptr->RotateLeft(head);
				}

				ptr->m_parent->SetColor(BLACK);
				if (ptr->m_parent->m_parent) {
					ptr->m_parent->m_parent->SetColor(RED);
					ptr->m_parent->m_parent->RotateRight(head);
				}
			}
		} else {
			HACD_ASSERT (ptr->m_parent == ptr->m_parent->m_parent->m_right);
			// mirror image of above code
			dgRedBackNode* const tmp = ptr->m_parent->m_parent->m_left;
			if (tmp && (tmp->GetColor() == RED)) {
				//uncle is RED
				ptr->m_parent->SetColor(BLACK);
				tmp->SetColor(BLACK) ;
				ptr->m_parent->m_parent->SetColor(RED) ;
				ptr = ptr->m_parent->m_parent;
			} else {
				// uncle is BLACK
				if (ptr == ptr->m_parent->m_left) {
					ptr = ptr->m_parent;
					ptr->RotateRight(head);
				}
				ptr->m_parent->SetColor(BLACK);
				// NOTE(review): asymmetric with the left-child case above, which
				// guards on the grandparent's existence rather than on its color.
				// Since the parent is RED here, the grandparent exists and (with
				// valid invariants) is BLACK, so both guards should hold — but
				// confirm the asymmetry was intentional.
				if (ptr->m_parent->m_parent->GetColor() == BLACK) {
					ptr->m_parent->m_parent->SetColor(RED) ;
					ptr->m_parent->m_parent->RotateLeft (head);
				}
			}
		}
	}
	(*head)->SetColor(BLACK);
}
+
+
//maintain Red-Black tree balance after deleting node x
// 'this' is the parent of the spliced-out node; 'thisNode' is the child that
// replaced it (may be NULL). Classic delete-fixup: while 'node' carries an
// "extra black", recolor/rotate its sibling ('tmp') to push the extra black
// up or discharge it, then force 'node' BLACK. This variant adds defensive
// NULL early-outs that bail rather than continuing with a broken tree.
void dgRedBackNode::RemoveFixup (dgRedBackNode* const thisNode, dgRedBackNode ** const head)
{
	dgRedBackNode* ptr = this;
	dgRedBackNode* node = thisNode;
	while ((node != *head) && (!node || node->GetColor() == BLACK)) {
		// NOTE(review): 'ptr->m_left' is dereferenced in the test below before
		// the '!ptr' guards inside the branches — if ptr can ever be NULL at
		// the top of the loop this is undefined behavior; confirm callers.
		if (node == ptr->m_left) {
			if (!ptr) {
				return;
			}
			// 'tmp' is node's sibling
			dgRedBackNode* tmp = ptr->m_right;
			if (!tmp) {
				return;
			}
			if (tmp->GetColor() == RED) {
				// case 1: RED sibling — rotate so the sibling becomes BLACK
				tmp->SetColor(BLACK) ;
				ptr->SetColor(RED) ;
				ptr->RotateLeft (head);
				tmp = ptr->m_right;
				if (!ptr || !tmp) {
					return;
				}
			}
			if ((!tmp->m_left || (tmp->m_left->GetColor() == BLACK)) &&
				(!tmp->m_right || (tmp->m_right->GetColor() == BLACK))) {
				// case 2: sibling's children both BLACK — recolor and move up
				tmp->SetColor(RED);
				node = ptr;
				ptr = ptr->m_parent;
				continue;
			} else if (!tmp->m_right || (tmp->m_right->GetColor() == BLACK)) {
				// case 3: far child BLACK — rotate sibling to create case 4
				tmp->m_left->SetColor(BLACK);
				tmp->SetColor(RED);
				tmp->RotateRight (head);
				tmp = ptr->m_right;
				if (!ptr || !tmp) {
					return;
				}
			}
			// case 4: far child RED — final recolor/rotation, then terminate
			tmp->SetColor (ptr->GetColor());
			if (tmp->m_right) {
				tmp->m_right->SetColor(BLACK) ;
			}
			if (ptr) {
				ptr->SetColor(BLACK) ;
				ptr->RotateLeft (head);
			}
			node = *head;

		} else {
			// mirror image: 'node' is the right child
			if (!ptr) {
				return;
			}
			dgRedBackNode* tmp = ptr->m_left;
			if (!tmp) {
				return;
			}
			if (tmp->GetColor() == RED) {
				// case 1 (mirrored)
				tmp->SetColor(BLACK) ;
				ptr->SetColor(RED) ;
				ptr->RotateRight (head);
				tmp = ptr->m_left;
				if (!ptr || !tmp) {
					return;
				}
			}

			if ((!tmp->m_right || (tmp->m_right->GetColor() == BLACK)) &&
				(!tmp->m_left || (tmp->m_left->GetColor() == BLACK))) {
				// case 2 (mirrored)
				tmp->SetColor(RED) ;
				node = ptr;
				ptr = ptr->m_parent;
				continue;
			} else if (!tmp->m_left || (tmp->m_left->GetColor() == BLACK)) {
				// case 3 (mirrored)
				tmp->m_right->SetColor(BLACK) ;
				tmp->SetColor(RED) ;
				tmp->RotateLeft (head);
				tmp = ptr->m_left;
				if (!ptr || !tmp) {
					return;
				}
			}
			// case 4 (mirrored)
			tmp->SetColor (ptr->GetColor());
			if (tmp->m_left) {
				tmp->m_left->SetColor(BLACK);
			}
			if (ptr) {
				ptr->SetColor(BLACK) ;
				ptr->RotateRight (head);
			}
			node = *head;
		}
	}
	// discharge the extra black on the terminating node
	if (node) {
		node->SetColor(BLACK);
	}
}
+
// Detach this node from the red-black tree without deleting it.
// Standard BST removal: a node with at most one child is spliced out
// directly; a node with two children exchanges place (links and color) with
// its in-order successor, which has no left child. When the removed color
// is BLACK, RemoveFixup restores the red-black invariants. '*head' is
// updated if the root changes.
void dgRedBackNode::Unlink (dgRedBackNode ** const head)
{
//	dgRedBackNode *child;
//	dgRedBackNode *endNode;
//	dgRedBackNode *endNodeParent;
//	dgRedBackNode::REDBLACK_COLOR oldColor;

	dgRedBackNode* const node = this;
//	node->Kill();
	node->SetInTreeFlag(false);

	if (!node->m_left || !node->m_right) {
		// y has a NULL node as a child
		dgRedBackNode* const endNode = node;

		// x is y's only child
		dgRedBackNode* child = endNode->m_right;
		if (endNode->m_left) {
			child = endNode->m_left;
		}

		// remove y from the parent chain
		if (child) {
			child->m_parent = endNode->m_parent;
		}

		if (endNode->m_parent) {
			if (endNode == endNode->m_parent->m_left) {
				endNode->m_parent->m_left = child;
			} else {
				endNode->m_parent->m_right = child;
			}
		} else {
			*head = child;
		}

		if (endNode->GetColor() == BLACK) {
			// NOTE(review): if endNode is the root, m_parent is NULL and this
			// invokes RemoveFixup through a null pointer — undefined behavior
			// (even though the fixup loop would exit immediately when child is
			// the new head). Confirm a BLACK root is never unlinked this way.
			endNode->m_parent->RemoveFixup (child, head);
		}
//		endNode->Release();
//		delete endNode;

	} else {

		// find tree successor with a NULL node as a child
		dgRedBackNode* endNode = node->m_right;
		while (endNode->m_left != NULL) {
			endNode = endNode->m_left;
		}

		HACD_ASSERT (endNode);
		HACD_ASSERT (endNode->m_parent);
		HACD_ASSERT (!endNode->m_left);

		// x is y's only child
		dgRedBackNode* const child = endNode->m_right;

		HACD_ASSERT ((endNode != node->m_right) || !child || (child->m_parent == endNode));

		// graft node's left subtree onto the successor
		endNode->m_left = node->m_left;
		node->m_left->m_parent = endNode;

		// splice the successor out of its old position (unless it was
		// node's immediate right child) and remember where fixup starts
		dgRedBackNode* endNodeParent = endNode;
		if (endNode != node->m_right) {
			if (child) {
				child->m_parent = endNode->m_parent;
			}
			endNode->m_parent->m_left = child;
			endNode->m_right = node->m_right;
			node->m_right->m_parent = endNode;
			endNodeParent = endNode->m_parent;
		}

		// put the successor where node used to be
		if (node == *head) {
			*head = endNode;
		} else if (node == node->m_parent->m_left) {
			node->m_parent->m_left = endNode;
		} else {
			node->m_parent->m_right = endNode;
		}
		endNode->m_parent = node->m_parent;

		// swap colors so the tree sees the successor's color as removed
		dgRedBackNode::REDBLACK_COLOR oldColor = endNode->GetColor();
		endNode->SetColor (node->GetColor());
		node->SetColor (oldColor);

		if (oldColor == BLACK) {
			endNodeParent->RemoveFixup (child, head);
		}
//		node->Release();
//		delete node;
	}
}
+
+void dgRedBackNode::Remove (dgRedBackNode ** const head)
+{
+ Unlink (head);
+ delete this;
+}
+
+void dgRedBackNode::RemoveAllLow ()
+{
+ if (m_left) {
+ m_left->RemoveAllLow();
+ }
+ if (m_right) {
+ m_right->RemoveAllLow ();
+ }
+ SetInTreeFlag(false);
+// Kill();
+// Release();
+ delete this;
+}
+
+void dgRedBackNode::RemoveAll ()
+{
+ dgRedBackNode* root = this;
+ for (; root->m_parent; root = root->m_parent);
+ root->RemoveAllLow();
+}
+
+
diff --git a/APEX_1.4/shared/general/HACD/src/dgTypes.cpp b/APEX_1.4/shared/general/HACD/src/dgTypes.cpp
new file mode 100644
index 00000000..ddb3b2f8
--- /dev/null
+++ b/APEX_1.4/shared/general/HACD/src/dgTypes.cpp
@@ -0,0 +1,426 @@
+/* Copyright (c) <2003-2011> <Julio Jerez, Newton Game Dynamics>
+*
+* This software is provided 'as-is', without any express or implied
+* warranty. In no event will the authors be held liable for any damages
+* arising from the use of this software.
+*
+* Permission is granted to anyone to use this software for any purpose,
+* including commercial applications, and to alter it and redistribute it
+* freely, subject to the following restrictions:
+*
+* 1. The origin of this software must not be misrepresented; you must not
+* claim that you wrote the original software. If you use this software
+* in a product, an acknowledgment in the product documentation would be
+* appreciated but is not required.
+*
+* 2. Altered source versions must be plainly marked as such, and must not be
+* misrepresented as being the original software.
+*
+* 3. This notice may not be removed or altered from any source distribution.
+*/
+
+#include "dgTypes.h"
+#include "dgVector.h"
+#include "dgStack.h"
+#include <string.h>
+
+void GetMinMax (dgVector &minOut, dgVector &maxOut, const float* const vertexArray, int32_t vCount, int32_t strideInBytes)
+{
+ int32_t stride = int32_t (strideInBytes / sizeof (float));
+ const float* vArray = vertexArray + stride;
+
+ HACD_ASSERT (stride >= 3);
+ minOut = dgVector (vertexArray[0], vertexArray[1], vertexArray[2], float (0.0f));
+ maxOut = dgVector (vertexArray[0], vertexArray[1], vertexArray[2], float (0.0f));
+
+ for (int32_t i = 1; i < vCount; i ++) {
+ minOut.m_x = GetMin (minOut.m_x, vArray[0]);
+ minOut.m_y = GetMin (minOut.m_y, vArray[1]);
+ minOut.m_z = GetMin (minOut.m_z, vArray[2]);
+
+ maxOut.m_x = GetMax (maxOut.m_x, vArray[0]);
+ maxOut.m_y = GetMax (maxOut.m_y, vArray[1]);
+ maxOut.m_z = GetMax (maxOut.m_z, vArray[2]);
+
+ vArray += stride;
+ }
+}
+
+
+void GetMinMax (dgBigVector &minOut, dgBigVector &maxOut, const double* const vertexArray, int32_t vCount, int32_t strideInBytes)
+{
+ int32_t stride = int32_t (strideInBytes / sizeof (double));
+ const double* vArray = vertexArray + stride;
+
+ HACD_ASSERT (stride >= 3);
+ minOut = dgBigVector (vertexArray[0], vertexArray[1], vertexArray[2], double (0.0f));
+ maxOut = dgBigVector (vertexArray[0], vertexArray[1], vertexArray[2], double (0.0f));
+
+ for (int32_t i = 1; i < vCount; i ++) {
+ minOut.m_x = GetMin (minOut.m_x, vArray[0]);
+ minOut.m_y = GetMin (minOut.m_y, vArray[1]);
+ minOut.m_z = GetMin (minOut.m_z, vArray[2]);
+
+ maxOut.m_x = GetMax (maxOut.m_x, vArray[0]);
+ maxOut.m_y = GetMax (maxOut.m_y, vArray[1]);
+ maxOut.m_z = GetMax (maxOut.m_z, vArray[2]);
+
+ vArray += stride;
+ }
+}
+
+
+
// Three-way comparison of two vertex rows on a single component:
// returns -1, 0 or 1 as v1[firstSortAxis] is less than, equal to,
// or greater than v2[firstSortAxis].
static inline int32_t cmp_vertex (const double* const v1, const double* const v2, int32_t firstSortAxis)
{
	const double a = v1[firstSortAxis];
	const double b = v2[firstSortAxis];
	if (a < b) {
		return -1;
	}
	if (a > b) {
		return 1;
	}
	return 0;
}
+
// Weld near-coincident vertices in place and assign each row its final index.
// Row layout of 'vertexList' (stride doubles per row):
//   [0] = output index (-1.0 marks "not yet remapped"), [1] = original row id,
//   [2..] = vertex attributes, with [2],[3],[4] being x, y, z.
// Rows are sorted along the axis of greatest positional variance, then a
// sweep within 'sweptWindow' merges rows whose first 'compareCount'
// attributes all differ by no more than 'tol'. Returns the number of
// unique vertices, packed at the front of the array.
static int32_t SortVertices (double* const vertexList, int32_t stride, int32_t compareCount, int32_t vertexCount, double tolerance)
{
	// accumulate first and second moments per axis, and the bounding box
	double xc = 0;
	double yc = 0;
	double zc = 0;
	double x2c = 0;
	double y2c = 0;
	double z2c = 0;

	dgBigVector minP (1e10, 1e10, 1e10, 0);
	dgBigVector maxP (-1e10, -1e10, -1e10, 0);
	int32_t k = 0;
	for (int32_t i = 0; i < vertexCount; i ++) {
		double x = vertexList[k + 2];
		double y = vertexList[k + 3];
		double z = vertexList[k + 4];
		k += stride;

		xc += x;
		yc += y;
		zc += z;
		x2c += x * x;
		y2c += y * y;
		z2c += z * z;

		if (x < minP.m_x) {
			minP.m_x = x;
		}
		if (y < minP.m_y) {
			minP.m_y = y;
		}

		if (z < minP.m_z) {
			minP.m_z = z;
		}

		if (x > maxP.m_x) {
			maxP.m_x = x;
		}
		if (y > maxP.m_y) {
			maxP.m_y = y;
		}

		if (z > maxP.m_z) {
			maxP.m_z = z;
		}
	}

	// scale the weld tolerance by the smallest bounding-box extent
	// (clamped so a flat mesh does not collapse the tolerance to zero)
	dgBigVector del (maxP - minP);
	double minDist = GetMin (del.m_x, del.m_y, del.m_z);
	if (minDist < 1.0e-3) {
		minDist = 1.0e-3;
	}

	double tol = tolerance * minDist + 1.0e-12f;
	double sweptWindow = 2.0 * tol;
	sweptWindow += 1.0e-4;

	// variance (times vertexCount) per axis; pick the most spread-out axis
	// as the sort key (offset 2/3/4 = x/y/z within a row)
	x2c = vertexCount * x2c - xc * xc;
	y2c = vertexCount * y2c - yc * yc;
	z2c = vertexCount * z2c - zc * zc;

	int32_t firstSortAxis = 2;
	if ((y2c >= x2c) && (y2c >= z2c)) {
		firstSortAxis = 3;
	} else if ((z2c >= x2c) && (z2c >= y2c)) {
		firstSortAxis = 4;
	}


	// explicit-stack quicksort of whole rows keyed on firstSortAxis,
	// falling back to insertion sort for partitions of <= 9 rows
	// NOTE(review): the row buffers val[64]/tmp[64] assume stride <= 64;
	// a larger stride would overflow them — confirm callers bound stride.
	int32_t stack[1024][2];
	stack[0][0] = 0;
	stack[0][1] = vertexCount - 1;
	int32_t stackIndex = 1;
	while (stackIndex) {
		stackIndex --;
		int32_t lo = stack[stackIndex][0];
		int32_t hi = stack[stackIndex][1];
		if ((hi - lo) > 8) {
			// median-of-range pivot: copy the middle row out
			int32_t i = lo;
			int32_t j = hi;
			double val[64];
			memcpy (val, &vertexList[((lo + hi) >> 1) * stride], stride * sizeof (double));
			do {
				while (cmp_vertex (&vertexList[i * stride], val, firstSortAxis) < 0) i ++;
				while (cmp_vertex (&vertexList[j * stride], val, firstSortAxis) > 0) j --;

				if (i <= j) {
					// swap whole rows
					double tmp[64];
					memcpy (tmp, &vertexList[i * stride], stride * sizeof (double));
					memcpy (&vertexList[i * stride], &vertexList[j * stride], stride * sizeof (double));
					memcpy (&vertexList[j * stride], tmp, stride * sizeof (double));
					i++;
					j--;
				}
			} while (i <= j);

			// push the two sub-partitions
			if (i < hi) {
				stack[stackIndex][0] = i;
				stack[stackIndex][1] = hi;
				stackIndex ++;
			}
			if (lo < j) {
				stack[stackIndex][0] = lo;
				stack[stackIndex][1] = j;
				stackIndex ++;
			}
			HACD_ASSERT (stackIndex < int32_t (sizeof (stack) / (2 * sizeof (stack[0][0]))));
		} else {
			// small partition: row-wise insertion sort
			for (int32_t i = lo + 1; i <= hi ; i++) {
				double tmp[64];
				memcpy (tmp, &vertexList[i * stride], stride * sizeof (double));

				int32_t j = i;
				for (; j && (cmp_vertex (&vertexList[(j - 1) * stride], tmp, firstSortAxis) > 0); j --) {
					memcpy (&vertexList[j * stride], &vertexList[(j - 1)* stride], stride * sizeof (double));
				}
				memcpy (&vertexList[j * stride], tmp, stride * sizeof (double));
			}
		}
	}


#ifdef _DEBUG
	// verify the sort: rows must be non-decreasing on the sort axis
	for (int32_t i = 0; i < (vertexCount - 1); i ++) {
		HACD_ASSERT (cmp_vertex (&vertexList[i * stride], &vertexList[(i + 1) * stride], firstSortAxis) <= 0);
	}
#endif

	// sweep: each still-unmapped row (slot [0] == -1) becomes unique vertex
	// 'count'; later rows within sweptWindow on the sort axis whose first
	// compareCount attributes are all within tol get the same index
	int32_t count = 0;
	for (int32_t i = 0; i < vertexCount; i ++) {
		int32_t m = i * stride;
		int32_t index = int32_t (vertexList[m + 0]);
		if (index == int32_t (0xffffffff)) {
			double swept = vertexList[m + firstSortAxis] + sweptWindow;
			int32_t k = i * stride + stride;
			for (int32_t i1 = i + 1; i1 < vertexCount; i1 ++) {

				index = int32_t (vertexList[k + 0]);
				if (index == int32_t (0xffffffff)) {
					double val = vertexList[k + firstSortAxis];
					if (val >= swept) {
						// sorted on this axis, so no later row can match
						break;
					}
					bool test = true;
					for (int32_t t = 0; test && (t < compareCount); t ++) {
						double val = fabs (vertexList[m + t + 2] - vertexList[k + t + 2]);
						test = test && (val <= tol);
					}
					if (test) {
						vertexList[k + 0] = double (count);
					}
				}
				k += stride;
			}

			// compact this unique vertex to slot 'count'
			memcpy (&vertexList[count * stride + 2], &vertexList[m + 2], (stride - 2) * sizeof (double));
			vertexList[m + 0] = double (count);
			count ++;
		}
	}

	return count;
}
+
+
+
//static int32_t QuickSortVertices (float* const vertList, int32_t stride, int32_t floatSize, int32_t unsignedSize, int32_t vertexCount, float tolerance)
// Divide-and-conquer wrapper around SortVertices: very large sets
// (> 3*1024*32 rows) are partitioned about the mean of the highest-variance
// axis, each half is welded recursively, and the results are stitched back
// together; smaller sets go straight to SortVertices. Returns the number of
// unique vertices packed at the front of 'vertList'.
static int32_t QuickSortVertices (double* const vertList, int32_t stride, int32_t compareCount, int32_t vertexCount, double tolerance)
{
	int32_t count = 0;
	if (vertexCount > (3 * 1024 * 32)) {
		// first and second moments per axis to choose the split axis
		double x = float (0.0f);
		double y = float (0.0f);
		double z = float (0.0f);
		double xd = float (0.0f);
		double yd = float (0.0f);
		double zd = float (0.0f);

		for (int32_t i = 0; i < vertexCount; i ++) {
			double x0 = vertList[i * stride + 2];
			double y0 = vertList[i * stride + 3];
			double z0 = vertList[i * stride + 4];
			x += x0;
			y += y0;
			z += z0;
			xd += x0 * x0;
			yd += y0 * y0;
			zd += z0 * z0;
		}

		// variance (times vertexCount) per axis
		xd = vertexCount * xd - x * x;
		yd = vertexCount * yd - y * y;
		zd = vertexCount * zd - z * z;

		// split about the mean of the highest-variance axis
		// (row offsets 2/3/4 = x/y/z)
		int32_t axis = 2;
		double axisVal = x / vertexCount;
		if ((yd > xd) && (yd > zd)) {
			axis = 3;
			axisVal = y / vertexCount;
		}
		if ((zd > xd) && (zd > yd)) {
			axis = 4;
			axisVal = z / vertexCount;
		}

		// Hoare-style partition: rows below axisVal to the front
		int32_t i0 = 0;
		int32_t i1 = vertexCount - 1;
		do {
			for ( ;vertList[i0 * stride + axis] < axisVal; i0 ++);
			for ( ;vertList[i1 * stride + axis] > axisVal; i1 --);
			if (i0 <= i1) {
				for (int32_t i = 0; i < stride; i ++) {
					Swap (vertList[i0 * stride + i], vertList[i1 * stride + i]);
				}
				i0 ++;
				i1 --;
			}
		} while (i0 <= i1);
		HACD_ASSERT (i0 < vertexCount);

		// weld each half independently
		int32_t count0 = QuickSortVertices (&vertList[ 0 * stride], stride, compareCount, i0, tolerance);
		int32_t count1 = QuickSortVertices (&vertList[i0 * stride], stride, compareCount, vertexCount - i0, tolerance);

		count = count0 + count1;

		// compact the second half's unique vertices behind the first half's
		for (int32_t i = 0; i < count1; i ++) {
			memcpy (&vertList[(count0 + i) * stride + 2], &vertList[(i0 + i) * stride + 2], (stride - 2) * sizeof (double));
		}


//		double* const indexPtr = (int64_t*)vertList;
		// offset the second half's remap indices (slot [0]) by count0
		for (int32_t i = i0; i < vertexCount; i ++) {
//			indexPtr[i * stride] += count0;
			vertList[i * stride] += double (count0);
		}

	} else {
		count = SortVertices (vertList, stride, compareCount, vertexCount, tolerance);
	}

	return count;
}
+
+
+
+
+
+int32_t dgVertexListToIndexList (double* const vertList, int32_t strideInBytes, int32_t compareCount, int32_t vertexCount, int32_t* const indexListOut, double tolerance)
+{
+ if (strideInBytes < 3 * int32_t (sizeof (double))) {
+ return 0;
+ }
+ if (compareCount < 3) {
+ return 0;
+ }
+ HACD_ASSERT (compareCount <= int32_t (strideInBytes / sizeof (double)));
+ HACD_ASSERT (strideInBytes == int32_t (sizeof (double) * (strideInBytes / sizeof (double))));
+
+ int32_t stride = strideInBytes / int32_t (sizeof (double));
+ int32_t stride2 = stride + 2;
+
+ dgStack<double>pool (stride2 * vertexCount);
+ double* const tmpVertexList = &pool[0];
+
+// int64_t* const indexPtr = (int64_t*)tmpVertexList;
+
+ int32_t k = 0;
+ int32_t m = 0;
+ for (int32_t i = 0; i < vertexCount; i ++) {
+ memcpy (&tmpVertexList[m + 2], &vertList[k], stride * sizeof (double));
+ tmpVertexList[m + 0] = double (- 1.0f);
+ tmpVertexList[m + 1] = double (i);
+ k += stride;
+ m += stride2;
+ }
+
+ int32_t count = QuickSortVertices (tmpVertexList, stride2, compareCount, vertexCount, tolerance);
+
+ k = 0;
+ m = 0;
+ for (int32_t i = 0; i < count; i ++) {
+ k = i * stride;
+ m = i * stride2;
+ memcpy (&vertList[k], &tmpVertexList[m + 2], stride * sizeof (double));
+ k += stride;
+ m += stride2;
+ }
+
+ m = 0;
+ for (int32_t i = 0; i < vertexCount; i ++) {
+ int32_t i1 = int32_t (tmpVertexList [m + 1]);
+ int32_t index = int32_t (tmpVertexList [m + 0]);
+ indexListOut[i1] = index;
+ m += stride2;
+ }
+ return count;
+}
+
+
+
+
+int32_t dgVertexListToIndexList (float* const vertList, int32_t strideInBytes, int32_t floatSizeInBytes, int32_t unsignedSizeInBytes, int32_t vertexCount, int32_t* const indexList, float tolerance)
+{
+ HACD_FORCE_PARAMETER_REFERENCE(unsignedSizeInBytes);
+ uint32_t stride = (uint32_t)strideInBytes / sizeof (float);
+
+ HACD_ASSERT (!unsignedSizeInBytes);
+ dgStack<double> pool(vertexCount * (int32_t)stride);
+
+ int32_t floatCount = floatSizeInBytes / (int32_t)sizeof (float);
+
+ double* const data = &pool[0];
+ for (uint32_t i = 0; i < (uint32_t)vertexCount; i ++) {
+
+ double* const dst = &data[i * stride];
+ float* const src = &vertList[i * stride];
+ for (uint32_t j = 0; j < stride; j ++) {
+ dst[j] = src[j];
+ }
+ }
+
+ int32_t count = dgVertexListToIndexList (data, (int32_t)(stride * sizeof (double)), floatCount, vertexCount, indexList, double (tolerance));
+ for (uint32_t i = 0; i < (uint32_t)count; i ++) {
+ double* const src = &data[i * stride];
+ float* const dst = &vertList[i * stride];
+ for (uint32_t j = 0; j < stride; j ++) {
+ dst[j] = float (src[j]);
+ }
+ }
+
+ return count;
+}
+
+
namespace hacd
{
// Global allocation bookkeeping counters — presumably updated by the HACD
// custom memory allocator; TODO confirm where these are incremented.
size_t gAllocCount=0;
size_t gAllocSize=0;
};