/*!
**
** Copyright (c) 2015 by John W. Ratcliff mailto:jratcliffscarab@gmail.com
**
**
** The MIT license:
**
** Permission is hereby granted, free of charge, to any person obtaining a copy
** of this software and associated documentation files (the "Software"), to deal
** in the Software without restriction, including without limitation the rights
** to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
** copies of the Software, and to permit persons to whom the Software is furnished
** to do so, subject to the following conditions:
**
** The above copyright notice and this permission notice shall be included in all
** copies or substantial portions of the Software.
** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
** WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
** CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
**
** If you find this code snippet useful; you can tip me at this bitcoin address:
**
** BITCOIN TIP JAR: "1BT66EoaGySkbY9J6MugvQRhMMXDwPxPya"
**
*/
#include "HACD.h"

// NOTE(review): the two system-header names below were destroyed by tag
// stripping ("#include" with no argument).  stdlib.h/string.h match this
// file's use of allocation macros and memcpy -- confirm against upstream.
#include <stdlib.h>
#include <string.h>
#include <float.h> // FLT_EPSILON, used by the hull-merge call
#include <stdio.h> // FILE/fopen/fprintf, used by the DEBUG_WAVEFRONT dump

#include "PlatformConfigHACD.h"
#include "dgMeshEffect.h"
#include "dgConvexHull3d.h"
#include "MergeHulls.h"
#include "ConvexDecomposition.h"
#include "WuQuantizer.h"

#ifdef _DEBUG
#define DEBUG_WAVEFRONT 0 // if true, will save the input convex decomposition to disk as a wavefront OBJ file for debugging purposes.
#endif using namespace hacd; namespace HACD { inline float det(const float *p1,const float *p2,const float *p3) { return p1[0]*p2[1]*p3[2] + p2[0]*p3[1]*p1[2] + p3[0]*p1[1]*p2[2] -p1[0]*p3[1]*p2[2] - p2[0]*p1[1]*p3[2] - p3[0]*p2[1]*p1[2]; } static float fm_computeMeshVolume(const float *vertices,uint32_t tcount,const uint32_t *indices) { float volume = 0; for (uint32_t i=0; i bmax.x ) bmax.x = v.x; if ( v.y > bmax.y ) bmax.y = v.y; if ( v.z > bmax.z ) bmax.z = v.z; } source+=3; } inputCenter.x = (bmin.x+bmax.x)*0.5f; inputCenter.y = (bmin.y+bmax.y)*0.5f; inputCenter.z = (bmin.z+bmax.z)*0.5f; float dx = bmax.x - bmin.x; float dy = bmax.y - bmin.y; float dz = bmax.z - bmin.z; if ( dx > 0 ) { inputScale.x = 1.0f / dx; } else { inputScale.x = 1; } if ( dy > 0 ) { inputScale.y = 1.0f / dy; } else { inputScale.y = 1; } if ( dz > 0 ) { inputScale.z = 1.0f / dz; } else { inputScale.z = 1; } source = desc.mVertices; desc.mVertices = (const float *)HACD_ALLOC( sizeof(float)*3*desc.mVertexCount ); float *dest = (float *)desc.mVertices; for (uint32_t i=0; iReportProgress("Starting HACD",1); } #if DEBUG_WAVEFRONT { static uint32_t saveCount=0; saveCount++; char scratch[512]; sprintf_s(scratch,512,"HACD_DEBUG_%d.obj", saveCount ); FILE *fph = fopen(scratch,"wb"); if ( fph ) { fprintf(fph,"# NormalizeInputMesh: %s\r\n", _desc.mNormalizeInputMesh ? "true" : "false"); fprintf(fph,"# UseFastVersion: %s\r\n", _desc.mUseFastVersion ? 
"true" : "false" ); fprintf(fph,"# TriangleCount: %d\r\n", _desc.mTriangleCount); fprintf(fph,"# VertexCount: %d\r\n", _desc.mVertexCount); fprintf(fph,"# MaxHullCount: %d\r\n", _desc.mMaxHullCount); fprintf(fph,"# MaxMergeHullCount: %d\r\n", _desc.mMaxMergeHullCount); fprintf(fph,"# MaxHullVertices: %d\r\n", _desc.mMaxHullVertices); fprintf(fph,"# Concavity: %0.4f\r\n", _desc.mConcavity); fprintf(fph,"# SmallClusterThreshold: %0.4f\r\n", _desc.mSmallClusterThreshold); fprintf(fph,"# BackFaceDistanceFactor: %0.4f\r\n", _desc.mBackFaceDistanceFactor); fprintf(fph,"# DecompositionDepth: %d\r\n", _desc.mDecompositionDepth); fprintf(fph,"# JobSwarmContext: %s\r\n", _desc.mJobSwarmContext ? "true" : "false"); fprintf(fph,"# Callback: %s\r\n", _desc.mCallback ? "true" : "false"); for (uint32_t i=0; i<_desc.mVertexCount; i++) { const float *p = &_desc.mVertices[i*3]; fprintf(fph,"v %0.9f %0.9f %0.9f\r\n", p[0], p[1], p[2] ); } for (uint32_t i=0; i<_desc.mTriangleCount; i++) { uint32_t i1 = _desc.mIndices[i*3+0]; uint32_t i2 = _desc.mIndices[i*3+1]; uint32_t i3 = _desc.mIndices[i*3+2]; fprintf(fph,"f %d %d %d\r\n", i1+1, i2+1, i3+1 ); } fclose(fph); } } #endif releaseHACD(); Desc desc = _desc; float *tempPositions = NULL; // temp memory holding remapped vertex positions uint32_t *tempIndices = NULL; // temp memory holding remapped triangle indices // This method scans the input mesh for duplicate vertices. 
if ( desc.mRemoveDuplicateVertices ) { if ( desc.mCallback ) { desc.mCallback->ReportProgress("Removing duplicate vertices",1); } tempPositions = (float *)HACD_ALLOC(sizeof(float)*desc.mVertexCount*3); // room to hold all of the input vertex positions tempIndices = (uint32_t *)HACD_ALLOC(sizeof(uint32_t)*desc.mTriangleCount*3); // room to hold all of the triangle indices desc.mVertices = tempPositions; // the remapped vertex position data desc.mIndices = tempIndices; // the remapped triangle indices uint32_t removeCount = 0; desc.mVertexCount = 0; uint32_t *remapPositions = (uint32_t *)HACD_ALLOC(sizeof(uint32_t)*_desc.mVertexCount); // Scan each input position and see if it duplicates an already defined vertex position for (uint32_t i=0; i<_desc.mVertexCount; i++) { const float *p1 = &_desc.mVertices[i*3]; // see if this position is already represented in out vertex list. // Iterate through all positions we have already defined bool found = false; for (uint32_t j=0; jReportProgress(scratch,1); } } if ( desc.mVertexCount ) { if ( desc.mDecompositionDepth ) // if using legacy ACD { CONVEX_DECOMPOSITION::ConvexDecomposition *cd = CONVEX_DECOMPOSITION::createConvexDecomposition(); CONVEX_DECOMPOSITION::DecompDesc dcompDesc; dcompDesc.mIndices = desc.mIndices; dcompDesc.mVertices = desc.mVertices; dcompDesc.mTcount = desc.mTriangleCount; dcompDesc.mVcount = desc.mVertexCount; dcompDesc.mMaxVertices = desc.mMaxHullVertices; dcompDesc.mDepth = desc.mDecompositionDepth; dcompDesc.mCpercent = desc.mConcavity*10; dcompDesc.mMeshVolumePercent = desc.mSmallClusterThreshold; dcompDesc.mCallback = desc.mCallback; if ( desc.mMaxMergeHullCount == 1 ) // if we only want a single hull output then set the decomposition depth to zero! 
{ dcompDesc.mDepth = 0; } ret = cd->performConvexDecomposition(dcompDesc); for (uint32_t i=0; igetConvexResult(i,true); Hull h; h.mVertices = result->mHullVertices; h.mIndices = result->mHullIndices; h.mTriangleCount = result->mHullTcount; h.mVertexCount = result->mHullVcount; h.mVolume = fm_computeMeshVolume(h.mVertices,h.mTriangleCount,h.mIndices); mHulls.push_back(h); } } else { Vec3 inputScale(1,1,1); Vec3 inputCenter(0,0,0); if ( desc.mNormalizeInputMesh ) { if ( desc.mCallback ) { desc.mCallback->ReportProgress("Normalizing Input Mesh",1); } normalizeInputMesh(desc,inputScale,inputCenter); } { dgMeshEffect mesh(true); float normal[3] = { 0,1,0 }; float uv[2] = { 0,0 }; int32_t *faceIndexCount = (int32_t *)HACD_ALLOC(sizeof(int32_t)*desc.mTriangleCount); int32_t *dummyIndex = (int32_t *)HACD_ALLOC(sizeof(int32_t)*desc.mTriangleCount*3); for (uint32_t i=0; iReportProgress("Building Mesh from Vertex Index List",1); } mesh.BuildFromVertexListIndexList((int32_t)desc.mTriangleCount,faceIndexCount,dummyIndex, desc.mVertices,(int32_t)sizeof(float)*3,(const int32_t *const)desc.mIndices, normal,(int32_t)sizeof(float)*3,dummyIndex, uv,(int32_t)sizeof(float)*2,dummyIndex, uv,(int32_t)sizeof(float)*2,dummyIndex); dgMeshEffect *result; { if ( desc.mCallback ) { desc.mCallback->ReportProgress("Begin HACD",1); } if ( desc.mUseFastVersion ) { result = mesh.CreateConvexApproximationFast(desc.mConcavity,(int32_t)desc.mMaxHullCount,desc.mCallback); } else { result = mesh.CreateConvexApproximation(desc.mConcavity,desc.mBackFaceDistanceFactor,(int32_t)desc.mMaxHullCount,(int32_t)desc.mMaxHullVertices,desc.mCallback); } } if ( result ) { // now we build hulls for each connected surface... 
if ( desc.mCallback ) { desc.mCallback->ReportProgress("Getting connected surfaces",1); } dgPolyhedra segment; result->BeginConectedSurface(); if ( result->GetConectedSurface(segment)) { dgMeshEffect *solid = HACD_NEW(dgMeshEffect)(segment,*result); while ( solid ) { dgConvexHull3d *hull = solid->CreateConvexHull(0.00001,(int32_t)desc.mMaxHullVertices); if ( hull ) { Hull h; h.mVertexCount = (uint32_t)hull->GetVertexCount(); h.mVertices = (float *)HACD_ALLOC( sizeof(float)*3*h.mVertexCount); for (uint32_t i=0; iGetVertex((int32_t)i); dest[0] = (float)source.m_x*inputScale.x+inputCenter.x; dest[1] = (float)source.m_y*inputScale.y+inputCenter.y; dest[2] = (float)source.m_z*inputScale.z+inputCenter.z; } h.mTriangleCount = (uint32_t)hull->GetCount(); uint32_t *destIndices = (uint32_t *)HACD_ALLOC(sizeof(uint32_t)*3*h.mTriangleCount); h.mIndices = destIndices; dgList::Iterator iter(*hull); for (iter.Begin(); iter; iter++) { dgConvexHull3DFace &face = (*iter); destIndices[0] = (uint32_t)face.m_index[0]; destIndices[1] = (uint32_t)face.m_index[1]; destIndices[2] = (uint32_t)face.m_index[2]; destIndices+=3; } h.mVolume = fm_computeMeshVolume(h.mVertices,h.mTriangleCount,h.mIndices); mHulls.push_back(h); // save it! 
delete hull; } delete solid; solid = NULL; dgPolyhedra nextSegment; int32_t moreSegments = result->GetConectedSurface(nextSegment); if ( moreSegments ) { solid = HACD_NEW(dgMeshEffect)(nextSegment,*result); } else { result->EndConectedSurface(); } } } delete result; } } ret= (uint32_t)mHulls.size(); } if ( desc.mNormalizeInputMesh && desc.mDecompositionDepth == 0 ) { releaseNormalizedInputMesh(desc); } } if ( ret && ((ret > desc.mMaxMergeHullCount) || (desc.mSmallClusterThreshold != 0.0f)) ) { MergeHullsInterface *mhi = createMergeHullsInterface(); if ( mhi ) { if ( desc.mCallback ) { desc.mCallback->ReportProgress("Gathering Input Hulls",1); } MergeHullVector inputHulls; MergeHullVector outputHulls; for (uint32_t i=0; imergeHulls(inputHulls,outputHulls,desc.mMaxMergeHullCount, desc.mSmallClusterThreshold + FLT_EPSILON, desc.mMaxHullVertices, desc.mCallback); } for (uint32_t i=0; iReportProgress("Gathering Merged Hulls",1); } for (uint32_t i=0; irelease(); } HACD_FREE(tempIndices); HACD_FREE(tempPositions); } return ret; } void releaseHull(Hull &h) { HACD_FREE((void *)h.mIndices); HACD_FREE((void *)h.mVertices); h.mIndices = NULL; h.mVertices = NULL; } virtual const Hull *getHull(uint32_t index) const { const Hull *ret = NULL; if ( index < mHulls.size() ) { ret = &mHulls[index]; } return ret; } virtual void releaseHACD(void) // release memory associated with the last HACD request { for (uint32_t i=0; i mHulls; }; HACD_API * createHACD_API(void) { MyHACD_API *m = HACD_NEW(MyHACD_API); return static_cast(m); } };