//-----------------------------------------------------------------------------
// Product:     OpenCTM
// File:        compressMG2.c
// Description: Implementation of the MG2 compression method.
//-----------------------------------------------------------------------------
// Copyright (c) 2009-2010 Marcus Geelnard
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
//    claim that you wrote the original software. If you use this software
//    in a product, an acknowledgment in the product documentation would be
//    appreciated but is not required.
//
// 2. Altered source versions must be plainly marked as such, and must not
//    be misrepresented as being the original software.
//
// 3. This notice may not be removed or altered from any source
//    distribution.
nuclear@14: //----------------------------------------------------------------------------- nuclear@14: nuclear@14: #include nuclear@14: #include nuclear@14: #include "openctm.h" nuclear@14: #include "internal.h" nuclear@14: nuclear@14: #ifdef __DEBUG_ nuclear@14: #include nuclear@14: #endif nuclear@14: nuclear@14: // We need PI nuclear@14: #ifndef PI nuclear@14: #define PI 3.141592653589793238462643f nuclear@14: #endif nuclear@14: nuclear@14: nuclear@14: //----------------------------------------------------------------------------- nuclear@14: // _CTMgrid - 3D space subdivision grid. nuclear@14: //----------------------------------------------------------------------------- nuclear@14: typedef struct { nuclear@14: // Axis-aligned boudning box for the grid. nuclear@14: CTMfloat mMin[3]; nuclear@14: CTMfloat mMax[3]; nuclear@14: nuclear@14: // How many divisions per axis (minimum 1). nuclear@14: CTMuint mDivision[3]; nuclear@14: nuclear@14: // Size of each grid box. nuclear@14: CTMfloat mSize[3]; nuclear@14: } _CTMgrid; nuclear@14: nuclear@14: //----------------------------------------------------------------------------- nuclear@14: // _CTMsortvertex - Vertex information. nuclear@14: //----------------------------------------------------------------------------- nuclear@14: typedef struct { nuclear@14: // Vertex X coordinate (used for sorting). nuclear@14: CTMfloat x; nuclear@14: nuclear@14: // Grid index. This is the index into the 3D space subdivision grid. nuclear@14: CTMuint mGridIndex; nuclear@14: nuclear@14: // Original index (before sorting). nuclear@14: CTMuint mOriginalIndex; nuclear@14: } _CTMsortvertex; nuclear@14: nuclear@14: //----------------------------------------------------------------------------- nuclear@14: // _ctmSetupGrid() - Setup the 3D space subdivision grid. 
nuclear@14: //----------------------------------------------------------------------------- nuclear@14: static void _ctmSetupGrid(_CTMcontext * self, _CTMgrid * aGrid) nuclear@14: { nuclear@14: CTMuint i; nuclear@14: CTMfloat factor[3], sum, wantedGrids; nuclear@14: nuclear@14: // Calculate the mesh bounding box nuclear@14: aGrid->mMin[0] = aGrid->mMax[0] = self->mVertices[0]; nuclear@14: aGrid->mMin[1] = aGrid->mMax[1] = self->mVertices[1]; nuclear@14: aGrid->mMin[2] = aGrid->mMax[2] = self->mVertices[2]; nuclear@14: for(i = 1; i < self->mVertexCount; ++ i) nuclear@14: { nuclear@14: if(self->mVertices[i * 3] < aGrid->mMin[0]) nuclear@14: aGrid->mMin[0] = self->mVertices[i * 3]; nuclear@14: else if(self->mVertices[i * 3] > aGrid->mMax[0]) nuclear@14: aGrid->mMax[0] = self->mVertices[i * 3]; nuclear@14: if(self->mVertices[i * 3 + 1] < aGrid->mMin[1]) nuclear@14: aGrid->mMin[1] = self->mVertices[i * 3 + 1]; nuclear@14: else if(self->mVertices[i * 3 + 1] > aGrid->mMax[1]) nuclear@14: aGrid->mMax[1] = self->mVertices[i * 3 + 1]; nuclear@14: if(self->mVertices[i * 3 + 2] < aGrid->mMin[2]) nuclear@14: aGrid->mMin[2] = self->mVertices[i * 3 + 2]; nuclear@14: else if(self->mVertices[i * 3 + 2] > aGrid->mMax[2]) nuclear@14: aGrid->mMax[2] = self->mVertices[i * 3 + 2]; nuclear@14: } nuclear@14: nuclear@14: // Determine optimal grid resolution, based on the number of vertices and nuclear@14: // the bounding box. nuclear@14: // NOTE: This algorithm is quite crude, and could very well be optimized for nuclear@14: // better compression levels in the future without affecting the file format nuclear@14: // or backward compatibility at all. 
nuclear@14: for(i = 0; i < 3; ++ i) nuclear@14: factor[i] = aGrid->mMax[i] - aGrid->mMin[i]; nuclear@14: sum = factor[0] + factor[1] + factor[2]; nuclear@14: if(sum > 1e-30f) nuclear@14: { nuclear@14: sum = 1.0f / sum; nuclear@14: for(i = 0; i < 3; ++ i) nuclear@14: factor[i] *= sum; nuclear@14: wantedGrids = powf(100.0f * self->mVertexCount, 1.0f / 3.0f); nuclear@14: for(i = 0; i < 3; ++ i) nuclear@14: { nuclear@14: aGrid->mDivision[i] = (CTMuint) ceilf(wantedGrids * factor[i]); nuclear@14: if(aGrid->mDivision[i] < 1) nuclear@14: aGrid->mDivision[i] = 1; nuclear@14: } nuclear@14: } nuclear@14: else nuclear@14: { nuclear@14: aGrid->mDivision[0] = 4; nuclear@14: aGrid->mDivision[1] = 4; nuclear@14: aGrid->mDivision[2] = 4; nuclear@14: } nuclear@14: #ifdef __DEBUG_ nuclear@14: printf("Division: (%d %d %d)\n", aGrid->mDivision[0], aGrid->mDivision[1], aGrid->mDivision[2]); nuclear@14: #endif nuclear@14: nuclear@14: // Calculate grid sizes nuclear@14: for(i = 0; i < 3; ++ i) nuclear@14: aGrid->mSize[i] = (aGrid->mMax[i] - aGrid->mMin[i]) / aGrid->mDivision[i]; nuclear@14: } nuclear@14: nuclear@14: //----------------------------------------------------------------------------- nuclear@14: // _ctmPointToGridIdx() - Convert a point to a grid index. 
nuclear@14: //----------------------------------------------------------------------------- nuclear@14: static CTMuint _ctmPointToGridIdx(_CTMgrid * aGrid, CTMfloat * aPoint) nuclear@14: { nuclear@14: CTMuint i, idx[3]; nuclear@14: nuclear@14: for(i = 0; i < 3; ++ i) nuclear@14: { nuclear@14: idx[i] = (CTMuint) floorf((aPoint[i] - aGrid->mMin[i]) / aGrid->mSize[i]); nuclear@14: if(idx[i] >= aGrid->mDivision[i]) nuclear@14: idx[i] = aGrid->mDivision[i] - 1; nuclear@14: } nuclear@14: nuclear@14: return idx[0] + aGrid->mDivision[0] * (idx[1] + aGrid->mDivision[1] * idx[2]); nuclear@14: } nuclear@14: nuclear@14: //----------------------------------------------------------------------------- nuclear@14: // _ctmGridIdxToPoint() - Convert a grid index to a point (the min x/y/z for nuclear@14: // the given grid box). nuclear@14: //----------------------------------------------------------------------------- nuclear@14: static void _ctmGridIdxToPoint(_CTMgrid * aGrid, CTMuint aIdx, CTMfloat * aPoint) nuclear@14: { nuclear@14: CTMuint gridIdx[3], zdiv, ydiv, i; nuclear@14: nuclear@14: zdiv = aGrid->mDivision[0] * aGrid->mDivision[1]; nuclear@14: ydiv = aGrid->mDivision[0]; nuclear@14: nuclear@14: gridIdx[2] = aIdx / zdiv; nuclear@14: aIdx -= gridIdx[2] * zdiv; nuclear@14: gridIdx[1] = aIdx / ydiv; nuclear@14: aIdx -= gridIdx[1] * ydiv; nuclear@14: gridIdx[0] = aIdx; nuclear@14: nuclear@14: for(i = 0; i < 3; ++ i) nuclear@14: aPoint[i] = gridIdx[i] * aGrid->mSize[i] + aGrid->mMin[i]; nuclear@14: } nuclear@14: nuclear@14: //----------------------------------------------------------------------------- nuclear@14: // _compareVertex() - Comparator for the vertex sorting. 
nuclear@14: //----------------------------------------------------------------------------- nuclear@14: static int _compareVertex(const void * elem1, const void * elem2) nuclear@14: { nuclear@14: _CTMsortvertex * v1 = (_CTMsortvertex *) elem1; nuclear@14: _CTMsortvertex * v2 = (_CTMsortvertex *) elem2; nuclear@14: if(v1->mGridIndex != v2->mGridIndex) nuclear@14: return v1->mGridIndex - v2->mGridIndex; nuclear@14: else if(v1->x < v2->x) nuclear@14: return -1; nuclear@14: else if(v1->x > v2->x) nuclear@14: return 1; nuclear@14: else nuclear@14: return 0; nuclear@14: } nuclear@14: nuclear@14: //----------------------------------------------------------------------------- nuclear@14: // _ctmSortVertices() - Setup the vertex array. Assign each vertex to a grid nuclear@14: // box, and sort all vertices. nuclear@14: //----------------------------------------------------------------------------- nuclear@14: static void _ctmSortVertices(_CTMcontext * self, _CTMsortvertex * aSortVertices, nuclear@14: _CTMgrid * aGrid) nuclear@14: { nuclear@14: CTMuint i; nuclear@14: nuclear@14: // Prepare sort vertex array nuclear@14: for(i = 0; i < self->mVertexCount; ++ i) nuclear@14: { nuclear@14: // Store vertex properties in the sort vertex array nuclear@14: aSortVertices[i].x = self->mVertices[i * 3]; nuclear@14: aSortVertices[i].mGridIndex = _ctmPointToGridIdx(aGrid, &self->mVertices[i * 3]); nuclear@14: aSortVertices[i].mOriginalIndex = i; nuclear@14: } nuclear@14: nuclear@14: // Sort vertices. The elements are first sorted by their grid indices, and nuclear@14: // scondly by their x coordinates. nuclear@14: qsort((void *) aSortVertices, self->mVertexCount, sizeof(_CTMsortvertex), _compareVertex); nuclear@14: } nuclear@14: nuclear@14: //----------------------------------------------------------------------------- nuclear@14: // _ctmReIndexIndices() - Re-index all indices, based on the sorted vertices. 
nuclear@14: //----------------------------------------------------------------------------- nuclear@14: static int _ctmReIndexIndices(_CTMcontext * self, _CTMsortvertex * aSortVertices, nuclear@14: CTMuint * aIndices) nuclear@14: { nuclear@14: CTMuint i, * indexLUT; nuclear@14: nuclear@14: // Create temporary lookup-array, O(n) nuclear@14: indexLUT = (CTMuint *) malloc(sizeof(CTMuint) * self->mVertexCount); nuclear@14: if(!indexLUT) nuclear@14: { nuclear@14: self->mError = CTM_OUT_OF_MEMORY; nuclear@14: return CTM_FALSE; nuclear@14: } nuclear@14: for(i = 0; i < self->mVertexCount; ++ i) nuclear@14: indexLUT[aSortVertices[i].mOriginalIndex] = i; nuclear@14: nuclear@14: // Convert old indices to new indices, O(n) nuclear@14: for(i = 0; i < self->mTriangleCount * 3; ++ i) nuclear@14: aIndices[i] = indexLUT[self->mIndices[i]]; nuclear@14: nuclear@14: // Free temporary lookup-array nuclear@14: free((void *) indexLUT); nuclear@14: nuclear@14: return CTM_TRUE; nuclear@14: } nuclear@14: nuclear@14: //----------------------------------------------------------------------------- nuclear@14: // _compareTriangle() - Comparator for the triangle sorting. nuclear@14: //----------------------------------------------------------------------------- nuclear@14: static int _compareTriangle(const void * elem1, const void * elem2) nuclear@14: { nuclear@14: CTMuint * tri1 = (CTMuint *) elem1; nuclear@14: CTMuint * tri2 = (CTMuint *) elem2; nuclear@14: if(tri1[0] != tri2[0]) nuclear@14: return tri1[0] - tri2[0]; nuclear@14: else nuclear@14: return tri1[1] - tri2[1]; nuclear@14: } nuclear@14: nuclear@14: //----------------------------------------------------------------------------- nuclear@14: // _ctmReArrangeTriangles() - Re-arrange all triangles for optimal nuclear@14: // compression. 
nuclear@14: //----------------------------------------------------------------------------- nuclear@14: static void _ctmReArrangeTriangles(_CTMcontext * self, CTMuint * aIndices) nuclear@14: { nuclear@14: CTMuint * tri, tmp, i; nuclear@14: nuclear@14: // Step 1: Make sure that the first index of each triangle is the smallest nuclear@14: // one (rotate triangle nodes if necessary) nuclear@14: for(i = 0; i < self->mTriangleCount; ++ i) nuclear@14: { nuclear@14: tri = &aIndices[i * 3]; nuclear@14: if((tri[1] < tri[0]) && (tri[1] < tri[2])) nuclear@14: { nuclear@14: tmp = tri[0]; nuclear@14: tri[0] = tri[1]; nuclear@14: tri[1] = tri[2]; nuclear@14: tri[2] = tmp; nuclear@14: } nuclear@14: else if((tri[2] < tri[0]) && (tri[2] < tri[1])) nuclear@14: { nuclear@14: tmp = tri[0]; nuclear@14: tri[0] = tri[2]; nuclear@14: tri[2] = tri[1]; nuclear@14: tri[1] = tmp; nuclear@14: } nuclear@14: } nuclear@14: nuclear@14: // Step 2: Sort the triangles based on the first triangle index nuclear@14: qsort((void *) aIndices, self->mTriangleCount, sizeof(CTMuint) * 3, _compareTriangle); nuclear@14: } nuclear@14: nuclear@14: //----------------------------------------------------------------------------- nuclear@14: // _ctmMakeIndexDeltas() - Calculate various forms of derivatives in order to nuclear@14: // reduce data entropy. 
nuclear@14: //----------------------------------------------------------------------------- nuclear@14: static void _ctmMakeIndexDeltas(_CTMcontext * self, CTMuint * aIndices) nuclear@14: { nuclear@14: CTMint i; nuclear@14: for(i = self->mTriangleCount - 1; i >= 0; -- i) nuclear@14: { nuclear@14: // Step 1: Calculate delta from second triangle index to the previous nuclear@14: // second triangle index, if the previous triangle shares the same first nuclear@14: // index, otherwise calculate the delta to the first triangle index nuclear@14: if((i >= 1) && (aIndices[i * 3] == aIndices[(i - 1) * 3])) nuclear@14: aIndices[i * 3 + 1] -= aIndices[(i - 1) * 3 + 1]; nuclear@14: else nuclear@14: aIndices[i * 3 + 1] -= aIndices[i * 3]; nuclear@14: nuclear@14: // Step 2: Calculate delta from third triangle index to the first triangle nuclear@14: // index nuclear@14: aIndices[i * 3 + 2] -= aIndices[i * 3]; nuclear@14: nuclear@14: // Step 3: Calculate derivative of the first triangle index nuclear@14: if(i >= 1) nuclear@14: aIndices[i * 3] -= aIndices[(i - 1) * 3]; nuclear@14: } nuclear@14: } nuclear@14: nuclear@14: //----------------------------------------------------------------------------- nuclear@14: // _ctmRestoreIndices() - Restore original indices (inverse derivative nuclear@14: // operation). 
nuclear@14: //----------------------------------------------------------------------------- nuclear@14: static void _ctmRestoreIndices(_CTMcontext * self, CTMuint * aIndices) nuclear@14: { nuclear@14: CTMuint i; nuclear@14: nuclear@14: for(i = 0; i < self->mTriangleCount; ++ i) nuclear@14: { nuclear@14: // Step 1: Reverse derivative of the first triangle index nuclear@14: if(i >= 1) nuclear@14: aIndices[i * 3] += aIndices[(i - 1) * 3]; nuclear@14: nuclear@14: // Step 2: Reverse delta from third triangle index to the first triangle nuclear@14: // index nuclear@14: aIndices[i * 3 + 2] += aIndices[i * 3]; nuclear@14: nuclear@14: // Step 3: Reverse delta from second triangle index to the previous nuclear@14: // second triangle index, if the previous triangle shares the same first nuclear@14: // index, otherwise reverse the delta to the first triangle index nuclear@14: if((i >= 1) && (aIndices[i * 3] == aIndices[(i - 1) * 3])) nuclear@14: aIndices[i * 3 + 1] += aIndices[(i - 1) * 3 + 1]; nuclear@14: else nuclear@14: aIndices[i * 3 + 1] += aIndices[i * 3]; nuclear@14: } nuclear@14: } nuclear@14: nuclear@14: //----------------------------------------------------------------------------- nuclear@14: // _ctmMakeVertexDeltas() - Calculate various forms of derivatives in order to nuclear@14: // reduce data entropy. 
nuclear@14: //----------------------------------------------------------------------------- nuclear@14: static void _ctmMakeVertexDeltas(_CTMcontext * self, CTMint * aIntVertices, nuclear@14: _CTMsortvertex * aSortVertices, _CTMgrid * aGrid) nuclear@14: { nuclear@14: CTMuint i, gridIdx, prevGridIndex, oldIdx; nuclear@14: CTMfloat gridOrigin[3], scale; nuclear@14: CTMint deltaX, prevDeltaX; nuclear@14: nuclear@14: // Vertex scaling factor nuclear@14: scale = 1.0f / self->mVertexPrecision; nuclear@14: nuclear@14: prevGridIndex = 0x7fffffff; nuclear@14: prevDeltaX = 0; nuclear@14: for(i = 0; i < self->mVertexCount; ++ i) nuclear@14: { nuclear@14: // Get grid box origin nuclear@14: gridIdx = aSortVertices[i].mGridIndex; nuclear@14: _ctmGridIdxToPoint(aGrid, gridIdx, gridOrigin); nuclear@14: nuclear@14: // Get old vertex coordinate index (before vertex sorting) nuclear@14: oldIdx = aSortVertices[i].mOriginalIndex; nuclear@14: nuclear@14: // Store delta to the grid box origin in the integer vertex array. For the nuclear@14: // X axis (which is sorted) we also do the delta to the previous coordinate nuclear@14: // in the box. nuclear@14: deltaX = (CTMint) floorf(scale * (self->mVertices[oldIdx * 3] - gridOrigin[0]) + 0.5f); nuclear@14: if(gridIdx == prevGridIndex) nuclear@14: aIntVertices[i * 3] = deltaX - prevDeltaX; nuclear@14: else nuclear@14: aIntVertices[i * 3] = deltaX; nuclear@14: aIntVertices[i * 3 + 1] = (CTMint) floorf(scale * (self->mVertices[oldIdx * 3 + 1] - gridOrigin[1]) + 0.5f); nuclear@14: aIntVertices[i * 3 + 2] = (CTMint) floorf(scale * (self->mVertices[oldIdx * 3 + 2] - gridOrigin[2]) + 0.5f); nuclear@14: nuclear@14: prevGridIndex = gridIdx; nuclear@14: prevDeltaX = deltaX; nuclear@14: } nuclear@14: } nuclear@14: nuclear@14: //----------------------------------------------------------------------------- nuclear@14: // _ctmRestoreVertices() - Calculate inverse derivatives of the vertices. 
nuclear@14: //----------------------------------------------------------------------------- nuclear@14: static void _ctmRestoreVertices(_CTMcontext * self, CTMint * aIntVertices, nuclear@14: CTMuint * aGridIndices, _CTMgrid * aGrid, CTMfloat * aVertices) nuclear@14: { nuclear@14: CTMuint i, gridIdx, prevGridIndex; nuclear@14: CTMfloat gridOrigin[3], scale; nuclear@14: CTMint deltaX, prevDeltaX; nuclear@14: nuclear@14: scale = self->mVertexPrecision; nuclear@14: nuclear@14: prevGridIndex = 0x7fffffff; nuclear@14: prevDeltaX = 0; nuclear@14: for(i = 0; i < self->mVertexCount; ++ i) nuclear@14: { nuclear@14: // Get grid box origin nuclear@14: gridIdx = aGridIndices[i]; nuclear@14: _ctmGridIdxToPoint(aGrid, gridIdx, gridOrigin); nuclear@14: nuclear@14: // Restore original point nuclear@14: deltaX = aIntVertices[i * 3]; nuclear@14: if(gridIdx == prevGridIndex) nuclear@14: deltaX += prevDeltaX; nuclear@14: aVertices[i * 3] = scale * deltaX + gridOrigin[0]; nuclear@14: aVertices[i * 3 + 1] = scale * aIntVertices[i * 3 + 1] + gridOrigin[1]; nuclear@14: aVertices[i * 3 + 2] = scale * aIntVertices[i * 3 + 2] + gridOrigin[2]; nuclear@14: nuclear@14: prevGridIndex = gridIdx; nuclear@14: prevDeltaX = deltaX; nuclear@14: } nuclear@14: } nuclear@14: nuclear@14: //----------------------------------------------------------------------------- nuclear@14: // _ctmCalcSmoothNormals() - Calculate the smooth normals for a given mesh. nuclear@14: // These are used as the nominal normals for normal deltas & reconstruction. 
nuclear@14: //----------------------------------------------------------------------------- nuclear@14: static void _ctmCalcSmoothNormals(_CTMcontext * self, CTMfloat * aVertices, nuclear@14: CTMuint * aIndices, CTMfloat * aSmoothNormals) nuclear@14: { nuclear@14: CTMuint i, j, k, tri[3]; nuclear@14: CTMfloat len; nuclear@14: CTMfloat v1[3], v2[3], n[3]; nuclear@14: nuclear@14: // Clear smooth normals array nuclear@14: for(i = 0; i < 3 * self->mVertexCount; ++ i) nuclear@14: aSmoothNormals[i] = 0.0f; nuclear@14: nuclear@14: // Calculate sums of all neigbouring triangle normals for each vertex nuclear@14: for(i = 0; i < self->mTriangleCount; ++ i) nuclear@14: { nuclear@14: // Get triangle corner indices nuclear@14: for(j = 0; j < 3; ++ j) nuclear@14: tri[j] = aIndices[i * 3 + j]; nuclear@14: nuclear@14: // Calculate the normalized cross product of two triangle edges (i.e. the nuclear@14: // flat triangle normal) nuclear@14: for(j = 0; j < 3; ++ j) nuclear@14: { nuclear@14: v1[j] = aVertices[tri[1] * 3 + j] - aVertices[tri[0] * 3 + j]; nuclear@14: v2[j] = aVertices[tri[2] * 3 + j] - aVertices[tri[0] * 3 + j]; nuclear@14: } nuclear@14: n[0] = v1[1] * v2[2] - v1[2] * v2[1]; nuclear@14: n[1] = v1[2] * v2[0] - v1[0] * v2[2]; nuclear@14: n[2] = v1[0] * v2[1] - v1[1] * v2[0]; nuclear@14: len = sqrtf(n[0] * n[0] + n[1] * n[1] + n[2] * n[2]); nuclear@14: if(len > 1e-10f) nuclear@14: len = 1.0f / len; nuclear@14: else nuclear@14: len = 1.0f; nuclear@14: for(j = 0; j < 3; ++ j) nuclear@14: n[j] *= len; nuclear@14: nuclear@14: // Add the flat normal to all three triangle vertices nuclear@14: for(k = 0; k < 3; ++ k) nuclear@14: for(j = 0; j < 3; ++ j) nuclear@14: aSmoothNormals[tri[k] * 3 + j] += n[j]; nuclear@14: } nuclear@14: nuclear@14: // Normalize the normal sums, which gives the unit length smooth normals nuclear@14: for(i = 0; i < self->mVertexCount; ++ i) nuclear@14: { nuclear@14: len = sqrtf(aSmoothNormals[i * 3] * aSmoothNormals[i * 3] + nuclear@14: aSmoothNormals[i * 
3 + 1] * aSmoothNormals[i * 3 + 1] + nuclear@14: aSmoothNormals[i * 3 + 2] * aSmoothNormals[i * 3 + 2]); nuclear@14: if(len > 1e-10f) nuclear@14: len = 1.0f / len; nuclear@14: else nuclear@14: len = 1.0f; nuclear@14: for(j = 0; j < 3; ++ j) nuclear@14: aSmoothNormals[i * 3 + j] *= len; nuclear@14: } nuclear@14: } nuclear@14: nuclear@14: //----------------------------------------------------------------------------- nuclear@14: // _ctmMakeNormalCoordSys() - Create an ortho-normalized coordinate system nuclear@14: // where the Z-axis is aligned with the given normal. nuclear@14: // Note 1: This function is central to how the compressed normal data is nuclear@14: // interpreted, and it can not be changed (mathematically) without making the nuclear@14: // coder/decoder incompatible with other versions of the library! nuclear@14: // Note 2: Since we do this for every single normal, this routine needs to be nuclear@14: // fast. The current implementation uses: 12 MUL, 1 DIV, 1 SQRT, ~6 ADD. nuclear@14: //----------------------------------------------------------------------------- nuclear@14: static void _ctmMakeNormalCoordSys(CTMfloat * aNormal, CTMfloat * aBasisAxes) nuclear@14: { nuclear@14: CTMfloat len, * x, * y, * z; nuclear@14: CTMuint i; nuclear@14: nuclear@14: // Pointers to the basis axes (aBasisAxes is a 3x3 matrix) nuclear@14: x = aBasisAxes; nuclear@14: y = &aBasisAxes[3]; nuclear@14: z = &aBasisAxes[6]; nuclear@14: nuclear@14: // Z = normal (must be unit length!) 
nuclear@14: for(i = 0; i < 3; ++ i) nuclear@14: z[i] = aNormal[i]; nuclear@14: nuclear@14: // Calculate a vector that is guaranteed to be orthogonal to the normal, non- nuclear@14: // zero, and a continuous function of the normal (no discrete jumps): nuclear@14: // X = (0,0,1) x normal + (1,0,0) x normal nuclear@14: x[0] = -aNormal[1]; nuclear@14: x[1] = aNormal[0] - aNormal[2]; nuclear@14: x[2] = aNormal[1]; nuclear@14: nuclear@14: // Normalize the new X axis (note: |x[2]| = |x[0]|) nuclear@14: len = sqrtf(2.0 * x[0] * x[0] + x[1] * x[1]); nuclear@14: if(len > 1.0e-20f) nuclear@14: { nuclear@14: len = 1.0f / len; nuclear@14: x[0] *= len; nuclear@14: x[1] *= len; nuclear@14: x[2] *= len; nuclear@14: } nuclear@14: nuclear@14: // Let Y = Z x X (no normalization needed, since |Z| = |X| = 1) nuclear@14: y[0] = z[1] * x[2] - z[2] * x[1]; nuclear@14: y[1] = z[2] * x[0] - z[0] * x[2]; nuclear@14: y[2] = z[0] * x[1] - z[1] * x[0]; nuclear@14: } nuclear@14: nuclear@14: //----------------------------------------------------------------------------- nuclear@14: // _ctmMakeNormalDeltas() - Convert the normals to a new coordinate system: nuclear@14: // magnitude, phi, theta (relative to predicted smooth normals). 
nuclear@14: //----------------------------------------------------------------------------- nuclear@14: static CTMint _ctmMakeNormalDeltas(_CTMcontext * self, CTMint * aIntNormals, nuclear@14: CTMfloat * aVertices, CTMuint * aIndices, _CTMsortvertex * aSortVertices) nuclear@14: { nuclear@14: CTMuint i, j, oldIdx, intPhi; nuclear@14: CTMfloat magn, phi, theta, scale, thetaScale; nuclear@14: CTMfloat * smoothNormals, n[3], n2[3], basisAxes[9]; nuclear@14: nuclear@14: // Allocate temporary memory for the nominal vertex normals nuclear@14: smoothNormals = (CTMfloat *) malloc(3 * sizeof(CTMfloat) * self->mVertexCount); nuclear@14: if(!smoothNormals) nuclear@14: { nuclear@14: self->mError = CTM_OUT_OF_MEMORY; nuclear@14: return CTM_FALSE; nuclear@14: } nuclear@14: nuclear@14: // Calculate smooth normals (Note: aVertices and aIndices use the sorted nuclear@14: // index space, so smoothNormals will too) nuclear@14: _ctmCalcSmoothNormals(self, aVertices, aIndices, smoothNormals); nuclear@14: nuclear@14: // Normal scaling factor nuclear@14: scale = 1.0f / self->mNormalPrecision; nuclear@14: nuclear@14: for(i = 0; i < self->mVertexCount; ++ i) nuclear@14: { nuclear@14: // Get old normal index (before vertex sorting) nuclear@14: oldIdx = aSortVertices[i].mOriginalIndex; nuclear@14: nuclear@14: // Calculate normal magnitude (should always be 1.0 for unit length normals) nuclear@14: magn = sqrtf(self->mNormals[oldIdx * 3] * self->mNormals[oldIdx * 3] + nuclear@14: self->mNormals[oldIdx * 3 + 1] * self->mNormals[oldIdx * 3 + 1] + nuclear@14: self->mNormals[oldIdx * 3 + 2] * self->mNormals[oldIdx * 3 + 2]); nuclear@14: if(magn < 1e-10f) nuclear@14: magn = 1.0f; nuclear@14: nuclear@14: // Invert magnitude if the normal is negative compared to the predicted nuclear@14: // smooth normal nuclear@14: if((smoothNormals[i * 3] * self->mNormals[oldIdx * 3] + nuclear@14: smoothNormals[i * 3 + 1] * self->mNormals[oldIdx * 3 + 1] + nuclear@14: smoothNormals[i * 3 + 2] * self->mNormals[oldIdx 
* 3 + 2]) < 0.0f) nuclear@14: magn = -magn; nuclear@14: nuclear@14: // Store the magnitude in the first element of the three normal elements nuclear@14: aIntNormals[i * 3] = (CTMint) floorf(scale * magn + 0.5f); nuclear@14: nuclear@14: // Normalize the normal (1 / magn) - and flip it if magn < 0 nuclear@14: magn = 1.0f / magn; nuclear@14: for(j = 0; j < 3; ++ j) nuclear@14: n[j] = self->mNormals[oldIdx * 3 + j] * magn; nuclear@14: nuclear@14: // Convert the normal to angular representation (phi, theta) in a coordinate nuclear@14: // system where the nominal (smooth) normal is the Z-axis nuclear@14: _ctmMakeNormalCoordSys(&smoothNormals[i * 3], basisAxes); nuclear@14: for(j = 0; j < 3; ++ j) nuclear@14: n2[j] = basisAxes[j * 3] * n[0] + nuclear@14: basisAxes[j * 3 + 1] * n[1] + nuclear@14: basisAxes[j * 3 + 2] * n[2]; nuclear@14: if(n2[2] >= 1.0f) nuclear@14: phi = 0.0f; nuclear@14: else nuclear@14: phi = acosf(n2[2]); nuclear@14: theta = atan2f(n2[1], n2[0]); nuclear@14: nuclear@14: // Round phi and theta (spherical coordinates) to integers. Note: We let the nuclear@14: // theta resolution vary with the x/y circumference (roughly phi). nuclear@14: intPhi = (CTMint) floorf(phi * (scale / (0.5f * PI)) + 0.5f); nuclear@14: if(intPhi == 0) nuclear@14: thetaScale = 0.0f; nuclear@14: else if(intPhi <= 4) nuclear@14: thetaScale = 2.0f / PI; nuclear@14: else nuclear@14: thetaScale = ((CTMfloat) intPhi) / (2.0f * PI); nuclear@14: aIntNormals[i * 3 + 1] = intPhi; nuclear@14: aIntNormals[i * 3 + 2] = (CTMint) floorf((theta + PI) * thetaScale + 0.5f); nuclear@14: } nuclear@14: nuclear@14: // Free temporary resources nuclear@14: free(smoothNormals); nuclear@14: nuclear@14: return CTM_TRUE; nuclear@14: } nuclear@14: nuclear@14: //----------------------------------------------------------------------------- nuclear@14: // _ctmRestoreNormals() - Convert the normals back to cartesian coordinates. 
nuclear@14: //----------------------------------------------------------------------------- nuclear@14: static CTMint _ctmRestoreNormals(_CTMcontext * self, CTMint * aIntNormals) nuclear@14: { nuclear@14: CTMuint i, j, intPhi; nuclear@14: CTMfloat magn, phi, theta, scale, thetaScale; nuclear@14: CTMfloat * smoothNormals, n[3], n2[3], basisAxes[9]; nuclear@14: nuclear@14: // Allocate temporary memory for the nominal vertex normals nuclear@14: smoothNormals = (CTMfloat *) malloc(3 * sizeof(CTMfloat) * self->mVertexCount); nuclear@14: if(!smoothNormals) nuclear@14: { nuclear@14: self->mError = CTM_OUT_OF_MEMORY; nuclear@14: return CTM_FALSE; nuclear@14: } nuclear@14: nuclear@14: // Calculate smooth normals (nominal normals) nuclear@14: _ctmCalcSmoothNormals(self, self->mVertices, self->mIndices, smoothNormals); nuclear@14: nuclear@14: // Normal scaling factor nuclear@14: scale = self->mNormalPrecision; nuclear@14: nuclear@14: for(i = 0; i < self->mVertexCount; ++ i) nuclear@14: { nuclear@14: // Get the normal magnitude from the first of the three normal elements nuclear@14: magn = aIntNormals[i * 3] * scale; nuclear@14: nuclear@14: // Get phi and theta (spherical coordinates, relative to the smooth normal). 
nuclear@14: intPhi = aIntNormals[i * 3 + 1]; nuclear@14: phi = intPhi * (0.5f * PI) * scale; nuclear@14: if(intPhi == 0) nuclear@14: thetaScale = 0.0f; nuclear@14: else if(intPhi <= 4) nuclear@14: thetaScale = PI / 2.0f; nuclear@14: else nuclear@14: thetaScale = (2.0f * PI) / ((CTMfloat) intPhi); nuclear@14: theta = aIntNormals[i * 3 + 2] * thetaScale - PI; nuclear@14: nuclear@14: // Convert the normal from the angular representation (phi, theta) back to nuclear@14: // cartesian coordinates nuclear@14: n2[0] = sinf(phi) * cosf(theta); nuclear@14: n2[1] = sinf(phi) * sinf(theta); nuclear@14: n2[2] = cosf(phi); nuclear@14: _ctmMakeNormalCoordSys(&smoothNormals[i * 3], basisAxes); nuclear@14: for(j = 0; j < 3; ++ j) nuclear@14: n[j] = basisAxes[j] * n2[0] + nuclear@14: basisAxes[3 + j] * n2[1] + nuclear@14: basisAxes[6 + j] * n2[2]; nuclear@14: nuclear@14: // Apply normal magnitude, and output to the normals array nuclear@14: for(j = 0; j < 3; ++ j) nuclear@14: self->mNormals[i * 3 + j] = n[j] * magn; nuclear@14: } nuclear@14: nuclear@14: // Free temporary resources nuclear@14: free(smoothNormals); nuclear@14: nuclear@14: return CTM_TRUE; nuclear@14: } nuclear@14: nuclear@14: //----------------------------------------------------------------------------- nuclear@14: // _ctmMakeUVCoordDeltas() - Calculate various forms of derivatives in order nuclear@14: // to reduce data entropy. 
nuclear@14: //----------------------------------------------------------------------------- nuclear@14: static void _ctmMakeUVCoordDeltas(_CTMcontext * self, _CTMfloatmap * aMap, nuclear@14: CTMint * aIntUVCoords, _CTMsortvertex * aSortVertices) nuclear@14: { nuclear@14: CTMuint i, oldIdx; nuclear@14: CTMint u, v, prevU, prevV; nuclear@14: CTMfloat scale; nuclear@14: nuclear@14: // UV coordinate scaling factor nuclear@14: scale = 1.0f / aMap->mPrecision; nuclear@14: nuclear@14: prevU = prevV = 0; nuclear@14: for(i = 0; i < self->mVertexCount; ++ i) nuclear@14: { nuclear@14: // Get old UV coordinate index (before vertex sorting) nuclear@14: oldIdx = aSortVertices[i].mOriginalIndex; nuclear@14: nuclear@14: // Convert to fixed point nuclear@14: u = (CTMint) floorf(scale * aMap->mValues[oldIdx * 2] + 0.5f); nuclear@14: v = (CTMint) floorf(scale * aMap->mValues[oldIdx * 2 + 1] + 0.5f); nuclear@14: nuclear@14: // Calculate delta and store it in the converted array. NOTE: Here we rely nuclear@14: // on the fact that vertices are sorted, and usually close to each other, nuclear@14: // which means that UV coordinates should also be close to each other... nuclear@14: aIntUVCoords[i * 2] = u - prevU; nuclear@14: aIntUVCoords[i * 2 + 1] = v - prevV; nuclear@14: nuclear@14: prevU = u; nuclear@14: prevV = v; nuclear@14: } nuclear@14: } nuclear@14: nuclear@14: //----------------------------------------------------------------------------- nuclear@14: // _ctmRestoreUVCoords() - Calculate inverse derivatives of the UV nuclear@14: // coordinates. 
nuclear@14: //----------------------------------------------------------------------------- nuclear@14: static void _ctmRestoreUVCoords(_CTMcontext * self, _CTMfloatmap * aMap, nuclear@14: CTMint * aIntUVCoords) nuclear@14: { nuclear@14: CTMuint i; nuclear@14: CTMint u, v, prevU, prevV; nuclear@14: CTMfloat scale; nuclear@14: nuclear@14: // UV coordinate scaling factor nuclear@14: scale = aMap->mPrecision; nuclear@14: nuclear@14: prevU = prevV = 0; nuclear@14: for(i = 0; i < self->mVertexCount; ++ i) nuclear@14: { nuclear@14: // Calculate inverse delta nuclear@14: u = aIntUVCoords[i * 2] + prevU; nuclear@14: v = aIntUVCoords[i * 2 + 1] + prevV; nuclear@14: nuclear@14: // Convert to floating point nuclear@14: aMap->mValues[i * 2] = (CTMfloat) u * scale; nuclear@14: aMap->mValues[i * 2 + 1] = (CTMfloat) v * scale; nuclear@14: nuclear@14: prevU = u; nuclear@14: prevV = v; nuclear@14: } nuclear@14: } nuclear@14: nuclear@14: //----------------------------------------------------------------------------- nuclear@14: // _ctmMakeAttribDeltas() - Calculate various forms of derivatives in order nuclear@14: // to reduce data entropy. nuclear@14: //----------------------------------------------------------------------------- nuclear@14: static void _ctmMakeAttribDeltas(_CTMcontext * self, _CTMfloatmap * aMap, nuclear@14: CTMint * aIntAttribs, _CTMsortvertex * aSortVertices) nuclear@14: { nuclear@14: CTMuint i, j, oldIdx; nuclear@14: CTMint value[4], prev[4]; nuclear@14: CTMfloat scale; nuclear@14: nuclear@14: // Attribute scaling factor nuclear@14: scale = 1.0f / aMap->mPrecision; nuclear@14: nuclear@14: for(j = 0; j < 4; ++ j) nuclear@14: prev[j] = 0; nuclear@14: nuclear@14: for(i = 0; i < self->mVertexCount; ++ i) nuclear@14: { nuclear@14: // Get old attribute index (before vertex sorting) nuclear@14: oldIdx = aSortVertices[i].mOriginalIndex; nuclear@14: nuclear@14: // Convert to fixed point, and calculate delta and store it in the converted nuclear@14: // array. 
NOTE: Here we rely on the fact that vertices are sorted, and nuclear@14: // usually close to each other, which means that attributes should also nuclear@14: // be close to each other (and we assume that they somehow vary slowly with nuclear@14: // the geometry)... nuclear@14: for(j = 0; j < 4; ++ j) nuclear@14: { nuclear@14: value[j] = (CTMint) floorf(scale * aMap->mValues[oldIdx * 4 + j] + 0.5f); nuclear@14: aIntAttribs[i * 4 + j] = value[j] - prev[j]; nuclear@14: prev[j] = value[j]; nuclear@14: } nuclear@14: } nuclear@14: } nuclear@14: nuclear@14: //----------------------------------------------------------------------------- nuclear@14: // _ctmRestoreAttribs() - Calculate inverse derivatives of the vertex nuclear@14: // attributes. nuclear@14: //----------------------------------------------------------------------------- nuclear@14: static void _ctmRestoreAttribs(_CTMcontext * self, _CTMfloatmap * aMap, nuclear@14: CTMint * aIntAttribs) nuclear@14: { nuclear@14: CTMuint i, j; nuclear@14: CTMint value[4], prev[4]; nuclear@14: CTMfloat scale; nuclear@14: nuclear@14: // Attribute scaling factor nuclear@14: scale = aMap->mPrecision; nuclear@14: nuclear@14: for(j = 0; j < 4; ++ j) nuclear@14: prev[j] = 0; nuclear@14: nuclear@14: for(i = 0; i < self->mVertexCount; ++ i) nuclear@14: { nuclear@14: // Calculate inverse delta, and convert to floating point nuclear@14: for(j = 0; j < 4; ++ j) nuclear@14: { nuclear@14: value[j] = aIntAttribs[i * 4 + j] + prev[j]; nuclear@14: aMap->mValues[i * 4 + j] = (CTMfloat) value[j] * scale; nuclear@14: prev[j] = value[j]; nuclear@14: } nuclear@14: } nuclear@14: } nuclear@14: nuclear@14: //----------------------------------------------------------------------------- nuclear@14: // _ctmCompressMesh_MG2() - Compress the mesh that is stored in the CTM nuclear@14: // context, and write it the the output stream in the CTM context. 
nuclear@14: //----------------------------------------------------------------------------- nuclear@14: int _ctmCompressMesh_MG2(_CTMcontext * self) nuclear@14: { nuclear@14: _CTMgrid grid; nuclear@14: _CTMsortvertex * sortVertices; nuclear@14: _CTMfloatmap * map; nuclear@14: CTMuint * indices, * deltaIndices, * gridIndices; nuclear@14: CTMint * intVertices, * intNormals, * intUVCoords, * intAttribs; nuclear@14: CTMfloat * restoredVertices; nuclear@14: CTMuint i; nuclear@14: nuclear@14: #ifdef __DEBUG_ nuclear@14: printf("COMPRESSION METHOD: MG2\n"); nuclear@14: #endif nuclear@14: nuclear@14: // Setup 3D space subdivision grid nuclear@14: _ctmSetupGrid(self, &grid); nuclear@14: nuclear@14: // Write MG2-specific header information to the stream nuclear@14: _ctmStreamWrite(self, (void *) "MG2H", 4); nuclear@14: _ctmStreamWriteFLOAT(self, self->mVertexPrecision); nuclear@14: _ctmStreamWriteFLOAT(self, self->mNormalPrecision); nuclear@14: _ctmStreamWriteFLOAT(self, grid.mMin[0]); nuclear@14: _ctmStreamWriteFLOAT(self, grid.mMin[1]); nuclear@14: _ctmStreamWriteFLOAT(self, grid.mMin[2]); nuclear@14: _ctmStreamWriteFLOAT(self, grid.mMax[0]); nuclear@14: _ctmStreamWriteFLOAT(self, grid.mMax[1]); nuclear@14: _ctmStreamWriteFLOAT(self, grid.mMax[2]); nuclear@14: _ctmStreamWriteUINT(self, grid.mDivision[0]); nuclear@14: _ctmStreamWriteUINT(self, grid.mDivision[1]); nuclear@14: _ctmStreamWriteUINT(self, grid.mDivision[2]); nuclear@14: nuclear@14: // Prepare (sort) vertices nuclear@14: sortVertices = (_CTMsortvertex *) malloc(sizeof(_CTMsortvertex) * self->mVertexCount); nuclear@14: if(!sortVertices) nuclear@14: { nuclear@14: self->mError = CTM_OUT_OF_MEMORY; nuclear@14: return CTM_FALSE; nuclear@14: } nuclear@14: _ctmSortVertices(self, sortVertices, &grid); nuclear@14: nuclear@14: // Convert vertices to integers and calculate vertex deltas (entropy-reduction) nuclear@14: intVertices = (CTMint *) malloc(sizeof(CTMint) * 3 * self->mVertexCount); nuclear@14: if(!intVertices) 
nuclear@14: { nuclear@14: self->mError = CTM_OUT_OF_MEMORY; nuclear@14: free((void *) sortVertices); nuclear@14: return CTM_FALSE; nuclear@14: } nuclear@14: _ctmMakeVertexDeltas(self, intVertices, sortVertices, &grid); nuclear@14: nuclear@14: // Write vertices nuclear@14: #ifdef __DEBUG_ nuclear@14: printf("Vertices: "); nuclear@14: #endif nuclear@14: _ctmStreamWrite(self, (void *) "VERT", 4); nuclear@14: if(!_ctmStreamWritePackedInts(self, intVertices, self->mVertexCount, 3, CTM_FALSE)) nuclear@14: { nuclear@14: free((void *) intVertices); nuclear@14: free((void *) sortVertices); nuclear@14: return CTM_FALSE; nuclear@14: } nuclear@14: nuclear@14: // Prepare grid indices (deltas) nuclear@14: gridIndices = (CTMuint *) malloc(sizeof(CTMuint) * self->mVertexCount); nuclear@14: if(!gridIndices) nuclear@14: { nuclear@14: self->mError = CTM_OUT_OF_MEMORY; nuclear@14: free((void *) intVertices); nuclear@14: free((void *) sortVertices); nuclear@14: return CTM_FALSE; nuclear@14: } nuclear@14: gridIndices[0] = sortVertices[0].mGridIndex; nuclear@14: for(i = 1; i < self->mVertexCount; ++ i) nuclear@14: gridIndices[i] = sortVertices[i].mGridIndex - sortVertices[i - 1].mGridIndex; nuclear@14: nuclear@14: // Write grid indices nuclear@14: #ifdef __DEBUG_ nuclear@14: printf("Grid indices: "); nuclear@14: #endif nuclear@14: _ctmStreamWrite(self, (void *) "GIDX", 4); nuclear@14: if(!_ctmStreamWritePackedInts(self, (CTMint *) gridIndices, self->mVertexCount, 1, CTM_FALSE)) nuclear@14: { nuclear@14: free((void *) gridIndices); nuclear@14: free((void *) intVertices); nuclear@14: free((void *) sortVertices); nuclear@14: return CTM_FALSE; nuclear@14: } nuclear@14: nuclear@14: // Calculate the result of the compressed -> decompressed vertices, in order nuclear@14: // to use the same vertex data for calculating nominal normals as the nuclear@14: // decompression routine (i.e. 
compensate for the vertex error when nuclear@14: // calculating the normals) nuclear@14: restoredVertices = (CTMfloat *) malloc(sizeof(CTMfloat) * 3 * self->mVertexCount); nuclear@14: if(!restoredVertices) nuclear@14: { nuclear@14: self->mError = CTM_OUT_OF_MEMORY; nuclear@14: free((void *) gridIndices); nuclear@14: free((void *) intVertices); nuclear@14: free((void *) sortVertices); nuclear@14: return CTM_FALSE; nuclear@14: } nuclear@14: for(i = 1; i < self->mVertexCount; ++ i) nuclear@14: gridIndices[i] += gridIndices[i - 1]; nuclear@14: _ctmRestoreVertices(self, intVertices, gridIndices, &grid, restoredVertices); nuclear@14: nuclear@14: // Free temporary resources nuclear@14: free((void *) gridIndices); nuclear@14: free((void *) intVertices); nuclear@14: nuclear@14: // Perpare (sort) indices nuclear@14: indices = (CTMuint *) malloc(sizeof(CTMuint) * self->mTriangleCount * 3); nuclear@14: if(!indices) nuclear@14: { nuclear@14: self->mError = CTM_OUT_OF_MEMORY; nuclear@14: free((void *) restoredVertices); nuclear@14: free((void *) sortVertices); nuclear@14: return CTM_FALSE; nuclear@14: } nuclear@14: if(!_ctmReIndexIndices(self, sortVertices, indices)) nuclear@14: { nuclear@14: free((void *) indices); nuclear@14: free((void *) restoredVertices); nuclear@14: free((void *) sortVertices); nuclear@14: return CTM_FALSE; nuclear@14: } nuclear@14: _ctmReArrangeTriangles(self, indices); nuclear@14: nuclear@14: // Calculate index deltas (entropy-reduction) nuclear@14: deltaIndices = (CTMuint *) malloc(sizeof(CTMuint) * self->mTriangleCount * 3); nuclear@14: if(!indices) nuclear@14: { nuclear@14: self->mError = CTM_OUT_OF_MEMORY; nuclear@14: free((void *) indices); nuclear@14: free((void *) restoredVertices); nuclear@14: free((void *) sortVertices); nuclear@14: return CTM_FALSE; nuclear@14: } nuclear@14: for(i = 0; i < self->mTriangleCount * 3; ++ i) nuclear@14: deltaIndices[i] = indices[i]; nuclear@14: _ctmMakeIndexDeltas(self, deltaIndices); nuclear@14: nuclear@14: // 
Write triangle indices nuclear@14: #ifdef __DEBUG_ nuclear@14: printf("Indices: "); nuclear@14: #endif nuclear@14: _ctmStreamWrite(self, (void *) "INDX", 4); nuclear@14: if(!_ctmStreamWritePackedInts(self, (CTMint *) deltaIndices, self->mTriangleCount, 3, CTM_FALSE)) nuclear@14: { nuclear@14: free((void *) deltaIndices); nuclear@14: free((void *) indices); nuclear@14: free((void *) restoredVertices); nuclear@14: free((void *) sortVertices); nuclear@14: return CTM_FALSE; nuclear@14: } nuclear@14: nuclear@14: // Free temporary data for the indices nuclear@14: free((void *) deltaIndices); nuclear@14: nuclear@14: if(self->mNormals) nuclear@14: { nuclear@14: // Convert normals to integers and calculate deltas (entropy-reduction) nuclear@14: intNormals = (CTMint *) malloc(sizeof(CTMint) * 3 * self->mVertexCount); nuclear@14: if(!intNormals) nuclear@14: { nuclear@14: self->mError = CTM_OUT_OF_MEMORY; nuclear@14: free((void *) indices); nuclear@14: free((void *) restoredVertices); nuclear@14: free((void *) sortVertices); nuclear@14: return CTM_FALSE; nuclear@14: } nuclear@14: if(!_ctmMakeNormalDeltas(self, intNormals, restoredVertices, indices, sortVertices)) nuclear@14: { nuclear@14: free((void *) indices); nuclear@14: free((void *) intNormals); nuclear@14: free((void *) restoredVertices); nuclear@14: free((void *) sortVertices); nuclear@14: return CTM_FALSE; nuclear@14: } nuclear@14: nuclear@14: // Write normals nuclear@14: #ifdef __DEBUG_ nuclear@14: printf("Normals: "); nuclear@14: #endif nuclear@14: _ctmStreamWrite(self, (void *) "NORM", 4); nuclear@14: if(!_ctmStreamWritePackedInts(self, intNormals, self->mVertexCount, 3, CTM_FALSE)) nuclear@14: { nuclear@14: free((void *) indices); nuclear@14: free((void *) intNormals); nuclear@14: free((void *) restoredVertices); nuclear@14: free((void *) sortVertices); nuclear@14: return CTM_FALSE; nuclear@14: } nuclear@14: nuclear@14: // Free temporary normal data nuclear@14: free((void *) intNormals); nuclear@14: } nuclear@14: 
nuclear@14: // Free restored indices and vertices nuclear@14: free((void *) indices); nuclear@14: free((void *) restoredVertices); nuclear@14: nuclear@14: // Write UV maps nuclear@14: map = self->mUVMaps; nuclear@14: while(map) nuclear@14: { nuclear@14: // Convert UV coordinates to integers and calculate deltas (entropy-reduction) nuclear@14: intUVCoords = (CTMint *) malloc(sizeof(CTMint) * 2 * self->mVertexCount); nuclear@14: if(!intUVCoords) nuclear@14: { nuclear@14: self->mError = CTM_OUT_OF_MEMORY; nuclear@14: free((void *) sortVertices); nuclear@14: return CTM_FALSE; nuclear@14: } nuclear@14: _ctmMakeUVCoordDeltas(self, map, intUVCoords, sortVertices); nuclear@14: nuclear@14: // Write UV coordinates nuclear@14: #ifdef __DEBUG_ nuclear@14: printf("Texture coordinates (%s): ", map->mName ? map->mName : "no name"); nuclear@14: #endif nuclear@14: _ctmStreamWrite(self, (void *) "TEXC", 4); nuclear@14: _ctmStreamWriteSTRING(self, map->mName); nuclear@14: _ctmStreamWriteSTRING(self, map->mFileName); nuclear@14: _ctmStreamWriteFLOAT(self, map->mPrecision); nuclear@14: if(!_ctmStreamWritePackedInts(self, intUVCoords, self->mVertexCount, 2, CTM_TRUE)) nuclear@14: { nuclear@14: free((void *) intUVCoords); nuclear@14: free((void *) sortVertices); nuclear@14: return CTM_FALSE; nuclear@14: } nuclear@14: nuclear@14: // Free temporary UV coordinate data nuclear@14: free((void *) intUVCoords); nuclear@14: nuclear@14: map = map->mNext; nuclear@14: } nuclear@14: nuclear@14: // Write vertex attribute maps nuclear@14: map = self->mAttribMaps; nuclear@14: while(map) nuclear@14: { nuclear@14: // Convert vertex attributes to integers and calculate deltas (entropy-reduction) nuclear@14: intAttribs = (CTMint *) malloc(sizeof(CTMint) * 4 * self->mVertexCount); nuclear@14: if(!intAttribs) nuclear@14: { nuclear@14: self->mError = CTM_OUT_OF_MEMORY; nuclear@14: free((void *) sortVertices); nuclear@14: return CTM_FALSE; nuclear@14: } nuclear@14: _ctmMakeAttribDeltas(self, map, intAttribs, 
sortVertices); nuclear@14: nuclear@14: // Write vertex attributes nuclear@14: #ifdef __DEBUG_ nuclear@14: printf("Vertex attributes (%s): ", map->mName ? map->mName : "no name"); nuclear@14: #endif nuclear@14: _ctmStreamWrite(self, (void *) "ATTR", 4); nuclear@14: _ctmStreamWriteSTRING(self, map->mName); nuclear@14: _ctmStreamWriteFLOAT(self, map->mPrecision); nuclear@14: if(!_ctmStreamWritePackedInts(self, intAttribs, self->mVertexCount, 4, CTM_TRUE)) nuclear@14: { nuclear@14: free((void *) intAttribs); nuclear@14: free((void *) sortVertices); nuclear@14: return CTM_FALSE; nuclear@14: } nuclear@14: nuclear@14: // Free temporary vertex attribute data nuclear@14: free((void *) intAttribs); nuclear@14: nuclear@14: map = map->mNext; nuclear@14: } nuclear@14: nuclear@14: // Free temporary data nuclear@14: free((void *) sortVertices); nuclear@14: nuclear@14: return CTM_TRUE; nuclear@14: } nuclear@14: nuclear@14: //----------------------------------------------------------------------------- nuclear@14: // _ctmUncompressMesh_MG2() - Uncmpress the mesh from the input stream in the nuclear@14: // CTM context, and store the resulting mesh in the CTM context. 
nuclear@14: //----------------------------------------------------------------------------- nuclear@14: int _ctmUncompressMesh_MG2(_CTMcontext * self) nuclear@14: { nuclear@14: CTMuint * gridIndices, i; nuclear@14: CTMint * intVertices, * intNormals, * intUVCoords, * intAttribs; nuclear@14: _CTMfloatmap * map; nuclear@14: _CTMgrid grid; nuclear@14: nuclear@14: // Read MG2-specific header information from the stream nuclear@14: if(_ctmStreamReadUINT(self) != FOURCC("MG2H")) nuclear@14: { nuclear@14: self->mError = CTM_BAD_FORMAT; nuclear@14: return CTM_FALSE; nuclear@14: } nuclear@14: self->mVertexPrecision = _ctmStreamReadFLOAT(self); nuclear@14: if(self->mVertexPrecision <= 0.0f) nuclear@14: { nuclear@14: self->mError = CTM_BAD_FORMAT; nuclear@14: return CTM_FALSE; nuclear@14: } nuclear@14: self->mNormalPrecision = _ctmStreamReadFLOAT(self); nuclear@14: if(self->mNormalPrecision <= 0.0f) nuclear@14: { nuclear@14: self->mError = CTM_BAD_FORMAT; nuclear@14: return CTM_FALSE; nuclear@14: } nuclear@14: grid.mMin[0] = _ctmStreamReadFLOAT(self); nuclear@14: grid.mMin[1] = _ctmStreamReadFLOAT(self); nuclear@14: grid.mMin[2] = _ctmStreamReadFLOAT(self); nuclear@14: grid.mMax[0] = _ctmStreamReadFLOAT(self); nuclear@14: grid.mMax[1] = _ctmStreamReadFLOAT(self); nuclear@14: grid.mMax[2] = _ctmStreamReadFLOAT(self); nuclear@14: if((grid.mMax[0] < grid.mMin[0]) || nuclear@14: (grid.mMax[1] < grid.mMin[1]) || nuclear@14: (grid.mMax[2] < grid.mMin[2])) nuclear@14: { nuclear@14: self->mError = CTM_BAD_FORMAT; nuclear@14: return CTM_FALSE; nuclear@14: } nuclear@14: grid.mDivision[0] = _ctmStreamReadUINT(self); nuclear@14: grid.mDivision[1] = _ctmStreamReadUINT(self); nuclear@14: grid.mDivision[2] = _ctmStreamReadUINT(self); nuclear@14: if((grid.mDivision[0] < 1) || (grid.mDivision[1] < 1) || (grid.mDivision[2] < 1)) nuclear@14: { nuclear@14: self->mError = CTM_BAD_FORMAT; nuclear@14: return CTM_FALSE; nuclear@14: } nuclear@14: nuclear@14: // Initialize 3D space subdivision grid 
nuclear@14: for(i = 0; i < 3; ++ i) nuclear@14: grid.mSize[i] = (grid.mMax[i] - grid.mMin[i]) / grid.mDivision[i]; nuclear@14: nuclear@14: // Read vertices nuclear@14: if(_ctmStreamReadUINT(self) != FOURCC("VERT")) nuclear@14: { nuclear@14: self->mError = CTM_BAD_FORMAT; nuclear@14: return CTM_FALSE; nuclear@14: } nuclear@14: intVertices = (CTMint *) malloc(sizeof(CTMint) * self->mVertexCount * 3); nuclear@14: if(!intVertices) nuclear@14: { nuclear@14: self->mError = CTM_OUT_OF_MEMORY; nuclear@14: return CTM_FALSE; nuclear@14: } nuclear@14: if(!_ctmStreamReadPackedInts(self, intVertices, self->mVertexCount, 3, CTM_FALSE)) nuclear@14: { nuclear@14: free((void *) intVertices); nuclear@14: return CTM_FALSE; nuclear@14: } nuclear@14: nuclear@14: // Read grid indices nuclear@14: if(_ctmStreamReadUINT(self) != FOURCC("GIDX")) nuclear@14: { nuclear@14: free((void *) intVertices); nuclear@14: self->mError = CTM_BAD_FORMAT; nuclear@14: return CTM_FALSE; nuclear@14: } nuclear@14: gridIndices = (CTMuint *) malloc(sizeof(CTMuint) * self->mVertexCount); nuclear@14: if(!gridIndices) nuclear@14: { nuclear@14: self->mError = CTM_OUT_OF_MEMORY; nuclear@14: free((void *) intVertices); nuclear@14: return CTM_FALSE; nuclear@14: } nuclear@14: if(!_ctmStreamReadPackedInts(self, (CTMint *) gridIndices, self->mVertexCount, 1, CTM_FALSE)) nuclear@14: { nuclear@14: free((void *) gridIndices); nuclear@14: free((void *) intVertices); nuclear@14: return CTM_FALSE; nuclear@14: } nuclear@14: nuclear@14: // Restore grid indices (deltas) nuclear@14: for(i = 1; i < self->mVertexCount; ++ i) nuclear@14: gridIndices[i] += gridIndices[i - 1]; nuclear@14: nuclear@14: // Restore vertices nuclear@14: _ctmRestoreVertices(self, intVertices, gridIndices, &grid, self->mVertices); nuclear@14: nuclear@14: // Free temporary resources nuclear@14: free((void *) gridIndices); nuclear@14: free((void *) intVertices); nuclear@14: nuclear@14: // Read triangle indices nuclear@14: if(_ctmStreamReadUINT(self) != 
FOURCC("INDX")) nuclear@14: { nuclear@14: self->mError = CTM_BAD_FORMAT; nuclear@14: return CTM_FALSE; nuclear@14: } nuclear@14: if(!_ctmStreamReadPackedInts(self, (CTMint *) self->mIndices, self->mTriangleCount, 3, CTM_FALSE)) nuclear@14: return CTM_FALSE; nuclear@14: nuclear@14: // Restore indices nuclear@14: _ctmRestoreIndices(self, self->mIndices); nuclear@14: nuclear@14: // Check that all indices are within range nuclear@14: for(i = 0; i < (self->mTriangleCount * 3); ++ i) nuclear@14: { nuclear@14: if(self->mIndices[i] >= self->mVertexCount) nuclear@14: { nuclear@14: self->mError = CTM_INVALID_MESH; nuclear@14: return CTM_FALSE; nuclear@14: } nuclear@14: } nuclear@14: nuclear@14: // Read normals nuclear@14: if(self->mNormals) nuclear@14: { nuclear@14: intNormals = (CTMint *) malloc(sizeof(CTMint) * self->mVertexCount * 3); nuclear@14: if(!intNormals) nuclear@14: { nuclear@14: self->mError = CTM_OUT_OF_MEMORY; nuclear@14: return CTM_FALSE; nuclear@14: } nuclear@14: if(_ctmStreamReadUINT(self) != FOURCC("NORM")) nuclear@14: { nuclear@14: self->mError = CTM_BAD_FORMAT; nuclear@14: free((void *) intNormals); nuclear@14: return CTM_FALSE; nuclear@14: } nuclear@14: if(!_ctmStreamReadPackedInts(self, intNormals, self->mVertexCount, 3, CTM_FALSE)) nuclear@14: { nuclear@14: free((void *) intNormals); nuclear@14: return CTM_FALSE; nuclear@14: } nuclear@14: nuclear@14: // Restore normals nuclear@14: if(!_ctmRestoreNormals(self, intNormals)) nuclear@14: { nuclear@14: free((void *) intNormals); nuclear@14: return CTM_FALSE; nuclear@14: } nuclear@14: nuclear@14: // Free temporary normals data nuclear@14: free((void *) intNormals); nuclear@14: } nuclear@14: nuclear@14: // Read UV maps nuclear@14: map = self->mUVMaps; nuclear@14: while(map) nuclear@14: { nuclear@14: intUVCoords = (CTMint *) malloc(sizeof(CTMint) * self->mVertexCount * 2); nuclear@14: if(!intUVCoords) nuclear@14: { nuclear@14: self->mError = CTM_OUT_OF_MEMORY; nuclear@14: return CTM_FALSE; nuclear@14: } 
nuclear@14: if(_ctmStreamReadUINT(self) != FOURCC("TEXC")) nuclear@14: { nuclear@14: self->mError = CTM_BAD_FORMAT; nuclear@14: free((void *) intUVCoords); nuclear@14: return CTM_FALSE; nuclear@14: } nuclear@14: _ctmStreamReadSTRING(self, &map->mName); nuclear@14: _ctmStreamReadSTRING(self, &map->mFileName); nuclear@14: map->mPrecision = _ctmStreamReadFLOAT(self); nuclear@14: if(map->mPrecision <= 0.0f) nuclear@14: { nuclear@14: self->mError = CTM_BAD_FORMAT; nuclear@14: free((void *) intUVCoords); nuclear@14: return CTM_FALSE; nuclear@14: } nuclear@14: if(!_ctmStreamReadPackedInts(self, intUVCoords, self->mVertexCount, 2, CTM_TRUE)) nuclear@14: { nuclear@14: free((void *) intUVCoords); nuclear@14: return CTM_FALSE; nuclear@14: } nuclear@14: nuclear@14: // Restore UV coordinates nuclear@14: _ctmRestoreUVCoords(self, map, intUVCoords); nuclear@14: nuclear@14: // Free temporary UV coordinate data nuclear@14: free((void *) intUVCoords); nuclear@14: nuclear@14: map = map->mNext; nuclear@14: } nuclear@14: nuclear@14: // Read vertex attribute maps nuclear@14: map = self->mAttribMaps; nuclear@14: while(map) nuclear@14: { nuclear@14: intAttribs = (CTMint *) malloc(sizeof(CTMint) * self->mVertexCount * 4); nuclear@14: if(!intAttribs) nuclear@14: { nuclear@14: self->mError = CTM_OUT_OF_MEMORY; nuclear@14: return CTM_FALSE; nuclear@14: } nuclear@14: if(_ctmStreamReadUINT(self) != FOURCC("ATTR")) nuclear@14: { nuclear@14: self->mError = CTM_BAD_FORMAT; nuclear@14: free((void *) intAttribs); nuclear@14: return CTM_FALSE; nuclear@14: } nuclear@14: _ctmStreamReadSTRING(self, &map->mName); nuclear@14: map->mPrecision = _ctmStreamReadFLOAT(self); nuclear@14: if(map->mPrecision <= 0.0f) nuclear@14: { nuclear@14: self->mError = CTM_BAD_FORMAT; nuclear@14: free((void *) intAttribs); nuclear@14: return CTM_FALSE; nuclear@14: } nuclear@14: if(!_ctmStreamReadPackedInts(self, intAttribs, self->mVertexCount, 4, CTM_TRUE)) nuclear@14: { nuclear@14: free((void *) intAttribs); nuclear@14: 
return CTM_FALSE; nuclear@14: } nuclear@14: nuclear@14: // Restore vertex attributes nuclear@14: _ctmRestoreAttribs(self, map, intAttribs); nuclear@14: nuclear@14: // Free temporary vertex attribute data nuclear@14: free((void *) intAttribs); nuclear@14: nuclear@14: map = map->mNext; nuclear@14: } nuclear@14: nuclear@14: return CTM_TRUE; nuclear@14: }