//-----------------------------------------------------------------------------
// Product:     OpenCTM
// File:        compressMG2.c
// Description: Implementation of the MG2 compression method.
//-----------------------------------------------------------------------------
// Copyright (c) 2009-2010 Marcus Geelnard
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
//    claim that you wrote the original software. If you use this software
//    in a product, an acknowledgment in the product documentation would be
//    appreciated but is not required.
//
// 2. Altered source versions must be plainly marked as such, and must not
//    be misrepresented as being the original software.
//
// 3. This notice may not be removed or altered from any source
//    distribution.
//-----------------------------------------------------------------------------

#include <stdlib.h>
#include <math.h>
#include "openctm.h"
#include "internal.h"

#ifdef __DEBUG_
#include <stdio.h>
#endif

// We need PI
#ifndef PI
#define PI 3.141592653589793238462643f
#endif


//-----------------------------------------------------------------------------
// _CTMgrid - 3D space subdivision grid.
//-----------------------------------------------------------------------------
typedef struct {
  // Axis-aligned bounding box for the grid.
  CTMfloat mMin[3];
  CTMfloat mMax[3];

  // How many divisions per axis (minimum 1).
  CTMuint mDivision[3];

  // Size of each grid box.
  CTMfloat mSize[3];
} _CTMgrid;

//-----------------------------------------------------------------------------
// _CTMsortvertex - Vertex information.
//-----------------------------------------------------------------------------
typedef struct {
  // Vertex X coordinate (used for sorting).
  CTMfloat x;

  // Grid index. This is the index into the 3D space subdivision grid.
  CTMuint mGridIndex;

  // Original index (before sorting).
  CTMuint mOriginalIndex;
} _CTMsortvertex;

//-----------------------------------------------------------------------------
// _ctmSetupGrid() - Setup the 3D space subdivision grid.
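// For example: with 50,000 vertices the target cell count is
// wantedGrids = (100 * 50000)^(1/3), i.e. roughly 171 cells, which are then
// distributed over the three axes in proportion to the bounding-box extents
// (with at least one division per axis).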
//-----------------------------------------------------------------------------
static void _ctmSetupGrid(_CTMcontext * self, _CTMgrid * aGrid)
{
  CTMuint i;
  CTMfloat factor[3], sum, wantedGrids;

  // Calculate the mesh bounding box
  aGrid->mMin[0] = aGrid->mMax[0] = self->mVertices[0];
  aGrid->mMin[1] = aGrid->mMax[1] = self->mVertices[1];
  aGrid->mMin[2] = aGrid->mMax[2] = self->mVertices[2];
  for(i = 1; i < self->mVertexCount; ++ i)
  {
    if(self->mVertices[i * 3] < aGrid->mMin[0])
      aGrid->mMin[0] = self->mVertices[i * 3];
    else if(self->mVertices[i * 3] > aGrid->mMax[0])
      aGrid->mMax[0] = self->mVertices[i * 3];
    if(self->mVertices[i * 3 + 1] < aGrid->mMin[1])
      aGrid->mMin[1] = self->mVertices[i * 3 + 1];
    else if(self->mVertices[i * 3 + 1] > aGrid->mMax[1])
      aGrid->mMax[1] = self->mVertices[i * 3 + 1];
    if(self->mVertices[i * 3 + 2] < aGrid->mMin[2])
      aGrid->mMin[2] = self->mVertices[i * 3 + 2];
    else if(self->mVertices[i * 3 + 2] > aGrid->mMax[2])
      aGrid->mMax[2] = self->mVertices[i * 3 + 2];
  }

  // Determine optimal grid resolution, based on the number of vertices and
  // the bounding box.
  // NOTE: This algorithm is quite crude, and could very well be optimized for
  // better compression levels in the future without affecting the file format
  // or backward compatibility at all.
  for(i = 0; i < 3; ++ i)
    factor[i] = aGrid->mMax[i] - aGrid->mMin[i];
  sum = factor[0] + factor[1] + factor[2];
  if(sum > 1e-30f)
  {
    sum = 1.0f / sum;
    for(i = 0; i < 3; ++ i)
      factor[i] *= sum;
    wantedGrids = powf(100.0f * self->mVertexCount, 1.0f / 3.0f);
    for(i = 0; i < 3; ++ i)
    {
      aGrid->mDivision[i] = (CTMuint) ceilf(wantedGrids * factor[i]);
      if(aGrid->mDivision[i] < 1)
        aGrid->mDivision[i] = 1;
    }
  }
  else
  {
    aGrid->mDivision[0] = 4;
    aGrid->mDivision[1] = 4;
    aGrid->mDivision[2] = 4;
  }
#ifdef __DEBUG_
  printf("Division: (%d %d %d)\n", aGrid->mDivision[0], aGrid->mDivision[1], aGrid->mDivision[2]);
#endif

  // Calculate grid sizes
  for(i = 0; i < 3; ++ i)
    aGrid->mSize[i] = (aGrid->mMax[i] - aGrid->mMin[i]) / aGrid->mDivision[i];
}

//-----------------------------------------------------------------------------
// _ctmPointToGridIdx() - Convert a point to a grid index.
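// Example: with mDivision = {10, 10, 10}, a point that falls in cell
// (ix, iy, iz) = (3, 2, 5) gets the linear index 3 + 10 * (2 + 10 * 5) = 523.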
//-----------------------------------------------------------------------------
static CTMuint _ctmPointToGridIdx(_CTMgrid * aGrid, CTMfloat * aPoint)
{
  CTMuint i, idx[3];

  for(i = 0; i < 3; ++ i)
  {
    idx[i] = (CTMuint) floorf((aPoint[i] - aGrid->mMin[i]) / aGrid->mSize[i]);
    if(idx[i] >= aGrid->mDivision[i])
      idx[i] = aGrid->mDivision[i] - 1;
  }

  return idx[0] + aGrid->mDivision[0] * (idx[1] + aGrid->mDivision[1] * idx[2]);
}

//-----------------------------------------------------------------------------
// _ctmGridIdxToPoint() - Convert a grid index to a point (the min x/y/z for
// the given grid box).
//-----------------------------------------------------------------------------
static void _ctmGridIdxToPoint(_CTMgrid * aGrid, CTMuint aIdx, CTMfloat * aPoint)
{
  CTMuint gridIdx[3], zdiv, ydiv, i;

  zdiv = aGrid->mDivision[0] * aGrid->mDivision[1];
  ydiv = aGrid->mDivision[0];

  gridIdx[2] = aIdx / zdiv;
  aIdx -= gridIdx[2] * zdiv;
  gridIdx[1] = aIdx / ydiv;
  aIdx -= gridIdx[1] * ydiv;
  gridIdx[0] = aIdx;

  for(i = 0; i < 3; ++ i)
    aPoint[i] = gridIdx[i] * aGrid->mSize[i] + aGrid->mMin[i];
}

//-----------------------------------------------------------------------------
// _compareVertex() - Comparator for the vertex sorting.
//-----------------------------------------------------------------------------
static int _compareVertex(const void * elem1, const void * elem2)
{
  _CTMsortvertex * v1 = (_CTMsortvertex *) elem1;
  _CTMsortvertex * v2 = (_CTMsortvertex *) elem2;
  if(v1->mGridIndex != v2->mGridIndex)
    return v1->mGridIndex - v2->mGridIndex;
  else if(v1->x < v2->x)
    return -1;
  else if(v1->x > v2->x)
    return 1;
  else
    return 0;
}

//-----------------------------------------------------------------------------
// _ctmSortVertices() - Setup the vertex array. Assign each vertex to a grid
// box, and sort all vertices.
//-----------------------------------------------------------------------------
static void _ctmSortVertices(_CTMcontext * self, _CTMsortvertex * aSortVertices,
  _CTMgrid * aGrid)
{
  CTMuint i;

  // Prepare sort vertex array
  for(i = 0; i < self->mVertexCount; ++ i)
  {
    // Store vertex properties in the sort vertex array
    aSortVertices[i].x = self->mVertices[i * 3];
    aSortVertices[i].mGridIndex = _ctmPointToGridIdx(aGrid, &self->mVertices[i * 3]);
    aSortVertices[i].mOriginalIndex = i;
  }

  // Sort vertices. The elements are first sorted by their grid indices, and
  // secondly by their x coordinates.
  qsort((void *) aSortVertices, self->mVertexCount, sizeof(_CTMsortvertex), _compareVertex);
}

//-----------------------------------------------------------------------------
// _ctmReIndexIndices() - Re-index all indices, based on the sorted vertices.
//-----------------------------------------------------------------------------
static int _ctmReIndexIndices(_CTMcontext * self, _CTMsortvertex * aSortVertices,
  CTMuint * aIndices)
{
  CTMuint i, * indexLUT;

  // Create temporary lookup-array, O(n)
  indexLUT = (CTMuint *) malloc(sizeof(CTMuint) * self->mVertexCount);
  if(!indexLUT)
  {
    self->mError = CTM_OUT_OF_MEMORY;
    return CTM_FALSE;
  }
  for(i = 0; i < self->mVertexCount; ++ i)
    indexLUT[aSortVertices[i].mOriginalIndex] = i;

  // Convert old indices to new indices, O(n)
  for(i = 0; i < self->mTriangleCount * 3; ++ i)
    aIndices[i] = indexLUT[self->mIndices[i]];

  // Free temporary lookup-array
  free((void *) indexLUT);

  return CTM_TRUE;
}

//-----------------------------------------------------------------------------
// _compareTriangle() - Comparator for the triangle sorting.
//-----------------------------------------------------------------------------
static int _compareTriangle(const void * elem1, const void * elem2)
{
  CTMuint * tri1 = (CTMuint *) elem1;
  CTMuint * tri2 = (CTMuint *) elem2;
  if(tri1[0] != tri2[0])
    return tri1[0] - tri2[0];
  else
    return tri1[1] - tri2[1];
}

//-----------------------------------------------------------------------------
// _ctmReArrangeTriangles() - Re-arrange all triangles for optimal
// compression.
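// For instance, the triangle (7, 2, 5) is rotated to (2, 5, 7) in step 1
// below: the winding order is preserved, but the smallest index comes first,
// which makes the sort in step 2 and the index delta coding more effective.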
//-----------------------------------------------------------------------------
static void _ctmReArrangeTriangles(_CTMcontext * self, CTMuint * aIndices)
{
  CTMuint * tri, tmp, i;

  // Step 1: Make sure that the first index of each triangle is the smallest
  // one (rotate triangle nodes if necessary)
  for(i = 0; i < self->mTriangleCount; ++ i)
  {
    tri = &aIndices[i * 3];
    if((tri[1] < tri[0]) && (tri[1] < tri[2]))
    {
      tmp = tri[0];
      tri[0] = tri[1];
      tri[1] = tri[2];
      tri[2] = tmp;
    }
    else if((tri[2] < tri[0]) && (tri[2] < tri[1]))
    {
      tmp = tri[0];
      tri[0] = tri[2];
      tri[2] = tri[1];
      tri[1] = tmp;
    }
  }

  // Step 2: Sort the triangles based on the first triangle index
  qsort((void *) aIndices, self->mTriangleCount, sizeof(CTMuint) * 3, _compareTriangle);
}

//-----------------------------------------------------------------------------
// _ctmMakeIndexDeltas() - Calculate various forms of derivatives in order to
// reduce data entropy.
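// Example of the transform: the (rotated and sorted) triangle list
// (1, 5, 6), (1, 7, 8), (2, 9, 10) becomes (1, 4, 5), (0, 2, 7), (1, 7, 8) -
// small non-negative values that pack well.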
//-----------------------------------------------------------------------------
static void _ctmMakeIndexDeltas(_CTMcontext * self, CTMuint * aIndices)
{
  CTMint i;
  for(i = self->mTriangleCount - 1; i >= 0; -- i)
  {
    // Step 1: Calculate delta from second triangle index to the previous
    // second triangle index, if the previous triangle shares the same first
    // index, otherwise calculate the delta to the first triangle index
    if((i >= 1) && (aIndices[i * 3] == aIndices[(i - 1) * 3]))
      aIndices[i * 3 + 1] -= aIndices[(i - 1) * 3 + 1];
    else
      aIndices[i * 3 + 1] -= aIndices[i * 3];

    // Step 2: Calculate delta from third triangle index to the first triangle
    // index
    aIndices[i * 3 + 2] -= aIndices[i * 3];

    // Step 3: Calculate derivative of the first triangle index
    if(i >= 1)
      aIndices[i * 3] -= aIndices[(i - 1) * 3];
  }
}

//-----------------------------------------------------------------------------
// _ctmRestoreIndices() - Restore original indices (inverse derivative
// operation).
//-----------------------------------------------------------------------------
static void _ctmRestoreIndices(_CTMcontext * self, CTMuint * aIndices)
{
  CTMuint i;

  for(i = 0; i < self->mTriangleCount; ++ i)
  {
    // Step 1: Reverse derivative of the first triangle index
    if(i >= 1)
      aIndices[i * 3] += aIndices[(i - 1) * 3];

    // Step 2: Reverse delta from third triangle index to the first triangle
    // index
    aIndices[i * 3 + 2] += aIndices[i * 3];

    // Step 3: Reverse delta from second triangle index to the previous
    // second triangle index, if the previous triangle shares the same first
    // index, otherwise reverse the delta to the first triangle index
    if((i >= 1) && (aIndices[i * 3] == aIndices[(i - 1) * 3]))
      aIndices[i * 3 + 1] += aIndices[(i - 1) * 3 + 1];
    else
      aIndices[i * 3 + 1] += aIndices[i * 3];
  }
}

//-----------------------------------------------------------------------------
// _ctmMakeVertexDeltas() - Calculate various forms of derivatives in order to
// reduce data entropy.
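// Each coordinate is stored as the number of mVertexPrecision steps from its
// grid box origin, e.g. with a precision of 0.001 a local offset of 0.1234
// is stored as 123. For the sorted X axis, vertices that share a grid box
// store the difference to the previous quantized X value instead.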
//-----------------------------------------------------------------------------
static void _ctmMakeVertexDeltas(_CTMcontext * self, CTMint * aIntVertices,
  _CTMsortvertex * aSortVertices, _CTMgrid * aGrid)
{
  CTMuint i, gridIdx, prevGridIndex, oldIdx;
  CTMfloat gridOrigin[3], scale;
  CTMint deltaX, prevDeltaX;

  // Vertex scaling factor
  scale = 1.0f / self->mVertexPrecision;

  prevGridIndex = 0x7fffffff;
  prevDeltaX = 0;
  for(i = 0; i < self->mVertexCount; ++ i)
  {
    // Get grid box origin
    gridIdx = aSortVertices[i].mGridIndex;
    _ctmGridIdxToPoint(aGrid, gridIdx, gridOrigin);

    // Get old vertex coordinate index (before vertex sorting)
    oldIdx = aSortVertices[i].mOriginalIndex;

    // Store delta to the grid box origin in the integer vertex array. For the
    // X axis (which is sorted) we also do the delta to the previous coordinate
    // in the box.
    deltaX = (CTMint) floorf(scale * (self->mVertices[oldIdx * 3] - gridOrigin[0]) + 0.5f);
    if(gridIdx == prevGridIndex)
      aIntVertices[i * 3] = deltaX - prevDeltaX;
    else
      aIntVertices[i * 3] = deltaX;
    aIntVertices[i * 3 + 1] = (CTMint) floorf(scale * (self->mVertices[oldIdx * 3 + 1] - gridOrigin[1]) + 0.5f);
    aIntVertices[i * 3 + 2] = (CTMint) floorf(scale * (self->mVertices[oldIdx * 3 + 2] - gridOrigin[2]) + 0.5f);

    prevGridIndex = gridIdx;
    prevDeltaX = deltaX;
  }
}

//-----------------------------------------------------------------------------
// _ctmRestoreVertices() - Calculate inverse derivatives of the vertices.
//-----------------------------------------------------------------------------
static void _ctmRestoreVertices(_CTMcontext * self, CTMint * aIntVertices,
  CTMuint * aGridIndices, _CTMgrid * aGrid, CTMfloat * aVertices)
{
  CTMuint i, gridIdx, prevGridIndex;
  CTMfloat gridOrigin[3], scale;
  CTMint deltaX, prevDeltaX;

  scale = self->mVertexPrecision;

  prevGridIndex = 0x7fffffff;
  prevDeltaX = 0;
  for(i = 0; i < self->mVertexCount; ++ i)
  {
    // Get grid box origin
    gridIdx = aGridIndices[i];
    _ctmGridIdxToPoint(aGrid, gridIdx, gridOrigin);

    // Restore original point
    deltaX = aIntVertices[i * 3];
    if(gridIdx == prevGridIndex)
      deltaX += prevDeltaX;
    aVertices[i * 3] = scale * deltaX + gridOrigin[0];
    aVertices[i * 3 + 1] = scale * aIntVertices[i * 3 + 1] + gridOrigin[1];
    aVertices[i * 3 + 2] = scale * aIntVertices[i * 3 + 2] + gridOrigin[2];

    prevGridIndex = gridIdx;
    prevDeltaX = deltaX;
  }
}

//-----------------------------------------------------------------------------
// _ctmCalcSmoothNormals() - Calculate the smooth normals for a given mesh.
// These are used as the nominal normals for normal deltas & reconstruction.
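// Each vertex normal is the normalized sum of the (unit length) flat normals
// of all triangles that reference the vertex, so every adjacent triangle
// contributes with equal weight regardless of its area.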
//-----------------------------------------------------------------------------
static void _ctmCalcSmoothNormals(_CTMcontext * self, CTMfloat * aVertices,
  CTMuint * aIndices, CTMfloat * aSmoothNormals)
{
  CTMuint i, j, k, tri[3];
  CTMfloat len;
  CTMfloat v1[3], v2[3], n[3];

  // Clear smooth normals array
  for(i = 0; i < 3 * self->mVertexCount; ++ i)
    aSmoothNormals[i] = 0.0f;

  // Calculate sums of all neighbouring triangle normals for each vertex
  for(i = 0; i < self->mTriangleCount; ++ i)
  {
    // Get triangle corner indices
    for(j = 0; j < 3; ++ j)
      tri[j] = aIndices[i * 3 + j];

    // Calculate the normalized cross product of two triangle edges (i.e. the
    // flat triangle normal)
    for(j = 0; j < 3; ++ j)
    {
      v1[j] = aVertices[tri[1] * 3 + j] - aVertices[tri[0] * 3 + j];
      v2[j] = aVertices[tri[2] * 3 + j] - aVertices[tri[0] * 3 + j];
    }
    n[0] = v1[1] * v2[2] - v1[2] * v2[1];
    n[1] = v1[2] * v2[0] - v1[0] * v2[2];
    n[2] = v1[0] * v2[1] - v1[1] * v2[0];
    len = sqrtf(n[0] * n[0] + n[1] * n[1] + n[2] * n[2]);
    if(len > 1e-10f)
      len = 1.0f / len;
    else
      len = 1.0f;
    for(j = 0; j < 3; ++ j)
      n[j] *= len;

    // Add the flat normal to all three triangle vertices
    for(k = 0; k < 3; ++ k)
      for(j = 0; j < 3; ++ j)
        aSmoothNormals[tri[k] * 3 + j] += n[j];
  }

  // Normalize the normal sums, which gives the unit length smooth normals
  for(i = 0; i < self->mVertexCount; ++ i)
  {
    len = sqrtf(aSmoothNormals[i * 3] * aSmoothNormals[i * 3] +
                aSmoothNormals[i * 3 + 1] * aSmoothNormals[i * 3 + 1] +
                aSmoothNormals[i * 3 + 2] * aSmoothNormals[i * 3 + 2]);
    if(len > 1e-10f)
      len = 1.0f / len;
    else
      len = 1.0f;
    for(j = 0; j < 3; ++ j)
      aSmoothNormals[i * 3 + j] *= len;
  }
}

//-----------------------------------------------------------------------------
// _ctmMakeNormalCoordSys() - Create an ortho-normalized coordinate system
// where the Z-axis is aligned with the given normal.
// Note 1: This function is central to how the compressed normal data is
// interpreted, and it can not be changed (mathematically) without making the
// coder/decoder incompatible with other versions of the library!
// Note 2: Since we do this for every single normal, this routine needs to be
// fast. The current implementation uses: 12 MUL, 1 DIV, 1 SQRT, ~6 ADD.
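// For reference, with normal = (nx, ny, nz):
//   (0,0,1) x normal = (-ny, nx, 0),  (1,0,0) x normal = (0, -nz, ny),
// so X = (-ny, nx - nz, ny), which is what the code below computes.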
//-----------------------------------------------------------------------------
static void _ctmMakeNormalCoordSys(CTMfloat * aNormal, CTMfloat * aBasisAxes)
{
  CTMfloat len, * x, * y, * z;
  CTMuint i;

  // Pointers to the basis axes (aBasisAxes is a 3x3 matrix)
  x = aBasisAxes;
  y = &aBasisAxes[3];
  z = &aBasisAxes[6];

  // Z = normal (must be unit length!)
  for(i = 0; i < 3; ++ i)
    z[i] = aNormal[i];

  // Calculate a vector that is guaranteed to be orthogonal to the normal, non-
  // zero, and a continuous function of the normal (no discrete jumps):
  // X = (0,0,1) x normal + (1,0,0) x normal
  x[0] = -aNormal[1];
  x[1] = aNormal[0] - aNormal[2];
  x[2] = aNormal[1];

  // Normalize the new X axis (note: |x[2]| = |x[0]|)
  len = sqrtf(2.0 * x[0] * x[0] + x[1] * x[1]);
  if(len > 1.0e-20f)
  {
    len = 1.0f / len;
    x[0] *= len;
    x[1] *= len;
    x[2] *= len;
  }

  // Let Y = Z x X (no normalization needed, since |Z| = |X| = 1)
  y[0] = z[1] * x[2] - z[2] * x[1];
  y[1] = z[2] * x[0] - z[0] * x[2];
  y[2] = z[0] * x[1] - z[1] * x[0];
}

//-----------------------------------------------------------------------------
// _ctmMakeNormalDeltas() - Convert the normals to a new coordinate system:
// magnitude, phi, theta (relative to predicted smooth normals).
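// The magnitude is rounded to the nearest multiple of mNormalPrecision, phi
// (the deviation from the predicted smooth normal) is rounded in steps of
// precision * pi / 2, and the theta resolution grows with phi so that the
// quantization cells stay roughly uniform on the unit sphere.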
//-----------------------------------------------------------------------------
static CTMint _ctmMakeNormalDeltas(_CTMcontext * self, CTMint * aIntNormals,
  CTMfloat * aVertices, CTMuint * aIndices, _CTMsortvertex * aSortVertices)
{
  CTMuint i, j, oldIdx, intPhi;
  CTMfloat magn, phi, theta, scale, thetaScale;
  CTMfloat * smoothNormals, n[3], n2[3], basisAxes[9];

  // Allocate temporary memory for the nominal vertex normals
  smoothNormals = (CTMfloat *) malloc(3 * sizeof(CTMfloat) * self->mVertexCount);
  if(!smoothNormals)
  {
    self->mError = CTM_OUT_OF_MEMORY;
    return CTM_FALSE;
  }

  // Calculate smooth normals (Note: aVertices and aIndices use the sorted
  // index space, so smoothNormals will too)
  _ctmCalcSmoothNormals(self, aVertices, aIndices, smoothNormals);

  // Normal scaling factor
  scale = 1.0f / self->mNormalPrecision;

  for(i = 0; i < self->mVertexCount; ++ i)
  {
    // Get old normal index (before vertex sorting)
    oldIdx = aSortVertices[i].mOriginalIndex;

    // Calculate normal magnitude (should always be 1.0 for unit length normals)
    magn = sqrtf(self->mNormals[oldIdx * 3] * self->mNormals[oldIdx * 3] +
                 self->mNormals[oldIdx * 3 + 1] * self->mNormals[oldIdx * 3 + 1] +
                 self->mNormals[oldIdx * 3 + 2] * self->mNormals[oldIdx * 3 + 2]);
    if(magn < 1e-10f)
      magn = 1.0f;

    // Invert magnitude if the normal is negative compared to the predicted
    // smooth normal
    if((smoothNormals[i * 3] * self->mNormals[oldIdx * 3] +
        smoothNormals[i * 3 + 1] * self->mNormals[oldIdx * 3 + 1] +
        smoothNormals[i * 3 + 2] * self->mNormals[oldIdx * 3 + 2]) < 0.0f)
      magn = -magn;

    // Store the magnitude in the first element of the three normal elements
    aIntNormals[i * 3] = (CTMint) floorf(scale * magn + 0.5f);

    // Normalize the normal (1 / magn) - and flip it if magn < 0
    magn = 1.0f / magn;
    for(j = 0; j < 3; ++ j)
      n[j] = self->mNormals[oldIdx * 3 + j] * magn;

    // Convert the normal to angular representation (phi, theta) in a coordinate
    // system where the nominal (smooth) normal is the Z-axis
    _ctmMakeNormalCoordSys(&smoothNormals[i * 3], basisAxes);
    for(j = 0; j < 3; ++ j)
      n2[j] = basisAxes[j * 3] * n[0] +
              basisAxes[j * 3 + 1] * n[1] +
              basisAxes[j * 3 + 2] * n[2];
    if(n2[2] >= 1.0f)
      phi = 0.0f;
    else
      phi = acosf(n2[2]);
    theta = atan2f(n2[1], n2[0]);

    // Round phi and theta (spherical coordinates) to integers. Note: We let the
    // theta resolution vary with the x/y circumference (roughly phi).
    intPhi = (CTMint) floorf(phi * (scale / (0.5f * PI)) + 0.5f);
    if(intPhi == 0)
      thetaScale = 0.0f;
    else if(intPhi <= 4)
      thetaScale = 2.0f / PI;
    else
      thetaScale = ((CTMfloat) intPhi) / (2.0f * PI);
    aIntNormals[i * 3 + 1] = intPhi;
    aIntNormals[i * 3 + 2] = (CTMint) floorf((theta + PI) * thetaScale + 0.5f);
  }

  // Free temporary resources
  free(smoothNormals);

  return CTM_TRUE;
}

//-----------------------------------------------------------------------------
// _ctmRestoreNormals() - Convert the normals back to cartesian coordinates.
//-----------------------------------------------------------------------------
static CTMint _ctmRestoreNormals(_CTMcontext * self, CTMint * aIntNormals)
{
  CTMuint i, j, intPhi;
  CTMfloat magn, phi, theta, scale, thetaScale;
  CTMfloat * smoothNormals, n[3], n2[3], basisAxes[9];

  // Allocate temporary memory for the nominal vertex normals
  smoothNormals = (CTMfloat *) malloc(3 * sizeof(CTMfloat) * self->mVertexCount);
  if(!smoothNormals)
  {
    self->mError = CTM_OUT_OF_MEMORY;
    return CTM_FALSE;
  }

  // Calculate smooth normals (nominal normals)
  _ctmCalcSmoothNormals(self, self->mVertices, self->mIndices, smoothNormals);

  // Normal scaling factor
  scale = self->mNormalPrecision;

  for(i = 0; i < self->mVertexCount; ++ i)
  {
    // Get the normal magnitude from the first of the three normal elements
    magn = aIntNormals[i * 3] * scale;

    // Get phi and theta (spherical coordinates, relative to the smooth normal).
    intPhi = aIntNormals[i * 3 + 1];
    phi = intPhi * (0.5f * PI) * scale;
    if(intPhi == 0)
      thetaScale = 0.0f;
    else if(intPhi <= 4)
      thetaScale = PI / 2.0f;
    else
      thetaScale = (2.0f * PI) / ((CTMfloat) intPhi);
    theta = aIntNormals[i * 3 + 2] * thetaScale - PI;

    // Convert the normal from the angular representation (phi, theta) back to
    // cartesian coordinates
    n2[0] = sinf(phi) * cosf(theta);
    n2[1] = sinf(phi) * sinf(theta);
    n2[2] = cosf(phi);
    _ctmMakeNormalCoordSys(&smoothNormals[i * 3], basisAxes);
    for(j = 0; j < 3; ++ j)
      n[j] = basisAxes[j] * n2[0] +
             basisAxes[3 + j] * n2[1] +
             basisAxes[6 + j] * n2[2];

    // Apply normal magnitude, and output to the normals array
    for(j = 0; j < 3; ++ j)
      self->mNormals[i * 3 + j] = n[j] * magn;
  }

  // Free temporary resources
  free(smoothNormals);

  return CTM_TRUE;
}

//-----------------------------------------------------------------------------
// _ctmMakeUVCoordDeltas() - Calculate various forms of derivatives in order
// to reduce data entropy.
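// For example, with a precision of 1/4096 the UV pair (0.50, 0.25) becomes
// the fixed point pair (2048, 1024), and only the difference to the previous
// (sorted) vertex's pair is stored.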
//-----------------------------------------------------------------------------
static void _ctmMakeUVCoordDeltas(_CTMcontext * self, _CTMfloatmap * aMap,
  CTMint * aIntUVCoords, _CTMsortvertex * aSortVertices)
{
  CTMuint i, oldIdx;
  CTMint u, v, prevU, prevV;
  CTMfloat scale;

  // UV coordinate scaling factor
  scale = 1.0f / aMap->mPrecision;

  prevU = prevV = 0;
  for(i = 0; i < self->mVertexCount; ++ i)
  {
    // Get old UV coordinate index (before vertex sorting)
    oldIdx = aSortVertices[i].mOriginalIndex;

    // Convert to fixed point
    u = (CTMint) floorf(scale * aMap->mValues[oldIdx * 2] + 0.5f);
    v = (CTMint) floorf(scale * aMap->mValues[oldIdx * 2 + 1] + 0.5f);

    // Calculate delta and store it in the converted array. NOTE: Here we rely
    // on the fact that vertices are sorted, and usually close to each other,
    // which means that UV coordinates should also be close to each other...
    aIntUVCoords[i * 2] = u - prevU;
    aIntUVCoords[i * 2 + 1] = v - prevV;

    prevU = u;
    prevV = v;
  }
}

//-----------------------------------------------------------------------------
// _ctmRestoreUVCoords() - Calculate inverse derivatives of the UV
// coordinates.
//-----------------------------------------------------------------------------
static void _ctmRestoreUVCoords(_CTMcontext * self, _CTMfloatmap * aMap,
  CTMint * aIntUVCoords)
{
  CTMuint i;
  CTMint u, v, prevU, prevV;
  CTMfloat scale;

  // UV coordinate scaling factor
  scale = aMap->mPrecision;

  prevU = prevV = 0;
  for(i = 0; i < self->mVertexCount; ++ i)
  {
    // Calculate inverse delta
    u = aIntUVCoords[i * 2] + prevU;
    v = aIntUVCoords[i * 2 + 1] + prevV;

    // Convert to floating point
    aMap->mValues[i * 2] = (CTMfloat) u * scale;
    aMap->mValues[i * 2 + 1] = (CTMfloat) v * scale;

    prevU = u;
    prevV = v;
  }
}

//-----------------------------------------------------------------------------
// _ctmMakeAttribDeltas() - Calculate various forms of derivatives in order
// to reduce data entropy.
//-----------------------------------------------------------------------------
static void _ctmMakeAttribDeltas(_CTMcontext * self, _CTMfloatmap * aMap,
  CTMint * aIntAttribs, _CTMsortvertex * aSortVertices)
{
  CTMuint i, j, oldIdx;
  CTMint value[4], prev[4];
  CTMfloat scale;

  // Attribute scaling factor
  scale = 1.0f / aMap->mPrecision;

  for(j = 0; j < 4; ++ j)
    prev[j] = 0;

  for(i = 0; i < self->mVertexCount; ++ i)
  {
    // Get old attribute index (before vertex sorting)
    oldIdx = aSortVertices[i].mOriginalIndex;

    // Convert to fixed point, and calculate delta and store it in the converted
    // array. NOTE: Here we rely on the fact that vertices are sorted, and
    // usually close to each other, which means that attributes should also
    // be close to each other (and we assume that they somehow vary slowly with
    // the geometry)...
    for(j = 0; j < 4; ++ j)
    {
      value[j] = (CTMint) floorf(scale * aMap->mValues[oldIdx * 4 + j] + 0.5f);
      aIntAttribs[i * 4 + j] = value[j] - prev[j];
      prev[j] = value[j];
    }
  }
}

//-----------------------------------------------------------------------------
// _ctmRestoreAttribs() - Calculate inverse derivatives of the vertex
// attributes.
//-----------------------------------------------------------------------------
static void _ctmRestoreAttribs(_CTMcontext * self, _CTMfloatmap * aMap,
  CTMint * aIntAttribs)
{
  CTMuint i, j;
  CTMint value[4], prev[4];
  CTMfloat scale;

  // Attribute scaling factor
  scale = aMap->mPrecision;

  for(j = 0; j < 4; ++ j)
    prev[j] = 0;

  for(i = 0; i < self->mVertexCount; ++ i)
  {
    // Calculate inverse delta, and convert to floating point
    for(j = 0; j < 4; ++ j)
    {
      value[j] = aIntAttribs[i * 4 + j] + prev[j];
      aMap->mValues[i * 4 + j] = (CTMfloat) value[j] * scale;
      prev[j] = value[j];
    }
  }
}

//-----------------------------------------------------------------------------
// _ctmCompressMesh_MG2() - Compress the mesh that is stored in the CTM
// context, and write it to the output stream in the CTM context.
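// The MG2 body consists of an "MG2H" header (precisions, bounding box and
// grid divisions) followed by packed integer chunks: "VERT" (vertex deltas),
// "GIDX" (grid index deltas), "INDX" (triangle index deltas), an optional
// "NORM" chunk, one "TEXC" chunk per UV map and one "ATTR" chunk per
// attribute map.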
//-----------------------------------------------------------------------------
int _ctmCompressMesh_MG2(_CTMcontext * self)
{
  _CTMgrid grid;
  _CTMsortvertex * sortVertices;
  _CTMfloatmap * map;
  CTMuint * indices, * deltaIndices, * gridIndices;
  CTMint * intVertices, * intNormals, * intUVCoords, * intAttribs;
  CTMfloat * restoredVertices;
  CTMuint i;

#ifdef __DEBUG_
  printf("COMPRESSION METHOD: MG2\n");
#endif

  // Setup 3D space subdivision grid
  _ctmSetupGrid(self, &grid);

  // Write MG2-specific header information to the stream
  _ctmStreamWrite(self, (void *) "MG2H", 4);
  _ctmStreamWriteFLOAT(self, self->mVertexPrecision);
  _ctmStreamWriteFLOAT(self, self->mNormalPrecision);
  _ctmStreamWriteFLOAT(self, grid.mMin[0]);
  _ctmStreamWriteFLOAT(self, grid.mMin[1]);
  _ctmStreamWriteFLOAT(self, grid.mMin[2]);
  _ctmStreamWriteFLOAT(self, grid.mMax[0]);
  _ctmStreamWriteFLOAT(self, grid.mMax[1]);
  _ctmStreamWriteFLOAT(self, grid.mMax[2]);
  _ctmStreamWriteUINT(self, grid.mDivision[0]);
  _ctmStreamWriteUINT(self, grid.mDivision[1]);
  _ctmStreamWriteUINT(self, grid.mDivision[2]);

  // Prepare (sort) vertices
  sortVertices = (_CTMsortvertex *) malloc(sizeof(_CTMsortvertex) * self->mVertexCount);
  if(!sortVertices)
  {
    self->mError = CTM_OUT_OF_MEMORY;
    return CTM_FALSE;
  }
  _ctmSortVertices(self, sortVertices, &grid);

  // Convert vertices to integers and calculate vertex deltas (entropy-reduction)
  intVertices = (CTMint *) malloc(sizeof(CTMint) * 3 * self->mVertexCount);
  if(!intVertices)
  {
    self->mError = CTM_OUT_OF_MEMORY;
    free((void *) sortVertices);
    return CTM_FALSE;
  }
  _ctmMakeVertexDeltas(self, intVertices, sortVertices, &grid);

  // Write vertices
#ifdef __DEBUG_
  printf("Vertices: ");
#endif
  _ctmStreamWrite(self, (void *) "VERT", 4);
  if(!_ctmStreamWritePackedInts(self, intVertices, self->mVertexCount, 3, CTM_FALSE))
  {
    free((void *) intVertices);
    free((void *) sortVertices);
    return CTM_FALSE;
  }

  // Prepare grid indices (deltas)
  gridIndices = (CTMuint *) malloc(sizeof(CTMuint) * self->mVertexCount);
  if(!gridIndices)
  {
    self->mError = CTM_OUT_OF_MEMORY;
    free((void *) intVertices);
    free((void *) sortVertices);
    return CTM_FALSE;
  }
  gridIndices[0] = sortVertices[0].mGridIndex;
  for(i = 1; i < self->mVertexCount; ++ i)
    gridIndices[i] = sortVertices[i].mGridIndex - sortVertices[i - 1].mGridIndex;

  // Write grid indices
#ifdef __DEBUG_
  printf("Grid indices: ");
#endif
  _ctmStreamWrite(self, (void *) "GIDX", 4);
  if(!_ctmStreamWritePackedInts(self, (CTMint *) gridIndices, self->mVertexCount, 1, CTM_FALSE))
  {
    free((void *) gridIndices);
    free((void *) intVertices);
    free((void *) sortVertices);
    return CTM_FALSE;
  }

  // Calculate the result of the compressed -> decompressed vertices, in order
  // to use the same vertex data for calculating nominal normals as the
  // decompression routine (i.e. compensate for the vertex error when
  // calculating the normals)
  restoredVertices = (CTMfloat *) malloc(sizeof(CTMfloat) * 3 * self->mVertexCount);
  if(!restoredVertices)
  {
    self->mError = CTM_OUT_OF_MEMORY;
    free((void *) gridIndices);
    free((void *) intVertices);
    free((void *) sortVertices);
    return CTM_FALSE;
  }
  for(i = 1; i < self->mVertexCount; ++ i)
    gridIndices[i] += gridIndices[i - 1];
  _ctmRestoreVertices(self, intVertices, gridIndices, &grid, restoredVertices);

  // Free temporary resources
  free((void *) gridIndices);
  free((void *) intVertices);

  // Prepare (sort) indices
  indices = (CTMuint *) malloc(sizeof(CTMuint) * self->mTriangleCount * 3);
  if(!indices)
  {
    self->mError = CTM_OUT_OF_MEMORY;
    free((void *) restoredVertices);
    free((void *) sortVertices);
    return CTM_FALSE;
  }
  if(!_ctmReIndexIndices(self, sortVertices, indices))
  {
    free((void *) indices);
    free((void *) restoredVertices);
    free((void *) sortVertices);
    return CTM_FALSE;
  }
  _ctmReArrangeTriangles(self, indices);

  // Calculate index deltas (entropy-reduction)
  deltaIndices = (CTMuint *) malloc(sizeof(CTMuint) * self->mTriangleCount * 3);
  if(!deltaIndices)
  {
    self->mError = CTM_OUT_OF_MEMORY;
    free((void *) indices);
    free((void *) restoredVertices);
    free((void *) sortVertices);
    return CTM_FALSE;
  }
  for(i = 0; i < self->mTriangleCount * 3; ++ i)
    deltaIndices[i] = indices[i];
  _ctmMakeIndexDeltas(self, deltaIndices);

  // Write triangle indices
#ifdef __DEBUG_
  printf("Indices: ");
#endif
  _ctmStreamWrite(self, (void *) "INDX", 4);
  if(!_ctmStreamWritePackedInts(self, (CTMint *) deltaIndices, self->mTriangleCount, 3, CTM_FALSE))
  {
    free((void *) deltaIndices);
    free((void *) indices);
    free((void *) restoredVertices);
    free((void *) sortVertices);
    return CTM_FALSE;
  }

  // Free temporary data for the indices
  free((void *) deltaIndices);

  if(self->mNormals)
  {
    // Convert normals to integers and calculate deltas (entropy-reduction)
    intNormals = (CTMint *) malloc(sizeof(CTMint) * 3 * self->mVertexCount);
    if(!intNormals)
    {
      self->mError = CTM_OUT_OF_MEMORY;
      free((void *) indices);
      free((void *) restoredVertices);
      free((void *) sortVertices);
      return CTM_FALSE;
    }
    if(!_ctmMakeNormalDeltas(self, intNormals, restoredVertices, indices, sortVertices))
    {
      free((void *) indices);
      free((void *) intNormals);
      free((void *) restoredVertices);
      free((void *) sortVertices);
      return CTM_FALSE;
    }

    // Write normals
#ifdef __DEBUG_
    printf("Normals: ");
#endif
    _ctmStreamWrite(self, (void *) "NORM", 4);
    if(!_ctmStreamWritePackedInts(self, intNormals, self->mVertexCount, 3, CTM_FALSE))
    {
      free((void *) indices);
      free((void *) intNormals);
      free((void *) restoredVertices);
      free((void *) sortVertices);
      return CTM_FALSE;
    }

    // Free temporary normal data
    free((void *) intNormals);
  }

  // Free restored indices and vertices
  free((void *) indices);
  free((void *) restoredVertices);

  // Write UV maps
  map = self->mUVMaps;
  while(map)
  {
    // Convert UV coordinates to integers and calculate deltas (entropy-reduction)
    intUVCoords = (CTMint *) malloc(sizeof(CTMint) * 2 * self->mVertexCount);
    if(!intUVCoords)
    {
      self->mError = CTM_OUT_OF_MEMORY;
      free((void *) sortVertices);
      return CTM_FALSE;
    }
    _ctmMakeUVCoordDeltas(self, map, intUVCoords, sortVertices);

    // Write UV coordinates
#ifdef __DEBUG_
    printf("Texture coordinates (%s): ", map->mName ? map->mName : "no name");
#endif
    _ctmStreamWrite(self, (void *) "TEXC", 4);
    _ctmStreamWriteSTRING(self, map->mName);
    _ctmStreamWriteSTRING(self, map->mFileName);
    _ctmStreamWriteFLOAT(self, map->mPrecision);
    if(!_ctmStreamWritePackedInts(self, intUVCoords, self->mVertexCount, 2, CTM_TRUE))
    {
      free((void *) intUVCoords);
      free((void *) sortVertices);
      return CTM_FALSE;
    }

    // Free temporary UV coordinate data
    free((void *) intUVCoords);

    map = map->mNext;
  }

  // Write vertex attribute maps
  map = self->mAttribMaps;
  while(map)
  {
    // Convert vertex attributes to integers and calculate deltas (entropy-reduction)
    intAttribs = (CTMint *) malloc(sizeof(CTMint) * 4 * self->mVertexCount);
    if(!intAttribs)
    {
      self->mError = CTM_OUT_OF_MEMORY;
      free((void *) sortVertices);
      return CTM_FALSE;
    }
    _ctmMakeAttribDeltas(self, map, intAttribs, sortVertices);

    // Write vertex attributes
#ifdef __DEBUG_
    printf("Vertex attributes (%s): ", map->mName ? map->mName : "no name");
#endif
    _ctmStreamWrite(self, (void *) "ATTR", 4);
    _ctmStreamWriteSTRING(self, map->mName);
    _ctmStreamWriteFLOAT(self, map->mPrecision);
    if(!_ctmStreamWritePackedInts(self, intAttribs, self->mVertexCount, 4, CTM_TRUE))
    {
      free((void *) intAttribs);
      free((void *) sortVertices);
      return CTM_FALSE;
    }

    // Free temporary vertex attribute data
    free((void *) intAttribs);

    map = map->mNext;
  }

  // Free temporary data
  free((void *) sortVertices);

  return CTM_TRUE;
}
|
nuclear@14
|
1080 //-----------------------------------------------------------------------------
|
nuclear@14
|
1081 // _ctmUncompressMesh_MG2() - Uncmpress the mesh from the input stream in the
|
nuclear@14
|
1082 // CTM context, and store the resulting mesh in the CTM context.
|
nuclear@14
|
1083 //-----------------------------------------------------------------------------
|
nuclear@14
|
1084 int _ctmUncompressMesh_MG2(_CTMcontext * self)
|
nuclear@14
|
1085 {
|
nuclear@14
|
1086 CTMuint * gridIndices, i;
|
nuclear@14
|
1087 CTMint * intVertices, * intNormals, * intUVCoords, * intAttribs;
|
nuclear@14
|
1088 _CTMfloatmap * map;
|
nuclear@14
|
1089 _CTMgrid grid;
|
nuclear@14
|
1090
|
nuclear@14
|
1091 // Read MG2-specific header information from the stream
|
nuclear@14
|
1092 if(_ctmStreamReadUINT(self) != FOURCC("MG2H"))
|
nuclear@14
|
1093 {
|
nuclear@14
|
1094 self->mError = CTM_BAD_FORMAT;
|
nuclear@14
|
1095 return CTM_FALSE;
|
nuclear@14
|
1096 }
|
nuclear@14
|
1097 self->mVertexPrecision = _ctmStreamReadFLOAT(self);
|
nuclear@14
|
1098 if(self->mVertexPrecision <= 0.0f)
|
nuclear@14
|
1099 {
|
nuclear@14
|
1100 self->mError = CTM_BAD_FORMAT;
|
nuclear@14
|
1101 return CTM_FALSE;
|
nuclear@14
|
1102 }
|
nuclear@14
|
1103 self->mNormalPrecision = _ctmStreamReadFLOAT(self);
|
nuclear@14
|
1104 if(self->mNormalPrecision <= 0.0f)
|
nuclear@14
|
1105 {
|
nuclear@14
|
1106 self->mError = CTM_BAD_FORMAT;
|
nuclear@14
|
1107 return CTM_FALSE;
|
nuclear@14
|
1108 }
|
nuclear@14
|
1109 grid.mMin[0] = _ctmStreamReadFLOAT(self);
|
nuclear@14
|
1110 grid.mMin[1] = _ctmStreamReadFLOAT(self);
|
nuclear@14
|
1111 grid.mMin[2] = _ctmStreamReadFLOAT(self);
|
nuclear@14
|
1112 grid.mMax[0] = _ctmStreamReadFLOAT(self);
|
nuclear@14
|
1113 grid.mMax[1] = _ctmStreamReadFLOAT(self);
|
nuclear@14
|
1114 grid.mMax[2] = _ctmStreamReadFLOAT(self);
|
nuclear@14
|
1115 if((grid.mMax[0] < grid.mMin[0]) ||
|
nuclear@14
|
1116 (grid.mMax[1] < grid.mMin[1]) ||
|
nuclear@14
|
1117 (grid.mMax[2] < grid.mMin[2]))
|
nuclear@14
|
1118 {
|
nuclear@14
|
1119 self->mError = CTM_BAD_FORMAT;
|
nuclear@14
|
1120 return CTM_FALSE;
|
nuclear@14
|
1121 }
|
nuclear@14
|
1122 grid.mDivision[0] = _ctmStreamReadUINT(self);
|
nuclear@14
|
1123 grid.mDivision[1] = _ctmStreamReadUINT(self);
|
nuclear@14
|
1124 grid.mDivision[2] = _ctmStreamReadUINT(self);
|
nuclear@14
|
1125 if((grid.mDivision[0] < 1) || (grid.mDivision[1] < 1) || (grid.mDivision[2] < 1))
|
nuclear@14
|
1126 {
|
nuclear@14
|
1127 self->mError = CTM_BAD_FORMAT;
|
nuclear@14
|
1128 return CTM_FALSE;
|
nuclear@14
|
1129 }
|
nuclear@14
|
1130
|
nuclear@14
|
1131 // Initialize 3D space subdivision grid
|
nuclear@14
|
1132 for(i = 0; i < 3; ++ i)
|
nuclear@14
|
1133 grid.mSize[i] = (grid.mMax[i] - grid.mMin[i]) / grid.mDivision[i];
|
nuclear@14
|

  // Read vertices
  if(_ctmStreamReadUINT(self) != FOURCC("VERT"))
  {
    self->mError = CTM_BAD_FORMAT;
    return CTM_FALSE;
  }
  intVertices = (CTMint *) malloc(sizeof(CTMint) * self->mVertexCount * 3);
  if(!intVertices)
  {
    self->mError = CTM_OUT_OF_MEMORY;
    return CTM_FALSE;
  }
  if(!_ctmStreamReadPackedInts(self, intVertices, self->mVertexCount, 3, CTM_FALSE))
  {
    free((void *) intVertices);
    return CTM_FALSE;
  }

  // Read grid indices
  if(_ctmStreamReadUINT(self) != FOURCC("GIDX"))
  {
    free((void *) intVertices);
    self->mError = CTM_BAD_FORMAT;
    return CTM_FALSE;
  }
  gridIndices = (CTMuint *) malloc(sizeof(CTMuint) * self->mVertexCount);
  if(!gridIndices)
  {
    self->mError = CTM_OUT_OF_MEMORY;
    free((void *) intVertices);
    return CTM_FALSE;
  }
  if(!_ctmStreamReadPackedInts(self, (CTMint *) gridIndices, self->mVertexCount, 1, CTM_FALSE))
  {
    free((void *) gridIndices);
    free((void *) intVertices);
    return CTM_FALSE;
  }

  // Restore grid indices (deltas)
  for(i = 1; i < self->mVertexCount; ++ i)
    gridIndices[i] += gridIndices[i - 1];
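  // The compressor sorts the vertices by grid index before encoding, so the
  // stored values are small, non-negative first differences; the prefix sum
  // above recovers the absolute grid indices.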

  // Restore vertices
  _ctmRestoreVertices(self, intVertices, gridIndices, &grid, self->mVertices);

  // Free temporary resources
  free((void *) gridIndices);
  free((void *) intVertices);

  // Read triangle indices
  if(_ctmStreamReadUINT(self) != FOURCC("INDX"))
  {
    self->mError = CTM_BAD_FORMAT;
    return CTM_FALSE;
  }
  if(!_ctmStreamReadPackedInts(self, (CTMint *) self->mIndices, self->mTriangleCount, 3, CTM_FALSE))
    return CTM_FALSE;

  // Restore indices
  _ctmRestoreIndices(self, self->mIndices);

  // Check that all indices are within range
  for(i = 0; i < (self->mTriangleCount * 3); ++ i)
  {
    if(self->mIndices[i] >= self->mVertexCount)
    {
      self->mError = CTM_INVALID_MESH;
      return CTM_FALSE;
    }
  }

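  // Note: normals are read only after the vertices and triangle indices have
  // been restored; _ctmRestoreNormals() is assumed to need both in order to
  // rebuild the predicted per-vertex normals that the compressor encoded
  // against.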
  // Read normals
  if(self->mNormals)
  {
    intNormals = (CTMint *) malloc(sizeof(CTMint) * self->mVertexCount * 3);
    if(!intNormals)
    {
      self->mError = CTM_OUT_OF_MEMORY;
      return CTM_FALSE;
    }
    if(_ctmStreamReadUINT(self) != FOURCC("NORM"))
    {
      self->mError = CTM_BAD_FORMAT;
      free((void *) intNormals);
      return CTM_FALSE;
    }
    if(!_ctmStreamReadPackedInts(self, intNormals, self->mVertexCount, 3, CTM_FALSE))
    {
      free((void *) intNormals);
      return CTM_FALSE;
    }

    // Restore normals
    if(!_ctmRestoreNormals(self, intNormals))
    {
      free((void *) intNormals);
      return CTM_FALSE;
    }

    // Free temporary normals data
    free((void *) intNormals);
  }

  // Read UV maps
  map = self->mUVMaps;
  while(map)
  {
    intUVCoords = (CTMint *) malloc(sizeof(CTMint) * self->mVertexCount * 2);
    if(!intUVCoords)
    {
      self->mError = CTM_OUT_OF_MEMORY;
      return CTM_FALSE;
    }
    if(_ctmStreamReadUINT(self) != FOURCC("TEXC"))
    {
      self->mError = CTM_BAD_FORMAT;
      free((void *) intUVCoords);
      return CTM_FALSE;
    }
    _ctmStreamReadSTRING(self, &map->mName);
    _ctmStreamReadSTRING(self, &map->mFileName);
    map->mPrecision = _ctmStreamReadFLOAT(self);
    if(map->mPrecision <= 0.0f)
    {
      self->mError = CTM_BAD_FORMAT;
      free((void *) intUVCoords);
      return CTM_FALSE;
    }
    if(!_ctmStreamReadPackedInts(self, intUVCoords, self->mVertexCount, 2, CTM_TRUE))
    {
      free((void *) intUVCoords);
      return CTM_FALSE;
    }

    // Restore UV coordinates
    _ctmRestoreUVCoords(self, map, intUVCoords);

    // Free temporary UV coordinate data
    free((void *) intUVCoords);

    map = map->mNext;
  }

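  // Note: the attribute maps below are decoded the same way as the UV maps
  // above, except that each vertex carries four interleaved components and no
  // file name is stored for the map.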
  // Read vertex attribute maps
  map = self->mAttribMaps;
  while(map)
  {
    intAttribs = (CTMint *) malloc(sizeof(CTMint) * self->mVertexCount * 4);
    if(!intAttribs)
    {
      self->mError = CTM_OUT_OF_MEMORY;
      return CTM_FALSE;
    }
    if(_ctmStreamReadUINT(self) != FOURCC("ATTR"))
    {
      self->mError = CTM_BAD_FORMAT;
      free((void *) intAttribs);
      return CTM_FALSE;
    }
    _ctmStreamReadSTRING(self, &map->mName);
    map->mPrecision = _ctmStreamReadFLOAT(self);
    if(map->mPrecision <= 0.0f)
    {
      self->mError = CTM_BAD_FORMAT;
      free((void *) intAttribs);
      return CTM_FALSE;
    }
    if(!_ctmStreamReadPackedInts(self, intAttribs, self->mVertexCount, 4, CTM_TRUE))
    {
      free((void *) intAttribs);
      return CTM_FALSE;
    }

    // Restore vertex attributes
    _ctmRestoreAttribs(self, map, intAttribs);

    // Free temporary vertex attribute data
    free((void *) intAttribs);

    map = map->mNext;
  }

  return CTM_TRUE;
}
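
//-----------------------------------------------------------------------------
// Usage sketch (not part of the library): MG2 decompression is driven through
// the public OpenCTM API rather than by calling _ctmUncompressMesh_MG2()
// directly. A minimal reader could look roughly like the following; error
// handling is abbreviated and "mesh.ctm" is a placeholder file name.
//-----------------------------------------------------------------------------
#if 0
#include <stdio.h>
#include <openctm.h>

int main(void)
{
  CTMcontext ctm = ctmNewContext(CTM_IMPORT);
  ctmLoad(ctm, "mesh.ctm");
  if(ctmGetError(ctm) == CTM_NONE)
  {
    CTMuint vertCount = ctmGetInteger(ctm, CTM_VERTEX_COUNT);
    CTMuint triCount = ctmGetInteger(ctm, CTM_TRIANGLE_COUNT);
    const CTMfloat * vertices = ctmGetFloatArray(ctm, CTM_VERTICES);
    printf("%u vertices, %u triangles, first vertex (%f, %f, %f)\n",
           vertCount, triCount, vertices[0], vertices[1], vertices[2]);
  }
  ctmFreeContext(ctm);
  return 0;
}
#endif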