rev |
line source |
nuclear@0
|
#include <stdlib.h>
#include <string.h>
#include <limits.h>
#include <assert.h>
#include <pthread.h>
#include "anim.h"
#include "dynarr.h"
|
nuclear@0
|
6
|
nuclear@0
|
7 #define ROT_USE_SLERP
|
nuclear@0
|
8
|
nuclear@0
|
9 static void invalidate_cache(struct anm_node *node);
|
nuclear@0
|
10
|
nuclear@0
|
11 int anm_init_node(struct anm_node *node)
|
nuclear@0
|
12 {
|
nuclear@0
|
13 int i, j;
|
nuclear@0
|
14 static const float defaults[] = {
|
nuclear@0
|
15 0.0f, 0.0f, 0.0f, /* default position */
|
nuclear@0
|
16 0.0f, 0.0f, 0.0f, 1.0f, /* default rotation quat */
|
nuclear@0
|
17 1.0f, 1.0f, 1.0f /* default scale factor */
|
nuclear@0
|
18 };
|
nuclear@0
|
19
|
nuclear@0
|
20 memset(node, 0, sizeof *node);
|
nuclear@0
|
21
|
nuclear@0
|
22 /* initialize thread-local matrix cache */
|
nuclear@0
|
23 pthread_key_create(&node->cache_key, 0);
|
nuclear@0
|
24 pthread_mutex_init(&node->cache_list_lock, 0);
|
nuclear@0
|
25
|
nuclear@0
|
26 for(i=0; i<ANM_NUM_TRACKS; i++) {
|
nuclear@0
|
27 if(anm_init_track(node->tracks + i) == -1) {
|
nuclear@0
|
28 for(j=0; j<i; j++) {
|
nuclear@0
|
29 anm_destroy_track(node->tracks + i);
|
nuclear@0
|
30 }
|
nuclear@0
|
31 }
|
nuclear@0
|
32 anm_set_track_default(node->tracks + i, defaults[i]);
|
nuclear@0
|
33 }
|
nuclear@0
|
34 return 0;
|
nuclear@0
|
35 }
|
nuclear@0
|
36
|
nuclear@0
|
37 void anm_destroy_node(struct anm_node *node)
|
nuclear@0
|
38 {
|
nuclear@0
|
39 int i;
|
nuclear@0
|
40 free(node->name);
|
nuclear@0
|
41
|
nuclear@0
|
42 for(i=0; i<ANM_NUM_TRACKS; i++) {
|
nuclear@0
|
43 anm_destroy_track(node->tracks + i);
|
nuclear@0
|
44 }
|
nuclear@0
|
45
|
nuclear@0
|
46 /* destroy thread-specific cache */
|
nuclear@0
|
47 pthread_key_delete(node->cache_key);
|
nuclear@0
|
48
|
nuclear@0
|
49 while(node->cache_list) {
|
nuclear@0
|
50 struct mat_cache *tmp = node->cache_list;
|
nuclear@0
|
51 node->cache_list = tmp->next;
|
nuclear@0
|
52 free(tmp);
|
nuclear@0
|
53 }
|
nuclear@0
|
54 }
|
nuclear@0
|
55
|
nuclear@0
|
56 void anm_destroy_node_tree(struct anm_node *tree)
|
nuclear@0
|
57 {
|
nuclear@0
|
58 struct anm_node *c, *tmp;
|
nuclear@0
|
59
|
nuclear@0
|
60 if(!tree) return;
|
nuclear@0
|
61
|
nuclear@0
|
62 c = tree->child;
|
nuclear@0
|
63 while(c) {
|
nuclear@0
|
64 tmp = c;
|
nuclear@0
|
65 c = c->next;
|
nuclear@0
|
66
|
nuclear@0
|
67 anm_destroy_node_tree(tmp);
|
nuclear@0
|
68 }
|
nuclear@0
|
69 anm_destroy_node(tree);
|
nuclear@0
|
70 }
|
nuclear@0
|
71
|
nuclear@0
|
72 struct anm_node *anm_create_node(void)
|
nuclear@0
|
73 {
|
nuclear@0
|
74 struct anm_node *n;
|
nuclear@0
|
75
|
nuclear@0
|
76 if((n = malloc(sizeof *n))) {
|
nuclear@0
|
77 if(anm_init_node(n) == -1) {
|
nuclear@0
|
78 free(n);
|
nuclear@0
|
79 return 0;
|
nuclear@0
|
80 }
|
nuclear@0
|
81 }
|
nuclear@0
|
82 return n;
|
nuclear@0
|
83 }
|
nuclear@0
|
84
|
nuclear@0
|
/* Tear down and free a node previously obtained from anm_create_node. */
void anm_free_node(struct anm_node *node)
{
	anm_destroy_node(node);
	free(node);
}
|
nuclear@0
|
90
|
nuclear@0
|
91 void anm_free_node_tree(struct anm_node *tree)
|
nuclear@0
|
92 {
|
nuclear@0
|
93 struct anm_node *c, *tmp;
|
nuclear@0
|
94
|
nuclear@0
|
95 if(!tree) return;
|
nuclear@0
|
96
|
nuclear@0
|
97 c = tree->child;
|
nuclear@0
|
98 while(c) {
|
nuclear@0
|
99 tmp = c;
|
nuclear@0
|
100 c = c->next;
|
nuclear@0
|
101
|
nuclear@0
|
102 anm_free_node_tree(tmp);
|
nuclear@0
|
103 }
|
nuclear@0
|
104
|
nuclear@0
|
105 anm_free_node(tree);
|
nuclear@0
|
106 }
|
nuclear@0
|
107
|
nuclear@0
|
108 int anm_set_node_name(struct anm_node *node, const char *name)
|
nuclear@0
|
109 {
|
nuclear@0
|
110 char *str;
|
nuclear@0
|
111
|
nuclear@0
|
112 if(!(str = malloc(strlen(name) + 1))) {
|
nuclear@0
|
113 return -1;
|
nuclear@0
|
114 }
|
nuclear@0
|
115 strcpy(str, name);
|
nuclear@0
|
116 free(node->name);
|
nuclear@0
|
117 node->name = str;
|
nuclear@0
|
118 return 0;
|
nuclear@0
|
119 }
|
nuclear@0
|
120
|
nuclear@0
|
121 const char *anm_get_node_name(struct anm_node *node)
|
nuclear@0
|
122 {
|
nuclear@0
|
123 return node->name ? node->name : "";
|
nuclear@0
|
124 }
|
nuclear@0
|
125
|
nuclear@0
|
126 void anm_set_interpolator(struct anm_node *node, enum anm_interpolator in)
|
nuclear@0
|
127 {
|
nuclear@0
|
128 int i;
|
nuclear@0
|
129
|
nuclear@0
|
130 for(i=0; i<ANM_NUM_TRACKS; i++) {
|
nuclear@0
|
131 anm_set_track_interpolator(node->tracks + i, in);
|
nuclear@0
|
132 }
|
nuclear@0
|
133 invalidate_cache(node);
|
nuclear@0
|
134 }
|
nuclear@0
|
135
|
nuclear@0
|
136 void anm_set_extrapolator(struct anm_node *node, enum anm_extrapolator ex)
|
nuclear@0
|
137 {
|
nuclear@0
|
138 int i;
|
nuclear@0
|
139
|
nuclear@0
|
140 for(i=0; i<ANM_NUM_TRACKS; i++) {
|
nuclear@0
|
141 anm_set_track_extrapolator(node->tracks + i, ex);
|
nuclear@0
|
142 }
|
nuclear@0
|
143 invalidate_cache(node);
|
nuclear@0
|
144 }
|
nuclear@0
|
145
|
nuclear@0
|
146 void anm_link_node(struct anm_node *p, struct anm_node *c)
|
nuclear@0
|
147 {
|
nuclear@0
|
148 c->next = p->child;
|
nuclear@0
|
149 p->child = c;
|
nuclear@0
|
150
|
nuclear@0
|
151 c->parent = p;
|
nuclear@0
|
152 invalidate_cache(c);
|
nuclear@0
|
153 }
|
nuclear@0
|
154
|
nuclear@0
|
155 int anm_unlink_node(struct anm_node *p, struct anm_node *c)
|
nuclear@0
|
156 {
|
nuclear@0
|
157 struct anm_node *iter;
|
nuclear@0
|
158
|
nuclear@0
|
159 if(p->child == c) {
|
nuclear@0
|
160 p->child = c->next;
|
nuclear@0
|
161 c->next = 0;
|
nuclear@0
|
162 invalidate_cache(c);
|
nuclear@0
|
163 return 0;
|
nuclear@0
|
164 }
|
nuclear@0
|
165
|
nuclear@0
|
166 iter = p->child;
|
nuclear@0
|
167 while(iter->next) {
|
nuclear@0
|
168 if(iter->next == c) {
|
nuclear@0
|
169 iter->next = c->next;
|
nuclear@0
|
170 c->next = 0;
|
nuclear@0
|
171 invalidate_cache(c);
|
nuclear@0
|
172 return 0;
|
nuclear@0
|
173 }
|
nuclear@0
|
174 }
|
nuclear@0
|
175 return -1;
|
nuclear@0
|
176 }
|
nuclear@0
|
177
|
nuclear@0
|
178 void anm_clear(struct anm_node *node)
|
nuclear@0
|
179 {
|
nuclear@0
|
180 int i;
|
nuclear@0
|
181
|
nuclear@0
|
182 for(i=0; i<ANM_NUM_TRACKS; i++) {
|
nuclear@0
|
183 anm_clear_track(&node->tracks[i]);
|
nuclear@0
|
184 }
|
nuclear@0
|
185 invalidate_cache(node);
|
nuclear@0
|
186 }
|
nuclear@0
|
187
|
nuclear@0
|
188 void anm_set_position(struct anm_node *node, vec3_t pos, anm_time_t tm)
|
nuclear@0
|
189 {
|
nuclear@0
|
190 anm_set_value(node->tracks + ANM_TRACK_POS_X, tm, pos.x);
|
nuclear@0
|
191 anm_set_value(node->tracks + ANM_TRACK_POS_Y, tm, pos.y);
|
nuclear@0
|
192 anm_set_value(node->tracks + ANM_TRACK_POS_Z, tm, pos.z);
|
nuclear@0
|
193 invalidate_cache(node);
|
nuclear@0
|
194 }
|
nuclear@0
|
195
|
nuclear@0
|
196 vec3_t anm_get_node_position(struct anm_node *node, anm_time_t tm)
|
nuclear@0
|
197 {
|
nuclear@0
|
198 vec3_t v;
|
nuclear@0
|
199 v.x = anm_get_value(node->tracks + ANM_TRACK_POS_X, tm);
|
nuclear@0
|
200 v.y = anm_get_value(node->tracks + ANM_TRACK_POS_Y, tm);
|
nuclear@0
|
201 v.z = anm_get_value(node->tracks + ANM_TRACK_POS_Z, tm);
|
nuclear@0
|
202 return v;
|
nuclear@0
|
203 }
|
nuclear@0
|
204
|
nuclear@0
|
205 void anm_set_rotation(struct anm_node *node, quat_t rot, anm_time_t tm)
|
nuclear@0
|
206 {
|
nuclear@0
|
207 anm_set_value(node->tracks + ANM_TRACK_ROT_X, tm, rot.x);
|
nuclear@0
|
208 anm_set_value(node->tracks + ANM_TRACK_ROT_Y, tm, rot.y);
|
nuclear@0
|
209 anm_set_value(node->tracks + ANM_TRACK_ROT_Z, tm, rot.z);
|
nuclear@0
|
210 anm_set_value(node->tracks + ANM_TRACK_ROT_W, tm, rot.w);
|
nuclear@0
|
211 invalidate_cache(node);
|
nuclear@0
|
212 }
|
nuclear@0
|
213
|
nuclear@0
|
/* Evaluate the node's LOCAL rotation at time tm.
 *
 * With ROT_USE_SLERP defined (the default, see top of file) the four
 * rotation tracks are treated as one quaternion track and interpolated
 * with spherical linear interpolation between the two keys bracketing tm.
 * Only track_x is consulted for key times/counts -- this assumes all four
 * rotation tracks always carry keys at identical times, which holds when
 * keys are only ever added through anm_set_rotation.
 */
quat_t anm_get_node_rotation(struct anm_node *node, anm_time_t tm)
{
#ifndef ROT_USE_SLERP
	/* simple path: interpolate each quaternion component independently.
	 * cheaper, but the result is not normalized and rotation speed is
	 * not constant between keys. */
	quat_t q;
	q.x = anm_get_value(node->tracks + ANM_TRACK_ROT_X, tm);
	q.y = anm_get_value(node->tracks + ANM_TRACK_ROT_Y, tm);
	q.z = anm_get_value(node->tracks + ANM_TRACK_ROT_Z, tm);
	q.w = anm_get_value(node->tracks + ANM_TRACK_ROT_W, tm);
	return q;
#else
	int idx0, idx1, last_idx;
	anm_time_t tstart, tend;
	float t, dt;
	struct anm_track *track_x, *track_y, *track_z, *track_w;
	quat_t q, q1, q2;

	track_x = node->tracks + ANM_TRACK_ROT_X;
	track_y = node->tracks + ANM_TRACK_ROT_Y;
	track_z = node->tracks + ANM_TRACK_ROT_Z;
	track_w = node->tracks + ANM_TRACK_ROT_W;

	/* no keys at all: return the per-track default values */
	if(!track_x->count) {
		q.x = track_x->def_val;
		q.y = track_y->def_val;
		q.z = track_z->def_val;
		q.w = track_w->def_val;
		return q;
	}

	last_idx = track_x->count - 1;

	tstart = track_x->keys[0].time;
	tend = track_x->keys[last_idx].time;

	/* single key (or all keys at the same time): constant rotation */
	if(tstart == tend) {
		q.x = track_x->keys[0].val;
		q.y = track_y->keys[0].val;
		q.z = track_z->keys[0].val;
		q.w = track_w->keys[0].val;
		return q;
	}

	/* apply the track's extrapolation mode to map tm into [tstart, tend] */
	tm = anm_remap_time(track_x, tm, tstart, tend);

	idx0 = anm_get_key_interval(track_x, tm);
	assert(idx0 >= 0 && idx0 < track_x->count);
	idx1 = idx0 + 1;

	/* tm lands on/after the last key: no second key to slerp towards */
	if(idx0 == last_idx) {
		q.x = track_x->keys[idx0].val;
		q.y = track_y->keys[idx0].val;
		q.z = track_z->keys[idx0].val;
		q.w = track_w->keys[idx0].val;
		return q;
	}

	/* normalized parameter t in [0, 1) between the two bracketing keys */
	dt = (float)(track_x->keys[idx1].time - track_x->keys[idx0].time);
	t = (float)(tm - track_x->keys[idx0].time) / dt;

	q1.x = track_x->keys[idx0].val;
	q1.y = track_y->keys[idx0].val;
	q1.z = track_z->keys[idx0].val;
	q1.w = track_w->keys[idx0].val;

	q2.x = track_x->keys[idx1].val;
	q2.y = track_y->keys[idx1].val;
	q2.z = track_z->keys[idx1].val;
	q2.w = track_w->keys[idx1].val;

	/*q1 = quat_normalize(q1);
	q2 = quat_normalize(q2);*/

	return quat_slerp(q1, q2, t);
#endif
}
|
nuclear@0
|
289
|
nuclear@0
|
290 void anm_set_scaling(struct anm_node *node, vec3_t scl, anm_time_t tm)
|
nuclear@0
|
291 {
|
nuclear@0
|
292 anm_set_value(node->tracks + ANM_TRACK_SCL_X, tm, scl.x);
|
nuclear@0
|
293 anm_set_value(node->tracks + ANM_TRACK_SCL_Y, tm, scl.y);
|
nuclear@0
|
294 anm_set_value(node->tracks + ANM_TRACK_SCL_Z, tm, scl.z);
|
nuclear@0
|
295 invalidate_cache(node);
|
nuclear@0
|
296 }
|
nuclear@0
|
297
|
nuclear@0
|
298 vec3_t anm_get_node_scaling(struct anm_node *node, anm_time_t tm)
|
nuclear@0
|
299 {
|
nuclear@0
|
300 vec3_t v;
|
nuclear@0
|
301 v.x = anm_get_value(node->tracks + ANM_TRACK_SCL_X, tm);
|
nuclear@0
|
302 v.y = anm_get_value(node->tracks + ANM_TRACK_SCL_Y, tm);
|
nuclear@0
|
303 v.z = anm_get_value(node->tracks + ANM_TRACK_SCL_Z, tm);
|
nuclear@0
|
304 return v;
|
nuclear@0
|
305 }
|
nuclear@0
|
306
|
nuclear@0
|
307
|
nuclear@0
|
308 vec3_t anm_get_position(struct anm_node *node, anm_time_t tm)
|
nuclear@0
|
309 {
|
nuclear@0
|
310 mat4_t xform;
|
nuclear@0
|
311 vec3_t pos = {0.0, 0.0, 0.0};
|
nuclear@0
|
312
|
nuclear@0
|
313 if(!node->parent) {
|
nuclear@0
|
314 return anm_get_node_position(node, tm);
|
nuclear@0
|
315 }
|
nuclear@0
|
316
|
nuclear@0
|
317 anm_get_matrix(node, xform, tm);
|
nuclear@0
|
318 return v3_transform(pos, xform);
|
nuclear@0
|
319 }
|
nuclear@0
|
320
|
nuclear@0
|
321 quat_t anm_get_rotation(struct anm_node *node, anm_time_t tm)
|
nuclear@0
|
322 {
|
nuclear@0
|
323 quat_t rot, prot;
|
nuclear@0
|
324 rot = anm_get_node_rotation(node, tm);
|
nuclear@0
|
325
|
nuclear@0
|
326 if(!node->parent) {
|
nuclear@0
|
327 return rot;
|
nuclear@0
|
328 }
|
nuclear@0
|
329
|
nuclear@0
|
330 prot = anm_get_rotation(node->parent, tm);
|
nuclear@0
|
331 return quat_mul(prot, rot);
|
nuclear@0
|
332 }
|
nuclear@0
|
333
|
nuclear@0
|
334 vec3_t anm_get_scaling(struct anm_node *node, anm_time_t tm)
|
nuclear@0
|
335 {
|
nuclear@0
|
336 vec3_t s, ps;
|
nuclear@0
|
337 s = anm_get_node_scaling(node, tm);
|
nuclear@0
|
338
|
nuclear@0
|
339 if(!node->parent) {
|
nuclear@0
|
340 return s;
|
nuclear@0
|
341 }
|
nuclear@0
|
342
|
nuclear@0
|
343 ps = anm_get_scaling(node->parent, tm);
|
nuclear@0
|
344 return v3_mul(s, ps);
|
nuclear@0
|
345 }
|
nuclear@0
|
346
|
nuclear@0
|
347 void anm_set_pivot(struct anm_node *node, vec3_t piv)
|
nuclear@0
|
348 {
|
nuclear@0
|
349 node->pivot = piv;
|
nuclear@0
|
350 }
|
nuclear@0
|
351
|
nuclear@0
|
352 vec3_t anm_get_pivot(struct anm_node *node)
|
nuclear@0
|
353 {
|
nuclear@0
|
354 return node->pivot;
|
nuclear@0
|
355 }
|
nuclear@0
|
356
|
nuclear@0
|
/* Build the node's LOCAL transformation matrix at time tm into mat.
 * The composition (applied right-to-left to a point) is:
 *   pivot-translation * translation * rotation * scaling * inverse-pivot
 * The rotation/scale/translation are fused by hand into the matrix rather
 * than multiplying full 4x4 matrices -- the statement order below is the
 * algorithm, so tread carefully when modifying it.
 */
void anm_get_node_matrix(struct anm_node *node, mat4_t mat, anm_time_t tm)
{
	int i;
	mat4_t rmat;
	vec3_t pos, scale;
	quat_t rot;

	/* sample the three animated channels at tm */
	pos = anm_get_node_position(node, tm);
	rot = anm_get_node_rotation(node, tm);
	scale = anm_get_node_scaling(node, tm);

	/* start from a pure translation to the pivot point */
	m4_set_translation(mat, node->pivot.x, node->pivot.y, node->pivot.z);

	/* overwrite the upper-left 3x3 with the rotation matrix */
	quat_to_mat4(rmat, rot);
	for(i=0; i<3; i++) {
		mat[i][0] = rmat[i][0];
		mat[i][1] = rmat[i][1];
		mat[i][2] = rmat[i][2];
	}
	/* this loop is equivalent to: m4_mult(mat, mat, rmat); */

	/* scale the rotation columns in place and add the translation to the
	 * pivot offset already sitting in the fourth column */
	mat[0][0] *= scale.x; mat[0][1] *= scale.y; mat[0][2] *= scale.z; mat[0][3] += pos.x;
	mat[1][0] *= scale.x; mat[1][1] *= scale.y; mat[1][2] *= scale.z; mat[1][3] += pos.y;
	mat[2][0] *= scale.x; mat[2][1] *= scale.y; mat[2][2] *= scale.z; mat[2][3] += pos.z;

	/* finally undo the pivot offset on the input side */
	m4_translate(mat, -node->pivot.x, -node->pivot.y, -node->pivot.z);

	/* that's basically: pivot * rotation * translation * scaling * -pivot */
}
|
nuclear@0
|
386
|
nuclear@0
|
387 void anm_get_node_inv_matrix(struct anm_node *node, mat4_t mat, anm_time_t tm)
|
nuclear@0
|
388 {
|
nuclear@0
|
389 mat4_t tmp;
|
nuclear@0
|
390 anm_get_node_matrix(node, tmp, tm);
|
nuclear@0
|
391 m4_inverse(mat, tmp);
|
nuclear@0
|
392 }
|
nuclear@0
|
393
|
nuclear@0
|
394 void anm_eval_node(struct anm_node *node, anm_time_t tm)
|
nuclear@0
|
395 {
|
nuclear@0
|
396 anm_get_node_matrix(node, node->matrix, tm);
|
nuclear@0
|
397 }
|
nuclear@0
|
398
|
nuclear@0
|
399 void anm_eval(struct anm_node *node, anm_time_t tm)
|
nuclear@0
|
400 {
|
nuclear@0
|
401 struct anm_node *c;
|
nuclear@0
|
402
|
nuclear@0
|
403 anm_eval_node(node, tm);
|
nuclear@0
|
404
|
nuclear@0
|
405 if(node->parent) {
|
nuclear@0
|
406 /* due to post-order traversal, the parent matrix is already evaluated */
|
nuclear@0
|
407 m4_mult(node->matrix, node->parent->matrix, node->matrix);
|
nuclear@0
|
408 }
|
nuclear@0
|
409
|
nuclear@0
|
410 /* recersively evaluate all children */
|
nuclear@0
|
411 c = node->child;
|
nuclear@0
|
412 while(c) {
|
nuclear@0
|
413 anm_eval(c, tm);
|
nuclear@0
|
414 c = c->next;
|
nuclear@0
|
415 }
|
nuclear@0
|
416 }
|
nuclear@0
|
417
|
nuclear@0
|
/* Compute the node's full (world) transformation matrix at time tm into
 * mat, composing all ancestor matrices recursively.
 *
 * Results are memoized in a per-thread mat_cache (keyed on the time of
 * the last query), so repeated lookups at the same tm are cheap. The
 * first call on each thread lazily allocates that thread's cache entry
 * and links it into node->cache_list under cache_list_lock so it can be
 * reclaimed by anm_destroy_node.
 */
void anm_get_matrix(struct anm_node *node, mat4_t mat, anm_time_t tm)
{
	struct mat_cache *cache = pthread_getspecific(node->cache_key);
	if(!cache) {
		/* first use on this thread: create the cache entry */
		cache = malloc(sizeof *cache);
		assert(cache);	/* NOTE(review): aborts on OOM in debug; no-op in NDEBUG builds */

		/* register the entry on the node's list so destruction can free it */
		pthread_mutex_lock(&node->cache_list_lock);
		cache->next = node->cache_list;
		node->cache_list = cache;
		pthread_mutex_unlock(&node->cache_list_lock);

		cache->time = ANM_TIME_INVAL;
		cache->inv_time = ANM_TIME_INVAL;
		pthread_setspecific(node->cache_key, cache);
	}

	/* recompute only when queried at a different time than last cached */
	if(cache->time != tm) {
		anm_get_node_matrix(node, cache->matrix, tm);

		if(node->parent) {
			mat4_t parent_mat;

			/* recursion also fills/uses the parent's own cache */
			anm_get_matrix(node->parent, parent_mat, tm);
			m4_mult(cache->matrix, parent_mat, cache->matrix);
		}
		cache->time = tm;
	}
	m4_copy(mat, cache->matrix);
}
|
nuclear@0
|
448
|
nuclear@0
|
449 void anm_get_inv_matrix(struct anm_node *node, mat4_t mat, anm_time_t tm)
|
nuclear@0
|
450 {
|
nuclear@0
|
451 struct mat_cache *cache = pthread_getspecific(node->cache_key);
|
nuclear@0
|
452 if(!cache) {
|
nuclear@0
|
453 cache = malloc(sizeof *cache);
|
nuclear@0
|
454 assert(cache);
|
nuclear@0
|
455
|
nuclear@0
|
456 pthread_mutex_lock(&node->cache_list_lock);
|
nuclear@0
|
457 cache->next = node->cache_list;
|
nuclear@0
|
458 node->cache_list = cache;
|
nuclear@0
|
459 pthread_mutex_unlock(&node->cache_list_lock);
|
nuclear@0
|
460
|
nuclear@0
|
461 cache->inv_time = ANM_TIME_INVAL;
|
nuclear@0
|
462 cache->inv_time = ANM_TIME_INVAL;
|
nuclear@0
|
463 pthread_setspecific(node->cache_key, cache);
|
nuclear@0
|
464 }
|
nuclear@0
|
465
|
nuclear@0
|
466 if(cache->inv_time != tm) {
|
nuclear@0
|
467 anm_get_matrix(node, mat, tm);
|
nuclear@0
|
468 m4_inverse(cache->inv_matrix, mat);
|
nuclear@0
|
469 cache->inv_time = tm;
|
nuclear@0
|
470 }
|
nuclear@0
|
471 m4_copy(mat, cache->inv_matrix);
|
nuclear@0
|
472 }
|
nuclear@0
|
473
|
nuclear@0
|
474 anm_time_t anm_get_start_time(struct anm_node *node)
|
nuclear@0
|
475 {
|
nuclear@0
|
476 int i;
|
nuclear@0
|
477 struct anm_node *c;
|
nuclear@0
|
478 anm_time_t res = LONG_MAX;
|
nuclear@0
|
479
|
nuclear@0
|
480 for(i=0; i<ANM_NUM_TRACKS; i++) {
|
nuclear@0
|
481 if(node->tracks[i].count) {
|
nuclear@0
|
482 anm_time_t tm = node->tracks[i].keys[0].time;
|
nuclear@0
|
483 if(tm < res) {
|
nuclear@0
|
484 res = tm;
|
nuclear@0
|
485 }
|
nuclear@0
|
486 }
|
nuclear@0
|
487 }
|
nuclear@0
|
488
|
nuclear@0
|
489 c = node->child;
|
nuclear@0
|
490 while(c) {
|
nuclear@0
|
491 anm_time_t tm = anm_get_start_time(c);
|
nuclear@0
|
492 if(tm < res) {
|
nuclear@0
|
493 res = tm;
|
nuclear@0
|
494 }
|
nuclear@0
|
495 c = c->next;
|
nuclear@0
|
496 }
|
nuclear@0
|
497 return res;
|
nuclear@0
|
498 }
|
nuclear@0
|
499
|
nuclear@0
|
500 anm_time_t anm_get_end_time(struct anm_node *node)
|
nuclear@0
|
501 {
|
nuclear@0
|
502 int i;
|
nuclear@0
|
503 struct anm_node *c;
|
nuclear@0
|
504 anm_time_t res = LONG_MIN;
|
nuclear@0
|
505
|
nuclear@0
|
506 for(i=0; i<ANM_NUM_TRACKS; i++) {
|
nuclear@0
|
507 if(node->tracks[i].count) {
|
nuclear@0
|
508 anm_time_t tm = node->tracks[i].keys[node->tracks[i].count - 1].time;
|
nuclear@0
|
509 if(tm > res) {
|
nuclear@0
|
510 res = tm;
|
nuclear@0
|
511 }
|
nuclear@0
|
512 }
|
nuclear@0
|
513 }
|
nuclear@0
|
514
|
nuclear@0
|
515 c = node->child;
|
nuclear@0
|
516 while(c) {
|
nuclear@0
|
517 anm_time_t tm = anm_get_end_time(c);
|
nuclear@0
|
518 if(tm > res) {
|
nuclear@0
|
519 res = tm;
|
nuclear@0
|
520 }
|
nuclear@0
|
521 c = c->next;
|
nuclear@0
|
522 }
|
nuclear@0
|
523 return res;
|
nuclear@0
|
524 }
|
nuclear@0
|
525
|
nuclear@0
|
526 static void invalidate_cache(struct anm_node *node)
|
nuclear@0
|
527 {
|
nuclear@0
|
528 struct mat_cache *cache = pthread_getspecific(node->cache_key);
|
nuclear@0
|
529 if(cache) {
|
nuclear@0
|
530 cache->time = cache->inv_time = ANM_TIME_INVAL;
|
nuclear@0
|
531 }
|
nuclear@0
|
532 }
|