#include <stdlib.h>
#include <string.h>
#include <limits.h>
#include <assert.h>
#include <pthread.h>
#include "anim.h"
#include "dynarr.h"

static void invalidate_cache(struct anm_node *node);

int anm_init_node(struct anm_node *node)
{
	int i, j;
	static const float defaults[] = {
		0.0f, 0.0f, 0.0f,		/* default position */
		0.0f, 0.0f, 0.0f, 1.0f,	/* default rotation quat */
		1.0f, 1.0f, 1.0f		/* default scale factor */
	};

	memset(node, 0, sizeof *node);

	/* initialize thread-local matrix cache */
	pthread_key_create(&node->cache_key, 0);
	pthread_mutex_init(&node->cache_list_lock, 0);

	for(i=0; i<ANM_NUM_TRACKS; i++) {
		if(anm_init_track(node->tracks + i) == -1) {
			/* clean up any tracks initialized so far and report failure */
			for(j=0; j<i; j++) {
				anm_destroy_track(node->tracks + j);
			}
			pthread_key_delete(node->cache_key);
			return -1;
		}
		anm_set_track_default(node->tracks + i, defaults[i]);
	}
	return 0;
}
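/* Usage sketch (editor's illustration, not part of the library): anm_init_node
 * and anm_destroy_node are for nodes the caller allocates, e.g. embedded in a
 * larger structure; heap-allocated nodes use anm_create_node/anm_free_node
 * below. The "bone" struct here is hypothetical.
 */
#if 0
struct bone {
	struct anm_node node;
	float length;
};

static int bone_init(struct bone *b, float len)
{
	if(anm_init_node(&b->node) == -1) {
		return -1;
	}
	b->length = len;
	return 0;
}

static void bone_cleanup(struct bone *b)
{
	anm_destroy_node(&b->node);
}
#endif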

void anm_destroy_node(struct anm_node *node)
{
	int i;
	free(node->name);

	for(i=0; i<ANM_NUM_TRACKS; i++) {
		anm_destroy_track(node->tracks + i);
	}

	/* destroy thread-specific cache */
	pthread_key_delete(node->cache_key);

	while(node->cache_list) {
		struct mat_cache *tmp = node->cache_list;
		node->cache_list = tmp->next;
		free(tmp);
	}
}

void anm_destroy_node_tree(struct anm_node *tree)
{
	struct anm_node *c, *tmp;

	if(!tree) return;

	c = tree->child;
	while(c) {
		tmp = c;
		c = c->next;

		anm_destroy_node_tree(tmp);
	}
	anm_destroy_node(tree);
}

struct anm_node *anm_create_node(void)
{
	struct anm_node *n;

	if((n = malloc(sizeof *n))) {
		if(anm_init_node(n) == -1) {
			free(n);
			return 0;
		}
	}
	return n;
}

void anm_free_node(struct anm_node *node)
{
	anm_destroy_node(node);
	free(node);
}

void anm_free_node_tree(struct anm_node *tree)
{
	struct anm_node *c, *tmp;

	if(!tree) return;

	c = tree->child;
	while(c) {
		tmp = c;
		c = c->next;

		anm_free_node_tree(tmp);
	}

	anm_free_node(tree);
}

int anm_set_node_name(struct anm_node *node, const char *name)
{
	char *str;

	if(!(str = malloc(strlen(name) + 1))) {
		return -1;
	}
	strcpy(str, name);
	free(node->name);
	node->name = str;
	return 0;
}

const char *anm_get_node_name(struct anm_node *node)
{
	return node->name ? node->name : "";
}

void anm_set_interpolator(struct anm_node *node, enum anm_interpolator in)
{
	int i;

	for(i=0; i<ANM_NUM_TRACKS; i++) {
		anm_set_track_interpolator(node->tracks + i, in);
	}
	invalidate_cache(node);
}

void anm_set_extrapolator(struct anm_node *node, enum anm_extrapolator ex)
{
	int i;

	for(i=0; i<ANM_NUM_TRACKS; i++) {
		anm_set_track_extrapolator(node->tracks + i, ex);
	}
	invalidate_cache(node);
}

void anm_link_node(struct anm_node *p, struct anm_node *c)
{
	c->next = p->child;
	p->child = c;

	c->parent = p;
	invalidate_cache(c);
}

int anm_unlink_node(struct anm_node *p, struct anm_node *c)
{
	struct anm_node *iter;

	if(p->child == c) {
		p->child = c->next;
		c->next = 0;
		invalidate_cache(c);
		return 0;
	}

	iter = p->child;
	while(iter->next) {
		if(iter->next == c) {
			iter->next = c->next;
			c->next = 0;
			invalidate_cache(c);
			return 0;
		}
		iter = iter->next;	/* advance, otherwise the loop never terminates */
	}
	return -1;
}
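/* Usage sketch (editor's illustration, not shipped code): building a small
 * two-node hierarchy and handing it back to the caller. Names and the
 * build_arm function are made up for the example.
 */
#if 0
static struct anm_node *build_arm(void)
{
	struct anm_node *shoulder, *elbow;

	if(!(shoulder = anm_create_node())) {
		return 0;
	}
	if(!(elbow = anm_create_node())) {
		anm_free_node(shoulder);
		return 0;
	}
	anm_set_node_name(shoulder, "shoulder");
	anm_set_node_name(elbow, "elbow");

	anm_link_node(shoulder, elbow);	/* elbow now transforms in shoulder space */
	return shoulder;	/* release the whole tree with anm_free_node_tree() */
}
#endif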

void anm_set_position(struct anm_node *node, vec3_t pos, anm_time_t tm)
{
	anm_set_value(node->tracks + ANM_TRACK_POS_X, tm, pos.x);
	anm_set_value(node->tracks + ANM_TRACK_POS_Y, tm, pos.y);
	anm_set_value(node->tracks + ANM_TRACK_POS_Z, tm, pos.z);
	invalidate_cache(node);
}

vec3_t anm_get_node_position(struct anm_node *node, anm_time_t tm)
{
	vec3_t v;
	v.x = anm_get_value(node->tracks + ANM_TRACK_POS_X, tm);
	v.y = anm_get_value(node->tracks + ANM_TRACK_POS_Y, tm);
	v.z = anm_get_value(node->tracks + ANM_TRACK_POS_Z, tm);
	return v;
}
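/* Usage sketch (editor's illustration): each anm_set_position call adds a
 * keyframe at time tm to the three position tracks, and sampling between keys
 * is handled by the track interpolator. Times and values below are arbitrary.
 */
#if 0
static void animate_slide(struct anm_node *node)
{
	vec3_t a = {0, 0, 0}, b = {10, 0, 0};
	vec3_t mid;

	anm_set_position(node, a, 0);		/* key at t = 0 */
	anm_set_position(node, b, 1000);	/* key at t = 1000 */

	/* halfway in time; with linear interpolation this is roughly (5, 0, 0) */
	mid = anm_get_node_position(node, 500);
	(void)mid;
}
#endif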

void anm_set_rotation(struct anm_node *node, quat_t rot, anm_time_t tm)
{
	anm_set_value(node->tracks + ANM_TRACK_ROT_X, tm, rot.x);
	anm_set_value(node->tracks + ANM_TRACK_ROT_Y, tm, rot.y);
	anm_set_value(node->tracks + ANM_TRACK_ROT_Z, tm, rot.z);
	anm_set_value(node->tracks + ANM_TRACK_ROT_W, tm, rot.w);
	invalidate_cache(node);
}

quat_t anm_get_node_rotation(struct anm_node *node, anm_time_t tm)
{
	int idx0, idx1, last_idx;
	anm_time_t tstart, tend;
	float t, dt;
	struct anm_track *track_x, *track_y, *track_z, *track_w;
	quat_t q, q1, q2;

	track_x = node->tracks + ANM_TRACK_ROT_X;
	track_y = node->tracks + ANM_TRACK_ROT_Y;
	track_z = node->tracks + ANM_TRACK_ROT_Z;
	track_w = node->tracks + ANM_TRACK_ROT_W;

	if(!track_x->count) {
		q.x = track_x->def_val;
		q.y = track_y->def_val;
		q.z = track_z->def_val;
		q.w = track_w->def_val;
		return q;
	}

	last_idx = track_x->count - 1;

	tstart = track_x->keys[0].time;
	tend = track_x->keys[last_idx].time;
	tm = anm_remap_time(track_x, tm, tstart, tend);

	idx0 = anm_get_key_interval(track_x, tm);
	assert(idx0 >= 0 && idx0 < track_x->count);
	idx1 = idx0 + 1;

	if(idx1 > last_idx) {
		/* single key, or we landed on the last key: nothing to interpolate */
		q.x = track_x->keys[idx0].val;
		q.y = track_y->keys[idx0].val;
		q.z = track_z->keys[idx0].val;
		q.w = track_w->keys[idx0].val;
		return q;
	}

	dt = (float)(track_x->keys[idx1].time - track_x->keys[idx0].time);
	t = (float)(tm - track_x->keys[idx0].time) / dt;

	q1.x = track_x->keys[idx0].val;
	q1.y = track_y->keys[idx0].val;
	q1.z = track_z->keys[idx0].val;
	q1.w = track_w->keys[idx0].val;

	q2.x = track_x->keys[idx1].val;
	q2.y = track_y->keys[idx1].val;
	q2.z = track_z->keys[idx1].val;
	q2.w = track_w->keys[idx1].val;

	return quat_slerp(q1, q2, t);
}
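/* Usage sketch (editor's illustration): rotation keys are stored as four
 * scalar tracks (the x, y, z, w of a quaternion) and blended with quat_slerp
 * above. The quaternions are built by hand here; times and angles are
 * arbitrary.
 */
#if 0
#include <math.h>

static void animate_spin(struct anm_node *node)
{
	quat_t qa, qb, mid;
	float half = 3.14159265f * 0.25f;	/* half of a 90 degree rotation */

	/* identity orientation at t = 0 */
	qa.x = qa.y = qa.z = 0.0f;
	qa.w = 1.0f;

	/* 90 degrees about +Z at t = 1000: q = (axis * sin(a/2), cos(a/2)) */
	qb.x = 0.0f;
	qb.y = 0.0f;
	qb.z = sinf(half);
	qb.w = cosf(half);

	anm_set_rotation(node, qa, 0);
	anm_set_rotation(node, qb, 1000);

	/* halfway in time this slerps to roughly 45 degrees about +Z */
	mid = anm_get_node_rotation(node, 500);
	(void)mid;
}
#endif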

void anm_set_scaling(struct anm_node *node, vec3_t scl, anm_time_t tm)
{
	anm_set_value(node->tracks + ANM_TRACK_SCL_X, tm, scl.x);
	anm_set_value(node->tracks + ANM_TRACK_SCL_Y, tm, scl.y);
	anm_set_value(node->tracks + ANM_TRACK_SCL_Z, tm, scl.z);
	invalidate_cache(node);
}

vec3_t anm_get_node_scaling(struct anm_node *node, anm_time_t tm)
{
	vec3_t v;
	v.x = anm_get_value(node->tracks + ANM_TRACK_SCL_X, tm);
	v.y = anm_get_value(node->tracks + ANM_TRACK_SCL_Y, tm);
	v.z = anm_get_value(node->tracks + ANM_TRACK_SCL_Z, tm);
	return v;
}


vec3_t anm_get_position(struct anm_node *node, anm_time_t tm)
{
	mat4_t xform;
	vec3_t pos = {0.0, 0.0, 0.0};

	if(!node->parent) {
		return anm_get_node_position(node, tm);
	}

	anm_get_matrix(node, xform, tm);
	return v3_transform(pos, xform);
}

quat_t anm_get_rotation(struct anm_node *node, anm_time_t tm)
{
	quat_t rot, prot;
	rot = anm_get_node_rotation(node, tm);

	if(!node->parent) {
		return rot;
	}

	prot = anm_get_rotation(node->parent, tm);
	return quat_mul(prot, rot);
}

vec3_t anm_get_scaling(struct anm_node *node, anm_time_t tm)
{
	vec3_t s, ps;
	s = anm_get_node_scaling(node, tm);

	if(!node->parent) {
		return s;
	}

	ps = anm_get_scaling(node->parent, tm);
	return v3_mul(s, ps);
}

void anm_set_pivot(struct anm_node *node, vec3_t piv)
{
	node->pivot = piv;
}

vec3_t anm_get_pivot(struct anm_node *node)
{
	return node->pivot;
}

void anm_get_matrix(struct anm_node *node, mat4_t mat, anm_time_t tm)
{
	struct mat_cache *cache = pthread_getspecific(node->cache_key);
	if(!cache) {
		cache = malloc(sizeof *cache);
		assert(cache);

		pthread_mutex_lock(&node->cache_list_lock);
		cache->next = node->cache_list;
		node->cache_list = cache;
		pthread_mutex_unlock(&node->cache_list_lock);

		cache->time = ANM_TIME_INVAL;
		cache->inv_time = ANM_TIME_INVAL;
		pthread_setspecific(node->cache_key, cache);
	}

	if(cache->time != tm) {
		mat4_t tmat, rmat, smat, pivmat, neg_pivmat;
		vec3_t pos, scale;
		quat_t rot;

		m4_identity(tmat);
		/* no need to m4_identity(rmat); quat_to_mat4 sets it fully */
		m4_identity(smat);
		m4_identity(pivmat);
		m4_identity(neg_pivmat);

		pos = anm_get_node_position(node, tm);
		rot = anm_get_node_rotation(node, tm);
		scale = anm_get_node_scaling(node, tm);

		m4_translate(pivmat, node->pivot.x, node->pivot.y, node->pivot.z);
		m4_translate(neg_pivmat, -node->pivot.x, -node->pivot.y, -node->pivot.z);

		m4_translate(tmat, pos.x, pos.y, pos.z);
		quat_to_mat4(rmat, rot);
		m4_scale(smat, scale.x, scale.y, scale.z);

		/* node matrix: pivot * translation * rotation * scale * -pivot */
		m4_mult(cache->matrix, pivmat, tmat);
		m4_mult(cache->matrix, cache->matrix, rmat);
		m4_mult(cache->matrix, cache->matrix, smat);
		m4_mult(cache->matrix, cache->matrix, neg_pivmat);

		if(node->parent) {
			mat4_t parent_mat;

			anm_get_matrix(node->parent, parent_mat, tm);
			m4_mult(cache->matrix, parent_mat, cache->matrix);
		}
		cache->time = tm;
	}
	m4_copy(mat, cache->matrix);
}
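/* Usage sketch (editor's illustration): a typical per-frame query. The matrix
 * is written into a caller-provided mat4_t; the mat_cache above keeps one
 * cached result per thread, so repeated queries at the same time are cheap.
 * get_anim_time() stands in for whatever clock the application uses.
 */
#if 0
static void draw_node(struct anm_node *node)
{
	mat4_t xform;
	anm_time_t t = get_anim_time();	/* hypothetical application clock */

	anm_get_matrix(node, xform, t);	/* includes all parent transformations */
	/* ... load xform as the model matrix and draw ... */
}
#endif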

void anm_get_inv_matrix(struct anm_node *node, mat4_t mat, anm_time_t tm)
{
	struct mat_cache *cache = pthread_getspecific(node->cache_key);
	if(!cache) {
		cache = malloc(sizeof *cache);
		assert(cache);

		pthread_mutex_lock(&node->cache_list_lock);
		cache->next = node->cache_list;
		node->cache_list = cache;
		pthread_mutex_unlock(&node->cache_list_lock);

		cache->time = ANM_TIME_INVAL;
		cache->inv_time = ANM_TIME_INVAL;
		pthread_setspecific(node->cache_key, cache);
	}

	if(cache->inv_time != tm) {
		anm_get_matrix(node, mat, tm);
		m4_inverse(cache->inv_matrix, mat);
		cache->inv_time = tm;
	}
	m4_copy(mat, cache->inv_matrix);
}
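/* Usage sketch (editor's illustration): the inverse matrix maps points from
 * world space back into this node's local space, e.g. for hit testing.
 * v3_transform is the same vmath helper used by anm_get_position above.
 */
#if 0
static vec3_t world_to_local(struct anm_node *node, vec3_t wpt, anm_time_t tm)
{
	mat4_t inv;

	anm_get_inv_matrix(node, inv, tm);
	return v3_transform(wpt, inv);
}
#endif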

anm_time_t anm_get_start_time(struct anm_node *node)
{
	int i;
	struct anm_node *c;
	anm_time_t res = LONG_MAX;

	for(i=0; i<ANM_NUM_TRACKS; i++) {
		if(node->tracks[i].count) {
			anm_time_t tm = node->tracks[i].keys[0].time;
			if(tm < res) {
				res = tm;
			}
		}
	}

	c = node->child;
	while(c) {
		anm_time_t tm = anm_get_start_time(c);
		if(tm < res) {
			res = tm;
		}
		c = c->next;
	}
	return res;
}

anm_time_t anm_get_end_time(struct anm_node *node)
{
	int i;
	struct anm_node *c;
	anm_time_t res = LONG_MIN;

	for(i=0; i<ANM_NUM_TRACKS; i++) {
		if(node->tracks[i].count) {
			anm_time_t tm = node->tracks[i].keys[node->tracks[i].count - 1].time;
			if(tm > res) {
				res = tm;
			}
		}
	}

	c = node->child;
	while(c) {
		anm_time_t tm = anm_get_end_time(c);
		if(tm > res) {
			res = tm;
		}
		c = c->next;
	}
	return res;
}
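/* Usage sketch (editor's illustration): the start/end times cover the node and
 * all of its descendants, so the pair is handy for looping a whole hierarchy.
 */
#if 0
static anm_time_t loop_time(struct anm_node *root, anm_time_t t)
{
	anm_time_t start = anm_get_start_time(root);
	anm_time_t end = anm_get_end_time(root);
	anm_time_t off;

	if(end <= start) {
		return start;	/* no keyframes, or a single instant */
	}
	off = (t - start) % (end - start);
	if(off < 0) {
		off += end - start;
	}
	return start + off;
}
#endif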

static void invalidate_cache(struct anm_node *node)
{
	/* invalidates only the calling thread's cached matrices */
	struct mat_cache *cache = pthread_getspecific(node->cache_key);
	if(cache) {
		cache->time = cache->inv_time = ANM_TIME_INVAL;
	}
}