libanim

annotate src/anim.c @ 57:2da758956e50

added the option of lightweight pre-pass top-down recursive calculation of matrices instead of going through the existing lazy thread-specific caching algorithm.
author John Tsiombikas <nuclear@member.fsf.org>
date Mon, 09 Dec 2013 04:06:30 +0200
parents 46a1f2aa1129
children 5993f405a1cb
rev   line source
#include <stdlib.h>
#include <string.h>
#include <limits.h>
#include <assert.h>
#include "anim.h"
#include "dynarr.h"
nuclear@0 6
nuclear@7 7 #define ROT_USE_SLERP
nuclear@7 8
nuclear@0 9 static void invalidate_cache(struct anm_node *node);
nuclear@0 10
nuclear@0 11 int anm_init_node(struct anm_node *node)
nuclear@0 12 {
nuclear@0 13 int i, j;
nuclear@0 14 static const float defaults[] = {
nuclear@0 15 0.0f, 0.0f, 0.0f, /* default position */
nuclear@0 16 0.0f, 0.0f, 0.0f, 1.0f, /* default rotation quat */
nuclear@0 17 1.0f, 1.0f, 1.0f /* default scale factor */
nuclear@0 18 };
nuclear@0 19
nuclear@0 20 memset(node, 0, sizeof *node);
nuclear@0 21
nuclear@0 22 /* initialize thread-local matrix cache */
nuclear@0 23 pthread_key_create(&node->cache_key, 0);
nuclear@10 24 pthread_mutex_init(&node->cache_list_lock, 0);
nuclear@0 25
nuclear@0 26 for(i=0; i<ANM_NUM_TRACKS; i++) {
nuclear@0 27 if(anm_init_track(node->tracks + i) == -1) {
nuclear@0 28 for(j=0; j<i; j++) {
nuclear@0 29 anm_destroy_track(node->tracks + i);
nuclear@0 30 }
nuclear@0 31 }
nuclear@0 32 anm_set_track_default(node->tracks + i, defaults[i]);
nuclear@0 33 }
nuclear@0 34 return 0;
nuclear@0 35 }
nuclear@0 36
nuclear@0 37 void anm_destroy_node(struct anm_node *node)
nuclear@0 38 {
nuclear@0 39 int i;
nuclear@0 40 free(node->name);
nuclear@0 41
nuclear@0 42 for(i=0; i<ANM_NUM_TRACKS; i++) {
nuclear@0 43 anm_destroy_track(node->tracks + i);
nuclear@0 44 }
nuclear@0 45
nuclear@0 46 /* destroy thread-specific cache */
nuclear@0 47 pthread_key_delete(node->cache_key);
nuclear@0 48
nuclear@0 49 while(node->cache_list) {
nuclear@0 50 struct mat_cache *tmp = node->cache_list;
nuclear@0 51 node->cache_list = tmp->next;
nuclear@0 52 free(tmp);
nuclear@0 53 }
nuclear@0 54 }
nuclear@0 55
nuclear@0 56 void anm_destroy_node_tree(struct anm_node *tree)
nuclear@0 57 {
nuclear@0 58 struct anm_node *c, *tmp;
nuclear@0 59
nuclear@0 60 if(!tree) return;
nuclear@0 61
nuclear@0 62 c = tree->child;
nuclear@0 63 while(c) {
nuclear@0 64 tmp = c;
nuclear@0 65 c = c->next;
nuclear@0 66
nuclear@0 67 anm_destroy_node_tree(tmp);
nuclear@0 68 }
nuclear@0 69 anm_destroy_node(tree);
nuclear@0 70 }
nuclear@0 71
nuclear@0 72 struct anm_node *anm_create_node(void)
nuclear@0 73 {
nuclear@0 74 struct anm_node *n;
nuclear@0 75
nuclear@0 76 if((n = malloc(sizeof *n))) {
nuclear@0 77 if(anm_init_node(n) == -1) {
nuclear@0 78 free(n);
nuclear@0 79 return 0;
nuclear@0 80 }
nuclear@0 81 }
nuclear@0 82 return n;
nuclear@0 83 }
nuclear@0 84
/* Destroy and deallocate a single node (counterpart of anm_create_node).
 * Accepts NULL as a no-op, consistent with free() and the *_tree variants. */
void anm_free_node(struct anm_node *node)
{
	if(!node) return;	/* previously crashed inside anm_destroy_node on NULL */
	anm_destroy_node(node);
	free(node);
}
nuclear@0 90
nuclear@0 91 void anm_free_node_tree(struct anm_node *tree)
nuclear@0 92 {
nuclear@0 93 struct anm_node *c, *tmp;
nuclear@0 94
nuclear@0 95 if(!tree) return;
nuclear@0 96
nuclear@0 97 c = tree->child;
nuclear@0 98 while(c) {
nuclear@0 99 tmp = c;
nuclear@0 100 c = c->next;
nuclear@0 101
nuclear@0 102 anm_free_node_tree(tmp);
nuclear@0 103 }
nuclear@0 104
nuclear@0 105 anm_free_node(tree);
nuclear@0 106 }
nuclear@0 107
nuclear@0 108 int anm_set_node_name(struct anm_node *node, const char *name)
nuclear@0 109 {
nuclear@0 110 char *str;
nuclear@0 111
nuclear@0 112 if(!(str = malloc(strlen(name) + 1))) {
nuclear@0 113 return -1;
nuclear@0 114 }
nuclear@0 115 strcpy(str, name);
nuclear@0 116 free(node->name);
nuclear@0 117 node->name = str;
nuclear@0 118 return 0;
nuclear@0 119 }
nuclear@0 120
nuclear@0 121 const char *anm_get_node_name(struct anm_node *node)
nuclear@0 122 {
nuclear@0 123 return node->name ? node->name : "";
nuclear@0 124 }
nuclear@0 125
nuclear@0 126 void anm_set_interpolator(struct anm_node *node, enum anm_interpolator in)
nuclear@0 127 {
nuclear@0 128 int i;
nuclear@0 129
nuclear@0 130 for(i=0; i<ANM_NUM_TRACKS; i++) {
nuclear@0 131 anm_set_track_interpolator(node->tracks + i, in);
nuclear@0 132 }
nuclear@0 133 invalidate_cache(node);
nuclear@0 134 }
nuclear@0 135
nuclear@0 136 void anm_set_extrapolator(struct anm_node *node, enum anm_extrapolator ex)
nuclear@0 137 {
nuclear@0 138 int i;
nuclear@0 139
nuclear@0 140 for(i=0; i<ANM_NUM_TRACKS; i++) {
nuclear@0 141 anm_set_track_extrapolator(node->tracks + i, ex);
nuclear@0 142 }
nuclear@0 143 invalidate_cache(node);
nuclear@0 144 }
nuclear@0 145
nuclear@0 146 void anm_link_node(struct anm_node *p, struct anm_node *c)
nuclear@0 147 {
nuclear@0 148 c->next = p->child;
nuclear@0 149 p->child = c;
nuclear@0 150
nuclear@0 151 c->parent = p;
nuclear@0 152 invalidate_cache(c);
nuclear@0 153 }
nuclear@0 154
nuclear@0 155 int anm_unlink_node(struct anm_node *p, struct anm_node *c)
nuclear@0 156 {
nuclear@0 157 struct anm_node *iter;
nuclear@0 158
nuclear@0 159 if(p->child == c) {
nuclear@0 160 p->child = c->next;
nuclear@0 161 c->next = 0;
nuclear@0 162 invalidate_cache(c);
nuclear@0 163 return 0;
nuclear@0 164 }
nuclear@0 165
nuclear@0 166 iter = p->child;
nuclear@0 167 while(iter->next) {
nuclear@0 168 if(iter->next == c) {
nuclear@0 169 iter->next = c->next;
nuclear@0 170 c->next = 0;
nuclear@0 171 invalidate_cache(c);
nuclear@0 172 return 0;
nuclear@0 173 }
nuclear@0 174 }
nuclear@0 175 return -1;
nuclear@0 176 }
nuclear@0 177
nuclear@0 178 void anm_set_position(struct anm_node *node, vec3_t pos, anm_time_t tm)
nuclear@0 179 {
nuclear@0 180 anm_set_value(node->tracks + ANM_TRACK_POS_X, tm, pos.x);
nuclear@0 181 anm_set_value(node->tracks + ANM_TRACK_POS_Y, tm, pos.y);
nuclear@0 182 anm_set_value(node->tracks + ANM_TRACK_POS_Z, tm, pos.z);
nuclear@0 183 invalidate_cache(node);
nuclear@0 184 }
nuclear@0 185
nuclear@0 186 vec3_t anm_get_node_position(struct anm_node *node, anm_time_t tm)
nuclear@0 187 {
nuclear@0 188 vec3_t v;
nuclear@0 189 v.x = anm_get_value(node->tracks + ANM_TRACK_POS_X, tm);
nuclear@0 190 v.y = anm_get_value(node->tracks + ANM_TRACK_POS_Y, tm);
nuclear@0 191 v.z = anm_get_value(node->tracks + ANM_TRACK_POS_Z, tm);
nuclear@0 192 return v;
nuclear@0 193 }
nuclear@0 194
nuclear@0 195 void anm_set_rotation(struct anm_node *node, quat_t rot, anm_time_t tm)
nuclear@0 196 {
nuclear@0 197 anm_set_value(node->tracks + ANM_TRACK_ROT_X, tm, rot.x);
nuclear@0 198 anm_set_value(node->tracks + ANM_TRACK_ROT_Y, tm, rot.y);
nuclear@0 199 anm_set_value(node->tracks + ANM_TRACK_ROT_Z, tm, rot.z);
nuclear@0 200 anm_set_value(node->tracks + ANM_TRACK_ROT_W, tm, rot.w);
nuclear@0 201 invalidate_cache(node);
nuclear@0 202 }
nuclear@0 203
/* Sample the node's local-space rotation quaternion at time tm.
 * With ROT_USE_SLERP defined (the default, see top of file) the two
 * bracketing keyframes are combined with spherical linear interpolation;
 * otherwise each quaternion component is interpolated independently by
 * the generic track code (cheaper, but not a proper rotation blend).
 * NOTE(review): the slerp path reads key times only from the X track and
 * indexes the Y/Z/W tracks with the same indices — it assumes all four
 * rotation tracks are keyed at identical times (anm_set_rotation
 * guarantees this); confirm no caller keys components individually. */
quat_t anm_get_node_rotation(struct anm_node *node, anm_time_t tm)
{
#ifndef ROT_USE_SLERP
	/* per-component interpolation through the generic track mechanism */
	quat_t q;
	q.x = anm_get_value(node->tracks + ANM_TRACK_ROT_X, tm);
	q.y = anm_get_value(node->tracks + ANM_TRACK_ROT_Y, tm);
	q.z = anm_get_value(node->tracks + ANM_TRACK_ROT_Z, tm);
	q.w = anm_get_value(node->tracks + ANM_TRACK_ROT_W, tm);
	return q;
#else
	int idx0, idx1, last_idx;
	anm_time_t tstart, tend;
	float t, dt;
	struct anm_track *track_x, *track_y, *track_z, *track_w;
	quat_t q, q1, q2;

	track_x = node->tracks + ANM_TRACK_ROT_X;
	track_y = node->tracks + ANM_TRACK_ROT_Y;
	track_z = node->tracks + ANM_TRACK_ROT_Z;
	track_w = node->tracks + ANM_TRACK_ROT_W;

	/* no keyframes at all: return the per-track default rotation */
	if(!track_x->count) {
		q.x = track_x->def_val;
		q.y = track_y->def_val;
		q.z = track_z->def_val;
		q.w = track_w->def_val;
		return q;
	}

	last_idx = track_x->count - 1;

	tstart = track_x->keys[0].time;
	tend = track_x->keys[last_idx].time;

	/* degenerate animation (single instant): no interpolation possible */
	if(tstart == tend) {
		q.x = track_x->keys[0].val;
		q.y = track_y->keys[0].val;
		q.z = track_z->keys[0].val;
		q.w = track_w->keys[0].val;
		return q;
	}

	/* apply the track's extrapolation mode to map tm into [tstart, tend] */
	tm = anm_remap_time(track_x, tm, tstart, tend);

	idx0 = anm_get_key_interval(track_x, tm);
	assert(idx0 >= 0 && idx0 < track_x->count);
	idx1 = idx0 + 1;

	/* tm lands on or past the last key: return it directly, since there
	 * is no idx1 key to interpolate towards */
	if(idx0 == last_idx) {
		q.x = track_x->keys[idx0].val;
		q.y = track_y->keys[idx0].val;
		q.z = track_z->keys[idx0].val;
		q.w = track_w->keys[idx0].val;
		return q;
	}

	/* normalized parameter t in [0, 1) between the two bracketing keys */
	dt = (float)(track_x->keys[idx1].time - track_x->keys[idx0].time);
	t = (float)(tm - track_x->keys[idx0].time) / dt;

	q1.x = track_x->keys[idx0].val;
	q1.y = track_y->keys[idx0].val;
	q1.z = track_z->keys[idx0].val;
	q1.w = track_w->keys[idx0].val;

	q2.x = track_x->keys[idx1].val;
	q2.y = track_y->keys[idx1].val;
	q2.z = track_z->keys[idx1].val;
	q2.w = track_w->keys[idx1].val;

	/*q1 = quat_normalize(q1);
	q2 = quat_normalize(q2);*/

	return quat_slerp(q1, q2, t);
#endif
}
nuclear@0 279
nuclear@0 280 void anm_set_scaling(struct anm_node *node, vec3_t scl, anm_time_t tm)
nuclear@0 281 {
nuclear@0 282 anm_set_value(node->tracks + ANM_TRACK_SCL_X, tm, scl.x);
nuclear@0 283 anm_set_value(node->tracks + ANM_TRACK_SCL_Y, tm, scl.y);
nuclear@0 284 anm_set_value(node->tracks + ANM_TRACK_SCL_Z, tm, scl.z);
nuclear@0 285 invalidate_cache(node);
nuclear@0 286 }
nuclear@0 287
nuclear@0 288 vec3_t anm_get_node_scaling(struct anm_node *node, anm_time_t tm)
nuclear@0 289 {
nuclear@0 290 vec3_t v;
nuclear@0 291 v.x = anm_get_value(node->tracks + ANM_TRACK_SCL_X, tm);
nuclear@0 292 v.y = anm_get_value(node->tracks + ANM_TRACK_SCL_Y, tm);
nuclear@0 293 v.z = anm_get_value(node->tracks + ANM_TRACK_SCL_Z, tm);
nuclear@0 294 return v;
nuclear@0 295 }
nuclear@0 296
nuclear@0 297
nuclear@0 298 vec3_t anm_get_position(struct anm_node *node, anm_time_t tm)
nuclear@0 299 {
nuclear@0 300 mat4_t xform;
nuclear@0 301 vec3_t pos = {0.0, 0.0, 0.0};
nuclear@0 302
nuclear@0 303 if(!node->parent) {
nuclear@0 304 return anm_get_node_position(node, tm);
nuclear@0 305 }
nuclear@0 306
nuclear@0 307 anm_get_matrix(node, xform, tm);
nuclear@0 308 return v3_transform(pos, xform);
nuclear@0 309 }
nuclear@0 310
nuclear@0 311 quat_t anm_get_rotation(struct anm_node *node, anm_time_t tm)
nuclear@0 312 {
nuclear@0 313 quat_t rot, prot;
nuclear@0 314 rot = anm_get_node_rotation(node, tm);
nuclear@0 315
nuclear@0 316 if(!node->parent) {
nuclear@0 317 return rot;
nuclear@0 318 }
nuclear@0 319
nuclear@0 320 prot = anm_get_rotation(node->parent, tm);
nuclear@0 321 return quat_mul(prot, rot);
nuclear@0 322 }
nuclear@0 323
nuclear@0 324 vec3_t anm_get_scaling(struct anm_node *node, anm_time_t tm)
nuclear@0 325 {
nuclear@0 326 vec3_t s, ps;
nuclear@0 327 s = anm_get_node_scaling(node, tm);
nuclear@0 328
nuclear@0 329 if(!node->parent) {
nuclear@0 330 return s;
nuclear@0 331 }
nuclear@0 332
nuclear@0 333 ps = anm_get_scaling(node->parent, tm);
nuclear@0 334 return v3_mul(s, ps);
nuclear@0 335 }
nuclear@0 336
/* Set the pivot point around which this node rotates and scales
 * (see anm_get_node_matrix for how it enters the transformation). */
void anm_set_pivot(struct anm_node *node, vec3_t piv)
{
	node->pivot = piv;
}
nuclear@0 341
/* Return the node's current pivot point. */
vec3_t anm_get_pivot(struct anm_node *node)
{
	return node->pivot;
}
nuclear@0 346
/* Compute the node's LOCAL transformation matrix at time tm into mat.
 * The transform is composed directly, without full matrix multiplies:
 * pivot translation, then rotation, then translation and scaling folded
 * in component-wise, then the inverse pivot translation (see the
 * closing comment for the equivalent matrix product). */
void anm_get_node_matrix(struct anm_node *node, mat4_t mat, anm_time_t tm)
{
	int i;
	mat4_t rmat;
	vec3_t pos, scale;
	quat_t rot;

	/* sample the three animation channels at time tm */
	pos = anm_get_node_position(node, tm);
	rot = anm_get_node_rotation(node, tm);
	scale = anm_get_node_scaling(node, tm);

	/* start from a translation matrix moving to the pivot point */
	m4_set_translation(mat, node->pivot.x, node->pivot.y, node->pivot.z);

	quat_to_mat4(rmat, rot);
	/* overwrite the upper-left 3x3 with the rotation part */
	for(i=0; i<3; i++) {
		mat[i][0] = rmat[i][0];
		mat[i][1] = rmat[i][1];
		mat[i][2] = rmat[i][2];
	}
	/* this loop is equivalent to: m4_mult(mat, mat, rmat); */

	/* fold scaling into the rotation columns and add the translation
	 * into the fourth column */
	mat[0][0] *= scale.x; mat[0][1] *= scale.y; mat[0][2] *= scale.z; mat[0][3] += pos.x;
	mat[1][0] *= scale.x; mat[1][1] *= scale.y; mat[1][2] *= scale.z; mat[1][3] += pos.y;
	mat[2][0] *= scale.x; mat[2][1] *= scale.y; mat[2][2] *= scale.z; mat[2][3] += pos.z;

	/* finally move back from the pivot point */
	m4_translate(mat, -node->pivot.x, -node->pivot.y, -node->pivot.z);

	/* that's basically: pivot * rotation * translation * scaling * -pivot */
}
nuclear@5 376
nuclear@5 377 void anm_get_node_inv_matrix(struct anm_node *node, mat4_t mat, anm_time_t tm)
nuclear@5 378 {
nuclear@5 379 mat4_t tmp;
nuclear@5 380 anm_get_node_matrix(node, tmp, tm);
nuclear@5 381 m4_inverse(mat, tmp);
nuclear@5 382 }
nuclear@5 383
/* Evaluate the node's local transformation at time tm and store it in
 * node->matrix. Part of the pre-pass evaluation API (see anm_eval),
 * the lightweight alternative to the lazy per-thread cache. */
void anm_eval_node(struct anm_node *node, anm_time_t tm)
{
	anm_get_node_matrix(node, node->matrix, tm);
}
nuclear@20 388
/* Pre-pass evaluation: recursively compute the world matrix of node and
 * its whole subtree at time tm, storing results in each node's matrix
 * field. Assumes the parent of node (if any) has already been evaluated
 * for the same tm — guaranteed when called on the root of a hierarchy. */
void anm_eval(struct anm_node *node, anm_time_t tm)
{
	struct anm_node *c;

	anm_eval_node(node, tm);

	if(node->parent) {
		/* parent-first (top-down) traversal: by the time we get here the
		 * parent's world matrix is already in node->parent->matrix */
		m4_mult(node->matrix, node->parent->matrix, node->matrix);
	}

	/* recursively evaluate all children */
	c = node->child;
	while(c) {
		anm_eval(c, tm);
		c = c->next;
	}
}
nuclear@20 407
/* Compute the node's WORLD transformation matrix at time tm into mat,
 * using a lazily-allocated per-thread cache keyed on tm. On a cache
 * miss the local matrix is recomputed and concatenated with the
 * parent's world matrix (recursively, each level using its own cache).
 * Each calling thread gets its own mat_cache, so no locking is needed
 * beyond registering the new cache entry on the node's list (used for
 * cleanup in anm_destroy_node). */
void anm_get_matrix(struct anm_node *node, mat4_t mat, anm_time_t tm)
{
	struct mat_cache *cache = pthread_getspecific(node->cache_key);
	if(!cache) {
		/* first call from this thread: allocate its cache entry */
		cache = malloc(sizeof *cache);
		assert(cache);

		/* link it into the node's list so destruction can free it */
		pthread_mutex_lock(&node->cache_list_lock);
		cache->next = node->cache_list;
		node->cache_list = cache;
		pthread_mutex_unlock(&node->cache_list_lock);

		cache->time = ANM_TIME_INVAL;
		cache->inv_time = ANM_TIME_INVAL;
		pthread_setspecific(node->cache_key, cache);
	}

	/* recompute only if the cached matrix is for a different time */
	if(cache->time != tm) {
		anm_get_node_matrix(node, cache->matrix, tm);

		if(node->parent) {
			mat4_t parent_mat;

			anm_get_matrix(node->parent, parent_mat, tm);
			m4_mult(cache->matrix, parent_mat, cache->matrix);
		}
		cache->time = tm;
	}
	m4_copy(mat, cache->matrix);
}
nuclear@0 438
nuclear@0 439 void anm_get_inv_matrix(struct anm_node *node, mat4_t mat, anm_time_t tm)
nuclear@0 440 {
nuclear@0 441 struct mat_cache *cache = pthread_getspecific(node->cache_key);
nuclear@0 442 if(!cache) {
nuclear@0 443 cache = malloc(sizeof *cache);
nuclear@0 444 assert(cache);
nuclear@0 445
nuclear@0 446 pthread_mutex_lock(&node->cache_list_lock);
nuclear@0 447 cache->next = node->cache_list;
nuclear@0 448 node->cache_list = cache;
nuclear@0 449 pthread_mutex_unlock(&node->cache_list_lock);
nuclear@0 450
nuclear@0 451 cache->inv_time = ANM_TIME_INVAL;
nuclear@2 452 cache->inv_time = ANM_TIME_INVAL;
nuclear@0 453 pthread_setspecific(node->cache_key, cache);
nuclear@0 454 }
nuclear@0 455
nuclear@0 456 if(cache->inv_time != tm) {
nuclear@0 457 anm_get_matrix(node, mat, tm);
nuclear@0 458 m4_inverse(cache->inv_matrix, mat);
nuclear@0 459 cache->inv_time = tm;
nuclear@0 460 }
nuclear@0 461 m4_copy(mat, cache->inv_matrix);
nuclear@0 462 }
nuclear@0 463
nuclear@0 464 anm_time_t anm_get_start_time(struct anm_node *node)
nuclear@0 465 {
nuclear@0 466 int i;
nuclear@0 467 struct anm_node *c;
nuclear@0 468 anm_time_t res = LONG_MAX;
nuclear@0 469
nuclear@0 470 for(i=0; i<ANM_NUM_TRACKS; i++) {
nuclear@0 471 if(node->tracks[i].count) {
nuclear@0 472 anm_time_t tm = node->tracks[i].keys[0].time;
nuclear@0 473 if(tm < res) {
nuclear@0 474 res = tm;
nuclear@0 475 }
nuclear@0 476 }
nuclear@0 477 }
nuclear@0 478
nuclear@0 479 c = node->child;
nuclear@0 480 while(c) {
nuclear@0 481 anm_time_t tm = anm_get_start_time(c);
nuclear@0 482 if(tm < res) {
nuclear@0 483 res = tm;
nuclear@0 484 }
nuclear@0 485 c = c->next;
nuclear@0 486 }
nuclear@0 487 return res;
nuclear@0 488 }
nuclear@0 489
nuclear@0 490 anm_time_t anm_get_end_time(struct anm_node *node)
nuclear@0 491 {
nuclear@0 492 int i;
nuclear@0 493 struct anm_node *c;
nuclear@0 494 anm_time_t res = LONG_MIN;
nuclear@0 495
nuclear@0 496 for(i=0; i<ANM_NUM_TRACKS; i++) {
nuclear@0 497 if(node->tracks[i].count) {
nuclear@0 498 anm_time_t tm = node->tracks[i].keys[node->tracks[i].count - 1].time;
nuclear@0 499 if(tm > res) {
nuclear@0 500 res = tm;
nuclear@0 501 }
nuclear@0 502 }
nuclear@0 503 }
nuclear@0 504
nuclear@0 505 c = node->child;
nuclear@0 506 while(c) {
nuclear@0 507 anm_time_t tm = anm_get_end_time(c);
nuclear@0 508 if(tm > res) {
nuclear@0 509 res = tm;
nuclear@0 510 }
nuclear@0 511 c = c->next;
nuclear@0 512 }
nuclear@0 513 return res;
nuclear@0 514 }
nuclear@0 515
/* Mark the CALLING thread's cached matrices for this node as stale, so
 * the next anm_get_matrix / anm_get_inv_matrix recomputes them.
 * NOTE(review): only the current thread's cache entry is touched; the
 * entries of other threads in node->cache_list stay valid even though
 * the keyframes they were computed from may have changed — confirm this
 * cross-thread staleness is acceptable to callers. */
static void invalidate_cache(struct anm_node *node)
{
	struct mat_cache *cache = pthread_getspecific(node->cache_key);
	if(cache) {
		cache->time = cache->inv_time = ANM_TIME_INVAL;
	}
}