libanim
diff src/anim.c @ 57:2da758956e50
added the option of lightweight pre-pass top-down recursive calculation of
matrices instead of going through the existing lazy thread-specific caching
algorithm.
author: John Tsiombikas <nuclear@member.fsf.org>
date: Mon, 09 Dec 2013 04:06:30 +0200
parents: 46a1f2aa1129
children: 5993f405a1cb
line diff
1.1 --- a/src/anim.c Sat Nov 16 12:31:03 2013 +0200 1.2 +++ b/src/anim.c Mon Dec 09 04:06:30 2013 +0200 1.3 @@ -381,6 +381,30 @@ 1.4 m4_inverse(mat, tmp); 1.5 } 1.6 1.7 +void anm_eval_node(struct anm_node *node, anm_time_t tm) 1.8 +{ 1.9 + anm_get_node_matrix(node, node->matrix, tm); 1.10 +} 1.11 + 1.12 +void anm_eval(struct anm_node *node, anm_time_t tm) 1.13 +{ 1.14 + struct anm_node *c; 1.15 + 1.16 + anm_eval_node(node, tm); 1.17 + 1.18 + if(node->parent) { 1.19 + /* due to pre-order (top-down) traversal, the parent matrix is already evaluated */ 1.20 + m4_mult(node->matrix, node->parent->matrix, node->matrix); 1.21 + } 1.22 + 1.23 + /* recursively evaluate all children */ 1.24 + c = node->child; 1.25 + while(c) { 1.26 + anm_eval(c, tm); 1.27 + c = c->next; 1.28 + } 1.29 +} 1.30 + 1.31 void anm_get_matrix(struct anm_node *node, mat4_t mat, anm_time_t tm) 1.32 { 1.33 struct mat_cache *cache = pthread_getspecific(node->cache_key);