libanim
view src/anim.c @ 50:b92a21c31008
invalidate_cache was not invalidating the inverse matrix time
author | John Tsiombikas <nuclear@member.fsf.org> |
---|---|
date | Sat, 13 Apr 2013 08:13:27 +0300 |
parents | b408f3f655e9 |
children | 3c2428cb38f7 |
line source
#include <stdlib.h>
#include <string.h>
#include <limits.h>
#include <assert.h>
#include "anim.h"
#include "dynarr.h"
7 #define ROT_USE_SLERP
9 static void invalidate_cache(struct anm_node *node);
11 int anm_init_node(struct anm_node *node)
12 {
13 int i, j;
14 static const float defaults[] = {
15 0.0f, 0.0f, 0.0f, /* default position */
16 0.0f, 0.0f, 0.0f, 1.0f, /* default rotation quat */
17 1.0f, 1.0f, 1.0f /* default scale factor */
18 };
20 memset(node, 0, sizeof *node);
22 /* initialize thread-local matrix cache */
23 pthread_key_create(&node->cache_key, 0);
24 pthread_mutex_init(&node->cache_list_lock, 0);
26 for(i=0; i<ANM_NUM_TRACKS; i++) {
27 if(anm_init_track(node->tracks + i) == -1) {
28 for(j=0; j<i; j++) {
29 anm_destroy_track(node->tracks + i);
30 }
31 }
32 anm_set_track_default(node->tracks + i, defaults[i]);
33 }
34 return 0;
35 }
37 void anm_destroy_node(struct anm_node *node)
38 {
39 int i;
40 free(node->name);
42 for(i=0; i<ANM_NUM_TRACKS; i++) {
43 anm_destroy_track(node->tracks + i);
44 }
46 /* destroy thread-specific cache */
47 pthread_key_delete(node->cache_key);
49 while(node->cache_list) {
50 struct mat_cache *tmp = node->cache_list;
51 node->cache_list = tmp->next;
52 free(tmp);
53 }
54 }
56 void anm_destroy_node_tree(struct anm_node *tree)
57 {
58 struct anm_node *c, *tmp;
60 if(!tree) return;
62 c = tree->child;
63 while(c) {
64 tmp = c;
65 c = c->next;
67 anm_destroy_node_tree(tmp);
68 }
69 anm_destroy_node(tree);
70 }
72 struct anm_node *anm_create_node(void)
73 {
74 struct anm_node *n;
76 if((n = malloc(sizeof *n))) {
77 if(anm_init_node(n) == -1) {
78 free(n);
79 return 0;
80 }
81 }
82 return n;
83 }
/* Destroy a heap-allocated node and release its memory.
 * Counterpart of anm_create_node. */
void anm_free_node(struct anm_node *node)
{
	anm_destroy_node(node);
	free(node);
}
91 void anm_free_node_tree(struct anm_node *tree)
92 {
93 struct anm_node *c, *tmp;
95 if(!tree) return;
97 c = tree->child;
98 while(c) {
99 tmp = c;
100 c = c->next;
102 anm_free_node_tree(tmp);
103 }
105 anm_free_node(tree);
106 }
108 int anm_set_node_name(struct anm_node *node, const char *name)
109 {
110 char *str;
112 if(!(str = malloc(strlen(name) + 1))) {
113 return -1;
114 }
115 strcpy(str, name);
116 free(node->name);
117 node->name = str;
118 return 0;
119 }
121 const char *anm_get_node_name(struct anm_node *node)
122 {
123 return node->name ? node->name : "";
124 }
126 void anm_set_interpolator(struct anm_node *node, enum anm_interpolator in)
127 {
128 int i;
130 for(i=0; i<ANM_NUM_TRACKS; i++) {
131 anm_set_track_interpolator(node->tracks + i, in);
132 }
133 invalidate_cache(node);
134 }
136 void anm_set_extrapolator(struct anm_node *node, enum anm_extrapolator ex)
137 {
138 int i;
140 for(i=0; i<ANM_NUM_TRACKS; i++) {
141 anm_set_track_extrapolator(node->tracks + i, ex);
142 }
143 invalidate_cache(node);
144 }
146 void anm_link_node(struct anm_node *p, struct anm_node *c)
147 {
148 c->next = p->child;
149 p->child = c;
151 c->parent = p;
152 invalidate_cache(c);
153 }
155 int anm_unlink_node(struct anm_node *p, struct anm_node *c)
156 {
157 struct anm_node *iter;
159 if(p->child == c) {
160 p->child = c->next;
161 c->next = 0;
162 invalidate_cache(c);
163 return 0;
164 }
166 iter = p->child;
167 while(iter->next) {
168 if(iter->next == c) {
169 iter->next = c->next;
170 c->next = 0;
171 invalidate_cache(c);
172 return 0;
173 }
174 }
175 return -1;
176 }
178 void anm_set_position(struct anm_node *node, vec3_t pos, anm_time_t tm)
179 {
180 anm_set_value(node->tracks + ANM_TRACK_POS_X, tm, pos.x);
181 anm_set_value(node->tracks + ANM_TRACK_POS_Y, tm, pos.y);
182 anm_set_value(node->tracks + ANM_TRACK_POS_Z, tm, pos.z);
183 invalidate_cache(node);
184 }
186 vec3_t anm_get_node_position(struct anm_node *node, anm_time_t tm)
187 {
188 vec3_t v;
189 v.x = anm_get_value(node->tracks + ANM_TRACK_POS_X, tm);
190 v.y = anm_get_value(node->tracks + ANM_TRACK_POS_Y, tm);
191 v.z = anm_get_value(node->tracks + ANM_TRACK_POS_Z, tm);
192 return v;
193 }
195 void anm_set_rotation(struct anm_node *node, quat_t rot, anm_time_t tm)
196 {
197 anm_set_value(node->tracks + ANM_TRACK_ROT_X, tm, rot.x);
198 anm_set_value(node->tracks + ANM_TRACK_ROT_Y, tm, rot.y);
199 anm_set_value(node->tracks + ANM_TRACK_ROT_Z, tm, rot.z);
200 anm_set_value(node->tracks + ANM_TRACK_ROT_W, tm, rot.w);
201 invalidate_cache(node);
202 }
/* Sample the node's local rotation at time tm.
 * Without ROT_USE_SLERP each quaternion component is interpolated
 * independently through anm_get_value. With it (defined above, so this is
 * the active path) all four components are looked up on a common key
 * interval and blended with quat_slerp.
 * NOTE(review): only track_x's key count and times drive the lookup — this
 * assumes all four rotation tracks are keyed at identical times, which
 * anm_set_rotation guarantees; verify if keys are ever set per-track. */
quat_t anm_get_node_rotation(struct anm_node *node, anm_time_t tm)
{
#ifndef ROT_USE_SLERP
	quat_t q;
	q.x = anm_get_value(node->tracks + ANM_TRACK_ROT_X, tm);
	q.y = anm_get_value(node->tracks + ANM_TRACK_ROT_Y, tm);
	q.z = anm_get_value(node->tracks + ANM_TRACK_ROT_Z, tm);
	q.w = anm_get_value(node->tracks + ANM_TRACK_ROT_W, tm);
	return q;
#else
	int idx0, idx1, last_idx;
	anm_time_t tstart, tend;
	float t, dt;
	struct anm_track *track_x, *track_y, *track_z, *track_w;
	quat_t q, q1, q2;

	track_x = node->tracks + ANM_TRACK_ROT_X;
	track_y = node->tracks + ANM_TRACK_ROT_Y;
	track_z = node->tracks + ANM_TRACK_ROT_Z;
	track_w = node->tracks + ANM_TRACK_ROT_W;

	/* no keys at all: return each track's default value */
	if(!track_x->count) {
		q.x = track_x->def_val;
		q.y = track_y->def_val;
		q.z = track_z->def_val;
		q.w = track_w->def_val;
		return q;
	}

	last_idx = track_x->count - 1;

	tstart = track_x->keys[0].time;
	tend = track_x->keys[last_idx].time;

	/* degenerate animation span (single key): no interpolation needed */
	if(tstart == tend) {
		q.x = track_x->keys[0].val;
		q.y = track_y->keys[0].val;
		q.z = track_z->keys[0].val;
		q.w = track_w->keys[0].val;
		return q;
	}

	/* apply the track's extrapolator to map tm into [tstart, tend] */
	tm = anm_remap_time(track_x, tm, tstart, tend);

	idx0 = anm_get_key_interval(track_x, tm);
	assert(idx0 >= 0 && idx0 < track_x->count);
	idx1 = idx0 + 1;

	/* tm falls on or past the last key: return it directly */
	if(idx0 == last_idx) {
		q.x = track_x->keys[idx0].val;
		q.y = track_y->keys[idx0].val;
		q.z = track_z->keys[idx0].val;
		q.w = track_w->keys[idx0].val;
		return q;
	}

	/* normalized interpolation parameter between the bracketing keys */
	dt = (float)(track_x->keys[idx1].time - track_x->keys[idx0].time);
	t = (float)(tm - track_x->keys[idx0].time) / dt;

	q1.x = track_x->keys[idx0].val;
	q1.y = track_y->keys[idx0].val;
	q1.z = track_z->keys[idx0].val;
	q1.w = track_w->keys[idx0].val;

	q2.x = track_x->keys[idx1].val;
	q2.y = track_y->keys[idx1].val;
	q2.z = track_z->keys[idx1].val;
	q2.w = track_w->keys[idx1].val;

	/*q1 = quat_normalize(q1);
	q2 = quat_normalize(q2);*/

	return quat_slerp(q1, q2, t);
#endif
}
280 void anm_set_scaling(struct anm_node *node, vec3_t scl, anm_time_t tm)
281 {
282 anm_set_value(node->tracks + ANM_TRACK_SCL_X, tm, scl.x);
283 anm_set_value(node->tracks + ANM_TRACK_SCL_Y, tm, scl.y);
284 anm_set_value(node->tracks + ANM_TRACK_SCL_Z, tm, scl.z);
285 invalidate_cache(node);
286 }
288 vec3_t anm_get_node_scaling(struct anm_node *node, anm_time_t tm)
289 {
290 vec3_t v;
291 v.x = anm_get_value(node->tracks + ANM_TRACK_SCL_X, tm);
292 v.y = anm_get_value(node->tracks + ANM_TRACK_SCL_Y, tm);
293 v.z = anm_get_value(node->tracks + ANM_TRACK_SCL_Z, tm);
294 return v;
295 }
298 vec3_t anm_get_position(struct anm_node *node, anm_time_t tm)
299 {
300 mat4_t xform;
301 vec3_t pos = {0.0, 0.0, 0.0};
303 if(!node->parent) {
304 return anm_get_node_position(node, tm);
305 }
307 anm_get_matrix(node, xform, tm);
308 return v3_transform(pos, xform);
309 }
311 quat_t anm_get_rotation(struct anm_node *node, anm_time_t tm)
312 {
313 quat_t rot, prot;
314 rot = anm_get_node_rotation(node, tm);
316 if(!node->parent) {
317 return rot;
318 }
320 prot = anm_get_rotation(node->parent, tm);
321 return quat_mul(prot, rot);
322 }
324 vec3_t anm_get_scaling(struct anm_node *node, anm_time_t tm)
325 {
326 vec3_t s, ps;
327 s = anm_get_node_scaling(node, tm);
329 if(!node->parent) {
330 return s;
331 }
333 ps = anm_get_scaling(node->parent, tm);
334 return v3_mul(s, ps);
335 }
337 void anm_set_pivot(struct anm_node *node, vec3_t piv)
338 {
339 node->pivot = piv;
340 }
342 vec3_t anm_get_pivot(struct anm_node *node)
343 {
344 return node->pivot;
345 }
/* Build the node's local transformation matrix at time tm, composing the
 * animated position/rotation/scale about the pivot point. The statement
 * order below is behavior-critical; see the closing comment for the
 * equivalent matrix product. */
void anm_get_node_matrix(struct anm_node *node, mat4_t mat, anm_time_t tm)
{
	int i;
	mat4_t rmat;
	vec3_t pos, scale;
	quat_t rot;

	pos = anm_get_node_position(node, tm);
	rot = anm_get_node_rotation(node, tm);
	scale = anm_get_node_scaling(node, tm);

	/* start from a pure translation to the pivot point */
	m4_set_translation(mat, node->pivot.x, node->pivot.y, node->pivot.z);

	/* overwrite the upper 3x3 with the rotation matrix */
	quat_to_mat4(rmat, rot);
	for(i=0; i<3; i++) {
		mat[i][0] = rmat[i][0];
		mat[i][1] = rmat[i][1];
		mat[i][2] = rmat[i][2];
	}
	/* this loop is equivalent to: m4_mult(mat, mat, rmat); */

	/* scale the basis columns and fold the animated translation in */
	mat[0][0] *= scale.x; mat[0][1] *= scale.y; mat[0][2] *= scale.z; mat[0][3] += pos.x;
	mat[1][0] *= scale.x; mat[1][1] *= scale.y; mat[1][2] *= scale.z; mat[1][3] += pos.y;
	mat[2][0] *= scale.x; mat[2][1] *= scale.y; mat[2][2] *= scale.z; mat[2][3] += pos.z;

	/* undo the pivot translation on the right-hand side */
	m4_translate(mat, -node->pivot.x, -node->pivot.y, -node->pivot.z);

	/* that's basically: pivot * rotation * translation * scaling * -pivot */
}
377 void anm_get_node_inv_matrix(struct anm_node *node, mat4_t mat, anm_time_t tm)
378 {
379 mat4_t tmp;
380 anm_get_node_matrix(node, tmp, tm);
381 m4_inverse(mat, tmp);
382 }
/* Compute (or fetch from the per-thread cache) the node's full world
 * transformation at time tm, recursively multiplying in ancestor matrices.
 * A cache record is allocated lazily on first use by each thread, stored in
 * thread-specific data, and also linked into node->cache_list (under the
 * mutex) so anm_destroy_node can free records from all threads. */
void anm_get_matrix(struct anm_node *node, mat4_t mat, anm_time_t tm)
{
	struct mat_cache *cache = pthread_getspecific(node->cache_key);
	if(!cache) {
		/* first call on this thread: create its cache record */
		cache = malloc(sizeof *cache);
		assert(cache);

		pthread_mutex_lock(&node->cache_list_lock);
		cache->next = node->cache_list;
		node->cache_list = cache;
		pthread_mutex_unlock(&node->cache_list_lock);

		/* both timestamps start invalid so nothing reads garbage matrices */
		cache->time = ANM_TIME_INVAL;
		cache->inv_time = ANM_TIME_INVAL;
		pthread_setspecific(node->cache_key, cache);
	}

	/* recompute only when the cached matrix is for a different time */
	if(cache->time != tm) {
		anm_get_node_matrix(node, cache->matrix, tm);

		if(node->parent) {
			mat4_t parent_mat;

			anm_get_matrix(node->parent, parent_mat, tm);
			m4_mult(cache->matrix, parent_mat, cache->matrix);
		}
		cache->time = tm;
	}
	m4_copy(mat, cache->matrix);
}
415 void anm_get_inv_matrix(struct anm_node *node, mat4_t mat, anm_time_t tm)
416 {
417 struct mat_cache *cache = pthread_getspecific(node->cache_key);
418 if(!cache) {
419 cache = malloc(sizeof *cache);
420 assert(cache);
422 pthread_mutex_lock(&node->cache_list_lock);
423 cache->next = node->cache_list;
424 node->cache_list = cache;
425 pthread_mutex_unlock(&node->cache_list_lock);
427 cache->inv_time = ANM_TIME_INVAL;
428 cache->inv_time = ANM_TIME_INVAL;
429 pthread_setspecific(node->cache_key, cache);
430 }
432 if(cache->inv_time != tm) {
433 anm_get_matrix(node, mat, tm);
434 m4_inverse(cache->inv_matrix, mat);
435 cache->inv_time = tm;
436 }
437 m4_copy(mat, cache->inv_matrix);
438 }
440 anm_time_t anm_get_start_time(struct anm_node *node)
441 {
442 int i;
443 struct anm_node *c;
444 anm_time_t res = LONG_MAX;
446 for(i=0; i<ANM_NUM_TRACKS; i++) {
447 if(node->tracks[i].count) {
448 anm_time_t tm = node->tracks[i].keys[0].time;
449 if(tm < res) {
450 res = tm;
451 }
452 }
453 }
455 c = node->child;
456 while(c) {
457 anm_time_t tm = anm_get_start_time(c);
458 if(tm < res) {
459 res = tm;
460 }
461 c = c->next;
462 }
463 return res;
464 }
466 anm_time_t anm_get_end_time(struct anm_node *node)
467 {
468 int i;
469 struct anm_node *c;
470 anm_time_t res = LONG_MIN;
472 for(i=0; i<ANM_NUM_TRACKS; i++) {
473 if(node->tracks[i].count) {
474 anm_time_t tm = node->tracks[i].keys[node->tracks[i].count - 1].time;
475 if(tm > res) {
476 res = tm;
477 }
478 }
479 }
481 c = node->child;
482 while(c) {
483 anm_time_t tm = anm_get_end_time(c);
484 if(tm > res) {
485 res = tm;
486 }
487 c = c->next;
488 }
489 return res;
490 }
492 static void invalidate_cache(struct anm_node *node)
493 {
494 struct mat_cache *cache = pthread_getspecific(node->cache_key);
495 if(cache) {
496 cache->time = cache->inv_time = ANM_TIME_INVAL;
497 }
498 }