libanim

view src/anim.c @ 43:a720e8d7023d

fixed a bug in the manual slerping in anm_get_node_rotation
author John Tsiombikas <nuclear@member.fsf.org>
date Fri, 01 Mar 2013 08:21:19 +0200
parents 2cf7284d2bbb
children ffe668a61bca
line source
#include <stdlib.h>
#include <string.h>
#include <limits.h>
#include <assert.h>
#include "anim.h"
#include "dynarr.h"
7 static void invalidate_cache(struct anm_node *node);
9 int anm_init_node(struct anm_node *node)
10 {
11 int i, j;
12 static const float defaults[] = {
13 0.0f, 0.0f, 0.0f, /* default position */
14 0.0f, 0.0f, 0.0f, 1.0f, /* default rotation quat */
15 1.0f, 1.0f, 1.0f /* default scale factor */
16 };
18 memset(node, 0, sizeof *node);
20 /* initialize thread-local matrix cache */
21 pthread_key_create(&node->cache_key, 0);
23 for(i=0; i<ANM_NUM_TRACKS; i++) {
24 if(anm_init_track(node->tracks + i) == -1) {
25 for(j=0; j<i; j++) {
26 anm_destroy_track(node->tracks + i);
27 }
28 }
29 anm_set_track_default(node->tracks + i, defaults[i]);
30 }
31 return 0;
32 }
34 void anm_destroy_node(struct anm_node *node)
35 {
36 int i;
37 free(node->name);
39 for(i=0; i<ANM_NUM_TRACKS; i++) {
40 anm_destroy_track(node->tracks + i);
41 }
43 /* destroy thread-specific cache */
44 pthread_key_delete(node->cache_key);
46 while(node->cache_list) {
47 struct mat_cache *tmp = node->cache_list;
48 node->cache_list = tmp->next;
49 free(tmp);
50 }
51 }
53 void anm_destroy_node_tree(struct anm_node *tree)
54 {
55 struct anm_node *c, *tmp;
57 if(!tree) return;
59 c = tree->child;
60 while(c) {
61 tmp = c;
62 c = c->next;
64 anm_destroy_node_tree(tmp);
65 }
66 anm_destroy_node(tree);
67 }
69 struct anm_node *anm_create_node(void)
70 {
71 struct anm_node *n;
73 if((n = malloc(sizeof *n))) {
74 if(anm_init_node(n) == -1) {
75 free(n);
76 return 0;
77 }
78 }
79 return n;
80 }
/* Destroy the node's contents, then release the node itself.
 * Counterpart of anm_create_node. */
void anm_free_node(struct anm_node *node)
{
	anm_destroy_node(node);
	free(node);
}
88 void anm_free_node_tree(struct anm_node *tree)
89 {
90 struct anm_node *c, *tmp;
92 if(!tree) return;
94 c = tree->child;
95 while(c) {
96 tmp = c;
97 c = c->next;
99 anm_free_node_tree(tmp);
100 }
102 anm_free_node(tree);
103 }
105 int anm_set_node_name(struct anm_node *node, const char *name)
106 {
107 char *str;
109 if(!(str = malloc(strlen(name) + 1))) {
110 return -1;
111 }
112 strcpy(str, name);
113 free(node->name);
114 node->name = str;
115 return 0;
116 }
118 const char *anm_get_node_name(struct anm_node *node)
119 {
120 return node->name ? node->name : "";
121 }
123 void anm_set_interpolator(struct anm_node *node, enum anm_interpolator in)
124 {
125 int i;
127 for(i=0; i<ANM_NUM_TRACKS; i++) {
128 anm_set_track_interpolator(node->tracks + i, in);
129 }
130 invalidate_cache(node);
131 }
133 void anm_set_extrapolator(struct anm_node *node, enum anm_extrapolator ex)
134 {
135 int i;
137 for(i=0; i<ANM_NUM_TRACKS; i++) {
138 anm_set_track_extrapolator(node->tracks + i, ex);
139 }
140 invalidate_cache(node);
141 }
143 void anm_link_node(struct anm_node *p, struct anm_node *c)
144 {
145 c->next = p->child;
146 p->child = c;
148 c->parent = p;
149 invalidate_cache(c);
150 }
152 int anm_unlink_node(struct anm_node *p, struct anm_node *c)
153 {
154 struct anm_node *iter;
156 if(p->child == c) {
157 p->child = c->next;
158 c->next = 0;
159 invalidate_cache(c);
160 return 0;
161 }
163 iter = p->child;
164 while(iter->next) {
165 if(iter->next == c) {
166 iter->next = c->next;
167 c->next = 0;
168 invalidate_cache(c);
169 return 0;
170 }
171 }
172 return -1;
173 }
175 void anm_set_position(struct anm_node *node, vec3_t pos, anm_time_t tm)
176 {
177 anm_set_value(node->tracks + ANM_TRACK_POS_X, tm, pos.x);
178 anm_set_value(node->tracks + ANM_TRACK_POS_Y, tm, pos.y);
179 anm_set_value(node->tracks + ANM_TRACK_POS_Z, tm, pos.z);
180 invalidate_cache(node);
181 }
183 vec3_t anm_get_node_position(struct anm_node *node, anm_time_t tm)
184 {
185 vec3_t v;
186 v.x = anm_get_value(node->tracks + ANM_TRACK_POS_X, tm);
187 v.y = anm_get_value(node->tracks + ANM_TRACK_POS_Y, tm);
188 v.z = anm_get_value(node->tracks + ANM_TRACK_POS_Z, tm);
189 return v;
190 }
192 void anm_set_rotation(struct anm_node *node, quat_t rot, anm_time_t tm)
193 {
194 anm_set_value(node->tracks + ANM_TRACK_ROT_X, tm, rot.x);
195 anm_set_value(node->tracks + ANM_TRACK_ROT_Y, tm, rot.y);
196 anm_set_value(node->tracks + ANM_TRACK_ROT_Z, tm, rot.z);
197 anm_set_value(node->tracks + ANM_TRACK_ROT_W, tm, rot.w);
198 invalidate_cache(node);
199 }
/* Evaluate the node's local rotation at time tm as a quaternion.
 * Unlike the vector tracks, the four rotation components must not be
 * interpolated independently (that would denormalize the quaternion);
 * instead the keyframes bracketing tm are located manually and the two
 * key quaternions are slerped.  Key timing is read from the X track
 * only: all four rotation tracks are assumed to share identical key
 * times, which holds because anm_set_rotation always inserts a key
 * into all four at once. */
quat_t anm_get_node_rotation(struct anm_node *node, anm_time_t tm)
{
	int idx0, idx1, last_idx;
	anm_time_t tstart, tend;
	float t, dt;
	struct anm_track *track_x, *track_y, *track_z, *track_w;
	quat_t q, q1, q2;

	track_x = node->tracks + ANM_TRACK_ROT_X;
	track_y = node->tracks + ANM_TRACK_ROT_Y;
	track_z = node->tracks + ANM_TRACK_ROT_Z;
	track_w = node->tracks + ANM_TRACK_ROT_W;

	/* no keyframes at all: return the tracks' default values */
	if(!track_x->count) {
		q.x = track_x->def_val;
		q.y = track_y->def_val;
		q.z = track_z->def_val;
		q.w = track_w->def_val;
		return q;
	}

	last_idx = track_x->count - 1;

	tstart = track_x->keys[0].time;
	tend = track_x->keys[last_idx].time;

	/* single keyframe (or zero-length animation): nothing to interpolate */
	if(tstart == tend) {
		q.x = track_x->keys[0].val;
		q.y = track_y->keys[0].val;
		q.z = track_z->keys[0].val;
		q.w = track_w->keys[0].val;
		return q;
	}

	/* apply the track's extrapolation mode to map tm into [tstart, tend] */
	tm = anm_remap_time(track_x, tm, tstart, tend);

	idx0 = anm_get_key_interval(track_x, tm);
	assert(idx0 >= 0 && idx0 < track_x->count);
	idx1 = idx0 + 1;

	/* tm falls on the last key: no second key to slerp towards */
	if(idx0 == last_idx) {
		q.x = track_x->keys[idx0].val;
		q.y = track_y->keys[idx0].val;
		q.z = track_z->keys[idx0].val;
		q.w = track_w->keys[idx0].val;
		return q;
	}

	/* normalized interpolation parameter t in [0, 1) between the keys */
	dt = (float)(track_x->keys[idx1].time - track_x->keys[idx0].time);
	t = (float)(tm - track_x->keys[idx0].time) / dt;

	q1.x = track_x->keys[idx0].val;
	q1.y = track_y->keys[idx0].val;
	q1.z = track_z->keys[idx0].val;
	q1.w = track_w->keys[idx0].val;

	q2.x = track_x->keys[idx1].val;
	q2.y = track_y->keys[idx1].val;
	q2.z = track_z->keys[idx1].val;
	q2.w = track_w->keys[idx1].val;

	return quat_slerp(q1, q2, t);
}
272 void anm_set_scaling(struct anm_node *node, vec3_t scl, anm_time_t tm)
273 {
274 anm_set_value(node->tracks + ANM_TRACK_SCL_X, tm, scl.x);
275 anm_set_value(node->tracks + ANM_TRACK_SCL_Y, tm, scl.y);
276 anm_set_value(node->tracks + ANM_TRACK_SCL_Z, tm, scl.z);
277 invalidate_cache(node);
278 }
280 vec3_t anm_get_node_scaling(struct anm_node *node, anm_time_t tm)
281 {
282 vec3_t v;
283 v.x = anm_get_value(node->tracks + ANM_TRACK_SCL_X, tm);
284 v.y = anm_get_value(node->tracks + ANM_TRACK_SCL_Y, tm);
285 v.z = anm_get_value(node->tracks + ANM_TRACK_SCL_Z, tm);
286 return v;
287 }
290 vec3_t anm_get_position(struct anm_node *node, anm_time_t tm)
291 {
292 mat4_t xform;
293 vec3_t pos = {0.0, 0.0, 0.0};
295 if(!node->parent) {
296 return anm_get_node_position(node, tm);
297 }
299 anm_get_matrix(node, xform, tm);
300 return v3_transform(pos, xform);
301 }
303 quat_t anm_get_rotation(struct anm_node *node, anm_time_t tm)
304 {
305 quat_t rot, prot;
306 rot = anm_get_node_rotation(node, tm);
308 if(!node->parent) {
309 return rot;
310 }
312 prot = anm_get_rotation(node->parent, tm);
313 return quat_mul(prot, rot);
314 }
316 vec3_t anm_get_scaling(struct anm_node *node, anm_time_t tm)
317 {
318 vec3_t s, ps;
319 s = anm_get_node_scaling(node, tm);
321 if(!node->parent) {
322 return s;
323 }
325 ps = anm_get_scaling(node->parent, tm);
326 return v3_mul(s, ps);
327 }
329 void anm_set_pivot(struct anm_node *node, vec3_t piv)
330 {
331 node->pivot = piv;
332 }
334 vec3_t anm_get_pivot(struct anm_node *node)
335 {
336 return node->pivot;
337 }
/* Compute the node's local transformation matrix at time tm:
 *   mat = pivot * translation * rotation * scale * -pivot
 * i.e. rotation and scaling are applied about the pivot point, then the
 * animated translation on top.  Parent transforms are NOT applied here;
 * see anm_get_matrix for the full hierarchy composition. */
void anm_get_node_matrix(struct anm_node *node, mat4_t mat, anm_time_t tm)
{
	mat4_t tmat, rmat, smat, pivmat, neg_pivmat;
	vec3_t pos, scale;
	quat_t rot;

	m4_identity(tmat);
	/*no need to m4_identity(rmat); quat_to_mat4 sets this properly */
	m4_identity(smat);
	m4_identity(pivmat);
	m4_identity(neg_pivmat);

	/* sample the animation tracks at time tm */
	pos = anm_get_node_position(node, tm);
	rot = anm_get_node_rotation(node, tm);
	scale = anm_get_node_scaling(node, tm);

	m4_translate(pivmat, node->pivot.x, node->pivot.y, node->pivot.z);
	m4_translate(neg_pivmat, -node->pivot.x, -node->pivot.y, -node->pivot.z);

	m4_translate(tmat, pos.x, pos.y, pos.z);
	quat_to_mat4(rmat, rot);
	m4_scale(smat, scale.x, scale.y, scale.z);

	/* ok this would look nicer in C++ */
	/* multiplication order matters: accumulate left-to-right into mat */
	m4_mult(mat, pivmat, tmat);
	m4_mult(mat, mat, rmat);
	m4_mult(mat, mat, smat);
	m4_mult(mat, mat, neg_pivmat);
}
369 void anm_get_node_inv_matrix(struct anm_node *node, mat4_t mat, anm_time_t tm)
370 {
371 mat4_t tmp;
372 anm_get_node_matrix(node, tmp, tm);
373 m4_inverse(mat, tmp);
374 }
/* Compute the node's full node-to-world matrix at time tm, composing
 * the local matrix with every ancestor's matrix.
 * The result is memoized per thread: a mat_cache is fetched via the
 * node's pthread key, created lazily on this thread's first call, and
 * linked into node->cache_list (under cache_list_lock) so that
 * anm_destroy_node can free all of them. */
void anm_get_matrix(struct anm_node *node, mat4_t mat, anm_time_t tm)
{
	struct mat_cache *cache = pthread_getspecific(node->cache_key);
	if(!cache) {
		cache = malloc(sizeof *cache);
		assert(cache);

		pthread_mutex_lock(&node->cache_list_lock);
		cache->next = node->cache_list;
		node->cache_list = cache;
		pthread_mutex_unlock(&node->cache_list_lock);

		/* nothing cached for this thread yet */
		cache->time = ANM_TIME_INVAL;
		cache->inv_time = ANM_TIME_INVAL;
		pthread_setspecific(node->cache_key, cache);
	}

	/* recompute only when the cached matrix is for a different time */
	if(cache->time != tm) {
		anm_get_node_matrix(node, cache->matrix, tm);

		if(node->parent) {
			mat4_t parent_mat;

			anm_get_matrix(node->parent, parent_mat, tm);
			m4_mult(cache->matrix, parent_mat, cache->matrix);
		}
		cache->time = tm;
	}
	m4_copy(mat, cache->matrix);
}
407 void anm_get_inv_matrix(struct anm_node *node, mat4_t mat, anm_time_t tm)
408 {
409 struct mat_cache *cache = pthread_getspecific(node->cache_key);
410 if(!cache) {
411 cache = malloc(sizeof *cache);
412 assert(cache);
414 pthread_mutex_lock(&node->cache_list_lock);
415 cache->next = node->cache_list;
416 node->cache_list = cache;
417 pthread_mutex_unlock(&node->cache_list_lock);
419 cache->inv_time = ANM_TIME_INVAL;
420 cache->inv_time = ANM_TIME_INVAL;
421 pthread_setspecific(node->cache_key, cache);
422 }
424 if(cache->inv_time != tm) {
425 anm_get_matrix(node, mat, tm);
426 m4_inverse(cache->inv_matrix, mat);
427 cache->inv_time = tm;
428 }
429 m4_copy(mat, cache->inv_matrix);
430 }
432 anm_time_t anm_get_start_time(struct anm_node *node)
433 {
434 int i;
435 struct anm_node *c;
436 anm_time_t res = LONG_MAX;
438 for(i=0; i<ANM_NUM_TRACKS; i++) {
439 if(node->tracks[i].count) {
440 anm_time_t tm = node->tracks[i].keys[0].time;
441 if(tm < res) {
442 res = tm;
443 }
444 }
445 }
447 c = node->child;
448 while(c) {
449 anm_time_t tm = anm_get_start_time(c);
450 if(tm < res) {
451 res = tm;
452 }
453 c = c->next;
454 }
455 return res;
456 }
458 anm_time_t anm_get_end_time(struct anm_node *node)
459 {
460 int i;
461 struct anm_node *c;
462 anm_time_t res = LONG_MIN;
464 for(i=0; i<ANM_NUM_TRACKS; i++) {
465 if(node->tracks[i].count) {
466 anm_time_t tm = node->tracks[i].keys[node->tracks[i].count - 1].time;
467 if(tm > res) {
468 res = tm;
469 }
470 }
471 }
473 c = node->child;
474 while(c) {
475 anm_time_t tm = anm_get_end_time(c);
476 if(tm > res) {
477 res = tm;
478 }
479 c = c->next;
480 }
481 return res;
482 }
484 static void invalidate_cache(struct anm_node *node)
485 {
486 struct mat_cache *cache = pthread_getspecific(node->cache_key);
487 if(cache) {
488 cache->time = ANM_TIME_INVAL;
489 }
490 }