libanim

view src/anim.c @ 44:e9db1d0c09b3

made the kind of interpolation used for quaternion tracks a compile-time conditional for testing (lerp/slerp); obviously defaults to slerp...
author John Tsiombikas <nuclear@member.fsf.org>
date Fri, 01 Mar 2013 09:44:20 +0200
parents b3312cf87715
children 710658962108
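
The change gated by ROT_USE_SLERP below chooses between interpolating the four quaternion components independently (a plain lerp per scalar track, which in general yields a non-unit quaternion and uneven angular velocity) and proper spherical interpolation. A minimal self-contained sketch of the difference; the quat type and both functions here are local illustrations, not libanim's vmath types:

#include <math.h>
#include <stdio.h>

struct quat { float x, y, z, w; };

/* component-wise lerp: what four independently interpolated scalar
 * tracks produce; the result is not unit-length in general */
static struct quat lerp(struct quat a, struct quat b, float t)
{
	struct quat q;
	q.x = a.x + (b.x - a.x) * t;
	q.y = a.y + (b.y - a.y) * t;
	q.z = a.z + (b.z - a.z) * t;
	q.w = a.w + (b.w - a.w) * t;
	return q;
}

/* spherical linear interpolation: stays on the unit sphere and moves
 * with constant angular velocity */
static struct quat slerp(struct quat a, struct quat b, float t)
{
	struct quat q;
	float th, sin_th, s1, s2;
	float dot = a.x * b.x + a.y * b.y + a.z * b.z + a.w * b.w;

	if(dot < 0.0f) {	/* pick the shorter arc */
		b.x = -b.x; b.y = -b.y; b.z = -b.z; b.w = -b.w;
		dot = -dot;
	}
	if(dot > 0.9999f) {	/* nearly parallel: lerp is numerically safer */
		return lerp(a, b, t);
	}
	th = acosf(dot);
	sin_th = sinf(th);
	s1 = sinf((1.0f - t) * th) / sin_th;
	s2 = sinf(t * th) / sin_th;

	q.x = a.x * s1 + b.x * s2;
	q.y = a.y * s1 + b.y * s2;
	q.z = a.z * s1 + b.z * s2;
	q.w = a.w * s1 + b.w * s2;
	return q;
}

int main(void)
{
	/* halfway between identity and a 90-degree rotation about Z */
	struct quat a = {0, 0, 0, 1};
	struct quat b = {0, 0, 0.7071068f, 0.7071068f};
	struct quat l = lerp(a, b, 0.5f);
	struct quat s = slerp(a, b, 0.5f);

	printf("lerp:  (%f %f %f %f)  length != 1\n", l.x, l.y, l.z, l.w);
	printf("slerp: (%f %f %f %f)  unit length\n", s.x, s.y, s.z, s.w);
	return 0;
}
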
line source
#include <stdlib.h>
#include <string.h>	/* memset, strlen, strcpy */
#include <limits.h>
#include <assert.h>
#include "anim.h"
#include "dynarr.h"

#define ROT_USE_SLERP
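
/* ROT_USE_SLERP, defined above, makes anm_get_node_rotation interpolate
 * rotation keyframes with quaternion slerp; leaving it undefined falls back
 * to interpolating the four components independently through their tracks
 * (see the #ifndef block in anm_get_node_rotation below)
 */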

static void invalidate_cache(struct anm_node *node);

int anm_init_node(struct anm_node *node)
{
	int i, j;
	static const float defaults[] = {
		0.0f, 0.0f, 0.0f,		/* default position */
		0.0f, 0.0f, 0.0f, 1.0f,	/* default rotation quat */
		1.0f, 1.0f, 1.0f		/* default scale factor */
	};

	memset(node, 0, sizeof *node);

	/* initialize thread-local matrix cache */
	pthread_key_create(&node->cache_key, 0);

	for(i=0; i<ANM_NUM_TRACKS; i++) {
		if(anm_init_track(node->tracks + i) == -1) {
			/* roll back the tracks initialized so far and fail */
			for(j=0; j<i; j++) {
				anm_destroy_track(node->tracks + j);
			}
			return -1;
		}
		anm_set_track_default(node->tracks + i, defaults[i]);
	}
	return 0;
}

void anm_destroy_node(struct anm_node *node)
{
	int i;
	free(node->name);

	for(i=0; i<ANM_NUM_TRACKS; i++) {
		anm_destroy_track(node->tracks + i);
	}

	/* destroy thread-specific cache */
	pthread_key_delete(node->cache_key);

	while(node->cache_list) {
		struct mat_cache *tmp = node->cache_list;
		node->cache_list = tmp->next;
		free(tmp);
	}
}

void anm_destroy_node_tree(struct anm_node *tree)
{
	struct anm_node *c, *tmp;

	if(!tree) return;

	c = tree->child;
	while(c) {
		tmp = c;
		c = c->next;

		anm_destroy_node_tree(tmp);
	}
	anm_destroy_node(tree);
}

struct anm_node *anm_create_node(void)
{
	struct anm_node *n;

	if((n = malloc(sizeof *n))) {
		if(anm_init_node(n) == -1) {
			free(n);
			return 0;
		}
	}
	return n;
}

void anm_free_node(struct anm_node *node)
{
	anm_destroy_node(node);
	free(node);
}

void anm_free_node_tree(struct anm_node *tree)
{
	struct anm_node *c, *tmp;

	if(!tree) return;

	c = tree->child;
	while(c) {
		tmp = c;
		c = c->next;

		anm_free_node_tree(tmp);
	}
	anm_free_node(tree);
}

int anm_set_node_name(struct anm_node *node, const char *name)
{
	char *str;

	if(!(str = malloc(strlen(name) + 1))) {
		return -1;
	}
	strcpy(str, name);
	free(node->name);
	node->name = str;
	return 0;
}

const char *anm_get_node_name(struct anm_node *node)
{
	return node->name ? node->name : "";
}

void anm_set_interpolator(struct anm_node *node, enum anm_interpolator in)
{
	int i;

	for(i=0; i<ANM_NUM_TRACKS; i++) {
		anm_set_track_interpolator(node->tracks + i, in);
	}
	invalidate_cache(node);
}

void anm_set_extrapolator(struct anm_node *node, enum anm_extrapolator ex)
{
	int i;

	for(i=0; i<ANM_NUM_TRACKS; i++) {
		anm_set_track_extrapolator(node->tracks + i, ex);
	}
	invalidate_cache(node);
}

void anm_link_node(struct anm_node *p, struct anm_node *c)
{
	c->next = p->child;
	p->child = c;

	c->parent = p;
	invalidate_cache(c);
}

int anm_unlink_node(struct anm_node *p, struct anm_node *c)
{
	struct anm_node *iter;

	if(p->child == c) {
		p->child = c->next;
		c->next = 0;
		invalidate_cache(c);
		return 0;
	}

	iter = p->child;
	while(iter->next) {
		if(iter->next == c) {
			iter->next = c->next;
			c->next = 0;
			invalidate_cache(c);
			return 0;
		}
		iter = iter->next;	/* advance to the next sibling */
	}
	return -1;
}

void anm_set_position(struct anm_node *node, vec3_t pos, anm_time_t tm)
{
	anm_set_value(node->tracks + ANM_TRACK_POS_X, tm, pos.x);
	anm_set_value(node->tracks + ANM_TRACK_POS_Y, tm, pos.y);
	anm_set_value(node->tracks + ANM_TRACK_POS_Z, tm, pos.z);
	invalidate_cache(node);
}

vec3_t anm_get_node_position(struct anm_node *node, anm_time_t tm)
{
	vec3_t v;
	v.x = anm_get_value(node->tracks + ANM_TRACK_POS_X, tm);
	v.y = anm_get_value(node->tracks + ANM_TRACK_POS_Y, tm);
	v.z = anm_get_value(node->tracks + ANM_TRACK_POS_Z, tm);
	return v;
}

void anm_set_rotation(struct anm_node *node, quat_t rot, anm_time_t tm)
{
	anm_set_value(node->tracks + ANM_TRACK_ROT_X, tm, rot.x);
	anm_set_value(node->tracks + ANM_TRACK_ROT_Y, tm, rot.y);
	anm_set_value(node->tracks + ANM_TRACK_ROT_Z, tm, rot.z);
	anm_set_value(node->tracks + ANM_TRACK_ROT_W, tm, rot.w);
	invalidate_cache(node);
}

quat_t anm_get_node_rotation(struct anm_node *node, anm_time_t tm)
{
#ifndef ROT_USE_SLERP
	quat_t q;
	q.x = anm_get_value(node->tracks + ANM_TRACK_ROT_X, tm);
	q.y = anm_get_value(node->tracks + ANM_TRACK_ROT_Y, tm);
	q.z = anm_get_value(node->tracks + ANM_TRACK_ROT_Z, tm);
	q.w = anm_get_value(node->tracks + ANM_TRACK_ROT_W, tm);
	return q;
#else
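	/* slerp path; note it assumes all four rotation tracks are keyed at
	 * the same timestamps: the key interval is looked up on the X track
	 * only, and the same indices are used to read Y, Z and W
	 */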
	int idx0, idx1, last_idx;
	anm_time_t tstart, tend;
	float t, dt;
	struct anm_track *track_x, *track_y, *track_z, *track_w;
	quat_t q, q1, q2;

	track_x = node->tracks + ANM_TRACK_ROT_X;
	track_y = node->tracks + ANM_TRACK_ROT_Y;
	track_z = node->tracks + ANM_TRACK_ROT_Z;
	track_w = node->tracks + ANM_TRACK_ROT_W;

	if(!track_x->count) {
		q.x = track_x->def_val;
		q.y = track_y->def_val;
		q.z = track_z->def_val;
		q.w = track_w->def_val;
		return q;
	}

	last_idx = track_x->count - 1;

	tstart = track_x->keys[0].time;
	tend = track_x->keys[last_idx].time;

	if(tstart == tend) {
		q.x = track_x->keys[0].val;
		q.y = track_y->keys[0].val;
		q.z = track_z->keys[0].val;
		q.w = track_w->keys[0].val;
		return q;
	}

	tm = anm_remap_time(track_x, tm, tstart, tend);

	idx0 = anm_get_key_interval(track_x, tm);
	assert(idx0 >= 0 && idx0 < track_x->count);
	idx1 = idx0 + 1;

	if(idx0 == last_idx) {
		q.x = track_x->keys[idx0].val;
		q.y = track_y->keys[idx0].val;
		q.z = track_z->keys[idx0].val;
		q.w = track_w->keys[idx0].val;
		return q;
	}

	dt = (float)(track_x->keys[idx1].time - track_x->keys[idx0].time);
	t = (float)(tm - track_x->keys[idx0].time) / dt;
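	/* t: tm normalized to [0,1] within the [idx0, idx1] key interval */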

	q1.x = track_x->keys[idx0].val;
	q1.y = track_y->keys[idx0].val;
	q1.z = track_z->keys[idx0].val;
	q1.w = track_w->keys[idx0].val;

	q2.x = track_x->keys[idx1].val;
	q2.y = track_y->keys[idx1].val;
	q2.z = track_z->keys[idx1].val;
	q2.w = track_w->keys[idx1].val;

	return quat_slerp(q1, q2, t);
#endif
}

void anm_set_scaling(struct anm_node *node, vec3_t scl, anm_time_t tm)
{
	anm_set_value(node->tracks + ANM_TRACK_SCL_X, tm, scl.x);
	anm_set_value(node->tracks + ANM_TRACK_SCL_Y, tm, scl.y);
	anm_set_value(node->tracks + ANM_TRACK_SCL_Z, tm, scl.z);
	invalidate_cache(node);
}

vec3_t anm_get_node_scaling(struct anm_node *node, anm_time_t tm)
{
	vec3_t v;
	v.x = anm_get_value(node->tracks + ANM_TRACK_SCL_X, tm);
	v.y = anm_get_value(node->tracks + ANM_TRACK_SCL_Y, tm);
	v.z = anm_get_value(node->tracks + ANM_TRACK_SCL_Z, tm);
	return v;
}

vec3_t anm_get_position(struct anm_node *node, anm_time_t tm)
{
	mat4_t xform;
	vec3_t pos = {0.0, 0.0, 0.0};

	if(!node->parent) {
		return anm_get_node_position(node, tm);
	}

	anm_get_matrix(node, xform, tm);
	return v3_transform(pos, xform);
}

quat_t anm_get_rotation(struct anm_node *node, anm_time_t tm)
{
	quat_t rot, prot;
	rot = anm_get_node_rotation(node, tm);

	if(!node->parent) {
		return rot;
	}

	prot = anm_get_rotation(node->parent, tm);
	return quat_mul(prot, rot);
}

vec3_t anm_get_scaling(struct anm_node *node, anm_time_t tm)
{
	vec3_t s, ps;
	s = anm_get_node_scaling(node, tm);

	if(!node->parent) {
		return s;
	}

	ps = anm_get_scaling(node->parent, tm);
	return v3_mul(s, ps);
}

void anm_set_pivot(struct anm_node *node, vec3_t piv)
{
	node->pivot = piv;
}

vec3_t anm_get_pivot(struct anm_node *node)
{
	return node->pivot;
}

void anm_get_node_matrix(struct anm_node *node, mat4_t mat, anm_time_t tm)
{
	mat4_t tmat, rmat, smat, pivmat, neg_pivmat;
	vec3_t pos, scale;
	quat_t rot;

	m4_identity(tmat);
	/* no need to m4_identity(rmat); quat_to_mat4 sets this properly */
	m4_identity(smat);
	m4_identity(pivmat);
	m4_identity(neg_pivmat);

	pos = anm_get_node_position(node, tm);
	rot = anm_get_node_rotation(node, tm);
	scale = anm_get_node_scaling(node, tm);

	m4_translate(pivmat, node->pivot.x, node->pivot.y, node->pivot.z);
	m4_translate(neg_pivmat, -node->pivot.x, -node->pivot.y, -node->pivot.z);

	m4_translate(tmat, pos.x, pos.y, pos.z);
	quat_to_mat4(rmat, rot);
	m4_scale(smat, scale.x, scale.y, scale.z);

	/* ok this would look nicer in C++ */
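	/* compose: mat = pivmat * tmat * rmat * smat * neg_pivmat, i.e. the
	 * node rotates and scales about its pivot, then translates
	 */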
	m4_mult(mat, pivmat, tmat);
	m4_mult(mat, mat, rmat);
	m4_mult(mat, mat, smat);
	m4_mult(mat, mat, neg_pivmat);
}

void anm_get_node_inv_matrix(struct anm_node *node, mat4_t mat, anm_time_t tm)
{
	mat4_t tmp;
	anm_get_node_matrix(node, tmp, tm);
	m4_inverse(mat, tmp);
}
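
/* anm_get_matrix returns the node-to-world matrix, composed with all
 * ancestors, memoized per (thread, node, time). Each thread gets its own
 * struct mat_cache through the pthread key, so cache hits need no locking;
 * cache_list_lock only guards linking a new cache into the node's list,
 * which anm_destroy_node walks to free every thread's cache.
 */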
void anm_get_matrix(struct anm_node *node, mat4_t mat, anm_time_t tm)
{
	struct mat_cache *cache = pthread_getspecific(node->cache_key);
	if(!cache) {
		cache = malloc(sizeof *cache);
		assert(cache);

		pthread_mutex_lock(&node->cache_list_lock);
		cache->next = node->cache_list;
		node->cache_list = cache;
		pthread_mutex_unlock(&node->cache_list_lock);

		cache->time = ANM_TIME_INVAL;
		cache->inv_time = ANM_TIME_INVAL;
		pthread_setspecific(node->cache_key, cache);
	}

	if(cache->time != tm) {
		anm_get_node_matrix(node, cache->matrix, tm);

		if(node->parent) {
			mat4_t parent_mat;

			anm_get_matrix(node->parent, parent_mat, tm);
			m4_mult(cache->matrix, parent_mat, cache->matrix);
		}
		cache->time = tm;
	}
	m4_copy(mat, cache->matrix);
}

void anm_get_inv_matrix(struct anm_node *node, mat4_t mat, anm_time_t tm)
{
	struct mat_cache *cache = pthread_getspecific(node->cache_key);
	if(!cache) {
		cache = malloc(sizeof *cache);
		assert(cache);

		pthread_mutex_lock(&node->cache_list_lock);
		cache->next = node->cache_list;
		node->cache_list = cache;
		pthread_mutex_unlock(&node->cache_list_lock);

		cache->time = ANM_TIME_INVAL;
		cache->inv_time = ANM_TIME_INVAL;
		pthread_setspecific(node->cache_key, cache);
	}

	if(cache->inv_time != tm) {
		anm_get_matrix(node, mat, tm);
		m4_inverse(cache->inv_matrix, mat);
		cache->inv_time = tm;
	}
	m4_copy(mat, cache->inv_matrix);
}

anm_time_t anm_get_start_time(struct anm_node *node)
{
	int i;
	struct anm_node *c;
	anm_time_t res = LONG_MAX;

	for(i=0; i<ANM_NUM_TRACKS; i++) {
		if(node->tracks[i].count) {
			anm_time_t tm = node->tracks[i].keys[0].time;
			if(tm < res) {
				res = tm;
			}
		}
	}

	c = node->child;
	while(c) {
		anm_time_t tm = anm_get_start_time(c);
		if(tm < res) {
			res = tm;
		}
		c = c->next;
	}
	return res;
}

anm_time_t anm_get_end_time(struct anm_node *node)
{
	int i;
	struct anm_node *c;
	anm_time_t res = LONG_MIN;

	for(i=0; i<ANM_NUM_TRACKS; i++) {
		if(node->tracks[i].count) {
			anm_time_t tm = node->tracks[i].keys[node->tracks[i].count - 1].time;
			if(tm > res) {
				res = tm;
			}
		}
	}

	c = node->child;
	while(c) {
		anm_time_t tm = anm_get_end_time(c);
		if(tm > res) {
			res = tm;
		}
		c = c->next;
	}
	return res;
}

static void invalidate_cache(struct anm_node *node)
{
	/* note: only the calling thread's cache entry is invalidated */
	struct mat_cache *cache = pthread_getspecific(node->cache_key);
	if(cache) {
		cache->time = cache->inv_time = ANM_TIME_INVAL;
	}
}
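
For reference, a hypothetical usage sketch of the node API defined in this file. Every call is taken from the functions above; the one assumption is that anm_time_t counts milliseconds:

#include "anim.h"

int main(void)
{
	mat4_t xform;
	vec3_t pos;
	struct anm_node *root, *child;

	root = anm_create_node();
	child = anm_create_node();
	anm_link_node(root, child);

	/* key the child's position: x moves 0 -> 10 between t=0 and t=1000
	 * (milliseconds assumed, see above) */
	pos.x = 0; pos.y = 0; pos.z = 0;
	anm_set_position(child, pos, 0);
	pos.x = 10;
	anm_set_position(child, pos, 1000);

	/* evaluate the child's interpolated world-space matrix halfway */
	anm_get_matrix(child, xform, 500);

	anm_free_node_tree(root);
	return 0;
}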