libanim

view src/anim.c @ 37:c97151c60302

libanim mercurial repo
author John Tsiombikas <nuclear@mutantstargoat.com>
date Sun, 08 Jan 2012 05:13:13 +0200
parents
children 69654793abc3
line source
#include <stdlib.h>
#include <string.h>
#include <limits.h>
#include <assert.h>
#include "anim.h"
#include "dynarr.h"
6 static void invalidate_cache(struct anm_node *node);
8 int anm_init_node(struct anm_node *node)
9 {
10 int i, j;
11 static const float defaults[] = {
12 0.0f, 0.0f, 0.0f, /* default position */
13 0.0f, 0.0f, 0.0f, 1.0f, /* default rotation quat */
14 1.0f, 1.0f, 1.0f /* default scale factor */
15 };
17 memset(node, 0, sizeof *node);
19 /* initialize thread-local matrix cache */
20 pthread_key_create(&node->cache_key, 0);
22 for(i=0; i<ANM_NUM_TRACKS; i++) {
23 if(anm_init_track(node->tracks + i) == -1) {
24 for(j=0; j<i; j++) {
25 anm_destroy_track(node->tracks + i);
26 }
27 }
28 anm_set_track_default(node->tracks + i, defaults[i]);
29 }
30 return 0;
31 }
33 void anm_destroy_node(struct anm_node *node)
34 {
35 int i;
36 free(node->name);
38 for(i=0; i<ANM_NUM_TRACKS; i++) {
39 anm_destroy_track(node->tracks + i);
40 }
42 /* destroy thread-specific cache */
43 pthread_key_delete(node->cache_key);
45 while(node->cache_list) {
46 struct mat_cache *tmp = node->cache_list;
47 node->cache_list = tmp->next;
48 free(tmp);
49 }
50 }
52 void anm_destroy_node_tree(struct anm_node *tree)
53 {
54 struct anm_node *c, *tmp;
56 if(!tree) return;
58 c = tree->child;
59 while(c) {
60 tmp = c;
61 c = c->next;
63 anm_destroy_node_tree(tmp);
64 }
65 anm_destroy_node(tree);
66 }
68 struct anm_node *anm_create_node(void)
69 {
70 struct anm_node *n;
72 if((n = malloc(sizeof *n))) {
73 if(anm_init_node(n) == -1) {
74 free(n);
75 return 0;
76 }
77 }
78 return n;
79 }
/* Destroy a node created with anm_create_node and free its memory.
 * Note: does not recurse into children; use anm_free_node_tree for
 * a whole hierarchy.
 */
void anm_free_node(struct anm_node *node)
{
	anm_destroy_node(node);
	free(node);
}
87 void anm_free_node_tree(struct anm_node *tree)
88 {
89 struct anm_node *c, *tmp;
91 if(!tree) return;
93 c = tree->child;
94 while(c) {
95 tmp = c;
96 c = c->next;
98 anm_free_node_tree(tmp);
99 }
101 anm_free_node(tree);
102 }
104 int anm_set_node_name(struct anm_node *node, const char *name)
105 {
106 char *str;
108 if(!(str = malloc(strlen(name) + 1))) {
109 return -1;
110 }
111 strcpy(str, name);
112 free(node->name);
113 node->name = str;
114 return 0;
115 }
117 const char *anm_get_node_name(struct anm_node *node)
118 {
119 return node->name ? node->name : "";
120 }
122 void anm_set_interpolator(struct anm_node *node, enum anm_interpolator in)
123 {
124 int i;
126 for(i=0; i<ANM_NUM_TRACKS; i++) {
127 anm_set_track_interpolator(node->tracks + i, in);
128 }
129 invalidate_cache(node);
130 }
132 void anm_set_extrapolator(struct anm_node *node, enum anm_extrapolator ex)
133 {
134 int i;
136 for(i=0; i<ANM_NUM_TRACKS; i++) {
137 anm_set_track_extrapolator(node->tracks + i, ex);
138 }
139 invalidate_cache(node);
140 }
142 void anm_link_node(struct anm_node *p, struct anm_node *c)
143 {
144 c->next = p->child;
145 p->child = c;
147 c->parent = p;
148 invalidate_cache(c);
149 }
151 int anm_unlink_node(struct anm_node *p, struct anm_node *c)
152 {
153 struct anm_node *iter;
155 if(p->child == c) {
156 p->child = c->next;
157 c->next = 0;
158 invalidate_cache(c);
159 return 0;
160 }
162 iter = p->child;
163 while(iter->next) {
164 if(iter->next == c) {
165 iter->next = c->next;
166 c->next = 0;
167 invalidate_cache(c);
168 return 0;
169 }
170 }
171 return -1;
172 }
174 void anm_set_position(struct anm_node *node, vec3_t pos, anm_time_t tm)
175 {
176 anm_set_value(node->tracks + ANM_TRACK_POS_X, tm, pos.x);
177 anm_set_value(node->tracks + ANM_TRACK_POS_Y, tm, pos.y);
178 anm_set_value(node->tracks + ANM_TRACK_POS_Z, tm, pos.z);
179 invalidate_cache(node);
180 }
182 vec3_t anm_get_node_position(struct anm_node *node, anm_time_t tm)
183 {
184 vec3_t v;
185 v.x = anm_get_value(node->tracks + ANM_TRACK_POS_X, tm);
186 v.y = anm_get_value(node->tracks + ANM_TRACK_POS_Y, tm);
187 v.z = anm_get_value(node->tracks + ANM_TRACK_POS_Z, tm);
188 return v;
189 }
191 void anm_set_rotation(struct anm_node *node, quat_t rot, anm_time_t tm)
192 {
193 anm_set_value(node->tracks + ANM_TRACK_ROT_X, tm, rot.x);
194 anm_set_value(node->tracks + ANM_TRACK_ROT_Y, tm, rot.y);
195 anm_set_value(node->tracks + ANM_TRACK_ROT_Z, tm, rot.z);
196 anm_set_value(node->tracks + ANM_TRACK_ROT_W, tm, rot.w);
197 invalidate_cache(node);
198 }
200 quat_t anm_get_node_rotation(struct anm_node *node, anm_time_t tm)
201 {
202 int idx0, idx1, last_idx;
203 anm_time_t tstart, tend;
204 float t, dt;
205 struct anm_track *track_x, *track_y, *track_z, *track_w;
206 quat_t q, q1, q2;
208 track_x = node->tracks + ANM_TRACK_ROT_X;
209 track_y = node->tracks + ANM_TRACK_ROT_Y;
210 track_z = node->tracks + ANM_TRACK_ROT_Z;
211 track_w = node->tracks + ANM_TRACK_ROT_W;
213 if(!track_x->count) {
214 q.x = track_x->def_val;
215 q.y = track_y->def_val;
216 q.z = track_z->def_val;
217 q.w = track_w->def_val;
218 return q;
219 }
221 last_idx = track_x->count - 1;
223 tstart = track_x->keys[0].time;
224 tend = track_x->keys[last_idx].time;
225 tm = anm_remap_time(track_x, tm, tstart, tend);
227 idx0 = anm_get_key_interval(track_x, tm);
228 assert(idx0 >= 0 && idx0 < track_x->count);
229 idx1 = idx0 + 1;
231 dt = (float)(track_x->keys[idx1].time - track_x->keys[idx0].time);
232 t = (float)(tm - track_x->keys[idx0].time) / dt;
234 q1.x = track_x->keys[idx0].val;
235 q1.y = track_y->keys[idx0].val;
236 q1.z = track_z->keys[idx0].val;
237 q1.w = track_w->keys[idx0].val;
239 q2.x = track_x->keys[idx1].val;
240 q2.y = track_y->keys[idx1].val;
241 q2.z = track_z->keys[idx1].val;
242 q2.w = track_w->keys[idx1].val;
244 return quat_slerp(q1, q2, t);
245 }
247 void anm_set_scaling(struct anm_node *node, vec3_t scl, anm_time_t tm)
248 {
249 anm_set_value(node->tracks + ANM_TRACK_SCL_X, tm, scl.x);
250 anm_set_value(node->tracks + ANM_TRACK_SCL_Y, tm, scl.y);
251 anm_set_value(node->tracks + ANM_TRACK_SCL_Z, tm, scl.z);
252 invalidate_cache(node);
253 }
255 vec3_t anm_get_node_scaling(struct anm_node *node, anm_time_t tm)
256 {
257 vec3_t v;
258 v.x = anm_get_value(node->tracks + ANM_TRACK_SCL_X, tm);
259 v.y = anm_get_value(node->tracks + ANM_TRACK_SCL_Y, tm);
260 v.z = anm_get_value(node->tracks + ANM_TRACK_SCL_Z, tm);
261 return v;
262 }
265 vec3_t anm_get_position(struct anm_node *node, anm_time_t tm)
266 {
267 mat4_t xform;
268 vec3_t pos = {0.0, 0.0, 0.0};
270 if(!node->parent) {
271 return anm_get_node_position(node, tm);
272 }
274 anm_get_matrix(node, xform, tm);
275 return v3_transform(pos, xform);
276 }
278 quat_t anm_get_rotation(struct anm_node *node, anm_time_t tm)
279 {
280 quat_t rot, prot;
281 rot = anm_get_node_rotation(node, tm);
283 if(!node->parent) {
284 return rot;
285 }
287 prot = anm_get_rotation(node->parent, tm);
288 return quat_mul(prot, rot);
289 }
291 vec3_t anm_get_scaling(struct anm_node *node, anm_time_t tm)
292 {
293 vec3_t s, ps;
294 s = anm_get_node_scaling(node, tm);
296 if(!node->parent) {
297 return s;
298 }
300 ps = anm_get_scaling(node->parent, tm);
301 return v3_mul(s, ps);
302 }
/* Set the pivot point around which rotation and scaling are applied
 * when the node's matrix is built (see anm_get_matrix).
 */
void anm_set_pivot(struct anm_node *node, vec3_t piv)
{
	node->pivot = piv;
}
/* Return the node's current pivot point. */
vec3_t anm_get_pivot(struct anm_node *node)
{
	return node->pivot;
}
314 void anm_get_matrix(struct anm_node *node, mat4_t mat, anm_time_t tm)
315 {
316 struct mat_cache *cache = pthread_getspecific(node->cache_key);
317 if(!cache) {
318 cache = malloc(sizeof *cache);
319 assert(cache);
321 pthread_mutex_lock(&node->cache_list_lock);
322 cache->next = node->cache_list;
323 node->cache_list = cache;
324 pthread_mutex_unlock(&node->cache_list_lock);
326 cache->time = ANM_TIME_INVAL;
327 pthread_setspecific(node->cache_key, cache);
328 }
330 if(cache->time != tm) {
331 mat4_t tmat, rmat, smat, pivmat, neg_pivmat;
332 vec3_t pos, scale;
333 quat_t rot;
335 m4_identity(tmat);
336 /*no need to m4_identity(rmat); quat_to_mat4 sets this properly */
337 m4_identity(smat);
338 m4_identity(pivmat);
339 m4_identity(neg_pivmat);
341 pos = anm_get_node_position(node, tm);
342 rot = anm_get_node_rotation(node, tm);
343 scale = anm_get_node_scaling(node, tm);
345 m4_translate(pivmat, node->pivot.x, node->pivot.y, node->pivot.z);
346 m4_translate(neg_pivmat, -node->pivot.x, -node->pivot.y, -node->pivot.z);
348 m4_translate(tmat, pos.x, pos.y, pos.z);
349 quat_to_mat4(rmat, rot);
350 m4_translate(smat, scale.x, scale.y, scale.z);
352 /* ok this would look nicer in C++ */
353 m4_mult(cache->matrix, pivmat, tmat);
354 m4_mult(cache->matrix, cache->matrix, rmat);
355 m4_mult(cache->matrix, cache->matrix, smat);
356 m4_mult(cache->matrix, cache->matrix, neg_pivmat);
358 if(node->parent) {
359 mat4_t parent_mat;
361 anm_get_matrix(node->parent, mat, tm);
362 m4_mult(cache->matrix, parent_mat, cache->matrix);
363 }
364 cache->time = tm;
365 }
366 m4_copy(mat, cache->matrix);
367 }
369 void anm_get_inv_matrix(struct anm_node *node, mat4_t mat, anm_time_t tm)
370 {
371 struct mat_cache *cache = pthread_getspecific(node->cache_key);
372 if(!cache) {
373 cache = malloc(sizeof *cache);
374 assert(cache);
376 pthread_mutex_lock(&node->cache_list_lock);
377 cache->next = node->cache_list;
378 node->cache_list = cache;
379 pthread_mutex_unlock(&node->cache_list_lock);
381 cache->inv_time = ANM_TIME_INVAL;
382 pthread_setspecific(node->cache_key, cache);
383 }
385 if(cache->inv_time != tm) {
386 anm_get_matrix(node, mat, tm);
387 m4_inverse(cache->inv_matrix, mat);
388 cache->inv_time = tm;
389 }
390 m4_copy(mat, cache->inv_matrix);
391 }
393 anm_time_t anm_get_start_time(struct anm_node *node)
394 {
395 int i;
396 struct anm_node *c;
397 anm_time_t res = LONG_MAX;
399 for(i=0; i<ANM_NUM_TRACKS; i++) {
400 if(node->tracks[i].count) {
401 anm_time_t tm = node->tracks[i].keys[0].time;
402 if(tm < res) {
403 res = tm;
404 }
405 }
406 }
408 c = node->child;
409 while(c) {
410 anm_time_t tm = anm_get_start_time(c);
411 if(tm < res) {
412 res = tm;
413 }
414 c = c->next;
415 }
416 return res;
417 }
419 anm_time_t anm_get_end_time(struct anm_node *node)
420 {
421 int i;
422 struct anm_node *c;
423 anm_time_t res = LONG_MIN;
425 for(i=0; i<ANM_NUM_TRACKS; i++) {
426 if(node->tracks[i].count) {
427 anm_time_t tm = node->tracks[i].keys[node->tracks[i].count - 1].time;
428 if(tm > res) {
429 res = tm;
430 }
431 }
432 }
434 c = node->child;
435 while(c) {
436 anm_time_t tm = anm_get_end_time(c);
437 if(tm > res) {
438 res = tm;
439 }
440 c = c->next;
441 }
442 return res;
443 }
445 static void invalidate_cache(struct anm_node *node)
446 {
447 struct mat_cache *cache = pthread_getspecific(node->cache_key);
448 if(cache) {
449 cache->time = ANM_TIME_INVAL;
450 }
451 }