gpuray_glsl
view anim/anim.c @ 2:6e3a4daf3159 ("adding cones")

author  | John Tsiombikas <nuclear@mutantstargoat.com>
date    | Sun, 09 Nov 2014 14:39:01 +0200
parents | f234630e38ff

line source
#include <stdlib.h>
#include <string.h>    /* memset, strlen, strcpy */
#include <limits.h>
#include <assert.h>
#include "anim.h"
#include "dynarr.h"

#define ROT_USE_SLERP

static void invalidate_cache(struct anm_node *node);
int anm_init_node(struct anm_node *node)
{
    int i, j;
    static const float defaults[] = {
        0.0f, 0.0f, 0.0f,          /* default position */
        0.0f, 0.0f, 0.0f, 1.0f,    /* default rotation quat */
        1.0f, 1.0f, 1.0f           /* default scale factor */
    };

    memset(node, 0, sizeof *node);

#ifdef ANIM_THREAD_SAFE
    /* initialize thread-local matrix cache */
    pthread_key_create(&node->cache_key, 0);
    pthread_mutex_init(&node->cache_list_lock, 0);
#endif

    for(i=0; i<ANM_NUM_TRACKS; i++) {
        if(anm_init_track(node->tracks + i) == -1) {
            /* clean up the tracks initialized so far and fail */
            for(j=0; j<i; j++) {
                anm_destroy_track(node->tracks + j);
            }
            return -1;
        }
        anm_set_track_default(node->tracks + i, defaults[i]);
    }
    return 0;
}
void anm_destroy_node(struct anm_node *node)
{
    int i;
    free(node->name);

    for(i=0; i<ANM_NUM_TRACKS; i++) {
        anm_destroy_track(node->tracks + i);
    }

#ifdef ANIM_THREAD_SAFE
    /* destroy thread-specific cache */
    pthread_key_delete(node->cache_key);
#endif

    while(node->cache_list) {
        struct mat_cache *tmp = node->cache_list;
        node->cache_list = tmp->next;
        free(tmp);
    }
}

void anm_destroy_node_tree(struct anm_node *tree)
{
    struct anm_node *c, *tmp;

    if(!tree) return;

    c = tree->child;
    while(c) {
        tmp = c;
        c = c->next;

        anm_destroy_node_tree(tmp);
    }
    anm_destroy_node(tree);
}

struct anm_node *anm_create_node(void)
{
    struct anm_node *n;

    if((n = malloc(sizeof *n))) {
        if(anm_init_node(n) == -1) {
            free(n);
            return 0;
        }
    }
    return n;
}

void anm_free_node(struct anm_node *node)
{
    anm_destroy_node(node);
    free(node);
}

void anm_free_node_tree(struct anm_node *tree)
{
    struct anm_node *c, *tmp;

    if(!tree) return;

    c = tree->child;
    while(c) {
        tmp = c;
        c = c->next;

        anm_free_node_tree(tmp);
    }

    anm_free_node(tree);
}

int anm_set_node_name(struct anm_node *node, const char *name)
{
    char *str;

    if(!(str = malloc(strlen(name) + 1))) {
        return -1;
    }
    strcpy(str, name);
    free(node->name);
    node->name = str;
    return 0;
}

const char *anm_get_node_name(struct anm_node *node)
{
    return node->name ? node->name : "";
}

void anm_set_interpolator(struct anm_node *node, enum anm_interpolator in)
{
    int i;

    for(i=0; i<ANM_NUM_TRACKS; i++) {
        anm_set_track_interpolator(node->tracks + i, in);
    }
    invalidate_cache(node);
}

void anm_set_extrapolator(struct anm_node *node, enum anm_extrapolator ex)
{
    int i;

    for(i=0; i<ANM_NUM_TRACKS; i++) {
        anm_set_track_extrapolator(node->tracks + i, ex);
    }
    invalidate_cache(node);
}

void anm_link_node(struct anm_node *p, struct anm_node *c)
{
    c->next = p->child;
    p->child = c;

    c->parent = p;
    invalidate_cache(c);
}
int anm_unlink_node(struct anm_node *p, struct anm_node *c)
{
    struct anm_node *iter;

    if(p->child == c) {
        p->child = c->next;
        c->next = 0;
        invalidate_cache(c);
        return 0;
    }

    iter = p->child;
    while(iter->next) {
        if(iter->next == c) {
            iter->next = c->next;
            c->next = 0;
            invalidate_cache(c);
            return 0;
        }
        iter = iter->next;    /* advance, otherwise the loop never terminates when c is absent */
    }
    return -1;
}
void anm_set_position(struct anm_node *node, vec3_t pos, anm_time_t tm)
{
    anm_set_value(node->tracks + ANM_TRACK_POS_X, tm, pos.x);
    anm_set_value(node->tracks + ANM_TRACK_POS_Y, tm, pos.y);
    anm_set_value(node->tracks + ANM_TRACK_POS_Z, tm, pos.z);
    invalidate_cache(node);
}

vec3_t anm_get_node_position(struct anm_node *node, anm_time_t tm)
{
    vec3_t v;
    v.x = anm_get_value(node->tracks + ANM_TRACK_POS_X, tm);
    v.y = anm_get_value(node->tracks + ANM_TRACK_POS_Y, tm);
    v.z = anm_get_value(node->tracks + ANM_TRACK_POS_Z, tm);
    return v;
}

void anm_set_rotation(struct anm_node *node, quat_t rot, anm_time_t tm)
{
    anm_set_value(node->tracks + ANM_TRACK_ROT_X, tm, rot.x);
    anm_set_value(node->tracks + ANM_TRACK_ROT_Y, tm, rot.y);
    anm_set_value(node->tracks + ANM_TRACK_ROT_Z, tm, rot.z);
    anm_set_value(node->tracks + ANM_TRACK_ROT_W, tm, rot.w);
    invalidate_cache(node);
}
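
/* Samples the node's rotation at time tm. With ROT_USE_SLERP defined (the
 * default above), the four rotation tracks are not interpolated per component;
 * the keyframe interval is located on the X track and the two endpoint
 * quaternions are combined with quat_slerp(). This relies on all four rotation
 * tracks carrying keys at identical times, which holds as long as rotations
 * are only keyed through anm_set_rotation().
 */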
quat_t anm_get_node_rotation(struct anm_node *node, anm_time_t tm)
{
#ifndef ROT_USE_SLERP
    quat_t q;
    q.x = anm_get_value(node->tracks + ANM_TRACK_ROT_X, tm);
    q.y = anm_get_value(node->tracks + ANM_TRACK_ROT_Y, tm);
    q.z = anm_get_value(node->tracks + ANM_TRACK_ROT_Z, tm);
    q.w = anm_get_value(node->tracks + ANM_TRACK_ROT_W, tm);
    return q;
#else
    int idx0, idx1, last_idx;
    anm_time_t tstart, tend;
    float t, dt;
    struct anm_track *track_x, *track_y, *track_z, *track_w;
    quat_t q, q1, q2;

    track_x = node->tracks + ANM_TRACK_ROT_X;
    track_y = node->tracks + ANM_TRACK_ROT_Y;
    track_z = node->tracks + ANM_TRACK_ROT_Z;
    track_w = node->tracks + ANM_TRACK_ROT_W;

    if(!track_x->count) {
        q.x = track_x->def_val;
        q.y = track_y->def_val;
        q.z = track_z->def_val;
        q.w = track_w->def_val;
        return q;
    }

    last_idx = track_x->count - 1;

    tstart = track_x->keys[0].time;
    tend = track_x->keys[last_idx].time;

    if(tstart == tend) {
        q.x = track_x->keys[0].val;
        q.y = track_y->keys[0].val;
        q.z = track_z->keys[0].val;
        q.w = track_w->keys[0].val;
        return q;
    }

    tm = anm_remap_time(track_x, tm, tstart, tend);

    idx0 = anm_get_key_interval(track_x, tm);
    assert(idx0 >= 0 && idx0 < track_x->count);
    idx1 = idx0 + 1;

    if(idx0 == last_idx) {
        q.x = track_x->keys[idx0].val;
        q.y = track_y->keys[idx0].val;
        q.z = track_z->keys[idx0].val;
        q.w = track_w->keys[idx0].val;
        return q;
    }

    dt = (float)(track_x->keys[idx1].time - track_x->keys[idx0].time);
    t = (float)(tm - track_x->keys[idx0].time) / dt;

    q1.x = track_x->keys[idx0].val;
    q1.y = track_y->keys[idx0].val;
    q1.z = track_z->keys[idx0].val;
    q1.w = track_w->keys[idx0].val;

    q2.x = track_x->keys[idx1].val;
    q2.y = track_y->keys[idx1].val;
    q2.z = track_z->keys[idx1].val;
    q2.w = track_w->keys[idx1].val;

    /*q1 = quat_normalize(q1);
    q2 = quat_normalize(q2);*/

    return quat_slerp(q1, q2, t);
#endif
}

void anm_set_scaling(struct anm_node *node, vec3_t scl, anm_time_t tm)
{
    anm_set_value(node->tracks + ANM_TRACK_SCL_X, tm, scl.x);
    anm_set_value(node->tracks + ANM_TRACK_SCL_Y, tm, scl.y);
    anm_set_value(node->tracks + ANM_TRACK_SCL_Z, tm, scl.z);
    invalidate_cache(node);
}

vec3_t anm_get_node_scaling(struct anm_node *node, anm_time_t tm)
{
    vec3_t v;
    v.x = anm_get_value(node->tracks + ANM_TRACK_SCL_X, tm);
    v.y = anm_get_value(node->tracks + ANM_TRACK_SCL_Y, tm);
    v.z = anm_get_value(node->tracks + ANM_TRACK_SCL_Z, tm);
    return v;
}
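
/* The anm_get_position/rotation/scaling getters below return the node's
 * transformation combined with all of its ancestors (by recursing through
 * node->parent), whereas the anm_get_node_* getters above sample only this
 * node's own tracks.
 */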
vec3_t anm_get_position(struct anm_node *node, anm_time_t tm)
{
    mat4_t xform;
    vec3_t pos = {0.0, 0.0, 0.0};

    if(!node->parent) {
        return anm_get_node_position(node, tm);
    }

    anm_get_matrix(node, xform, tm);
    return v3_transform(pos, xform);
}

quat_t anm_get_rotation(struct anm_node *node, anm_time_t tm)
{
    quat_t rot, prot;
    rot = anm_get_node_rotation(node, tm);

    if(!node->parent) {
        return rot;
    }

    prot = anm_get_rotation(node->parent, tm);
    return quat_mul(prot, rot);
}

vec3_t anm_get_scaling(struct anm_node *node, anm_time_t tm)
{
    vec3_t s, ps;
    s = anm_get_node_scaling(node, tm);

    if(!node->parent) {
        return s;
    }

    ps = anm_get_scaling(node->parent, tm);
    return v3_mul(s, ps);
}

void anm_set_pivot(struct anm_node *node, vec3_t piv)
{
    node->pivot = piv;
}

vec3_t anm_get_pivot(struct anm_node *node)
{
    return node->pivot;
}

void anm_get_node_matrix(struct anm_node *node, mat4_t mat, anm_time_t tm)
{
    int i;
    mat4_t rmat;
    vec3_t pos, scale;
    quat_t rot;

    pos = anm_get_node_position(node, tm);
    rot = anm_get_node_rotation(node, tm);
    scale = anm_get_node_scaling(node, tm);

    m4_set_translation(mat, node->pivot.x, node->pivot.y, node->pivot.z);

    quat_to_mat4(rmat, rot);
    for(i=0; i<3; i++) {
        mat[i][0] = rmat[i][0];
        mat[i][1] = rmat[i][1];
        mat[i][2] = rmat[i][2];
    }
    /* this loop is equivalent to: m4_mult(mat, mat, rmat); */

    mat[0][0] *= scale.x; mat[0][1] *= scale.y; mat[0][2] *= scale.z; mat[0][3] += pos.x;
    mat[1][0] *= scale.x; mat[1][1] *= scale.y; mat[1][2] *= scale.z; mat[1][3] += pos.y;
    mat[2][0] *= scale.x; mat[2][1] *= scale.y; mat[2][2] *= scale.z; mat[2][3] += pos.z;

    m4_translate(mat, -node->pivot.x, -node->pivot.y, -node->pivot.z);

    /* that's basically: pivot * rotation * translation * scaling * -pivot */
}

void anm_get_node_inv_matrix(struct anm_node *node, mat4_t mat, anm_time_t tm)
{
    mat4_t tmp;
    anm_get_node_matrix(node, tmp, tm);
    m4_inverse(mat, tmp);
}
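
/* anm_get_matrix/anm_get_inv_matrix lazily compute the node's combined matrix
 * and cache it per requested time. When ANIM_THREAD_SAFE is defined, each
 * thread gets its own struct mat_cache through a pthread TLS key; every cache
 * is also linked into node->cache_list (under cache_list_lock) so that
 * anm_destroy_node can free them all. Without ANIM_THREAD_SAFE there is a
 * single cache at the head of cache_list.
 */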
void anm_get_matrix(struct anm_node *node, mat4_t mat, anm_time_t tm)
{
#ifdef ANIM_THREAD_SAFE
    struct mat_cache *cache = pthread_getspecific(node->cache_key);
#else
    struct mat_cache *cache = node->cache_list;
#endif
    if(!cache) {
        cache = malloc(sizeof *cache);
        assert(cache);
        cache->time = ANM_TIME_INVAL;
        cache->inv_time = ANM_TIME_INVAL;

#ifdef ANIM_THREAD_SAFE
        pthread_mutex_lock(&node->cache_list_lock);
        cache->next = node->cache_list;
        node->cache_list = cache;
        pthread_mutex_unlock(&node->cache_list_lock);
        pthread_setspecific(node->cache_key, cache);
#else
        cache->next = node->cache_list;
        node->cache_list = cache;
#endif
    }

    if(cache->time != tm) {
        anm_get_node_matrix(node, cache->matrix, tm);

        if(node->parent) {
            mat4_t parent_mat;

            anm_get_matrix(node->parent, parent_mat, tm);
            m4_mult(cache->matrix, parent_mat, cache->matrix);
        }
        cache->time = tm;
    }
    m4_copy(mat, cache->matrix);
}

void anm_get_inv_matrix(struct anm_node *node, mat4_t mat, anm_time_t tm)
{
#ifdef ANIM_THREAD_SAFE
    struct mat_cache *cache = pthread_getspecific(node->cache_key);
#else
    struct mat_cache *cache = node->cache_list;
#endif
    if(!cache) {
        cache = malloc(sizeof *cache);
        assert(cache);
        cache->time = ANM_TIME_INVAL;
        cache->inv_time = ANM_TIME_INVAL;

#ifdef ANIM_THREAD_SAFE
        pthread_mutex_lock(&node->cache_list_lock);
        cache->next = node->cache_list;
        node->cache_list = cache;
        pthread_mutex_unlock(&node->cache_list_lock);
        pthread_setspecific(node->cache_key, cache);
#else
        cache->next = node->cache_list;
        node->cache_list = cache;
#endif
    }

    if(cache->inv_time != tm) {
        anm_get_matrix(node, mat, tm);
        m4_inverse(cache->inv_matrix, mat);
        cache->inv_time = tm;
    }
    m4_copy(mat, cache->inv_matrix);
}

anm_time_t anm_get_start_time(struct anm_node *node)
{
    int i;
    struct anm_node *c;
    anm_time_t res = LONG_MAX;

    for(i=0; i<ANM_NUM_TRACKS; i++) {
        if(node->tracks[i].count) {
            anm_time_t tm = node->tracks[i].keys[0].time;
            if(tm < res) {
                res = tm;
            }
        }
    }

    c = node->child;
    while(c) {
        anm_time_t tm = anm_get_start_time(c);
        if(tm < res) {
            res = tm;
        }
        c = c->next;
    }
    return res;
}

anm_time_t anm_get_end_time(struct anm_node *node)
{
    int i;
    struct anm_node *c;
    anm_time_t res = LONG_MIN;

    for(i=0; i<ANM_NUM_TRACKS; i++) {
        if(node->tracks[i].count) {
            anm_time_t tm = node->tracks[i].keys[node->tracks[i].count - 1].time;
            if(tm > res) {
                res = tm;
            }
        }
    }

    c = node->child;
    while(c) {
        anm_time_t tm = anm_get_end_time(c);
        if(tm > res) {
            res = tm;
        }
        c = c->next;
    }
    return res;
}
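
/* Note: with ANIM_THREAD_SAFE defined this only resets the calling thread's
 * cache entry; other threads keep their previously cached matrices until they
 * sample a different time.
 */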
static void invalidate_cache(struct anm_node *node)
{
#ifdef ANIM_THREAD_SAFE
    struct mat_cache *cache = pthread_getspecific(node->cache_key);
#else
    struct mat_cache *cache = node->cache_list;
#endif
    if(cache) {
        cache->time = cache->inv_time = ANM_TIME_INVAL;
    }
}
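
A minimal usage sketch of the API above, not part of anim.c. It assumes anm_time_t takes plain integer timestamps (the unit is whatever time base the library uses; the code only requires that values compare like longs) and that vec3_t/quat_t are simple structs with x/y/z/w members, as the source suggests:

#include <stdio.h>
#include "anim.h"

int main(void)
{
    mat4_t xform;
    vec3_t pos = {0.0, 0.0, 0.0};
    quat_t rot;
    struct anm_node *root, *child;

    /* build a two-node hierarchy */
    root = anm_create_node();
    child = anm_create_node();
    anm_link_node(root, child);

    /* key the child's position at two times; anm_set_position keys all
     * three position tracks and invalidates the matrix cache */
    anm_set_position(child, pos, 0);
    pos.x = 10.0f;
    anm_set_position(child, pos, 1000);

    /* identity rotation keyed once */
    rot.x = rot.y = rot.z = 0.0f;
    rot.w = 1.0f;
    anm_set_rotation(child, rot, 0);

    /* sample the child's matrix (combined with its parent's) between the keys */
    anm_get_matrix(child, xform, 500);
    printf("x translation at t=500: %f\n", xform[0][3]);

    anm_free_node_tree(root);
    return 0;
}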