goat3d

view libs/vmath/matrix.inl @ 42:6c8a6ee24448

added vs2008 solution
author John Tsiombikas <nuclear@member.fsf.org>
date Sun, 01 Dec 2013 01:32:12 +0200
parents 4deb0b12fe14
children
line source
1 /*
2 libvmath - a vector math library
3 Copyright (C) 2004-2011 John Tsiombikas <nuclear@member.fsf.org>
5 This program is free software: you can redistribute it and/or modify
6 it under the terms of the GNU Lesser General Public License as published
7 by the Free Software Foundation, either version 3 of the License, or
8 (at your option) any later version.
10 This program is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 GNU Lesser General Public License for more details.
15 You should have received a copy of the GNU Lesser General Public License
16 along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */
19 #include <string.h>
21 #ifdef __cplusplus
22 extern "C" {
23 #endif /* __cplusplus */
25 /* C matrix 3x3 functions */
26 static VMATH_INLINE void m3_identity(mat3_t m)
27 {
28 static const mat3_t id = {{1, 0, 0}, {0, 1, 0}, {0, 0, 1}};
29 memcpy(m, id, sizeof id);
30 }
32 static VMATH_INLINE void m3_cons(mat3_t m,
33 scalar_t m11, scalar_t m12, scalar_t m13,
34 scalar_t m21, scalar_t m22, scalar_t m23,
35 scalar_t m31, scalar_t m32, scalar_t m33)
36 {
37 m[0][0] = m11; m[0][1] = m12; m[0][2] = m13;
38 m[1][0] = m21; m[1][1] = m22; m[1][2] = m23;
39 m[2][0] = m31; m[2][1] = m32; m[2][2] = m33;
40 }
42 static VMATH_INLINE void m3_copy(mat3_t dest, mat3_t src)
43 {
44 memcpy(dest, src, sizeof(mat3_t));
45 }
48 /* C matrix 4x4 functions */
49 static VMATH_INLINE void m4_identity(mat4_t m)
50 {
51 static const mat4_t id = {{1, 0, 0, 0}, {0, 1, 0, 0}, {0, 0, 1, 0}, {0, 0, 0, 1}};
52 memcpy(m, id, sizeof id);
53 }
55 static VMATH_INLINE void m4_cons(mat4_t m,
56 scalar_t m11, scalar_t m12, scalar_t m13, scalar_t m14,
57 scalar_t m21, scalar_t m22, scalar_t m23, scalar_t m24,
58 scalar_t m31, scalar_t m32, scalar_t m33, scalar_t m34,
59 scalar_t m41, scalar_t m42, scalar_t m43, scalar_t m44)
60 {
61 m[0][0] = m11; m[0][1] = m12; m[0][2] = m13; m[0][3] = m14;
62 m[1][0] = m21; m[1][1] = m22; m[1][2] = m23; m[1][3] = m24;
63 m[2][0] = m31; m[2][1] = m32; m[2][2] = m33; m[2][3] = m34;
64 m[3][0] = m41; m[3][1] = m42; m[3][2] = m43; m[3][3] = m44;
65 }
67 static VMATH_INLINE void m4_copy(mat4_t dest, mat4_t src)
68 {
69 memcpy(dest, src, sizeof(mat4_t));
70 }
72 static VMATH_INLINE void m4_mult(mat4_t res, mat4_t m1, mat4_t m2)
73 {
74 mat4_t tmp;
76 /*
77 int i, j;
78 for(i=0; i<4; i++) {
79 for(j=0; j<4; j++) {
80 tmp[i][j] = m1[i][0] * m2[0][j] + m1[i][1] * m2[1][j] + m1[i][2] * m2[2][j] + m1[i][3] * m2[3][j];
81 }
82 }
83 */
85 tmp[0][0] = m1[0][0] * m2[0][0] + m1[0][1] * m2[1][0] + m1[0][2] * m2[2][0] + m1[0][3] * m2[3][0];
86 tmp[0][1] = m1[0][0] * m2[0][1] + m1[0][1] * m2[1][1] + m1[0][2] * m2[2][1] + m1[0][3] * m2[3][1];
87 tmp[0][2] = m1[0][0] * m2[0][2] + m1[0][1] * m2[1][2] + m1[0][2] * m2[2][2] + m1[0][3] * m2[3][2];
88 tmp[0][3] = m1[0][0] * m2[0][3] + m1[0][1] * m2[1][3] + m1[0][2] * m2[2][3] + m1[0][3] * m2[3][3];
90 tmp[1][0] = m1[1][0] * m2[0][0] + m1[1][1] * m2[1][0] + m1[1][2] * m2[2][0] + m1[1][3] * m2[3][0];
91 tmp[1][1] = m1[1][0] * m2[0][1] + m1[1][1] * m2[1][1] + m1[1][2] * m2[2][1] + m1[1][3] * m2[3][1];
92 tmp[1][2] = m1[1][0] * m2[0][2] + m1[1][1] * m2[1][2] + m1[1][2] * m2[2][2] + m1[1][3] * m2[3][2];
93 tmp[1][3] = m1[1][0] * m2[0][3] + m1[1][1] * m2[1][3] + m1[1][2] * m2[2][3] + m1[1][3] * m2[3][3];
95 tmp[2][0] = m1[2][0] * m2[0][0] + m1[2][1] * m2[1][0] + m1[2][2] * m2[2][0] + m1[2][3] * m2[3][0];
96 tmp[2][1] = m1[2][0] * m2[0][1] + m1[2][1] * m2[1][1] + m1[2][2] * m2[2][1] + m1[2][3] * m2[3][1];
97 tmp[2][2] = m1[2][0] * m2[0][2] + m1[2][1] * m2[1][2] + m1[2][2] * m2[2][2] + m1[2][3] * m2[3][2];
98 tmp[2][3] = m1[2][0] * m2[0][3] + m1[2][1] * m2[1][3] + m1[2][2] * m2[2][3] + m1[2][3] * m2[3][3];
100 tmp[3][0] = m1[3][0] * m2[0][0] + m1[3][1] * m2[1][0] + m1[3][2] * m2[2][0] + m1[3][3] * m2[3][0];
101 tmp[3][1] = m1[3][0] * m2[0][1] + m1[3][1] * m2[1][1] + m1[3][2] * m2[2][1] + m1[3][3] * m2[3][1];
102 tmp[3][2] = m1[3][0] * m2[0][2] + m1[3][1] * m2[1][2] + m1[3][2] * m2[2][2] + m1[3][3] * m2[3][2];
103 tmp[3][3] = m1[3][0] * m2[0][3] + m1[3][1] * m2[1][3] + m1[3][2] * m2[2][3] + m1[3][3] * m2[3][3];
105 m4_copy(res, tmp);
106 }
108 static VMATH_INLINE void m4_set_column(mat4_t m, vec4_t v, int idx)
109 {
110 m[0][idx] = v.x;
111 m[1][idx] = v.y;
112 m[2][idx] = v.z;
113 m[3][idx] = v.w;
114 }
116 static VMATH_INLINE void m4_set_row(mat4_t m, vec4_t v, int idx)
117 {
118 m[idx][0] = v.x;
119 m[idx][1] = v.y;
120 m[idx][2] = v.z;
121 m[idx][3] = v.w;
122 }
124 #ifdef __cplusplus
125 } /* extern "C" */
128 /* unrolled to hell and VMATH_INLINE */
129 VMATH_INLINE Matrix4x4 operator *(const Matrix4x4 &m1, const Matrix4x4 &m2)
130 {
131 Matrix4x4 res;
133 /*
134 for(i=0; i<4; i++) {
135 for(j=0; j<4; j++) {
136 res.m[i][j] = m1.m[i][0] * m2.m[0][j] + m1.m[i][1] * m2.m[1][j] + m1.m[i][2] * m2.m[2][j] + m1.m[i][3] * m2.m[3][j];
137 }
138 }
139 */
141 res.m[0][0] = m1.m[0][0] * m2.m[0][0] + m1.m[0][1] * m2.m[1][0] + m1.m[0][2] * m2.m[2][0] + m1.m[0][3] * m2.m[3][0];
142 res.m[0][1] = m1.m[0][0] * m2.m[0][1] + m1.m[0][1] * m2.m[1][1] + m1.m[0][2] * m2.m[2][1] + m1.m[0][3] * m2.m[3][1];
143 res.m[0][2] = m1.m[0][0] * m2.m[0][2] + m1.m[0][1] * m2.m[1][2] + m1.m[0][2] * m2.m[2][2] + m1.m[0][3] * m2.m[3][2];
144 res.m[0][3] = m1.m[0][0] * m2.m[0][3] + m1.m[0][1] * m2.m[1][3] + m1.m[0][2] * m2.m[2][3] + m1.m[0][3] * m2.m[3][3];
146 res.m[1][0] = m1.m[1][0] * m2.m[0][0] + m1.m[1][1] * m2.m[1][0] + m1.m[1][2] * m2.m[2][0] + m1.m[1][3] * m2.m[3][0];
147 res.m[1][1] = m1.m[1][0] * m2.m[0][1] + m1.m[1][1] * m2.m[1][1] + m1.m[1][2] * m2.m[2][1] + m1.m[1][3] * m2.m[3][1];
148 res.m[1][2] = m1.m[1][0] * m2.m[0][2] + m1.m[1][1] * m2.m[1][2] + m1.m[1][2] * m2.m[2][2] + m1.m[1][3] * m2.m[3][2];
149 res.m[1][3] = m1.m[1][0] * m2.m[0][3] + m1.m[1][1] * m2.m[1][3] + m1.m[1][2] * m2.m[2][3] + m1.m[1][3] * m2.m[3][3];
151 res.m[2][0] = m1.m[2][0] * m2.m[0][0] + m1.m[2][1] * m2.m[1][0] + m1.m[2][2] * m2.m[2][0] + m1.m[2][3] * m2.m[3][0];
152 res.m[2][1] = m1.m[2][0] * m2.m[0][1] + m1.m[2][1] * m2.m[1][1] + m1.m[2][2] * m2.m[2][1] + m1.m[2][3] * m2.m[3][1];
153 res.m[2][2] = m1.m[2][0] * m2.m[0][2] + m1.m[2][1] * m2.m[1][2] + m1.m[2][2] * m2.m[2][2] + m1.m[2][3] * m2.m[3][2];
154 res.m[2][3] = m1.m[2][0] * m2.m[0][3] + m1.m[2][1] * m2.m[1][3] + m1.m[2][2] * m2.m[2][3] + m1.m[2][3] * m2.m[3][3];
156 res.m[3][0] = m1.m[3][0] * m2.m[0][0] + m1.m[3][1] * m2.m[1][0] + m1.m[3][2] * m2.m[2][0] + m1.m[3][3] * m2.m[3][0];
157 res.m[3][1] = m1.m[3][0] * m2.m[0][1] + m1.m[3][1] * m2.m[1][1] + m1.m[3][2] * m2.m[2][1] + m1.m[3][3] * m2.m[3][1];
158 res.m[3][2] = m1.m[3][0] * m2.m[0][2] + m1.m[3][1] * m2.m[1][2] + m1.m[3][2] * m2.m[2][2] + m1.m[3][3] * m2.m[3][2];
159 res.m[3][3] = m1.m[3][0] * m2.m[0][3] + m1.m[3][1] * m2.m[1][3] + m1.m[3][2] * m2.m[2][3] + m1.m[3][3] * m2.m[3][3];
161 return res;
162 }
164 VMATH_INLINE void operator *=(Matrix4x4 &m1, const Matrix4x4 &m2)
165 {
166 Matrix4x4 res = m1 * m2;
167 m1 = res;
168 }
/* Mutable row access: returns a pointer to row 'index' of the matrix,
 * enabling two-level mat[i][j] indexing on Matrix3x3 objects.
 * index is assumed to be in [0, 2] — no bounds checking is performed.
 */
VMATH_INLINE scalar_t *Matrix3x3::operator [](int index)
{
	return m[index];
}
/* Read-only row access: const overload of operator[], returning a
 * pointer to const so const Matrix3x3 objects can be indexed.
 * index is assumed to be in [0, 2] — no bounds checking is performed.
 */
VMATH_INLINE const scalar_t *Matrix3x3::operator [](int index) const
{
	return m[index];
}
/* Reset this matrix to the identity by assigning the static 'identity'
 * member (declared in the class definition, outside this file).
 */
VMATH_INLINE void Matrix3x3::reset_identity()
{
	*this = identity;
}
/* Mutable row access: returns a pointer to row 'index' of the matrix,
 * enabling two-level mat[i][j] indexing on Matrix4x4 objects.
 * index is assumed to be in [0, 3] — no bounds checking is performed.
 */
VMATH_INLINE scalar_t *Matrix4x4::operator [](int index)
{
	return m[index];
}
/* Read-only row access: const overload of operator[], returning a
 * pointer to const so const Matrix4x4 objects can be indexed.
 * index is assumed to be in [0, 3] — no bounds checking is performed.
 */
VMATH_INLINE const scalar_t *Matrix4x4::operator [](int index) const
{
	return m[index];
}
/* Reset this matrix to the identity by assigning the static 'identity'
 * member (declared in the class definition, outside this file).
 */
VMATH_INLINE void Matrix4x4::reset_identity()
{
	*this = identity;
}
200 #endif /* __cplusplus */