/************************************************************************************

PublicHeader:   OVR.h
Filename    :   OVR_Alg.h
Content     :   Simple general purpose algorithms: Sort, Binary Search, etc.
Created     :   September 19, 2012
Notes       :

Copyright   :   Copyright 2012 Oculus VR, Inc. All Rights reserved.

Use of this software is subject to the terms of the Oculus license
agreement provided at the time of installation or download, or which
otherwise accompanies this software in either electronic or hard copy form.

************************************************************************************/

#ifndef OVR_Alg_h
#define OVR_Alg_h

#include "OVR_Types.h"
#include <string.h>

namespace OVR { namespace Alg {


//-----------------------------------------------------------------------------------
// ***** Operator extensions

template <typename T> OVR_FORCE_INLINE void Swap(T &a, T &b)
{ T temp(a); a = b; b = temp; }


// ***** min/max are not implemented in Visual Studio 6 standard STL

template <typename T> OVR_FORCE_INLINE const T Min(const T a, const T b)
{ return (a < b) ? a : b; }

template <typename T> OVR_FORCE_INLINE const T Max(const T a, const T b)
{ return (b < a) ? a : b; }

template <typename T> OVR_FORCE_INLINE const T Clamp(const T v, const T minVal, const T maxVal)
{ return Max<T>(minVal, Min<T>(v, maxVal)); }

template <typename T> OVR_FORCE_INLINE int Chop(T f)
{ return (int)f; }

template <typename T> OVR_FORCE_INLINE T Lerp(T a, T b, T f)
{ return (b - a) * f + a; }

// These functions exist to fix a stupid VC++ warning (with /Wp64 on):
// "warning C4267: 'argument' : conversion from 'size_t' to 'const unsigned', possible loss of data"
// Use these functions instead of Min/Max when the arguments are pointer-sized,
// to avoid the warning. Functionally they are absolutely the same as the
// regular Min/Max.
template <typename T> OVR_FORCE_INLINE const T PMin(const T a, const T b)
{
    OVR_COMPILER_ASSERT(sizeof(T) == sizeof(UPInt));
    return (a < b) ? a : b;
}
template <typename T> OVR_FORCE_INLINE const T PMax(const T a, const T b)
{
    OVR_COMPILER_ASSERT(sizeof(T) == sizeof(UPInt));
    return (b < a) ? a : b;
}


template <typename T> OVR_FORCE_INLINE const T Abs(const T v)
{ return (v>=0) ? v : -v; }
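
// Illustrative sketch (not part of the original header): expected results of the
// helpers above. Lerp maps f == 0 to a and f == 1 to b; Clamp pins v into
// [minVal, maxVal]; Chop truncates toward zero.
//
//     float mid = Lerp(10.0f, 20.0f, 0.5f);   // 15.0f
//     int   c   = Clamp(42, 0, 10);           // 10
//     int   t   = Chop(3.9f);                 // 3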


//-----------------------------------------------------------------------------------
// ***** OperatorLess
//
template<class T> struct OperatorLess
{
    static bool Compare(const T& a, const T& b)
    {
        return a < b;
    }
};


//-----------------------------------------------------------------------------------
// ***** QuickSortSliced
//
// Sort any part of any array: plain, Array, ArrayPaged, ArrayUnsafe.
// The range is specified with start, end, where "end" is exclusive!
// The comparison predicate must be specified.
template<class Array, class Less>
void QuickSortSliced(Array& arr, UPInt start, UPInt end, Less less)
{
    enum
    {
        Threshold = 9
    };

    if(end - start < 2) return;

    SPInt  stack[80];
    SPInt* top   = stack;
    SPInt  base  = (SPInt)start;
    SPInt  limit = (SPInt)end;

    for(;;)
    {
        SPInt len = limit - base;
        SPInt i, j, pivot;

        if(len > Threshold)
        {
            // we use base + len/2 as the pivot
            pivot = base + len / 2;
            Swap(arr[base], arr[pivot]);

            i = base + 1;
            j = limit - 1;

            // now ensure that *i <= *base <= *j
            if(less(arr[j],    arr[i]))    Swap(arr[j],    arr[i]);
            if(less(arr[base], arr[i]))    Swap(arr[base], arr[i]);
            if(less(arr[j],    arr[base])) Swap(arr[j],    arr[base]);

            for(;;)
            {
                do i++; while( less(arr[i], arr[base]) );
                do j--; while( less(arr[base], arr[j]) );

                if( i > j )
                {
                    break;
                }

                Swap(arr[i], arr[j]);
            }

            Swap(arr[base], arr[j]);

            // now, push the largest sub-array
            if(j - base > limit - i)
            {
                top[0] = base;
                top[1] = j;
                base   = i;
            }
            else
            {
                top[0] = i;
                top[1] = limit;
                limit  = j;
            }
            top += 2;
        }
        else
        {
            // the sub-array is small, perform insertion sort
            j = base;
            i = j + 1;

            for(; i < limit; j = i, i++)
            {
                for(; less(arr[j + 1], arr[j]); j--)
                {
                    Swap(arr[j + 1], arr[j]);
                    if(j == base)
                    {
                        break;
                    }
                }
            }
            if(top > stack)
            {
                top  -= 2;
                base  = top[0];
                limit = top[1];
            }
            else
            {
                break;
            }
        }
    }
}


//-----------------------------------------------------------------------------------
// ***** QuickSortSliced
//
// Sort any part of any array: plain, Array, ArrayPaged, ArrayUnsafe.
// The range is specified with start, end, where "end" is exclusive!
// The data type must have a defined "<" operator.
template<class Array>
void QuickSortSliced(Array& arr, UPInt start, UPInt end)
{
    typedef typename Array::ValueType ValueType;
    QuickSortSliced(arr, start, end, OperatorLess<ValueType>::Compare);
}
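
// Illustrative usage sketch (not part of the original header): sorting only a slice
// of a container with a caller-supplied predicate. Any type with operator[] works,
// for example the ArrayAdaptor wrapper declared further down in this file. The
// "Descending" functor below is hypothetical example code.
//
//     struct Descending
//     {
//         bool operator()(int a, int b) const { return b < a; }
//     };
//
//     int data[6] = { 4, 1, 5, 9, 2, 7 };
//     ArrayAdaptor<int> wrap(data, 6);
//     QuickSortSliced(wrap, 1, 5, Descending());   // sorts indices [1, 5) descending
//     // data is now { 4, 9, 5, 2, 1, 7 }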

// Same as the corresponding QuickSortSliced, but checks array boundaries to avoid
// a crash in the case of a malfunctioning comparison functor.
template<class Array, class Less>
bool QuickSortSlicedSafe(Array& arr, UPInt start, UPInt end, Less less)
{
    enum
    {
        Threshold = 9
    };

    if(end - start < 2) return true;

    SPInt  stack[80];
    SPInt* top   = stack;
    SPInt  base  = (SPInt)start;
    SPInt  limit = (SPInt)end;

    for(;;)
    {
        SPInt len = limit - base;
        SPInt i, j, pivot;

        if(len > Threshold)
        {
            // we use base + len/2 as the pivot
            pivot = base + len / 2;
            Swap(arr[base], arr[pivot]);

            i = base + 1;
            j = limit - 1;

            // now ensure that *i <= *base <= *j
            if(less(arr[j],    arr[i]))    Swap(arr[j],    arr[i]);
            if(less(arr[base], arr[i]))    Swap(arr[base], arr[i]);
            if(less(arr[j],    arr[base])) Swap(arr[j],    arr[base]);

            for(;;)
            {
                do
                {
                    i++;
                    if (i >= limit)
                        return false;
                } while( less(arr[i], arr[base]) );
                do
                {
                    j--;
                    if (j < 0)
                        return false;
                } while( less(arr[base], arr[j]) );

                if( i > j )
                {
                    break;
                }

                Swap(arr[i], arr[j]);
            }

            Swap(arr[base], arr[j]);

            // now, push the largest sub-array
            if(j - base > limit - i)
            {
                top[0] = base;
                top[1] = j;
                base   = i;
            }
            else
            {
                top[0] = i;
                top[1] = limit;
                limit  = j;
            }
            top += 2;
        }
        else
        {
            // the sub-array is small, perform insertion sort
            j = base;
            i = j + 1;

            for(; i < limit; j = i, i++)
            {
                for(; less(arr[j + 1], arr[j]); j--)
                {
                    Swap(arr[j + 1], arr[j]);
                    if(j == base)
                    {
                        break;
                    }
                }
            }
            if(top > stack)
            {
                top  -= 2;
                base  = top[0];
                limit = top[1];
            }
            else
            {
                break;
            }
        }
    }
    return true;
}

template<class Array>
bool QuickSortSlicedSafe(Array& arr, UPInt start, UPInt end)
{
    typedef typename Array::ValueType ValueType;
    return QuickSortSlicedSafe(arr, start, end, OperatorLess<ValueType>::Compare);
}
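
// Illustrative sketch (not part of the original header): the "Safe" variant is the
// one to use when the comparison functor cannot be trusted. A predicate that violates
// strict weak ordering (here one that always returns true, a hypothetical example)
// can drive the unchecked QuickSortSliced past the ends of the range;
// QuickSortSlicedSafe detects that and returns false instead.
//
//     struct AlwaysLess
//     {
//         bool operator()(int, int) const { return true; }
//     };
//
//     int data[16] = { 3, 1, 4, 1, 5, 9, 2, 6, 5, 3, 5, 8, 9, 7, 9, 3 };
//     ArrayAdaptor<int> wrap(data, 16);                         // declared further down
//     bool ok = QuickSortSlicedSafe(wrap, 0, 16, AlwaysLess()); // ok == false, no crash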

//-----------------------------------------------------------------------------------
// ***** QuickSort
//
// Sort an array: Array, ArrayPaged, ArrayUnsafe.
// The array must have a GetSize() function.
// The comparison predicate must be specified.
template<class Array, class Less>
void QuickSort(Array& arr, Less less)
{
    QuickSortSliced(arr, 0, arr.GetSize(), less);
}

// checks for boundaries
template<class Array, class Less>
bool QuickSortSafe(Array& arr, Less less)
{
    return QuickSortSlicedSafe(arr, 0, arr.GetSize(), less);
}


//-----------------------------------------------------------------------------------
// ***** QuickSort
//
// Sort an array: Array, ArrayPaged, ArrayUnsafe.
// The array must have a GetSize() function.
// The data type must have a defined "<" operator.
template<class Array>
void QuickSort(Array& arr)
{
    typedef typename Array::ValueType ValueType;
    QuickSortSliced(arr, 0, arr.GetSize(), OperatorLess<ValueType>::Compare);
}

template<class Array>
bool QuickSortSafe(Array& arr)
{
    typedef typename Array::ValueType ValueType;
    return QuickSortSlicedSafe(arr, 0, arr.GetSize(), OperatorLess<ValueType>::Compare);
}

//-----------------------------------------------------------------------------------
// ***** InsertionSortSliced
//
// Sort any part of any array: plain, Array, ArrayPaged, ArrayUnsafe.
// The range is specified with start, end, where "end" is exclusive!
// The comparison predicate must be specified.
// Unlike Quick Sort, Insertion Sort is much slower on average, but can be
// much faster on almost-sorted arrays. Besides, it guarantees that elements
// are not swapped unless necessary. For example, an array with all equal
// elements will remain "untouched", while Quick Sort will considerably
// shuffle the elements in this case.
template<class Array, class Less>
void InsertionSortSliced(Array& arr, UPInt start, UPInt end, Less less)
{
    UPInt j = start;
    UPInt i = j + 1;
    UPInt limit = end;

    for(; i < limit; j = i, i++)
    {
        for(; less(arr[j + 1], arr[j]); j--)
        {
            Swap(arr[j + 1], arr[j]);
            if(j <= start)
            {
                break;
            }
        }
    }
}


//-----------------------------------------------------------------------------------
// ***** InsertionSortSliced
//
// Sort any part of any array: plain, Array, ArrayPaged, ArrayUnsafe.
// The range is specified with start, end, where "end" is exclusive!
// The data type must have a defined "<" operator.
template<class Array>
void InsertionSortSliced(Array& arr, UPInt start, UPInt end)
{
    typedef typename Array::ValueType ValueType;
    InsertionSortSliced(arr, start, end, OperatorLess<ValueType>::Compare);
}

//-----------------------------------------------------------------------------------
// ***** InsertionSort
//
// Sort an array: Array, ArrayPaged, ArrayUnsafe.
// The array must have a GetSize() function.
// The comparison predicate must be specified.

template<class Array, class Less>
void InsertionSort(Array& arr, Less less)
{
    InsertionSortSliced(arr, 0, arr.GetSize(), less);
}

//-----------------------------------------------------------------------------------
// ***** InsertionSort
//
// Sort an array: Array, ArrayPaged, ArrayUnsafe.
// The array must have a GetSize() function.
// The data type must have a defined "<" operator.
template<class Array>
void InsertionSort(Array& arr)
{
    typedef typename Array::ValueType ValueType;
    InsertionSortSliced(arr, 0, arr.GetSize(), OperatorLess<ValueType>::Compare);
}
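
// Illustrative sketch (not part of the original header): InsertionSort is the better
// pick when the data is already nearly in order, e.g. re-sorting after appending a
// handful of items to an already sorted container.
//
//     int data[5] = { 1, 3, 7, 9, 4 };     // sorted except for the last element
//     ArrayAdaptor<int> wrap(data, 5);     // ArrayAdaptor is declared further down
//     InsertionSort(wrap);                 // few swaps; data becomes { 1, 3, 4, 7, 9 }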



//-----------------------------------------------------------------------------------
// ***** LowerBoundSliced
//
// Returns the index of the first element in [start, end) that is not less than val,
// i.e. the position where val can be inserted while keeping the range sorted.
template<class Array, class Value, class Less>
UPInt LowerBoundSliced(const Array& arr, UPInt start, UPInt end, const Value& val, Less less)
{
    SPInt first = (SPInt)start;
    SPInt len   = (SPInt)(end - start);
    SPInt half;
    SPInt middle;

    while(len > 0)
    {
        half   = len >> 1;
        middle = first + half;
        if(less(arr[middle], val))
        {
            first = middle + 1;
            len   = len - half - 1;
        }
        else
        {
            len = half;
        }
    }
    return (UPInt)first;
}


//-----------------------------------------------------------------------------------
// ***** LowerBoundSliced
//
template<class Array, class Value>
UPInt LowerBoundSliced(const Array& arr, UPInt start, UPInt end, const Value& val)
{
    return LowerBoundSliced(arr, start, end, val, OperatorLess<Value>::Compare);
}

//-----------------------------------------------------------------------------------
// ***** LowerBoundSized
//
template<class Array, class Value>
UPInt LowerBoundSized(const Array& arr, UPInt size, const Value& val)
{
    return LowerBoundSliced(arr, 0, size, val, OperatorLess<Value>::Compare);
}

//-----------------------------------------------------------------------------------
// ***** LowerBound
//
template<class Array, class Value, class Less>
UPInt LowerBound(const Array& arr, const Value& val, Less less)
{
    return LowerBoundSliced(arr, 0, arr.GetSize(), val, less);
}


//-----------------------------------------------------------------------------------
// ***** LowerBound
//
template<class Array, class Value>
UPInt LowerBound(const Array& arr, const Value& val)
{
    return LowerBoundSliced(arr, 0, arr.GetSize(), val, OperatorLess<Value>::Compare);
}



//-----------------------------------------------------------------------------------
// ***** UpperBoundSliced
//
// Returns the index of the first element in [start, end) that val is less than,
// i.e. the first element strictly greater than val.
template<class Array, class Value, class Less>
UPInt UpperBoundSliced(const Array& arr, UPInt start, UPInt end, const Value& val, Less less)
{
    SPInt first = (SPInt)start;
    SPInt len   = (SPInt)(end - start);
    SPInt half;
    SPInt middle;

    while(len > 0)
    {
        half   = len >> 1;
        middle = first + half;
        if(less(val, arr[middle]))
        {
            len = half;
        }
        else
        {
            first = middle + 1;
            len   = len - half - 1;
        }
    }
    return (UPInt)first;
}


//-----------------------------------------------------------------------------------
// ***** UpperBoundSliced
//
template<class Array, class Value>
UPInt UpperBoundSliced(const Array& arr, UPInt start, UPInt end, const Value& val)
{
    return UpperBoundSliced(arr, start, end, val, OperatorLess<Value>::Compare);
}


//-----------------------------------------------------------------------------------
// ***** UpperBoundSized
//
template<class Array, class Value>
UPInt UpperBoundSized(const Array& arr, UPInt size, const Value& val)
{
    return UpperBoundSliced(arr, 0, size, val, OperatorLess<Value>::Compare);
}


//-----------------------------------------------------------------------------------
// ***** UpperBound
//
template<class Array, class Value, class Less>
UPInt UpperBound(const Array& arr, const Value& val, Less less)
{
    return UpperBoundSliced(arr, 0, arr.GetSize(), val, less);
}


//-----------------------------------------------------------------------------------
// ***** UpperBound
//
template<class Array, class Value>
UPInt UpperBound(const Array& arr, const Value& val)
{
    return UpperBoundSliced(arr, 0, arr.GetSize(), val, OperatorLess<Value>::Compare);
}
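
// Illustrative sketch (not part of the original header): on a sorted container,
// LowerBound returns the index of the first element >= val and UpperBound the index
// of the first element > val, so [LowerBound, UpperBound) spans the elements equal
// to val and either index is a valid insertion point that keeps the data sorted.
//
//     static const int sorted[6] = { 1, 3, 3, 3, 8, 10 };
//     ConstArrayAdaptor<int> view(sorted, 6);    // declared further down
//     UPInt lo    = LowerBound(view, 3);         // 1
//     UPInt hi    = UpperBound(view, 3);         // 4
//     UPInt count = hi - lo;                     // three occurrences of 3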


//-----------------------------------------------------------------------------------
// ***** ReverseArray
//
template<class Array> void ReverseArray(Array& arr)
{
    SPInt from = 0;
    SPInt to   = arr.GetSize() - 1;
    while(from < to)
    {
        Swap(arr[from], arr[to]);
        ++from;
        --to;
    }
}


// ***** AppendArray
//
template<class CDst, class CSrc>
void AppendArray(CDst& dst, const CSrc& src)
{
    UPInt i;
    for(i = 0; i < src.GetSize(); i++)
        dst.PushBack(src[i]);
}

//-----------------------------------------------------------------------------------
// ***** ArrayAdaptor
//
// A simple adapter that provides the GetSize() method and overloads
// operator []. Used to wrap plain arrays in QuickSort and such.
template<class T> class ArrayAdaptor
{
public:
    typedef T ValueType;
    ArrayAdaptor() : Data(0), Size(0) {}
    ArrayAdaptor(T* ptr, UPInt size) : Data(ptr), Size(size) {}
    UPInt GetSize() const { return Size; }
    const T& operator [] (UPInt i) const { return Data[i]; }
          T& operator [] (UPInt i)       { return Data[i]; }
private:
    T*    Data;
    UPInt Size;
};


//-----------------------------------------------------------------------------------
// ***** ConstArrayAdaptor
//
// A simple const adapter that provides the GetSize() method and overloads
// operator []. Used to wrap plain arrays in LowerBound and such.
template<class T> class ConstArrayAdaptor
{
public:
    typedef T ValueType;
    ConstArrayAdaptor() : Data(0), Size(0) {}
    ConstArrayAdaptor(const T* ptr, UPInt size) : Data(ptr), Size(size) {}
    UPInt GetSize() const { return Size; }
    const T& operator [] (UPInt i) const { return Data[i]; }
private:
    const T* Data;
    UPInt    Size;
};
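
// Illustrative sketch (not part of the original header): the adaptors above are what
// let the sort and bound helpers in this file operate on plain C arrays.
//
//     int scratch[4] = { 9, 2, 7, 2 };
//     ArrayAdaptor<int> wrap(scratch, 4);
//     QuickSort(wrap);                            // scratch becomes { 2, 2, 7, 9 }
//
//     static const float kLevels[3] = { 0.25f, 0.5f, 1.0f };
//     ConstArrayAdaptor<float> view(kLevels, 3);
//     UPInt slot = LowerBound(view, 0.6f);        // 2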



//-----------------------------------------------------------------------------------
extern const UByte UpperBitTable[256];
extern const UByte LowerBitTable[256];



//-----------------------------------------------------------------------------------
inline UByte UpperBit(UPInt val)
{
#ifndef OVR_64BIT_POINTERS

    if (val & 0xFFFF0000)
    {
        return (val & 0xFF000000) ?
            UpperBitTable[(val >> 24)       ] + 24:
            UpperBitTable[(val >> 16) & 0xFF] + 16;
    }
    return (val & 0xFF00) ?
        UpperBitTable[(val >> 8) & 0xFF] + 8:
        UpperBitTable[(val     ) & 0xFF];

#else

    if (val & 0xFFFFFFFF00000000)
    {
        if (val & 0xFFFF000000000000)
        {
            return (val & 0xFF00000000000000) ?
                UpperBitTable[(val >> 56)       ] + 56:
                UpperBitTable[(val >> 48) & 0xFF] + 48;
        }
        return (val & 0xFF0000000000) ?
            UpperBitTable[(val >> 40) & 0xFF] + 40:
            UpperBitTable[(val >> 32) & 0xFF] + 32;
    }
    else
    {
        if (val & 0xFFFF0000)
        {
            return (val & 0xFF000000) ?
                UpperBitTable[(val >> 24)       ] + 24:
                UpperBitTable[(val >> 16) & 0xFF] + 16;
        }
        return (val & 0xFF00) ?
            UpperBitTable[(val >> 8) & 0xFF] + 8:
            UpperBitTable[(val     ) & 0xFF];
    }

#endif
}

//-----------------------------------------------------------------------------------
inline UByte LowerBit(UPInt val)
{
#ifndef OVR_64BIT_POINTERS

    if (val & 0xFFFF)
    {
        return (val & 0xFF) ?
            LowerBitTable[ val        & 0xFF]:
            LowerBitTable[(val >> 8)  & 0xFF] + 8;
    }
    return (val & 0xFF0000) ?
        LowerBitTable[(val >> 16) & 0xFF] + 16:
        LowerBitTable[(val >> 24) & 0xFF] + 24;

#else

    if (val & 0xFFFFFFFF)
    {
        if (val & 0xFFFF)
        {
            return (val & 0xFF) ?
                LowerBitTable[ val        & 0xFF]:
                LowerBitTable[(val >> 8)  & 0xFF] + 8;
        }
        return (val & 0xFF0000) ?
            LowerBitTable[(val >> 16) & 0xFF] + 16:
            LowerBitTable[(val >> 24) & 0xFF] + 24;
    }
    else
    {
        if (val & 0xFFFF00000000)
        {
            return (val & 0xFF00000000) ?
                LowerBitTable[(val >> 32) & 0xFF] + 32:
                LowerBitTable[(val >> 40) & 0xFF] + 40;
        }
        return (val & 0xFF000000000000) ?
            LowerBitTable[(val >> 48) & 0xFF] + 48:
            LowerBitTable[(val >> 56) & 0xFF] + 56;
    }

#endif
}
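
// Illustrative sketch (not part of the original header): assuming UpperBitTable and
// LowerBitTable hold, for every byte value, the index of its highest and lowest set
// bit respectively (which is what the shift-and-add logic above relies on), the two
// helpers return bit indices within the full word. For example, 0x1400 has bits 12
// and 10 set:
//
//     UpperBit(0x1400) == 12    // most significant set bit
//     LowerBit(0x1400) == 10    // least significant set bit
//     UpperBit(1)      == 0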



// ******* Special (optimized) memory routines
// Note: null (bad) pointers are not tested
class MemUtil
{
public:

    // Memory compare
    static int Cmp  (const void* p1, const void* p2, UPInt byteCount)  { return memcmp(p1, p2, byteCount); }
    static int Cmp16(const void* p1, const void* p2, UPInt int16Count);
    static int Cmp32(const void* p1, const void* p2, UPInt int32Count);
    static int Cmp64(const void* p1, const void* p2, UPInt int64Count);
};

// ** Inline Implementation

inline int MemUtil::Cmp16(const void* p1, const void* p2, UPInt int16Count)
{
    SInt16*  pa = (SInt16*)p1;
    SInt16*  pb = (SInt16*)p2;
    unsigned ic = 0;
    if (int16Count == 0)
        return 0;
    while (pa[ic] == pb[ic])
        if (++ic == int16Count)
            return 0;
    return pa[ic] > pb[ic] ? 1 : -1;
}
inline int MemUtil::Cmp32(const void* p1, const void* p2, UPInt int32Count)
{
    SInt32*  pa = (SInt32*)p1;
    SInt32*  pb = (SInt32*)p2;
    unsigned ic = 0;
    if (int32Count == 0)
        return 0;
    while (pa[ic] == pb[ic])
        if (++ic == int32Count)
            return 0;
    return pa[ic] > pb[ic] ? 1 : -1;
}
inline int MemUtil::Cmp64(const void* p1, const void* p2, UPInt int64Count)
{
    SInt64*  pa = (SInt64*)p1;
    SInt64*  pb = (SInt64*)p2;
    unsigned ic = 0;
    if (int64Count == 0)
        return 0;
    while (pa[ic] == pb[ic])
        if (++ic == int64Count)
            return 0;
    return pa[ic] > pb[ic] ? 1 : -1;
}

// ** End Inline Implementation
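
// Illustrative sketch (not part of the original header): the typed variants compare
// element-wise, with the element width given by the name and the count measured in
// elements rather than bytes. The result is 0 when equal, otherwise +1 or -1
// according to the first differing element.
//
//     UInt16 a[3] = { 1, 2, 3 };
//     UInt16 b[3] = { 1, 2, 4 };
//     int r16 = MemUtil::Cmp16(a, b, 3);          // -1, differs at the last element
//     int rb  = MemUtil::Cmp(a, b, sizeof(a));    // byte-wise memcmp of the same data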


//-----------------------------------------------------------------------------------
// ******* Byte Order Conversions
namespace ByteUtil {

// *** Swap Byte Order

// Swap the byte order of a byte array
inline void SwapOrder(void* pv, int size)
{
    UByte* pb = (UByte*)pv;
    UByte  temp;
    for (int i = 0; i < size>>1; i++)
    {
        temp         = pb[size-1-i];
        pb[size-1-i] = pb[i];
        pb[i]        = temp;
    }
}

// Swap the byte order of primitive types
inline UByte  SwapOrder(UByte v)  { return v; }
inline SByte  SwapOrder(SByte v)  { return v; }
inline UInt16 SwapOrder(UInt16 v) { return UInt16(v>>8)|UInt16(v<<8); }
inline SInt16 SwapOrder(SInt16 v) { return SInt16((UInt16(v)>>8)|(v<<8)); }
inline UInt32 SwapOrder(UInt32 v) { return (v>>24)|((v&0x00FF0000)>>8)|((v&0x0000FF00)<<8)|(v<<24); }
inline SInt32 SwapOrder(SInt32 p) { return (SInt32)SwapOrder(UInt32(p)); }
inline UInt64 SwapOrder(UInt64 v)
{
    return (v>>56) |
           ((v&UInt64(0x00FF000000000000))>>40) |
           ((v&UInt64(0x0000FF0000000000))>>24) |
           ((v&UInt64(0x000000FF00000000))>>8)  |
           ((v&UInt64(0x00000000FF000000))<<8)  |
           ((v&UInt64(0x0000000000FF0000))<<24) |
           ((v&UInt64(0x000000000000FF00))<<40) |
           (v<<56);
}
inline SInt64 SwapOrder(SInt64 v) { return (SInt64)SwapOrder(UInt64(v)); }
inline float  SwapOrder(float p)
{
    union {
        float  p;
        UInt32 v;
    } u;
    u.p = p;
    u.v = SwapOrder(u.v);
    return u.p;
}

inline double SwapOrder(double p)
{
    union {
        double p;
        UInt64 v;
    } u;
    u.p = p;
    u.v = SwapOrder(u.v);
    return u.p;
}
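
// Illustrative sketch (not part of the original header): SwapOrder reverses the byte
// order of its argument, so applying it twice is the identity.
//
//     SwapOrder(UInt16(0x1234))     == 0x3412
//     SwapOrder(UInt32(0x11223344)) == 0x44332211
//     char buf[4] = { 'a', 'b', 'c', 'd' };
//     SwapOrder(buf, 4);            // buf is now { 'd', 'c', 'b', 'a' }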

// *** Byte-order conversion

#if (OVR_BYTE_ORDER == OVR_LITTLE_ENDIAN)
// Little Endian to System (LE)
inline UByte  LEToSystem(UByte  v) { return v; }
inline SByte  LEToSystem(SByte  v) { return v; }
inline UInt16 LEToSystem(UInt16 v) { return v; }
inline SInt16 LEToSystem(SInt16 v) { return v; }
inline UInt32 LEToSystem(UInt32 v) { return v; }
inline SInt32 LEToSystem(SInt32 v) { return v; }
inline UInt64 LEToSystem(UInt64 v) { return v; }
inline SInt64 LEToSystem(SInt64 v) { return v; }
inline float  LEToSystem(float  v) { return v; }
inline double LEToSystem(double v) { return v; }

// Big Endian to System (LE)
inline UByte  BEToSystem(UByte  v) { return SwapOrder(v); }
inline SByte  BEToSystem(SByte  v) { return SwapOrder(v); }
inline UInt16 BEToSystem(UInt16 v) { return SwapOrder(v); }
inline SInt16 BEToSystem(SInt16 v) { return SwapOrder(v); }
inline UInt32 BEToSystem(UInt32 v) { return SwapOrder(v); }
inline SInt32 BEToSystem(SInt32 v) { return SwapOrder(v); }
inline UInt64 BEToSystem(UInt64 v) { return SwapOrder(v); }
inline SInt64 BEToSystem(SInt64 v) { return SwapOrder(v); }
inline float  BEToSystem(float  v) { return SwapOrder(v); }
inline double BEToSystem(double v) { return SwapOrder(v); }

// System (LE) to Little Endian
inline UByte  SystemToLE(UByte  v) { return v; }
inline SByte  SystemToLE(SByte  v) { return v; }
inline UInt16 SystemToLE(UInt16 v) { return v; }
inline SInt16 SystemToLE(SInt16 v) { return v; }
inline UInt32 SystemToLE(UInt32 v) { return v; }
inline SInt32 SystemToLE(SInt32 v) { return v; }
inline UInt64 SystemToLE(UInt64 v) { return v; }
inline SInt64 SystemToLE(SInt64 v) { return v; }
inline float  SystemToLE(float  v) { return v; }
inline double SystemToLE(double v) { return v; }

// System (LE) to Big Endian
inline UByte  SystemToBE(UByte  v) { return SwapOrder(v); }
inline SByte  SystemToBE(SByte  v) { return SwapOrder(v); }
inline UInt16 SystemToBE(UInt16 v) { return SwapOrder(v); }
inline SInt16 SystemToBE(SInt16 v) { return SwapOrder(v); }
inline UInt32 SystemToBE(UInt32 v) { return SwapOrder(v); }
inline SInt32 SystemToBE(SInt32 v) { return SwapOrder(v); }
inline UInt64 SystemToBE(UInt64 v) { return SwapOrder(v); }
inline SInt64 SystemToBE(SInt64 v) { return SwapOrder(v); }
inline float  SystemToBE(float  v) { return SwapOrder(v); }
inline double SystemToBE(double v) { return SwapOrder(v); }

#elif (OVR_BYTE_ORDER == OVR_BIG_ENDIAN)
// Little Endian to System (BE)
inline UByte  LEToSystem(UByte  v) { return SwapOrder(v); }
inline SByte  LEToSystem(SByte  v) { return SwapOrder(v); }
inline UInt16 LEToSystem(UInt16 v) { return SwapOrder(v); }
inline SInt16 LEToSystem(SInt16 v) { return SwapOrder(v); }
inline UInt32 LEToSystem(UInt32 v) { return SwapOrder(v); }
inline SInt32 LEToSystem(SInt32 v) { return SwapOrder(v); }
inline UInt64 LEToSystem(UInt64 v) { return SwapOrder(v); }
inline SInt64 LEToSystem(SInt64 v) { return SwapOrder(v); }
inline float  LEToSystem(float  v) { return SwapOrder(v); }
inline double LEToSystem(double v) { return SwapOrder(v); }

// Big Endian to System (BE)
inline UByte  BEToSystem(UByte  v) { return v; }
inline SByte  BEToSystem(SByte  v) { return v; }
inline UInt16 BEToSystem(UInt16 v) { return v; }
inline SInt16 BEToSystem(SInt16 v) { return v; }
inline UInt32 BEToSystem(UInt32 v) { return v; }
inline SInt32 BEToSystem(SInt32 v) { return v; }
inline UInt64 BEToSystem(UInt64 v) { return v; }
inline SInt64 BEToSystem(SInt64 v) { return v; }
inline float  BEToSystem(float  v) { return v; }
inline double BEToSystem(double v) { return v; }

// System (BE) to Little Endian
inline UByte  SystemToLE(UByte  v) { return SwapOrder(v); }
inline SByte  SystemToLE(SByte  v) { return SwapOrder(v); }
inline UInt16 SystemToLE(UInt16 v) { return SwapOrder(v); }
inline SInt16 SystemToLE(SInt16 v) { return SwapOrder(v); }
inline UInt32 SystemToLE(UInt32 v) { return SwapOrder(v); }
inline SInt32 SystemToLE(SInt32 v) { return SwapOrder(v); }
inline UInt64 SystemToLE(UInt64 v) { return SwapOrder(v); }
inline SInt64 SystemToLE(SInt64 v) { return SwapOrder(v); }
inline float  SystemToLE(float  v) { return SwapOrder(v); }
inline double SystemToLE(double v) { return SwapOrder(v); }

// System (BE) to Big Endian
inline UByte  SystemToBE(UByte  v) { return v; }
inline SByte  SystemToBE(SByte  v) { return v; }
inline UInt16 SystemToBE(UInt16 v) { return v; }
inline SInt16 SystemToBE(SInt16 v) { return v; }
inline UInt32 SystemToBE(UInt32 v) { return v; }
inline SInt32 SystemToBE(SInt32 v) { return v; }
inline UInt64 SystemToBE(UInt64 v) { return v; }
inline SInt64 SystemToBE(SInt64 v) { return v; }
inline float  SystemToBE(float  v) { return v; }
inline double SystemToBE(double v) { return v; }

#else
#error "OVR_BYTE_ORDER must be defined to OVR_LITTLE_ENDIAN or OVR_BIG_ENDIAN"
#endif

} // namespace ByteUtil
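
// Illustrative sketch (not part of the original header): the usual pattern for
// serialized data is to convert at the boundary, e.g. when reading a little-endian
// field out of a byte buffer regardless of the host byte order. "buffer" and
// "offset" below are hypothetical placeholders.
//
//     UInt32 raw;
//     memcpy(&raw, buffer + offset, sizeof(raw));       // bytes as stored on disk/wire
//     UInt32 value = ByteUtil::LEToSystem(raw);         // now in host byte order
//     UInt32 out   = ByteUtil::SystemToLE(value);       // and back again for writing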



}} // OVR::Alg

#endif