/************************************************************************************

PublicHeader:   OVR.h
Filename    :   OVR_Atomic.h
Content     :   Contains atomic operations and inline fastest locking
                functionality. Will contain #ifdefs for OS efficiency.
                Have non-thread-safe implementation if not available.
Created     :   September 19, 2012
Notes       :

Copyright   :   Copyright 2012 Oculus VR, Inc. All Rights reserved.

Use of this software is subject to the terms of the Oculus license
agreement provided at the time of installation or download, or which
otherwise accompanies this software in either electronic or hard copy form.

************************************************************************************/
#ifndef OVR_Atomic_h
#define OVR_Atomic_h

#include "OVR_Types.h"

// Include System thread functionality.
#if defined(OVR_OS_WIN32)
#include <windows.h>
#else
#include <pthread.h>
#endif


namespace OVR {


// ****** Declared classes

// If there is NO thread support we implement AtomicOps and
// Lock objects as no-ops. The other classes are not defined.
template<class C> class AtomicOps;
template<class T> class AtomicInt;
template<class T> class AtomicPtr;

class Lock;


//-----------------------------------------------------------------------------------
// ***** AtomicOps

// Atomic operations are provided by the AtomicOps template class,
// implemented through system-specific AtomicOpsRaw specializations.
// It provides several fundamental operations such as Exchange, ExchangeAdd,
// CompareAndSet, and Store_Release. Each function includes several memory
// synchronization versions, important for multiprocessing CPUs with weak
// memory consistency. The following memory fencing strategies are supported:
//
//  - NoSync.  No memory synchronization is done for the atomic op.
//  - Release. All other memory writes are completed before the atomic op
//             writes its results.
//  - Acquire. Further memory reads are forced to wait until the atomic op
//             executes, guaranteeing that the right values will be seen.
//  - Sync.    A combination of Release and Acquire.
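
// A minimal usage sketch (illustrative, not part of the original header; the
// scenario is assumed). Store_Release publishes data before the flag write;
// Load_Acquire guarantees that reads after it see the published data.
//
//     volatile UInt32 DataReady = 0;
//     // Producer thread: fill in shared data, then:
//     AtomicOps<UInt32>::Store_Release(&DataReady, 1);
//     // Consumer thread:
//     if (AtomicOps<UInt32>::Load_Acquire(&DataReady))
//     { /* shared data written before the store is now safe to read */ }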


// *** AtomicOpsRaw

// AtomicOpsRaw is a specialized template that provides atomic operations
// used by AtomicOps. This class has two fundamental qualities: (1) it
// defines a type T of correct size, and (2) provides operations that work
// atomically, such as Exchange_Sync and CompareAndSet_Release.

// AtomicOpsRawBase class contains shared constants/classes for AtomicOpsRaw.
// The primary thing it does is define sync class objects, whose destructor and
// constructor provide places to insert appropriate synchronization calls, on
// systems where such calls are necessary. So far, the breakdown is as follows:
//
//  - X86 systems don't need custom syncs, since their exchange/atomic
//    instructions are implicitly synchronized.
//  - PowerPC requires lwsync/isync instructions that can use this mechanism.
//  - If some other systems require a mechanism where syncing type is associated
//    with a particular instruction, the default implementation (which implements
//    all Sync, Acquire, and Release modes in terms of NoSync and fence) may not
//    work. In that case it will need to be #ifdef-ed conditionally.
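
// Sketch of the sync-object pattern described above (illustrative only):
// the constructor fences before the wrapped NoSync op and the destructor
// fences after it, so declaring one on the stack brackets the operation.
//
//     // Release flavor: fence first (e.g. "sync" on PPC), then do the op.
//     { ReleaseSync fence; old = Exchange_NoSync(p, val); }
//     // Acquire flavor: do the op, then fence (e.g. "isync" on PPC).
//     { AcquireSync fence; old = Exchange_NoSync(p, val); }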

struct AtomicOpsRawBase
{
#if !defined(OVR_ENABLE_THREADS) || defined(OVR_CPU_X86) || defined(OVR_OS_WIN32) || defined(OVR_OS_IPHONE)
    // Need to have empty constructor to avoid class 'unused' variable warning.
    struct FullSync { inline FullSync() { } };
    struct AcquireSync { inline AcquireSync() { } };
    struct ReleaseSync { inline ReleaseSync() { } };

#elif defined(OVR_CPU_PPC64) || defined(OVR_CPU_PPC)
    struct FullSync { inline FullSync() { asm volatile("sync\n"); } ~FullSync() { asm volatile("isync\n"); } };
    struct AcquireSync { inline AcquireSync() { } ~AcquireSync() { asm volatile("isync\n"); } };
    struct ReleaseSync { inline ReleaseSync() { asm volatile("sync\n"); } };

#elif defined(OVR_CPU_MIPS)
    struct FullSync { inline FullSync() { asm volatile("sync\n"); } ~FullSync() { asm volatile("sync\n"); } };
    struct AcquireSync { inline AcquireSync() { } ~AcquireSync() { asm volatile("sync\n"); } };
    struct ReleaseSync { inline ReleaseSync() { asm volatile("sync\n"); } };

#elif defined(OVR_CPU_ARM)
    struct FullSync { inline FullSync() { asm volatile("dmb\n"); } ~FullSync() { asm volatile("dmb\n"); } };
    struct AcquireSync { inline AcquireSync() { } ~AcquireSync() { asm volatile("dmb\n"); } };
    struct ReleaseSync { inline ReleaseSync() { asm volatile("dmb\n"); } };


#elif defined(OVR_CC_GNU) && (__GNUC__ >= 4)
    // __sync functions are already full sync
    struct FullSync { inline FullSync() { } };
    struct AcquireSync { inline AcquireSync() { } };
    struct ReleaseSync { inline ReleaseSync() { } };
#endif
};


// 4-Byte raw data atomic op implementation class.
struct AtomicOpsRaw_4ByteImpl : public AtomicOpsRawBase
{
#if !defined(OVR_ENABLE_THREADS)

    // Provide a type for no-thread-support cases. Used by AtomicOpsRaw_DefImpl.
    typedef UInt32 T;

    // *** Thread - Safe Atomic Versions.

#elif defined(OVR_OS_WIN32)

    // Use special definitions for VC6, where volatile is not used and
    // InterlockedCompareExchange is declared incorrectly.
    typedef LONG T;
#if defined(OVR_CC_MSVC) && (OVR_CC_MSVC < 1300)
    typedef T* InterlockTPtr;
    typedef LPVOID ET;
    typedef ET* InterlockETPtr;
#else
    typedef volatile T* InterlockTPtr;
    typedef T ET;
    typedef InterlockTPtr InterlockETPtr;
#endif
    inline static T     Exchange_NoSync(volatile T* p, T val)            { return InterlockedExchange((InterlockTPtr)p, val); }
    inline static T     ExchangeAdd_NoSync(volatile T* p, T val)         { return InterlockedExchangeAdd((InterlockTPtr)p, val); }
    inline static bool  CompareAndSet_NoSync(volatile T* p, T c, T val)  { return InterlockedCompareExchange((InterlockETPtr)p, (ET)val, (ET)c) == (ET)c; }

#elif defined(OVR_CPU_PPC64) || defined(OVR_CPU_PPC)
    typedef UInt32 T;
    static inline UInt32 Exchange_NoSync(volatile UInt32 *i, UInt32 j)
    {
        UInt32 ret;

        asm volatile("1:\n\t"
                     "lwarx  %[r],0,%[i]\n\t"
                     "stwcx. %[j],0,%[i]\n\t"
                     "bne-   1b\n"
                     : "+m" (*i), [r] "=&b" (ret) : [i] "b" (i), [j] "b" (j) : "cc", "memory");

        return ret;
    }

    static inline UInt32 ExchangeAdd_NoSync(volatile UInt32 *i, UInt32 j)
    {
        UInt32 dummy, ret;

        asm volatile("1:\n\t"
                     "lwarx  %[r],0,%[i]\n\t"
                     "add    %[o],%[r],%[j]\n\t"
                     "stwcx. %[o],0,%[i]\n\t"
                     "bne-   1b\n"
                     : "+m" (*i), [r] "=&b" (ret), [o] "=&r" (dummy) : [i] "b" (i), [j] "b" (j) : "cc", "memory");

        return ret;
    }

    static inline bool CompareAndSet_NoSync(volatile UInt32 *i, UInt32 c, UInt32 value)
    {
        UInt32 ret;

        asm volatile("1:\n\t"
                     "lwarx  %[r],0,%[i]\n\t"
                     "cmpw   0,%[r],%[cmp]\n\t"
                     "mfcr   %[r]\n\t"
                     "bne-   2f\n\t"
                     "stwcx. %[val],0,%[i]\n\t"
                     "bne-   1b\n\t"
                     "2:\n"
                     : "+m" (*i), [r] "=&b" (ret) : [i] "b" (i), [cmp] "b" (c), [val] "b" (value) : "cc", "memory");

        return (ret & 0x20000000) ? 1 : 0;
    }

#elif defined(OVR_CPU_MIPS)
    typedef UInt32 T;

    static inline UInt32 Exchange_NoSync(volatile UInt32 *i, UInt32 j)
    {
        UInt32 ret;

        // Note: sc overwrites %[j] with the store status, so j is in-out.
        asm volatile("1:\n\t"
                     "ll     %[r],0(%[i])\n\t"
                     "sc     %[j],0(%[i])\n\t"
                     "beq    %[j],$0,1b\n\t"
                     "nop    \n"
                     : "+m" (*i), [r] "=&d" (ret), [j] "+d" (j) : [i] "d" (i) : "cc", "memory");

        return ret;
    }

    static inline UInt32 ExchangeAdd_NoSync(volatile UInt32 *i, UInt32 j)
    {
        UInt32 ret;

        // Note: %[j] holds the sum and then the sc status, so j is in-out.
        asm volatile("1:\n\t"
                     "ll     %[r],0(%[i])\n\t"
                     "addu   %[j],%[r],%[j]\n\t"
                     "sc     %[j],0(%[i])\n\t"
                     "beq    %[j],$0,1b\n\t"
                     "nop    \n"
                     : "+m" (*i), [r] "=&d" (ret), [j] "+d" (j) : [i] "d" (i) : "cc", "memory");

        return ret;
    }

    static inline bool CompareAndSet_NoSync(volatile UInt32 *i, UInt32 c, UInt32 value)
    {
        UInt32 ret, dummy;

        asm volatile("1:\n\t"
                     "move   %[r],$0\n\t"
                     "ll     %[o],0(%[i])\n\t"
                     "bne    %[o],%[c],2f\n\t"
                     "move   %[r],%[v]\n\t"
                     "sc     %[r],0(%[i])\n\t"
                     "beq    %[r],$0,1b\n\t"
                     "nop    \n\t"
                     "2:\n"
                     : "+m" (*i), [r] "=&d" (ret), [o] "=&d" (dummy) : [i] "d" (i), [c] "d" (c), [v] "d" (value)
                     : "cc", "memory");

        return ret;
    }

#elif defined(OVR_CPU_ARM) && defined(OVR_CC_ARM)
    typedef UInt32 T;

    static inline UInt32 Exchange_NoSync(volatile UInt32 *i, UInt32 j)
    {
        for(;;)
        {
            T r = __ldrex(i);
            if (__strex(j, i) == 0)
                return r;
        }
    }
    static inline UInt32 ExchangeAdd_NoSync(volatile UInt32 *i, UInt32 j)
    {
        for(;;)
        {
            T r = __ldrex(i);
            if (__strex(r + j, i) == 0)
                return r;
        }
    }

    static inline bool CompareAndSet_NoSync(volatile UInt32 *i, UInt32 c, UInt32 value)
    {
        for(;;)
        {
            T r = __ldrex(i);
            if (r != c)
                return 0;
            if (__strex(value, i) == 0)
                return 1;
        }
    }

#elif defined(OVR_CPU_ARM)
    typedef UInt32 T;

    static inline UInt32 Exchange_NoSync(volatile UInt32 *i, UInt32 j)
    {
        UInt32 ret, dummy;

        asm volatile("1:\n\t"
                     "ldrex  %[r],[%[i]]\n\t"
                     "strex  %[t],%[j],[%[i]]\n\t"
                     "cmp    %[t],#0\n\t"
                     "bne    1b\n\t"
                     : "+m" (*i), [r] "=&r" (ret), [t] "=&r" (dummy) : [i] "r" (i), [j] "r" (j) : "cc", "memory");

        return ret;
    }

    static inline UInt32 ExchangeAdd_NoSync(volatile UInt32 *i, UInt32 j)
    {
        UInt32 ret, dummy, test;

        asm volatile("1:\n\t"
                     "ldrex  %[r],[%[i]]\n\t"
                     "add    %[o],%[r],%[j]\n\t"
                     "strex  %[t],%[o],[%[i]]\n\t"
                     "cmp    %[t],#0\n\t"
                     "bne    1b\n\t"
                     : "+m" (*i), [r] "=&r" (ret), [o] "=&r" (dummy), [t] "=&r" (test) : [i] "r" (i), [j] "r" (j) : "cc", "memory");

        return ret;
    }

    static inline bool CompareAndSet_NoSync(volatile UInt32 *i, UInt32 c, UInt32 value)
    {
        UInt32 ret = 1, dummy, test;

        // %[r] is in-out ("+&r"): it is preset to 1 so that a failed compare,
        // which skips the strex, makes the function return false.
        asm volatile("1:\n\t"
                     "ldrex  %[o],[%[i]]\n\t"
                     "cmp    %[o],%[c]\n\t"
                     "bne    2f\n\t"
                     "strex  %[r],%[v],[%[i]]\n\t"
                     "cmp    %[r],#0\n\t"
                     "bne    1b\n\t"
                     "2:\n"
                     : "+m" (*i), [r] "+&r" (ret), [o] "=&r" (dummy), [t] "=&r" (test) : [i] "r" (i), [c] "r" (c), [v] "r" (value)
                     : "cc", "memory");

        return !ret;
    }

#elif defined(OVR_CPU_X86)
    typedef UInt32 T;

    static inline UInt32 Exchange_NoSync(volatile UInt32 *i, UInt32 j)
    {
        asm volatile("xchgl %1,%[i]\n"
                     : "+m" (*i), "=q" (j) : [i] "m" (*i), "1" (j) : "cc", "memory");

        return j;
    }

    static inline UInt32 ExchangeAdd_NoSync(volatile UInt32 *i, UInt32 j)
    {
        asm volatile("lock; xaddl %1,%[i]\n"
                     : "+m" (*i), "+q" (j) : [i] "m" (*i) : "cc", "memory");

        return j;
    }

    static inline bool CompareAndSet_NoSync(volatile UInt32 *i, UInt32 c, UInt32 value)
    {
        UInt32 ret;

        asm volatile("lock; cmpxchgl %[v],%[i]\n"
                     : "+m" (*i), "=a" (ret) : [i] "m" (*i), "1" (c), [v] "q" (value) : "cc", "memory");

        return (ret == c);
    }

#elif defined(OVR_CC_GNU) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1))

    typedef UInt32 T;

    static inline T Exchange_NoSync(volatile T *i, T j)
    {
        T v;
        do {
            v = *i;
        } while (!__sync_bool_compare_and_swap(i, v, j));
        return v;
    }

    static inline T ExchangeAdd_NoSync(volatile T *i, T j)
    {
        return __sync_fetch_and_add(i, j);
    }

    static inline bool CompareAndSet_NoSync(volatile T *i, T c, T value)
    {
        return __sync_bool_compare_and_swap(i, c, value);
    }

#endif // OS
};


// 8-Byte raw data atomic op implementation class.
// Currently an implementation is provided only on systems with 64-bit pointers.
struct AtomicOpsRaw_8ByteImpl : public AtomicOpsRawBase
{
#if !defined(OVR_64BIT_POINTERS) || !defined(OVR_ENABLE_THREADS)

    // Provide a type for no-thread-support cases. Used by AtomicOpsRaw_DefImpl.
    typedef UInt64 T;

    // *** Thread - Safe OS specific versions.
#elif defined(OVR_OS_WIN32)

    // This is only for 64-bit systems.
    typedef LONG64 T;
    typedef volatile T* InterlockTPtr;
    inline static T     Exchange_NoSync(volatile T* p, T val)            { return InterlockedExchange64((InterlockTPtr)p, val); }
    inline static T     ExchangeAdd_NoSync(volatile T* p, T val)         { return InterlockedExchangeAdd64((InterlockTPtr)p, val); }
    inline static bool  CompareAndSet_NoSync(volatile T* p, T c, T val)  { return InterlockedCompareExchange64((InterlockTPtr)p, val, c) == c; }

#elif defined(OVR_CPU_PPC64)

    typedef UInt64 T;

    static inline UInt64 Exchange_NoSync(volatile UInt64 *i, UInt64 j)
    {
        UInt64 dummy, ret;

        asm volatile("1:\n\t"
                     "ldarx  %[r],0,%[i]\n\t"
                     "mr     %[o],%[j]\n\t"
                     "stdcx. %[o],0,%[i]\n\t"
                     "bne-   1b\n"
                     : "+m" (*i), [r] "=&b" (ret), [o] "=&r" (dummy) : [i] "b" (i), [j] "b" (j) : "cc");

        return ret;
    }

    static inline UInt64 ExchangeAdd_NoSync(volatile UInt64 *i, UInt64 j)
    {
        UInt64 dummy, ret;

        asm volatile("1:\n\t"
                     "ldarx  %[r],0,%[i]\n\t"
                     "add    %[o],%[r],%[j]\n\t"
                     "stdcx. %[o],0,%[i]\n\t"
                     "bne-   1b\n"
                     : "+m" (*i), [r] "=&b" (ret), [o] "=&r" (dummy) : [i] "b" (i), [j] "b" (j) : "cc");

        return ret;
    }

    static inline bool CompareAndSet_NoSync(volatile UInt64 *i, UInt64 c, UInt64 value)
    {
        UInt64 ret, dummy;

        // The compare must be a doubleword compare (cmpd); a word compare
        // would only look at the low 32 bits of the 64-bit values.
        asm volatile("1:\n\t"
                     "ldarx  %[r],0,%[i]\n\t"
                     "cmpd   0,%[r],%[cmp]\n\t"
                     "mfcr   %[r]\n\t"
                     "bne-   2f\n\t"
                     "stdcx. %[val],0,%[i]\n\t"
                     "bne-   1b\n\t"
                     "2:\n"
                     : "+m" (*i), [r] "=&b" (ret), [o] "=&r" (dummy) : [i] "b" (i), [cmp] "b" (c), [val] "b" (value) : "cc");

        return (ret & 0x20000000) ? 1 : 0;
    }

#elif defined(OVR_CC_GNU) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1))

    typedef UInt64 T;

    static inline T Exchange_NoSync(volatile T *i, T j)
    {
        T v;
        do {
            v = *i;
        } while (!__sync_bool_compare_and_swap(i, v, j));
        return v;
    }

    static inline T ExchangeAdd_NoSync(volatile T *i, T j)
    {
        return __sync_fetch_and_add(i, j);
    }

    static inline bool CompareAndSet_NoSync(volatile T *i, T c, T value)
    {
        return __sync_bool_compare_and_swap(i, c, value);
    }

#endif // OS
};


// Default implementation for AtomicOpsRaw; provides implementation of mem-fenced
// atomic operations where fencing is done with a sync object wrapped around a NoSync
// operation implemented in the base class. If such implementation is not possible
// on a given platform, #ifdefs can be used to disable it and then op functions can be
// implemented individually in the appropriate AtomicOpsRaw<size> class.

template<class O>
struct AtomicOpsRaw_DefImpl : public O
{
    typedef typename O::T O_T;
    typedef typename O::FullSync    O_FullSync;
    typedef typename O::AcquireSync O_AcquireSync;
    typedef typename O::ReleaseSync O_ReleaseSync;

    // If there is no thread support, provide the default implementation. In this case,
    // the base class (O) must still provide the T declaration.
#ifndef OVR_ENABLE_THREADS

    // Atomic exchange of val with argument. Returns old val.
    inline static O_T Exchange_NoSync(volatile O_T* p, O_T val)              { O_T old = *p; *p = val; return old; }
    // Adds a new val to argument; returns its old val.
    inline static O_T ExchangeAdd_NoSync(volatile O_T* p, O_T val)           { O_T old = *p; *p += val; return old; }
    // Compares the argument data with 'c' val.
    // If succeeded, stores val into '*p' and returns true; otherwise returns false.
    inline static bool CompareAndSet_NoSync(volatile O_T* p, O_T c, O_T val) { if (*p == c) { *p = val; return 1; } return 0; }

#endif

    // If a NoSync-wrapped implementation is not possible, this block should be
    // replaced with per-function implementations in O.
    // Note the "AtomicOpsRaw_DefImpl<O>::" prefix in the calls below.
    inline static O_T  Exchange_Sync(volatile O_T* p, O_T val)                { O_FullSync    sync; OVR_UNUSED(sync); return AtomicOpsRaw_DefImpl<O>::Exchange_NoSync(p, val); }
    inline static O_T  Exchange_Release(volatile O_T* p, O_T val)             { O_ReleaseSync sync; OVR_UNUSED(sync); return AtomicOpsRaw_DefImpl<O>::Exchange_NoSync(p, val); }
    inline static O_T  Exchange_Acquire(volatile O_T* p, O_T val)             { O_AcquireSync sync; OVR_UNUSED(sync); return AtomicOpsRaw_DefImpl<O>::Exchange_NoSync(p, val); }
    inline static O_T  ExchangeAdd_Sync(volatile O_T* p, O_T val)             { O_FullSync    sync; OVR_UNUSED(sync); return AtomicOpsRaw_DefImpl<O>::ExchangeAdd_NoSync(p, val); }
    inline static O_T  ExchangeAdd_Release(volatile O_T* p, O_T val)          { O_ReleaseSync sync; OVR_UNUSED(sync); return AtomicOpsRaw_DefImpl<O>::ExchangeAdd_NoSync(p, val); }
    inline static O_T  ExchangeAdd_Acquire(volatile O_T* p, O_T val)          { O_AcquireSync sync; OVR_UNUSED(sync); return AtomicOpsRaw_DefImpl<O>::ExchangeAdd_NoSync(p, val); }
    inline static bool CompareAndSet_Sync(volatile O_T* p, O_T c, O_T val)    { O_FullSync    sync; OVR_UNUSED(sync); return AtomicOpsRaw_DefImpl<O>::CompareAndSet_NoSync(p, c, val); }
    inline static bool CompareAndSet_Release(volatile O_T* p, O_T c, O_T val) { O_ReleaseSync sync; OVR_UNUSED(sync); return AtomicOpsRaw_DefImpl<O>::CompareAndSet_NoSync(p, c, val); }
    inline static bool CompareAndSet_Acquire(volatile O_T* p, O_T c, O_T val) { O_AcquireSync sync; OVR_UNUSED(sync); return AtomicOpsRaw_DefImpl<O>::CompareAndSet_NoSync(p, c, val); }

    // Loads and stores with memory fence. These have only the relevant versions.
#ifdef OVR_CPU_X86
    // On X86, Store_Release is implemented as exchange. Note that we can also
    // consider 'sfence' in the future, although it is not as compatible with older CPUs.
    inline static void Store_Release(volatile O_T* p, O_T val)  { Exchange_Release(p, val); }
#else
    inline static void Store_Release(volatile O_T* p, O_T val)  { O_ReleaseSync sync; OVR_UNUSED(sync); *p = val; }
#endif
    inline static O_T  Load_Acquire(const volatile O_T* p)      { O_AcquireSync sync; OVR_UNUSED(sync); return *p; }
};


template<int size>
struct AtomicOpsRaw : public AtomicOpsRawBase { };

template<>
struct AtomicOpsRaw<4> : public AtomicOpsRaw_DefImpl<AtomicOpsRaw_4ByteImpl>
{
    // Ensure that assigned type size is correct.
    AtomicOpsRaw()
    { OVR_COMPILER_ASSERT(sizeof(AtomicOpsRaw_DefImpl<AtomicOpsRaw_4ByteImpl>::T) == 4); }
};
template<>
struct AtomicOpsRaw<8> : public AtomicOpsRaw_DefImpl<AtomicOpsRaw_8ByteImpl>
{
    AtomicOpsRaw()
    { OVR_COMPILER_ASSERT(sizeof(AtomicOpsRaw_DefImpl<AtomicOpsRaw_8ByteImpl>::T) == 8); }
};


// *** AtomicOps - implementation of atomic Ops for specified class

// Implements atomic ops on a class, provided that the object is either
// 4 or 8 bytes in size (depending on the AtomicOpsRaw specializations
// available). Relies on AtomicOpsRaw for much of the implementation.

template<class C>
class AtomicOps
{
    typedef AtomicOpsRaw<sizeof(C)>    Ops;
    typedef typename Ops::T            T;
    typedef volatile typename Ops::T*  PT;
    // We cast through unions to (1) avoid pointer size compiler warnings
    // and (2) ensure that there are no problems with strict pointer aliasing.
    union C2T_union { C c; T t; };

public:
    // General purpose implementation for standard syncs.
    inline static C Exchange_Sync(volatile C* p, C val)        { C2T_union u; u.c = val; u.t = Ops::Exchange_Sync((PT)p, u.t); return u.c; }
    inline static C Exchange_Release(volatile C* p, C val)     { C2T_union u; u.c = val; u.t = Ops::Exchange_Release((PT)p, u.t); return u.c; }
    inline static C Exchange_Acquire(volatile C* p, C val)     { C2T_union u; u.c = val; u.t = Ops::Exchange_Acquire((PT)p, u.t); return u.c; }
    inline static C Exchange_NoSync(volatile C* p, C val)      { C2T_union u; u.c = val; u.t = Ops::Exchange_NoSync((PT)p, u.t); return u.c; }
    inline static C ExchangeAdd_Sync(volatile C* p, C val)     { C2T_union u; u.c = val; u.t = Ops::ExchangeAdd_Sync((PT)p, u.t); return u.c; }
    inline static C ExchangeAdd_Release(volatile C* p, C val)  { C2T_union u; u.c = val; u.t = Ops::ExchangeAdd_Release((PT)p, u.t); return u.c; }
    inline static C ExchangeAdd_Acquire(volatile C* p, C val)  { C2T_union u; u.c = val; u.t = Ops::ExchangeAdd_Acquire((PT)p, u.t); return u.c; }
    inline static C ExchangeAdd_NoSync(volatile C* p, C val)   { C2T_union u; u.c = val; u.t = Ops::ExchangeAdd_NoSync((PT)p, u.t); return u.c; }
    inline static bool CompareAndSet_Sync(volatile C* p, C c, C val)    { C2T_union u, cu; u.c = val; cu.c = c; return Ops::CompareAndSet_Sync((PT)p, cu.t, u.t); }
    inline static bool CompareAndSet_Release(volatile C* p, C c, C val) { C2T_union u, cu; u.c = val; cu.c = c; return Ops::CompareAndSet_Release((PT)p, cu.t, u.t); }
    inline static bool CompareAndSet_Acquire(volatile C* p, C c, C val) { C2T_union u, cu; u.c = val; cu.c = c; return Ops::CompareAndSet_Acquire((PT)p, cu.t, u.t); }
    inline static bool CompareAndSet_NoSync(volatile C* p, C c, C val)  { C2T_union u, cu; u.c = val; cu.c = c; return Ops::CompareAndSet_NoSync((PT)p, cu.t, u.t); }
    // Loads and stores with memory fence. These have only the relevant versions.
    inline static void Store_Release(volatile C* p, C val)  { C2T_union u; u.c = val; Ops::Store_Release((PT)p, u.t); }
    inline static C Load_Acquire(const volatile C* p)       { C2T_union u; u.t = Ops::Load_Acquire((PT)p); return u.c; }
};
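
// Illustrative sketch (assumed scenario, not original text): using
// CompareAndSet to let exactly one thread claim a one-time task.
//
//     volatile UInt32 InitState = 0;   // 0 = unclaimed, 1 = claimed
//     if (AtomicOps<UInt32>::CompareAndSet_Sync(&InitState, 0, 1))
//     {
//         // Only the single thread whose compare-and-set succeeded gets here.
//     }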



// Atomic value base class - implements operations shared for integers and pointers.
template<class T>
class AtomicValueBase
{
protected:
    typedef AtomicOps<T> Ops;
public:

    volatile T  Value;

    inline AtomicValueBase()                { }
    explicit inline AtomicValueBase(T val)  { Ops::Store_Release(&Value, val); }

    // Most libraries (TBB and Joshua Scholar's) do not do a Load_Acquire
    // here, since most algorithms do not require atomic loads. Needs some research.
    inline operator T() const { return Value; }

    // *** Standard Atomic inlines
    inline T     Exchange_Sync(T val)              { return Ops::Exchange_Sync(&Value, val); }
    inline T     Exchange_Release(T val)           { return Ops::Exchange_Release(&Value, val); }
    inline T     Exchange_Acquire(T val)           { return Ops::Exchange_Acquire(&Value, val); }
    inline T     Exchange_NoSync(T val)            { return Ops::Exchange_NoSync(&Value, val); }
    inline bool  CompareAndSet_Sync(T c, T val)    { return Ops::CompareAndSet_Sync(&Value, c, val); }
    inline bool  CompareAndSet_Release(T c, T val) { return Ops::CompareAndSet_Release(&Value, c, val); }
    inline bool  CompareAndSet_Acquire(T c, T val) { return Ops::CompareAndSet_Acquire(&Value, c, val); }
    inline bool  CompareAndSet_NoSync(T c, T val)  { return Ops::CompareAndSet_NoSync(&Value, c, val); }
    // Load & Store.
    inline void  Store_Release(T val)  { Ops::Store_Release(&Value, val); }
    inline T     Load_Acquire() const  { return Ops::Load_Acquire(&Value); }
};


// ***** AtomicPtr - Atomic pointer template

// This pointer class supports atomic assignments with release,
// increment / decrement operations, and conditional compare + set.

template<class T>
class AtomicPtr : public AtomicValueBase<T*>
{
    typedef typename AtomicValueBase<T*>::Ops Ops;

public:
    // Initialize pointer value to 0 by default; use Store_Release only with explicit constructor.
    inline AtomicPtr() : AtomicValueBase<T*>()                   { this->Value = 0; }
    explicit inline AtomicPtr(T* val) : AtomicValueBase<T*>(val) { }

    // Pointer access.
    inline T* operator -> () const  { return this->Load_Acquire(); }

    // It looks like it is convenient to have Load_Acquire characteristics
    // for this, since that is convenient for algorithms such as linked
    // list traversals that can be appended to by another thread.
    inline operator T* () const     { return this->Load_Acquire(); }


    // *** Standard Atomic inlines (applicable to pointers)

    // ExchangeAdd considers pointer size for pointers.
    template<class I>
    inline T* ExchangeAdd_Sync(I incr)    { return Ops::ExchangeAdd_Sync(&this->Value, ((T*)0) + incr); }
    template<class I>
    inline T* ExchangeAdd_Release(I incr) { return Ops::ExchangeAdd_Release(&this->Value, ((T*)0) + incr); }
    template<class I>
    inline T* ExchangeAdd_Acquire(I incr) { return Ops::ExchangeAdd_Acquire(&this->Value, ((T*)0) + incr); }
    template<class I>
    inline T* ExchangeAdd_NoSync(I incr)  { return Ops::ExchangeAdd_NoSync(&this->Value, ((T*)0) + incr); }

    // *** Atomic Operators

    inline T* operator = (T* val)  { this->Store_Release(val); return val; }

    template<class I>
    inline T* operator += (I val)  { return ExchangeAdd_Sync(val) + val; }
    template<class I>
    inline T* operator -= (I val)  { return operator += (-val); }

    inline T* operator ++ ()     { return ExchangeAdd_Sync(1) + 1; }
    inline T* operator -- ()     { return ExchangeAdd_Sync(-1) - 1; }
    inline T* operator ++ (int)  { return ExchangeAdd_Sync(1); }
    inline T* operator -- (int)  { return ExchangeAdd_Sync(-1); }
};
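
// Illustrative sketch (assumed scenario): AtomicPtr as the head of a list
// that another thread may swap out; operator T*() gives an acquire load.
//
//     struct Node { Node* pNext; };
//     AtomicPtr<Node> Head;
//     Node* snapshot = Head;     // Load_Acquire semantics via operator T*()
//     if (snapshot)              // try to pop the head if it is unchanged
//         Head.CompareAndSet_Sync(snapshot, snapshot->pNext);
//     // (a real lock-free pop must also deal with the classic ABA problem)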


// ***** AtomicInt - Atomic integer template

// Implements an atomic integer type; the exact type to use is provided
// as an argument. Supports atomic Acquire / Release semantics, atomic
// arithmetic operations, and atomic conditional compare + set.

template<class T>
class AtomicInt : public AtomicValueBase<T>
{
    typedef typename AtomicValueBase<T>::Ops Ops;

public:
    inline AtomicInt() : AtomicValueBase<T>()                  { }
    explicit inline AtomicInt(T val) : AtomicValueBase<T>(val) { }


    // *** Standard Atomic inlines (applicable to int)
    inline T     ExchangeAdd_Sync(T val)    { return Ops::ExchangeAdd_Sync(&this->Value, val); }
    inline T     ExchangeAdd_Release(T val) { return Ops::ExchangeAdd_Release(&this->Value, val); }
    inline T     ExchangeAdd_Acquire(T val) { return Ops::ExchangeAdd_Acquire(&this->Value, val); }
    inline T     ExchangeAdd_NoSync(T val)  { return Ops::ExchangeAdd_NoSync(&this->Value, val); }
    // These increments could be more efficient because they don't return a value.
    inline void  Increment_Sync()           { ExchangeAdd_Sync((T)1); }
    inline void  Increment_Release()        { ExchangeAdd_Release((T)1); }
    inline void  Increment_Acquire()        { ExchangeAdd_Acquire((T)1); }
    inline void  Increment_NoSync()         { ExchangeAdd_NoSync((T)1); }

    // *** Atomic Operators

    inline T operator = (T val)   { this->Store_Release(val); return val; }
    inline T operator += (T val)  { return ExchangeAdd_Sync(val) + val; }
    inline T operator -= (T val)  { return ExchangeAdd_Sync(0 - val) - val; }

    inline T operator ++ ()     { return ExchangeAdd_Sync((T)1) + 1; }
    inline T operator -- ()     { return ExchangeAdd_Sync(((T)0)-1) - 1; }
    inline T operator ++ (int)  { return ExchangeAdd_Sync((T)1); }
    inline T operator -- (int)  { return ExchangeAdd_Sync(((T)0)-1); }

    // More complex atomic operations. Leave it to the compiler whether to optimize them or not.
    T operator &= (T arg)
    {
        T comp, newVal;
        do {
            comp   = this->Value;
            newVal = comp & arg;
        } while(!this->CompareAndSet_Sync(comp, newVal));
        return newVal;
    }

    T operator |= (T arg)
    {
        T comp, newVal;
        do {
            comp   = this->Value;
            newVal = comp | arg;
        } while(!this->CompareAndSet_Sync(comp, newVal));
        return newVal;
    }

    T operator ^= (T arg)
    {
        T comp, newVal;
        do {
            comp   = this->Value;
            newVal = comp ^ arg;
        } while(!this->CompareAndSet_Sync(comp, newVal));
        return newVal;
    }

    T operator *= (T arg)
    {
        T comp, newVal;
        do {
            comp   = this->Value;
            newVal = comp * arg;
        } while(!this->CompareAndSet_Sync(comp, newVal));
        return newVal;
    }

    T operator /= (T arg)
    {
        T comp, newVal;
        do {
            comp   = this->Value;
            newVal = comp / arg;
        } while(!this->CompareAndSet_Sync(comp, newVal));
        return newVal;
    }

    T operator >>= (unsigned bits)
    {
        T comp, newVal;
        do {
            comp   = this->Value;
            newVal = comp >> bits;
        } while(!this->CompareAndSet_Sync(comp, newVal));
        return newVal;
    }

    T operator <<= (unsigned bits)
    {
        T comp, newVal;
        do {
            comp   = this->Value;
            newVal = comp << bits;
        } while(!this->CompareAndSet_Sync(comp, newVal));
        return newVal;
    }
};
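
// Illustrative sketch (assumed scenario): reference counting with AtomicInt,
// the classic use of the atomic increment/decrement operators above.
//
//     AtomicInt<UInt32> RefCount(1);
//     RefCount++;               // AddRef: atomic full-sync increment
//     if (--RefCount == 0)      // Release: operator--() returns the new value
//         ; // last reference gone; safe to destroy the object here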



//-----------------------------------------------------------------------------------
// ***** Lock

// Lock is the simplest and most efficient mutual-exclusion lock class.
// Unlike Mutex, it cannot be waited on.

class Lock
{
    // NOTE: Locks are not allocatable and they themselves should not allocate
    // memory by standard means. This is the case because StandardAllocator
    // relies on this class.
    // Make 'delete' private. Don't do this for 'new' since it can be redefined.
    void operator delete(void*) {}


    // *** Lock implementation for various platforms.

#if !defined(OVR_ENABLE_THREADS)

public:
    // With no thread support, lock does nothing.
    inline Lock() { }
    inline Lock(unsigned) { }
    inline ~Lock() { }
    inline void DoLock() { }
    inline void Unlock() { }

    // Windows.
#elif defined(OVR_OS_WIN32)

    CRITICAL_SECTION cs;
public:
    Lock(unsigned spinCount = 0);
    ~Lock();
    // Locking functions.
    inline void DoLock()  { ::EnterCriticalSection(&cs); }
    inline void Unlock()  { ::LeaveCriticalSection(&cs); }

#else
    pthread_mutex_t mutex;

public:
    static pthread_mutexattr_t RecursiveAttr;
    static bool                RecursiveAttrInit;

    Lock (unsigned dummy = 0)
    {
        if (!RecursiveAttrInit)
        {
            pthread_mutexattr_init(&RecursiveAttr);
            pthread_mutexattr_settype(&RecursiveAttr, PTHREAD_MUTEX_RECURSIVE);
            RecursiveAttrInit = 1;
        }
        pthread_mutex_init(&mutex, &RecursiveAttr);
    }
    ~Lock ()              { pthread_mutex_destroy(&mutex); }
    inline void DoLock()  { pthread_mutex_lock(&mutex); }
    inline void Unlock()  { pthread_mutex_unlock(&mutex); }

#endif // OVR_ENABLE_THREADS


public:
    // Locker class, used for automatic locking
    class Locker
    {
    public:
        Lock *pLock;
        inline Locker(Lock *plock)
        { pLock = plock; pLock->DoLock(); }
        inline ~Locker()
        { pLock->Unlock(); }
    };
};
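
// Usage sketch (illustrative): scoped locking with Lock::Locker, the RAII
// helper defined above; the destructor unlocks even on early return.
//
//     Lock ListLock;
//     void AddItem()
//     {
//         Lock::Locker guard(&ListLock);   // DoLock() here
//         // ... mutate shared state ...
//     }                                    // Unlock() when guard leaves scope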



} // OVR

#endif // OVR_Atomic_h