oculus1

annotate libovr/Src/Kernel/OVR_Atomic.h @ 20:ff3bfd4da86b

removed unnecessary check for framebuffer resizing when not using vr mode
author John Tsiombikas <nuclear@member.fsf.org>
date Tue, 01 Oct 2013 12:51:20 +0300
parents e2f9e4603129
children
rev   line source
nuclear@3 1 /************************************************************************************
nuclear@3 2
nuclear@3 3 PublicHeader: OVR.h
nuclear@3 4 Filename : OVR_Atomic.h
nuclear@3 5 Content : Contains atomic operations and inline fastest locking
nuclear@3 6 functionality. Will contain #ifdefs for OS efficiency.
nuclear@3 7 Have non-thread-safe implementation if not available.
nuclear@3 8 Created : September 19, 2012
nuclear@3 9 Notes :
nuclear@3 10
nuclear@3 11 Copyright : Copyright 2012 Oculus VR, Inc. All Rights reserved.
nuclear@3 12
nuclear@3 13 Use of this software is subject to the terms of the Oculus license
nuclear@3 14 agreement provided at the time of installation or download, or which
nuclear@3 15 otherwise accompanies this software in either electronic or hard copy form.
nuclear@3 16
nuclear@3 17 ************************************************************************************/
nuclear@3 18 #ifndef OVR_Atomic_h
nuclear@3 19 #define OVR_Atomic_h
nuclear@3 20
nuclear@3 21 #include "OVR_Types.h"
nuclear@3 22
nuclear@3 23 // Include System thread functionality.
nuclear@3 24 #if defined(OVR_OS_WIN32)
nuclear@3 25 #include <windows.h>
nuclear@3 26 #else
nuclear@3 27 #include <pthread.h>
nuclear@3 28 #endif
nuclear@3 29
nuclear@3 30
nuclear@3 31 namespace OVR {
nuclear@3 32
nuclear@3 33
nuclear@3 34 // ****** Declared classes
nuclear@3 35
nuclear@3 36 // If there is NO thread support we implement AtomicOps and
nuclear@3 37 // Lock objects as no-ops. The other classes are not defined.
nuclear@3 38 template<class C> class AtomicOps;
nuclear@3 39 template<class T> class AtomicInt;
nuclear@3 40 template<class T> class AtomicPtr;
nuclear@3 41
nuclear@3 42 class Lock;
nuclear@3 43
nuclear@3 44
nuclear@3 45 //-----------------------------------------------------------------------------------
nuclear@3 46 // ***** AtomicOps
nuclear@3 47
nuclear@3 48 // Atomic operations are provided by the AtomicOps templates class,
nuclear@3 49 // implemented through system-specific AtomicOpsRaw specializations.
nuclear@3 50 // It provides several fundamental operations such as Exchange, ExchangeAdd
nuclear@3 51 // CompareAndSet, and Store_Release. Each function includes several memory
nuclear@3 52 // synchronization versions, important for multiprocessing CPUs with weak
nuclear@3 53 // memory consistency. The following memory fencing strategies are supported:
nuclear@3 54 //
nuclear@3 55 // - NoSync. No memory synchronization is done for atomic op.
nuclear@3 56 // - Release. All other memory writes are completed before atomic op
nuclear@3 57 // writes its results.
nuclear@3 58 // - Acquire. Further memory reads are forced to wait until atomic op
nuclear@3 59 // executes, guaranteeing that the right values will be seen.
nuclear@3 60 // - Sync. A combination of Release and Acquire.
nuclear@3 61
nuclear@3 62
nuclear@3 63 // *** AtomicOpsRaw
nuclear@3 64
nuclear@3 65 // AtomicOpsRaw is a specialized template that provides atomic operations
nuclear@3 66 // used by AtomicOps. This class has two fundamental qualities: (1) it
nuclear@3 67 // defines a type T of correct size, and (2) provides operations that work
nuclear@3 68 // atomically, such as Exchange_Sync and CompareAndSet_Release.
nuclear@3 69
nuclear@3 70 // AtomicOpsRawBase class contains shared constants/classes for AtomicOpsRaw.
nuclear@3 71 // The primary thing it does is define sync class objects, whose destructor and
nuclear@3 72 // constructor provide places to insert appropriate synchronization calls, on
nuclear@3 73 // systems where such calls are necessary. So far, the breakdown is as follows:
nuclear@3 74 //
nuclear@3 75 // - X86 systems don't need custom syncs, since their exchange/atomic
nuclear@3 76 // instructions are implicitly synchronized.
nuclear@3 77 // - PowerPC requires lwsync/isync instructions that can use this mechanism.
nuclear@3 78 // - If some other systems require a mechanism where syncing type is associated
nuclear@3 79 // with a particular instruction, the default implementation (which implements
nuclear@3 80 // all Sync, Acquire, and Release modes in terms of NoSync and fence) may not
nuclear@3 81 // work. In that case it will need to be #ifdef-ed conditionally.
nuclear@3 82
// AtomicOpsRawBase defines the per-platform memory-fence helper structs
// (FullSync / AcquireSync / ReleaseSync). AtomicOpsRaw_DefImpl constructs one
// of these objects around a NoSync atomic op: the constructor runs before the
// op and the destructor after it, so each struct emits its barrier
// instruction(s) on the appropriate side(s).
struct AtomicOpsRawBase
{
#if !defined(OVR_ENABLE_THREADS) || defined(OVR_CPU_X86) || defined(OVR_OS_WIN32) || defined(OVR_OS_IPHONE)
    // No explicit fencing on these targets: either threads are disabled, or
    // (per the file-level notes) the exchange/atomic instructions are
    // implicitly synchronized.
    // Need to have empty constructor to avoid class 'unused' variable warning.
    struct FullSync { inline FullSync() { } };
    struct AcquireSync { inline AcquireSync() { } };
    struct ReleaseSync { inline ReleaseSync() { } };

#elif defined(OVR_CPU_PPC64) || defined(OVR_CPU_PPC)
    // PowerPC: 'sync' before the op orders prior writes (release side),
    // 'isync' after the op prevents later reads from moving up (acquire side).
    struct FullSync { inline FullSync() { asm volatile("sync\n"); } ~FullSync() { asm volatile("isync\n"); } };
    struct AcquireSync { inline AcquireSync() { } ~AcquireSync() { asm volatile("isync\n"); } };
    struct ReleaseSync { inline ReleaseSync() { asm volatile("sync\n"); } };

#elif defined(OVR_CPU_MIPS)
    // MIPS: 'sync' serves as the barrier on whichever side(s) need ordering.
    struct FullSync { inline FullSync() { asm volatile("sync\n"); } ~FullSync() { asm volatile("sync\n"); } };
    struct AcquireSync { inline AcquireSync() { } ~AcquireSync() { asm volatile("sync\n"); } };
    struct ReleaseSync { inline ReleaseSync() { asm volatile("sync\n"); } };

#elif defined(OVR_CPU_ARM)
    // ARM: 'dmb' (data memory barrier) on the appropriate side(s).
    struct FullSync { inline FullSync() { asm volatile("dmb\n"); } ~FullSync() { asm volatile("dmb\n"); } };
    struct AcquireSync { inline AcquireSync() { } ~AcquireSync() { asm volatile("dmb\n"); } };
    struct ReleaseSync { inline ReleaseSync() { asm volatile("dmb\n"); } };


#elif defined(OVR_CC_GNU) && (__GNUC__ >= 4)
    // __sync functions are already full sync
    struct FullSync { inline FullSync() { } };
    struct AcquireSync { inline AcquireSync() { } };
    struct ReleaseSync { inline ReleaseSync() { } };
#endif
};
nuclear@3 114
nuclear@3 115
// 4-Byte raw data atomic op implementation class.
// Supplies the un-fenced primitives Exchange_NoSync / ExchangeAdd_NoSync /
// CompareAndSet_NoSync on a 32-bit unit, selected per platform. Memory
// fencing is layered on top by AtomicOpsRaw_DefImpl using the sync structs
// from AtomicOpsRawBase.
struct AtomicOpsRaw_4ByteImpl : public AtomicOpsRawBase
{
#if !defined(OVR_ENABLE_THREADS)

    // Provide a type for no-thread-support cases. Used by AtomicOpsRaw_DefImpl.
    typedef UInt32 T;

    // *** Thread - Safe Atomic Versions.

#elif defined(OVR_OS_WIN32)

    // Use special defined for VC6, where volatile is not used and
    // InterlockedCompareExchange is declared incorrectly.
    typedef LONG T;
#if defined(OVR_CC_MSVC) && (OVR_CC_MSVC < 1300)
    typedef T* InterlockTPtr;
    typedef LPVOID ET;
    typedef ET* InterlockETPtr;
#else
    typedef volatile T* InterlockTPtr;
    typedef T ET;
    typedef InterlockTPtr InterlockETPtr;
#endif
    // Map directly onto the Win32 Interlocked* API. CAS success is detected
    // by comparing the returned previous value against the comparand 'c'.
    inline static T Exchange_NoSync(volatile T* p, T val) { return InterlockedExchange((InterlockTPtr)p, val); }
    inline static T ExchangeAdd_NoSync(volatile T* p, T val) { return InterlockedExchangeAdd((InterlockTPtr)p, val); }
    inline static bool CompareAndSet_NoSync(volatile T* p, T c, T val) { return InterlockedCompareExchange((InterlockETPtr)p, (ET)val, (ET)c) == (ET)c; }

#elif defined(OVR_CPU_PPC64) || defined(OVR_CPU_PPC)
    typedef UInt32 T;

    // lwarx/stwcx. load-reserved / store-conditional loop; retries until the
    // conditional store succeeds, then returns the previous value of *i.
    static inline UInt32 Exchange_NoSync(volatile UInt32 *i, UInt32 j)
    {
        UInt32 ret;

        asm volatile("1:\n\t"
                     "lwarx %[r],0,%[i]\n\t"
                     "stwcx. %[j],0,%[i]\n\t"
                     "bne- 1b\n"
                     : "+m" (*i), [r] "=&b" (ret) : [i] "b" (i), [j] "b" (j) : "cc", "memory");

        return ret;
    }

    // Same reservation loop with an 'add' in the middle; returns the value
    // of *i before the addition.
    static inline UInt32 ExchangeAdd_NoSync(volatile UInt32 *i, UInt32 j)
    {
        UInt32 dummy, ret;

        asm volatile("1:\n\t"
                     "lwarx %[r],0,%[i]\n\t"
                     "add %[o],%[r],%[j]\n\t"
                     "stwcx. %[o],0,%[i]\n\t"
                     "bne- 1b\n"
                     : "+m" (*i), [r] "=&b" (ret), [o] "=&r" (dummy) : [i] "b" (i), [j] "b" (j) : "cc", "memory");

        return ret;
    }

    // Compare *i against 'c'; store 'value' only if equal. The condition
    // register is copied into ret via mfcr; mask 0x20000000 extracts CR0[EQ],
    // i.e. whether the compare matched (and hence the store was attempted).
    static inline bool CompareAndSet_NoSync(volatile UInt32 *i, UInt32 c, UInt32 value)
    {
        UInt32 ret;

        asm volatile("1:\n\t"
                     "lwarx %[r],0,%[i]\n\t"
                     "cmpw 0,%[r],%[cmp]\n\t"
                     "mfcr %[r]\n\t"
                     "bne- 2f\n\t"
                     "stwcx. %[val],0,%[i]\n\t"
                     "bne- 1b\n\t"
                     "2:\n"
                     : "+m" (*i), [r] "=&b" (ret) : [i] "b" (i), [cmp] "b" (c), [val] "b" (value) : "cc", "memory");

        return (ret & 0x20000000) ? 1 : 0;
    }

#elif defined(OVR_CPU_MIPS)
    typedef UInt32 T;

    // ll/sc (load-linked / store-conditional) loop; note 'sc' clobbers the
    // register holding j, writing 1 on success and 0 on failure.
    static inline UInt32 Exchange_NoSync(volatile UInt32 *i, UInt32 j)
    {
        UInt32 ret;

        asm volatile("1:\n\t"
                     "ll %[r],0(%[i])\n\t"
                     "sc %[j],0(%[i])\n\t"
                     "beq %[j],$0,1b\n\t"
                     "nop \n"
                     : "+m" (*i), [r] "=&d" (ret) : [i] "d" (i), [j] "d" (j) : "cc", "memory");

        return ret;
    }

    // ll/addu/sc loop; returns the pre-addition value of *i.
    static inline UInt32 ExchangeAdd_NoSync(volatile UInt32 *i, UInt32 j)
    {
        UInt32 ret;

        asm volatile("1:\n\t"
                     "ll %[r],0(%[i])\n\t"
                     "addu %[j],%[r],%[j]\n\t"
                     "sc %[j],0(%[i])\n\t"
                     "beq %[j],$0,1b\n\t"
                     "nop \n"
                     : "+m" (*i), [r] "=&d" (ret) : [i] "d" (i), [j] "d" (j) : "cc", "memory");

        return ret;
    }

    // CAS via ll/sc. ret is cleared first, so a failed compare falls through
    // to 2: returning 0; on the store path ret carries sc's result (1 = ok).
    static inline bool CompareAndSet_NoSync(volatile UInt32 *i, UInt32 c, UInt32 value)
    {
        UInt32 ret, dummy;

        asm volatile("1:\n\t"
                     "move %[r],$0\n\t"
                     "ll %[o],0(%[i])\n\t"
                     "bne %[o],%[c],2f\n\t"
                     "move %[r],%[v]\n\t"
                     "sc %[r],0(%[i])\n\t"
                     "beq %[r],$0,1b\n\t"
                     "nop \n\t"
                     "2:\n"
                     : "+m" (*i),[r] "=&d" (ret), [o] "=&d" (dummy) : [i] "d" (i), [c] "d" (c), [v] "d" (value)
                     : "cc", "memory");

        return ret;
    }

#elif defined(OVR_CPU_ARM) && defined(OVR_CC_ARM)
    typedef UInt32 T;

    // ARM RVCT compiler: use the __ldrex/__strex intrinsics (exclusive
    // load/store); __strex returns 0 when the store succeeded.
    static inline UInt32 Exchange_NoSync(volatile UInt32 *i, UInt32 j)
    {
        for(;;)
        {
            T r = __ldrex(i);
            if (__strex(j, i) == 0)
                return r;
        }
    }
    static inline UInt32 ExchangeAdd_NoSync(volatile UInt32 *i, UInt32 j)
    {
        for(;;)
        {
            T r = __ldrex(i);
            if (__strex(r + j, i) == 0)
                return r;
        }
    }

    static inline bool CompareAndSet_NoSync(volatile UInt32 *i, UInt32 c, UInt32 value)
    {
        for(;;)
        {
            T r = __ldrex(i);
            if (r != c)
                return 0;
            if (__strex(value, i) == 0)
                return 1;
        }
    }

#elif defined(OVR_CPU_ARM)
    typedef UInt32 T;

    // GCC on ARM: ldrex/strex exclusive-access loop; strex writes 0 into the
    // status register on success.
    static inline UInt32 Exchange_NoSync(volatile UInt32 *i, UInt32 j)
    {
        UInt32 ret, dummy;

        asm volatile("1:\n\t"
                     "ldrex %[r],[%[i]]\n\t"
                     "strex %[t],%[j],[%[i]]\n\t"
                     "cmp %[t],#0\n\t"
                     "bne 1b\n\t"
                     : "+m" (*i), [r] "=&r" (ret), [t] "=&r" (dummy) : [i] "r" (i), [j] "r" (j) : "cc", "memory");

        return ret;
    }

    static inline UInt32 ExchangeAdd_NoSync(volatile UInt32 *i, UInt32 j)
    {
        UInt32 ret, dummy, test;

        asm volatile("1:\n\t"
                     "ldrex %[r],[%[i]]\n\t"
                     "add %[o],%[r],%[j]\n\t"
                     "strex %[t],%[o],[%[i]]\n\t"
                     "cmp %[t],#0\n\t"
                     "bne 1b\n\t"
                     : "+m" (*i), [r] "=&r" (ret), [o] "=&r" (dummy), [t] "=&r" (test) : [i] "r" (i), [j] "r" (j) : "cc", "memory");

        return ret;
    }

    // NOTE(review): ret is initialized to 1 so the compare-fail path (branch
    // to 2: without executing strex) is meant to return !1 == false, but the
    // "=&r" constraint declares %[r] write-only, so the compiler need not
    // load that initial 1 into the register — looks like this should be a
    // read-write ("+&r") operand; confirm before relying on the fail path.
    // Also %[t] is declared but never used in the asm template.
    static inline bool CompareAndSet_NoSync(volatile UInt32 *i, UInt32 c, UInt32 value)
    {
        UInt32 ret = 1, dummy, test;

        asm volatile("1:\n\t"
                     "ldrex %[o],[%[i]]\n\t"
                     "cmp %[o],%[c]\n\t"
                     "bne 2f\n\t"
                     "strex %[r],%[v],[%[i]]\n\t"
                     "cmp %[r],#0\n\t"
                     "bne 1b\n\t"
                     "2:\n"
                     : "+m" (*i),[r] "=&r" (ret), [o] "=&r" (dummy), [t] "=&r" (test) : [i] "r" (i), [c] "r" (c), [v] "r" (value)
                     : "cc", "memory");

        return !ret;
    }

#elif defined(OVR_CPU_X86)
    typedef UInt32 T;

    // x86: xchgl carries an implicit lock prefix, so no 'lock;' is needed.
    static inline UInt32 Exchange_NoSync(volatile UInt32 *i, UInt32 j)
    {
        asm volatile("xchgl %1,%[i]\n"
                     : "+m" (*i), "=q" (j) : [i] "m" (*i), "1" (j) : "cc", "memory");

        return j;
    }

    // lock xadd: atomically adds j to *i and leaves the old value in j.
    static inline UInt32 ExchangeAdd_NoSync(volatile UInt32 *i, UInt32 j)
    {
        asm volatile("lock; xaddl %1,%[i]\n"
                     : "+m" (*i), "+q" (j) : [i] "m" (*i) : "cc", "memory");

        return j;
    }

    // lock cmpxchg compares *i with EAX (tied to 'c' via the "1"/"=a"
    // constraints) and leaves the old value in EAX; equal means the swap
    // took place.
    static inline bool CompareAndSet_NoSync(volatile UInt32 *i, UInt32 c, UInt32 value)
    {
        UInt32 ret;

        asm volatile("lock; cmpxchgl %[v],%[i]\n"
                     : "+m" (*i), "=a" (ret) : [i] "m" (*i), "1" (c), [v] "q" (value) : "cc", "memory");

        return (ret == c);
    }

// NOTE(review): this version test rejects e.g. GCC 5.0 (minor == 0) even
// though it supports __sync builtins; presumably it should be
// (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1)) — confirm.
#elif defined(OVR_CC_GNU) && (__GNUC__ >= 4 && __GNUC_MINOR__ >= 1)

    typedef UInt32 T;

    // GCC __sync builtins (full-barrier). There is no plain atomic-exchange
    // builtin here, so Exchange is emulated with a CAS retry loop.
    static inline T Exchange_NoSync(volatile T *i, T j)
    {
        T v;
        do {
            v = *i;
        } while (!__sync_bool_compare_and_swap(i, v, j));
        return v;
    }

    static inline T ExchangeAdd_NoSync(volatile T *i, T j)
    {
        return __sync_fetch_and_add(i, j);
    }

    static inline bool CompareAndSet_NoSync(volatile T *i, T c, T value)
    {
        return __sync_bool_compare_and_swap(i, c, value);
    }

#endif // OS
};
nuclear@3 379
nuclear@3 380
// 8-Byte raw data data atomic op implementation class.
// Currently implementation is provided only on systems with 64-bit pointers.
// Same structure as AtomicOpsRaw_4ByteImpl, but operating on a 64-bit unit;
// on unsupported configurations only the T typedef is provided (for
// AtomicOpsRaw_DefImpl's no-thread fallbacks).
struct AtomicOpsRaw_8ByteImpl : public AtomicOpsRawBase
{
#if !defined(OVR_64BIT_POINTERS) || !defined(OVR_ENABLE_THREADS)

    // Provide a type for no-thread-support cases. Used by AtomicOpsRaw_DefImpl.
    typedef UInt64 T;

    // *** Thread - Safe OS specific versions.
#elif defined(OVR_OS_WIN32)

    // This is only for 64-bit systems.
    typedef LONG64 T;
    typedef volatile T* InterlockTPtr;
    // 64-bit Interlocked* API; CAS success is detected by comparing the
    // returned previous value against the comparand.
    inline static T Exchange_NoSync(volatile T* p, T val) { return InterlockedExchange64((InterlockTPtr)p, val); }
    inline static T ExchangeAdd_NoSync(volatile T* p, T val) { return InterlockedExchangeAdd64((InterlockTPtr)p, val); }
    inline static bool CompareAndSet_NoSync(volatile T* p, T c, T val) { return InterlockedCompareExchange64((InterlockTPtr)p, val, c) == c; }

#elif defined(OVR_CPU_PPC64)

    typedef UInt64 T;

    // 64-bit ldarx/stdcx. reservation loop; returns the previous *i.
    // NOTE(review): unlike the 32-bit versions, these asm statements have no
    // "memory" clobber — confirm whether that omission is intentional.
    static inline UInt64 Exchange_NoSync(volatile UInt64 *i, UInt64 j)
    {
        UInt64 dummy, ret;

        asm volatile("1:\n\t"
                     "ldarx %[r],0,%[i]\n\t"
                     "mr %[o],%[j]\n\t"
                     "stdcx. %[o],0,%[i]\n\t"
                     "bne- 1b\n"
                     : "+m" (*i), [r] "=&b" (ret), [o] "=&r" (dummy) : [i] "b" (i), [j] "b" (j) : "cc");

        return ret;
    }

    // Fetch-and-add via the same reservation loop; returns pre-add value.
    static inline UInt64 ExchangeAdd_NoSync(volatile UInt64 *i, UInt64 j)
    {
        UInt64 dummy, ret;

        asm volatile("1:\n\t"
                     "ldarx %[r],0,%[i]\n\t"
                     "add %[o],%[r],%[j]\n\t"
                     "stdcx. %[o],0,%[i]\n\t"
                     "bne- 1b\n"
                     : "+m" (*i), [r] "=&b" (ret), [o] "=&r" (dummy) : [i] "b" (i), [j] "b" (j) : "cc");

        return ret;
    }

    // CAS; mask 0x20000000 extracts CR0[EQ] after mfcr, as in the 4-byte impl.
    // NOTE(review): 'cmpw' compares only the low 32 bits of the 64-bit values
    // loaded by ldarx — looks like this should be 'cmpd' for a full 64-bit
    // compare; confirm before use on values differing only in the high word.
    static inline bool CompareAndSet_NoSync(volatile UInt64 *i, UInt64 c, UInt64 value)
    {
        UInt64 ret, dummy;

        asm volatile("1:\n\t"
                     "ldarx %[r],0,%[i]\n\t"
                     "cmpw 0,%[r],%[cmp]\n\t"
                     "mfcr %[r]\n\t"
                     "bne- 2f\n\t"
                     "stdcx. %[val],0,%[i]\n\t"
                     "bne- 1b\n\t"
                     "2:\n"
                     : "+m" (*i), [r] "=&b" (ret), [o] "=&r" (dummy) : [i] "b" (i), [cmp] "b" (c), [val] "b" (value) : "cc");

        return (ret & 0x20000000) ? 1 : 0;
    }

#elif defined(OVR_CC_GNU) && (__GNUC__ >= 4 && __GNUC_MINOR__ >= 1)

    typedef UInt64 T;

    // GCC __sync builtins; Exchange emulated with a CAS retry loop (no plain
    // atomic-exchange builtin in this generation of GCC).
    static inline T Exchange_NoSync(volatile T *i, T j)
    {
        T v;
        do {
            v = *i;
        } while (!__sync_bool_compare_and_swap(i, v, j));
        return v;
    }

    static inline T ExchangeAdd_NoSync(volatile T *i, T j)
    {
        return __sync_fetch_and_add(i, j);
    }

    static inline bool CompareAndSet_NoSync(volatile T *i, T c, T value)
    {
        return __sync_bool_compare_and_swap(i, c, value);
    }

#endif // OS
};
nuclear@3 474
nuclear@3 475
// Default implementation for AtomicOpsRaw; provides implementation of mem-fenced
// atomic operations where fencing is done with a sync object wrapped around a NoSync
// operation implemented in the base class. If such implementation is not possible
// on a given platform, #ifdefs can be used to disable it and then op functions can be
// implemented individually in the appropriate AtomicOpsRaw<size> class.

template<class O>
struct AtomicOpsRaw_DefImpl : public O
{
    typedef typename O::T O_T;
    typedef typename O::FullSync O_FullSync;
    typedef typename O::AcquireSync O_AcquireSync;
    typedef typename O::ReleaseSync O_ReleaseSync;

    // If there is no thread support, provide the default implementation. In this case,
    // the base class (O) must still provide the T declaration.
#ifndef OVR_ENABLE_THREADS

    // Atomic exchange of val with argument. Returns old val.
    inline static O_T Exchange_NoSync(volatile O_T* p, O_T val) { O_T old = *p; *p = val; return old; }
    // Adds a new val to argument; returns its old val.
    inline static O_T ExchangeAdd_NoSync(volatile O_T* p, O_T val) { O_T old = *p; *p += val; return old; }
    // Compares the argument data with 'c' val.
    // If succeeded, stores val into '*p' and returns true; otherwise returns false.
    inline static bool CompareAndSet_NoSync(volatile O_T* p, O_T c, O_T val) { if (*p==c) { *p = val; return 1; } return 0; }

#endif

    // Fenced variants: each constructs the matching sync object (its ctor
    // emits the pre-op barrier, its dtor the post-op barrier) around the
    // NoSync primitive. If the NoSync-wrapped implementation is not possible
    // on some platform, this block should be replaced with per-function
    // implementations in O. The "AtomicOpsRaw_DefImpl<O>::" qualification in
    // the calls below ensures the NoSync overload is found whether it comes
    // from the base class O or from the fallbacks defined above.
    inline static O_T Exchange_Sync(volatile O_T* p, O_T val) { O_FullSync sync; OVR_UNUSED(sync); return AtomicOpsRaw_DefImpl<O>::Exchange_NoSync(p, val); }
    inline static O_T Exchange_Release(volatile O_T* p, O_T val) { O_ReleaseSync sync; OVR_UNUSED(sync); return AtomicOpsRaw_DefImpl<O>::Exchange_NoSync(p, val); }
    inline static O_T Exchange_Acquire(volatile O_T* p, O_T val) { O_AcquireSync sync; OVR_UNUSED(sync); return AtomicOpsRaw_DefImpl<O>::Exchange_NoSync(p, val); }
    inline static O_T ExchangeAdd_Sync(volatile O_T* p, O_T val) { O_FullSync sync; OVR_UNUSED(sync); return AtomicOpsRaw_DefImpl<O>::ExchangeAdd_NoSync(p, val); }
    inline static O_T ExchangeAdd_Release(volatile O_T* p, O_T val) { O_ReleaseSync sync; OVR_UNUSED(sync); return AtomicOpsRaw_DefImpl<O>::ExchangeAdd_NoSync(p, val); }
    inline static O_T ExchangeAdd_Acquire(volatile O_T* p, O_T val) { O_AcquireSync sync; OVR_UNUSED(sync); return AtomicOpsRaw_DefImpl<O>::ExchangeAdd_NoSync(p, val); }
    inline static bool CompareAndSet_Sync(volatile O_T* p, O_T c, O_T val) { O_FullSync sync; OVR_UNUSED(sync); return AtomicOpsRaw_DefImpl<O>::CompareAndSet_NoSync(p,c,val); }
    inline static bool CompareAndSet_Release(volatile O_T* p, O_T c, O_T val) { O_ReleaseSync sync; OVR_UNUSED(sync); return AtomicOpsRaw_DefImpl<O>::CompareAndSet_NoSync(p,c,val); }
    inline static bool CompareAndSet_Acquire(volatile O_T* p, O_T c, O_T val) { O_AcquireSync sync; OVR_UNUSED(sync); return AtomicOpsRaw_DefImpl<O>::CompareAndSet_NoSync(p,c,val); }

    // Loads and stores with memory fence. These have only the relevant versions.
#ifdef OVR_CPU_X86
    // On X86, Store_Release is implemented as exchange. Note that we can also
    // consider 'sfence' in the future, although it is not as compatible with older CPUs.
    inline static void Store_Release(volatile O_T* p, O_T val) { Exchange_Release(p, val); }
#else
    inline static void Store_Release(volatile O_T* p, O_T val) { O_ReleaseSync sync; OVR_UNUSED(sync); *p = val; }
#endif
    inline static O_T Load_Acquire(const volatile O_T* p) { O_AcquireSync sync; OVR_UNUSED(sync); return *p; }
};
nuclear@3 527
nuclear@3 528
nuclear@3 529 template<int size>
nuclear@3 530 struct AtomicOpsRaw : public AtomicOpsRawBase { };
nuclear@3 531
nuclear@3 532 template<>
nuclear@3 533 struct AtomicOpsRaw<4> : public AtomicOpsRaw_DefImpl<AtomicOpsRaw_4ByteImpl>
nuclear@3 534 {
nuclear@3 535 // Ensure that assigned type size is correct.
nuclear@3 536 AtomicOpsRaw()
nuclear@3 537 { OVR_COMPILER_ASSERT(sizeof(AtomicOpsRaw_DefImpl<AtomicOpsRaw_4ByteImpl>::T) == 4); }
nuclear@3 538 };
nuclear@3 539 template<>
nuclear@3 540 struct AtomicOpsRaw<8> : public AtomicOpsRaw_DefImpl<AtomicOpsRaw_8ByteImpl>
nuclear@3 541 {
nuclear@3 542 AtomicOpsRaw()
nuclear@3 543 { OVR_COMPILER_ASSERT(sizeof(AtomicOpsRaw_DefImpl<AtomicOpsRaw_8ByteImpl>::T) == 8); }
nuclear@3 544 };
nuclear@3 545
nuclear@3 546
nuclear@3 547 // *** AtomicOps - implementation of atomic Ops for specified class
nuclear@3 548
nuclear@3 549 // Implements atomic ops on a class, provided that the object is either
nuclear@3 550 // 4 or 8 bytes in size (depending on the AtomicOpsRaw specializations
nuclear@3 551 // available). Relies on AtomicOpsRaw for much of implementation.
nuclear@3 552
nuclear@3 553 template<class C>
nuclear@3 554 class AtomicOps
nuclear@3 555 {
nuclear@3 556 typedef AtomicOpsRaw<sizeof(C)> Ops;
nuclear@3 557 typedef typename Ops::T T;
nuclear@3 558 typedef volatile typename Ops::T* PT;
nuclear@3 559 // We cast through unions to (1) avoid pointer size compiler warnings
nuclear@3 560 // and (2) ensure that there are no problems with strict pointer aliasing.
nuclear@3 561 union C2T_union { C c; T t; };
nuclear@3 562
nuclear@3 563 public:
nuclear@3 564 // General purpose implementation for standard syncs.
nuclear@3 565 inline static C Exchange_Sync(volatile C* p, C val) { C2T_union u; u.c = val; u.t = Ops::Exchange_Sync((PT)p, u.t); return u.c; }
nuclear@3 566 inline static C Exchange_Release(volatile C* p, C val) { C2T_union u; u.c = val; u.t = Ops::Exchange_Release((PT)p, u.t); return u.c; }
nuclear@3 567 inline static C Exchange_Acquire(volatile C* p, C val) { C2T_union u; u.c = val; u.t = Ops::Exchange_Acquire((PT)p, u.t); return u.c; }
nuclear@3 568 inline static C Exchange_NoSync(volatile C* p, C val) { C2T_union u; u.c = val; u.t = Ops::Exchange_NoSync((PT)p, u.t); return u.c; }
nuclear@3 569 inline static C ExchangeAdd_Sync(volatile C* p, C val) { C2T_union u; u.c = val; u.t = Ops::ExchangeAdd_Sync((PT)p, u.t); return u.c; }
nuclear@3 570 inline static C ExchangeAdd_Release(volatile C* p, C val) { C2T_union u; u.c = val; u.t = Ops::ExchangeAdd_Release((PT)p, u.t); return u.c; }
nuclear@3 571 inline static C ExchangeAdd_Acquire(volatile C* p, C val) { C2T_union u; u.c = val; u.t = Ops::ExchangeAdd_Acquire((PT)p, u.t); return u.c; }
nuclear@3 572 inline static C ExchangeAdd_NoSync(volatile C* p, C val) { C2T_union u; u.c = val; u.t = Ops::ExchangeAdd_NoSync((PT)p, u.t); return u.c; }
nuclear@3 573 inline static bool CompareAndSet_Sync(volatile C* p, C c, C val) { C2T_union u,cu; u.c = val; cu.c = c; return Ops::CompareAndSet_Sync((PT)p, cu.t, u.t); }
nuclear@3 574 inline static bool CompareAndSet_Release(volatile C* p, C c, C val){ C2T_union u,cu; u.c = val; cu.c = c; return Ops::CompareAndSet_Release((PT)p, cu.t, u.t); }
nuclear@3 575 inline static bool CompareAndSet_Relse(volatile C* p, C c, C val){ C2T_union u,cu; u.c = val; cu.c = c; return Ops::CompareAndSet_Acquire((PT)p, cu.t, u.t); }
nuclear@3 576 inline static bool CompareAndSet_NoSync(volatile C* p, C c, C val) { C2T_union u,cu; u.c = val; cu.c = c; return Ops::CompareAndSet_NoSync((PT)p, cu.t, u.t); }
nuclear@3 577 // Loads and stores with memory fence. These have only the relevant versions.
nuclear@3 578 inline static void Store_Release(volatile C* p, C val) { C2T_union u; u.c = val; Ops::Store_Release((PT)p, u.t); }
nuclear@3 579 inline static C Load_Acquire(const volatile C* p) { C2T_union u; u.t = Ops::Load_Acquire((PT)p); return u.c; }
nuclear@3 580 };
nuclear@3 581
nuclear@3 582
nuclear@3 583
// Atomic value base class - implements operations shared for integers and pointers.
// Wraps a volatile T and forwards all operations to AtomicOps<T>.
template<class T>
class AtomicValueBase
{
protected:
    typedef AtomicOps<T> Ops;
public:

    // Underlying storage; intended to be accessed through the atomic methods.
    volatile T Value;

    // Default ctor leaves Value uninitialized; the explicit ctor publishes
    // the initial value with release semantics.
    inline AtomicValueBase() { }
    explicit inline AtomicValueBase(T val) { Ops::Store_Release(&Value, val); }

    // Most libraries (TBB and Joshua Scholar's) library do not do Load_Acquire
    // here, since most algorithms do not require atomic loads. Needs some research.
    inline operator T() const { return Value; }

    // *** Standard Atomic inlines
    inline T Exchange_Sync(T val) { return Ops::Exchange_Sync(&Value, val); }
    inline T Exchange_Release(T val) { return Ops::Exchange_Release(&Value, val); }
    inline T Exchange_Acquire(T val) { return Ops::Exchange_Acquire(&Value, val); }
    inline T Exchange_NoSync(T val) { return Ops::Exchange_NoSync(&Value, val); }
    inline bool CompareAndSet_Sync(T c, T val) { return Ops::CompareAndSet_Sync(&Value, c, val); }
    inline bool CompareAndSet_Release(T c, T val) { return Ops::CompareAndSet_Release(&Value, c, val); }
    // NOTE: 'CompareAndSet_Relse' is AtomicOps' (misspelled) name for the
    // acquire variant; the call intentionally matches that declaration.
    inline bool CompareAndSet_Acquire(T c, T val) { return Ops::CompareAndSet_Relse(&Value, c, val); }
    inline bool CompareAndSet_NoSync(T c, T val) { return Ops::CompareAndSet_NoSync(&Value, c, val); }
    // Load & Store.
    inline void Store_Release(T val) { Ops::Store_Release(&Value, val); }
    inline T Load_Acquire() const { return Ops::Load_Acquire(&Value); }
};
nuclear@3 614
nuclear@3 615
nuclear@3 616 // ***** AtomicPtr - Atomic pointer template
nuclear@3 617
nuclear@3 618 // This pointer class supports atomic assignments with release,
nuclear@3 619 // increment / decrement operations, and conditional compare + set.
nuclear@3 620
nuclear@3 621 template<class T>
nuclear@3 622 class AtomicPtr : public AtomicValueBase<T*>
nuclear@3 623 {
nuclear@3 624 typedef typename AtomicValueBase<T*>::Ops Ops;
nuclear@3 625
nuclear@3 626 public:
nuclear@3 627 // Initialize pointer value to 0 by default; use Store_Release only with explicit constructor.
nuclear@3 628 inline AtomicPtr() : AtomicValueBase<T*>() { this->Value = 0; }
nuclear@3 629 explicit inline AtomicPtr(T* val) : AtomicValueBase<T*>(val) { }
nuclear@3 630
nuclear@3 631 // Pointer access.
nuclear@3 632 inline T* operator -> () const { return this->Load_Acquire(); }
nuclear@3 633
nuclear@3 634 // It looks like it is convenient to have Load_Acquire characteristics
nuclear@3 635 // for this, since that is convenient for algorithms such as linked
nuclear@3 636 // list traversals that can be added to bu another thread.
nuclear@3 637 inline operator T* () const { return this->Load_Acquire(); }
nuclear@3 638
nuclear@3 639
nuclear@3 640 // *** Standard Atomic inlines (applicable to pointers)
nuclear@3 641
nuclear@3 642 // ExhangeAdd considers pointer size for pointers.
nuclear@3 643 template<class I>
nuclear@3 644 inline T* ExchangeAdd_Sync(I incr) { return Ops::ExchangeAdd_Sync(&this->Value, ((T*)0) + incr); }
nuclear@3 645 template<class I>
nuclear@3 646 inline T* ExchangeAdd_Release(I incr) { return Ops::ExchangeAdd_Release(&this->Value, ((T*)0) + incr); }
nuclear@3 647 template<class I>
nuclear@3 648 inline T* ExchangeAdd_Acquire(I incr) { return Ops::ExchangeAdd_Acquire(&this->Value, ((T*)0) + incr); }
nuclear@3 649 template<class I>
nuclear@3 650 inline T* ExchangeAdd_NoSync(I incr) { return Ops::ExchangeAdd_NoSync(&this->Value, ((T*)0) + incr); }
nuclear@3 651
nuclear@3 652 // *** Atomic Operators
nuclear@3 653
nuclear@3 654 inline T* operator = (T* val) { this->Store_Release(val); return val; }
nuclear@3 655
nuclear@3 656 template<class I>
nuclear@3 657 inline T* operator += (I val) { return ExchangeAdd_Sync(val) + val; }
nuclear@3 658 template<class I>
nuclear@3 659 inline T* operator -= (I val) { return operator += (-val); }
nuclear@3 660
nuclear@3 661 inline T* operator ++ () { return ExchangeAdd_Sync(1) + 1; }
nuclear@3 662 inline T* operator -- () { return ExchangeAdd_Sync(-1) - 1; }
nuclear@3 663 inline T* operator ++ (int) { return ExchangeAdd_Sync(1); }
nuclear@3 664 inline T* operator -- (int) { return ExchangeAdd_Sync(-1); }
nuclear@3 665 };
nuclear@3 666
nuclear@3 667
nuclear@3 668 // ***** AtomicInt - Atomic integer template
nuclear@3 669
nuclear@3 670 // Implements an atomic integer type; the exact type to use is provided
nuclear@3 671 // as an argument. Supports atomic Acquire / Release semantics, atomic
nuclear@3 672 // arithmetic operations, and atomic conditional compare + set.
nuclear@3 673
nuclear@3 674 template<class T>
nuclear@3 675 class AtomicInt : public AtomicValueBase<T>
nuclear@3 676 {
nuclear@3 677 typedef typename AtomicValueBase<T>::Ops Ops;
nuclear@3 678
nuclear@3 679 public:
nuclear@3 680 inline AtomicInt() : AtomicValueBase<T>() { }
nuclear@3 681 explicit inline AtomicInt(T val) : AtomicValueBase<T>(val) { }
nuclear@3 682
nuclear@3 683
nuclear@3 684 // *** Standard Atomic inlines (applicable to int)
nuclear@3 685 inline T ExchangeAdd_Sync(T val) { return Ops::ExchangeAdd_Sync(&this->Value, val); }
nuclear@3 686 inline T ExchangeAdd_Release(T val) { return Ops::ExchangeAdd_Release(&this->Value, val); }
nuclear@3 687 inline T ExchangeAdd_Acquire(T val) { return Ops::ExchangeAdd_Acquire(&this->Value, val); }
nuclear@3 688 inline T ExchangeAdd_NoSync(T val) { return Ops::ExchangeAdd_NoSync(&this->Value, val); }
nuclear@3 689 // These increments could be more efficient because they don't return a value.
nuclear@3 690 inline void Increment_Sync() { ExchangeAdd_Sync((T)1); }
nuclear@3 691 inline void Increment_Release() { ExchangeAdd_Release((T)1); }
nuclear@3 692 inline void Increment_Acquire() { ExchangeAdd_Acquire((T)1); }
nuclear@3 693 inline void Increment_NoSync() { ExchangeAdd_NoSync((T)1); }
nuclear@3 694
nuclear@3 695 // *** Atomic Operators
nuclear@3 696
nuclear@3 697 inline T operator = (T val) { this->Store_Release(val); return val; }
nuclear@3 698 inline T operator += (T val) { return ExchangeAdd_Sync(val) + val; }
nuclear@3 699 inline T operator -= (T val) { return ExchangeAdd_Sync(0 - val) - val; }
nuclear@3 700
nuclear@3 701 inline T operator ++ () { return ExchangeAdd_Sync((T)1) + 1; }
nuclear@3 702 inline T operator -- () { return ExchangeAdd_Sync(((T)0)-1) - 1; }
nuclear@3 703 inline T operator ++ (int) { return ExchangeAdd_Sync((T)1); }
nuclear@3 704 inline T operator -- (int) { return ExchangeAdd_Sync(((T)0)-1); }
nuclear@3 705
nuclear@3 706 // More complex atomic operations. Leave it to compiler whether to optimize them or not.
nuclear@3 707 T operator &= (T arg)
nuclear@3 708 {
nuclear@3 709 T comp, newVal;
nuclear@3 710 do {
nuclear@3 711 comp = this->Value;
nuclear@3 712 newVal = comp & arg;
nuclear@3 713 } while(!this->CompareAndSet_Sync(comp, newVal));
nuclear@3 714 return newVal;
nuclear@3 715 }
nuclear@3 716
nuclear@3 717 T operator |= (T arg)
nuclear@3 718 {
nuclear@3 719 T comp, newVal;
nuclear@3 720 do {
nuclear@3 721 comp = this->Value;
nuclear@3 722 newVal = comp | arg;
nuclear@3 723 } while(!this->CompareAndSet_Sync(comp, newVal));
nuclear@3 724 return newVal;
nuclear@3 725 }
nuclear@3 726
nuclear@3 727 T operator ^= (T arg)
nuclear@3 728 {
nuclear@3 729 T comp, newVal;
nuclear@3 730 do {
nuclear@3 731 comp = this->Value;
nuclear@3 732 newVal = comp ^ arg;
nuclear@3 733 } while(!this->CompareAndSet_Sync(comp, newVal));
nuclear@3 734 return newVal;
nuclear@3 735 }
nuclear@3 736
nuclear@3 737 T operator *= (T arg)
nuclear@3 738 {
nuclear@3 739 T comp, newVal;
nuclear@3 740 do {
nuclear@3 741 comp = this->Value;
nuclear@3 742 newVal = comp * arg;
nuclear@3 743 } while(!this->CompareAndSet_Sync(comp, newVal));
nuclear@3 744 return newVal;
nuclear@3 745 }
nuclear@3 746
nuclear@3 747 T operator /= (T arg)
nuclear@3 748 {
nuclear@3 749 T comp, newVal;
nuclear@3 750 do {
nuclear@3 751 comp = this->Value;
nuclear@3 752 newVal = comp / arg;
nuclear@3 753 } while(!CompareAndSet_Sync(comp, newVal));
nuclear@3 754 return newVal;
nuclear@3 755 }
nuclear@3 756
nuclear@3 757 T operator >>= (unsigned bits)
nuclear@3 758 {
nuclear@3 759 T comp, newVal;
nuclear@3 760 do {
nuclear@3 761 comp = this->Value;
nuclear@3 762 newVal = comp >> bits;
nuclear@3 763 } while(!CompareAndSet_Sync(comp, newVal));
nuclear@3 764 return newVal;
nuclear@3 765 }
nuclear@3 766
nuclear@3 767 T operator <<= (unsigned bits)
nuclear@3 768 {
nuclear@3 769 T comp, newVal;
nuclear@3 770 do {
nuclear@3 771 comp = this->Value;
nuclear@3 772 newVal = comp << bits;
nuclear@3 773 } while(!this->CompareAndSet_Sync(comp, newVal));
nuclear@3 774 return newVal;
nuclear@3 775 }
nuclear@3 776 };
nuclear@3 777
nuclear@3 778
nuclear@3 779
nuclear@3 780 //-----------------------------------------------------------------------------------
nuclear@3 781 // ***** Lock
nuclear@3 782
nuclear@3 783 // Lock is a simplest and most efficient mutual-exclusion lock class.
nuclear@3 784 // Unlike Mutex, it cannot be waited on.
nuclear@3 785
class Lock
{
    // NOTE: Locks are not allocatable and they themselves should not allocate
    // memory by standard means. This is the case because StandardAllocator
    // relies on this class.
    // Make 'delete' private. Don't do this for 'new' since it can be redefined.
    void operator delete(void*) {}


    // *** Lock implementation for various platforms.

#if !defined(OVR_ENABLE_THREADS)

public:
    // With no thread support, lock does nothing.
    inline Lock() { }
    inline Lock(unsigned) { }
    inline ~Lock() { }
    inline void DoLock() { }
    inline void Unlock() { }

    // Windows: wrap a CRITICAL_SECTION (ctor/dtor defined out of line).
#elif defined(OVR_OS_WIN32)

    CRITICAL_SECTION cs;
public:
    // spinCount lets the caller request a spin before blocking.
    Lock(unsigned spinCount = 0);
    ~Lock();
    // Locking functions.
    inline void DoLock() { ::EnterCriticalSection(&cs); }
    inline void Unlock() { ::LeaveCriticalSection(&cs); }

#else
    // POSIX: a recursive pthread mutex so the same thread can re-enter.
    pthread_mutex_t mutex;

public:
    // Shared recursive attribute, lazily initialized by the first Lock().
    static pthread_mutexattr_t RecursiveAttr;
    static bool RecursiveAttrInit;

    // NOTE(review): this check-then-set initialization of RecursiveAttr is
    // itself unsynchronized; it appears to assume the first Lock is
    // constructed before any concurrent construction — confirm with callers.
    Lock (unsigned dummy = 0)
    {
        if (!RecursiveAttrInit)
        {
            pthread_mutexattr_init(&RecursiveAttr);
            pthread_mutexattr_settype(&RecursiveAttr, PTHREAD_MUTEX_RECURSIVE);
            RecursiveAttrInit = 1;
        }
        pthread_mutex_init(&mutex,&RecursiveAttr);
    }
    ~Lock () { pthread_mutex_destroy(&mutex); }
    inline void DoLock() { pthread_mutex_lock(&mutex); }
    inline void Unlock() { pthread_mutex_unlock(&mutex); }

#endif // OVR_ENABLE_THREADS


public:
    // Locker class, used for automatic locking: acquires pLock on
    // construction and releases it on destruction (RAII scope guard).
    class Locker
    {
    public:
        Lock *pLock;
        inline Locker(Lock *plock)
        { pLock = plock; pLock->DoLock(); }
        inline ~Locker()
        { pLock->Unlock(); }
    };
};
nuclear@3 854
nuclear@3 855
nuclear@3 856
nuclear@3 857 } // OVR
nuclear@3 858
nuclear@3 859 #endif