Commit 9a25579e authored by Mike Gorchak's avatar Mike Gorchak

Support for the atomic operations for ARM, PPC, MIPS, SH, X86 platforms has been added.

--HG--
extra : convert_revision : svn%3Ac70aab31-4412-0410-b14c-859654838e24/trunk%404010
parent 7e6cba93
...@@ -18,865 +18,488 @@ ...@@ -18,865 +18,488 @@
Sam Lantinga Sam Lantinga
slouken@libsdl.org slouken@libsdl.org
QNX native atomic operations
Copyright (C) 2009 Mike Gorchak
(mike@malva.ua, lestat@i.com.ua)
*/ */
#include "SDL_stdinc.h" #include "SDL_stdinc.h"
#include "SDL_atomic.h" #include "SDL_atomic.h"
#include <atomic.h>
/*
This file provides 8, 16, 32, and 64 bit atomic operations. If the
operations are provided by the native hardware and operating system
they are used. If they are not then the operations are emulated
using the SDL mutex operations.
*/
/*
First, detect whether the operations are supported and create
#defines that indicate that they do exist. The goal is to have all
the system dependent code in the top part of the file so that the
bottom can be use unchanged across all platforms.
Second, #define all the operations in each size class that are
supported. Doing this allows supported operations to be used along
side of emulated operations.
*/
/*
Emulated version.
Assume there is no support for atomic operations. All such
operations are implemented using SDL mutex operations.
*/
#ifdef EMULATED_ATOMIC_OPERATIONS
#undef EMULATED_ATOMIC_OPERATIONS
#endif
#ifdef EMULATED_ATOMIC_OPERATIONS
#define HAVE_ALL_8_BIT_OPS
#define nativeExchange8(ptr, value) ()
#define nativeCompareThenSet8(ptr, oldvalue, newvalue) ()
#define nativeTestThenSet8(ptr) ()
#define nativeClear8(ptr) ()
#define nativeFetchThenIncrement8(ptr) ()
#define nativeFetchThenDecrement8(ptr) ()
#define nativeFetchThenAdd8(ptr, value) ()
#define nativeFetchThenSubtract8(ptr, value) ()
#define nativeIncrementThenFetch8(ptr) ()
#define nativeDecrementThenFetch8(ptr) ()
#define nativeAddThenFetch8(ptr, value) ()
#define nativeSubtractThenFetch8(ptr, value) ()
#endif
#ifdef EMULATED_ATOMIC_OPERATIONS
#define HAVE_ALL_16_BIT_OPS
#define nativeExchange16(ptr, value) ()
#define nativeCompareThenSet16(ptr, oldvalue, newvalue) ()
#define nativeTestThenSet16(ptr) ()
#define nativeClear16(ptr) ()
#define nativeFetchThenIncrement16(ptr) ()
#define nativeFetchThenDecrement16(ptr) ()
#define nativeFetchThenAdd16(ptr, value) ()
#define nativeFetchThenSubtract16(ptr, value) ()
#define nativeIncrementThenFetch16(ptr) ()
#define nativeDecrementThenFetch16(ptr) ()
#define nativeAddThenFetch16(ptr, value) ()
#define nativeSubtractThenFetch16(ptr, value) ()
#endif
#ifdef EMULATED_ATOMIC_OPERATIONS
#define HAVE_ALL_64_BIT_OPS
#define nativeExchange64(ptr, value) ()
#define nativeCompareThenSet64(ptr, oldvalue, newvalue) ()
#define nativeTestThenSet64(ptr) ()
#define nativeClear64(ptr) ()
#define nativeFetchThenIncrement64(ptr) ()
#define nativeFetchThenDecrement64(ptr) ()
#define nativeFetchThenAdd64(ptr, value) ()
#define nativeFetchThenSubtract64(ptr, value) ()
#define nativeIncrementThenFetch64(ptr) ()
#define nativeDecrementThenFetch64(ptr) ()
#define nativeAddThenFetch64(ptr, value) ()
#define nativeSubtractThenFetch64(ptr, value) ()
#endif
/*
If any of the operations are not provided then we must emulate some of
them.
*/
#if !defined(HAVE_ALL_8_BIT_OPS) || !defined(HAVE_ALL_16_BIT_OPS) || !defined(HAVE_ALL_64_BIT_OPS)
#include "SDL_mutex.h"
#include "SDL_error.h" #include "SDL_error.h"
static SDL_mutex * lock = NULL; #include <atomic.h>
static __inline__ void
privateWaitLock()
{
if(NULL == lock)
{
lock = SDL_CreateMutex();
if (NULL == lock)
{
SDL_SetError("SDL_atomic.c: can't create a mutex");
return;
}
}
if (-1 == SDL_LockMutex(lock))
{
SDL_SetError("SDL_atomic.c: can't lock mutex");
}
}
static __inline__ void /* SMP Exchange for PPC platform */
privateUnlock() #ifdef __PPC__
{ #include <ppc/smpxchg.h>
if (-1 == SDL_UnlockMutex(lock)) #endif /* __PPC__ */
{
SDL_SetError("SDL_atomic.c: can't unlock mutex");
}
}
#endif /* SMP Exchange for ARM platform */
#ifdef __ARM__
#include <arm/smpxchg.h>
#endif /* __ARM__ */
/* 8 bit atomic operations */ /* SMP Exchange for MIPS platform */
#if defined (__MIPSEB__) || defined(__MIPSEL__)
#include <mips/smpxchg.h>
#endif /* __MIPSEB__ || __MIPSEL__ */
Uint8 /* SMP Exchange for SH platform */
SDL_AtomicExchange8(volatile Uint8 * ptr, Uint8 value) #ifdef __SH__
{ #include <sh/smpxchg.h>
#ifdef nativeExchange8 #endif /* __SH__ */
return nativeExchange8(ptr, value);
#else
Uint8 tmp = 0;
privateWaitLock();
tmp = *ptr;
*ptr = value;
privateUnlock();
return tmp; /* SMP Exchange for x86 platform */
#endif #ifdef __X86__
} #include <x86/smpxchg.h>
#endif /* __X86__ */
SDL_bool /*
SDL_AtomicCompareThenSet8(volatile Uint8 * ptr, Uint8 oldvalue, Uint8 newvalue) This file provides 32, and 64 bit atomic operations. If the
{ operations are provided by the native hardware and operating system
#ifdef nativeCompareThenSet8 they are used. If they are not then the operations are emulated
return (SDL_bool)nativeCompareThenSet8(ptr, oldvalue, newvalue); using the SDL spin lock operations. If spin lock can not be
#else implemented then these functions must fail.
SDL_bool result = SDL_FALSE; */
privateWaitLock();
result = (*ptr == oldvalue);
if (result)
{
*ptr = newvalue;
}
privateUnlock();
return result;
#endif
}
SDL_bool void
SDL_AtomicTestThenSet8(volatile Uint8 * ptr) SDL_AtomicLock(SDL_SpinLock *lock)
{ {
#ifdef nativeTestThenSet8 unsigned volatile* l = (unsigned volatile*)lock;
return (SDL_bool)nativeTestThenSet8(ptr); Uint32 oldval = 0;
#else Uint32 newval = 1;
SDL_bool result = SDL_FALSE;
privateWaitLock(); oldval = _smp_xchg(l, newval);
result = (*ptr == 0); while(1 == oldval)
if (result)
{ {
*ptr = 1; oldval = _smp_xchg(l, newval);
} }
privateUnlock();
return result;
#endif
}
void
SDL_AtomicClear8(volatile Uint8 * ptr)
{
#ifdef nativeClear8
nativeClear8(ptr);
#else
privateWaitLock();
*ptr = 0;
privateUnlock();
return;
#endif
}
Uint8
SDL_AtomicFetchThenIncrement8(volatile Uint8 * ptr)
{
#ifdef nativeFetchThenIncrement8
return nativeFetchThenIncrement8(ptr);
#else
Uint8 tmp = 0;
privateWaitLock();
tmp = *ptr;
(*ptr)+= 1;
privateUnlock();
return tmp;
#endif
}
Uint8
SDL_AtomicFetchThenDecrement8(volatile Uint8 * ptr)
{
#ifdef nativeFetchThenDecrement8
return nativeFetchThenDecrement8(ptr);
#else
Uint8 tmp = 0;
privateWaitLock();
tmp = *ptr;
(*ptr) -= 1;
privateUnlock();
return tmp;
#endif
}
Uint8
SDL_AtomicFetchThenAdd8(volatile Uint8 * ptr, Uint8 value)
{
#ifdef nativeFetchThenAdd8
return nativeFetchThenAdd8(ptr, value);
#else
Uint8 tmp = 0;
privateWaitLock();
tmp = *ptr;
(*ptr)+= value;
privateUnlock();
return tmp;
#endif
}
Uint8
SDL_AtomicFetchThenSubtract8(volatile Uint8 * ptr, Uint8 value)
{
#ifdef nativeFetchThenSubtract8
return nativeFetchThenSubtract8(ptr, value);
#else
Uint8 tmp = 0;
privateWaitLock();
tmp = *ptr;
(*ptr)-= value;
privateUnlock();
return tmp;
#endif
} }
Uint8 void
SDL_AtomicIncrementThenFetch8(volatile Uint8 * ptr) SDL_AtomicUnlock(SDL_SpinLock *lock)
{ {
#ifdef nativeIncrementThenFetch8 unsigned volatile* l = (unsigned volatile*)lock;
return nativeIncrementThenFetch8(ptr); Uint32 newval = 0;
#else
Uint8 tmp = 0;
privateWaitLock(); _smp_xchg(l, newval);
(*ptr)+= 1;
tmp = *ptr;
privateUnlock();
return tmp;
#endif
} }
Uint8 /*
SDL_AtomicDecrementThenFetch8(volatile Uint8 * ptr) QNX 6.4.1 supports only 32 bit atomic access
{ */
#ifdef nativeDecrementThenFetch8
return nativeDecrementThenFetch8(ptr);
#else
Uint8 tmp = 0;
privateWaitLock();
(*ptr)-= 1;
tmp = *ptr;
privateUnlock();
return tmp;
#endif
}
Uint8 #undef nativeTestThenSet32
SDL_AtomicAddThenFetch8(volatile Uint8 * ptr, Uint8 value) #define nativeClear32
{ #define nativeFetchThenIncrement32
#ifdef nativeAddThenFetch8 #define nativeFetchThenDecrement32
return nativeAddThenFetch8(ptr, value); #define nativeFetchThenAdd32
#else #define nativeFetchThenSubtract32
Uint8 tmp = 0; #define nativeIncrementThenFetch32
#define nativeDecrementThenFetch32
#define nativeAddThenFetch32
#define nativeSubtractThenFetch32
#undef nativeTestThenSet64
#undef nativeClear64
#undef nativeFetchThenIncrement64
#undef nativeFetchThenDecrement64
#undef nativeFetchThenAdd64
#undef nativeFetchThenSubtract64
#undef nativeIncrementThenFetch64
#undef nativeDecrementThenFetch64
#undef nativeAddThenFetch64
#undef nativeSubtractThenFetch64
privateWaitLock(); /*
(*ptr)+= value; If any of the operations are not provided then we must emulate some
tmp = *ptr; of them. That means we need a nice implementation of spin locks
privateUnlock(); that avoids the "one big lock" problem. We use a vector of spin
locks and pick which one to use based on the address of the operand
of the function.
To generate the index of the lock we first shift by 3 bits to get
rid of the zero bits that result from 32 and 64 bit alignment of
data. We then mask off all but 5 bits and use those 5 bits as an
index into the table.
Picking the lock this way ensures that accesses to the same data at
the same time will go to the same lock. OTOH, accesses to different
data have only a 1/32 chance of hitting the same lock. That should
pretty much eliminate the chances of several atomic operations on
different data from waiting on the same "big lock". If it isn't
then the table of locks can be expanded to a new size so long as
the new size is a power of two.
*/
return tmp; static SDL_SpinLock locks[32] = {
#endif 0, 0, 0, 0, 0, 0, 0, 0,
} 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
};
Uint8 static __inline__ void
SDL_AtomicSubtractThenFetch8(volatile Uint8 * ptr, Uint8 value) privateWaitLock(volatile void *ptr)
{ {
#ifdef nativeSubtractThenFetch8 #if SIZEOF_VOIDP == 4
return nativeSubtractThenFetch8(ptr, value); Uint32 index = ((((Uint32)ptr) >> 3) & 0x1f);
#else #elif SIZEOF_VOIDP == 8
Uint8 tmp = 0; Uint64 index = ((((Uint64)ptr) >> 3) & 0x1f);
#endif /* SIZEOF_VOIDP */
privateWaitLock(); SDL_AtomicLock(&locks[index]);
(*ptr)-= value;
tmp = *ptr;
privateUnlock();
return tmp;
#endif
} }
/* 16 bit atomic operations */ static __inline__ void
privateUnlock(volatile void *ptr)
Uint16
SDL_AtomicExchange16(volatile Uint16 * ptr, Uint16 value)
{ {
#ifdef nativeExchange16 #if SIZEOF_VOIDP == 4
return nativeExchange16(ptr, value); Uint32 index = ((((Uint32)ptr) >> 3) & 0x1f);
#else #elif SIZEOF_VOIDP == 8
Uint16 tmp = 0; Uint64 index = ((((Uint64)ptr) >> 3) & 0x1f);
#endif /* SIZEOF_VOIDP */
privateWaitLock();
tmp = *ptr;
*ptr = value;
privateUnlock();
return tmp; SDL_AtomicUnlock(&locks[index]);
#endif
} }
SDL_bool /* 32 bit atomic operations */
SDL_AtomicCompareThenSet16(volatile Uint16 * ptr, Uint16 oldvalue, Uint16 newvalue)
{
#ifdef nativeCompareThenSet16
return (SDL_bool)nativeCompareThenSet16(ptr, oldvalue, newvalue);
#else
SDL_bool result = SDL_FALSE;
privateWaitLock();
result = (*ptr == oldvalue);
if (result)
{
*ptr = newvalue;
}
privateUnlock();
return result;
#endif
}
SDL_bool SDL_bool
SDL_AtomicTestThenSet16(volatile Uint16 * ptr) SDL_AtomicTestThenSet32(volatile Uint32 * ptr)
{ {
#ifdef nativeTestThenSet16 #ifdef nativeTestThenSet32
return (SDL_bool)nativeTestThenSet16(ptr);
#else #else
SDL_bool result = SDL_FALSE; SDL_bool result = SDL_FALSE;
privateWaitLock(); privateWaitLock(ptr);
result = (*ptr == 0); result = (*ptr == 0);
if (result) if (result)
{ {
*ptr = 1; *ptr = 1;
} }
privateUnlock(); privateUnlock(ptr);
return result; return result;
#endif #endif /* nativeTestThenSet32 */
} }
void void
SDL_AtomicClear16(volatile Uint16 * ptr) SDL_AtomicClear32(volatile Uint32 * ptr)
{ {
#ifdef nativeClear16 #ifdef nativeClear32
nativeClear16(ptr); atomic_clr(ptr, 0xFFFFFFFF);
#else #else
privateWaitLock(); privateWaitLock(ptr);
*ptr = 0; *ptr = 0;
privateUnlock(); privateUnlock(ptr);
return; return;
#endif #endif /* nativeClear32 */
} }
Uint16 Uint32
SDL_AtomicFetchThenIncrement16(volatile Uint16 * ptr) SDL_AtomicFetchThenIncrement32(volatile Uint32 * ptr)
{ {
#ifdef nativeFetchThenIncrement16 #ifdef nativeFetchThenIncrement32
return nativeFetchThenIncrement16(ptr); return atomic_add_value(ptr, 0x00000001);
#else #else
Uint16 tmp = 0; Uint32 tmp = 0;
privateWaitLock(); privateWaitLock(ptr);
tmp = *ptr; tmp = *ptr;
(*ptr)+= 1; (*ptr)+= 1;
privateUnlock(); privateUnlock(ptr);
return tmp; return tmp;
#endif #endif /* nativeFetchThenIncrement32 */
} }
Uint16 Uint32
SDL_AtomicFetchThenDecrement16(volatile Uint16 * ptr) SDL_AtomicFetchThenDecrement32(volatile Uint32 * ptr)
{ {
#ifdef nativeFetchThenDecrement16 #ifdef nativeFetchThenDecrement32
return nativeFetchThenDecrement16(ptr); return atomic_sub_value(ptr, 0x00000001);
#else #else
Uint16 tmp = 0; Uint32 tmp = 0;
privateWaitLock(); privateWaitLock(ptr);
tmp = *ptr; tmp = *ptr;
(*ptr) -= 1; (*ptr) -= 1;
privateUnlock(); privateUnlock(ptr);
return tmp; return tmp;
#endif #endif /* nativeFetchThenDecrement32 */
} }
Uint16 Uint32
SDL_AtomicFetchThenAdd16(volatile Uint16 * ptr, Uint16 value) SDL_AtomicFetchThenAdd32(volatile Uint32 * ptr, Uint32 value)
{ {
#ifdef nativeFetchThenAdd16 #ifdef nativeFetchThenAdd32
return nativeFetchThenAdd16(ptr, value); return atomic_add_value(ptr, value);
#else #else
Uint16 tmp = 0; Uint32 tmp = 0;
privateWaitLock(); privateWaitLock(ptr);
tmp = *ptr; tmp = *ptr;
(*ptr)+= value; (*ptr)+= value;
privateUnlock(); privateUnlock(ptr);
return tmp; return tmp;
#endif #endif /* nativeFetchThenAdd32 */
} }
Uint16 Uint32
SDL_AtomicFetchThenSubtract16(volatile Uint16 * ptr, Uint16 value) SDL_AtomicFetchThenSubtract32(volatile Uint32 * ptr, Uint32 value)
{ {
#ifdef nativeFetchThenSubtract16 #ifdef nativeFetchThenSubtract32
return nativeFetchThenSubtract16(ptr, value); return atomic_sub_value(ptr, value);
#else #else
Uint16 tmp = 0; Uint32 tmp = 0;
privateWaitLock(); privateWaitLock(ptr);
tmp = *ptr; tmp = *ptr;
(*ptr)-= value; (*ptr)-= value;
privateUnlock(); privateUnlock(ptr);
return tmp; return tmp;
#endif #endif /* nativeFetchThenSubtract32 */
} }
Uint16 Uint32
SDL_AtomicIncrementThenFetch16(volatile Uint16 * ptr) SDL_AtomicIncrementThenFetch32(volatile Uint32 * ptr)
{ {
#ifdef nativeIncrementThenFetch16 #ifdef nativeIncrementThenFetch32
return nativeIncrementThenFetch16(ptr); atomic_add(ptr, 0x00000001);
return atomic_add_value(ptr, 0x00000000);
#else #else
Uint16 tmp = 0; Uint32 tmp = 0;
privateWaitLock(); privateWaitLock(ptr);
(*ptr)+= 1; (*ptr)+= 1;
tmp = *ptr; tmp = *ptr;
privateUnlock(); privateUnlock(ptr);
return tmp; return tmp;
#endif #endif /* nativeIncrementThenFetch32 */
} }
Uint16 Uint32
SDL_AtomicDecrementThenFetch16(volatile Uint16 * ptr) SDL_AtomicDecrementThenFetch32(volatile Uint32 * ptr)
{ {
#ifdef nativeDecrementThenFetch16 #ifdef nativeDecrementThenFetch32
return nativeDecrementThenFetch16(ptr); atomic_sub(ptr, 0x00000001);
return atomic_sub_value(ptr, 0x00000000);
#else #else
Uint16 tmp = 0; Uint32 tmp = 0;
privateWaitLock(); privateWaitLock(ptr);
(*ptr)-= 1; (*ptr)-= 1;
tmp = *ptr; tmp = *ptr;
privateUnlock(); privateUnlock(ptr);
return tmp; return tmp;
#endif #endif /* nativeDecrementThenFetch32 */
} }
Uint16 Uint32
SDL_AtomicAddThenFetch16(volatile Uint16 * ptr, Uint16 value) SDL_AtomicAddThenFetch32(volatile Uint32 * ptr, Uint32 value)
{ {
#ifdef nativeAddThenFetch16 #ifdef nativeAddThenFetch32
return nativeAddThenFetch16(ptr, value); atomic_add(ptr, value);
return atomic_add_value(ptr, 0x00000000);
#else #else
Uint16 tmp = 0; Uint32 tmp = 0;
privateWaitLock(); privateWaitLock(ptr);
(*ptr)+= value; (*ptr)+= value;
tmp = *ptr; tmp = *ptr;
privateUnlock(); privateUnlock(ptr);
return tmp; return tmp;
#endif #endif /* nativeAddThenFetch32 */
} }
Uint16 Uint32
SDL_AtomicSubtractThenFetch16(volatile Uint16 * ptr, Uint16 value) SDL_AtomicSubtractThenFetch32(volatile Uint32 * ptr, Uint32 value)
{ {
#ifdef nativeSubtractThenFetch16 #ifdef nativeSubtractThenFetch32
return nativeSubtractThenFetch16(ptr, value); atomic_sub(ptr, value);
return atomic_sub_value(ptr, 0x00000000);
#else #else
Uint16 tmp = 0; Uint32 tmp = 0;
privateWaitLock(); privateWaitLock(ptr);
(*ptr)-= value; (*ptr)-= value;
tmp = *ptr; tmp = *ptr;
privateUnlock(); privateUnlock(ptr);
return tmp; return tmp;
#endif #endif /* nativeSubtractThenFetch32 */
} }
/* 64 bit atomic operations */ /* 64 bit atomic operations */
#ifdef SDL_HAS_64BIT_TYPE #ifdef SDL_HAS_64BIT_TYPE
Uint64
SDL_AtomicExchange64(volatile Uint64 * ptr, Uint64 value)
{
#ifdef nativeExchange64
return nativeExchange64(ptr, value);
#else
Uint64 tmp = 0;
privateWaitLock();
tmp = *ptr;
*ptr = value;
privateUnlock();
return tmp;
#endif
}
SDL_bool
SDL_AtomicCompareThenSet64(volatile Uint64 * ptr, Uint64 oldvalue, Uint64 newvalue)
{
#ifdef nativeCompareThenSet64
return (SDL_bool)nativeCompareThenSet64(ptr, oldvalue, newvalue);
#else
SDL_bool result = SDL_FALSE;
privateWaitLock();
result = (*ptr == oldvalue);
if (result)
{
*ptr = newvalue;
}
privateUnlock();
return result;
#endif
}
SDL_bool SDL_bool
SDL_AtomicTestThenSet64(volatile Uint64 * ptr) SDL_AtomicTestThenSet64(volatile Uint64 * ptr)
{ {
#ifdef nativeTestThenSet64 #ifdef nativeTestThenSet64
return (SDL_bool)nativeTestThenSet64(ptr);
#else #else
SDL_bool result = SDL_FALSE; SDL_bool result = SDL_FALSE;
privateWaitLock(); privateWaitLock(ptr);
result = (*ptr == 0); result = (*ptr == 0);
if (result) if (result)
{ {
*ptr = 1; *ptr = 1;
} }
privateUnlock(); privateUnlock(ptr);
return result; return result;
#endif #endif /* nativeTestThenSet64 */
} }
void void
SDL_AtomicClear64(volatile Uint64 * ptr) SDL_AtomicClear64(volatile Uint64 * ptr)
{ {
#ifdef nativeClear64 #ifdef nativeClear64
nativeClear64(ptr);
#else #else
privateWaitLock(); privateWaitLock(ptr);
*ptr = 0; *ptr = 0;
privateUnlock(); privateUnlock(ptr);
return; return;
#endif #endif /* nativeClear64 */
} }
Uint64 Uint64
SDL_AtomicFetchThenIncrement64(volatile Uint64 * ptr) SDL_AtomicFetchThenIncrement64(volatile Uint64 * ptr)
{ {
#ifdef nativeFetchThenIncrement64 #ifdef nativeFetchThenIncrement64
return nativeFetchThenIncrement64(ptr);
#else #else
Uint64 tmp = 0; Uint64 tmp = 0;
privateWaitLock(); privateWaitLock(ptr);
tmp = *ptr; tmp = *ptr;
(*ptr)+= 1; (*ptr)+= 1;
privateUnlock(); privateUnlock(ptr);
return tmp; return tmp;
#endif #endif /* nativeFetchThenIncrement64 */
} }
Uint64 Uint64
SDL_AtomicFetchThenDecrement64(volatile Uint64 * ptr) SDL_AtomicFetchThenDecrement64(volatile Uint64 * ptr)
{ {
#ifdef nativeFetchThenDecrement64 #ifdef nativeFetchThenDecrement64
return nativeFetchThenDecrement64(ptr);
#else #else
Uint64 tmp = 0; Uint64 tmp = 0;
privateWaitLock(); privateWaitLock(ptr);
tmp = *ptr; tmp = *ptr;
(*ptr) -= 1; (*ptr) -= 1;
privateUnlock(); privateUnlock(ptr);
return tmp; return tmp;
#endif #endif /* nativeFetchThenDecrement64 */
} }
Uint64 Uint64
SDL_AtomicFetchThenAdd64(volatile Uint64 * ptr, Uint64 value) SDL_AtomicFetchThenAdd64(volatile Uint64 * ptr, Uint64 value)
{ {
#ifdef nativeFetchThenAdd64 #ifdef nativeFetchThenAdd64
return nativeFetchThenAdd64(ptr, value);
#else #else
Uint64 tmp = 0; Uint64 tmp = 0;
privateWaitLock(); privateWaitLock(ptr);
tmp = *ptr; tmp = *ptr;
(*ptr)+= value; (*ptr)+= value;
privateUnlock(); privateUnlock(ptr);
return tmp; return tmp;
#endif #endif /* nativeFetchThenAdd64 */
} }
Uint64 Uint64
SDL_AtomicFetchThenSubtract64(volatile Uint64 * ptr, Uint64 value) SDL_AtomicFetchThenSubtract64(volatile Uint64 * ptr, Uint64 value)
{ {
#ifdef nativeFetchThenSubtract64 #ifdef nativeFetchThenSubtract64
return nativeFetchThenSubtract64(ptr, value);
#else #else
Uint64 tmp = 0; Uint64 tmp = 0;
privateWaitLock(); privateWaitLock(ptr);
tmp = *ptr; tmp = *ptr;
(*ptr)-= value; (*ptr)-= value;
privateUnlock(); privateUnlock(ptr);
return tmp; return tmp;
#endif #endif /* nativeFetchThenSubtract64 */
} }
Uint64 Uint64
SDL_AtomicIncrementThenFetch64(volatile Uint64 * ptr) SDL_AtomicIncrementThenFetch64(volatile Uint64 * ptr)
{ {
#ifdef nativeIncrementThenFetch64 #ifdef nativeIncrementThenFetch64
return nativeIncrementThenFetch64(ptr);
#else #else
Uint64 tmp = 0; Uint64 tmp = 0;
privateWaitLock(); privateWaitLock(ptr);
(*ptr)+= 1; (*ptr)+= 1;
tmp = *ptr; tmp = *ptr;
privateUnlock(); privateUnlock(ptr);
return tmp; return tmp;
#endif #endif /* nativeIncrementThenFetch64 */
} }
Uint64 Uint64
SDL_AtomicDecrementThenFetch64(volatile Uint64 * ptr) SDL_AtomicDecrementThenFetch64(volatile Uint64 * ptr)
{ {
#ifdef nativeDecrementThenFetch64 #ifdef nativeDecrementThenFetch64
return nativeDecrementThenFetch64(ptr);
#else #else
Uint64 tmp = 0; Uint64 tmp = 0;
privateWaitLock(); privateWaitLock(ptr);
(*ptr)-= 1; (*ptr)-= 1;
tmp = *ptr; tmp = *ptr;
privateUnlock(); privateUnlock(ptr);
return tmp; return tmp;
#endif #endif /* nativeDecrementThenFetch64 */
} }
Uint64 Uint64
SDL_AtomicAddThenFetch64(volatile Uint64 * ptr, Uint64 value) SDL_AtomicAddThenFetch64(volatile Uint64 * ptr, Uint64 value)
{ {
#ifdef nativeAddThenFetch64 #ifdef nativeAddThenFetch64
return nativeAddThenFetch64(ptr, value);
#else #else
Uint64 tmp = 0; Uint64 tmp = 0;
privateWaitLock(); privateWaitLock(ptr);
(*ptr)+= value; (*ptr)+= value;
tmp = *ptr; tmp = *ptr;
privateUnlock(); privateUnlock(ptr);
return tmp; return tmp;
#endif #endif /* nativeAddThenFetch64 */
} }
Uint64 Uint64
SDL_AtomicSubtractThenFetch64(volatile Uint64 * ptr, Uint64 value) SDL_AtomicSubtractThenFetch64(volatile Uint64 * ptr, Uint64 value)
{ {
#ifdef nativeSubtractThenFetch64 #ifdef nativeSubtractThenFetch64
return nativeSubtractThenFetch64(ptr, value);
#else #else
Uint64 tmp = 0; Uint64 tmp = 0;
privateWaitLock(); privateWaitLock(ptr);
(*ptr)-= value; (*ptr)-= value;
tmp = *ptr; tmp = *ptr;
privateUnlock(); privateUnlock(ptr);
return tmp;
#endif
}
#endif
/* QNX native 32 bit atomic operations */
Uint32
SDL_AtomicExchange32(volatile Uint32 * ptr, Uint32 value)
{
Uint32 tmp = 0;
privateWaitLock();
tmp = *ptr;
*ptr = value;
privateUnlock();
return tmp; return tmp;
#endif /* nativeSubtractThenFetch64 */
} }
SDL_bool #endif /* SDL_HAS_64BIT_TYPE */
SDL_AtomicCompareThenSet32(volatile Uint32 * ptr, Uint32 oldvalue, Uint32 newvalue)
{
SDL_bool result = SDL_FALSE;
privateWaitLock();
result = (*ptr == oldvalue);
if (result)
{
*ptr = newvalue;
}
privateUnlock();
return result;
}
SDL_bool
SDL_AtomicTestThenSet32(volatile Uint32 * ptr)
{
SDL_bool result = SDL_FALSE;
privateWaitLock();
result = (*ptr == 0);
if (result)
{
*ptr = 1;
}
privateUnlock();
return result;
}
void
SDL_AtomicClear32(volatile Uint32 * ptr)
{
atomic_clr(ptr, 0xFFFFFFFF);
}
Uint32
SDL_AtomicFetchThenIncrement32(volatile Uint32 * ptr)
{
return atomic_add_value(ptr, 0x00000001);
}
Uint32
SDL_AtomicFetchThenDecrement32(volatile Uint32 * ptr)
{
return atomic_sub_value(ptr, 0x00000001);
}
Uint32
SDL_AtomicFetchThenAdd32(volatile Uint32 * ptr, Uint32 value)
{
return atomic_add_value(ptr, value);
}
Uint32
SDL_AtomicFetchThenSubtract32(volatile Uint32 * ptr, Uint32 value)
{
return atomic_sub_value(ptr, value);
}
Uint32
SDL_AtomicIncrementThenFetch32(volatile Uint32 * ptr)
{
atomic_add(ptr, 0x00000001);
return atomic_add_value(ptr, 0x00000000);
}
Uint32
SDL_AtomicDecrementThenFetch32(volatile Uint32 * ptr)
{
atomic_sub(ptr, 0x00000001);
return atomic_sub_value(ptr, 0x00000000);
}
Uint32
SDL_AtomicAddThenFetch32(volatile Uint32 * ptr, Uint32 value)
{
atomic_add(ptr, value);
return atomic_add_value(ptr, 0x00000000);
}
Uint32
SDL_AtomicSubtractThenFetch32(volatile Uint32 * ptr, Uint32 value)
{
atomic_sub(ptr, value);
return atomic_sub_value(ptr, 0x00000000);
}
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment