Commit 9a25579e authored by Mike Gorchak's avatar Mike Gorchak

Support for the atomic operations for ARM, PPC, MIPS, SH, X86 platforms has been added.

--HG--
extra : convert_revision : svn%3Ac70aab31-4412-0410-b14c-859654838e24/trunk%404010
parent 7e6cba93
......@@ -18,865 +18,488 @@
Sam Lantinga
slouken@libsdl.org
QNX native atomic operations
Copyright (C) 2009 Mike Gorchak
(mike@malva.ua, lestat@i.com.ua)
*/
#include "SDL_stdinc.h"
#include "SDL_atomic.h"
#include <atomic.h>
/*
This file provides 8, 16, 32, and 64 bit atomic operations. If the
operations are provided by the native hardware and operating system
they are used. If they are not then the operations are emulated
using the SDL mutex operations.
*/
/*
First, detect whether the operations are supported and create
#defines that indicate that they do exist. The goal is to have all
the system dependent code in the top part of the file so that the
bottom can be used unchanged across all platforms.
Second, #define all the operations in each size class that are
supported. Doing this allows supported operations to be used along
side of emulated operations.
*/
/*
Emulated version.
Assume there is no support for atomic operations. All such
operations are implemented using SDL mutex operations.
*/
#ifdef EMULATED_ATOMIC_OPERATIONS
#undef EMULATED_ATOMIC_OPERATIONS
#endif
#ifdef EMULATED_ATOMIC_OPERATIONS
#define HAVE_ALL_8_BIT_OPS
#define nativeExchange8(ptr, value) ()
#define nativeCompareThenSet8(ptr, oldvalue, newvalue) ()
#define nativeTestThenSet8(ptr) ()
#define nativeClear8(ptr) ()
#define nativeFetchThenIncrement8(ptr) ()
#define nativeFetchThenDecrement8(ptr) ()
#define nativeFetchThenAdd8(ptr, value) ()
#define nativeFetchThenSubtract8(ptr, value) ()
#define nativeIncrementThenFetch8(ptr) ()
#define nativeDecrementThenFetch8(ptr) ()
#define nativeAddThenFetch8(ptr, value) ()
#define nativeSubtractThenFetch8(ptr, value) ()
#endif
#ifdef EMULATED_ATOMIC_OPERATIONS
#define HAVE_ALL_16_BIT_OPS
#define nativeExchange16(ptr, value) ()
#define nativeCompareThenSet16(ptr, oldvalue, newvalue) ()
#define nativeTestThenSet16(ptr) ()
#define nativeClear16(ptr) ()
#define nativeFetchThenIncrement16(ptr) ()
#define nativeFetchThenDecrement16(ptr) ()
#define nativeFetchThenAdd16(ptr, value) ()
#define nativeFetchThenSubtract16(ptr, value) ()
#define nativeIncrementThenFetch16(ptr) ()
#define nativeDecrementThenFetch16(ptr) ()
#define nativeAddThenFetch16(ptr, value) ()
#define nativeSubtractThenFetch16(ptr, value) ()
#endif
#ifdef EMULATED_ATOMIC_OPERATIONS
#define HAVE_ALL_64_BIT_OPS
#define nativeExchange64(ptr, value) ()
#define nativeCompareThenSet64(ptr, oldvalue, newvalue) ()
#define nativeTestThenSet64(ptr) ()
#define nativeClear64(ptr) ()
#define nativeFetchThenIncrement64(ptr) ()
#define nativeFetchThenDecrement64(ptr) ()
#define nativeFetchThenAdd64(ptr, value) ()
#define nativeFetchThenSubtract64(ptr, value) ()
#define nativeIncrementThenFetch64(ptr) ()
#define nativeDecrementThenFetch64(ptr) ()
#define nativeAddThenFetch64(ptr, value) ()
#define nativeSubtractThenFetch64(ptr, value) ()
#endif
/*
If any of the operations are not provided then we must emulate some of
them.
*/
#if !defined(HAVE_ALL_8_BIT_OPS) || !defined(HAVE_ALL_16_BIT_OPS) || !defined(HAVE_ALL_64_BIT_OPS)
#include "SDL_mutex.h"
#include "SDL_error.h"
static SDL_mutex * lock = NULL;
static __inline__ void
privateWaitLock()
{
if(NULL == lock)
{
lock = SDL_CreateMutex();
if (NULL == lock)
{
SDL_SetError("SDL_atomic.c: can't create a mutex");
return;
}
}
#include <atomic.h>
if (-1 == SDL_LockMutex(lock))
{
SDL_SetError("SDL_atomic.c: can't lock mutex");
}
}
/* SMP Exchange for PPC platform */
#ifdef __PPC__
#include <ppc/smpxchg.h>
#endif /* __PPC__ */
static __inline__ void
privateUnlock()
{
if (-1 == SDL_UnlockMutex(lock))
{
SDL_SetError("SDL_atomic.c: can't unlock mutex");
}
}
/* SMP Exchange for ARM platform */
#ifdef __ARM__
#include <arm/smpxchg.h>
#endif /* __ARM__ */
#endif
/* SMP Exchange for MIPS platform */
#if defined (__MIPSEB__) || defined(__MIPSEL__)
#include <mips/smpxchg.h>
#endif /* __MIPSEB__ || __MIPSEL__ */
/* 8 bit atomic operations */
/* SMP Exchange for SH platform */
#ifdef __SH__
#include <sh/smpxchg.h>
#endif /* __SH__ */
Uint8
SDL_AtomicExchange8(volatile Uint8 * ptr, Uint8 value)
{
#ifdef nativeExchange8
return nativeExchange8(ptr, value);
#else
Uint8 tmp = 0;
/* SMP Exchange for x86 platform */
#ifdef __X86__
#include <x86/smpxchg.h>
#endif /* __X86__ */
privateWaitLock();
tmp = *ptr;
*ptr = value;
privateUnlock();
return tmp;
#endif
}
/* Atomically set *ptr to newvalue if and only if it currently equals
   oldvalue; returns SDL_TRUE on success. Uses the native macro when
   available, otherwise the global-mutex emulation. */
SDL_bool
SDL_AtomicCompareThenSet8(volatile Uint8 * ptr, Uint8 oldvalue, Uint8 newvalue)
{
#ifdef nativeCompareThenSet8
    return (SDL_bool)nativeCompareThenSet8(ptr, oldvalue, newvalue);
#else
    SDL_bool matched = SDL_FALSE;

    privateWaitLock();
    matched = (*ptr == oldvalue) ? SDL_TRUE : SDL_FALSE;
    if (matched)
    {
        *ptr = newvalue;
    }
    privateUnlock();
    return matched;
#endif
}
/*
This file provides 32, and 64 bit atomic operations. If the
operations are provided by the native hardware and operating system
they are used. If they are not then the operations are emulated
using the SDL spin lock operations. If spin lock can not be
implemented then these functions must fail.
*/
SDL_bool
SDL_AtomicTestThenSet8(volatile Uint8 * ptr)
void
SDL_AtomicLock(SDL_SpinLock *lock)
{
#ifdef nativeTestThenSet8
return (SDL_bool)nativeTestThenSet8(ptr);
#else
SDL_bool result = SDL_FALSE;
unsigned volatile* l = (unsigned volatile*)lock;
Uint32 oldval = 0;
Uint32 newval = 1;
privateWaitLock();
result = (*ptr == 0);
if (result)
oldval = _smp_xchg(l, newval);
while(1 == oldval)
{
*ptr = 1;
oldval = _smp_xchg(l, newval);
}
privateUnlock();
return result;
#endif
}
void
SDL_AtomicClear8(volatile Uint8 * ptr)
SDL_AtomicUnlock(SDL_SpinLock *lock)
{
#ifdef nativeClear8
nativeClear8(ptr);
#else
privateWaitLock();
*ptr = 0;
privateUnlock();
unsigned volatile* l = (unsigned volatile*)lock;
Uint32 newval = 0;
return;
#endif
_smp_xchg(l, newval);
}
Uint8
SDL_AtomicFetchThenIncrement8(volatile Uint8 * ptr)
{
#ifdef nativeFetchThenIncrement8
return nativeFetchThenIncrement8(ptr);
#else
Uint8 tmp = 0;
privateWaitLock();
tmp = *ptr;
(*ptr)+= 1;
privateUnlock();
return tmp;
#endif
}
Uint8
SDL_AtomicFetchThenDecrement8(volatile Uint8 * ptr)
{
#ifdef nativeFetchThenDecrement8
return nativeFetchThenDecrement8(ptr);
#else
Uint8 tmp = 0;
privateWaitLock();
tmp = *ptr;
(*ptr) -= 1;
privateUnlock();
return tmp;
#endif
}
/* Atomically add value to *ptr and return the PRE-add contents. */
Uint8
SDL_AtomicFetchThenAdd8(volatile Uint8 * ptr, Uint8 value)
{
#ifdef nativeFetchThenAdd8
    return nativeFetchThenAdd8(ptr, value);
#else
    Uint8 previous;

    privateWaitLock();
    previous = *ptr;
    *ptr = (Uint8)(previous + value);
    privateUnlock();
    return previous;
#endif
}
/* Atomically subtract value from *ptr and return the PRE-subtract contents. */
Uint8
SDL_AtomicFetchThenSubtract8(volatile Uint8 * ptr, Uint8 value)
{
#ifdef nativeFetchThenSubtract8
    return nativeFetchThenSubtract8(ptr, value);
#else
    Uint8 previous;

    privateWaitLock();
    previous = *ptr;
    *ptr = (Uint8)(previous - value);
    privateUnlock();
    return previous;
#endif
}
Uint8
SDL_AtomicIncrementThenFetch8(volatile Uint8 * ptr)
{
#ifdef nativeIncrementThenFetch8
return nativeIncrementThenFetch8(ptr);
#else
Uint8 tmp = 0;
privateWaitLock();
(*ptr)+= 1;
tmp = *ptr;
privateUnlock();
return tmp;
#endif
}
Uint8
SDL_AtomicDecrementThenFetch8(volatile Uint8 * ptr)
{
#ifdef nativeDecrementThenFetch8
return nativeDecrementThenFetch8(ptr);
#else
Uint8 tmp = 0;
privateWaitLock();
(*ptr)-= 1;
tmp = *ptr;
privateUnlock();
return tmp;
#endif
}
/*
QNX 6.4.1 supports only 32 bit atomic access
*/
Uint8
SDL_AtomicAddThenFetch8(volatile Uint8 * ptr, Uint8 value)
{
#ifdef nativeAddThenFetch8
return nativeAddThenFetch8(ptr, value);
#else
Uint8 tmp = 0;
#undef nativeTestThenSet32
#define nativeClear32
#define nativeFetchThenIncrement32
#define nativeFetchThenDecrement32
#define nativeFetchThenAdd32
#define nativeFetchThenSubtract32
#define nativeIncrementThenFetch32
#define nativeDecrementThenFetch32
#define nativeAddThenFetch32
#define nativeSubtractThenFetch32
#undef nativeTestThenSet64
#undef nativeClear64
#undef nativeFetchThenIncrement64
#undef nativeFetchThenDecrement64
#undef nativeFetchThenAdd64
#undef nativeFetchThenSubtract64
#undef nativeIncrementThenFetch64
#undef nativeDecrementThenFetch64
#undef nativeAddThenFetch64
#undef nativeSubtractThenFetch64
privateWaitLock();
(*ptr)+= value;
tmp = *ptr;
privateUnlock();
/*
If any of the operations are not provided then we must emulate some
of them. That means we need a nice implementation of spin locks
that avoids the "one big lock" problem. We use a vector of spin
locks and pick which one to use based on the address of the operand
of the function.
To generate the index of the lock we first shift by 3 bits to get
rid of the zero bits that result from 32 and 64 bit alignment of
data. We then mask off all but 5 bits and use those 5 bits as an
index into the table.
Picking the lock this way ensures that accesses to the same data at
the same time will go to the same lock. OTOH, accesses to different
data have only a 1/32 chance of hitting the same lock. That should
pretty much eliminate the chances of several atomic operations on
different data from waiting on the same "big lock". If it isn't
then the table of locks can be expanded to a new size so long as
the new size is a power of two.
*/
return tmp;
#endif
}
static SDL_SpinLock locks[32] = {
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
};
Uint8
SDL_AtomicSubtractThenFetch8(volatile Uint8 * ptr, Uint8 value)
static __inline__ void
privateWaitLock(volatile void *ptr)
{
#ifdef nativeSubtractThenFetch8
return nativeSubtractThenFetch8(ptr, value);
#else
Uint8 tmp = 0;
privateWaitLock();
(*ptr)-= value;
tmp = *ptr;
privateUnlock();
#if SIZEOF_VOIDP == 4
Uint32 index = ((((Uint32)ptr) >> 3) & 0x1f);
#elif SIZEOF_VOIDP == 8
Uint64 index = ((((Uint64)ptr) >> 3) & 0x1f);
#endif /* SIZEOF_VOIDP */
return tmp;
#endif
SDL_AtomicLock(&locks[index]);
}
/* 16 bit atomic operations */
Uint16
SDL_AtomicExchange16(volatile Uint16 * ptr, Uint16 value)
static __inline__ void
privateUnlock(volatile void *ptr)
{
#ifdef nativeExchange16
return nativeExchange16(ptr, value);
#else
Uint16 tmp = 0;
#if SIZEOF_VOIDP == 4
Uint32 index = ((((Uint32)ptr) >> 3) & 0x1f);
#elif SIZEOF_VOIDP == 8
Uint64 index = ((((Uint64)ptr) >> 3) & 0x1f);
#endif /* SIZEOF_VOIDP */
privateWaitLock();
tmp = *ptr;
*ptr = value;
privateUnlock();
return tmp;
#endif
SDL_AtomicUnlock(&locks[index]);
}
/* Atomically set *ptr to newvalue if and only if it currently equals
   oldvalue; returns SDL_TRUE on success. */
SDL_bool
SDL_AtomicCompareThenSet16(volatile Uint16 * ptr, Uint16 oldvalue, Uint16 newvalue)
{
#ifdef nativeCompareThenSet16
    return (SDL_bool)nativeCompareThenSet16(ptr, oldvalue, newvalue);
#else
    SDL_bool matched = SDL_FALSE;

    privateWaitLock();
    matched = (*ptr == oldvalue) ? SDL_TRUE : SDL_FALSE;
    if (matched)
    {
        *ptr = newvalue;
    }
    privateUnlock();
    return matched;
#endif
}
/* 32 bit atomic operations */
SDL_bool
SDL_AtomicTestThenSet16(volatile Uint16 * ptr)
SDL_AtomicTestThenSet32(volatile Uint32 * ptr)
{
#ifdef nativeTestThenSet16
return (SDL_bool)nativeTestThenSet16(ptr);
#ifdef nativeTestThenSet32
#else
SDL_bool result = SDL_FALSE;
privateWaitLock();
privateWaitLock(ptr);
result = (*ptr == 0);
if (result)
{
*ptr = 1;
}
privateUnlock();
privateUnlock(ptr);
return result;
#endif
#endif /* nativeTestThenSet32 */
}
void
SDL_AtomicClear16(volatile Uint16 * ptr)
SDL_AtomicClear32(volatile Uint32 * ptr)
{
#ifdef nativeClear16
nativeClear16(ptr);
#ifdef nativeClear32
atomic_clr(ptr, 0xFFFFFFFF);
#else
privateWaitLock();
privateWaitLock(ptr);
*ptr = 0;
privateUnlock();
privateUnlock(ptr);
return;
#endif
#endif /* nativeClear32 */
}
Uint16
SDL_AtomicFetchThenIncrement16(volatile Uint16 * ptr)
Uint32
SDL_AtomicFetchThenIncrement32(volatile Uint32 * ptr)
{
#ifdef nativeFetchThenIncrement16
return nativeFetchThenIncrement16(ptr);
#ifdef nativeFetchThenIncrement32
return atomic_add_value(ptr, 0x00000001);
#else
Uint16 tmp = 0;
Uint32 tmp = 0;
privateWaitLock();
privateWaitLock(ptr);
tmp = *ptr;
(*ptr)+= 1;
privateUnlock();
privateUnlock(ptr);
return tmp;
#endif
#endif /* nativeFetchThenIncrement32 */
}
Uint16
SDL_AtomicFetchThenDecrement16(volatile Uint16 * ptr)
Uint32
SDL_AtomicFetchThenDecrement32(volatile Uint32 * ptr)
{
#ifdef nativeFetchThenDecrement16
return nativeFetchThenDecrement16(ptr);
#ifdef nativeFetchThenDecrement32
return atomic_sub_value(ptr, 0x00000001);
#else
Uint16 tmp = 0;
Uint32 tmp = 0;
privateWaitLock();
privateWaitLock(ptr);
tmp = *ptr;
(*ptr) -= 1;
privateUnlock();
privateUnlock(ptr);
return tmp;
#endif
#endif /* nativeFetchThenDecrement32 */
}
Uint16
SDL_AtomicFetchThenAdd16(volatile Uint16 * ptr, Uint16 value)
Uint32
SDL_AtomicFetchThenAdd32(volatile Uint32 * ptr, Uint32 value)
{
#ifdef nativeFetchThenAdd16
return nativeFetchThenAdd16(ptr, value);
#ifdef nativeFetchThenAdd32
return atomic_add_value(ptr, value);
#else
Uint16 tmp = 0;
Uint32 tmp = 0;
privateWaitLock();
privateWaitLock(ptr);
tmp = *ptr;
(*ptr)+= value;
privateUnlock();
privateUnlock(ptr);
return tmp;
#endif
#endif /* nativeFetchThenAdd32 */
}
Uint16
SDL_AtomicFetchThenSubtract16(volatile Uint16 * ptr, Uint16 value)
Uint32
SDL_AtomicFetchThenSubtract32(volatile Uint32 * ptr, Uint32 value)
{
#ifdef nativeFetchThenSubtract16
return nativeFetchThenSubtract16(ptr, value);
#ifdef nativeFetchThenSubtract32
return atomic_sub_value(ptr, value);
#else
Uint16 tmp = 0;
Uint32 tmp = 0;
privateWaitLock();
privateWaitLock(ptr);
tmp = *ptr;
(*ptr)-= value;
privateUnlock();
privateUnlock(ptr);
return tmp;
#endif
#endif /* nativeFetchThenSubtract32 */
}
Uint16
SDL_AtomicIncrementThenFetch16(volatile Uint16 * ptr)
Uint32
SDL_AtomicIncrementThenFetch32(volatile Uint32 * ptr)
{
#ifdef nativeIncrementThenFetch16
return nativeIncrementThenFetch16(ptr);
#ifdef nativeIncrementThenFetch32
atomic_add(ptr, 0x00000001);
return atomic_add_value(ptr, 0x00000000);
#else
Uint16 tmp = 0;
Uint32 tmp = 0;
privateWaitLock();
privateWaitLock(ptr);
(*ptr)+= 1;
tmp = *ptr;
privateUnlock();
privateUnlock(ptr);
return tmp;
#endif
#endif /* nativeIncrementThenFetch32 */
}
Uint16
SDL_AtomicDecrementThenFetch16(volatile Uint16 * ptr)
Uint32
SDL_AtomicDecrementThenFetch32(volatile Uint32 * ptr)
{
#ifdef nativeDecrementThenFetch16
return nativeDecrementThenFetch16(ptr);
#ifdef nativeDecrementThenFetch32
atomic_sub(ptr, 0x00000001);
return atomic_sub_value(ptr, 0x00000000);
#else
Uint16 tmp = 0;
Uint32 tmp = 0;
privateWaitLock();
privateWaitLock(ptr);
(*ptr)-= 1;
tmp = *ptr;
privateUnlock();
privateUnlock(ptr);
return tmp;
#endif
#endif /* nativeDecrementThenFetch32 */
}
Uint16
SDL_AtomicAddThenFetch16(volatile Uint16 * ptr, Uint16 value)
Uint32
SDL_AtomicAddThenFetch32(volatile Uint32 * ptr, Uint32 value)
{
#ifdef nativeAddThenFetch16
return nativeAddThenFetch16(ptr, value);
#ifdef nativeAddThenFetch32
atomic_add(ptr, value);
return atomic_add_value(ptr, 0x00000000);
#else
Uint16 tmp = 0;
Uint32 tmp = 0;
privateWaitLock();
privateWaitLock(ptr);
(*ptr)+= value;
tmp = *ptr;
privateUnlock();
privateUnlock(ptr);
return tmp;
#endif
#endif /* nativeAddThenFetch32 */
}
Uint16
SDL_AtomicSubtractThenFetch16(volatile Uint16 * ptr, Uint16 value)
Uint32
SDL_AtomicSubtractThenFetch32(volatile Uint32 * ptr, Uint32 value)
{
#ifdef nativeSubtractThenFetch16
return nativeSubtractThenFetch16(ptr, value);
#ifdef nativeSubtractThenFetch32
atomic_sub(ptr, value);
return atomic_sub_value(ptr, 0x00000000);
#else
Uint16 tmp = 0;
Uint32 tmp = 0;
privateWaitLock();
privateWaitLock(ptr);
(*ptr)-= value;
tmp = *ptr;
privateUnlock();
privateUnlock(ptr);
return tmp;
#endif
#endif /* nativeSubtractThenFetch32 */
}
/* 64 bit atomic operations */
#ifdef SDL_HAS_64BIT_TYPE
/* Atomically store value into *ptr and return the previous contents. */
Uint64
SDL_AtomicExchange64(volatile Uint64 * ptr, Uint64 value)
{
#ifdef nativeExchange64
    return nativeExchange64(ptr, value);
#else
    Uint64 previous;

    privateWaitLock();
    previous = *ptr;
    *ptr = value;
    privateUnlock();
    return previous;
#endif
}
/* Atomically set *ptr to newvalue if and only if it currently equals
   oldvalue; returns SDL_TRUE on success. */
SDL_bool
SDL_AtomicCompareThenSet64(volatile Uint64 * ptr, Uint64 oldvalue, Uint64 newvalue)
{
#ifdef nativeCompareThenSet64
    return (SDL_bool)nativeCompareThenSet64(ptr, oldvalue, newvalue);
#else
    SDL_bool matched = SDL_FALSE;

    privateWaitLock();
    matched = (*ptr == oldvalue) ? SDL_TRUE : SDL_FALSE;
    if (matched)
    {
        *ptr = newvalue;
    }
    privateUnlock();
    return matched;
#endif
}
/* Atomically test whether *ptr is zero and, if so, set it to one.
   Returns SDL_TRUE when the set was performed.
   NOTE(review): the diff residue kept both the old zero-argument and new
   pointer-argument lock calls plus a doubled #endif; deduplicated here to
   the ptr-keyed lock form matching privateWaitLock(volatile void *ptr). */
SDL_bool
SDL_AtomicTestThenSet64(volatile Uint64 * ptr)
{
#ifdef nativeTestThenSet64
    return (SDL_bool)nativeTestThenSet64(ptr);
#else
    SDL_bool result = SDL_FALSE;

    privateWaitLock(ptr);
    result = (*ptr == 0);
    if (result)
    {
        *ptr = 1;
    }
    privateUnlock(ptr);
    return result;
#endif /* nativeTestThenSet64 */
}
/* Atomically set *ptr to zero.
   NOTE(review): duplicated lock calls and doubled #endif (diff residue)
   removed; uses the ptr-keyed spin-lock helpers. */
void
SDL_AtomicClear64(volatile Uint64 * ptr)
{
#ifdef nativeClear64
    nativeClear64(ptr);
#else
    privateWaitLock(ptr);
    *ptr = 0;
    privateUnlock(ptr);
#endif /* nativeClear64 */
}
/* Atomically increment *ptr and return the PRE-increment value.
   NOTE(review): deduplicated diff-residue lock calls / doubled #endif. */
Uint64
SDL_AtomicFetchThenIncrement64(volatile Uint64 * ptr)
{
#ifdef nativeFetchThenIncrement64
    return nativeFetchThenIncrement64(ptr);
#else
    Uint64 tmp = 0;

    privateWaitLock(ptr);
    tmp = *ptr;
    (*ptr) += 1;
    privateUnlock(ptr);
    return tmp;
#endif /* nativeFetchThenIncrement64 */
}
/* Atomically decrement *ptr and return the PRE-decrement value.
   NOTE(review): deduplicated diff-residue lock calls / doubled #endif. */
Uint64
SDL_AtomicFetchThenDecrement64(volatile Uint64 * ptr)
{
#ifdef nativeFetchThenDecrement64
    return nativeFetchThenDecrement64(ptr);
#else
    Uint64 tmp = 0;

    privateWaitLock(ptr);
    tmp = *ptr;
    (*ptr) -= 1;
    privateUnlock(ptr);
    return tmp;
#endif /* nativeFetchThenDecrement64 */
}
/* Atomically add value to *ptr and return the PRE-add contents.
   NOTE(review): deduplicated diff-residue lock calls / doubled #endif. */
Uint64
SDL_AtomicFetchThenAdd64(volatile Uint64 * ptr, Uint64 value)
{
#ifdef nativeFetchThenAdd64
    return nativeFetchThenAdd64(ptr, value);
#else
    Uint64 tmp = 0;

    privateWaitLock(ptr);
    tmp = *ptr;
    (*ptr) += value;
    privateUnlock(ptr);
    return tmp;
#endif /* nativeFetchThenAdd64 */
}
/* Atomically subtract value from *ptr and return the PRE-subtract contents.
   NOTE(review): deduplicated diff-residue lock calls / doubled #endif. */
Uint64
SDL_AtomicFetchThenSubtract64(volatile Uint64 * ptr, Uint64 value)
{
#ifdef nativeFetchThenSubtract64
    return nativeFetchThenSubtract64(ptr, value);
#else
    Uint64 tmp = 0;

    privateWaitLock(ptr);
    tmp = *ptr;
    (*ptr) -= value;
    privateUnlock(ptr);
    return tmp;
#endif /* nativeFetchThenSubtract64 */
}
/* Atomically increment *ptr and return the POST-increment value.
   NOTE(review): deduplicated diff-residue lock calls / doubled #endif. */
Uint64
SDL_AtomicIncrementThenFetch64(volatile Uint64 * ptr)
{
#ifdef nativeIncrementThenFetch64
    return nativeIncrementThenFetch64(ptr);
#else
    Uint64 tmp = 0;

    privateWaitLock(ptr);
    (*ptr) += 1;
    tmp = *ptr;
    privateUnlock(ptr);
    return tmp;
#endif /* nativeIncrementThenFetch64 */
}
/* Atomically decrement *ptr and return the POST-decrement value.
   NOTE(review): deduplicated diff-residue lock calls / doubled #endif. */
Uint64
SDL_AtomicDecrementThenFetch64(volatile Uint64 * ptr)
{
#ifdef nativeDecrementThenFetch64
    return nativeDecrementThenFetch64(ptr);
#else
    Uint64 tmp = 0;

    privateWaitLock(ptr);
    (*ptr) -= 1;
    tmp = *ptr;
    privateUnlock(ptr);
    return tmp;
#endif /* nativeDecrementThenFetch64 */
}
/* Atomically add value to *ptr and return the POST-add contents.
   NOTE(review): deduplicated diff-residue lock calls / doubled #endif. */
Uint64
SDL_AtomicAddThenFetch64(volatile Uint64 * ptr, Uint64 value)
{
#ifdef nativeAddThenFetch64
    return nativeAddThenFetch64(ptr, value);
#else
    Uint64 tmp = 0;

    privateWaitLock(ptr);
    (*ptr) += value;
    tmp = *ptr;
    privateUnlock(ptr);
    return tmp;
#endif /* nativeAddThenFetch64 */
}
/* Atomically subtract value from *ptr and return the POST-subtract contents.
   NOTE(review): deduplicated diff-residue lock calls / doubled #endif. */
Uint64
SDL_AtomicSubtractThenFetch64(volatile Uint64 * ptr, Uint64 value)
{
#ifdef nativeSubtractThenFetch64
    return nativeSubtractThenFetch64(ptr, value);
#else
    Uint64 tmp = 0;

    privateWaitLock(ptr);
    (*ptr) -= value;
    tmp = *ptr;
    privateUnlock(ptr);
    return tmp;
#endif /* nativeSubtractThenFetch64 */
}
#endif
/* QNX native 32 bit atomic operations */
/* Atomically store value into *ptr and return the previous contents.
   Emulated via the ptr-keyed spin locks (QNX has no 32-bit exchange in
   <atomic.h>).
   NOTE(review): original called privateWaitLock()/privateUnlock() with no
   argument, but the lock helpers take the operand pointer to pick a lock
   from the table — pass ptr. */
Uint32
SDL_AtomicExchange32(volatile Uint32 * ptr, Uint32 value)
{
    Uint32 tmp = 0;

    privateWaitLock(ptr);
    tmp = *ptr;
    *ptr = value;
    privateUnlock(ptr);
    return tmp;
}
/* Atomically set *ptr to newvalue if and only if it currently equals
   oldvalue; returns SDL_TRUE on success.
   NOTE(review): lock helpers require the operand pointer — pass ptr. */
SDL_bool
SDL_AtomicCompareThenSet32(volatile Uint32 * ptr, Uint32 oldvalue, Uint32 newvalue)
{
    SDL_bool result = SDL_FALSE;

    privateWaitLock(ptr);
    result = (*ptr == oldvalue);
    if (result)
    {
        *ptr = newvalue;
    }
    privateUnlock(ptr);
    return result;
}
/* Atomically test whether *ptr is zero and, if so, set it to one.
   Returns SDL_TRUE when the set was performed.
   NOTE(review): lock helpers require the operand pointer — pass ptr. */
SDL_bool
SDL_AtomicTestThenSet32(volatile Uint32 * ptr)
{
    SDL_bool result = SDL_FALSE;

    privateWaitLock(ptr);
    result = (*ptr == 0);
    if (result)
    {
        *ptr = 1;
    }
    privateUnlock(ptr);
    return result;
}
/* Atomically set *ptr to zero by clearing every bit with the QNX
   atomic_clr() primitive (mask of all ones). */
void
SDL_AtomicClear32(volatile Uint32 * ptr)
{
atomic_clr(ptr, 0xFFFFFFFF);
}
/* Atomically increment *ptr; QNX atomic_add_value() returns the value
   held BEFORE the addition, which is exactly fetch-then-increment. */
Uint32
SDL_AtomicFetchThenIncrement32(volatile Uint32 * ptr)
{
return atomic_add_value(ptr, 0x00000001);
}
/* Atomically decrement *ptr; QNX atomic_sub_value() returns the value
   held BEFORE the subtraction, which is exactly fetch-then-decrement. */
Uint32
SDL_AtomicFetchThenDecrement32(volatile Uint32 * ptr)
{
return atomic_sub_value(ptr, 0x00000001);
}
/* Atomically add value to *ptr and return the PRE-add contents
   (atomic_add_value() returns the old value). */
Uint32
SDL_AtomicFetchThenAdd32(volatile Uint32 * ptr, Uint32 value)
{
return atomic_add_value(ptr, value);
}
/* Atomically subtract value from *ptr and return the PRE-subtract
   contents (atomic_sub_value() returns the old value). */
Uint32
SDL_AtomicFetchThenSubtract32(volatile Uint32 * ptr, Uint32 value)
{
return atomic_sub_value(ptr, value);
}
/* Atomically increment *ptr and return the POST-increment value.
   BUG FIX: the original did atomic_add() followed by a second
   atomic_add_value(ptr, 0) read-back — two separate atomic operations,
   so another thread could modify *ptr in between and the wrong value
   would be returned. atomic_add_value() returns the PRE-add value, so a
   single call plus the addend yields the post-increment value atomically. */
Uint32
SDL_AtomicIncrementThenFetch32(volatile Uint32 * ptr)
{
    return atomic_add_value(ptr, 0x00000001) + 0x00000001;
}
/* Atomically decrement *ptr and return the POST-decrement value.
   BUG FIX: the original sub-then-reread pair was not atomic as a whole;
   atomic_sub_value() returns the PRE-subtract value, so one call minus
   the operand gives the post-decrement value in a single atomic step. */
Uint32
SDL_AtomicDecrementThenFetch32(volatile Uint32 * ptr)
{
    return atomic_sub_value(ptr, 0x00000001) - 0x00000001;
}
/* Atomically add value to *ptr and return the POST-add contents.
   BUG FIX: the original add-then-reread pair was not atomic as a whole;
   atomic_add_value() returns the PRE-add value, so one call plus the
   operand gives the post-add value in a single atomic step. */
Uint32
SDL_AtomicAddThenFetch32(volatile Uint32 * ptr, Uint32 value)
{
    return atomic_add_value(ptr, value) + value;
}
/* Atomically subtract value from *ptr and return the POST-subtract
   contents.
   BUG FIX: the original sub-then-reread pair was not atomic as a whole;
   atomic_sub_value() returns the PRE-subtract value, so one call minus
   the operand gives the post-subtract value in a single atomic step. */
Uint32
SDL_AtomicSubtractThenFetch32(volatile Uint32 * ptr, Uint32 value)
{
    return atomic_sub_value(ptr, value) - value;
}
#endif /* SDL_HAS_64BIT_TYPE */
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment