#ifndef __AtomicScalar_H__
#define __AtomicScalar_H__

#include "OgrePrerequisites.h"

#if (((OGRE_COMPILER == OGRE_COMPILER_GNUC) && (OGRE_COMP_VER >= 412)) || (OGRE_COMPILER == OGRE_COMPILER_CLANG)) && OGRE_THREAD_SUPPORT
#if ((OGRE_COMPILER == OGRE_COMPILER_GNUC) && (OGRE_COMP_VER >= 473))
    #define BUILTIN_FETCH_ADD(var, add) __atomic_fetch_add (var, add, __ATOMIC_SEQ_CST);
    #define BUILTIN_ADD_FETCH(var, add) __atomic_add_fetch (var, add, __ATOMIC_SEQ_CST);
    #define BUILTIN_SUB_FETCH(var, sub) __atomic_sub_fetch (var, sub, __ATOMIC_SEQ_CST);
#else
    #define BUILTIN_FETCH_ADD(var, add) __sync_fetch_and_add (var, add);
    #define BUILTIN_ADD_FETCH(var, add) __sync_add_and_fetch (var, add);
    #define BUILTIN_SUB_FETCH(var, sub) __sync_sub_and_fetch (var, sub);
#endif
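
// Editorial sketch (not part of the original header), assuming a GCC or
// Clang toolchain: the FETCH_ADD form returns the value *before* the
// addition, while the ADD_FETCH form returns the value *after* it, e.g.
//
//     long v = 5;
//     long pre  = BUILTIN_FETCH_ADD(&v, 2);  // pre  == 5, v == 7
//     long post = BUILTIN_ADD_FETCH(&v, 2);  // post == 9, v == 9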
    template<class T> class AtomicScalar
    {
        public:
        void operator= (const AtomicScalar<T> &cousin)
        {
            mField = cousin.mField;
        }
        bool cas (const T &old, const T &nu)
        {
            return __sync_bool_compare_and_swap (&mField, old, nu);
        }
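
        // Hypothetical usage sketch (not from the original source): cas()
        // succeeds only if mField still holds 'old', so lock-free updates
        // are written as a read-modify-retry loop. Assuming the class's
        // get() accessor:
        //
        //     AtomicScalar<long> counter(10);
        //     long cur;
        //     do {
        //         cur = counter.get();
        //     } while (!counter.cas(cur, cur * 2)); // atomically double it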
        T operator++ (void)
        {
            return BUILTIN_ADD_FETCH (&mField, 1);
        }
        T operator-- (void)
        {
            return BUILTIN_ADD_FETCH (&mField, -1);
        }
        T operator++ (int)
        {
            return BUILTIN_FETCH_ADD (&mField, 1);
        }
        T operator-- (int)
        {
            return BUILTIN_FETCH_ADD (&mField, -1);
        }
        T operator+=(const T &add)
        {
            return BUILTIN_ADD_FETCH (&mField, add);
        }
        T operator-=(const T &sub)
        {
            return BUILTIN_SUB_FETCH (&mField, sub);
        }
        // Atomic intrinsics need specially aligned storage on ARM CPUs.
#if OGRE_CPU == OGRE_CPU_ARM
#   if OGRE_COMPILER == OGRE_COMPILER_MSVC
        __declspec(align(16)) volatile T mField;
#   elif (OGRE_COMPILER == OGRE_COMPILER_GNUC) || (OGRE_COMPILER == OGRE_COMPILER_CLANG)
        volatile T mField __attribute__((__aligned__(16)));
#   endif
#else
        volatile T mField;
#endif

    };
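
// A minimal usage sketch (an illustration, not code from the header):
// AtomicScalar suits shared counters such as reference counts, where
// several threads increment and decrement concurrently.
//
//     AtomicScalar<unsigned long> refCount(1);
//     ++refCount;                 // retain: atomic add-and-fetch
//     if (--refCount == 0) {      // release: last owner sees zero
//         // destroy the shared object
//     }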
#elif OGRE_COMPILER == OGRE_COMPILER_MSVC && OGRE_COMP_VER >= 1400 && OGRE_THREAD_SUPPORT
#ifndef WIN32_LEAN_AND_MEAN
#  define WIN32_LEAN_AND_MEAN
#endif
#if !defined(NOMINMAX) && defined(_MSC_VER)
#  define NOMINMAX // required to stop windows.h messing up std::min
#endif
#include <windows.h>
#include <intrin.h>
// Save warnings state
#   pragma warning (push)
#   pragma warning (disable : 4244)
#define NEED_TO_INIT_INTERLOCKEDCOMPAREEXCHANGE64WRAPPER
    // Resolves InterlockedCompareExchange64 at runtime so that Windows
    // versions lacking it (e.g. Windows XP) are still supported.
    struct _OgreExport InterlockedCompareExchange64Wrapper
    {
        InterlockedCompareExchange64Wrapper();

        typedef LONGLONG
            (WINAPI *func_InterlockedCompareExchange64)(
                __inout LONGLONG volatile *Destination,
                __in    LONGLONG Exchange,
                __in    LONGLONG Comperand);

        static func_InterlockedCompareExchange64 Ogre_InterlockedCompareExchange64;
        static FORCEINLINE LONGLONG Ogre_InterlockedIncrement64 (__inout LONGLONG volatile *Addend)
        {
            // Emulate a 64-bit atomic increment with a compare-and-swap loop.
            LONGLONG Old;
            do {
                Old = *Addend;
            } while (Ogre_InterlockedCompareExchange64(Addend, Old + 1, Old) != Old);
            return Old + 1;
        }
        static FORCEINLINE LONGLONG Ogre_InterlockedDecrement64 (__inout LONGLONG volatile *Addend)
        {
            // Same CAS loop as above, subtracting one instead.
            LONGLONG Old;
            do {
                Old = *Addend;
            } while (Ogre_InterlockedCompareExchange64(Addend, Old - 1, Old) != Old);
            return Old - 1;
        }
    };
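
    // A plausible sketch of the out-of-line constructor declared above (an
    // assumption; the real definition lives in a .cpp file): it looks the
    // function up at runtime so binaries still load on Windows versions
    // that lack InterlockedCompareExchange64.
    //
    //     InterlockedCompareExchange64Wrapper::InterlockedCompareExchange64Wrapper()
    //     {
    //         HMODULE kernel32 = GetModuleHandleA("kernel32.dll");
    //         Ogre_InterlockedCompareExchange64 = kernel32 == NULL ? NULL :
    //             (func_InterlockedCompareExchange64)GetProcAddress(
    //                 kernel32, "InterlockedCompareExchange64");
    //     }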
    template<class T> class AtomicScalar
    {
        public:
        void operator= (const AtomicScalar<T> &cousin)
        {
            mField = cousin.mField;
        }
        void set (const T &v)
        {
            mField = v;
        }
        bool cas (const T &old, const T &nu)
        {
            if (sizeof(T)==2) {
                return _InterlockedCompareExchange16((SHORT*)&mField, static_cast<SHORT>(nu), static_cast<SHORT>(old)) == static_cast<SHORT>(old);
            }
            else if (sizeof(T)==4) {
                return _InterlockedCompareExchange((LONG*)&mField, static_cast<LONG>(nu), static_cast<LONG>(old)) == static_cast<LONG>(old);
            }
            else if (sizeof(T)==8 && InterlockedCompareExchange64Wrapper::Ogre_InterlockedCompareExchange64 != NULL) {
                return InterlockedCompareExchange64Wrapper::Ogre_InterlockedCompareExchange64((LONGLONG*)&mField, static_cast<LONGLONG>(nu), static_cast<LONGLONG>(old)) == static_cast<LONGLONG>(old);
            }
            else {
                // No intrinsic fits this size: fall back to the mutex.
                OGRE_LOCK_AUTO_MUTEX;
                if (mField != old)
                    return false;
                mField = nu;
                return true;
            }
        }
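
        // Editorial note: sizeof(T) is a compile-time constant, so the
        // compiler keeps only one branch of the chain above. For a 4-byte T
        // the call effectively reduces to (illustration, not original code):
        //
        //     AtomicScalar<long> flag(0);
        //     bool won = flag.cas(0, 1);
        //     // == (_InterlockedCompareExchange(&field, 1, 0) == 0)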
        T operator++ (void)
        {
            if (sizeof(T)==2) {
                return _InterlockedIncrement16((SHORT*)&mField);
            }
            else if (sizeof(T)==4) {
                return InterlockedIncrement((LONG*)&mField);
            }
            else if (sizeof(T)==8 && InterlockedCompareExchange64Wrapper::Ogre_InterlockedCompareExchange64 != NULL) {
                return InterlockedCompareExchange64Wrapper::Ogre_InterlockedIncrement64((LONGLONG*)&mField);
            }
            else {
                OGRE_LOCK_AUTO_MUTEX;
                return ++mField;
            }
        }
        T operator-- (void)
        {
            if (sizeof(T)==2) {
                return _InterlockedDecrement16((SHORT*)&mField);
            }
            else if (sizeof(T)==4) {
                return InterlockedDecrement((LONG*)&mField);
            }
            else if (sizeof(T)==8 && InterlockedCompareExchange64Wrapper::Ogre_InterlockedCompareExchange64 != NULL) {
                return InterlockedCompareExchange64Wrapper::Ogre_InterlockedDecrement64((LONGLONG*)&mField);
            }
            else {
                OGRE_LOCK_AUTO_MUTEX;
                return --mField;
            }
        }
        T operator++ (int)
        {
            // Postfix: the intrinsics return the new value, so subtract one.
            if (sizeof(T)==2) {
                return _InterlockedIncrement16((SHORT*)&mField)-1;
            }
            else if (sizeof(T)==4) {
                return InterlockedIncrement((LONG*)&mField)-1;
            }
            else if (sizeof(T)==8 && InterlockedCompareExchange64Wrapper::Ogre_InterlockedCompareExchange64 != NULL) {
                return InterlockedCompareExchange64Wrapper::Ogre_InterlockedIncrement64((LONGLONG*)&mField)-1;
            }
            else {
                OGRE_LOCK_AUTO_MUTEX;
                return mField++;
            }
        }
        T operator-- (int)
        {
            // Postfix: the intrinsics return the new value, so add one back.
            if (sizeof(T)==2) {
                return _InterlockedDecrement16((SHORT*)&mField)+1;
            }
            else if (sizeof(T)==4) {
                return InterlockedDecrement((LONG*)&mField)+1;
            }
            else if (sizeof(T)==8 && InterlockedCompareExchange64Wrapper::Ogre_InterlockedCompareExchange64 != NULL) {
                return InterlockedCompareExchange64Wrapper::Ogre_InterlockedDecrement64((LONGLONG*)&mField)+1;
            }
            else {
                OGRE_LOCK_AUTO_MUTEX;
                return mField--;
            }
        }
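
        // Editorial example of the -1/+1 corrections above: the Interlocked
        // primitives return the *new* value, while the postfix operators must
        // return the *old* one, hence the adjustment:
        //
        //     AtomicScalar<long> a(5);
        //     long x = a++;   // x == 5 (intrinsic returned 6, minus 1)
        //     long y = --a;   // y == 5 (prefix form returns the new value)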
        T operator+=(const T &add)
        {
            if ((sizeof(T)==2) || (sizeof(T)==4) || (sizeof(T)==8 && InterlockedCompareExchange64Wrapper::Ogre_InterlockedCompareExchange64 != NULL)) {
                // cas() is the only primitive available for every size.
                T newVal;
                do {
                    newVal = mField + add;
                } while (!cas(newVal - add, newVal));
                return newVal;
            } else {
                OGRE_LOCK_AUTO_MUTEX;
                mField += add;
                return mField;
            }
        }
        T operator-=(const T &sub)
        {
            if ((sizeof(T)==2) || (sizeof(T)==4) || (sizeof(T)==8 && InterlockedCompareExchange64Wrapper::Ogre_InterlockedCompareExchange64 != NULL)) {
                T newVal;
                do {
                    newVal = mField - sub;
                } while (!cas(newVal + sub, newVal));
                return newVal;
            } else {
                OGRE_LOCK_AUTO_MUTEX;
                mField -= sub;
                return mField;
            }
        }

        protected:

        OGRE_AUTO_MUTEX;

        volatile T mField;

    };
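
// Editorial worked example of the CAS loops above: cas(newVal - add, newVal)
// succeeds exactly when mField still holds the value read when newVal was
// computed. If two threads run `a += 1` from a == 0, one wins cas(0, 1);
// the loser re-reads, computes newVal == 2, and succeeds with cas(1, 2).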
#   pragma warning (pop)
#else

// Generic fallback used when no atomic intrinsics are available: every
// operation on AtomicScalar is serialised by the instance mutex.

        bool cas (const T &old, const T &nu)
        {
            OGRE_LOCK_AUTO_MUTEX;
            if (mField != old)
                return false;
            mField = nu;
            return true;
        }

#endif

#endif // __AtomicScalar_H__