#ifndef QATOMIC_IA64_H
#define QATOMIC_IA64_H

#define Q_ATOMIC_INT_REFERENCE_COUNTING_IS_ALWAYS_NATIVE
#define Q_ATOMIC_INT_REFERENCE_COUNTING_IS_WAIT_FREE

#define Q_ATOMIC_INT_TEST_AND_SET_IS_ALWAYS_NATIVE
#define Q_ATOMIC_INT_TEST_AND_SET_IS_WAIT_FREE

#define Q_ATOMIC_INT_FETCH_AND_STORE_IS_ALWAYS_NATIVE
#define Q_ATOMIC_INT_FETCH_AND_STORE_IS_WAIT_FREE

#define Q_ATOMIC_INT_FETCH_AND_ADD_IS_ALWAYS_NATIVE

#define Q_ATOMIC_POINTER_TEST_AND_SET_IS_ALWAYS_NATIVE
#define Q_ATOMIC_POINTER_TEST_AND_SET_IS_WAIT_FREE

#define Q_ATOMIC_POINTER_FETCH_AND_STORE_IS_ALWAYS_NATIVE
#define Q_ATOMIC_POINTER_FETCH_AND_STORE_IS_WAIT_FREE

#define Q_ATOMIC_POINTER_FETCH_AND_ADD_IS_ALWAYS_NATIVE

template <typename T>
Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::isFetchAndAddNative()
{ return true; }

template <typename T>
Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::isFetchAndAddWaitFree()
{ return false; }

inline bool _q_ia64_fetchadd_immediate(register int value)
{
    return value == 1 || value == -1
        || value == 4 || value == -4
        || value == 8 || value == -8
        || value == 16 || value == -16;
}
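// The IA-64 fetchadd4/fetchadd8 instructions can only encode the immediate
// addends +/-1, +/-4, +/-8 and +/-16, which is exactly the set accepted by
// _q_ia64_fetchadd_immediate() above.  A minimal sketch of the dispatch the
// compiler-specific sections below perform (illustrative only):
//
//     if (__builtin_constant_p(valueToAdd) && _q_ia64_fetchadd_immediate(valueToAdd)) {
//         // wait-free: a single fetchadd instruction with acquire or release semantics
//     } else {
//         // general case: an exchange-add intrinsic or a compare-and-swap retry loop
//     }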
#if defined(Q_CC_INTEL)

#include <ia64intrin.h>

inline int QBasicAtomicInt::fetchAndStoreAcquire(int newValue)
{
    return static_cast<int>(_InterlockedExchange(&_q_value, newValue));
}

inline int QBasicAtomicInt::fetchAndStoreRelease(int newValue)
{
    __memory_barrier();
    return static_cast<int>(_InterlockedExchange(&_q_value, newValue));
}

inline bool QBasicAtomicInt::testAndSetRelaxed(int expectedValue, int newValue)
{
    register int expectedValueCopy = expectedValue;
    return (static_cast<int>(_InterlockedCompareExchange(&_q_value,
                                                          newValue,
                                                          expectedValueCopy))
            == expectedValue);
}

inline bool QBasicAtomicInt::testAndSetAcquire(int expectedValue, int newValue)
{
    register int expectedValueCopy = expectedValue;
    return (static_cast<int>(_InterlockedCompareExchange_acq(reinterpret_cast<volatile uint *>(&_q_value),
                                                              newValue,
                                                              expectedValueCopy))
            == expectedValue);
}

inline bool QBasicAtomicInt::testAndSetRelease(int expectedValue, int newValue)
{
    register int expectedValueCopy = expectedValue;
    return (static_cast<int>(_InterlockedCompareExchange_rel(reinterpret_cast<volatile uint *>(&_q_value),
                                                              newValue,
                                                              expectedValueCopy))
            == expectedValue);
}
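// A usage sketch (not part of this header): callers typically build other
// read-modify-write operations on top of testAndSet* by retrying until the
// compare-and-swap succeeds.  For example, a hypothetical bounded increment:
//
//     int current;
//     do {
//         current = atomic;       // plain load of the current value
//         if (current >= limit)   // 'atomic' and 'limit' are illustrative names
//             break;
//     } while (!atomic.testAndSetAcquire(current, current + 1));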
inline int QBasicAtomicInt::fetchAndAddAcquire(int valueToAdd)
{
    if (__builtin_constant_p(valueToAdd)) {
        if (valueToAdd == 1)
            return __fetchadd4_acq((unsigned int *)&_q_value, 1);
        if (valueToAdd == -1)
            return __fetchadd4_acq((unsigned int *)&_q_value, -1);
    }
    return _InterlockedExchangeAdd(&_q_value, valueToAdd);
}

inline int QBasicAtomicInt::fetchAndAddRelease(int valueToAdd)
{
    if (__builtin_constant_p(valueToAdd)) {
        if (valueToAdd == 1)
            return __fetchadd4_rel((unsigned int *)&_q_value, 1);
        if (valueToAdd == -1)
            return __fetchadd4_rel((unsigned int *)&_q_value, -1);
    }
    __memory_barrier();
    return _InterlockedExchangeAdd(&_q_value, valueToAdd);
}
inline bool QBasicAtomicInt::ref()
{
    return _InterlockedIncrement(&_q_value) != 0;
}

inline bool QBasicAtomicInt::deref()
{
    return _InterlockedDecrement(&_q_value) != 0;
}
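// A reference-counting sketch (illustrative, not code from this file):
// ref()/deref() report whether the counter is non-zero after the change, so
// the last owner can tell when to delete the shared payload.
//
//     struct Shared { QBasicAtomicInt refCount; /* payload */ };
//
//     void release(Shared *s)
//     {
//         if (!s->refCount.deref())   // dropped to zero: last reference gone
//             delete s;
//     }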
template <typename T>
Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndStoreAcquire(T *newValue)
{
    return (T *)_InterlockedExchangePointer(reinterpret_cast<void * volatile*>(&_q_value), newValue);
}

template <typename T>
Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::testAndSetRelaxed(T *expectedValue, T *newValue)
{
    register T *expectedValueCopy = expectedValue;
    return (_InterlockedCompareExchangePointer(reinterpret_cast<void * volatile*>(&_q_value),
                                               newValue,
                                               expectedValueCopy)
            == expectedValueCopy);
}
template <typename T>
Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::testAndSetAcquire(T *expectedValue, T *newValue)
{
    union {
        volatile void *x;
        volatile unsigned long *p;
    };
    x = &_q_value;
    register T *expectedValueCopy = expectedValue;
    return (_InterlockedCompareExchange64_acq(p,
                                              quintptr(newValue),
                                              quintptr(expectedValueCopy))
            == quintptr(expectedValue));
}

template <typename T>
Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::testAndSetRelease(T *expectedValue, T *newValue)
{
    union {
        volatile void *x;
        volatile unsigned long *p;
    };
    x = &_q_value;
    register T *expectedValueCopy = expectedValue;
    return (_InterlockedCompareExchange64_rel(p,
                                              quintptr(newValue),
                                              quintptr(expectedValueCopy))
            == quintptr(expectedValue));
}
template <typename T>
Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndAddAcquire(qptrdiff valueToAdd)
{
    return (T *)_InterlockedExchangeAdd64((volatile long *)&_q_value,
                                          valueToAdd * sizeof(T));
}

template <typename T>
Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndAddRelease(qptrdiff valueToAdd)
{
    __memory_barrier();
    return (T *)_InterlockedExchangeAdd64((volatile long *)&_q_value,
                                          valueToAdd * sizeof(T));
}
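// Note the sizeof(T) scaling above: QBasicAtomicPointer<T>::fetchAndAdd*()
// counts in elements rather than bytes, so the addend is multiplied by
// sizeof(T) before the 64-bit exchange-add.  Illustrative use:
//
//     QBasicAtomicPointer<int> cursor;                 // points into an int array
//     int *before = cursor.fetchAndAddAcquire(3);      // atomically advances the
//                                                      // pointer by 3 ints (12 bytes)
//                                                      // and returns the old value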
#elif defined(Q_CC_GNU)

inline int QBasicAtomicInt::fetchAndStoreAcquire(int newValue)
{
    int ret;
    asm volatile("xchg4 %0=%1,%2\n"
                 : "=r" (ret), "+m" (_q_value)
                 : "r" (newValue)
                 : "memory");
    return ret;
}

inline bool QBasicAtomicInt::testAndSetAcquire(int expectedValue, int newValue)
{
    int ret;
    asm volatile("mov ar.ccv=%2\n"
                 ";;\n"
                 "cmpxchg4.acq %0=%1,%3,ar.ccv\n"
                 : "=r" (ret), "+m" (_q_value)
                 : "r" (expectedValue), "r" (newValue)
                 : "memory");
    return ret == expectedValue;
}

inline bool QBasicAtomicInt::testAndSetRelease(int expectedValue, int newValue)
{
    int ret;
    asm volatile("mov ar.ccv=%2\n"
                 ";;\n"
                 "cmpxchg4.rel %0=%1,%3,ar.ccv\n"
                 : "=r" (ret), "+m" (_q_value)
                 : "r" (expectedValue), "r" (newValue)
                 : "memory");
    return ret == expectedValue;
}
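// How the inline assembly above maps onto the IA-64 compare-and-swap protocol
// (a summary, not code from this file): cmpxchg compares memory against the
// ar.ccv application register, so the expected value is loaded into ar.ccv
// first, the ";;" stop makes that write visible to the next instruction
// group, and cmpxchg4.acq/.rel then stores the new value only on a match
// while returning the old contents in %0.  Equivalent C-level logic:
//
//     old = *addr;                       // done atomically by cmpxchg4
//     if (old == expectedValue)          // expectedValue sits in ar.ccv
//         *addr = newValue;
//     return old == expectedValue;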
inline int QBasicAtomicInt::fetchAndAddAcquire(int valueToAdd)
{
    int ret;

    // fast path: the addend fits the fetchadd immediate encoding
    if (__builtin_constant_p(valueToAdd) && _q_ia64_fetchadd_immediate(valueToAdd)) {
        asm volatile("fetchadd4.acq %0=%1,%2\n"
                     : "=r" (ret), "+m" (_q_value)
                     : "i" (valueToAdd)
                     : "memory");
        return ret;
    }

    // general case: compare-and-swap retry loop
    ret = _q_value;
    asm volatile("0:\n"
                 " mov           r9=%0\n"
                 " mov           ar.ccv=%0\n"
                 " add           %0=%0,%2\n"
                 " ;;\n"
                 " cmpxchg4.acq  %0=%1,%0,ar.ccv\n"
                 " ;;\n"
                 " cmp.ne        p6,p0 = %0, r9\n"
                 "(p6) br.cond.dptk 0b\n"
                 : "+r" (ret), "+m" (_q_value)
                 : "r" (valueToAdd)
                 : "r9", "p6", "memory");
    return ret;
}

inline int QBasicAtomicInt::fetchAndAddRelease(int valueToAdd)
{
    int ret;

    // fast path: the addend fits the fetchadd immediate encoding
    if (__builtin_constant_p(valueToAdd) && _q_ia64_fetchadd_immediate(valueToAdd)) {
        asm volatile("fetchadd4.rel %0=%1,%2\n"
                     : "=r" (ret), "+m" (_q_value)
                     : "i" (valueToAdd)
                     : "memory");
        return ret;
    }

    // general case: compare-and-swap retry loop
    ret = _q_value;
    asm volatile("0:\n"
                 " mov           r9=%0\n"
                 " mov           ar.ccv=%0\n"
                 " add           %0=%0,%2\n"
                 " ;;\n"
                 " cmpxchg4.rel  %0=%1,%0,ar.ccv\n"
                 " ;;\n"
                 " cmp.ne        p6,p0 = %0, r9\n"
                 "(p6) br.cond.dptk 0b\n"
                 : "+r" (ret), "+m" (_q_value)
                 : "r" (valueToAdd)
                 : "r9", "p6", "memory");
    return ret;
}
inline int QBasicAtomicInt::fetchAndAddOrdered(int valueToAdd)
{
    asm volatile("mf" ::: "memory");
    return fetchAndAddRelease(valueToAdd);
}

inline bool QBasicAtomicInt::ref()
{
    int ret;
    asm volatile("fetchadd4.acq %0=%1,1\n"
                 : "=r" (ret), "+m" (_q_value)
                 :
                 : "memory");
    return ret != -1;
}

inline bool QBasicAtomicInt::deref()
{
    int ret;
    asm volatile("fetchadd4.rel %0=%1,-1\n"
                 : "=r" (ret), "+m" (_q_value)
                 :
                 : "memory");
    return ret != 1;
}

template <typename T>
Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndStoreAcquire(T *newValue)
{
    T *ret;
    asm volatile("xchg8 %0=%1,%2\n"
                 : "=r" (ret), "+m" (_q_value)
                 : "r" (newValue)
                 : "memory");
    return ret;
}
template <typename T>
Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::testAndSetAcquire(T *expectedValue, T *newValue)
{
    T *ret;
    asm volatile("mov ar.ccv=%2\n"
                 ";;\n"
                 "cmpxchg8.acq %0=%1,%3,ar.ccv\n"
                 : "=r" (ret), "+m" (_q_value)
                 : "r" (expectedValue), "r" (newValue)
                 : "memory");
    return ret == expectedValue;
}

template <typename T>
Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::testAndSetRelease(T *expectedValue, T *newValue)
{
    T *ret;
    asm volatile("mov ar.ccv=%2\n"
                 ";;\n"
                 "cmpxchg8.rel %0=%1,%3,ar.ccv\n"
                 : "=r" (ret), "+m" (_q_value)
                 : "r" (expectedValue), "r" (newValue)
                 : "memory");
    return ret == expectedValue;
}
template <typename T>
Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndAddAcquire(qptrdiff valueToAdd)
{
    T *ret;

    // fast path: the scaled addend fits the fetchadd immediate encoding
    if (__builtin_constant_p(valueToAdd) && _q_ia64_fetchadd_immediate(valueToAdd * sizeof(T))) {
        asm volatile("fetchadd8.acq %0=%1,%2\n"
                     : "=r" (ret), "+m" (_q_value)
                     : "i" (valueToAdd * sizeof(T))
                     : "memory");
        return ret;
    }

    // general case: compare-and-swap retry loop
    ret = _q_value;
    asm volatile("0:\n"
                 " mov           r9=%0\n"
                 " mov           ar.ccv=%0\n"
                 " add           %0=%0,%2\n"
                 " ;;\n"
                 " cmpxchg8.acq  %0=%1,%0,ar.ccv\n"
                 " ;;\n"
                 " cmp.ne        p6,p0 = %0, r9\n"
                 "(p6) br.cond.dptk 0b\n"
                 : "+r" (ret), "+m" (_q_value)
                 : "r" (valueToAdd * sizeof(T))
                 : "r9", "p6", "memory");
    return ret;
}

template <typename T>
Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndAddRelease(qptrdiff valueToAdd)
{
    T *ret;

    // fast path: the scaled addend fits the fetchadd immediate encoding
    if (__builtin_constant_p(valueToAdd) && _q_ia64_fetchadd_immediate(valueToAdd * sizeof(T))) {
        asm volatile("fetchadd8.rel %0=%1,%2\n"
                     : "=r" (ret), "+m" (_q_value)
                     : "i" (valueToAdd * sizeof(T))
                     : "memory");
        return ret;
    }

    // general case: compare-and-swap retry loop
    ret = _q_value;
    asm volatile("0:\n"
                 " mov           r9=%0\n"
                 " mov           ar.ccv=%0\n"
                 " add           %0=%0,%2\n"
                 " ;;\n"
                 " cmpxchg8.rel  %0=%1,%0,ar.ccv\n"
                 " ;;\n"
                 " cmp.ne        p6,p0 = %0, r9\n"
                 "(p6) br.cond.dptk 0b\n"
                 : "+r" (ret), "+m" (_q_value)
                 : "r" (valueToAdd * sizeof(T))
                 : "r9", "p6", "memory");
    return ret;
}
template <typename T>
Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndAddOrdered(qptrdiff valueToAdd)
{
    asm volatile("mf" ::: "memory");
    return fetchAndAddRelease(valueToAdd);
}
#elif defined Q_CC_HPACC

#include <ia64/sys/inline.h>

#define FENCE (_Asm_fence)(_UP_CALL_FENCE | _UP_SYS_FENCE | _DOWN_CALL_FENCE | _DOWN_SYS_FENCE)

inline int QBasicAtomicInt::fetchAndStoreAcquire(int newValue)
{
    return _Asm_xchg((_Asm_sz)_SZ_W, &_q_value, (unsigned)newValue,
                     (_Asm_ldhint)_LDHINT_NONE, FENCE);
}

inline int QBasicAtomicInt::fetchAndStoreRelease(int newValue)
{
    return _Asm_xchg((_Asm_sz)_SZ_W, &_q_value, (unsigned)newValue,
                     (_Asm_ldhint)_LDHINT_NONE, FENCE);
}

inline bool QBasicAtomicInt::testAndSetAcquire(int expectedValue, int newValue)
{
    _Asm_mov_to_ar((_Asm_app_reg)_AREG_CCV, (unsigned)expectedValue, FENCE);
    int ret = _Asm_cmpxchg((_Asm_sz)_SZ_W, (_Asm_sem)_SEM_ACQ,
                           &_q_value, (unsigned)newValue, (_Asm_ldhint)_LDHINT_NONE);
    return ret == expectedValue;
}

inline bool QBasicAtomicInt::testAndSetRelease(int expectedValue, int newValue)
{
    _Asm_mov_to_ar((_Asm_app_reg)_AREG_CCV, (unsigned)expectedValue, FENCE);
    int ret = _Asm_cmpxchg((_Asm_sz)_SZ_W, (_Asm_sem)_SEM_REL,
                           &_q_value, newValue, (_Asm_ldhint)_LDHINT_NONE);
    return ret == expectedValue;
}
inline int QBasicAtomicInt::fetchAndAddAcquire(int valueToAdd)
{
    if (valueToAdd == 1)
        return _Asm_fetchadd((_Asm_fasz)_FASZ_W, (_Asm_sem)_SEM_ACQ,
                             &_q_value, 1, (_Asm_ldhint)_LDHINT_NONE, FENCE);
    else if (valueToAdd == -1)
        return _Asm_fetchadd((_Asm_fasz)_FASZ_W, (_Asm_sem)_SEM_ACQ,
                             &_q_value, -1, (_Asm_ldhint)_LDHINT_NONE, FENCE);

    // other addends: loop on compare-and-swap
    register int old, ret;
    do {
        old = _q_value;
        _Asm_mov_to_ar((_Asm_app_reg)_AREG_CCV, (unsigned)old, FENCE);
        ret = _Asm_cmpxchg((_Asm_sz)_SZ_W, (_Asm_sem)_SEM_ACQ,
                           &_q_value, old + valueToAdd, (_Asm_ldhint)_LDHINT_NONE);
    } while (ret != old);
    return old;
}
inline int QBasicAtomicInt::fetchAndAddRelease(int valueToAdd)
{
    if (valueToAdd == 1)
        return _Asm_fetchadd((_Asm_fasz)_FASZ_W, (_Asm_sem)_SEM_REL,
                             &_q_value, 1, (_Asm_ldhint)_LDHINT_NONE, FENCE);
    else if (valueToAdd == -1)
        return _Asm_fetchadd((_Asm_fasz)_FASZ_W, (_Asm_sem)_SEM_REL,
                             &_q_value, -1, (_Asm_ldhint)_LDHINT_NONE, FENCE);

    // other addends: loop on compare-and-swap
    register int old, ret;
    do {
        old = _q_value;
        _Asm_mov_to_ar((_Asm_app_reg)_AREG_CCV, (unsigned)old, FENCE);
        ret = _Asm_cmpxchg((_Asm_sz)_SZ_W, (_Asm_sem)_SEM_REL,
                           &_q_value, old + valueToAdd, (_Asm_ldhint)_LDHINT_NONE);
    } while (ret != old);
    return old;
}
inline bool QBasicAtomicInt::ref()
{
    return (int)_Asm_fetchadd((_Asm_fasz)_FASZ_W, (_Asm_sem)_SEM_ACQ,
                              &_q_value, 1, (_Asm_ldhint)_LDHINT_NONE, FENCE) != -1;
}

inline bool QBasicAtomicInt::deref()
{
    return (int)_Asm_fetchadd((_Asm_fasz)_FASZ_W, (_Asm_sem)_SEM_REL,
                              &_q_value, -1, (_Asm_ldhint)_LDHINT_NONE, FENCE) != 1;
}
template <typename T>
Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndStoreAcquire(T *newValue)
{
#ifdef __LP64__
    return (T *)_Asm_xchg((_Asm_sz)_SZ_D, &_q_value, (quint64)newValue,
                          (_Asm_ldhint)_LDHINT_NONE, FENCE);
#else
    return (T *)_Asm_xchg((_Asm_sz)_SZ_W, &_q_value, (quint32)newValue,
                          (_Asm_ldhint)_LDHINT_NONE, FENCE);
#endif
}
template <typename T>
Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::testAndSetAcquire(T *expectedValue, T *newValue)
{
#ifdef __LP64__
    _Asm_mov_to_ar((_Asm_app_reg)_AREG_CCV, (quint64)expectedValue, FENCE);
    T *ret = (T *)_Asm_cmpxchg((_Asm_sz)_SZ_D, (_Asm_sem)_SEM_ACQ,
                               &_q_value, (quint64)newValue, (_Asm_ldhint)_LDHINT_NONE);
#else
    _Asm_mov_to_ar((_Asm_app_reg)_AREG_CCV, (quint32)expectedValue, FENCE);
    T *ret = (T *)_Asm_cmpxchg((_Asm_sz)_SZ_W, (_Asm_sem)_SEM_ACQ,
                               &_q_value, (quint32)newValue, (_Asm_ldhint)_LDHINT_NONE);
#endif
    return ret == expectedValue;
}
template <typename T>
Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::testAndSetRelease(T *expectedValue, T *newValue)
{
#ifdef __LP64__
    _Asm_mov_to_ar((_Asm_app_reg)_AREG_CCV, (quint64)expectedValue, FENCE);
    T *ret = (T *)_Asm_cmpxchg((_Asm_sz)_SZ_D, (_Asm_sem)_SEM_REL,
                               &_q_value, (quint64)newValue, (_Asm_ldhint)_LDHINT_NONE);
#else
    _Asm_mov_to_ar((_Asm_app_reg)_AREG_CCV, (quint32)expectedValue, FENCE);
    T *ret = (T *)_Asm_cmpxchg((_Asm_sz)_SZ_W, (_Asm_sem)_SEM_REL,
                               &_q_value, (quint32)newValue, (_Asm_ldhint)_LDHINT_NONE);
#endif
    return ret == expectedValue;
}
template <typename T>
Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndAddAcquire(qptrdiff valueToAdd)
{
    // implemented with a compare-and-swap retry loop
    register T *old, *ret;
    do {
        old = _q_value;
#ifdef __LP64__
        _Asm_mov_to_ar((_Asm_app_reg)_AREG_CCV, (quint64)old, FENCE);
        ret = (T *)_Asm_cmpxchg((_Asm_sz)_SZ_D, (_Asm_sem)_SEM_ACQ,
                                &_q_value, (quint64)(old + valueToAdd),
                                (_Asm_ldhint)_LDHINT_NONE);
#else
        _Asm_mov_to_ar((_Asm_app_reg)_AREG_CCV, (quint32)old, FENCE);
        ret = (T *)_Asm_cmpxchg((_Asm_sz)_SZ_W, (_Asm_sem)_SEM_ACQ,
                                &_q_value, (quint32)(old + valueToAdd),
                                (_Asm_ldhint)_LDHINT_NONE);
#endif
    } while (old != ret);
    return old;
}
template <typename T>
Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndAddRelease(qptrdiff valueToAdd)
{
    // implemented with a compare-and-swap retry loop
    register T *old, *ret;
    do {
        old = _q_value;
#ifdef __LP64__
        _Asm_mov_to_ar((_Asm_app_reg)_AREG_CCV, (quint64)old, FENCE);
        ret = (T *)_Asm_cmpxchg((_Asm_sz)_SZ_D, (_Asm_sem)_SEM_REL,
                                &_q_value, (quint64)(old + valueToAdd),
                                (_Asm_ldhint)_LDHINT_NONE);
#else
        _Asm_mov_to_ar((_Asm_app_reg)_AREG_CCV, (quint32)old, FENCE);
        ret = (T *)_Asm_cmpxchg((_Asm_sz)_SZ_W, (_Asm_sem)_SEM_REL,
                                &_q_value, (quint32)(old + valueToAdd),
                                (_Asm_ldhint)_LDHINT_NONE);
#endif
    } while (old != ret);
    return old;
}
#endif // QATOMIC_IA64_H
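// A short usage sketch of the API implemented in this header (illustrative
// only): QBasicAtomicInt and QBasicAtomicPointer<T> are the low-level types
// behind QAtomicInt/QAtomicPointer, and the Relaxed/Acquire/Release/Ordered
// suffixes select the memory-ordering guarantee of each operation.
//
//     QBasicAtomicInt busy = Q_BASIC_ATOMIC_INITIALIZER(0);
//
//     bool tryLock() { return busy.testAndSetAcquire(0, 1); }
//     void unlock()  { busy.fetchAndStoreRelease(0); }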