Qt 4.8
qatomic_ia64.h
Go to the documentation of this file.
1 /****************************************************************************
2 **
3 ** Copyright (C) 2014 Digia Plc and/or its subsidiary(-ies).
4 ** Contact: http://www.qt-project.org/legal
5 **
6 ** This file is part of the QtCore module of the Qt Toolkit.
7 **
8 ** $QT_BEGIN_LICENSE:LGPL$
9 ** Commercial License Usage
10 ** Licensees holding valid commercial Qt licenses may use this file in
11 ** accordance with the commercial license agreement provided with the
12 ** Software or, alternatively, in accordance with the terms contained in
13 ** a written agreement between you and Digia. For licensing terms and
14 ** conditions see http://qt.digia.com/licensing. For further information
15 ** use the contact form at http://qt.digia.com/contact-us.
16 **
17 ** GNU Lesser General Public License Usage
18 ** Alternatively, this file may be used under the terms of the GNU Lesser
19 ** General Public License version 2.1 as published by the Free Software
20 ** Foundation and appearing in the file LICENSE.LGPL included in the
21 ** packaging of this file. Please review the following information to
22 ** ensure the GNU Lesser General Public License version 2.1 requirements
23 ** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
24 **
25 ** In addition, as a special exception, Digia gives you certain additional
26 ** rights. These rights are described in the Digia Qt LGPL Exception
27 ** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
28 **
29 ** GNU General Public License Usage
30 ** Alternatively, this file may be used under the terms of the GNU
31 ** General Public License version 3.0 as published by the Free Software
32 ** Foundation and appearing in the file LICENSE.GPL included in the
33 ** packaging of this file. Please review the following information to
34 ** ensure the GNU General Public License version 3.0 requirements will be
35 ** met: http://www.gnu.org/copyleft/gpl.html.
36 **
37 **
38 ** $QT_END_LICENSE$
39 **
40 ****************************************************************************/
41 
42 #ifndef QATOMIC_IA64_H
43 #define QATOMIC_IA64_H
44 
46 
48 
49 #define Q_ATOMIC_INT_REFERENCE_COUNTING_IS_ALWAYS_NATIVE
50 #define Q_ATOMIC_INT_REFERENCE_COUNTING_IS_WAIT_FREE
51 
53 { return true; }
55 { return true; }
56 
57 #define Q_ATOMIC_INT_TEST_AND_SET_IS_ALWAYS_NATIVE
58 #define Q_ATOMIC_INT_TEST_AND_SET_IS_WAIT_FREE
59 
61 { return true; }
63 { return true; }
64 
65 #define Q_ATOMIC_INT_FETCH_AND_STORE_IS_ALWAYS_NATIVE
66 #define Q_ATOMIC_INT_FETCH_AND_STORE_IS_WAIT_FREE
67 
69 { return true; }
71 { return true; }
72 
73 #define Q_ATOMIC_INT_FETCH_AND_ADD_IS_ALWAYS_NATIVE
74 
76 { return true; }
78 { return false; }
79 
80 #define Q_ATOMIC_POINTER_TEST_AND_SET_IS_ALWAYS_NATIVE
81 #define Q_ATOMIC_POINTER_TEST_AND_SET_IS_WAIT_FREE
82 
83 template <typename T>
85 { return true; }
86 template <typename T>
88 { return true; }
89 
90 #define Q_ATOMIC_POINTER_FETCH_AND_STORE_IS_ALWAYS_NATIVE
91 #define Q_ATOMIC_POINTER_FETCH_AND_STORE_IS_WAIT_FREE
92 
93 template <typename T>
95 { return true; }
96 template <typename T>
98 { return true; }
99 
100 #define Q_ATOMIC_POINTER_FETCH_AND_ADD_IS_ALWAYS_NATIVE
101 
102 template <typename T>
104 { return true; }
105 template <typename T>
107 { return false; }
108 
// Returns true when \a value is one of the literal increments (+/-1, 4, 8, 16)
// that the IA-64 fetchadd4/fetchadd8 instructions accept as an immediate
// operand; callers use this (under __builtin_constant_p) to choose the fast
// fetchadd path instead of a compare-and-swap loop.
//
// The 'register' storage-class specifier on the parameter was dropped: it was
// only an optimizer hint, deprecated in C++11 and removed in C++17, and it is
// not part of the function's interface.
inline bool _q_ia64_fetchadd_immediate(int value)
{
    switch (value) {
    case 1: case -1:
    case 4: case -4:
    case 8: case -8:
    case 16: case -16:
        return true;
    default:
        return false;
    }
}
116 
117 #if defined(Q_CC_INTEL)
118 
119 // intrinsics provided by the Intel C++ Compiler
120 #include <ia64intrin.h>
121 
// Atomically swaps in newValue and returns the previous value. The IA-64
// xchg instruction behind _InterlockedExchange has acquire semantics.
inline int QBasicAtomicInt::fetchAndStoreAcquire(int newValue)
{
    return static_cast<int>(_InterlockedExchange(&_q_value, newValue));
}
126 
// Atomic exchange with release semantics: a compiler/memory barrier is issued
// first because the exchange itself only provides acquire ordering (see
// fetchAndStoreAcquire above).
inline int QBasicAtomicInt::fetchAndStoreRelease(int newValue)
{
    __memory_barrier();
    return static_cast<int>(_InterlockedExchange(&_q_value, newValue));
}
132 
// Compare-and-swap, relaxed ordering: stores newValue only if the current
// value equals expectedValue; returns true when the store happened.
// NOTE(review): the local copy presumably shields expectedValue from being
// clobbered by the intrinsic's calling convention -- confirm against the
// Intel compiler documentation.
inline bool QBasicAtomicInt::testAndSetRelaxed(int expectedValue, int newValue)
{
    register int expectedValueCopy = expectedValue;
    return (static_cast<int>(_InterlockedCompareExchange(&_q_value,
                                                         newValue,
                                                         expectedValueCopy))
            == expectedValue);
}
141 
// Compare-and-swap with acquire semantics via the _acq intrinsic, which
// takes an unsigned pointer -- hence the reinterpret_cast of &_q_value.
inline bool QBasicAtomicInt::testAndSetAcquire(int expectedValue, int newValue)
{
    register int expectedValueCopy = expectedValue;
    return (static_cast<int>(_InterlockedCompareExchange_acq(reinterpret_cast<volatile uint *>(&_q_value),
                                                             newValue,
                                                             expectedValueCopy))
            == expectedValue);
}
150 
// Compare-and-swap with release semantics via the _rel intrinsic;
// mirrors testAndSetAcquire above.
inline bool QBasicAtomicInt::testAndSetRelease(int expectedValue, int newValue)
{
    register int expectedValueCopy = expectedValue;
    return (static_cast<int>(_InterlockedCompareExchange_rel(reinterpret_cast<volatile uint *>(&_q_value),
                                                             newValue,
                                                             expectedValueCopy))
            == expectedValue);
}
159 
// Fully-ordered compare-and-swap: a barrier before the acquire CAS supplies
// the release half of the ordering.
inline bool QBasicAtomicInt::testAndSetOrdered(int expectedValue, int newValue)
{
    __memory_barrier();
    return testAndSetAcquire(expectedValue, newValue);
}
165 
// Atomically adds valueToAdd and returns the previous value (acquire).
// When the addend is a compile-time constant +/-1, the cheaper
// fetchadd4.acq instruction is used; otherwise fall back to the
// general exchange-add intrinsic.
inline int QBasicAtomicInt::fetchAndAddAcquire(int valueToAdd)
{
    if (__builtin_constant_p(valueToAdd)) {
        if (valueToAdd == 1)
            return __fetchadd4_acq((unsigned int *)&_q_value, 1);
        if (valueToAdd == -1)
            return __fetchadd4_acq((unsigned int *)&_q_value, -1);
    }
    return _InterlockedExchangeAdd(&_q_value, valueToAdd);
}
176 
// Atomically adds valueToAdd and returns the previous value (release).
// Constant +/-1 uses fetchadd4.rel directly; the general path inserts a
// barrier before the exchange-add to provide the release ordering.
inline int QBasicAtomicInt::fetchAndAddRelease(int valueToAdd)
{
    if (__builtin_constant_p(valueToAdd)) {
        if (valueToAdd == 1)
            return __fetchadd4_rel((unsigned int *)&_q_value, 1);
        if (valueToAdd == -1)
            return __fetchadd4_rel((unsigned int *)&_q_value, -1);
    }
    __memory_barrier();
    return _InterlockedExchangeAdd(&_q_value, valueToAdd);
}
188 
// Fully-ordered fetch-and-add: barrier (release half) + acquire add.
inline int QBasicAtomicInt::fetchAndAddOrdered(int valueToAdd)
{
    __memory_barrier();
    return fetchAndAddAcquire(valueToAdd);
}
194 
// Atomically increments the value; returns whether the new value is non-zero
// (_InterlockedIncrement yields the post-increment value).
inline bool QBasicAtomicInt::ref()
{
    return _InterlockedIncrement(&_q_value) != 0;
}
199 
// Atomically decrements the value; returns whether the new value is non-zero,
// i.e. false exactly when the reference count dropped to zero.
inline bool QBasicAtomicInt::deref()
{
    return _InterlockedDecrement(&_q_value) != 0;
}
204 
205 template <typename T>
207 {
208  return (T *)_InterlockedExchangePointer(reinterpret_cast<void * volatile*>(&_q_value), newValue);
209 }
210 
211 template <typename T>
213 {
214  __memory_barrier();
215  return fetchAndStoreAcquire(newValue);
216 }
217 
// Pointer compare-and-swap, relaxed ordering; returns true when the swap
// took place. Same local-copy pattern as the int variant.
template <typename T>
Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::testAndSetRelaxed(T *expectedValue, T *newValue)
{
    register T *expectedValueCopy = expectedValue;
    return (_InterlockedCompareExchangePointer(reinterpret_cast<void * volatile*>(&_q_value),
                                               newValue,
                                               expectedValueCopy)
            == expectedValue);
}
227 
// Pointer compare-and-swap with acquire semantics. The union reinterprets
// &_q_value as the 64-bit operand type the _acq intrinsic requires without
// a chain of casts; the pointers are funneled through quintptr.
template <typename T>
Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::testAndSetAcquire(T *expectedValue, T *newValue)
{
    union {
        volatile void *x;
        volatile unsigned long *p;
    };
    x = &_q_value;
    register T *expectedValueCopy = expectedValue;
    return (_InterlockedCompareExchange64_acq(p, quintptr(newValue), quintptr(expectedValueCopy))
            == quintptr(expectedValue));
}
240 
// Pointer compare-and-swap with release semantics; mirrors the acquire
// variant above but uses the _rel intrinsic.
template <typename T>
Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::testAndSetRelease(T *expectedValue, T *newValue)
{
    union {
        volatile void *x;
        volatile unsigned long *p;
    };
    x = &_q_value;
    register T *expectedValueCopy = expectedValue;
    return (_InterlockedCompareExchange64_rel(p, quintptr(newValue), quintptr(expectedValueCopy))
            == quintptr(expectedValue));
}
253 
// Fully-ordered pointer compare-and-swap: barrier + acquire CAS.
template <typename T>
Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::testAndSetOrdered(T *expectedValue, T *newValue)
{
    __memory_barrier();
    return testAndSetAcquire(expectedValue, newValue);
}
260 
261 template <typename T>
263 {
264  return (T *)_InterlockedExchangeAdd64((volatile long *)&_q_value,
265  valueToAdd * sizeof(T));
266 }
267 
268 template <typename T>
270 {
271  __memory_barrier();
272  return (T *)_InterlockedExchangeAdd64((volatile long *)&_q_value,
273  valueToAdd * sizeof(T));
274 }
275 
276 template <typename T>
278 {
279  __memory_barrier();
280  return fetchAndAddAcquire(valueToAdd);
281 }
282 
283 #else // !Q_CC_INTEL
284 
285 # if defined(Q_CC_GNU)
286 
// GCC inline-asm: xchg4 atomically swaps the 4-byte value (acquire
// semantics are inherent to IA-64 xchg) and returns the old value.
inline int QBasicAtomicInt::fetchAndStoreAcquire(int newValue)
{
    int ret;
    asm volatile("xchg4 %0=%1,%2\n"
                 : "=r" (ret), "+m" (_q_value)
                 : "r" (newValue)
                 : "memory");
    return ret;
}
296 
// Release exchange: an mf (memory fence) instruction precedes the xchg4,
// since the exchange itself only has acquire ordering.
inline int QBasicAtomicInt::fetchAndStoreRelease(int newValue)
{
    int ret;
    asm volatile("mf\n"
                 "xchg4 %0=%1,%2\n"
                 : "=r" (ret), "+m" (_q_value)
                 : "r" (newValue)
                 : "memory");
    return ret;
}
307 
// Compare-and-swap (acquire) using cmpxchg4.acq. The comparison value must
// first be loaded into the ar.ccv application register; the ';;' stop bit
// orders that write before the cmpxchg.
inline bool QBasicAtomicInt::testAndSetAcquire(int expectedValue, int newValue)
{
    int ret;
    asm volatile("mov ar.ccv=%2\n"
                 ";;\n"
                 "cmpxchg4.acq %0=%1,%3,ar.ccv\n"
                 : "=r" (ret), "+m" (_q_value)
                 : "r" (expectedValue), "r" (newValue)
                 : "memory");
    return ret == expectedValue;
}
319 
// Compare-and-swap (release) using cmpxchg4.rel; otherwise identical to
// testAndSetAcquire above.
inline bool QBasicAtomicInt::testAndSetRelease(int expectedValue, int newValue)
{
    int ret;
    asm volatile("mov ar.ccv=%2\n"
                 ";;\n"
                 "cmpxchg4.rel %0=%1,%3,ar.ccv\n"
                 : "=r" (ret), "+m" (_q_value)
                 : "r" (expectedValue), "r" (newValue)
                 : "memory");
    return ret == expectedValue;
}
331 
// Fetch-and-add with acquire semantics, returning the previous value.
// Fast path: for a compile-time constant addend that fetchadd4 accepts as
// an immediate (+/-1, 4, 8, 16), emit a single fetchadd4.acq.
// Slow path: a cmpxchg4.acq retry loop -- r9 keeps the observed old value,
// ar.ccv holds the comparison value, and p6 predicates the backward branch
// taken when another CPU changed _q_value between the read and the cmpxchg.
inline int QBasicAtomicInt::fetchAndAddAcquire(int valueToAdd)
{
    int ret;

#if (__GNUC__ >= 4)
    // We implement a fast fetch-and-add when we can
    if (__builtin_constant_p(valueToAdd) && _q_ia64_fetchadd_immediate(valueToAdd)) {
        asm volatile("fetchadd4.acq %0=%1,%2\n"
                     : "=r" (ret), "+m" (_q_value)
                     : "i" (valueToAdd)
                     : "memory");
        return ret;
    }
#endif

    // otherwise, use a loop around test-and-set
    ret = _q_value;
    asm volatile("0:\n"
                 "  mov r9=%0\n"
                 "  mov ar.ccv=%0\n"
                 "  add %0=%0, %2\n"
                 "  ;;\n"
                 "  cmpxchg4.acq %0=%1,%0,ar.ccv\n"
                 "  ;;\n"
                 "  cmp.ne p6,p0 = %0, r9\n"
                 "(p6) br.dptk 0b\n"
                 "1:\n"
                 : "+r" (ret), "+m" (_q_value)
                 : "r" (valueToAdd)
                 : "r9", "p6", "memory");
    return ret;
}
364 
// Fetch-and-add with release semantics; same structure as
// fetchAndAddAcquire but using the .rel forms of fetchadd4/cmpxchg4.
inline int QBasicAtomicInt::fetchAndAddRelease(int valueToAdd)
{
    int ret;

#if (__GNUC__ >= 4)
    // We implement a fast fetch-and-add when we can
    if (__builtin_constant_p(valueToAdd) && _q_ia64_fetchadd_immediate(valueToAdd)) {
        asm volatile("fetchadd4.rel %0=%1,%2\n"
                     : "=r" (ret), "+m" (_q_value)
                     : "i" (valueToAdd)
                     : "memory");
        return ret;
    }
#endif

    // otherwise, use a loop around test-and-set
    ret = _q_value;
    asm volatile("0:\n"
                 "  mov r9=%0\n"
                 "  mov ar.ccv=%0\n"
                 "  add %0=%0, %2\n"
                 "  ;;\n"
                 "  cmpxchg4.rel %0=%1,%0,ar.ccv\n"
                 "  ;;\n"
                 "  cmp.ne p6,p0 = %0, r9\n"
                 "(p6) br.dptk 0b\n"
                 "1:\n"
                 : "+r" (ret), "+m" (_q_value)
                 : "r" (valueToAdd)
                 : "r9", "p6", "memory");
    return ret;
}
397 
// Fully-ordered fetch-and-add: a full memory fence followed by the
// release-ordered add.
inline int QBasicAtomicInt::fetchAndAddOrdered(int valueToAdd)
{
    asm volatile("mf" ::: "memory");
    return fetchAndAddRelease(valueToAdd);
}
403 
// Atomic increment via fetchadd4.acq, which yields the value *before* the
// add; the new value is non-zero exactly when the old value was not -1.
inline bool QBasicAtomicInt::ref()
{
    int ret;
    asm volatile("fetchadd4.acq %0=%1,1\n"
                 : "=r" (ret), "+m" (_q_value)
                 :
                 : "memory");
    return ret != -1;
}
413 
// Atomic decrement via fetchadd4.rel (old value returned); the new value is
// non-zero exactly when the old value was not 1.
inline bool QBasicAtomicInt::deref()
{
    int ret;
    asm volatile("fetchadd4.rel %0=%1,-1\n"
                 : "=r" (ret), "+m" (_q_value)
                 :
                 : "memory");
    return ret != 1;
}
423 
424 template <typename T>
426 {
427  T *ret;
428  asm volatile("xchg8 %0=%1,%2\n"
429  : "=r" (ret), "+m" (_q_value)
430  : "r" (newValue)
431  : "memory");
432  return ret;
433 }
434 
435 template <typename T>
437 {
438  T *ret;
439  asm volatile("mf\n"
440  "xchg8 %0=%1,%2\n"
441  : "=r" (ret), "+m" (_q_value)
442  : "r" (newValue)
443  : "memory");
444  return ret;
445 }
446 
// Pointer compare-and-swap (acquire) using the 8-byte cmpxchg8.acq;
// ar.ccv carries the expected pointer value.
template <typename T>
Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::testAndSetAcquire(T *expectedValue, T *newValue)
{
    T *ret;
    asm volatile("mov ar.ccv=%2\n"
                 ";;\n"
                 "cmpxchg8.acq %0=%1,%3,ar.ccv\n"
                 : "=r" (ret), "+m" (_q_value)
                 : "r" (expectedValue), "r" (newValue)
                 : "memory");
    return ret == expectedValue;
}
459 
// Pointer compare-and-swap (release) using cmpxchg8.rel; mirrors the
// acquire variant above.
template <typename T>
Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::testAndSetRelease(T *expectedValue, T *newValue)
{
    T *ret;
    asm volatile("mov ar.ccv=%2\n"
                 ";;\n"
                 "cmpxchg8.rel %0=%1,%3,ar.ccv\n"
                 : "=r" (ret), "+m" (_q_value)
                 : "r" (expectedValue), "r" (newValue)
                 : "memory");
    return ret == expectedValue;
}
472 
473 template <typename T>
475 {
476  T *ret;
477 
478 #if (__GNUC__ >= 4)
479  // We implement a fast fetch-and-add when we can
480  if (__builtin_constant_p(valueToAdd) && _q_ia64_fetchadd_immediate(valueToAdd * sizeof(T))) {
481  asm volatile("fetchadd8.acq %0=%1,%2\n"
482  : "=r" (ret), "+m" (_q_value)
483  : "i" (valueToAdd * sizeof(T))
484  : "memory");
485  return ret;
486  }
487 #endif
488 
489  // otherwise, use a loop around test-and-set
490  ret = _q_value;
491  asm volatile("0:\n"
492  " mov r9=%0\n"
493  " mov ar.ccv=%0\n"
494  " add %0=%0, %2\n"
495  " ;;\n"
496  " cmpxchg8.acq %0=%1,%0,ar.ccv\n"
497  " ;;\n"
498  " cmp.ne p6,p0 = %0, r9\n"
499  "(p6) br.dptk 0b\n"
500  "1:\n"
501  : "+r" (ret), "+m" (_q_value)
502  : "r" (valueToAdd * sizeof(T))
503  : "r9", "p6", "memory");
504  return ret;
505 }
506 
507 template <typename T>
509 {
510  T *ret;
511 
512 #if (__GNUC__ >= 4)
513  // We implement a fast fetch-and-add when we can
514  if (__builtin_constant_p(valueToAdd) && _q_ia64_fetchadd_immediate(valueToAdd * sizeof(T))) {
515  asm volatile("fetchadd8.rel %0=%1,%2\n"
516  : "=r" (ret), "+m" (_q_value)
517  : "i" (valueToAdd * sizeof(T))
518  : "memory");
519  return ret;
520  }
521 #endif
522 
523  // otherwise, use a loop around test-and-set
524  ret = _q_value;
525  asm volatile("0:\n"
526  " mov r9=%0\n"
527  " mov ar.ccv=%0\n"
528  " add %0=%0, %2\n"
529  " ;;\n"
530  " cmpxchg8.rel %0=%1,%0,ar.ccv\n"
531  " ;;\n"
532  " cmp.ne p6,p0 = %0, r9\n"
533  "(p6) br.dptk 0b\n"
534  "1:\n"
535  : "+r" (ret), "+m" (_q_value)
536  : "r" (valueToAdd * sizeof(T))
537  : "r9", "p6", "memory");
538  return ret;
539 }
540 
541 template <typename T>
543 {
544  asm volatile("mf" ::: "memory");
545  return fetchAndAddRelease(valueToAdd);
546 }
547 
548 #elif defined Q_CC_HPACC
549 
551 #include <ia64/sys/inline.h>
553 
554 #define FENCE (_Asm_fence)(_UP_CALL_FENCE | _UP_SYS_FENCE | _DOWN_CALL_FENCE | _DOWN_SYS_FENCE)
555 
// HP aCC: atomic 4-byte exchange via the _Asm_xchg intrinsic (xchg carries
// acquire semantics on IA-64); returns the previous value.
inline int QBasicAtomicInt::fetchAndStoreAcquire(int newValue)
{
    return _Asm_xchg((_Asm_sz)_SZ_W, &_q_value, (unsigned)newValue,
                     (_Asm_ldhint)_LDHINT_NONE, FENCE);
}
561 
// Release exchange: issue a memory fence (_Asm_mf) before the
// acquire-ordered xchg.
inline int QBasicAtomicInt::fetchAndStoreRelease(int newValue)
{
    _Asm_mf(FENCE);
    return _Asm_xchg((_Asm_sz)_SZ_W, &_q_value, (unsigned)newValue,
                     (_Asm_ldhint)_LDHINT_NONE, FENCE);
}
568 
// Compare-and-swap (acquire): load the comparison value into the ar.ccv
// application register, then execute a 4-byte cmpxchg with _SEM_ACQ.
inline bool QBasicAtomicInt::testAndSetAcquire(int expectedValue, int newValue)
{
    _Asm_mov_to_ar((_Asm_app_reg)_AREG_CCV, (unsigned)expectedValue, FENCE);
    int ret = _Asm_cmpxchg((_Asm_sz)_SZ_W, (_Asm_sem)_SEM_ACQ,
                           &_q_value, (unsigned)newValue, (_Asm_ldhint)_LDHINT_NONE);
    return ret == expectedValue;
}
576 
// Compare-and-swap (release) via _SEM_REL.
// NOTE(review): newValue lacks the (unsigned) cast the acquire variant
// applies -- presumably an implicit conversion with identical effect.
inline bool QBasicAtomicInt::testAndSetRelease(int expectedValue, int newValue)
{
    _Asm_mov_to_ar((_Asm_app_reg)_AREG_CCV, (unsigned)expectedValue, FENCE);
    int ret = _Asm_cmpxchg((_Asm_sz)_SZ_W, (_Asm_sem)_SEM_REL,
                           &_q_value, newValue, (_Asm_ldhint)_LDHINT_NONE);
    return ret == expectedValue;
}
584 
// Fetch-and-add (acquire), returning the previous value. +/-1 use the
// single fetchadd instruction; any other addend falls back to a
// compare-and-swap retry loop on ar.ccv.
inline int QBasicAtomicInt::fetchAndAddAcquire(int valueToAdd)
{
    if (valueToAdd == 1)
        return _Asm_fetchadd((_Asm_fasz)_FASZ_W, (_Asm_sem)_SEM_ACQ,
                             &_q_value, 1, (_Asm_ldhint)_LDHINT_NONE, FENCE);
    else if (valueToAdd == -1)
        return _Asm_fetchadd((_Asm_fasz)_FASZ_W, (_Asm_sem)_SEM_ACQ,
                             &_q_value, -1, (_Asm_ldhint)_LDHINT_NONE, FENCE);

    // implement the test-and-set loop
    register int old, ret;
    do {
        old = _q_value;
        _Asm_mov_to_ar((_Asm_app_reg)_AREG_CCV, (unsigned)old, FENCE);
        ret = _Asm_cmpxchg((_Asm_sz)_SZ_W, (_Asm_sem)_SEM_ACQ,
                           &_q_value, old + valueToAdd, (_Asm_ldhint)_LDHINT_NONE);
    } while (ret != old);
    return old;
}
604 
// Fetch-and-add (release); same structure as fetchAndAddAcquire but with
// _SEM_REL ordering on the fetchadd/cmpxchg.
inline int QBasicAtomicInt::fetchAndAddRelease(int valueToAdd)
{
    if (valueToAdd == 1)
        return _Asm_fetchadd((_Asm_fasz)_FASZ_W, (_Asm_sem)_SEM_REL,
                             &_q_value, 1, (_Asm_ldhint)_LDHINT_NONE, FENCE);
    else if (valueToAdd == -1)
        return _Asm_fetchadd((_Asm_fasz)_FASZ_W, (_Asm_sem)_SEM_REL,
                             &_q_value, -1, (_Asm_ldhint)_LDHINT_NONE, FENCE);

    // implement the test-and-set loop
    register int old, ret;
    do {
        old = _q_value;
        _Asm_mov_to_ar((_Asm_app_reg)_AREG_CCV, (unsigned)old, FENCE);
        ret = _Asm_cmpxchg((_Asm_sz)_SZ_W, (_Asm_sem)_SEM_REL,
                           &_q_value, old + valueToAdd, (_Asm_ldhint)_LDHINT_NONE);
    } while (ret != old);
    return old;
}
624 
// Fully-ordered fetch-and-add: memory fence + acquire-ordered add.
inline int QBasicAtomicInt::fetchAndAddOrdered(int valueToAdd)
{
    _Asm_mf(FENCE);
    return fetchAndAddAcquire(valueToAdd);
}
630 
// Atomic increment: _Asm_fetchadd returns the pre-increment value, so the
// new value is non-zero exactly when the old value was not -1.
inline bool QBasicAtomicInt::ref()
{
    return (int)_Asm_fetchadd((_Asm_fasz)_FASZ_W, (_Asm_sem)_SEM_ACQ,
                              &_q_value, 1, (_Asm_ldhint)_LDHINT_NONE, FENCE) != -1;
}
636 
// Atomic decrement: returns false exactly when the counter reached zero
// (old value was 1).
inline bool QBasicAtomicInt::deref()
{
    return (int)_Asm_fetchadd((_Asm_fasz)_FASZ_W, (_Asm_sem)_SEM_REL,
                              &_q_value, -1, (_Asm_ldhint)_LDHINT_NONE, FENCE) != 1;
}
642 
643 template <typename T>
645 {
646 #ifdef __LP64__
647  return (T *)_Asm_xchg((_Asm_sz)_SZ_D, &_q_value, (quint64)newValue,
648  (_Asm_ldhint)_LDHINT_NONE, FENCE);
649 #else
650  return (T *)_Asm_xchg((_Asm_sz)_SZ_W, &_q_value, (quint32)newValue,
651  (_Asm_ldhint)_LDHINT_NONE, FENCE);
652 #endif
653 }
654 
655 template <typename T>
657 {
658  _Asm_mf(FENCE);
659  return fetchAndStoreAcquire(newValue);
660 }
661 
// Pointer compare-and-swap (acquire). Pointer width follows the data model:
// 8-byte (_SZ_D) cmpxchg under LP64, 4-byte (_SZ_W) otherwise.
template <typename T>
Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::testAndSetAcquire(T *expectedValue, T *newValue)
{
#ifdef __LP64__
    _Asm_mov_to_ar((_Asm_app_reg)_AREG_CCV, (quint64)expectedValue, FENCE);
    T *ret = (T *)_Asm_cmpxchg((_Asm_sz)_SZ_D, (_Asm_sem)_SEM_ACQ,
                               &_q_value, (quint64)newValue, (_Asm_ldhint)_LDHINT_NONE);
#else
    _Asm_mov_to_ar((_Asm_app_reg)_AREG_CCV, (quint32)expectedValue, FENCE);
    T *ret = (T *)_Asm_cmpxchg((_Asm_sz)_SZ_W, (_Asm_sem)_SEM_ACQ,
                               &_q_value, (quint32)newValue, (_Asm_ldhint)_LDHINT_NONE);
#endif
    return ret == expectedValue;
}
676 
// Pointer compare-and-swap (release); mirrors the acquire variant above
// with _SEM_REL ordering.
template <typename T>
Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::testAndSetRelease(T *expectedValue, T *newValue)
{
#ifdef __LP64__
    _Asm_mov_to_ar((_Asm_app_reg)_AREG_CCV, (quint64)expectedValue, FENCE);
    T *ret = (T *)_Asm_cmpxchg((_Asm_sz)_SZ_D, (_Asm_sem)_SEM_REL,
                               &_q_value, (quint64)newValue, (_Asm_ldhint)_LDHINT_NONE);
#else
    _Asm_mov_to_ar((_Asm_app_reg)_AREG_CCV, (quint32)expectedValue, FENCE);
    T *ret = (T *)_Asm_cmpxchg((_Asm_sz)_SZ_W, (_Asm_sem)_SEM_REL,
                               &_q_value, (quint32)newValue, (_Asm_ldhint)_LDHINT_NONE);
#endif
    return ret == expectedValue;
}
691 
692 template <typename T>
694 {
695  // implement the test-and-set loop
696  register T *old, *ret;
697  do {
698  old = _q_value;
699 #ifdef __LP64__
700  _Asm_mov_to_ar((_Asm_app_reg)_AREG_CCV, (quint64)old, FENCE);
701  ret = (T *)_Asm_cmpxchg((_Asm_sz)_SZ_D, (_Asm_sem)_SEM_ACQ,
702  &_q_value, (quint64)(old + valueToAdd),
703  (_Asm_ldhint)_LDHINT_NONE);
704 #else
705  _Asm_mov_to_ar((_Asm_app_reg)_AREG_CCV, (quint32)old, FENCE);
706  ret = (T *)_Asm_cmpxchg((_Asm_sz)_SZ_W, (_Asm_sem)_SEM_ACQ,
707  &_q_value, (quint32)(old + valueToAdd),
708  (_Asm_ldhint)_LDHINT_NONE);
709 #endif
710  } while (old != ret);
711  return old;
712 }
713 
714 template <typename T>
716 {
717  // implement the test-and-set loop
718  register T *old, *ret;
719  do {
720  old = _q_value;
721 #ifdef __LP64__
722  _Asm_mov_to_ar((_Asm_app_reg)_AREG_CCV, (quint64)old, FENCE);
723  ret = (T *)_Asm_cmpxchg((_Asm_sz)_SZ_D, (_Asm_sem)_SEM_REL,
724  &_q_value, (quint64)(old + valueToAdd),
725  (_Asm_ldhint)_LDHINT_NONE);
726 #else
727  _Asm_mov_to_ar((_Asm_app_reg)_AREG_CCV, (quint32)old, FENCE);
728  ret = (T *)_Asm_cmpxchg((_Asm_sz)_SZ_W, (_Asm_sem)_SEM_REL,
729  &_q_value, (quint32)(old + valueToAdd),
730  (_Asm_ldhint)_LDHINT_NONE);
731 #endif
732  } while (old != ret);
733  return old;
734 }
735 
736 template <typename T>
738 {
739  _Asm_mf(FENCE);
740  return fetchAndAddAcquire(valueToAdd);
741 }
742 
743 #else
744 
745 extern "C" {
746  Q_CORE_EXPORT int q_atomic_test_and_set_int(volatile int *ptr, int expected, int newval);
747  Q_CORE_EXPORT int q_atomic_test_and_set_ptr(volatile void *ptr, void *expected, void *newval);
748 } // extern "C"
749 
750 #endif
751 
// Relaxed CAS maps onto the acquire variant: no weaker-ordered
// compare-exchange is provided here, so the stronger ordering stands in.
inline bool QBasicAtomicInt::testAndSetRelaxed(int expectedValue, int newValue)
{
    return testAndSetAcquire(expectedValue, newValue);
}
756 
// Fully-ordered CAS forwards to the acquire variant.
inline bool QBasicAtomicInt::testAndSetOrdered(int expectedValue, int newValue)
{
    return testAndSetAcquire(expectedValue, newValue);
}
761 
// Relaxed pointer CAS forwards to the acquire variant (see int overload).
template <typename T>
Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::testAndSetRelaxed(T *expectedValue, T *newValue)
{
    return testAndSetAcquire(expectedValue, newValue);
}
767 
// Fully-ordered pointer CAS forwards to the acquire variant.
template <typename T>
Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::testAndSetOrdered(T *expectedValue, T *newValue)
{
    return testAndSetAcquire(expectedValue, newValue);
}
773 
774 #endif // Q_CC_INTEL
775 
// Relaxed exchange forwards to the acquire variant (shared by all three
// compiler back-ends above).
inline int QBasicAtomicInt::fetchAndStoreRelaxed(int newValue)
{
    return fetchAndStoreAcquire(newValue);
}
780 
// Fully-ordered exchange forwards to the release variant, which issues a
// fence before the acquire-ordered xchg -- yielding ordering on both sides.
inline int QBasicAtomicInt::fetchAndStoreOrdered(int newValue)
{
    return fetchAndStoreRelease(newValue);
}
785 
// Relaxed fetch-and-add forwards to the acquire variant.
inline int QBasicAtomicInt::fetchAndAddRelaxed(int valueToAdd)
{
    return fetchAndAddAcquire(valueToAdd);
}
790 
791 template <typename T>
793 {
794  return fetchAndStoreAcquire(newValue);
795 }
796 
797 template <typename T>
799 {
800  return fetchAndStoreRelaxed(newValue);
801 }
802 
803 template <typename T>
805 {
806  return fetchAndAddAcquire(valueToAdd);
807 }
808 
810 
812 
813 #endif // QATOMIC_IA64_H
static bool isFetchAndStoreNative()
Definition: qatomic_alpha.h:65
static bool isTestAndSetNative()
Definition: qatomic_alpha.h:58
int fetchAndStoreRelease(int newValue)
static bool isReferenceCountingNative()
Definition: qatomic_alpha.h:51
T * fetchAndAddRelaxed(qptrdiff valueToAdd)
QIntegerForSizeof< void * >::Unsigned quintptr
Definition: qglobal.h:986
volatile int _q_value
Definition: qbasicatomic.h:64
#define QT_END_NAMESPACE
This macro expands to.
Definition: qglobal.h:90
static bool isFetchAndAddNative()
Definition: qatomic_alpha.h:98
#define QT_BEGIN_HEADER
Definition: qglobal.h:136
static bool isTestAndSetNative()
Definition: qatomic_alpha.h:80
static bool isFetchAndAddNative()
Definition: qatomic_alpha.h:72
T * fetchAndStoreRelease(T *newValue)
static bool isTestAndSetWaitFree()
Definition: qatomic_alpha.h:83
bool _q_ia64_fetchadd_immediate(register int value)
Definition: qatomic_ia64.h:109
T * fetchAndAddRelease(qptrdiff valueToAdd)
bool testAndSetOrdered(T *expectedValue, T *newValue)
int fetchAndAddAcquire(int valueToAdd)
#define QT_END_INCLUDE_NAMESPACE
This macro is equivalent to QT_BEGIN_NAMESPACE.
Definition: qglobal.h:92
T * fetchAndAddAcquire(qptrdiff valueToAdd)
int fetchAndStoreRelaxed(int newValue)
T * fetchAndAddOrdered(qptrdiff valueToAdd)
bool testAndSetAcquire(int expectedValue, int newValue)
bool testAndSetRelaxed(int expectedValue, int newValue)
#define QT_BEGIN_NAMESPACE
This macro expands to.
Definition: qglobal.h:89
int fetchAndStoreAcquire(int newValue)
Q_CORE_EXPORT int q_atomic_test_and_set_ptr(volatile void *ptr, void *expected, void *newval)
unsigned __int64 quint64
Definition: qglobal.h:943
QIntegerForSizeof< void * >::Signed qptrdiff
Definition: qglobal.h:987
T * fetchAndStoreOrdered(T *newValue)
#define Q_INLINE_TEMPLATE
Definition: qglobal.h:1713
int fetchAndAddRelease(int valueToAdd)
const T * ptr(const T &t)
bool testAndSetOrdered(int expectedValue, int newValue)
T * fetchAndStoreAcquire(T *newValue)
static bool isFetchAndStoreWaitFree()
Definition: qatomic_alpha.h:67
static bool isTestAndSetWaitFree()
Definition: qatomic_alpha.h:60
static bool isReferenceCountingWaitFree()
Definition: qatomic_alpha.h:53
int fetchAndAddOrdered(int valueToAdd)
static bool isFetchAndStoreWaitFree()
Definition: qatomic_alpha.h:92
static bool isFetchAndAddWaitFree()
Definition: qatomic_alpha.h:74
static bool isFetchAndAddWaitFree()
bool testAndSetRelease(int expectedValue, int newValue)
#define Q_CORE_EXPORT
Definition: qglobal.h:1449
Q_CORE_EXPORT int q_atomic_test_and_set_int(volatile int *ptr, int expected, int newval)
unsigned int quint32
Definition: qglobal.h:938
int fetchAndAddRelaxed(int valueToAdd)
int fetchAndStoreOrdered(int newValue)
bool testAndSetRelaxed(T *expectedValue, T *newValue)
#define QT_BEGIN_INCLUDE_NAMESPACE
This macro is equivalent to QT_END_NAMESPACE.
Definition: qglobal.h:91
bool testAndSetRelease(T *expectedValue, T *newValue)
#define QT_END_HEADER
Definition: qglobal.h:137
T * fetchAndStoreRelaxed(T *newValue)
static bool isFetchAndStoreNative()
Definition: qatomic_alpha.h:89
bool testAndSetAcquire(T *expectedValue, T *newValue)