2 * Copyright (c) 1997-1999
3 * Silicon Graphics Computer Systems, Inc.
8 * This material is provided "as is", with absolutely no warranty expressed
9 * or implied. Any use is at your own risk.
11 * Permission to use or copy this software for any purpose is hereby granted
12 * without fee, provided the above notices are retained on all copies.
13 * Permission to modify the code and to distribute modified code is granted,
14 * provided the above notices are retained, and a notice that the code was
15 * modified is included with the above copyright notice.
19 // WARNING: This is an internal header file, included by other C++
20 // standard library headers. You should not attempt to use this header
24 #ifndef _STLP_INTERNAL_THREADS_H
25 #define _STLP_INTERNAL_THREADS_H
27 // Supported threading models are native SGI, pthreads, uithreads
28 // (similar to pthreads, but based on an earlier draft of the Posix
29 // threads standard), and Win32 threads. Uithread support by Jochen
30 // Schlick, 1999, and Solaris threads generalized to them.
32 #ifndef _STLP_INTERNAL_CSTDDEF
33 # include <stl/_cstddef.h>
36 #ifndef _STLP_INTERNAL_CSTDLIB
37 # include <stl/_cstdlib.h>
40 // On SUN and Mac OS X gcc, zero-initialization works just fine...
41 #if defined (__sun) || (defined (__GNUC__) && defined(__APPLE__))
42 # define _STLP_MUTEX_INITIALIZER
45 /* This header defines the following atomic operations that platforms should
46 * try to support as much as possible. Atomic operations are exposed as macros
47 * in order to easily test for their existence. They are:
48 * __stl_atomic_t _STLP_ATOMIC_INCREMENT(volatile __stl_atomic_t* __ptr) :
49 * increment *__ptr by 1 and returns the new value
50 * __stl_atomic_t _STLP_ATOMIC_DECREMENT(volatile __stl_atomic_t* __ptr) :
51 * decrement *__ptr by 1 and returns the new value
52 * __stl_atomic_t _STLP_ATOMIC_EXCHANGE(volatile __stl_atomic_t* __target, __stl_atomic_t __val) :
53 * assign __val to *__target and returns former *__target value
54 * void* _STLP_ATOMIC_EXCHANGE_PTR(void* volatile* __target, void* __ptr) :
55 * assign __ptr to *__target and returns former *__target value
58 #if defined (_STLP_THREADS)
60 # if defined (_STLP_SGI_THREADS)
63 // Hack for SGI o32 compilers.
64 # if !defined(__add_and_fetch) && \
65 (__mips < 3 || !(defined (_ABIN32) || defined(_ABI64)))
66 # define __add_and_fetch(__l,__v) add_then_test((unsigned long*)__l,__v)
67 # define __test_and_set(__l,__v) test_and_set(__l,__v)
70 # if __mips < 3 || !(defined (_ABIN32) || defined(_ABI64))
71 # define _STLP_ATOMIC_EXCHANGE(__p, __q) test_and_set(__p, __q)
73 # define _STLP_ATOMIC_EXCHANGE(__p, __q) __test_and_set((unsigned long*)__p, (unsigned long)__q)
76 # define _STLP_ATOMIC_INCREMENT(__x) __add_and_fetch(__x, 1)
77 # define _STLP_ATOMIC_DECREMENT(__x) __add_and_fetch(__x, (size_t) -1)
78 typedef long __stl_atomic_t
;
80 # elif defined (_STLP_PTHREADS)
83 # if !defined (_STLP_USE_PTHREAD_SPINLOCK)
84 # if defined (PTHREAD_MUTEX_INITIALIZER) && !defined (_STLP_MUTEX_INITIALIZER) && defined (_REENTRANT)
85 # define _STLP_MUTEX_INITIALIZER = { PTHREAD_MUTEX_INITIALIZER }
87 //HPUX variants have (on some platforms optional) non-standard "DCE" pthreads impl
88 # if defined (_DECTHREADS_) && (defined (_PTHREAD_USE_D4) || defined (__hpux)) && !defined (_CMA_SUPPRESS_EXTERNALS_)
89 # define _STLP_PTHREAD_ATTR_DEFAULT pthread_mutexattr_default
91 # define _STLP_PTHREAD_ATTR_DEFAULT 0
94 # if defined (__OpenBSD__)
95 # include <spinlock.h>
// GCC/i386 fallback: implement atomic increment/decrement with an inline
// "lock; xaddl" when the platform did not already supply the
// _STLP_ATOMIC_INCREMENT / _STLP_ATOMIC_DECREMENT macros.
// NOTE(review): this listing appears truncated — the asm statement
// introducer (__asm__ __volatile__), the local "result" declaration, the
// return statements, the function closing braces and several
// #else/#endif lines are not visible here; confirm against the original
// header before editing.
99 # if defined (__GNUC__) && defined (__i386__)
100 # if !defined (_STLP_ATOMIC_INCREMENT)
101 inline long _STLP_atomic_increment_gcc_x86(long volatile* p
) {
104 ("lock; xaddl %1, %0;"
105 :"=m" (*p
), "=r" (result
)
110 # define _STLP_ATOMIC_INCREMENT(__x) (_STLP_atomic_increment_gcc_x86((long volatile*)__x))
113 # if !defined (_STLP_ATOMIC_DECREMENT)
114 inline long _STLP_atomic_decrement_gcc_x86(long volatile* p
) {
117 ("lock; xaddl %1, %0;"
118 :"=m" (*p
), "=r" (result
)
123 # define _STLP_ATOMIC_DECREMENT(__x) (_STLP_atomic_decrement_gcc_x86((long volatile*)__x))
// __stl_atomic_t: integer type the atomic macros operate on — "long" on
// GCC/i386, "size_t" otherwise (the intervening #else is not visible).
125 typedef long __stl_atomic_t
;
127 typedef size_t __stl_atomic_t
;
128 # endif /* if defined(__GNUC__) && defined(__i386__) */
130 # elif defined (_STLP_WIN32THREADS)
132 # if !defined (_STLP_ATOMIC_INCREMENT)
133 # if !defined (_STLP_NEW_PLATFORM_SDK)
134 # define _STLP_ATOMIC_INCREMENT(__x) InterlockedIncrement(__CONST_CAST(long*, __x))
135 # define _STLP_ATOMIC_DECREMENT(__x) InterlockedDecrement(__CONST_CAST(long*, __x))
136 # define _STLP_ATOMIC_EXCHANGE(__x, __y) InterlockedExchange(__CONST_CAST(long*, __x), __y)
138 # define _STLP_ATOMIC_INCREMENT(__x) InterlockedIncrement(__x)
139 # define _STLP_ATOMIC_DECREMENT(__x) InterlockedDecrement(__x)
140 # define _STLP_ATOMIC_EXCHANGE(__x, __y) InterlockedExchange(__x, __y)
142 # define _STLP_ATOMIC_EXCHANGE_PTR(__x, __y) STLPInterlockedExchangePointer(__x, __y)
144 typedef long __stl_atomic_t
;
146 # elif defined (__DECC) || defined (__DECCXX)
148 # include <machine/builtins.h>
149 # define _STLP_ATOMIC_EXCHANGE __ATOMIC_EXCH_LONG
150 # define _STLP_ATOMIC_INCREMENT(__x) __ATOMIC_ADD_LONG(__x, 1)
151 # define _STLP_ATOMIC_DECREMENT(__x) __ATOMIC_ADD_LONG(__x, -1)
152 typedef long __stl_atomic_t
;
154 # elif defined (_STLP_SPARC_SOLARIS_THREADS)
156 typedef long __stl_atomic_t
;
157 # include <stl/_sparc_atomic.h>
159 # elif defined (_STLP_UITHREADS)
161 // This inclusion is a potential hazard: it may bring in all sorts
162 // of old-style headers. Let's assume the vendor already knows how
163 // to deal with that.
164 # ifndef _STLP_INTERNAL_CTIME
165 # include <stl/_ctime.h>
167 # if defined (_STLP_USE_NAMESPACES) && ! defined (_STLP_VENDOR_GLOBAL_CSTD)
168 using _STLP_VENDOR_CSTD::time_t;
171 # ifndef _STLP_INTERNAL_CSTDIO
172 # include <stl/_cstdio.h>
174 # ifndef _STLP_INTERNAL_CWCHAR
175 # include <stl/_cwchar.h>
177 typedef size_t __stl_atomic_t
;
179 # elif defined (_STLP_BETHREADS)
184 # define _STLP_MUTEX_INITIALIZER = { 0 }
185 typedef size_t __stl_atomic_t
;
187 # elif defined (_STLP_NWTHREADS)
189 # include <nwthread.h>
190 # include <nwsemaph.h>
191 typedef size_t __stl_atomic_t
;
193 # elif defined(_STLP_OS2THREADS)
195 # if defined (__GNUC__)
196 # define INCL_DOSSEMAPHORES
199 // This section serves to replace os2.h for VisualAge C++
200 typedef unsigned long ULONG
;
201 # if !defined (__HEV__) /* INCL_SEMAPHORE may also define HEV */
206 typedef ULONG APIRET
;
209 typedef const char* PCSZ
;
210 typedef ULONG BOOL32
;
211 APIRET _System
DosCreateMutexSem(PCSZ pszName
, PHEV phev
, ULONG flAttr
, BOOL32 fState
);
212 APIRET _System
DosRequestMutexSem(HMTX hmtx
, ULONG ulTimeout
);
213 APIRET _System
DosReleaseMutexSem(HMTX hmtx
);
214 APIRET _System
DosCloseMutexSem(HMTX hmtx
);
215 # define _STLP_MUTEX_INITIALIZER = { 0 }
217 typedef size_t __stl_atomic_t
;
221 typedef size_t __stl_atomic_t
;
227 # define _STLP_ATOMIC_INCREMENT(__x) ++(*__x)
228 # define _STLP_ATOMIC_DECREMENT(__x) --(*__x)
229 /* We do not provide other atomic operations, as they are useless if STLport does not have
232 typedef size_t __stl_atomic_t
;
// Fallback static initializer for _STLP_STATIC_MUTEX when no platform
// section above defined one: zero-initialize when an atomic exchange
// primitive exists, use DEFAULTMUTEX for UI threads, otherwise empty.
// NOTE(review): the "# else" (original line 240) and the closing
// "# endif" are not visible in this listing.
235 #if !defined (_STLP_MUTEX_INITIALIZER)
236 # if defined(_STLP_ATOMIC_EXCHANGE)
237 # define _STLP_MUTEX_INITIALIZER = { 0 }
238 # elif defined(_STLP_UITHREADS)
239 # define _STLP_MUTEX_INITIALIZER = { DEFAULTMUTEX }
241 # define _STLP_MUTEX_INITIALIZER
245 _STLP_BEGIN_NAMESPACE
247 #if defined (_STLP_THREADS) && !defined (_STLP_USE_PTHREAD_SPINLOCK)
248 // Helper struct. This is a workaround for various compilers that don't
249 // handle static variables in inline functions properly.
// Spin-lock helper: per-instantiation static spin parameters plus the
// out-of-line lock/backoff-sleep primitives (their definitions live in
// stl/_threads.c, included at the bottom of this header).
// NOTE(review): the closing "};" of this struct (original line ~258) is
// not visible in this listing.
250 template <int __inst
>
251 struct _STLP_mutex_spin
{
252 enum { __low_max
= 30, __high_max
= 1000 };
253 // Low if we suspect uniprocessor, high for multiprocessor.
// __max / __last: adaptive spin-count state shared by all users of the
// instantiation.
254 static unsigned __max
;
255 static unsigned __last
;
256 static void _STLP_CALL
_M_do_lock(volatile __stl_atomic_t
* __lock
);
257 static void _STLP_CALL
_S_nsec_sleep(int __log_nsec
, unsigned int& __iteration
);
259 #endif // !_STLP_USE_PTHREAD_SPINLOCK
261 // Locking class. Note that this class *does not have a constructor*.
262 // It must be initialized either statically, with _STLP_MUTEX_INITIALIZER,
263 // or dynamically, by explicitly calling the _M_initialize member function.
264 // (This is similar to the ways that a pthreads mutex can be initialized.)
265 // There are explicit member functions for acquiring and releasing the lock.
267 // There is no constructor because static initialization is essential for
268 // some uses, and only a class aggregate (see section 8.5.1 of the C++
269 // standard) can be initialized that way. That means we must have no
270 // constructors, no base classes, no virtual functions, and no private or
271 // protected members.
273 // For non-static cases, clients should use _STLP_mutex.
// _STLP_mutex_base: POD-style mutex with no constructor/destructor, so
// that it can be statically initialized with _STLP_MUTEX_INITIALIZER
// (see the comment block above). Callers must pair _M_initialize()/
// _M_destroy() explicitly for non-static instances (use _STLP_mutex).
// The implementation is selected by the threading-model macros.
// NOTE(review): this listing appears to be missing lines throughout —
// several closing braces and #else/#endif directives are not visible.
275 struct _STLP_CLASS_DECLSPEC _STLP_mutex_base
{
276 #if defined (_STLP_ATOMIC_EXCHANGE) || defined (_STLP_SGI_THREADS)
277 // It should be relatively easy to get this to work on any modern Unix.
278 volatile __stl_atomic_t _M_lock
;
281 #if defined (_STLP_THREADS)
282 # if defined (_STLP_ATOMIC_EXCHANGE)
283 inline void _M_initialize() { _M_lock
= 0; }
284 inline void _M_destroy() {}
// Spin-acquire via the shared _STLP_mutex_spin<0> helper.
286 void _M_acquire_lock() {
287 _STLP_mutex_spin
<0>::_M_do_lock(&_M_lock
);
290 inline void _M_release_lock() {
291 volatile __stl_atomic_t
* __lock
= &_M_lock
;
292 # if defined(_STLP_SGI_THREADS) && defined(__GNUC__) && __mips >= 3
295 # elif defined(_STLP_SGI_THREADS) && __mips >= 3 && \
296 (defined (_ABIN32) || defined(_ABI64))
297 __lock_release(__lock
);
298 # elif defined (_STLP_SPARC_SOLARIS_THREADS)
299 # if defined (__WORD64) || defined (__arch64__) || defined (__sparcv9) || defined (__sparcv8plus)
300 asm("membar #StoreStore ; membar #LoadStore");
307 // This is not sufficient on many multiprocessors, since
308 // writes to protected variables and the lock may be reordered.
311 # elif defined (_STLP_PTHREADS)
312 # if defined (_STLP_USE_PTHREAD_SPINLOCK)
313 # if !defined (__OpenBSD__)
314 pthread_spinlock_t _M_lock
;
315 inline void _M_initialize() { pthread_spin_init( &_M_lock
, 0 ); }
316 inline void _M_destroy() { pthread_spin_destroy( &_M_lock
); }
318 // sorry, but no static initializer for pthread_spinlock_t;
319 // this will not work for compilers that have problems calling
320 // constructors of static objects...
322 // _STLP_mutex_base()
323 // { pthread_spin_init( &_M_lock, 0 ); }
325 // ~_STLP_mutex_base()
326 // { pthread_spin_destroy( &_M_lock ); }
328 inline void _M_acquire_lock() { pthread_spin_lock( &_M_lock
); }
329 inline void _M_release_lock() { pthread_spin_unlock( &_M_lock
); }
330 # else // __OpenBSD__
332 inline void _M_initialize() { _SPINLOCK_INIT( &_M_lock
); }
333 inline void _M_destroy() { }
334 inline void _M_acquire_lock() { _SPINLOCK( &_M_lock
); }
335 inline void _M_release_lock() { _SPINUNLOCK( &_M_lock
); }
336 # endif // __OpenBSD__
337 # else // !_STLP_USE_PTHREAD_SPINLOCK
338 pthread_mutex_t _M_lock
;
339 inline void _M_initialize()
340 { pthread_mutex_init(&_M_lock
,_STLP_PTHREAD_ATTR_DEFAULT
); }
341 inline void _M_destroy()
342 { pthread_mutex_destroy(&_M_lock
); }
343 inline void _M_acquire_lock() {
// HP-UX DCE threads: no PTHREAD_MUTEX_INITIALIZER, so the mutex is
// lazily initialized here; this appears to peek at an internal member
// (field1) of the DCE pthread_mutex_t — implementation-specific, and
// the lazy check itself is racy. TODO(review): confirm against the
// DCE threads headers.
344 # if defined ( __hpux ) && ! defined (PTHREAD_MUTEX_INITIALIZER)
345 if (!_M_lock
.field1
) _M_initialize();
347 pthread_mutex_lock(&_M_lock
);
349 inline void _M_release_lock() { pthread_mutex_unlock(&_M_lock
); }
350 # endif // !_STLP_USE_PTHREAD_SPINLOCK
352 # elif defined (_STLP_UITHREADS)
354 inline void _M_initialize()
355 { mutex_init(&_M_lock
, 0, NULL
); }
356 inline void _M_destroy()
357 { mutex_destroy(&_M_lock
); }
358 inline void _M_acquire_lock() { mutex_lock(&_M_lock
); }
359 inline void _M_release_lock() { mutex_unlock(&_M_lock
); }
361 # elif defined (_STLP_OS2THREADS)
363 inline void _M_initialize() { DosCreateMutexSem(NULL
, &_M_lock
, 0, false); }
364 inline void _M_destroy() { DosCloseMutexSem(_M_lock
); }
// Lazy init on first acquire (a statically initialized handle is 0).
365 inline void _M_acquire_lock() {
366 if (!_M_lock
) _M_initialize();
367 DosRequestMutexSem(_M_lock
, SEM_INDEFINITE_WAIT
);
369 inline void _M_release_lock() { DosReleaseMutexSem(_M_lock
); }
370 # elif defined (_STLP_BETHREADS)
372 inline void _M_initialize() {
373 sem
= create_sem(1, "STLPort");
376 inline void _M_destroy() {
377 int t
= delete_sem(sem
);
378 assert(t
== B_NO_ERROR
);
// BeOS _M_acquire_lock is defined out-of-line at the bottom of this
// header (it needs _STLP_beos_static_lock_data).
380 inline void _M_acquire_lock();
381 inline void _M_release_lock() {
382 status_t t
= release_sem(sem
);
383 assert(t
== B_NO_ERROR
);
385 # elif defined (_STLP_NWTHREADS)
387 inline void _M_initialize()
388 { _M_lock
= OpenLocalSemaphore(1); }
389 inline void _M_destroy()
390 { CloseLocalSemaphore(_M_lock
); }
391 inline void _M_acquire_lock()
392 { WaitOnLocalSemaphore(_M_lock
); }
393 inline void _M_release_lock() { SignalLocalSemaphore(_M_lock
); }
394 # else //*ty 11/24/2001 - added configuration check
395 # error "Unknown thread facility configuration"
397 #else /* No threads */
// Single-threaded build: all operations are no-ops.
398 inline void _M_initialize() {}
399 inline void _M_destroy() {}
400 inline void _M_acquire_lock() {}
401 inline void _M_release_lock() {}
402 #endif // _STLP_PTHREADS
405 // Locking class. The constructor initializes the lock, the destructor destroys it.
406 // Well-behaved class; does not need a static initializer.
// _STLP_mutex: well-behaved mutex — the constructor initializes the
// underlying _STLP_mutex_base and the destructor destroys it, so no
// static initializer is needed. The copy constructor and assignment
// operator are declared but never defined, to forbid copying
// (the access-specifier lines, presumably "public:"/"private:", and the
// closing "};" are not visible in this listing).
408 class _STLP_CLASS_DECLSPEC _STLP_mutex
: public _STLP_mutex_base
{
410 inline _STLP_mutex () { _M_initialize(); }
411 inline ~_STLP_mutex () { _M_destroy(); }
413 _STLP_mutex(const _STLP_mutex
&);
414 void operator=(const _STLP_mutex
&);
417 // A locking class that uses _STLP_STATIC_MUTEX. The constructor takes
418 // a reference to an _STLP_STATIC_MUTEX, and acquires a lock. The destructor
419 // releases the lock.
420 // It's not clear that this is exactly the right functionality.
421 // It will probably change in the future.
// Scoped lock guard: acquires the given _STLP_STATIC_MUTEX on
// construction and releases it on destruction. Non-copyable (copy
// constructor and assignment declared but never defined).
// NOTE(review): the destructor's signature line (original line ~426)
// is not visible in this listing — only its body remains.
423 struct _STLP_CLASS_DECLSPEC _STLP_auto_lock
{
424 _STLP_auto_lock(_STLP_STATIC_MUTEX
& __lock
) : _M_lock(__lock
)
425 { _M_lock
._M_acquire_lock(); }
427 { _M_lock
._M_release_lock(); }
// Reference to the guarded mutex; the guard does not own it.
430 _STLP_STATIC_MUTEX
& _M_lock
;
431 void operator=(const _STLP_auto_lock
&);
432 _STLP_auto_lock(const _STLP_auto_lock
&);
436 * Class _Refcount_Base provides a type, __stl_atomic_t, a data member,
437 * _M_ref_count, and member functions _M_incr and _M_decr, which perform
438 * atomic preincrement/predecrement. The constructor initializes
// _Refcount_Base: reference-count helper. _M_incr/_M_decr perform
// atomic pre-increment/pre-decrement of _M_ref_count — via the platform
// atomic macros when available, otherwise under the per-object
// _M_mutex (the _STLP_USE_MUTEX configuration below).
441 class _STLP_CLASS_DECLSPEC _Refcount_Base
{
442 // The data member _M_ref_count
443 #if defined (__DMC__)
446 _STLP_VOLATILE __stl_atomic_t _M_ref_count
;
// Fall back to a mutex when either atomic op is missing, or on Win95-
// like targets.
448 #if defined (_STLP_THREADS) && \
449 (!defined (_STLP_ATOMIC_INCREMENT) || !defined (_STLP_ATOMIC_DECREMENT) || \
450 defined (_STLP_WIN95_LIKE))
451 # define _STLP_USE_MUTEX
452 _STLP_mutex _M_mutex
;
// Constructor: set the initial reference count.
457 _Refcount_Base(__stl_atomic_t __n
) : _M_ref_count(__n
) {}
458 #if defined (__BORLANDC__)
462 // _M_incr and _M_decr
463 #if defined (_STLP_THREADS)
464 # if !defined (_STLP_USE_MUTEX)
465 __stl_atomic_t
_M_incr() { return _STLP_ATOMIC_INCREMENT(&_M_ref_count
); }
466 __stl_atomic_t
_M_decr() { return _STLP_ATOMIC_DECREMENT(&_M_ref_count
); }
// Mutex-protected variant (the "# else" introducing it is not visible
// in this listing); _STLP_USE_MUTEX is consumed and undefined here.
468 # undef _STLP_USE_MUTEX
469 __stl_atomic_t
_M_incr() {
470 _STLP_auto_lock
l(_M_mutex
);
471 return ++_M_ref_count
;
473 __stl_atomic_t
_M_decr() {
474 _STLP_auto_lock
l(_M_mutex
);
475 return --_M_ref_count
;
478 #else /* No threads */
479 __stl_atomic_t
_M_incr() { return ++_M_ref_count
; }
480 __stl_atomic_t
_M_decr() { return --_M_ref_count
; }
484 /* Atomic swap on __stl_atomic_t
485 * This is guaranteed to behave as though it were atomic only if all
486 * possibly concurrent updates use _Atomic_swap.
487 * In some cases the operation is emulated with a lock.
488 * Idem for _Atomic_swap_ptr
490 /* Helper struct to handle following cases:
491 * - on platforms where sizeof(__stl_atomic_t) == sizeof(void*) atomic
492 * exchange can be done on pointers
493 * - on platform without atomic operation swap is done in a critical section,
494 * portable but inefficient.
// Primary template (instantiated with __use_ptr_atomic_swap != 0, i.e.
// sizeof(__stl_atomic_t) == sizeof(void*)): atomic swap for
// __stl_atomic_t values and for pointers. Uses the platform exchange
// macros when present; otherwise falls back to a static mutex
// (_S_swap_lock) — portable but slow.
// NOTE(review): several lines are missing from this listing (the
// assignment "*__p = __q" between read and unlock, returns, closing
// braces, #else/#endif directives).
496 template <int __use_ptr_atomic_swap
>
497 class _Atomic_swap_struct
{
499 #if defined (_STLP_THREADS) && \
500 !defined (_STLP_ATOMIC_EXCHANGE) && \
501 (defined (_STLP_PTHREADS) || defined (_STLP_UITHREADS) || defined (_STLP_OS2THREADS) || \
502 defined (_STLP_USE_PTHREAD_SPINLOCK) || defined (_STLP_NWTHREADS))
503 # define _STLP_USE_ATOMIC_SWAP_MUTEX
504 static _STLP_STATIC_MUTEX _S_swap_lock
;
// Swap *__p with __q; returns the former *__p.
507 static __stl_atomic_t
_S_swap(_STLP_VOLATILE __stl_atomic_t
* __p
, __stl_atomic_t __q
) {
508 #if defined (_STLP_THREADS)
509 # if defined (_STLP_ATOMIC_EXCHANGE)
510 return _STLP_ATOMIC_EXCHANGE(__p
, __q
);
511 # elif defined (_STLP_USE_ATOMIC_SWAP_MUTEX)
512 _S_swap_lock
._M_acquire_lock();
513 __stl_atomic_t __result
= *__p
;
515 _S_swap_lock
._M_release_lock();
518 # error Missing atomic swap implementation
522 __stl_atomic_t __result
= *__p
;
525 #endif // _STLP_THREADS
// Pointer variant of the swap; same fallback strategy.
528 static void* _S_swap_ptr(void* _STLP_VOLATILE
* __p
, void* __q
) {
529 #if defined (_STLP_THREADS)
530 # if defined (_STLP_ATOMIC_EXCHANGE_PTR)
531 return _STLP_ATOMIC_EXCHANGE_PTR(__p
, __q
);
532 # elif defined (_STLP_ATOMIC_EXCHANGE)
// Only valid because this specialization is selected when
// __stl_atomic_t and void* have the same size — asserted here.
533 _STLP_STATIC_ASSERT(sizeof(__stl_atomic_t
) == sizeof(void*))
534 return __REINTERPRET_CAST(void*, _STLP_ATOMIC_EXCHANGE(__REINTERPRET_CAST(volatile __stl_atomic_t
*, __p
),
535 __REINTERPRET_CAST(__stl_atomic_t
, __q
))
537 # elif defined (_STLP_USE_ATOMIC_SWAP_MUTEX)
538 _S_swap_lock
._M_acquire_lock();
539 void *__result
= *__p
;
541 _S_swap_lock
._M_release_lock();
544 # error Missing pointer atomic swap implementation
548 void *__result
= *__p
;
// Specialization for __use_ptr_atomic_swap == 0 (sizeof(__stl_atomic_t)
// != sizeof(void*)): the integer and pointer swaps cannot share one
// exchange primitive, so the mutex fallback is needed whenever either
// exchange macro is missing.
// NOTE(review): the "template <>" / _STLP_TEMPLATE_NULL line preceding
// this class, plus assignments, returns, closing braces and several
// #else/#endif directives, are not visible in this listing.
556 class _Atomic_swap_struct
<0> {
558 #if defined (_STLP_THREADS) && \
559 (!defined (_STLP_ATOMIC_EXCHANGE) || !defined (_STLP_ATOMIC_EXCHANGE_PTR)) && \
560 (defined (_STLP_PTHREADS) || defined (_STLP_UITHREADS) || defined (_STLP_OS2THREADS) || \
561 defined (_STLP_USE_PTHREAD_SPINLOCK) || defined (_STLP_NWTHREADS))
562 # define _STLP_USE_ATOMIC_SWAP_MUTEX
563 static _STLP_STATIC_MUTEX _S_swap_lock
;
// Swap *__p with __q; returns the former *__p.
566 static __stl_atomic_t
_S_swap(_STLP_VOLATILE __stl_atomic_t
* __p
, __stl_atomic_t __q
) {
567 #if defined (_STLP_THREADS)
568 # if defined (_STLP_ATOMIC_EXCHANGE)
569 return _STLP_ATOMIC_EXCHANGE(__p
, __q
);
570 # elif defined (_STLP_USE_ATOMIC_SWAP_MUTEX)
571 /* This should be portable, but performance is expected
572 * to be quite awful. This really needs platform specific
575 _S_swap_lock
._M_acquire_lock();
576 __stl_atomic_t __result
= *__p
;
578 _S_swap_lock
._M_release_lock();
581 # error Missing atomic swap implementation
585 __stl_atomic_t __result
= *__p
;
588 #endif // _STLP_THREADS
// Pointer variant; identical fallback structure.
591 static void* _S_swap_ptr(void* _STLP_VOLATILE
* __p
, void* __q
) {
592 #if defined (_STLP_THREADS)
593 # if defined (_STLP_ATOMIC_EXCHANGE_PTR)
594 return _STLP_ATOMIC_EXCHANGE_PTR(__p
, __q
);
595 # elif defined (_STLP_ATOMIC_EXCHANGE)
596 _STLP_STATIC_ASSERT(sizeof(__stl_atomic_t
) == sizeof(void*))
597 return __REINTERPRET_CAST(void*, _STLP_ATOMIC_EXCHANGE(__REINTERPRET_CAST(volatile __stl_atomic_t
*, __p
),
598 __REINTERPRET_CAST(__stl_atomic_t
, __q
))
600 # elif defined (_STLP_USE_ATOMIC_SWAP_MUTEX)
601 _S_swap_lock
._M_acquire_lock();
602 void *__result
= *__p
;
604 _S_swap_lock
._M_release_lock();
607 # error Missing pointer atomic swap implementation
611 void *__result
= *__p
;
618 #if defined (_STLP_MSVC) && (_STLP_MSVC == 1300)
619 # pragma warning (push)
620 # pragma warning (disable : 4189) //__use_ptr_atomic_swap initialized but not used
// Public entry point: atomically swap *__p with __q and return the
// former value. Dispatches to the _Atomic_swap_struct specialization
// chosen by whether __stl_atomic_t and void* have the same size.
// NOTE(review): the function's closing brace (original line ~626) is
// not visible in this listing.
623 inline __stl_atomic_t _STLP_CALL
_Atomic_swap(_STLP_VOLATILE __stl_atomic_t
* __p
, __stl_atomic_t __q
) {
624 const int __use_ptr_atomic_swap
= sizeof(__stl_atomic_t
) == sizeof(void*);
625 return _Atomic_swap_struct
<__use_ptr_atomic_swap
>::_S_swap(__p
, __q
);
// Pointer counterpart of _Atomic_swap: atomically swap *__p with __q
// and return the former pointer, via the same size-based dispatch.
// NOTE(review): the function's closing brace (original line ~631) is
// not visible in this listing.
628 inline void* _STLP_CALL
_Atomic_swap_ptr(void* _STLP_VOLATILE
* __p
, void* __q
) {
629 const int __use_ptr_atomic_swap
= sizeof(__stl_atomic_t
) == sizeof(void*);
630 return _Atomic_swap_struct
<__use_ptr_atomic_swap
>::_S_swap_ptr(__p
, __q
);
633 #if defined (_STLP_MSVC) && (_STLP_MSVC == 1300)
634 # pragma warning (pop)
// BeOS only: a template holding a global bootstrap mutex (mut) and a
// flag (is_init) recording whether static initialization has run. The
// nested mutex_t's constructor/destructor flip the flag (their
// signature lines are not visible in this listing). Static members are
// defined below so the header stays link-time-safe.
637 #if defined (_STLP_BETHREADS)
638 template <int __inst
>
639 struct _STLP_beos_static_lock_data
{
641 struct mutex_t
: public _STLP_mutex
{
643 { _STLP_beos_static_lock_data
<0>::is_init
= true; }
645 { _STLP_beos_static_lock_data
<0>::is_init
= false; }
// Definitions of the static members declared above.
650 template <int __inst
>
651 bool _STLP_beos_static_lock_data
<__inst
>::is_init
= false;
652 template <int __inst
>
653 typename _STLP_beos_static_lock_data
<__inst
>::mutex_t _STLP_beos_static_lock_data
<__inst
>::mut
;
// BeOS out-of-line definition of _M_acquire_lock: lazily create the
// semaphore on first use, guarding the creation with the global
// bootstrap mutex when it is already constructed.
// NOTE(review): lines are missing from this listing (the else-branch
// body, the declaration of "t", closing braces).
655 inline void _STLP_mutex_base::_M_acquire_lock() {
657 // We need to initialise on demand here;
658 // to prevent race conditions, use our global
659 // mutex if it's available:
660 if (_STLP_beos_static_lock_data
<0>::is_init
) {
661 _STLP_auto_lock
al(_STLP_beos_static_lock_data
<0>::mut
);
662 if (sem
== 0) _M_initialize();
665 // no lock available, we must still be
666 // in startup code, THERE MUST BE ONE THREAD
667 // ONLY active at this point.
672 t
= acquire_sem(sem
);
673 assert(t
== B_NO_ERROR
);
679 #if !defined (_STLP_LINK_TIME_INSTANTIATION)
680 # include <stl/_threads.c>
683 #endif /* _STLP_INTERNAL_THREADS_H */