#ifndef __TBB_queuing_rw_mutex_H
#define __TBB_queuing_rw_mutex_H

#include "tbb_config.h"

#if !TBB_USE_EXCEPTIONS && _MSC_VER
    // Suppress "C++ exception handler used, but unwind semantics are not enabled" warnings from STL headers
    #pragma warning (push)
    #pragma warning (disable: 4530)
#endif

#include <cstring>

#if !TBB_USE_EXCEPTIONS && _MSC_VER
    #pragma warning (pop)
#endif

#include "atomic.h"
#include "tbb_profiling.h"
namespace tbb {

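// Illustrative usage sketch (not part of the original header); the names
// table_mutex, update_table and read_table are hypothetical. A scoped_lock
// taken with write=true gives exclusive access; write=false allows readers
// to proceed concurrently.
//
//     tbb::queuing_rw_mutex table_mutex;
//
//     void update_table() {
//         tbb::queuing_rw_mutex::scoped_lock lock( table_mutex, /*write=*/true );
//         // ... modify shared state ...
//     }   // lock released by the scoped_lock destructor
//
//     void read_table() {
//         tbb::queuing_rw_mutex::scoped_lock lock( table_mutex, /*write=*/false );
//         // ... read shared state ...
//     }
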
//! Queuing reader-writer mutex with local-only spinning.
class queuing_rw_mutex {
public:
    //! Construct unacquired mutex.
    queuing_rw_mutex() {
        q_tail = NULL;
#if TBB_USE_THREADING_TOOLS
        internal_construct();
#endif
    }

    //! Destructor asserts if the mutex is still acquired, i.e. q_tail is non-NULL.
    ~queuing_rw_mutex() {
#if TBB_USE_ASSERT
        __TBB_ASSERT( !q_tail, "destruction of an acquired mutex");
#endif
    }

    //! The scoped locking pattern.
    /** Helps to avoid the common problem of forgetting to release the lock.
        It also serves as the queue "node" for the queuing mutex. */
    class scoped_lock: internal::no_copy {
        //! Initialize fields to mean "no lock held".
        void initialize() {
            my_mutex = NULL;
#if TBB_USE_ASSERT
            my_state = 0xFF; // set to an invalid state
            internal::poison_pointer(my_next);
            internal::poison_pointer(my_prev);
#endif
        }

    public:
        //! Construct lock that has not acquired a mutex.
        scoped_lock() {initialize();}

        //! Construct lock and acquire the given mutex.
        scoped_lock( queuing_rw_mutex& m, bool write=true ) {
            initialize();
            acquire(m,write);
        }

        //! Release lock (if a lock is held).
        ~scoped_lock() {
            if( my_mutex ) release();
        }

        //! Acquire lock on given mutex.
        void acquire( queuing_rw_mutex& m, bool write=true );

        //! Acquire lock on given mutex if free (i.e. non-blocking).
        bool try_acquire( queuing_rw_mutex& m, bool write=true );
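
        // Illustrative non-blocking use (not from the original header); m is a
        // hypothetical queuing_rw_mutex:
        //
        //     tbb::queuing_rw_mutex::scoped_lock lock;
        //     if( lock.try_acquire( m, /*write=*/true ) ) {
        //         // ... exclusive work; released by the destructor ...
        //     }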

        //! Release lock.
        void release();

        //! Upgrade reader to become a writer.
        /** Returns whether the upgrade happened without temporarily releasing the lock. */
        bool upgrade_to_writer();

        //! Downgrade writer to become a reader.
        bool downgrade_to_reader();
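
        // Illustrative upgrade pattern (not from the original header): acquire as a
        // reader, then request writer status. If upgrade_to_writer() returns false,
        // the lock was temporarily released during the upgrade, so any state read
        // under the read lock should be re-validated. m is a hypothetical mutex.
        //
        //     tbb::queuing_rw_mutex::scoped_lock lock( m, /*write=*/false );
        //     if( !lock.upgrade_to_writer() ) {
        //         // another writer may have run; re-check cached data here
        //     }
        //     // write access is held in either case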

    private:
        //! The pointer to the mutex owned, or NULL if no mutex is held.
        queuing_rw_mutex* my_mutex;

        //! Pointers to the previous and next competitors for the mutex.
        scoped_lock *__TBB_atomic my_prev, *__TBB_atomic my_next;

        typedef unsigned char state_t;

        //! State of the request: reader, writer, active reader, other service states.
        atomic<state_t> my_state;

        //! The local spin-wait variable.
        unsigned char __TBB_atomic my_going;

        //! A tiny internal lock.
        unsigned char my_internal_lock;

        //! Acquire the internal lock.
        void acquire_internal_lock();

        //! Try to acquire the internal lock.
        /** Returns true if the lock was successfully acquired. */
        bool try_acquire_internal_lock();

        //! Release the internal lock.
        void release_internal_lock();

        //! Wait for the internal lock to be released.
        void wait_for_release_of_internal_lock();

        //! A helper function.
        void unblock_or_wait_on_internal_lock( uintptr_t );
    };

    void __TBB_EXPORTED_METHOD internal_construct();

    // Mutex traits
    static const bool is_rw_mutex = true;
    static const bool is_recursive_mutex = false;
    static const bool is_fair_mutex = true;

private:
    //! The last competitor requesting the lock.
    atomic<scoped_lock*> q_tail;

};

__TBB_DEFINE_PROFILING_SET_NAME(queuing_rw_mutex)

} // namespace tbb

#endif /* __TBB_queuing_rw_mutex_H */