#ifndef __TBB_enumerable_thread_specific_H
#define __TBB_enumerable_thread_specific_H

#include "concurrent_vector.h"
#include "tbb_thread.h"
#include "cache_aligned_allocator.h"
#include <cstring>   // memcpy and std::memset are used below
#if __SUNPRO_CC
#include <string.h>
#endif

#if _WIN32||_WIN64
#include <windows.h>
#else
#include <pthread.h>
#endif

namespace tbb {

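// Determines how a thread finds its own element: ets_key_per_instance binds one native
// TLS key (TlsAlloc/pthread_key_create) to each container, while ets_no_key locates the
// element by hashing the thread id into a per-container table.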
enum ets_key_usage_type { ets_key_per_instance, ets_no_key };

namespace interface5 {

namespace internal {

template<ets_key_usage_type ETS_key_type>
class ets_base: tbb::internal::no_copy {
protected:
#if _WIN32||_WIN64
    typedef DWORD key_type;
#else
    typedef pthread_t key_type;
#endif
#if __TBB_GCC_3_3_PROTECTED_BROKEN
public:
#endif
    struct slot;

    struct array {
        array* next;
        size_t lg_size;
        slot& at( size_t k ) {
            return ((slot*)(void*)(this+1))[k];
        }
        size_t size() const {return (size_t)1<<lg_size;}
        size_t mask() const {return size()-1;}
        size_t start( size_t h ) const {
            return h>>(8*sizeof(size_t)-lg_size);
        }
    };
    struct slot {
        key_type key;
        void* ptr;
        bool empty() const {return !key;}
        bool match( key_type k ) const {return key==k;}
        bool claim( key_type k ) {
            __TBB_ASSERT(sizeof(tbb::atomic<key_type>)==sizeof(key_type), NULL);
            __TBB_ASSERT(sizeof(void*)==sizeof(tbb::atomic<key_type>*), NULL);
            union { void* space; tbb::atomic<key_type>* key_atomic; } helper;
            helper.space = &key;
            return helper.key_atomic->compare_and_swap(k,0)==0;
        }
    };
#if __TBB_GCC_3_3_PROTECTED_BROKEN
protected:
#endif

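    // The key for the current thread is its tbb_thread::id, copied bit-for-bit into
    // key_type (DWORD on Windows, pthread_t elsewhere); memcpy sidesteps the lack of a
    // direct conversion from tbb_thread::id to the native handle type.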
    static key_type key_of_current_thread() {
        tbb::tbb_thread::id id = tbb::this_tbb_thread::get_id();
        key_type k;
        memcpy( &k, &id, sizeof(k) );
        return k;
    }

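    // Root of the linked list of slot arrays. New arrays are pushed at the head, so the
    // head is always the newest and largest; older, smaller arrays stay reachable through
    // 'next' until table_clear(). my_count is the number of distinct threads registered.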
    atomic<array*> my_root;
    atomic<size_t> my_count;
    virtual void* create_local() = 0;
    virtual void* create_array(size_t _size) = 0;
    virtual void free_array(void* ptr, size_t _size) = 0;
    array* allocate( size_t lg_size ) {
        size_t n = 1<<lg_size;
        array* a = static_cast<array*>(create_array( sizeof(array)+n*sizeof(slot) ));
        a->lg_size = lg_size;
        std::memset( a+1, 0, n*sizeof(slot) );
        return a;
    }
    void free(array* a) {
        size_t n = 1<<(a->lg_size);
        free_array( (void *)a, size_t(sizeof(array)+n*sizeof(slot)) );
    }
    static size_t hash( key_type k ) {
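        // Multiplicative hashing with a constant derived from the golden ratio; the
        // informative bits end up in the high-order positions, which is why array::start()
        // takes the top lg_size bits of the result.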
#if __TBB_WORDSIZE == 4
        return uintptr_t(k)*0x9E3779B9;
#else
        return uintptr_t(k)*0x9E3779B97F4A7C15;
#endif
    }

    ets_base() {my_root=NULL; my_count=0;}
    virtual ~ets_base();
    void* table_lookup( bool& exists );
    void table_clear();
    slot& table_find( key_type k ) {
        size_t h = hash(k);
        array* r = my_root;
        size_t mask = r->mask();
        for(size_t i = r->start(h);;i=(i+1)&mask) {
            slot& s = r->at(i);
            if( s.empty() || s.match(k) )
                return s;
        }
    }
    void table_reserve_for_copy( const ets_base& other ) {
        __TBB_ASSERT(!my_root,NULL);
        __TBB_ASSERT(!my_count,NULL);
        if( other.my_root ) {
            array* a = allocate(other.my_root->lg_size);
            a->next = NULL;
            my_root = a;
            my_count = other.my_count;
        }
    }
};

template<ets_key_usage_type ETS_key_type>
ets_base<ETS_key_type>::~ets_base() {
    __TBB_ASSERT(!my_root, NULL);
}

template<ets_key_usage_type ETS_key_type>
void ets_base<ETS_key_type>::table_clear() {
    while( array* r = my_root ) {
        my_root = r->next;
        free(r);
    }
    my_count = 0;
}

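// Finds the calling thread's slot, creating it on first use. The newest array is probed
// first; a hit in an older array is re-inserted into the newest one so later lookups stay
// fast. When the table is more than half full, a larger array is pushed onto the list.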
template<ets_key_usage_type ETS_key_type>
void* ets_base<ETS_key_type>::table_lookup( bool& exists ) {
    const key_type k = key_of_current_thread();

    __TBB_ASSERT(k!=0,NULL);
    void* found;
    size_t h = hash(k);
    for( array* r=my_root; r; r=r->next ) {
        size_t mask=r->mask();
        for(size_t i = r->start(h); ;i=(i+1)&mask) {
            slot& s = r->at(i);
            if( s.empty() ) break;
            if( s.match(k) ) {
                if( r==my_root ) {
                    // Found in the newest array.
                    exists = true;
                    return s.ptr;
                } else {
                    // Found in an older array; re-insert it into the newest array below.
                    exists = true;
                    found = s.ptr;
                    goto insert;
                }
            }
        }
    }

    // This thread has no element yet: create one and make sure the table has room for it.
    exists = false;
    found = create_local();
    {
        size_t c = ++my_count;
        array* r = my_root;
        if( !r || c>r->size()/2 ) {
            size_t s = r ? r->lg_size : 2;
            while( c>size_t(1)<<(s-1) ) ++s;
            array* a = allocate(s);
            for(;;) {
                a->next = my_root;
                array* new_r = my_root.compare_and_swap(a,r);
                if( new_r==r ) break;
                if( new_r->lg_size>=s ) {
                    // Another thread installed an equal or bigger array; ours is not needed.
                    free(a);
                    break;
                }
                r = new_r;
            }
        }
    }
insert:
    // Claim a slot for the key in the newest array and publish the element.
    array* ir = my_root;
    size_t mask = ir->mask();
    for(size_t i = ir->start(h);;i=(i+1)&mask) {
        slot& s = ir->at(i);
        if( s.empty() ) {
            if( s.claim(k) ) {
                s.ptr = found;
                return found;
            }
        }
    }
}

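// Specialization that also caches each thread's element pointer in one native TLS key per
// container instance, so repeat lookups bypass the hash table entirely; the base-class
// table is still maintained so the elements remain enumerable.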
template <>
class ets_base<ets_key_per_instance>: protected ets_base<ets_no_key> {
    typedef ets_base<ets_no_key> super;
#if _WIN32||_WIN64
    typedef DWORD tls_key_t;
    void create_key() { my_key = TlsAlloc(); }
    void destroy_key() { TlsFree(my_key); }
    void set_tls(void * value) { TlsSetValue(my_key, (LPVOID)value); }
    void* get_tls() { return (void *)TlsGetValue(my_key); }
#else
    typedef pthread_key_t tls_key_t;
    void create_key() { pthread_key_create(&my_key, NULL); }
    void destroy_key() { pthread_key_delete(my_key); }
    void set_tls( void * value ) const { pthread_setspecific(my_key, value); }
    void* get_tls() const { return pthread_getspecific(my_key); }
#endif
    tls_key_t my_key;
    virtual void* create_local() = 0;
    virtual void* create_array(size_t _size) = 0;
    virtual void free_array(void* ptr, size_t _size) = 0;
public:
    ets_base() {create_key();}
    ~ets_base() {destroy_key();}
    void* table_lookup( bool& exists ) {
        void* found = get_tls();
        if( found ) {
            exists=true;
        } else {
            found = super::table_lookup(exists);
            set_tls(found);
        }
        return found;
    }
    void table_clear() {
        destroy_key();
        create_key();
        super::table_clear();
    }
};

template< typename Container, typename Value >
class enumerable_thread_specific_iterator
#if defined(_WIN64) && defined(_MSC_VER)
    : public std::iterator<std::random_access_iterator_tag,Value>
#endif
{
    Container *my_container;
    typename Container::size_type my_index;
    mutable Value *my_value;

    template<typename C, typename T>
    friend enumerable_thread_specific_iterator<C,T> operator+( ptrdiff_t offset,
                                                               const enumerable_thread_specific_iterator<C,T>& v );

    template<typename C, typename T, typename U>
    friend bool operator==( const enumerable_thread_specific_iterator<C,T>& i,
                            const enumerable_thread_specific_iterator<C,U>& j );

    template<typename C, typename T, typename U>
    friend bool operator<( const enumerable_thread_specific_iterator<C,T>& i,
                           const enumerable_thread_specific_iterator<C,U>& j );

    template<typename C, typename T, typename U>
    friend ptrdiff_t operator-( const enumerable_thread_specific_iterator<C,T>& i, const enumerable_thread_specific_iterator<C,U>& j );

    template<typename C, typename U>
    friend class enumerable_thread_specific_iterator;

public:

    enumerable_thread_specific_iterator( const Container &container, typename Container::size_type index ) :
        my_container(&const_cast<Container &>(container)), my_index(index), my_value(NULL) {}

    enumerable_thread_specific_iterator() : my_container(NULL), my_index(0), my_value(NULL) {}

    template<typename U>
    enumerable_thread_specific_iterator( const enumerable_thread_specific_iterator<Container, U>& other ) :
        my_container( other.my_container ), my_index( other.my_index), my_value( const_cast<Value *>(other.my_value) ) {}

    enumerable_thread_specific_iterator operator+( ptrdiff_t offset ) const {
        return enumerable_thread_specific_iterator(*my_container, my_index + offset);
    }

    enumerable_thread_specific_iterator &operator+=( ptrdiff_t offset ) {
        my_index += offset;
        my_value = NULL;
        return *this;
    }

    enumerable_thread_specific_iterator operator-( ptrdiff_t offset ) const {
        return enumerable_thread_specific_iterator( *my_container, my_index-offset );
    }

    enumerable_thread_specific_iterator &operator-=( ptrdiff_t offset ) {
        my_index -= offset;
        my_value = NULL;
        return *this;
    }

    Value& operator*() const {
        Value* value = my_value;
        if( !value ) {
            value = my_value = reinterpret_cast<Value *>(&(*my_container)[my_index].value);
        }
        __TBB_ASSERT( value==reinterpret_cast<Value *>(&(*my_container)[my_index].value), "corrupt cache" );
        return *value;
    }

    Value& operator[]( ptrdiff_t k ) const {
        return *reinterpret_cast<Value *>(&(*my_container)[my_index + k].value);
    }

    Value* operator->() const {return &operator*();}

    enumerable_thread_specific_iterator& operator++() {
        ++my_index;
        my_value = NULL;
        return *this;
    }

    enumerable_thread_specific_iterator& operator--() {
        --my_index;
        my_value = NULL;
        return *this;
    }

    enumerable_thread_specific_iterator operator++(int) {
        enumerable_thread_specific_iterator result = *this;
        ++my_index;
        my_value = NULL;
        return result;
    }

    enumerable_thread_specific_iterator operator--(int) {
        enumerable_thread_specific_iterator result = *this;
        --my_index;
        my_value = NULL;
        return result;
    }

    typedef ptrdiff_t difference_type;
    typedef Value value_type;
    typedef Value* pointer;
    typedef Value& reference;
    typedef std::random_access_iterator_tag iterator_category;
};

template<typename Container, typename T>
enumerable_thread_specific_iterator<Container,T> operator+( ptrdiff_t offset,
                                                            const enumerable_thread_specific_iterator<Container,T>& v ) {
    return enumerable_thread_specific_iterator<Container,T>( *v.my_container, v.my_index + offset );
}

template<typename Container, typename T, typename U>
bool operator==( const enumerable_thread_specific_iterator<Container,T>& i,
                 const enumerable_thread_specific_iterator<Container,U>& j ) {
    return i.my_index==j.my_index && i.my_container == j.my_container;
}

template<typename Container, typename T, typename U>
bool operator!=( const enumerable_thread_specific_iterator<Container,T>& i,
                 const enumerable_thread_specific_iterator<Container,U>& j ) {
    return !(i==j);
}

template<typename Container, typename T, typename U>
bool operator<( const enumerable_thread_specific_iterator<Container,T>& i,
                const enumerable_thread_specific_iterator<Container,U>& j ) {
    return i.my_index<j.my_index;
}

template<typename Container, typename T, typename U>
bool operator>( const enumerable_thread_specific_iterator<Container,T>& i,
                const enumerable_thread_specific_iterator<Container,U>& j ) {
    return j<i;
}

template<typename Container, typename T, typename U>
bool operator>=( const enumerable_thread_specific_iterator<Container,T>& i,
                 const enumerable_thread_specific_iterator<Container,U>& j ) {
    return !(i<j);
}

template<typename Container, typename T, typename U>
bool operator<=( const enumerable_thread_specific_iterator<Container,T>& i,
                 const enumerable_thread_specific_iterator<Container,U>& j ) {
    return !(j<i);
}

template<typename Container, typename T, typename U>
ptrdiff_t operator-( const enumerable_thread_specific_iterator<Container,T>& i,
                     const enumerable_thread_specific_iterator<Container,U>& j ) {
    return i.my_index-j.my_index;
}

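// An input iterator over a container of containers: outer_iter selects the current segment
// and inner_iter the position within it; empty segments are skipped as the iterator advances.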
template<typename SegmentedContainer, typename Value >
class segmented_iterator
#if defined(_WIN64) && defined(_MSC_VER)
    : public std::iterator<std::input_iterator_tag, Value>
#endif
{
    template<typename C, typename T, typename U>
    friend bool operator==(const segmented_iterator<C,T>& i, const segmented_iterator<C,U>& j);

    template<typename C, typename T, typename U>
    friend bool operator!=(const segmented_iterator<C,T>& i, const segmented_iterator<C,U>& j);

    template<typename C, typename U>
    friend class segmented_iterator;

public:

    segmented_iterator() {my_segcont = NULL;}

    segmented_iterator( const SegmentedContainer& _segmented_container ) :
        my_segcont(const_cast<SegmentedContainer*>(&_segmented_container)),
        outer_iter(my_segcont->end()) { }

    ~segmented_iterator() {}

    typedef typename SegmentedContainer::iterator outer_iterator;
    typedef typename SegmentedContainer::value_type InnerContainer;
    typedef typename InnerContainer::iterator inner_iterator;

    typedef ptrdiff_t difference_type;
    typedef Value value_type;
    typedef typename SegmentedContainer::size_type size_type;
    typedef Value* pointer;
    typedef Value& reference;
    typedef std::input_iterator_tag iterator_category;

    template<typename U>
    segmented_iterator(const segmented_iterator<SegmentedContainer, U>& other) :
        my_segcont(other.my_segcont),
        outer_iter(other.outer_iter),
        inner_iter(other.inner_iter)
    {}

    template<typename U>
    segmented_iterator& operator=( const segmented_iterator<SegmentedContainer, U>& other) {
        if(static_cast<void *>(this) != static_cast<const void *>(&other)) {
            my_segcont = other.my_segcont;
            outer_iter = other.outer_iter;
            if(outer_iter != my_segcont->end()) inner_iter = other.inner_iter;
        }
        return *this;
    }

    segmented_iterator& operator=(const outer_iterator& new_outer_iter) {
        __TBB_ASSERT(my_segcont != NULL, NULL);

        for(outer_iter = new_outer_iter ;outer_iter!=my_segcont->end(); ++outer_iter) {
            if( !outer_iter->empty() ) {
                inner_iter = outer_iter->begin();
                break;
            }
        }
        return *this;
    }

    segmented_iterator& operator++() {
        advance_me();
        return *this;
    }

    segmented_iterator operator++(int) {
        segmented_iterator tmp = *this;
        operator++();
        return tmp;
    }

    bool operator==(const outer_iterator& other_outer) const {
        __TBB_ASSERT(my_segcont != NULL, NULL);
        return (outer_iter == other_outer &&
                (outer_iter == my_segcont->end() || inner_iter == outer_iter->begin()));
    }

    bool operator!=(const outer_iterator& other_outer) const {
        return !operator==(other_outer);
    }

    reference operator*() const {
        __TBB_ASSERT(my_segcont != NULL, NULL);
        __TBB_ASSERT(outer_iter != my_segcont->end(), "Dereferencing a pointer at end of container");
        __TBB_ASSERT(inner_iter != outer_iter->end(), NULL);
        return *inner_iter;
    }

    pointer operator->() const { return &operator*();}

private:
    SegmentedContainer* my_segcont;
    outer_iterator outer_iter;
    inner_iterator inner_iter;

    void advance_me() {
        __TBB_ASSERT(my_segcont != NULL, NULL);
        __TBB_ASSERT(outer_iter != my_segcont->end(), NULL);
        __TBB_ASSERT(inner_iter != outer_iter->end(), NULL);
        ++inner_iter;
        while(inner_iter == outer_iter->end() && ++outer_iter != my_segcont->end()) {
            inner_iter = outer_iter->begin();
        }
    }
};

template<typename SegmentedContainer, typename T, typename U>
bool operator==( const segmented_iterator<SegmentedContainer,T>& i,
                 const segmented_iterator<SegmentedContainer,U>& j ) {
    if(i.my_segcont != j.my_segcont) return false;
    if(i.my_segcont == NULL) return true;
    if(i.outer_iter != j.outer_iter) return false;
    if(i.outer_iter == i.my_segcont->end()) return true;
    return i.inner_iter == j.inner_iter;
}

template<typename SegmentedContainer, typename T, typename U>
bool operator!=( const segmented_iterator<SegmentedContainer,T>& i,
                 const segmented_iterator<SegmentedContainer,U>& j ) {
    return !(i==j);
}

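// Type-erased wrapper around the user-supplied "finit" functor: callback_leaf stores a copy
// of the functor, clones itself through make_copy(), and releases itself via destroy(),
// using tbb::tbb_allocator for its own storage.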
template<typename T>
struct callback_base {
    virtual T apply( ) = 0;
    virtual void destroy( ) = 0;

    virtual callback_base* make_copy() = 0;

    virtual ~callback_base() { }
};

template <typename T, typename Functor>
struct callback_leaf : public callback_base<T>, public tbb::internal::no_copy {
    typedef Functor my_callback_type;
    typedef callback_leaf<T,Functor> my_type;
    typedef my_type* callback_pointer;
    typedef typename tbb::tbb_allocator<my_type> my_allocator_type;
    Functor f;
    callback_leaf( const Functor& f_) : f(f_) {
    }

    static callback_pointer new_callback(const Functor& f_ ) {
        void* new_void = my_allocator_type().allocate(1);
        callback_pointer new_cb = new (new_void) callback_leaf<T,Functor>(f_);
        return new_cb;
    }

    callback_pointer make_copy() {
        return new_callback( f );
    }

    void destroy( ) {
        callback_pointer my_ptr = this;
        my_allocator_type().destroy(my_ptr);
        my_allocator_type().deallocate(my_ptr,1);
    }
    T apply() { return f(); }
};

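// Raw storage for one thread-local T. The general template pads the element so consecutive
// entries in the concurrent_vector never share a cache line (false-sharing avoidance); the
// <U,0> specialization adds no padding because sizeof(U) is already a multiple of
// NFS_MaxLineSize. unconstruct() runs the destructor without freeing the storage.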
template<typename U, size_t ModularSize>
struct ets_element {
    char value[sizeof(U) + tbb::internal::NFS_MaxLineSize-ModularSize];
    void unconstruct() {
        __TBB_ASSERT(sizeof(void*)==sizeof(U*),NULL);
        union { void* space; U* val; } helper;
        helper.space = &value;
        helper.val->~U();
    }
};

template<typename U>
struct ets_element<U,0> {
    char value[sizeof(U)];
    void unconstruct() {
        __TBB_ASSERT(sizeof(void*)==sizeof(U*),NULL);
        union { void* space; U* val; } helper;
        helper.space = &value;
        helper.val->~U();
    }
};

} // namespace internal

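// Container for thread-local values of type T. Each thread obtains its own element lazily
// through local(); the element is constructed from a user-supplied "finit" functor, from an
// exemplar value, or by T's default constructor. All elements can later be traversed with
// begin()/end(), reduced with combine(), or visited with combine_each(), typically after the
// parallel work has finished.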
template <typename T,
          typename Allocator=cache_aligned_allocator<T>,
          ets_key_usage_type ETS_key_type=ets_no_key >
class enumerable_thread_specific: internal::ets_base<ETS_key_type> {

    template<typename U, typename A, ets_key_usage_type C> friend class enumerable_thread_specific;

    typedef internal::ets_element<T,sizeof(T)%tbb::internal::NFS_MaxLineSize> padded_element;

    template<typename I>
    class generic_range_type: public blocked_range<I> {
    public:
        typedef T value_type;
        typedef T& reference;
        typedef const T& const_reference;
        typedef I iterator;
        typedef ptrdiff_t difference_type;
        generic_range_type( I begin_, I end_, size_t grainsize_ = 1) : blocked_range<I>(begin_,end_,grainsize_) {}
        template<typename U>
        generic_range_type( const generic_range_type<U>& r) : blocked_range<I>(r.begin(),r.end(),r.grainsize()) {}
        generic_range_type( generic_range_type& r, split ) : blocked_range<I>(r,split()) {}
    };

    typedef typename Allocator::template rebind< padded_element >::other padded_allocator_type;
    typedef tbb::concurrent_vector< padded_element, padded_allocator_type > internal_collection_type;

    internal::callback_base<T> *my_finit_callback;

    typedef typename tbb::tbb_allocator<padded_element > exemplar_allocator_type;
    static padded_element * create_exemplar(const T& my_value) {
        padded_element *new_exemplar = reinterpret_cast<padded_element *>(exemplar_allocator_type().allocate(1));
        new(new_exemplar->value) T(my_value);
        return new_exemplar;
    }

    static padded_element *create_exemplar( ) {
        padded_element *new_exemplar = reinterpret_cast<padded_element *>(exemplar_allocator_type().allocate(1));
        new(new_exemplar->value) T( );
        return new_exemplar;
    }

    static void free_exemplar(padded_element *my_ptr) {
        my_ptr->unconstruct();
        exemplar_allocator_type().destroy(my_ptr);
        exemplar_allocator_type().deallocate(my_ptr,1);
    }

    padded_element* my_exemplar_ptr;

    internal_collection_type my_locals;

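    // Invoked by ets_base the first time a thread touches this container: appends one
    // padded element to my_locals and constructs the T inside it from the finit callback,
    // the exemplar, or T's default constructor, in that order of preference.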
    void* create_local() {
#if TBB_DEPRECATED
        void* lref = &my_locals[my_locals.push_back(padded_element())];
#else
        void* lref = &*my_locals.push_back(padded_element());
#endif
        if(my_finit_callback) {
            new(lref) T(my_finit_callback->apply());
        } else if(my_exemplar_ptr) {
            pointer t_exemp = reinterpret_cast<T *>(&(my_exemplar_ptr->value));
            new(lref) T(*t_exemp);
        } else {
            new(lref) T();
        }
        return lref;
    }

    void unconstruct_locals() {
        for(typename internal_collection_type::iterator cvi = my_locals.begin(); cvi != my_locals.end(); ++cvi) {
            cvi->unconstruct();
        }
    }

    typedef typename Allocator::template rebind< uintptr_t >::other array_allocator_type;

    void* create_array(size_t _size) {
        size_t nelements = (_size + sizeof(uintptr_t) -1) / sizeof(uintptr_t);
        return array_allocator_type().allocate(nelements);
    }

    void free_array( void* _ptr, size_t _size) {
        size_t nelements = (_size + sizeof(uintptr_t) -1) / sizeof(uintptr_t);
        array_allocator_type().deallocate( reinterpret_cast<uintptr_t *>(_ptr),nelements);
    }

public:

    typedef Allocator allocator_type;
    typedef T value_type;
    typedef T& reference;
    typedef const T& const_reference;
    typedef T* pointer;
    typedef const T* const_pointer;
    typedef typename internal_collection_type::size_type size_type;
    typedef typename internal_collection_type::difference_type difference_type;

    typedef typename internal::enumerable_thread_specific_iterator< internal_collection_type, value_type > iterator;
    typedef typename internal::enumerable_thread_specific_iterator< internal_collection_type, const value_type > const_iterator;

    typedef generic_range_type< iterator > range_type;
    typedef generic_range_type< const_iterator > const_range_type;

    enumerable_thread_specific() : my_finit_callback(0) {
        my_exemplar_ptr = 0;
    }

    template <typename Finit>
    enumerable_thread_specific( Finit _finit )
    {
        my_finit_callback = internal::callback_leaf<T,Finit>::new_callback( _finit );
        my_exemplar_ptr = 0;
    }

    enumerable_thread_specific(const T &_exemplar) : my_finit_callback(0) {
        my_exemplar_ptr = create_exemplar(_exemplar);
    }

    ~enumerable_thread_specific() {
        if(my_finit_callback) {
            my_finit_callback->destroy();
        }
        if(my_exemplar_ptr) {
            free_exemplar(my_exemplar_ptr);
        }
        this->clear();
    }

    reference local() {
        bool exists;
        return local(exists);
    }

    reference local(bool& exists) {
        __TBB_ASSERT(ETS_key_type==ets_no_key,"ets_key_per_instance not yet implemented");
        void* ptr = this->table_lookup(exists);
        return *(T*)ptr;
    }

    size_type size() const { return my_locals.size(); }

    bool empty() const { return my_locals.empty(); }

    iterator begin() { return iterator( my_locals, 0 ); }
    iterator end() { return iterator(my_locals, my_locals.size() ); }

    const_iterator begin() const { return const_iterator(my_locals, 0); }

    const_iterator end() const { return const_iterator(my_locals, my_locals.size()); }

    range_type range( size_t grainsize=1 ) { return range_type( begin(), end(), grainsize ); }

    const_range_type range( size_t grainsize=1 ) const { return const_range_type( begin(), end(), grainsize ); }

    void clear() {
        unconstruct_locals();
        my_locals.clear();
        this->table_clear();
    }

private:

    template<typename U, typename A2, ets_key_usage_type C2>
    void internal_copy( const enumerable_thread_specific<U, A2, C2>& other);

public:

    template<typename U, typename Alloc, ets_key_usage_type Cachetype>
    enumerable_thread_specific( const enumerable_thread_specific<U, Alloc, Cachetype>& other ) : internal::ets_base<ETS_key_type> ()
    {
        internal_copy(other);
    }

    enumerable_thread_specific( const enumerable_thread_specific& other ) : internal::ets_base<ETS_key_type> ()
    {
        internal_copy(other);
    }

private:

    template<typename U, typename A2, ets_key_usage_type C2>
    enumerable_thread_specific &
    internal_assign(const enumerable_thread_specific<U, A2, C2>& other) {
        if(static_cast<void *>( this ) != static_cast<const void *>( &other )) {
            this->clear();
            if(my_finit_callback) {
                my_finit_callback->destroy();
                my_finit_callback = 0;
            }
            if(my_exemplar_ptr) {
                free_exemplar(my_exemplar_ptr);
                my_exemplar_ptr = 0;
            }
            internal_copy( other );
        }
        return *this;
    }

public:

    enumerable_thread_specific& operator=(const enumerable_thread_specific& other) {
        return internal_assign(other);
    }

    template<typename U, typename Alloc, ets_key_usage_type Cachetype>
    enumerable_thread_specific& operator=(const enumerable_thread_specific<U, Alloc, Cachetype>& other)
    {
        return internal_assign(other);
    }

    template <typename combine_func_t>
    T combine(combine_func_t f_combine) {
        if(begin() == end()) {
            // No thread has created an element yet: fall back to the initializer.
            if(my_finit_callback) {
                return my_finit_callback->apply();
            }
            if(my_exemplar_ptr) {
                pointer local_ref = reinterpret_cast<T*>((my_exemplar_ptr->value));
                return T(*local_ref);
            }
            return T();
        }
        const_iterator ci = begin();
        T my_result = *ci;
        while(++ci != end())
            my_result = f_combine( my_result, *ci );
        return my_result;
    }

    template <typename combine_func_t>
    void combine_each(combine_func_t f_combine) {
        for(const_iterator ci = begin(); ci != end(); ++ci) {
            f_combine( *ci );
        }
    }

};
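
// Usage sketch (illustrative only, not part of the header): thread-local partial counts
// that are reduced after the parallel phase. Assumes the increments run on TBB worker
// threads, e.g. inside a parallel_for body; std::plus is from <functional>.
//
//     tbb::enumerable_thread_specific<int> partial;    // default-constructs an int per thread
//
//     // ... executed concurrently by many threads:
//     partial.local() += 1;                             // first call from a thread creates its element
//
//     // ... back on a single thread:
//     int total = partial.combine( std::plus<int>() );  // reduce all thread-local values
//     partial.clear();                                   // destroy the elements for reuse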

template <typename T, typename Allocator, ets_key_usage_type ETS_key_type>
template<typename U, typename A2, ets_key_usage_type C2>
void enumerable_thread_specific<T,Allocator,ETS_key_type>::internal_copy( const enumerable_thread_specific<U, A2, C2>& other) {
    typedef internal::ets_base<ets_no_key> base;
    __TBB_ASSERT(my_locals.size()==0,NULL);
    this->table_reserve_for_copy( other );
    for( base::array* r=other.my_root; r; r=r->next ) {
        for( size_t i=0; i<r->size(); ++i ) {
            base::slot& s1 = r->at(i);
            if( !s1.empty() ) {
                base::slot& s2 = this->table_find(s1.key);
                if( s2.empty() ) {
#if TBB_DEPRECATED
                    void* lref = &my_locals[my_locals.push_back(padded_element())];
#else
                    void* lref = &*my_locals.push_back(padded_element());
#endif
                    s2.ptr = new(lref) T(*(U*)s1.ptr);
                    s2.key = s1.key;
                } else {
                    // The key was already copied from a newer array of 'other'; skip the stale duplicate.
                }
            }
        }
    }
    if(other.my_finit_callback) {
        my_finit_callback = other.my_finit_callback->make_copy();
    } else {
        my_finit_callback = 0;
    }
    if(other.my_exemplar_ptr) {
        U* local_ref = reinterpret_cast<U*>(other.my_exemplar_ptr->value);
        my_exemplar_ptr = create_exemplar(*local_ref);
    } else {
        my_exemplar_ptr = 0;
    }
}

template< typename Container >
class flattened2d {

    typedef typename Container::value_type conval_type;

public:

    typedef typename conval_type::size_type size_type;
    typedef typename conval_type::difference_type difference_type;
    typedef typename conval_type::allocator_type allocator_type;
    typedef typename conval_type::value_type value_type;
    typedef typename conval_type::reference reference;
    typedef typename conval_type::const_reference const_reference;
    typedef typename conval_type::pointer pointer;
    typedef typename conval_type::const_pointer const_pointer;

    typedef typename internal::segmented_iterator<Container, value_type> iterator;
    typedef typename internal::segmented_iterator<Container, const value_type> const_iterator;

    flattened2d( const Container &c, typename Container::const_iterator b, typename Container::const_iterator e ) :
        my_container(const_cast<Container*>(&c)), my_begin(b), my_end(e) { }

    flattened2d( const Container &c ) :
        my_container(const_cast<Container*>(&c)), my_begin(c.begin()), my_end(c.end()) { }

    iterator begin() { return iterator(*my_container) = my_begin; }
    iterator end() { return iterator(*my_container) = my_end; }
    const_iterator begin() const { return const_iterator(*my_container) = my_begin; }
    const_iterator end() const { return const_iterator(*my_container) = my_end; }

    size_type size() const {
        size_type tot_size = 0;
        for(typename Container::const_iterator i = my_begin; i != my_end; ++i) {
            tot_size += i->size();
        }
        return tot_size;
    }

private:

    Container *my_container;
    typename Container::const_iterator my_begin;
    typename Container::const_iterator my_end;

};

template <typename Container>
flattened2d<Container> flatten2d(const Container &c, const typename Container::const_iterator b, const typename Container::const_iterator e) {
    return flattened2d<Container>(c, b, e);
}

template <typename Container>
flattened2d<Container> flatten2d(const Container &c) {
    return flattened2d<Container>(c);
}
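
// Usage sketch (illustrative only): each thread appends to its own std::vector<int>, and
// flatten2d presents all per-thread vectors as one sequence afterwards. Assumes the appends
// happen from TBB worker threads and that iteration occurs after the parallel phase;
// process() and some_value are placeholders.
//
//     typedef tbb::enumerable_thread_specific< std::vector<int> > ets_vectors;
//     ets_vectors parts;
//
//     // ... executed concurrently: parts.local().push_back( some_value );
//
//     tbb::flattened2d<ets_vectors> flat = tbb::flatten2d( parts );
//     for( tbb::flattened2d<ets_vectors>::iterator i = flat.begin(); i != flat.end(); ++i )
//         process( *i );                    // visits every element of every thread's vector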

} // namespace interface5

namespace internal {
using interface5::internal::segmented_iterator;
}

using interface5::enumerable_thread_specific;
using interface5::flattened2d;
using interface5::flatten2d;

} // namespace tbb

#endif