enumerable_thread_specific.h

00001 /*
00002     Copyright 2005-2009 Intel Corporation.  All Rights Reserved.
00003 
00004     The source code contained or described herein and all documents related
00005     to the source code ("Material") are owned by Intel Corporation or its
00006     suppliers or licensors.  Title to the Material remains with Intel
00007     Corporation or its suppliers and licensors.  The Material is protected
00008     by worldwide copyright laws and treaty provisions.  No part of the
00009     Material may be used, copied, reproduced, modified, published, uploaded,
00010     posted, transmitted, distributed, or disclosed in any way without
00011     Intel's prior express written permission.
00012 
00013     No license under any patent, copyright, trade secret or other
00014     intellectual property right is granted to or conferred upon you by
00015     disclosure or delivery of the Materials, either expressly, by
00016     implication, inducement, estoppel or otherwise.  Any license under such
00017     intellectual property rights must be express and approved by Intel in
00018     writing.
00019 */
00020 
00021 #ifndef __TBB_enumerable_thread_specific_H
00022 #define __TBB_enumerable_thread_specific_H
00023 
00024 #include "concurrent_vector.h"
00025 #include "tbb_thread.h"
00026 #include "concurrent_hash_map.h"
00027 #include "cache_aligned_allocator.h"
00028 #if __SUNPRO_CC
00029 #include <string.h>  // for memcpy
00030 #endif
00031 
00032 #if _WIN32||_WIN64
00033 #include <windows.h>
00034 #else
00035 #include <pthread.h>
00036 #endif
00037 
00038 namespace tbb {
00039 
    //! Selects whether an enumerable_thread_specific allocates its own native
    //! TLS key (ets_key_per_instance) or performs no native TLS caching at all
    //! (ets_no_key) -- see the tls_manager specializations below.
    enum ets_key_usage_type { ets_key_per_instance, ets_no_key };
00042 
00044     namespace internal {
00045         
        //! Random-access iterator over the padded elements of an
        //! enumerable_thread_specific's underlying container.  Value is either the
        //! element type or its const variant, so one template serves as both
        //! iterator and const_iterator.  A pointer to the current element is
        //! cached lazily and invalidated whenever the position changes.
        template< typename Container, typename Value >
        class enumerable_thread_specific_iterator 
#if defined(_WIN64) && defined(_MSC_VER) 
            // Ensure that Microsoft's internal template function _Val_type works correctly.
            : public std::iterator<std::random_access_iterator_tag,Value>
#endif /* defined(_WIN64) && defined(_MSC_VER) */
        {
        
            Container *my_container;                 // container traversed (never owned)
            typename Container::size_type my_index;  // current position
            mutable Value *my_value;                 // lazy cache of &(*my_container)[my_index].value; NULL = not filled
        
            // Out-of-class operators need access to my_container/my_index.
            template<typename C, typename T>
            friend enumerable_thread_specific_iterator<C,T> operator+( ptrdiff_t offset, 
                                                                       const enumerable_thread_specific_iterator<C,T>& v );
        
            template<typename C, typename T, typename U>
            friend bool operator==( const enumerable_thread_specific_iterator<C,T>& i, 
                                    const enumerable_thread_specific_iterator<C,U>& j );
        
            template<typename C, typename T, typename U>
            friend bool operator<( const enumerable_thread_specific_iterator<C,T>& i, 
                                   const enumerable_thread_specific_iterator<C,U>& j );
        
            template<typename C, typename T, typename U>
            friend ptrdiff_t operator-( const enumerable_thread_specific_iterator<C,T>& i, const enumerable_thread_specific_iterator<C,U>& j );
            
            // const and non-const instantiations befriend each other so the
            // converting constructor can read the other flavor's private members.
            template<typename C, typename U> 
            friend class enumerable_thread_specific_iterator;
        
            public:
        
            //! Construct an iterator positioned at index within container.
            enumerable_thread_specific_iterator( const Container &container, typename Container::size_type index ) : 
                my_container(&const_cast<Container &>(container)), my_index(index), my_value(NULL) {}
        
            //! Default constructor: singular iterator attached to no container.
            enumerable_thread_specific_iterator() : my_container(NULL), my_index(0), my_value(NULL) {}
        
            //! Converting constructor (e.g. iterator -> const_iterator).
            template<typename U>
            enumerable_thread_specific_iterator( const enumerable_thread_specific_iterator<Container, U>& other ) :
                    my_container( other.my_container ), my_index( other.my_index), my_value( const_cast<Value *>(other.my_value) ) {}
        
            enumerable_thread_specific_iterator operator+( ptrdiff_t offset ) const {
                return enumerable_thread_specific_iterator(*my_container, my_index + offset);
            }
        
            // Every position mutator below resets my_value: the cached pointer is
            // only valid for the previous index.
            enumerable_thread_specific_iterator &operator+=( ptrdiff_t offset ) {
                my_index += offset;
                my_value = NULL;
                return *this;
            }
        
            enumerable_thread_specific_iterator operator-( ptrdiff_t offset ) const {
                return enumerable_thread_specific_iterator( *my_container, my_index-offset );
            }
        
            enumerable_thread_specific_iterator &operator-=( ptrdiff_t offset ) {
                my_index -= offset;
                my_value = NULL;
                return *this;
            }
        
            //! Dereference, filling the cached element pointer on first use.
            Value& operator*() const {
                Value* value = my_value;
                if( !value ) {
                    value = my_value = &(*my_container)[my_index].value;
                }
                __TBB_ASSERT( value==&(*my_container)[my_index].value, "corrupt cache" );
                return *value;
            }
        
            //! Random access; bypasses the cache deliberately.
            Value& operator[]( ptrdiff_t k ) const {
               return (*my_container)[my_index + k].value;
            }
        
            Value* operator->() const {return &operator*();}
        
            enumerable_thread_specific_iterator& operator++() {
                ++my_index;
                my_value = NULL;
                return *this;
            }
        
            enumerable_thread_specific_iterator& operator--() {
                --my_index;
                my_value = NULL;
                return *this;
            }
        
            //! Post increment
            enumerable_thread_specific_iterator operator++(int) {
                enumerable_thread_specific_iterator result = *this;
                ++my_index;
                my_value = NULL;
                return result;
            }
        
            //! Post decrement
            enumerable_thread_specific_iterator operator--(int) {
                enumerable_thread_specific_iterator result = *this;
                --my_index;
                my_value = NULL;
                return result;
            }
        
            // STL support
            typedef ptrdiff_t difference_type;
            typedef Value value_type;
            typedef Value* pointer;
            typedef Value& reference;
            typedef std::random_access_iterator_tag iterator_category;
        };
00160         
00161         template<typename Container, typename T>
00162         enumerable_thread_specific_iterator<Container,T> operator+( ptrdiff_t offset, 
00163                                                                     const enumerable_thread_specific_iterator<Container,T>& v ) {
00164             return enumerable_thread_specific_iterator<Container,T>( v.my_container, v.my_index + offset );
00165         }
00166         
00167         template<typename Container, typename T, typename U>
00168         bool operator==( const enumerable_thread_specific_iterator<Container,T>& i, 
00169                          const enumerable_thread_specific_iterator<Container,U>& j ) {
00170             return i.my_index==j.my_index && i.my_container == j.my_container;
00171         }
00172         
00173         template<typename Container, typename T, typename U>
00174         bool operator!=( const enumerable_thread_specific_iterator<Container,T>& i, 
00175                          const enumerable_thread_specific_iterator<Container,U>& j ) {
00176             return !(i==j);
00177         }
00178         
00179         template<typename Container, typename T, typename U>
00180         bool operator<( const enumerable_thread_specific_iterator<Container,T>& i, 
00181                         const enumerable_thread_specific_iterator<Container,U>& j ) {
00182             return i.my_index<j.my_index;
00183         }
00184         
00185         template<typename Container, typename T, typename U>
00186         bool operator>( const enumerable_thread_specific_iterator<Container,T>& i, 
00187                         const enumerable_thread_specific_iterator<Container,U>& j ) {
00188             return j<i;
00189         }
00190         
00191         template<typename Container, typename T, typename U>
00192         bool operator>=( const enumerable_thread_specific_iterator<Container,T>& i, 
00193                          const enumerable_thread_specific_iterator<Container,U>& j ) {
00194             return !(i<j);
00195         }
00196         
00197         template<typename Container, typename T, typename U>
00198         bool operator<=( const enumerable_thread_specific_iterator<Container,T>& i, 
00199                          const enumerable_thread_specific_iterator<Container,U>& j ) {
00200             return !(j<i);
00201         }
00202         
00203         template<typename Container, typename T, typename U>
00204         ptrdiff_t operator-( const enumerable_thread_specific_iterator<Container,T>& i, 
00205                              const enumerable_thread_specific_iterator<Container,U>& j ) {
00206             return i.my_index-j.my_index;
00207         }
00208 
    //! Input iterator that walks every element of a container-of-containers,
    //! transparently skipping empty inner containers.  Invariant: whenever
    //! outer_iter != my_segcont->end(), inner_iter refers to a real element of
    //! *outer_iter (advance_me() and the outer-iterator assignment maintain it).
    template<typename SegmentedContainer, typename Value >
        class segmented_iterator
#if defined(_WIN64) && defined(_MSC_VER)
        : public std::iterator<std::input_iterator_tag, Value>
#endif
        {
            template<typename C, typename T, typename U>
            friend bool operator==(const segmented_iterator<C,T>& i, const segmented_iterator<C,U>& j);

            template<typename C, typename T, typename U>
            friend bool operator!=(const segmented_iterator<C,T>& i, const segmented_iterator<C,U>& j);
            
            // const/non-const instantiations befriend each other for the
            // converting constructor and assignment.
            template<typename C, typename U> 
            friend class segmented_iterator;

            public:

                //! Default constructor: attached to no container.
                segmented_iterator() {my_segcont = NULL;}

                //! Construct positioned at the end of _segmented_container.
                segmented_iterator( const SegmentedContainer& _segmented_container ) : 
                    my_segcont(const_cast<SegmentedContainer*>(&_segmented_container)),
                    outer_iter(my_segcont->end()) { }

                ~segmented_iterator() {}

                typedef typename SegmentedContainer::iterator outer_iterator;
                typedef typename SegmentedContainer::value_type InnerContainer;
                typedef typename InnerContainer::iterator inner_iterator;

                // STL support
                typedef ptrdiff_t difference_type;
                typedef Value value_type;
                typedef typename SegmentedContainer::size_type size_type;
                typedef Value* pointer;
                typedef Value& reference;
                typedef std::input_iterator_tag iterator_category;

                // Copy Constructor
                template<typename U>
                segmented_iterator(const segmented_iterator<SegmentedContainer, U>& other) :
                    my_segcont(other.my_segcont),
                    outer_iter(other.outer_iter),
                    // can we assign a default-constructed iterator to inner if we're at the end?
                    inner_iter(other.inner_iter)
                {}

                // assignment
                template<typename U>
                segmented_iterator& operator=( const segmented_iterator<SegmentedContainer, U>& other) {
                    if(this != &other) {
                        my_segcont = other.my_segcont;
                        outer_iter = other.outer_iter;
                        // inner_iter is only meaningful -- and only copied -- when
                        // not positioned at the outer end().
                        if(outer_iter != my_segcont->end()) inner_iter = other.inner_iter;
                    }
                    return *this;
                }

                // allow assignment of outer iterator to segmented iterator.  Once it is
                // assigned, move forward until a non-empty inner container is found or
                // the end of the outer container is reached.
                segmented_iterator& operator=(const outer_iterator& new_outer_iter) {
                    __TBB_ASSERT(my_segcont != NULL, NULL);
                    // check that this iterator points to something inside the segmented container
                    for(outer_iter = new_outer_iter ;outer_iter!=my_segcont->end(); ++outer_iter) {
                        if( !outer_iter->empty() ) {
                            inner_iter = outer_iter->begin();
                            break;
                        }
                    }
                    return *this;
                }

                // pre-increment
                segmented_iterator& operator++() {
                    advance_me();
                    return *this;
                }

                // post-increment
                segmented_iterator operator++(int) {
                    segmented_iterator tmp = *this;
                    operator++();
                    return tmp;
                }

                //! Compare against a raw outer iterator: equal only when positioned
                //! at the start of that outer slot (or both are at the outer end).
                bool operator==(const outer_iterator& other_outer) const {
                    __TBB_ASSERT(my_segcont != NULL, NULL);
                    return (outer_iter == other_outer &&
                            (outer_iter == my_segcont->end() || inner_iter == outer_iter->begin()));
                }

                bool operator!=(const outer_iterator& other_outer) const {
                    return !operator==(other_outer);

                }

                // (i)* RHS
                reference operator*() const {
                    __TBB_ASSERT(my_segcont != NULL, NULL);
                    __TBB_ASSERT(outer_iter != my_segcont->end(), "Dereferencing a pointer at end of container");
                    __TBB_ASSERT(inner_iter != outer_iter->end(), NULL); // should never happen
                    return *inner_iter;
                }

                // i->
                pointer operator->() const { return &operator*();}

            private:
                SegmentedContainer*             my_segcont;
                outer_iterator outer_iter;
                inner_iterator inner_iter;

                //! Step to the next element, hopping over empty inner containers.
                void advance_me() {
                    __TBB_ASSERT(my_segcont != NULL, NULL);
                    __TBB_ASSERT(outer_iter != my_segcont->end(), NULL); // not true if there are no inner containers
                    __TBB_ASSERT(inner_iter != outer_iter->end(), NULL); // not true if the inner containers are all empty.
                    ++inner_iter;
                    while(inner_iter == outer_iter->end() && ++outer_iter != my_segcont->end()) {
                        inner_iter = outer_iter->begin();
                    }
                }
        };    // segmented_iterator
00331 
00332         template<typename SegmentedContainer, typename T, typename U>
00333         bool operator==( const segmented_iterator<SegmentedContainer,T>& i, 
00334                          const segmented_iterator<SegmentedContainer,U>& j ) {
00335             if(i.my_segcont != j.my_segcont) return false;
00336             if(i.my_segcont == NULL) return true;
00337             if(i.outer_iter != j.outer_iter) return false;
00338             if(i.outer_iter == i.my_segcont->end()) return true;
00339             return i.inner_iter == j.inner_iter;
00340         }
00341 
00342         // !=
00343         template<typename SegmentedContainer, typename T, typename U>
00344         bool operator!=( const segmented_iterator<SegmentedContainer,T>& i, 
00345                          const segmented_iterator<SegmentedContainer,U>& j ) {
00346             return !(i==j);
00347         }
00348 
        // empty template for following specializations
        //! Primary template is intentionally empty: only the ets_no_key and
        //! ets_key_per_instance specializations below are ever instantiated.
        template<ets_key_usage_type et>
        struct tls_manager {};
00352         
00354         template <>
00355         struct tls_manager<ets_no_key> {
00356             typedef size_t tls_key_t;
00357             static inline void create_key( tls_key_t &) { }
00358             static inline void destroy_key( tls_key_t & ) { }
00359             static inline void set_tls( tls_key_t &, void *  ) { }
00360             static inline void * get_tls( tls_key_t & ) { return (size_t)0; }
00361         };
00362 
        //! Real TLS manager: thin wrappers over the native thread-local-storage API.
        template <>
        struct tls_manager <ets_key_per_instance> {
#if _WIN32||_WIN64
            typedef DWORD tls_key_t;
            // NOTE(review): return codes of TlsAlloc/TlsFree/TlsSetValue are ignored.
            static inline void create_key( tls_key_t &k) { k = TlsAlloc(); }
            static inline void destroy_key( tls_key_t &k) { TlsFree(k); }
            static inline void set_tls( tls_key_t &k, void * value) { TlsSetValue(k, (LPVOID)value); }
            static inline void * get_tls( tls_key_t &k ) { return (void *)TlsGetValue(k); }
#else
            typedef pthread_key_t tls_key_t;
            // No destructor callback is registered: the stored values are raw
            // pointers into a container owned elsewhere, so the TLS machinery
            // must not free them.  pthread_* return codes are ignored.
            static inline void create_key( tls_key_t &k) { pthread_key_create(&k, NULL); }
            static inline void destroy_key( tls_key_t &k) { pthread_key_delete(k); }
            static inline void set_tls( tls_key_t &k, void * value) { pthread_setspecific(k, value); }
            static inline void * get_tls( tls_key_t &k ) { return pthread_getspecific(k); }
#endif
        };
00380 
        //! HashCompare adaptor allowing a concurrent_hash_map to be keyed by
        //! native thread id.
        class thread_hash_compare {
        public:
            // using hack suggested by Arch to get value for thread id for hashing...
#if _WIN32||_WIN64
            typedef DWORD thread_key;
#else
            typedef pthread_t thread_key;
#endif
            //! Reinterpret a tbb_thread::id as the native key type.
            // NOTE(review): memcpy reads sizeof(thread_key) bytes out of j; this
            // assumes tbb_thread::id is at least that large -- confirm for all ports.
            static thread_key my_thread_key(const tbb_thread::id j) {
                thread_key key_val;
                memcpy(&key_val, &j, sizeof(thread_key));
                return key_val;
            }

            //! Equality, as required by the HashCompare concept.
            bool equal( const thread_key j, const thread_key k) const {
                return j == k;
            }
            //! Hash is the numeric key value itself.
            // NOTE(review): the cast assumes thread_key is an integral type;
            // pthread_t is opaque and need not be -- verify on supported ports.
            unsigned long hash(const thread_key k) const {
                return (unsigned long)k;
            }
        };
00402 
        // storage for initialization function pointer
        //! Type-erased interface around the user's per-thread initializer functor.
        template<typename T>
        struct callback_base {
            //! Invoke the stored functor; result is returned by value.
            virtual T apply( ) = 0;
            //! Destroy and deallocate *this via the allocator it was created with.
            virtual void destroy( ) = 0;
            // need to be able to create copies of callback_base for copy constructor
            virtual callback_base* make_copy() = 0;
            // need virtual destructor to satisfy GCC compiler warning
            virtual ~callback_base() { }
        };
00413 
00414         template <typename T, typename Functor>
00415         struct callback_leaf : public callback_base<T> {
00416             typedef Functor my_callback_type;
00417             typedef callback_leaf<T,Functor> my_type;
00418             typedef my_type* callback_pointer;
00419             typedef typename tbb::tbb_allocator<my_type> my_allocator_type;
00420             Functor f;
00421             callback_leaf( const Functor& f_) : f(f_) {
00422             }
00423 
00424             static callback_pointer new_callback(const Functor& f_ ) {
00425                 void* new_void = my_allocator_type().allocate(1);
00426                 callback_pointer new_cb = new (new_void) callback_leaf<T,Functor>(f_); // placement new
00427                 return new_cb;
00428             }
00429 
00430             /* override */ callback_pointer make_copy() {
00431                 return new_callback( f );
00432             }
00433 
00434              /* override */ void destroy( ) {
00435                  callback_pointer my_ptr = this;
00436                  my_allocator_type().destroy(my_ptr);
00437                  my_allocator_type().deallocate(my_ptr,1);
00438              }
00439             /* override */ T apply() { return f(); }  // does copy construction of returned value.
00440         };
00441 
00442         template<typename Key, typename T, typename HC, typename A>
00443         class ets_concurrent_hash_map : public tbb::concurrent_hash_map<Key, T, HC, A> {
00444         public:
00445             typedef tbb::concurrent_hash_map<Key, T, HC, A> base_type;
00446             typedef typename base_type::const_pointer const_pointer;
00447             typedef typename base_type::key_type key_type;
00448             const_pointer find( const key_type &k ) { return base_type::find( k ); } // make public
00449         };
00450     
00451     } // namespace internal
00453 
00455     template <typename T, 
00456               typename Allocator=cache_aligned_allocator<T>, 
00457               ets_key_usage_type ETS_key_type=ets_no_key > 
00458     class enumerable_thread_specific { 
00459 
00460         template<typename U, typename A, ets_key_usage_type C> friend class enumerable_thread_specific;
00461     
00462         typedef internal::tls_manager< ETS_key_type > my_tls_manager;
00463 
        //! Element stored for each thread, padded out to a whole number of cache
        //! lines so that distinct threads' values never share a line.
        template<typename U>
        struct padded_element {
            U value;
            // Pads sizeof(U) up to the next multiple of NFS_MaxLineSize.
            // NOTE(review): when sizeof(U) is already an exact multiple of
            // NFS_MaxLineSize the array length evaluates to 0, which is a
            // compiler extension rather than standard C++ -- confirm every
            // supported compiler accepts zero-length members here.
            char padding[ ( (sizeof(U) - 1) / internal::NFS_MaxLineSize + 1 ) * internal::NFS_MaxLineSize - sizeof(U) ];
            padded_element(const U &v) : value(v) {}
            padded_element() {}
        };
00472     
        //! blocked_range subclass that additionally exposes the container's
        //! value/reference typedefs, serving as range_type/const_range_type.
        template<typename I>
        class generic_range_type: public blocked_range<I> {
        public:
            typedef T value_type;
            typedef T& reference;
            typedef const T& const_reference;
            typedef I iterator;
            typedef ptrdiff_t difference_type;
            generic_range_type( I begin_, I end_, size_t grainsize = 1) : blocked_range<I>(begin_,end_,grainsize) {} 
            //! Converting constructor (e.g. non-const range -> const range).
            template<typename U>
            generic_range_type( const generic_range_type<U>& r) : blocked_range<I>(r.begin(),r.end(),r.grainsize()) {} 
            //! Splitting constructor (blocked_range split protocol).
            generic_range_type( generic_range_type& r, split ) : blocked_range<I>(r,split()) {}
        };
00487     
        // Rebind the user's allocator to the padded element type actually stored.
        typedef typename Allocator::template rebind< padded_element<T> >::other padded_allocator_type;
        typedef tbb::concurrent_vector< padded_element<T>, padded_allocator_type > internal_collection_type;
        typedef ptrdiff_t hash_table_index_type; // storing array indices rather than iterators to simplify
        // copying the hash table that correlates thread IDs with concurrent vector elements.
        
        typedef typename Allocator::template rebind< std::pair< typename internal::thread_hash_compare::thread_key, hash_table_index_type > >::other hash_element_allocator;
        typedef internal::ets_concurrent_hash_map< typename internal::thread_hash_compare::thread_key, hash_table_index_type, internal::thread_hash_compare, hash_element_allocator > thread_to_index_type;

        // Native TLS key: the fast path caching each thread's element pointer.
        typename my_tls_manager::tls_key_t my_key;
00497 
        //! Cycle (destroy + recreate) the native TLS key so that every thread's
        //! cached pointer is dropped and subsequent get_tls lookups miss.
        void reset_key() {
            my_tls_manager::destroy_key(my_key);
            my_tls_manager::create_key(my_key); 
        }
00502 
        // Optional user-supplied initializer for new per-thread values; NULL
        // when an exemplar (or default construction) is used instead.
        internal::callback_base<T> *my_finit_callback;

        // need to use a pointed-to exemplar because T may not be assignable.
        // using tbb_allocator instead of padded_element_allocator because we may be
        // copying an exemplar from one instantiation of ETS to another with a different
        // allocator.
        typedef typename tbb::tbb_allocator<padded_element<T> > exemplar_allocator_type;
00510         static padded_element<T> * create_exemplar(const T& my_value) {
00511             padded_element<T> *new_exemplar = 0;
00512             // void *new_space = padded_allocator_type().allocate(1);
00513             void *new_space = exemplar_allocator_type().allocate(1);
00514             new_exemplar = new(new_space) padded_element<T>(my_value);
00515             return new_exemplar;
00516         }
00517 
00518         static padded_element<T> *create_exemplar( ) {
00519             // void *new_space = padded_allocator_type().allocate(1);
00520             void *new_space = exemplar_allocator_type().allocate(1);
00521             padded_element<T> *new_exemplar = new(new_space) padded_element<T>( );
00522             return new_exemplar;
00523         }
00524 
00525         static void free_exemplar(padded_element<T> *my_ptr) {
00526             // padded_allocator_type().destroy(my_ptr);
00527             // padded_allocator_type().deallocate(my_ptr,1);
00528             exemplar_allocator_type().destroy(my_ptr);
00529             exemplar_allocator_type().deallocate(my_ptr,1);
00530         }
00531 
        // Prototype value copied into each newly created per-thread slot;
        // NULL when a finit callback supplies values instead.
        padded_element<T>* my_exemplar_ptr;

        // One padded slot per thread that has touched this instance.
        internal_collection_type my_locals;
        // Maps native thread key -> index into my_locals.
        thread_to_index_type my_hash_tbl;
00536     
    public:
    
        //! Basic types.
        typedef Allocator allocator_type;
        typedef T value_type;
        typedef T& reference;
        typedef const T& const_reference;
        typedef T* pointer;
        typedef const T* const_pointer;
        typedef typename internal_collection_type::size_type size_type;
        typedef typename internal_collection_type::difference_type difference_type;
    
        // Iterator types
        typedef typename internal::enumerable_thread_specific_iterator< internal_collection_type, value_type > iterator;
        typedef typename internal::enumerable_thread_specific_iterator< internal_collection_type, const value_type > const_iterator;

        // Parallel range types
        typedef generic_range_type< iterator > range_type;
        typedef generic_range_type< const_iterator > const_range_type;
00556     
        //! Default constructor: each thread's value will be copy-constructed
        //! from a default-constructed exemplar.
        enumerable_thread_specific() : my_finit_callback(0) { 
            my_exemplar_ptr = create_exemplar();
            my_tls_manager::create_key(my_key); 
        }
00562 
        //! Constructor taking an initializer functor: each thread's value will
        //! be the result of invoking _finit.
        // Finit should be a function taking 0 parameters and returning a T
        template <typename Finit>
        enumerable_thread_specific( Finit _finit )
        {
            my_finit_callback = internal::callback_leaf<T,Finit>::new_callback( _finit );
            my_tls_manager::create_key(my_key);
            my_exemplar_ptr = 0; // don't need exemplar if function is provided
        }
00572     
        //! Constructor taking an exemplar: each thread's value will be
        //! copy-constructed from _exemplar.
        enumerable_thread_specific(const T &_exemplar) : my_finit_callback(0) {
            my_exemplar_ptr = create_exemplar(_exemplar);
            my_tls_manager::create_key(my_key); 
        }
00578     
        //! Destructor: releases the native TLS key, the callback and the
        //! exemplar.  Per-thread values themselves are destroyed when the
        //! my_locals member is destroyed.
        ~enumerable_thread_specific() { 
            my_tls_manager::destroy_key(my_key); 
            if(my_finit_callback) {
                my_finit_callback->destroy();
            }
            if(my_exemplar_ptr)
            {
                free_exemplar(my_exemplar_ptr);
            }
        }
00590       
00592         reference local() {
00593             bool exists;
00594             return local(exists);
00595         }
00596     
        //! Returns a reference to the calling thread's element, creating it on
        //! first use.  exists is set true when the element already existed,
        //! false when a new one was created.  Lookup order: native TLS cache
        //! (fast path), then the thread-id hash map, then slot creation.
        reference local(bool& exists)  {
            // Fast path: this thread previously cached its element pointer in TLS.
            if ( pointer local_ptr = static_cast<pointer>(my_tls_manager::get_tls(my_key)) ) {
                exists = true;
               return *local_ptr;
            }
            // check hash table
            typename internal::thread_hash_compare::thread_key my_t_key = internal::thread_hash_compare::my_thread_key(tbb::this_tbb_thread::get_id());
            {
                typename thread_to_index_type::const_pointer my_existing_entry;
                my_existing_entry = my_hash_tbl.find(my_t_key);
                if(my_existing_entry) {
                    exists = true;
                    hash_table_index_type my_index = my_existing_entry->second;
                    reference local_ref = (my_locals[my_index].value);
                    // Refresh the TLS cache so the next call takes the fast path.
                    // This will only be needed if a change is made to the instance
                    my_tls_manager::set_tls( my_key, static_cast<void *>(&local_ref) );
                    return local_ref;
                }
            }
            // create new entry
            exists = false;
            hash_table_index_type local_index;
            if(my_finit_callback) {
                // Initialize the new slot from the user's functor.
                // convert iterator to array index
                local_index = my_locals.push_back(my_finit_callback->apply()) - my_locals.begin();
            }
            else {
                // Initialize the new slot by copying the exemplar.
                // convert iterator to array index
                local_index = my_locals.push_back(*my_exemplar_ptr) - my_locals.begin();
            }
            // insert into hash table
            reference local_ref = (my_locals[local_index].value);
            my_hash_tbl.insert( std::make_pair(my_t_key, local_index) );
            my_tls_manager::set_tls( my_key, static_cast<void *>(&local_ref) );
            return local_ref;
        }
00634 
        //! Number of per-thread elements created so far.
        size_type size() const { return my_locals.size(); }
    
        //! True if no thread has created an element yet.
        bool empty() const { return my_locals.empty(); }
    
        //! Begin iterator over all per-thread elements.
        iterator begin() { return iterator( my_locals, 0 ); }
        //! End iterator.
        iterator end() { return iterator(my_locals, my_locals.size() ); }
    
        //! Begin const iterator.
        const_iterator begin() const { return const_iterator(my_locals, 0); }
    
        //! End const iterator.
        const_iterator end() const { return const_iterator(my_locals, my_locals.size()); }

        //! Range over all elements, for use with parallel algorithms.
        range_type range( size_t grainsize=1 ) { return range_type( begin(), end(), grainsize ); } 
        
        //! Const range over all elements.
        const_range_type range( size_t grainsize=1 ) const { return const_range_type( begin(), end(), grainsize ); }
00657     
        //! Destroys all elements and forgets every thread association; the TLS
        //! key is cycled so stale per-thread cached pointers are dropped.
        // NOTE(review): not safe to call concurrently with local() on other threads.
        void clear() {
            my_locals.clear();
            my_hash_tbl.clear();
            reset_key();
            // callback is not destroyed
            // exemplar is not destroyed
        }
00666 
00667         // STL container methods
00668         // copy constructor
00669 
00670     private:
00671 
        //! Shared body of both copy constructors: appends element-wise with
        //! push_back (T may not be assignable), clones the callback and/or
        //! exemplar, and creates a fresh TLS key.  Relies on my_hash_tbl having
        //! been copied in the caller's initializer list; the copied indices stay
        //! valid because elements are appended here in the same order.
        template<typename U, typename A2, ets_key_usage_type C2>
        void
        internal_copy_construct( const enumerable_thread_specific<U, A2, C2>& other) {
            typedef typename tbb::enumerable_thread_specific<U, A2, C2> other_type;
            for(typename other_type::const_iterator ci = other.begin(); ci != other.end(); ++ci) {
                my_locals.push_back(*ci);
            }
            if(other.my_finit_callback) {
                my_finit_callback = other.my_finit_callback->make_copy();
            }
            else {
                my_finit_callback = 0;
            }
            if(other.my_exemplar_ptr) {
                my_exemplar_ptr = create_exemplar(other.my_exemplar_ptr->value);
            }
            else {
                my_exemplar_ptr = 0;
            }
            my_tls_manager::create_key(my_key);
        }
00693 
00694     public:
00695 
00696         template<typename U, typename Alloc, ets_key_usage_type Cachetype>
00697         enumerable_thread_specific( const enumerable_thread_specific<U, Alloc, Cachetype>& other ) : my_hash_tbl(other.my_hash_tbl) 
00698         {   // Have to do push_back because the contained elements are not necessarily assignable.
00699             internal_copy_construct(other);
00700         }
00701 
00702         // non-templatized version
00703         enumerable_thread_specific( const enumerable_thread_specific& other ) : my_hash_tbl(other.my_hash_tbl) 
00704         {
00705             internal_copy_construct(other);
00706         }
00707 
00708     private:
00709 
00710         template<typename U, typename A2, ets_key_usage_type C2>
00711         enumerable_thread_specific &
00712         internal_assign(const enumerable_thread_specific<U, A2, C2>& other) {
00713             typedef typename tbb::enumerable_thread_specific<U, A2, C2> other_type;
00714             if(static_cast<void *>( this ) != static_cast<const void *>( &other )) {
00715                 this->clear(); // resets TLS key
00716                 my_hash_tbl = other.my_hash_tbl;
00717                 // cannot use assign because T may not be assignable.
00718                 for(typename other_type::const_iterator ci = other.begin(); ci != other.end(); ++ci) {
00719                     my_locals.push_back(*ci);
00720                 }
00721 
00722                 if(my_finit_callback) {
00723                     my_finit_callback->destroy();
00724                     my_finit_callback = 0;
00725                 }
00726                 if(my_exemplar_ptr) {
00727                     free_exemplar(my_exemplar_ptr);
00728                     my_exemplar_ptr = 0;
00729                 }
00730                 if(other.my_finit_callback) {
00731                     my_finit_callback = other.my_finit_callback->make_copy();
00732                 }
00733 
00734                 if(other.my_exemplar_ptr) {
00735                     my_exemplar_ptr = create_exemplar(other.my_exemplar_ptr->value);
00736                 }
00737             }
00738             return *this;
00739         }
00740 
00741     public:
00742 
00743         // assignment
00744         enumerable_thread_specific& operator=(const enumerable_thread_specific& other) {
00745             return internal_assign(other);
00746         }
00747 
00748         template<typename U, typename Alloc, ets_key_usage_type Cachetype>
00749         enumerable_thread_specific& operator=(const enumerable_thread_specific<U, Alloc, Cachetype>& other)
00750         {
00751             return internal_assign(other);
00752         }
00753 
00754     private:
00755 
00756         // combine_func_t has signature T(T,T) or T(const T&, const T&)
00757         template <typename combine_func_t>
00758         T internal_combine(typename internal_collection_type::const_range_type r, combine_func_t f_combine) {
00759             if(r.is_divisible()) {
00760                 typename internal_collection_type::const_range_type r2(r,split());
00761                 return f_combine(internal_combine(r2, f_combine), internal_combine(r, f_combine));
00762             }
00763             if(r.size() == 1) {
00764                 return r.begin()->value;
00765             }
00766             typename internal_collection_type::const_iterator i2 = r.begin();
00767             ++i2;
00768             return f_combine(r.begin()->value, i2->value);
00769         }
00770 
00771     public:
00772 
00773         // combine_func_t has signature T(T,T) or T(const T&, const T&)
00774         template <typename combine_func_t>
00775         T combine(combine_func_t f_combine) {
00776             if(my_locals.begin() == my_locals.end()) {
00777                 if(my_finit_callback) {
00778                     return my_finit_callback->apply();
00779                 }
00780                 return (*my_exemplar_ptr).value;
00781             }
00782             typename internal_collection_type::const_range_type r(my_locals.begin(), my_locals.end(), (size_t)2);
00783             return internal_combine(r, f_combine);
00784         }
00785 
00786         // combine_func_t has signature void(T) or void(const T&)
00787         template <typename combine_func_t>
00788         void combine_each(combine_func_t f_combine) {
00789             for(const_iterator ci = begin(); ci != end(); ++ci) {
00790                 f_combine( *ci );
00791             }
00792         }
00793     }; // enumerable_thread_specific
00794 
00795     template< typename Container >
00796     class flattened2d {
00797 
00798         // This intermediate typedef is to address issues with VC7.1 compilers
00799         typedef typename Container::value_type conval_type;
00800 
00801     public:
00802 
00804         typedef typename conval_type::size_type size_type;
00805         typedef typename conval_type::difference_type difference_type;
00806         typedef typename conval_type::allocator_type allocator_type;
00807         typedef typename conval_type::value_type value_type;
00808         typedef typename conval_type::reference reference;
00809         typedef typename conval_type::const_reference const_reference;
00810         typedef typename conval_type::pointer pointer;
00811         typedef typename conval_type::const_pointer const_pointer;
00812 
00813         typedef typename internal::segmented_iterator<Container, value_type> iterator;
00814         typedef typename internal::segmented_iterator<Container, const value_type> const_iterator;
00815 
00816         flattened2d( const Container &c, typename Container::const_iterator b, typename Container::const_iterator e ) : 
00817             my_container(const_cast<Container*>(&c)), my_begin(b), my_end(e) { }
00818 
00819         flattened2d( const Container &c ) : 
00820             my_container(const_cast<Container*>(&c)), my_begin(c.begin()), my_end(c.end()) { }
00821 
00822         iterator begin() { return iterator(*my_container) = my_begin; }
00823         iterator end() { return iterator(*my_container) = my_end; }
00824         const_iterator begin() const { return const_iterator(*my_container) = my_begin; }
00825         const_iterator end() const { return const_iterator(*my_container) = my_end; }
00826 
00827         size_type size() const {
00828             size_type tot_size = 0;
00829             for(typename Container::const_iterator i = my_begin; i != my_end; ++i) {
00830                 tot_size += i->size();
00831             }
00832             return tot_size;
00833         }
00834 
00835     private:
00836 
00837         Container *my_container;
00838         typename Container::const_iterator my_begin;
00839         typename Container::const_iterator my_end;
00840 
00841     };
00842 
00843     template <typename Container>
00844     flattened2d<Container> flatten2d(const Container &c, const typename Container::const_iterator b, const typename Container::const_iterator e) {
00845         return flattened2d<Container>(c, b, e);
00846     }
00847 
00848     template <typename Container>
00849     flattened2d<Container> flatten2d(const Container &c) {
00850         return flattened2d<Container>(c);
00851     }
00852 
00853 } // namespace tbb
00854 
00855 #endif

Copyright © 2005-2009 Intel Corporation. All Rights Reserved.

Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are registered trademarks or trademarks of Intel Corporation or its subsidiaries in the United States and other countries.

* Other names and brands may be claimed as the property of others.