_concurrent_unordered_internal.h

00001 /*
00002     Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
00003 
00004     The source code contained or described herein and all documents related
00005     to the source code ("Material") are owned by Intel Corporation or its
00006     suppliers or licensors.  Title to the Material remains with Intel
00007     Corporation or its suppliers and licensors.  The Material is protected
00008     by worldwide copyright laws and treaty provisions.  No part of the
00009     Material may be used, copied, reproduced, modified, published, uploaded,
00010     posted, transmitted, distributed, or disclosed in any way without
00011     Intel's prior express written permission.
00012 
00013     No license under any patent, copyright, trade secret or other
00014     intellectual property right is granted to or conferred upon you by
00015     disclosure or delivery of the Materials, either expressly, by
00016     implication, inducement, estoppel or otherwise.  Any license under such
00017     intellectual property rights must be express and approved by Intel in
00018     writing.
00019 */
00020 
00021 /* Container implementations in this header are based on PPL implementations 
00022    provided by Microsoft. */
00023 
00024 #ifndef __TBB_concurrent_unordered_internal_H
00025 #define __TBB_concurrent_unordered_internal_H
00026 
00027 #include "tbb_stddef.h"
00028 
00029 #if !TBB_USE_EXCEPTIONS && _MSC_VER
00030     // Suppress "C++ exception handler used, but unwind semantics are not enabled" warning in STL headers
00031     #pragma warning (push)
00032     #pragma warning (disable: 4530)
00033 #endif
00034 
00035 #include <iterator>
00036 #include <utility>      // Need std::pair
00037 #include <functional>
00038 #include <string>       // For tbb_hasher
00039 #include <cstring>      // Need std::memset
00040 
00041 #if !TBB_USE_EXCEPTIONS && _MSC_VER
00042     #pragma warning (pop)
00043 #endif
00044 
00045 #include "tbb_machine.h"
00046 #include "tbb_exception.h"
00047 #include "tbb_allocator.h"
00048 
00049 namespace tbb {
00050 namespace interface5 {
00052 namespace internal {
00053 
// Forward declarations: both templates are defined later in this header.
template <typename T, typename Allocator>
class split_ordered_list;
template <typename Traits>
class concurrent_unordered_base;
00058 
00059 // Forward list iterators (without skipping dummy elements)
00060 template<class Solist, typename Value>
00061 class flist_iterator : public std::iterator<std::forward_iterator_tag, Value>
00062 {
00063     template <typename T, typename Allocator>
00064     friend class split_ordered_list;
00065     template <typename Traits>
00066     friend class concurrent_unordered_base;
00067     template<class M, typename V>
00068     friend class flist_iterator;
00069 
00070     typedef typename Solist::nodeptr_t nodeptr_t;
00071 public:
00072     typedef typename Solist::value_type value_type;
00073     typedef typename Solist::difference_type difference_type;
00074     typedef typename Solist::pointer pointer;
00075     typedef typename Solist::reference reference;
00076 
00077     flist_iterator() : my_node_ptr(0) {}
00078     flist_iterator( const flist_iterator<Solist, typename Solist::value_type> &other )
00079         : my_node_ptr(other.my_node_ptr) {}
00080 
00081     reference operator*() const { return my_node_ptr->my_element; }
00082     pointer operator->() const { return &**this; }
00083 
00084     flist_iterator& operator++() {
00085         my_node_ptr = my_node_ptr->my_next;
00086         return *this;
00087     }
00088 
00089     flist_iterator operator++(int) {
00090         flist_iterator tmp = *this;
00091         ++*this;
00092         return tmp;
00093     }
00094 
00095 protected:
00096     flist_iterator(nodeptr_t pnode) : my_node_ptr(pnode) {}
00097     nodeptr_t get_node_ptr() const { return my_node_ptr; }
00098 
00099     nodeptr_t my_node_ptr;
00100 
00101     template<typename M, typename T, typename U>
00102     friend bool operator==( const flist_iterator<M,T> &i, const flist_iterator<M,U> &j );
00103     template<typename M, typename T, typename U>
00104     friend bool operator!=( const flist_iterator<M,T>& i, const flist_iterator<M,U>& j );
00105 };
00106 
00107 template<typename Solist, typename T, typename U>
00108 bool operator==( const flist_iterator<Solist,T> &i, const flist_iterator<Solist,U> &j ) {
00109     return i.my_node_ptr == j.my_node_ptr;
00110 }
00111 template<typename Solist, typename T, typename U>
00112 bool operator!=( const flist_iterator<Solist,T>& i, const flist_iterator<Solist,U>& j ) {
00113     return i.my_node_ptr != j.my_node_ptr;
00114 }
00115 
00116 // Split-order list iterators, needed to skip dummy elements
00117 template<class Solist, typename Value>
00118 class solist_iterator : public flist_iterator<Solist, Value>
00119 {
00120     typedef flist_iterator<Solist, Value> base_type;
00121     typedef typename Solist::nodeptr_t nodeptr_t;
00122     using base_type::get_node_ptr;
00123     template <typename T, typename Allocator>
00124     friend class split_ordered_list;
00125     template<class M, typename V>
00126     friend class solist_iterator;
00127     template<typename M, typename T, typename U>
00128     friend bool operator==( const solist_iterator<M,T> &i, const solist_iterator<M,U> &j );
00129     template<typename M, typename T, typename U>
00130     friend bool operator!=( const solist_iterator<M,T>& i, const solist_iterator<M,U>& j );
00131 
00132     const Solist *my_list_ptr;
00133     solist_iterator(nodeptr_t pnode, const Solist *plist) : base_type(pnode), my_list_ptr(plist) {}
00134 
00135 public:
00136     typedef typename Solist::value_type value_type;
00137     typedef typename Solist::difference_type difference_type;
00138     typedef typename Solist::pointer pointer;
00139     typedef typename Solist::reference reference;
00140 
00141     solist_iterator() {}
00142     solist_iterator(const solist_iterator<Solist, typename Solist::value_type> &other )
00143         : base_type(other), my_list_ptr(other.my_list_ptr) {}
00144 
00145     reference operator*() const {
00146         return this->base_type::operator*();
00147     }
00148 
00149     pointer operator->() const {
00150         return (&**this);
00151     }
00152 
00153     solist_iterator& operator++() {
00154         do ++(*(base_type *)this);
00155         while (get_node_ptr() != NULL && get_node_ptr()->is_dummy());
00156 
00157         return (*this);
00158     }
00159 
00160     solist_iterator operator++(int) {
00161         solist_iterator tmp = *this;
00162         do ++*this;
00163         while (get_node_ptr() != NULL && get_node_ptr()->is_dummy());
00164 
00165         return (tmp);
00166     }
00167 };
00168 
00169 template<typename Solist, typename T, typename U>
00170 bool operator==( const solist_iterator<Solist,T> &i, const solist_iterator<Solist,U> &j ) {
00171     return i.my_node_ptr == j.my_node_ptr && i.my_list_ptr == j.my_list_ptr;
00172 }
00173 template<typename Solist, typename T, typename U>
00174 bool operator!=( const solist_iterator<Solist,T>& i, const solist_iterator<Solist,U>& j ) {
00175     return i.my_node_ptr != j.my_node_ptr || i.my_list_ptr != j.my_list_ptr;
00176 }
00177 
// Forward type and class definitions

// Split-order key: the ordering key for nodes in the list.  The low bit tags
// the node kind — even keys mark dummy bucket nodes, odd keys mark real
// elements (see node::is_dummy below).
typedef size_t sokey_t;
00180 
// Forward list in which elements are sorted in a split-order
//
// Nodes are kept sorted by their split-order key (sokey_t).  Real elements
// carry odd keys; dummy nodes (even keys, see node::is_dummy) are inserted
// by the hash table as stable anchors marking the start of each bucket.
// Insertion links nodes with a compare-and-swap on the predecessor's next
// pointer, so concurrent inserts are safe; erase is NOT concurrency-safe
// (see the comment inside insert_dummy).
template <typename T, typename Allocator>
class split_ordered_list
{
public:
    typedef split_ordered_list<T, Allocator> self_type;
    typedef typename Allocator::template rebind<T>::other allocator_type;
    struct node;
    typedef node *nodeptr_t;

    typedef typename allocator_type::size_type size_type;
    typedef typename allocator_type::difference_type difference_type;
    typedef typename allocator_type::pointer pointer;
    typedef typename allocator_type::const_pointer const_pointer;
    typedef typename allocator_type::reference reference;
    typedef typename allocator_type::const_reference const_reference;
    typedef typename allocator_type::value_type value_type;

    // Public iterators skip dummy nodes; raw iterators visit every node.
    typedef solist_iterator<self_type, const value_type> const_iterator;
    typedef solist_iterator<self_type, value_type> iterator;
    typedef flist_iterator<self_type, const value_type> raw_const_iterator;
    typedef flist_iterator<self_type, value_type> raw_iterator;

    // Node that holds the element in a split-ordered list
    struct node : tbb::internal::no_assign
    {
        // Initialize the node with the given order key.
        // Note: my_element is NOT touched here; the element is constructed
        // separately with placement new inside create_node().
        void init(sokey_t order_key) {
            my_order_key = order_key;
            my_next = NULL;
        }

        // Return the order key (needed for hashing)
        sokey_t get_order_key() const { // TODO: remove
            return my_order_key;
        }

        // Inserts the new element in the list in an atomic fashion
        nodeptr_t atomic_set_next(nodeptr_t new_node, nodeptr_t current_node)
        {
            // Try to change the next pointer on the current element to a new element, only if it still points to the cached next
            nodeptr_t exchange_node = (nodeptr_t) __TBB_CompareAndSwapW((void *) &my_next, (uintptr_t)new_node, (uintptr_t)current_node);

            // The CAS returns the value my_next held before the operation:
            // equal to current_node means the swap was published.
            if (exchange_node == current_node) // TODO: why this branch?
            {
                // Operation succeeded, return the new node
                return new_node;
            }
            else
            {
                // Operation failed, return the "interfering" node
                return exchange_node;
            }
        }

        // Checks if this element in the list is a dummy, order enforcing node. Dummy nodes are used by buckets
        // in the hash table to quickly index into the right subsection of the split-ordered list.
        bool is_dummy() const {
            return (my_order_key & 0x1) == 0;
        }


        nodeptr_t  my_next;      // Next element in the list
        value_type my_element;   // Element storage
        sokey_t    my_order_key; // Order key for this element
    };

    // Allocate a new node with the given order key and value
    nodeptr_t create_node(sokey_t order_key, const T &value) {
        nodeptr_t pnode = my_node_allocator.allocate(1);

        // Construct the element in place.  If T's copy constructor throws,
        // the raw storage is released (the element was never constructed,
        // so no destructor call is needed) and the exception is rethrown.
        __TBB_TRY {
            new(static_cast<void*>(&pnode->my_element)) T(value);
            pnode->init(order_key);
        } __TBB_CATCH(...) {
            my_node_allocator.deallocate(pnode, 1);
            __TBB_RETHROW();
        }

        return (pnode);
    }

    // Allocate a new node with the given order key; used to allocate dummy nodes
    nodeptr_t create_node(sokey_t order_key) {
        nodeptr_t pnode = my_node_allocator.allocate(1);

        // Same exception discipline as above, with a default-constructed element.
        __TBB_TRY {
            new(static_cast<void*>(&pnode->my_element)) T();
            pnode->init(order_key);
        } __TBB_CATCH(...) {
            my_node_allocator.deallocate(pnode, 1);
            __TBB_RETHROW();
        }

        return (pnode);
    }

    split_ordered_list(allocator_type a = allocator_type())
        : my_node_allocator(a), my_element_count(0)
    {
        // Immediately allocate a dummy node with order key of 0. This node
        // will always be the head of the list.
        my_head = create_node(0);
    }

    ~split_ordered_list()
    {
        // Clear the list
        clear();

        // Remove the head element which is not cleared by clear()
        nodeptr_t pnode = my_head;
        my_head = NULL;

        __TBB_ASSERT(pnode != NULL && pnode->my_next == NULL, "Invalid head list node");

        destroy_node(pnode);
    }

    // Common forward list functions

    allocator_type get_allocator() const {
        return (my_node_allocator);
    }

    // Destroy every node after the head (real and dummy alike) and reset the
    // element count.  The head dummy node itself is kept.
    // NOTE(review): no synchronization here — presumably not meant to run
    // concurrently with other operations; confirm with callers.
    void clear() {
        nodeptr_t pnext;
        nodeptr_t pnode = my_head;

        __TBB_ASSERT(my_head != NULL, "Invalid head list node");
        pnext = pnode->my_next;
        pnode->my_next = NULL;
        pnode = pnext;

        while (pnode != NULL)
        {
            pnext = pnode->my_next;
            destroy_node(pnode);
            pnode = pnext;
        }

        my_element_count = 0;
    }

    // Returns a first non-dummy element in the SOL
    iterator begin() {
        return first_real_iterator(raw_begin());
    }

    // Returns a first non-dummy element in the SOL
    const_iterator begin() const {
        return first_real_iterator(raw_begin());
    }

    iterator end() {
        return (iterator(0, this));
    }

    const_iterator end() const {
        return (const_iterator(0, this));
    }

    const_iterator cbegin() const {
        return (((const self_type *)this)->begin());
    }

    const_iterator cend() const {
        return (((const self_type *)this)->end());
    }

    // Checks if the number of elements (non-dummy) is 0
    bool empty() const {
        return (my_element_count == 0);
    }

    // Returns the number of non-dummy elements in the list
    size_type size() const {
        return my_element_count;
    }

    // Returns the maximum size of the list, determined by the allocator
    size_type max_size() const {
        return my_node_allocator.max_size();
    }

    // Swaps 'this' list with the passed in one
    // NOTE(review): the two member swaps are not atomic as a pair — not safe
    // to run concurrently with other operations on either list; confirm.
    void swap(self_type& other)
    {
        if (this == &other)
        {
            // Nothing to do
            return;
        }

        std::swap(my_element_count, other.my_element_count);
        std::swap(my_head, other.my_head);
    }

    // Split-order list functions

    // Returns a first element in the SOL, which is always a dummy
    raw_iterator raw_begin() {
        return raw_iterator(my_head);
    }

    // Returns a first element in the SOL, which is always a dummy
    raw_const_iterator raw_begin() const {
        return raw_const_iterator(my_head);
    }

    raw_iterator raw_end() {
        return raw_iterator(0);
    }

    raw_const_iterator raw_end() const {
        return raw_const_iterator(0);
    }

    static sokey_t get_order_key(const raw_const_iterator& it) {
        return it.get_node_ptr()->get_order_key();
    }

    // Like get_order_key, but maps the end iterator (NULL node) to the
    // maximum possible key so it orders after every real key.
    static sokey_t get_safe_order_key(const raw_const_iterator& it) {
        if( !it.get_node_ptr() ) return sokey_t(~0U);
        return it.get_node_ptr()->get_order_key();
    }

    // Returns a public iterator version of the internal iterator. Public iterator must not
    // be a dummy private iterator.
    iterator get_iterator(raw_iterator it) {
        __TBB_ASSERT(it.get_node_ptr() == NULL || !it.get_node_ptr()->is_dummy(), "Invalid user node (dummy)");
        return iterator(it.get_node_ptr(), this);
    }

    // Returns a public iterator version of the internal iterator. Public iterator must not
    // be a dummy private iterator.
    const_iterator get_iterator(raw_const_iterator it) const {
        __TBB_ASSERT(it.get_node_ptr() == NULL || !it.get_node_ptr()->is_dummy(), "Invalid user node (dummy)");
        return const_iterator(it.get_node_ptr(), this);
    }

    // Returns a non-const version of the raw_iterator
    raw_iterator get_iterator(raw_const_iterator it) {
        return raw_iterator(it.get_node_ptr());
    }

    // Returns a non-const version of the iterator
    static iterator get_iterator(const_iterator it) {
        return iterator(it.my_node_ptr, it.my_list_ptr);
    }

    // Returns a public iterator version of a first non-dummy internal iterator at or after
    // the passed in internal iterator.
    iterator first_real_iterator(raw_iterator it)
    {
        // Skip all dummy, internal only iterators
        while (it != raw_end() && it.get_node_ptr()->is_dummy())
            ++it;

        return iterator(it.get_node_ptr(), this);
    }

    // Returns a public iterator version of a first non-dummy internal iterator at or after
    // the passed in internal iterator.
    const_iterator first_real_iterator(raw_const_iterator it) const
    {
        // Skip all dummy, internal only iterators
        while (it != raw_end() && it.get_node_ptr()->is_dummy())
            ++it;

        return const_iterator(it.get_node_ptr(), this);
    }

    // Erase an element using the allocator
    // destroy() runs ~node(), which in turn destroys my_element.
    void destroy_node(nodeptr_t pnode) {
        my_node_allocator.destroy(pnode);
        my_node_allocator.deallocate(pnode, 1);
    }

    // Try to insert a new element in the list. If insert fails, return the node that
    // was inserted instead.
    nodeptr_t try_insert(nodeptr_t previous, nodeptr_t new_node, nodeptr_t current_node) {
        // Link the new node to its successor BEFORE publishing it with the CAS,
        // so the list is never observed in a broken state.
        new_node->my_next = current_node;
        return previous->atomic_set_next(new_node, current_node);
    }

    // Insert a new element between passed in iterators
    std::pair<iterator, bool> try_insert(raw_iterator it, raw_iterator next, const value_type &value, sokey_t order_key, size_type *new_count)
    {
        nodeptr_t pnode = create_node(order_key, value);
        nodeptr_t inserted_node = try_insert(it.get_node_ptr(), pnode, next.get_node_ptr());

        if (inserted_node == pnode)
        {
            // If the insert succeeded, check that the order is correct and increment the element count
            // NOTE(review): fetch-and-add conventionally returns the PRE-increment
            // value, so *new_count is the count before this insertion — confirm
            // callers expect that convention.
            check_range();
            *new_count = __TBB_FetchAndAddW((uintptr_t*)&my_element_count, uintptr_t(1));
            return std::pair<iterator, bool>(iterator(pnode, this), true);
        }
        else
        {
            // If the insert failed (element already there), then delete the new one
            destroy_node(pnode);
            return std::pair<iterator, bool>(end(), false);
        }
    }

    // Insert a new dummy element, starting search at a parent dummy element
    raw_iterator insert_dummy(raw_iterator it, sokey_t order_key)
    {
        raw_iterator last = raw_end();
        raw_iterator where = it;

        __TBB_ASSERT(where != last, "Invalid head node");

        ++where;

        // Create a dummy element up front, even though it may be discarded (due to concurrent insertion)
        nodeptr_t dummy_node = create_node(order_key);

        // Walk forward from the parent dummy until the correct slot for
        // order_key is found, retrying the CAS insert on interference.
        for (;;)
        {
            __TBB_ASSERT(it != last, "Invalid head list node");

            // If the head iterator is at the end of the list, or past the point where this dummy
            // node needs to be inserted, then try to insert it.
            if (where == last || get_order_key(where) > order_key)
            {
                __TBB_ASSERT(get_order_key(it) < order_key, "Invalid node order in the list");

                // Try to insert it in the right place
                nodeptr_t inserted_node = try_insert(it.get_node_ptr(), dummy_node, where.get_node_ptr());

                if (inserted_node == dummy_node)
                {
                    // Insertion succeeded, check the list for order violations
                    check_range();
                    return raw_iterator(dummy_node);
                }
                else
                {
                    // Insertion failed: either dummy node was inserted by another thread, or
                    // a real element was inserted at exactly the same place as dummy node.
                    // Proceed with the search from the previous location where order key was
                    // known to be larger (note: this is legal only because there is no safe
                    // concurrent erase operation supported).
                    where = it;
                    ++where;
                    continue;
                }
            }
            else if (get_order_key(where) == order_key)
            {
                // Another dummy node with the same value found, discard the new one.
                destroy_node(dummy_node);
                return where;
            }

            // Move the iterator forward
            it = where;
            ++where;
        }

    }

    // This erase function can handle both real and dummy nodes
    // Precondition: previous must be the direct predecessor of where.
    // On return, where has been advanced past the erased node.
    void erase_node(raw_iterator previous, raw_const_iterator& where)
    {
        nodeptr_t pnode = (where++).get_node_ptr();
        nodeptr_t prevnode = previous.get_node_ptr();
        __TBB_ASSERT(prevnode->my_next == pnode, "Erase must take consecutive iterators");
        prevnode->my_next = pnode->my_next;

        destroy_node(pnode);
    }

    // Erase the element (previous node needs to be passed because this is a forward only list)
    iterator erase_node(raw_iterator previous, const_iterator where)
    {
        raw_const_iterator it = where;
        erase_node(previous, it);
        my_element_count--;

        // Return an iterator to the next real (non-dummy) element.
        return get_iterator(first_real_iterator(it));
    }

    // Move all elements from the passed in split-ordered list to this one
    //
    // Nodes (dummies included) are copied one at a time into this list and
    // erased from the source.  Every pass erases the node directly after the
    // source head, so erase_node's "consecutive iterators" precondition holds.
    // NOTE(review): my_element_count is not updated here for either list —
    // presumably the caller adjusts the counts separately; confirm.
    void move_all(self_type& source)
    {
        raw_const_iterator first = source.raw_begin();
        raw_const_iterator last = source.raw_end();

        if (first == last)
            return;

        nodeptr_t previous_node = my_head;
        raw_const_iterator begin_iterator = first++;

        // Move all elements one by one, including dummy ones
        for (raw_const_iterator it = first; it != last;)
        {
            nodeptr_t pnode = it.get_node_ptr();

            // Despite the variable name, this copies real elements as well as dummies.
            nodeptr_t dummy_node = pnode->is_dummy() ? create_node(pnode->get_order_key()) : create_node(pnode->get_order_key(), pnode->my_element);
            previous_node = try_insert(previous_node, dummy_node, NULL);
            __TBB_ASSERT(previous_node != NULL, "Insertion must succeed");
            raw_const_iterator where = it++;
            source.erase_node(get_iterator(begin_iterator), where);
        }
        check_range();
    }


private:

    // Check the list for order violations
    // (debug-only: compiled away unless TBB_USE_ASSERT)
    void check_range()
    {
#if TBB_USE_ASSERT
        for (raw_iterator it = raw_begin(); it != raw_end(); ++it)
        {
            raw_iterator next_iterator = it;
            ++next_iterator;

            // Comparing against end() rather than raw_end() is still correct:
            // both hold a NULL node pointer, and the flist_iterator equality
            // selected here compares node pointers only.
            __TBB_ASSERT(next_iterator == end() || next_iterator.get_node_ptr()->get_order_key() >= it.get_node_ptr()->get_order_key(), "!!! List order inconsistency !!!");
        }
#endif
    }

    typename allocator_type::template rebind<node>::other my_node_allocator;  // allocator object for nodes
    size_type                                             my_element_count;   // Total item count, not counting dummy nodes
    nodeptr_t                                             my_head;            // pointer to head node
};
00614 
00615 // Template class for hash compare
00616 template<typename Key, typename Hasher, typename Key_equality>
00617 class hash_compare
00618 {
00619 public:
00620     hash_compare() {}
00621 
00622     hash_compare(Hasher a_hasher) : my_hash_object(a_hasher) {}
00623 
00624     hash_compare(Hasher a_hasher, Key_equality a_keyeq) : my_hash_object(a_hasher), my_key_compare_object(a_keyeq) {}
00625 
00626     size_t operator()(const Key& key) const {
00627         return ((size_t)my_hash_object(key));
00628     }
00629 
00630     bool operator()(const Key& key1, const Key& key2) const {
00631         return (!my_key_compare_object(key1, key2));
00632     }
00633 
00634     Hasher       my_hash_object;        // The hash object
00635     Key_equality my_key_compare_object; // The equality comparator object
00636 };
00637 
00638 #if _MSC_VER
00639 #pragma warning(push)
00640 #pragma warning(disable: 4127) // warning 4127 -- while (true) has a constant expression in it (for allow_multimapping)
00641 #endif
00642 
// Common implementation shared by the concurrent unordered containers.
// Traits supplies the value/key types, the hash_compare and the allocator.
// Elements live in a split_ordered_list; the bucket table (managed by
// members outside this excerpt, e.g. get_bucket/is_initialized) points at
// dummy nodes inside that list.
template <typename Traits>
class concurrent_unordered_base : public Traits
{
protected:
    // Type definitions
    typedef concurrent_unordered_base<Traits> self_type;
    typedef typename Traits::value_type value_type;
    typedef typename Traits::key_type key_type;
    typedef typename Traits::hash_compare hash_compare;
    typedef typename Traits::value_compare value_compare;
    typedef typename Traits::allocator_type allocator_type;
    typedef typename allocator_type::pointer pointer;
    typedef typename allocator_type::const_pointer const_pointer;
    typedef typename allocator_type::reference reference;
    typedef typename allocator_type::const_reference const_reference;
    typedef typename allocator_type::size_type size_type;
    typedef typename allocator_type::difference_type difference_type;
    typedef split_ordered_list<value_type, typename Traits::allocator_type> solist_t;
    typedef typename solist_t::nodeptr_t nodeptr_t;
    // Iterators that walk the entire split-order list, including dummy nodes
    typedef typename solist_t::raw_iterator raw_iterator;
    typedef typename solist_t::raw_const_iterator raw_const_iterator;
    typedef typename solist_t::iterator iterator; // TODO: restore const iterator for unordered_sets
    typedef typename solist_t::const_iterator const_iterator;
    typedef iterator local_iterator;
    typedef const_iterator const_local_iterator;
    using Traits::my_hash_compare;
    using Traits::get_key;
    using Traits::allow_multimapping;

private:
    // Iterator-pair result types for equal_range and friends.
    typedef std::pair<iterator, iterator> pairii_t;
    typedef std::pair<const_iterator, const_iterator> paircc_t;

    static size_type const pointers_per_table = sizeof(size_type) * 8;              // One bucket segment per bit
    static const size_type initial_bucket_number = 8;                               // Initial number of buckets
    static const size_type initial_bucket_load = 4;                                // Initial maximum number of elements per bucket

protected:
    // Constructors/Destructors
    // (all of these delegate the real work to internal_init / internal_copy /
    // internal_clear, which are defined elsewhere in this class)

    // Construct an empty table with n_of_buckets buckets.
    concurrent_unordered_base(size_type n_of_buckets = initial_bucket_number,
        const hash_compare& hc = hash_compare(), const allocator_type& a = allocator_type())
        : Traits(hc), my_number_of_buckets(n_of_buckets), my_solist(a),
          my_allocator(a), my_maximum_bucket_size((float) initial_bucket_load)
    {
        internal_init();
    }

    // Copy-construct with an explicitly supplied allocator.
    // NOTE(review): unlike the plain copy constructor below, this overload
    // does not call internal_init() before internal_copy(right) — verify
    // that internal_copy performs the required initialization.
    concurrent_unordered_base(const concurrent_unordered_base& right, const allocator_type& a)
        : Traits(right.my_hash_compare), my_solist(a), my_allocator(a)
    {
        internal_copy(right);
    }

    // Copy constructor; takes the allocator from the source table.
    concurrent_unordered_base(const concurrent_unordered_base& right)
        : Traits(right.my_hash_compare), my_solist(right.get_allocator()), my_allocator(right.get_allocator())
    {
        internal_init();
        internal_copy(right);
    }

    // Copy assignment; self-assignment safe.
    concurrent_unordered_base& operator=(const concurrent_unordered_base& right) {
        if (this != &right)
            internal_copy(right);
        return (*this);
    }

    ~concurrent_unordered_base() {
        // Delete all node segments
        internal_clear();
    }
00714 
public:
    allocator_type get_allocator() const {
        return my_solist.get_allocator();
    }

    // Size and capacity function
    // (these simply forward to the underlying split-ordered list, which
    // tracks the count of real, non-dummy elements)
    bool empty() const {
        return my_solist.empty();
    }

    size_type size() const {
        return my_solist.size();
    }

    size_type max_size() const {
        return my_solist.max_size();
    }

    // Iterators 
    // (public iterators skip the dummy bucket nodes of the underlying list)
    iterator begin() {
        return my_solist.begin();
    }

    const_iterator begin() const {
        return my_solist.begin();
    }

    iterator end() {
        return my_solist.end();
    }

    const_iterator end() const {
        return my_solist.end();
    }

    const_iterator cbegin() const {
        return my_solist.cbegin();
    }

    const_iterator cend() const {
        return my_solist.cend();
    }
00757 
    // Parallel traversal support
    //
    // A splittable range over the real elements of the table, usable with
    // TBB parallel algorithms.  Splitting picks a midpoint bucket by
    // bit-reversing the middle of the [begin, end) order-key interval.
    class const_range_type : tbb::internal::no_assign {
        const concurrent_unordered_base &my_table;
        raw_const_iterator my_begin_node;
        raw_const_iterator my_end_node;
        mutable raw_const_iterator my_midpoint_node; // cached split point, recomputed on demand
    public:
        // Type definitions
        typedef typename concurrent_unordered_base::size_type size_type;
        typedef typename concurrent_unordered_base::value_type value_type;
        typedef typename concurrent_unordered_base::reference reference;
        typedef typename concurrent_unordered_base::difference_type difference_type;
        typedef typename concurrent_unordered_base::const_iterator iterator;

        // True if the range contains no elements.
        bool empty() const {return my_begin_node == my_end_node;}

        // True if the range can be split into two non-empty subranges.
        bool is_divisible() const {
            return my_midpoint_node != my_end_node;
        }
        // Splitting constructor: 'this' takes [midpoint, end), r keeps [begin, midpoint).
        const_range_type( const_range_type &r, split ) : 
            my_table(r.my_table), my_end_node(r.my_end_node)
        {
            r.my_end_node = my_begin_node = r.my_midpoint_node;
            __TBB_ASSERT( !empty(), "Splitting despite the range is not divisible" );
            __TBB_ASSERT( !r.empty(), "Splitting despite the range is not divisible" );
            set_midpoint();
            r.set_midpoint();
        }
        // Construct a range covering the whole table.
        const_range_type( const concurrent_unordered_base &a_table ) : 
            my_table(a_table), my_begin_node(a_table.my_solist.begin()),
            my_end_node(a_table.my_solist.end())
        {
            set_midpoint();
        }
        iterator begin() const { return my_table.my_solist.get_iterator(my_begin_node); }
        iterator end() const { return my_table.my_solist.get_iterator(my_end_node); }
        // The grain size for this range.
        size_type grainsize() const { return 1; }

        // Set my_midpoint_node to an approximate middle of [my_begin_node, my_end_node).
        void set_midpoint() const {
            if( my_begin_node == my_end_node ) // not divisible
                my_midpoint_node = my_end_node;
            else {
                sokey_t begin_key = solist_t::get_safe_order_key(my_begin_node);
                sokey_t end_key = solist_t::get_safe_order_key(my_end_node);
                // Bit-reverse the middle order key back into a bucket index,
                // then climb to the nearest initialized parent bucket.
                size_t mid_bucket = __TBB_ReverseBits( begin_key + (end_key-begin_key)/2 ) % my_table.my_number_of_buckets;
                while ( !my_table.is_initialized(mid_bucket) ) mid_bucket = my_table.get_parent(mid_bucket);
                my_midpoint_node = my_table.my_solist.first_real_iterator(my_table.get_bucket( mid_bucket ));
                // Degenerate split (midpoint equals begin) => declare indivisible.
                if( my_midpoint_node == my_begin_node )
                    my_midpoint_node = my_end_node;
#if TBB_USE_ASSERT
                else {
                    sokey_t mid_key = solist_t::get_safe_order_key(my_midpoint_node);
                    __TBB_ASSERT( begin_key < mid_key, "my_begin_node is after my_midpoint_node" );
                    __TBB_ASSERT( mid_key <= end_key, "my_midpoint_node is after my_end_node" );
                }
#endif // TBB_USE_ASSERT
            }
        }
    };
00823 
00824     class range_type : public const_range_type {
00825     public:
00826         typedef typename concurrent_unordered_base::iterator iterator;
00828         range_type( range_type &r, split ) : const_range_type( r, split() ) {}
00830         range_type( const concurrent_unordered_base &a_table ) : const_range_type(a_table) {}
00831 
00832         iterator begin() const { return solist_t::get_iterator( const_range_type::begin() ); }
00833         iterator end() const { return solist_t::get_iterator( const_range_type::end() ); }
00834     };
00835 
    // Returns a splittable range covering the whole container.
    range_type range() {
        return range_type( *this );
    }
00839 
    // Const overload: returns a splittable const range over the whole container.
    const_range_type range() const {
        return const_range_type( *this );
    }
00843 
    // Modifiers
    // Thread-safe insert; returns (iterator, succeeded) as per internal_insert.
    std::pair<iterator, bool> insert(const value_type& value) {
        return internal_insert(value);
    }
00848 
00849     iterator insert(const_iterator, const value_type& value) {
00850         // Ignore hint
00851         return insert(value).first;
00852     }
00853 
00854     template<class Iterator>
00855     void insert(Iterator first, Iterator last) {
00856         for (Iterator it = first; it != last; ++it)
00857             insert(*it);
00858     }
00859 
    // Erase the element at 'where'.  NOT safe to run concurrently with other
    // operations (hence the unsafe_ prefix); returns the iterator following it.
    iterator unsafe_erase(const_iterator where) {
        return internal_erase(where);
    }
00863 
00864     iterator unsafe_erase(const_iterator first, const_iterator last) {
00865         while (first != last)
00866             unsafe_erase(first++);
00867         return my_solist.get_iterator(first);
00868     }
00869 
00870     size_type unsafe_erase(const key_type& key) {
00871         pairii_t where = equal_range(key);
00872         size_type item_count = internal_distance(where.first, where.second);
00873         unsafe_erase(where.first, where.second);
00874         return item_count;
00875     }
00876 
00877     void swap(concurrent_unordered_base& right) {
00878         if (this != &right) {
00879             std::swap(my_hash_compare, right.my_hash_compare); // TODO: check what ADL meant here
00880             my_solist.swap(right.my_solist);
00881             internal_swap_buckets(right);
00882             std::swap(my_number_of_buckets, right.my_number_of_buckets);
00883             std::swap(my_maximum_bucket_size, right.my_maximum_bucket_size);
00884         }
00885     }
00886 
00887     // Observers
00888     void clear() {
00889         // Clear list
00890         my_solist.clear();
00891 
00892         // Clear buckets
00893         internal_clear();
00894     }
00895 
    // Lookup
    // Returns an iterator to an element equivalent to 'key', or end() if absent.
    iterator find(const key_type& key) {
        return internal_find(key);
    }
00900 
00901     const_iterator find(const key_type& key) const {
00902         return const_cast<self_type*>(this)->internal_find(key);
00903     }
00904 
00905     size_type count(const key_type& key) const {
00906         paircc_t answer = equal_range(key);
00907         size_type item_count = internal_distance(answer.first, answer.second);
00908         return item_count;
00909     }
00910 
    // Returns the [first, last) iterator pair of elements equivalent to 'key'.
    std::pair<iterator, iterator> equal_range(const key_type& key) {
        return internal_equal_range(key);
    }
00914 
    // Const overload of equal_range.
    std::pair<const_iterator, const_iterator> equal_range(const key_type& key) const {
        return internal_equal_range(key);
    }
00918 
    // Bucket interface - for debugging 
    // Snapshot of the current (power-of-two) bucket count.
    size_type unsafe_bucket_count() const {
        return my_number_of_buckets;
    }
00923 
    // Upper bound on the bucket count: the capacity of the largest segment.
    size_type unsafe_max_bucket_count() const {
        return segment_size(pointers_per_table-1);
    }
00927 
00928     size_type unsafe_bucket_size(size_type bucket) {
00929         size_type item_count = 0;
00930         if (is_initialized(bucket)) {
00931             raw_iterator it = get_bucket(bucket);
00932             ++it;
00933             for (; it != my_solist.raw_end() && !it.get_node_ptr()->is_dummy(); ++it)
00934                 ++item_count;
00935         }
00936         return item_count;
00937     }
00938 
00939     size_type unsafe_bucket(const key_type& key) const {
00940         sokey_t order_key = (sokey_t) my_hash_compare(key);
00941         size_type bucket = order_key % my_number_of_buckets;
00942         return bucket;
00943     }
00944 
00945     // If the bucket is initialized, return a first non-dummy element in it
00946     local_iterator unsafe_begin(size_type bucket) {
00947         if (!is_initialized(bucket))
00948             return end();
00949 
00950         raw_iterator it = get_bucket(bucket);
00951         return my_solist.first_real_iterator(it);
00952     }
00953 
00954     // If the bucket is initialized, return a first non-dummy element in it
00955     const_local_iterator unsafe_begin(size_type bucket) const
00956     {
00957         if (!is_initialized(bucket))
00958             return end();
00959 
00960         raw_const_iterator it = get_bucket(bucket);
00961         return my_solist.first_real_iterator(it);
00962     }
00963 
00964     // @REVIEW: Takes O(n)
00965     // Returns the iterator after the last non-dummy element in the bucket
00966     local_iterator unsafe_end(size_type bucket)
00967     {
00968         if (!is_initialized(bucket))
00969             return end();
00970 
00971         raw_iterator it = get_bucket(bucket);
00972     
00973         // Find the end of the bucket, denoted by the dummy element
00974         do ++it;
00975         while(it != my_solist.raw_end() && !it.get_node_ptr()->is_dummy());
00976 
00977         // Return the first real element past the end of the bucket
00978         return my_solist.first_real_iterator(it);
00979     }
00980 
00981     // @REVIEW: Takes O(n)
00982     // Returns the iterator after the last non-dummy element in the bucket
00983     const_local_iterator unsafe_end(size_type bucket) const
00984     {
00985         if (!is_initialized(bucket))
00986             return end();
00987 
00988         raw_const_iterator it = get_bucket(bucket);
00989     
00990         // Find the end of the bucket, denoted by the dummy element
00991         do ++it;
00992         while(it != my_solist.raw_end() && !it.get_node_ptr()->is_dummy());
00993 
00994         // Return the first real element past the end of the bucket
00995         return my_solist.first_real_iterator(it);
00996     }
00997 
00998     const_local_iterator unsafe_cbegin(size_type bucket) const {
00999         return ((const self_type *) this)->begin();
01000     }
01001 
01002     const_local_iterator unsafe_cend(size_type bucket) const {
01003         return ((const self_type *) this)->end();
01004     }
01005 
01006     // Hash policy
01007     float load_factor() const {
01008         return (float) size() / (float) unsafe_bucket_count();
01009     }
01010 
    // Current growth threshold: the table doubles when load_factor() exceeds it.
    float max_load_factor() const {
        return my_maximum_bucket_size;
    }
01014 
01015     void max_load_factor(float newmax) {
01016         if (newmax != newmax || newmax < 0)
01017             tbb::internal::throw_exception(tbb::internal::eid_invalid_load_factor);
01018         my_maximum_bucket_size = newmax;
01019     }
01020 
01021     // This function is a noop, because the underlying split-ordered list
01022     // is already sorted, so an increase in the bucket number will be
01023     // reflected next time this bucket is touched.
01024     void rehash(size_type buckets) {
01025         size_type current_buckets = my_number_of_buckets;
01026 
01027         if (current_buckets > buckets)
01028             return;
01029         else if ( (buckets & (buckets-1)) != 0 )
01030             tbb::internal::throw_exception(tbb::internal::eid_invalid_buckets_number);
01031         my_number_of_buckets = buckets;
01032     }
01033 
01034 private:
01035 
01036     // Initialize the hash and keep the first bucket open
01037     void internal_init() {
01038         // Allocate an array of segment pointers
01039         memset(my_buckets, 0, pointers_per_table * sizeof(void *));
01040 
01041         // Insert the first element in the split-ordered list
01042         raw_iterator dummy_node = my_solist.raw_begin();
01043         set_bucket(0, dummy_node);
01044     }
01045 
01046     void internal_clear() {
01047         for (size_type index = 0; index < pointers_per_table; ++index) {
01048             if (my_buckets[index] != NULL) {
01049                 size_type sz = segment_size(index);
01050                 for (size_type index2 = 0; index2 < sz; ++index2)
01051                     my_allocator.destroy(&my_buckets[index][index2]);
01052                 my_allocator.deallocate(my_buckets[index], sz);
01053                 my_buckets[index] = 0;
01054             }
01055         }
01056     }
01057 
    // Replace this table's contents with a copy of 'right'.
    // NOTE(review): if an insert throws, the element list is cleared but the
    // bucket table still holds iterators to the old dummy-node positions —
    // presumably the container is considered unusable after a failed copy;
    // confirm against callers before relying on its state.
    void internal_copy(const self_type& right) {
        clear();

        my_maximum_bucket_size = right.my_maximum_bucket_size;
        my_number_of_buckets = right.my_number_of_buckets;

        __TBB_TRY {
            insert(right.begin(), right.end());
            my_hash_compare = right.my_hash_compare;
        } __TBB_CATCH(...) {
            my_solist.clear();
            __TBB_RETHROW();
        }
    }
01072 
01073     void internal_swap_buckets(concurrent_unordered_base& right)
01074     {
01075         // Swap all node segments
01076         for (size_type index = 0; index < pointers_per_table; ++index)
01077         {
01078             raw_iterator * iterator_pointer = my_buckets[index];
01079             my_buckets[index] = right.my_buckets[index];
01080             right.my_buckets[index] = iterator_pointer;
01081         }
01082     }
01083 
01084     // Hash APIs
01085     size_type internal_distance(const_iterator first, const_iterator last) const
01086     {
01087         size_type num = 0;
01088 
01089         for (const_iterator it = first; it != last; ++it)
01090             ++num;
01091 
01092         return num;
01093     }
01094 
    // Insert an element in the hash given its value.
    // Returns (iterator, true) on success, or (iterator to the equivalent
    // existing element, false) when allow_multimapping is off and the key is
    // already present.  Safe to call concurrently with other insertions and
    // lookups; the retry loop below handles lost races on try_insert.
    std::pair<iterator, bool> internal_insert(const value_type& value)
    {
        sokey_t order_key = (sokey_t) my_hash_compare(get_key(value));
        size_type bucket = order_key % my_number_of_buckets;

        // If bucket is empty, initialize it first
        if (!is_initialized(bucket))
            init_bucket(bucket);

        size_type new_count;
        order_key = split_order_key_regular(order_key);
        raw_iterator it = get_bucket(bucket);
        raw_iterator last = my_solist.raw_end();
        raw_iterator where = it;

        __TBB_ASSERT(where != last, "Invalid head node");

        // First node is a dummy node
        ++where;

        for (;;)
        {
            if (where == last || solist_t::get_order_key(where) > order_key)
            {
                // Try to insert it in the right place
                std::pair<iterator, bool> result = my_solist.try_insert(it, where, value, order_key, &new_count);
                
                if (result.second)
                {
                    // Insertion succeeded, adjust the table size, if needed
                    adjust_table_size(new_count, my_number_of_buckets);
                    return result;
                }
                else
                {
                    // Insertion failed: either the same node was inserted by another thread, or
                    // another element was inserted at exactly the same place as this node.
                    // Proceed with the search from the previous location where order key was
                    // known to be larger (note: this is legal only because there is no safe
                    // concurrent erase operation supported).
                    where = it;
                    ++where;
                    continue;
                }
            }
            else if (!allow_multimapping && solist_t::get_order_key(where) == order_key && my_hash_compare(get_key(*where), get_key(value)) == 0)
            {
                // Element already in the list, return it
                return std::pair<iterator, bool>(my_solist.get_iterator(where), false);
            }

            // Move the iterator forward
            it = where;
            ++where;
        }
    }
01152 
    // Find the element in the split-ordered list.
    // Lazily initializes the key's bucket, then walks forward from the
    // bucket's dummy node; the list is ordered by reversed-bit order key,
    // so the scan stops as soon as a larger key is seen.
    iterator internal_find(const key_type& key)
    {
        sokey_t order_key = (sokey_t) my_hash_compare(key);
        size_type bucket = order_key % my_number_of_buckets;

        // If bucket is empty, initialize it first
        if (!is_initialized(bucket))
            init_bucket(bucket);

        order_key = split_order_key_regular(order_key);
        raw_iterator last = my_solist.raw_end();

        for (raw_iterator it = get_bucket(bucket); it != last; ++it)
        {
            if (solist_t::get_order_key(it) > order_key)
            {
                // If the order key is smaller than the current order key, the element
                // is not in the hash.
                return end();
            }
            else if (solist_t::get_order_key(it) == order_key)
            {
                // The fact that order keys match does not mean that the element is found.
                // Key function comparison has to be performed to check whether this is the
                // right element. If not, keep searching while order key is the same.
                if (!my_hash_compare(get_key(*it), key))
                    return my_solist.get_iterator(it);
            }
        }

        return end();
    }
01186 
    // Erase an element from the list. This is not a concurrency safe function.
    // Walks the element's bucket from its dummy head, tracking the previous
    // node so the list can be unlinked at 'it'; returns the iterator that
    // followed the erased element, or end() if 'it' was not found.
    iterator internal_erase(const_iterator it)
    {
        key_type key = get_key(*it);
        sokey_t order_key = (sokey_t) my_hash_compare(key);
        size_type bucket = order_key % my_number_of_buckets;

        // If bucket is empty, initialize it first
        if (!is_initialized(bucket))
            init_bucket(bucket);

        order_key = split_order_key_regular(order_key);

        raw_iterator previous = get_bucket(bucket);
        raw_iterator last = my_solist.raw_end();
        raw_iterator where = previous;

        __TBB_ASSERT(where != last, "Invalid head node");

        // First node is a dummy node
        ++where;

        for (;;) {
            if (where == last)
                return end();
            else if (my_solist.get_iterator(where) == it)
                return my_solist.erase_node(previous, it);

            // Move the iterator forward
            previous = where;
            ++where;
        }
    }
01220 
    // Return the [begin, end) pair of iterators with the same key values.
    // This operation makes sense only if mapping is many-to-one.
    // Unlike the const overload, this version may lazily initialize the
    // key's bucket before scanning it.
    pairii_t internal_equal_range(const key_type& key)
    {
        sokey_t order_key = (sokey_t) my_hash_compare(key);
        size_type bucket = order_key % my_number_of_buckets;

        // If bucket is empty, initialize it first
        if (!is_initialized(bucket))
            init_bucket(bucket);

        order_key = split_order_key_regular(order_key);
        raw_iterator end_it = my_solist.raw_end();

        for (raw_iterator it = get_bucket(bucket); it != end_it; ++it)
        {
            if (solist_t::get_order_key(it) > order_key)
            {
                // There is no element with the given key
                return pairii_t(end(), end());
            }
            else if (solist_t::get_order_key(it) == order_key && !my_hash_compare(get_key(*it), key))
            {
                iterator first = my_solist.get_iterator(it);
                iterator last = first;

                // Extend 'last' over the run of elements comparing equal to key
                while( last != end() && !my_hash_compare(get_key(*last), key) )
                    ++last;
                return pairii_t(first, last);
            }
        }

        return pairii_t(end(), end());
    }
01255 
    // Return the [begin, end) pair of const iterators with the same key values.
    // This operation makes sense only if mapping is many-to-one.
    // Being const, this overload cannot initialize a bucket; an uninitialized
    // bucket simply means the key is absent.
    paircc_t internal_equal_range(const key_type& key) const
    {
        sokey_t order_key = (sokey_t) my_hash_compare(key);
        size_type bucket = order_key % my_number_of_buckets;

        // If bucket is empty, initialize it first
        if (!is_initialized(bucket))
            return paircc_t(end(), end());

        order_key = split_order_key_regular(order_key);
        raw_const_iterator end_it = my_solist.raw_end();

        for (raw_const_iterator it = get_bucket(bucket); it != end_it; ++it)
        {
            if (solist_t::get_order_key(it) > order_key)
            {
                // There is no element with the given key
                return paircc_t(end(), end());
            }
            else if (solist_t::get_order_key(it) == order_key && !my_hash_compare(get_key(*it), key))
            {
                const_iterator first = my_solist.get_iterator(it);
                const_iterator last = first;

                // Extend 'last' over the run of elements comparing equal to key
                while( last != end() && !my_hash_compare(get_key(*last), key ) )
                    ++last;
                return paircc_t(first, last);
            }
        }

        return paircc_t(end(), end());
    }
01290 
    // Bucket APIs
    // Lazily create the dummy head node for 'bucket'.  A bucket's dummy node
    // is spliced into the list right after its parent bucket's dummy node,
    // so ancestors are initialized first (recursively up to bucket 0).
    void init_bucket(size_type bucket)
    {
        // Bucket 0 has no parent. Initialize it and return.
        if (bucket == 0) {
            internal_init();
            return;
        }

        size_type parent_bucket = get_parent(bucket);

        // All parent_bucket buckets have to be initialized before this bucket is
        if (!is_initialized(parent_bucket))
            init_bucket(parent_bucket);

        raw_iterator parent = get_bucket(parent_bucket);

        // Create a dummy first node in this bucket
        raw_iterator dummy_node = my_solist.insert_dummy(parent, split_order_key_dummy(bucket));
        set_bucket(bucket, dummy_node);
    }
01312 
    // Double the bucket count when the load factor exceeds the maximum.
    // The CAS publishes the new size only if no other thread resized in the
    // meantime; losing the race needs no retry — the table was already grown.
    void adjust_table_size(size_type total_elements, size_type current_size)
    {
        // Grow the table by a factor of 2 if possible and needed
        if ( ((float) total_elements / (float) current_size) > my_maximum_bucket_size )
        {
             // Double the size of the hash only if size has not changed inbetween loads
            __TBB_CompareAndSwapW((uintptr_t*)&my_number_of_buckets, 2 * current_size, current_size );
        }
    }
01322 
01323     size_type get_parent(size_type bucket) const
01324     {
01325         // Unsets bucket's most significant turned-on bit
01326         size_type msb = __TBB_Log2((uintptr_t)bucket);
01327         return bucket & ~(size_type(1) << msb);
01328     }
01329 
01330 
01331     // Dynamic sized array (segments)
01333     static size_type segment_index_of( size_type index ) {
01334         return size_type( __TBB_Log2( index|1 ) );
01335     }
01336 
01338     static size_type segment_base( size_type k ) {
01339         return (size_type(1)<<k & ~size_type(1));
01340     }
01341 
01343     static size_type segment_size( size_type k ) {
01344         return k? size_type(1)<<k : 2;
01345     }
01346 
01347     raw_iterator get_bucket(size_type bucket) const {
01348         size_type segment = segment_index_of(bucket);
01349         bucket -= segment_base(segment);
01350         __TBB_ASSERT( my_buckets[segment], "bucket must be in an allocated segment" );
01351         return my_buckets[segment][bucket];
01352     }
01353 
    // Publish 'dummy_head' as the head of 'bucket', allocating the containing
    // segment on first use.  The CAS guarantees only one thread installs a
    // segment pointer; a losing thread deallocates its freshly built copy.
    void set_bucket(size_type bucket, raw_iterator dummy_head) {
        size_type segment = segment_index_of(bucket);
        bucket -= segment_base(segment);

        if (my_buckets[segment] == NULL) {
            size_type sz = segment_size(segment);
            raw_iterator * new_segment = my_allocator.allocate(sz);
            // Zero-fill so is_initialized() sees NULL node pointers
            // (assumes raw_iterator tolerates memset initialization — it is
            // treated as such throughout this file)
            std::memset(new_segment, 0, sz*sizeof(raw_iterator));

            if (__TBB_CompareAndSwapW((void *) &my_buckets[segment], (uintptr_t)new_segment, 0) != 0)
                my_allocator.deallocate(new_segment, sz);
        }

        my_buckets[segment][bucket] = dummy_head;
    }
01369 
01370     bool is_initialized(size_type bucket) const {
01371         size_type segment = segment_index_of(bucket);
01372         bucket -= segment_base(segment);
01373 
01374         if (my_buckets[segment] == NULL)
01375             return false;
01376 
01377         raw_iterator it = my_buckets[segment][bucket];
01378         return (it.get_node_ptr() != NULL);
01379     }
01380 
01381     // Utilities for keys
01382 
01383     // A regular order key has its original hash value reversed and the last bit set
01384     sokey_t split_order_key_regular(sokey_t order_key) const {
01385         return __TBB_ReverseBits(order_key) | 0x1;
01386     }
01387 
01388     // A dummy order key has its original hash value reversed and the last bit unset
01389     sokey_t split_order_key_dummy(sokey_t order_key) const {
01390         return __TBB_ReverseBits(order_key) & ~(0x1);
01391     }
01392 
01393     // Shared variables
01394     size_type                                                     my_number_of_buckets;       // Current table size
01395     solist_t                                                      my_solist;                  // List where all the elements are kept
01396     typename allocator_type::template rebind<raw_iterator>::other my_allocator;               // Allocator object for segments
01397     float                                                         my_maximum_bucket_size;     // Maximum size of the bucket
01398     raw_iterator                                                 *my_buckets[pointers_per_table]; // The segment table
01399 };
01400 #if _MSC_VER
01401 #pragma warning(pop) // warning 4127 -- while (true) has a constant expression in it
01402 #endif
01403 
// Multiplicative hash constant; value chosen per word size (32- vs 64-bit size_t)
static const size_t hash_multiplier = sizeof(size_t)==4? 2654435769U : 11400714819323198485ULL;
01406 } // namespace internal
01409 template<typename T>
01410 inline size_t tbb_hasher( const T& t ) {
01411     return static_cast<size_t>( t ) * internal::hash_multiplier;
01412 }
// Hash a pointer: fold the high bits into the low (alignment) bits of the address.
template<typename P>
inline size_t tbb_hasher( P* ptr ) {
    const size_t address = reinterpret_cast<size_t>( ptr );
    return address ^ (address >> 3);
}
01418 template<typename E, typename S, typename A>
01419 inline size_t tbb_hasher( const std::basic_string<E,S,A>& s ) {
01420     size_t h = 0;
01421     for( const E* c = s.c_str(); *c; ++c )
01422         h = static_cast<size_t>(*c) ^ (h * internal::hash_multiplier);
01423     return h;
01424 }
// Hash a pair by XOR-combining the hashes of its members.
template<typename F, typename S>
inline size_t tbb_hasher( const std::pair<F,S>& p ) {
    const size_t first_hash = tbb_hasher(p.first);
    const size_t second_hash = tbb_hasher(p.second);
    return first_hash ^ second_hash;
}
01429 } // namespace interface5
01430 using interface5::tbb_hasher;
01431 } // namespace tbb
01432 #endif// __TBB_concurrent_unordered_internal_H

Copyright © 2005-2010 Intel Corporation. All Rights Reserved.

Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are registered trademarks or trademarks of Intel Corporation or its subsidiaries in the United States and other countries.

* Other names and brands may be claimed as the property of others.