00029 #ifndef __TBB_flow_graph_H
00030 #define __TBB_flow_graph_H
00031
00032 #include "tbb_stddef.h"
00033 #include "atomic.h"
00034 #include "spin_mutex.h"
00035 #include "null_mutex.h"
00036 #include "spin_rw_mutex.h"
00037 #include "null_rw_mutex.h"
00038 #include "task.h"
00039 #include "concurrent_vector.h"
00040 #include "internal/_aggregator_impl.h"
00041
00042
00043 #if __TBB_CPP11_TUPLE_PRESENT
00044 #include <tuple>
00045 namespace tbb {
00046 namespace flow {
00047 using std::tuple;
00048 using std::tuple_size;
00049 using std::tuple_element;
00050 using std::get;
00051 }
00052 }
00053 #else
00054 #include "compat/tuple"
00055 #endif
00056
00057 #include <list>
00058 #include <queue>
00059
00070 namespace tbb {
00071 namespace flow {
00072
00074 enum concurrency { unlimited = 0, serial = 1 };
00075
00076 namespace interface6 {
00077
00078 namespace internal {
00079 template<typename T, typename M> class successor_cache;
00080 template<typename T, typename M> class broadcast_cache;
00081 template<typename T, typename M> class round_robin_cache;
00082 }
00083
00085 class continue_msg {};
00086
00087 template< typename T > class sender;
00088 template< typename T > class receiver;
00089 class continue_receiver;
00090
00092 template< typename T >
00093 class sender {
00094 public:
00096 typedef T output_type;
00097
00099 typedef receiver<T> successor_type;
00100
00101 virtual ~sender() {}
00102
00104 virtual bool register_successor( successor_type &r ) = 0;
00105
00107 virtual bool remove_successor( successor_type &r ) = 0;
00108
00110 virtual bool try_get( T & ) { return false; }
00111
00113 virtual bool try_reserve( T & ) { return false; }
00114
00116 virtual bool try_release( ) { return false; }
00117
00119 virtual bool try_consume( ) { return false; }
00120 };
00121
00122 template< typename T > class limiter_node;
00123 template< typename R, typename B > class run_and_put_task;
00124
00125 static tbb::task * const SUCCESSFULLY_ENQUEUED = (task *)-1;
00126
00127
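// Note on the task-bypass convention used throughout this header: try_put_task() methods
// return NULL when the put is rejected, SUCCESSFULLY_ENQUEUED when the item is accepted and
// there is nothing further to run, or a real task pointer that the caller should execute or
// enqueue. combine_tasks() merges two such results into one: it hands back a single task for
// the caller and enqueues the other when both are real tasks.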
00128 static inline tbb::task *combine_tasks( tbb::task * left, tbb::task * right) {
00129
00130 if(right == NULL) return left;
00131
00132 if(left == NULL) return right;
00133 if(left == SUCCESSFULLY_ENQUEUED) return right;
00134
00135 if(right != SUCCESSFULLY_ENQUEUED) {
00136
00137 tbb::task::enqueue(*left);
00138 return right;
00139 }
00140 return left;
00141 }
00142
00144 template< typename T >
00145 class receiver {
00146 public:
00148 typedef T input_type;
00149
00151 typedef sender<T> predecessor_type;
00152
00154 virtual ~receiver() {}
00155
00157 bool try_put( const T& t ) {
00158 task *res = try_put_task(t);
00159 if(!res) return false;
00160 if (res != SUCCESSFULLY_ENQUEUED) task::enqueue(*res);
00161 return true;
00162 }
00163
00165 protected:
00166 template< typename R, typename B > friend class run_and_put_task;
00167 template<typename X, typename Y> friend class internal::broadcast_cache;
00168 template<typename X, typename Y> friend class internal::round_robin_cache;
00169 virtual task *try_put_task(const T& t) = 0;
00170 public:
00171
00173 virtual bool register_predecessor( predecessor_type & ) { return false; }
00174
00176 virtual bool remove_predecessor( predecessor_type & ) { return false; }
00177
00178 protected:
00180 template<typename U> friend class limiter_node;
00181 virtual void reset_receiver() = 0;
00182
00183 template<typename TT, typename M>
00184 friend class internal::successor_cache;
00185 virtual bool is_continue_receiver() { return false; }
00186 };
00187
00189
00190 class continue_receiver : public receiver< continue_msg > {
00191 public:
00192
00194 typedef continue_msg input_type;
00195
00197 typedef sender< continue_msg > predecessor_type;
00198
00200 continue_receiver( int number_of_predecessors = 0 ) {
00201 my_predecessor_count = my_initial_predecessor_count = number_of_predecessors;
00202 my_current_count = 0;
00203 }
00204
00206 continue_receiver( const continue_receiver& src ) : receiver<continue_msg>() {
00207 my_predecessor_count = my_initial_predecessor_count = src.my_initial_predecessor_count;
00208 my_current_count = 0;
00209 }
00210
00212 virtual ~continue_receiver() { }
00213
00215 bool register_predecessor( predecessor_type & ) {
00216 spin_mutex::scoped_lock l(my_mutex);
00217 ++my_predecessor_count;
00218 return true;
00219 }
00220
00222
00225 bool remove_predecessor( predecessor_type & ) {
00226 spin_mutex::scoped_lock l(my_mutex);
00227 --my_predecessor_count;
00228 return true;
00229 }
00230
00231 protected:
00232 template< typename R, typename B > friend class run_and_put_task;
00233 template<typename X, typename Y> friend class internal::broadcast_cache;
00234 template<typename X, typename Y> friend class internal::round_robin_cache;
00235
00236 task *try_put_task( const input_type & ) {
00237 {
00238 spin_mutex::scoped_lock l(my_mutex);
00239 if ( ++my_current_count < my_predecessor_count )
00240 return SUCCESSFULLY_ENQUEUED;
00241 else
00242 my_current_count = 0;
00243 }
00244 task * res = execute();
00245 return res;
00246 }
00247
00248 spin_mutex my_mutex;
00249 int my_predecessor_count;
00250 int my_current_count;
00251 int my_initial_predecessor_count;
00252
00253
00254 template<typename U> friend class limiter_node;
00255 void reset_receiver() {
00256 my_current_count = 0;
00257 }
00258
00260
00262 virtual task * execute() = 0;
00263 template<typename TT, typename M>
00264 friend class internal::successor_cache;
00265 bool is_continue_receiver() { return true; }
00266 };
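// A continue_receiver counts continue_msg signals: execute() is invoked only once the node has
// received as many continue_msg's as it has registered predecessors, and the count then resets
// so the node can fire again on the next wave of messages.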
00267
00268 #include "internal/_flow_graph_impl.h"
00269 using namespace internal::graph_policy_namespace;
00270
00271 class graph;
00272 class graph_node;
00273
00274 template <typename GraphContainerType, typename GraphNodeType>
00275 class graph_iterator {
00276 friend class graph;
00277 friend class graph_node;
00278 public:
00279 typedef size_t size_type;
00280 typedef GraphNodeType value_type;
00281 typedef GraphNodeType* pointer;
00282 typedef GraphNodeType& reference;
00283 typedef const GraphNodeType& const_reference;
00284 typedef std::forward_iterator_tag iterator_category;
00285
00287 graph_iterator() : my_graph(NULL), current_node(NULL) {}
00288
00290 graph_iterator(const graph_iterator& other) :
00291 my_graph(other.my_graph), current_node(other.current_node)
00292 {}
00293
00295 graph_iterator& operator=(const graph_iterator& other) {
00296 if (this != &other) {
00297 my_graph = other.my_graph;
00298 current_node = other.current_node;
00299 }
00300 return *this;
00301 }
00302
00304 reference operator*() const;
00305
00307 pointer operator->() const;
00308
00310 bool operator==(const graph_iterator& other) const {
00311 return ((my_graph == other.my_graph) && (current_node == other.current_node));
00312 }
00313
00315 bool operator!=(const graph_iterator& other) const { return !(operator==(other)); }
00316
00318 graph_iterator& operator++() {
00319 internal_forward();
00320 return *this;
00321 }
00322
00324 graph_iterator operator++(int) {
00325 graph_iterator result = *this;
00326 operator++();
00327 return result;
00328 }
00329
00330 private:
00331
00332 GraphContainerType *my_graph;
00333
00334 pointer current_node;
00335
00337 graph_iterator(GraphContainerType *g, bool begin);
00338 void internal_forward();
00339 };
00340
00342
00343 class graph : tbb::internal::no_copy {
00344 friend class graph_node;
00345
00346 template< typename Body >
00347 class run_task : public task {
00348 public:
00349 run_task( Body& body ) : my_body(body) {}
00350 task *execute() {
00351 my_body();
00352 return NULL;
00353 }
00354 private:
00355 Body my_body;
00356 };
00357
00358 template< typename Receiver, typename Body >
00359 class run_and_put_task : public task {
00360 public:
00361 run_and_put_task( Receiver &r, Body& body ) : my_receiver(r), my_body(body) {}
00362 task *execute() {
00363 task *res = my_receiver.try_put_task( my_body() );
00364 if(res == SUCCESSFULLY_ENQUEUED) res = NULL;
00365 return res;
00366 }
00367 private:
00368 Receiver &my_receiver;
00369 Body my_body;
00370 };
00371
00372 public:
00374 explicit graph() : my_nodes(NULL), my_nodes_last(NULL)
00375 {
00376 own_context = true;
00377 cancelled = false;
00378 caught_exception = false;
00379 my_context = new task_group_context();
00380 my_root_task = ( new ( task::allocate_root(*my_context) ) empty_task );
00381 my_root_task->set_ref_count(1);
00382 }
00383
00385 explicit graph(task_group_context& use_this_context) :
00386 my_context(&use_this_context), my_nodes(NULL), my_nodes_last(NULL)
00387 {
00388 own_context = false;
00389 my_root_task = ( new ( task::allocate_root(*my_context) ) empty_task );
00390 my_root_task->set_ref_count(1);
00391 }
00392
00394
00395 ~graph() {
00396 wait_for_all();
00397 my_root_task->set_ref_count(0);
00398 task::destroy( *my_root_task );
00399 if (own_context) delete my_context;
00400 }
00401
00403
00405 void increment_wait_count() {
00406 if (my_root_task)
00407 my_root_task->increment_ref_count();
00408 }
00409
00411
00413 void decrement_wait_count() {
00414 if (my_root_task)
00415 my_root_task->decrement_ref_count();
00416 }
00417
00419
00421 template< typename Receiver, typename Body >
00422 void run( Receiver &r, Body body ) {
00423 task::enqueue( * new ( task::allocate_additional_child_of( *my_root_task ) )
00424 run_and_put_task< Receiver, Body >( r, body ) );
00425 }
00426
00428
00430 template< typename Body >
00431 void run( Body body ) {
00432 task::enqueue( * new ( task::allocate_additional_child_of( *my_root_task ) )
00433 run_task< Body >( body ) );
00434 }
00435
00437
00438 void wait_for_all() {
00439 cancelled = false;
00440 caught_exception = false;
00441 if (my_root_task) {
00442 #if TBB_USE_EXCEPTIONS
00443 try {
00444 #endif
00445 my_root_task->wait_for_all();
00446 cancelled = my_context->is_group_execution_cancelled();
00447 #if TBB_USE_EXCEPTIONS
00448 }
00449 catch(...) {
00450 my_root_task->set_ref_count(1);
00451 my_context->reset();
00452 caught_exception = true;
00453 cancelled = true;
00454 throw;
00455 }
00456 #endif
00457 my_context->reset();
00458 my_root_task->set_ref_count(1);
00459 }
00460 }
00461
00463 task * root_task() {
00464 return my_root_task;
00465 }
00466
00467
00468 template<typename C, typename N>
00469 friend class graph_iterator;
00470
00471
00472 typedef graph_iterator<graph,graph_node> iterator;
00473 typedef graph_iterator<const graph,const graph_node> const_iterator;
00474
00475
00477 iterator begin() { return iterator(this, true); }
00479 iterator end() { return iterator(this, false); }
00481 const_iterator begin() const { return const_iterator(this, true); }
00483 const_iterator end() const { return const_iterator(this, false); }
00485 const_iterator cbegin() const { return const_iterator(this, true); }
00487 const_iterator cend() const { return const_iterator(this, false); }
00488
00490 bool is_cancelled() { return cancelled; }
00491 bool exception_thrown() { return caught_exception; }
00492
00493
00494 void reset();
00495
00496 private:
00497 task *my_root_task;
00498 task_group_context *my_context;
00499 bool own_context;
00500 bool cancelled;
00501 bool caught_exception;
00502
00503 graph_node *my_nodes, *my_nodes_last;
00504
00505 spin_mutex nodelist_mutex;
00506 void register_node(graph_node *n);
00507 void remove_node(graph_node *n);
00508
00509 };
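// Example: spawning arbitrary work through a graph and waiting for it to finish. An
// illustrative sketch only; it assumes C++11 lambda support, and do_something() is a
// placeholder for user code.
//
//   tbb::flow::graph g;
//   g.run( []() { do_something(); } );   // runs as a task tracked by the graph
//   g.wait_for_all();                    // blocks until all spawned work completes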
00510
00511 template <typename C, typename N>
00512 graph_iterator<C,N>::graph_iterator(C *g, bool begin) : my_graph(g), current_node(NULL)
00513 {
00514 if (begin) current_node = my_graph->my_nodes;
00515
00516 }
00517
00518 template <typename C, typename N>
00519 typename graph_iterator<C,N>::reference graph_iterator<C,N>::operator*() const {
00520 __TBB_ASSERT(current_node, "graph_iterator at end");
00521 return *operator->();
00522 }
00523
00524 template <typename C, typename N>
00525 typename graph_iterator<C,N>::pointer graph_iterator<C,N>::operator->() const {
00526 return current_node;
00527 }
00528
00529
00530 template <typename C, typename N>
00531 void graph_iterator<C,N>::internal_forward() {
00532 if (current_node) current_node = current_node->next;
00533 }
00534
00536 class graph_node : tbb::internal::no_assign {
00537 friend class graph;
00538 template<typename C, typename N>
00539 friend class graph_iterator;
00540 protected:
00541 graph& my_graph;
00542 graph_node *next, *prev;
00543 public:
00544 graph_node(graph& g) : my_graph(g) {
00545 my_graph.register_node(this);
00546 }
00547 virtual ~graph_node() {
00548 my_graph.remove_node(this);
00549 }
00550
00551 protected:
00552 virtual void reset() = 0;
00553 };
00554
00555 inline void graph::register_node(graph_node *n) {
00556 n->next = NULL;
00557 {
00558 spin_mutex::scoped_lock lock(nodelist_mutex);
00559 n->prev = my_nodes_last;
00560 if (my_nodes_last) my_nodes_last->next = n;
00561 my_nodes_last = n;
00562 if (!my_nodes) my_nodes = n;
00563 }
00564 }
00565
00566 inline void graph::remove_node(graph_node *n) {
00567 {
00568 spin_mutex::scoped_lock lock(nodelist_mutex);
00569 __TBB_ASSERT(my_nodes && my_nodes_last, "graph::remove_node: Error: no registered nodes");
00570 if (n->prev) n->prev->next = n->next;
00571 if (n->next) n->next->prev = n->prev;
00572 if (my_nodes_last == n) my_nodes_last = n->prev;
00573 if (my_nodes == n) my_nodes = n->next;
00574 }
00575 n->prev = n->next = NULL;
00576 }
00577
00578 inline void graph::reset() {
00579
00580 if(my_context) my_context->reset();
00581 cancelled = false;
00582 caught_exception = false;
00583
00584 for(iterator ii = begin(); ii != end(); ++ii) {
00585 graph_node *my_p = &(*ii);
00586 my_p->reset();
00587 }
00588 }
00589
00590
00591 #include "internal/_flow_graph_node_impl.h"
00592
00594 template < typename Output >
00595 class source_node : public graph_node, public sender< Output > {
00596 protected:
00597 using graph_node::my_graph;
00598 public:
00600 typedef Output output_type;
00601
00603 typedef receiver< Output > successor_type;
00604
00606 template< typename Body >
00607 source_node( graph &g, Body body, bool is_active = true )
00608 : graph_node(g), my_root_task(g.root_task()), my_active(is_active), init_my_active(is_active),
00609 my_body( new internal::source_body_leaf< output_type, Body>(body) ),
00610 my_reserved(false), my_has_cached_item(false)
00611 {
00612 my_successors.set_owner(this);
00613 }
00614
00616 source_node( const source_node& src ) :
00617 graph_node(src.my_graph), sender<Output>(),
00618 my_root_task( src.my_root_task), my_active(src.init_my_active),
00619 init_my_active(src.init_my_active), my_body( src.my_body->clone() ),
00620 my_reserved(false), my_has_cached_item(false)
00621 {
00622 my_successors.set_owner(this);
00623 }
00624
00626 ~source_node() { delete my_body; }
00627
00629 bool register_successor( receiver<output_type> &r ) {
00630 spin_mutex::scoped_lock lock(my_mutex);
00631 my_successors.register_successor(r);
00632 if ( my_active )
00633 spawn_put();
00634 return true;
00635 }
00636
00638 bool remove_successor( receiver<output_type> &r ) {
00639 spin_mutex::scoped_lock lock(my_mutex);
00640 my_successors.remove_successor(r);
00641 return true;
00642 }
00643
00645 bool try_get( output_type &v ) {
00646 spin_mutex::scoped_lock lock(my_mutex);
00647 if ( my_reserved )
00648 return false;
00649
00650 if ( my_has_cached_item ) {
00651 v = my_cached_item;
00652 my_has_cached_item = false;
00653 return true;
00654 }
00655 return false;
00656 }
00657
00659 bool try_reserve( output_type &v ) {
00660 spin_mutex::scoped_lock lock(my_mutex);
00661 if ( my_reserved ) {
00662 return false;
00663 }
00664
00665 if ( my_has_cached_item ) {
00666 v = my_cached_item;
00667 my_reserved = true;
00668 return true;
00669 } else {
00670 return false;
00671 }
00672 }
00673
00675
00676 bool try_release( ) {
00677 spin_mutex::scoped_lock lock(my_mutex);
00678 __TBB_ASSERT( my_reserved && my_has_cached_item, "releasing non-existent reservation" );
00679 my_reserved = false;
00680 if(!my_successors.empty())
00681 spawn_put();
00682 return true;
00683 }
00684
00686 bool try_consume( ) {
00687 spin_mutex::scoped_lock lock(my_mutex);
00688 __TBB_ASSERT( my_reserved && my_has_cached_item, "consuming non-existent reservation" );
00689 my_reserved = false;
00690 my_has_cached_item = false;
00691 if ( !my_successors.empty() ) {
00692 spawn_put();
00693 }
00694 return true;
00695 }
00696
00698 void activate() {
00699 spin_mutex::scoped_lock lock(my_mutex);
00700 my_active = true;
00701 if ( !my_successors.empty() )
00702 spawn_put();
00703 }
00704
00705 template<typename Body>
00706 Body copy_function_object() {
00707 internal::source_body<output_type> &body_ref = *this->my_body;
00708 return dynamic_cast< internal::source_body_leaf<output_type, Body> & >(body_ref).get_body();
00709 }
00710
00711 protected:
00712
00714 void reset() {
00715 my_active = init_my_active;
00716 my_reserved = false;
00717 if(my_has_cached_item) {
00718 my_has_cached_item = false;
00719 }
00720 }
00721
00722 private:
00723 task *my_root_task;
00724 spin_mutex my_mutex;
00725 bool my_active;
00726 bool init_my_active;
00727 internal::source_body<output_type> *my_body;
00728 internal::broadcast_cache< output_type > my_successors;
00729 bool my_reserved;
00730 bool my_has_cached_item;
00731 output_type my_cached_item;
00732
00733
00734 bool try_reserve_apply_body(output_type &v) {
00735 spin_mutex::scoped_lock lock(my_mutex);
00736 if ( my_reserved ) {
00737 return false;
00738 }
00739 if ( !my_has_cached_item && (*my_body)(my_cached_item) )
00740 my_has_cached_item = true;
00741 if ( my_has_cached_item ) {
00742 v = my_cached_item;
00743 my_reserved = true;
00744 return true;
00745 } else {
00746 return false;
00747 }
00748 }
00749
00751 void spawn_put( ) {
00752 task::enqueue( * new ( task::allocate_additional_child_of( *my_root_task ) )
00753 internal::source_task_bypass< source_node< output_type > >( *this ) );
00754 }
00755
00756 friend class internal::source_task_bypass< source_node< output_type > >;
00758 task * apply_body_bypass( ) {
00759 output_type v;
00760 if ( !try_reserve_apply_body(v) )
00761 return NULL;
00762
00763 task *last_task = my_successors.try_put_task(v);
00764 if ( last_task )
00765 try_consume();
00766 else
00767 try_release();
00768 return last_task;
00769 }
00770 };
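// Example: a source_node whose body produces the integers 0..9 and then stops. A sketch
// assuming C++11 lambdas; the node is created inactive so successors can be attached before
// any items are generated.
//
//   tbb::flow::graph g;
//   int count = 0;
//   tbb::flow::source_node<int> src( g,
//       [&count]( int &v ) -> bool {
//           if ( count < 10 ) { v = count++; return true; }   // emit the next value
//           return false;                                     // no more items
//       },
//       false /*is_active*/ );
//   // ... connect successors with make_edge(src, ...) ...
//   src.activate();
//   g.wait_for_all();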
00771
00773 template < typename Input, typename Output = continue_msg, graph_buffer_policy = queueing, typename Allocator=cache_aligned_allocator<Input> >
00774 class function_node : public graph_node, public internal::function_input<Input,Output,Allocator>, public internal::function_output<Output> {
00775 protected:
00776 using graph_node::my_graph;
00777 public:
00778 typedef Input input_type;
00779 typedef Output output_type;
00780 typedef sender< input_type > predecessor_type;
00781 typedef receiver< output_type > successor_type;
00782 typedef internal::function_input<input_type,output_type,Allocator> fInput_type;
00783 typedef internal::function_output<output_type> fOutput_type;
00784
00786 template< typename Body >
00787 function_node( graph &g, size_t concurrency, Body body ) :
00788 graph_node(g), internal::function_input<input_type,output_type,Allocator>(g, concurrency, body)
00789 {}
00790
00792 function_node( const function_node& src ) :
00793 graph_node(src.my_graph), internal::function_input<input_type,output_type,Allocator>( src ),
00794 fOutput_type()
00795 {}
00796
00797 protected:
00798 template< typename R, typename B > friend class run_and_put_task;
00799 template<typename X, typename Y> friend class internal::broadcast_cache;
00800 template<typename X, typename Y> friend class internal::round_robin_cache;
00801 using fInput_type::try_put_task;
00802
00803
00804 void reset() {fInput_type::reset_function_input(); }
00805
00806 internal::broadcast_cache<output_type> &successors () { return fOutput_type::my_successors; }
00807 };
00808
00810 template < typename Input, typename Output, typename Allocator >
00811 class function_node<Input,Output,queueing,Allocator> : public graph_node, public internal::function_input<Input,Output,Allocator>, public internal::function_output<Output> {
00812 protected:
00813 using graph_node::my_graph;
00814 public:
00815 typedef Input input_type;
00816 typedef Output output_type;
00817 typedef sender< input_type > predecessor_type;
00818 typedef receiver< output_type > successor_type;
00819 typedef internal::function_input<input_type,output_type,Allocator> fInput_type;
00820 typedef internal::function_input_queue<input_type, Allocator> queue_type;
00821 typedef internal::function_output<output_type> fOutput_type;
00822
00824 template< typename Body >
00825 function_node( graph &g, size_t concurrency, Body body ) :
00826 graph_node(g), fInput_type( g, concurrency, body, new queue_type() )
00827 {}
00828
00830 function_node( const function_node& src ) :
00831 graph_node(src.my_graph), fInput_type( src, new queue_type() ), fOutput_type()
00832 {}
00833
00834 protected:
00835 template< typename R, typename B > friend class run_and_put_task;
00836 template<typename X, typename Y> friend class internal::broadcast_cache;
00837 template<typename X, typename Y> friend class internal::round_robin_cache;
00838 using fInput_type::try_put_task;
00839
00840 void reset() { fInput_type::reset_function_input(); }
00841
00842 internal::broadcast_cache<output_type> &successors () { return fOutput_type::my_successors; }
00843 };
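// Example: a small pipeline of function_nodes. A sketch assuming C++11 lambdas; unlimited and
// serial are the concurrency constants declared near the top of this file.
//
//   tbb::flow::graph g;
//   tbb::flow::function_node<int,int> square( g, tbb::flow::unlimited,
//       []( int v ) { return v * v; } );
//   tbb::flow::function_node<int,int> add_one( g, tbb::flow::serial,
//       []( int v ) { return v + 1; } );
//   tbb::flow::make_edge( square, add_one );
//   square.try_put( 3 );        // add_one eventually receives 9 and produces 10
//   g.wait_for_all();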
00844
00845 #include "tbb/internal/_flow_graph_types_impl.h"
00846
00848
00849 template < typename Input, typename Output, graph_buffer_policy = queueing, typename Allocator=cache_aligned_allocator<Input> >
00850 class multifunction_node :
00851 public graph_node,
00852 public internal::multifunction_input
00853 <
00854 Input,
00855 typename internal::wrap_tuple_elements<
00856 tbb::flow::tuple_size<Output>::value,
00857 internal::multifunction_output,
00858 Output
00859 >::type,
00860 Allocator
00861 > {
00862 protected:
00863 using graph_node::my_graph;
00864 private:
00865 static const int N = tbb::flow::tuple_size<Output>::value;
00866 public:
00867 typedef Input input_type;
00868 typedef typename internal::wrap_tuple_elements<N,internal::multifunction_output, Output>::type output_ports_type;
00869 private:
00870 typedef typename internal::multifunction_input<input_type, output_ports_type, Allocator> base_type;
00871 typedef typename internal::function_input_queue<input_type,Allocator> queue_type;
00872 public:
00873 template<typename Body>
00874 multifunction_node( graph &g, size_t concurrency, Body body ) :
00875 graph_node(g), base_type(g,concurrency, body)
00876 {}
00877 multifunction_node( const multifunction_node &other) :
00878 graph_node(other.my_graph), base_type(other)
00879 {}
00880
00881 protected:
00882 void reset() { base_type::reset(); }
00883 };
00884
00885 template < typename Input, typename Output, typename Allocator >
00886 class multifunction_node<Input,Output,queueing,Allocator> : public graph_node, public internal::multifunction_input<Input,
00887 typename internal::wrap_tuple_elements<tbb::flow::tuple_size<Output>::value, internal::multifunction_output, Output>::type, Allocator> {
00888 protected:
00889 using graph_node::my_graph;
00890 static const int N = tbb::flow::tuple_size<Output>::value;
00891 public:
00892 typedef Input input_type;
00893 typedef typename internal::wrap_tuple_elements<N, internal::multifunction_output, Output>::type output_ports_type;
00894 private:
00895 typedef typename internal::multifunction_input<input_type, output_ports_type, Allocator> base_type;
00896 typedef typename internal::function_input_queue<input_type,Allocator> queue_type;
00897 public:
00898 template<typename Body>
00899 multifunction_node( graph &g, size_t concurrency, Body body) :
00900 graph_node(g), base_type(g,concurrency, body, new queue_type())
00901 {}
00902 multifunction_node( const multifunction_node &other) :
00903 graph_node(other.my_graph), base_type(other, new queue_type())
00904 {}
00905
00906 protected:
00907 void reset() { base_type::reset(); }
00908 };
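// Example: a multifunction_node that routes even and odd integers to different output ports.
// A sketch assuming C++11 lambdas and an existing graph g; even_sink and odd_sink stand in for
// any receiver<int>. Inside the body, ports are reached through tbb::flow::get<> on the
// output_ports_type, and edges are attached with output_port<>.
//
//   typedef tbb::flow::multifunction_node< int, tbb::flow::tuple<int,int> > mf_t;
//   mf_t route( g, tbb::flow::unlimited,
//       []( const int &v, mf_t::output_ports_type &ports ) {
//           if ( v % 2 == 0 ) tbb::flow::get<0>( ports ).try_put( v );
//           else              tbb::flow::get<1>( ports ).try_put( v );
//       } );
//   tbb::flow::make_edge( tbb::flow::output_port<0>( route ), even_sink );
//   tbb::flow::make_edge( tbb::flow::output_port<1>( route ), odd_sink );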
00909
00911
00912
00913 template<typename TupleType, typename Allocator=cache_aligned_allocator<TupleType> >
00914 class split_node : public multifunction_node<TupleType, TupleType, rejecting, Allocator> {
00915 static const int N = tbb::flow::tuple_size<TupleType>::value;
00916 typedef multifunction_node<TupleType,TupleType,rejecting,Allocator> base_type;
00917 public:
00918 typedef typename base_type::output_ports_type output_ports_type;
00919 private:
00920 struct splitting_body {
00921 void operator()(const TupleType& t, output_ports_type &p) {
00922 internal::emit_element<N>::emit_this(t, p);
00923 }
00924 };
00925 public:
00926 typedef TupleType input_type;
00927 typedef Allocator allocator_type;
00928 split_node(graph &g) : base_type(g, unlimited, splitting_body()) {}
00929 split_node( const split_node & other) : base_type(other) {}
00930 };
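// Example: splitting a tuple into its elements. A sketch assuming an existing graph g; the
// queue_nodes stand in for any receivers of the matching element types.
//
//   tbb::flow::split_node< tbb::flow::tuple<int,float> > s( g );
//   tbb::flow::queue_node<int>   qi( g );
//   tbb::flow::queue_node<float> qf( g );
//   tbb::flow::make_edge( tbb::flow::output_port<0>( s ), qi );
//   tbb::flow::make_edge( tbb::flow::output_port<1>( s ), qf );
//   s.try_put( tbb::flow::tuple<int,float>( 1, 2.0f ) );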
00931
00933 template <typename Output>
00934 class continue_node : public graph_node, public internal::continue_input<Output>, public internal::function_output<Output> {
00935 protected:
00936 using graph_node::my_graph;
00937 public:
00938 typedef continue_msg input_type;
00939 typedef Output output_type;
00940 typedef sender< input_type > predecessor_type;
00941 typedef receiver< output_type > successor_type;
00942 typedef internal::continue_input<Output> fInput_type;
00943 typedef internal::function_output<output_type> fOutput_type;
00944
00946 template <typename Body >
00947 continue_node( graph &g, Body body ) :
00948 graph_node(g), internal::continue_input<output_type>( g, body )
00949 {}
00950
00952 template <typename Body >
00953 continue_node( graph &g, int number_of_predecessors, Body body ) :
00954 graph_node(g), internal::continue_input<output_type>( g, number_of_predecessors, body )
00955 {}
00956
00958 continue_node( const continue_node& src ) :
00959 graph_node(src.my_graph), internal::continue_input<output_type>(src),
00960 internal::function_output<Output>()
00961 {}
00962
00963 protected:
00964 template< typename R, typename B > friend class run_and_put_task;
00965 template<typename X, typename Y> friend class internal::broadcast_cache;
00966 template<typename X, typename Y> friend class internal::round_robin_cache;
00967 using fInput_type::try_put_task;
00968
00969 void reset() { internal::continue_input<Output>::reset_receiver(); }
00970
00971 internal::broadcast_cache<output_type> &successors () { return fOutput_type::my_successors; }
00972 };
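// Example: expressing a simple dependency with continue_nodes. A sketch assuming C++11
// lambdas; work_a() and work_b() are placeholders for user code. Node b does not run until it
// has received a continue_msg from every predecessor, here just a.
//
//   tbb::flow::graph g;
//   tbb::flow::continue_node<tbb::flow::continue_msg> a( g,
//       []( const tbb::flow::continue_msg & ) { work_a(); return tbb::flow::continue_msg(); } );
//   tbb::flow::continue_node<tbb::flow::continue_msg> b( g,
//       []( const tbb::flow::continue_msg & ) { work_b(); return tbb::flow::continue_msg(); } );
//   tbb::flow::make_edge( a, b );
//   a.try_put( tbb::flow::continue_msg() );
//   g.wait_for_all();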
00973
00974 template< typename T >
00975 class overwrite_node : public graph_node, public receiver<T>, public sender<T> {
00976 protected:
00977 using graph_node::my_graph;
00978 public:
00979 typedef T input_type;
00980 typedef T output_type;
00981 typedef sender< input_type > predecessor_type;
00982 typedef receiver< output_type > successor_type;
00983
00984 overwrite_node(graph &g) : graph_node(g), my_buffer_is_valid(false) {
00985 my_successors.set_owner( this );
00986 }
00987
00988
00989 overwrite_node( const overwrite_node& src ) :
00990 graph_node(src.my_graph), receiver<T>(), sender<T>(), my_buffer_is_valid(false)
00991 {
00992 my_successors.set_owner( this );
00993 }
00994
00995 ~overwrite_node() {}
00996
00997 bool register_successor( successor_type &s ) {
00998 spin_mutex::scoped_lock l( my_mutex );
00999 if ( my_buffer_is_valid ) {
01000
01001 if ( s.try_put( my_buffer ) || !s.register_predecessor( *this ) ) {
01002
01003 my_successors.register_successor( s );
01004 return true;
01005 } else {
01006
01007 return false;
01008 }
01009 } else {
01010
01011 my_successors.register_successor( s );
01012 return true;
01013 }
01014 }
01015
01016 bool remove_successor( successor_type &s ) {
01017 spin_mutex::scoped_lock l( my_mutex );
01018 my_successors.remove_successor(s);
01019 return true;
01020 }
01021
01022 bool try_get( T &v ) {
01023 spin_mutex::scoped_lock l( my_mutex );
01024 if ( my_buffer_is_valid ) {
01025 v = my_buffer;
01026 return true;
01027 } else {
01028 return false;
01029 }
01030 }
01031
01032 bool is_valid() {
01033 spin_mutex::scoped_lock l( my_mutex );
01034 return my_buffer_is_valid;
01035 }
01036
01037 void clear() {
01038 spin_mutex::scoped_lock l( my_mutex );
01039 my_buffer_is_valid = false;
01040 }
01041
01042 protected:
01043 template< typename R, typename B > friend class run_and_put_task;
01044 template<typename X, typename Y> friend class internal::broadcast_cache;
01045 template<typename X, typename Y> friend class internal::round_robin_cache;
01046 task * try_put_task( const T &v ) {
01047 spin_mutex::scoped_lock l( my_mutex );
01048 my_buffer = v;
01049 my_buffer_is_valid = true;
01050 task * rtask = my_successors.try_put_task(v);
01051 if(!rtask) rtask = SUCCESSFULLY_ENQUEUED;
01052 return rtask;
01053 }
01054
01055 void reset() { my_buffer_is_valid = false; }
01056
01057 spin_mutex my_mutex;
01058 internal::broadcast_cache< T, null_rw_mutex > my_successors;
01059 T my_buffer;
01060 bool my_buffer_is_valid;
01061 void reset_receiver() {}
01062 };
01063
01064 template< typename T >
01065 class write_once_node : public overwrite_node<T> {
01066 public:
01067 typedef T input_type;
01068 typedef T output_type;
01069 typedef sender< input_type > predecessor_type;
01070 typedef receiver< output_type > successor_type;
01071
01073 write_once_node(graph& g) : overwrite_node<T>(g) {}
01074
01076 write_once_node( const write_once_node& src ) : overwrite_node<T>(src) {}
01077
01078 protected:
01079 template< typename R, typename B > friend class run_and_put_task;
01080 template<typename X, typename Y> friend class internal::broadcast_cache;
01081 template<typename X, typename Y> friend class internal::round_robin_cache;
01082 task *try_put_task( const T &v ) {
01083 spin_mutex::scoped_lock l( this->my_mutex );
01084 if ( this->my_buffer_is_valid ) {
01085 return NULL;
01086 } else {
01087 this->my_buffer = v;
01088 this->my_buffer_is_valid = true;
01089 task *res = this->my_successors.try_put_task(v);
01090 if(!res) res = SUCCESSFULLY_ENQUEUED;
01091 return res;
01092 }
01093 }
01094 };
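// Usage note: an overwrite_node keeps the most recently received value, broadcasts each new
// value to its successors, and keeps serving it through try_get() until clear() is called; a
// write_once_node additionally rejects every put after the first one. A sketch assuming an
// existing graph g:
//
//   tbb::flow::overwrite_node<int> latest( g );
//   latest.try_put( 42 );
//   int v = 0;
//   if ( latest.try_get( v ) ) { /* v == 42; the stored value remains valid */ }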
01095
01097 template <typename T>
01098 class broadcast_node : public graph_node, public receiver<T>, public sender<T> {
01099 protected:
01100 using graph_node::my_graph;
01101 private:
01102 internal::broadcast_cache<T> my_successors;
01103 public:
01104 typedef T input_type;
01105 typedef T output_type;
01106 typedef sender< input_type > predecessor_type;
01107 typedef receiver< output_type > successor_type;
01108
01109 broadcast_node(graph& g) : graph_node(g) {
01110 my_successors.set_owner( this );
01111 }
01112
01113
01114 broadcast_node( const broadcast_node& src ) :
01115 graph_node(src.my_graph), receiver<T>(), sender<T>()
01116 {
01117 my_successors.set_owner( this );
01118 }
01119
01121 virtual bool register_successor( receiver<T> &r ) {
01122 my_successors.register_successor( r );
01123 return true;
01124 }
01125
01127 virtual bool remove_successor( receiver<T> &r ) {
01128 my_successors.remove_successor( r );
01129 return true;
01130 }
01131
01132 protected:
01133 template< typename R, typename B > friend class run_and_put_task;
01134 template<typename X, typename Y> friend class internal::broadcast_cache;
01135 template<typename X, typename Y> friend class internal::round_robin_cache;
01137 task *try_put_task(const T& t) {
01138 task *new_task = my_successors.try_put_task(t);
01139 if(!new_task) new_task = SUCCESSFULLY_ENQUEUED;
01140 return new_task;
01141 }
01142
01143 void reset() {}
01144 void reset_receiver() {}
01145 };
01146
01147 #include "internal/_flow_graph_item_buffer_impl.h"
01148
01150 template <typename T, typename A=cache_aligned_allocator<T> >
01151 class buffer_node : public graph_node, public reservable_item_buffer<T, A>, public receiver<T>, public sender<T> {
01152 protected:
01153 using graph_node::my_graph;
01154 public:
01155 typedef T input_type;
01156 typedef T output_type;
01157 typedef sender< input_type > predecessor_type;
01158 typedef receiver< output_type > successor_type;
01159 typedef buffer_node<T, A> my_class;
01160 protected:
01161 typedef size_t size_type;
01162 internal::round_robin_cache< T, null_rw_mutex > my_successors;
01163
01164 task *my_parent;
01165
01166 friend class internal::forward_task_bypass< buffer_node< T, A > >;
01167
01168 enum op_type {reg_succ, rem_succ, req_item, res_item, rel_res, con_res, put_item, try_fwd_task };
01169 enum op_stat {WAIT=0, SUCCEEDED, FAILED};
01170
01171
01172 class buffer_operation : public internal::aggregated_operation< buffer_operation > {
01173 public:
01174 char type;
01175 T *elem;
01176 task * ltask;
01177 successor_type *r;
01178 buffer_operation(const T& e, op_type t) : type(char(t)), elem(const_cast<T*>(&e)) , ltask(NULL) , r(NULL) {}
01179 buffer_operation(op_type t) : type(char(t)) , ltask(NULL) , r(NULL) {}
01180 };
01181
01182 bool forwarder_busy;
01183 typedef internal::aggregating_functor<my_class, buffer_operation> my_handler;
01184 friend class internal::aggregating_functor<my_class, buffer_operation>;
01185 internal::aggregator< my_handler, buffer_operation> my_aggregator;
01186
01187 virtual void handle_operations(buffer_operation *op_list) {
01188 buffer_operation *tmp = NULL;
01189 bool try_forwarding=false;
01190 while (op_list) {
01191 tmp = op_list;
01192 op_list = op_list->next;
01193 switch (tmp->type) {
01194 case reg_succ: internal_reg_succ(tmp); try_forwarding = true; break;
01195 case rem_succ: internal_rem_succ(tmp); break;
01196 case req_item: internal_pop(tmp); break;
01197 case res_item: internal_reserve(tmp); break;
01198 case rel_res: internal_release(tmp); try_forwarding = true; break;
01199 case con_res: internal_consume(tmp); try_forwarding = true; break;
01200 case put_item: internal_push(tmp); try_forwarding = true; break;
01201 case try_fwd_task: internal_forward_task(tmp); break;
01202 }
01203 }
01204 if (try_forwarding && !forwarder_busy) {
01205 forwarder_busy = true;
01206 task *new_task = new(task::allocate_additional_child_of(*my_parent)) internal::forward_task_bypass< buffer_node<input_type, A> >(*this);
01209
01210
01211 tbb::task *z = tmp->ltask;
01212 tmp->ltask = combine_tasks(z, new_task);
01213 }
01214 }
01215
01216 inline task *grab_forwarding_task( buffer_operation &op_data) {
01217 return op_data.ltask;
01218 }
01219
01220 inline bool enqueue_forwarding_task(buffer_operation &op_data) {
01221 task *ft = grab_forwarding_task(op_data);
01222 if(ft) {
01223 task::enqueue(*ft);
01224 return true;
01225 }
01226 return false;
01227 }
01228
01230 virtual task *forward_task() {
01231 buffer_operation op_data(try_fwd_task);
01232 task *last_task = NULL;
01233 do {
01234 op_data.status = WAIT;
01235 op_data.ltask = NULL;
01236 my_aggregator.execute(&op_data);
01237 tbb::task *xtask = op_data.ltask;
01238 last_task = combine_tasks(last_task, xtask);
01239 } while (op_data.status == SUCCEEDED);
01240 return last_task;
01241 }
01242
01244 virtual void internal_reg_succ(buffer_operation *op) {
01245 my_successors.register_successor(*(op->r));
01246 __TBB_store_with_release(op->status, SUCCEEDED);
01247 }
01248
01250 virtual void internal_rem_succ(buffer_operation *op) {
01251 my_successors.remove_successor(*(op->r));
01252 __TBB_store_with_release(op->status, SUCCEEDED);
01253 }
01254
01256 virtual void internal_forward_task(buffer_operation *op) {
01257 if (this->my_reserved || !this->item_valid(this->my_tail-1)) {
01258 __TBB_store_with_release(op->status, FAILED);
01259 this->forwarder_busy = false;
01260 return;
01261 }
01262 T i_copy;
01263 task * last_task = NULL;
01264 size_type counter = my_successors.size();
01265
01266 while (counter>0 && !this->buffer_empty() && this->item_valid(this->my_tail-1)) {
01267 this->fetch_back(i_copy);
01268 task *new_task = my_successors.try_put_task(i_copy);
01269 last_task = combine_tasks(last_task, new_task);
01270 if(new_task) {
01271 this->invalidate_back();
01272 --(this->my_tail);
01273 }
01274 --counter;
01275 }
01276 op->ltask = last_task;
01277 if (last_task && !counter) {
01278 __TBB_store_with_release(op->status, SUCCEEDED);
01279 }
01280 else {
01281 __TBB_store_with_release(op->status, FAILED);
01282 forwarder_busy = false;
01283 }
01284 }
01285
01286 virtual void internal_push(buffer_operation *op) {
01287 this->push_back(*(op->elem));
01288 __TBB_store_with_release(op->status, SUCCEEDED);
01289 }
01290
01291 virtual void internal_pop(buffer_operation *op) {
01292 if(this->pop_back(*(op->elem))) {
01293 __TBB_store_with_release(op->status, SUCCEEDED);
01294 }
01295 else {
01296 __TBB_store_with_release(op->status, FAILED);
01297 }
01298 }
01299
01300 virtual void internal_reserve(buffer_operation *op) {
01301 if(this->reserve_front(*(op->elem))) {
01302 __TBB_store_with_release(op->status, SUCCEEDED);
01303 }
01304 else {
01305 __TBB_store_with_release(op->status, FAILED);
01306 }
01307 }
01308
01309 virtual void internal_consume(buffer_operation *op) {
01310 this->consume_front();
01311 __TBB_store_with_release(op->status, SUCCEEDED);
01312 }
01313
01314 virtual void internal_release(buffer_operation *op) {
01315 this->release_front();
01316 __TBB_store_with_release(op->status, SUCCEEDED);
01317 }
01318
01319 public:
01321 buffer_node( graph &g ) : graph_node(g), reservable_item_buffer<T>(),
01322 my_parent( g.root_task() ), forwarder_busy(false) {
01323 my_successors.set_owner(this);
01324 my_aggregator.initialize_handler(my_handler(this));
01325 }
01326
01328 buffer_node( const buffer_node& src ) : graph_node(src.my_graph),
01329 reservable_item_buffer<T>(), receiver<T>(), sender<T>(),
01330 my_parent( src.my_parent ) {
01331 forwarder_busy = false;
01332 my_successors.set_owner(this);
01333 my_aggregator.initialize_handler(my_handler(this));
01334 }
01335
01336 virtual ~buffer_node() {}
01337
01338
01339
01340
01341
01343
01344 bool register_successor( receiver<output_type> &r ) {
01345 buffer_operation op_data(reg_succ);
01346 op_data.r = &r;
01347 my_aggregator.execute(&op_data);
01348 (void)enqueue_forwarding_task(op_data);
01349 return true;
01350 }
01351
01353
01355 bool remove_successor( receiver<output_type> &r ) {
01356 r.remove_predecessor(*this);
01357 buffer_operation op_data(rem_succ);
01358 op_data.r = &r;
01359 my_aggregator.execute(&op_data);
01360
01361
01362
01363 (void)enqueue_forwarding_task(op_data);
01364 return true;
01365 }
01366
01368
01370 bool try_get( T &v ) {
01371 buffer_operation op_data(req_item);
01372 op_data.elem = &v;
01373 my_aggregator.execute(&op_data);
01374 (void)enqueue_forwarding_task(op_data);
01375 return (op_data.status==SUCCEEDED);
01376 }
01377
01379
01381 bool try_reserve( T &v ) {
01382 buffer_operation op_data(res_item);
01383 op_data.elem = &v;
01384 my_aggregator.execute(&op_data);
01385 (void)enqueue_forwarding_task(op_data);
01386 return (op_data.status==SUCCEEDED);
01387 }
01388
01390
01391 bool try_release() {
01392 buffer_operation op_data(rel_res);
01393 my_aggregator.execute(&op_data);
01394 (void)enqueue_forwarding_task(op_data);
01395 return true;
01396 }
01397
01399
01400 bool try_consume() {
01401 buffer_operation op_data(con_res);
01402 my_aggregator.execute(&op_data);
01403 (void)enqueue_forwarding_task(op_data);
01404 return true;
01405 }
01406
01407 protected:
01408
01409 template< typename R, typename B > friend class run_and_put_task;
01410 template<typename X, typename Y> friend class internal::broadcast_cache;
01411 template<typename X, typename Y> friend class internal::round_robin_cache;
01413 task *try_put_task(const T &t) {
01414 buffer_operation op_data(t, put_item);
01415 my_aggregator.execute(&op_data);
01416 task *ft = grab_forwarding_task(op_data);
01417 if(!ft) {
01418 ft = SUCCESSFULLY_ENQUEUED;
01419 }
01420 return ft;
01421 }
01422
01423 void reset() {
01424 reservable_item_buffer<T, A>::reset();
01425 forwarder_busy = false;
01426 }
01427
01428 void reset_receiver() {
01429
01430 }
01431
01432 };
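// Implementation note: buffer_node funnels all of its operations (puts, gets, reservations,
// successor registration) through its internal aggregator, which serializes them without a
// conventional lock. Items are forwarded to successors in no particular order. A small usage
// sketch, assuming an existing graph g:
//
//   tbb::flow::buffer_node<int> buf( g );
//   buf.try_put( 1 );  buf.try_put( 2 );
//   int x = 0;
//   buf.try_get( x );   // retrieves one buffered item; ordering is unspecified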
01433
01435 template <typename T, typename A=cache_aligned_allocator<T> >
01436 class queue_node : public buffer_node<T, A> {
01437 protected:
01438 typedef typename buffer_node<T, A>::size_type size_type;
01439 typedef typename buffer_node<T, A>::buffer_operation queue_operation;
01440
01441 enum op_stat {WAIT=0, SUCCEEDED, FAILED};
01442
01443 void internal_forward_task(queue_operation *op) {
01444 if (this->my_reserved || !this->item_valid(this->my_head)) {
01445 __TBB_store_with_release(op->status, FAILED);
01446 this->forwarder_busy = false;
01447 return;
01448 }
01449 T i_copy;
01450 task *last_task = NULL;
01451 size_type counter = this->my_successors.size();
01452
01453 while (counter>0 && this->item_valid(this->my_head)) {
01454 this->fetch_front(i_copy);
01455 task *new_task = this->my_successors.try_put_task(i_copy);
01456 if(new_task) {
01457 this->invalidate_front();
01458 ++(this->my_head);
01459 last_task = combine_tasks(last_task, new_task);
01460 }
01461 --counter;
01462 }
01463 op->ltask = last_task;
01464 if (last_task && !counter)
01465 __TBB_store_with_release(op->status, SUCCEEDED);
01466 else {
01467 __TBB_store_with_release(op->status, FAILED);
01468 this->forwarder_busy = false;
01469 }
01470 }
01471
01472 void internal_pop(queue_operation *op) {
01473 if ( this->my_reserved || !this->item_valid(this->my_head)){
01474 __TBB_store_with_release(op->status, FAILED);
01475 }
01476 else {
01477 this->pop_front(*(op->elem));
01478 __TBB_store_with_release(op->status, SUCCEEDED);
01479 }
01480 }
01481 void internal_reserve(queue_operation *op) {
01482 if (this->my_reserved || !this->item_valid(this->my_head)) {
01483 __TBB_store_with_release(op->status, FAILED);
01484 }
01485 else {
01486 this->my_reserved = true;
01487 this->fetch_front(*(op->elem));
01488 this->invalidate_front();
01489 __TBB_store_with_release(op->status, SUCCEEDED);
01490 }
01491 }
01492 void internal_consume(queue_operation *op) {
01493 this->consume_front();
01494 __TBB_store_with_release(op->status, SUCCEEDED);
01495 }
01496
01497 public:
01498 typedef T input_type;
01499 typedef T output_type;
01500 typedef sender< input_type > predecessor_type;
01501 typedef receiver< output_type > successor_type;
01502
01504 queue_node( graph &g ) : buffer_node<T, A>(g) {}
01505
01507 queue_node( const queue_node& src) : buffer_node<T, A>(src) {}
01508 };
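// Unlike buffer_node, queue_node forwards and pops items in FIFO order. A sketch assuming an
// existing graph g:
//
//   tbb::flow::queue_node<int> q( g );
//   q.try_put( 1 );  q.try_put( 2 );
//   int x = 0;
//   q.try_get( x );    // x == 1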
01509
01511 template< typename T, typename A=cache_aligned_allocator<T> >
01512 class sequencer_node : public queue_node<T, A> {
01513 internal::function_body< T, size_t > *my_sequencer;
01514 public:
01515 typedef T input_type;
01516 typedef T output_type;
01517 typedef sender< input_type > predecessor_type;
01518 typedef receiver< output_type > successor_type;
01519
01521 template< typename Sequencer >
01522 sequencer_node( graph &g, const Sequencer& s ) : queue_node<T, A>(g),
01523 my_sequencer(new internal::function_body_leaf< T, size_t, Sequencer>(s) ) {}
01524
01526 sequencer_node( const sequencer_node& src ) : queue_node<T, A>(src),
01527 my_sequencer( src.my_sequencer->clone() ) {}
01528
01530 ~sequencer_node() { delete my_sequencer; }
01531 protected:
01532 typedef typename buffer_node<T, A>::size_type size_type;
01533 typedef typename buffer_node<T, A>::buffer_operation sequencer_operation;
01534
01535 enum op_stat {WAIT=0, SUCCEEDED, FAILED};
01536
01537 private:
01538 void internal_push(sequencer_operation *op) {
01539 size_type tag = (*my_sequencer)(*(op->elem));
01540
01541 this->my_tail = (tag+1 > this->my_tail) ? tag+1 : this->my_tail;
01542
01543 if(this->size() > this->capacity())
01544 this->grow_my_array(this->size());
01545 this->item(tag) = std::make_pair( *(op->elem), true );
01546 __TBB_store_with_release(op->status, SUCCEEDED);
01547 }
01548 };
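// Example: a sequencer_node forwards items in the order given by their sequence numbers
// 0,1,2,... The user-supplied Sequencer maps an item to its sequence number. The msg_t type
// and its seq field below are hypothetical.
//
//   struct ordering { size_t operator()( const msg_t &m ) const { return m.seq; } };
//   tbb::flow::sequencer_node<msg_t> in_order( g, ordering() );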
01549
01551 template< typename T, typename Compare = std::less<T>, typename A=cache_aligned_allocator<T> >
01552 class priority_queue_node : public buffer_node<T, A> {
01553 public:
01554 typedef T input_type;
01555 typedef T output_type;
01556 typedef buffer_node<T,A> base_type;
01557 typedef sender< input_type > predecessor_type;
01558 typedef receiver< output_type > successor_type;
01559
01561 priority_queue_node( graph &g ) : buffer_node<T, A>(g), mark(0) {}
01562
01564 priority_queue_node( const priority_queue_node &src ) : buffer_node<T, A>(src), mark(0) {}
01565
01566 protected:
01567
01568 void reset() {
01569 mark = 0;
01570 base_type::reset();
01571 }
01572
01573 typedef typename buffer_node<T, A>::size_type size_type;
01574 typedef typename buffer_node<T, A>::item_type item_type;
01575 typedef typename buffer_node<T, A>::buffer_operation prio_operation;
01576
01577 enum op_stat {WAIT=0, SUCCEEDED, FAILED};
01578
01579 void handle_operations(prio_operation *op_list) {
01580 prio_operation *tmp = op_list ;
01581 bool try_forwarding=false;
01582 while (op_list) {
01583 tmp = op_list;
01584 op_list = op_list->next;
01585 switch (tmp->type) {
01586 case buffer_node<T, A>::reg_succ: this->internal_reg_succ(tmp); try_forwarding = true; break;
01587 case buffer_node<T, A>::rem_succ: this->internal_rem_succ(tmp); break;
01588 case buffer_node<T, A>::put_item: internal_push(tmp); try_forwarding = true; break;
01589 case buffer_node<T, A>::try_fwd_task: internal_forward_task(tmp); break;
01590 case buffer_node<T, A>::rel_res: internal_release(tmp); try_forwarding = true; break;
01591 case buffer_node<T, A>::con_res: internal_consume(tmp); try_forwarding = true; break;
01592 case buffer_node<T, A>::req_item: internal_pop(tmp); break;
01593 case buffer_node<T, A>::res_item: internal_reserve(tmp); break;
01594 }
01595 }
01596
01597 if (mark<this->my_tail) heapify();
01598 if (try_forwarding && !this->forwarder_busy) {
01599 this->forwarder_busy = true;
01600 task *new_task = new(task::allocate_additional_child_of(*(this->my_parent))) internal::forward_task_bypass< buffer_node<input_type, A> >(*this);
01603
01604
01605 tbb::task *tmp1 = tmp->ltask;
01606 tmp->ltask = combine_tasks(tmp1, new_task);
01607 }
01608 }
01609
01611 void internal_forward_task(prio_operation *op) {
01612 T i_copy;
01613 task * last_task = NULL;
01614 size_type counter = this->my_successors.size();
01615
01616 if (this->my_reserved || this->my_tail == 0) {
01617 __TBB_store_with_release(op->status, FAILED);
01618 this->forwarder_busy = false;
01619 return;
01620 }
01621
01622 while (counter>0 && this->my_tail > 0) {
01623 i_copy = this->my_array[0].first;
01624 task * new_task = this->my_successors.try_put_task(i_copy);
01625 last_task = combine_tasks(last_task, new_task);
01626 if ( new_task ) {
01627 if (mark == this->my_tail) --mark;
01628 --(this->my_tail);
01629 this->my_array[0].first=this->my_array[this->my_tail].first;
01630 if (this->my_tail > 1)
01631 reheap();
01632 }
01633 --counter;
01634 }
01635 op->ltask = last_task;
01636 if (last_task && !counter)
01637 __TBB_store_with_release(op->status, SUCCEEDED);
01638 else {
01639 __TBB_store_with_release(op->status, FAILED);
01640 this->forwarder_busy = false;
01641 }
01642 }
01643
01644 void internal_push(prio_operation *op) {
01645 if ( this->my_tail >= this->my_array_size )
01646 this->grow_my_array( this->my_tail + 1 );
01647 this->my_array[this->my_tail] = std::make_pair( *(op->elem), true );
01648 ++(this->my_tail);
01649 __TBB_store_with_release(op->status, SUCCEEDED);
01650 }
01651
01652 void internal_pop(prio_operation *op) {
01653 if ( this->my_reserved == true || this->my_tail == 0 ) {
01654 __TBB_store_with_release(op->status, FAILED);
01655 }
01656 else {
01657 if (mark<this->my_tail &&
01658 compare(this->my_array[0].first,
01659 this->my_array[this->my_tail-1].first)) {
01660
01661
01662 *(op->elem) = this->my_array[this->my_tail-1].first;
01663 --(this->my_tail);
01664 __TBB_store_with_release(op->status, SUCCEEDED);
01665 }
01666 else {
01667 *(op->elem) = this->my_array[0].first;
01668 if (mark == this->my_tail) --mark;
01669 --(this->my_tail);
01670 __TBB_store_with_release(op->status, SUCCEEDED);
01671 this->my_array[0].first=this->my_array[this->my_tail].first;
01672 if (this->my_tail > 1)
01673 reheap();
01674 }
01675 }
01676 }
01677 void internal_reserve(prio_operation *op) {
01678 if (this->my_reserved == true || this->my_tail == 0) {
01679 __TBB_store_with_release(op->status, FAILED);
01680 }
01681 else {
01682 this->my_reserved = true;
01683 *(op->elem) = reserved_item = this->my_array[0].first;
01684 if (mark == this->my_tail) --mark;
01685 --(this->my_tail);
01686 __TBB_store_with_release(op->status, SUCCEEDED);
01687 this->my_array[0].first = this->my_array[this->my_tail].first;
01688 if (this->my_tail > 1)
01689 reheap();
01690 }
01691 }
01692 void internal_consume(prio_operation *op) {
01693 this->my_reserved = false;
01694 __TBB_store_with_release(op->status, SUCCEEDED);
01695 }
01696 void internal_release(prio_operation *op) {
01697 if (this->my_tail >= this->my_array_size)
01698 this->grow_my_array( this->my_tail + 1 );
01699 this->my_array[this->my_tail] = std::make_pair(reserved_item, true);
01700 ++(this->my_tail);
01701 this->my_reserved = false;
01702 __TBB_store_with_release(op->status, SUCCEEDED);
01703 heapify();
01704 }
01705 private:
01706 Compare compare;
01707 size_type mark;
01708 input_type reserved_item;
01709
01710 void heapify() {
01711 if (!mark) mark = 1;
01712 for (; mark<this->my_tail; ++mark) {
01713 size_type cur_pos = mark;
01714 input_type to_place = this->my_array[mark].first;
01715 do {
01716 size_type parent = (cur_pos-1)>>1;
01717 if (!compare(this->my_array[parent].first, to_place))
01718 break;
01719 this->my_array[cur_pos].first = this->my_array[parent].first;
01720 cur_pos = parent;
01721 } while( cur_pos );
01722 this->my_array[cur_pos].first = to_place;
01723 }
01724 }
01725
01726 void reheap() {
01727 size_type cur_pos=0, child=1;
01728 while (child < mark) {
01729 size_type target = child;
01730 if (child+1<mark &&
01731 compare(this->my_array[child].first,
01732 this->my_array[child+1].first))
01733 ++target;
01734
01735 if (compare(this->my_array[target].first,
01736 this->my_array[this->my_tail].first))
01737 break;
01738 this->my_array[cur_pos].first = this->my_array[target].first;
01739 cur_pos = target;
01740 child = (cur_pos<<1)+1;
01741 }
01742 this->my_array[cur_pos].first = this->my_array[this->my_tail].first;
01743 }
01744 };
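// Example: with the default Compare (std::less), the largest item is forwarded first. A sketch
// assuming an existing graph g and no attached successors:
//
//   tbb::flow::priority_queue_node<int> pq( g );
//   pq.try_put( 3 );  pq.try_put( 7 );  pq.try_put( 5 );
//   int x = 0;
//   pq.try_get( x );   // x == 7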
01745
01747
01750 template< typename T >
01751 class limiter_node : public graph_node, public receiver< T >, public sender< T > {
01752 protected:
01753 using graph_node::my_graph;
01754 public:
01755 typedef T input_type;
01756 typedef T output_type;
01757 typedef sender< input_type > predecessor_type;
01758 typedef receiver< output_type > successor_type;
01759
01760 private:
01761 task *my_root_task;
01762 size_t my_threshold;
01763 size_t my_count;
01764 internal::predecessor_cache< T > my_predecessors;
01765 spin_mutex my_mutex;
01766 internal::broadcast_cache< T > my_successors;
01767 int init_decrement_predecessors;
01768
01769 friend class internal::forward_task_bypass< limiter_node<T> >;
01770
01771
01772 friend class internal::decrementer< limiter_node<T> >;
01773
01774
01775 task * decrement_counter() {
01776 input_type v;
01777 task *rval = NULL;
01778
01779
01780 if ( my_predecessors.get_item( v ) == false
01781 || (rval = my_successors.try_put_task(v)) == NULL ) {
01782 spin_mutex::scoped_lock lock(my_mutex);
01783 --my_count;
01784 if ( !my_predecessors.empty() ) {
01785 task *rtask = new ( task::allocate_additional_child_of( *my_root_task ) )
01786 internal::forward_task_bypass< limiter_node<T> >( *this );
01787 __TBB_ASSERT(!rval, "Have two tasks to handle");
01788 return rtask;
01789 }
01790 }
01791 return rval;
01792 }
01793
01794 void forward() {
01795 {
01796 spin_mutex::scoped_lock lock(my_mutex);
01797 if ( my_count < my_threshold )
01798 ++my_count;
01799 else
01800 return;
01801 }
01802 task * rtask = decrement_counter();
01803 if(rtask) task::enqueue(*rtask);
01804 }
01805
01806 task *forward_task() {
01807 spin_mutex::scoped_lock lock(my_mutex);
01808 if ( my_count >= my_threshold )
01809 return NULL;
01810 ++my_count;
01811 task * rtask = decrement_counter();
01812 return rtask;
01813 }
01814
01815 public:
01817 internal::decrementer< limiter_node<T> > decrement;
01818
01820 limiter_node(graph &g, size_t threshold, int num_decrement_predecessors=0) :
01821 graph_node(g), my_root_task(g.root_task()), my_threshold(threshold), my_count(0),
01822 init_decrement_predecessors(num_decrement_predecessors),
01823 decrement(num_decrement_predecessors)
01824 {
01825 my_predecessors.set_owner(this);
01826 my_successors.set_owner(this);
01827 decrement.set_owner(this);
01828 }
01829
01831 limiter_node( const limiter_node& src ) :
01832 graph_node(src.my_graph), receiver<T>(), sender<T>(),
01833 my_root_task(src.my_root_task), my_threshold(src.my_threshold), my_count(0),
01834 init_decrement_predecessors(src.init_decrement_predecessors),
01835 decrement(src.init_decrement_predecessors)
01836 {
01837 my_predecessors.set_owner(this);
01838 my_successors.set_owner(this);
01839 decrement.set_owner(this);
01840 }
01841
01843 bool register_successor( receiver<output_type> &r ) {
01844 my_successors.register_successor(r);
01845 return true;
01846 }
01847
01849
01850 bool remove_successor( receiver<output_type> &r ) {
01851 r.remove_predecessor(*this);
01852 my_successors.remove_successor(r);
01853 return true;
01854 }
01855
01857 bool register_predecessor( predecessor_type &src ) {
01858 spin_mutex::scoped_lock lock(my_mutex);
01859 my_predecessors.add( src );
01860 if ( my_count < my_threshold && !my_successors.empty() ) {
01861 task::enqueue( * new ( task::allocate_additional_child_of( *my_root_task ) )
01862 internal::forward_task_bypass< limiter_node<T> >( *this ) );
01865 }
01866 return true;
01867 }
01868
01870 bool remove_predecessor( predecessor_type &src ) {
01871 my_predecessors.remove( src );
01872 return true;
01873 }
01874
01875 protected:
01876
01877 template< typename R, typename B > friend class run_and_put_task;
01878 template<typename X, typename Y> friend class internal::broadcast_cache;
01879 template<typename X, typename Y> friend class internal::round_robin_cache;
01881 task *try_put_task( const T &t ) {
01882 {
01883 spin_mutex::scoped_lock lock(my_mutex);
01884 if ( my_count >= my_threshold )
01885 return NULL;
01886 else
01887 ++my_count;
01888 }
01889
01890 task * rtask = my_successors.try_put_task(t);
01891
01892 if ( !rtask ) {
01893 spin_mutex::scoped_lock lock(my_mutex);
01894 --my_count;
01895 if ( !my_predecessors.empty() ) {
01896 rtask = new ( task::allocate_additional_child_of( *my_root_task ) )
01897 internal::forward_task_bypass< limiter_node<T> >( *this );
01898 }
01899 }
01900 return rtask;
01901 }
01902
01903 void reset() {
01904 my_count = 0;
01905 my_predecessors.reset();
01906 decrement.reset_receiver();
01907 }
01908
01909 void reset_receiver() { my_predecessors.reset(); }
01910 };
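// Example: limiting the number of items in flight between a producer and a consumer. A sketch
// assuming C++11 lambdas and an existing graph g; consume() is a placeholder. The edge back
// into limit.decrement releases one slot each time the consumer finishes an item.
//
//   tbb::flow::limiter_node<int> limit( g, 4 );   // at most 4 items pass before a decrement
//   tbb::flow::function_node<int,tbb::flow::continue_msg> sink( g, tbb::flow::serial,
//       []( int v ) { consume( v ); return tbb::flow::continue_msg(); } );
//   tbb::flow::make_edge( limit, sink );
//   tbb::flow::make_edge( sink, limit.decrement );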
01911
01912 #include "internal/_flow_graph_join_impl.h"
01913
01914 using internal::reserving_port;
01915 using internal::queueing_port;
01916 using internal::tag_matching_port;
01917 using internal::input_port;
01918 using internal::tag_value;
01919 using internal::NO_TAG;
01920
01921 template<typename OutputTuple, graph_buffer_policy JP=queueing> class join_node;
01922
01923 template<typename OutputTuple>
01924 class join_node<OutputTuple,reserving>: public internal::unfolded_join_node<tbb::flow::tuple_size<OutputTuple>::value, reserving_port, OutputTuple, reserving> {
01925 private:
01926 static const int N = tbb::flow::tuple_size<OutputTuple>::value;
01927 typedef typename internal::unfolded_join_node<N, reserving_port, OutputTuple, reserving> unfolded_type;
01928 public:
01929 typedef OutputTuple output_type;
01930 typedef typename unfolded_type::input_ports_type input_ports_type;
01931 join_node(graph &g) : unfolded_type(g) { }
01932 join_node(const join_node &other) : unfolded_type(other) {}
01933 };
01934
01935 template<typename OutputTuple>
01936 class join_node<OutputTuple,queueing>: public internal::unfolded_join_node<tbb::flow::tuple_size<OutputTuple>::value, queueing_port, OutputTuple, queueing> {
01937 private:
01938 static const int N = tbb::flow::tuple_size<OutputTuple>::value;
01939 typedef typename internal::unfolded_join_node<N, queueing_port, OutputTuple, queueing> unfolded_type;
01940 public:
01941 typedef OutputTuple output_type;
01942 typedef typename unfolded_type::input_ports_type input_ports_type;
01943 join_node(graph &g) : unfolded_type(g) { }
01944 join_node(const join_node &other) : unfolded_type(other) {}
01945 };
01946
01947
01948 template<typename OutputTuple>
01949 class join_node<OutputTuple, tag_matching> : public internal::unfolded_join_node<tbb::flow::tuple_size<OutputTuple>::value,
01950 tag_matching_port, OutputTuple, tag_matching> {
01951 private:
01952 static const int N = tbb::flow::tuple_size<OutputTuple>::value;
01953 typedef typename internal::unfolded_join_node<N, tag_matching_port, OutputTuple, tag_matching> unfolded_type;
01954 public:
01955 typedef OutputTuple output_type;
01956 typedef typename unfolded_type::input_ports_type input_ports_type;
01957 template<typename B0, typename B1>
01958 join_node(graph &g, B0 b0, B1 b1) : unfolded_type(g, b0, b1) { }
01959 template<typename B0, typename B1, typename B2>
01960 join_node(graph &g, B0 b0, B1 b1, B2 b2) : unfolded_type(g, b0, b1, b2) { }
01961 template<typename B0, typename B1, typename B2, typename B3>
01962 join_node(graph &g, B0 b0, B1 b1, B2 b2, B3 b3) : unfolded_type(g, b0, b1, b2, b3) { }
01963 template<typename B0, typename B1, typename B2, typename B3, typename B4>
01964 join_node(graph &g, B0 b0, B1 b1, B2 b2, B3 b3, B4 b4) : unfolded_type(g, b0, b1, b2, b3, b4) { }
01965 #if __TBB_VARIADIC_MAX >= 6
01966 template<typename B0, typename B1, typename B2, typename B3, typename B4, typename B5>
01967 join_node(graph &g, B0 b0, B1 b1, B2 b2, B3 b3, B4 b4, B5 b5) : unfolded_type(g, b0, b1, b2, b3, b4, b5) { }
01968 #endif
01969 #if __TBB_VARIADIC_MAX >= 7
01970 template<typename B0, typename B1, typename B2, typename B3, typename B4, typename B5, typename B6>
01971 join_node(graph &g, B0 b0, B1 b1, B2 b2, B3 b3, B4 b4, B5 b5, B6 b6) : unfolded_type(g, b0, b1, b2, b3, b4, b5, b6) { }
01972 #endif
01973 #if __TBB_VARIADIC_MAX >= 8
01974 template<typename B0, typename B1, typename B2, typename B3, typename B4, typename B5, typename B6, typename B7>
01975 join_node(graph &g, B0 b0, B1 b1, B2 b2, B3 b3, B4 b4, B5 b5, B6 b6, B7 b7) : unfolded_type(g, b0, b1, b2, b3, b4, b5, b6, b7) { }
01976 #endif
01977 #if __TBB_VARIADIC_MAX >= 9
01978 template<typename B0, typename B1, typename B2, typename B3, typename B4, typename B5, typename B6, typename B7, typename B8>
01979 join_node(graph &g, B0 b0, B1 b1, B2 b2, B3 b3, B4 b4, B5 b5, B6 b6, B7 b7, B8 b8) : unfolded_type(g, b0, b1, b2, b3, b4, b5, b6, b7, b8) { }
01980 #endif
01981 #if __TBB_VARIADIC_MAX >= 10
01982 template<typename B0, typename B1, typename B2, typename B3, typename B4, typename B5, typename B6, typename B7, typename B8, typename B9>
01983 join_node(graph &g, B0 b0, B1 b1, B2 b2, B3 b3, B4 b4, B5 b5, B6 b6, B7 b7, B8 b8, B9 b9) : unfolded_type(g, b0, b1, b2, b3, b4, b5, b6, b7, b8, b9) { }
01984 #endif
01985 join_node(const join_node &other) : unfolded_type(other) {}
01986 };
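// Example: joining two streams into tuples. The default (queueing) policy emits a tuple as
// soon as one item has arrived on every port; the reserving policy instead reserves items from
// its predecessors, and tag_matching pairs items whose user-supplied tag bodies return the
// same tag_value. A sketch assuming an existing graph g; int_src and float_src stand in for
// any senders of the matching types.
//
//   typedef tbb::flow::join_node< tbb::flow::tuple<int,float> > join_t;
//   join_t j( g );
//   tbb::flow::make_edge( int_src,   tbb::flow::input_port<0>( j ) );
//   tbb::flow::make_edge( float_src, tbb::flow::input_port<1>( j ) );
//   // successors of j receive a tbb::flow::tuple<int,float>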
01987
01988 #if TBB_PREVIEW_GRAPH_NODES
01989
01990 #include "internal/_flow_graph_or_impl.h"
01991
01992 template<typename InputTuple>
01993 class or_node : public internal::unfolded_or_node<InputTuple> {
01994 private:
01995 static const int N = tbb::flow::tuple_size<InputTuple>::value;
01996 public:
01997 typedef typename internal::or_output_type<InputTuple>::type output_type;
01998 typedef typename internal::unfolded_or_node<InputTuple> unfolded_type;
01999 or_node(graph& g) : unfolded_type(g) { }
02000
02001 or_node( const or_node& other ) : unfolded_type(other) { }
02002 };
02003 #endif // TBB_PREVIEW_GRAPH_NODES
02004
02006 template< typename T >
02007 inline void make_edge( sender<T> &p, receiver<T> &s ) {
02008 p.register_successor( s );
02009 }
02010
02012 template< typename T >
02013 inline void remove_edge( sender<T> &p, receiver<T> &s ) {
02014 p.remove_successor( s );
02015 }
02016
02018 template< typename Body, typename Node >
02019 Body copy_body( Node &n ) {
02020 return n.template copy_function_object<Body>();
02021 }
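// Example: retrieving a copy of the body stored in a node. MyBody and my_function_node are
// hypothetical; MyBody must match the functor type the node was constructed with.
//
//   MyBody b = tbb::flow::copy_body<MyBody>( my_function_node );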
02022
02023 }
02024
02025 using interface6::graph;
02026 using interface6::graph_node;
02027 using interface6::continue_msg;
02028 using interface6::sender;
02029 using interface6::receiver;
02030 using interface6::continue_receiver;
02031
02032 using interface6::source_node;
02033 using interface6::function_node;
02034 using interface6::multifunction_node;
02035 using interface6::split_node;
02036 using interface6::internal::output_port;
02037 #if TBB_PREVIEW_GRAPH_NODES
02038 using interface6::or_node;
02039 #endif
02040 using interface6::continue_node;
02041 using interface6::overwrite_node;
02042 using interface6::write_once_node;
02043 using interface6::broadcast_node;
02044 using interface6::buffer_node;
02045 using interface6::queue_node;
02046 using interface6::sequencer_node;
02047 using interface6::priority_queue_node;
02048 using interface6::limiter_node;
02049 using namespace interface6::internal::graph_policy_namespace;
02050 using interface6::join_node;
02051 using interface6::input_port;
02052 using interface6::copy_body;
02053 using interface6::make_edge;
02054 using interface6::remove_edge;
02055 using interface6::internal::NO_TAG;
02056 using interface6::internal::tag_value;
02057
02058 }
02059 }
02060
02061 #endif // __TBB_flow_graph_H