task.h

00001 /*
00002     Copyright 2005-2009 Intel Corporation.  All Rights Reserved.
00003 
00004     The source code contained or described herein and all documents related
00005     to the source code ("Material") are owned by Intel Corporation or its
00006     suppliers or licensors.  Title to the Material remains with Intel
00007     Corporation or its suppliers and licensors.  The Material is protected
00008     by worldwide copyright laws and treaty provisions.  No part of the
00009     Material may be used, copied, reproduced, modified, published, uploaded,
00010     posted, transmitted, distributed, or disclosed in any way without
00011     Intel's prior express written permission.
00012 
00013     No license under any patent, copyright, trade secret or other
00014     intellectual property right is granted to or conferred upon you by
00015     disclosure or delivery of the Materials, either expressly, by
00016     implication, inducement, estoppel or otherwise.  Any license under such
00017     intellectual property rights must be express and approved by Intel in
00018     writing.
00019 */
00020 
00021 #ifndef __TBB_task_H
00022 #define __TBB_task_H
00023 
00024 #include "tbb_stddef.h"
00025 #include "tbb_machine.h"
00026 
00027 #if __TBB_EXCEPTIONS
00028 #include "cache_aligned_allocator.h"
00029 #endif /* __TBB_EXCEPTIONS */
00030 
00031 namespace tbb {
00032 
// Forward declarations of the public types defined later in this header.
class task;
class task_list;

#if __TBB_EXCEPTIONS
class task_group_context;
#endif /* __TBB_EXCEPTIONS */
00039 
00041 namespace internal {
00042 
    //! Abstract interface to the task scheduler implemented inside the TBB library.
    /** All operations are pure virtual; the concrete scheduler is library-internal.
        The `next` parameters are references to the intrusive `next` link stored in a
        task's prefix (see task_prefix::next), which is how lists of tasks are passed. */
    class scheduler: no_copy {
    public:
        //! Schedule task `first` for execution; `next` is its prefix link field.
        virtual void spawn( task& first, task*& next ) = 0;

        //! Wait for reference count of `parent` to drain; `child` may be an extra task
        //! to run first, or NULL (see task::wait_for_all / spawn_and_wait_for_all below).
        virtual void wait_for_all( task& parent, task* child ) = 0;

        //! Schedule root task `first` and wait until it (and any list chained via `next`) completes.
        virtual void spawn_root_and_wait( task& first, task*& next ) = 0;

        //! Pure virtual destructor (still requires a definition in the library).
        //  Have to have it just to shut up overzealous compilation warnings
        virtual ~scheduler() = 0;
    };
00058 
00060 
    //! A reference count.
    /** Signed integral type as wide as a pointer (internal::intptr). */
    typedef intptr reference_count;

    //! An id as used for specifying task affinity (see task::set_affinity / task::affinity).
    typedef unsigned short affinity_id;
00065 
00066 #if __TBB_EXCEPTIONS
    //! Node of a doubly-linked list of task_group_context objects.
    /** Embedded in task_group_context as `my_node`; list maintenance is done
        by the library, not in this header. */
    struct context_list_node_t {
        context_list_node_t *my_prev,
                            *my_next;
    };
00071 
    //! Proxy passed to the overloaded operator new/delete (bottom of this header)
    //! to allocate a root task bound to a task_group_context.
    class allocate_root_with_context_proxy: no_assign {
        // The context the allocated root task will be associated with.
        task_group_context& my_context;
    public:
        allocate_root_with_context_proxy ( task_group_context& ctx ) : my_context(ctx) {}
        //! Obtain storage for a task of `size` bytes; implemented in the library.
        task& __TBB_EXPORTED_METHOD allocate( size_t size ) const;
        //! Return storage obtained by allocate(); implemented in the library.
        void __TBB_EXPORTED_METHOD free( task& ) const;
    };
00079 #endif /* __TBB_EXCEPTIONS */
00080 
    //! Proxy passed to the overloaded operator new/delete to allocate a root task
    //! (see task::allocate_root()).
    class allocate_root_proxy: no_assign {
    public:
        //! Obtain storage for a root task of `size` bytes; implemented in the library.
        static task& __TBB_EXPORTED_FUNC allocate( size_t size );
        //! Return storage obtained by allocate(); implemented in the library.
        static void __TBB_EXPORTED_FUNC free( task& );
    };
00086 
    //! Proxy passed to the overloaded operator new/delete to allocate a continuation task.
    /** task::allocate_continuation() reinterprets the task itself as this proxy,
        so the proxy must remain stateless. */
    class allocate_continuation_proxy: no_assign {
    public:
        task& __TBB_EXPORTED_METHOD allocate( size_t size ) const;
        void __TBB_EXPORTED_METHOD free( task& ) const;
    };
00092 
    //! Proxy passed to the overloaded operator new/delete to allocate a child task.
    /** task::allocate_child() reinterprets the task itself as this proxy,
        so the proxy must remain stateless. */
    class allocate_child_proxy: no_assign {
    public:
        task& __TBB_EXPORTED_METHOD allocate( size_t size ) const;
        void __TBB_EXPORTED_METHOD free( task& ) const;
    };
00098 
    //! Proxy passed to the overloaded operator new/delete to allocate an
    //! additional child of an already-running task (see task::allocate_additional_child_of).
    class allocate_additional_child_of_proxy: no_assign {
        // Task on whose behalf the allocation is made.
        task& self;
        // Task that becomes the parent of the newly allocated child.
        task& parent;
    public:
        allocate_additional_child_of_proxy( task& self_, task& parent_ ) : self(self_), parent(parent_) {}
        task& __TBB_EXPORTED_METHOD allocate( size_t size ) const;
        void __TBB_EXPORTED_METHOD free( task& ) const;
    };
00107 
    // Forward declaration; task_group_base is befriended by task_prefix and task below.
    class task_group_base;
00109 
00111 
    //! Memory prefix to a task object.
    /** Scheduler bookkeeping for a task.  It is allocated immediately BEFORE the
        task object itself: task() below steps forward with this+1, and
        task::prefix() steps back with this[-1].  Field order is part of the
        binary layout shared with the library — do not reorder.
        This class is internal to the library. */
    class task_prefix {
    private:
        friend class tbb::task;
        friend class tbb::task_list;
        friend class internal::scheduler;
        friend class internal::allocate_root_proxy;
        friend class internal::allocate_child_proxy;
        friend class internal::allocate_continuation_proxy;
        friend class internal::allocate_additional_child_of_proxy;
        friend class internal::task_group_base;

#if __TBB_EXCEPTIONS
        //! Cancellation/exception context shared by a group of tasks.
        /** Read by task::context(), cancel_group_execution() and is_cancelled();
            copied from the new parent by task::recycle_as_child_of. */
        task_group_context  *context;
#endif /* __TBB_EXCEPTIONS */
        
        //! Scheduler associated with the task's origin.
        /** NOTE(review): not referenced anywhere in this header; presumably the
            scheduler that allocated the task — confirm against the library sources. */
        scheduler* origin;

        //! The scheduler that owns the task.
        /** All spawn/wait entry points of class task below dispatch through this pointer. */
        scheduler* owner;

        //! The parent task, or NULL.
        /** Returned by task::parent(); set by task::recycle_as_child_of, which
            requires it to be NULL beforehand.  Presumably the parent's ref_count
            accounts for this task — confirm against the library sources. */
        tbb::task* parent;

        //! Reference count used for synchronization.
        /** Manipulated via task::set_ref_count / increment_ref_count /
            decrement_ref_count; asserted to be 0 before a task is recycled. */
        reference_count ref_count;

        //! Scheduling depth.
        /** Obsolete (always 0-reported) when __TBB_TASK_DEQUE is set; see the
            depth-related methods of class task. */
        int depth;

        //! A task::state_type, stored as a byte.
        unsigned char state;

        //! Miscellaneous state that is not directly visible to users.
        /** task's constructor sets it to 1; other values are library-internal —
            TODO confirm their meaning against the scheduler sources. */
        unsigned char extra_state;

        //! Affinity hint; see task::set_affinity / task::affinity.
        affinity_id affinity;

        //! Intrusive "next" link used to chain tasks (see task_list and the spawn methods).
        tbb::task* next;

        //! The task corresponding to this task_prefix (located immediately after it in memory).
        tbb::task& task() {return *reinterpret_cast<tbb::task*>(this+1);}
    };
00181 
00182 } // namespace internal
00184 
00185 #if __TBB_EXCEPTIONS
00186 
// Forward declaration of the exception container used by task_group_context:
// either the polymorphic tbb_exception (captured-exception mode) or the
// internal wrapper around std::exception_ptr-style propagation.
#if TBB_USE_CAPTURED_EXCEPTION
    class tbb_exception;
#else
    namespace internal {
        class tbb_exception_ptr;
    }
#endif /* !TBB_USE_CAPTURED_EXCEPTION */
00194 
00196 
//! Used to form groups of tasks.
/** A context carries the cancellation and exception state shared by a group of
    tasks.  Most behavior (init, reset, cancellation, exception registration) is
    implemented in the library; only the data layout and the constructor live here.

    Layout note: the members are interleaved with padding arrays sized from
    internal::NFS_MaxLineSize (the cache-line size constant) — presumably to keep
    the frequently written fields on separate cache lines.  Do not reorder members;
    the layout is shared with the library binary. */
class task_group_context : internal::no_copy
{
private:
#if TBB_USE_CAPTURED_EXCEPTION
    typedef tbb_exception exception_container_type;
#else
    typedef internal::tbb_exception_ptr exception_container_type;
#endif

    //! Layout of my_version_and_traits: version number in the low 16 bits,
    //! traits_type flags in the high 16 bits.
    enum version_traits_word_layout {
        traits_offset = 16,
        version_mask = 0xFFFF,
        traits_mask = 0xFFFFul << traits_offset
    };

public:
    //! Relation of a context to its parent context.
    enum kind_type {
        isolated,
        bound
    };

    //! Flag bits stored in the high half of my_version_and_traits.
    enum traits_type {
        exact_exception = 0x0001ul << traits_offset,
        no_cancellation = 0x0002ul << traits_offset,
        concurrent_wait = 0x0004ul << traits_offset,
#if TBB_USE_CAPTURED_EXCEPTION
        default_traits = 0
#else
        default_traits = exact_exception
#endif /* !TBB_USE_CAPTURED_EXCEPTION */
    };

private:
    union {
        //! Kind of this context: isolated/bound, or internally binding_completed (bound+1).
        kind_type my_kind;
        //! Forces the union (and thus my_kind) to occupy a full uintptr_t.
        uintptr_t _my_kind_aligner;
    };

    //! Pointer to the context of the parent cancellation group.
    /** NOTE(review): set by init() in the library; presumably NULL for isolated
        contexts — confirm against the library sources. */
    task_group_context *my_parent;

    //! Node in a library-maintained doubly-linked list of contexts.
    internal::context_list_node_t my_node;

    //! Padding: sizes the block of members above to one NFS_MaxLineSize line.
    char _leading_padding[internal::NFS_MaxLineSize - 
                    2 * sizeof(uintptr_t)- sizeof(void*) - sizeof(internal::context_list_node_t)];
    
    //! Nonzero once cancellation of the group has been requested.
    uintptr_t my_cancellation_requested;
    
    //! Version (low 16 bits) combined with traits_type flags (high 16 bits).
    /** Poisoned to 0xDeadBeef in debug builds after destruction — see is_alive(). */
    uintptr_t  my_version_and_traits;

    //! Container holding the exception being propagated for this group, if any.
    exception_container_type *my_exception;

    //! Opaque owner handle; set by the library (init()).
    void *my_owner;

    //! Padding: sizes the block of members above to one NFS_MaxLineSize line.
    char _trailing_padding[internal::NFS_MaxLineSize - sizeof(intptr_t) - 2 * sizeof(void*)];

public:
    //! Default & binding constructor.
    /** @param relation_with_parent  `bound` (default) to inherit cancellation from
               the parent group, or `isolated` for an independent group.
        @param traits  Combination of traits_type flags.  The constant 1 OR-ed in
               below is the version number in the low half of the word.
        All remaining members are initialized by init() in the library. */
    task_group_context ( kind_type relation_with_parent = bound,
                         uintptr_t traits = default_traits )
        : my_kind(relation_with_parent)
        , my_version_and_traits(1 | traits)
    {
        init();
    }

    __TBB_EXPORTED_METHOD ~task_group_context ();

    //! Forcefully reinitializes the context for reuse.
    /** Implemented in the library; see library documentation for preconditions. */
    void __TBB_EXPORTED_METHOD reset ();

    //! Initiates cancellation of all tasks in this group.
    /** Return value presumably indicates whether this call (rather than an earlier
        one) initiated the cancellation — confirm against the library documentation. */
    bool __TBB_EXPORTED_METHOD cancel_group_execution ();

    //! Returns true if the group has received a cancellation request.
    bool __TBB_EXPORTED_METHOD is_group_execution_cancelled () const;

    //! Records the exception currently in flight into this context.
    void __TBB_EXPORTED_METHOD register_pending_exception ();

protected:
    //! Out-of-line part of the constructor; implemented in the library.
    void __TBB_EXPORTED_METHOD init ();

private:
    friend class task;
    friend class internal::allocate_root_with_context_proxy;

    // Internal kind values used during lazy binding to the parent context.
    static const kind_type binding_required = bound;
    static const kind_type binding_completed = kind_type(bound+1);

    //! Propagates a cancellation request from ancestor contexts into this one.
    void propagate_cancellation_from_ancestors ();

    //! Debug aid: detects use of a destroyed context.
    /** In debug builds compares against the 0xDeadBeef poison value;
        in release builds always returns true. */
    bool is_alive () { 
#if TBB_USE_DEBUG
        return my_version_and_traits != 0xDeadBeef;
#else
        return true;
#endif /* TBB_USE_DEBUG */
    }
}; // class task_group_context
00388 
00389 #endif /* __TBB_EXCEPTIONS */
00390 
00392 
//! Base class for user-defined tasks.
/** Users derive from task and override execute().  All scheduler bookkeeping for
    a task lives in the task_prefix allocated immediately before the object; see
    prefix() at the bottom of this class. */
class task: internal::no_copy {
    //! Set reference count with extra checking; implemented in the library.
    void __TBB_EXPORTED_METHOD internal_set_ref_count( int count );

    //! Decrement reference count with extra checking; implemented in the library.
    internal::reference_count __TBB_EXPORTED_METHOD internal_decrement_ref_count();

protected:
    //! Default constructor.
    /** Sets extra_state to 1; the meaning of that value is library-internal. */
    task() {prefix().extra_state=1;}

public:
    //! Destructor.
    virtual ~task() {}

    //! Should be overridden by derived classes to do the task's work.
    /** Returns the next task to execute, or NULL. */
    virtual task* execute() = 0;

    //! Enumeration of task states that the scheduler considers.
    enum state_type {
        //! task is running its execute() method
        executing,
        //! task has been marked for re-execution (see recycle_to_reexecute)
        reexecute,
        //! task is in (or headed for) the ready pool; depth may not be changed in this state
        ready,
        //! task object is freshly allocated or recycled
        allocated,
        //! task object has been freed (asserted against when recycling as a child)
        freed,
        //! task marked for recycling as a safe continuation (see recycle_as_safe_continuation)
        recycle
    };

    //------------------------------------------------------------------------
    // Allocating tasks
    //------------------------------------------------------------------------

    //! Returns proxy for overloaded new that allocates a root task.
    static internal::allocate_root_proxy allocate_root() {
        return internal::allocate_root_proxy();
    }

#if __TBB_EXCEPTIONS
    //! Returns proxy for overloaded new that allocates a root task associated with context `ctx`.
    static internal::allocate_root_with_context_proxy allocate_root( task_group_context& ctx ) {
        return internal::allocate_root_with_context_proxy(ctx);
    }
#endif /* __TBB_EXCEPTIONS */

    //! Returns proxy for overloaded new that allocates a continuation task of *this.
    /** The stateless proxy is simply *this reinterpreted; see the matching operator new below. */
    internal::allocate_continuation_proxy& allocate_continuation() {
        return *reinterpret_cast<internal::allocate_continuation_proxy*>(this);
    }

    //! Returns proxy for overloaded new that allocates a child task of *this.
    internal::allocate_child_proxy& allocate_child() {
        return *reinterpret_cast<internal::allocate_child_proxy*>(this);
    }

    //! Returns proxy for overloaded new that allocates an additional child of task `t`.
    internal::allocate_additional_child_of_proxy allocate_additional_child_of( task& t ) {
        return internal::allocate_additional_child_of_proxy(*this,t);
    }

    //! Destroy a task that was allocated but will not be executed.
    /** Implemented in the library. */
    void __TBB_EXPORTED_METHOD destroy( task& victim );

    //------------------------------------------------------------------------
    // Recycling of tasks
    //------------------------------------------------------------------------

    //! Change this task so that when execute() returns, it acts as a continuation.
    /** Must be called from within execute() (state asserted to be `executing`). */
    void recycle_as_continuation() {
        __TBB_ASSERT( prefix().state==executing, "execute not running?" );
        prefix().state = allocated;
    }

    //! Like recycle_as_continuation(), but uses the `recycle` state.
    /** The behavioral difference between the two states is library-internal —
        see the library documentation. */
    void recycle_as_safe_continuation() {
        __TBB_ASSERT( prefix().state==executing, "execute not running?" );
        prefix().state = recycle;
    }

    //! Change this task so that it becomes a child of `new_parent`.
    /** Requires no outstanding children (ref_count==0) and a NULL parent;
        inherits depth (and context, when exceptions are enabled) from the new parent. */
    void recycle_as_child_of( task& new_parent ) {
        internal::task_prefix& p = prefix();
        __TBB_ASSERT( prefix().state==executing||prefix().state==allocated, "execute not running, or already recycled" );
        __TBB_ASSERT( prefix().ref_count==0, "no child tasks allowed when recycled as a child" );
        __TBB_ASSERT( p.parent==NULL, "parent must be null" );
        __TBB_ASSERT( new_parent.prefix().state<=recycle, "corrupt parent's state" );
        __TBB_ASSERT( new_parent.prefix().state!=freed, "parent already freed" );
        p.state = allocated;
        p.parent = &new_parent;
        p.depth = new_parent.prefix().depth+1;
#if __TBB_EXCEPTIONS
        p.context = new_parent.prefix().context;
#endif /* __TBB_EXCEPTIONS */
    }

    //! Mark this task for re-execution after the current execute() returns.
    /** Must be called from within execute(), with no outstanding children. */
    void recycle_to_reexecute() {
        __TBB_ASSERT( prefix().state==executing, "execute not running, or already recycled" );
        __TBB_ASSERT( prefix().ref_count==0, "no child tasks allowed when recycled for reexecution" );
        prefix().state = reexecute;
    }

#if __TBB_TASK_DEQUE
    // All depth-related methods are obsolete, and are retained for the sake 
    // of backward source compatibility only
    intptr_t depth() const {return 0;}
    void set_depth( intptr_t ) {}
    void add_to_depth( int ) {}

#else /* !__TBB_TASK_DEQUE */
    //! Type of the scheduling depth value.
    typedef internal::intptr depth_type;

    //! Scheduling depth of this task.
    depth_type depth() const {return prefix().depth;}

    //! Set scheduling depth.
    /** Not allowed on a task in the `ready` state; value must be a non-negative
        quantity that fits in an int. */
    void set_depth( depth_type new_depth ) {
        __TBB_ASSERT( state()!=ready, "cannot change depth of ready task" );
        __TBB_ASSERT( new_depth>=0, "depth cannot be negative" );
        __TBB_ASSERT( new_depth==int(new_depth), "integer overflow error");
        prefix().depth = int(new_depth);
    }

    //! Adjust scheduling depth by `delta` (result must stay non-negative).
    void add_to_depth( int delta ) {
        __TBB_ASSERT( state()!=ready, "cannot change depth of ready task" );
        __TBB_ASSERT( prefix().depth>=-delta, "depth cannot be negative" );
        prefix().depth+=delta;
    }
#endif /* !__TBB_TASK_DEQUE */

    //------------------------------------------------------------------------
    // Spawning and blocking
    //------------------------------------------------------------------------

    //! Set reference count.
    /** Routed through the checked library version when threading tools or
        assertions are enabled; otherwise a plain store. */
    void set_ref_count( int count ) {
#if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT
        internal_set_ref_count(count);
#else
        prefix().ref_count = count;
#endif /* TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT */
    }

    //! Atomically increment reference count (acquire fence).
    void increment_ref_count() {
        __TBB_FetchAndIncrementWacquire( &prefix().ref_count );
    }

    //! Atomically decrement reference count; returns the NEW value.
    /** The fetch-and-decrement returns the old value, hence the -1. */
    int decrement_ref_count() {
#if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT
        return int(internal_decrement_ref_count());
#else
        return int(__TBB_FetchAndDecrementWrelease( &prefix().ref_count ))-1;
#endif /* TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT */
    }

    //! Schedule task `child` for execution via this task's owning scheduler.
    /** Unless relaxed ownership is enabled, *this must be owned by the calling thread. */
    void spawn( task& child ) {
#if !__TBB_RELAXED_OWNERSHIP
        __TBB_ASSERT( is_owned_by_current_thread(), "'this' not owned by current thread" );
#endif /* !__TBB_RELAXED_OWNERSHIP */
        prefix().owner->spawn( child, child.prefix().next );
    }

    //! Spawn every task in `list` and clear the list; defined after task_list below.
    void spawn( task_list& list );

    //! Spawn `child` and then wait for this task's reference count to drain.
    void spawn_and_wait_for_all( task& child ) {
#if !__TBB_RELAXED_OWNERSHIP
        __TBB_ASSERT( is_owned_by_current_thread(), "'this' not owned by current thread" );
#endif /* !__TBB_RELAXED_OWNERSHIP */
        prefix().owner->wait_for_all( *this, &child );
    }

    //! Spawn all tasks in `list` and wait for them; implemented in the library.
    void __TBB_EXPORTED_METHOD spawn_and_wait_for_all( task_list& list );

    //! Spawn root task `root` and wait for it to complete.
    static void spawn_root_and_wait( task& root ) {
#if !__TBB_RELAXED_OWNERSHIP
        __TBB_ASSERT( root.is_owned_by_current_thread(), "root not owned by current thread" );
#endif /* !__TBB_RELAXED_OWNERSHIP */
        root.prefix().owner->spawn_root_and_wait( root, root.prefix().next );
    }

    //! Spawn all root tasks in `root_list` and wait; defined after task_list below.
    static void spawn_root_and_wait( task_list& root_list );

    //! Wait for this task's reference count to drain (no extra task to run first).
    void wait_for_all() {
#if !__TBB_RELAXED_OWNERSHIP
        __TBB_ASSERT( is_owned_by_current_thread(), "'this' not owned by current thread" );
#endif /* !__TBB_RELAXED_OWNERSHIP */
        prefix().owner->wait_for_all( *this, NULL );
    }

    //! The innermost task being executed by the calling thread; implemented in the library.
    static task& __TBB_EXPORTED_FUNC self();

    //! The task's parent, or NULL.
    task* parent() const {return prefix().parent;}

#if __TBB_EXCEPTIONS
    //! The context (cancellation group) this task belongs to.
    task_group_context* context() {return prefix().context;}
#endif /* __TBB_EXCEPTIONS */   

    //! True if this task's owner differs from its parent's owner.
    /** Precondition: parent() must be non-NULL (dereferenced unchecked). */
    bool is_stolen_task() const {
        internal::task_prefix& p = prefix();
        internal::task_prefix& q = parent()->prefix();
        return p.owner!=q.owner;
    }

    //------------------------------------------------------------------------
    // Debugging
    //------------------------------------------------------------------------

    //! Current execution state.
    state_type state() const {return state_type(prefix().state);}

    //! Current reference count, narrowed to int (overflow asserted in debug builds).
    int ref_count() const {
#if TBB_USE_ASSERT
        internal::reference_count ref_count = prefix().ref_count;
        __TBB_ASSERT( ref_count==int(ref_count), "integer overflow error");
#endif
        return int(prefix().ref_count);
    }

    //! True if this task is owned by the calling thread; implemented in the library.
    bool __TBB_EXPORTED_METHOD is_owned_by_current_thread() const;

    //------------------------------------------------------------------------
    // Affinity
    //------------------------------------------------------------------------
 
    //! An id as used for specifying affinity.
    typedef internal::affinity_id affinity_id;

    //! Set affinity hint for this task.
    void set_affinity( affinity_id id ) {prefix().affinity = id;}

    //! Current affinity hint of this task.
    affinity_id affinity() const {return prefix().affinity;}

    //! Invoked by the scheduler when it honors (or fails to honor) an affinity request.
    /** Default implementation does nothing; implemented in the library, overridable by users. */
    virtual void __TBB_EXPORTED_METHOD note_affinity( affinity_id id );

#if __TBB_EXCEPTIONS
    //! Initiate cancellation of the group this task belongs to; forwards to the context.
    bool cancel_group_execution () { return prefix().context->cancel_group_execution(); }

    //! True if the group this task belongs to has been cancelled.
    bool is_cancelled () const { return prefix().context->is_group_execution_cancelled(); }
#endif /* __TBB_EXCEPTIONS */

private:
    friend class task_list;
    friend class internal::scheduler;
    friend class internal::allocate_root_proxy;
#if __TBB_EXCEPTIONS
    friend class internal::allocate_root_with_context_proxy;
#endif /* __TBB_EXCEPTIONS */
    friend class internal::allocate_continuation_proxy;
    friend class internal::allocate_child_proxy;
    friend class internal::allocate_additional_child_of_proxy;
    
    friend class internal::task_group_base;

    //! The task_prefix allocated immediately before this task object.
    /** The dummy version_tag parameter is a versioning aid; always pass NULL. */
    internal::task_prefix& prefix( internal::version_tag* = NULL ) const {
        return reinterpret_cast<internal::task_prefix*>(const_cast<task*>(this))[-1];
    }
}; // class task
00714 
00716 
//! task that does nothing: execute() immediately returns NULL.
/** Typically used where the scheduler requires a task object but there is no
    work to do (e.g. as a dummy parent/continuation — see library examples). */
class empty_task: public task {
    /*override*/ task* execute() {
        return NULL;
    }
};
00722 
00724 
00726 class task_list: internal::no_copy {
00727 private:
00728     task* first;
00729     task** next_ptr;
00730     friend class task;
00731 public:
00733     task_list() : first(NULL), next_ptr(&first) {}
00734 
00736     ~task_list() {}
00737 
00739     bool empty() const {return !first;}
00740 
00742     void push_back( task& task ) {
00743         task.prefix().next = NULL;
00744         *next_ptr = &task;
00745         next_ptr = &task.prefix().next;
00746     }
00747 
00749     task& pop_front() {
00750         __TBB_ASSERT( !empty(), "attempt to pop item from empty task_list" );
00751         task* result = first;
00752         first = result->prefix().next;
00753         if( !first ) next_ptr = &first;
00754         return *result;
00755     }
00756 
00758     void clear() {
00759         first=NULL;
00760         next_ptr=&first;
00761     }
00762 };
00763 
00764 inline void task::spawn( task_list& list ) {
00765 #if !__TBB_RELAXED_OWNERSHIP
00766     __TBB_ASSERT( is_owned_by_current_thread(), "'this' not owned by current thread" );
00767 #endif /* !__TBB_RELAXED_OWNERSHIP */
00768     if( task* t = list.first ) {
00769         prefix().owner->spawn( *t, *list.next_ptr );
00770         list.clear();
00771     }
00772 }
00773 
00774 inline void task::spawn_root_and_wait( task_list& root_list ) {
00775     if( task* t = root_list.first ) {
00776 #if !__TBB_RELAXED_OWNERSHIP
00777         __TBB_ASSERT( t->is_owned_by_current_thread(), "'this' not owned by current thread" );
00778 #endif /* !__TBB_RELAXED_OWNERSHIP */
00779         t->prefix().owner->spawn_root_and_wait( *t, *root_list.next_ptr );
00780         root_list.clear();
00781     }
00782 }
00783 
00784 } // namespace tbb
00785 
//! Placement new that obtains storage for a root task from the TBB allocator.
inline void *operator new( size_t bytes, const tbb::internal::allocate_root_proxy& ) {
    return &tbb::internal::allocate_root_proxy::allocate(bytes);
}

//! Matching placement delete; invoked by the compiler only if the task's constructor throws.
inline void operator delete( void* task, const tbb::internal::allocate_root_proxy& ) {
    tbb::internal::allocate_root_proxy::free( *static_cast<tbb::task*>(task) );
}
00793 
#if __TBB_EXCEPTIONS
//! Placement new that obtains storage for a root task bound to a task_group_context.
inline void *operator new( size_t bytes, const tbb::internal::allocate_root_with_context_proxy& p ) {
    return &p.allocate(bytes);
}

//! Matching placement delete; invoked by the compiler only if the task's constructor throws.
inline void operator delete( void* task, const tbb::internal::allocate_root_with_context_proxy& p ) {
    p.free( *static_cast<tbb::task*>(task) );
}
#endif /* __TBB_EXCEPTIONS */
00803 
//! Placement new that obtains storage for a continuation task (see task::allocate_continuation).
inline void *operator new( size_t bytes, const tbb::internal::allocate_continuation_proxy& p ) {
    return &p.allocate(bytes);
}

//! Matching placement delete; invoked by the compiler only if the task's constructor throws.
inline void operator delete( void* task, const tbb::internal::allocate_continuation_proxy& p ) {
    p.free( *static_cast<tbb::task*>(task) );
}
00811 
//! Placement new that obtains storage for a child task (see task::allocate_child).
inline void *operator new( size_t bytes, const tbb::internal::allocate_child_proxy& p ) {
    return &p.allocate(bytes);
}

//! Matching placement delete; invoked by the compiler only if the task's constructor throws.
inline void operator delete( void* task, const tbb::internal::allocate_child_proxy& p ) {
    p.free( *static_cast<tbb::task*>(task) );
}
00819 
//! Placement new that obtains storage for an additional child task
//! (see task::allocate_additional_child_of).
inline void *operator new( size_t bytes, const tbb::internal::allocate_additional_child_of_proxy& p ) {
    return &p.allocate(bytes);
}

//! Matching placement delete; invoked by the compiler only if the task's constructor throws.
inline void operator delete( void* task, const tbb::internal::allocate_additional_child_of_proxy& p ) {
    p.free( *static_cast<tbb::task*>(task) );
}
00827 
00828 #endif /* __TBB_task_H */

Copyright © 2005-2009 Intel Corporation. All Rights Reserved.

Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are registered trademarks or trademarks of Intel Corporation or its subsidiaries in the United States and other countries.

* Other names and brands may be claimed as the property of others.