spinlock_ttas_futex.hpp 5.7 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127
  1. // Copyright Oliver Kowalke 2016.
  2. // Distributed under the Boost Software License, Version 1.0.
  3. // (See accompanying file LICENSE_1_0.txt or copy at
  4. // http://www.boost.org/LICENSE_1_0.txt)
  5. #ifndef BOOST_FIBERS_spinlock_ttas_futex_FUTEX_H
  6. #define BOOST_FIBERS_spinlock_ttas_futex_FUTEX_H
#include <algorithm>
#include <atomic>
#include <chrono>
#include <cmath>
#include <cstdint>
#include <random>
#include <thread>
#include <boost/fiber/detail/config.hpp>
#include <boost/fiber/detail/cpu_relax.hpp>
#include <boost/fiber/detail/futex.hpp>
// based on information from:
// https://software.intel.com/en-us/articles/benefitting-power-and-performance-sleep-loops
// https://software.intel.com/en-us/articles/long-duration-spin-wait-loops-on-hyper-threading-technology-enabled-intel-processors
  17. namespace boost {
  18. namespace fibers {
  19. namespace detail {
  20. class spinlock_ttas_futex {
  21. private:
  22. template< typename FBSplk >
  23. friend class spinlock_rtm;
  24. std::atomic< std::int32_t > value_{ 0 };
  25. public:
  26. spinlock_ttas_futex() = default;
  27. spinlock_ttas_futex( spinlock_ttas_futex const&) = delete;
  28. spinlock_ttas_futex & operator=( spinlock_ttas_futex const&) = delete;
  29. void lock() noexcept {
  30. static thread_local std::minstd_rand generator{ std::random_device{}() };
  31. std::int32_t collisions = 0, retries = 0, expected = 0;
  32. // after max. spins or collisions suspend via futex
  33. while ( retries++ < BOOST_FIBERS_RETRY_THRESHOLD) {
  34. // avoid using multiple pause instructions for a delay of a specific cycle count
  35. // the delay of cpu_relax() (pause on Intel) depends on the processor family
  36. // the cycle count can not guaranteed from one system to the next
  37. // -> check the shared variable 'value_' in between each cpu_relax() to prevent
  38. // unnecessarily long delays on some systems
  39. // test shared variable 'status_'
  40. // first access to 'value_' -> chache miss
  41. // sucessive acccess to 'value_' -> cache hit
  42. // if 'value_' was released by other fiber
  43. // cached 'value_' is invalidated -> cache miss
  44. if ( 0 != ( expected = value_.load( std::memory_order_relaxed) ) ) {
  45. #if !defined(BOOST_FIBERS_SPIN_SINGLE_CORE)
  46. if ( BOOST_FIBERS_SPIN_BEFORE_SLEEP0 > retries) {
  47. // give CPU a hint that this thread is in a "spin-wait" loop
  48. // delays the next instruction's execution for a finite period of time (depends on processor family)
  49. // the CPU is not under demand, parts of the pipeline are no longer being used
  50. // -> reduces the power consumed by the CPU
  51. // -> prevent pipeline stalls
  52. cpu_relax();
  53. } else if ( BOOST_FIBERS_SPIN_BEFORE_YIELD > retries) {
  54. // std::this_thread::sleep_for( 0us) has a fairly long instruction path length,
  55. // combined with an expensive ring3 to ring 0 transition costing about 1000 cycles
  56. // std::this_thread::sleep_for( 0us) lets give up this_thread the remaining part of its time slice
  57. // if and only if a thread of equal or greater priority is ready to run
  58. static constexpr std::chrono::microseconds us0{ 0 };
  59. std::this_thread::sleep_for( us0);
  60. } else {
  61. // std::this_thread::yield() allows this_thread to give up the remaining part of its time slice,
  62. // but only to another thread on the same processor
  63. // instead of constant checking, a thread only checks if no other useful work is pending
  64. std::this_thread::yield();
  65. }
  66. #else
  67. // std::this_thread::yield() allows this_thread to give up the remaining part of its time slice,
  68. // but only to another thread on the same processor
  69. // instead of constant checking, a thread only checks if no other useful work is pending
  70. std::this_thread::yield();
  71. #endif
  72. } else if ( ! value_.compare_exchange_strong( expected, 1, std::memory_order_acquire) ) {
  73. // spinlock now contended
  74. // utilize 'Binary Exponential Backoff' algorithm
  75. // linear_congruential_engine is a random number engine based on Linear congruential generator (LCG)
  76. std::uniform_int_distribution< std::int32_t > distribution{
  77. 0, static_cast< std::int32_t >( 1) << (std::min)(collisions, static_cast< std::int32_t >( BOOST_FIBERS_CONTENTION_WINDOW_THRESHOLD)) };
  78. const std::int32_t z = distribution( generator);
  79. ++collisions;
  80. for ( std::int32_t i = 0; i < z; ++i) {
  81. // -> reduces the power consumed by the CPU
  82. // -> prevent pipeline stalls
  83. cpu_relax();
  84. }
  85. } else {
  86. // success, lock acquired
  87. return;
  88. }
  89. }
  90. // failure, lock not acquired
  91. // pause via futex
  92. if ( 2 != expected) {
  93. expected = value_.exchange( 2, std::memory_order_acquire);
  94. }
  95. while ( 0 != expected) {
  96. futex_wait( & value_, 2);
  97. expected = value_.exchange( 2, std::memory_order_acquire);
  98. }
  99. }
  100. bool try_lock() noexcept {
  101. std::int32_t expected = 0;
  102. return value_.compare_exchange_strong( expected, 1, std::memory_order_acquire);
  103. }
  104. void unlock() noexcept {
  105. if ( 1 != value_.fetch_sub( 1, std::memory_order_acquire) ) {
  106. value_.store( 0, std::memory_order_release);
  107. futex_wake( & value_);
  108. }
  109. }
  110. };
  111. }}}
  112. #endif // BOOST_FIBERS_spinlock_ttas_futex_FUTEX_H