quick_allocator.hpp

#ifndef BOOST_SMART_PTR_DETAIL_QUICK_ALLOCATOR_HPP_INCLUDED
#define BOOST_SMART_PTR_DETAIL_QUICK_ALLOCATOR_HPP_INCLUDED

// MS compatible compilers support #pragma once

#if defined(_MSC_VER) && (_MSC_VER >= 1020)
# pragma once
#endif
//
//  detail/quick_allocator.hpp
//
//  Copyright (c) 2003 David Abrahams
//  Copyright (c) 2003 Peter Dimov
//
//  Distributed under the Boost Software License, Version 1.0. (See
//  accompanying file LICENSE_1_0.txt or copy at
//  http://www.boost.org/LICENSE_1_0.txt)
//

#include <boost/config.hpp>
#include <boost/smart_ptr/detail/lightweight_mutex.hpp>
#include <boost/type_traits/type_with_alignment.hpp>
#include <boost/type_traits/alignment_of.hpp>

#include <new>      // ::operator new, ::operator delete
#include <cstddef>  // std::size_t
namespace boost
{

namespace detail
{
// Each block serves two purposes: while allocated it holds the user's
// object ("bytes", kept suitably aligned by "aligner"); while free it
// acts as a node in the singly linked free list ("next").

template<unsigned size, unsigned align_> union freeblock
{
    typedef typename boost::type_with_alignment<align_>::type aligner_type;
    aligner_type aligner;
    char bytes[size];
    freeblock * next;
};
template<unsigned size, unsigned align_> struct allocator_impl
{
    typedef freeblock<size, align_> block;

    // It may seem odd to use such small pages.
    //
    // However, on a typical Windows implementation that uses
    // the OS allocator, "normal size" pages interact with the
    // "ordinary" operator new, slowing it down dramatically.
    //
    // 512 byte pages are handled by the small object allocator,
    // and don't interfere with ::new.
    //
    // The other alternative is to use much bigger pages (1M.)
    //
    // It is surprisingly easy to hit pathological behavior by
    // varying the page size. g++ 2.96 on Red Hat Linux 7.2,
    // for example, passionately dislikes 496. 512 seems OK.

#if defined(BOOST_QA_PAGE_SIZE)

    enum { items_per_page = BOOST_QA_PAGE_SIZE / size };

#else

    enum { items_per_page = 512 / size }; // 1048560 / size

#endif
#ifdef BOOST_HAS_THREADS

    // The mutex is placement-constructed into aligned static storage and
    // never destroyed, so it remains usable even during static destruction.

    static lightweight_mutex & mutex()
    {
        static freeblock< sizeof( lightweight_mutex ), boost::alignment_of< lightweight_mutex >::value > fbm;
        static lightweight_mutex * pm = new( &fbm ) lightweight_mutex;
        return *pm;
    }

    // Taking the address of mutex() at namespace scope (see below) forces
    // the mutex to be constructed before main(), before threads can start.

    static lightweight_mutex * mutex_init;

#endif
    static block * free;   // head of the free list
    static block * page;   // current, partially used page
    static unsigned last;  // index of the next unused block in 'page'
    static inline void * alloc()
    {
#ifdef BOOST_HAS_THREADS
        lightweight_mutex::scoped_lock lock( mutex() );
#endif
        if(block * x = free)
        {
            free = x->next;
            return x;
        }
        else
        {
            if(last == items_per_page)
            {
                // "Listen to me carefully: there is no memory leak"
                // -- Scott Meyers, Eff C++ 2nd Ed Item 10
                page = ::new block[items_per_page];
                last = 0;
            }

            return &page[last++];
        }
    }
    static inline void * alloc(std::size_t n)
    {
        if(n != size) // class-specific new called for a derived object
        {
            return ::operator new(n);
        }
        else
        {
#ifdef BOOST_HAS_THREADS
            lightweight_mutex::scoped_lock lock( mutex() );
#endif
            if(block * x = free)
            {
                free = x->next;
                return x;
            }
            else
            {
                if(last == items_per_page)
                {
                    page = ::new block[items_per_page];
                    last = 0;
                }

                return &page[last++];
            }
        }
    }
    static inline void dealloc(void * pv)
    {
        if(pv != 0) // 18.4.1.1/13
        {
#ifdef BOOST_HAS_THREADS
            lightweight_mutex::scoped_lock lock( mutex() );
#endif
            block * pb = static_cast<block *>(pv);
            pb->next = free;
            free = pb;
        }
    }

    static inline void dealloc(void * pv, std::size_t n)
    {
        if(n != size) // class-specific delete called for a derived object
        {
            ::operator delete(pv);
        }
        else if(pv != 0) // 18.4.1.1/13
        {
#ifdef BOOST_HAS_THREADS
            lightweight_mutex::scoped_lock lock( mutex() );
#endif
            block * pb = static_cast<block *>(pv);
            pb->next = free;
            free = pb;
        }
    }
};
#ifdef BOOST_HAS_THREADS

// Force the mutex to be constructed during static initialization.

template<unsigned size, unsigned align_>
  lightweight_mutex * allocator_impl<size, align_>::mutex_init = &allocator_impl<size, align_>::mutex();

#endif

template<unsigned size, unsigned align_>
  freeblock<size, align_> * allocator_impl<size, align_>::free = 0;

template<unsigned size, unsigned align_>
  freeblock<size, align_> * allocator_impl<size, align_>::page = 0;

// Starting 'last' at items_per_page makes the first alloc() allocate a page.

template<unsigned size, unsigned align_>
  unsigned allocator_impl<size, align_>::last = allocator_impl<size, align_>::items_per_page;
template<class T>
struct quick_allocator: public allocator_impl< sizeof(T), boost::alignment_of<T>::value >
{
};

} // namespace detail

} // namespace boost

#endif  // #ifndef BOOST_SMART_PTR_DETAIL_QUICK_ALLOCATOR_HPP_INCLUDED
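
Usage sketch (not part of the header above): a class can route its class-specific
operator new/delete through quick_allocator. The (std::size_t) overloads of alloc
and dealloc exist precisely so that a derived object of a different size falls back
to ::operator new / ::operator delete. The class X below is a hypothetical example,
assuming only the interface shown in this header.

// Hypothetical usage example; X is not part of Boost.
#include <boost/smart_ptr/detail/quick_allocator.hpp>
#include <cstddef>

class X
{
public:

    void * operator new( std::size_t n )
    {
        // For objects of exactly sizeof(X) this pops a block from the free
        // list (or carves one from the current page); other sizes are
        // forwarded to ::operator new.
        return boost::detail::quick_allocator<X>::alloc( n );
    }

    void operator delete( void * p, std::size_t n )
    {
        // Pushes the block back onto the free list, or forwards to
        // ::operator delete for other sizes.
        boost::detail::quick_allocator<X>::dealloc( p, n );
    }

private:

    int value_;
};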