rbtree_best_fit.hpp

  1. //////////////////////////////////////////////////////////////////////////////
  2. //
  3. // (C) Copyright Ion Gaztanaga 2005-2012. Distributed under the Boost
  4. // Software License, Version 1.0. (See accompanying file
  5. // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
  6. //
  7. // See http://www.boost.org/libs/interprocess for documentation.
  8. //
  9. //////////////////////////////////////////////////////////////////////////////
  10. #ifndef BOOST_INTERPROCESS_MEM_ALGO_RBTREE_BEST_FIT_HPP
  11. #define BOOST_INTERPROCESS_MEM_ALGO_RBTREE_BEST_FIT_HPP
  12. #ifndef BOOST_CONFIG_HPP
  13. # include <boost/config.hpp>
  14. #endif
  15. #
  16. #if defined(BOOST_HAS_PRAGMA_ONCE)
  17. # pragma once
  18. #endif
  19. #include <boost/interprocess/detail/config_begin.hpp>
  20. #include <boost/interprocess/detail/workaround.hpp>
  21. // interprocess
  22. #include <boost/interprocess/containers/allocation_type.hpp>
  23. #include <boost/interprocess/exceptions.hpp>
  24. #include <boost/interprocess/interprocess_fwd.hpp>
  25. #include <boost/interprocess/mem_algo/detail/mem_algo_common.hpp>
  26. #include <boost/interprocess/offset_ptr.hpp>
  27. #include <boost/interprocess/sync/scoped_lock.hpp>
  28. // interprocess/detail
  29. #include <boost/interprocess/detail/min_max.hpp>
  30. #include <boost/interprocess/detail/math_functions.hpp>
  31. #include <boost/interprocess/detail/type_traits.hpp>
  32. #include <boost/interprocess/detail/utilities.hpp>
  33. // container
  34. #include <boost/container/detail/multiallocation_chain.hpp>
  35. // container/detail
  36. #include <boost/container/detail/placement_new.hpp>
  37. // move/detail
  38. #include <boost/move/detail/type_traits.hpp> //make_unsigned, alignment_of
  39. // intrusive
  40. #include <boost/intrusive/pointer_traits.hpp>
  41. #include <boost/intrusive/set.hpp>
  42. // other boost
  43. #include <boost/assert.hpp>
  44. #include <boost/static_assert.hpp>
  45. // std
  46. #include <climits>
  47. #include <cstring>
  48. //#define BOOST_INTERPROCESS_RBTREE_BEST_FIT_ABI_V1_HPP
  49. //Define the macro above to maintain an ABI compatible with the original version.
  50. //The ABI had to be updated to fix compatibility issues when
  51. //sharing shared memory between 32 and 64 bit processes.
  52. //!\file
  53. //!Describes a best-fit algorithm based on an intrusive red-black tree used to allocate
  54. //!objects in shared memory. This class is intended as a base class for single segment
  55. //!and multi-segment implementations.
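//
//A minimal usage sketch (an illustration only): the managed segment classes
//normally placement-construct this algorithm at the start of the memory they
//manage; the buffer, sizes and typedef names below are assumptions chosen for
//the example.
//
//   #include <boost/interprocess/mem_algo/rbtree_best_fit.hpp>
//   #include <boost/interprocess/offset_ptr.hpp>
//   #include <boost/interprocess/sync/mutex_family.hpp>
//   #include <new>
//   #include <vector>
//
//   int main()
//   {
//      using namespace boost::interprocess;
//      typedef rbtree_best_fit<null_mutex_family, offset_ptr<void> > algo_t;
//      const std::size_t segment_size = 64*1024;
//      //operator new storage (used by std::vector) is aligned for max_align_t,
//      //which satisfies the default Alignment of this class
//      std::vector<char> buffer(segment_size);
//      algo_t *algo = ::new(&buffer[0]) algo_t(segment_size, 0u);
//      void *p = algo->allocate(100);   //best-fit lookup in the free multiset
//      algo->deallocate(p);             //returns the block to the free multiset
//      return 0;
//   }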
  56. namespace boost {
  57. namespace interprocess {
  58. //!This class implements an algorithm that stores the free nodes in a red-black tree
  59. //!to have logarithmic search/insert times.
  60. template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
  61. class rbtree_best_fit
  62. {
  63. #if !defined(BOOST_INTERPROCESS_DOXYGEN_INVOKED)
  64. //Non-copyable
  65. rbtree_best_fit();
  66. rbtree_best_fit(const rbtree_best_fit &);
  67. rbtree_best_fit &operator=(const rbtree_best_fit &);
  68. private:
  69. struct block_ctrl;
  70. typedef typename boost::intrusive::
  71. pointer_traits<VoidPointer>::template
  72. rebind_pointer<block_ctrl>::type block_ctrl_ptr;
  73. typedef typename boost::intrusive::
  74. pointer_traits<VoidPointer>::template
  75. rebind_pointer<char>::type char_ptr;
  76. #endif //#ifndef BOOST_INTERPROCESS_DOXYGEN_INVOKED
  77. public:
  78. //!Shared mutex family used for the rest of the Interprocess framework
  79. typedef MutexFamily mutex_family;
  80. //!Pointer type to be used with the rest of the Interprocess framework
  81. typedef VoidPointer void_pointer;
  82. typedef ipcdetail::basic_multiallocation_chain<VoidPointer> multiallocation_chain;
  83. typedef typename boost::intrusive::pointer_traits<char_ptr>::difference_type difference_type;
  84. typedef typename boost::container::dtl::make_unsigned<difference_type>::type size_type;
  85. #if !defined(BOOST_INTERPROCESS_DOXYGEN_INVOKED)
  86. private:
  87. typedef typename bi::make_set_base_hook
  88. < bi::void_pointer<VoidPointer>
  89. , bi::optimize_size<true>
  90. , bi::link_mode<bi::normal_link> >::type TreeHook;
  91. struct SizeHolder
  92. {
  93. //!m_prev_size stores the previous block's size and m_size this block's size
  94. //!(both include the block_ctrl header and are expressed in Alignment units)
  95. size_type m_prev_size;
  96. size_type m_size : sizeof(size_type)*CHAR_BIT - 2;
  97. size_type m_prev_allocated : 1;
  98. size_type m_allocated : 1;
  99. };
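//Layout note (a sketch, assuming a 64-bit size_type): the two flag bits are
//taken from the word that holds m_size, so m_size can still represent
//2^62 - 1 Alignment units, far more than any realistic segment size.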
  100. //!Block control structure
  101. struct block_ctrl
  102. : public SizeHolder, public TreeHook
  103. {
  104. block_ctrl()
  105. { this->m_size = 0; this->m_allocated = 0; this->m_prev_allocated = 0; }
  106. friend bool operator<(const block_ctrl &a, const block_ctrl &b)
  107. { return a.m_size < b.m_size; }
  108. friend bool operator==(const block_ctrl &a, const block_ctrl &b)
  109. { return a.m_size == b.m_size; }
  110. };
  111. struct size_block_ctrl_compare
  112. {
  113. bool operator()(size_type size, const block_ctrl &block) const
  114. { return size < block.m_size; }
  115. bool operator()(const block_ctrl &block, size_type size) const
  116. { return block.m_size < size; }
  117. };
  118. //!Shared mutex to protect memory allocate/deallocate
  119. typedef typename MutexFamily::mutex_type mutex_type;
  120. typedef typename bi::make_multiset
  121. <block_ctrl, bi::base_hook<TreeHook> >::type Imultiset;
  122. typedef typename Imultiset::iterator imultiset_iterator;
  123. typedef typename Imultiset::const_iterator imultiset_const_iterator;
  124. //!This struct includes needed data and derives from
  125. //!mutex_type to allow EBO when using null mutex_type
  126. struct header_t : public mutex_type
  127. {
  128. Imultiset m_imultiset;
  129. //!The extra size required by the segment
  130. size_type m_extra_hdr_bytes;
  131. //!Allocated bytes for internal checking
  132. size_type m_allocated;
  133. //!The size of the memory segment
  134. size_type m_size;
  135. } m_header;
  136. friend class ipcdetail::memory_algorithm_common<rbtree_best_fit>;
  137. typedef ipcdetail::memory_algorithm_common<rbtree_best_fit> algo_impl_t;
  138. public:
  139. #endif //#ifndef BOOST_INTERPROCESS_DOXYGEN_INVOKED
  140. //!Constructor. "size" is the total size of the managed memory segment and
  141. //!"extra_hdr_bytes" indicates the number of extra bytes, starting at offset
  142. //!sizeof(rbtree_best_fit), that the allocator must not use at all.
  143. rbtree_best_fit (size_type size, size_type extra_hdr_bytes);
  144. //!Destructor.
  145. ~rbtree_best_fit();
  146. //!Obtains the minimum size needed by the algorithm
  147. static size_type get_min_size (size_type extra_hdr_bytes);
  148. //Functions for single segment management
  149. //!Allocates bytes, returns 0 if there is no more memory
  150. void* allocate (size_type nbytes);
  151. #if !defined(BOOST_INTERPROCESS_DOXYGEN_INVOKED)
  152. //Experimental. Don't use
  153. //!Multiple element allocation, same size
  154. void allocate_many(size_type elem_bytes, size_type num_elements, multiallocation_chain &chain)
  155. {
  156. //-----------------------
  157. boost::interprocess::scoped_lock<mutex_type> guard(m_header);
  158. //-----------------------
  159. algo_impl_t::allocate_many(this, elem_bytes, num_elements, chain);
  160. }
  161. //!Multiple element allocation, different size
  162. void allocate_many(const size_type *elem_sizes, size_type n_elements, size_type sizeof_element, multiallocation_chain &chain)
  163. {
  164. //-----------------------
  165. boost::interprocess::scoped_lock<mutex_type> guard(m_header);
  166. //-----------------------
  167. algo_impl_t::allocate_many(this, elem_sizes, n_elements, sizeof_element, chain);
  168. }
  169. //!Multiple element allocation, different size
  170. void deallocate_many(multiallocation_chain &chain);
  171. #endif //#ifndef BOOST_INTERPROCESS_DOXYGEN_INVOKED
  172. //!Deallocates previously allocated bytes
  173. void deallocate (void *addr);
  174. //!Returns the size of the memory segment
  175. size_type get_size() const;
  176. //!Returns the number of free bytes of the segment
  177. size_type get_free_memory() const;
  178. //!Initializes to zero all the memory that's not in use.
  179. //!This function is normally used for security reasons.
  180. void zero_free_memory();
  181. //!Increases managed memory in
  182. //!extra_size bytes more
  183. void grow(size_type extra_size);
  184. //!Decreases managed memory as much as possible
  185. void shrink_to_fit();
  186. //!Returns true if all allocated memory has been deallocated
  187. bool all_memory_deallocated();
  188. //!Makes an internal sanity check
  189. //!and returns true if success
  190. bool check_sanity();
  191. template<class T>
  192. T * allocation_command (boost::interprocess::allocation_type command, size_type limit_size,
  193. size_type &prefer_in_recvd_out_size, T *&reuse);
  194. void * raw_allocation_command (boost::interprocess::allocation_type command, size_type limit_object,
  195. size_type &prefer_in_recvd_out_size,
  196. void *&reuse_ptr, size_type sizeof_object = 1);
  197. //!Returns the size of the buffer previously allocated pointed by ptr
  198. size_type size(const void *ptr) const;
  199. //!Allocates aligned bytes, returns 0 if there is no more memory.
  200. //!Alignment must be a power of 2
  201. void* allocate_aligned (size_type nbytes, size_type alignment);
  202. #if !defined(BOOST_INTERPROCESS_DOXYGEN_INVOKED)
  203. private:
  204. static size_type priv_first_block_offset_from_this(const void *this_ptr, size_type extra_hdr_bytes);
  205. block_ctrl *priv_first_block();
  206. block_ctrl *priv_end_block();
  207. void* priv_allocation_command(boost::interprocess::allocation_type command, size_type limit_size,
  208. size_type &prefer_in_recvd_out_size, void *&reuse_ptr, size_type sizeof_object);
  209. //!Real allocation algorithm with min allocation option
  210. void * priv_allocate( boost::interprocess::allocation_type command
  211. , size_type limit_size, size_type &prefer_in_recvd_out_size
  212. , void *&reuse_ptr, size_type backwards_multiple = 1);
  213. //!Obtains the block control structure of the user buffer
  214. static block_ctrl *priv_get_block(const void *ptr);
  215. //!Obtains the pointer returned to the user from the block control
  216. static void *priv_get_user_buffer(const block_ctrl *block);
  217. //!Returns the number of total units that a user buffer
  218. //!of "userbytes" bytes really occupies (including header)
  219. static size_type priv_get_total_units(size_type userbytes);
  220. //!Real expand function implementation
  221. bool priv_expand(void *ptr, const size_type min_size, size_type &prefer_in_recvd_out_size);
  222. //!Real expand to both sides implementation
  223. void* priv_expand_both_sides(boost::interprocess::allocation_type command
  224. ,size_type min_size
  225. ,size_type &prefer_in_recvd_out_size
  226. ,void *reuse_ptr
  227. ,bool only_preferred_backwards
  228. ,size_type backwards_multiple);
  229. //!Returns true if the previous block is allocated
  230. bool priv_is_prev_allocated(block_ctrl *ptr);
  231. //!Get a pointer to the "end" block from the first block of the segment
  232. static block_ctrl * priv_end_block(block_ctrl *first_segment_block);
  233. //!Get a pointer to the "first" block from the end block of the segment
  234. static block_ctrl * priv_first_block(block_ctrl *end_segment_block);
  235. //!Get a pointer to the previous block (the previous block must be free)
  236. static block_ctrl * priv_prev_block(block_ctrl *ptr);
  237. //!Get a pointer to the next block
  238. static block_ctrl * priv_next_block(block_ctrl *ptr);
  239. //!Returns true if this block is allocated (not free)
  240. bool priv_is_allocated_block(block_ctrl *ptr);
  241. //!Marks the block as allocated
  242. void priv_mark_as_allocated_block(block_ctrl *ptr);
  243. //!Marks the block as allocated
  244. void priv_mark_new_allocated_block(block_ctrl *ptr)
  245. { return priv_mark_as_allocated_block(ptr); }
  246. //!Marks the block as free
  247. void priv_mark_as_free_block(block_ctrl *ptr);
  248. //!Checks if the block has enough memory and splits/unlinks the block,
  249. //!returning the address to the user
  250. void* priv_check_and_allocate(size_type units
  251. ,block_ctrl* block
  252. ,size_type &received_size);
  253. //!Real deallocation algorithm
  254. void priv_deallocate(void *addr);
  255. //!Makes a new memory portion available for allocation
  256. void priv_add_segment(void *addr, size_type size);
  257. public:
  258. static const size_type Alignment = !MemAlignment
  259. ? size_type(::boost::container::dtl::alignment_of
  260. < ::boost::container::dtl::max_align_t>::value)
  261. : size_type(MemAlignment)
  262. ;
  263. private:
  264. //Due to embedded bits in size, Alignment must be at least 4
  265. BOOST_STATIC_ASSERT((Alignment >= 4));
  266. //Due to rbtree size optimizations, Alignment must have at least pointer alignment
  267. BOOST_STATIC_ASSERT((Alignment >= ::boost::container::dtl::alignment_of<void_pointer>::value));
  268. static const size_type AlignmentMask = (Alignment - 1);
  269. static const size_type BlockCtrlBytes = ipcdetail::ct_rounded_size<sizeof(block_ctrl), Alignment>::value;
  270. static const size_type BlockCtrlUnits = BlockCtrlBytes/Alignment;
  271. static const size_type AllocatedCtrlBytes = ipcdetail::ct_rounded_size<sizeof(SizeHolder), Alignment>::value;
  272. static const size_type AllocatedCtrlUnits = AllocatedCtrlBytes/Alignment;
  273. static const size_type EndCtrlBlockBytes = ipcdetail::ct_rounded_size<sizeof(SizeHolder), Alignment>::value;
  274. static const size_type EndCtrlBlockUnits = EndCtrlBlockBytes/Alignment;
  275. static const size_type MinBlockUnits = BlockCtrlUnits;
  276. static const size_type UsableByPreviousChunk = sizeof(size_type);
  277. //Make sure the maximum alignment is a power of two
  278. BOOST_STATIC_ASSERT((0 == (Alignment & (Alignment - size_type(1u)))));
  279. #endif //#ifndef BOOST_INTERPROCESS_DOXYGEN_INVOKED
  280. public:
  281. static const size_type PayloadPerAllocation = AllocatedCtrlBytes - UsableByPreviousChunk;
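//Overhead sketch (assuming a 64-bit size_type and Alignment == 16): SizeHolder
//occupies 16 bytes, so AllocatedCtrlBytes == 16, UsableByPreviousChunk == 8 and
//PayloadPerAllocation == 8. The next block's m_prev_size field is only read
//while this block is free, so those sizeof(size_type) bytes can be handed to
//the user while the block is allocated.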
  282. };
  283. #if !defined(BOOST_INTERPROCESS_DOXYGEN_INVOKED)
  284. template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
  285. inline typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::size_type
  286. rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>
  287. ::priv_first_block_offset_from_this(const void *this_ptr, size_type extra_hdr_bytes)
  288. {
  289. size_type uint_this = (std::size_t)this_ptr;
  290. size_type main_hdr_end = uint_this + sizeof(rbtree_best_fit) + extra_hdr_bytes;
  291. size_type aligned_main_hdr_end = ipcdetail::get_rounded_size(main_hdr_end, Alignment);
  292. size_type block1_off = aligned_main_hdr_end - uint_this;
  293. algo_impl_t::assert_alignment(aligned_main_hdr_end);
  294. algo_impl_t::assert_alignment(uint_this + block1_off);
  295. return block1_off;
  296. }
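//Worked example (hypothetical numbers): with this == 0x1000,
//sizeof(rbtree_best_fit) == 72, extra_hdr_bytes == 0 and Alignment == 16,
//main_hdr_end == 0x1048, which rounds up to 0x1050, so the returned offset is
//0x50 and the first block starts on an Alignment boundary right after the header.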
  297. template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
  298. void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
  299. priv_add_segment(void *addr, size_type segment_size)
  300. {
  301. //Check alignment
  302. algo_impl_t::check_alignment(addr);
  303. //Check size
  304. BOOST_ASSERT(segment_size >= (BlockCtrlBytes + EndCtrlBlockBytes));
  305. //Initialize the first big block and the "end" node
  306. block_ctrl *first_big_block = ::new(addr, boost_container_new_t())block_ctrl;
  307. first_big_block->m_size = segment_size/Alignment - EndCtrlBlockUnits;
  308. BOOST_ASSERT(first_big_block->m_size >= BlockCtrlUnits);
  309. //The "end" node is just a node of size 0 with the "end" bit set
  310. block_ctrl *end_block = static_cast<block_ctrl*>
  311. (new (reinterpret_cast<char*>(addr) + first_big_block->m_size*Alignment)SizeHolder);
  312. //This will overwrite the prev part of the "end" node
  313. priv_mark_as_free_block (first_big_block);
  314. #ifdef BOOST_INTERPROCESS_RBTREE_BEST_FIT_ABI_V1_HPP
  315. first_big_block->m_prev_size = end_block->m_size =
  316. (reinterpret_cast<char*>(first_big_block) - reinterpret_cast<char*>(end_block))/Alignment;
  317. #else
  318. first_big_block->m_prev_size = end_block->m_size =
  319. (reinterpret_cast<char*>(end_block) - reinterpret_cast<char*>(first_big_block))/Alignment;
  320. #endif
  321. end_block->m_allocated = 1;
  322. first_big_block->m_prev_allocated = 1;
  323. BOOST_ASSERT(priv_next_block(first_big_block) == end_block);
  324. BOOST_ASSERT(priv_prev_block(end_block) == first_big_block);
  325. BOOST_ASSERT(priv_first_block() == first_big_block);
  326. BOOST_ASSERT(priv_end_block() == end_block);
  327. //Some checks to validate the algorithm, since it makes some assumptions
  328. //to optimize the space wasted in bookkeeping:
  329. //Check that the size fields of the header are placed before the rbtree hook
  330. BOOST_ASSERT(static_cast<void*>(static_cast<SizeHolder*>(first_big_block))
  331. < static_cast<void*>(static_cast<TreeHook*>(first_big_block)));
  332. //Insert it in the intrusive containers
  333. m_header.m_imultiset.insert(*first_big_block);
  334. }
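//Resulting layout of the added segment (a sketch):
//   [ first_big_block: free, inserted in m_imultiset,
//     m_size == segment_size/Alignment - EndCtrlBlockUnits ]
//   [ "end" block: m_allocated == 1 ]
//The end block's m_size and the first block's m_prev_size both store the
//distance (in Alignment units) between the two blocks, so each one can be
//reached from the other without extra bookkeeping.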
  335. template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
  336. inline typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *
  337. rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>
  338. ::priv_first_block()
  339. {
  340. size_type block1_off = priv_first_block_offset_from_this(this, m_header.m_extra_hdr_bytes);
  341. return reinterpret_cast<block_ctrl *>(reinterpret_cast<char*>(this) + block1_off);
  342. }
  343. template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
  344. inline typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *
  345. rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>
  346. ::priv_end_block()
  347. {
  348. size_type block1_off = priv_first_block_offset_from_this(this, m_header.m_extra_hdr_bytes);
  349. const size_type original_first_block_size = m_header.m_size/Alignment*Alignment - block1_off/Alignment*Alignment - EndCtrlBlockBytes;
  350. block_ctrl *end_block = reinterpret_cast<block_ctrl*>
  351. (reinterpret_cast<char*>(this) + block1_off + original_first_block_size);
  352. return end_block;
  353. }
  354. template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
  355. inline rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
  356. rbtree_best_fit(size_type segment_size, size_type extra_hdr_bytes)
  357. {
  358. //Initialize the header
  359. m_header.m_allocated = 0;
  360. m_header.m_size = segment_size;
  361. m_header.m_extra_hdr_bytes = extra_hdr_bytes;
  362. //Now calculate the offset of the first big block that will
  363. //cover the whole segment
  364. BOOST_ASSERT(get_min_size(extra_hdr_bytes) <= segment_size);
  365. size_type block1_off = priv_first_block_offset_from_this(this, extra_hdr_bytes);
  366. priv_add_segment(reinterpret_cast<char*>(this) + block1_off, segment_size - block1_off);
  367. }
  368. template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
  369. inline rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::~rbtree_best_fit()
  370. {
  371. //There is a memory leak!
  372. // BOOST_ASSERT(m_header.m_allocated == 0);
  373. // BOOST_ASSERT(m_header.m_root.m_next->m_next == block_ctrl_ptr(&m_header.m_root));
  374. }
  375. template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
  376. void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::grow(size_type extra_size)
  377. {
  378. //Get the address of the first block
  379. block_ctrl *first_block = priv_first_block();
  380. block_ctrl *old_end_block = priv_end_block();
  381. size_type old_border_offset = (size_type)(reinterpret_cast<char*>(old_end_block) -
  382. reinterpret_cast<char*>(this)) + EndCtrlBlockBytes;
  383. //Update managed buffer's size
  384. m_header.m_size += extra_size;
  385. //We need at least MinBlockUnits blocks to create a new block
  386. if((m_header.m_size - old_border_offset) < MinBlockUnits){
  387. return;
  388. }
  389. //Now create a new block between the old end and the new end
  390. size_type align_offset = (m_header.m_size - old_border_offset)/Alignment;
  391. block_ctrl *new_end_block = reinterpret_cast<block_ctrl*>
  392. (reinterpret_cast<char*>(old_end_block) + align_offset*Alignment);
  393. //the last and first block are special:
  394. //new_end_block->m_size & first_block->m_prev_size store the absolute distance
  395. //between them
  396. new_end_block->m_allocated = 1;
  397. #ifdef BOOST_INTERPROCESS_RBTREE_BEST_FIT_ABI_V1_HPP
  398. new_end_block->m_size = (reinterpret_cast<char*>(first_block) -
  399. reinterpret_cast<char*>(new_end_block))/Alignment;
  400. #else
  401. new_end_block->m_size = (reinterpret_cast<char*>(new_end_block) -
  402. reinterpret_cast<char*>(first_block))/Alignment;
  403. #endif
  404. first_block->m_prev_size = new_end_block->m_size;
  405. first_block->m_prev_allocated = 1;
  406. BOOST_ASSERT(new_end_block == priv_end_block());
  407. //The old end block is the new block
  408. block_ctrl *new_block = old_end_block;
  409. new_block->m_size = (reinterpret_cast<char*>(new_end_block) -
  410. reinterpret_cast<char*>(new_block))/Alignment;
  411. BOOST_ASSERT(new_block->m_size >= BlockCtrlUnits);
  412. priv_mark_as_allocated_block(new_block);
  413. BOOST_ASSERT(priv_next_block(new_block) == new_end_block);
  414. m_header.m_allocated += (size_type)new_block->m_size*Alignment;
  415. //Now deallocate the newly created block
  416. this->priv_deallocate(priv_get_user_buffer(new_block));
  417. }
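//Note (a summary of the code above): growing reuses the old end marker as a
//brand new allocated block spanning the added space and then deallocates it,
//so the normal deallocation path inserts the new space into the free tree and
//merges it with the preceding free block if there is one.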
  418. template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
  419. void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::shrink_to_fit()
  420. {
  421. //Get the address of the first block
  422. block_ctrl *first_block = priv_first_block();
  423. algo_impl_t::assert_alignment(first_block);
  424. //block_ctrl *old_end_block = priv_end_block(first_block);
  425. block_ctrl *old_end_block = priv_end_block();
  426. algo_impl_t::assert_alignment(old_end_block);
  427. size_type old_end_block_size = old_end_block->m_size;
  428. void *unique_buffer = 0;
  429. block_ctrl *last_block;
  430. //Check if no memory is allocated between the first and last block
  431. if(priv_next_block(first_block) == old_end_block){
  432. //If so check if we can allocate memory
  433. size_type ignore_recvd = 0;
  434. void *ignore_reuse = 0;
  435. unique_buffer = priv_allocate(boost::interprocess::allocate_new, 0, ignore_recvd, ignore_reuse);
  436. //If not, return, we can't shrink
  437. if(!unique_buffer)
  438. return;
  439. //If we can, mark the position just after the new allocation as the new end
  440. algo_impl_t::assert_alignment(unique_buffer);
  441. block_ctrl *unique_block = priv_get_block(unique_buffer);
  442. BOOST_ASSERT(priv_is_allocated_block(unique_block));
  443. algo_impl_t::assert_alignment(unique_block);
  444. last_block = priv_next_block(unique_block);
  445. BOOST_ASSERT(!priv_is_allocated_block(last_block));
  446. algo_impl_t::assert_alignment(last_block);
  447. }
  448. else{
  449. //If memory is allocated, check if the last block is allocated
  450. if(priv_is_prev_allocated(old_end_block))
  451. return;
  452. //If not, the free block just before the end will become the new end block
  453. last_block = priv_prev_block(old_end_block);
  454. }
  455. size_type last_block_size = last_block->m_size;
  456. //Erase the block from the free tree, since it will become the new end block
  457. m_header.m_imultiset.erase(Imultiset::s_iterator_to(*last_block));
  458. size_type shrunk_border_offset = (size_type)(reinterpret_cast<char*>(last_block) -
  459. reinterpret_cast<char*>(this)) + EndCtrlBlockBytes;
  460. block_ctrl *new_end_block = last_block;
  461. algo_impl_t::assert_alignment(new_end_block);
  462. //Write new end block attributes
  463. #ifdef BOOST_INTERPROCESS_RBTREE_BEST_FIT_ABI_V1_HPP
  464. new_end_block->m_size = first_block->m_prev_size =
  465. (reinterpret_cast<char*>(first_block) - reinterpret_cast<char*>(new_end_block))/Alignment;
  466. #else
  467. new_end_block->m_size = first_block->m_prev_size =
  468. (reinterpret_cast<char*>(new_end_block) - reinterpret_cast<char*>(first_block))/Alignment;
  469. #endif
  470. new_end_block->m_allocated = 1;
  471. (void)last_block_size;
  472. (void)old_end_block_size;
  473. BOOST_ASSERT(new_end_block->m_size == (old_end_block_size - last_block_size));
  474. //Update managed buffer's size
  475. m_header.m_size = shrunk_border_offset;
  476. BOOST_ASSERT(priv_end_block() == new_end_block);
  477. if(unique_buffer)
  478. priv_deallocate(unique_buffer);
  479. }
  480. template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
  481. inline typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::size_type
  482. rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::get_size() const
  483. { return m_header.m_size; }
  484. template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
  485. typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::size_type
  486. rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::get_free_memory() const
  487. {
  488. return m_header.m_size - m_header.m_allocated -
  489. priv_first_block_offset_from_this(this, m_header.m_extra_hdr_bytes);
  490. }
  491. template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
  492. typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::size_type
  493. rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
  494. get_min_size (size_type extra_hdr_bytes)
  495. {
  496. return (algo_impl_t::ceil_units(sizeof(rbtree_best_fit)) +
  497. algo_impl_t::ceil_units(extra_hdr_bytes) +
  498. MinBlockUnits + EndCtrlBlockUnits)*Alignment;
  499. }
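//Worked example (hypothetical sizes, Alignment == 16): if sizeof(rbtree_best_fit)
//rounds up to 5 units and extra_hdr_bytes == 0, the minimum segment size is
//(5 + 0 + MinBlockUnits + EndCtrlBlockUnits)*16 bytes; anything smaller cannot
//hold the header, one minimal free block and the "end" marker.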
  500. template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
  501. inline bool rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
  502. all_memory_deallocated()
  503. {
  504. //-----------------------
  505. boost::interprocess::scoped_lock<mutex_type> guard(m_header);
  506. //-----------------------
  507. size_type block1_off =
  508. priv_first_block_offset_from_this(this, m_header.m_extra_hdr_bytes);
  509. return m_header.m_allocated == 0 &&
  510. m_header.m_imultiset.begin() != m_header.m_imultiset.end() &&
  511. (++m_header.m_imultiset.begin()) == m_header.m_imultiset.end()
  512. && m_header.m_imultiset.begin()->m_size ==
  513. (m_header.m_size - block1_off - EndCtrlBlockBytes)/Alignment;
  514. }
  515. template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
  516. bool rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
  517. check_sanity()
  518. {
  519. //-----------------------
  520. boost::interprocess::scoped_lock<mutex_type> guard(m_header);
  521. //-----------------------
  522. imultiset_iterator ib(m_header.m_imultiset.begin()), ie(m_header.m_imultiset.end());
  523. size_type free_memory = 0;
  524. //Iterate through all blocks obtaining their size
  525. for(; ib != ie; ++ib){
  526. free_memory += (size_type)ib->m_size*Alignment;
  527. algo_impl_t::assert_alignment(&*ib);
  528. if(!algo_impl_t::check_alignment(&*ib))
  529. return false;
  530. }
  531. //Check allocated bytes are less than size
  532. if(m_header.m_allocated > m_header.m_size){
  533. return false;
  534. }
  535. size_type block1_off =
  536. priv_first_block_offset_from_this(this, m_header.m_extra_hdr_bytes);
  537. //Check free bytes are less than size
  538. if(free_memory > (m_header.m_size - block1_off)){
  539. return false;
  540. }
  541. return true;
  542. }
  543. template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
  544. inline void* rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
  545. allocate(size_type nbytes)
  546. {
  547. //-----------------------
  548. boost::interprocess::scoped_lock<mutex_type> guard(m_header);
  549. //-----------------------
  550. size_type ignore_recvd = nbytes;
  551. void *ignore_reuse = 0;
  552. return priv_allocate(boost::interprocess::allocate_new, nbytes, ignore_recvd, ignore_reuse);
  553. }
  554. template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
  555. inline void* rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
  556. allocate_aligned(size_type nbytes, size_type alignment)
  557. {
  558. //-----------------------
  559. boost::interprocess::scoped_lock<mutex_type> guard(m_header);
  560. //-----------------------
  561. return algo_impl_t::allocate_aligned(this, nbytes, alignment);
  562. }
  563. template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
  564. template<class T>
  565. inline T* rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
  566. allocation_command (boost::interprocess::allocation_type command, size_type limit_size,
  567. size_type &prefer_in_recvd_out_size, T *&reuse)
  568. {
  569. void* raw_reuse = reuse;
  570. void* const ret = priv_allocation_command(command, limit_size, prefer_in_recvd_out_size, raw_reuse, sizeof(T));
  571. reuse = static_cast<T*>(raw_reuse);
  572. BOOST_ASSERT(0 == ((std::size_t)ret % ::boost::container::dtl::alignment_of<T>::value));
  573. return static_cast<T*>(ret);
  574. }
  575. template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
  576. inline void* rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
  577. raw_allocation_command (boost::interprocess::allocation_type command, size_type limit_objects,
  578. size_type &prefer_in_recvd_out_objects, void *&reuse_ptr, size_type sizeof_object)
  579. {
  580. size_type const preferred_objects = prefer_in_recvd_out_objects;
  581. if(!sizeof_object)
  582. return reuse_ptr = 0, static_cast<void*>(0);
  583. if(command & boost::interprocess::try_shrink_in_place){
  584. if(!reuse_ptr) return static_cast<void*>(0);
  585. const bool success = algo_impl_t::try_shrink
  586. ( this, reuse_ptr, limit_objects*sizeof_object
  587. , prefer_in_recvd_out_objects = preferred_objects*sizeof_object);
  588. prefer_in_recvd_out_objects /= sizeof_object;
  589. return success ? reuse_ptr : 0;
  590. }
  591. else{
  592. return priv_allocation_command
  593. (command, limit_objects, prefer_in_recvd_out_objects, reuse_ptr, sizeof_object);
  594. }
  595. }
  596. template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
  597. inline void* rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
  598. priv_allocation_command (boost::interprocess::allocation_type command, size_type limit_size,
  599. size_type &prefer_in_recvd_out_size,
  600. void *&reuse_ptr, size_type sizeof_object)
  601. {
  602. void* ret;
  603. size_type const preferred_size = prefer_in_recvd_out_size;
  604. size_type const max_count = m_header.m_size/sizeof_object;
  605. if(limit_size > max_count || preferred_size > max_count){
  606. return reuse_ptr = 0, static_cast<void*>(0);
  607. }
  608. size_type l_size = limit_size*sizeof_object;
  609. size_type p_size = preferred_size*sizeof_object;
  610. size_type r_size;
  611. {
  612. //-----------------------
  613. boost::interprocess::scoped_lock<mutex_type> guard(m_header);
  614. //-----------------------
  615. ret = priv_allocate(command, l_size, r_size = p_size, reuse_ptr, sizeof_object);
  616. }
  617. prefer_in_recvd_out_size = r_size/sizeof_object;
  618. return ret;
  619. }
  620. template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
  621. typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::size_type
  622. rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
  623. size(const void *ptr) const
  624. {
  625. //We need no synchronization since this block's size is not going
  626. //to be modified by anyone else
  627. //Obtain the real size of the block
  628. return ((size_type)priv_get_block(ptr)->m_size - AllocatedCtrlUnits)*Alignment + UsableByPreviousChunk;
  629. }
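//Worked example (assuming a 64-bit size_type, Alignment == 16 and therefore
//AllocatedCtrlUnits == 1): a 7-unit allocated block reports
//(7 - 1)*16 + 8 == 104 usable bytes, the rounded-up size actually granted for
//a 100-byte request.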
  630. template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
  631. inline void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::zero_free_memory()
  632. {
  633. //-----------------------
  634. boost::interprocess::scoped_lock<mutex_type> guard(m_header);
  635. //-----------------------
  636. imultiset_iterator ib(m_header.m_imultiset.begin()), ie(m_header.m_imultiset.end());
  637. //Iterate through all blocks obtaining their size
  638. while(ib != ie){
  639. //Just clear the memory part reserved for the user
  640. volatile char *ptr = reinterpret_cast<char*>(&*ib) + BlockCtrlBytes;
  641. size_type s = (size_type)ib->m_size*Alignment - BlockCtrlBytes;
  642. while(s--){
  643. *ptr++ = 0;
  644. }
  645. //This surprisingly is optimized out by Visual C++ 7.1 in release mode!
  646. //std::memset( reinterpret_cast<char*>(&*ib) + BlockCtrlBytes
  647. // , 0
  648. // , ib->m_size*Alignment - BlockCtrlBytes);
  649. ++ib;
  650. }
  651. }
  652. template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
  653. void* rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
  654. priv_expand_both_sides(boost::interprocess::allocation_type command
  655. ,size_type min_size
  656. ,size_type &prefer_in_recvd_out_size
  657. ,void *reuse_ptr
  658. ,bool only_preferred_backwards
  659. ,size_type backwards_multiple)
  660. {
  661. size_type const preferred_size = prefer_in_recvd_out_size;
  662. algo_impl_t::assert_alignment(reuse_ptr);
  663. if(command & boost::interprocess::expand_fwd){
  664. if(priv_expand(reuse_ptr, min_size, prefer_in_recvd_out_size = preferred_size))
  665. return reuse_ptr;
  666. }
  667. else{
  668. prefer_in_recvd_out_size = this->size(reuse_ptr);
  669. if(prefer_in_recvd_out_size >= preferred_size || prefer_in_recvd_out_size >= min_size)
  670. return reuse_ptr;
  671. }
  672. if(backwards_multiple){
  673. BOOST_ASSERT(0 == (min_size % backwards_multiple));
  674. BOOST_ASSERT(0 == (preferred_size % backwards_multiple));
  675. }
  676. if(command & boost::interprocess::expand_bwd){
  677. //Obtain the real size of the block
  678. block_ctrl *reuse = priv_get_block(reuse_ptr);
  679. //Sanity check
  680. algo_impl_t::assert_alignment(reuse);
  681. block_ctrl *prev_block;
  682. //If the previous block is not free, there is nothing to do
  683. if(priv_is_prev_allocated(reuse)){
  684. return 0;
  685. }
  686. prev_block = priv_prev_block(reuse);
  687. BOOST_ASSERT(!priv_is_allocated_block(prev_block));
  688. //Some sanity checks
  689. BOOST_ASSERT(prev_block->m_size == reuse->m_prev_size);
  690. algo_impl_t::assert_alignment(prev_block);
  691. size_type needs_backwards_aligned;
  692. size_type lcm;
  693. if(!algo_impl_t::calculate_lcm_and_needs_backwards_lcmed
  694. ( backwards_multiple
  695. , prefer_in_recvd_out_size
  696. , only_preferred_backwards ? preferred_size : min_size
  697. , lcm, needs_backwards_aligned)){
  698. return 0;
  699. }
  700. //Check if previous block has enough size
  701. if(size_type(prev_block->m_size*Alignment) >= needs_backwards_aligned){
  702. //Now take all next space. This will succeed
  703. if(command & boost::interprocess::expand_fwd){
  704. size_type received_size2;
  705. if(!priv_expand(reuse_ptr, prefer_in_recvd_out_size, received_size2 = prefer_in_recvd_out_size)){
  706. BOOST_ASSERT(0);
  707. }
  708. BOOST_ASSERT(prefer_in_recvd_out_size == received_size2);
  709. }
  710. //We need a minimum size to split the previous one
  711. if(prev_block->m_size >= (needs_backwards_aligned/Alignment + BlockCtrlUnits)){
  712. block_ctrl *new_block = reinterpret_cast<block_ctrl *>
  713. (reinterpret_cast<char*>(reuse) - needs_backwards_aligned);
  714. //Free old previous buffer
  715. new_block->m_size =
  716. AllocatedCtrlUnits + (needs_backwards_aligned + (prefer_in_recvd_out_size - UsableByPreviousChunk))/Alignment;
  717. BOOST_ASSERT(new_block->m_size >= BlockCtrlUnits);
  718. priv_mark_as_allocated_block(new_block);
  719. prev_block->m_size = (reinterpret_cast<char*>(new_block) -
  720. reinterpret_cast<char*>(prev_block))/Alignment;
  721. BOOST_ASSERT(prev_block->m_size >= BlockCtrlUnits);
  722. priv_mark_as_free_block(prev_block);
  723. //Update the old previous block in the free blocks tree
  724. //If the new size fulfills tree invariants do nothing,
  725. //otherwise erase() + insert()
  726. {
  727. imultiset_iterator prev_block_it(Imultiset::s_iterator_to(*prev_block));
  728. imultiset_iterator was_smaller_it(prev_block_it);
  729. if(prev_block_it != m_header.m_imultiset.begin() &&
  730. (--(was_smaller_it = prev_block_it))->m_size > prev_block->m_size){
  731. m_header.m_imultiset.erase(prev_block_it);
  732. m_header.m_imultiset.insert(m_header.m_imultiset.begin(), *prev_block);
  733. }
  734. }
  735. prefer_in_recvd_out_size = needs_backwards_aligned + prefer_in_recvd_out_size;
  736. m_header.m_allocated += needs_backwards_aligned;
  737. //Check alignment
  738. algo_impl_t::assert_alignment(new_block);
  739. //If the backwards expansion has remaining bytes in the
  740. //first bytes, fill them with a pattern
  741. void *p = priv_get_user_buffer(new_block);
  742. void *user_ptr = reinterpret_cast<char*>(p);
  743. BOOST_ASSERT((static_cast<char*>(reuse_ptr) - static_cast<char*>(user_ptr)) % backwards_multiple == 0);
  744. algo_impl_t::assert_alignment(user_ptr);
  745. return user_ptr;
  746. }
  747. //Check if there is no place to create a new block and
  748. //the whole new block is multiple of the backwards expansion multiple
  749. else if(prev_block->m_size >= needs_backwards_aligned/Alignment &&
  750. 0 == ((prev_block->m_size*Alignment) % lcm)) {
  751. //Erase old previous block, since we will change it
  752. m_header.m_imultiset.erase(Imultiset::s_iterator_to(*prev_block));
  753. //Just merge the whole previous block
  754. //prev_block->m_size*Alignment is multiple of lcm (and backwards_multiple)
  755. prefer_in_recvd_out_size = prefer_in_recvd_out_size + (size_type)prev_block->m_size*Alignment;
  756. m_header.m_allocated += (size_type)prev_block->m_size*Alignment;
  757. //Now update sizes
  758. prev_block->m_size = prev_block->m_size + reuse->m_size;
  759. BOOST_ASSERT(prev_block->m_size >= BlockCtrlUnits);
  760. priv_mark_as_allocated_block(prev_block);
  761. //If the backwards expansion has remaining bytes in the
  762. //first bytes, fill them with a pattern
  763. void *user_ptr = priv_get_user_buffer(prev_block);
  764. BOOST_ASSERT((static_cast<char*>(reuse_ptr) - static_cast<char*>(user_ptr)) % backwards_multiple == 0);
  765. algo_impl_t::assert_alignment(user_ptr);
  766. return user_ptr;
  767. }
  768. else{
  769. //Alignment issues
  770. }
  771. }
  772. }
  773. return 0;
  774. }
  775. template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
  776. inline void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
  777. deallocate_many(typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::multiallocation_chain &chain)
  778. {
  779. //-----------------------
  780. boost::interprocess::scoped_lock<mutex_type> guard(m_header);
  781. //-----------------------
  782. algo_impl_t::deallocate_many(this, chain);
  783. }
  784. template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
  785. void * rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
  786. priv_allocate(boost::interprocess::allocation_type command
  787. ,size_type limit_size
  788. ,size_type &prefer_in_recvd_out_size
  789. ,void *&reuse_ptr
  790. ,size_type backwards_multiple)
  791. {
  792. size_type const preferred_size = prefer_in_recvd_out_size;
  793. if(command & boost::interprocess::shrink_in_place){
  794. if(!reuse_ptr) return static_cast<void*>(0);
  795. bool success =
  796. algo_impl_t::shrink(this, reuse_ptr, limit_size, prefer_in_recvd_out_size = preferred_size);
  797. return success ? reuse_ptr : 0;
  798. }
  799. prefer_in_recvd_out_size = 0;
  800. if(limit_size > preferred_size)
  801. return reuse_ptr = 0, static_cast<void*>(0);
  802. //Number of units to request (including block_ctrl header)
  803. size_type preferred_units = priv_get_total_units(preferred_size);
  804. //Number of units to request (including block_ctrl header)
  805. size_type limit_units = priv_get_total_units(limit_size);
  806. //Expand in place
  807. prefer_in_recvd_out_size = preferred_size;
  808. if(reuse_ptr && (command & (boost::interprocess::expand_fwd | boost::interprocess::expand_bwd))){
  809. void *ret = priv_expand_both_sides
  810. (command, limit_size, prefer_in_recvd_out_size, reuse_ptr, true, backwards_multiple);
  811. if(ret)
  812. return ret;
  813. }
  814. if(command & boost::interprocess::allocate_new){
  815. size_block_ctrl_compare comp;
  816. imultiset_iterator it(m_header.m_imultiset.lower_bound(preferred_units, comp));
  817. if(it != m_header.m_imultiset.end()){
  818. return reuse_ptr = 0, this->priv_check_and_allocate
  819. (preferred_units, ipcdetail::to_raw_pointer(&*it), prefer_in_recvd_out_size);
  820. }
  821. if(it != m_header.m_imultiset.begin()&&
  822. (--it)->m_size >= limit_units){
  823. return reuse_ptr = 0, this->priv_check_and_allocate
  824. (it->m_size, ipcdetail::to_raw_pointer(&*it), prefer_in_recvd_out_size);
  825. }
  826. }
  827. //Now try to expand both sides with min size
  828. if(reuse_ptr && (command & (boost::interprocess::expand_fwd | boost::interprocess::expand_bwd))){
  829. return priv_expand_both_sides
  830. (command, limit_size, prefer_in_recvd_out_size = preferred_size, reuse_ptr, false, backwards_multiple);
  831. }
  832. return reuse_ptr = 0, static_cast<void*>(0);
  833. }
  834. template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
  835. inline
  836. typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *
  837. rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_get_block(const void *ptr)
  838. {
  839. return const_cast<block_ctrl*>
  840. (reinterpret_cast<const block_ctrl*>
  841. (reinterpret_cast<const char*>(ptr) - AllocatedCtrlBytes));
  842. }
  843. template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
  844. inline
  845. void *rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
  846. priv_get_user_buffer(const typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *block)
  847. { return const_cast<char*>(reinterpret_cast<const char*>(block) + AllocatedCtrlBytes); }
  848. template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
  849. inline typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::size_type
  850. rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
  851. priv_get_total_units(size_type userbytes)
  852. {
  853. if(userbytes < UsableByPreviousChunk)
  854. userbytes = UsableByPreviousChunk;
  855. size_type units = ipcdetail::get_rounded_size(userbytes - UsableByPreviousChunk, Alignment)/Alignment + AllocatedCtrlUnits;
  856. if(units < BlockCtrlUnits) units = BlockCtrlUnits;
  857. return units;
  858. }
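//Worked example (assuming a 64-bit size_type and Alignment == 16, so
//UsableByPreviousChunk == 8 and AllocatedCtrlUnits == 1): a request of 100
//user bytes becomes get_rounded_size(92, 16)/16 + 1 == 6 + 1 == 7 units,
//i.e. 112 bytes including the allocation header.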
  859. template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
  860. bool rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
  861. priv_expand (void *ptr, const size_type min_size, size_type &prefer_in_recvd_out_size)
  862. {
  863. size_type const preferred_size = prefer_in_recvd_out_size;
  864. //Obtain the real size of the block
  865. block_ctrl *block = priv_get_block(ptr);
  866. size_type old_block_units = block->m_size;
  867. //The block must be marked as allocated and the sizes must be equal
  868. BOOST_ASSERT(priv_is_allocated_block(block));
  869. //Put this to a safe value
  870. prefer_in_recvd_out_size = (old_block_units - AllocatedCtrlUnits)*Alignment + UsableByPreviousChunk;
  871. if(prefer_in_recvd_out_size >= preferred_size || prefer_in_recvd_out_size >= min_size)
  872. return true;
  873. //Now translate it to Alignment units
  874. const size_type min_user_units = algo_impl_t::ceil_units(min_size - UsableByPreviousChunk);
  875. const size_type preferred_user_units = algo_impl_t::ceil_units(preferred_size - UsableByPreviousChunk);
  876. //Some parameter checks
  877. BOOST_ASSERT(min_user_units <= preferred_user_units);
  878. block_ctrl *next_block;
  879. if(priv_is_allocated_block(next_block = priv_next_block(block))){
  880. return prefer_in_recvd_out_size >= min_size;
  881. }
  882. algo_impl_t::assert_alignment(next_block);
  883. //Is "block" + "next_block" big enough?
  884. const size_type merged_units = old_block_units + (size_type)next_block->m_size;
  885. //Now get the expansion size
  886. const size_type merged_user_units = merged_units - AllocatedCtrlUnits;
  887. if(merged_user_units < min_user_units){
  888. prefer_in_recvd_out_size = merged_units*Alignment - UsableByPreviousChunk;
  889. return false;
  890. }
  891. //Now get the maximum size the user can allocate
  892. size_type intended_user_units = (merged_user_units < preferred_user_units) ?
  893. merged_user_units : preferred_user_units;
  894. //These are total units of the merged block (supposing the next block can be split)
  895. const size_type intended_units = AllocatedCtrlUnits + intended_user_units;
  896. //Check if we can split the next one in two parts
  897. if((merged_units - intended_units) >= BlockCtrlUnits){
  898. //This block is bigger than needed, split it in
  899. //two blocks, the first one will be merged and
  900. //the second's size will be the remaining space
  901. BOOST_ASSERT(next_block->m_size == priv_next_block(next_block)->m_prev_size);
  902. const size_type rem_units = merged_units - intended_units;
  903. //Check if we need to update the old next block in the free blocks tree
  904. //If the new size fulfills tree invariants, we just need to replace the node
  905. //(the block start has been displaced), otherwise erase() + insert().
  906. //
  907. //This fixup must be done in two parts, because the new next block might
  908. //overwrite the tree hook of the old next block. So we first erase the
  909. //old if needed and we'll insert the new one after creating the new next
  910. imultiset_iterator old_next_block_it(Imultiset::s_iterator_to(*next_block));
  911. const bool size_invariants_broken =
  912. (next_block->m_size - rem_units ) < BlockCtrlUnits ||
  913. (old_next_block_it != m_header.m_imultiset.begin() &&
  914. (--imultiset_iterator(old_next_block_it))->m_size > rem_units);
  915. if(size_invariants_broken){
  916. m_header.m_imultiset.erase(old_next_block_it);
  917. }
  918. //This is the remaining block
  919. block_ctrl *rem_block = ::new(reinterpret_cast<block_ctrl*>
  920. (reinterpret_cast<char*>(block) + intended_units*Alignment), boost_container_new_t())block_ctrl;
  921. rem_block->m_size = rem_units;
  922. algo_impl_t::assert_alignment(rem_block);
  923. BOOST_ASSERT(rem_block->m_size >= BlockCtrlUnits);
  924. priv_mark_as_free_block(rem_block);
  925. //Now the second part of the fixup
  926. if(size_invariants_broken)
  927. m_header.m_imultiset.insert(m_header.m_imultiset.begin(), *rem_block);
  928. else
  929. m_header.m_imultiset.replace_node(old_next_block_it, *rem_block);
  930. //Write the new length
  931. block->m_size = intended_user_units + AllocatedCtrlUnits;
  932. BOOST_ASSERT(block->m_size >= BlockCtrlUnits);
  933. m_header.m_allocated += (intended_units - old_block_units)*Alignment;
  934. }
  935. //There is no free space to create a new node: just merge both blocks
  936. else{
  937. //Now we have to update the data in the tree
  938. m_header.m_imultiset.erase(Imultiset::s_iterator_to(*next_block));
  939. //Write the new length
  940. block->m_size = merged_units;
  941. BOOST_ASSERT(block->m_size >= BlockCtrlUnits);
  942. m_header.m_allocated += (merged_units - old_block_units)*Alignment;
  943. }
  944. priv_mark_as_allocated_block(block);
  945. prefer_in_recvd_out_size = ((size_type)block->m_size - AllocatedCtrlUnits)*Alignment + UsableByPreviousChunk;
  946. return true;
  947. }
  948. template<class MutexFamily, class VoidPointer, std::size_t MemAlignment> inline
  949. typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *
  950. rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_prev_block
  951. (typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *ptr)
  952. {
  953. BOOST_ASSERT(!ptr->m_prev_allocated);
  954. return reinterpret_cast<block_ctrl *>
  955. (reinterpret_cast<char*>(ptr) - ptr->m_prev_size*Alignment);
  956. }
  957. template<class MutexFamily, class VoidPointer, std::size_t MemAlignment> inline
  958. typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *
  959. rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_end_block
  960. (typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *first_segment_block)
  961. {
  962. //The first block's logic is different from the rest of the blocks: it stores in m_prev_size the absolute
  963. //distance to the end block
  964. BOOST_ASSERT(first_segment_block->m_prev_allocated);
  965. block_ctrl *end_block = reinterpret_cast<block_ctrl *>
  966. (reinterpret_cast<char*>(first_segment_block) + first_segment_block->m_prev_size*Alignment);
  967. (void)end_block;
  968. BOOST_ASSERT(end_block->m_allocated == 1);
  969. BOOST_ASSERT(end_block->m_size == first_segment_block->m_prev_size);
  970. BOOST_ASSERT(end_block > first_segment_block);
  971. return end_block;
  972. }
  973. template<class MutexFamily, class VoidPointer, std::size_t MemAlignment> inline
  974. typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *
  975. rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_first_block
  976. (typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *end_segment_block)
  977. {
  978. //The first block's logic is different from the rest of the blocks: it stores in m_prev_size the absolute
  979. //distance to the end block
  980. BOOST_ASSERT(end_segment_block->m_allocated);
  981. block_ctrl *first_block = reinterpret_cast<block_ctrl *>
  982. (reinterpret_cast<char*>(end_segment_block) - end_segment_block->m_size*Alignment);
  983. (void)first_block;
  984. BOOST_ASSERT(first_block->m_prev_allocated == 1);
  985. BOOST_ASSERT(first_block->m_prev_size == end_segment_block->m_size);
  986. BOOST_ASSERT(end_segment_block > first_block);
  987. return first_block;
  988. }
  989. template<class MutexFamily, class VoidPointer, std::size_t MemAlignment> inline
  990. typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *
  991. rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_next_block
  992. (typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *ptr)
  993. {
  994. return reinterpret_cast<block_ctrl *>
  995. (reinterpret_cast<char*>(ptr) + ptr->m_size*Alignment);
  996. }
  997. template<class MutexFamily, class VoidPointer, std::size_t MemAlignment> inline
  998. bool rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_is_allocated_block
  999. (typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *block)
  1000. {
  1001. bool allocated = block->m_allocated != 0;
  1002. #ifndef NDEBUG
  1003. if(block != priv_end_block()){
  1004. block_ctrl *next_block = reinterpret_cast<block_ctrl *>
  1005. (reinterpret_cast<char*>(block) + block->m_size*Alignment);
  1006. bool next_block_prev_allocated = next_block->m_prev_allocated != 0;
  1007. (void)next_block_prev_allocated;
  1008. BOOST_ASSERT(allocated == next_block_prev_allocated);
  1009. }
  1010. #endif
  1011. return allocated;
  1012. }

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment> inline
bool rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_is_prev_allocated
      (typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *block)
{
   if(block->m_prev_allocated){
      return true;
   }
   else{
      #ifndef NDEBUG
      if(block != priv_first_block()){
         block_ctrl *prev = priv_prev_block(block);
         (void)prev;
         BOOST_ASSERT(!prev->m_allocated);
         BOOST_ASSERT(prev->m_size == block->m_prev_size);
      }
      #endif
      return false;
   }
}

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment> inline
void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_mark_as_allocated_block
      (typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *block)
{
   block->m_allocated = 1;
   reinterpret_cast<block_ctrl *>
      (reinterpret_cast<char*>(block) + block->m_size*Alignment)->m_prev_allocated = 1;
}

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment> inline
void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_mark_as_free_block
      (typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *block)
{
   block->m_allocated = 0;
   block_ctrl *next_block = priv_next_block(block);
   next_block->m_prev_allocated = 0;
   next_block->m_prev_size = block->m_size;
}
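
//Taken together, the two marking helpers above maintain the boundary-tag
//invariant that the debug checks in priv_is_allocated_block and
//priv_is_prev_allocated rely on. A minimal sketch of the expected state after
//freeing a block b (illustrative only, mirroring the assignments above):
//
//   priv_mark_as_free_block(b);
//   assert(b->m_allocated == 0);
//   assert(priv_next_block(b)->m_prev_allocated == 0);
//   assert(priv_next_block(b)->m_prev_size == b->m_size);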

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment> inline
void* rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_check_and_allocate
   (size_type nunits
   ,typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl* block
   ,size_type &received_size)
{
   size_type upper_nunits = nunits + BlockCtrlUnits;
   imultiset_iterator it_old = Imultiset::s_iterator_to(*block);
   algo_impl_t::assert_alignment(block);

   if (block->m_size >= upper_nunits){
      //This block is bigger than needed: split it in two blocks, the first one
      //of "nunits" units and the second one of "block->m_size - nunits" units
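      //Illustrative numbers (hypothetical, assuming BlockCtrlUnits == 2): a free
      //block with m_size == 10 serving a request of nunits == 6 becomes an
      //allocated block of 6 units plus a remainder block of 10 - 6 == 4 units,
      //both of them >= BlockCtrlUnits as the asserts below require.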
      size_type block_old_size = block->m_size;
      block->m_size = nunits;
      BOOST_ASSERT(block->m_size >= BlockCtrlUnits);

      //This is the remaining block
      block_ctrl *rem_block = ::new(reinterpret_cast<block_ctrl*>
         (reinterpret_cast<char*>(block) + Alignment*nunits), boost_container_new_t())block_ctrl;
      algo_impl_t::assert_alignment(rem_block);
      rem_block->m_size = block_old_size - nunits;
      BOOST_ASSERT(rem_block->m_size >= BlockCtrlUnits);
      priv_mark_as_free_block(rem_block);

      imultiset_iterator it_hint;
      if(it_old == m_header.m_imultiset.begin()
         || (--imultiset_iterator(it_old))->m_size <= rem_block->m_size){
         //option a: slow but secure
         //m_header.m_imultiset.insert(m_header.m_imultiset.erase(it_old), *rem_block);
         //option b: construct an empty node and swap
         //Imultiset::init_node(*rem_block);
         //block->swap_nodes(*rem_block);
         //option c: replace the node directly, since the smaller remainder
         //still respects the size ordering at the old position
         m_header.m_imultiset.replace_node(Imultiset::s_iterator_to(*it_old), *rem_block);
      }
      else{
         //The remainder would be smaller than its predecessor, so the old tree
         //position is no longer valid: erase and reinsert
         m_header.m_imultiset.erase(it_old);
         m_header.m_imultiset.insert(m_header.m_imultiset.begin(), *rem_block);
      }
   }
   else if (block->m_size >= nunits){
      m_header.m_imultiset.erase(it_old);
   }
   else{
      BOOST_ASSERT(0);
      return 0;
   }

   //We need to keep the block_ctrl data for deallocation, so return only
   //the memory the user can safely overwrite
   m_header.m_allocated += (size_type)block->m_size*Alignment;
   received_size = ((size_type)block->m_size - AllocatedCtrlUnits)*Alignment + UsableByPreviousChunk;
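   //Worked example with hypothetical values (Alignment == 16, AllocatedCtrlUnits == 1,
   //UsableByPreviousChunk == 8): a block of m_size == 6 units reports
   //(6 - 1)*16 + 8 == 88 bytes as received_size.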

   //Mark the block as allocated
   priv_mark_as_allocated_block(block);

   //Clear the memory occupied by the tree hook, since this won't be
   //cleared with zero_free_memory
   TreeHook *t = static_cast<TreeHook*>(block);
   //Just clear the memory part reserved for the user
   std::size_t tree_hook_offset_in_block = (char*)t - (char*)block;
   //volatile char *ptr =
   char *ptr = reinterpret_cast<char*>(block) + tree_hook_offset_in_block;
   const std::size_t s = BlockCtrlBytes - tree_hook_offset_in_block;
   std::memset(ptr, 0, s);
   this->priv_next_block(block)->m_prev_size = 0;
   return priv_get_user_buffer(block);
}

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::deallocate(void* addr)
{
   if(!addr) return;
   //-----------------------
   boost::interprocess::scoped_lock<mutex_type> guard(m_header);
   //-----------------------
   return this->priv_deallocate(addr);
}

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_deallocate(void* addr)
{
   if(!addr) return;

   block_ctrl *block = priv_get_block(addr);

   //The block must be marked as allocated
   BOOST_ASSERT(priv_is_allocated_block(block));

   //Check if alignment and block size are right
   algo_impl_t::assert_alignment(addr);

   size_type block_old_size = Alignment*(size_type)block->m_size;
   BOOST_ASSERT(m_header.m_allocated >= block_old_size);

   //Update used memory count
   m_header.m_allocated -= block_old_size;

   //The block to insert in the tree
   block_ctrl *block_to_insert = block;

   //Get the next block
   block_ctrl *const next_block = priv_next_block(block);
   const bool merge_with_prev   = !priv_is_prev_allocated(block);
   const bool merge_with_next   = !priv_is_allocated_block(next_block);

   //Merge logic: first just update block sizes, then fix the free blocks tree
   if(merge_with_prev || merge_with_next){
      //Merge if the previous is free
      if(merge_with_prev){
         //Get the previous block
         block_to_insert = priv_prev_block(block);
         block_to_insert->m_size += block->m_size;
         BOOST_ASSERT(block_to_insert->m_size >= BlockCtrlUnits);
      }
      //Merge if the next is free
      if(merge_with_next){
         block_to_insert->m_size += next_block->m_size;
         BOOST_ASSERT(block_to_insert->m_size >= BlockCtrlUnits);
         const imultiset_iterator next_it = Imultiset::s_iterator_to(*next_block);
         if(merge_with_prev){
            m_header.m_imultiset.erase(next_it);
         }
         else{
            m_header.m_imultiset.replace_node(next_it, *block_to_insert);
         }
      }

      //Now try to shortcut erasure + insertion (O(log(N))) with
      //an O(1) operation if merging does not alter tree positions
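      //The check below relies on the multiset being ordered by size: if the grown
      //block is still no larger than its in-order successor, its current node
      //position remains valid and no rebalancing work is needed. Roughly:
      //
      //   if(a successor exists && block_to_insert->m_size > successor->m_size)
      //      erase + reinsert;            //old position no longer valid
      //   else
      //      keep the node where it is;   //ordering untouched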
      const imultiset_iterator block_to_check_it = Imultiset::s_iterator_to(*block_to_insert);
      imultiset_const_iterator next_to_check_it(block_to_check_it), end_it(m_header.m_imultiset.end());

      if(++next_to_check_it != end_it && block_to_insert->m_size > next_to_check_it->m_size){
         //Block is bigger than next, so move it
         m_header.m_imultiset.erase(block_to_check_it);
         m_header.m_imultiset.insert(end_it, *block_to_insert);
      }
      else{
         //Block size increment didn't violate tree invariants so there is nothing to fix
      }
   }
   else{
      m_header.m_imultiset.insert(m_header.m_imultiset.begin(), *block_to_insert);
   }
   priv_mark_as_free_block(block_to_insert);
}
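
//A rough end-to-end sketch of the coalescing performed above (illustrative,
//with made-up unit counts): freeing block B when both neighbours are free,
//
//   [A free, 3 units][B allocated, 5 units][C free, 4 units]
//
//first grows A by B's units (merge_with_prev), then by C's units
//(merge_with_next, erasing C's tree node), leaving a single free block of
//3 + 5 + 4 == 12 units that is finally re-marked as free.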

#endif   //#ifndef BOOST_INTERPROCESS_DOXYGEN_INVOKED

}  //namespace interprocess {
}  //namespace boost {

#include <boost/interprocess/detail/config_end.hpp>

#endif   //#ifndef BOOST_INTERPROCESS_MEM_ALGO_RBTREE_BEST_FIT_HPP