//////////////////////////////////////////////////////////////////////////////
//
// (C) Copyright Ion Gaztanaga 2005-2012. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
// See http://www.boost.org/libs/interprocess for documentation.
//
//////////////////////////////////////////////////////////////////////////////

#ifndef BOOST_INTERPROCESS_MEM_ALGO_DETAIL_SIMPLE_SEQ_FIT_IMPL_HPP
#define BOOST_INTERPROCESS_MEM_ALGO_DETAIL_SIMPLE_SEQ_FIT_IMPL_HPP

#ifndef BOOST_CONFIG_HPP
#  include <boost/config.hpp>
#endif
#
#if defined(BOOST_HAS_PRAGMA_ONCE)
#  pragma once
#endif

#include <boost/interprocess/detail/config_begin.hpp>
#include <boost/interprocess/detail/workaround.hpp>

#include <boost/intrusive/pointer_traits.hpp>

#include <boost/interprocess/interprocess_fwd.hpp>
#include <boost/interprocess/containers/allocation_type.hpp>
#include <boost/container/detail/multiallocation_chain.hpp>
#include <boost/interprocess/offset_ptr.hpp>
#include <boost/interprocess/sync/interprocess_mutex.hpp>
#include <boost/interprocess/exceptions.hpp>
#include <boost/interprocess/detail/utilities.hpp>
#include <boost/interprocess/detail/min_max.hpp>
#include <boost/interprocess/detail/type_traits.hpp>
#include <boost/interprocess/sync/scoped_lock.hpp>
#include <boost/intrusive/pointer_traits.hpp>
#include <boost/interprocess/mem_algo/detail/mem_algo_common.hpp>
#include <boost/move/detail/type_traits.hpp> //make_unsigned, alignment_of
#include <boost/intrusive/detail/minimal_pair_header.hpp>
#include <cstring>
#include <boost/assert.hpp>

//!\file
//!Describes sequential fit algorithm used to allocate objects in shared memory.
//!This class is intended as a base class for single segment and multi-segment
//!implementations.

namespace boost {
namespace interprocess {
namespace ipcdetail {

//!This class implements the simple sequential fit algorithm with a simply
//!linked list of free buffers.
//!This class is intended as a base class for single segment and multi-segment
//!implementations.
template<class MutexFamily, class VoidPointer>
class simple_seq_fit_impl
{
   //Non-copyable
   simple_seq_fit_impl();
   simple_seq_fit_impl(const simple_seq_fit_impl &);
   simple_seq_fit_impl &operator=(const simple_seq_fit_impl &);

   typedef typename boost::intrusive::
      pointer_traits<VoidPointer>::template
         rebind_pointer<char>::type                   char_ptr;

   public:

   //!Shared interprocess_mutex family used for the rest of the Interprocess framework
   typedef MutexFamily        mutex_family;
   //!Pointer type to be used with the rest of the Interprocess framework
   typedef VoidPointer        void_pointer;
   typedef boost::container::dtl::
      basic_multiallocation_chain<VoidPointer>        multiallocation_chain;

   //!Signed/unsigned size types derived from the (possibly smart) pointer type
   typedef typename boost::intrusive::pointer_traits<char_ptr>::difference_type difference_type;
   typedef typename boost::container::dtl::make_unsigned<difference_type>::type size_type;

   private:
   class block_ctrl;
   friend class block_ctrl;

   typedef typename boost::intrusive::
      pointer_traits<VoidPointer>::template
         rebind_pointer<block_ctrl>::type             block_ctrl_ptr;

   //!Block control structure
   class block_ctrl
   {
      public:
      //!Offset pointer to the next block.
      block_ctrl_ptr m_next;
      //!This block's memory size (including block_ctrl
      //!header) in BasicSize units
      size_type      m_size;

      //!Bytes usable by the caller: total bytes minus the control header
      size_type get_user_bytes() const
      {  return this->m_size*Alignment - BlockCtrlBytes; }

      //!Total bytes occupied by the block, control header included
      size_type get_total_bytes() const
      {  return this->m_size*Alignment; }
   };

   //!Shared interprocess_mutex to protect memory allocate/deallocate
   typedef typename MutexFamily::mutex_type   interprocess_mutex;

   //!This struct includes needed data and derives from
   //!interprocess_mutex to allow EBO when using null interprocess_mutex
   struct header_t : public interprocess_mutex
   {
      //!Pointer to the first free block
      block_ctrl        m_root;
      //!Allocated bytes for internal checking
      size_type         m_allocated;
      //!The size of the memory segment
      size_type         m_size;
      //!The extra size required by the segment
      size_type         m_extra_hdr_bytes;
   }  m_header;

   friend class ipcdetail::memory_algorithm_common<simple_seq_fit_impl>;

   //!Helper with the logic shared by every memory algorithm
   typedef ipcdetail::memory_algorithm_common<simple_seq_fit_impl> algo_impl_t;

   public:

   //!Constructor. "size" is the total size of the managed memory segment,
   //!"extra_hdr_bytes" indicates the extra bytes beginning in the sizeof(simple_seq_fit_impl)
   //!offset that the allocator should not use at all.
   simple_seq_fit_impl           (size_type size, size_type extra_hdr_bytes);

   //!Destructor
   ~simple_seq_fit_impl();

   //!Obtains the minimum size needed by the algorithm
   static size_type get_min_size (size_type extra_hdr_bytes);

   //Functions for single segment management

   //!Allocates bytes, returns 0 if there is not more memory
   void* allocate               (size_type nbytes);

   #if !defined(BOOST_INTERPROCESS_DOXYGEN_INVOKED)

   //!Multiple element allocation, same size
   void allocate_many(size_type elem_bytes, size_type num_elements, multiallocation_chain &chain)
   {
      //-----------------------
      boost::interprocess::scoped_lock<interprocess_mutex> guard(m_header);
      //-----------------------
      algo_impl_t::allocate_many(this, elem_bytes, num_elements, chain);
   }

   //!Multiple element allocation, different size
   void allocate_many(const size_type *elem_sizes, size_type n_elements, size_type sizeof_element, multiallocation_chain &chain)
   {
      //-----------------------
      boost::interprocess::scoped_lock<interprocess_mutex> guard(m_header);
      //-----------------------
      algo_impl_t::allocate_many(this, elem_sizes, n_elements, sizeof_element, chain);
   }

   //!Multiple element deallocation
   void deallocate_many(multiallocation_chain &chain);

   #endif   //#ifndef BOOST_INTERPROCESS_DOXYGEN_INVOKED

   //!Deallocates previously allocated bytes
   void   deallocate            (void *addr);

   //!Returns the size of the memory segment
   size_type get_size()  const;

   //!Returns the number of free bytes of the memory segment
   size_type get_free_memory()  const;

   //!Increases managed memory in extra_size bytes more
   void grow(size_type extra_size);

   //!Decreases managed memory as much as possible
   void shrink_to_fit();

   //!Returns true if all allocated memory has been deallocated
   bool all_memory_deallocated();

   //!Makes an internal sanity check and returns true if success
   bool check_sanity();

   //!Initializes to zero all the memory that's not in use.
   //!This function is normally used for security reasons.
   void zero_free_memory();

   //!Typed front-end over raw_allocation_command; sizes are in T objects
   template<class T>
   T *allocation_command  (boost::interprocess::allocation_type command, size_type limit_size,
                           size_type &prefer_in_recvd_out_size, T *&reuse);

   //!Untyped allocation command; sizes are in objects of "sizeof_object" bytes
   void * raw_allocation_command (boost::interprocess::allocation_type command, size_type limit_size,
                           size_type &prefer_in_recvd_out_size, void *&reuse_ptr, size_type sizeof_object = 1);

   //!Returns the size of the buffer previously allocated pointed by ptr
   size_type size(const void *ptr) const;

   //!Allocates aligned bytes, returns 0 if there is not more memory.
   //!Alignment must be power of 2
   void* allocate_aligned     (size_type nbytes, size_type alignment);

   private:

   //!Obtains the pointer returned to the user from the block control
   static void *priv_get_user_buffer(const block_ctrl *block);

   //!Obtains the block control structure of the user buffer
   static block_ctrl *priv_get_block(const void *ptr);

   //!Real allocation algorithm with min allocation option
   void * priv_allocate(boost::interprocess::allocation_type command
                        ,size_type min_size
                        ,size_type &prefer_in_recvd_out_size, void *&reuse_ptr);

   //!Dispatches an allocation command after converting object counts to bytes
   void * priv_allocation_command(boost::interprocess::allocation_type command
                                 ,size_type min_size
                                 ,size_type &prefer_in_recvd_out_size
                                 ,void *&reuse_ptr
                                 ,size_type sizeof_object);

   //!Returns the number of total units that a user buffer
   //!of "userbytes" bytes really occupies (including header)
   static size_type priv_get_total_units(size_type userbytes);

   //!Offset (from "this") of the first block inside the segment
   static size_type priv_first_block_offset(const void *this_ptr, size_type extra_hdr_bytes);
   //!Aligned offset (from "this") of the end of the managed segment
   size_type priv_block_end_offset() const;

   //!Returns next block if it's free.
   //!Returns 0 if next block is not free.
   block_ctrl *priv_next_block_if_free(block_ctrl *ptr);

   //!Check if this block is free (not allocated)
   bool priv_is_allocated_block(block_ctrl *ptr);

   //!Returns previous block's if it's free.
   //!Returns 0 if previous block is not free.
   std::pair<block_ctrl*, block_ctrl*> priv_prev_block_if_free(block_ctrl *ptr);

   //!Real expand function implementation
   bool priv_expand(void *ptr, size_type min_size, size_type &prefer_in_recvd_out_size);

   //!Real expand to both sides implementation
   void* priv_expand_both_sides(boost::interprocess::allocation_type command
                               ,size_type min_size, size_type &prefer_in_recvd_out_size
                               ,void *reuse_ptr
                               ,bool only_preferred_backwards);

   //!Real private aligned allocation function
   //void* priv_allocate_aligned     (size_type nbytes, size_type alignment);

   //!Checks if block has enough memory and splits/unlinks the block
   //!returning the address to the users
   void* priv_check_and_allocate(size_type units
                                ,block_ctrl* prev
                                ,block_ctrl* block
                                ,size_type &received_size);
   //!Real deallocation algorithm
   void priv_deallocate(void *addr);

   //!Makes a new memory portion available for allocation
   void priv_add_segment(void *addr, size_type size);

   //!Tags a block just taken from the free list as allocated
   void priv_mark_new_allocated_block(block_ctrl *block);

   public:

   //!Allocation granularity: every block size is a multiple of this
   static const size_type Alignment      = ::boost::container::dtl::alignment_of
      < ::boost::container::dtl::max_align_t>::value;

   private:
   //!block_ctrl footprint rounded up to the allocation granularity
   static const size_type BlockCtrlBytes = ipcdetail::ct_rounded_size<sizeof(block_ctrl), Alignment>::value;
   static const size_type BlockCtrlUnits = BlockCtrlBytes/Alignment;
   static const size_type MinBlockUnits  = BlockCtrlUnits;
   static const size_type MinBlockSize   = MinBlockUnits*Alignment;
   static const size_type AllocatedCtrlBytes = BlockCtrlBytes;
   static const size_type AllocatedCtrlUnits = BlockCtrlUnits;
   static const size_type UsableByPreviousChunk = 0;

   public:
   //!Per-allocation bookkeeping overhead in bytes
   static const size_type PayloadPerAllocation = BlockCtrlBytes;
};
  225. template<class MutexFamily, class VoidPointer>
  226. inline typename simple_seq_fit_impl<MutexFamily, VoidPointer>::size_type
  227. simple_seq_fit_impl<MutexFamily, VoidPointer>
  228. ::priv_first_block_offset(const void *this_ptr, size_type extra_hdr_bytes)
  229. {
  230. //First align "this" pointer
  231. size_type uint_this = (std::size_t)this_ptr;
  232. size_type uint_aligned_this = uint_this/Alignment*Alignment;
  233. size_type this_disalignment = (uint_this - uint_aligned_this);
  234. size_type block1_off =
  235. ipcdetail::get_rounded_size(sizeof(simple_seq_fit_impl) + extra_hdr_bytes + this_disalignment, Alignment)
  236. - this_disalignment;
  237. algo_impl_t::assert_alignment(this_disalignment + block1_off);
  238. return block1_off;
  239. }
  240. template<class MutexFamily, class VoidPointer>
  241. inline typename simple_seq_fit_impl<MutexFamily, VoidPointer>::size_type
  242. simple_seq_fit_impl<MutexFamily, VoidPointer>
  243. ::priv_block_end_offset() const
  244. {
  245. //First align "this" pointer
  246. size_type uint_this = (std::size_t)this;
  247. size_type uint_aligned_this = uint_this/Alignment*Alignment;
  248. size_type this_disalignment = (uint_this - uint_aligned_this);
  249. size_type old_end =
  250. ipcdetail::get_truncated_size(m_header.m_size + this_disalignment, Alignment)
  251. - this_disalignment;
  252. algo_impl_t::assert_alignment(old_end + this_disalignment);
  253. return old_end;
  254. }
template<class MutexFamily, class VoidPointer>
inline simple_seq_fit_impl<MutexFamily, VoidPointer>::
   simple_seq_fit_impl(size_type segment_size, size_type extra_hdr_bytes)
{
   //Initialize sizes and counters
   m_header.m_allocated = 0;
   m_header.m_size      = segment_size;
   m_header.m_extra_hdr_bytes = extra_hdr_bytes;

   //Initialize pointers: the single initial free block starts just after this
   //header plus extra_hdr_bytes, rounded up to an aligned absolute address
   size_type block1_off = priv_first_block_offset(this, extra_hdr_bytes);

   m_header.m_root.m_next  = reinterpret_cast<block_ctrl*>
                              ((reinterpret_cast<char*>(this) + block1_off));
   algo_impl_t::assert_alignment(ipcdetail::to_raw_pointer(m_header.m_root.m_next));
   //The free list is circular: the single free block covers the rest of the
   //segment (size stored in Alignment units) and links back to the root sentinel
   m_header.m_root.m_next->m_size  = (segment_size - block1_off)/Alignment;
   m_header.m_root.m_next->m_next  = &m_header.m_root;
}
template<class MutexFamily, class VoidPointer>
inline simple_seq_fit_impl<MutexFamily, VoidPointer>::~simple_seq_fit_impl()
{
   //The destructor intentionally performs no cleanup: the segment memory is
   //owned by the caller. The leak-detection asserts below are kept disabled.
   //There is a memory leak!
//   BOOST_ASSERT(m_header.m_allocated == 0);
//   BOOST_ASSERT(m_header.m_root.m_next->m_next == block_ctrl_ptr(&m_header.m_root));
}
template<class MutexFamily, class VoidPointer>
inline void simple_seq_fit_impl<MutexFamily, VoidPointer>::grow(size_type extra_size)
{
   //Old highest address block's end offset
   size_type old_end = this->priv_block_end_offset();

   //Update managed buffer's size
   m_header.m_size += extra_size;

   //We need at least MinBlockSize bytes past the old end to create a new block
   if((m_header.m_size - old_end) < MinBlockSize){
      return;
   }

   //We'll create a new free block spanning from the old end to the new end
   block_ctrl *new_block = reinterpret_cast<block_ctrl*>
      (reinterpret_cast<char*>(this) + old_end);

   algo_impl_t::assert_alignment(new_block);
   new_block->m_next = 0;
   new_block->m_size = (m_header.m_size - old_end)/Alignment;
   //Pretend the block was allocated so that priv_deallocate's bookkeeping
   //balances when the block is inserted into the free list below
   m_header.m_allocated += new_block->m_size*Alignment;
   this->priv_deallocate(priv_get_user_buffer(new_block));
}
template<class MutexFamily, class VoidPointer>
void simple_seq_fit_impl<MutexFamily, VoidPointer>::shrink_to_fit()
{
   //Get the root and the first memory block
   block_ctrl *prev                 = &m_header.m_root;
   block_ctrl *last                 = &m_header.m_root;
   block_ctrl *block                = ipcdetail::to_raw_pointer(last->m_next);
   block_ctrl *root                 = &m_header.m_root;

   //No free block?
   if(block == root) return;

   //Iterate through the free block list to find the last (highest address)
   //free block and the block that precedes it in the list
   while(block != root){
      prev  = last;
      last  = block;
      block = ipcdetail::to_raw_pointer(block->m_next);
   }

   char *last_free_end_address = reinterpret_cast<char*>(last) + last->m_size*Alignment;
   if(last_free_end_address != (reinterpret_cast<char*>(this) + priv_block_end_offset())){
      //there is an allocated block in the end of this block
      //so no shrinking is possible
      return;
   }

   //Check if have only 1 big free block
   void *unique_block = 0;
   if(!m_header.m_allocated){
      BOOST_ASSERT(prev == root);
      //Allocate a zero-sized buffer so the segment is not left with an
      //empty free list while we trim the tail block
      size_type ignore_recvd = 0;
      void *ignore_reuse = 0;
      unique_block = priv_allocate(boost::interprocess::allocate_new, 0, ignore_recvd, ignore_reuse);
      if(!unique_block)
         return;
      last = ipcdetail::to_raw_pointer(m_header.m_root.m_next);
      BOOST_ASSERT(last_free_end_address == (reinterpret_cast<char*>(last) + last->m_size*Alignment));
   }
   size_type last_units = last->m_size;

   //Take the whole tail free block out of the free list
   size_type received_size;
   void *addr = priv_check_and_allocate(last_units, prev, last, received_size);
   (void)addr;
   BOOST_ASSERT(addr);
   BOOST_ASSERT(received_size == last_units*Alignment - AllocatedCtrlBytes);

   //Shrink it: remove the tail block's bytes from the accounted segment size
   //(divide/multiply keeps m_size truncated to a multiple of Alignment)
   m_header.m_size /= Alignment;
   m_header.m_size -= last->m_size;
   m_header.m_size *= Alignment;
   m_header.m_allocated -= last->m_size*Alignment;

   if(unique_block)
      priv_deallocate(unique_block);
}
template<class MutexFamily, class VoidPointer>
inline void simple_seq_fit_impl<MutexFamily, VoidPointer>::
   priv_mark_new_allocated_block(block_ctrl *new_block)
{
   //A null m_next tags the block as allocated: free blocks always carry a
   //valid next pointer (see check_sanity)
   new_block->m_next = 0;
}
  352. template<class MutexFamily, class VoidPointer>
  353. inline
  354. typename simple_seq_fit_impl<MutexFamily, VoidPointer>::block_ctrl *
  355. simple_seq_fit_impl<MutexFamily, VoidPointer>::priv_get_block(const void *ptr)
  356. {
  357. return const_cast<block_ctrl*>(reinterpret_cast<const block_ctrl*>
  358. (reinterpret_cast<const char*>(ptr) - AllocatedCtrlBytes));
  359. }
  360. template<class MutexFamily, class VoidPointer>
  361. inline
  362. void *simple_seq_fit_impl<MutexFamily, VoidPointer>::
  363. priv_get_user_buffer(const typename simple_seq_fit_impl<MutexFamily, VoidPointer>::block_ctrl *block)
  364. {
  365. return const_cast<char*>(reinterpret_cast<const char*>(block) + AllocatedCtrlBytes);
  366. }
template<class MutexFamily, class VoidPointer>
inline void simple_seq_fit_impl<MutexFamily, VoidPointer>::priv_add_segment(void *addr, size_type segment_size)
{
   //Makes [addr, addr+segment_size) available for allocation as one free block
   algo_impl_t::assert_alignment(addr);
   //Check size
   BOOST_ASSERT(!(segment_size < MinBlockSize));
   if(segment_size < MinBlockSize)
      return;
   //Construct big block using the new segment
   block_ctrl *new_block   = static_cast<block_ctrl *>(addr);
   new_block->m_size       = segment_size/Alignment;
   new_block->m_next       = 0;
   //Simulate this block was previously allocated so that priv_deallocate's
   //m_allocated bookkeeping balances when the block is freed below
   m_header.m_allocated += new_block->m_size*Alignment;
   //Return block and insert it in the free block list
   this->priv_deallocate(priv_get_user_buffer(new_block));
}
//!Returns the total size in bytes of the managed memory segment
template<class MutexFamily, class VoidPointer>
inline typename simple_seq_fit_impl<MutexFamily, VoidPointer>::size_type
simple_seq_fit_impl<MutexFamily, VoidPointer>::get_size()  const
   {  return m_header.m_size;  }
template<class MutexFamily, class VoidPointer>
inline typename simple_seq_fit_impl<MutexFamily, VoidPointer>::size_type
simple_seq_fit_impl<MutexFamily, VoidPointer>::get_free_memory()  const
{
   //Free bytes = segment size minus allocated bytes minus the rounded-up
   //footprint of this header plus the extra header bytes
   return m_header.m_size - m_header.m_allocated -
      algo_impl_t::multiple_of_units(sizeof(*this) + m_header.m_extra_hdr_bytes);
}
  395. template<class MutexFamily, class VoidPointer>
  396. inline typename simple_seq_fit_impl<MutexFamily, VoidPointer>::size_type
  397. simple_seq_fit_impl<MutexFamily, VoidPointer>::
  398. get_min_size (size_type extra_hdr_bytes)
  399. {
  400. return ipcdetail::get_rounded_size((size_type)sizeof(simple_seq_fit_impl),Alignment) +
  401. ipcdetail::get_rounded_size(extra_hdr_bytes,Alignment)
  402. + MinBlockSize;
  403. }
template<class MutexFamily, class VoidPointer>
inline bool simple_seq_fit_impl<MutexFamily, VoidPointer>::
   all_memory_deallocated()
{
   //-----------------------
   boost::interprocess::scoped_lock<interprocess_mutex> guard(m_header);
   //-----------------------
   //True when no bytes are accounted as allocated and the circular free list
   //holds a single block (its next pointer is the root sentinel again)
   return m_header.m_allocated == 0 &&
      ipcdetail::to_raw_pointer(m_header.m_root.m_next->m_next) == &m_header.m_root;
}
template<class MutexFamily, class VoidPointer>
inline void simple_seq_fit_impl<MutexFamily, VoidPointer>::zero_free_memory()
{
   //-----------------------
   boost::interprocess::scoped_lock<interprocess_mutex> guard(m_header);
   //-----------------------
   block_ctrl *block = ipcdetail::to_raw_pointer(m_header.m_root.m_next);

   //Iterate through all free portions
   //NOTE(review): this do-while runs its body once even when the free list is
   //empty (m_root.m_next == &m_root), which would memset using the root
   //sentinel's m_size — presumably the segment always keeps at least one free
   //block when this is called; confirm against callers.
   do{
      //Just clear user the memory part reserved for the user
      std::memset( priv_get_user_buffer(block)
                 , 0
                 , block->get_user_bytes());
      block = ipcdetail::to_raw_pointer(block->m_next);
   }
   while(block != &m_header.m_root);
}
template<class MutexFamily, class VoidPointer>
inline bool simple_seq_fit_impl<MutexFamily, VoidPointer>::
   check_sanity()
{
   //Walks the free list validating alignment and link integrity, then checks
   //the global byte accounting. Returns false on the first inconsistency.
   //-----------------------
   boost::interprocess::scoped_lock<interprocess_mutex> guard(m_header);
   //-----------------------
   block_ctrl *block = ipcdetail::to_raw_pointer(m_header.m_root.m_next);

   size_type free_memory = 0;

   //Iterate through all blocks obtaining their size
   while(block != &m_header.m_root){
      algo_impl_t::assert_alignment(block);
      if(!algo_impl_t::check_alignment(block))
         return false;
      //Free blocks's next must be always valid
      block_ctrl *next = ipcdetail::to_raw_pointer(block->m_next);
      if(!next){
         return false;
      }
      free_memory += block->m_size*Alignment;
      block = next;
   }

   //Check allocated bytes are less than size
   if(m_header.m_allocated > m_header.m_size){
      return false;
   }

   //Check free bytes are less than size
   if(free_memory > m_header.m_size){
      return false;
   }
   return true;
}
  463. template<class MutexFamily, class VoidPointer>
  464. inline void* simple_seq_fit_impl<MutexFamily, VoidPointer>::
  465. allocate(size_type nbytes)
  466. {
  467. //-----------------------
  468. boost::interprocess::scoped_lock<interprocess_mutex> guard(m_header);
  469. //-----------------------
  470. size_type ignore_recvd = nbytes;
  471. void *ignore_reuse = 0;
  472. return priv_allocate(boost::interprocess::allocate_new, nbytes, ignore_recvd, ignore_reuse);
  473. }
  474. template<class MutexFamily, class VoidPointer>
  475. inline void* simple_seq_fit_impl<MutexFamily, VoidPointer>::
  476. allocate_aligned(size_type nbytes, size_type alignment)
  477. {
  478. //-----------------------
  479. boost::interprocess::scoped_lock<interprocess_mutex> guard(m_header);
  480. //-----------------------
  481. return algo_impl_t::
  482. allocate_aligned(this, nbytes, alignment);
  483. }
template<class MutexFamily, class VoidPointer>
template<class T>
inline T* simple_seq_fit_impl<MutexFamily, VoidPointer>::
   allocation_command  (boost::interprocess::allocation_type command, size_type limit_size,
                        size_type &prefer_in_recvd_out_size, T *&reuse_ptr)
{
   //Typed wrapper: forwards to the raw command using sizeof(T) as the object
   //size, then casts the reuse/return pointers back to T*
   void *raw_reuse = reuse_ptr;
   void * const ret = priv_allocation_command
      (command, limit_size, prefer_in_recvd_out_size, raw_reuse, sizeof(T));
   //The returned buffer must be suitably aligned for T
   BOOST_ASSERT(0 == ((std::size_t)ret % ::boost::container::dtl::alignment_of<T>::value));
   reuse_ptr = static_cast<T*>(raw_reuse);
   return static_cast<T*>(ret);
}
template<class MutexFamily, class VoidPointer>
inline void* simple_seq_fit_impl<MutexFamily, VoidPointer>::
   raw_allocation_command  (boost::interprocess::allocation_type command, size_type limit_objects,
                           size_type &prefer_in_recvd_out_size, void *&reuse_ptr, size_type sizeof_object)
{
   //Sizes are expressed in objects of "sizeof_object" bytes; they are
   //converted to bytes before calling the byte-based implementation and
   //converted back before returning
   size_type const preferred_objects = prefer_in_recvd_out_size;
   if(!sizeof_object){
      return reuse_ptr = 0, static_cast<void*>(0);
   }
   if(command & boost::interprocess::try_shrink_in_place){
      //Shrink requests need an existing buffer to operate on
      if(!reuse_ptr) return static_cast<void*>(0);
      prefer_in_recvd_out_size = preferred_objects*sizeof_object;
      bool success = algo_impl_t::try_shrink
         ( this, reuse_ptr, limit_objects*sizeof_object, prefer_in_recvd_out_size);
      prefer_in_recvd_out_size /= sizeof_object;
      return success ? reuse_ptr : 0;
   }
   else{
      return priv_allocation_command
         (command, limit_objects, prefer_in_recvd_out_size, reuse_ptr, sizeof_object);
   }
}
template<class MutexFamily, class VoidPointer>
inline void* simple_seq_fit_impl<MutexFamily, VoidPointer>::
   priv_allocation_command (boost::interprocess::allocation_type command, size_type limit_size,
                       size_type &prefer_in_recvd_out_size, void *&reuse_ptr, size_type sizeof_object)
{
   size_type const preferred_size = prefer_in_recvd_out_size;
   //Backwards expansion is not supported by this algorithm's public commands
   command &= ~boost::interprocess::expand_bwd;
   if(!command){
      return reuse_ptr = 0, static_cast<void*>(0);
   }

   //Reject requests whose byte size would overflow size_type
   size_type max_count = m_header.m_size/sizeof_object;
   if(limit_size > max_count || preferred_size > max_count){
      return reuse_ptr = 0, static_cast<void*>(0);
   }
   size_type l_size = limit_size*sizeof_object;
   size_type r_size = preferred_size*sizeof_object;
   void *ret = 0;
   {
      //-----------------------
      boost::interprocess::scoped_lock<interprocess_mutex> guard(m_header);
      //-----------------------
      ret = priv_allocate(command, l_size, r_size, reuse_ptr);
   }
   //Convert the received byte count back to an object count
   prefer_in_recvd_out_size = r_size/sizeof_object;
   return ret;
}
//!Returns the user bytes of the allocated buffer pointed by ptr
template<class MutexFamily, class VoidPointer>
inline typename simple_seq_fit_impl<MutexFamily, VoidPointer>::size_type
simple_seq_fit_impl<MutexFamily, VoidPointer>::size(const void *ptr) const
{
   //We need no synchronization since this block is not going
   //to be modified
   //Obtain the real size of the block
   const block_ctrl *block = static_cast<const block_ctrl*>(priv_get_block(ptr));
   return block->get_user_bytes();
}
  555. template<class MutexFamily, class VoidPointer>
  556. void* simple_seq_fit_impl<MutexFamily, VoidPointer>::
  557. priv_expand_both_sides(boost::interprocess::allocation_type command
  558. ,size_type min_size
  559. ,size_type &prefer_in_recvd_out_size
  560. ,void *reuse_ptr
  561. ,bool only_preferred_backwards)
  562. {
  563. size_type const preferred_size = prefer_in_recvd_out_size;
  564. typedef std::pair<block_ctrl *, block_ctrl *> prev_block_t;
  565. block_ctrl *reuse = priv_get_block(reuse_ptr);
  566. prefer_in_recvd_out_size = 0;
  567. if(this->size(reuse_ptr) > min_size){
  568. prefer_in_recvd_out_size = this->size(reuse_ptr);
  569. return reuse_ptr;
  570. }
  571. if(command & boost::interprocess::expand_fwd){
  572. if(priv_expand(reuse_ptr, min_size, prefer_in_recvd_out_size = preferred_size))
  573. return reuse_ptr;
  574. }
  575. else{
  576. prefer_in_recvd_out_size = this->size(reuse_ptr);
  577. }
  578. if(command & boost::interprocess::expand_bwd){
  579. size_type extra_forward = !prefer_in_recvd_out_size ? 0 : prefer_in_recvd_out_size + BlockCtrlBytes;
  580. prev_block_t prev_pair = priv_prev_block_if_free(reuse);
  581. block_ctrl *prev = prev_pair.second;
  582. if(!prev){
  583. return 0;
  584. }
  585. size_type needs_backwards =
  586. ipcdetail::get_rounded_size(preferred_size - extra_forward, Alignment);
  587. if(!only_preferred_backwards){
  588. max_value(ipcdetail::get_rounded_size(min_size - extra_forward, Alignment)
  589. ,min_value(prev->get_user_bytes(), needs_backwards));
  590. }
  591. //Check if previous block has enough size
  592. if((prev->get_user_bytes()) >= needs_backwards){
  593. //Now take all next space. This will succeed
  594. if(!priv_expand(reuse_ptr, prefer_in_recvd_out_size, prefer_in_recvd_out_size)){
  595. BOOST_ASSERT(0);
  596. }
  597. //We need a minimum size to split the previous one
  598. if((prev->get_user_bytes() - needs_backwards) > 2*BlockCtrlBytes){
  599. block_ctrl *new_block = reinterpret_cast<block_ctrl*>
  600. (reinterpret_cast<char*>(reuse) - needs_backwards - BlockCtrlBytes);
  601. new_block->m_next = 0;
  602. new_block->m_size =
  603. BlockCtrlUnits + (needs_backwards + extra_forward)/Alignment;
  604. prev->m_size =
  605. (prev->get_total_bytes() - needs_backwards)/Alignment - BlockCtrlUnits;
  606. prefer_in_recvd_out_size = needs_backwards + extra_forward;
  607. m_header.m_allocated += needs_backwards + BlockCtrlBytes;
  608. return priv_get_user_buffer(new_block);
  609. }
  610. else{
  611. //Just merge the whole previous block
  612. block_ctrl *prev_2_block = prev_pair.first;
  613. //Update received size and allocation
  614. prefer_in_recvd_out_size = extra_forward + prev->get_user_bytes();
  615. m_header.m_allocated += prev->get_total_bytes();
  616. //Now unlink it from previous block
  617. prev_2_block->m_next = prev->m_next;
  618. prev->m_size = reuse->m_size + prev->m_size;
  619. prev->m_next = 0;
  620. priv_get_user_buffer(prev);
  621. }
  622. }
  623. }
  624. return 0;
  625. }
  626. template<class MutexFamily, class VoidPointer>
  627. inline void simple_seq_fit_impl<MutexFamily, VoidPointer>::
  628. deallocate_many(typename simple_seq_fit_impl<MutexFamily, VoidPointer>::multiallocation_chain &chain)
  629. {
  630. //-----------------------
  631. boost::interprocess::scoped_lock<interprocess_mutex> guard(m_header);
  632. //-----------------------
  633. while(!chain.empty()){
  634. this->priv_deallocate(to_raw_pointer(chain.pop_front()));
  635. }
  636. }
  637. template<class MutexFamily, class VoidPointer>
  638. inline typename simple_seq_fit_impl<MutexFamily, VoidPointer>::size_type
  639. simple_seq_fit_impl<MutexFamily, VoidPointer>::
  640. priv_get_total_units(size_type userbytes)
  641. {
  642. size_type s = ipcdetail::get_rounded_size(userbytes, Alignment)/Alignment;
  643. if(!s) ++s;
  644. return BlockCtrlUnits + s;
  645. }
//Worker for the public allocation commands. Depending on the bits set in
//"command" it tries shrink-in-place, in-place expansion of "reuse_ptr"
//and/or a new first-fit allocation. "limit_size" is the minimum acceptable
//size; "prefer_in_recvd_out_size" carries the preferred size in and
//receives the obtained size out. "reuse_ptr" is zeroed whenever the
//returned memory is not the caller's buffer. Returns 0 on failure.
template<class MutexFamily, class VoidPointer>
void * simple_seq_fit_impl<MutexFamily, VoidPointer>::
   priv_allocate(boost::interprocess::allocation_type command
                ,size_type limit_size, size_type &prefer_in_recvd_out_size, void *&reuse_ptr)
{
   size_type const preferred_size = prefer_in_recvd_out_size;
   if(command & boost::interprocess::shrink_in_place){
      //Shrink only makes sense on a caller-supplied buffer
      if(!reuse_ptr) return static_cast<void*>(0);
      bool success = algo_impl_t::shrink(this, reuse_ptr, limit_size, prefer_in_recvd_out_size);
      return success ? reuse_ptr : 0;
   }
   prefer_in_recvd_out_size = 0;
   //A minimum bigger than the preferred size is a caller error
   if(limit_size > preferred_size){
      return reuse_ptr = 0, static_cast<void*>(0);
   }
   //Number of units to request (including block_ctrl header)
   size_type nunits = ipcdetail::get_rounded_size(preferred_size, Alignment)/Alignment + BlockCtrlUnits;
   //Get the root and the first memory block
   block_ctrl *prev = &m_header.m_root;
   block_ctrl *block = ipcdetail::to_raw_pointer(prev->m_next);
   block_ctrl *root = &m_header.m_root;
   block_ctrl *biggest_block = 0;
   block_ctrl *prev_biggest_block = 0;
   size_type biggest_size = 0;
   //Expand in place: first attempt with the full preferred size
   if(reuse_ptr && (command & (boost::interprocess::expand_fwd | boost::interprocess::expand_bwd))){
      void *ret = priv_expand_both_sides(command, limit_size, prefer_in_recvd_out_size = preferred_size, reuse_ptr, true);
      if(ret){
         algo_impl_t::assert_alignment(ret);
         return ret;
      }
   }
   if(command & boost::interprocess::allocate_new){
      prefer_in_recvd_out_size = 0;
      //First-fit walk over the circular free list (it ends back at root)
      while(block != root){
         //Update biggest block pointers: kept as a fallback in case no
         //block satisfies the preferred size
         if(block->m_size > biggest_size){
            prev_biggest_block = prev;
            biggest_size = block->m_size;
            biggest_block = block;
         }
         algo_impl_t::assert_alignment(block);
         void *addr = this->priv_check_and_allocate(nunits, prev, block, prefer_in_recvd_out_size);
         if(addr){
            algo_impl_t::assert_alignment(addr);
            return reuse_ptr = 0, addr;
         }
         //Bad luck, let's check next block
         prev = block;
         block = ipcdetail::to_raw_pointer(block->m_next);
      }
      //Bad luck finding preferred_size, now if we have any biggest_block
      //try with this block (it must still cover at least limit_size)
      if(biggest_block){
         size_type limit_units = ipcdetail::get_rounded_size(limit_size, Alignment)/Alignment + BlockCtrlUnits;
         if(biggest_block->m_size < limit_units){
            return reuse_ptr = 0, static_cast<void*>(0);
         }
         //Requesting the block's whole size, so this call can't fail.
         //NOTE(review): the assigned value mixes units (m_size*Alignment is
         //bytes, BlockCtrlUnits is units), but priv_check_and_allocate
         //overwrites received_size on success, so it is never observed --
         //confirm before relying on it.
         void *ret = this->priv_check_and_allocate
            (biggest_block->m_size, prev_biggest_block, biggest_block, prefer_in_recvd_out_size = biggest_block->m_size*Alignment - BlockCtrlUnits);
         BOOST_ASSERT(ret != 0);
         algo_impl_t::assert_alignment(ret);
         return reuse_ptr = 0, ret;
      }
   }
   //Now try to expand both sides with min size
   if(reuse_ptr && (command & (boost::interprocess::expand_fwd | boost::interprocess::expand_bwd))){
      void *ret = priv_expand_both_sides (command, limit_size, prefer_in_recvd_out_size = preferred_size, reuse_ptr, false);
      algo_impl_t::assert_alignment(ret);
      return ret;
   }
   return reuse_ptr = 0, static_cast<void*>(0);
}
  719. template<class MutexFamily, class VoidPointer> inline
  720. bool simple_seq_fit_impl<MutexFamily, VoidPointer>::priv_is_allocated_block
  721. (typename simple_seq_fit_impl<MutexFamily, VoidPointer>::block_ctrl *block)
  722. { return block->m_next == 0; }
  723. template<class MutexFamily, class VoidPointer>
  724. inline typename simple_seq_fit_impl<MutexFamily, VoidPointer>::block_ctrl *
  725. simple_seq_fit_impl<MutexFamily, VoidPointer>::
  726. priv_next_block_if_free
  727. (typename simple_seq_fit_impl<MutexFamily, VoidPointer>::block_ctrl *ptr)
  728. {
  729. //Take the address where the next block should go
  730. block_ctrl *next_block = reinterpret_cast<block_ctrl*>
  731. (reinterpret_cast<char*>(ptr) + ptr->m_size*Alignment);
  732. //Check if the adjacent block is in the managed segment
  733. char *this_char_ptr = reinterpret_cast<char*>(this);
  734. char *next_char_ptr = reinterpret_cast<char*>(next_block);
  735. size_type distance = (size_type)(next_char_ptr - this_char_ptr)/Alignment;
  736. if(distance >= (m_header.m_size/Alignment)){
  737. //"next_block" does not exist so we can't expand "block"
  738. return 0;
  739. }
  740. if(!next_block->m_next)
  741. return 0;
  742. return next_block;
  743. }
//If the block physically preceding "ptr" is free, returns the pair
//(its predecessor in the free list, the free block itself); otherwise
//returns a pair of nulls. Used by backwards expansion.
template<class MutexFamily, class VoidPointer>
inline
std::pair<typename simple_seq_fit_impl<MutexFamily, VoidPointer>::block_ctrl *
         ,typename simple_seq_fit_impl<MutexFamily, VoidPointer>::block_ctrl *>
   simple_seq_fit_impl<MutexFamily, VoidPointer>::
      priv_prev_block_if_free
         (typename simple_seq_fit_impl<MutexFamily, VoidPointer>::block_ctrl *ptr)
{
   typedef std::pair<block_ctrl *, block_ctrl *> prev_pair_t;
   //Take the address where the previous block should go
   block_ctrl *root = &m_header.m_root;
   block_ctrl *prev_2_block = root;
   block_ctrl *prev_block = ipcdetail::to_raw_pointer(root->m_next);
   //Walk the address-ordered free list until a free block ends exactly
   //where "ptr" begins (physical adjacency) or we wrap back to the root
   while((reinterpret_cast<char*>(prev_block) + prev_block->m_size*Alignment)
            != reinterpret_cast<char*>(ptr)
         && prev_block != root){
      prev_2_block = prev_block;
      prev_block = ipcdetail::to_raw_pointer(prev_block->m_next);
   }
   //No adjacent free predecessor found (or it is marked allocated)
   if(prev_block == root || !prev_block->m_next)
      return prev_pair_t(static_cast<block_ctrl*>(0), static_cast<block_ctrl*>(0));
   //Check if the previous block is in the managed segment
   char *this_char_ptr = reinterpret_cast<char*>(this);
   char *prev_char_ptr = reinterpret_cast<char*>(prev_block);
   size_type distance = (size_type)(prev_char_ptr - this_char_ptr)/Alignment;
   if(distance >= (m_header.m_size/Alignment)){
      //"previous_block" does not exist so we can't expand "block"
      return prev_pair_t(static_cast<block_ctrl*>(0), static_cast<block_ctrl*>(0));
   }
   return prev_pair_t(prev_2_block, prev_block);
}
//Tries to expand the allocated buffer "ptr" forward by merging it with the
//physically next block when that block is free. "min_size" is the minimum
//required user bytes; "received_size" carries the preferred user size in
//and receives the obtained user size out. Returns true on success.
template<class MutexFamily, class VoidPointer>
inline bool simple_seq_fit_impl<MutexFamily, VoidPointer>::
   priv_expand (void *ptr, size_type min_size, size_type &received_size)
{
   size_type preferred_size = received_size;
   //Obtain the real size of the block
   block_ctrl *block = reinterpret_cast<block_ctrl*>(priv_get_block(ptr));
   size_type old_block_size = block->m_size;
   //All used blocks' next is marked with 0 so check it
   BOOST_ASSERT(block->m_next == 0);
   //Put this to a safe value
   received_size = old_block_size*Alignment - BlockCtrlBytes;
   //Now translate it to Alignment units
   min_size = ipcdetail::get_rounded_size(min_size, Alignment)/Alignment;
   preferred_size = ipcdetail::get_rounded_size(preferred_size, Alignment)/Alignment;
   //Some parameter checks
   if(min_size > preferred_size)
      return false;
   size_type data_size = old_block_size - BlockCtrlUnits;
   //The block may already satisfy the minimum without any merge
   if(data_size >= min_size)
      return true;
   block_ctrl *next_block = priv_next_block_if_free(block);
   if(!next_block){
      return false;
   }
   //Is "block" + "next_block" big enough?
   size_type merged_size = old_block_size + next_block->m_size;
   //Now we can expand this block further than before
   received_size = merged_size*Alignment - BlockCtrlBytes;
   if(merged_size < (min_size + BlockCtrlUnits)){
      return false;
   }
   //We can fully expand. Merge both blocks,
   block->m_next = next_block->m_next;
   block->m_size = merged_size;
   //Find the previous free block of next_block
   block_ctrl *prev = &m_header.m_root;
   while(ipcdetail::to_raw_pointer(prev->m_next) != next_block){
      prev = ipcdetail::to_raw_pointer(prev->m_next);
   }
   //Now insert merged block in the free list
   //This allows reusing allocation logic in this function
   //(priv_check_and_allocate will re-add the allocated bytes)
   m_header.m_allocated -= old_block_size*Alignment;
   prev->m_next = block;
   //Now use check and allocate to do the allocation logic
   preferred_size += BlockCtrlUnits;
   size_type nunits = preferred_size < merged_size ? preferred_size : merged_size;
   //This must succeed since nunits is not greater than merged_size!
   if(!this->priv_check_and_allocate (nunits, prev, block, received_size)){
      //Something very ugly is happening here. This is a bug
      //or there is memory corruption
      BOOST_ASSERT(0);
      return false;
   }
   return true;
}
//Checks whether the free block "block" can hold "nunits" total units
//(data + block_ctrl header). On success it splits or unlinks the block
//from the free list ("prev" is its predecessor in that list), marks it
//allocated, updates bookkeeping and returns the user buffer; otherwise
//returns 0. "received_size" receives the usable bytes of the block.
template<class MutexFamily, class VoidPointer> inline
void* simple_seq_fit_impl<MutexFamily, VoidPointer>::priv_check_and_allocate
   (size_type nunits
   ,typename simple_seq_fit_impl<MutexFamily, VoidPointer>::block_ctrl* prev
   ,typename simple_seq_fit_impl<MutexFamily, VoidPointer>::block_ctrl* block
   ,size_type &received_size)
{
   //Minimum size the block must exceed so the remainder can form a
   //valid free block on its own
   size_type upper_nunits = nunits + BlockCtrlUnits;
   bool found = false;
   if (block->m_size > upper_nunits){
      //This block is bigger than needed, split it in
      //two blocks: the first's size will be "nunits"
      //and the second's size "block->m_size - nunits"
      size_type total_size = block->m_size;
      block->m_size = nunits;
      block_ctrl *new_block = reinterpret_cast<block_ctrl*>
         (reinterpret_cast<char*>(block) + Alignment*nunits);
      new_block->m_size = total_size - nunits;
      new_block->m_next = block->m_next;
      prev->m_next = new_block;
      found = true;
   }
   else if (block->m_size >= nunits){
      //This block fits but the remainder is too small to split off:
      //take the whole block (any extra bytes become part of it)
      prev->m_next = block->m_next;
      found = true;
   }
   if(found){
      //We need block_ctrl for deallocation stuff, so
      //return memory user can overwrite
      m_header.m_allocated += block->m_size*Alignment;
      received_size = block->get_user_bytes();
      //Mark the block as allocated
      block->m_next = 0;
      //Check alignment
      algo_impl_t::assert_alignment(block);
      return priv_get_user_buffer(block);
   }
   return 0;
}
  872. template<class MutexFamily, class VoidPointer>
  873. void simple_seq_fit_impl<MutexFamily, VoidPointer>::deallocate(void* addr)
  874. {
  875. if(!addr) return;
  876. //-----------------------
  877. boost::interprocess::scoped_lock<interprocess_mutex> guard(m_header);
  878. //-----------------------
  879. return this->priv_deallocate(addr);
  880. }
//Returns "addr" to the free list without locking: finds its address-ordered
//position in the list, coalesces it with the physically adjacent next and
//previous free blocks when possible and updates the allocated byte count.
//"addr" must be a buffer previously returned by this segment.
template<class MutexFamily, class VoidPointer>
void simple_seq_fit_impl<MutexFamily, VoidPointer>::priv_deallocate(void* addr)
{
   if(!addr) return;
   //Let's get free block list. List is always sorted
   //by memory address to allow block merging.
   //Pointer next always points to the first
   //(lower address) block
   block_ctrl * prev = &m_header.m_root;
   block_ctrl * pos = ipcdetail::to_raw_pointer(m_header.m_root.m_next);
   block_ctrl * block = reinterpret_cast<block_ctrl*>(priv_get_block(addr));
   //All used blocks' next is marked with 0 so check it
   BOOST_ASSERT(block->m_next == 0);
   //Check if alignment and block size are right
   algo_impl_t::assert_alignment(addr);
   size_type total_size = Alignment*block->m_size;
   BOOST_ASSERT(m_header.m_allocated >= total_size);
   //Update used memory count
   m_header.m_allocated -= total_size;
   //Let's find the previous and the next block of the block to deallocate
   //This ordering comparison must be done with original pointers
   //types since their mapping to raw pointers can be different
   //in each process
   while((ipcdetail::to_raw_pointer(pos) != &m_header.m_root) && (block > pos)){
      prev = pos;
      pos = ipcdetail::to_raw_pointer(pos->m_next);
   }
   //Try to combine with upper block: if the freed block ends exactly
   //where "pos" starts, absorb "pos" into the freed block
   char *block_char_ptr = reinterpret_cast<char*>(ipcdetail::to_raw_pointer(block));
   if ((block_char_ptr + Alignment*block->m_size) ==
       reinterpret_cast<char*>(ipcdetail::to_raw_pointer(pos))){
      block->m_size += pos->m_size;
      block->m_next = pos->m_next;
   }
   else{
      block->m_next = pos;
   }
   //Try to combine with lower block: if "prev" ends exactly where the
   //freed block starts, absorb the freed block into "prev"
   if ((reinterpret_cast<char*>(ipcdetail::to_raw_pointer(prev))
           + Alignment*prev->m_size) ==
       block_char_ptr){
      prev->m_size += block->m_size;
      prev->m_next = block->m_next;
   }
   else{
      prev->m_next = block;
   }
}
  929. } //namespace ipcdetail {
  930. } //namespace interprocess {
  931. } //namespace boost {
  932. #include <boost/interprocess/detail/config_end.hpp>
  933. #endif //#ifndef BOOST_INTERPROCESS_MEM_ALGO_DETAIL_SIMPLE_SEQ_FIT_IMPL_HPP