mem_algo_common.hpp

//////////////////////////////////////////////////////////////////////////////
//
// (C) Copyright Ion Gaztanaga 2005-2012. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
// See http://www.boost.org/libs/interprocess for documentation.
//
//////////////////////////////////////////////////////////////////////////////

#ifndef BOOST_INTERPROCESS_DETAIL_MEM_ALGO_COMMON_HPP
#define BOOST_INTERPROCESS_DETAIL_MEM_ALGO_COMMON_HPP

#ifndef BOOST_CONFIG_HPP
#  include <boost/config.hpp>
#endif
#
#if defined(BOOST_HAS_PRAGMA_ONCE)
#  pragma once
#endif

#include <boost/interprocess/detail/config_begin.hpp>
#include <boost/interprocess/detail/workaround.hpp>

// interprocess
#include <boost/interprocess/interprocess_fwd.hpp>
#include <boost/interprocess/containers/allocation_type.hpp>
// interprocess/detail
#include <boost/interprocess/detail/math_functions.hpp>
#include <boost/interprocess/detail/min_max.hpp>
#include <boost/interprocess/detail/type_traits.hpp>
#include <boost/interprocess/detail/utilities.hpp>
// container/detail
#include <boost/container/detail/multiallocation_chain.hpp>
#include <boost/container/detail/placement_new.hpp>
// move
#include <boost/move/utility_core.hpp>
// other boost
#include <boost/static_assert.hpp>
#include <boost/assert.hpp>

//!\file
//!Implements common operations for memory algorithms.

namespace boost {
namespace interprocess {
namespace ipcdetail {
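
//!A movable, non-copyable chain of raw buffers built on
//!boost::container's basic_multiallocation_chain. The only behavioral
//!difference is that pop_front() returns the node as a raw void*
//!instead of a (possibly smart) void_pointer.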
template<class VoidPointer>
class basic_multiallocation_chain
   : public boost::container::dtl::
      basic_multiallocation_chain<VoidPointer>
{
   BOOST_MOVABLE_BUT_NOT_COPYABLE(basic_multiallocation_chain)
   typedef boost::container::dtl::
      basic_multiallocation_chain<VoidPointer> base_t;

   public:
   basic_multiallocation_chain()
      :  base_t()
   {}

   basic_multiallocation_chain(BOOST_RV_REF(basic_multiallocation_chain) other)
      :  base_t(::boost::move(static_cast<base_t&>(other)))
   {}

   basic_multiallocation_chain& operator=(BOOST_RV_REF(basic_multiallocation_chain) other)
   {
      this->base_t::operator=(::boost::move(static_cast<base_t&>(other)));
      return *this;
   }

   void *pop_front()
   {  return boost::interprocess::ipcdetail::to_raw_pointer(this->base_t::pop_front());  }
};

//!This class implements several allocation functions shared by different algorithms
//!(aligned allocation, multiple allocation...).
template<class MemoryAlgorithm>
class memory_algorithm_common
{
   public:
   typedef typename MemoryAlgorithm::void_pointer           void_pointer;
   typedef typename MemoryAlgorithm::block_ctrl             block_ctrl;
   typedef typename MemoryAlgorithm::multiallocation_chain  multiallocation_chain;
   typedef memory_algorithm_common<MemoryAlgorithm>         this_type;
   typedef typename MemoryAlgorithm::size_type              size_type;

   static const size_type Alignment             = MemoryAlgorithm::Alignment;
   static const size_type MinBlockUnits         = MemoryAlgorithm::MinBlockUnits;
   static const size_type AllocatedCtrlBytes    = MemoryAlgorithm::AllocatedCtrlBytes;
   static const size_type AllocatedCtrlUnits    = MemoryAlgorithm::AllocatedCtrlUnits;
   static const size_type BlockCtrlBytes        = MemoryAlgorithm::BlockCtrlBytes;
   static const size_type BlockCtrlUnits        = MemoryAlgorithm::BlockCtrlUnits;
   static const size_type UsableByPreviousChunk = MemoryAlgorithm::UsableByPreviousChunk;

   static void assert_alignment(const void *ptr)
   {  assert_alignment((std::size_t)ptr);  }

   static void assert_alignment(size_type uint_ptr)
   {
      (void)uint_ptr;
      BOOST_ASSERT(uint_ptr % Alignment == 0);
   }

   static bool check_alignment(const void *ptr)
   {  return (((std::size_t)ptr) % Alignment == 0);  }

   static size_type ceil_units(size_type size)
   {  return get_rounded_size(size, Alignment)/Alignment;  }

   static size_type floor_units(size_type size)
   {  return size/Alignment;  }

   static size_type multiple_of_units(size_type size)
   {  return get_rounded_size(size, Alignment);  }

   static void allocate_many
      (MemoryAlgorithm *memory_algo, size_type elem_bytes, size_type n_elements, multiallocation_chain &chain)
   {
      return this_type::priv_allocate_many(memory_algo, &elem_bytes, n_elements, 0, chain);
   }

   static void deallocate_many(MemoryAlgorithm *memory_algo, multiallocation_chain &chain)
   {
      return this_type::priv_deallocate_many(memory_algo, chain);
   }
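
   //Computes, for a backwards expansion, the least common multiple of
   //backwards_multiple and Alignment (lcm_out) and how many bytes the buffer
   //must grow backwards so the resulting size is a multiple of both
   //(needs_backwards_lcmed_out). Returns false when size_to_achieve is not
   //a multiple of a power-of-two backwards_multiple.
   //Illustration only (values assumed for the example): with Alignment == 16,
   //backwards_multiple == 4, received_size == 100 and size_to_achieve == 140,
   //the power-of-two branch yields lcm_out == 16 and
   //needs_backwards_lcmed_out == 48 (40 missing bytes, rounded up to the
   //next multiple of 16).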
   static bool calculate_lcm_and_needs_backwards_lcmed
      (size_type backwards_multiple, size_type received_size, size_type size_to_achieve,
       size_type &lcm_out, size_type &needs_backwards_lcmed_out)
   {
      // Now calculate lcm_val
      size_type max = backwards_multiple;
      size_type min = Alignment;
      size_type needs_backwards;
      size_type needs_backwards_lcmed;
      size_type lcm_val;
      size_type current_forward;
      //Swap if necessary
      if(max < min){
         size_type tmp = min;
         min = max;
         max = tmp;
      }
      //Check if it's a power of two
      if((backwards_multiple & (backwards_multiple-1)) == 0){
         if(0 != (size_to_achieve & ((backwards_multiple-1)))){
            return false;
         }

         lcm_val = max;
         //If we want to use minbytes data to get a buffer between maxbytes
         //and minbytes if maxbytes can't be achieved, calculate the
         //biggest of all possibilities
         current_forward = get_truncated_size_po2(received_size, backwards_multiple);
         needs_backwards = size_to_achieve - current_forward;
         BOOST_ASSERT((needs_backwards % backwards_multiple) == 0);
         needs_backwards_lcmed = get_rounded_size_po2(needs_backwards, lcm_val);
         lcm_out = lcm_val;
         needs_backwards_lcmed_out = needs_backwards_lcmed;
         return true;
      }
      //Check if it's a multiple of the alignment
      else if((backwards_multiple & (Alignment - 1u)) == 0){
         lcm_val = backwards_multiple;
         current_forward = get_truncated_size(received_size, backwards_multiple);
         //No need to round needs_backwards because backwards_multiple == lcm_val
         needs_backwards_lcmed = needs_backwards = size_to_achieve - current_forward;
         BOOST_ASSERT((needs_backwards_lcmed & (Alignment - 1u)) == 0);
         lcm_out = lcm_val;
         needs_backwards_lcmed_out = needs_backwards_lcmed;
         return true;
      }
      //Check if it's a multiple of half the alignment
      else if((backwards_multiple & ((Alignment/2u) - 1u)) == 0){
         lcm_val = backwards_multiple*2u;
         current_forward = get_truncated_size(received_size, backwards_multiple);
         needs_backwards_lcmed = needs_backwards = size_to_achieve - current_forward;
         if(0 != (needs_backwards_lcmed & (Alignment-1)))
            //while(0 != (needs_backwards_lcmed & (Alignment-1)))
            needs_backwards_lcmed += backwards_multiple;
         BOOST_ASSERT((needs_backwards_lcmed % lcm_val) == 0);
         lcm_out = lcm_val;
         needs_backwards_lcmed_out = needs_backwards_lcmed;
         return true;
      }
      //Check if it's a multiple of a quarter of the alignment
      else if((backwards_multiple & ((Alignment/4u) - 1u)) == 0){
         size_type remainder;
         lcm_val = backwards_multiple*4u;
         current_forward = get_truncated_size(received_size, backwards_multiple);
         needs_backwards_lcmed = needs_backwards = size_to_achieve - current_forward;
         //while(0 != (needs_backwards_lcmed & (Alignment-1)))
         //   needs_backwards_lcmed += backwards_multiple;
         if(0 != (remainder = ((needs_backwards_lcmed & (Alignment-1))>>(Alignment/8u)))){
            if(backwards_multiple & Alignment/2u){
               needs_backwards_lcmed += (remainder)*backwards_multiple;
            }
            else{
               needs_backwards_lcmed += (4-remainder)*backwards_multiple;
            }
         }
         BOOST_ASSERT((needs_backwards_lcmed % lcm_val) == 0);
         lcm_out = lcm_val;
         needs_backwards_lcmed_out = needs_backwards_lcmed;
         return true;
      }
      else{
         lcm_val = lcm(max, min);
      }
      //If we want to use minbytes data to get a buffer between maxbytes
      //and minbytes if maxbytes can't be achieved, calculate the
      //biggest of all possibilities
      current_forward = get_truncated_size(received_size, backwards_multiple);
      needs_backwards = size_to_achieve - current_forward;
      BOOST_ASSERT((needs_backwards % backwards_multiple) == 0);
      needs_backwards_lcmed = get_rounded_size(needs_backwards, lcm_val);
      lcm_out = lcm_val;
      needs_backwards_lcmed_out = needs_backwards_lcmed;
      return true;
   }

   static void allocate_many
      ( MemoryAlgorithm *memory_algo
      , const size_type *elem_sizes
      , size_type n_elements
      , size_type sizeof_element
      , multiallocation_chain &chain)
   {
      this_type::priv_allocate_many(memory_algo, elem_sizes, n_elements, sizeof_element, chain);
   }
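
   //Allocates nbytes with the requested alignment, which must be a power of
   //two. When the alignment exceeds the algorithm's natural Alignment, the
   //function over-allocates, finds an aligned position inside the buffer,
   //splits the block around it and returns the aligned middle part to the
   //caller, deallocating the unused head and tail fragments.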
   static void* allocate_aligned
      (MemoryAlgorithm *memory_algo, size_type nbytes, size_type alignment)
   {
      //Ensure power of 2
      if ((alignment & (alignment - size_type(1u))) != 0){
         //Alignment is not a power of two
         BOOST_ASSERT((alignment & (alignment - size_type(1u))) == 0);
         return 0;
      }

      size_type real_size = nbytes;
      if(alignment <= Alignment){
         void *ignore_reuse = 0;
         return memory_algo->priv_allocate
            (boost::interprocess::allocate_new, nbytes, real_size, ignore_reuse);
      }

      if(nbytes > UsableByPreviousChunk)
         nbytes -= UsableByPreviousChunk;

      //We can find an aligned portion if we allocate a block of
      //nbytes + alignment bytes or more.
      size_type minimum_allocation = max_value
         (nbytes + alignment, size_type(MinBlockUnits*Alignment));
      //Since we will split that block, we must request a bit more memory
      //if the alignment is near the beginning of the buffer, because otherwise,
      //there is no space for a new block before the alignment.
      //
      //    ____ Aligned here
      //   |
      //  -----------------------------------------------------
      // | MBU |
      //  -----------------------------------------------------
      size_type request =
         minimum_allocation + (2*MinBlockUnits*Alignment - AllocatedCtrlBytes
         //prevsize - UsableByPreviousChunk
         );

      //Now allocate the buffer
      real_size = request;
      void *ignore_reuse = 0;
      void *buffer = memory_algo->priv_allocate(boost::interprocess::allocate_new, request, real_size, ignore_reuse);
      if(!buffer){
         return 0;
      }
      else if ((((std::size_t)(buffer)) % alignment) == 0){
         //If we are lucky and the buffer is aligned, just split it and
         //return the high part
         block_ctrl *first  = memory_algo->priv_get_block(buffer);
         size_type old_size = first->m_size;
         const size_type first_min_units =
            max_value(ceil_units(nbytes) + AllocatedCtrlUnits, size_type(MinBlockUnits));
         //We can create a new block at the end of the segment
         if(old_size >= (first_min_units + MinBlockUnits)){
            block_ctrl *second = reinterpret_cast<block_ctrl *>
               (reinterpret_cast<char*>(first) + Alignment*first_min_units);
            first->m_size  = first_min_units;
            second->m_size = old_size - first->m_size;
            BOOST_ASSERT(second->m_size >= MinBlockUnits);
            memory_algo->priv_mark_new_allocated_block(first);
            memory_algo->priv_mark_new_allocated_block(second);
            memory_algo->priv_deallocate(memory_algo->priv_get_user_buffer(second));
         }
         return buffer;
      }

      //Buffer not aligned, find the aligned part.
      //
      //                    ____ Aligned here
      //                   |
      //  -----------------------------------------------------
      // | MBU +more | ACB |
      //  -----------------------------------------------------
      char *pos = reinterpret_cast<char*>
         (reinterpret_cast<std::size_t>(static_cast<char*>(buffer) +
            //This is the minimum size of (2)
            (MinBlockUnits*Alignment - AllocatedCtrlBytes) +
            //This is the next MBU for the aligned memory
            AllocatedCtrlBytes +
            //This is the alignment trick
            alignment - 1) & -alignment);

      //Now obtain the address of the blocks
      block_ctrl *first  = memory_algo->priv_get_block(buffer);
      block_ctrl *second = memory_algo->priv_get_block(pos);
      BOOST_ASSERT(pos <= (reinterpret_cast<char*>(first) + first->m_size*Alignment));
      BOOST_ASSERT(first->m_size >= 2*MinBlockUnits);
      BOOST_ASSERT((pos + MinBlockUnits*Alignment - AllocatedCtrlBytes + nbytes*Alignment/Alignment) <=
         (reinterpret_cast<char*>(first) + first->m_size*Alignment));

      //Set the new size of the first block
      size_type old_size = first->m_size;
      first->m_size = (size_type)(reinterpret_cast<char*>(second) - reinterpret_cast<char*>(first))/Alignment;
      memory_algo->priv_mark_new_allocated_block(first);

      //Now check if we can create a new buffer at the end
      //
      //              __"second" block
      //             |      __Aligned here
      //             |     |      __"third" block
      //  -----------|-----|-----|------------------------------
      // | MBU +more | ACB | (3) | BCU |
      //  -----------------------------------------------------
      //This size will be the minimum size to be able to create a
      //new block at the end.
      const size_type second_min_units = max_value(size_type(MinBlockUnits),
                        ceil_units(nbytes) + AllocatedCtrlUnits );

      //Check if we can create a new block (of size MinBlockUnits) at the end of the segment
      if((old_size - first->m_size) >= (second_min_units + MinBlockUnits)){
         //Now obtain the address of the end block
         block_ctrl *third = new (reinterpret_cast<char*>(second) + Alignment*second_min_units)block_ctrl;
         second->m_size = second_min_units;
         third->m_size  = old_size - first->m_size - second->m_size;
         BOOST_ASSERT(third->m_size >= MinBlockUnits);
         memory_algo->priv_mark_new_allocated_block(second);
         memory_algo->priv_mark_new_allocated_block(third);
         memory_algo->priv_deallocate(memory_algo->priv_get_user_buffer(third));
      }
      else{
         second->m_size = old_size - first->m_size;
         BOOST_ASSERT(second->m_size >= MinBlockUnits);
         memory_algo->priv_mark_new_allocated_block(second);
      }

      memory_algo->priv_deallocate(memory_algo->priv_get_user_buffer(first));
      return memory_algo->priv_get_user_buffer(second);
   }
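
   //Checks whether the allocated block holding "ptr" can be shrunk so the
   //user keeps at least the preferred size (passed in received_size) and at
   //most max_size bytes. Only computes the achievable size and writes it to
   //received_size; the actual split is performed by shrink() below.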
   static bool try_shrink
      (MemoryAlgorithm *memory_algo, void *ptr
      ,const size_type max_size, size_type &received_size)
   {
      size_type const preferred_size = received_size;
      (void)memory_algo;
      //Obtain the real block
      block_ctrl *block = memory_algo->priv_get_block(ptr);
      size_type old_block_units = (size_type)block->m_size;

      //The block must be marked as allocated
      BOOST_ASSERT(memory_algo->priv_is_allocated_block(block));

      //Check if alignment and block size are right
      assert_alignment(ptr);

      //Put this to a safe value
      received_size = (old_block_units - AllocatedCtrlUnits)*Alignment + UsableByPreviousChunk;

      //Now translate it to Alignment units
      const size_type max_user_units       = floor_units(max_size - UsableByPreviousChunk);
      const size_type preferred_user_units = ceil_units(preferred_size - UsableByPreviousChunk);

      //Check that the rounded max and preferred sizes are consistent
      if(max_user_units < preferred_user_units)
         return false;

      //Check if the block is smaller than the requested minimum
      size_type old_user_units = old_block_units - AllocatedCtrlUnits;

      if(old_user_units < preferred_user_units)
         return false;

      //If the block is already the preferred size there is nothing to do
      if(old_user_units == preferred_user_units)
         return true;

      size_type shrunk_user_units =
         ((BlockCtrlUnits - AllocatedCtrlUnits) >= preferred_user_units)
         ? (BlockCtrlUnits - AllocatedCtrlUnits)
         : preferred_user_units;

      //Some parameter checks
      if(max_user_units < shrunk_user_units)
         return false;

      //We must be able to create at least a new empty block
      if((old_user_units - shrunk_user_units) < BlockCtrlUnits ){
         return false;
      }

      //Update new size
      received_size = shrunk_user_units*Alignment + UsableByPreviousChunk;
      return true;
   }
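
   //Commits the shrink computed by try_shrink(): rewrites the size of the
   //original block and carves the freed tail into a new block, which is
   //marked as allocated and then deallocated so it returns to the free space.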
   static bool shrink
      (MemoryAlgorithm *memory_algo, void *ptr
      ,const size_type max_size, size_type &received_size)
   {
      size_type const preferred_size = received_size;
      //Obtain the real block
      block_ctrl *block = memory_algo->priv_get_block(ptr);
      size_type old_block_units = (size_type)block->m_size;

      if(!try_shrink(memory_algo, ptr, max_size, received_size)){
         return false;
      }

      //Check if the old size was just the shrunk size (no splitting)
      if((old_block_units - AllocatedCtrlUnits) == ceil_units(preferred_size - UsableByPreviousChunk))
         return true;

      //Now we can just rewrite the size of the old buffer
      block->m_size = (received_size-UsableByPreviousChunk)/Alignment + AllocatedCtrlUnits;
      BOOST_ASSERT(block->m_size >= BlockCtrlUnits);

      //We create the new block
      block_ctrl *new_block = reinterpret_cast<block_ctrl*>
         (reinterpret_cast<char*>(block) + block->m_size*Alignment);
      //Write control data to simulate that this new block was previously allocated
      //and deallocate it
      new_block->m_size = old_block_units - block->m_size;
      BOOST_ASSERT(new_block->m_size >= BlockCtrlUnits);
      memory_algo->priv_mark_new_allocated_block(block);
      memory_algo->priv_mark_new_allocated_block(new_block);
      memory_algo->priv_deallocate(memory_algo->priv_get_user_buffer(new_block));
      return true;
   }

   private:
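   //Implements both allocate_many overloads. sizeof_element == 0 means
   //"n_elements buffers of *elem_sizes bytes each"; otherwise elem_sizes is
   //an array of n_elements element counts, each scaled by sizeof_element.
   //The strategy is to allocate one large block per iteration and carve it
   //into per-element blocks, pushing each user buffer into the chain. If not
   //all elements could be allocated, everything already carved is returned.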
   static void priv_allocate_many
      ( MemoryAlgorithm *memory_algo
      , const size_type *elem_sizes
      , size_type n_elements
      , size_type sizeof_element
      , multiallocation_chain &chain)
   {
      //Note: sizeof_element == 0 indicates that we want to
      //allocate n_elements of the same size "*elem_sizes"

      //Calculate the total size of all requests
      size_type total_request_units = 0;
      size_type elem_units = 0;
      const size_type ptr_size_units = memory_algo->priv_get_total_units(sizeof(void_pointer));
      if(!sizeof_element){
         elem_units = memory_algo->priv_get_total_units(*elem_sizes);
         elem_units = ptr_size_units > elem_units ? ptr_size_units : elem_units;
         total_request_units = n_elements*elem_units;
      }
      else{
         for(size_type i = 0; i < n_elements; ++i){
            if(multiplication_overflows(elem_sizes[i], sizeof_element)){
               total_request_units = 0;
               break;
            }
            elem_units = memory_algo->priv_get_total_units(elem_sizes[i]*sizeof_element);
            elem_units = ptr_size_units > elem_units ? ptr_size_units : elem_units;
            if(sum_overflows(total_request_units, elem_units)){
               total_request_units = 0;
               break;
            }
            total_request_units += elem_units;
         }
      }

      if(total_request_units && !multiplication_overflows(total_request_units, Alignment)){
         size_type low_idx = 0;
         while(low_idx < n_elements){
            size_type total_bytes = total_request_units*Alignment - AllocatedCtrlBytes + UsableByPreviousChunk;
            size_type min_allocation = (!sizeof_element)
               ?  elem_units
               :  memory_algo->priv_get_total_units(elem_sizes[low_idx]*sizeof_element);
            min_allocation = min_allocation*Alignment - AllocatedCtrlBytes + UsableByPreviousChunk;

            size_type received_size = total_bytes;
            void *ignore_reuse = 0;
            void *ret = memory_algo->priv_allocate
               (boost::interprocess::allocate_new, min_allocation, received_size, ignore_reuse);
            if(!ret){
               break;
            }

            block_ctrl *block = memory_algo->priv_get_block(ret);
            size_type received_units = (size_type)block->m_size;
            char *block_address = reinterpret_cast<char*>(block);

            size_type total_used_units = 0;
            while(total_used_units < received_units){
               if(sizeof_element){
                  elem_units = memory_algo->priv_get_total_units(elem_sizes[low_idx]*sizeof_element);
                  elem_units = ptr_size_units > elem_units ? ptr_size_units : elem_units;
               }
               if(total_used_units + elem_units > received_units)
                  break;
               total_request_units -= elem_units;
               //This is the position where the new block must be created
               block_ctrl *new_block = reinterpret_cast<block_ctrl *>(block_address);
               assert_alignment(new_block);

               //The last block should take all the remaining space
               if((low_idx + 1) == n_elements ||
                  (total_used_units + elem_units +
                  ((!sizeof_element)
                     ? elem_units
                     : max_value(memory_algo->priv_get_total_units(elem_sizes[low_idx+1]*sizeof_element), ptr_size_units))
                   > received_units)){
                  //By default, the new block will use the rest of the buffer
                  new_block->m_size = received_units - total_used_units;
                  memory_algo->priv_mark_new_allocated_block(new_block);

                  //If the remaining units are bigger than needed and we can
                  //split them to obtain a new free memory block, do it
                  if((received_units - total_used_units) >= (elem_units + MemoryAlgorithm::BlockCtrlUnits)){
                     size_type shrunk_request = elem_units*Alignment - AllocatedCtrlBytes + UsableByPreviousChunk;
                     size_type shrunk_received = shrunk_request;
                     bool shrink_ok = shrink
                        (memory_algo
                        ,memory_algo->priv_get_user_buffer(new_block)
                        ,shrunk_request
                        ,shrunk_received);
                     (void)shrink_ok;
                     //Shrink must always succeed with the passed parameters
                     BOOST_ASSERT(shrink_ok);
                     //Some sanity checks
                     BOOST_ASSERT(shrunk_request == shrunk_received);
                     BOOST_ASSERT(elem_units == ((shrunk_request-UsableByPreviousChunk)/Alignment + AllocatedCtrlUnits));
                     //"new_block->m_size" must have been reduced to elem_units by "shrink"
                     BOOST_ASSERT(new_block->m_size == elem_units);
                     //Now update the total received units with the reduction
                     received_units = elem_units + total_used_units;
                  }
               }
               else{
                  new_block->m_size = elem_units;
                  memory_algo->priv_mark_new_allocated_block(new_block);
               }

               block_address += new_block->m_size*Alignment;
               total_used_units += (size_type)new_block->m_size;
               //Check we have enough room to overwrite the intrusive pointer
               BOOST_ASSERT((new_block->m_size*Alignment - AllocatedCtrlUnits) >= sizeof(void_pointer));
               void_pointer p = ::new(memory_algo->priv_get_user_buffer(new_block), boost_container_new_t())void_pointer(0);
               chain.push_back(p);
               ++low_idx;
            }
            //Sanity check
            BOOST_ASSERT(total_used_units == received_units);
         }

         if(low_idx != n_elements){
            priv_deallocate_many(memory_algo, chain);
         }
      }
   }
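
   //Pops every buffer linked in the chain and returns it to the memory algorithm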
   static void priv_deallocate_many(MemoryAlgorithm *memory_algo, multiallocation_chain &chain)
   {
      while(!chain.empty()){
         memory_algo->priv_deallocate(to_raw_pointer(chain.pop_front()));
      }
   }
};

}  //namespace ipcdetail {
}  //namespace interprocess {
}  //namespace boost {

#include <boost/interprocess/detail/config_end.hpp>

#endif   //#ifndef BOOST_INTERPROCESS_DETAIL_MEM_ALGO_COMMON_HPP