mapped_region.hpp

//////////////////////////////////////////////////////////////////////////////
//
// (C) Copyright Ion Gaztanaga 2005-2012. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
// See http://www.boost.org/libs/interprocess for documentation.
//
//////////////////////////////////////////////////////////////////////////////

#ifndef BOOST_INTERPROCESS_MAPPED_REGION_HPP
#define BOOST_INTERPROCESS_MAPPED_REGION_HPP

#ifndef BOOST_CONFIG_HPP
#  include <boost/config.hpp>
#endif
#
#if defined(BOOST_HAS_PRAGMA_ONCE)
#  pragma once
#endif

#include <boost/interprocess/detail/config_begin.hpp>
#include <boost/interprocess/detail/workaround.hpp>

#include <boost/interprocess/interprocess_fwd.hpp>
#include <boost/interprocess/exceptions.hpp>
#include <boost/move/utility_core.hpp>
#include <boost/interprocess/detail/utilities.hpp>
#include <boost/interprocess/detail/os_file_functions.hpp>
#include <string>
#include <boost/cstdint.hpp>
#include <boost/assert.hpp>
#include <boost/move/adl_move_swap.hpp>

//Some Unixes use caddr_t instead of void * in madvise
//   SunOS  Tru64  HP-UX  AIX
#if defined(sun) || defined(__sun) || defined(__osf__) || defined(__osf) || defined(_hpux) || defined(hpux) || defined(_AIX)
#define BOOST_INTERPROCESS_MADVISE_USES_CADDR_T
#include <sys/types.h>
#endif

//A lot of UNIXes have destructive semantics for MADV_DONTNEED, so
//we need to be careful about when to allow it.
#if defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__NetBSD__) || defined(__APPLE__)
#define BOOST_INTERPROCESS_MADV_DONTNEED_HAS_NONDESTRUCTIVE_SEMANTICS
#endif

#if defined (BOOST_INTERPROCESS_WINDOWS)
#  include <boost/interprocess/detail/win32_api.hpp>
#  include <boost/interprocess/sync/windows/sync_utils.hpp>
#else
#  ifdef BOOST_HAS_UNISTD_H
#    include <fcntl.h>
#    include <sys/mman.h>     //mmap
#    include <unistd.h>
#    include <sys/stat.h>
#    include <sys/types.h>
#    if defined(BOOST_INTERPROCESS_XSI_SHARED_MEMORY_OBJECTS)
#      include <sys/shm.h>    //System V shared memory...
#    endif
#    include <boost/assert.hpp>
#  else
#    error Unknown platform
#  endif
#endif   //#if defined (BOOST_INTERPROCESS_WINDOWS)

//!\file
//!Describes mapped region class

namespace boost {
namespace interprocess {

#if !defined(BOOST_INTERPROCESS_DOXYGEN_INVOKED)

//Solaris declares madvise only in some configurations but defines MADV_XXX, a bit confusing.
//Predeclare it here to avoid any compilation error
#if (defined(sun) || defined(__sun)) && defined(MADV_NORMAL)
extern "C" int madvise(caddr_t, size_t, int);
#endif

namespace ipcdetail{ class interprocess_tester; }
namespace ipcdetail{ class raw_mapped_region_creator; }

#endif   //#ifndef BOOST_INTERPROCESS_DOXYGEN_INVOKED
//!The mapped_region class represents a portion or region created from a
//!memory_mappable object.
//!
//!The OS can map a region bigger than the requested one, as the region must
//!be a multiple of the page size, but mapped_region will always refer to
//!the region specified by the user.
class mapped_region
{
   #if !defined(BOOST_INTERPROCESS_DOXYGEN_INVOKED)
   //Non-copyable
   BOOST_MOVABLE_BUT_NOT_COPYABLE(mapped_region)
   #endif   //#ifndef BOOST_INTERPROCESS_DOXYGEN_INVOKED

   public:

   //!Creates a mapping region of the mapped memory "mapping", starting at
   //!offset "offset", and the mapping's size will be "size". The mapping
   //!can be opened for read-only, read-write or copy-on-write.
   //!
   //!If an address is specified, both the offset and the address must be
   //!multiples of the page size.
   //!
   //!The map is created using "default_map_options". This flag is OS
   //!dependent and it should not be changed unless the user needs to
   //!specify special options.
   //!
   //!In Windows systems "map_options" is a DWORD value passed as
   //!"dwDesiredAccess" to "MapViewOfFileEx". If "default_map_options" is passed
   //!it's initialized to zero. "map_options" is ORed with FILE_MAP_[COPY|READ|WRITE].
   //!
   //!In UNIX systems and POSIX mappings "map_options" is an int value passed as "flags"
   //!to "mmap". If "default_map_options" is specified it's initialized to MAP_NOSYNC
   //!if that option exists and to zero otherwise. "map_options" is ORed with MAP_PRIVATE or MAP_SHARED.
   //!
   //!In UNIX systems and XSI mappings "map_options" is an int value passed as "shmflg"
   //!to "shmat". If "default_map_options" is specified it's initialized to zero.
   //!"map_options" is ORed with SHM_RDONLY if needed.
   //!
   //!The OS could allocate more pages than size/page_size(), but get_address()
   //!will always return the address passed in this function (if not null) and
   //!get_size() will return the specified size.
   template<class MemoryMappable>
   mapped_region(const MemoryMappable& mapping
                ,mode_t mode
                ,offset_t offset = 0
                ,std::size_t size = 0
                ,const void *address = 0
                ,map_options_t map_options = default_map_options);

   //!Default constructor. Address will be 0 (nullptr).
   //!Size will be 0.
   //!Does not throw
   mapped_region();

   //!Move constructor. *this will be constructed taking ownership of "other"'s
   //!region and "other" will be left in default-constructed state.
   mapped_region(BOOST_RV_REF(mapped_region) other)
   #if defined (BOOST_INTERPROCESS_WINDOWS)
      :  m_base(0), m_size(0)
      ,  m_page_offset(0)
      ,  m_mode(read_only)
      ,  m_file_or_mapping_hnd(ipcdetail::invalid_file())
   #else
      :  m_base(0), m_size(0), m_page_offset(0), m_mode(read_only), m_is_xsi(false)
   #endif
   {  this->swap(other);   }

   //!Destroys the mapped region.
   //!Does not throw
   ~mapped_region();

   //!Move assignment. If *this owns a memory mapped region, it will be
   //!destroyed and it will take ownership of "other"'s memory mapped region.
   mapped_region &operator=(BOOST_RV_REF(mapped_region) other)
   {
      mapped_region tmp(boost::move(other));
      this->swap(tmp);
      return *this;
   }

   //!Swaps the mapped_region with another
   //!mapped region
   void swap(mapped_region &other);

   //!Returns the size of the mapping. Never throws.
   std::size_t get_size() const;

   //!Returns the base address of the mapping.
   //!Never throws.
   void*       get_address() const;

   //!Returns the mode of the mapping used to construct the mapped region.
   //!Never throws.
   mode_t get_mode() const;

   //!Flushes to the disk a byte range within the mapped memory.
   //!If 'async' is true, the function will return before the flushing operation is completed.
   //!If 'async' is false, the function will return once the data has been written into the underlying
   //!device (i.e., for mapped files, OS-cached information is written to disk).
   //!Never throws. Returns false if the operation could not be performed.
   bool flush(std::size_t mapping_offset = 0, std::size_t numbytes = 0, bool async = true);

   //!Shrinks the current mapped region. If, after shrinking, a previously mapped
   //!memory page is no longer part of the region, accessing that page can trigger a
   //!segmentation fault. Depending on the OS, this operation might fail (XSI shared memory),
   //!it can decommit storage and free a portion of the virtual address space (e.g. POSIX), or
   //!it can release some physical memory without freeing any virtual address space (Windows).
   //!Returns true on success. Never throws.
   bool shrink_by(std::size_t bytes, bool from_back = true);

   //!This enum specifies region usage behaviors that an application can specify
   //!to the mapped region implementation.
   enum advice_types{
      //!Specifies that the application has no advice to give on its behavior with respect to
      //!the region. It is the default characteristic if no advice is given for a range of memory.
      advice_normal,
      //!Specifies that the application expects to access the region sequentially from
      //!lower addresses to higher addresses. The implementation can lower the priority of
      //!preceding pages within the region once a page has been accessed.
      advice_sequential,
      //!Specifies that the application expects to access the region in a random order,
      //!and prefetching is likely not advantageous.
      advice_random,
      //!Specifies that the application expects to access the region in the near future.
      //!The implementation can prefetch pages of the region.
      advice_willneed,
      //!Specifies that the application expects that it will not access the region in the near future.
      //!The implementation can unload pages within the range to save system resources.
      advice_dontneed
   };
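   //Note: on POSIX systems advise() translates these values to POSIX_MADV_* (preferred)
   //or MADV_* constants when the platform defines them; the Windows implementation has
   //no equivalent and always returns false.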
   //!Advises the implementation on the expected behavior of the application with respect to the data
   //!in the region. The implementation may use this information to optimize handling of the region data.
   //!This function has no effect on the semantics of access to memory in the region, although it may affect
   //!the performance of access.
   //!If the advice type is not known to the implementation, the function returns false; true otherwise.
   bool advise(advice_types advise);

   //!Returns the size of the page. This size is the minimum memory that
   //!will be used by the system when mapping a memory mappable source and
   //!will restrict the address and the offset to map.
   static std::size_t get_page_size();

   #if !defined(BOOST_INTERPROCESS_DOXYGEN_INVOKED)
   private:
   //!Closes a previously opened memory mapping. Never throws
   void priv_close();

   void* priv_map_address() const;
   std::size_t priv_map_size() const;
   bool priv_flush_param_check(std::size_t mapping_offset, void *&addr, std::size_t &numbytes) const;
   bool priv_shrink_param_check(std::size_t bytes, bool from_back, void *&shrink_page_start, std::size_t &shrink_page_bytes);
   static void priv_size_from_mapping_size
      (offset_t mapping_size, offset_t offset, offset_t page_offset, std::size_t &size);
   static offset_t priv_page_offset_addr_fixup(offset_t page_offset, const void *&addr);

   template<int dummy>
   struct page_size_holder
   {
      static const std::size_t PageSize;
      static std::size_t get_page_size();
   };

   void*          m_base;
   std::size_t    m_size;
   std::size_t    m_page_offset;
   mode_t         m_mode;
   #if defined(BOOST_INTERPROCESS_WINDOWS)
   file_handle_t  m_file_or_mapping_hnd;
   #else
   bool           m_is_xsi;
   #endif

   friend class ipcdetail::interprocess_tester;
   friend class ipcdetail::raw_mapped_region_creator;
   void dont_close_on_destruction();
   #if defined(BOOST_INTERPROCESS_WINDOWS) && !defined(BOOST_INTERPROCESS_FORCE_GENERIC_EMULATION)
   template<int Dummy>
   static void destroy_syncs_in_range(const void *addr, std::size_t size);
   #endif
   #endif   //#ifndef BOOST_INTERPROCESS_DOXYGEN_INVOKED
};
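
//Minimal usage sketch (illustrative comment only, not part of the header): map a shared
//memory object and zero it. The object name "MySharedMemory" and the 1000-byte size are
//hypothetical; shared_memory_object comes from <boost/interprocess/shared_memory_object.hpp>.
//
//   using namespace boost::interprocess;
//   shared_memory_object shm(open_or_create, "MySharedMemory", read_write);
//   shm.truncate(1000);                                        //size the mappable object
//   mapped_region region(shm, read_write);                     //map the whole object
//   std::memset(region.get_address(), 0, region.get_size());   //use the mapping
//   region.flush();                                            //asynchronous flush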
#if !defined(BOOST_INTERPROCESS_DOXYGEN_INVOKED)

inline void swap(mapped_region &x, mapped_region &y)
{  x.swap(y);  }

inline mapped_region::~mapped_region()
{  this->priv_close(); }

inline std::size_t mapped_region::get_size() const
{  return m_size; }

inline mode_t mapped_region::get_mode() const
{  return m_mode; }

inline void* mapped_region::get_address() const
{  return m_base; }

inline void* mapped_region::priv_map_address() const
{  return static_cast<char*>(m_base) - m_page_offset; }

inline std::size_t mapped_region::priv_map_size() const
{  return m_size + m_page_offset; }

inline bool mapped_region::priv_flush_param_check
   (std::size_t mapping_offset, void *&addr, std::size_t &numbytes) const
{
   //Check some errors
   if(m_base == 0)
      return false;

   if(mapping_offset >= m_size || (mapping_offset + numbytes) > m_size){
      return false;
   }

   //Update flush size if the user does not provide it
   if(numbytes == 0){
      numbytes = m_size - mapping_offset;
   }
   addr = (char*)this->priv_map_address() + mapping_offset;
   numbytes += m_page_offset;
   return true;
}

inline bool mapped_region::priv_shrink_param_check
   (std::size_t bytes, bool from_back, void *&shrink_page_start, std::size_t &shrink_page_bytes)
{
   //Check some errors
   if(m_base == 0 || bytes > m_size){
      return false;
   }
   else if(bytes == m_size){
      this->priv_close();
      return true;
   }
   else{
      const std::size_t page_size = mapped_region::get_page_size();
      if(from_back){
         const std::size_t new_pages = (m_size + m_page_offset - bytes - 1)/page_size + 1;
         shrink_page_start = static_cast<char*>(this->priv_map_address()) + new_pages*page_size;
         shrink_page_bytes = m_page_offset + m_size - new_pages*page_size;
         m_size -= bytes;
      }
      else{
         shrink_page_start = this->priv_map_address();
         m_page_offset += bytes;
         shrink_page_bytes = (m_page_offset/page_size)*page_size;
         m_page_offset = m_page_offset % page_size;
         m_size -= bytes;
         m_base = static_cast<char *>(m_base) + bytes;
         BOOST_ASSERT(shrink_page_bytes%page_size == 0);
      }
      return true;
   }
}
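
//Worked example for priv_shrink_param_check (illustrative, assuming a 4096-byte page,
//m_page_offset == 0 and m_size == 10000): shrink_by(3000, true) keeps 7000 user bytes,
//which still need new_pages == (10000 - 3000 - 1)/4096 + 1 == 2 pages, so the tail starting
//at priv_map_address() + 8192 (10000 - 8192 == 1808 mapped bytes) can be handed back to the
//OS by the platform-specific shrink_by() implementations below.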
inline void mapped_region::priv_size_from_mapping_size
   (offset_t mapping_size, offset_t offset, offset_t page_offset, std::size_t &size)
{
   //Check if the mapping size fits in the user address space,
   //as offset_t is the maximum file size and it's signed.
   if(mapping_size < offset ||
      boost::uintmax_t(mapping_size - (offset - page_offset)) >
         boost::uintmax_t(std::size_t(-1))){
      error_info err(size_error);
      throw interprocess_exception(err);
   }
   size = static_cast<std::size_t>(mapping_size - (offset - page_offset));
}

inline offset_t mapped_region::priv_page_offset_addr_fixup(offset_t offset, const void *&address)
{
   //We can't map at arbitrary offsets, so we have to obtain the system's
   //memory granularity
   const std::size_t page_size = mapped_region::get_page_size();

   //We calculate the difference between the demanded and the valid offset
   //(always less than a page, thus representable by std::size_t)
   const std::size_t page_offset =
      static_cast<std::size_t>(offset - (offset / page_size) * page_size);
   //Update the mapping address
   if(address){
      address = static_cast<const char*>(address) - page_offset;
   }
   return page_offset;
}
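
//Worked example for priv_page_offset_addr_fixup (illustrative, assuming a 4096-byte page):
//for offset == 10000 the nearest valid mapping offset is 8192, so the function returns
//page_offset == 1808 and the constructors below map at file offset (offset - page_offset)
//and expose m_base == base + 1808, preserving the offset the user asked for.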
#if defined (BOOST_INTERPROCESS_WINDOWS)

inline mapped_region::mapped_region()
   :  m_base(0), m_size(0), m_page_offset(0), m_mode(read_only)
   ,  m_file_or_mapping_hnd(ipcdetail::invalid_file())
{}

template<int dummy>
inline std::size_t mapped_region::page_size_holder<dummy>::get_page_size()
{
   winapi::system_info info;
   winapi::get_system_info(&info);
   return std::size_t(info.dwAllocationGranularity);
}

template<class MemoryMappable>
inline mapped_region::mapped_region
   (const MemoryMappable &mapping
   ,mode_t mode
   ,offset_t offset
   ,std::size_t size
   ,const void *address
   ,map_options_t map_options)
   :  m_base(0), m_size(0), m_page_offset(0), m_mode(mode)
   ,  m_file_or_mapping_hnd(ipcdetail::invalid_file())
{
   mapping_handle_t mhandle = mapping.get_mapping_handle();
   {
      file_handle_t native_mapping_handle = 0;

      //Set accesses
      //For "create_file_mapping"
      unsigned long protection = 0;
      //For "map_view_of_file_ex"
      unsigned long map_access = map_options == default_map_options ? 0 : map_options;

      switch(mode)
      {
         case read_only:
         case read_private:
            protection |= winapi::page_readonly;
            map_access |= winapi::file_map_read;
         break;
         case read_write:
            protection |= winapi::page_readwrite;
            map_access |= winapi::file_map_write;
         break;
         case copy_on_write:
            protection |= winapi::page_writecopy;
            map_access |= winapi::file_map_copy;
         break;
         default:
            {
               error_info err(mode_error);
               throw interprocess_exception(err);
            }
         break;
      }

      //For file mappings (including emulated shared memory through temporary files),
      //the device is a file handle, so we need to obtain the file's size and call
      //create_file_mapping to obtain the mapping handle.
      //For files we don't need the file mapping handle after mapping the memory,
      //as the file itself keeps the data, so we schedule that handle to be closed.
      void * handle_to_close = winapi::invalid_handle_value;
      if(!mhandle.is_shm){
         //Create mapping handle
         native_mapping_handle = winapi::create_file_mapping
            ( ipcdetail::file_handle_from_mapping_handle(mapping.get_mapping_handle())
            , protection, 0, 0, 0);

         //Check if all is correct
         if(!native_mapping_handle){
            error_info err = winapi::get_last_error();
            throw interprocess_exception(err);
         }
         handle_to_close = native_mapping_handle;
      }
      else{
         //For windows_shared_memory the device handle is already a mapping handle
         //and we need to maintain it
         native_mapping_handle = mhandle.handle;
      }
      //RAII handle close on scope exit
      const winapi::handle_closer close_handle(handle_to_close);
      (void)close_handle;

      const offset_t page_offset = priv_page_offset_addr_fixup(offset, address);

      //Obtain the mapping size if the user provides a 0 size
      if(size == 0){
         offset_t mapping_size;
         if(!winapi::get_file_mapping_size(native_mapping_handle, mapping_size)){
            error_info err = winapi::get_last_error();
            throw interprocess_exception(err);
         }
         //This can throw
         priv_size_from_mapping_size(mapping_size, offset, page_offset, size);
      }

      //Map with new offsets and size
      void *base = winapi::map_view_of_file_ex
         (native_mapping_handle,
          map_access,
          offset - page_offset,
          static_cast<std::size_t>(page_offset + size),
          const_cast<void*>(address));
      //Check error
      if(!base){
         error_info err = winapi::get_last_error();
         throw interprocess_exception(err);
      }

      //Calculate new base for the user
      m_base = static_cast<char*>(base) + page_offset;
      m_page_offset = page_offset;
      m_size = size;
   }
   //Windows shared memory needs the duplication of the handle if we want to
   //make mapped_region independent from the mappable device.
   //
   //For mapped files, we duplicate the file handle to be able to call FlushFileBuffers
   if(!winapi::duplicate_current_process_handle(mhandle.handle, &m_file_or_mapping_hnd)){
      error_info err = winapi::get_last_error();
      this->priv_close();
      throw interprocess_exception(err);
   }
}

inline bool mapped_region::flush(std::size_t mapping_offset, std::size_t numbytes, bool async)
{
   void *addr;
   if(!this->priv_flush_param_check(mapping_offset, addr, numbytes)){
      return false;
   }
   //Flush it all
   if(!winapi::flush_view_of_file(addr, numbytes)){
      return false;
   }
   //m_file_or_mapping_hnd can be a file handle or a mapping handle,
   //so flushing file buffers only makes sense for files...
   else if(!async && m_file_or_mapping_hnd != winapi::invalid_handle_value &&
           winapi::get_file_type(m_file_or_mapping_hnd) == winapi::file_type_disk){
      return winapi::flush_file_buffers(m_file_or_mapping_hnd);
   }
   return true;
}

inline bool mapped_region::shrink_by(std::size_t bytes, bool from_back)
{
   void *shrink_page_start = 0;
   std::size_t shrink_page_bytes = 0;
   if(!this->priv_shrink_param_check(bytes, from_back, shrink_page_start, shrink_page_bytes)){
      return false;
   }
   else if(shrink_page_bytes){
      //In Windows, we can't decommit the storage or release the virtual address space;
      //the best we can do is try to remove some memory from the process working set.
      //With a bit of luck we can free some physical memory.
      unsigned long old_protect_ignored;
      bool b_ret = winapi::virtual_unlock(shrink_page_start, shrink_page_bytes)
                     || (winapi::get_last_error() == winapi::error_not_locked);
      (void)old_protect_ignored;
      //Change page protection to forbid any further access
      b_ret = b_ret && winapi::virtual_protect
         (shrink_page_start, shrink_page_bytes, winapi::page_noaccess, old_protect_ignored);
      return b_ret;
   }
   else{
      return true;
   }
}

inline bool mapped_region::advise(advice_types)
{
   //Windows has no madvise/posix_madvise equivalent
   return false;
}

inline void mapped_region::priv_close()
{
   if(m_base){
      void *addr = this->priv_map_address();
      #if !defined(BOOST_INTERPROCESS_FORCE_GENERIC_EMULATION)
      mapped_region::destroy_syncs_in_range<0>(addr, m_size);
      #endif
      winapi::unmap_view_of_file(addr);
      m_base = 0;
   }
   if(m_file_or_mapping_hnd != ipcdetail::invalid_file()){
      winapi::close_handle(m_file_or_mapping_hnd);
      m_file_or_mapping_hnd = ipcdetail::invalid_file();
   }
}

inline void mapped_region::dont_close_on_destruction()
{}
#else    //#if defined (BOOST_INTERPROCESS_WINDOWS)

inline mapped_region::mapped_region()
   :  m_base(0), m_size(0), m_page_offset(0), m_mode(read_only), m_is_xsi(false)
{}

template<int dummy>
inline std::size_t mapped_region::page_size_holder<dummy>::get_page_size()
{  return std::size_t(sysconf(_SC_PAGESIZE)); }

template<class MemoryMappable>
inline mapped_region::mapped_region
   ( const MemoryMappable &mapping
   , mode_t mode
   , offset_t offset
   , std::size_t size
   , const void *address
   , map_options_t map_options)
   :  m_base(0), m_size(0), m_page_offset(0), m_mode(mode), m_is_xsi(false)
{
   mapping_handle_t map_hnd = mapping.get_mapping_handle();

   //Some systems don't support XSI shared memory
   #ifdef BOOST_INTERPROCESS_XSI_SHARED_MEMORY_OBJECTS
   if(map_hnd.is_xsi){
      //Get the size
      ::shmid_ds xsi_ds;
      int ret = ::shmctl(map_hnd.handle, IPC_STAT, &xsi_ds);
      if(ret == -1){
         error_info err(system_error_code());
         throw interprocess_exception(err);
      }
      //Compare sizes
      if(size == 0){
         size = (std::size_t)xsi_ds.shm_segsz;
      }
      else if(size != (std::size_t)xsi_ds.shm_segsz){
         error_info err(size_error);
         throw interprocess_exception(err);
      }
      //Calculate the flag
      int flag = map_options == default_map_options ? 0 : map_options;
      if(m_mode == read_only){
         flag |= SHM_RDONLY;
      }
      else if(m_mode != read_write){
         error_info err(mode_error);
         throw interprocess_exception(err);
      }
      //Attach memory
      //Some old shmat implementations take the address as a non-const void pointer,
      //so cast away constness to keep the code portable.
      void *const final_address = const_cast<void *>(address);
      void *base = ::shmat(map_hnd.handle, final_address, flag);
      if(base == (void*)-1){
         error_info err(system_error_code());
         throw interprocess_exception(err);
      }
      //Update members
      m_base   = base;
      m_size   = size;
      m_mode   = mode;
      m_page_offset = 0;
      m_is_xsi = true;
      return;
   }
   #endif   //ifdef BOOST_INTERPROCESS_XSI_SHARED_MEMORY_OBJECTS

   //We calculate the difference between the demanded and the valid offset
   const offset_t page_offset = priv_page_offset_addr_fixup(offset, address);

   if(size == 0){
      struct ::stat buf;
      if(0 != fstat(map_hnd.handle, &buf)){
         error_info err(system_error_code());
         throw interprocess_exception(err);
      }
      //This can throw
      priv_size_from_mapping_size(buf.st_size, offset, page_offset, size);
   }

   #ifdef MAP_NOSYNC
      #define BOOST_INTERPROCESS_MAP_NOSYNC MAP_NOSYNC
   #else
      #define BOOST_INTERPROCESS_MAP_NOSYNC 0
   #endif   //MAP_NOSYNC

   //Create new mapping
   int prot  = 0;
   int flags = map_options == default_map_options ? BOOST_INTERPROCESS_MAP_NOSYNC : map_options;
   #undef BOOST_INTERPROCESS_MAP_NOSYNC

   switch(mode)
   {
      case read_only:
         prot  |= PROT_READ;
         flags |= MAP_SHARED;
      break;

      case read_private:
         prot  |= (PROT_READ);
         flags |= MAP_PRIVATE;
      break;

      case read_write:
         prot  |= (PROT_WRITE | PROT_READ);
         flags |= MAP_SHARED;
      break;

      case copy_on_write:
         prot  |= (PROT_WRITE | PROT_READ);
         flags |= MAP_PRIVATE;
      break;

      default:
         {
            error_info err(mode_error);
            throw interprocess_exception(err);
         }
      break;
   }

   //Map it to the address space
   void* base = mmap ( const_cast<void*>(address)
                     , static_cast<std::size_t>(page_offset + size)
                     , prot
                     , flags
                     , mapping.get_mapping_handle().handle
                     , offset - page_offset);

   //Check if mapping was successful
   if(base == MAP_FAILED){
      error_info err = system_error_code();
      throw interprocess_exception(err);
   }

   //Calculate new base for the user
   m_base = static_cast<char*>(base) + page_offset;
   m_page_offset = page_offset;
   m_size = size;

   //Check for fixed mapping error: if an address was requested but the OS
   //mapped the region elsewhere, treat it as an error
   if(address && (base != address)){
      error_info err(busy_error);
      this->priv_close();
      throw interprocess_exception(err);
   }
}
inline bool mapped_region::shrink_by(std::size_t bytes, bool from_back)
{
   void *shrink_page_start = 0;
   std::size_t shrink_page_bytes = 0;
   if(m_is_xsi || !this->priv_shrink_param_check(bytes, from_back, shrink_page_start, shrink_page_bytes)){
      return false;
   }
   else if(shrink_page_bytes){
      //In UNIX we can decommit and free virtual address space.
      return 0 == munmap(shrink_page_start, shrink_page_bytes);
   }
   else{
      return true;
   }
}

inline bool mapped_region::flush(std::size_t mapping_offset, std::size_t numbytes, bool async)
{
   void *addr;
   if(m_is_xsi || !this->priv_flush_param_check(mapping_offset, addr, numbytes)){
      return false;
   }
   //Flush it all
   return msync(addr, numbytes, async ? MS_ASYNC : MS_SYNC) == 0;
}
inline bool mapped_region::advise(advice_types advice)
{
   int unix_advice = 0;
   //Modes: 0 = none, 1 = posix_madvise, 2 = madvise
   const unsigned int mode_none = 0;
   const unsigned int mode_padv = 1;
   const unsigned int mode_madv = 2;
   // Suppress "unused variable" warnings
   (void)mode_padv;
   (void)mode_madv;
   unsigned int mode = mode_none;
   //Choose the advice either from POSIX (preferred) or native Unix
   switch(advice){
      case advice_normal:
         #if defined(POSIX_MADV_NORMAL)
         unix_advice = POSIX_MADV_NORMAL;
         mode = mode_padv;
         #elif defined(MADV_NORMAL)
         unix_advice = MADV_NORMAL;
         mode = mode_madv;
         #endif
      break;
      case advice_sequential:
         #if defined(POSIX_MADV_SEQUENTIAL)
         unix_advice = POSIX_MADV_SEQUENTIAL;
         mode = mode_padv;
         #elif defined(MADV_SEQUENTIAL)
         unix_advice = MADV_SEQUENTIAL;
         mode = mode_madv;
         #endif
      break;
      case advice_random:
         #if defined(POSIX_MADV_RANDOM)
         unix_advice = POSIX_MADV_RANDOM;
         mode = mode_padv;
         #elif defined(MADV_RANDOM)
         unix_advice = MADV_RANDOM;
         mode = mode_madv;
         #endif
      break;
      case advice_willneed:
         #if defined(POSIX_MADV_WILLNEED)
         unix_advice = POSIX_MADV_WILLNEED;
         mode = mode_padv;
         #elif defined(MADV_WILLNEED)
         unix_advice = MADV_WILLNEED;
         mode = mode_madv;
         #endif
      break;
      case advice_dontneed:
         #if defined(POSIX_MADV_DONTNEED)
         unix_advice = POSIX_MADV_DONTNEED;
         mode = mode_padv;
         #elif defined(MADV_DONTNEED) && defined(BOOST_INTERPROCESS_MADV_DONTNEED_HAS_NONDESTRUCTIVE_SEMANTICS)
         unix_advice = MADV_DONTNEED;
         mode = mode_madv;
         #endif
      break;
      default:
      return false;
   }
   switch(mode){
      #if defined(POSIX_MADV_NORMAL)
      case mode_padv:
         return 0 == posix_madvise(this->priv_map_address(), this->priv_map_size(), unix_advice);
      #endif
      #if defined(MADV_NORMAL)
      case mode_madv:
         return 0 == madvise(
            #if defined(BOOST_INTERPROCESS_MADVISE_USES_CADDR_T)
            (caddr_t)
            #endif
            this->priv_map_address(), this->priv_map_size(), unix_advice);
      #endif
      default:
      return false;
   }
}
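
//Usage sketch for advise() (illustrative comment only; "data.bin" is a hypothetical file):
//
//   using namespace boost::interprocess;
//   file_mapping fmap("data.bin", read_only);
//   mapped_region region(fmap, read_only);
//   region.advise(mapped_region::advice_sequential);   //returns false if no advice API exists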
inline void mapped_region::priv_close()
{
   if(m_base != 0){
      #ifdef BOOST_INTERPROCESS_XSI_SHARED_MEMORY_OBJECTS
      if(m_is_xsi){
         int ret = ::shmdt(m_base);
         BOOST_ASSERT(ret == 0);
         (void)ret;
         return;
      }
      #endif   //#ifdef BOOST_INTERPROCESS_XSI_SHARED_MEMORY_OBJECTS
      munmap(this->priv_map_address(), this->priv_map_size());
      m_base = 0;
   }
}

inline void mapped_region::dont_close_on_destruction()
{  m_base = 0; }

#endif   //#if defined (BOOST_INTERPROCESS_WINDOWS)

template<int dummy>
const std::size_t mapped_region::page_size_holder<dummy>::PageSize
   = mapped_region::page_size_holder<dummy>::get_page_size();

inline std::size_t mapped_region::get_page_size()
{
   if(!page_size_holder<0>::PageSize)
      return page_size_holder<0>::get_page_size();
   else
      return page_size_holder<0>::PageSize;
}
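
//Note: on POSIX systems get_page_size() reports sysconf(_SC_PAGESIZE) (commonly 4096 bytes),
//while on Windows it reports the allocation granularity (typically 64 KB), since that is the
//real constraint on mapping offsets and hinted addresses.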
inline void mapped_region::swap(mapped_region &other)
{
   ::boost::adl_move_swap(this->m_base, other.m_base);
   ::boost::adl_move_swap(this->m_size, other.m_size);
   ::boost::adl_move_swap(this->m_page_offset, other.m_page_offset);
   ::boost::adl_move_swap(this->m_mode, other.m_mode);
   #if defined (BOOST_INTERPROCESS_WINDOWS)
   ::boost::adl_move_swap(this->m_file_or_mapping_hnd, other.m_file_or_mapping_hnd);
   #else
   ::boost::adl_move_swap(this->m_is_xsi, other.m_is_xsi);
   #endif
}

//!No-op functor
struct null_mapped_region_function
{
   bool operator()(void *, std::size_t , bool) const
   {  return true;   }

   static std::size_t get_min_size()
   {  return 0;  }
};

#endif   //#ifndef BOOST_INTERPROCESS_DOXYGEN_INVOKED

}  //namespace interprocess {
}  //namespace boost {

#include <boost/interprocess/detail/config_end.hpp>

#endif   //BOOST_INTERPROCESS_MAPPED_REGION_HPP

#if !defined(BOOST_INTERPROCESS_DOXYGEN_INVOKED)

#ifndef BOOST_INTERPROCESS_MAPPED_REGION_EXT_HPP
#define BOOST_INTERPROCESS_MAPPED_REGION_EXT_HPP

#if defined(BOOST_INTERPROCESS_WINDOWS) && !defined(BOOST_INTERPROCESS_FORCE_GENERIC_EMULATION)
#  include <boost/interprocess/sync/windows/sync_utils.hpp>
#  include <boost/interprocess/detail/windows_intermodule_singleton.hpp>

namespace boost {
namespace interprocess {

template<int Dummy>
inline void mapped_region::destroy_syncs_in_range(const void *addr, std::size_t size)
{
   ipcdetail::sync_handles &handles =
      ipcdetail::windows_intermodule_singleton<ipcdetail::sync_handles>::get();
   handles.destroy_syncs_in_range(addr, size);
}

}  //namespace interprocess {
}  //namespace boost {

#endif   //defined(BOOST_INTERPROCESS_WINDOWS) && !defined(BOOST_INTERPROCESS_FORCE_GENERIC_EMULATION)

#endif   //#ifdef BOOST_INTERPROCESS_MAPPED_REGION_EXT_HPP

#endif   //#if !defined(BOOST_INTERPROCESS_DOXYGEN_INVOKED)