// static_mem_pool.h
// -*- Mode: C++; tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
// vim:tabstop=4:shiftwidth=4:expandtab:

/*
 * Copyright (C) 2004-2008 Wu Yongwei <adah at users dot sourceforge dot net>
 *
 * This software is provided 'as-is', without any express or implied
 * warranty.  In no event will the authors be held liable for any
 * damages arising from the use of this software.
 *
 * Permission is granted to anyone to use this software for any purpose,
 * including commercial applications, and to alter it and redistribute
 * it freely, subject to the following restrictions:
 *
 * 1. The origin of this software must not be misrepresented; you must
 *    not claim that you wrote the original software.  If you use this
 *    software in a product, an acknowledgement in the product
 *    documentation would be appreciated but is not required.
 * 2. Altered source versions must be plainly marked as such, and must
 *    not be misrepresented as being the original software.
 * 3. This notice may not be removed or altered from any source
 *    distribution.
 *
 * This file is part of Stones of Nvwa:
 *      http://sourceforge.net/projects/nvwa
 *
 */

/*!
 * \file    static_mem_pool.h
 * \ingroup QxMemLeak
 *
 * Header file for the `static' memory pool.
 *
 * \version 1.20, 2007/10/20
 * \author  Wu Yongwei
 *
 */
  37. #ifndef QT_NO_DEBUG
  38. #ifndef _QX_MODE_RELEASE
  39. #if _QX_USE_MEM_LEAK_DETECTION
  40. #ifndef _STATIC_MEM_POOL_H
  41. #define _STATIC_MEM_POOL_H
  42. #ifdef _MSC_VER
  43. #pragma once
  44. #endif
  45. #include <new>
  46. #include <stdexcept>
  47. #include <string>
  48. #include <vector>
  49. #include <assert.h>
  50. #include <stddef.h>
  51. #include "class_level_lock.h"
  52. #include "mem_pool_base.h"
  53. /* Defines Work-around for Microsoft Visual C++ 6.0 and Borland C++ 5.5.1 */
  54. # if (defined(_MSC_VER) && _MSC_VER < 1300) \
  55. || (defined(__BORLANDC__) && __BORLANDC__ < 0x600)
  56. # define __PRIVATE public
  57. # else
  58. # define __PRIVATE private
  59. # endif
  60. /* Defines the macro for debugging output */
  61. # ifdef _STATIC_MEM_POOL_DEBUG
  62. # include <iostream>
  63. # define _STATIC_MEM_POOL_TRACE(_Lck, _Msg) \
  64. { \
  65. if (_Lck) { \
  66. static_mem_pool_set::lock __guard; \
  67. std::cerr << "static_mem_pool: " << _Msg << std::endl; \
  68. } else { \
  69. std::cerr << "static_mem_pool: " << _Msg << std::endl; \
  70. } \
  71. }
  72. # else
  73. # define _STATIC_MEM_POOL_TRACE(_Lck, _Msg) \
  74. ((void)0)
  75. # endif
  76. namespace qx {
  77. namespace memory {
  78. /**
  79. * Singleton class to maintain a set of existing instantiations of
  80. * static_mem_pool.
  81. */
  82. class QX_DLL_EXPORT static_mem_pool_set
  83. {
  84. public:
  85. typedef class_level_lock<static_mem_pool_set>::lock lock;
  86. static static_mem_pool_set& instance();
  87. void recycle();
  88. void add(mem_pool_base* __memory_pool_p);
  89. __PRIVATE:
  90. ~static_mem_pool_set();
  91. private:
  92. static_mem_pool_set();
  93. typedef std::vector<mem_pool_base*> container_type;
  94. container_type _M_memory_pool_set;
  95. /* Forbid their use */
  96. static_mem_pool_set(const static_mem_pool_set&);
  97. const static_mem_pool_set& operator=(const static_mem_pool_set&);
  98. };
  99. /**
  100. * Singleton class template to manage the allocation/deallocation of
  101. * memory blocks of one specific size.
  102. *
  103. * @param _Sz size of elements in the static_mem_pool
  104. * @param _Gid group id of a static_mem_pool: if it is negative,
  105. * simultaneous accesses to this static_mem_pool will be
  106. * protected from each other; otherwise no protection is
  107. * given
  108. */
  109. template <size_t _Sz, int _Gid = -1>
  110. class static_mem_pool : public mem_pool_base
  111. {
  112. typedef typename class_level_lock<static_mem_pool<_Sz, _Gid>, (_Gid < 0)>
  113. ::lock lock;
  114. public:
  115. /**
  116. * Gets the instance of the static memory pool. It will create the
  117. * instance if it does not already exist. Generally this function
  118. * is now not needed.
  119. *
  120. * @return reference to the instance of the static memory pool
  121. * @see instance_known
  122. */
  123. static static_mem_pool& instance()
  124. {
  125. lock __guard;
  126. if (!_S_instance_p)
  127. {
  128. _S_instance_p = _S_create_instance();
  129. }
  130. return *_S_instance_p;
  131. }
  132. /**
  133. * Gets the known instance of the static memory pool. The instance
  134. * must already exist. Generally the static initializer of the
  135. * template guarantees it.
  136. *
  137. * @return reference to the instance of the static memory pool
  138. */
  139. static static_mem_pool& instance_known()
  140. {
  141. assert(_S_instance_p != NULL);
  142. return *_S_instance_p;
  143. }
  144. /**
  145. * Allocates memory and returns its pointer. The template will try
  146. * to get it from the memory pool first, and request memory from the
  147. * system if there is no free memory in the pool.
  148. *
  149. * @return pointer to allocated memory if successful; \c NULL
  150. * otherwise
  151. */
  152. void* allocate()
  153. {
  154. {
  155. lock __guard;
  156. if (_S_memory_block_p)
  157. {
  158. void* __result = _S_memory_block_p;
  159. _S_memory_block_p = _S_memory_block_p->_M_next;
  160. return __result;
  161. }
  162. }
  163. return _S_alloc_sys(_S_align(_Sz));
  164. }
  165. /**
  166. * Deallocates memory by putting the memory block into the pool.
  167. *
  168. * @param __ptr pointer to memory to be deallocated
  169. */
  170. void deallocate(void* __ptr)
  171. {
  172. assert(__ptr != NULL);
  173. lock __guard;
  174. _Block_list* __block_ = reinterpret_cast<_Block_list*>(__ptr);
  175. __block_->_M_next = _S_memory_block_p;
  176. _S_memory_block_p = __block_;
  177. }
  178. virtual void recycle();
  179. private:
  180. static_mem_pool()
  181. {
  182. _STATIC_MEM_POOL_TRACE(true, "static_mem_pool<" << _Sz << ','
  183. << _Gid << "> is created");
  184. }
  185. ~static_mem_pool()
  186. {
  187. #ifndef _QX_MODE_RELEASE
  188. #ifndef QT_NO_DEBUG
  189. // Empty the pool to avoid false memory leakage alarms. This is
  190. // generally not necessary for release binaries.
  191. _Block_list* __block_ = _S_memory_block_p;
  192. while (__block_)
  193. {
  194. _Block_list* __next_ = __block_->_M_next;
  195. dealloc_sys(__block_);
  196. __block_ = __next_;
  197. }
  198. _S_memory_block_p = NULL;
  199. #endif // QT_NO_DEBUG
  200. #endif // _QX_MODE_RELEASE
  201. _S_instance_p = NULL;
  202. _S_destroyed = true;
  203. _STATIC_MEM_POOL_TRACE(false, "static_mem_pool<" << _Sz << ','
  204. << _Gid << "> is destroyed");
  205. }
  206. static size_t _S_align(size_t __size)
  207. {
  208. return __size >= sizeof(_Block_list) ? __size : sizeof(_Block_list);
  209. }
  210. static void* _S_alloc_sys(size_t __size);
  211. static static_mem_pool* _S_create_instance();
  212. static bool _S_destroyed;
  213. static static_mem_pool* _S_instance_p;
  214. static mem_pool_base::_Block_list* _S_memory_block_p;
  215. /* Forbid their use */
  216. static_mem_pool(const static_mem_pool&);
  217. const static_mem_pool& operator=(const static_mem_pool&);
  218. };
  219. template <size_t _Sz, int _Gid> bool
  220. static_mem_pool<_Sz, _Gid>::_S_destroyed = false;
  221. template <size_t _Sz, int _Gid> mem_pool_base::_Block_list*
  222. static_mem_pool<_Sz, _Gid>::_S_memory_block_p = NULL;
  223. template <size_t _Sz, int _Gid> static_mem_pool<_Sz, _Gid>*
  224. static_mem_pool<_Sz, _Gid>::_S_instance_p = _S_create_instance();
  225. /**
  226. * Recycles half of the free memory blocks in the memory pool to the
  227. * system. It is called when a memory request to the system (in other
  228. * instances of the static memory pool) fails.
  229. */
  230. template <size_t _Sz, int _Gid>
  231. void static_mem_pool<_Sz, _Gid>::recycle()
  232. {
  233. // Only here the global lock in static_mem_pool_set is obtained
  234. // before the pool-specific lock. However, no race conditions are
  235. // found so far.
  236. lock __guard;
  237. _Block_list* __block_ = _S_memory_block_p;
  238. while (__block_)
  239. {
  240. if (_Block_list* __temp_ = __block_->_M_next)
  241. {
  242. _Block_list* __next_ = __temp_->_M_next;
  243. __block_->_M_next = __next_;
  244. dealloc_sys(__temp_);
  245. __block_ = __next_;
  246. }
  247. else
  248. {
  249. break;
  250. }
  251. }
  252. _STATIC_MEM_POOL_TRACE(false, "static_mem_pool<" << _Sz << ','
  253. << _Gid << "> is recycled");
  254. }
  255. template <size_t _Sz, int _Gid>
  256. void* static_mem_pool<_Sz, _Gid>::_S_alloc_sys(size_t __size)
  257. {
  258. static_mem_pool_set::lock __guard;
  259. void* __result = mem_pool_base::alloc_sys(__size);
  260. if (!__result)
  261. {
  262. static_mem_pool_set::instance().recycle();
  263. __result = mem_pool_base::alloc_sys(__size);
  264. }
  265. return __result;
  266. }
  267. template <size_t _Sz, int _Gid>
  268. static_mem_pool<_Sz, _Gid>* static_mem_pool<_Sz, _Gid>::_S_create_instance()
  269. {
  270. if (_S_destroyed)
  271. throw std::runtime_error("dead reference detected");
  272. static_mem_pool_set::instance(); // Force its creation
  273. static_mem_pool* __inst_p = new static_mem_pool();
  274. try
  275. {
  276. static_mem_pool_set::instance().add(__inst_p);
  277. }
  278. catch (...)
  279. {
  280. _STATIC_MEM_POOL_TRACE(true,
  281. "Exception occurs in static_mem_pool_set::add");
  282. // The strange cast below is to work around a bug in GCC 2.95.3
  283. delete static_cast<mem_pool_base*>(__inst_p);
  284. throw;
  285. }
  286. return __inst_p;
  287. }
  288. } // namespace memory
  289. } // namespace qx
  290. #define DECLARE_STATIC_MEM_POOL(_Cls) \
  291. public: \
  292. static void* operator new(size_t __size) \
  293. { \
  294. assert(__size == sizeof(_Cls)); \
  295. void* __ptr; \
  296. __ptr = static_mem_pool<sizeof(_Cls)>:: \
  297. instance_known().allocate(); \
  298. if (__ptr == NULL) \
  299. throw std::bad_alloc(); \
  300. return __ptr; \
  301. } \
  302. static void operator delete(void* __ptr) \
  303. { \
  304. if (__ptr) \
  305. static_mem_pool<sizeof(_Cls)>:: \
  306. instance_known().deallocate(__ptr); \
  307. }
  308. #define DECLARE_STATIC_MEM_POOL__NOTHROW(_Cls) \
  309. public: \
  310. static void* operator new(size_t __size) throw() \
  311. { \
  312. assert(__size == sizeof(_Cls)); \
  313. return static_mem_pool<sizeof(_Cls)>:: \
  314. instance_known().allocate(); \
  315. } \
  316. static void operator delete(void* __ptr) \
  317. { \
  318. if (__ptr) \
  319. static_mem_pool<sizeof(_Cls)>:: \
  320. instance_known().deallocate(__ptr); \
  321. }
  322. #define DECLARE_STATIC_MEM_POOL_GROUPED(_Cls, _Gid) \
  323. public: \
  324. static void* operator new(size_t __size) \
  325. { \
  326. assert(__size == sizeof(_Cls)); \
  327. void* __ptr; \
  328. __ptr = static_mem_pool<sizeof(_Cls), (_Gid)>:: \
  329. instance_known().allocate(); \
  330. if (__ptr == NULL) \
  331. throw std::bad_alloc(); \
  332. return __ptr; \
  333. } \
  334. static void operator delete(void* __ptr) \
  335. { \
  336. if (__ptr) \
  337. static_mem_pool<sizeof(_Cls), (_Gid)>:: \
  338. instance_known().deallocate(__ptr); \
  339. }
  340. #define DECLARE_STATIC_MEM_POOL_GROUPED__NOTHROW(_Cls, _Gid) \
  341. public: \
  342. static void* operator new(size_t __size) throw() \
  343. { \
  344. assert(__size == sizeof(_Cls)); \
  345. return static_mem_pool<sizeof(_Cls), (_Gid)>:: \
  346. instance_known().allocate(); \
  347. } \
  348. static void operator delete(void* __ptr) \
  349. { \
  350. if (__ptr) \
  351. static_mem_pool<sizeof(_Cls), (_Gid)>:: \
  352. instance_known().deallocate(__ptr); \
  353. }
  354. // OBSOLETE: no longer needed
  355. #define PREPARE_STATIC_MEM_POOL(_Cls) \
  356. std::cerr << "PREPARE_STATIC_MEM_POOL is obsolete!\n";
  357. // OBSOLETE: no longer needed
  358. #define PREPARE_STATIC_MEM_POOL_GROUPED(_Cls, _Gid) \
  359. std::cerr << "PREPARE_STATIC_MEM_POOL_GROUPED is obsolete!\n";
  360. #undef __PRIVATE
  361. #endif // _STATIC_MEM_POOL_H
  362. #endif // _QX_USE_MEM_LEAK_DETECTION
  363. #endif // _QX_MODE_RELEASE
  364. #endif // QT_NO_DEBUG