/**
	Memory pool with destructors, useful for scoped allocators.

	Copyright: © 2012-2013 RejectedSoftware e.K., © 2014-2015 Etienne Cimon
	License: Subject to the terms of the MIT license.
	Authors: Sönke Ludwig, Etienne Cimon
*/
module memutils.pool;

import memutils.allocators;
import std.conv : emplace;
import std.algorithm : min, max;
import memutils.vector;

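/**
	Allocates out of fixed-size pools obtained from the `Base` allocator and
	releases memory only in bulk: `free` is a no-op, `freeAll` recycles the
	pools, and `reset` hands them back to the base allocator.
*/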
final class PoolAllocator(Base : Allocator)
{
	public int id = -1; // intrusive ID used for ScopedPools

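	/// A single pool: `data` is the full buffer, `remaining` its still-unused tail.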
	static struct Pool { Pool* next; void[] data; void[] remaining; }

	private {
		Allocator m_baseAllocator;
		Pool* m_freePools; // pools that still have remaining space
		Pool* m_fullPools; // pools that are completely used up
		Vector!(void delegate()) m_destructors;
		int m_pools;       // number of pools currently allocated
	}
	public size_t m_poolSize;

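	/// `pool_size` sets the default size of each new pool; requests larger than
	/// this get a dedicated pool of their own size.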
	this(size_t pool_size = 64*1024)
	{
		m_poolSize = pool_size;
		m_baseAllocator = getAllocator!Base();
	}

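	/// Returns a `sz`-byte block carved from a pool with enough remaining space,
	/// creating a new pool from the base allocator when none fits.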
	void[] alloc(size_t sz)
	{
		auto aligned_sz = alignedSize(sz);

		// look for a free pool with enough remaining space
		Pool* pprev = null;
		Pool* p = cast(Pool*)m_freePools;
		size_t i;
		while(i < m_pools && p && p.remaining.length < aligned_sz ) {
			pprev = p;
			p = p.next;
			i++;
		}

		// none found: create a new pool, sized for the request if it exceeds m_poolSize
		if( !p || p.remaining.length == 0 || p.remaining.length < aligned_sz ) {
			auto pmem = m_baseAllocator.alloc(AllocSize!Pool);

			p = emplace!Pool(pmem);
			p.data = m_baseAllocator.alloc(max(aligned_sz, m_poolSize));
			p.remaining = p.data;
			p.next = cast(Pool*)m_freePools;
			m_freePools = p;
			m_pools++;
			pprev = null;
		}
		logTrace("0 .. ", aligned_sz, " but remaining: ", p.remaining.length);
		auto ret = p.remaining[0 .. aligned_sz];
		logTrace("p.remaining: ", aligned_sz, " .. ", p.remaining.length);
		p.remaining = p.remaining[aligned_sz .. $];
		// pool exhausted: move it from the free list to the full list
		if( !p.remaining.length ){
			if( pprev ) {
				pprev.next = p.next;
			} else {
				m_freePools = p.next;
			}
			p.next = cast(Pool*)m_fullPools;
			m_fullPools = p;
		}
		//logDebug("PoolAllocator ", id, " allocated ", sz, " with ", totalSize());

		return ret[0 .. sz];
	}

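	/// Grows or shrinks `arr`. Growth happens in place only when `arr` is the most
	/// recent allocation of the current pool; otherwise a new block is allocated
	/// and the old contents are copied over.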
	void[] realloc(void[] arr, size_t newsize)
	{
		auto aligned_sz = alignedSize(arr.length);
		auto aligned_newsz = alignedSize(newsize);
		logTrace("realloc: ", arr.ptr, " sz ", arr.length, " aligned: ", aligned_sz, " => ", newsize, " aligned: ", aligned_newsz);
		// shrinking (or staying within the aligned block) never needs a copy
		if( aligned_newsz <= aligned_sz ) return arr.ptr[0 .. newsize];

		auto pool = m_freePools;
		bool last_in_pool = pool && arr.ptr+aligned_sz == pool.remaining.ptr;
		if( last_in_pool && pool.remaining.length+aligned_sz >= aligned_newsz ) {
			// grow in place by taking the extra bytes off the pool's remaining space
			pool.remaining = pool.remaining[aligned_newsz-aligned_sz .. $];
			arr = arr.ptr[0 .. aligned_newsz];
			assert(arr.ptr+arr.length == pool.remaining.ptr, "Last block does not align with the remaining space!?");
			return arr[0 .. newsize];
		} else {
			// relocate: allocate a new block and copy the old contents
			auto ret = alloc(newsize);
			assert(ret.ptr >= arr.ptr+aligned_sz || ret.ptr+ret.length <= arr.ptr, "New block overlaps old one!?");
			ret[0 .. min(arr.length, newsize)] = arr[0 .. min(arr.length, newsize)];
			return ret;
		}
	}

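	/// No-op: individual blocks are never released; memory is reclaimed in bulk
	/// by freeAll() or reset().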
	void free(void[] mem)
	{

	}

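	/// Runs all registered destructors (in reverse registration order), then
	/// recycles every pool for reuse without returning memory to the base allocator.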
	void freeAll()
	{
		//logDebug("Destroying ", totalSize(), " of data, allocated: ", allocatedSize());
		// destroy all initialized objects
		foreach_reverse (ref dtor; m_destructors[])
			dtor();

		destroy(m_destructors);

		size_t i;
		// put all full Pools back into the free pools list
		for (Pool* p = cast(Pool*)m_fullPools, pnext; p && i < m_pools; (p = pnext), i++) {
			pnext = p.next;
			p.next = cast(Pool*)m_freePools;
			m_freePools = cast(Pool*)p;
		}
		m_fullPools = null;

		i = 0;
		// mark the full capacity of every pool as available again
		for (Pool* p = cast(Pool*)m_freePools; p && i < m_pools; (p = p.next), i++) {
			p.remaining = p.data;
		}
	}

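	/// Calls freeAll() and then returns all pool memory to the base allocator.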
	void reset()
	{
		//logDebug("Reset()");
		freeAll();
		Pool* pnext;
		size_t i;
		for (auto p = cast(Pool*)m_freePools; p && i < m_pools; (p = pnext), i++) {
			pnext = p.next;
			m_baseAllocator.free(p.data);
			m_baseAllocator.free((cast(void*)p)[0 .. AllocSize!Pool]);
		}
		m_freePools = null;
		m_pools = 0;
	}

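	/// Registers a destructor to be run by freeAll().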
	package void onDestroy(void delegate() dtor) {
		m_destructors ~= dtor;
	}

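	/// Removes the `n` destructors registered for an array whose last destructor
	/// is `last_dtor`, keeping all other registered destructors.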
	package void removeArrayDtors(void delegate() last_dtor, size_t n) {
		bool found;
		foreach_reverse(i, ref el; m_destructors[]) {
			if (el == last_dtor)
			{
				Vector!(void delegate()) arr;
				// keep the destructors registered before the array's n entries ...
				if (i >= n)
					arr ~= m_destructors[0 .. i-n+1];
				// ... and the ones registered after them
				if (i != m_destructors.length - 1)
					arr ~= m_destructors[i+1 .. $];
				m_destructors[] = arr[];
				found = true;
				break;
			}
		}
		assert(found);
	}

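	/// Total number of bytes currently held in pools, used or not.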
	@property size_t totalSize()
	{
		size_t amt = 0;
		size_t i;
		for (auto p = m_fullPools; p && i < m_pools; (p = p.next), i++)
			amt += p.data.length;
		i = 0;
		for (auto p = m_freePools; p && i < m_pools; (p = p.next), i++)
			amt += p.data.length;
		return amt;
	}

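	/// Number of bytes currently handed out to callers (excluding unused pool space).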
	@property size_t allocatedSize()
	{
		size_t amt = 0;
		size_t i;
		for (auto p = m_fullPools; p && i < m_pools; (p = p.next), i++)
			amt += p.data.length;
		i = 0;
		for (auto p = m_freePools; p && i < m_pools; (p = p.next), i++)
			amt += p.data.length - p.remaining.length;
		return amt;
	}

	~this() { reset(); }
}
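
/+
	A minimal usage sketch (not part of the library). `SomeBaseAllocator` is a
	placeholder for whichever `Allocator` implementation memutils.allocators
	provides in your build; substitute a real one before compiling.

	auto pool = new PoolAllocator!SomeBaseAllocator(4096); // 4 KiB pools
	auto buf = pool.alloc(128);    // carved out of the first pool
	buf = pool.realloc(buf, 256);  // grows in place while it is the last block
	pool.freeAll();                // run destructors, recycle the pools
	pool.reset();                  // additionally return memory to the base allocator
+/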