module memutils.scoped;

import core.thread : Fiber;
import memutils.constants;
import memutils.allocators;
import memutils.pool;
import memutils.utils;
import memutils.vector;
import memutils.refcounted;
import memutils.unique;
import memutils.hashmap;
import memutils.freelist;
import memutils.memory;
import std.traits : hasElaborateDestructor, isArray;
import std.algorithm : min;
import std.exception;
import core.exception;

alias ScopedPool = RefCounted!ScopedPoolImpl;

final class ScopedPoolImpl {
	// TODO: Use a name for debugging?

	int id;
	/// Initializes a scoped pool and pushes it onto the PoolStack.
	/// max_mem is reserved for future use and is currently ignored.
	this(size_t max_mem = 0) {
		PoolStack.push();
		id = PoolStack.top.id;
	}

	~this() {
		debug if(id != PoolStack.top.id) unfreeze();
		PoolStack.pop();
	}

	/// Freezes this pool so that new allocations no longer draw from it.
	/// Use only while this ScopedPool is the top of the stack.
	void freeze() {
		enforce(id == PoolStack.top.id);
		enforce(PoolStack.freeze(1) == 1, "Failed to freeze pool");
	}

	/// Returns this pool to the top of the active stack.
	void unfreeze() {
		enforce(PoolStack.unfreeze(1) == 1, "Failed to unfreeze pool");
		enforce(id == PoolStack.top.id);
	}
}
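
// Usage sketch (illustrative only, not part of the library's test suite):
// constructing a ScopedPool pushes a pool for its lifetime, and alloc!T
// draws from that pool until the ScopedPool goes out of scope.
unittest {
	static class Widget { int id; this(int id) { this.id = id; } }
	{
		auto pool = ScopedPool(0);  // assumes RefCounted constructs the impl eagerly when given an argument
		Widget w = alloc!Widget(3); // allocated from the pool on top of PoolStack
		assert(w.id == 3);
	}                               // the pool is popped and destroyed here
}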

T alloc(T, ARGS...)(auto ref ARGS args)
	if (is(T == class) || is(T == interface) || __traits(isAbstractClass, T))
{
	T ret;
	
	if (!PoolStack.empty) {
		ret = ObjectAllocator!(T, PoolStack).alloc(args);
		
		// Add destructor to pool
		static if (hasElaborateDestructor!T || __traits(hasMember, T, "__dtor") ) 
			PoolStack.top().onDestroy(&ret.__dtor);
	}
	else {
		ret = new T(args);
	}
	
	return ret;
}

T* alloc(T, ARGS...)(auto ref ARGS args)
	if (!(is(T == class) || is(T == interface) || __traits(isAbstractClass, T)))
{
	T* ret;
	
	if (!PoolStack.empty) {
		ret = ObjectAllocator!(T, PoolStack).alloc(args);
		
		// Add destructor to pool
		static if (hasElaborateDestructor!T || __traits(hasMember, T, "__dtor") ) 
			PoolStack.top.onDestroy(&ret.__dtor);
		
	}
	else {
		ret = new T(args);
	}
	
	return ret;
}
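
// Sketch (illustrative only): struct allocations return a pointer, and an
// elaborate destructor is queued on the owning pool, so it runs when that
// pool is popped and destroyed.
unittest {
	static struct Counter {
		static int dtors;
		~this() { dtors++; }
	}
	PoolStack.push();
	auto c = alloc!Counter();
	assert(c !is null);
	PoolStack.pop();            // destroying the pool runs Counter's destructor
	assert(Counter.dtors >= 1); // >= 1 in case the destructor also runs through another path
}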

/// Allocates an array of n elements from the pool on top of the stack, or with the GC if no pool is active.
auto alloc(T)(size_t n)
	if (isArray!T)
{
	import std.range : ElementType;
	
	T ret;
	if (!PoolStack.empty) {
		ret = allocArray!(ElementType!T, PoolStack)(n);
		registerPoolArray(ret);
	}
	else {
		ret = new T(n);
	}
	return ret;
}

/// Reallocates an array to n elements; use the returned slice afterwards, not the one passed in.
auto realloc(T)(ref T arr, size_t n)
	if (isArray!T)
{
	import std.range : ElementType;
	T ret;
	if (!PoolStack.empty) {
		scope(exit) arr = null;
		ret = reallocArray!(ElementType!T, PoolStack)(arr, n);
		reregisterPoolArray(arr, ret);
	}
	else {
		arr.length = n;
		ret = arr;
	}
	return ret;
}
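
// Sketch (illustrative only): array allocation and growth through the pool
// stack. ubyte elements have no destructors, which keeps the example minimal.
unittest {
	PoolStack.push();
	ubyte[] buf = alloc!(ubyte[])(16);
	assert(buf.length == 16);
	buf = realloc(buf, 32);     // the slice passed in is cleared; keep the returned one
	assert(buf.length == 32);
	PoolStack.pop();            // releases the pool together with its arrays
}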


struct PoolStack {
static:
	@property bool empty() { return m_tstack.empty && m_fstack.empty; }

	/// Returns the most recent unfrozen pool, or an empty ManagedPool if none is available.
	@property ManagedPool top() {
		if (Fiber.getThis() && !m_fstack.empty) {
			return m_fstack.top;
		}
		return m_tstack.top;
	}

	/// creates a new pool as the fiber stack top or the thread stack top
	void push() {
		//logTrace("Pushing PoolStack");
		if (Fiber.getThis())
			return m_fstack.push();
		m_tstack.push();
		//logTrace("Pushed ThreadStack");
	}

	/// Destroys the most recent pool and frees all of its resources, running destructors.
	/// When called from a fiber, the pool is looked up in the fiber stack or the fiber freezer;
	/// otherwise it is looked up in the thread stack or the thread freezer.
	void pop() {
		//logTrace("Pop PoolStack");
		if (Fiber.getThis() && (!m_fstack.empty || !m_ffreezer.empty))
		{
			//logTrace("Pop FiberStack");
			if (m_fstack.hasTop)
				return m_fstack.pop();
			//else
			auto ret = m_ffreezer.pop(1);
			//logTrace("Pop ThreadStack instead");
			m_fstack.cnt--;
			return;
		}
		// else
		auto top = m_tstack.top;
		assert(top, "Can't find a pool to pop");
		//logTrace("Pop ThreadStack");
		if (m_tstack.hasTop)
			return m_tstack.pop();
		//logTrace("Doesn't have top?");
		//else
		auto ret = m_tfreezer.pop(1);

		m_tstack.cnt--;
		//logTrace("Destroying ", ret.back.id);

	}

	/// Freezes every active pool; subsequent allocations fall back to the GC until enable() is called.
	void disable() {
		freeze(m_tstack.length + m_fstack.length);
	}

	/// Thaws all frozen pools, restoring pool-backed allocation.
	void enable() {
		unfreeze(m_ffreezer.length + m_tfreezer.length);
	}

package:
	// returns number of pools frozen
	size_t freeze(size_t n = 1) {
		auto minsz = min(m_fstack.length, n);

		if (minsz > 0) {
			auto frozen = m_fstack.freeze(minsz);
			m_ffreezer.push(frozen);
		}

		if (minsz < n) {
			auto tsz = min(m_tstack.length, n - minsz);
			if (tsz > 0) {
				auto frozen = m_tstack.freeze(tsz);
				m_tfreezer.push(frozen);
			}
			return tsz + minsz;
		}
		return minsz;
	}

	// returns number of pools unfrozen
	size_t unfreeze(size_t n = 1) {
		auto minsz = min(m_ffreezer.length, n);
		
		if (minsz > 0) m_fstack.unfreeze(m_ffreezer.pop(minsz));
		
		if (minsz < n) {
			auto tsz = min(m_tfreezer.length, n - minsz);
			if (tsz > 0) m_tstack.unfreeze(m_tfreezer.pop(tsz));
			return tsz + minsz;
		}
		return minsz;
	}

	~this() {
		destroy(m_fstack);
		destroy(m_tstack);
	}

private static:
	// active
	ThreadPoolStack m_tstack;
	FiberPoolStack m_fstack;

	// frozen
	ThreadPoolFreezer m_tfreezer;
	FiberPoolFreezer m_ffreezer;

}
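
// Sketch (illustrative only): disable() freezes the active pools, so the stack
// reports empty and alloc!T falls back to the GC until enable() thaws them.
unittest {
	PoolStack.push();
	assert(!PoolStack.empty);
	PoolStack.disable();        // freeze every active pool
	assert(PoolStack.empty);    // allocations now bypass the pools
	PoolStack.enable();         // thaw them again
	assert(!PoolStack.empty);
	PoolStack.pop();
}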

package:

alias Pool = PoolAllocator!(AutoFreeListAllocator!(MallocAllocator));
alias ManagedPool = RefCounted!(Pool);

/// Holds frozen thread-level pools, kept sorted by id until they are thawed.
struct ThreadPoolFreezer
{
	@disable this(this);
	@property size_t length() const { return m_pools.length; }
	@property bool empty() const { return length == 0; }

	void push(Array!(ManagedPool, Malloc) pools)
	{
		//logTrace("Push Thread Freezer of ", m_pools.length);
		// insert sorted
		foreach(ref item; pools[]) {
			bool found;
			foreach (size_t i, ref el; m_pools[]) {
				if (item.id < el.id) {
					m_pools.insertBefore(i, item);
					found = true;
					break;
				}
			}
			if (!found) m_pools ~= item;
		}
		//logTrace("Pushed Thread Freezer now ", m_pools.length);
	}

	Array!(ManagedPool, Malloc) pop(size_t n) {
		assert(!empty);
		//logTrace("Pop Thread Freezer of ", m_pools.length, " id ", m_pools.back.id);
		// already sorted
		auto pools = Array!(ManagedPool, Malloc)( m_pools[$-n .. $] );
		m_pools.length = (m_pools.length - n);
		//logTrace("Popped Thread Freezer returning ", pools.length, " expecting ", n);
		//logTrace("Returning ID ", pools.back.id);
		return pools;
	}
	
package:
	Vector!(ManagedPool, Malloc) m_pools;
}

/// Holds frozen pools per fiber, kept sorted by id until they are thawed.
struct FiberPoolFreezer
{
	@disable this(this);
	@property size_t fibers() const { return m_pools.length; }
	
	@property size_t length() const { 
		Fiber f = Fiber.getThis();
		if (auto ptr = (f in m_pools)) {
			return (*ptr).length;
		}
		return 0;
	}

	@property bool empty() const {
		return length == 0; 
	}

	void push(Array!(ManagedPool, Malloc) pools)
	{
		//logTrace("Push Fiber Freezer of ", length);
		Fiber f = Fiber.getThis();
		assert(f !is null);
		if (auto ptr = (f in m_pools)) {
			auto arr = *ptr;

			// insert sorted
			foreach(ref item; pools[]) {
				bool found;
				foreach (size_t i, ref el; arr[]) {
					if (item.id < el.id) {
						arr.insertBefore(i, item);
						found = true;
						break;
					}
				}
				if (!found) arr ~= item;
			}
			//logTrace("Pushed Fiber Freezer of ", length);
			return;
		}
		//else
		m_pools[f] = pools.dupr;
		//logTrace("Pushed Fiber Freezer of ", length);
	}

	Array!(ManagedPool, Malloc) pop(size_t n) {
		//logTrace("Pop Fiber Freezer of ", length);
		assert(!empty);
		
		Fiber f = Fiber.getThis();
		auto arr = m_pools[f];

		if (arr.empty) {
			m_pools.remove(f);
			return Array!(ManagedPool, Malloc)();
		}

		// already sorted
		auto pools = Array!(ManagedPool, Malloc)( arr[$-n .. $] );
		arr.length = (arr.length - n);
		//logTrace("Popped Fiber Freezer of ", length);
		return pools;
	}

private:

	HashMap!(Fiber, Array!(ManagedPool, Malloc), Malloc) m_pools;
}

struct ThreadPoolStack
{
	@disable this(this);
	@property size_t length() const { return m_pools.length; }
	@property bool empty() const { return length == 0; }
	size_t opDollar() const { return length; }
	@property bool hasTop() { return length > 0 && cnt-1 == top.id; }

	ManagedPool opIndex(size_t n) {
		//logTrace("OpIndex[", n, "] in Thread Pool of ", length, " top: ", cnt, " id: ", m_pools[n].id);
		return m_pools[n];
	}

	@property ManagedPool top() 
	{
		//logTrace("Front Thread Pool of ", length);
		if (empty) {
			//logTrace("Empty");
			return ManagedPool();
		}
		return m_pools.back;
	}

	void pop()
	{
		assert(!empty);
		//logTrace("Pop Thread Pool of ", length, " top: ", cnt, " back id: ", m_pools.back.id);
		auto pool = m_pools.back;
		assert(pool.id == cnt-1);
		--cnt;
		m_pools.removeBack();
		//if (!empty) logTrace("Popped Thread Pool of ", length, " top: ", cnt, " back id: ", m_pools.back.id);
	}

	void push() {
		//if (!m_pools.empty) logTrace("Push Thread Pool of ", length, " top: ", cnt, " back id: ", m_pools.back.id);
		//else logTrace("Push Thread Pool of ", length, " top: ", cnt);
		ManagedPool pool = ManagedPool();
		pool.id = cnt++;
		m_pools.pushBack(pool);
		//logTrace("Pushed Thread Pool of ", length, " top: ", cnt, " back id: ", m_pools.back.id);
	}

	Array!(ManagedPool, Malloc) freeze(size_t n) {
		assert(!empty);
		//if (!m_pools.empty) logTrace("Freeze ", n, " in Thread Pool of ", length, " top: ", cnt);
		//else logTrace("Freeze ", n, " in Thread Pool of ", length, " top: ", cnt, " back id: ", m_pools.back.id);
		assert(n <= length);
		Array!(ManagedPool, Malloc) ret;
		ret[] = m_pools[$-n .. $];
		m_pools.length = (m_pools.length - n);
		//logTrace("Returning ", ret.length);
		//if (!empty) logTrace("Freezeed ", n, " in Thread Pool of ", length, " top: ", cnt, " back id: ", m_pools.back.id);
		return ret;
	}

	void unfreeze(Array!(ManagedPool, Malloc) pools) {
		//logTrace("Unfreeze ", pools.length, " in Thread Pool of ", length, " top: ", cnt, " back id: ", m_pools.back.id);
		// insert sorted
		foreach(ref item; pools[]) {
			bool found;
			foreach (size_t i, ref el; m_pools[]) {
				if (item.id < el.id) {
					m_pools.insertBefore(i, item);
					found = true;
					break;
				}
			}
			if (!found) m_pools ~= item;
		}
		//logTrace("Unfreezed ", pools.length, " in Thread Pool of ", length, " top: ", cnt, " back id: ", m_pools.back.id);
	}

package:
	int cnt;
	Vector!(ManagedPool, Malloc) m_pools;
}

struct FiberPoolStack
{
	@disable this(this);
	@property size_t fibers() const { return m_pools.length; }

	@property size_t length() const {
		Fiber f = Fiber.getThis();
		if (auto ptr = (f in m_pools)) {
			return (*ptr).length;
		}
		return 0;
	}

	@property bool hasTop() { return length > 0 && cnt-1 == top.id; }

	@property bool empty() const {
		return length == 0; 
	}

	size_t opDollar() const { return length; }

	ManagedPool opIndex(size_t n) {
		assert(!empty);
		Fiber f = Fiber.getThis();
		//logTrace("OpIndex[", n, "] in Fiber Pool of ", length, " top: ", cnt, " id: ", m_pools[f][n].id);
		return m_pools[f][n];

	}

	@property ManagedPool top() 
	{
		assert(!empty);
		Fiber f = Fiber.getThis();
		if (auto ptr = (f in m_pools)) {
			//logTrace("top in Fiber Pool of ", length, " top: ", cnt, " len: ", (*ptr).back().id);
			return (*ptr).back();
		}
		return ManagedPool();

	}

	// removes the current top; the next pool down becomes top()
	void pop() {
		assert(!empty);

		Fiber f = Fiber.getThis();
		//logTrace("pop in Fiber Pool of ", length, " top: ", cnt, " id: ", m_pools[f].back.id);
		auto arr = m_pools[f];
		assert(arr.back.id == cnt-1);
		arr.removeBack();
		cnt--;
		if (arr.empty) 
			m_pools.remove(f);
		//if (!empty) logTrace("popped in Fiber Pool of ", length, " top: ", cnt, " id: ", m_pools[f].back.id);
	}

	void push()
	{
		ManagedPool pool = ManagedPool();
		pool.id = cnt++;
		Fiber f = Fiber.getThis();
		//if (!empty) logTrace("Push in Fiber Pool of ", length, " top: ", cnt, " id: ", m_pools.get(f).back.id);
		assert(f !is null);
		if (auto ptr = (f in m_pools)) {
			*ptr ~= pool;
			//logTrace("Pushed in Fiber Pool of ", length, " top: ", cnt, " id: ", m_pools[f].back.id);
			return;
		}
		//else
		m_pools[f] = Array!(ManagedPool, Malloc)();
		m_pools[f] ~= pool;
		//logDebug("Pushed in Fiber Pool of ", length, " top: ", cnt, " id: ", m_pools[f].back.id);
	}

	// returns the frozen items
	Array!(ManagedPool, Malloc) freeze(size_t n) {
		assert(n <= length);
		Fiber f = Fiber.getThis();
		//logTrace("Freeze in Fiber Pool of ", length, " top: ", cnt, " id: ", m_pools[f].back.id);
		auto arr = m_pools[f];
		Array!(ManagedPool, Malloc) ret;
		ret[] = arr[$-n .. $];
		arr.length = (arr.length - n);
		//logTrace("Frozen in Fiber Pool of ", length, " top: ", cnt, " id: ", m_pools[f].back.id);
		return ret;
	}


	void unfreeze(Array!(ManagedPool, Malloc) items)
	{
		Fiber f = Fiber.getThis();
		assert(f !is null);
		//logTrace("Unfreeze in Fiber Pool of ", length, " top: ", cnt, " id: ", m_pools[f].back.id);
		if (auto ptr = (f in m_pools)) {
			auto arr = *ptr;
			// insert sorted
			foreach(ref item; items[]) {
				bool found;
				foreach (size_t i, ref el; arr[]) {
					if (item.id < el.id) {
						arr.insertBefore(i, item);
						found = true;
						break;
					}
				}
				if (!found) arr ~= item;
			}
			//logTrace("Unfrozen in Fiber Pool of ", length, " top: ", cnt, " id: ", m_pools[f].back.id);
			return;
		}
		assert(false);
	}
package:
	int cnt;
	HashMap!(Fiber, Array!(ManagedPool, Malloc), Malloc) m_pools;
}


private void registerPoolArray(T)(ref T arr) {
	import std.range : ElementType;
	// Register element destructors with the pool on top of the stack
	static if (is(ElementType!T == struct) && (hasElaborateDestructor!(ElementType!T) || __traits(hasMember, ElementType!T, "__dtor") )) {
		foreach (ref el; arr)
			PoolStack.top.onDestroy(&el.__dtor);
	}
}

private void reregisterPoolArray(T)(ref T arr, ref T arr2) {
	import std.range : ElementType, back;
	// Keep the pool's destructor list in sync with the resized array
	static if (is(ElementType!T == struct) && (hasElaborateDestructor!(ElementType!T) || __traits(hasMember, ElementType!T, "__dtor") )) {
		if (arr.ptr is arr2.ptr && arr2.length > arr.length) {
			foreach (ref el; arr2[arr.length .. $])
				PoolStack.top.onDestroy(&el.__dtor);
		}
		else {
			PoolStack.top.removeArrayDtors(&arr.back.__dtor, arr.length);
			registerPoolArray(arr2);
		}
	}
}