module memutils.scoped;

import core.thread : Fiber;
import memutils.constants;
import memutils.allocators;
import memutils.pool;
import memutils.utils;
import memutils.vector;
import memutils.refcounted;
import memutils.unique;
import memutils.hashmap;
import memutils.freelist;
import memutils.memory;
import std.traits : hasElaborateDestructor, isArray;
import std.algorithm : min;
import std.exception;
import core.exception;

alias ScopedPool = RefCounted!ScopedPoolImpl;

final class ScopedPoolImpl {
	// TODO: Use a name for debugging?

	int id;
	/// Initializes a scoped pool and pushes it onto the PoolStack.
	/// max_mem is currently unused.
	this(size_t max_mem = 0) {
		PoolStack.push();
		id = PoolStack.top.id;
	}

	~this() {
		debug if(id != PoolStack.top.id) unfreeze();
		PoolStack.pop();
	}

	/// Use only if this ScopedPool is at the top of the stack.
	void freeze() {
		enforce(id == PoolStack.top.id);
		enforce(PoolStack.freeze(1) == 1, "Failed to freeze pool");
	}

	void unfreeze() {
		enforce(PoolStack.unfreeze(1) == 1, "Failed to unfreeze pool");
		enforce(id == PoolStack.top.id);
	}
}
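
/// A minimal usage sketch: constructing a ScopedPool pushes a pool and the
/// destructor pops it again. freeze() temporarily parks the pool in the
/// freezer so nested allocations bypass it; unfreeze() restores it.
unittest {
	auto pool = ScopedPool();
	pool.freeze();   // pool is now inactive; alloc() will not use it
	pool.unfreeze(); // pool is active again and will be popped on scope exit
}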

T alloc(T, ARGS...)(auto ref ARGS args)
	if (is(T == class) || is(T == interface) || __traits(isAbstractClass, T))
{
	T ret;

	if (!PoolStack.empty) {
		ret = ObjectAllocator!(T, PoolStack).alloc(args);

		// Add destructor to pool
		static if (hasElaborateDestructor!T || __traits(hasMember, T, "__dtor"))
			PoolStack.top.onDestroy(&ret.__dtor);
	}
	else {
		ret = new T(args);
	}

	return ret;
}
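
/// A minimal usage sketch for the class overload. `Connection` is just an
/// illustrative stand-in; with an active pool the instance is pool-allocated,
/// otherwise it falls back to the GC via `new`.
unittest {
	static class Connection { bool open; this() { open = true; } }

	auto pool = ScopedPool();
	Connection c = alloc!Connection();
	assert(c.open);
} // the pool, and with it c's memory, is released here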

T* alloc(T, ARGS...)(auto ref ARGS args)
	if (!(is(T == class) || is(T == interface) || __traits(isAbstractClass, T)))
{
	T* ret;

	if (!PoolStack.empty) {
		ret = ObjectAllocator!(T, PoolStack).alloc(args);

		// Add destructor to pool
		static if (hasElaborateDestructor!T || __traits(hasMember, T, "__dtor"))
			PoolStack.top.onDestroy(&ret.__dtor);
	}
	else {
		ret = new T(args);
	}

	return ret;
}
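
/// A minimal usage sketch for the value-type overload, which returns a pointer
/// to the pool-allocated (or GC-allocated) struct. `Point` is illustrative only.
unittest {
	static struct Point {
		int x;
		int y;
		this(int x, int y) { this.x = x; this.y = y; }
	}

	auto pool = ScopedPool();
	Point* p = alloc!Point(3, 4);
	assert(p.x == 3 && p.y == 4);
}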

/// arrays
auto alloc(T)(size_t n)
	if (isArray!T)
{
	import std.range : ElementType;

	T ret;
	if (!PoolStack.empty) {
		ret = allocArray!(ElementType!T, PoolStack)(n);
		registerPoolArray(ret);
	}
	else {
		ret = new T(n);
	}
	return ret;
}
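
/// A minimal usage sketch for the array overload (the length is passed as a
/// size_t so the call matches this overload's signature exactly).
unittest {
	auto pool = ScopedPool();
	size_t n = 16;
	int[] buf = alloc!(int[])(n);
	assert(buf.length == n);
}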

auto realloc(T)(ref T arr, size_t n)
	if (isArray!T)
{
	import std.range : ElementType;
	T ret;
	if (!PoolStack.empty) {
		scope(exit) arr = null;
		ret = reallocArray!(ElementType!T, PoolStack)(arr, n);
		reregisterPoolArray(arr, ret);
	}
	else {
		arr.length = n;
		ret = arr;
	}
	return ret;
}
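
/// A minimal usage sketch: realloc returns the resized array (the old slice is
/// cleared when the allocation came from a pool).
unittest {
	auto pool = ScopedPool();
	size_t n = 8;
	int[] buf = alloc!(int[])(n);
	buf = realloc(buf, n * 4);
	assert(buf.length == n * 4);
}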


struct PoolStack {
static:
	@property bool empty() { return m_tstack.empty && m_fstack.empty; }

	/// returns the most recent unfrozen pool, or a default-constructed ManagedPool if none is available
	@property ManagedPool top() {
		if (Fiber.getThis() && !m_fstack.empty) {
			return m_fstack.top;
		}
		return m_tstack.top;
	}

	/// creates a new pool as the fiber stack top or the thread stack top
	void push() {
		//logTrace("Pushing PoolStack");
		if (Fiber.getThis())
			return m_fstack.push();
		m_tstack.push();
		//logTrace("Pushed ThreadStack");
	}

	/// destroys the most recent pool and frees all of its resources, calling destructors.
	/// when called from a fiber, the top pool is looked up in the fiber stack or the fiber freezer and destroyed;
	/// otherwise it is looked up in the thread stack or the thread freezer.
	void pop() {
		//logTrace("Pop PoolStack");
		if (Fiber.getThis() && (!m_fstack.empty || !m_ffreezer.empty))
		{
			//logTrace("Pop FiberStack");
			if (m_fstack.hasTop)
				return m_fstack.pop();
			//else
			auto ret = m_ffreezer.pop(1);
			//logTrace("Pop ThreadStack instead");
			m_fstack.cnt--;
			return;
		}
		// else
		auto top = m_tstack.top;
		assert(top, "Can't find a pool to pop");
		//logTrace("Pop ThreadStack");
		if (m_tstack.hasTop)
			return m_tstack.pop();
		//logTrace("Doesn't have top?");
		//else
		auto ret = m_tfreezer.pop(1);

		m_tstack.cnt--;
		//logTrace("Destroying ", ret.back.id);

	}

	void disable() {
		freeze(m_tstack.length + m_fstack.length);
	}

	void enable() {
		unfreeze(m_ffreezer.length + m_tfreezer.length);
	}

package:
	// returns number of pools frozen
	size_t freeze(size_t n = 1) {
		auto minsz = min(m_fstack.length, n);

		if (minsz > 0) {
			auto frozen = m_fstack.freeze(minsz);
			m_ffreezer.push(frozen);
		}

		if (minsz < n) {
			auto tsz = min(m_tstack.length, n - minsz);
			if (tsz > 0) {
				auto frozen = m_tstack.freeze(tsz);
				m_tfreezer.push(frozen);
			}
			return tsz + minsz;
		}
		return minsz;
	}

	// returns number of pools unfrozen
	size_t unfreeze(size_t n = 1) {
		auto minsz = min(m_ffreezer.length, n);

		if (minsz > 0) m_fstack.unfreeze(m_ffreezer.pop(minsz));

		if (minsz < n) {
			auto tsz = min(m_tfreezer.length, n - minsz);
			if (tsz > 0) m_tstack.unfreeze(m_tfreezer.pop(tsz));
			return tsz + minsz;
		}
		return minsz;
	}

	~this() {
		destroy(m_fstack);
		destroy(m_tstack);
	}

private static:
	// active
	ThreadPoolStack m_tstack;
	FiberPoolStack m_fstack;

	// frozen
	ThreadPoolFreezer m_tfreezer;
	FiberPoolFreezer m_ffreezer;

}
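
/// A minimal sketch of using PoolStack directly: push() activates a pool for
/// subsequent alloc() calls and pop() destroys it; disable()/enable() freeze
/// and thaw every active pool so allocations temporarily bypass them.
/// (ScopedPool is the RAII wrapper around push()/pop().)
unittest {
	PoolStack.push();
	assert(!PoolStack.empty);

	size_t n = 64;
	auto buf = alloc!(ubyte[])(n); // served by the pool that was just pushed
	assert(buf.length == n);

	PoolStack.disable();           // pool is frozen; new allocations bypass it
	assert(PoolStack.empty);
	PoolStack.enable();            // pool is active again

	PoolStack.pop();               // destroys the pool and releases its memory
}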

package:

alias Pool = PoolAllocator!(AutoFreeListAllocator!(MallocAllocator));
alias ManagedPool = RefCounted!(Pool);

/// Holds frozen thread-local pools (kept sorted by pool id) so that allocations fall through to lower-level pools
struct ThreadPoolFreezer
{
	@disable this(this);
	@property size_t length() const { return m_pools.length; }
	@property bool empty() const { return length == 0; }

	void push(Array!(ManagedPool, Malloc) pools)
	{
		//logTrace("Push Thread Freezer of ", m_pools.length);
		// insert sorted
		foreach(ref item; pools[]) {
			bool found;
			foreach (size_t i, ref el; m_pools[]) {
				if (item.id < el.id) {
					m_pools.insertBefore(i, item);
					found = true;
					break;
				}
			}
			if (!found) m_pools ~= item;
		}
		//logTrace("Pushed Thread Freezer now ", m_pools.length);
	}

	Array!(ManagedPool, Malloc) pop(size_t n) {
		assert(!empty);
		//logTrace("Pop Thread Freezer of ", m_pools.length, " id ", m_pools.back.id);
		// already sorted
		auto pools = Array!(ManagedPool, Malloc)( m_pools[$-n .. $] );

		m_pools.length = (m_pools.length - n);
		//logTrace("Popped Thread Freezer returning ", pools.length, " expecting ", n);
		//logTrace("Returning ID ", pools.back.id);
		return pools;
	}

package:
	Vector!(ManagedPool, Malloc) m_pools;
}

/// Holds frozen fiber-local pools (kept sorted by pool id) so that allocations fall through to lower-level pools
struct FiberPoolFreezer
{
	@disable this(this);
	@property size_t fibers() const { return m_pools.length; }

	@property size_t length() const {
		Fiber f = Fiber.getThis();
		if (auto ptr = (f in m_pools)) {
			return (*ptr).length;
		}
		return 0;
	}

	@property bool empty() const {
		return length == 0;
	}

	void push(Array!(ManagedPool, Malloc) pools)
	{
		//logTrace("Push Fiber Freezer of ", length);
		Fiber f = Fiber.getThis();
		assert(f !is null);
		if (auto ptr = (f in m_pools)) {
			auto arr = *ptr;

			// insert sorted
			foreach(ref item; pools[]) {
				bool found;
				foreach (size_t i, ref el; arr[]) {
					if (item.id < el.id) {
						arr.insertBefore(i, item);
						found = true;
						break;
					}
				}
				if (!found) arr ~= item;
			}
			//logTrace("Pushed Fiber Freezer of ", length);
			return;
		}
		//else
		m_pools[f] = pools.dupr;
		//logTrace("Pushed Fiber Freezer of ", length);
	}

	Array!(ManagedPool, Malloc) pop(size_t n) {
		//logTrace("Pop Fiber Freezer of ", length);
		assert(!empty);

		Fiber f = Fiber.getThis();
		auto arr = m_pools[f];

		if (arr.empty) {
			m_pools.remove(f);
			return Array!(ManagedPool, Malloc)();
		}

		// already sorted
		auto pools = Array!(ManagedPool, Malloc)( arr[$-n .. $] );
		arr.length = (arr.length - n);
		//logTrace("Popped Fiber Freezer of ", length);
		return pools;
	}

private:

	HashMap!(Fiber, Array!(ManagedPool, Malloc), Malloc) m_pools;
}

struct ThreadPoolStack
{
	@disable this(this);
	@property size_t length() const { return m_pools.length; }
	@property bool empty() const { return length == 0; }
	size_t opDollar() const { return length; }
	@property bool hasTop() { return length > 0 && cnt-1 == top.id; }

	ManagedPool opIndex(size_t n) {
		//logTrace("OpIndex[", n, "] in Thread Pool of ", length, " top: ", cnt, " id: ", m_pools[n].id);
		return m_pools[n];
	}

	@property ManagedPool top()
	{
		//logTrace("Front Thread Pool of ", length);
		if (empty) {
			//logTrace("Empty");
			return ManagedPool();
		}
		return m_pools.back;
	}

	void pop()
	{
		assert(!empty);
		//logTrace("Pop Thread Pool of ", length, " top: ", cnt, " back id: ", m_pools.back.id);
		auto pool = m_pools.back;
		assert(pool.id == cnt-1);
		--cnt;
		m_pools.removeBack();
		//if (!empty) logTrace("Popped Thread Pool of ", length, " top: ", cnt, " back id: ", m_pools.back.id);
	}

	void push() {
		//if (!m_pools.empty) logTrace("Push Thread Pool of ", length, " top: ", cnt, " back id: ", m_pools.back.id);
		//else logTrace("Push Thread Pool of ", length, " top: ", cnt);
		ManagedPool pool = ManagedPool();
		pool.id = cnt++;
		m_pools.pushBack(pool);
		//logTrace("Pushed Thread Pool of ", length, " top: ", cnt, " back id: ", m_pools.back.id);
	}

	Array!(ManagedPool, Malloc) freeze(size_t n) {
		assert(!empty);
		//if (!m_pools.empty) logTrace("Freeze ", n, " in Thread Pool of ", length, " top: ", cnt);
		//else logTrace("Freeze ", n, " in Thread Pool of ", length, " top: ", cnt, " back id: ", m_pools.back.id);
		assert(n <= length);
		Array!(ManagedPool, Malloc) ret;
		ret[] = m_pools[$-n .. $];
		m_pools.length = (m_pools.length - n);
		//logTrace("Returning ", ret.length);
		//if (!empty) logTrace("Freezeed ", n, " in Thread Pool of ", length, " top: ", cnt, " back id: ", m_pools.back.id);
		return ret;
	}

	void unfreeze(Array!(ManagedPool, Malloc) pools) {
		//logTrace("Unfreeze ", pools.length, " in Thread Pool of ", length, " top: ", cnt, " back id: ", m_pools.back.id);
		// insert sorted
		foreach(ref item; pools[]) {
			bool found;
			foreach (size_t i, ref el; m_pools[]) {
				if (item.id < el.id) {
					m_pools.insertBefore(i, item);
					found = true;
					break;
				}
			}
			if (!found) m_pools ~= item;
		}
		//logTrace("Unfreezed ", pools.length, " in Thread Pool of ", length, " top: ", cnt, " back id: ", m_pools.back.id);
	}

package:
	int cnt;
	Vector!(ManagedPool, Malloc) m_pools;
}

struct FiberPoolStack
{
	@disable this(this);
	@property size_t fibers() const { return m_pools.length; }

	@property size_t length() const {
		Fiber f = Fiber.getThis();
		if (auto ptr = (f in m_pools)) {
			return (*ptr).length;
		}
		return 0;
	}

	@property bool hasTop() { return length > 0 && cnt-1 == top.id; }

	@property bool empty() const {
		return length == 0;
	}

	size_t opDollar() const { return length; }

	ManagedPool opIndex(size_t n) {
		assert(!empty);
		Fiber f = Fiber.getThis();
		//logTrace("OpIndex[", n, "] in Fiber Pool of ", length, " top: ", cnt, " id: ", m_pools[f][n].id);
		return m_pools[f][n];
	}

	@property ManagedPool top()
	{
		assert(!empty);
		Fiber f = Fiber.getThis();
		if (auto ptr = (f in m_pools)) {
			//logTrace("top in Fiber Pool of ", length, " top: ", cnt, " len: ", (*ptr).back().id);
			return (*ptr).back();
		}
		return ManagedPool();
	}

	// removes the current fiber's top pool
	void pop() {
		assert(!empty);

		Fiber f = Fiber.getThis();
		//logTrace("pop in Fiber Pool of ", length, " top: ", cnt, " id: ", m_pools[f].back.id);
		auto arr = m_pools[f];
		assert(arr.back.id == cnt-1);
		arr.removeBack();
		cnt--;
		if (arr.empty)
			m_pools.remove(f);
		//if (!empty) logTrace("popped in Fiber Pool of ", length, " top: ", cnt, " id: ", m_pools[f].back.id);
	}

	void push()
	{
		ManagedPool pool = ManagedPool();
		pool.id = cnt++;
		Fiber f = Fiber.getThis();
		//if (!empty) logTrace("Push in Fiber Pool of ", length, " top: ", cnt, " id: ", m_pools.get(f).back.id);
		assert(f !is null);
		if (auto ptr = (f in m_pools)) {
			*ptr ~= pool;
			//logTrace("Pushed in Fiber Pool of ", length, " top: ", cnt, " id: ", m_pools[f].back.id);
			return;
		}
		//else
		m_pools[f] = Array!(ManagedPool, Malloc)();
		m_pools[f] ~= pool;
		//logDebug("Pushed in Fiber Pool of ", length, " top: ", cnt, " id: ", m_pools[f].back.id);
	}

	// returns the frozen items
	Array!(ManagedPool, Malloc) freeze(size_t n) {
		assert(n <= length);
		Fiber f = Fiber.getThis();
		//logTrace("Freeze in Fiber Pool of ", length, " top: ", cnt, " id: ", m_pools[f].back.id);
		auto arr = m_pools[f];
		Array!(ManagedPool, Malloc) ret;
		ret[] = arr[$-n .. $];
		arr.length = (arr.length - n);
		//logTrace("Frozen in Fiber Pool of ", length, " top: ", cnt, " id: ", m_pools[f].back.id);
		return ret;
	}

	void unfreeze(Array!(ManagedPool, Malloc) items)
	{
		Fiber f = Fiber.getThis();
		assert(f !is null);
		//logTrace("Unfreeze in Fiber Pool of ", length, " top: ", cnt, " id: ", m_pools[f].back.id);
		if (auto ptr = (f in m_pools)) {
			auto arr = *ptr;
			// insert sorted
			foreach(ref item; items[]) {
				bool found;
				foreach (size_t i, ref el; arr[]) {
					if (item.id < el.id) {
						arr.insertBefore(i, item);
						found = true;
						break;
					}
				}
				if (!found) arr ~= item;
			}
			//logTrace("Unfrozen in Fiber Pool of ", length, " top: ", cnt, " id: ", m_pools[f].back.id);
			return;
		}
		assert(false);
	}
package:
	int cnt;
	HashMap!(Fiber, Array!(ManagedPool, Malloc), Malloc) m_pools;
}

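/// A minimal sketch of the fiber-local behaviour: when a pool is pushed from
/// inside a Fiber it lands on the fiber stack rather than the thread stack,
/// and allocations made in that fiber are served from it.
unittest {
	auto fib = new Fiber({
		auto pool = ScopedPool();
		size_t n = 32;
		auto data = alloc!(ubyte[])(n); // allocated from the fiber-local pool
		assert(data.length == n);
	});
	fib.call();
}
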
private void registerPoolArray(T)(ref T arr) {
	import std.range : ElementType;
	// register the element destructors with the current pool
	static if (is(ElementType!T == struct) && (hasElaborateDestructor!(ElementType!T) || __traits(hasMember, ElementType!T, "__dtor"))) {
		foreach (ref el; arr)
			PoolStack.top.onDestroy(&el.__dtor);
	}
}

private void reregisterPoolArray(T)(ref T arr, ref T arr2) {
	import std.range : ElementType;
	// re-register the element destructors with the current pool after a resize
	static if (is(ElementType!T == struct) && (hasElaborateDestructor!(ElementType!T) || __traits(hasMember, ElementType!T, "__dtor"))) {
		if (arr.ptr is arr2.ptr && arr2.length > arr.length) {
			foreach (ref el; arr2[arr.length .. $])
				PoolStack.top.onDestroy(&el.__dtor);
		}
		else {
			PoolStack.top.removeArrayDtors(&arr.back.__dtor, arr.length);
			registerPoolArray(arr2);
		}
	}
}