This repository was archived by the owner on Jan 4, 2026. It is now read-only.
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathmemory_pool.zig
More file actions
306 lines (242 loc) · 10 KB
/
memory_pool.zig
File metadata and controls
306 lines (242 loc) · 10 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
const std = @import("std");
const testing = std.testing;
const assert = std.debug.assert;
const log = std.log.scoped(.Pool);
const RingBuffer = @import("./ring_buffer.zig").RingBuffer;
/// A `MemoryPool` is a structure used to manage dynamic memory allocation.
/// It is a pre-allocated region of memory that is divided into fixed-size
/// blocks, which can be allocated and deallocated more efficiently than
/// using traditional methods like global allocators.
pub fn MemoryPool(comptime T: type) type {
    return struct {
        const Self = @This();

        /// The allocator responsible for managing memory allocations.
        ///
        /// It allows the memory pool to be flexible with how memory is allocated and deallocated,
        /// providing a customizable way to manage raw memory.
        allocator: std.mem.Allocator,

        /// A map that tracks memory blocks that are currently assigned.
        ///
        /// The map ensures that the pool does not mistakenly return or reuse memory blocks
        /// that are still in use, helping to track the current state of the pool's memory blocks.
        assigned_map: std.AutoHashMapUnmanaged(*T, bool),

        /// A list that holds the memory blocks allocated by the pool.
        ///
        /// Every pointer handed out by the pool points into `backing_buffer.items`.
        /// The buffer is filled to capacity in `init` and never resized afterwards,
        /// so those pointers remain stable for the lifetime of the pool.
        backing_buffer: std.ArrayList(T),

        /// The total number of blocks the pool manages, fixed at `init`.
        capacity: usize,

        /// A ring buffer used to manage available memory blocks.
        ///
        /// The `free_list` is essential for efficiently recycling memory and reducing the
        /// overhead of repeated memory allocations.
        free_list: RingBuffer(*T),

        /// A mutex guarding all pool state for the thread-safe entry points.
        mutex: std.Thread.Mutex,

        /// Pre-allocates `capacity` blocks of `T` and marks all of them free.
        ///
        /// Returns `error.OutOfMemory` if the backing storage cannot be allocated.
        pub fn init(allocator: std.mem.Allocator, capacity: usize) !Self {
            var free_queue = try RingBuffer(*T).initCapacity(allocator, capacity);
            errdefer free_queue.deinit(allocator);
            var backing_buffer = try std.ArrayList(T).initCapacity(allocator, capacity);
            errdefer backing_buffer.deinit(allocator);
            for (0..capacity) |_| {
                // Blocks start uninitialized; callers must write before reading.
                const p: T = undefined;
                backing_buffer.appendAssumeCapacity(p);
            }
            for (backing_buffer.items) |*v| {
                free_queue.enqueueAssumeCapacity(v);
            }
            // If the backing buffer does not match the free queue, the memory pool
            // fundamentally cannot work. We would have returned an allocation error
            // already upon creating the backing_buffer and the free_queue; this is
            // purely a sanity check.
            assert(backing_buffer.items.len == free_queue.count);
            return Self{
                .allocator = allocator,
                .assigned_map = .empty,
                .capacity = capacity,
                .free_list = free_queue,
                .backing_buffer = backing_buffer,
                .mutex = std.Thread.Mutex{},
            };
        }

        /// Releases all memory owned by the pool.
        /// Any pointers still held by callers become invalid.
        pub fn deinit(self: *Self) void {
            self.free_list.deinit(self.allocator);
            self.assigned_map.deinit(self.allocator);
            self.backing_buffer.deinit(self.allocator);
        }

        /// Return the number of assigned ptrs in the memory pool. Thread safe.
        pub fn count(self: *Self) usize {
            self.mutex.lock();
            defer self.mutex.unlock();
            return self.countUnsafe();
        }

        /// Non thread safe version of `count`.
        pub fn countUnsafe(self: *Self) usize {
            return self.assigned_map.count();
        }

        /// Return the number of free ptrs remaining in the memory pool. Thread safe.
        pub fn available(self: *Self) usize {
            self.mutex.lock();
            defer self.mutex.unlock();
            return self.availableUnsafe();
        }

        /// Non thread safe version of `available`.
        pub fn availableUnsafe(self: *Self) usize {
            return self.free_list.count;
        }

        /// Allocates a memory block from the memory pool. Thread safe.
        ///
        /// This function attempts to allocate a memory block from the pool by
        /// reusing an existing block from the free list, failing with
        /// `error.OutOfMemory` when no block is available.
        pub fn create(self: *Self) !*T {
            self.mutex.lock();
            defer self.mutex.unlock();
            return self.unsafeCreate();
        }

        /// Non thread safe version of `create`.
        pub fn unsafeCreate(self: *Self) !*T {
            const ptr = self.free_list.dequeue() orelse return error.OutOfMemory;
            // BUGFIX: if recording the assignment fails, return the block to the
            // free list; previously a `put` failure leaked the block out of the
            // pool permanently (dequeued but tracked nowhere).
            errdefer self.free_list.enqueueAssumeCapacity(ptr);
            try self.assigned_map.put(self.allocator, ptr, true);
            return ptr;
        }

        /// Allocates multiple memory blocks from the memory pool. Thread safe.
        ///
        /// Attempts to allocate `n` memory blocks from the pool, failing with
        /// `error.OutOfMemory` when fewer than `n` blocks are free. On success
        /// the returned slice is owned by the caller and must be freed with
        /// `allocator`; each element must eventually be passed to `destroy`.
        pub fn createN(self: *Self, allocator: std.mem.Allocator, n: usize) ![]*T {
            self.mutex.lock();
            defer self.mutex.unlock();
            return self.unsafeCreateN(allocator, n);
        }

        /// Non thread safe version of `createN`.
        pub fn unsafeCreateN(self: *Self, allocator: std.mem.Allocator, n: usize) ![]*T {
            if (self.availableUnsafe() < n) return error.OutOfMemory;
            var list = try std.ArrayList(*T).initCapacity(allocator, n);
            errdefer list.deinit(allocator);
            // BUGFIX: if bookkeeping fails partway through, give every block we
            // already claimed back to the pool instead of leaking it. Previously
            // a mid-loop `put` failure left the claimed blocks marked assigned
            // forever. Registered after `list.deinit` so it runs first
            // (errdefers run in LIFO order) while `list.items` is still valid.
            errdefer {
                for (list.items) |claimed| {
                    _ = self.assigned_map.remove(claimed);
                    self.free_list.enqueueAssumeCapacity(claimed);
                }
            }
            for (0..n) |_| {
                // Cannot be empty: availability was checked above and callers of
                // the unsafe API must provide their own exclusion.
                const ptr = self.free_list.dequeue() orelse unreachable;
                list.appendAssumeCapacity(ptr);
                try self.assigned_map.put(self.allocator, ptr, true);
            }
            return list.toOwnedSlice(allocator);
        }

        /// Frees a memory block and returns it to the pool. Thread safe.
        ///
        /// This function takes a pointer to a memory block previously allocated
        /// from the pool, removes it from the `assigned_map` to mark it as no
        /// longer in use, and then enqueues it back into the `free_list` for
        /// reuse. Destroying a pointer that is not currently assigned by this
        /// pool is a programming error and hits `unreachable`.
        pub fn destroy(self: *Self, ptr: *T) void {
            self.mutex.lock();
            defer self.mutex.unlock();
            return self.unsafeDestroy(ptr);
        }

        /// Non thread safe version of `destroy`.
        pub fn unsafeDestroy(self: *Self, ptr: *T) void {
            const res = self.assigned_map.remove(ptr);
            if (!res) {
                log.err("ptr did not exist in pool {*}", .{ptr});
                unreachable;
            }
            self.free_list.enqueueAssumeCapacity(ptr);
        }
    };
}
test "init/deinit" {
    // Constructing and tearing down a pool must not leak;
    // `testing.allocator` fails the test if anything is left allocated.
    var pool = try MemoryPool(usize).init(testing.allocator, 100);
    defer pool.deinit();
}
test "create and destroy" {
    const Item = struct {
        data: usize = 0,
    };
    const allocator = testing.allocator;
    var pool = try MemoryPool(Item).init(allocator, 100);
    defer pool.deinit();
    // Track every pointer handed out so each can be returned later.
    var handed_out: std.ArrayList(*Item) = .empty;
    defer handed_out.deinit(allocator);
    // Drain the pool completely, writing a distinct value into each block.
    var idx: usize = 0;
    while (pool.available() > 0) : (idx += 1) {
        const ptr = try pool.create();
        ptr.* = .{ .data = idx };
        try handed_out.append(allocator, ptr);
    }
    try testing.expectEqual(0, pool.available());
    try testing.expectError(error.OutOfMemory, pool.create());
    // Returning a single block frees exactly one slot.
    const returned = handed_out.pop().?;
    pool.destroy(returned);
    try testing.expectEqual(1, pool.available());
    // Return everything else; the pool should be full again.
    while (handed_out.pop()) |ptr| pool.destroy(ptr);
    try testing.expectEqual(pool.capacity, pool.available());
}
test "create n ptrs" {
    const TestStruct = struct {
        data: usize = 0,
    };
    const allocator = testing.allocator;
    var memory_pool = try MemoryPool(TestStruct).init(allocator, 100);
    defer memory_pool.deinit();
    // Batch-allocate 10 blocks; the returned slice is owned by this test.
    // (Removed a dead `ptrs` ArrayList that was created but never used.)
    const p = try memory_pool.createN(allocator, 10);
    defer allocator.free(p);
    try testing.expectEqual(memory_pool.available(), memory_pool.capacity - p.len);
    for (p) |ptr| {
        memory_pool.destroy(ptr);
    }
    // After returning every block the pool must be full again.
    try testing.expectEqual(memory_pool.capacity, memory_pool.available());
}
test "data types" {
    const Person = struct {
        name: []const u8,
        age: u8,
    };
    const Roster = struct {
        people: []*Person,
        teams: [][]const u8,
    };
    // The pool must work for integers, slices, structs, and pointers alike.
    const types = [_]type{
        u8,
        u16,
        u32,
        u64,
        u128,
        usize,
        i8,
        i16,
        i32,
        i64,
        i128,
        []u8,
        Person,
        Roster,
        *Person,
        *Roster,
    };
    const allocator = testing.allocator;
    inline for (types) |Ty| {
        var pool = try MemoryPool(Ty).init(allocator, 100);
        defer pool.deinit();
        // Hold every handle so the drain below can be verified by count.
        var handles: std.ArrayList(*Ty) = .empty;
        defer handles.deinit(allocator);
        // Drain the pool completely for this element type.
        while (pool.available() > 0) {
            try handles.append(allocator, try pool.create());
        }
        try testing.expectEqual(handles.items.len, pool.capacity);
    }
}