
Commit b7d56b4

Add runtime page_size
1 parent ad168db commit b7d56b4

15 files changed: +180 −69 lines

lib/std/c.zig
Lines changed: 1 addition & 0 deletions

@@ -79,6 +79,7 @@ pub usingnamespace switch (builtin.os.tag) {
 pub extern "c" fn getrusage(who: c_int, usage: *c.rusage) c_int;
 
 pub extern "c" fn sched_yield() c_int;
+pub extern "c" fn sysconf(sc: c_int) c_long;
 
 pub extern "c" fn sigaction(sig: c_int, noalias act: ?*const c.Sigaction, noalias oact: ?*c.Sigaction) c_int;
 pub extern "c" fn sigprocmask(how: c_int, noalias set: ?*const c.sigset_t, noalias oset: ?*c.sigset_t) c_int;

lib/std/heap.zig
Lines changed: 8 additions & 0 deletions

@@ -578,6 +578,14 @@ pub fn StackFallbackAllocator(comptime size: usize) type {
     };
 }
 
+/// Runtime known minimum page size.
+pub fn pageSize() usize {
+    return switch (builtin.os.tag) {
+        .linux => if (builtin.link_libc) @intCast(std.c.sysconf(std.os.linux.SC.PAGESIZE)) else std.os.linux.getauxval(std.elf.AT_PAGESZ),
+        else => mem.page_size,
+    };
+}
+
 test "c_allocator" {
     if (builtin.link_libc) {
         try testAllocator(c_allocator);
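
For context, a minimal sketch of how a caller might consume the new runtime query once this change lands; the test name and the lengths are illustrative only:

const std = @import("std");

test "round a length up to the runtime page size" {
    // On Linux, pageSize() is resolved at runtime (sysconf or AT_PAGESZ);
    // elsewhere it falls back to the comptime mem.page_size constant.
    const page = std.heap.pageSize();
    const len: usize = 100;
    const aligned = std.mem.alignForward(usize, len, page);
    try std.testing.expect(aligned >= len);
    try std.testing.expect(aligned % page == 0);
}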

lib/std/heap/PageAllocator.zig
Lines changed: 9 additions & 8 deletions

@@ -2,6 +2,7 @@ const std = @import("../std.zig");
 const builtin = @import("builtin");
 const Allocator = std.mem.Allocator;
 const mem = std.mem;
+const heap = std.heap;
 const os = std.os;
 const maxInt = std.math.maxInt;
 const assert = std.debug.assert;
@@ -16,8 +17,8 @@ fn alloc(_: *anyopaque, n: usize, log2_align: u8, ra: usize) ?[*]u8 {
     _ = ra;
     _ = log2_align;
     assert(n > 0);
-    if (n > maxInt(usize) - (mem.page_size - 1)) return null;
-    const aligned_len = mem.alignForward(usize, n, mem.page_size);
+    if (n > maxInt(usize) - (heap.pageSize() - 1)) return null;
+    const aligned_len = mem.alignForward(usize, n, heap.pageSize());
 
     if (builtin.os.tag == .windows) {
         const w = os.windows;
@@ -39,7 +40,7 @@ fn alloc(_: *anyopaque, n: usize, log2_align: u8, ra: usize) ?[*]u8 {
         -1,
         0,
     ) catch return null;
-    assert(mem.isAligned(@intFromPtr(slice.ptr), mem.page_size));
+    assert(mem.isAligned(@intFromPtr(slice.ptr), heap.pageSize()));
     const new_hint: [*]align(mem.page_size) u8 = @alignCast(slice.ptr + aligned_len);
     _ = @cmpxchgStrong(@TypeOf(std.heap.next_mmap_addr_hint), &std.heap.next_mmap_addr_hint, hint, new_hint, .Monotonic, .Monotonic);
     return slice.ptr;
@@ -54,14 +55,14 @@ fn resize(
 ) bool {
     _ = log2_buf_align;
     _ = return_address;
-    const new_size_aligned = mem.alignForward(usize, new_size, mem.page_size);
+    const new_size_aligned = mem.alignForward(usize, new_size, heap.pageSize());
 
     if (builtin.os.tag == .windows) {
         const w = os.windows;
         if (new_size <= buf_unaligned.len) {
             const base_addr = @intFromPtr(buf_unaligned.ptr);
             const old_addr_end = base_addr + buf_unaligned.len;
-            const new_addr_end = mem.alignForward(usize, base_addr + new_size, mem.page_size);
+            const new_addr_end = mem.alignForward(usize, base_addr + new_size, heap.pageSize());
             if (old_addr_end > new_addr_end) {
                 // For shrinking that is not releasing, we will only
                 // decommit the pages not needed anymore.
@@ -73,14 +74,14 @@ fn resize(
             }
             return true;
         }
-        const old_size_aligned = mem.alignForward(usize, buf_unaligned.len, mem.page_size);
+        const old_size_aligned = mem.alignForward(usize, buf_unaligned.len, heap.pageSize());
         if (new_size_aligned <= old_size_aligned) {
             return true;
         }
         return false;
     }
 
-    const buf_aligned_len = mem.alignForward(usize, buf_unaligned.len, mem.page_size);
+    const buf_aligned_len = mem.alignForward(usize, buf_unaligned.len, heap.pageSize());
     if (new_size_aligned == buf_aligned_len)
         return true;
 
@@ -103,7 +104,7 @@ fn free(_: *anyopaque, slice: []u8, log2_buf_align: u8, return_address: usize) v
     if (builtin.os.tag == .windows) {
         os.windows.VirtualFree(slice.ptr, 0, os.windows.MEM_RELEASE);
     } else {
-        const buf_aligned_len = mem.alignForward(usize, slice.len, mem.page_size);
+        const buf_aligned_len = mem.alignForward(usize, slice.len, heap.pageSize());
         os.munmap(@alignCast(slice.ptr[0..buf_aligned_len]));
     }
 }
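
The allocator keeps the same structure; only the page-size source changes. As a stand-alone illustration of the guard-and-round pattern used in alloc() above, here is a hypothetical helper (pageAlignedLen is not part of the commit):

const std = @import("std");

/// Hypothetical helper mirroring the overflow guard in PageAllocator.alloc():
/// reject lengths that would wrap when rounded up to a whole page.
fn pageAlignedLen(n: usize) ?usize {
    const page = std.heap.pageSize();
    if (n > std.math.maxInt(usize) - (page - 1)) return null;
    return std.mem.alignForward(usize, n, page);
}

test "pageAlignedLen rounds up and rejects overflow" {
    try std.testing.expectEqual(@as(?usize, null), pageAlignedLen(std.math.maxInt(usize)));
    try std.testing.expectEqual(std.heap.pageSize(), pageAlignedLen(1).?);
}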

lib/std/heap/general_purpose_allocator.zig
Lines changed: 41 additions & 41 deletions

@@ -195,7 +195,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
 
 pub const Error = mem.Allocator.Error;
 
-const small_bucket_count = math.log2(page_size);
+const small_bucket_count = math.log2(std.heap.pageSize());
 const largest_bucket_object_size = 1 << (small_bucket_count - 1);
 const LargestSizeClassInt = std.math.IntFittingRange(0, largest_bucket_object_size);
 
@@ -262,14 +262,14 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
     if (!config.safety) @compileError("requested size is only stored when safety is enabled");
     const start_ptr = @as([*]u8, @ptrCast(bucket)) + bucketRequestedSizesStart(size_class);
     const sizes = @as([*]LargestSizeClassInt, @ptrCast(@alignCast(start_ptr)));
-    const slot_count = @divExact(page_size, size_class);
+    const slot_count = @divExact(std.heap.pageSize(), size_class);
     return sizes[0..slot_count];
 }
 
 fn log2PtrAligns(bucket: *BucketHeader, size_class: usize) []u8 {
     if (!config.safety) @compileError("requested size is only stored when safety is enabled");
     const aligns_ptr = @as([*]u8, @ptrCast(bucket)) + bucketAlignsStart(size_class);
-    const slot_count = @divExact(page_size, size_class);
+    const slot_count = @divExact(std.heap.pageSize(), size_class);
     return aligns_ptr[0..slot_count];
 }
 
@@ -338,13 +338,13 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
 
 fn bucketAlignsStart(size_class: usize) usize {
     if (!config.safety) @compileError("requested sizes are not stored unless safety is enabled");
-    const slot_count = @divExact(page_size, size_class);
+    const slot_count = @divExact(std.heap.pageSize(), size_class);
     return bucketRequestedSizesStart(size_class) + (@sizeOf(LargestSizeClassInt) * slot_count);
 }
 
 fn bucketStackFramesStart(size_class: usize) usize {
     const unaligned_start = if (config.safety) blk: {
-        const slot_count = @divExact(page_size, size_class);
+        const slot_count = @divExact(std.heap.pageSize(), size_class);
         break :blk bucketAlignsStart(size_class) + slot_count;
     } else @sizeOf(BucketHeader) + usedBitsCount(size_class);
     return mem.alignForward(
@@ -355,12 +355,12 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
 }
 
 fn bucketSize(size_class: usize) usize {
-    const slot_count = @divExact(page_size, size_class);
+    const slot_count = @divExact(std.heap.pageSize(), size_class);
     return bucketStackFramesStart(size_class) + one_trace_size * traces_per_slot * slot_count;
 }
 
 fn usedBitsCount(size_class: usize) usize {
-    const slot_count = @divExact(page_size, size_class);
+    const slot_count = @divExact(std.heap.pageSize(), size_class);
     if (slot_count < 8) return 1;
     return @divExact(slot_count, 8);
 }
@@ -444,10 +444,10 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
         var bucket = node.key;
         if (config.never_unmap) {
             // free page that was intentionally leaked by never_unmap
-            self.backing_allocator.free(bucket.page[0..page_size]);
+            self.backing_allocator.free(bucket.page[0..std.heap.pageSize()]);
         }
         // alloc_cursor was set to slot count when bucket added to empty_buckets
-        self.freeBucket(bucket, @divExact(page_size, bucket.alloc_cursor));
+        self.freeBucket(bucket, @divExact(std.heap.pageSize(), bucket.alloc_cursor));
         self.bucket_node_pool.destroy(node);
     }
     self.empty_buckets.root = null;
@@ -510,7 +510,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
 fn allocSlot(self: *Self, size_class: usize, trace_addr: usize) Error!Slot {
     const bucket_index = math.log2(size_class);
     var buckets = &self.buckets[bucket_index];
-    const slot_count = @divExact(page_size, size_class);
+    const slot_count = @divExact(std.heap.pageSize(), size_class);
     if (self.cur_buckets[bucket_index] == null or self.cur_buckets[bucket_index].?.alloc_cursor == slot_count) {
         var new_bucket = try self.createBucket(size_class);
         errdefer self.freeBucket(new_bucket, size_class);
@@ -543,7 +543,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
     addr: usize,
     current_bucket: ?*BucketHeader,
 ) ?*BucketHeader {
-    const search_page: [*]align(page_size) u8 = @ptrFromInt(mem.alignBackward(usize, addr, page_size));
+    const search_page: [*]align(page_size) u8 = @ptrFromInt(mem.alignBackward(usize, addr, std.heap.pageSize()));
     if (current_bucket != null and current_bucket.?.page == search_page) {
         return current_bucket;
     }
@@ -921,14 +921,14 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
         self.cur_buckets[bucket_index] = null;
     }
     if (!config.never_unmap) {
-        self.backing_allocator.free(bucket.page[0..page_size]);
+        self.backing_allocator.free(bucket.page[0..std.heap.pageSize()]);
     }
     if (!config.retain_metadata) {
         self.freeBucket(bucket, size_class);
         self.bucket_node_pool.destroy(node);
     } else {
         // move alloc_cursor to end so we can tell size_class later
-        const slot_count = @divExact(page_size, size_class);
+        const slot_count = @divExact(std.heap.pageSize(), size_class);
         bucket.alloc_cursor = @as(SlotIndex, @truncate(slot_count));
         var empty_entry = self.empty_buckets.getEntryFor(node.key);
         empty_entry.set(node);
@@ -1012,7 +1012,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
 }
 
 fn createBucket(self: *Self, size_class: usize) Error!*BucketHeader {
-    const page = try self.backing_allocator.alignedAlloc(u8, page_size, page_size);
+    const page = try self.backing_allocator.alignedAlloc(u8, std.heap.pageSize(), std.heap.pageSize());
     errdefer self.backing_allocator.free(page);
 
     const bucket_size = bucketSize(size_class);
@@ -1152,17 +1152,17 @@ test "large object - grow" {
     defer std.testing.expect(gpa.deinit() == .ok) catch @panic("leak");
     const allocator = gpa.allocator();
 
-    var slice1 = try allocator.alloc(u8, page_size * 2 - 20);
+    var slice1 = try allocator.alloc(u8, std.heap.pageSize() * 2 - 20);
     defer allocator.free(slice1);
 
     const old = slice1;
-    slice1 = try allocator.realloc(slice1, page_size * 2 - 10);
+    slice1 = try allocator.realloc(slice1, std.heap.pageSize() * 2 - 10);
     try std.testing.expect(slice1.ptr == old.ptr);
 
-    slice1 = try allocator.realloc(slice1, page_size * 2);
+    slice1 = try allocator.realloc(slice1, std.heap.pageSize() * 2);
     try std.testing.expect(slice1.ptr == old.ptr);
 
-    slice1 = try allocator.realloc(slice1, page_size * 2 + 1);
+    slice1 = try allocator.realloc(slice1, std.heap.pageSize() * 2 + 1);
 }
 
 test "realloc small object to large object" {
@@ -1176,7 +1176,7 @@ test "realloc small object to large object" {
     slice[60] = 0x34;
 
     // This requires upgrading to a large object
-    const large_object_size = page_size * 2 + 50;
+    const large_object_size = std.heap.pageSize() * 2 + 50;
     slice = try allocator.realloc(slice, large_object_size);
     try std.testing.expect(slice[0] == 0x12);
    try std.testing.expect(slice[60] == 0x34);
@@ -1187,22 +1187,22 @@ test "shrink large object to large object" {
     defer std.testing.expect(gpa.deinit() == .ok) catch @panic("leak");
     const allocator = gpa.allocator();
 
-    var slice = try allocator.alloc(u8, page_size * 2 + 50);
+    var slice = try allocator.alloc(u8, std.heap.pageSize() * 2 + 50);
     defer allocator.free(slice);
     slice[0] = 0x12;
     slice[60] = 0x34;
 
-    if (!allocator.resize(slice, page_size * 2 + 1)) return;
-    slice = slice.ptr[0 .. page_size * 2 + 1];
+    if (!allocator.resize(slice, std.heap.pageSize() * 2 + 1)) return;
+    slice = slice.ptr[0 .. std.heap.pageSize() * 2 + 1];
     try std.testing.expect(slice[0] == 0x12);
     try std.testing.expect(slice[60] == 0x34);
 
-    try std.testing.expect(allocator.resize(slice, page_size * 2 + 1));
-    slice = slice[0 .. page_size * 2 + 1];
+    try std.testing.expect(allocator.resize(slice, std.heap.pageSize() * 2 + 1));
+    slice = slice[0 .. std.heap.pageSize() * 2 + 1];
     try std.testing.expect(slice[0] == 0x12);
     try std.testing.expect(slice[60] == 0x34);
 
-    slice = try allocator.realloc(slice, page_size * 2);
+    slice = try allocator.realloc(slice, std.heap.pageSize() * 2);
     try std.testing.expect(slice[0] == 0x12);
     try std.testing.expect(slice[60] == 0x34);
 }
@@ -1216,13 +1216,13 @@ test "shrink large object to large object with larger alignment" {
     var fba = std.heap.FixedBufferAllocator.init(&debug_buffer);
     const debug_allocator = fba.allocator();
 
-    const alloc_size = page_size * 2 + 50;
+    const alloc_size = std.heap.pageSize() * 2 + 50;
     var slice = try allocator.alignedAlloc(u8, 16, alloc_size);
     defer allocator.free(slice);
 
     const big_alignment: usize = switch (builtin.os.tag) {
-        .windows => page_size * 32, // Windows aligns to 64K.
-        else => page_size * 2,
+        .windows => std.heap.pageSize() * 32, // Windows aligns to 64K.
+        else => std.heap.pageSize() * 2,
     };
     // This loop allocates until we find a page that is not aligned to the big
     // alignment. Then we shrink the allocation after the loop, but increase the
@@ -1248,7 +1248,7 @@ test "realloc large object to small object" {
     defer std.testing.expect(gpa.deinit() == .ok) catch @panic("leak");
     const allocator = gpa.allocator();
 
-    var slice = try allocator.alloc(u8, page_size * 2 + 50);
+    var slice = try allocator.alloc(u8, std.heap.pageSize() * 2 + 50);
     defer allocator.free(slice);
     slice[0] = 0x12;
     slice[16] = 0x34;
@@ -1288,34 +1288,34 @@ test "realloc large object to larger alignment" {
     var fba = std.heap.FixedBufferAllocator.init(&debug_buffer);
     const debug_allocator = fba.allocator();
 
-    var slice = try allocator.alignedAlloc(u8, 16, page_size * 2 + 50);
+    var slice = try allocator.alignedAlloc(u8, 16, std.heap.pageSize() * 2 + 50);
     defer allocator.free(slice);
 
     const big_alignment: usize = switch (builtin.os.tag) {
-        .windows => page_size * 32, // Windows aligns to 64K.
-        else => page_size * 2,
+        .windows => std.heap.pageSize() * 32, // Windows aligns to 64K.
+        else => std.heap.pageSize() * 2,
     };
     // This loop allocates until we find a page that is not aligned to the big alignment.
     var stuff_to_free = std.ArrayList([]align(16) u8).init(debug_allocator);
     while (mem.isAligned(@intFromPtr(slice.ptr), big_alignment)) {
         try stuff_to_free.append(slice);
-        slice = try allocator.alignedAlloc(u8, 16, page_size * 2 + 50);
+        slice = try allocator.alignedAlloc(u8, 16, std.heap.pageSize() * 2 + 50);
     }
     while (stuff_to_free.popOrNull()) |item| {
         allocator.free(item);
     }
     slice[0] = 0x12;
     slice[16] = 0x34;
 
-    slice = try allocator.reallocAdvanced(slice, 32, page_size * 2 + 100);
+    slice = try allocator.reallocAdvanced(slice, 32, std.heap.pageSize() * 2 + 100);
     try std.testing.expect(slice[0] == 0x12);
     try std.testing.expect(slice[16] == 0x34);
 
-    slice = try allocator.reallocAdvanced(slice, 32, page_size * 2 + 25);
+    slice = try allocator.reallocAdvanced(slice, 32, std.heap.pageSize() * 2 + 25);
     try std.testing.expect(slice[0] == 0x12);
     try std.testing.expect(slice[16] == 0x34);
 
-    slice = try allocator.reallocAdvanced(slice, big_alignment, page_size * 2 + 100);
+    slice = try allocator.reallocAdvanced(slice, big_alignment, std.heap.pageSize() * 2 + 100);
     try std.testing.expect(slice[0] == 0x12);
     try std.testing.expect(slice[16] == 0x34);
 }
@@ -1326,7 +1326,7 @@ test "large object shrinks to small but allocation fails during shrink" {
     defer std.testing.expect(gpa.deinit() == .ok) catch @panic("leak");
     const allocator = gpa.allocator();
 
-    var slice = try allocator.alloc(u8, page_size * 2 + 50);
+    var slice = try allocator.alloc(u8, std.heap.pageSize() * 2 + 50);
     defer allocator.free(slice);
     slice[0] = 0x12;
     slice[3] = 0x34;
@@ -1397,7 +1397,7 @@ test "double frees" {
     try std.testing.expect(GPA.searchBucket(&gpa.empty_buckets, @intFromPtr(small.ptr), null) != null);
 
     // detect a large allocation double free
-    const large = try allocator.alloc(u8, 2 * page_size);
+    const large = try allocator.alloc(u8, 2 * std.heap.pageSize());
     try std.testing.expect(gpa.large_allocations.contains(@intFromPtr(large.ptr)));
     try std.testing.expectEqual(gpa.large_allocations.getEntry(@intFromPtr(large.ptr)).?.value_ptr.bytes, large);
     allocator.free(large);
@@ -1406,7 +1406,7 @@ test "double frees" {
 
     const normal_small = try allocator.alloc(u8, size_class);
     defer allocator.free(normal_small);
-    const normal_large = try allocator.alloc(u8, 2 * page_size);
+    const normal_large = try allocator.alloc(u8, 2 * std.heap.pageSize());
     defer allocator.free(normal_large);
 
     // check that flushing retained metadata doesn't disturb live allocations
@@ -1422,8 +1422,8 @@ test "bug 9995 fix, large allocs count requested size not backing size" {
     var gpa = GeneralPurposeAllocator(.{ .enable_memory_limit = true }){};
     const allocator = gpa.allocator();
 
-    var buf = try allocator.alignedAlloc(u8, 1, page_size + 1);
-    try std.testing.expect(gpa.total_requested_bytes == page_size + 1);
+    var buf = try allocator.alignedAlloc(u8, 1, std.heap.pageSize() + 1);
+    try std.testing.expect(gpa.total_requested_bytes == std.heap.pageSize() + 1);
     buf = try allocator.realloc(buf, 1);
     try std.testing.expect(gpa.total_requested_bytes == 1);
     buf = try allocator.realloc(buf, 2);
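
Every small-object bucket still spans exactly one page, so the slot count per size class is derived with @divExact against the (now runtime) page size. A small illustrative check of that invariant, with an arbitrarily chosen size class:

const std = @import("std");

test "a power-of-two size class divides a bucket page exactly" {
    // Mirrors the slot_count computation above; 64 is an arbitrary example.
    const size_class: usize = 64;
    const slot_count = @divExact(std.heap.pageSize(), size_class);
    try std.testing.expectEqual(std.heap.pageSize(), slot_count * size_class);
}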
