Commit be6b890
mem.Allocator: Make ptr optional to avoid UB when comparing Allocator instances
Previously, Allocator implementations that did not store any state would set `ptr` to `undefined`. However, the use of `undefined` meant that it was always unsafe/illegal to compare the `ptr` fields of two `Allocator` instances. This restriction was not documented anywhere, and the `ptr` field *was* being compared in real code, e.g. in `std.process.Child.collectOutput`, which can lead to undefined behavior in release modes (see ziglang#21756).

There are a few ways to address this:

1. Codify that comparing `ptr` is always illegal/unsafe, and remove all existing comparisons.
2. Set `ptr` to a legal dummy value, similar to the value returned by `Allocator.alloc` when a zero-length slice is requested.
3. Make `ptr` optional and set it to `null` for implementations that don't store any state.

The first option seems like footgun central (at least until usage of `undefined` is fully safety-checked in safe modes), and the second has its own issues, since the dummy value could inadvertently be equal to a real mapped pointer, so the third option was chosen.
1 parent: a5d4ad1

16 files changed: +115 −111 lines
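To illustrate the stateless pattern this commit establishes, here is a minimal sketch under the new signatures: `ptr` is `null`, the vtable functions take `?*anyopaque`, and comparing two `Allocator` values is now well-defined. The `failing_allocator`/`noAlloc` names are hypothetical, not part of this commit or of std:

```zig
const std = @import("std");
const Allocator = std.mem.Allocator;

// Stateless implementation: the context parameter is ignored entirely.
fn noAlloc(_: ?*anyopaque, _: usize, _: u8, _: usize) ?[*]u8 {
    return null; // always report out-of-memory
}

fn noResize(_: ?*anyopaque, _: []u8, _: u8, _: usize, _: usize) bool {
    return false;
}

fn noFree(_: ?*anyopaque, _: []u8, _: u8, _: usize) void {}

const failing_vtable = Allocator.VTable{
    .alloc = noAlloc,
    .resize = noResize,
    .free = noFree,
};

pub const failing_allocator = Allocator{
    // With `ptr` optional, a stateless implementation stores null
    // instead of undefined, so reading and comparing it is defined.
    .ptr = null,
    .vtable = &failing_vtable,
};

test "comparing Allocator instances is now defined behavior" {
    const a = failing_allocator;
    const b = failing_allocator;
    // Before this change `a.ptr` was undefined here, making this
    // comparison illegal; with null it is an ordinary `?*anyopaque`
    // equality test.
    try std.testing.expect(a.ptr == b.ptr and a.vtable == b.vtable);
}
```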

lib/std/crypto/tlcsprng.zig

Lines changed: 1 addition & 1 deletion
```diff
@@ -44,7 +44,7 @@ var install_atfork_handler = std.once(struct {
 
 threadlocal var wipe_mem: []align(mem.page_size) u8 = &[_]u8{};
 
-fn tlsCsprngFill(_: *anyopaque, buffer: []u8) void {
+fn tlsCsprngFill(_: ?*anyopaque, buffer: []u8) void {
     if (os_has_arc4random) {
         // arc4random is already a thread-local CSPRNG.
         return std.c.arc4random_buf(buffer.ptr, buffer.len);
```

lib/std/heap.zig

Lines changed: 32 additions & 32 deletions
```diff
@@ -103,7 +103,7 @@ const CAllocator = struct {
     }
 
     fn alloc(
-        _: *anyopaque,
+        _: ?*anyopaque,
         len: usize,
         log2_align: u8,
         return_address: usize,
@@ -114,7 +114,7 @@ const CAllocator = struct {
     }
 
     fn resize(
-        _: *anyopaque,
+        _: ?*anyopaque,
         buf: []u8,
         log2_buf_align: u8,
         new_len: usize,
@@ -135,7 +135,7 @@ const CAllocator = struct {
     }
 
     fn free(
-        _: *anyopaque,
+        _: ?*anyopaque,
         buf: []u8,
         log2_buf_align: u8,
         return_address: usize,
@@ -150,7 +150,7 @@ const CAllocator = struct {
 /// `malloc_usable_size` if available. For an allocator that directly calls
 /// `malloc`/`free`, see `raw_c_allocator`.
 pub const c_allocator = Allocator{
-    .ptr = undefined,
+    .ptr = null,
     .vtable = &c_allocator_vtable,
 };
 const c_allocator_vtable = Allocator.VTable{
@@ -165,7 +165,7 @@ const c_allocator_vtable = Allocator.VTable{
 /// `ArenaAllocator` for example and is more optimal in such a case
 /// than `c_allocator`.
 pub const raw_c_allocator = Allocator{
-    .ptr = undefined,
+    .ptr = null,
     .vtable = &raw_c_allocator_vtable,
 };
 const raw_c_allocator_vtable = Allocator.VTable{
@@ -175,7 +175,7 @@ const raw_c_allocator_vtable = Allocator.VTable{
 };
 
 fn rawCAlloc(
-    _: *anyopaque,
+    _: ?*anyopaque,
     len: usize,
     log2_ptr_align: u8,
     ret_addr: usize,
@@ -192,7 +192,7 @@ fn rawCAlloc(
 }
 
 fn rawCResize(
-    _: *anyopaque,
+    _: ?*anyopaque,
     buf: []u8,
     log2_old_align: u8,
     new_len: usize,
@@ -213,7 +213,7 @@ fn rawCResize(
 }
 
 fn rawCFree(
-    _: *anyopaque,
+    _: ?*anyopaque,
     buf: []u8,
     log2_old_align: u8,
     ret_addr: usize,
@@ -231,17 +231,17 @@ pub const page_allocator = if (@hasDecl(root, "os") and
     root.os.heap.page_allocator
 else if (builtin.target.isWasm())
     Allocator{
-        .ptr = undefined,
+        .ptr = null,
         .vtable = &WasmPageAllocator.vtable,
     }
 else if (builtin.target.os.tag == .plan9)
     Allocator{
-        .ptr = undefined,
+        .ptr = null,
         .vtable = &SbrkAllocator(std.os.plan9.sbrk).vtable,
     }
 else
     Allocator{
-        .ptr = undefined,
+        .ptr = null,
         .vtable = &PageAllocator.vtable,
     };
 
@@ -251,7 +251,7 @@ else
 /// and wasm64 architectures.
 /// Until then, it is available here to play with.
 pub const wasm_allocator = Allocator{
-    .ptr = undefined,
+    .ptr = null,
     .vtable = &std.heap.WasmAllocator.vtable,
 };
 
@@ -296,13 +296,13 @@ pub const HeapAllocator = switch (builtin.os.tag) {
         }
 
         fn alloc(
-            ctx: *anyopaque,
+            ctx: ?*anyopaque,
             n: usize,
             log2_ptr_align: u8,
             return_address: usize,
         ) ?[*]u8 {
             _ = return_address;
-            const self: *HeapAllocator = @ptrCast(@alignCast(ctx));
+            const self: *HeapAllocator = @ptrCast(@alignCast(ctx.?));
 
             const ptr_align = @as(usize, 1) << @as(Allocator.Log2Align, @intCast(log2_ptr_align));
             const amt = n + ptr_align - 1 + @sizeOf(usize);
@@ -323,15 +323,15 @@ pub const HeapAllocator = switch (builtin.os.tag) {
         }
 
         fn resize(
-            ctx: *anyopaque,
+            ctx: ?*anyopaque,
             buf: []u8,
             log2_buf_align: u8,
             new_size: usize,
             return_address: usize,
         ) bool {
             _ = log2_buf_align;
             _ = return_address;
-            const self: *HeapAllocator = @ptrCast(@alignCast(ctx));
+            const self: *HeapAllocator = @ptrCast(@alignCast(ctx.?));
 
             const root_addr = getRecordPtr(buf).*;
             const align_offset = @intFromPtr(buf.ptr) - root_addr;
@@ -348,14 +348,14 @@ pub const HeapAllocator = switch (builtin.os.tag) {
         }
 
         fn free(
-            ctx: *anyopaque,
+            ctx: ?*anyopaque,
             buf: []u8,
             log2_buf_align: u8,
             return_address: usize,
         ) void {
             _ = log2_buf_align;
             _ = return_address;
-            const self: *HeapAllocator = @ptrCast(@alignCast(ctx));
+            const self: *HeapAllocator = @ptrCast(@alignCast(ctx.?));
             windows.HeapFree(self.heap_handle.?, 0, @as(*anyopaque, @ptrFromInt(getRecordPtr(buf).*)));
         }
     },
@@ -423,8 +423,8 @@ pub const FixedBufferAllocator = struct {
         return buf.ptr + buf.len == self.buffer.ptr + self.end_index;
     }
 
-    fn alloc(ctx: *anyopaque, n: usize, log2_ptr_align: u8, ra: usize) ?[*]u8 {
-        const self: *FixedBufferAllocator = @ptrCast(@alignCast(ctx));
+    fn alloc(ctx: ?*anyopaque, n: usize, log2_ptr_align: u8, ra: usize) ?[*]u8 {
+        const self: *FixedBufferAllocator = @ptrCast(@alignCast(ctx.?));
         _ = ra;
         const ptr_align = @as(usize, 1) << @as(Allocator.Log2Align, @intCast(log2_ptr_align));
         const adjust_off = mem.alignPointerOffset(self.buffer.ptr + self.end_index, ptr_align) orelse return null;
@@ -436,13 +436,13 @@ pub const FixedBufferAllocator = struct {
     }
 
     fn resize(
-        ctx: *anyopaque,
+        ctx: ?*anyopaque,
         buf: []u8,
         log2_buf_align: u8,
         new_size: usize,
         return_address: usize,
     ) bool {
-        const self: *FixedBufferAllocator = @ptrCast(@alignCast(ctx));
+        const self: *FixedBufferAllocator = @ptrCast(@alignCast(ctx.?));
         _ = log2_buf_align;
         _ = return_address;
         assert(@inComptime() or self.ownsSlice(buf));
@@ -466,12 +466,12 @@ pub const FixedBufferAllocator = struct {
     }
 
     fn free(
-        ctx: *anyopaque,
+        ctx: ?*anyopaque,
         buf: []u8,
         log2_buf_align: u8,
         return_address: usize,
     ) void {
-        const self: *FixedBufferAllocator = @ptrCast(@alignCast(ctx));
+        const self: *FixedBufferAllocator = @ptrCast(@alignCast(ctx.?));
         _ = log2_buf_align;
         _ = return_address;
         assert(@inComptime() or self.ownsSlice(buf));
@@ -481,8 +481,8 @@ pub const FixedBufferAllocator = struct {
         }
     }
 
-    fn threadSafeAlloc(ctx: *anyopaque, n: usize, log2_ptr_align: u8, ra: usize) ?[*]u8 {
-        const self: *FixedBufferAllocator = @ptrCast(@alignCast(ctx));
+    fn threadSafeAlloc(ctx: ?*anyopaque, n: usize, log2_ptr_align: u8, ra: usize) ?[*]u8 {
+        const self: *FixedBufferAllocator = @ptrCast(@alignCast(ctx.?));
         _ = ra;
         const ptr_align = @as(usize, 1) << @as(Allocator.Log2Align, @intCast(log2_ptr_align));
         var end_index = @atomicLoad(usize, &self.end_index, .seq_cst);
@@ -551,24 +551,24 @@ pub fn StackFallbackAllocator(comptime size: usize) type {
         pub const allocator = @compileError("use 'const allocator = stackFallback(N).get();' instead");
 
         fn alloc(
-            ctx: *anyopaque,
+            ctx: ?*anyopaque,
             len: usize,
             log2_ptr_align: u8,
             ra: usize,
         ) ?[*]u8 {
-            const self: *Self = @ptrCast(@alignCast(ctx));
+            const self: *Self = @ptrCast(@alignCast(ctx.?));
             return FixedBufferAllocator.alloc(&self.fixed_buffer_allocator, len, log2_ptr_align, ra) orelse
                 return self.fallback_allocator.rawAlloc(len, log2_ptr_align, ra);
         }
 
         fn resize(
-            ctx: *anyopaque,
+            ctx: ?*anyopaque,
             buf: []u8,
             log2_buf_align: u8,
             new_len: usize,
             ra: usize,
         ) bool {
-            const self: *Self = @ptrCast(@alignCast(ctx));
+            const self: *Self = @ptrCast(@alignCast(ctx.?));
             if (self.fixed_buffer_allocator.ownsPtr(buf.ptr)) {
                 return FixedBufferAllocator.resize(&self.fixed_buffer_allocator, buf, log2_buf_align, new_len, ra);
             } else {
@@ -577,12 +577,12 @@ pub fn StackFallbackAllocator(comptime size: usize) type {
         }
 
         fn free(
-            ctx: *anyopaque,
+            ctx: ?*anyopaque,
             buf: []u8,
             log2_buf_align: u8,
             ra: usize,
         ) void {
-            const self: *Self = @ptrCast(@alignCast(ctx));
+            const self: *Self = @ptrCast(@alignCast(ctx.?));
             if (self.fixed_buffer_allocator.ownsPtr(buf.ptr)) {
                 return FixedBufferAllocator.free(&self.fixed_buffer_allocator, buf, log2_buf_align, ra);
             } else {
```

lib/std/heap/PageAllocator.zig

Lines changed: 3 additions & 3 deletions
```diff
@@ -14,7 +14,7 @@ pub const vtable = Allocator.VTable{
     .free = free,
 };
 
-fn alloc(_: *anyopaque, n: usize, log2_align: u8, ra: usize) ?[*]u8 {
+fn alloc(_: ?*anyopaque, n: usize, log2_align: u8, ra: usize) ?[*]u8 {
     _ = ra;
     _ = log2_align;
     assert(n > 0);
@@ -51,7 +51,7 @@ fn alloc(_: *anyopaque, n: usize, log2_align: u8, ra: usize) ?[*]u8 {
 }
 
 fn resize(
-    _: *anyopaque,
+    _: ?*anyopaque,
     buf_unaligned: []u8,
     log2_buf_align: u8,
     new_size: usize,
@@ -100,7 +100,7 @@ fn resize(
     return false;
 }
 
-fn free(_: *anyopaque, slice: []u8, log2_buf_align: u8, return_address: usize) void {
+fn free(_: ?*anyopaque, slice: []u8, log2_buf_align: u8, return_address: usize) void {
     _ = log2_buf_align;
     _ = return_address;
 
```

lib/std/heap/ThreadSafeAllocator.zig

Lines changed: 6 additions & 6 deletions
```diff
@@ -14,25 +14,25 @@ pub fn allocator(self: *ThreadSafeAllocator) Allocator {
     };
 }
 
-fn alloc(ctx: *anyopaque, n: usize, log2_ptr_align: u8, ra: usize) ?[*]u8 {
-    const self: *ThreadSafeAllocator = @ptrCast(@alignCast(ctx));
+fn alloc(ctx: ?*anyopaque, n: usize, log2_ptr_align: u8, ra: usize) ?[*]u8 {
+    const self: *ThreadSafeAllocator = @ptrCast(@alignCast(ctx.?));
     self.mutex.lock();
     defer self.mutex.unlock();
 
     return self.child_allocator.rawAlloc(n, log2_ptr_align, ra);
 }
 
-fn resize(ctx: *anyopaque, buf: []u8, log2_buf_align: u8, new_len: usize, ret_addr: usize) bool {
-    const self: *ThreadSafeAllocator = @ptrCast(@alignCast(ctx));
+fn resize(ctx: ?*anyopaque, buf: []u8, log2_buf_align: u8, new_len: usize, ret_addr: usize) bool {
+    const self: *ThreadSafeAllocator = @ptrCast(@alignCast(ctx.?));
 
     self.mutex.lock();
     defer self.mutex.unlock();
 
     return self.child_allocator.rawResize(buf, log2_buf_align, new_len, ret_addr);
 }
 
-fn free(ctx: *anyopaque, buf: []u8, log2_buf_align: u8, ret_addr: usize) void {
-    const self: *ThreadSafeAllocator = @ptrCast(@alignCast(ctx));
+fn free(ctx: ?*anyopaque, buf: []u8, log2_buf_align: u8, ret_addr: usize) void {
+    const self: *ThreadSafeAllocator = @ptrCast(@alignCast(ctx.?));
 
     self.mutex.lock();
     defer self.mutex.unlock();
```
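The ThreadSafeAllocator changes above show the stateful side of the migration: the context stays a real pointer, and each vtable function asserts non-null with `ctx.?` before casting. As a self-contained sketch of that pattern, including how `allocator()` populates the now-optional `ptr` (the `CountingAllocator` wrapper below is hypothetical, not part of this commit):

```zig
const std = @import("std");
const Allocator = std.mem.Allocator;

// Hypothetical wrapper that counts allocation calls, illustrating the
// stateful pattern under the new `?*anyopaque` signatures.
pub const CountingAllocator = struct {
    child: Allocator,
    count: usize = 0,

    pub fn allocator(self: *CountingAllocator) Allocator {
        return .{
            // A stateful implementation stores a real (non-null) context;
            // `*CountingAllocator` coerces to `?*anyopaque`.
            .ptr = self,
            .vtable = &vtable,
        };
    }

    const vtable = Allocator.VTable{
        .alloc = alloc,
        .resize = resize,
        .free = free,
    };

    fn alloc(ctx: ?*anyopaque, len: usize, log2_align: u8, ra: usize) ?[*]u8 {
        // `ctx.?` asserts non-null: a checked panic in safe builds rather
        // than the silent UB that reading `undefined` could produce.
        const self: *CountingAllocator = @ptrCast(@alignCast(ctx.?));
        self.count += 1;
        return self.child.rawAlloc(len, log2_align, ra);
    }

    fn resize(ctx: ?*anyopaque, buf: []u8, log2_align: u8, new_len: usize, ra: usize) bool {
        const self: *CountingAllocator = @ptrCast(@alignCast(ctx.?));
        return self.child.rawResize(buf, log2_align, new_len, ra);
    }

    fn free(ctx: ?*anyopaque, buf: []u8, log2_align: u8, ra: usize) void {
        const self: *CountingAllocator = @ptrCast(@alignCast(ctx.?));
        self.child.rawFree(buf, log2_align, ra);
    }
};
```

The `.?` unwrap means a stateful vtable accidentally invoked with a null context fails a safety check in Debug/ReleaseSafe instead of silently misbehaving.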

lib/std/heap/WasmAllocator.zig

Lines changed: 4 additions & 4 deletions
```diff
@@ -43,7 +43,7 @@ var frees = [1]usize{0} ** size_class_count;
 /// For each big size class, points to the freed pointer.
 var big_frees = [1]usize{0} ** big_size_class_count;
 
-fn alloc(ctx: *anyopaque, len: usize, log2_align: u8, return_address: usize) ?[*]u8 {
+fn alloc(ctx: ?*anyopaque, len: usize, log2_align: u8, return_address: usize) ?[*]u8 {
     _ = ctx;
     _ = return_address;
     // Make room for the freelist next pointer.
@@ -81,7 +81,7 @@ fn alloc(ctx: *anyopaque, len: usize, log2_align: u8, return_address: usize) ?[*
 }
 
 fn resize(
-    ctx: *anyopaque,
+    ctx: ?*anyopaque,
     buf: []u8,
     log2_buf_align: u8,
     new_len: usize,
@@ -109,7 +109,7 @@ fn resize(
 }
 
 fn free(
-    ctx: *anyopaque,
+    ctx: ?*anyopaque,
     buf: []u8,
     log2_buf_align: u8,
     return_address: usize,
@@ -158,7 +158,7 @@ fn allocBigPages(n: usize) usize {
 }
 
 const test_ally = Allocator{
-    .ptr = undefined,
+    .ptr = null,
     .vtable = &vtable,
 };
 
```

lib/std/heap/WasmPageAllocator.zig

Lines changed: 3 additions & 3 deletions
```diff
@@ -101,7 +101,7 @@ fn nPages(memsize: usize) usize {
     return mem.alignForward(usize, memsize, mem.page_size) / mem.page_size;
 }
 
-fn alloc(ctx: *anyopaque, len: usize, log2_align: u8, ra: usize) ?[*]u8 {
+fn alloc(ctx: ?*anyopaque, len: usize, log2_align: u8, ra: usize) ?[*]u8 {
     _ = ctx;
     _ = ra;
     if (len > maxInt(usize) - (mem.page_size - 1)) return null;
@@ -159,7 +159,7 @@ fn freePages(start: usize, end: usize) void {
 }
 
 fn resize(
-    ctx: *anyopaque,
+    ctx: ?*anyopaque,
     buf: []u8,
     log2_buf_align: u8,
     new_len: usize,
@@ -180,7 +180,7 @@
 }
 
 fn free(
-    ctx: *anyopaque,
+    ctx: ?*anyopaque,
     buf: []u8,
     log2_buf_align: u8,
     return_address: usize,
```
