diff --git a/lib/std/dynamic_library.zig b/lib/std/dynamic_library.zig
index ad37777e4323..2aac5fa12192 100644
--- a/lib/std/dynamic_library.zig
+++ b/lib/std/dynamic_library.zig
@@ -171,7 +171,7 @@ pub const ElfDynLib = struct {
         if (!mem.eql(u8, eh.e_ident[0..4], elf.MAGIC)) return error.NotElfFile;
         if (eh.e_type != elf.ET.DYN) return error.NotDynamicLibrary;
 
-        const elf_addr = @intFromPtr(file_bytes.ptr);
+        const elf_addr: [*]u8 = file_bytes.ptr;
 
         // Iterate over the program header entries to find out the
         // dynamic vector as well as the total size of the virtual memory.
@@ -179,15 +179,15 @@ pub const ElfDynLib = struct {
         var virt_addr_end: usize = 0;
         {
             var i: usize = 0;
-            var ph_addr: usize = elf_addr + eh.e_phoff;
+            var ph_addr: [*]u8 = elf_addr + eh.e_phoff;
             while (i < eh.e_phnum) : ({
                 i += 1;
                 ph_addr += eh.e_phentsize;
             }) {
-                const ph = @as(*elf.Phdr, @ptrFromInt(ph_addr));
+                const ph: *elf.Phdr = @ptrCast(@alignCast(ph_addr));
                 switch (ph.p_type) {
                     elf.PT_LOAD => virt_addr_end = @max(virt_addr_end, ph.p_vaddr + ph.p_memsz),
-                    elf.PT_DYNAMIC => maybe_dynv = @as([*]usize, @ptrFromInt(elf_addr + ph.p_offset)),
+                    elf.PT_DYNAMIC => maybe_dynv = @as([*]usize, @ptrCast(@alignCast(elf_addr + ph.p_offset))),
                     else => {},
                 }
             }
@@ -210,12 +210,12 @@ pub const ElfDynLib = struct {
         // Now iterate again and actually load all the program sections.
         {
             var i: usize = 0;
-            var ph_addr: usize = elf_addr + eh.e_phoff;
+            var ph_addr: [*]u8 = elf_addr + eh.e_phoff;
             while (i < eh.e_phnum) : ({
                 i += 1;
                 ph_addr += eh.e_phentsize;
             }) {
-                const ph = @as(*elf.Phdr, @ptrFromInt(ph_addr));
+                const ph: *elf.Phdr = @ptrCast(@alignCast(ph_addr));
                 switch (ph.p_type) {
                     elf.PT_LOAD => {
                         // The VirtAddr may not be page-aligned; in such case there will be
@@ -343,9 +343,9 @@ fn checkver(def_arg: *elf.Verdef, vsym_arg: i32, vername: []const u8, strings: [
             break;
         if (def.vd_next == 0)
             return false;
-        def = @as(*elf.Verdef, @ptrFromInt(@intFromPtr(def) + def.vd_next));
+        def = @ptrCast(@alignCast(@as([*]u8, @ptrCast(def)) + def.vd_next));
     }
-    const aux = @as(*elf.Verdaux, @ptrFromInt(@intFromPtr(def) + def.vd_aux));
+    const aux: *elf.Verdaux = @ptrCast(@alignCast(@as([*]u8, @ptrCast(def)) + def.vd_aux));
     return mem.eql(u8, vername, mem.sliceTo(strings + aux.vda_name, 0));
 }
diff --git a/lib/std/hash_map.zig b/lib/std/hash_map.zig
index e3b3e5881439..86ed91e06651 100644
--- a/lib/std/hash_map.zig
+++ b/lib/std/hash_map.zig
@@ -1483,15 +1483,10 @@ pub fn HashMapUnmanaged(
         /// key_ptr is assumed to be a valid pointer to a key that is present
         /// in the hash map.
         pub fn removeByPtr(self: *Self, key_ptr: *K) void {
-            // TODO: replace with pointer subtraction once supported by zig
             // if @sizeOf(K) == 0 then there is at most one item in the hash
             // map, which is assumed to exist as key_ptr must be valid. This
             // item must be at index 0.
-            const idx = if (@sizeOf(K) > 0)
-                (@intFromPtr(key_ptr) - @intFromPtr(self.keys())) / @sizeOf(K)
-            else
-                0;
-
+            const idx = if (@sizeOf(K) == 0) 0 else key_ptr - self.keys();
             self.removeByIndex(idx);
         }
 
diff --git a/lib/std/heap.zig b/lib/std/heap.zig
index 18332c79a826..9fe0fbf15175 100644
--- a/lib/std/heap.zig
+++ b/lib/std/heap.zig
@@ -54,7 +54,7 @@ const CAllocator = struct {
     };
 
     fn getHeader(ptr: [*]u8) *[*]u8 {
-        return @as(*[*]u8, @ptrFromInt(@intFromPtr(ptr) - @sizeOf(usize)));
+        return @ptrCast(@alignCast(ptr - @sizeOf(usize)));
     }
 
     fn alignedAlloc(len: usize, log2_align: u8) ?[*]u8 {
@@ -98,7 +98,7 @@ const CAllocator = struct {
         }
 
         const unaligned_ptr = getHeader(ptr).*;
-        const delta = @intFromPtr(ptr) - @intFromPtr(unaligned_ptr);
+        const delta = ptr - unaligned_ptr;
         return CAllocator.malloc_size(unaligned_ptr) - delta;
     }
 
@@ -292,7 +292,7 @@ pub const HeapAllocator = switch (builtin.os.tag) {
         }
 
         fn getRecordPtr(buf: []u8) *align(1) usize {
-            return @as(*align(1) usize, @ptrFromInt(@intFromPtr(buf.ptr) + buf.len));
+            return @ptrCast(@alignCast(buf.ptr + buf.len));
         }
 
         fn alloc(
@@ -363,11 +363,30 @@ pub const HeapAllocator = switch (builtin.os.tag) {
 };
 
 fn sliceContainsPtr(container: []u8, ptr: [*]u8) bool {
+    // TODO: refactor for a single, constant time solution for
+    // both comptime and runtime once a way to safely compare
+    // pointers at comptime is implemented
+
+    if (@inComptime()) {
+        for (container) |*item| {
+            if (item == &ptr[0]) return true;
+        }
+        return false;
+    }
+
     return @intFromPtr(ptr) >= @intFromPtr(container.ptr) and
         @intFromPtr(ptr) < (@intFromPtr(container.ptr) + container.len);
 }
 
 fn sliceContainsSlice(container: []u8, slice: []u8) bool {
+    // TODO: refactor for a single, constant time solution for
+    // both comptime and runtime once a way to safely compare
+    // pointers at comptime is implemented
+
+    if (@inComptime()) {
+        return sliceContainsPtr(container, slice.ptr) and sliceContainsPtr(container, slice.ptr + slice.len - 1);
+    }
+
     return @intFromPtr(slice.ptr) >= @intFromPtr(container.ptr) and
         (@intFromPtr(slice.ptr) + slice.len) <= (@intFromPtr(container.ptr) + container.len);
 }
diff --git a/lib/std/heap/arena_allocator.zig b/lib/std/heap/arena_allocator.zig
index 3cff6b439fbc..c28ea03bad36 100644
--- a/lib/std/heap/arena_allocator.zig
+++ b/lib/std/heap/arena_allocator.zig
@@ -214,7 +214,7 @@ pub const ArenaAllocator = struct {
 
         const cur_node = self.state.buffer_list.first orelse return false;
         const cur_buf = @as([*]u8, @ptrCast(cur_node))[@sizeOf(BufNode)..cur_node.data];
-        if (@intFromPtr(cur_buf.ptr) + self.state.end_index != @intFromPtr(buf.ptr) + buf.len) {
+        if (cur_buf.ptr + self.state.end_index != buf.ptr + buf.len) {
             // It's not the most recent allocation, so it cannot be expanded,
             // but it's fine if they want to make it smaller.
             return new_len <= buf.len;
@@ -240,7 +240,7 @@ pub const ArenaAllocator = struct {
 
         const cur_node = self.state.buffer_list.first orelse return;
         const cur_buf = @as([*]u8, @ptrCast(cur_node))[@sizeOf(BufNode)..cur_node.data];
-        if (@intFromPtr(cur_buf.ptr) + self.state.end_index == @intFromPtr(buf.ptr) + buf.len) {
+        if (cur_buf.ptr + self.state.end_index == buf.ptr + buf.len) {
             self.state.end_index -= buf.len;
         }
     }
diff --git a/lib/std/heap/general_purpose_allocator.zig b/lib/std/heap/general_purpose_allocator.zig
index 61e256d29097..6a8e66f8c4cb 100644
--- a/lib/std/heap/general_purpose_allocator.zig
+++ b/lib/std/heap/general_purpose_allocator.zig
@@ -255,7 +255,9 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
             used_count: SlotIndex,
 
             fn usedBits(bucket: *BucketHeader, index: usize) *u8 {
-                return @as(*u8, @ptrFromInt(@intFromPtr(bucket) + @sizeOf(BucketHeader) + index));
+                const bucket_bytes: [*]u8 = @ptrCast(bucket);
+                const after_header = bucket_bytes + @sizeOf(BucketHeader);
+                return &after_header[index];
             }
 
             fn requestedSizes(bucket: *BucketHeader, size_class: usize) []LargestSizeClassInt {
@@ -745,7 +747,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
                 }
                 return self.resizeLarge(old_mem, log2_old_align, new_size, ret_addr);
             };
-            const byte_offset = @intFromPtr(old_mem.ptr) - @intFromPtr(bucket.page);
+            const byte_offset = old_mem.ptr - bucket.page;
             const slot_index = @as(SlotIndex, @intCast(byte_offset / size_class));
             const used_byte_index = slot_index / 8;
             const used_bit_index = @as(u3, @intCast(slot_index % 8));
@@ -865,7 +867,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
                 self.freeLarge(old_mem, log2_old_align, ret_addr);
                 return;
             };
-            const byte_offset = @intFromPtr(old_mem.ptr) - @intFromPtr(bucket.page);
+            const byte_offset = old_mem.ptr - bucket.page;
             const slot_index = @as(SlotIndex, @intCast(byte_offset / size_class));
             const used_byte_index = slot_index / 8;
             const used_bit_index = @as(u3, @intCast(slot_index % 8));
diff --git a/lib/std/heap/sbrk_allocator.zig b/lib/std/heap/sbrk_allocator.zig
index 3ccc2dddf7f3..2f87fcbf87a7 100644
--- a/lib/std/heap/sbrk_allocator.zig
+++ b/lib/std/heap/sbrk_allocator.zig
@@ -126,14 +126,14 @@ pub fn SbrkAllocator(comptime sbrk: *const fn (n: usize) usize) type {
             const class = math.log2(slot_size) - min_class;
             const addr = @intFromPtr(buf.ptr);
             if (class < size_class_count) {
-                const node = @as(*usize, @ptrFromInt(addr + (slot_size - @sizeOf(usize))));
+                const node: *usize = @ptrCast(@alignCast(buf.ptr + slot_size - @sizeOf(usize)));
                 node.* = frees[class];
                 frees[class] = addr;
             } else {
                 const bigpages_needed = bigPagesNeeded(actual_len);
                 const pow2_pages = math.ceilPowerOfTwoAssert(usize, bigpages_needed);
                 const big_slot_size_bytes = pow2_pages * bigpage_size;
-                const node = @as(*usize, @ptrFromInt(addr + (big_slot_size_bytes - @sizeOf(usize))));
+                const node: *usize = @ptrCast(@alignCast(buf.ptr + big_slot_size_bytes - @sizeOf(usize)));
                 const big_class = math.log2(pow2_pages);
                 node.* = big_frees[big_class];
                 big_frees[big_class] = addr;
diff --git a/lib/std/net.zig b/lib/std/net.zig
index 10e1beb62200..4df61a94e53e 100644
--- a/lib/std/net.zig
+++ b/lib/std/net.zig
@@ -1737,19 +1737,19 @@ fn dnsParse(
     if (qdcount + ancount > 64) return error.InvalidDnsPacket;
     while (qdcount != 0) {
         qdcount -= 1;
-        while (@intFromPtr(p) - @intFromPtr(r.ptr) < r.len and p[0] -% 1 < 127) p += 1;
-        if (p[0] > 193 or (p[0] == 193 and p[1] > 254) or @intFromPtr(p) > @intFromPtr(r.ptr) + r.len - 6)
+        while (p - r.ptr < r.len and p[0] -% 1 < 127) p += 1;
+        if (p[0] > 193 or (p[0] == 193 and p[1] > 254) or p - r.ptr > r.len - 6)
             return error.InvalidDnsPacket;
         p += @as(usize, 5) + @intFromBool(p[0] != 0);
     }
     while (ancount != 0) {
         ancount -= 1;
-        while (@intFromPtr(p) - @intFromPtr(r.ptr) < r.len and p[0] -% 1 < 127) p += 1;
-        if (p[0] > 193 or (p[0] == 193 and p[1] > 254) or @intFromPtr(p) > @intFromPtr(r.ptr) + r.len - 6)
+        while (p - r.ptr < r.len and p[0] -% 1 < 127) p += 1;
+        if (p[0] > 193 or (p[0] == 193 and p[1] > 254) or p - r.ptr > r.len - 6)
             return error.InvalidDnsPacket;
         p += @as(usize, 1) + @intFromBool(p[0] != 0);
         const len = p[8] * @as(usize, 256) + p[9];
-        if (@intFromPtr(p) + len > @intFromPtr(r.ptr) + r.len) return error.InvalidDnsPacket;
+        if (p - r.ptr + len > r.len) return error.InvalidDnsPacket;
         try callback(ctx, p[1], p[10..][0..len], r);
         p += 10 + len;
     }
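
Note (not part of the patch): every hunk above applies one of two rewrites, sketched in isolation below. This is a minimal illustration, assuming the [*]T pointer arithmetic and pointer subtraction support this patch relies on; the Header type and the test are hypothetical. Offsetting a [*]u8 pointer and casting with @ptrCast/@alignCast replaces an @intFromPtr/@ptrFromInt round-trip, and subtracting two pointers into the same allocation replaces differencing their @intFromPtr values (as in removeByPtr and the byte_offset computations).

const std = @import("std");

// Hypothetical header type, used only for this illustration.
const Header = struct { len: usize };

test "pointer arithmetic replaces @intFromPtr/@ptrFromInt round-trips" {
    var buffer: [64]u8 align(@alignOf(Header)) = undefined;
    @memset(&buffer, 0);
    const base: [*]u8 = &buffer;

    // Old style: pointer -> integer -> offset -> pointer.
    const old_style: *Header = @ptrFromInt(@intFromPtr(base) + @sizeOf(Header));

    // New style: offset the byte pointer directly, then cast back.
    // @alignCast asserts (with a safety check) that the result
    // satisfies Header's alignment.
    const new_style: *Header = @ptrCast(@alignCast(base + @sizeOf(Header)));
    try std.testing.expectEqual(old_style, new_style);

    // Pointer subtraction yields the element distance between two
    // pointers into the same allocation (bytes, for [*]u8).
    const elem: [*]u8 = base + 24;
    try std.testing.expectEqual(@as(usize, 24), elem - base);
}

One caveat the sliceContainsPtr/sliceContainsSlice hunks make visible: @intFromPtr on a runtime pointer is unavailable at comptime, so those functions gain an @inComptime() branch that compares pointers element-by-element instead of comparing integer addresses.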