Sema: Fix fnptr alignment safety checks to account for potential ISA tag.

As seen on e.g. Arm/Thumb and MIPS (MIPS16/microMIPS).

Fixes #22888.
alexrp committed Feb 21, 2025
1 parent 8a3aeba commit 9ddf5de
Showing 4 changed files with 53 additions and 6 deletions.
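For context on the fix, here is a minimal, hedged sketch (not part of the commit) of the masked alignment check the changed Sema sites now emit: for function pointers, the ISA-tag bit is removed from the alignment bits before testing, so a Thumb/MIPS16-tagged address is not reported as misaligned. The helper name, addresses, and alignment below are illustrative assumptions.

const std = @import("std");

// Sketch only: `fn_ptr_mask` stands in for target_util.functionPointerMask;
// the concrete addresses and alignment in the test are hypothetical.
fn checkAligned(addr: usize, alignment: usize, fn_ptr_mask: ?usize) bool {
    const align_bits = alignment - 1;
    // For function pointers, clear the ISA-tag bit out of the mask so a
    // Thumb/MIPS16/microMIPS tag does not count as misalignment.
    const mask = if (fn_ptr_mask) |m| align_bits & m else align_bits;
    return addr & mask == 0;
}

test "Thumb-tagged address passes the masked check" {
    try std.testing.expect(!checkAligned(0x8001, 4, null)); // naive check trips on the tag bit
    try std.testing.expect(checkAligned(0x8001, 4, ~@as(usize, 1))); // masked check passes
}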
30 changes: 24 additions & 6 deletions src/Sema.zig
@@ -23095,8 +23095,14 @@ fn zirPtrFromInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
}
if (ptr_align.compare(.gt, .@"1")) {
const align_bytes_minus_1 = ptr_align.toByteUnits().? - 1;
const align_minus_1 = Air.internedToRef((try sema.splat(operand_ty, try pt.intValue(Type.usize, align_bytes_minus_1))).toIntern());
const remainder = try block.addBinOp(.bit_and, operand_coerced, align_minus_1);
const align_mask = Air.internedToRef((try sema.splat(operand_ty, try pt.intValue(
Type.usize,
if (elem_ty.fnPtrMaskOrNull(zcu)) |mask|
align_bytes_minus_1 & mask
else
align_bytes_minus_1,
))).toIntern());
const remainder = try block.addBinOp(.bit_and, operand_coerced, align_mask);
const is_aligned = if (is_vector) all_aligned: {
const splat_zero_usize = Air.internedToRef((try sema.splat(operand_ty, .zero_usize)).toIntern());
const is_aligned = try block.addCmpVector(remainder, splat_zero_usize, .eq);
@@ -23125,8 +23131,14 @@ fn zirPtrFromInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
}
if (ptr_align.compare(.gt, .@"1")) {
const align_bytes_minus_1 = ptr_align.toByteUnits().? - 1;
const align_minus_1 = Air.internedToRef((try pt.intValue(Type.usize, align_bytes_minus_1)).toIntern());
const remainder = try block.addBinOp(.bit_and, elem_coerced, align_minus_1);
const align_mask = Air.internedToRef((try pt.intValue(
Type.usize,
if (elem_ty.fnPtrMaskOrNull(zcu)) |mask|
align_bytes_minus_1 & mask
else
align_bytes_minus_1,
)).toIntern());
const remainder = try block.addBinOp(.bit_and, elem_coerced, align_mask);
const is_aligned = try block.addBinOp(.cmp_eq, remainder, .zero_usize);
try sema.addSafetyCheck(block, src, is_aligned, .incorrect_alignment);
}
@@ -23706,13 +23718,19 @@ fn ptrCastFull(
try Type.fromInterned(dest_info.child).hasRuntimeBitsSema(pt))
{
const align_bytes_minus_1 = dest_align.toByteUnits().? - 1;
const align_minus_1 = Air.internedToRef((try pt.intValue(Type.usize, align_bytes_minus_1)).toIntern());
const align_mask = Air.internedToRef((try pt.intValue(
Type.usize,
if (Type.fromInterned(dest_info.child).fnPtrMaskOrNull(zcu)) |mask|
align_bytes_minus_1 & mask
else
align_bytes_minus_1,
)).toIntern());
const actual_ptr = if (src_info.flags.size == .slice)
try sema.analyzeSlicePtr(block, src, ptr, operand_ty)
else
ptr;
const ptr_int = try block.addBitCast(.usize, actual_ptr);
const remainder = try block.addBinOp(.bit_and, ptr_int, align_minus_1);
const remainder = try block.addBinOp(.bit_and, ptr_int, align_mask);
const is_aligned = try block.addBinOp(.cmp_eq, remainder, .zero_usize);
const ok = if (src_info.flags.size == .slice and dest_info.flags.size == .slice) ok: {
const len = try sema.analyzeSliceLen(block, operand_src, ptr);
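The `is_vector` branch in the first hunk compares every lane of the masked remainder against a splatted zero. As a rough source-level sketch of that per-lane test (the helper name and sample addresses are illustrative assumptions, not from the compiler):

const std = @import("std");

// Sketch: every lane of a vector of pointer-sized integers must have no
// masked alignment bits set.
fn allAligned(comptime n: usize, addrs: @Vector(n, usize), masked_align_bits: usize) bool {
    const mask: @Vector(n, usize) = @splat(masked_align_bits);
    const zero: @Vector(n, usize) = @splat(0);
    return @reduce(.And, (addrs & mask) == zero);
}

test "vector of Thumb-tagged addresses passes the masked check" {
    const addrs: @Vector(2, usize) = .{ 0x8001, 0x9005 };
    // 4-byte alignment with the Thumb bit masked out: (4 - 1) & ~1 == 2
    try std.testing.expect(allAligned(2, addrs, 2));
}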
7 changes: 7 additions & 0 deletions src/Type.zig
@@ -2542,6 +2542,13 @@ pub fn fnIsVarArgs(ty: Type, zcu: *const Zcu) bool {
return zcu.intern_pool.indexToKey(ty.toIntern()).func_type.is_var_args;
}

pub fn fnPtrMaskOrNull(ty: Type, zcu: *const Zcu) ?u64 {
return switch (ty.zigTypeTag(zcu)) {
.@"fn" => target_util.functionPointerMask(zcu.getTarget()),
else => null,
};
}

pub fn isNumeric(ty: Type, zcu: *const Zcu) bool {
return switch (ty.toIntern()) {
.f16_type,
11 changes: 11 additions & 0 deletions src/target.zig
@@ -629,6 +629,17 @@ pub fn supportsFunctionAlignment(target: std.Target) bool {
};
}

pub fn functionPointerMask(target: std.Target) ?u64 {
// 32-bit Arm uses the LSB to mean that the target function contains Thumb code.
// MIPS uses the LSB to mean that the target function contains MIPS16/microMIPS code.
return if (target.cpu.arch.isArm() or target.cpu.arch.isMIPS32())
~@as(u32, 1)
else if (target.cpu.arch.isMIPS64())
~@as(u64, 1)
else
null;
}

pub fn supportsTailCall(target: std.Target, backend: std.builtin.CompilerBackend) bool {
switch (backend) {
.stage1, .stage2_llvm => return @import("codegen/llvm.zig").supportsTailCall(target),
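A worked example of the mask arithmetic (a sketch; the 4-byte alignment requirement is assumed for illustration): on Arm and MIPS32 the function above returns ~1, so only the alignment bits above the tag bit remain in the check.

const std = @import("std");

test "ISA-tag bit is excluded from the alignment bits" {
    const align_bits: u32 = 4 - 1; // 0b011 for a 4-byte alignment requirement
    const tag_mask: u32 = ~@as(u32, 1); // what functionPointerMask yields on Arm/MIPS32
    // Only bit 1 is still checked; bit 0 (the Thumb/MIPS16 tag) is ignored.
    try std.testing.expectEqual(@as(u32, 0b010), align_bits & tag_mask);
}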
11 changes: 11 additions & 0 deletions test/behavior/align.zig
@@ -613,3 +613,14 @@ test "zero-bit fields in extern struct pad fields appropriately" {
try expect(@intFromPtr(&s.y) == @intFromPtr(&s.a));
try expect(@as(*S, @fieldParentPtr("a", &s.a)) == &s);
}

test "function pointer @intFromPtr/@ptrFromInt roundtrip" {
// This only succeeds on Thumb if we handle the Thumb bit correctly; if not, the `@ptrFromInt`
// will incorrectly trip an alignment safety check.

const nothing_ptr: *const fn () callconv(.c) void = &nothing;
const nothing_int: usize = @intFromPtr(nothing_ptr);
const nothing_ptr2: *const fn () callconv(.c) void = @ptrFromInt(nothing_int);

try std.testing.expectEqual(nothing_ptr, nothing_ptr2);
}
