Merge pull request #22488 from Rexicon226/ubsan-rt

implement a ubsan runtime for better error messages
Commit c45dcd013b by Andrew Kelley, 2025-02-26 03:08:36 -05:00 (committed via GitHub).
23 changed files with 958 additions and 28 deletions


@ -40,6 +40,7 @@ compress_debug_sections: enum { none, zlib, zstd } = .none,
verbose_link: bool,
verbose_cc: bool,
bundle_compiler_rt: ?bool = null,
bundle_ubsan_rt: ?bool = null,
rdynamic: bool,
import_memory: bool = false,
export_memory: bool = false,
@ -1563,6 +1564,7 @@ fn getZigArgs(compile: *Compile, fuzz: bool) ![][]const u8 {
}
try addFlag(&zig_args, "compiler-rt", compile.bundle_compiler_rt);
try addFlag(&zig_args, "ubsan-rt", compile.bundle_ubsan_rt);
try addFlag(&zig_args, "dll-export-fns", compile.dll_export_fns);
if (compile.rdynamic) {
try zig_args.append("-rdynamic");
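In a build script, the new option sits alongside `bundle_compiler_rt` and maps to the `-fubsan-rt`/`-fno-ubsan-rt` flags added below; a minimal sketch (artifact name and root file are hypothetical):

const std = @import("std");

pub fn build(b: *std.Build) void {
    const exe = b.addExecutable(.{
        .name = "demo",
        .root_module = b.createModule(.{
            .root_source_file = b.path("main.zig"),
            .target = b.standardTargetOptions(.{}),
            .optimize = b.standardOptimizeOption(.{}),
        }),
    });
    // null (the default) lets the compiler decide based on sanitize_c;
    // true forces the runtime in, false keeps it out.
    exe.bundle_ubsan_rt = false;
    b.installArtifact(exe);
}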


@ -42,10 +42,8 @@ pub var next_mmap_addr_hint: ?[*]align(page_size_min) u8 = null;
///
/// On many systems, the actual page size can only be determined at runtime
/// with `pageSize`.
pub const page_size_min: usize = std.options.page_size_min orelse (page_size_min_default orelse if (builtin.os.tag == .freestanding or builtin.os.tag == .other)
@compileError("freestanding/other page_size_min must provided with std.options.page_size_min")
else
@compileError(@tagName(builtin.cpu.arch) ++ "-" ++ @tagName(builtin.os.tag) ++ " has unknown page_size_min; populate std.options.page_size_min"));
pub const page_size_min: usize = std.options.page_size_min orelse page_size_min_default orelse
@compileError(@tagName(builtin.cpu.arch) ++ "-" ++ @tagName(builtin.os.tag) ++ " has unknown page_size_min; populate std.options.page_size_min");
/// comptime-known maximum page size of the target.
///
@ -831,8 +829,10 @@ const page_size_min_default: ?usize = switch (builtin.os.tag) {
.xtensa => 4 << 10,
else => null,
},
.freestanding => switch (builtin.cpu.arch) {
.freestanding, .other => switch (builtin.cpu.arch) {
.wasm32, .wasm64 => 64 << 10,
.x86, .x86_64 => 4 << 10,
.aarch64, .aarch64_be => 4 << 10,
else => null,
},
else => null,
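// For a target the table above does not know (e.g. freestanding), user code
// supplies the value through the standard options override. A sketch of what
// a root source file would declare, where 4 KiB is an assumed platform value:
pub const std_options: std.Options = .{
    .page_size_min = 4 << 10,
};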


@ -1098,12 +1098,12 @@ pub fn indexOfSentinel(comptime T: type, comptime sentinel: T, p: [*:sentinel]co
// as we don't read into a new page. This should be the case for most architectures
// which use paged memory; however, this should be confirmed before adding a new arch below.
.aarch64, .x86, .x86_64 => if (std.simd.suggestVectorLength(T)) |block_len| {
const page_size = std.heap.pageSize();
const page_size = std.heap.page_size_min;
const block_size = @sizeOf(T) * block_len;
const Block = @Vector(block_len, T);
const mask: Block = @splat(sentinel);
comptime assert(std.heap.page_size_max % @sizeOf(Block) == 0);
comptime assert(std.heap.page_size_min % @sizeOf(Block) == 0);
assert(page_size % @sizeOf(Block) == 0);
// First block may be unaligned
@ -1119,6 +1119,7 @@ pub fn indexOfSentinel(comptime T: type, comptime sentinel: T, p: [*:sentinel]co
i += @divExact(std.mem.alignForward(usize, start_addr, block_size) - start_addr, @sizeOf(T));
} else {
@branchHint(.unlikely);
// Would read over a page boundary. Scan one element at a time until aligned or the sentinel is found.
// 0.39% chance this branch is taken for 4K pages at 16b block length.
//
@ -1152,7 +1153,7 @@ pub fn indexOfSentinel(comptime T: type, comptime sentinel: T, p: [*:sentinel]co
test "indexOfSentinel vector paths" {
const Types = [_]type{ u8, u16, u32, u64 };
const allocator = std.testing.allocator;
const page_size = std.heap.pageSize();
const page_size = std.heap.page_size_min;
inline for (Types) |T| {
const block_len = std.simd.suggestVectorLength(T) orelse continue;
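// The comptime asserts above are what make the vector loads safe: a read of
// block_size bytes cannot straddle a page when it starts block-aligned and the
// page size is a multiple of the block size. A sketch of that invariant
// (hypothetical helper, not part of the patch):
fn staysInPage(addr: usize, page: usize, block_size: usize) bool {
    return addr % page + block_size <= page;
}

test staysInPage {
    // A block-aligned start is safe anywhere within the page...
    try std.testing.expect(staysInPage(0x1000, 4096, 16));
    // ...while an unaligned start near the page end would cross into the next page.
    try std.testing.expect(!staysInPage(0x1ff8, 4096, 16));
}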

lib/ubsan_rt.zig (new file, 711 lines)

@ -0,0 +1,711 @@
const std = @import("std");
const builtin = @import("builtin");
const assert = std.debug.assert;
const panic = std.debug.panicExtra;
const SourceLocation = extern struct {
file_name: ?[*:0]const u8,
line: u32,
col: u32,
};
const TypeDescriptor = extern struct {
kind: Kind,
info: Info,
// Followed inline by the type name: [*:0]u8 (see getName).
const Kind = enum(u16) {
integer = 0x0000,
float = 0x0001,
unknown = 0xFFFF,
};
const Info = extern union {
integer: packed struct(u16) {
signed: bool,
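// Log2 of the bit width, per the UBSan ABI; getIntegerSize recovers the actual count.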
bit_width: u15,
},
float: u16,
};
fn getIntegerSize(desc: TypeDescriptor) u64 {
assert(desc.kind == .integer);
const bit_width = desc.info.integer.bit_width;
return @as(u64, 1) << @intCast(bit_width);
}
fn isSigned(desc: TypeDescriptor) bool {
return desc.kind == .integer and desc.info.integer.signed;
}
fn getName(desc: *const TypeDescriptor) [:0]const u8 {
return std.mem.span(@as([*:0]const u8, @ptrCast(desc)) + @sizeOf(TypeDescriptor));
}
};
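// Illustration (hypothetical test, not part of the runtime): the compiler
// emits the name bytes immediately after the descriptor, which is the layout
// getName's pointer arithmetic walks past.
test "TypeDescriptor trailing name" {
    var blob: [@sizeOf(TypeDescriptor) + 6]u8 align(@alignOf(TypeDescriptor)) = [_]u8{0} ** (@sizeOf(TypeDescriptor) + 6);
    const td: *TypeDescriptor = @ptrCast(&blob);
    td.kind = .integer;
    td.info = .{ .integer = .{ .signed = true, .bit_width = 5 } }; // log2(32)
    @memcpy(blob[@sizeOf(TypeDescriptor)..][0..5], "'i32'");
    try std.testing.expectEqualStrings("'i32'", td.getName());
}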
const ValueHandle = *const opaque {};
const Value = extern struct {
td: *const TypeDescriptor,
handle: ValueHandle,
fn getUnsignedInteger(value: Value) u128 {
assert(!value.td.isSigned());
const size = value.td.getIntegerSize();
const max_inline_size = @bitSizeOf(ValueHandle);
if (size <= max_inline_size) {
return @intFromPtr(value.handle);
}
return switch (size) {
64 => @as(*const u64, @alignCast(@ptrCast(value.handle))).*,
128 => @as(*const u128, @alignCast(@ptrCast(value.handle))).*,
else => @trap(),
};
}
fn getSignedInteger(value: Value) i128 {
assert(value.td.isSigned());
const size = value.td.getIntegerSize();
const max_inline_size = @bitSizeOf(ValueHandle);
if (size <= max_inline_size) {
const extra_bits: std.math.Log2Int(usize) = @intCast(max_inline_size - size);
const handle: isize = @bitCast(@intFromPtr(value.handle));
return (handle << extra_bits) >> extra_bits;
}
return switch (size) {
64 => @as(*const i64, @alignCast(@ptrCast(value.handle))).*,
128 => @as(*const i128, @alignCast(@ptrCast(value.handle))).*,
else => @trap(),
};
}
fn getFloat(value: Value) f128 {
assert(value.td.kind == .float);
const size = value.td.info.float;
const max_inline_size = @bitSizeOf(ValueHandle);
if (size <= max_inline_size) {
return @as(switch (@bitSizeOf(usize)) {
32 => f32,
64 => f64,
else => @compileError("unsupported target"),
}, @bitCast(@intFromPtr(value.handle)));
}
return @floatCast(switch (size) {
64 => @as(*const f64, @alignCast(@ptrCast(value.handle))).*,
80 => @as(*const f80, @alignCast(@ptrCast(value.handle))).*,
128 => @as(*const f128, @alignCast(@ptrCast(value.handle))).*,
else => @trap(),
});
}
fn isMinusOne(value: Value) bool {
return value.td.isSigned() and
value.getSignedInteger() == -1;
}
fn isNegative(value: Value) bool {
return value.td.isSigned() and
value.getSignedInteger() < 0;
}
fn getPositiveInteger(value: Value) u128 {
if (value.td.isSigned()) {
const signed = value.getSignedInteger();
assert(signed >= 0);
return @intCast(signed);
} else {
return value.getUnsignedInteger();
}
}
pub fn format(
value: Value,
comptime fmt: []const u8,
_: std.fmt.FormatOptions,
writer: anytype,
) !void {
comptime assert(fmt.len == 0);
// Work around x86_64 backend limitation.
if (builtin.zig_backend == .stage2_x86_64 and builtin.os.tag == .windows) {
try writer.writeAll("(unknown)");
return;
}
switch (value.td.kind) {
.integer => {
if (value.td.isSigned()) {
try writer.print("{}", .{value.getSignedInteger()});
} else {
try writer.print("{}", .{value.getUnsignedInteger()});
}
},
.float => try writer.print("{}", .{value.getFloat()}),
.unknown => try writer.writeAll("(unknown)"),
}
}
};
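// Illustration (hypothetical test): a value whose type fits in a pointer is
// passed inline in the handle itself, so getSignedInteger must sign-extend
// it out of the low bits.
test "inline signed handle" {
    const td: TypeDescriptor = .{
        .kind = .integer,
        .info = .{ .integer = .{ .signed = true, .bit_width = 5 } }, // a 32-bit int
    };
    // A 32-bit -7 arrives zero-extended into the handle's low bits.
    const raw: usize = @as(u32, @bitCast(@as(i32, -7)));
    const value: Value = .{ .td = &td, .handle = @ptrFromInt(raw) };
    try std.testing.expectEqual(@as(i128, -7), value.getSignedInteger());
}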
const OverflowData = extern struct {
loc: SourceLocation,
td: *const TypeDescriptor,
};
fn overflowHandler(
comptime sym_name: []const u8,
comptime operator: []const u8,
) void {
const S = struct {
fn abort(
data: *const OverflowData,
lhs_handle: ValueHandle,
rhs_handle: ValueHandle,
) callconv(.c) noreturn {
handler(data, lhs_handle, rhs_handle);
}
fn handler(
data: *const OverflowData,
lhs_handle: ValueHandle,
rhs_handle: ValueHandle,
) callconv(.c) noreturn {
const lhs: Value = .{ .handle = lhs_handle, .td = data.td };
const rhs: Value = .{ .handle = rhs_handle, .td = data.td };
const is_signed = data.td.isSigned();
const fmt = "{s} integer overflow: " ++ "{} " ++
operator ++ " {} cannot be represented in type {s}";
panic(@returnAddress(), fmt, .{
if (is_signed) "signed" else "unsigned",
lhs,
rhs,
data.td.getName(),
});
}
};
exportHandlerWithAbort(&S.handler, &S.abort, sym_name);
}
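// For example (sketch), an overflowing 32-bit signed addition in C reports:
//   signed integer overflow: 2147483647 + 1 cannot be represented in type 'int'
// via the __ubsan_handle_add_overflow / __ubsan_handle_add_overflow_abort pair.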
fn negationHandlerAbort(
data: *const OverflowData,
value_handle: ValueHandle,
) callconv(.c) noreturn {
negationHandler(data, value_handle);
}
fn negationHandler(
data: *const OverflowData,
value_handle: ValueHandle,
) callconv(.c) noreturn {
const value: Value = .{ .handle = value_handle, .td = data.td };
panic(
@returnAddress(),
"negation of {} cannot be represented in type {s}",
.{ value, data.td.getName() },
);
}
fn divRemHandlerAbort(
data: *const OverflowData,
lhs_handle: ValueHandle,
rhs_handle: ValueHandle,
) callconv(.c) noreturn {
divRemHandler(data, lhs_handle, rhs_handle);
}
fn divRemHandler(
data: *const OverflowData,
lhs_handle: ValueHandle,
rhs_handle: ValueHandle,
) callconv(.c) noreturn {
const lhs: Value = .{ .handle = lhs_handle, .td = data.td };
const rhs: Value = .{ .handle = rhs_handle, .td = data.td };
if (rhs.isMinusOne()) {
panic(
@returnAddress(),
"division of {} by -1 cannot be represented in type {s}",
.{ lhs, data.td.getName() },
);
} else panic(@returnAddress(), "division by zero", .{});
}
const AlignmentAssumptionData = extern struct {
loc: SourceLocation,
assumption_loc: SourceLocation,
td: *const TypeDescriptor,
};
fn alignmentAssumptionHandlerAbort(
data: *const AlignmentAssumptionData,
pointer: ValueHandle,
alignment_handle: ValueHandle,
maybe_offset: ?ValueHandle,
) callconv(.c) noreturn {
alignmentAssumptionHandler(
data,
pointer,
alignment_handle,
maybe_offset,
);
}
fn alignmentAssumptionHandler(
data: *const AlignmentAssumptionData,
pointer: ValueHandle,
alignment_handle: ValueHandle,
maybe_offset: ?ValueHandle,
) callconv(.c) noreturn {
const real_pointer = @intFromPtr(pointer) - @intFromPtr(maybe_offset);
const lsb = @ctz(real_pointer);
const actual_alignment = @as(u64, 1) << @intCast(lsb);
const mask = @intFromPtr(alignment_handle) - 1;
const misalignment_offset = real_pointer & mask;
const alignment: Value = .{ .handle = alignment_handle, .td = data.td };
if (maybe_offset) |offset| {
panic(
@returnAddress(),
"assumption of {} byte alignment (with offset of {} byte) for pointer of type {s} failed\n" ++
"offset address is {} aligned, misalignment offset is {} bytes",
.{
alignment,
@intFromPtr(offset),
data.td.getName(),
actual_alignment,
misalignment_offset,
},
);
} else {
panic(
@returnAddress(),
"assumption of {} byte alignment for pointer of type {s} failed\n" ++
"address is {} aligned, misalignment offset is {} bytes",
.{
alignment,
data.td.getName(),
actual_alignment,
misalignment_offset,
},
);
}
}
const ShiftOobData = extern struct {
loc: SourceLocation,
lhs_type: *const TypeDescriptor,
rhs_type: *const TypeDescriptor,
};
fn shiftOobAbort(
data: *const ShiftOobData,
lhs_handle: ValueHandle,
rhs_handle: ValueHandle,
) callconv(.c) noreturn {
shiftOob(data, lhs_handle, rhs_handle);
}
fn shiftOob(
data: *const ShiftOobData,
lhs_handle: ValueHandle,
rhs_handle: ValueHandle,
) callconv(.c) noreturn {
const lhs: Value = .{ .handle = lhs_handle, .td = data.lhs_type };
const rhs: Value = .{ .handle = rhs_handle, .td = data.rhs_type };
if (rhs.isNegative() or
rhs.getPositiveInteger() >= data.lhs_type.getIntegerSize())
{
if (rhs.isNegative()) {
panic(@returnAddress(), "shift exponent {} is negative", .{rhs});
} else {
panic(
@returnAddress(),
"shift exponent {} is too large for {}-bit type {s}",
.{ rhs, data.lhs_type.getIntegerSize(), data.lhs_type.getName() },
);
}
} else {
if (lhs.isNegative()) {
panic(@returnAddress(), "left shift of negative value {}", .{lhs});
} else {
panic(
@returnAddress(),
"left shift of {} by {} places cannot be represented in type {s}",
.{ lhs, rhs, data.lhs_type.getName() },
);
}
}
}
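// Example messages this produces (sketch):
//   shift exponent 35 is too large for 32-bit type 'int'
//   left shift of 1 by 31 places cannot be represented in type 'int'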
const OutOfBoundsData = extern struct {
loc: SourceLocation,
array_type: *const TypeDescriptor,
index_type: *const TypeDescriptor,
};
fn outOfBoundsAbort(
data: *const OutOfBoundsData,
index_handle: ValueHandle,
) callconv(.c) noreturn {
outOfBounds(data, index_handle);
}
fn outOfBounds(
data: *const OutOfBoundsData,
index_handle: ValueHandle,
) callconv(.c) noreturn {
const index: Value = .{ .handle = index_handle, .td = data.index_type };
panic(
@returnAddress(),
"index {} out of bounds for type {s}",
.{ index, data.array_type.getName() },
);
}
const PointerOverflowData = extern struct {
loc: SourceLocation,
};
fn pointerOverflowAbort(
data: *const PointerOverflowData,
base: usize,
result: usize,
) callconv(.c) noreturn {
pointerOverflow(data, base, result);
}
fn pointerOverflow(
_: *const PointerOverflowData,
base: usize,
result: usize,
) callconv(.c) noreturn {
if (base == 0) {
if (result == 0) {
panic(@returnAddress(), "applying zero offset to null pointer", .{});
} else {
panic(@returnAddress(), "applying non-zero offset {} to null pointer", .{result});
}
} else {
if (result == 0) {
panic(
@returnAddress(),
"applying non-zero offset to non-null pointer 0x{x} produced null pointer",
.{base},
);
} else {
const signed_base: isize = @bitCast(base);
const signed_result: isize = @bitCast(result);
if ((signed_base >= 0) == (signed_result >= 0)) {
if (base > result) {
panic(
@returnAddress(),
"addition of unsigned offset to 0x{x} overflowed to 0x{x}",
.{ base, result },
);
} else {
panic(
@returnAddress(),
"subtraction of unsigned offset to 0x{x} overflowed to 0x{x}",
.{ base, result },
);
}
} else {
panic(
@returnAddress(),
"pointer index expression with base 0x{x} overflowed to 0x{x}",
.{ base, result },
);
}
}
}
}
const TypeMismatchData = extern struct {
loc: SourceLocation,
td: *const TypeDescriptor,
log_alignment: u8,
kind: enum(u8) {
load,
store,
reference_binding,
member_access,
member_call,
constructor_call,
downcast_pointer,
downcast_reference,
upcast,
upcast_to_virtual_base,
nonnull_assign,
dynamic_operation,
fn getName(kind: @This()) []const u8 {
return switch (kind) {
.load => "load of",
.store => "store of",
.reference_binding => "reference binding to",
.member_access => "member access within",
.member_call => "member call on",
.constructor_call => "constructor call on",
.downcast_pointer, .downcast_reference => "downcast of",
.upcast => "upcast of",
.upcast_to_virtual_base => "cast to virtual base of",
.nonnull_assign => "_Nonnull binding to",
.dynamic_operation => "dynamic operation on",
};
}
},
};
fn typeMismatchAbort(
data: *const TypeMismatchData,
pointer: ?ValueHandle,
) callconv(.c) noreturn {
typeMismatch(data, pointer);
}
fn typeMismatch(
data: *const TypeMismatchData,
pointer: ?ValueHandle,
) callconv(.c) noreturn {
const alignment = @as(usize, 1) << @intCast(data.log_alignment);
const handle: usize = @intFromPtr(pointer);
if (pointer == null) {
panic(
@returnAddress(),
"{s} null pointer of type {s}",
.{ data.kind.getName(), data.td.getName() },
);
} else if (!std.mem.isAligned(handle, alignment)) {
panic(
@returnAddress(),
"{s} misaligned address 0x{x} for type {s}, which requires {} byte alignment",
.{ data.kind.getName(), handle, data.td.getName(), alignment },
);
} else {
panic(
@returnAddress(),
"{s} address 0x{x} with insufficient space for an object of type {s}",
.{ data.kind.getName(), handle, data.td.getName() },
);
}
}
const UnreachableData = extern struct {
loc: SourceLocation,
};
fn builtinUnreachable(_: *const UnreachableData) callconv(.c) noreturn {
panic(@returnAddress(), "execution reached an unreachable program point", .{});
}
fn missingReturn(_: *const UnreachableData) callconv(.c) noreturn {
panic(@returnAddress(), "execution reached the end of a value-returning function without returning a value", .{});
}
const NonNullReturnData = extern struct {
attribute_loc: SourceLocation,
};
fn nonNullReturnAbort(data: *const NonNullReturnData) callconv(.c) noreturn {
nonNullReturn(data);
}
fn nonNullReturn(_: *const NonNullReturnData) callconv(.c) noreturn {
panic(@returnAddress(), "null pointer returned from function declared to never return null", .{});
}
const NonNullArgData = extern struct {
loc: SourceLocation,
attribute_loc: SourceLocation,
arg_index: i32,
};
fn nonNullArgAbort(data: *const NonNullArgData) callconv(.c) noreturn {
nonNullArg(data);
}
fn nonNullArg(data: *const NonNullArgData) callconv(.c) noreturn {
panic(
@returnAddress(),
"null pointer passed as argument {}, which is declared to never be null",
.{data.arg_index},
);
}
const InvalidValueData = extern struct {
loc: SourceLocation,
td: *const TypeDescriptor,
};
fn loadInvalidValueAbort(
data: *const InvalidValueData,
value_handle: ValueHandle,
) callconv(.c) noreturn {
loadInvalidValue(data, value_handle);
}
fn loadInvalidValue(
data: *const InvalidValueData,
value_handle: ValueHandle,
) callconv(.c) noreturn {
const value: Value = .{ .handle = value_handle, .td = data.td };
panic(
@returnAddress(),
"load of value {}, which is not valid for type {s}",
.{ value, data.td.getName() },
);
}
const InvalidBuiltinData = extern struct {
loc: SourceLocation,
kind: enum(u8) {
ctz,
clz,
},
};
fn invalidBuiltinAbort(data: *const InvalidBuiltinData) callconv(.c) noreturn {
invalidBuiltin(data);
}
fn invalidBuiltin(data: *const InvalidBuiltinData) callconv(.c) noreturn {
panic(
@returnAddress(),
"passing zero to {s}(), which is not a valid argument",
.{@tagName(data.kind)},
);
}
const VlaBoundNotPositive = extern struct {
loc: SourceLocation,
td: *const TypeDescriptor,
};
fn vlaBoundNotPositiveAbort(
data: *const VlaBoundNotPositive,
bound_handle: ValueHandle,
) callconv(.c) noreturn {
vlaBoundNotPositive(data, bound_handle);
}
fn vlaBoundNotPositive(
data: *const VlaBoundNotPositive,
bound_handle: ValueHandle,
) callconv(.c) noreturn {
const bound: Value = .{ .handle = bound_handle, .td = data.td };
panic(
@returnAddress(),
"variable length array bound evaluates to non-positive value {}",
.{bound},
);
}
const FloatCastOverflowData = extern struct {
from: *const TypeDescriptor,
to: *const TypeDescriptor,
};
const FloatCastOverflowDataV2 = extern struct {
loc: SourceLocation,
from: *const TypeDescriptor,
to: *const TypeDescriptor,
};
fn floatCastOverflowAbort(
data_handle: *align(8) const anyopaque,
from_handle: ValueHandle,
) callconv(.c) noreturn {
floatCastOverflow(data_handle, from_handle);
}
fn floatCastOverflow(
data_handle: *align(8) const anyopaque,
from_handle: ValueHandle,
) callconv(.c) noreturn {
// See: https://github.com/llvm/llvm-project/blob/release/19.x/compiler-rt/lib/ubsan/ubsan_handlers.cpp#L463
// for more information on this check.
const ptr: [*]const u8 = @ptrCast(data_handle);
if (@as(u16, ptr[0]) + @as(u16, ptr[1]) < 2 or ptr[0] == 0xFF or ptr[1] == 0xFF) {
const data: *const FloatCastOverflowData = @ptrCast(data_handle);
const from_value: Value = .{ .handle = from_handle, .td = data.from };
panic(@returnAddress(), "{} is outside the range of representable values of type {s}", .{
from_value, data.to.getName(),
});
} else {
const data: *const FloatCastOverflowDataV2 = @ptrCast(data_handle);
const from_value: Value = .{ .handle = from_handle, .td = data.from };
panic(@returnAddress(), "{} is outside the range of representable values of type {s}", .{
from_value, data.to.getName(),
});
}
}
fn exportHandler(
handler: anytype,
comptime sym_name: []const u8,
) void {
// Work around x86_64 backend limitation.
const linkage = if (builtin.zig_backend == .stage2_x86_64 and builtin.os.tag == .windows) .internal else .weak;
const N = "__ubsan_handle_" ++ sym_name;
@export(handler, .{ .name = N, .linkage = linkage });
}
fn exportHandlerWithAbort(
handler: anytype,
abort_handler: anytype,
comptime sym_name: []const u8,
) void {
// Work around x86_64 backend limitation.
const linkage = if (builtin.zig_backend == .stage2_x86_64 and builtin.os.tag == .windows) .internal else .weak;
{
const N = "__ubsan_handle_" ++ sym_name;
@export(handler, .{ .name = N, .linkage = linkage });
}
{
const N = "__ubsan_handle_" ++ sym_name ++ "_abort";
@export(abort_handler, .{ .name = N, .linkage = linkage });
}
}
const can_build_ubsan = switch (builtin.zig_backend) {
.stage2_riscv64 => false,
else => true,
};
comptime {
if (can_build_ubsan) {
overflowHandler("add_overflow", "+");
overflowHandler("mul_overflow", "*");
overflowHandler("sub_overflow", "-");
exportHandlerWithAbort(&alignmentAssumptionHandler, &alignmentAssumptionHandlerAbort, "alignment_assumption");
exportHandlerWithAbort(&divRemHandler, &divRemHandlerAbort, "divrem_overflow");
exportHandlerWithAbort(&floatCastOverflow, &floatCastOverflowAbort, "float_cast_overflow");
exportHandlerWithAbort(&invalidBuiltin, &invalidBuiltinAbort, "invalid_builtin");
exportHandlerWithAbort(&loadInvalidValue, &loadInvalidValueAbort, "load_invalid_value");
exportHandlerWithAbort(&negationHandler, &negationHandlerAbort, "negate_overflow");
exportHandlerWithAbort(&nonNullArg, &nonNullArgAbort, "nonnull_arg");
exportHandlerWithAbort(&nonNullReturn, &nonNullReturnAbort, "nonnull_return_v1");
exportHandlerWithAbort(&outOfBounds, &outOfBoundsAbort, "out_of_bounds");
exportHandlerWithAbort(&pointerOverflow, &pointerOverflowAbort, "pointer_overflow");
exportHandlerWithAbort(&shiftOob, &shiftOobAbort, "shift_out_of_bounds");
exportHandlerWithAbort(&typeMismatch, &typeMismatchAbort, "type_mismatch_v1");
exportHandlerWithAbort(&vlaBoundNotPositive, &vlaBoundNotPositiveAbort, "vla_bound_not_positive");
exportHandler(&builtinUnreachable, "builtin_unreachable");
exportHandler(&missingReturn, "missing_return");
}
// These checks are nearly impossible to replicate in Zig, as they rely on nuances
// of the Itanium C++ ABI.
// exportHandlerWithAbort(&dynamicTypeCacheMiss, &dynamicTypeCacheMissAbort, "dynamic-type-cache-miss");
// exportHandlerWithAbort(&vptrTypeCache, &vptrTypeCacheAbort, "vptr-type-cache");
// We disable -fsanitize=function for reasons explained in src/Compilation.zig.
// exportHandlerWithAbort(&functionTypeMismatch, &functionTypeMismatchAbort, "function-type-mismatch");
// exportHandlerWithAbort(&functionTypeMismatchV1, &functionTypeMismatchV1Abort, "function-type-mismatch-v1");
}


@ -78,7 +78,8 @@ implib_emit: ?Path,
/// This is non-null when `-femit-docs` is provided.
docs_emit: ?Path,
root_name: [:0]const u8,
include_compiler_rt: bool,
compiler_rt_strat: RtStrat,
ubsan_rt_strat: RtStrat,
/// Resolved into known paths, any GNU ld scripts already resolved.
link_inputs: []const link.Input,
/// Needed only for passing -F args to clang.
@ -226,6 +227,12 @@ libunwind_static_lib: ?CrtFile = null,
/// Populated when we build the TSAN library. A Job to build this is placed in the queue
/// and resolved before calling linker.flush().
tsan_lib: ?CrtFile = null,
/// Populated when we build the UBSAN library. A Job to build this is placed in the queue
/// and resolved before calling linker.flush().
ubsan_rt_lib: ?CrtFile = null,
/// Populated when we build the UBSAN object. A Job to build this is placed in the queue
/// and resolved before calling linker.flush().
ubsan_rt_obj: ?CrtFile = null,
/// Populated when we build the libc static library. A Job to build this is placed in the queue
/// and resolved before calling linker.flush().
libc_static_lib: ?CrtFile = null,
@ -283,6 +290,8 @@ digest: ?[Cache.bin_digest_len]u8 = null,
const QueuedJobs = struct {
compiler_rt_lib: bool = false,
compiler_rt_obj: bool = false,
ubsan_rt_lib: bool = false,
ubsan_rt_obj: bool = false,
fuzzer_lib: bool = false,
update_builtin_zig: bool,
musl_crt_file: [@typeInfo(musl.CrtFile).@"enum".fields.len]bool = @splat(false),
@ -789,6 +798,7 @@ pub const MiscTask = enum {
libcxx,
libcxxabi,
libtsan,
libubsan,
libfuzzer,
wasi_libc_crt_file,
compiler_rt,
@ -1064,6 +1074,7 @@ pub const CreateOptions = struct {
/// Position Independent Executable. If the output mode is not an
/// executable this field is ignored.
want_compiler_rt: ?bool = null,
want_ubsan_rt: ?bool = null,
want_lto: ?bool = null,
function_sections: bool = false,
data_sections: bool = false,
@ -1245,6 +1256,8 @@ fn addModuleTableToCacheHash(
}
}
const RtStrat = enum { none, lib, obj, zcu };
pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compilation {
const output_mode = options.config.output_mode;
const is_dyn_lib = switch (output_mode) {
@ -1276,6 +1289,7 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil
const any_unwind_tables = options.config.any_unwind_tables or options.root_mod.unwind_tables != .none;
const any_non_single_threaded = options.config.any_non_single_threaded or !options.root_mod.single_threaded;
const any_sanitize_thread = options.config.any_sanitize_thread or options.root_mod.sanitize_thread;
const any_sanitize_c = options.config.any_sanitize_c or options.root_mod.sanitize_c;
const any_fuzz = options.config.any_fuzz or options.root_mod.fuzz;
const link_eh_frame_hdr = options.link_eh_frame_hdr or any_unwind_tables;
@ -1294,10 +1308,16 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil
const sysroot = options.sysroot orelse libc_dirs.sysroot;
const include_compiler_rt = options.want_compiler_rt orelse
(!options.skip_linker_dependencies and is_exe_or_dyn_lib);
const compiler_rt_strat: RtStrat = s: {
if (options.skip_linker_dependencies) break :s .none;
const want = options.want_compiler_rt orelse is_exe_or_dyn_lib;
if (!want) break :s .none;
if (have_zcu and output_mode == .Obj) break :s .zcu;
if (is_exe_or_dyn_lib) break :s .lib;
break :s .obj;
};
if (include_compiler_rt and output_mode == .Obj) {
if (compiler_rt_strat == .zcu) {
// For objects, this mechanism relies on essentially `_ = @import("compiler-rt");`
// injected into the object.
const compiler_rt_mod = try Package.Module.create(arena, .{
@ -1323,6 +1343,38 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil
try options.root_mod.deps.putNoClobber(arena, "compiler_rt", compiler_rt_mod);
}
// Unlike compiler_rt, whenever a ZCU is present we go through the `_ = @import("ubsan_rt")`
// approach, since the ubsan runtime uses quite a lot of the standard library
// and this reduces unnecessary bloat.
const ubsan_rt_strat: RtStrat = s: {
const want_ubsan_rt = options.want_ubsan_rt orelse (any_sanitize_c and is_exe_or_dyn_lib);
if (!want_ubsan_rt) break :s .none;
if (options.skip_linker_dependencies) break :s .none;
if (have_zcu) break :s .zcu;
if (is_exe_or_dyn_lib) break :s .lib;
break :s .obj;
};
if (ubsan_rt_strat == .zcu) {
const ubsan_rt_mod = try Package.Module.create(arena, .{
.global_cache_directory = options.global_cache_directory,
.paths = .{
.root = .{
.root_dir = options.zig_lib_directory,
},
.root_src_path = "ubsan_rt.zig",
},
.fully_qualified_name = "ubsan_rt",
.cc_argv = &.{},
.inherited = .{},
.global = options.config,
.parent = options.root_mod,
.builtin_mod = options.root_mod.getBuiltinDependency(),
.builtin_modules = null, // `builtin_mod` is set
});
try options.root_mod.deps.putNoClobber(arena, "ubsan_rt", ubsan_rt_mod);
}
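// In effect, any compilation with a ZCU behaves as though the root module
// contained `_ = @import("ubsan_rt");`: the runtime is analyzed as an extra
// analysis root (see Zcu.analysis_roots) so only what the target actually
// references gets built, while ZCU-less (e.g. pure C) compilations receive
// the prebuilt lib/obj instead.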
if (options.verbose_llvm_cpu_features) {
if (options.root_mod.resolved_target.llvm_cpu_features) |cf| print: {
const target = options.root_mod.resolved_target.result;
@ -1499,7 +1551,8 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil
.windows_libs = windows_libs,
.version = options.version,
.libc_installation = libc_dirs.libc_installation,
.include_compiler_rt = include_compiler_rt,
.compiler_rt_strat = compiler_rt_strat,
.ubsan_rt_strat = ubsan_rt_strat,
.link_inputs = options.link_inputs,
.framework_dirs = options.framework_dirs,
.llvm_opt_bisect_limit = options.llvm_opt_bisect_limit,
@ -1525,6 +1578,7 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil
comp.config.any_unwind_tables = any_unwind_tables;
comp.config.any_non_single_threaded = any_non_single_threaded;
comp.config.any_sanitize_thread = any_sanitize_thread;
comp.config.any_sanitize_c = any_sanitize_c;
comp.config.any_fuzz = any_fuzz;
const lf_open_opts: link.File.OpenOptions = .{
@ -1871,24 +1925,34 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil
comp.remaining_prelink_tasks += 1;
}
if (comp.include_compiler_rt and capable_of_building_compiler_rt) {
if (is_exe_or_dyn_lib) {
if (capable_of_building_compiler_rt) {
if (comp.compiler_rt_strat == .lib) {
log.debug("queuing a job to build compiler_rt_lib", .{});
comp.queued_jobs.compiler_rt_lib = true;
comp.remaining_prelink_tasks += 1;
} else if (output_mode != .Obj) {
} else if (comp.compiler_rt_strat == .obj) {
log.debug("queuing a job to build compiler_rt_obj", .{});
// In this case we are making a static library, so we ask
// for a compiler-rt object to put in it.
comp.queued_jobs.compiler_rt_obj = true;
comp.remaining_prelink_tasks += 1;
}
}
if (is_exe_or_dyn_lib and comp.config.any_fuzz and capable_of_building_compiler_rt) {
log.debug("queuing a job to build libfuzzer", .{});
comp.queued_jobs.fuzzer_lib = true;
comp.remaining_prelink_tasks += 1;
if (comp.ubsan_rt_strat == .lib) {
log.debug("queuing a job to build ubsan_rt_lib", .{});
comp.queued_jobs.ubsan_rt_lib = true;
comp.remaining_prelink_tasks += 1;
} else if (comp.ubsan_rt_strat == .obj) {
log.debug("queuing a job to build ubsan_rt_obj", .{});
comp.queued_jobs.ubsan_rt_obj = true;
comp.remaining_prelink_tasks += 1;
}
if (is_exe_or_dyn_lib and comp.config.any_fuzz) {
log.debug("queuing a job to build libfuzzer", .{});
comp.queued_jobs.fuzzer_lib = true;
comp.remaining_prelink_tasks += 1;
}
}
}
@ -1937,9 +2001,16 @@ pub fn destroy(comp: *Compilation) void {
if (comp.compiler_rt_obj) |*crt_file| {
crt_file.deinit(gpa);
}
if (comp.ubsan_rt_lib) |*crt_file| {
crt_file.deinit(gpa);
}
if (comp.ubsan_rt_obj) |*crt_file| {
crt_file.deinit(gpa);
}
if (comp.fuzzer_lib) |*crt_file| {
crt_file.deinit(gpa);
}
if (comp.libc_static_lib) |*crt_file| {
crt_file.deinit(gpa);
}
@ -2207,6 +2278,10 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) !void {
_ = try pt.importPkg(zcu.main_mod);
}
if (zcu.root_mod.deps.get("ubsan_rt")) |ubsan_rt_mod| {
_ = try pt.importPkg(ubsan_rt_mod);
}
if (zcu.root_mod.deps.get("compiler_rt")) |compiler_rt_mod| {
_ = try pt.importPkg(compiler_rt_mod);
}
@ -2248,6 +2323,11 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) !void {
try comp.queueJob(.{ .analyze_mod = compiler_rt_mod });
zcu.analysis_roots.appendAssumeCapacity(compiler_rt_mod);
}
if (zcu.root_mod.deps.get("ubsan_rt")) |ubsan_rt_mod| {
try comp.queueJob(.{ .analyze_mod = ubsan_rt_mod });
zcu.analysis_roots.appendAssumeCapacity(ubsan_rt_mod);
}
}
try comp.performAllTheWork(main_progress_node);
@ -2592,7 +2672,8 @@ fn addNonIncrementalStuffToCacheManifest(
man.hash.addOptional(comp.version);
man.hash.add(comp.link_eh_frame_hdr);
man.hash.add(comp.skip_linker_dependencies);
man.hash.add(comp.include_compiler_rt);
man.hash.add(comp.compiler_rt_strat);
man.hash.add(comp.ubsan_rt_strat);
man.hash.add(comp.rc_includes);
man.hash.addListOfBytes(comp.force_undefined_symbols.keys());
man.hash.addListOfBytes(comp.framework_dirs);
@ -3683,6 +3764,14 @@ fn performAllTheWorkInner(
comp.link_task_wait_group.spawnManager(buildRt, .{ comp, "fuzzer.zig", .libfuzzer, .Lib, true, &comp.fuzzer_lib, main_progress_node });
}
if (comp.queued_jobs.ubsan_rt_lib and comp.ubsan_rt_lib == null) {
comp.link_task_wait_group.spawnManager(buildRt, .{ comp, "ubsan_rt.zig", .libubsan, .Lib, false, &comp.ubsan_rt_lib, main_progress_node });
}
if (comp.queued_jobs.ubsan_rt_obj and comp.ubsan_rt_obj == null) {
comp.link_task_wait_group.spawnManager(buildRt, .{ comp, "ubsan_rt.zig", .libubsan, .Obj, false, &comp.ubsan_rt_obj, main_progress_node });
}
if (comp.queued_jobs.glibc_shared_objects) {
comp.link_task_wait_group.spawnManager(buildGlibcSharedObjects, .{ comp, main_progress_node });
}
@ -5916,7 +6005,11 @@ pub fn addCCArgs(
// These args have to be added after the `-fsanitize` arg or
// they won't take effect.
if (mod.sanitize_c) {
try argv.append("-fsanitize-trap=undefined");
// This check requires implementing the Itanium C++ ABI.
// We would otherwise use `-fsanitize-trap=vptr`; however, even trapping
// requires a full runtime due to the type hashing involved.
try argv.append("-fno-sanitize=vptr");
// It is very common, and well-defined, for a pointer on one side of a C ABI
// to have a different but compatible element type. Examples include:
// `char*` vs `uint8_t*` on a system with 8-bit bytes
@ -5925,6 +6018,19 @@ pub fn addCCArgs(
// Without this flag, Clang would invoke UBSAN when such an extern
// function was called.
try argv.append("-fno-sanitize=function");
// It's recommended to use the minimal runtime in production environments
// due to the security implications of the full runtime. The minimal runtime
// doesn't provide much benefit over simply trapping.
if (mod.optimize_mode == .ReleaseSafe) {
try argv.append("-fsanitize-trap=undefined");
}
// This is necessary because, by default, Clang instructs LLVM to embed a COFF link
// dependency on `libclang_rt.ubsan_standalone.a` when the UBSan runtime is used.
if (target.os.tag == .windows) {
try argv.append("-fno-rtlib-defaultlib");
}
}
}
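// Net effect for a C source with sanitize_c enabled (sketch; assumes the
// earlier code already appended -fsanitize=undefined):
//   -fsanitize=undefined -fno-sanitize=vptr -fno-sanitize=function
//   plus -fsanitize-trap=undefined in ReleaseSafe,
//   plus -fno-rtlib-defaultlib when targeting Windows.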


@ -32,6 +32,7 @@ any_non_single_threaded: bool,
/// per-Module setting.
any_error_tracing: bool,
any_sanitize_thread: bool,
any_sanitize_c: bool,
any_fuzz: bool,
pie: bool,
/// If this is true then linker code is responsible for making an LLVM IR
@ -87,6 +88,7 @@ pub const Options = struct {
ensure_libcpp_on_non_freestanding: bool = false,
any_non_single_threaded: bool = false,
any_sanitize_thread: bool = false,
any_sanitize_c: bool = false,
any_fuzz: bool = false,
any_unwind_tables: bool = false,
any_dyn_libs: bool = false,
@ -476,6 +478,7 @@ pub fn resolve(options: Options) ResolveError!Config {
.any_non_single_threaded = options.any_non_single_threaded,
.any_error_tracing = any_error_tracing,
.any_sanitize_thread = options.any_sanitize_thread,
.any_sanitize_c = options.any_sanitize_c,
.any_fuzz = options.any_fuzz,
.san_cov_trace_pc_guard = options.san_cov_trace_pc_guard,
.root_error_tracing = root_error_tracing,


@ -175,7 +175,7 @@ nav_val_analysis_queued: std.AutoArrayHashMapUnmanaged(InternPool.Nav.Index, voi
/// These are the modules which we initially queue for analysis in `Compilation.update`.
/// `resolveReferences` will use these as the root of its reachability traversal.
analysis_roots: std.BoundedArray(*Package.Module, 3) = .{},
analysis_roots: std.BoundedArray(*Package.Module, 4) = .{},
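// Bumped from 3 to 4: the possible roots are std, the root module, and the
// optionally injected compiler_rt and ubsan_rt modules.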
/// This is the cached result of `Zcu.resolveReferences`. It is computed on-demand, and
/// reset to `null` when any semantic analysis occurs (since this invalidates the data).
/// Allocated into `gpa`.


@ -1102,11 +1102,16 @@ pub const File = struct {
log.debug("zcu_obj_path={s}", .{if (zcu_obj_path) |s| s else "(null)"});
const compiler_rt_path: ?Path = if (comp.include_compiler_rt)
const compiler_rt_path: ?Path = if (comp.compiler_rt_strat == .obj)
comp.compiler_rt_obj.?.full_object_path
else
null;
const ubsan_rt_path: ?Path = if (comp.ubsan_rt_strat == .obj)
comp.ubsan_rt_obj.?.full_object_path
else
null;
// This function follows the same pattern as link.Elf.linkWithLLD so if you want some
// insight as to what's going on here you can read that function body which is more
// well-commented.
@ -1136,6 +1141,7 @@ pub const File = struct {
}
try man.addOptionalFile(zcu_obj_path);
try man.addOptionalFilePath(compiler_rt_path);
try man.addOptionalFilePath(ubsan_rt_path);
// We don't actually care whether it's a cache hit or miss; we just need the digest and the lock.
_ = try man.hit();
@ -1181,6 +1187,7 @@ pub const File = struct {
}
if (zcu_obj_path) |p| object_files.appendAssumeCapacity(try arena.dupeZ(u8, p));
if (compiler_rt_path) |p| object_files.appendAssumeCapacity(try p.toStringZ(arena));
if (ubsan_rt_path) |p| object_files.appendAssumeCapacity(try p.toStringZ(arena));
if (comp.verbose_link) {
std.debug.print("ar rcs {s}", .{full_out_path_z});


@ -2162,6 +2162,15 @@ fn linkWithLLD(coff: *Coff, arena: Allocator, tid: Zcu.PerThread.Id, prog_node:
try argv.append(try comp.fuzzer_lib.?.full_object_path.toString(arena));
}
const ubsan_rt_path: ?Path = blk: {
if (comp.ubsan_rt_lib) |x| break :blk x.full_object_path;
if (comp.ubsan_rt_obj) |x| break :blk x.full_object_path;
break :blk null;
};
if (ubsan_rt_path) |path| {
try argv.append(try path.toString(arena));
}
if (is_exe_or_dyn_lib and !comp.skip_linker_dependencies) {
if (!comp.config.link_libc) {
if (comp.libc_static_lib) |lib| {


@ -1541,6 +1541,11 @@ fn linkWithLLD(self: *Elf, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: s
if (comp.compiler_rt_obj) |x| break :blk x.full_object_path;
break :blk null;
};
const ubsan_rt_path: ?Path = blk: {
if (comp.ubsan_rt_lib) |x| break :blk x.full_object_path;
if (comp.ubsan_rt_obj) |x| break :blk x.full_object_path;
break :blk null;
};
// Here we want to determine whether we can save time by not invoking LLD when the
// output is unchanged. None of the linker options or the object files that are being
@ -1575,6 +1580,7 @@ fn linkWithLLD(self: *Elf, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: s
}
try man.addOptionalFile(module_obj_path);
try man.addOptionalFilePath(compiler_rt_path);
try man.addOptionalFilePath(ubsan_rt_path);
try man.addOptionalFilePath(if (comp.tsan_lib) |l| l.full_object_path else null);
try man.addOptionalFilePath(if (comp.fuzzer_lib) |l| l.full_object_path else null);
@ -1974,6 +1980,10 @@ fn linkWithLLD(self: *Elf, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: s
try argv.append(try lib.full_object_path.toString(arena));
}
if (ubsan_rt_path) |p| {
try argv.append(try p.toString(arena));
}
// libc
if (is_exe_or_dyn_lib and
!comp.skip_linker_dependencies and


@ -344,11 +344,21 @@ pub fn deinit(self: *MachO) void {
self.thunks.deinit(gpa);
}
pub fn flush(self: *MachO, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) link.File.FlushError!void {
pub fn flush(
self: *MachO,
arena: Allocator,
tid: Zcu.PerThread.Id,
prog_node: std.Progress.Node,
) link.File.FlushError!void {
try self.flushModule(arena, tid, prog_node);
}
pub fn flushModule(self: *MachO, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) link.File.FlushError!void {
pub fn flushModule(
self: *MachO,
arena: Allocator,
tid: Zcu.PerThread.Id,
prog_node: std.Progress.Node,
) link.File.FlushError!void {
const tracy = trace(@src());
defer tracy.end();
@ -409,6 +419,16 @@ pub fn flushModule(self: *MachO, arena: Allocator, tid: Zcu.PerThread.Id, prog_n
try positionals.append(try link.openObjectInput(diags, comp.fuzzer_lib.?.full_object_path));
}
if (comp.ubsan_rt_lib) |crt_file| {
const path = crt_file.full_object_path;
self.classifyInputFile(try link.openArchiveInput(diags, path, false, false)) catch |err|
diags.addParseError(path, "failed to parse archive: {s}", .{@errorName(err)});
} else if (comp.ubsan_rt_obj) |crt_file| {
const path = crt_file.full_object_path;
self.classifyInputFile(try link.openObjectInput(diags, path)) catch |err|
diags.addParseError(path, "failed to parse archive: {s}", .{@errorName(err)});
}
for (positionals.items) |link_input| {
self.classifyInputFile(link_input) catch |err|
diags.addParseError(link_input.path().?, "failed to read input file: {s}", .{@errorName(err)});
@ -813,6 +833,8 @@ fn dumpArgv(self: *MachO, comp: *Compilation) !void {
if (comp.compiler_rt_lib) |lib| try argv.append(try lib.full_object_path.toString(arena));
if (comp.compiler_rt_obj) |obj| try argv.append(try obj.full_object_path.toString(arena));
if (comp.ubsan_rt_lib) |lib| try argv.append(try lib.full_object_path.toString(arena));
if (comp.ubsan_rt_obj) |obj| try argv.append(try obj.full_object_path.toString(arena));
}
Compilation.dump_argv(argv.items);


@ -93,10 +93,14 @@ pub fn flushStaticLib(macho_file: *MachO, comp: *Compilation, module_obj_path: ?
if (module_obj_path) |path| try positionals.append(try link.openObjectInput(diags, path));
if (comp.include_compiler_rt) {
if (comp.compiler_rt_strat == .obj) {
try positionals.append(try link.openObjectInput(diags, comp.compiler_rt_obj.?.full_object_path));
}
if (comp.ubsan_rt_strat == .obj) {
try positionals.append(try link.openObjectInput(diags, comp.ubsan_rt_obj.?.full_object_path));
}
for (positionals.items) |link_input| {
macho_file.classifyInputFile(link_input) catch |err|
diags.addParseError(link_input.path().?, "failed to read input file: {s}", .{@errorName(err)});


@ -3879,6 +3879,11 @@ fn linkWithLLD(wasm: *Wasm, arena: Allocator, tid: Zcu.PerThread.Id, prog_node:
if (comp.compiler_rt_obj) |obj| break :blk obj.full_object_path;
break :blk null;
};
const ubsan_rt_path: ?Path = blk: {
if (comp.ubsan_rt_lib) |lib| break :blk lib.full_object_path;
if (comp.ubsan_rt_obj) |obj| break :blk obj.full_object_path;
break :blk null;
};
const id_symlink_basename = "lld.id";
@ -3901,6 +3906,7 @@ fn linkWithLLD(wasm: *Wasm, arena: Allocator, tid: Zcu.PerThread.Id, prog_node:
}
try man.addOptionalFile(module_obj_path);
try man.addOptionalFilePath(compiler_rt_path);
try man.addOptionalFilePath(ubsan_rt_path);
man.hash.addOptionalBytes(wasm.entry_name.slice(wasm));
man.hash.add(wasm.base.stack_size);
man.hash.add(wasm.base.build_id);
@ -4148,6 +4154,10 @@ fn linkWithLLD(wasm: *Wasm, arena: Allocator, tid: Zcu.PerThread.Id, prog_node:
try argv.append(try p.toString(arena));
}
if (ubsan_rt_path) |p| {
try argv.append(try p.toString(arena));
}
if (comp.verbose_link) {
// Skip over our own name so that the LLD linker name is the first argv item.
Compilation.dump_argv(argv.items[1..]);


@ -561,6 +561,8 @@ const usage_build_generic =
\\ -fno-lld Prevent using LLD as the linker
\\ -fcompiler-rt Always include compiler-rt symbols in output
\\ -fno-compiler-rt Prevent including compiler-rt symbols in output
\\ -fubsan-rt Always include ubsan-rt symbols in the output
\\ -fno-ubsan-rt Prevent including ubsan-rt symbols in the output
\\ -rdynamic Add all symbols to the dynamic symbol table
\\ -feach-lib-rpath Ensure adding rpath for each used dynamic library
\\ -fno-each-lib-rpath Prevent adding rpath for each used dynamic library
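On the command line the toggle mirrors the compiler-rt spelling; for example (a sketch): `zig build-exe main.zig -fno-ubsan-rt` keeps the runtime symbols out of the output.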
@ -849,6 +851,7 @@ fn buildOutputType(
var emit_h: Emit = .no;
var soname: SOName = undefined;
var want_compiler_rt: ?bool = null;
var want_ubsan_rt: ?bool = null;
var linker_script: ?[]const u8 = null;
var version_script: ?[]const u8 = null;
var linker_repro: ?bool = null;
@ -1376,6 +1379,10 @@ fn buildOutputType(
want_compiler_rt = true;
} else if (mem.eql(u8, arg, "-fno-compiler-rt")) {
want_compiler_rt = false;
} else if (mem.eql(u8, arg, "-fubsan-rt")) {
want_ubsan_rt = true;
} else if (mem.eql(u8, arg, "-fno-ubsan-rt")) {
want_ubsan_rt = false;
} else if (mem.eql(u8, arg, "-feach-lib-rpath")) {
create_module.each_lib_rpath = true;
} else if (mem.eql(u8, arg, "-fno-each-lib-rpath")) {
@ -3504,6 +3511,7 @@ fn buildOutputType(
.windows_lib_names = create_module.windows_libs.keys(),
.wasi_emulated_libs = create_module.wasi_emulated_libs.items,
.want_compiler_rt = want_compiler_rt,
.want_ubsan_rt = want_ubsan_rt,
.hash_style = hash_style,
.linker_script = linker_script,
.version_script = version_script,


@ -2049,6 +2049,9 @@ fn testLargeBss(b: *Build, opts: Options) *Step {
\\}
, &.{});
exe.linkLibC();
// Disabled to work around the ELF linker crashing.
// Can be reproduced on an x86_64-linux host by commenting out the line below.
exe.root_module.sanitize_c = false;
const run = addRunArtifact(exe);
run.expectExitCode(0);
@ -3552,6 +3555,9 @@ fn testTlsLargeTbss(b: *Build, opts: Options) *Step {
\\}
, &.{});
exe.linkLibC();
// Disabled to work around the ELF linker crashing.
// Can be reproduced on an x86_64-linux host by commenting out the line below.
exe.root_module.sanitize_c = false;
const run = addRunArtifact(exe);
run.expectStdOutEqual("3 0 5 0 0 0\n");


@ -22,6 +22,10 @@ pub fn build(b: *std.Build) void {
.link_libc = true,
}),
});
// We disable UBSAN for these tests as the libc being tested here is
// so old that it doesn't even support compiling our UBSAN implementation.
exe.bundle_ubsan_rt = false;
exe.root_module.sanitize_c = false;
exe.root_module.addCSourceFile(.{ .file = b.path("main.c") });
// TODO: actually test the output
_ = exe.getEmittedBin();
@ -62,6 +66,10 @@ pub fn build(b: *std.Build) void {
.link_libc = true,
}),
});
// We disable UBSAN for these tests as the libc being tested here is
// so old that it doesn't even support compiling our UBSAN implementation.
exe.bundle_ubsan_rt = false;
exe.root_module.sanitize_c = false;
exe.root_module.addCSourceFile(.{ .file = b.path("glibc_runtime_check.c") });
// Only try running the test if the host glibc is known to be good enough. Ideally, the Zig
@ -161,6 +169,10 @@ pub fn build(b: *std.Build) void {
.link_libc = true,
}),
});
// We disable UBSAN for these tests as the libc being tested here is
// so old that it doesn't even support compiling our UBSAN implementation.
exe.bundle_ubsan_rt = false;
exe.root_module.sanitize_c = false;
// Only try running the test if the host glibc is known to be good enough. Ideally, the Zig
// test runner would be able to check this, but see https://github.com/ziglang/zig/pull/17702#issuecomment-1831310453


@ -13,6 +13,9 @@ pub fn build(b: *std.Build) void {
}),
});
lib.entry = .disabled;
// Disabled to work around the Wasm linker crashing.
// Can be reproduced by commenting out the line below.
lib.bundle_ubsan_rt = false;
lib.use_lld = false;
lib.root_module.export_symbol_names = &.{ "foo", "bar" };
// Object being linked has neither functions nor globals named "foo" or "bar" and


@ -19,6 +19,8 @@ fn add(b: *std.Build, test_step: *std.Build.Step, optimize: std.builtin.Optimize
no_export.entry = .disabled;
no_export.use_llvm = false;
no_export.use_lld = false;
// Don't pull in ubsan, since we're just expecting a very minimal executable.
no_export.bundle_ubsan_rt = false;
const dynamic_export = b.addExecutable(.{
.name = "dynamic",
@ -32,6 +34,8 @@ fn add(b: *std.Build, test_step: *std.Build.Step, optimize: std.builtin.Optimize
dynamic_export.rdynamic = true;
dynamic_export.use_llvm = false;
dynamic_export.use_lld = false;
// Don't pull in ubsan, since we're just expecting a very minimal executable.
dynamic_export.bundle_ubsan_rt = false;
const force_export = b.addExecutable(.{
.name = "force",
@ -45,6 +49,8 @@ fn add(b: *std.Build, test_step: *std.Build.Step, optimize: std.builtin.Optimize
force_export.root_module.export_symbol_names = &.{"foo"};
force_export.use_llvm = false;
force_export.use_lld = false;
// Don't pull in ubsan, since we're just expecting a very minimal executable.
force_export.bundle_ubsan_rt = false;
const check_no_export = no_export.checkObject();
check_no_export.checkInHeaders();


@ -21,6 +21,8 @@ fn add(b: *std.Build, test_step: *std.Build.Step, optimize: std.builtin.Optimize
export_table.use_lld = false;
export_table.export_table = true;
export_table.link_gc_sections = false;
// Don't pull in ubsan, since we're just expecting a very minimal executable.
export_table.bundle_ubsan_rt = false;
const regular_table = b.addExecutable(.{
.name = "regular_table",
@ -34,6 +36,8 @@ fn add(b: *std.Build, test_step: *std.Build.Step, optimize: std.builtin.Optimize
regular_table.use_llvm = false;
regular_table.use_lld = false;
regular_table.link_gc_sections = false; // Ensure function table is not empty
// Don't pull in ubsan, since we're just expecting a very minimal executable.
regular_table.bundle_ubsan_rt = false;
const check_export = export_table.checkObject();
const check_regular = regular_table.checkObject();


@ -31,6 +31,8 @@ fn add(b: *std.Build, test_step: *std.Build.Step, optimize_mode: std.builtin.Opt
exe.shared_memory = true;
exe.max_memory = 67108864;
exe.root_module.export_symbol_names = &.{"foo"};
// Don't pull in ubsan, since we're just expecting a very minimal executable.
exe.bundle_ubsan_rt = false;
const check_exe = exe.checkObject();


@ -21,6 +21,8 @@ fn add(b: *std.Build, test_step: *std.Build.Step, optimize: std.builtin.Optimize
exe.use_llvm = false;
exe.use_lld = false;
exe.root_module.export_symbol_names = &.{"foo"};
// Don't pull in ubsan, since we're just expecting a very minimal executable.
exe.bundle_ubsan_rt = false;
b.installArtifact(exe);
const check_exe = exe.checkObject();


@ -81,6 +81,7 @@ fn addExpect(
}),
.use_llvm = use_llvm,
});
exe.bundle_ubsan_rt = false;
const run = b.addRunArtifact(exe);
run.removeEnvironmentVariable("CLICOLOR_FORCE");


@ -108,6 +108,7 @@ pub fn main() !void {
"build-exe",
case.root_source_file,
"-fincremental",
"-fno-ubsan-rt",
"-target",
target.query,
"--cache-dir",