Compare commits

..

1 commit

Author SHA1 Message Date
Jay Petacat
464fcac802
Sema: Allow small integer types to coerce to floats
If the float can store all possible values of the integer without
rounding, coercion is allowed. The integer's precision must be less than
or equal to the float's significand precision.

Closes #18614
2025-11-29 13:18:19 -07:00
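As an illustrative sketch (not part of the diff), and assuming the behavior this commit introduces: a u24 has 24 bits of precision, which fits exactly in f32's 24-bit significand, so it now coerces implicitly, while anything wider still requires @floatFromInt.

const std = @import("std");

test "int to float coercion by significand precision" {
    // u24 precision (24 bits) <= f32 significand precision (24 bits): implicit coercion is allowed.
    const small: u24 = 16_777_215; // largest u24 value, exactly representable in f32
    const f: f32 = small;
    try std.testing.expectEqual(@as(f32, 16_777_215.0), f);

    // A wider integer still requires an explicit cast.
    const wide: u32 = 16_777_217; // not exactly representable in f32
    const g: f32 = @floatFromInt(wide);
    try std.testing.expect(g == 16_777_216.0); // rounds, which is why implicit coercion is disallowed
}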
29 changed files with 1254 additions and 2287 deletions

View file

@ -1,6 +1,6 @@
name: Bug Report
description: File a bug report
labels: ["bug"]
labels: ["kind/bug"]
body:
- type: markdown
attributes:

View file

@ -1,6 +1,6 @@
name: Error message improvement
description: Compiler produces an unhelpful or misleading error message.
labels: ["error message"]
labels: ["kind/error message"]
body:
- type: input
id: version

View file

@ -54,7 +54,6 @@ CheckLastExitCode
Write-Output "Main test suite..."
& "stage3-debug\bin\zig.exe" build test docs `
--maxrss 32212254720 `
--zig-lib-dir "$ZIG_LIB_DIR" `
--search-prefix "$PREFIX_PATH" `
-Dstatic-llvm `

View file

@ -54,7 +54,6 @@ CheckLastExitCode
Write-Output "Main test suite..."
& "stage3-release\bin\zig.exe" build test docs `
--maxrss 32212254720 `
--zig-lib-dir "$ZIG_LIB_DIR" `
--search-prefix "$PREFIX_PATH" `
-Dstatic-llvm `

View file

@ -3452,7 +3452,7 @@ void do_a_thing(struct Foo *foo) {
{#header_close#}
{#header_open|Type Coercion: Int to Float#}
<p>
{#link|Integers#} coerce to {#link|Floats#} if every possible integer value can be stored in the float
{#link|Integers#} coerce to {#link|Floats#} if every possible integer value can be stored in the float
without rounding (i.e. the integer's precision does not exceed the float's significand precision).
Larger integer types that cannot be safely coerced must be explicitly cast with {#link|@floatFromInt#}.
</p>
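For illustration only (this sketch is not part of the changed file, and assumes the rule described above), the boundary works out as follows:

test "explicit cast required when precision exceeds the significand" {
    const wide: u32 = 123;
    // const bad: f32 = wide; // would not compile: u32 needs 32 bits, f32's significand holds 24
    const ok: f32 = @floatFromInt(wide); // the explicit cast is always available
    const also_ok: f64 = wide; // f64's 53-bit significand covers every u32 value, so this coerces
    _ = ok;
    _ = also_ok;
}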

View file

@ -620,6 +620,11 @@ pub const VTable = struct {
result: []u8,
result_alignment: std.mem.Alignment,
) void,
/// Returns whether the current thread of execution is known to have
/// been requested to cancel.
///
/// Thread-safe.
cancelRequested: *const fn (?*anyopaque) bool,
/// When this function returns, implementation guarantees that `start` has
/// either already been called, or a unit of concurrency has been assigned

File diff suppressed because it is too large

View file

@ -1333,10 +1333,6 @@ pub const Server = struct {
/// Not enough free memory. This often means that the memory allocation is limited
/// by the socket buffer limits, not by the system memory.
SystemResources,
/// Either `listen` was never called, or `shutdown` was called (possibly while
/// this call was blocking). This allows `shutdown` to be used as a concurrent
/// cancellation mechanism.
SocketNotListening,
/// The network subsystem has failed.
NetworkDown,
/// No connection is already queued and ready to be accepted, and

View file

@ -10881,23 +10881,6 @@ pub extern "c" fn pthread_create(
start_routine: *const fn (?*anyopaque) callconv(.c) ?*anyopaque,
noalias arg: ?*anyopaque,
) E;
pub const pthread_cancelstate = switch (native_os) {
.ios, .maccatalyst, .macos, .tvos, .visionos, .watchos => enum(c_int) {
ENABLE = 1,
DISABLE = 0,
},
.linux => if (native_abi.isMusl()) enum(c_int) {
ENABLE = 0,
DISABLE = 1,
MASKED = 2,
} else if (native_abi.isGnu()) enum(c_int) {
ENABLE = 0,
DISABLE = 1,
},
else => void,
};
pub extern "c" fn pthread_setcancelstate(pthread_cancelstate, ?*pthread_cancelstate) E;
pub extern "c" fn pthread_cancel(pthread_t) E;
pub extern "c" fn pthread_attr_init(attr: *pthread_attr_t) E;
pub extern "c" fn pthread_attr_setstack(attr: *pthread_attr_t, stackaddr: *anyopaque, stacksize: usize) E;
pub extern "c" fn pthread_attr_setstacksize(attr: *pthread_attr_t, stacksize: usize) E;

View file

@ -108,36 +108,6 @@ test "expand 128-bit key" {
}
}
test "invMixColumns" {
const key = [_]u8{ 0x2b, 0x7e, 0x15, 0x16, 0x28, 0xae, 0xd2, 0xa6, 0xab, 0xf7, 0x15, 0x88, 0x09, 0xcf, 0x4f, 0x3c };
const enc_ctx = Aes128.initEnc(key);
const dec_ctx = Aes128.initDec(key);
for (1..10) |i| {
const enc_rk = enc_ctx.key_schedule.round_keys[10 - i];
const dec_rk = dec_ctx.key_schedule.round_keys[i];
const computed = enc_rk.invMixColumns();
try testing.expectEqualSlices(u8, &dec_rk.toBytes(), &computed.toBytes());
}
}
test "BlockVec invMixColumns" {
const input = [_]u8{
0x5f, 0x57, 0xf7, 0x1d, 0x72, 0xf5, 0xbe, 0xb9, 0x64, 0xbc, 0x3b, 0xf9, 0x15, 0x92, 0x29, 0x1a,
0x2b, 0x7e, 0x15, 0x16, 0x28, 0xae, 0xd2, 0xa6, 0xab, 0xf7, 0x15, 0x88, 0x09, 0xcf, 0x4f, 0x3c,
};
const vec2 = BlockVec(2).fromBytes(&input);
const result_vec = vec2.invMixColumns();
const result_bytes = result_vec.toBytes();
for (0..2) |i| {
const block = Block.fromBytes(input[i * 16 ..][0..16]);
const expected = block.invMixColumns().toBytes();
try testing.expectEqualSlices(u8, &expected, result_bytes[i * 16 ..][0..16]);
}
}
test "expand 256-bit key" {
const key = [_]u8{
0x60, 0x3d, 0xeb, 0x10,

View file

@ -96,17 +96,6 @@ pub const Block = struct {
return Block{ .repr = block1.repr | block2.repr };
}
/// Apply the inverse MixColumns operation to a block.
pub fn invMixColumns(block: Block) Block {
return Block{
.repr = asm (
\\ vaesimc %[in], %[out]
: [out] "=x" (-> Repr),
: [in] "x" (block.repr),
),
};
}
/// Perform operations on multiple blocks in parallel.
pub const parallel = struct {
const cpu = std.Target.x86.cpu;
@ -319,17 +308,6 @@ pub fn BlockVec(comptime blocks_count: comptime_int) type {
}
return out;
}
/// Apply the inverse MixColumns operation to each block in the vector.
pub fn invMixColumns(block_vec: Self) Self {
var out_bytes: [blocks_count * 16]u8 = undefined;
const in_bytes = block_vec.toBytes();
inline for (0..blocks_count) |i| {
const block = Block.fromBytes(in_bytes[i * 16 ..][0..16]);
out_bytes[i * 16 ..][0..16].* = block.invMixColumns().toBytes();
}
return fromBytes(&out_bytes);
}
};
}

View file

@ -99,17 +99,6 @@ pub const Block = struct {
return Block{ .repr = block1.repr | block2.repr };
}
/// Apply the inverse MixColumns operation to a block.
pub fn invMixColumns(block: Block) Block {
return Block{
.repr = asm (
\\ aesimc %[out].16b, %[in].16b
: [out] "=x" (-> Repr),
: [in] "x" (block.repr),
),
};
}
/// Perform operations on multiple blocks in parallel.
pub const parallel = struct {
/// The recommended number of AES encryption/decryption to perform in parallel for the chosen implementation.
@ -286,15 +275,6 @@ pub fn BlockVec(comptime blocks_count: comptime_int) type {
}
return out;
}
/// Apply the inverse MixColumns operation to each block in the vector.
pub fn invMixColumns(block_vec: Self) Self {
var out: Self = undefined;
inline for (0..native_words) |i| {
out.repr[i] = block_vec.repr[i].invMixColumns();
}
return out;
}
};
}

View file

@ -265,26 +265,6 @@ pub const Block = struct {
return Block{ .repr = x };
}
/// Apply the inverse MixColumns operation to a block.
pub fn invMixColumns(block: Block) Block {
var out: Repr = undefined;
inline for (0..4) |i| {
const col = block.repr[i];
const b0: u8 = @truncate(col);
const b1: u8 = @truncate(col >> 8);
const b2: u8 = @truncate(col >> 16);
const b3: u8 = @truncate(col >> 24);
const r0 = mul(0x0e, b0) ^ mul(0x0b, b1) ^ mul(0x0d, b2) ^ mul(0x09, b3);
const r1 = mul(0x09, b0) ^ mul(0x0e, b1) ^ mul(0x0b, b2) ^ mul(0x0d, b3);
const r2 = mul(0x0d, b0) ^ mul(0x09, b1) ^ mul(0x0e, b2) ^ mul(0x0b, b3);
const r3 = mul(0x0b, b0) ^ mul(0x0d, b1) ^ mul(0x09, b2) ^ mul(0x0e, b3);
out[i] = @as(u32, r0) | (@as(u32, r1) << 8) | (@as(u32, r2) << 16) | (@as(u32, r3) << 24);
}
return Block{ .repr = out };
}
/// Perform operations on multiple blocks in parallel.
pub const parallel = struct {
/// The recommended number of AES encryption/decryption to perform in parallel for the chosen implementation.
@ -461,15 +441,6 @@ pub fn BlockVec(comptime blocks_count: comptime_int) type {
}
return out;
}
/// Apply the inverse MixColumns operation to each block in the vector.
pub fn invMixColumns(block_vec: Self) Self {
var out: Self = undefined;
for (0..native_words) |i| {
out.repr[i] = block_vec.repr[i].invMixColumns();
}
return out;
}
};
}

View file

@ -7,11 +7,12 @@ const builtin = @import("builtin");
const blake2 = crypto.hash.blake2;
const crypto = std.crypto;
const Io = std.Io;
const math = std.math;
const mem = std.mem;
const phc_format = pwhash.phc_format;
const pwhash = crypto.pwhash;
const Thread = std.Thread;
const Blake2b512 = blake2.Blake2b512;
const Blocks = std.array_list.AlignedManaged([block_length]u64, .@"16");
const H0 = [Blake2b512.digest_length + 8]u8;
@ -203,20 +204,20 @@ fn initBlocks(
}
fn processBlocks(
allocator: mem.Allocator,
blocks: *Blocks,
time: u32,
memory: u32,
threads: u24,
mode: Mode,
io: Io,
) void {
) KdfError!void {
const lanes = memory / threads;
const segments = lanes / sync_points;
if (builtin.single_threaded or threads == 1) {
processBlocksSt(blocks, time, memory, threads, mode, lanes, segments);
} else {
processBlocksMt(blocks, time, memory, threads, mode, lanes, segments, io);
try processBlocksMt(allocator, blocks, time, memory, threads, mode, lanes, segments);
}
}
@ -242,6 +243,7 @@ fn processBlocksSt(
}
fn processBlocksMt(
allocator: mem.Allocator,
blocks: *Blocks,
time: u32,
memory: u32,
@ -249,20 +251,26 @@ fn processBlocksMt(
mode: Mode,
lanes: u32,
segments: u32,
io: Io,
) void {
) KdfError!void {
var threads_list = try std.array_list.Managed(Thread).initCapacity(allocator, threads);
defer threads_list.deinit();
var n: u32 = 0;
while (n < time) : (n += 1) {
var slice: u32 = 0;
while (slice < sync_points) : (slice += 1) {
var group: Io.Group = .init;
var lane: u24 = 0;
while (lane < threads) : (lane += 1) {
group.async(io, processSegment, .{
const thread = try Thread.spawn(.{}, processSegment, .{
blocks, time, memory, threads, mode, lanes, segments, n, slice, lane,
});
threads_list.appendAssumeCapacity(thread);
}
group.wait(io);
lane = 0;
while (lane < threads) : (lane += 1) {
threads_list.items[lane].join();
}
threads_list.clearRetainingCapacity();
}
}
}
@ -481,7 +489,6 @@ pub fn kdf(
salt: []const u8,
params: Params,
mode: Mode,
io: Io,
) KdfError!void {
if (derived_key.len < 4) return KdfError.WeakParameters;
if (derived_key.len > max_int) return KdfError.OutputTooLong;
@ -503,7 +510,7 @@ pub fn kdf(
blocks.appendNTimesAssumeCapacity(@splat(0), memory);
initBlocks(&blocks, &h0, memory, params.p);
processBlocks(&blocks, params.t, memory, params.p, mode, io);
try processBlocks(allocator, &blocks, params.t, memory, params.p, mode);
finalize(&blocks, memory, params.p, derived_key);
}
@ -526,7 +533,6 @@ const PhcFormatHasher = struct {
params: Params,
mode: Mode,
buf: []u8,
io: Io,
) HasherError![]const u8 {
if (params.secret != null or params.ad != null) return HasherError.InvalidEncoding;
@ -534,7 +540,7 @@ const PhcFormatHasher = struct {
crypto.random.bytes(&salt);
var hash: [default_hash_len]u8 = undefined;
try kdf(allocator, &hash, password, &salt, params, mode, io);
try kdf(allocator, &hash, password, &salt, params, mode);
return phc_format.serialize(HashResult{
.alg_id = @tagName(mode),
@ -551,7 +557,6 @@ const PhcFormatHasher = struct {
allocator: mem.Allocator,
str: []const u8,
password: []const u8,
io: Io,
) HasherError!void {
const hash_result = try phc_format.deserialize(HashResult, str);
@ -567,7 +572,7 @@ const PhcFormatHasher = struct {
if (expected_hash.len > hash_buf.len) return HasherError.InvalidEncoding;
const hash = hash_buf[0..expected_hash.len];
try kdf(allocator, hash, password, hash_result.salt.constSlice(), params, mode, io);
try kdf(allocator, hash, password, hash_result.salt.constSlice(), params, mode);
if (!mem.eql(u8, hash, expected_hash)) return HasherError.PasswordVerificationFailed;
}
};
@ -590,7 +595,6 @@ pub fn strHash(
password: []const u8,
options: HashOptions,
out: []u8,
io: Io,
) Error![]const u8 {
const allocator = options.allocator orelse return Error.AllocatorRequired;
switch (options.encoding) {
@ -600,7 +604,6 @@ pub fn strHash(
options.params,
options.mode,
out,
io,
),
.crypt => return Error.InvalidEncoding,
}
@ -618,10 +621,9 @@ pub fn strVerify(
str: []const u8,
password: []const u8,
options: VerifyOptions,
io: Io,
) Error!void {
const allocator = options.allocator orelse return Error.AllocatorRequired;
return PhcFormatHasher.verify(allocator, str, password, io);
return PhcFormatHasher.verify(allocator, str, password);
}
test "argon2d" {
@ -638,7 +640,6 @@ test "argon2d" {
&salt,
.{ .t = 3, .m = 32, .p = 4, .secret = &secret, .ad = &ad },
.argon2d,
std.testing.io,
);
const want = [_]u8{
@ -664,7 +665,6 @@ test "argon2i" {
&salt,
.{ .t = 3, .m = 32, .p = 4, .secret = &secret, .ad = &ad },
.argon2i,
std.testing.io,
);
const want = [_]u8{
@ -690,7 +690,6 @@ test "argon2id" {
&salt,
.{ .t = 3, .m = 32, .p = 4, .secret = &secret, .ad = &ad },
.argon2id,
std.testing.io,
);
const want = [_]u8{
@ -801,44 +800,44 @@ test "kdf" {
.{
.mode = .argon2i,
.time = 4,
.memory = 256,
.memory = 4096,
.threads = 4,
.hash = "f7dbbacbf16999e3700817a7e06f65a8db2e9fa9504ede4c",
.hash = "a11f7b7f3f93f02ad4bddb59ab62d121e278369288a0d0e7",
},
.{
.mode = .argon2d,
.time = 4,
.memory = 256,
.memory = 4096,
.threads = 4,
.hash = "ea2970501cf49faa5ba1d2e6370204e9b57ca90a8fea937b",
.hash = "935598181aa8dc2b720914aa6435ac8d3e3a4210c5b0fb2d",
},
.{
.mode = .argon2id,
.time = 4,
.memory = 256,
.memory = 4096,
.threads = 4,
.hash = "fbd40d5a8cb92f88c20bda4b3cdb1f9d5af1efa937032410",
.hash = "145db9733a9f4ee43edf33c509be96b934d505a4efb33c5a",
},
.{
.mode = .argon2i,
.time = 4,
.memory = 256,
.memory = 1024,
.threads = 8,
.hash = "15d3c398364e53f68fd12d19baf3f21432d964254fe27467",
.hash = "0cdd3956aa35e6b475a7b0c63488822f774f15b43f6e6e17",
},
.{
.mode = .argon2d,
.time = 4,
.memory = 256,
.memory = 1024,
.threads = 8,
.hash = "23c9adc06f06e21e4612c1466a1be02627690932b02c0df0",
.hash = "83604fc2ad0589b9d055578f4d3cc55bc616df3578a896e9",
},
.{
.mode = .argon2id,
.time = 4,
.memory = 256,
.memory = 1024,
.threads = 8,
.hash = "f22802f8ca47be93f9954e4ce20c1e944e938fbd4a125d9d",
.hash = "8dafa8e004f8ea96bf7c0f93eecf67a6047476143d15577f",
},
.{
.mode = .argon2i,
@ -864,23 +863,23 @@ test "kdf" {
.{
.mode = .argon2i,
.time = 3,
.memory = 256,
.memory = 1024,
.threads = 6,
.hash = "ebc8f91964abd8ceab49a12963b0a9e57d635bfa2aad2884",
.hash = "d236b29c2b2a09babee842b0dec6aa1e83ccbdea8023dced",
},
.{
.mode = .argon2d,
.time = 3,
.memory = 256,
.memory = 1024,
.threads = 6,
.hash = "1dd7202fd68da6675f769f4034b7a1db30d8785331954117",
.hash = "a3351b0319a53229152023d9206902f4ef59661cdca89481",
},
.{
.mode = .argon2id,
.time = 3,
.memory = 256,
.memory = 1024,
.threads = 6,
.hash = "424436b6ee22a66b04b9d0cf78f190305c5c166bae8baa09",
.hash = "1640b932f4b60e272f5d2207b9a9c626ffa1bd88d2349016",
},
};
for (test_vectors) |v| {
@ -895,7 +894,6 @@ test "kdf" {
salt,
.{ .t = v.time, .m = v.memory, .p = v.threads },
v.mode,
std.testing.io,
);
try std.testing.expectEqualSlices(u8, &dk, &want);
@ -905,7 +903,6 @@ test "kdf" {
test "phc format hasher" {
const allocator = std.testing.allocator;
const password = "testpass";
const io = std.testing.io;
var buf: [128]u8 = undefined;
const hash = try PhcFormatHasher.create(
@ -914,29 +911,25 @@ test "phc format hasher" {
.{ .t = 3, .m = 32, .p = 4 },
.argon2id,
&buf,
io,
);
try PhcFormatHasher.verify(allocator, hash, password, io);
try PhcFormatHasher.verify(allocator, hash, password);
}
test "password hash and password verify" {
const allocator = std.testing.allocator;
const password = "testpass";
const io = std.testing.io;
var buf: [128]u8 = undefined;
const hash = try strHash(
password,
.{ .allocator = allocator, .params = .{ .t = 3, .m = 32, .p = 4 } },
&buf,
io,
);
try strVerify(hash, password, .{ .allocator = allocator }, io);
try strVerify(hash, password, .{ .allocator = allocator });
}
test "kdf derived key length" {
const allocator = std.testing.allocator;
const io = std.testing.io;
const password = "testpass";
const salt = "saltsalt";
@ -944,11 +937,11 @@ test "kdf derived key length" {
const mode = Mode.argon2id;
var dk1: [11]u8 = undefined;
try kdf(allocator, &dk1, password, salt, params, mode, io);
try kdf(allocator, &dk1, password, salt, params, mode);
var dk2: [77]u8 = undefined;
try kdf(allocator, &dk2, password, salt, params, mode, io);
try kdf(allocator, &dk2, password, salt, params, mode);
var dk3: [111]u8 = undefined;
try kdf(allocator, &dk3, password, salt, params, mode, io);
try kdf(allocator, &dk3, password, salt, params, mode);
}

View file

@ -450,7 +450,6 @@ fn benchmarkPwhash(
comptime ty: anytype,
comptime params: *const anyopaque,
comptime count: comptime_int,
io: std.Io,
) !f64 {
const password = "testpass" ** 2;
const opts = ty.HashOptions{
@ -460,20 +459,12 @@ fn benchmarkPwhash(
};
var buf: [256]u8 = undefined;
const strHash = ty.strHash;
const strHashFnInfo = @typeInfo(@TypeOf(strHash)).@"fn";
const needs_io = strHashFnInfo.params.len == 4;
var timer = try Timer.start();
const start = timer.lap();
{
var i: usize = 0;
while (i < count) : (i += 1) {
if (needs_io) {
_ = try strHash(password, opts, &buf, io);
} else {
_ = try strHash(password, opts, &buf);
}
_ = try ty.strHash(password, opts, &buf);
mem.doNotOptimizeAway(&buf);
}
}
@ -632,7 +623,7 @@ pub fn main() !void {
inline for (pwhashes) |H| {
if (filter == null or std.mem.indexOf(u8, H.name, filter.?) != null) {
const throughput = try benchmarkPwhash(arena_allocator, H.ty, H.params, mode(64), io);
const throughput = try benchmarkPwhash(arena_allocator, H.ty, H.params, mode(64));
try stdout.print("{s:>17}: {d:10.3} s/ops\n", .{ H.name, throughput });
try stdout.flush();
}

View file

@ -1748,7 +1748,9 @@ pub fn settimeofday(tv: *const timeval, tz: *const timezone) usize {
}
pub fn nanosleep(req: *const timespec, rem: ?*timespec) usize {
return syscall2(.nanosleep, @intFromPtr(req), @intFromPtr(rem));
if (native_arch == .riscv32) {
@compileError("No nanosleep syscall on this architecture.");
} else return syscall2(.nanosleep, @intFromPtr(req), @intFromPtr(rem));
}
pub fn pause() usize {
@ -3771,7 +3773,6 @@ pub const SIG = if (is_mips) enum(u32) {
PROF = 29,
XCPU = 30,
XFZ = 31,
_,
} else if (is_sparc) enum(u32) {
pub const BLOCK = 1;
pub const UNBLOCK = 2;
@ -3817,7 +3818,6 @@ pub const SIG = if (is_mips) enum(u32) {
LOST = 29,
USR1 = 30,
USR2 = 31,
_,
} else enum(u32) {
pub const BLOCK = 0;
pub const UNBLOCK = 1;
@ -3861,7 +3861,6 @@ pub const SIG = if (is_mips) enum(u32) {
IO = 29,
PWR = 30,
SYS = 31,
_,
};
pub const kernel_rwf = u32;

View file

@ -1360,7 +1360,6 @@ pub fn writev(fd: fd_t, iov: []const iovec_const) WriteError!usize {
.PIPE => return error.BrokenPipe,
.CONNRESET => return error.ConnectionResetByPeer,
.BUSY => return error.DeviceBusy,
.CANCELED => return error.Canceled,
else => |err| return unexpectedErrno(err),
}
}

View file

@ -690,9 +690,6 @@ pub const ArgIteratorWasi = struct {
/// Call to free the internal buffer of the iterator.
pub fn deinit(self: *ArgIteratorWasi) void {
// Nothing is allocated when there are no args
if (self.args.len == 0) return;
const last_item = self.args[self.args.len - 1];
const last_byte_addr = @intFromPtr(last_item.ptr) + last_item.len + 1; // null terminated
const first_item_ptr = self.args[0].ptr;

View file

@ -1195,9 +1195,6 @@ pub fn lastToken(tree: Ast, node: Node.Index) TokenIndex {
if (extra.section_node.unwrap()) |section_node| {
end_offset += 1; // for the rparen
n = section_node;
} else if (extra.addrspace_node.unwrap()) |addrspace_node| {
end_offset += 1; // for the rparen
n = addrspace_node;
} else if (extra.align_node.unwrap()) |align_node| {
end_offset += 1; // for the rparen
n = align_node;

View file

@ -6084,16 +6084,6 @@ test "zig fmt: do not canonicalize invalid cast builtins" {
);
}
test "zig fmt: extern addrspace in struct" {
try testCanonical(
\\const namespace = struct {
\\ extern const num: u8 addrspace(.generic);
\\};
\\// comment
\\
);
}
test "recovery: top level" {
try testError(
\\test "" {inline}

View file

@ -175,11 +175,9 @@ const ComptimeAlloc = struct {
/// `src` may be `null` if `is_const` will be set.
fn newComptimeAlloc(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type, alignment: Alignment) !ComptimeAllocIndex {
const pt = sema.pt;
const init_val = try sema.typeHasOnePossibleValue(ty) orelse try pt.undefValue(ty);
const idx = sema.comptime_allocs.items.len;
try sema.comptime_allocs.append(sema.gpa, .{
.val = .{ .interned = init_val.toIntern() },
.val = .{ .interned = try sema.pt.intern(.{ .undef = ty.toIntern() }) },
.is_const = false,
.src = src,
.alignment = alignment,

View file

@ -3445,22 +3445,13 @@ pub fn optEuBaseType(ty: Type, zcu: *const Zcu) Type {
pub fn toUnsigned(ty: Type, pt: Zcu.PerThread) !Type {
const zcu = pt.zcu;
return switch (ty.toIntern()) {
// zig fmt: off
.usize_type, .isize_type => .usize,
.c_ushort_type, .c_short_type => .c_ushort,
.c_uint_type, .c_int_type => .c_uint,
.c_ulong_type, .c_long_type => .c_ulong,
.c_ulonglong_type, .c_longlong_type => .c_ulonglong,
// zig fmt: on
else => switch (ty.zigTypeTag(zcu)) {
.int => pt.intType(.unsigned, ty.intInfo(zcu).bits),
.vector => try pt.vectorType(.{
.len = ty.vectorLen(zcu),
.child = (try ty.childType(zcu).toUnsigned(pt)).toIntern(),
}),
else => unreachable,
},
return switch (ty.zigTypeTag(zcu)) {
.int => pt.intType(.unsigned, ty.intInfo(zcu).bits),
.vector => try pt.vectorType(.{
.len = ty.vectorLen(zcu),
.child = (try ty.childType(zcu).toUnsigned(pt)).toIntern(),
}),
else => unreachable,
};
}

View file

@ -1499,18 +1499,22 @@ const aarch64 = struct {
.ABS64 => {
try atom.scanReloc(symbol, rel, dynAbsRelocAction(symbol, elf_file), elf_file);
},
.ADR_PREL_PG_HI21 => {
try atom.scanReloc(symbol, rel, pcRelocAction(symbol, elf_file), elf_file);
},
.ADR_GOT_PAGE => {
// TODO: relax if possible
symbol.flags.needs_got = true;
},
.LD64_GOT_LO12_NC,
.LD64_GOTPAGE_LO15,
=> {
symbol.flags.needs_got = true;
},
.CALL26,
.JUMP26,
=> {
@ -1518,21 +1522,25 @@ const aarch64 = struct {
symbol.flags.needs_plt = true;
}
},
.TLSLE_ADD_TPREL_HI12,
.TLSLE_ADD_TPREL_LO12_NC,
=> {
if (is_dyn_lib) try atom.reportPicError(symbol, rel, elf_file);
},
.TLSIE_ADR_GOTTPREL_PAGE21,
.TLSIE_LD64_GOTTPREL_LO12_NC,
=> {
symbol.flags.needs_gottp = true;
},
.TLSGD_ADR_PAGE21,
.TLSGD_ADD_LO12_NC,
=> {
symbol.flags.needs_tlsgd = true;
},
.TLSDESC_ADR_PAGE21,
.TLSDESC_LD64_LO12,
.TLSDESC_ADD_LO12,
@ -1543,17 +1551,18 @@ const aarch64 = struct {
symbol.flags.needs_tlsdesc = true;
}
},
.ADD_ABS_LO12_NC,
.ADR_PREL_LO21,
.CONDBR19,
.LDST128_ABS_LO12_NC,
.LDST8_ABS_LO12_NC,
.LDST16_ABS_LO12_NC,
.LDST32_ABS_LO12_NC,
.LDST64_ABS_LO12_NC,
.LDST8_ABS_LO12_NC,
.LDST128_ABS_LO12_NC,
.PREL32,
.PREL64,
=> {},
else => try atom.reportUnhandledRelocError(rel, elf_file),
}
}
@ -1590,6 +1599,7 @@ const aarch64 = struct {
r_offset,
);
},
.CALL26,
.JUMP26,
=> {
@ -1601,26 +1611,27 @@ const aarch64 = struct {
};
util.writeBranchImm(disp, code);
},
.CONDBR19 => {
const value = math.cast(i19, S + A - P) orelse return error.Overflow;
util.writeCondBrImm(value, code);
},
.PREL32 => {
const value = math.cast(i32, S + A - P) orelse return error.Overflow;
mem.writeInt(u32, code, @bitCast(value), .little);
},
.PREL64 => {
const value = S + A - P;
mem.writeInt(u64, code_buffer[r_offset..][0..8], @bitCast(value), .little);
},
.ADR_PREL_LO21 => {
const value = math.cast(i21, S + A - P) orelse return error.Overflow;
util.writeAdrInst(value, code);
},
.ADR_PREL_PG_HI21 => {
// TODO: check for relaxation of ADRP+ADD
util.writeAdrInst(try util.calcNumberOfPages(P, S + A), code);
},
.ADR_GOT_PAGE => if (target.flags.has_got) {
util.writeAdrInst(try util.calcNumberOfPages(P, G + GOT + A), code);
} else {
@ -1633,15 +1644,18 @@ const aarch64 = struct {
r_offset,
});
},
.LD64_GOT_LO12_NC => {
assert(target.flags.has_got);
const taddr = @as(u64, @intCast(G + GOT + A));
util.writeLoadStoreRegInst(@divExact(@as(u12, @truncate(taddr)), 8), code);
},
.ADD_ABS_LO12_NC => {
const taddr = @as(u64, @intCast(S + A));
util.writeAddImmInst(@truncate(taddr), code);
},
.LDST8_ABS_LO12_NC,
.LDST16_ABS_LO12_NC,
.LDST32_ABS_LO12_NC,
@ -1660,37 +1674,44 @@ const aarch64 = struct {
};
util.writeLoadStoreRegInst(off, code);
},
.TLSLE_ADD_TPREL_HI12 => {
const value = math.cast(i12, (S + A - TP) >> 12) orelse
return error.Overflow;
util.writeAddImmInst(@bitCast(value), code);
},
.TLSLE_ADD_TPREL_LO12_NC => {
const value: i12 = @truncate(S + A - TP);
util.writeAddImmInst(@bitCast(value), code);
},
.TLSIE_ADR_GOTTPREL_PAGE21 => {
const S_ = target.gotTpAddress(elf_file);
relocs_log.debug(" [{x} => {x}]", .{ P, S_ + A });
util.writeAdrInst(try util.calcNumberOfPages(P, S_ + A), code);
},
.TLSIE_LD64_GOTTPREL_LO12_NC => {
const S_ = target.gotTpAddress(elf_file);
relocs_log.debug(" [{x} => {x}]", .{ P, S_ + A });
const off: u12 = try math.divExact(u12, @truncate(@as(u64, @bitCast(S_ + A))), 8);
util.writeLoadStoreRegInst(off, code);
},
.TLSGD_ADR_PAGE21 => {
const S_ = target.tlsGdAddress(elf_file);
relocs_log.debug(" [{x} => {x}]", .{ P, S_ + A });
util.writeAdrInst(try util.calcNumberOfPages(P, S_ + A), code);
},
.TLSGD_ADD_LO12_NC => {
const S_ = target.tlsGdAddress(elf_file);
relocs_log.debug(" [{x} => {x}]", .{ P, S_ + A });
const off: u12 = @truncate(@as(u64, @bitCast(S_ + A)));
util.writeAddImmInst(off, code);
},
.TLSDESC_ADR_PAGE21 => {
if (target.flags.has_tlsdesc) {
const S_ = target.tlsDescAddress(elf_file);
@ -1701,6 +1722,7 @@ const aarch64 = struct {
util.encoding.Instruction.nop().write(code);
}
},
.TLSDESC_LD64_LO12 => {
if (target.flags.has_tlsdesc) {
const S_ = target.tlsDescAddress(elf_file);
@ -1712,6 +1734,7 @@ const aarch64 = struct {
util.encoding.Instruction.nop().write(code);
}
},
.TLSDESC_ADD_LO12 => {
if (target.flags.has_tlsdesc) {
const S_ = target.tlsDescAddress(elf_file);
@ -1724,11 +1747,13 @@ const aarch64 = struct {
util.encoding.Instruction.movz(.x0, value, .{ .lsl = .@"16" }).write(code);
}
},
.TLSDESC_CALL => if (!target.flags.has_tlsdesc) {
relocs_log.debug(" relaxing br => movk(x0, {x})", .{S + A - TP});
const value: u16 = @bitCast(@as(i16, @truncate(S + A - TP)));
util.encoding.Instruction.movk(.x0, value, .{}).write(code);
},
else => try atom.reportUnhandledRelocError(rel, elf_file),
}
}

View file

@ -29,12 +29,6 @@ pub fn writeBranchImm(disp: i28, code: *[4]u8) void {
inst.write(code);
}
pub fn writeCondBrImm(disp: i19, code: *[4]u8) void {
var inst: encoding.Instruction = .read(code);
inst.branch_exception_generating_system.conditional_branch_immediate.group.imm19 = @intCast(@shrExact(disp, 2));
inst.write(code);
}
const assert = std.debug.assert;
const builtin = @import("builtin");
const math = std.math;

View file

@ -595,7 +595,7 @@ const Writer = struct {
},
.reify_slice_arg_ty => {
const reify_slice_arg_info: Zir.Inst.ReifySliceArgInfo = @enumFromInt(extended.small);
const reify_slice_arg_info: Zir.Inst.ReifySliceArgInfo = @enumFromInt(extended.operand);
const extra = self.code.extraData(Zir.Inst.UnNode, extended.operand).data;
try stream.print("{t}, ", .{reify_slice_arg_info});
try self.writeInstRef(stream, extra.operand);

View file

@ -1,6 +1,5 @@
const builtin = @import("builtin");
const std = @import("std");
const assert = std.debug.assert;
const expect = std.testing.expect;
test "@abs integers" {
@ -49,33 +48,6 @@ fn testAbsIntegers() !void {
}
}
test "@abs signed C ABI integers" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
const S = struct {
fn doTheTest() !void {
try testOne(isize, usize);
try testOne(c_short, c_ushort);
try testOne(c_int, c_uint);
try testOne(c_long, c_ulong);
if (!builtin.cpu.arch.isSpirV()) try testOne(c_longlong, c_ulonglong);
}
fn testOne(comptime Signed: type, comptime Unsigned: type) !void {
var negative_one: Signed = undefined;
negative_one = -1;
const one = @abs(negative_one);
comptime assert(@TypeOf(one) == Unsigned);
try expect(one == 1);
}
};
try S.doTheTest();
try comptime S.doTheTest();
}
test "@abs unsigned integers" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@ -115,32 +87,6 @@ fn testAbsUnsignedIntegers() !void {
}
}
test "@abs unsigned C ABI integers" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
fn doTheTest() !void {
try testOne(usize);
try testOne(c_ushort);
try testOne(c_uint);
try testOne(c_ulong);
if (!builtin.cpu.arch.isSpirV()) try testOne(c_ulonglong);
}
fn testOne(comptime Unsigned: type) !void {
var one: Unsigned = undefined;
one = 1;
const still_one = @abs(one);
comptime assert(@TypeOf(still_one) == Unsigned);
try expect(still_one == 1);
}
};
try S.doTheTest();
try comptime S.doTheTest();
}
test "@abs big int <= 128 bits" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO

View file

@ -204,8 +204,6 @@ test "type coercion from int to float" {
try check.edgeValues(f128, u113);
try check.edgeValues(f128, i114);
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
// Basic sanity check that the coercions work for vectors too.
const int_vec: @Vector(2, u24) = @splat(123);
try check.value(@Vector(2, f32), int_vec);

View file

@ -427,12 +427,3 @@ test "undefined type value" {
};
comptime assert(@TypeOf(S.undef_type) == type);
}
test "reify struct with zero fields through const arrays" {
const names: [0][]const u8 = .{};
const types: [0]type = .{};
const attrs: [0]std.builtin.Type.StructField.Attributes = .{};
const S = @Struct(.auto, null, &names, &types, &attrs);
comptime assert(@typeInfo(S) == .@"struct");
comptime assert(@typeInfo(S).@"struct".fields.len == 0);
}